blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
e50c10f7e8e12f489c20e5d6da2a38db2c7cade8 | Python | ZeroMVP/ProgLife | /AiSD/1_Week/InsertionSort.py | UTF-8 | 557 | 3 | 3 | [] | no_license | f = open('input.txt')
# Read all input lines: first line is the element count, second the array.
# (The input file handle `f` is opened just above this block.)
data = []
for line in f:
    data.append(line)
f.close()  # release the input file handle once everything is read
n = int(data[0])
array = data[1].split(' ')
array = [int(item) for item in array]
# change_array[j-1] records the final (1-based) position element j landed
# in after its insertion pass; element 1 trivially stays at position 1.
change_array = [1,]
for j in range(2, n + 1):
    i = j - 1
    # Standard insertion sort pass: swap element j leftwards while it is
    # smaller than its left neighbour.  (A dead no-op statement
    # `array[i - 1]` was removed here.)
    while i > 0 and array[i] < array[i - 1]:
        c = array[i - 1]
        array[i - 1] = array[i]
        array[i] = c
        i = i - 1
    change_array.append(i + 1)
str1 = ' '.join(str(e) for e in change_array)
str2 = ' '.join(str(e) for e in array)
f = open('output.txt', 'w')
f.write(str1)
f.write('\n')
f.write(str2)
f.close()  # flush and close the output file explicitly
f.write(str2) | true |
ef7f6469ec88476da95ed9b092012b8dde033f1c | Python | JiahangGu/leetcode | /Medium Topic/src/20-10-4-59-rotate-matrix-ii.py | UTF-8 | 949 | 3.390625 | 3 | [] | no_license | #!/usr/bin/env python
# encoding: utf-8
# @Time:2020/10/4 16:49
# @Author:JiahangGu
from typing import List
class Solution:
    def generateMatrix(self, n: int) -> List[List[int]]:
        """Build an n x n matrix filled with 1..n^2 in clockwise spiral order.

        Walk cell by cell starting at the top-left corner, heading right.
        Whenever the next cell would leave the grid or hit an already
        filled cell (cells start out as the sentinel -1), turn clockwise.
        """
        grid = [[-1] * n for _ in range(n)]
        # Clockwise headings: right, down, left, up.
        directions = ((0, 1), (1, 0), (0, -1), (-1, 0))
        row = col = heading = 0
        for value in range(1, n * n + 1):
            grid[row][col] = value
            next_row = row + directions[heading][0]
            next_col = col + directions[heading][1]
            blocked = not (0 <= next_row < n and 0 <= next_col < n
                           and grid[next_row][next_col] == -1)
            if blocked:
                # Turn clockwise and step in the new direction instead.
                heading = (heading + 1) % 4
                next_row = row + directions[heading][0]
                next_col = col + directions[heading][1]
            row, col = next_row, next_col
        return grid
| true |
21bcdd920004b57b9fa2cb4367203004d5fc5463 | Python | shenzebang/Sinkhorn_Descent | /utils/base_module.py | UTF-8 | 4,787 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env python
# encoding: utf-8
import torch.nn as nn
import torch
import numpy as np
# input: batch_size * nc * 64 * 64
# output: batch_size * k * 1 * 1
class Encoder(nn.Module):
    """Convolutional encoder: (N, nc, isize, isize) images -> (N, k) codes.

    DCGAN-style pyramid: one stride-1 conv, then stride-2 convs that halve
    the spatial size (doubling channels) until it reaches 4x4, and a final
    4x4 valid conv that collapses to k feature maps of spatial size 1x1.
    """
    def __init__(self, isize, nc, k=100, ndf=64, bias=False):
        # isize: input height/width; nc: input channels; k: code length;
        # ndf: base number of feature maps; bias: whether convs use biases.
        super(Encoder, self).__init__()
        assert isize % 16 == 0, "isize has to be a multiple of 16"
        # input is nc x isize x isize
        main = nn.Sequential()
        main.add_module('initial_conv-{0}-{1}'.format(nc, ndf),
                        nn.Conv2d(nc, ndf, 5, 1, 2, bias=bias))
        main.add_module('initial_relu-{0}'.format(ndf),
                        nn.LeakyReLU(0.2, inplace=True))
        csize, cndf = isize, ndf
        while csize > 4:
            in_feat = cndf
            out_feat = cndf * 2
            main.add_module('pyramid-{0}-{1}-conv'.format(in_feat, out_feat),
                            nn.Conv2d(in_feat, out_feat, 5, 2, 2, bias=bias))
            main.add_module('pyramid-{0}-batchnorm'.format(out_feat),
                            nn.BatchNorm2d(out_feat))
            main.add_module('pyramid-{0}-relu'.format(out_feat),
                            nn.LeakyReLU(0.2, inplace=True))
            cndf = cndf * 2
            # NOTE: true division makes csize a float after one iteration;
            # the `> 4` loop condition still terminates correctly because
            # isize is a multiple of 16.
            csize = csize / 2
        main.add_module('final-{0}-{1}-conv'.format(cndf, 1),
                        nn.Conv2d(cndf, k, 4, 1, 0, bias=bias))
        self.main = main
    def forward(self, input):
        # Run the conv stack, then flatten each sample to a 1-D code vector.
        output = self.main(input)
        output = output.view([-1, np.prod(output.shape[1:])])
        # norm = torch.norm(output, dim=1, keepdim=True)
        # output = output/torch.norm(output, dim=1, keepdim=True)
        return output
# input: batch_size * k * 1 * 1
# output: batch_size * nc * image_size * image_size
class Decoder(nn.Module):
    """Transposed-conv decoder: (N, k, 1, 1) codes -> (N, nc, isize, isize).

    Mirrors Encoder: a 4x4 transposed conv expands the code to 4x4,
    stride-2 transposed convs double the spatial size (halving channels)
    up to isize/2, and a final transposed conv + Tanh emits images whose
    values lie in [-1, 1].
    """
    def __init__(self, isize, nc, k=100, ngf=64):
        # isize: output height/width; nc: output channels; k: code length;
        # ngf: base number of feature maps.
        super(Decoder, self).__init__()
        assert isize % 16 == 0, "isize has to be a multiple of 16"
        # Work out the channel count of the first layer so that halving it
        # at every upsampling step ends at ngf when full size is reached.
        cngf, tisize = ngf // 2, 4
        while tisize != isize:
            cngf = cngf * 2
            tisize = tisize * 2
        main = nn.Sequential()
        main.add_module('initial-{0}-{1}-convt'.format(k, cngf), nn.ConvTranspose2d(k, cngf, 4, 1, 0, bias=False))
        main.add_module('initial-{0}-batchnorm'.format(cngf), nn.BatchNorm2d(cngf))
        main.add_module('initial-{0}-relu'.format(cngf), nn.ReLU(True))
        csize = 4
        while csize < isize // 2:
            main.add_module('pyramid-{0}-{1}-convt'.format(cngf, cngf // 2),
                            nn.ConvTranspose2d(cngf, cngf // 2, 4, 2, 1, bias=False))
            main.add_module('pyramid-{0}-batchnorm'.format(cngf // 2),
                            nn.BatchNorm2d(cngf // 2))
            main.add_module('pyramid-{0}-relu'.format(cngf // 2),
                            nn.ReLU(True))
            cngf = cngf // 2
            csize = csize * 2
        main.add_module('final-{0}-{1}-convt'.format(cngf, nc), nn.ConvTranspose2d(cngf, nc, 4, 2, 1, bias=False))
        main.add_module('final-{0}-tanh'.format(nc),
                        nn.Tanh())
        self.main = main
    def forward(self, input):
        output = self.main(input)
        return output
def grad_norm(m, norm_type=2):
    """Return the total gradient norm over a module's parameters.

    Computes (sum_p ||p.grad||_q ** q) ** (1/q), i.e. the q-norm of the
    concatenation of all parameter gradients.

    Args:
        m: torch.nn.Module whose parameter gradients are inspected.
        norm_type: the norm order q (default 2, the Euclidean norm).

    Returns:
        A scalar tensor, or the float 0.0 if no parameter holds a gradient.
    """
    total_norm = 0.0
    for p in m.parameters():
        if p.grad is None:
            # Parameters that never received a gradient (frozen layers,
            # unused branches, or before any backward()) made the original
            # code raise AttributeError; skip them instead.
            continue
        param_norm = p.grad.data.norm(norm_type)
        total_norm += param_norm ** norm_type
    total_norm = total_norm ** (1. / norm_type)
    return total_norm
def weights_init(m):
    """DCGAN-style weight initializer, meant for use with Module.apply().

    Conv* layers get N(0, 0.02) weights; BatchNorm* layers get N(0, 0.02)
    weights and zero bias; Linear layers get N(0, 0.1) weights and zero
    bias.  Any other module type is left untouched.
    """
    name = type(m).__name__
    if 'Conv' in name:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in name:
        m.weight.data.normal_(0.0, 0.02)
        m.bias.data.fill_(0)
    elif 'Linear' in name:
        m.weight.data.normal_(0.0, 0.1)
        m.bias.data.fill_(0)
def _squared_distances(x, y):
if x.dim() == 2:
D_xx = (x*x).sum(-1).unsqueeze(1) # (N,1)
D_xy = torch.matmul( x, y.permute(1,0) ) # (N,D) @ (D,M) = (N,M)
D_yy = (y*y).sum(-1).unsqueeze(0) # (1,M)
elif x.dim() == 3: # Batch computation
D_xx = (x*x).sum(-1).unsqueeze(2) # (B,N,1)
D_xy = torch.matmul( x, y.permute(0,2,1) ) # (B,N,D) @ (B,D,M) = (B,N,M)
D_yy = (y*y).sum(-1).unsqueeze(1) # (B,1,M)
else:
print("x.shape : ", x.shape)
raise ValueError("Incorrect number of dimensions")
return D_xx - 2*D_xy + D_yy
class GroundCost(nn.Module):
    """Learned ground cost: halved squared distance between encoder codes.

    Wraps an encoder φ and scores two point batches as
    ||φ(x) - φ(y)||^2 / 2, for use as the ground metric of an optimal
    transport / Sinkhorn solver.
    """
    def __init__(self, encoder, shape):
        super(GroundCost, self).__init__()
        self.φ = encoder    # feature map applied to both inputs
        self.shape = shape  # shape flat inputs are reshaped to before encoding
    def forward(self, x, y):
        # Reshape flat samples to image shape, embed, and drop size-1 dims.
        φ_x = self.φ(x.view(self.shape)).squeeze()
        φ_y = self.φ(y.view(self.shape)).squeeze()
        cost = _squared_distances(φ_x, φ_y)/2
        return cost
| true |
e88d6ddcfc0ca0946cfaeccbc4b8cf1e2fe50ce3 | Python | samkit-jain/Handwriting-Recognition | /Python/dataset.py | UTF-8 | 7,628 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | import argparse
import cv2
import gzip
import numpy as np
import os
import pathlib
import requests
import shutil
import struct
import tempfile
import zipfile
from os import path as osp
from progress.bar import IncrementalBar
from typing import Dict
class DatasetGenerator:
    """
    Class to download and create dataset for training and testing.

    Pipeline: download the EMNIST zip, extract the outer zip and the inner
    gzip'd idx files, then decode the idx images/labels and write one PNG
    per sample into per-label folders under data_dir_path.
    """
    # link to download EMNIST dataset
    emnist_dataset_url = 'https://www.itl.nist.gov/iaui/vip/cs_links/EMNIST/gzip.zip'
    def __init__(self, balanced: bool = True, download_dir_path: str = '', data_dir_path: str = ''):
        self.balanced = balanced  # whether to use the balanced dataset or not
        self.download_dir_path = download_dir_path
        self.data_dir_path = data_dir_path
        # Fall back to ./download and ./data when paths are None/empty/blank.
        if not(self.download_dir_path and self.download_dir_path.strip()):
            self.download_dir_path = osp.abspath('download/')
        if not(self.data_dir_path and self.data_dir_path.strip()):
            self.data_dir_path = osp.abspath('data/')
        pathlib.Path(self.download_dir_path).mkdir(parents=True, exist_ok=True)
        pathlib.Path(self.data_dir_path).mkdir(parents=True, exist_ok=True)
    def create(self):
        """
        Method to setup EMNIST dataset.
        Note: When saving to data dir, existing data is not removed.
        """
        print('Setting up EMNIST dataset')
        file_path = osp.abspath(osp.join(self.download_dir_path, osp.basename(DatasetGenerator.emnist_dataset_url)))
        # step 1: download
        DatasetGenerator.download_file(url=DatasetGenerator.emnist_dataset_url, dest=file_path)
        # step 2.1: extract main zip file
        DatasetGenerator.extract_zip_file(zip_fp=file_path)
        # create list to store idx file paths and label mappings
        dtype = 'balanced' if self.balanced else 'byclass'
        # idx paths saved as (image, label) pair
        idx_paths = [
            (
                osp.join(osp.dirname(file_path), 'gzip', f'emnist-{dtype}-train-images-idx3-ubyte.gz'),
                osp.join(osp.dirname(file_path), 'gzip', f'emnist-{dtype}-train-labels-idx1-ubyte.gz')
            ),
            (
                osp.join(osp.dirname(file_path), 'gzip', f'emnist-{dtype}-test-images-idx3-ubyte.gz'),
                osp.join(osp.dirname(file_path), 'gzip', f'emnist-{dtype}-test-labels-idx1-ubyte.gz')
            )
        ]
        # Mapping file lines are "<label index> <unicode codepoint>";
        # convert the codepoint to its character for folder names.
        label_mapping = {}
        with open(osp.join(osp.dirname(file_path), 'gzip', f'emnist-{dtype}-mapping.txt'), mode='r') as lm:
            for line in lm:
                key, value = line.split()
                label_mapping[key] = chr(int(value))
        # step 2.2: extract smaller gzip files
        for idx_pair in idx_paths:
            for idx_path in idx_pair:
                DatasetGenerator.extract_gzip_file(gzip_fp=idx_path)
        # step 3: save image files
        for idx_pair in idx_paths:
            self.idx_to_image(
                image_file=osp.splitext(idx_pair[0])[0],
                label_file=osp.splitext(idx_pair[1])[0],
                label_mapping=label_mapping
            )
    @staticmethod
    def download_file(url: str, dest: str):
        """
        Method to download a file from url and save at dest
        """
        print(f'Downloading file from {url} and saving to {dest}')
        response = requests.get(url, stream=True)
        total_size = int(response.headers.get('content-length'))
        chunk_size = 4096
        total_steps = int(total_size / chunk_size)
        progress_bar = IncrementalBar(max=total_steps, suffix='%(percent).1f%%')
        with open(dest, mode='wb') as fd:
            for chunk in response.iter_content(chunk_size=chunk_size):
                fd.write(chunk)
                progress_bar.next()
        progress_bar.finish()
    @staticmethod
    def extract_zip_file(zip_fp: str):
        """
        Method to extract a zip file and save it in the same directory as the zip file
        """
        print(f'Extracting {zip_fp}')
        with zipfile.ZipFile(zip_fp, 'r') as unzipped:
            unzipped.extractall(osp.dirname(zip_fp))
    @staticmethod
    def extract_gzip_file(gzip_fp: str):
        """
        Method to extract a gzip file and save it in the same directory as the gzip file
        """
        print(f'Extracting {gzip_fp}')
        with gzip.open(gzip_fp, 'rb') as zipped:
            with open(osp.splitext(gzip_fp)[0], mode='wb') as unzipped:
                shutil.copyfileobj(zipped, unzipped)
    def idx_to_image(self, image_file: str, label_file: str, label_mapping: Dict[str, str] = None):
        """Decode an idx image/label file pair and write one PNG per sample
        into a per-label subfolder of data_dir_path.

        The idx format is big-endian: a magic number (2051 for images,
        2049 for labels), counts, then raw uint8 payload.
        """
        print(f'Converting {image_file} to image files')
        with open(image_file, mode='rb') as image_stream, open(label_file, mode='rb') as label_stream:
            # save images dataset
            magic, num_images = struct.unpack('>II', image_stream.read(8))
            if magic != 2051:
                raise ValueError('Magic number invalid')
            num_rows, num_cols = struct.unpack('>II', image_stream.read(8))
            images = np.fromfile(image_stream, dtype=np.dtype(np.uint8).newbyteorder('>'))
            images = images.reshape((num_images, num_rows, num_cols))
            # save labels dataset
            magic, num_labels = struct.unpack('>II', label_stream.read(8))
            if magic != 2049:
                raise ValueError('Magic number invalid')
            labels = np.fromfile(label_stream, dtype=np.dtype(np.uint8).newbyteorder('>'))
            labels = labels.astype('str')
            labels = np.vectorize(lambda x: label_mapping[x])(labels) if label_mapping is not None else labels
            progress_bar = IncrementalBar(max=len(labels), suffix='%(percent).1f%%')
            # create missing directories
            for unique_label in np.unique(labels):
                label_folder = osp.abspath(osp.join(self.data_dir_path, unique_label))
                pathlib.Path(label_folder).mkdir(parents=True, exist_ok=True)
            # save images to data directory
            for label, image in zip(labels, images):
                label_folder = osp.abspath(osp.join(self.data_dir_path, label))
                # mkstemp gives each sample a unique file name.
                image_fd, image_dest = tempfile.mkstemp(dir=label_folder, suffix='.png')
                # EMNIST stores images transposed, hence image.T.
                cv2.imwrite(f'{image_dest}', image.T)
                os.close(image_fd)  # if not closed, can get OSError: [Errno 24] Too many open files
                progress_bar.next()
            progress_bar.finish()
if __name__ == '__main__':
    # Command-line entry point: parse paths/flags and run the generator.
    parser = argparse.ArgumentParser(
        description="""
        Script to download and create training data from EMNIST. Full dataset would be downloaded irrespective of
        unbalanced truth value.
        """,
        usage='%(prog)s [options]',
    )
    parser.add_argument(
        '-dop',
        '--download-path',
        dest='download_path',
        type=str,
        help='Path where dataset should be downloaded.',
    )
    parser.add_argument(
        '-dap',
        '--data-path',
        dest='data_path',
        type=str,
        help='Path where dataset images should be saved.',
    )
    parser.add_argument(
        '-ub',
        '--unbalanced',
        dest='unbalanced',
        action='store_true',
        default=False,
        help='Whether to use the unbalanced dataset or not',
    )
    args = parser.parse_args()
    # Omitted paths arrive as None; the constructor substitutes defaults.
    creator = DatasetGenerator(balanced=not args.unbalanced,
                               download_dir_path=args.download_path,
                               data_dir_path=args.data_path)
    creator.create()
| true |
f5b1d9120f62e68e3efa2f1bf4db6018b1a68018 | Python | jkarns275/examm_vis | /analysis/op_log.py | UTF-8 | 3,702 | 2.8125 | 3 | [] | no_license | import numpy as np
import csv
import matplotlib.pyplot as plt
import matplotlib as mpl
from collections import OrderedDict
class DataPoint:
    """One row of an EXAMM operator-log CSV.

    The class attributes OP_LOG_ORDERING / OP_LOG_HEADER / OP_VAR_NAMES are
    completed by the module-level loops that follow this class definition;
    instance attributes are then injected dynamically in __init__ from the
    CSV row, one per entry of OP_VAR_NAMES.
    """
    NODE_TYPES = [
        "simple",
        "jordan",
        "elman",
        "UGRNN",
        "MGU",
        "GRU",
        "LSTM",
        "delta",
    ]
    OP_LOG_ORDERING = [
        "genomes",
        "crossover",
        "island_crossover",
        "clone",
        "add_edge",
        "add_recurrent_edge",
        "enable_edge",
        "disable_edge",
        "enable_node",
        "disable_node",
    ]
    OPS_WITH_NODE_TYPE = [
        "add_node",
        "split_node",
        "merge_node",
        "split_edge",
    ]
    OP_LOG_HEADER = []
    OP_VAR_NAMES = []
    def __init__(self, *argv):
        # Extra trailing values in argv are ignored; only the first
        # len(OP_VAR_NAMES) are bound as attributes.
        assert len(argv) >= len(DataPoint.OP_VAR_NAMES)
        for name, value in zip(DataPoint.OP_VAR_NAMES, argv):
            self.__setattr__(name, value)
    def as_dict(self):
        """Return the dynamically-bound attributes as a plain dict."""
        d = {}
        for name in DataPoint.OP_VAR_NAMES:
            d[name] = self.__getattribute__(name)
        return d
    def __str__(self):
        # NOTE(review): bp_epochs/time/mae/mse/nodes/edges/rec_edges are not
        # in OP_VAR_NAMES, so they must be assigned elsewhere before this is
        # called — confirm against the callers.
        return f"inserted genomes: {self.genomes}, bp_epochs: {self.bp_epochs}, " + \
               f"time: {self.time}, best mae: {self.mae}, best mse: {self.mse}, " + \
               f"enabled nodes: {self.nodes}, enabled edges: {self.edges}, " + \
               f"enabled recursive edges: {self.rec_edges}"
# Statically initialize DataPoint.OP_LOG_HEADER
# Node-typed operations appear once per node type, e.g. "add_node(simple)".
for op in DataPoint.OPS_WITH_NODE_TYPE:
    for node_ty in DataPoint.NODE_TYPES:
        DataPoint.OP_LOG_ORDERING.append(f"{op}({node_ty})")
# Each operation contributes two CSV columns: Generated and Inserted.
for op in DataPoint.OP_LOG_ORDERING:
    DataPoint.OP_LOG_HEADER.append(f"{op} Generated")
    DataPoint.OP_LOG_HEADER.append(f"{op} Inserted")
# Derive one python-identifier attribute name per operation, e.g.
# "add_node(simple)" -> "add_node_simple".
for op in DataPoint.OP_LOG_ORDERING:
    new_op: str = op.lower()
    new_op = new_op.replace("(", "_")
    new_op = new_op.replace(")", "")
    DataPoint.OP_VAR_NAMES.append(new_op)
class OperatorLog:
    """Loads one operator-log CSV into a list of DataPoint rows.

    self.valid is True only when at least one data row was parsed; on any
    non-assertion error during parsing the log is marked invalid instead
    of raising.
    """
    def __init__(self, file_path, name, fold):
        # python csv docs say use newline='' of using a file object
        self.data = []
        self.name = name
        self.fold = fold
        try:
            with open(file_path, newline='') as csvfile:
                csv_reader = csv.reader(csvfile, delimiter=',')
                # verify that the header order matches the one we've computed here
                for row in csv_reader:
                    for i, label in enumerate(DataPoint.OP_LOG_HEADER):
                        row[i] = row[i].strip()
                        assert label == row[i].strip()
                    # break because we are only concerned with the header
                    break
                # Remaining rows are data; the reader resumes after the header.
                for row in csv_reader:
                    dp = DataPoint(*row)
                    self.data.append(dp)
            self.valid = len(self.data) > 0
            if self.valid:
                # Number of distinct best-MSE values seen = "breakthroughs".
                self.breakthroughs = len(set(self.query(lambda l: l.mse)))
            print(f"{file_path} op log len: {len(self)}")
        except Exception as e:
            # Header mismatches must surface; everything else (missing file,
            # short rows, ...) just marks the log invalid.
            if type(e) == AssertionError:
                raise e
            print("Oh")
            print(str(e))
            self.valid = False
    def display(self):
        # print(f"    fold {self.fold}: len={len(self)}; min_mse={min(self.query(lambda l: l.mse))}; breakthrough_count={self.breakthroughs}")
        pass
    def get_row_dict(self, index):
        """Return row `index` as a plain dict."""
        dp = self.data[index]
        return dp.as_dict()
    def __getitem__(self, item):
        assert type(item) == int
        return self.data[item]
    def __len__(self):
        return len(self.data)
    def query(self, l):
        """Map callable `l` over all rows and return the results as a list."""
        return list(map(l, self.data))
| true |
495365630ab04e058401a6b1bf226162a22ff992 | Python | berezovskiydenis/ComputerScience | /adt/stack.py | UTF-8 | 915 | 4.34375 | 4 | [] | no_license |
class Stack:
    """A stack is a collection that is based on the last-in-first-out
    (LIFO) policy.

    Note: iteration yields items in insertion order (bottom to top) and
    uses a single shared cursor, so nested iteration over the same stack
    instance is not supported.
    """

    def __init__(self):
        """Create an empty stack."""
        self.items = []
        self._counter = 0  # cursor used by the iterator protocol

    def push(self, item):
        """Add an item to the top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the most recently added item.

        Raises IndexError if the stack is empty.
        """
        return self.items.pop()

    def is_empty(self):
        """Return True if the stack holds no items."""
        # Direct boolean result instead of the redundant
        # `True if ... else False` ternary.
        return self.size() == 0

    def size(self):
        """Return the number of items in the stack."""
        return len(self.items)

    def __iter__(self):
        return self

    def __next__(self):
        if self.size() == self._counter:
            # Reset the cursor so the stack can be iterated again later.
            self._counter = 0
            raise StopIteration
        item = self.items[self._counter]
        self._counter += 1
        return item
| true |
bd9098740417051957fb7a5b6eace2793c083e0e | Python | KULDEEPMALIKM41/Practices | /Python/Python Basics/78.exercise.py | UTF-8 | 621 | 3.765625 | 4 | [] | no_license |
# Demo: str.find(sub, start) returns the first index of sub at or after
# position `start`.
s='universal informatic'
s1='i'
print('element is found and index number is: ',s.find(s1,5))
# str.find returns -1 when the substring is not found in the string.
s=input('enter string data : ')
s1=input('what is search : ')
w=int(input('where start searching from : '))
s=s.find(s1,w)
if s==-1:
    print('element is not found')
else:
    print('element is found and index number is: ',s)
# Manual version of the search: scan character by character and report
# every index where the single-character needle matches.
s=input('enter string data : ')
s1=input('what is search : ')
flage=0
for i in range(len(s)):
    if s[i]==s1:
        print('element is found and index number is : ',i)
        flage=1
if flage==0:
print('element is not found') | true |
c91efd770935f02971baed81ff08ac084161e710 | Python | Nikoletazl/Basics-Python | /while_lab/sequence_2k+1.py | UTF-8 | 83 | 3.5 | 4 | [] | no_license | number = int(input())
# Print the sequence n_{k+1} = 2*n_k + 1 starting from 1 (i.e. the values
# 1, 3, 7, 15, ... = 2^k - 1) while the terms do not exceed `number`,
# which is read from stdin just above this block.
n = 1
while n <= number:
    print(n)
    n = 2 * n + 1
| true |
57b2da4212f99c389b178b49578bc050a5b284d0 | Python | nogorka/lighness-tracker-iot | /findBoard2.py | UTF-8 | 4,119 | 2.53125 | 3 | [] | no_license | from pymongo import MongoClient
import pymongo.errors as mongoerr
from serial.tools import list_ports
import serial
import time
from datetime import datetime
def find_port():
    """Probe all FTDI serial ports for the board and return its device name.

    Sends a "~" handshake to each candidate; the board answers "Elka".
    Returns None implicitly when no port matches.
    """
    ports = list_ports.comports()
    for port in ports:
        if port.manufacturer == "FTDI":
            print("checking ", port.device)
            s = serial.Serial(port.device, 115200, timeout=3)
            message = "~"
            s.write(message.encode())
            response = s.readline()
            response = response.decode().strip()
            s.close()
            print(f'close {port.device}')
            if (response == "Elka"):
                print("port found\n")
                return port.device
            else:
                print("not correct port\n")
def open_port():
    """Locate the board's port and return an open serial connection.

    NOTE(review): if find_port() returns None, serial.Serial(None, ...)
    raises — the caller's retry loop absorbs this, but confirm.
    """
    comport_name = find_port()
    print("port is", comport_name)
    s = serial.Serial(comport_name, 115200, timeout=2)
    print("port open")
    return s
def open_mongo():
    """Return a MongoClient for the remote logging database.

    NOTE(security): credentials are hard-coded in source — move them to a
    config file or environment variables.
    """
    client = MongoClient(
        host="roboforge.ru",
        username="admin",
        password="pinboard123",
        authSource="admin",
        serverSelectionTimeoutMS=5000,
        socketTimeoutMS=2000
    )
    return client
def get_value():
    """Poll the sensor over the module-global serial handle `s`.

    Sends the "&" request and parses the reply as an int; returns None
    when the reply cannot be decoded/parsed.
    """
    try:
        message = "&"
        s.write(message.encode())
        response = s.readline()
        value = int(response.decode().strip())
        print("recived: ", value, end=" ")
        return value
    except (ValueError, UnicodeDecodeError) as e:
        print("get value error: ", e)
        return None
def send_temp_records():
    """Flush records buffered in TEMPDATA (saved while Mongo was down).

    Records that fail with a write-class error are dropped; the buffer is
    cleared unconditionally at the end.
    """
    global client
    global TEMPDATA
    if (len(TEMPDATA) != 0):
        print("\n tempdata started saving")
        for record in TEMPDATA:
            try:
                coll.insert_one(record)
            except (
                    mongoerr.DuplicateKeyError, mongoerr.WriteError,
                    mongoerr.BulkWriteError, mongoerr.InvalidId,
                    mongoerr.WriteConcernError, UnicodeDecodeError) as e:
                print(f"write error, don't save record from temp\n record: {record}\n error: \t {e}")
                client = open_mongo()
                print("\nreopened mongo\n")
        print("tempdata saved\n")
        TEMPDATA = []
def send_record(record):
    """Insert one measurement record into Mongo.

    Write-class errors drop the record; network/timeout-class errors stash
    it in TEMPDATA for a later send_temp_records() flush.  Both paths
    reopen the Mongo client.
    """
    global client
    global TEMPDATA
    try:
        coll.insert_one(record)
        print(f"\tsent: {record['datetime']} {record['value']}")
    except (
            mongoerr.DuplicateKeyError,
            mongoerr.WriteError,
            mongoerr.BulkWriteError,
            mongoerr.InvalidId,
            mongoerr.WriteConcernError,
            UnicodeDecodeError
    ) as e:
        print("write error, don't save record, error: \n", e)
        client = open_mongo()
        print("\nreopened mongo\n")
    except (
            mongoerr.NetworkTimeout,
            mongoerr.ServerSelectionTimeoutError,
            mongoerr.ExecutionTimeout,
            mongoerr.AutoReconnect,
            UnicodeDecodeError) as e:
        # Connectivity problem: keep the record for a later retry.
        TEMPDATA.append(record)
        print("save record, stoped with error: \n", e)
        client = open_mongo()
        print("\nreopened mongo\n")
# Module state: retry buffer and Mongo collection used by the helpers above.
TEMPDATA = []
client = open_mongo()
print(client.list_database_names())
db = client["chernogor"]
coll = db["photoresistor"]
# while True:
#     try:
# Main loop: once per minute (at second == 0) poll the sensor and push the
# reading; serial/Mongo failures are caught and the loop retries forever.
while True:
    try:
        s = open_port()
        while True:
            dt = datetime.now()
            if dt.second == 0:
                value = get_value()
                if value:
                    record = {"datetime": dt, "value": value}
                    send_record(record)
                    send_temp_records()
                    # Sleep past this minute so the reading is not repeated.
                    time.sleep(40)
            time.sleep(0.4)
    except (serial.serialutil.SerialException, UnicodeDecodeError) as e:
        print("wait 10 secs, error: ", e)
        time.sleep(10)
    except (mongoerr.AutoReconnect, UnicodeDecodeError) as e:
        print("autoreconect\t", e)
        client = open_mongo()
        print("tried to connect")
# except (mongoerr.AutoReconect, UnicodeDecodeError as e):
#     client = open_mongo()
#     print("\nreopened mongo\n")
#     sleep(40)
| true |
6c22260c7ff1658c5bca04cff31adba1126842c3 | Python | DallasDominguez1/ece16sp2020-ajito44 | /Lab1Code/Numpy Array 2.py | UTF-8 | 260 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 2 12:22:21 2020
@author: joyceeito
"""
import numpy as np
# Build a 2x4 integer matrix and print it.
test_2D_array = np.array([[0, 10, 4, 12],
                          [1, 20, 3, 41]])
print(test_2D_array)
print(test_2D_array.shape) | true |
30a4922c4d5a2da30f2da0fd88b96bab3c21a7f0 | Python | Yamatomo1912/Python_Test | /体重入力.py | UTF-8 | 3,874 | 3.21875 | 3 | [] | no_license | import datetime
import os
import openpyxl
def isfloat(paramter):
    """Return True only for strings that parse as float but are NOT a
    plain run of decimal digits (e.g. '70.5' -> True, '70' -> False,
    '-3.2' -> True, 'abc' -> False).

    NOTE(review): rejecting plain-integer input appears deliberate here,
    but confirm that whole numbers really should be refused.
    """
    if paramter.isdecimal():
        return False
    try:
        float(paramter)
    except ValueError:
        return False
    return True
def Input_data(str_message):
    """Prompt repeatedly with str_message until the user types a value
    accepted by isfloat(), then return that raw input string."""
    def _reject(data):
        # Warn (in Japanese) and signal rejection for non-numeric input.
        if not isfloat(data):
            print("数値以外の文字を入力しないでください\nもう一度入力してください。\n")
            return True
        return False
    while True:
        data = input(str_message)
        if not _reject(data):
            return data
if __name__ == '__main__':
    # Prompt for the daily body measurements and append them to this
    # year's Excel workbook, one sheet per month, one row per day.
    print("20時になりました、体重計に乗って体重・体脂肪率等記録してください。")
    message_list = [
        "体重(kg)を入力してください。\n",
        "BMI値を入力してください。\n",
        "体脂肪率(%)を入力してください。\n",
        "体水分率(%)を入力してください。\n",
        "骨格筋率(%)を入力してください。\n",
        "基礎代謝(kcal)を入力してください。\n",
        "除脂肪体重(kg)を入力してください。\n",
        "皮下脂肪(%)を入力してください。\n",
        "内臓脂肪(%)を入力してください。\n",
        "筋肉の重さ(kg)を入力してください。\n",
        "骨量(kg)を入力してください。\n",
        "タンパク質(%)を入力してください。\n",
        "体内年齢を入力してください。\n"]
    data = []
    for message in message_list:
        data.append(Input_data(message))
    # Build the header labels by stripping the prompt suffix.
    koumoku_list = []
    for m in message_list:
        koumoku_list.append(m.replace("を入力してください。\n",""))
    now = datetime.datetime.now()
    filename = str(now.year)+"年.xlsx"
    if not os.path.exists(filename):  # workbook does not exist yet: create it
        wb = openpyxl.Workbook()
        ws = wb.worksheets[0]  # grab the default first sheet
        ws.title = str(now.month).lstrip("0")+"月"  # rename to current month
        # Write the header row.
        count = 2
        ws.cell(1,1).value = "日付"
        for k in koumoku_list:
            ws.cell(1,count).value = k
            count += 1
        # Write today's measurements as the first data row.
        count = 2
        ws.cell(2,1).value = str(now.month).lstrip("0")+"月"+str(now.day).lstrip("0")+"日"
        for d in data:
            ws.cell(2,count).value = float(d)
            count += 1
        wb.save(filename)
    else:  # workbook already exists
        workbook = openpyxl.load_workbook(filename)
        # Look for this month's sheet.
        worksheet = None
        sheet_check = False  # True when the sheet exists
        for ws in workbook.sheetnames:
            if ws == str(now.month).lstrip("0")+"月":
                worksheet = workbook[ws]  # select the existing sheet
                sheet_check = True
        if sheet_check == False:  # no sheet for this month yet: add one
            worksheet = workbook.create_sheet(title=str(now.month).lstrip("0")+"月")
            # Write the header row on the fresh sheet.
            count = 2
            worksheet.cell(1,1).value = "日付"
            for k in koumoku_list:
                worksheet.cell(1,count).value = k
                count += 1
        # Append today's measurements below the last used row.
        rows = worksheet.max_row+1
        # BUG FIX: the day suffix was "月" (month) instead of "日" (day),
        # producing labels like "4月5月"; it now matches the new-file branch.
        worksheet.cell(rows,1).value = str(now.month).lstrip("0")+"月"+str(now.day).lstrip("0")+"日"
        count = 2
        for d in data:
            worksheet.cell(rows,count).value = float(d)
            count += 1
workbook.save(filename) | true |
f6abd0b830da271c4275479f43d6c8ba98a91834 | Python | marko-13/siap-lyrics-to-genre | /utils/bow.py | UTF-8 | 1,832 | 2.9375 | 3 | [] | no_license | from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
import re
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
def get_bow():
    """Build a bag-of-words matrix from per-genre lyrics CSVs and train a
    Gaussian Naive Bayes genre classifier, printing the matrix shape and
    the held-out accuracy.
    """
    nltk.download('punkt')
    nltk.download('stopwords')
    # `corpus` would hold 10 strings, one per genre: the concatenation of
    # all lyrics of that genre (currently unused — see commented code).
    corpus = []
    corpus_temp = []
    # `tokenized_corpus` would hold, per genre, the list of all words from
    # that genre's lyrics (also unused at the moment).
    tokenized_corpus = []
    y_label = []
    temp_genre = ['blues', 'classical', 'country', 'disco', 'hip hop', 'jazz', 'metal',
                  'pop', 'reggae', 'rock']
    for genre in temp_genre:
        df = pd.read_csv('../data/lyrics_csv/' + genre + '.csv', delimiter=';', encoding='utf-8')
        for index, row in df.iterrows():
            # Keep letters and spaces only, lowercased; one sample per song.
            corpus_temp.append(re.sub('[^A-Za-z ]', ' ', row['lyrics']).lower())
            y_label.append(genre)
        # corpus.append(corpus_temp)
        # tokenized_corpus.append(word_tokenize(corpus_temp))
        # corpus_temp = []
    # for tokenized_corpus_temp in tokenized_corpus:
    #     for word in tokenized_corpus_temp:
    #         if word in stopwords.words('english'):
    #             tokenized_corpus_temp.remove(word)
    stemmer = PorterStemmer()
    matrix = CountVectorizer()
    X = matrix.fit_transform(corpus_temp).toarray()
    print(X.shape)
    x_train, x_test, y_train, y_test = train_test_split(X, y_label)
    classifier = GaussianNB()
    classifier.fit(x_train, y_train)
    y_pred = classifier.predict(x_test)
    accuracy = accuracy_score(y_test, y_pred)
    print(accuracy)
get_bow()
| true |
a193f96e6025d56dc2e09ceb5658d72d61167b59 | Python | Issam-b/triple-T | /server/server_player.py | UTF-8 | 5,298 | 2.546875 | 3 | [] | no_license | #! /usr/bin/python3
import random
import string
import time
import threading
import helpers as constants
from helpers import setup_logger
from helpers import cmd
from helpers import cmd_buffer
from helpers import game_config
import server_game as sg
# command codenames between server and client
message_length = int(game_config.get("OTHER", "message_length"))
# Parse the "True"/"False" config string into a real boolean directly;
# the previous `True if ... else False` ternary was redundant.
DEBUG = game_config.get("DEBUG", "DEBUG") == "True"
logger = setup_logger("settings.conf", "server")
class Player:
    """Player class to keep track of availble players and their status"""
    # count of players connected since the server started
    def __init__(self, conn):
        """called when new player created."""
        sg.GameServer.all_players_count += 1
        self.conn = conn
        self.is_waiting = True
        self.id = sg.GameServer.all_players_count
        logger.info("player with id: " + str(self.id) + " has joined.")
        self.role = ""
        self.opponenet = ""
        self.disconected = False
        # NOTE(review): this binds the module-level cmd_buffer dict, so every
        # Player shares ONE buffer — confirm this is intended and not a
        # per-player copy.
        self.cmd_buffer = cmd_buffer
        self.dropped = False
    def send(self, command, msg):
        """Send data from server to player"""
        try:
            self.conn.send((command + msg).encode())
            if DEBUG:
                logger.info("sending: " + str(command + msg) +
                            " to: " + str(self.id))
        except Exception:
            logger.error("Send failed to " + str(self.id))
            # assume player connection is lost if an error accured
            self.lost_connection()
    def receive_populate_buffer(self, timeout=5, size=message_length):
        """receive commands and save to buffer untill read by server

        The first byte of a message is the command code; the remainder is
        the payload (whole board for "board", a single char otherwise).
        """
        try:
            # self.conn.settimeout(timeout)
            msg = self.conn.recv(size).decode()
            if len(msg) > 0:
                if DEBUG:
                    logger.info("received: " + str(msg) +
                                " from: " + str(self.id))
                recv_cmd = ""
                recv_cmd = self.find_cmd_buffer_key(
                    constants.cmd, msg, recv_cmd)
                if recv_cmd == "board":
                    self.cmd_buffer[str(recv_cmd)] = msg[1:]
                elif recv_cmd == "quit":
                    self.cmd_buffer[str(recv_cmd)] = True
                else:
                    self.cmd_buffer[str(recv_cmd)] = msg[1]
                return True
            else:
                # Zero-length read: peer closed the socket.
                return False
        except Exception as e:
            logger.error("Failed while receiving msg from " +
                         str(self.id) + " : " + str(e))
            return False
    def find_cmd_buffer_key(self, cmd, msg, recv_cmd):
        """Map the first byte of msg back to its command name, or return
        recv_cmd unchanged when no command matches."""
        items = cmd.items()
        for key, cmd in items:
            if cmd == msg[0]:
                recv_cmd = key
                break
        return recv_cmd
    def receive_with_wait(self, expected_command, timeout=0):
        """Receive data from player and check the validity of the data.

        Busy-waits (0.1 s polls) until the buffer slot fills or the player
        disconnects; with timeout > 0 (seconds) returns -1 on expiry.
        """
        # TODO use locks here
        if timeout == 0:
            while self.cmd_buffer[str(expected_command)] == "" and not self.disconected:
                time.sleep(0.1)
        else:
            # NOTE(review): round() is applied to seconds before the ms
            # conversion, so sub-second precision of the deadline is lost.
            time_to_wait = int((round(time.time() + timeout) * 1000))
            while self.cmd_buffer[str(expected_command)] == "" and not self.disconected:
                current_time = int(round(time.time() * 1000))
                if time_to_wait - current_time <= 0:
                    return -1
                time.sleep(0.1)
        cmd_to_return = self.cmd_buffer[str(expected_command)]
        self.cmd_buffer[str(expected_command)] = ""
        return cmd_to_return
    def read_buffer(self, expected_command, clear=True):
        """read the current data from buffer"""
        cmd_to_return = self.cmd_buffer[str(expected_command)]
        if clear:
            self.cmd_buffer[str(expected_command)] = ""
        return cmd_to_return
    def send_game_info(self):
        """send the player his assigned role and matched opponenet.

        Returns True only when the client confirms receipt; otherwise the
        player is treated as disconnected and removed from server lists.
        """
        success_state = False
        msg = str(self.id) + str(self.role)
        logger.info("Sending game info to: " + str(self.id))
        self.send(cmd["game_info"], msg)
        try:
            received_confirm_msg = int(self.receive_with_wait("confirm"))
            expected_msg = int(cmd["confirm_states"]["game_info_received"])
            if received_confirm_msg != expected_msg:
                self.lost_connection()
                self.remove_player_from_server("waitlist", "active")
            else:
                self.remove_player_from_server("waitlist")
                success_state = True
        except Exception:
            logger.error("Expected a valid confirm integer")
        return success_state
    def remove_player_from_server(self, waitlist="", active=""):
        """Remove this player from the named server lists ("waitlist"
        and/or "active"); missing membership is tolerated."""
        if waitlist == "waitlist" and self in sg.GameServer.players_waitlist:
            sg.GameServer.players_waitlist.remove(self)
        if active == "active" and self in sg.GameServer.active_players:
            sg.GameServer.active_players.remove(self)
    def lost_connection(self):
        """Called when connection with player is lost"""
        self.disconected = True
        logger.warning("Player: " + str(self.id) + " has lost connection!")
| true |
dd36b77fcf06ac52090a28ae075f31071d7992be | Python | varunpatel07/Data_Structure__Algorithms_in_python | /Array problem/2.11.py | UTF-8 | 579 | 4.125 | 4 | [] | no_license | """
Given an array of positive and negative numbers. Find if there is a subarray (of size at-least one) with 0 sum.
Example 1:
Input:
5
4 2 -3 1 6
"""
#O(n^2)
def sol1(arr):
    """Brute-force check for a zero-sum contiguous subarray, O(n^2) slices.

    Returns "yes" if some non-empty subarray arr[i:j] sums to 0, else "no".
    """
    for i in range(len(arr)):
        # j is the exclusive end index, so it must reach len(arr) for
        # subarrays ending at the last element to be examined — the
        # original range(i+1, len(arr)) silently skipped them (e.g. it
        # answered "no" for [1, -1] and for a trailing 0).
        for j in range(i + 1, len(arr) + 1):
            if sum(arr[i:j]) == 0:
                return "yes"
    return "no"
def sol2(arr):
    """Prefix-sum check for a zero-sum contiguous subarray.

    A zero-sum subarray exists iff some running prefix sum is 0 or repeats.
    Returns "YES"/"NO" (note: sol1 returns lowercase "yes"/"no").
    """
    # A set (not a list) makes each membership test O(1), so the whole
    # scan is O(n) instead of the original O(n^2).
    seen = set()
    sumv = 0
    for item in arr:
        sumv += item
        if sumv == 0 or sumv in seen:
            return "YES"
        seen.add(sumv)
    return "NO"
# Demo: run the brute-force checker on a sample array (this one has no
# zero-sum subarray; its prefix sums are 4, 6, 2, 3, 9).
arr=[4,2,-4,1,6]
print(sol1(arr))
print(sol2(arr)) | true |
bddfaca2c8ec7d865b2fef389d53037c324066ac | Python | Mithun691/SOC_2021 | /Week1/Day1_Probability_basics/Q2_Simulation.py | UTF-8 | 816 | 4.09375 | 4 | [] | no_license | import random
import math
def simulate():
    """Simulate one needle drop; return True if it crosses a line.

    Lines lie at x = 0 and x = 1 (spacing 1.0); the needle's centre is
    uniform in [0, 1) and its angle uniform in [0, pi).

    Bug fixed: for angles > pi/2, cos(angle) is negative, so the original
    "leftmost"/"rightmost" endpoints were swapped and those crossings were
    never detected, roughly halving the estimated probability.
    """
    spacing = 1.0
    stick_length = 0.5
    # Randomly choose the position at which the centre falls.
    pos_x = spacing*random.random()  # random number between 0-1
    angle = math.pi*random.random()  # random orientation, angle with horizontal
    # Horizontal half-extent of the needle; abs() handles angle > pi/2.
    # NOTE(review): stick_length acts as the HALF-length here (total needle
    # length 1.0 = spacing) — confirm that is the intended geometry.
    half_span = abs(stick_length*math.cos(angle))
    leftmost = pos_x - half_span
    rightmost = pos_x + half_span
    # Crossing happens when either endpoint passes a line.
    return leftmost < 0 or rightmost > 1
# Monte-Carlo estimate: drop the needle many times and report the
# fraction of drops that crossed a line.
n_iterations = 10000
crossing = 0
for iteration in range(n_iterations):
    if(simulate()):
        crossing += 1
prob = crossing/n_iterations
print("Probability of stick crossing the lines is {}".format(prob)) | true |
683d78e41f56dfaabb11374e6cd192a3b314e37b | Python | itsN1X/zeroclickinfo-fathead | /lib/fathead/firefox_about_config/parse.py | UTF-8 | 3,871 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python2
from BeautifulSoup import BeautifulSoup, NavigableString
import urllib
import string
import re
class Entry(object):
    """One about:config preference destined for the fathead output file."""
    def __init__(self, name, value, description, url):
        self.name = name                # preference name, e.g. "browser.cache.x"
        self.value = value              # value type ("Boolean", "Integer", ...)
        self.description = description  # HTML-formatted abstract
        self.url = url                  # source URL on mozillazine
    def __str__(self):
        """Serialize as one tab-separated fathead output line."""
        fields = [
            self.name, # title
            'A', # type
            '', # redirect
            '', # otheruses
            '', # categories
            '', # references
            '', # see_also
            '', # further_reading
            '', # external_links
            '', # disambiguation
            '', # images
            self.description, # abstract
            self.url # source_url
        ]
        # The previous "'%s' % join(...)" re-formatting was redundant;
        # return the joined string directly.
        return '\t'.join(fields)
class Parser(object):
    """Scrapes mozillazine's About:config_entries page into Entry objects.

    NOTE: Python 2 only — relies on BeautifulSoup 3's findAll API,
    urllib.quote and string.capitalize, none of which exist in Python 3.
    """
    def __init__(self, input='download/About:config_entries'):
        self.soup = BeautifulSoup(open(input))
        # Requires trailing / for relative link replacement
        self.baseURL = "http://kb.mozillazine.org/"
    def findEntries(self):
        """Populate self.entries from every preference table on the page."""
        self.entries = []
        # Section headers (skipping the first two page-level h1s) are used
        # to build anchor links per preference prefix.
        headers = map(lambda x: x.string, self.soup.findAll('h1')[2:])
        table = self.soup.findAll('div', id="bodyContent")[0]
        # NOTE(review): the loop variable shadows `table` — works because
        # findAll is evaluated once, but rename if touched again.
        for table in table.findAll('table'):
            header = True
            for tr in table.findAll('tr'):
                if header:
                    header = False
                    continue
                # Columns: 0 = name, 1 = value type, 2 = description.
                i = 0
                for th in tr.findAll('td'):
                    description = ''
                    if i == 0:
                        name = ''.join(th.b.findAll(text=True)).replace(' ','')
                        anchor = string.capitalize(urllib.quote(name.split('.')[0])) + "."
                        if anchor in headers:
                            url = self.baseURL + 'About:config_entries#' + anchor
                        else:
                            url = self.baseURL + 'About:config_entries'
                    elif i == 1:
                        value = th.text
                    elif i == 2:
                        if value:
                            # Build an "a"/"an" synopsis from the value type.
                            article = 'a'
                            if value[0] == 'I': article += 'n'
                            optionType = "it accepts " + article + " " + value.lower() + "."
                            synopsis = '"' + name + '"' + ' is a configuration option ' \
                                       'for the Firefox web browser; ' + optionType + "<br>"
                        # Normalize markup: <br> to newlines, absolute links,
                        # <b> demoted to <i>, wrapped in a blockquote.
                        for tag in th.findAll('br'):
                            tag.insert(0, NavigableString("\n"))
                        description = ''.join(th.findAll(text=True))
                        description = description.rstrip().replace('\n', '<br>').strip()
                        expandedURL = 'href="' + self.baseURL
                        description = description.replace('href="/', expandedURL)
                        description = re.sub('<\s*b\s*>', '<i>', description)
                        description = re.sub('<\s*/\s*b\s*>', '</i>', description)
                        description = '<blockquote>' + description + '</blockquote>'
                        description = synopsis + description
                        i = -1
                        self.entries.append(Entry(name, value, description.strip(), url))
                    i += 1
if __name__ == "__main__":
parser = Parser()
parser.findEntries()
with open('output.txt', 'w') as file:
for entry in parser.entries:
file.write(entry.__str__().encode('UTF-8') + '\n')
| true |
1a6cc33207d1ae7e7086d2962d0bd6399b17daca | Python | mandarborkar/hello-world | /.idea/avent4a.py | UTF-8 | 2,424 | 3.0625 | 3 | [] | no_license | #from aocd import get_data
#mylist = get_data(day=4)
f1 = open("/Users/mborkar/PycharmProjects/hello-world/avent4input.txt", "r")
mypassporstring = f1.read()
passport = mypassporstring.split("\n\n")
def HasRequiredPassportInformation (passportdetails):
# print (passportdetails)
RequiredPassportInformation = ["byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"]
for requireddetails in RequiredPassportInformation:
if requireddetails not in passportdetails :
return 'Invalid'
return 'Valid'
def GetPassportdetails (singlepassport):
passportattribute = singlepassport.split (" ")
# print (passportattribute)
passportattributename = []
for i in range (0,len(passportattribute)) :
passportattributename.append (passportattribute[i].split (":")[0])
return passportattributename
# cleaning up new lines and multiple spaces for each of the passports
validpassportcount = 0
for i in range (0,len(passport)):
passport[i] = passport[i].replace ("\n"," ").strip ()
passportdetails = GetPassportdetails (passport[i])
passportstatus = HasRequiredPassportInformation (passportdetails)
passport[i] = passport[i] + " " + passportstatus
if passportstatus == 'Valid':
validpassportcount +=1
print (passport[i])
del passportdetails
#print (passport)
print ('Valid passports = ' + str(validpassportcount))
'''
mylist = mypassporstring.split("\n\n")
numberofpassports=0
passport = []
newpassportcomingup=True
for i in range (0,len(mylist)):
if newpassportcomingup :
passport.append(mylist[i])
if mylist[i] != "\n" :
passport[numberofpassports] = passport[numberofpassports] + mylist[i]
newpassportcomingup = False
else:
newpassportcomingup=True
numberofpassports += 1
ValidPassportcount=0
for i in range (0,len(passport)):
passport[i]=passport[i].replace("\n"," ")
if passport[i].count("byr") != 0 and passport[i].count("iyr") != 0 and passport[i].count("eyr") != 0 and passport[i].count("hgt") != 0 and passport[i].count("hcl") != 0 and passport[i].count("ecl") != 0 and passport[i].count("pid") != 0 :
passport[i] = passport[i] + " Valid"
ValidPassportcount += 1
else:
passport[i] = passport[i] + " Invalid"
print ("passport " + str(i) + " = " + passport[i])
print (str(numberofpassports))
print (str(ValidPassportcount))
print (passport[0])
''' | true |
a12912fca8d5c3e07c80bba95c4687502c2d9e4a | Python | Raspeball/Tippekupong | /tippekupongen.py | UTF-8 | 1,625 | 2.96875 | 3 | [] | no_license | ## Import ##
import easygui as egui
import math
import random
## --- --- ##
#
#
#
def main():
#
#
## Global variables ##
kamper = 12 #alltid 12 kamper på tippekupongen
pure_res = [] #for kun resultatene, for å kunne velge gardering
present_res = [] #liste for å presentere resultatene
possible_res = {0:"H",1:"U",2:"B"}
#
#
def tippekupong():
for k in range(kamper):
res = random.choice(list(possible_res.keys()))
pure_res.append([res])#liste av lister, der resultatet av en kamp er en liste
#resultatet av en gardert kamp er en liste med lengde > 1
#
#
#
#
#
def gardering():
#gardering tar inn kupongen, og plukker ut to villkårlige kamper som skal halvgarderes
#dette tilsvarer en kupong på 4 rekker jfr. Norsk Tipping
gard_res = list(possible_res.keys())
gard_kamper = random.sample(range(0,len(pure_res)),2)
for g in range(len(gard_kamper)):
gard_res.remove(pure_res[gard_kamper[g]][0])#pure_res liste av liste
gard_choice = random.choice(gard_res)
pure_res[gard_kamper[g]].append(gard_choice)
pure_res[gard_kamper[g]].sort()#sorterer resultatene slik at de kommer "HUB"
gard_res = list(possible_res.keys())
#
#
#
#
#
def tippetekst(): #skriver resultatene i tekstform
for r in range(len(pure_res)):
res_tekst = "Kamp nr. " + str((r+1)) + ": "
for s in pure_res[r]:
res_tekst += possible_res[s]
#
present_res.append(res_tekst)
#
#
#
#
#
## Call functions ##
tippekupong()
gardering()
tippetekst()
## Print results to screen ##
#print(pure_res)
for r in present_res:
print(r,end="\n")
#
#
#
#
## Run program ##
main()
#
| true |
7512dd93bf29fad6691c72980dc7cec2eaeac56c | Python | christian-saldana/Leetcode | /Easy/palindrome_number.py | UTF-8 | 1,531 | 4.78125 | 5 | [] | no_license | """Given an integer x, return true if x is palindrome integer.
An integer is a palindrome when it reads the same backward as forward. For example, 121 is palindrome while 123 is not.
For this problem I gave two solutions.
In this first solution I use mathematics to reverse the string. """
def palNum(x):
# We first initialize the reverse number variable and make a copy of the input.
revs_number = 0
a = x
# Next we set up a while loop because we will be decreasing x until it is 0.
while (x > 0):
# % gives us the remainder. In this case if we divide by 10 the remainder will be the ones spot.
remainder = x % 10
# Here we multiply the previous value of revs_number by 10. This makes more sense as you continue with the loop.
# Then add the remainder.
revs_number = (revs_number * 10) + remainder
# We now want the next digit of x. The // operator rounds the integer down to the nearest whole number after division.
# For example: 121 // 10 = 12.
x = x // 10
# Finally we check if the original input is equal to the reversed integer.
return revs_number == a
"""This second solution is very simple.
Turn the integer into a string and then check if the string is the same when its elements are reversed.
The a[::-1] has the syntax "list[<start>:<stop>:<step>]".
So, when you do a[::-1], it starts from the end towards the first taking each element. So it reverses a. """
def palN(x):
a = str(x)
return a == a[::-1]
| true |
a2c9485d359bd5dbbab0ec7c19e2083cf2065c99 | Python | lrbrwnn1/HIVE-Automated-Data-Conversion-and-Coalation-Suite | /main.py | UTF-8 | 3,144 | 2.828125 | 3 | [
"MIT"
] | permissive | import Bio
from Bio import Entrez
from Bio import Medline
#list of all keywords that can be used to retrieve info from the medline db:
#https://biopython-tutorial.readthedocs.io/en/latest/notebooks/09%20-%20Accessing%20NCBIs%20Entrez%20databases.html
def fetch_details(id_list):
try:
ids = ','.join(id_list)#Combines id's from id_list
Entrez.email = 'example@something.com'
handle = Entrez.efetch(db="pubmed", id=ids, rettype="medline", retmode="text")
records = Medline.parse(handle)
return records
except:
pass
#for each author tag (1 per line):
#1 - Go through the list of pmid's and get info on each
#2 - Append author_tag
#3 - Write to file
def pubScraper():
count = 0
print("\n\nNow scraping publications, please wait...\n\n")
f = open('pubs.txt', 'a+')
with open('author2citation.txt') as a:
for line in a:
investigator_tag = (line.split('\t')[0])
pmidList = (line.split('\t')[1]).rstrip()
pmidList = pmidList.split(',')
print(investigator_tag, pmidList)
papers = fetch_details(pmidList)
for paper in papers:
count+=1
f.write(paper.get("PMID", "?")+"\t") #PUBMED ID
f.write(paper.get("TI", "?")+"\t") #Title
f.write(str(paper.get("AU", "?"))+"\t") #Author List
f.write(paper.get("AB", "?")+"\t") #Abstract
f.write(paper.get("SO", "?")+"\t") #Source
f.write(paper.get("DP", "?")+"\t") #Date Published
f.write(str(paper.get("MH", "?"))+"\t") #Mesh Headers
f.write(investigator_tag+"\n")
print("Finished")
def authorFormatter():
f = open('facultyListFormatted.txt', 'a+')
with open('facultyList.txt') as a:
#this can be reduced to one or two lines, I've expanded it and placed print statements for clarity.
for line in a:
indexKey = (line.split('\t')[0])
fullName = (line.split('\t')[1])[1:-1] #This is to trim off quotation marks that appear in this particular text file.
email = (line.split('\t')[2])
if email.endswith('oakland.edu'): #Splits uncategorized researchers by their affiliation based on email domain
affiliation = "Oakland University"
else:
affiliation = "Beaumont Research Institute"
lastName = (line.split('\t')[3])
firstName = (line.split('\t')[4])
location = (line.split('\t')[6])[1:-1]
print(indexKey)
print(fullName)
print(email)
print(lastName)
print(firstName)
print(location)
print(affiliation + "\n")
f.write(indexKey + "\t")
f.write(fullName + "\t") #This includes title (MD, PhD, etc) whereas lastName and firstName do not.
f.write(email + "\t")
f.write(lastName + "\t")
f.write(firstName + "\t")
f.write(location + "\t")
f.write(affiliation + "\n")
print("Finished")
if __name__ == '__main__':
#simple terminal GUI
print("===================================================")
print("HIVE Automated Conversion and Data Coalation Suite")
print("===================================================\n")
print("Please select a functionality:\n 1 - Publication Scraper \n 2 - Author file formatter")
inputType = int(input(""))
if inputType == 1:
pubScraper()
if inputType == 2:
authorFormatter()
| true |
10b4b536a3005861e150e0f9ba2009e4695a969a | Python | JebediahKermanBadS/Calculator | /calculator.py | UTF-8 | 1,166 | 3.796875 | 4 | [] | no_license | """Module description
"""
from postfix_calculator import PostfixCalculator
def main():
"""Main method
"""
calculator = PostfixCalculator()
while True:
infix_expression = input("Enter your calculation: ")
if infix_expression[0] == "&":
break
for _c in calculator.operatorList:
infix_expression = infix_expression.replace(_c, f" {_c} ")
for _c in calculator.functionList:
infix_expression = infix_expression.replace(_c, f" {_c} ")
infix_expression = infix_expression.replace("(", " ( ")
infix_expression = infix_expression.replace(")", " ) ")
while infix_expression.__contains__(" "):
infix_expression = infix_expression.replace(" ", " ")
if infix_expression.endswith(" "):
infix_expression = infix_expression[:-1]
if infix_expression.startswith(" "):
infix_expression = infix_expression[1:]
postfix_expression = calculator.convert_infix_to_postfix(infix_expression)
print(f" = {calculator.evaluate_postfix_expression(postfix_expression)}")
if __name__ == "__main__":
main()
| true |
7ae6dfddb89628c67cb30956e9012c9a2a1b50d5 | Python | ISE-DeepLearning/CoverageDNN | /process/process_coverage_percentile3_mix.py | UTF-8 | 10,870 | 2.609375 | 3 | [
"MIT"
] | permissive | import os
import numpy as np
import data_provider
import gc
import time
from itertools import combinations, permutations
'''
本脚本是將样本的激活数据的边覆盖情况具现化保存
数据结构边写边考虑
'''
'''
目前只考虑1-1的三种情况的覆盖
写尽可能考虑可拓展性
'''
batch_size = 30000
def check_dir(dir_path):
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def bitwise_or(array_data):
result = array_data[0]
for data in array_data:
result = np.bitwise_or(result, data)
print(result.shape)
return result
# 获取两层之间的每个数据的对应的
def process_three_layer(layer_nums, m, n, k, active_datas1, active_datas2, active_datas3, save_paths, layer_index,
type=None):
# 首先样本数据量需要一致对吧
if len(active_datas1) != len(active_datas2):
raise RuntimeError("the dimension of two layer active datas are not compatible")
if type is None:
type = 0
layer1_neuron_nums = np.shape(active_datas1[0])[1]
layer2_neuron_nums = np.shape(active_datas2[0])[1]
layer3_neuron_nums = np.shape(active_datas3[0])[1]
print('layer1 neuron nums is ', layer1_neuron_nums)
print('layer2 neuron nums is ', layer2_neuron_nums)
print('layer3 neuron nums is ', layer3_neuron_nums)
# 直接使用python库自带的组合情况计算的工具类来产生组合的情况
# 获取的是itertools的combinations的object,可以list转,但是本身是可以迭代的。
layer1_combination = list(combinations(range(layer1_neuron_nums), m))
layer2_combination = list(combinations(range(layer2_neuron_nums), n))
layer3_combination = list(combinations(range(layer3_neuron_nums), k))
# m+n个情况里面的全覆盖的总数是 2^(m+n)
condition_nums = 2 ** (m + n + k)
if type == 1:
condition_nums = 2 ** (m + n)
# i am not sure this should be write down , it could be a specific condition~ maybe~
elif type == 2:
condition_nums = 1
# process_data 是指对应的样本在每个组合中对应的覆盖情况
process_data = []
batch_index = 0
# 记录condition的index
condition_index = 0
result = np.zeros((len(save_paths),), dtype=np.int)
combination_nums = len(layer1_combination) * len(layer2_combination) * len(layer3_combination)
print('codition_nums is ', len(layer1_combination) * len(layer2_combination) * len(layer3_combination))
process_data = [[], [], [], [], []]
for comb1 in layer1_combination:
for comb2 in layer2_combination:
for comb3 in layer3_combination:
condition_index += 1
print(condition_index)
# 取出3层的数据
for i in range(len(save_paths)):
# print(i, end=' ')
save_path = save_paths[i]
# print(save_path)
datas1 = active_datas1[i]
datas2 = active_datas2[i]
datas3 = active_datas3[i]
temp_data = np.zeros((condition_nums,))
sample_nums = np.shape(datas1)[0]
print('sample_nums ', sample_nums)
for j in range(sample_nums):
cover_data = []
data1 = [datas1[j][index] for index in comb1]
data2 = [datas2[j][index] for index in comb2]
data3 = [datas3[j][index] for index in comb3]
# 全覆盖
if type == 0:
data1.extend(data2)
data1.extend(data3)
value = cal_2_to_10_value(data1)
temp_data[int(value)] = 1
elif type == 1:
data3 = np.array(data3)
# 如果输出端全激活
if len(data3) == len(data3[data3 == 1]):
data1.extend(data2)
value = cal_2_to_10_value(data1)
temp_data[int(value)] = 1
else:
data1.extend(data2)
data1.extend(data3)
data1 = np.array(data1)
if len(data1) == len(data1[data1 == 1]):
temp_data[0] = 1
# cover_data.append(cal_2_to_10_value(list(temp_data)))
# print(cal_2_to_10_value(list(temp_data)))
result[i] = np.bitwise_or(result[i], int(cal_2_to_10_value(list(temp_data))))
# process_data.append(cover_data)
process_data[i].append(result[i])
if len(process_data[i]) == batch_size or condition_index == combination_nums:
# process_data = np.transpose(process_data, (1, 0))
# result = bitwise_or(process_data)
np.save(os.path.join(save_path,
str(layer_nums) + '_hidden_layers_coverage_total_layer_index_' + str(
layer_index) + '_batch_' + str(
batch_index) + '.npy'), process_data[i])
batch_index += 1
result[i] = 0
process_data[i] = []
# process_data = np.array(process_data)
# process_data = process_data.transpose((1, 0))
# print(process_data[0])
# print(process_data.shape)
# return process_data
# 二进制转十进制
# 计算的是unsigned 非负数
def cal_2_to_10_value(datas):
# 反序容易计算
datas.reverse()
result = 0
for i in range(len(datas)):
result += (datas[i] * (2 ** i))
return result
m = 1
n = 1
k = 1
# 以0为激活的阈值
# threshold = 0
percentile = 0
# 数据集是mnist
dataset = 'mnist'
# 0-全覆盖
# 1-输出端激活覆盖
# 2-输入输出端均激活的覆盖
type = 2
if __name__ == '__main__':
for percentile in [75]:
for type in [0, 1, 2]:
for layer_nums in [3]:
if type == 0 and layer_nums == 3:
continue
print('percentile is ', percentile)
print('layer_num is ', layer_nums)
print('type is ', type)
now = time.time()
right_datas = data_provider.get_right_active_data('../data/mnist', dataset, layer_nums)
# process_right_datas and save coverage data
# 默认我们用相邻两层
result_for_right = np.empty((len(right_datas), 0))
for layer_num in range(layer_nums - 2):
save_paths = []
datas_1_list = []
datas_2_list = []
datas_3_list = []
layer1 = layer_num
layer2 = layer_num + 1
layer3 = layer_num + 2
datas1 = np.array([list(item) for item in right_datas[:, layer1]])
datas2 = np.array([list(item) for item in right_datas[:, layer2]])
datas3 = np.array([list(item) for item in right_datas[:, layer3]])
print(datas1.shape)
print(datas2.shape)
print(datas3.shape)
threshold1 = np.percentile(datas1, percentile)
threshold2 = np.percentile(datas2, percentile)
threshold3 = np.percentile(datas3, percentile)
datas1[datas1 > threshold1] = 1
datas2[datas2 > threshold2] = 1
datas3[datas3 > threshold3] = 1
datas1[datas1 <= threshold1] = 0
datas2[datas2 <= threshold2] = 0
datas3[datas3 <= threshold3] = 0
save_path = '../data/mnist/mnist_right_active_data/coverage/percentile' + str(
percentile) + '/adjacent_' + str(
m) + '_' + str(n) + '_' + str(k) + '/type' + str(type)
check_dir(save_path)
save_paths.append(save_path)
datas_1_list.append(datas1)
datas_2_list.append(datas2)
datas_3_list.append(datas3)
# 错误数据的激活信息保存
for attack_type in ['fgsm', 'gaussian_noise', 'saliency_map', 'uniform_noise']:
wrong_datas = data_provider.get_wrong_active_data_with_attack_type('../data/mnist', dataset,
layer_nums,
attack_type)
# process_wrong_datas and save coverage data
layer1 = layer_num
layer2 = layer_num + 1
layer3 = layer_num + 2
datas1 = np.array([list(item) for item in wrong_datas[:, layer1]])
datas2 = np.array([list(item) for item in wrong_datas[:, layer2]])
datas3 = np.array([list(item) for item in wrong_datas[:, layer3]])
print(datas1.shape)
print(datas2.shape)
print(datas3.shape)
threshold1 = np.percentile(datas1, percentile)
threshold2 = np.percentile(datas2, percentile)
threshold3 = np.percentile(datas3, percentile)
datas1[datas1 > threshold1] = 1
datas2[datas2 > threshold2] = 1
datas3[datas3 > threshold3] = 1
datas1[datas1 <= threshold1] = 0
datas2[datas2 <= threshold2] = 0
datas3[datas3 <= threshold3] = 0
save_path = '../data/mnist/mnist_wrong_active_data/coverage/' + attack_type + '/percentile' + str(
percentile) + '/adjacent_' + str(m) + '_' + str(n) + '_' + str(k) + '/type' + str(type)
check_dir(save_path)
save_paths.append(save_path)
datas_1_list.append(datas1)
datas_2_list.append(datas2)
datas_3_list.append(datas3)
process_three_layer(layer_nums, m, n, k, datas_1_list, datas_2_list, datas_3_list, save_paths,
layer_num, type)
| true |
cc10f449d0ae2ae326463ff2bc8ba30952ee96dc | Python | krajeevin/importdata | /que.py | UTF-8 | 687 | 3.03125 | 3 | [] | no_license |
import matplotlib.pyplot as plt
import datetime
import pandas.io.data as web
import pandas as pd
dfold = pd.read_csv('Nifty2015.csv', index_col = 'Date', parse_dates=True)
dfnew= pd.read_csv('Nifty2015New.csv', index_col = 'Date', parse_dates=True)
dfnew = dfnew.drop_duplicates().fillna(0)
dfold=dfold.drop_duplicates().fillna(0)
print dfnew.tail(5)
print dfold.tail(5)
#dfupdate=pd.merge(dfold, dfnew, on='Date') # Pankaj: This won't work because you have declared Date column as index while reading file.
dfupdate=pd.merge(dfold, dfnew, left_index=True, right_index=True) # Here I am telling to join on index column from both tables.
dfupdate.to_csv('NiftyFirst.csv', index=True)
| true |
3afc2315cbb6f447585e1c34c8855e24b8bf32eb | Python | jdum/odfdo | /recipes/add_logo_on_presentation.py | UTF-8 | 3,077 | 3.15625 | 3 | [
"Apache-2.0",
"CC-BY-3.0",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-free-unknown"
] | permissive | #!/usr/bin/env python
"""
Insert an image (e.g. the logo of an event, organization or a Creative Commons
attribution) with size x,y at position x2,y2 on a number of slides in a
presentation slide deck.
Exemple:
./add_logo_on_presentation.py -i newlogo.png -r 1-8 -s 4.00 presentation_logo.odp
"""
import sys
import os
from optparse import OptionParser
from odfdo import Document, Frame
# Readng image size requires a graphic library
# The standard PIL lib may have different modules names on different OS
try:
from PIL import Image
PIL_ok = True
except:
PIL_ok = False
print("No image size detection. " "You should install Python Imaging Library")
modified_file_suffix = "new"
image_position = ("1.50cm", "1.50cm")
title = "New Logo"
text = "The new logo with blue background"
def make_image_size(path, size):
try:
w, h = Image.open(path).size
except OSError:
print("error reading", path)
return None
ratio = max(w / size, h / size)
return (f"{w / ratio:.2f}cm", f"{h / ratio:.2f}cm")
def main():
usage = "usage: %prog -i IMAGE -r RANGE -s SIZE PRESENTATION"
description = "Add an image on some pages of a presentation."
parser = OptionParser(usage, description=description)
parser.add_option(
"-i",
"--image",
dest="image",
help="Image to be added",
action="store",
type="string",
)
parser.add_option(
"-r",
"--range",
dest="range",
help="Range of the slides",
action="store",
type="string",
)
parser.add_option(
"-s",
"--size",
dest="size",
help="max width in cm of the image",
action="store",
type="float",
)
options, source = parser.parse_args()
if not source or not options.image or not options.range or not options.size:
print("need options !")
parser.print_help()
exit(0)
lst = options.range.split("-")
start = int(lst[0]) - 1
end = int(lst[1])
file_name = source[0]
image_size = make_image_size(options.image, float(options.size))
presentation = Document(file_name)
presentation_body = presentation.body
uri = presentation.add_file(options.image)
# Append all the component
for i in range(start, end):
# Create a frame for the image
image_frame = Frame.image_frame(
image=uri,
text="", # Text over the image object
size=image_size, # Display size of image
anchor_type="page",
page_number=None,
position=image_position,
style=None,
)
image_frame.svg_title = title
image_frame.svg_description = text
slide = presentation_body.get_draw_page(position=i)
slide.append(image_frame)
# Finally save the result
name_parts = file_name.split(".")
name_parts.insert(-1, modified_file_suffix)
new_name = ".".join(name_parts)
presentation.save(new_name)
if __name__ == "__main__":
main()
| true |
8fcd0d830f2b8a341216641e366c77134e68bdc1 | Python | saviaga/Coding_E | /longest-substring-without-repeating-characters/longest-substring-without-repeating-characters.py | UTF-8 | 437 | 3.15625 | 3 | [] | no_license | class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
dict_char = {}
start = 0
max_lenght = 0
for end in range(len(s)):
end_char = s[end]
if end_char in dict_char:
start = max(start,dict_char[end_char]+1)
dict_char[end_char] = end
max_lenght = max(max_lenght,end-start+1)
return max_lenght
| true |
850ebf0c12c7f7987544cf7ae190c526dd53dfad | Python | Jalbanese1441/Waterloo-CS-Circles-Solutions | /11C Secure the Perimeter.py | UTF-8 | 164 | 2.890625 | 3 | [] | no_license | def trianglePerimeter(xA, yA, xB, yB, xC, yC):
a=distance2D(xA, yA, xB, yB)
b=distance2D(xA, yA, xC, yC)
c =distance2D(xC, yC, xB, yB)
return (a+b+c)
| true |
a87a1aea974786d558f2812f4f4f99e70d939ad1 | Python | vitay/ANNarchy_future | /ANNarchy_future/parser/CodeGeneration.py | UTF-8 | 2,634 | 3.25 | 3 | [
"MIT"
] | permissive | import sys
import logging
import string
import sympy as sp
from sympy.codegen.rewriting import optims_c99, optimize, ReplaceOptim
from sympy.core.mul import Mul
from sympy.core.expr import UnevaluatedExpr
def ccode(eq) -> str:
"""Transforms a sympy expression into C99 code.
Applies C99 optimizations (`sympy.codegen.rewriting.optims_c99`).
Expands `pow(x; 2)` into `x*x` and `pow(x, 3)` into `x*x*x` for performance.
Args:
eq (sympy expression): expression to convert.
Returns:
a string representing the C code.
"""
# If the rhs is a int or float (v = 0.0), cast it to a symbol to avoid numerical errors.
if isinstance(eq, (float)):
eq = sp.Symbol(str(float(eq)))
elif isinstance(eq, (int)):
eq = sp.Symbol(str(int(eq)))
# Optimize for C99
try:
eq = optimize(eq, optims_c99)
except:
logger = logging.getLogger(__name__)
logger.exception(str(eq))
sys.exit(1)
# Explicitly expand the use of pow(x, 2)
pow2 = ReplaceOptim(
lambda p: p.is_Pow and p.exp == 2,
lambda p: UnevaluatedExpr(Mul(p.base, p.base, evaluate=False))
)
eq = pow2(eq)
# Explicitly expand the use of pow(x, 3)
pow3 = ReplaceOptim(
lambda p: p.is_Pow and p.exp == 3,
lambda p: UnevaluatedExpr(Mul(Mul(p.base, p.base, evaluate=False), p.base, evaluate=False))
)
eq = pow3(eq)
# Get the equivalent C code
eq = sp.ccode(
eq,
)
# Remove the extralines of Piecewise
return " ".join(eq.replace('\n', ' ').split())
def code_generation(eq, correspondance:dict = {}) -> str:
"""Gets a dictionary of correspondances and changes all symbols in the sympy expression.
Calls `eq.subs()` and `ccode`.
Args:
eq (sympy expression): expression.
correspondance (dict): dictionary of correspondances.
Returns:
a string representing the C code.
Example:
>>> code_generation(
sp.Symbol(tau) * sp.Symbol(r),
{'tau': 'this->tau', 'r': 'this->r[i]'})
this->tau*this->r[i]
"""
# If the rhs is a int or float (v = 0.0), cast it to a symbol to avoid numerical errors.
if isinstance(eq, (float)):
eq = sp.Symbol(str(float(eq)))
elif isinstance(eq, (int)):
eq = sp.Symbol(str(int(eq)))
# Build a dictionary of sp.Symbols()
replacements = {}
for pre, post in correspondance.items():
replacements[sp.Symbol(pre)] = sp.Symbol(post)
# Replace the symbols
new_eq = eq.subs(replacements)
return ccode(new_eq)
| true |
2f65a6780227483aef13e6b14337bc0ba163f94c | Python | gloryxiao/python-core | /src/py2/base_dev/multiprocessing_test.py | UTF-8 | 275 | 2.75 | 3 | [] | no_license | # coding=utf-8
from multiprocessing.dummy import Pool
def f(x):
return x*x
def callback(arg):
print "hello, %s" % arg
if __name__ == "__main__":
pool = Pool(processes=4)
res = pool.apply_async(f, (10,), callback=callback)
print res.get(timeout=1)
| true |
dc174795f97cee1d4d306ec7d84da94cb72b7c27 | Python | Luismaprzzzz/AppWeb | /main.py | UTF-8 | 358 | 2.515625 | 3 | [] | no_license | #importacion de librerias
from flask import Flask, render_template
#creacion de obj de flask
app = Flask(__name__)
#creacion de ruta para la pagina principal
@app.route("/")
#creacion de funcion para index
def index():
return render_template("index.html")
#definicion de servidor web
if __name__ == '__main__':
app.run(port = 3000,debug = True)
| true |
b4f7164ad26db48836a2cad7f8c3305de122874e | Python | Iandonmorgan/classes | /companies-and-employees.py | UTF-8 | 773 | 3.46875 | 3 | [] | no_license | from companies import Company
from employees_departments import Employee
amazon = Company("Amazon", "123 Fake St.", "Global Domination")
walmart = Company("Wal-Mart", "456 Nota Real St.", "Global Domination")
mike = Employee("Mike")
michael = Employee("Michael")
bruce = Employee("Bruce")
billy = Employee("Billy")
bob = Employee("Bob")
amazon.hire(mike, "President")
walmart.hire(michael, "President")
walmart.hire(bruce, "Vice President")
amazon.hire(billy, "Vice President")
walmart.hire(bob, "Secretary")
companies = [amazon, walmart]
for company in companies:
print(f'{company.business_name} is in the {company.industry_type} industry and has the following employees:')
for employee in company.employees:
print(f'* {employee.name}')
print(f'') | true |
e78ba89d2f2740b4a34799f89a88aa1ebc886033 | Python | pranjalc1/USACO-Python-Practice | /USACO Comp Problems/Bronze 2017 January/Bronze_2017_January_3.py | UTF-8 | 696 | 2.84375 | 3 | [] | no_license | filein = open('cowtip.in','r')
fileout = open('cowtip.out','w')
text = filein.readlines()
N = int(text.pop(0).strip())
grid = [[int(i) for i in line.strip()] for line in text]
filein.close()
numflips = 0
while True:
sumoflists = sum([sum(line) for line in grid])
if sumoflists == 0:
break
for i in range(N-1,-1,-1):
if sum(grid[i]) > 0:
maxdown = i
break
for j in range(N-1,-1,-1):
if grid[maxdown][j] == 1:
maxright = j
break
for x in range(maxright+1):
for y in range(maxdown+1):
grid[y][x] = 1 - grid[y][x]
numflips += 1
fileout.write(str(numflips) + '\n')
fileout.close()
| true |
58bb1479d8a32756456089265f6850a4ecc52be2 | Python | aascode/deepdepdetect | /src/models/dnn.py | UTF-8 | 6,248 | 2.796875 | 3 | [] | no_license | from models.model import Model
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from keras import backend
class DNN(Model):
def __init__(self, train_x, train_y, test_x, test_y, learning_rate):
Model.__init__(self)
self.train_x = train_x
self.train_y = train_y
self.test_x = test_x
self.test_y = test_y
self.learning_rate = learning_rate
def train(self):
# train_x = self.min_max_normalized(self.train_x)
# test_x = self.min_max_normalized(self.test_x)
# train_x = np.nan_to_num(train_x)
# test_x = np.nan_to_num(test_x)
train_x = self.train_x
test_x = self.test_x
train_y = self.train_y
test_y = self.test_y
model = keras.Sequential()
model.add(keras.layers.Dense(300, activation='relu'))
model.add(keras.layers.Dense(300, activation='relu'))
model.add(keras.layers.Dense(300, activation='relu'))
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.compile(
optimizer=tf.train.AdamOptimizer(learning_rate=self.learning_rate),
loss='binary_crossentropy',
metrics=['accuracy', rmse, 'mae']
)
history = model.fit(
train_x,
train_y,
batch_size=10,
epochs=30,
validation_data=(test_x, test_y),
verbose=1
)
history_dict = history.history
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'b', label='train accuracy')
plt.plot(epochs, val_acc, '#000000', label='test accuracy')
plt.title('Training and validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()
return model
class CrossValidationDNN(Model):
    """Feed-forward binary classifier evaluated with stratified k-fold CV.

    The network (three 300-unit ReLU layers + a sigmoid output) is rebuilt
    from scratch for every fold so no weights leak between folds.  Metrics
    tracked per fold: accuracy and RMSE, for both training and validation.
    """

    def __init__(self, train_x, train_y, test_x, test_y, final_x, final_y, learning_rate, folds):
        Model.__init__(self)
        self.train_x = train_x
        self.train_y = train_y
        self.test_x = test_x
        self.test_y = test_y
        self.final_x = final_x
        self.final_y = final_y
        self.learning_rate = learning_rate
        self.folds = folds
        self.model = None  # built lazily by create()

    def create(self):
        """(Re)build and compile a fresh Keras model, replacing any old one."""
        model = keras.Sequential()
        model.add(keras.layers.Dense(300, activation='relu'))
        model.add(keras.layers.Dense(300, activation='relu'))
        model.add(keras.layers.Dense(300, activation='relu'))
        model.add(keras.layers.Dense(1, activation='sigmoid'))
        model.compile(
            optimizer=tf.train.AdamOptimizer(learning_rate=self.learning_rate),
            loss='binary_crossentropy',
            metrics=['accuracy', rmse, 'mae']
        )
        self.model = model

    def train(self, x_train, y_train, x_val, y_val):
        """Fit the current model on one fold and return the Keras History."""
        history = self.model.fit(
            x_train,
            y_train,
            batch_size=10,
            epochs=200,
            validation_data=(x_val, y_val),
            verbose=0
        )
        return history

    @staticmethod
    def _plot_training(history):
        """Per-fold plotting hook (intentionally a no-op).

        The original implementation only computed accuracy/loss series and
        left every plotting call commented out, so the dead assignments were
        removed.  The method is kept — and still called once per fold — so
        plotting can easily be re-enabled from ``history.history``.
        """

    def train_with_cross_validation(self):
        """Run stratified k-fold CV, printing per-fold and average metrics."""
        skf = StratifiedKFold(n_splits=self.folds, shuffle=True)
        root_mean_squared_error = []
        acc = []
        val_root_mean_squared_errors = []
        val_acc = []
        for index, (train_indices, val_indices) in enumerate(skf.split(self.train_x, self.train_y)):
            print("Training on fold " + str(index+1) + "/"+str(self.folds)+"...")
            # Generate batches from indices.
            x_train, x_val = self.train_x[train_indices], self.train_x[val_indices]
            y_train, y_val = self.train_y[train_indices], self.train_y[val_indices]
            self.create()
            print("Training new iteration on " + str(x_train.shape[0]) + " training samples, " + str(x_val.shape[0]) +
                  " validation samples, this may be a while...")
            history = self.train(x_train, y_train, x_val, y_val)
            self._plot_training(history)
            accuracy_history = history.history['acc']
            val_accuracy_history = history.history['val_acc']
            # Keep only each fold's final-epoch value for the averages below.
            root_mean_squared_error.append(history.history['rmse'][-1])
            val_root_mean_squared_errors.append(history.history['val_rmse'][-1])
            acc.append(accuracy_history[-1])
            val_acc.append(val_accuracy_history[-1])
            print(
                "Last training accuracy: "
                + str(accuracy_history[-1])
                + ", last validation accuracy: "
                + str(val_accuracy_history[-1])
            )
        # The stray unary "+" inside str(+sum(...)) was removed; printed text
        # is unchanged.
        print("Avg rmse: "+str(sum(root_mean_squared_error)/len(root_mean_squared_error))
              + ", Avg val_rmse: "+str(sum(val_root_mean_squared_errors)/len(val_root_mean_squared_errors)))
        print("Avg acc: "+str(sum(acc)/len(acc))
              + ", Avg val_acc: "+str(sum(val_acc)/len(val_acc)))
def rmse(y_true, y_pred):
    """Root-mean-squared error over the last axis, using Keras backend ops."""
    squared_error = backend.square(y_pred - y_true)
    return backend.sqrt(backend.mean(squared_error, axis=-1))
| true |
c199e38f28ab596f916b9015411795e321eaf0e1 | Python | alexandre146/avaliar | /media/codigos/42/42sol218.py | UTF-8 | 166 | 3.421875 | 3 | [] | no_license | n = -1
# Brazilian voter classification by age (read from stdin):
#   under 16         -> "nao eleitor"         (cannot vote)
#   18 through 65    -> "eleitor obrigatorio" (voting is mandatory)
#   16-17 or over 65 -> "eleitor facultativo" (voting is optional)
n = int(input())
if n < 16:
    print("nao eleitor")
elif 18 <= n <= 65:  # chained comparison instead of bitwise "&" on booleans
    print("eleitor obrigatorio")
else:
    print("eleitor facultativo")
| true |
6b1b8b54bdb62de53aabc5cc73930eed0e2de567 | Python | TorchAblaze/spooky_manor | /locations.py | UTF-8 | 14,620 | 3.21875 | 3 | [] | no_license | inventory = ['Raincoat (worn)', 'Padlock key', 'Parcel']
# ---------------------------------------------------------------------------
# Room data for the Spooky Manor text adventure.  Each room contributes:
#   *_COMMANDS : dict mapping an upper-case player command to either a plain
#                string (printed to the player) or an "action" dict.
#   *_INTRO    : text printed when the player enters the room.
# Action dicts may carry (presumably consumed by the game loop elsewhere —
# TODO confirm against the engine):
#   "message"  text shown when the action runs
#   "points"   score awarded
#   "location" index of the room the player moves to
#   "add" / "remove"  item added to / removed from the player's inventory
#   "restriction in" / "restriction out"  an inventory item that blocks /
#       is required for the action, with "message1"/"message2" as the
#       blocked / allowed responses
# NOTE(review): several player-facing strings contain intentional-looking
# typos ("streatches", "ranincoat", "SNITFTER", "conviences", ...); they are
# runtime text and are left untouched here.
# ---------------------------------------------------------------------------

# GATE
GATE_NORTH = {
    "message": "Eager to get out of the pouring rain, you hurry north up the dark cobblestone path towards the front door of the manor.",
    "points":0,
    "location":2,
}
BIKE = {
    "message":"You get off your bike, put the kick stand down and lock your bike on the metal fencing next to the gate.",
    "points":5,
}
GATE_COMMANDS = {
    "EXAMINE BIKE":"The basket contains a small parcel. A heavy chain and padlock are wrapped around the bike's frame. You are currently on the bicycle.",
    "EXAMINE PARCEL":"A box wrapped in paper and tied with twine. It's addressed to 'Lord Alastair Spooky.'",
    "SHAKE PARCEL":"It rattles.",
    "OPEN PARCEL":"It's not yours to open!",
    "EXAMINE RAINCOAT":"Your dark-green raincoat marks you as a courier for Parcel-E-Delivery. There is a padlock key in the pocket. You are wearing the raincoat.",
    "EXAMINE GATE":"The wrought-iron gate features the Spooky family crest. It's closed.",
    "EXAMINE PATH":"The neglected path looks hazardous to your bike's fragile tires.",
    "USE PADLOCK":BIKE,
    "LOCK BIKE":BIKE,
    "DROP PARCEL":"That's not proper couriership :(",
    "GET OFF BIKE":"You get off your bike.",
    "EXAMINE COBBLESTONE":"The neglected path looks hazardous to your bike's fragile tires.",
    # "EXIT EAST": ,
    # "EXIT WEST": ,
    "EXIT NORTH":GATE_NORTH,
    "NORTH":GATE_NORTH,
    "GO NORTH":GATE_NORTH,
}
GATE_INTRO = "\nTHE GATE:\nYou stop your bicycle by a forbidding wrought-iron gate.\nA cobblestone path winds its way to the north. To the east\nand west streatches a dark and lonely road. It is raining.\n\nYou are wearing a ranincoat and riding a bike."
# TODO: ALL ROOM INTROS (including this one) NEED TO BE CHANGED depending on how the player interacts their surroundings (taking things, missing things, using things, etc.)
# EXITS: NORTH: FRONT DOOR, EAST/WEST: ENDING

# FRONT DOOR
FDOOR_SOUTH = {
    "message":"You head south on the cobblestone path leading down towards the gate.",
    "location":1,
}
KNOCK = {
    "message":"Nobody answers, but the door cracks open, seemingly on its own! You push the door inward and enter the dimly lit manor as lightning cracks in the distance.",
    "location":3
}
FRONT_DOOR_COMMANDS = {
    "KNOCK":KNOCK,
    "KNOCK DOOR":KNOCK,
    "KNOCK ON DOOR":KNOCK,
    "SOUTH":FDOOR_SOUTH,
    "GO SOUTH":FDOOR_SOUTH,
    "EXIT SOUTH": FDOOR_SOUTH,
    "ENTER DOOR":"It's locked.",
    "OPEN DOOR":"It's locked.",
}
FRONT_DOOR_INTRO = "\nFRONT DOOR:\nYou step up to Spooky Manor's imposing front door. There is a brass knocker here."
# TODO: If player did not lock bike or rode the bike to the front door, the bike will have to be described as stolen and/or broken, otherwise, the bike stays locked at the GATE until player unlocks it
# TODO: After player knocks, let the player decide if they want to enter the door or not

# VESTIBULE
COAT = {
    "message":"You hang your coat on the coat tree.",
    "remove":"Raincoat (worn)",
}
# NOTE(review): this action uses the key "new location" where every other
# movement action uses "location" — confirm the engine handles both.
VESTIBULE_NORTH = {
    "restriction in":"Raincoat (worn)",
    "message1":"And drip water all over the floor? Perhaps it would be polite to hang up your raincoat?",
    "message2":"You walk north toward the great hall. The front door swings shut and locks behind you, which offers this chilling challenge: find a way out!",
    "new location":4
}
VESTIBULE_COMMANDS = {
    "GO NORTH":VESTIBULE_NORTH,
    "NORTH":VESTIBULE_NORTH,
    "EXAMINE COAT TREE":"It's currently empty.",
    "HANG COAT":COAT,
    "HANG RAINCOAT":COAT,
    "HANG RAINCOAT ON COAT TREE":COAT,
    "HANG COAT ON COAT TREE":COAT,
}
VESTIBULE_INTRO = "\nVESTIBULE:\nYou are standing in a vestibule. Rain drips from your coat onto the floor. To the north is the manor's great hall. There is a coat tree here."
# TODO: Have the raincoat be dry if the player explores a few rooms, FRONT DOOR option not available only after player hangs coat, player won't be able to open it until player has key
# TODO: Because hanging the coat it what prompts the player to go north, every time the player reenters this room, they will have to hang their raincoat even if they don't have one :(

# GREAT HALL
GREAT_HALL_SOUTH = {
    "message":"You head south toward the vestibule.",
    "location":3,
}
GREAT_HALL_EAST = {
    "message":"You walk toward the east wing of the manor. You feel as though the eyes of the people in the portrait follow you.",
    "location":9,
}
GREAT_HALL_WEST = {
    "message":"You walk to the west wing of the manor, the old floor creaking beneath your feet.",
    "location":5
}
GREAT_HALL_UP = {
    "message":"You walk past the portraits and up the carpeted steps to the second floor.",
    "location":18,
}
GREAT_HALL_COMMANDS = {
    "EXAMINE PAINTINGS":"You see portraits of a distinguished-looking man and a pale-skinned woman.",
    "EXAMINE MIRROR":"You can see yourself.", # TODO: add{wearing a smoking jacket/a wolf pelt} if they are in the inventory
    "SOUTH":GREAT_HALL_SOUTH,
    "EXIT SOUTH":GREAT_HALL_SOUTH,
    "GO SOUTH":GREAT_HALL_SOUTH,
    "EAST":GREAT_HALL_EAST,
    "EXIT EAST":GREAT_HALL_EAST,
    "GO EAST":GREAT_HALL_EAST,
    "WEST":GREAT_HALL_WEST,
    "EXIT WEST":GREAT_HALL_WEST,
    "UP":GREAT_HALL_UP,
    "GO UP":GREAT_HALL_UP,
    "EXIT UP":GREAT_HALL_UP,
    "GO UP STAIRS":GREAT_HALL_UP,
    "GO UP STAIRCASE":GREAT_HALL_UP,
}
GREAT_HALL_INTRO = "\nGREAT HALL:\nArchways lead to the east and west wings of the manor.\nThere are some oil paintings and a large mirror hanging\non the wall. A staircase leads up to the second floor. The\nvestibule is to the south."
# TODO: Add options to "examine mirror": WEREWOLF(You notice you have pointy ears, sharp teeth and hair covering most of your face.) VAMPIRE(You can't see your reflection in the mirror!)
# EXITS: SOUTH: VESTIBULE, EAST: LIBRARY, WEST: DINING ROOM, UP: HALLWAY

# DINING ROOM
DINING_ROOM_NORTH = {
    "message":"You head north to leave the dining room.",
    "location":7,
}
DINING_ROOM_SOUTH = {
    "message":"You walk south out of the dining room.",
    "location":6,
}
DINING_ROOM_EAST = {
    "message":"You head east back to the Great Hall.",
    "location":4,
}
# "restriction out": the action is blocked unless the listed item is carried.
PHEASANT = {
    "restriction out":"Meat cleaver",
    "message1":"It's still attached.",
    "message2":"You take the drumstick. Oof, it's kind of heavy, you don't think you can carry more than one.",
}
DINING_ROOM_COMMANDS = {
    "EXAMINE PHEASANT":"The meat looks to be cold and unappetizing.",
    "TAKE PHEASANT":"You ponder carrying around a whole roast pheasant and decide against it.",
    "TAKE DRUMSTICK":PHEASANT,
    "USE CLEAVER ON PHEASANT":"You hack off one of the drumsticks to carry around with you, county fair-style.",
    "EAT DRUMSTICK":"You're not hungry.",
    "NORTH":DINING_ROOM_NORTH,
    "EXIT NORTH":DINING_ROOM_NORTH,
    "GO NORTH":DINING_ROOM_NORTH,
    "SOUTH":DINING_ROOM_SOUTH,
    "EXIT SOUTH":DINING_ROOM_SOUTH,
    "GO SOUTH":DINING_ROOM_SOUTH,
    "EAST":DINING_ROOM_EAST,
    "EXIT EAST":DINING_ROOM_EAST,
    "GO EAST":DINING_ROOM_EAST,
}
DINING_ROOM_INTRO = "\nDINING ROOM:\nThe dining room contains a long banquet table. A whole\nroast pheasant, complete with drumsticks, rests in the\ncenter of the table. Exits are to the north, south and east."
# TODO: WEREWOLF: "You smell wolf to the north and smoke to the\nsouth. The meat on the table smells delicious!"
# TODO: Player can only carry one drumstick at a time
# TODO: Drumsticks can be eaten by werewolf players
# TODO: USE CLEAVER ON PHEASANT can only work if player has cleaver in their inventory and EAT DRUMSTICK should only work if player cut the pheasant with cleaver and/or has DRUMSTICK already in their inventory
# TODO: Player should be allowed to TAKE DRUMSTICK if they used the cleaver on the phesant
# TODO: There are only two drumsticks in this game. After they are succesfully eaten, they should no longer be available.
# EXITS: NORTH: KITCHEN, SOUTH: LOUNGE, EAST: GREAT HALL

# LOUNGE
LOUNGE_NORTH = {
    "message":"You walk out back toward the Dining Room.",
    "location":5,
}
TAKE_SMOKING_JACKET = {
    "message":"You take the smoking jacket.",
    "add":"Smoking Jacket (worn)",
}
LOUNGE_COMMANDS = {
    "EXAMINE JACKET":"The red satin jacket bears the Spooky family crest.",
    "DRINK BRANDY":"No drinking on the job!",
    "TAKE BRANDY":"You'll spill it.",
    "EXAMINE CHAIR":"A compfy-looking overstuffed chair with wooden legs. There is a smoking jacket folded over on one of its arm.",
    "EXAMINE FIRE":"The fire brightens up the room, casting shadows along the wall. The cackling sound and warmth are welcoming in an otherwise quiet and drafty manor.",
    "EXAMINE HEARTH":"The fire brightens up the room, casting shadows along the wall. The cackling sound and warmth are welcoming in an otherwise quiet and drafty manor.",
    "EXAMINE BRANDY":"A footed glass filled with some type of alcohol.",
    "EXAMINE SNITFTER":"A footed glass filled with some type of alcohol.",
    "TAKE SMOKING JACKET":TAKE_SMOKING_JACKET,
    "TAKE JACKET":TAKE_SMOKING_JACKET,
    "NORTH":LOUNGE_NORTH,
    "EXIT NORTH":LOUNGE_NORTH,
    "GO NORTH":LOUNGE_NORTH,
}
LOUNGE_INTRO = "\nLOUNGE:\nYou enter the lounge, where a fire is roaring in the hearth.\nA smoking jacket rests on the arm of an overstuffed chair.\nThere is a snifter of brandy here."
# TODO: WEREWOLF: You smell meat to the north
# TODO: VAMPIRE: Panicked by the sight of an open flame, you flee the fire! ~Vampires retreat north to the DINING ROOM
# TODO: WET PLAYER: To recover from being cold and wet, the player must WEAR JACKET or WEAR WOLF PELT, then SIT BY FIRE and DRINK BRANDY (Player must be allowed to DRINK BRANDY in this case)
# TODO: ITEMS: if player takes smoking jacket, smoking jacket comments should be removed
# EXITS: NORTH: DINING ROOM

# KITCHEN
KITCHEN_SOUTH = {
    "message":"You walk toward the dining room.",
    "location":5,
}
KITCHEN_DOWN = {
    "message":"You open the cellar door and down a dark set of stairs. You hear the sound of low growls coming from below....",
    "location":8,
}
OLIVE_OIL = {
    "message":"You take the olive oil.",
    "add":"Olive oil",
}
MEAT_CLEAVER = {
    "message":"You take the meat cleaver.",
    "add":"Meat cleaver",
}
KITCHEN_COMMANDS = {
    "EXAMINE OLIVE OIL":"The bottle is almost empty.",
    "EXAMINE OIL":"The bottle is almost empty.",
    "EXAMINE CELLAR DOOR":"A closed wooden door with a handle. It doesn't appear to be locked and you can hear rustling through the door.",
    "EXAMINE MEAT CLEAVER":"A cutting utensil...for meat.",
    "SOUTH":KITCHEN_SOUTH,
    "EXIT SOUTH":KITCHEN_SOUTH,
    "GO SOUTH":KITCHEN_SOUTH,
    "DOWN":KITCHEN_DOWN,
    "EXIT DOWN":KITCHEN_DOWN,
    "GO DOWN":KITCHEN_DOWN,
    "ENTER CELLAR DOOR":KITCHEN_DOWN,
    "TAKE OLIVE OIL":OLIVE_OIL,
    "TAKE OIL":OLIVE_OIL,
    "TAKE MEAT CLEAVER":MEAT_CLEAVER,
    "TAKE CLEAVER":MEAT_CLEAVER,
    "DRINK OLIVE OIL":"Uh...You don't exactly find these things appetizing. You don't know why you would come up with such a silly idea."
}
KITCHEN_INTRO = "\nKITCHEN:\nThe kitchen is old-fashioned, devoid of most modern\nconviences. There is a meat cleaver here. There is a\nsmall bottle of olive oil here. A cellar door leads down."
# TODO: WEREWOLF: "You smell meat to the south and wolf downstairs."
# EXITS: SOUTH: DINING ROOM, DOWN: DARK CELLAR

# DARK CELLAR
DARK_CELLAR_UP = {
    "message":"Unbeliving of what you just saw, you head back up the dark cellar stairs questioning why you ever took this job.",
    "location":7,
}
GARLIC = {
    "message":"You take the garlic cloves.",
    "add":"Garlic cloves",
}
DARK_CELLAR_COMMANDS = {
    "EXAMINE WOLF":"The savage beast strains at the chain around its neck.",
    "USE WOLFSBANE ON DRUMSTICK":"You rub the herb onto the meat.",
    "EXAMINE MAN":"A tall, gaunt man with thinning hair and a penchil-thin mustache. He's currently covering his naked body with a large tin of peaches he took from a shelf.",
    "TALK TO MAN":"The man replies 'I was attacked while hunting with Lord Spooky last week. Everything after that is just a blur.'",
    "DROP PARCEL":"Pretty sure the parcel isn't addressed to a wolf.",
    "THROW PARCEL":"You ponder throwing the parcel at the beast, but delivering the parcel to the right person is why you entered this wretched place. Don't lose hope now.",
    "TAKE CLOVES OF GARLIC":GARLIC,
    "TAKE CLOVES":GARLIC,
    "TAKE GARLIC":GARLIC,
    "TAKE GARLIC CLOVES":GARLIC,
    "UP":DARK_CELLAR_UP,
    "EXIT UP":DARK_CELLAR_UP,
    "GO UP":DARK_CELLAR_UP,
    "EXIT CELLAR":DARK_CELLAR_UP,
}
DARK_CELLAR_INTRO = "\nDARK CELLAR:\nYou enter the dark cellar and see a monstrous wolf\nchained to the wall! There are cloves of garlic here."
# TODO: ITEMS: Garlic(see VAMPIRE)
# TODO: USE WOLFSBANE ON DRUMSTICK should only work if player has WOLFSBANE and DRUMSTICK in inventory
# TODO: Werewolf will eat DRUMSTICK whether it is covered in WOLFSBANE or not, but it must be covered in WOLFSBANE for werewolf to change back to human form
# TODO: If the player stays here for too many turns without curing Manfred, or tries to ATTACK WOLF with the MEAT CLEAVER or the SILVER PEN (and if in inventory), the wolf snaps its chain and attacks:
# The wolf overpowers you, and you feel its sharp teeth and claws savage your flesh. Hours later, you wake up, uninjured but wearing bloody, torn clothing. Your senses seeem especially keen, and your fingernails are long and sharp. The wolf is nowhere to be found.
# TODO: WEREWOLF: The player is now a werewolf! The werewolf has a keen sense of smell, is allergic to silver and wolfsbane and has sharp claws.
# TODO: WEREWOLF: If the player uses WOLFSBANE and DRUMSTICK on themself at any point in the game, so long as they are in his inventory, it will cure them of lycanthropy
# TODO: VAMPIRE: A vampire player character may not take the garlic. If attacked by the wolf, the vampire will assume mist form, drift off to the Graveyard and revert back, weak and injured. A vampire player must return to their grave to rest before continuing.
# If werewolf is returned to human form:
# TODO: TALK TO MAN should only work if the werewolf was returned to human form
# TODO: EXAMINE MAN should only work if the werewolf was returned to human form
# TODO: Manfred will only leave the cellar once given the SMOKING JACKET, the RAINCOAT, or the WOLF PELT. Once he has one of these, he will leave and wait for the player by the door to the Master Suite.
# EXITS: UP: KITCHEN
| true |
4214e452ba282cd737051659c7836deb38659451 | Python | markhorsfield/Get-Programming-with-Python-in-Motion | /u8m3-working_with_your_own_object_types.py | UTF-8 | 2,086 | 3.75 | 4 | [] | no_license | class Circle(object):
def __init__(self):
self.radius = 0
def change_radius(self, radius):
self.radius = radius
def get_radius(self):
return self.radius
class Stack(object):
    """A simple LIFO stack backed by a Python list (top = end of the list)."""

    def __init__(self):
        self.stack = []

    def get_stack_elements(self):
        """Return a shallow copy of the stack, bottom-to-top."""
        return self.stack.copy()

    def add_one(self, item):
        """Push ``item`` onto the top of the stack."""
        self.stack.append(item)

    def remove_one(self):
        """Pop and return the top item (raises IndexError when empty).

        Returning the popped value is new but backward-compatible: the
        original discarded it and returned None.
        """
        return self.stack.pop()

    def add_many(self, item, n):
        """Push the SAME ``item`` object ``n`` times (no copies are made)."""
        self.stack.extend([item] * n)

    def remove_many(self, n):
        """Pop ``n`` items and return them top-first.

        As in the original one-at-a-time loop, an IndexError is raised if
        fewer than ``n`` items are present (items popped so far stay popped).
        """
        return [self.stack.pop() for _ in range(n)]

    def size(self):
        """Return the number of items on the stack."""
        return len(self.stack)

    def prettyprint(self):
        """Print the stack one item per line, top item first."""
        for thing in self.stack[::-1]:
            print('|_', thing, '_|')
# --- Stack demo: push pancakes, pop one, inspect the result. --------------
pancakes = Stack()
pancakes.add_one("blueberry")
pancakes.add_many("chocolate",4)
pancakes.remove_one()
print(pancakes.size())
pancakes.prettyprint()

# Stacking Circle objects: add_many pushes the SAME Circle object 4 times,
# so all 4 entries alias one instance.
circles = Stack()
one_circle = Circle()
one_circle.change_radius(2)
circles.add_one(one_circle)
print(one_circle)
# add the same circle object 4 times
one_circle = Circle()
one_circle.change_radius(1)
circles.add_many(one_circle, 4)
print(circles.size())
circles.prettyprint()

# Same exercise, but each pushed circle is a distinct object this time.
circles = Stack()
one_circle = Circle()
one_circle.change_radius(2)
circles.add_one(one_circle)
print(one_circle)
# add 4 separate circle objects
for i in range(4):
    one_circle = Circle()
    one_circle.change_radius(1)
    circles.add_one(one_circle)
print(circles.size())
circles.prettyprint()
##############################
########## QUICKCHECK ########
##############################
class Queue(object):
    """A simple FIFO queue backed by a Python list (front = index 0)."""

    def __init__(self):
        self.queue = []

    def get_queue_elements(self):
        """Return a shallow copy of the queue, front-to-back."""
        return self.queue.copy()

    def add_one(self, item):
        """Enqueue ``item`` at the back."""
        self.queue.append(item)

    def remove_one(self):
        """Dequeue and return the front item (raises IndexError when empty).

        Returning the value is new but backward-compatible: the original
        discarded it.  Note pop(0) on a list is O(n).
        """
        return self.queue.pop(0)

    def size(self):
        """Return the number of queued items."""
        return len(self.queue)

    def prettyprint(self):
        """Print the queue one item per line, back of the queue first."""
        for thing in self.queue[::-1]:
            print('|_', thing, '_|')
# --- Queue quick check: FIFO, so the 3 enqueued first is dequeued first. --
a = Queue()
a.add_one(3)
a.add_one(1)
a.remove_one()
a.prettyprint()
| true |
1991047aa90a6a0aff766f1b6a307e3d4a6c0d19 | Python | acehu970601/Motors-Temperature-Prediction | /src/MotorTemp.py | UTF-8 | 1,367 | 2.546875 | 3 | [] | no_license | import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
class MLProj:
    """Exploratory plots for the PMSM motor-temperature dataset.

    Loads ``pmsm_temperature_data.csv`` from the working directory on
    construction and exposes one method per figure.
    """

    def __init__(self):
        self.dataset = pd.read_csv("pmsm_temperature_data.csv")

    def ProfilePlot(self, filename="plots/ProfileID.png", plot=True):
        """Bar plot of sample counts per measurement profile.

        Saves the figure to ``filename``; also shows it interactively when
        ``plot`` is True.
        """
        exp_time_count = self.dataset.profile_id.value_counts().sort_values()
        fig = plt.figure(figsize=(17, 12))
        sns.barplot(y=exp_time_count.values, x=exp_time_count.index,
                    order=exp_time_count.index, orient="v")
        plt.title("Sample counts for different profiles", fontsize=16)
        plt.ylabel("Sample count", fontsize=14)
        plt.xlabel("Profile ID", fontsize=14)
        fig.savefig(filename)
        if plot:
            plt.show()

    def CorrPlot(self, filename="plots/CorrPlot.png", plot=True):
        """Heatmap of pairwise feature correlations.

        Saves the figure to ``filename``; also shows it interactively when
        ``plot`` is True.
        """
        corrmat = self.dataset.corr()
        fig = plt.figure(figsize=(15, 8))
        cmap = sns.diverging_palette(240, 10, as_cmap=True)
        # FIX: ``np.bool`` was a deprecated alias (removed in NumPy 1.24);
        # the builtin ``bool`` is the supported spelling.  The all-False
        # mask leaves every cell visible, exactly as before.
        sns.heatmap(corrmat, annot=True, linewidths=.5, fmt='.2f', mask=np.zeros_like(
            corrmat, dtype=bool), cmap=cmap, square=False)
        plt.tight_layout()
        fig.savefig(filename)
        if plot:
            plt.show()
if __name__ == "__main__":
    # Script entry point: build the project and show the correlation heatmap.
    P = MLProj()
    # P.ProfilePlot(plot=False)
    P.CorrPlot(plot=True)
| true |
515657a0f6b2f45bc342111557b17215a64ed341 | Python | bryanee23/jobsearch_public | /classes.py | UTF-8 | 2,082 | 2.78125 | 3 | [
"MIT"
] | permissive | import time, random, re
import pandas as pd
from selenium import webdriver
from directory import EXCEL_PATH, CONFIG_FILE
browser = webdriver.Chrome('') # TODO: supply the local chromedriver executable path here before running
class Browser_ctrls:
    """Convenience wrappers around the module-level selenium ``browser``."""

    def __init__(self):
        # Inclusive (low, high) bounds, in seconds, for pause().
        self.delay = [2, 3]

    def open_target_page(self, target_page):
        """Point the shared browser at ``target_page``."""
        browser.get(target_page)

    def _pause(self, start, end):
        """Sleep a random whole number of seconds in [start, end]."""
        seconds = random.randint(start, end)
        time.sleep(seconds)

    def pause(self):
        """Sleep a random whole number of seconds within ``self.delay``."""
        low, high = self.delay
        time.sleep(random.randint(int(low), int(high)))

    def scroll_to_bottom(self):
        """Jump straight to the bottom of the current page."""
        browser.execute_script("window.scrollTo(0,document.body.scrollHeight)")

    def scroll_through_page(self):
        """Nudge the page down by 100 pixels."""
        browser.execute_script("window.scrollBy(0,100)")

    def _strip_spaces(self, text):
        """Return the second line of ``text`` with space runs collapsed."""
        second_line = text.split('\n')[1]
        return re.sub(' +', ' ', second_line).strip()
class LinkedIn_login(Browser_ctrls):
    """Browser controller that logs a user into LinkedIn.

    Credentials are read from ``<CONFIG_FILE>.txt`` — one value per line,
    username first, then password.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Read the credentials file eagerly so a missing/unreadable config
        # fails fast at construction time (the returned value is not kept).
        self.get_userInfo()

    def get_userInfo(self):
        """Return the credential lines from the config file as a list.

        The dead local ``userInfo = []`` from the original was removed.
        """
        with open(f'{CONFIG_FILE}.txt', 'r') as f:
            return f.read().split('\n')

    def login(self, login_page):
        """Open ``login_page``, type the credentials and submit the form.

        After submitting, scrolls the page a random number of times with
        pauses in between (presumably to look less bot-like — TODO confirm).
        """
        browser.get(login_page)
        user_info = self.get_userInfo()
        elementIDs = ['username', 'password']
        for info, elementID in zip(user_info, elementIDs):
            # Both branches of the original did pause() + send_keys(); only
            # the password field additionally submits the form.
            self.pause()
            browser.find_element_by_id(elementID).send_keys(info)
            if elementID == 'password':
                browser.find_element_by_id(elementID).submit()
        self.pause()
        for _ in range(random.randint(1, 2)):
            self.scroll_to_bottom()
            self.pause()
        self._pause(1, 2)
class Pandas_Ops:
    """Small pandas helpers for sorting job data and exporting it to Excel."""

    def sort_ascending_column(self, df):
        """Return ``df`` sorted A-Z on its 'Company' column."""
        return df.sort_values(by='Company', ascending=True)

    def write_excel(self, df, title):
        """Write ``df`` to '<EXCEL_PATH><title>.xlsx' on two sheets."""
        target = f'{EXCEL_PATH}{title}.xlsx'
        with pd.ExcelWriter(target) as writer:
            for sheet in ('Main', 'raw data'):
                df.to_excel(writer, sheet_name=sheet, index=False)
| true |
1b4f3241593ebb9b9dfbd9847a1507435e15310d | Python | Nobuho/PDFeditor | /PDF_fromIMG.py | UTF-8 | 995 | 2.578125 | 3 | [] | no_license | from PIL import Image
import img2pdf
import os
from glob import glob
# Each image folder must contain its images directly (no nesting).
# The generated PDF is named after the image folder.
# Parent folder that holds the per-PDF image folders:
path = "C:\\_Python\\PDFeditor\\img"
# Image extension to collect (*.png reportedly fails — unverified).
ext = ".jpg"
path += "\\**\\"
for p in glob(path):
    search_path = p + "*" + ext
    folder_name = os.path.basename(p.rstrip(os.sep))
    print(folder_name)
    # Glob once and reuse the list; the original evaluated the same glob
    # pattern three separate times.
    images = glob(search_path)
    # Re-save every image through Pillow first: Image.new + putdata keeps
    # only the pixel data, so img2pdf gets a clean file.
    for image_path in images:
        with Image.open(image_path) as src:
            data = src.getdata()
            mode = src.mode
            size = src.size
        with Image.new(mode, size) as dst:
            dst.putdata(data)
            dst.save(image_path)
    if not images:
        continue
    with open(folder_name + ".pdf", "wb") as files:
        files.write(img2pdf.convert(images))
| true |
12bfeac0179652b63dc5d36bd5bb198585f2144f | Python | RoxyKang/CG-calculations | /compute_angle.py | UTF-8 | 210 | 2.859375 | 3 | [] | no_license | import numpy as np
# Angle between two 3-D vectors via the normalised dot product.
v0 = np.array([0.5801, -0.8146, 0])
v1 = np.array([0.9655, -0.2604, 0])
v0 = v0 / np.linalg.norm(v0)
v1 = v1 / np.linalg.norm(v1)
# Clamp to [-1, 1] so floating-point rounding can never push the cosine
# outside arccos's domain (which would yield NaN for near-parallel vectors).
theta1 = np.arccos(np.clip(np.dot(v0, v1), -1.0, 1.0))
print(np.rad2deg(theta1))
9dca1691ab6b50f603081f313020133131a3158b | Python | bfazzani/find-a-home | /machineLearning.py | UTF-8 | 3,527 | 2.578125 | 3 | [] | no_license |
# !/usr/bin/env python
import sys
if sys.version_info[0] >= 3:
import PySimpleGUI as sg
else:
import PySimpleGUI27 as sg
def MachineLearningGUI():
    """Show a mock 'machine-learning front end' form and print its values.

    Builds three framed sections (command-line parameters, flags, loss
    functions), blocks until the user presses Submit/Cancel, then prints
    the pressed button and the collected field values.
    """
    # Right-justify labels while building this form; restored at the end.
    sg.SetOptions(text_justification='right')

    flags = [[sg.Checkbox('Normalize', size=(12, 1), default=True), sg.Checkbox('Verbose', size=(20, 1))],
             [sg.Checkbox('Cluster', size=(12, 1)), sg.Checkbox('Flush Output', size=(20, 1), default=True)],
             [sg.Checkbox('Write Results', size=(12, 1)), sg.Checkbox('Keep Intermediate Data', size=(20, 1))],
             [sg.Checkbox('Normalize', size=(12, 1), default=True), sg.Checkbox('Verbose', size=(20, 1))],
             [sg.Checkbox('Cluster', size=(12, 1)), sg.Checkbox('Flush Output', size=(20, 1), default=True)],
             [sg.Checkbox('Write Results', size=(12, 1)), sg.Checkbox('Keep Intermediate Data', size=(20, 1))], ]

    # Radio buttons share the 'loss' group, so only one can be selected.
    loss_functions = [
        [sg.Radio('Cross-Entropy', 'loss', size=(12, 1)), sg.Radio('Logistic', 'loss', default=True, size=(12, 1))],
        [sg.Radio('Hinge', 'loss', size=(12, 1)), sg.Radio('Huber', 'loss', size=(12, 1))],
        [sg.Radio('Kullerback', 'loss', size=(12, 1)), sg.Radio('MAE(L1)', 'loss', size=(12, 1))],
        [sg.Radio('MSE(L2)', 'loss', size=(12, 1)), sg.Radio('MB(L0)', 'loss', size=(12, 1))], ]

    command_line_parms = [
        [sg.Text('Passes', size=(8, 1)), sg.Spin(values=[i for i in range(1, 1000)], initial_value=20, size=(6, 1)),
         sg.Text('Steps', size=(8, 1), pad=((7, 3))),
         sg.Spin(values=[i for i in range(1, 1000)], initial_value=20, size=(6, 1))],
        [sg.Text('ooa', size=(8, 1)), sg.In(default_text='6', size=(8, 1)), sg.Text('nn', size=(8, 1)),
         sg.In(default_text='10', size=(10, 1))],
        [sg.Text('q', size=(8, 1)), sg.In(default_text='ff', size=(8, 1)), sg.Text('ngram', size=(8, 1)),
         sg.In(default_text='5', size=(10, 1))],
        [sg.Text('l', size=(8, 1)), sg.In(default_text='0.4', size=(8, 1)), sg.Text('Layers', size=(8, 1)),
         sg.Drop(values=('BatchNorm', 'other'), auto_size_text=True)], ]

    layout = [[sg.Frame('Command Line Parameteres', command_line_parms, title_color='green', font='Any 12')],
              [sg.Frame('Flags', flags, font='Any 12', title_color='blue')],
              [sg.Frame('Loss Functions', loss_functions, font='Any 12', title_color='red')],
              [sg.Submit(), sg.Cancel()]]

    window = sg.Window('Machine Learning Front End', font=("Helvetica", 12)).Layout(layout)
    # Blocks until the window is closed or a button is pressed.
    button, values = window.Read()
    sg.SetOptions(text_justification='left')

    print(button, values)
def CustomMeter():
    """Show a manually-driven 1000-step progress bar that can be cancelled.

    Polls the window with a zero timeout each iteration so a Cancel click
    (or closing the window) stops the loop early.
    """
    # layout the form
    layout = [[sg.Text('A custom progress meter')],
              [sg.ProgressBar(1000, orientation='h', size=(20, 20), key='progress')],
              [sg.Cancel()]]
    # create the form
    window = sg.Window('Custom Progress Meter').Layout(layout)
    progress_bar = window.FindElement('progress')
    # loop that would normally do something useful
    for i in range(1000):
        # check to see if the cancel button was clicked and exit loop if clicked
        event, values = window.Read(timeout=0, timeout_key='timeout')
        # FIX: "event is None" (identity test) instead of "event == None".
        if event == 'Cancel' or event is None:
            break
        # update bar with loop value +1 so that bar eventually reaches the maximum
        progress_bar.UpdateBar(i + 1)
    # done with loop... need to destroy the window as it's still open
    window.CloseNonBlocking()
if __name__ == '__main__':
    # Demo entry point: run the progress-meter demo, then the form demo.
    CustomMeter()
    MachineLearningGUI()
| true |
967b639b58ad233bad237081c4de3c1cbb17d2f6 | Python | rmallensb/python-yubico-client | /demo/example.py | UTF-8 | 1,001 | 2.6875 | 3 | [
"Python-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | import sys
from yubico_client import Yubico
from yubico_client import yubico_exceptions
from yubico_client.py3 import PY3
# Py2/Py3 compatibility shim: Python 3 has no raw_input builtin.
if PY3:
    raw_input = input

client_id = raw_input('Enter your client id: ')
secret_key = raw_input('Enter your secret key (optional): ')
token = raw_input('Enter OTP token: ')

# An empty secret key means "send unsigned verification requests".
if not secret_key:
    secret_key = None

client = Yubico(client_id, secret_key)

try:
    status = client.verify(token)
except yubico_exceptions.InvalidClientIdError:
    # sys.exc_info()[1] instead of "except ... as e" keeps Py2/Py3 compat.
    e = sys.exc_info()[1]
    print('Client with id %s does not exist' % (e.client_id))
    sys.exit(1)
except yubico_exceptions.SignatureVerificationError:
    print('Signature verification failed')
    sys.exit(1)
except yubico_exceptions.StatusCodeError:
    e = sys.exc_info()[1]
    print('Negative status code was returned: %s' % (e.status_code))
    sys.exit(1)

# verify() returned without raising: report its boolean result.
if status:
    print('Success, the provided OTP is valid')
else:
    print('No response from the servers or received other negative '
          'status code')
| true |
e345498999d56d9a78c334fb41806dfd6bd5933f | Python | anmol1432/learning_Django | /polls/admin.py | UTF-8 | 635 | 2.5625 | 3 | [] | no_license | '''
Make the poll app modifiable in the admin
But where’s our poll app? It’s not displayed on the admin index page.
Only one more thing to do: we need to tell the admin that Question objects have an admin interface.
To do this, open the polls/admin.py file, and edit it to look like this:
polls/admin.py
from django.contrib import admin
from .models import Question
admin.site.register(Question)
'''
from django.contrib import admin
from .models import Question, Choice
from django.contrib.admin import AdminSite  # NOTE(review): appears unused in this file — confirm before removing.

# Register the models so they appear in the Django admin panel.
admin.site.register(Question)
admin.site.register(Choice)
77647fd85117ea95343f961890811742f3febbe8 | Python | JamesPC44/USC_UpperDivision_Fall2018 | /respectful-reception/src/solve.py | UTF-8 | 1,604 | 3.59375 | 4 | [] | no_license | from pdb import set_trace
class Node(object):
    """Binary-search-tree node.

    ``count`` is how many duplicates of ``val`` live at this node and
    ``size`` is the total number of stored entries in this subtree.
    """

    def __init__(self, val):
        self.left = None
        self.right = None
        self.val = val
        self.count = 1
        self.size = 1


class Tree(object):
    """BST with duplicate counting that reports order statistics on insert."""

    def __init__(self):
        self.root = None

    def insert_and_count(self, val):
        """Insert ``val``; return how many earlier entries are <= ``val``."""
        if self.root is None:
            self.root = Node(val)
            return 0
        smaller_or_equal = 0
        parent = self.root
        node = self.root
        # Walk down, bumping subtree sizes for the new entry as we go.
        while node is not None and node.val != val:
            parent = node
            node.size += 1
            if val > node.val:
                # Everything at this node and in its left subtree is < val.
                smaller_or_equal += node.count
                if node.left is not None:
                    smaller_or_equal += node.left.size
                node = node.right
            else:
                node = node.left
        if node is None:
            # val is new: hang it off the last node we visited.
            child = Node(val)
            if val < parent.val:
                parent.left = child
            else:
                parent.right = child
        else:
            # Found an existing node for val: earlier duplicates and the
            # node's left subtree all count as <= val.
            smaller_or_equal += node.count
            if node.left is not None:
                smaller_or_equal += node.left.size
            node.count += 1
            node.size += 1
        return smaller_or_equal
def main():
    """Read N ages from stdin; after each one, print how many earlier
    entries had a value <= the new value."""
    total = int(input())
    ages = Tree()
    for _ in range(total):
        print(ages.insert_and_count(int(input())))
if __name__ == "__main__":
    main()  # script entry point
| true |
396eb6f714b19ad9149739ea0775fabb0c31ed94 | Python | krthkj/rfi-file-monitor | /rfi_file_monitor/preferenceswindow.py | UTF-8 | 8,720 | 2.625 | 3 | [
"BSD-3-Clause"
] | permissive | import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, GObject
import yaml
from typing import Dict, Any, Final
import logging
from .preferences import Preference, BooleanPreference, ListPreference, DictPreference, StringPreference
from .utils import EXPAND_AND_FILL, PREFERENCES_CONFIG_FILE
# Module-level logger, named after this module for hierarchical filtering.
logger = logging.getLogger(__name__)
class PreferenceValueCellRenderer(Gtk.CellRenderer):
    @GObject.Property(type=str)
    def key(self):
        """Preference key currently shown by this cell."""
        return self._key

    @key.setter
    def key(self, value):
        # Assigning a key also selects the matching child renderer
        # (toggle / combo / text) for that preference's type.
        self._key = value
        self._set_renderer(value)
    def __init__(self, prefs: Dict[Preference, Any], list_store: Gtk.ListStore, *args, **kwargs):
        """Cell renderer that edits preference values of mixed types.

        :param prefs: mapping of Preference instances to their current
            values; mutated in place whenever the user edits a cell.
        :param list_store: backing model whose column 0 holds the
            preference key for each row.
        """
        super().__init__(*args, **kwargs)
        self._prefs = prefs
        self._list_store = list_store
        self._key: str = ""
        self._renderer: Gtk.CellRenderer = None
        # our child renderers, one per kind of preference value
        self._toggle_renderer = Gtk.CellRendererToggle(activatable=True, radio=False)
        self._combo_renderer = Gtk.CellRendererCombo(has_entry=False)
        self._text_renderer = Gtk.CellRendererText(editable=True)
        # our combo models, created lazily per List/Dict preference
        self._combo_models: Final[Dict[Preference, Gtk.ListStore]] = dict()
        # connect signal handlers
        self._toggle_renderer.connect("toggled", self._toggle_cb)
        self._combo_renderer.connect("changed", self._changed_cb)
        self._text_renderer.connect("edited", self._edited_cb)
def _get_key_from_model(self, path: str) -> str:
return self._list_store[path][0]
def _edited_cb(self, combo: Gtk.CellRendererText, path: str, new_text: str):
key: str = self._get_key_from_model(path)
pref: Preference = self._get_pref_for_key(key)
self._prefs[pref] = new_text
# update config file
self._update_config_file()
def _changed_cb(self, combo: Gtk.CellRendererCombo, path: str, new_iter: Gtk.TreeIter):
key: str = self._get_key_from_model(path)
pref: Preference = self._get_pref_for_key(key)
store = self._combo_models[pref]
new_value = store[new_iter][0]
self._prefs[pref] = new_value
# update config file
self._update_config_file()
def _toggle_cb(self, renderer: Gtk.CellRendererToggle, path: str):
key: str = self._get_key_from_model(path)
pref: Preference = self._get_pref_for_key(key)
self._prefs[pref] = not self._prefs[pref]
# update config file
self._update_config_file()
def _update_config_file(self):
# write prefs into dictionary format
yaml_dict = dict()
for _pref, _value in self._prefs.items():
yaml_dict[_pref.key] = _value
try:
# ensure parent directories of preferences file have been created
PREFERENCES_CONFIG_FILE.parent.mkdir(mode=0o700, parents=True, exist_ok=True)
# open for writing
with PREFERENCES_CONFIG_FILE.open('w') as f:
logger.debug(f'Writing preferences to {str(PREFERENCES_CONFIG_FILE)}')
yaml.safe_dump(data=yaml_dict, stream=f)
except Exception:
logger.exception(f'Could not write to {str(PREFERENCES_CONFIG_FILE)}')
def _get_pref_for_key(self, key) -> Preference:
    """Return the Preference whose ``key`` attribute matches, or None when absent."""
    return next((pref for pref in self._prefs if pref.key == key), None)
def _set_renderer(self, key: str):
    """Select and configure the child renderer appropriate for *key*'s preference type.

    Boolean preferences get the toggle renderer, list/dict preferences the
    combo renderer (with a cached per-preference option model), and string
    preferences the editable text renderer.  Raises NotImplementedError for
    any other Preference subclass.
    """
    pref: Preference = self._get_pref_for_key(key)
    if isinstance(pref, BooleanPreference):
        self._renderer = self._toggle_renderer
        # the mode has to be set for both self and child!!!
        self.props.mode = Gtk.CellRendererMode.ACTIVATABLE
        self._renderer.props.mode = Gtk.CellRendererMode.ACTIVATABLE
        self._renderer.props.active = self._prefs[pref]
        self._renderer.props.activatable = True
    elif isinstance(pref, ListPreference) or isinstance(pref, DictPreference):
        self._renderer = self._combo_renderer
        self.props.mode = Gtk.CellRendererMode.EDITABLE
        current_value = self._prefs[pref]
        if pref not in self._combo_models:
            # create new model (one option row per allowed value, built once and cached)
            store = Gtk.ListStore(str)
            for _val in pref.values:
                store.append([_val])
            self._combo_models[pref] = store
        else:
            store = self._combo_models[pref]
        self._renderer.props.model = store
        self._renderer.props.text = current_value
        self._renderer.props.text_column = 0
        self._renderer.props.editable = True
        self._renderer.props.mode = Gtk.CellRendererMode.EDITABLE
    elif isinstance(pref, StringPreference):
        self._renderer = self._text_renderer
        self.props.mode = Gtk.CellRendererMode.EDITABLE
        current_value = self._prefs[pref]
        self._renderer.props.mode = Gtk.CellRendererMode.EDITABLE
        self._renderer.props.editable = True
        self._renderer.props.text = current_value
    else:
        raise NotImplementedError
# these methods define how the renderer should do its drawing.
# we just need to redirect it to the appropriate child renderer.
# Each Gtk.CellRenderer virtual method below forwards to the currently selected
# child renderer.  The child's class implementation is invoked explicitly
# (type(...).do_x(child, ...)) so the call dispatches on the child's type with
# the child instance as receiver.
def do_activate(self, event, widget, path, background_area, cell_area, flags):
    return type(self._renderer).do_activate(self._renderer, event, widget, path, background_area, cell_area, flags)

def do_get_aligned_area(self, widget, flags, cell_area):
    return type(self._renderer).do_get_aligned_area(self._renderer, widget, flags, cell_area)

def do_get_preferred_height(self, widget):
    return type(self._renderer).do_get_preferred_height(self._renderer, widget)

def do_get_preferred_height_for_width(self, widget, width):
    return type(self._renderer).do_get_preferred_height_for_width(self._renderer, widget, width)

def do_get_preferred_width(self, widget):
    return type(self._renderer).do_get_preferred_width(self._renderer, widget)

def do_get_preferred_width_for_height(self, widget, height):
    return type(self._renderer).do_get_preferred_width_for_height(self._renderer, widget, height)

def do_get_request_mode(self):
    return type(self._renderer).do_get_request_mode(self._renderer)

def do_get_size(self, widget, cell_area):
    return type(self._renderer).do_get_size(self._renderer, widget, cell_area)

def do_render(self, cr, widget, background_area, cell_area, flags):
    type(self._renderer).do_render(self._renderer, cr, widget, background_area, cell_area, flags)

def do_start_editing(self, event, widget, path, background_area, cell_area, flags):
    return type(self._renderer).do_start_editing(self._renderer, event, widget, path, background_area, cell_area, flags)
class PreferencesWindow(Gtk.Window):
    """Top-level window with a notebook page listing editable configuration options."""

    def __init__(self, prefs: Dict[Preference, Any], *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.set_default_size(500, 500)
        self._prefs: Dict[Preference, Any] = prefs
        # outermost container
        grid = Gtk.Grid(**EXPAND_AND_FILL)
        self.add(grid)
        nb = Gtk.Notebook(**EXPAND_AND_FILL)
        grid.attach(nb, 0, 0, 1, 1)
        # single "Configuration options" page
        config_page = Gtk.Grid(
            **EXPAND_AND_FILL,
            border_width=5,
            row_spacing=5,
            column_spacing=5)
        nb.append_page(config_page, Gtk.Label('Configuration options'))
        # banner explaining what the settings are for
        frame_child = Gtk.Label(
            label='<b>Settings specific to this machine, usable by operations</b>',
            use_markup=True,
            halign=Gtk.Align.FILL, valign=Gtk.Align.CENTER,
            hexpand=False, vexpand=False)
        frame = Gtk.Frame(
            height_request=50,
            halign=Gtk.Align.FILL, valign=Gtk.Align.START,
            hexpand=True, vexpand=False)
        frame.add(frame_child)
        config_page.attach(
            frame,
            0, 0, 1, 1)
        # model: one row per preference — column 0 key, column 1 description
        store = Gtk.ListStore(str, str)
        for _pref in self._prefs:
            store.append([_pref.key, _pref.description])
        # descriptions double as tooltips (tooltip_column=1)
        tv = Gtk.TreeView(model=store, tooltip_column=1, **EXPAND_AND_FILL)
        config_page.attach(tv, 0, 1, 1, 1)
        key_renderer = Gtk.CellRendererText()
        # value column uses the type-dispatching renderer defined above,
        # wired to the same model so it can resolve each row's key
        value_renderer = PreferenceValueCellRenderer(prefs=self._prefs, list_store=store)
        key_column = Gtk.TreeViewColumn("Key", key_renderer, text=0)
        value_column = Gtk.TreeViewColumn("Value", value_renderer, key=0)
        tv.append_column(key_column)
        tv.append_column(value_column)
        grid.show_all()
| true |
935271773d16ecba9f67fc0d83b03e7240e167cf | Python | Brian-Jiang/TheGhostGame | /GameLogic.py | UTF-8 | 1,639 | 3.578125 | 4 | [] | no_license | from _Helper import *
from random import randrange
class GhostGame:
    """Round-based Ghost word game between a human (player 0) and the AI (player 1)."""

    def __init__(self, word_bank: set):
        self.score = [0, 0]            # [human wins, AI wins]
        self.word = ''                 # letters guessed so far this round
        self.turn = randrange(0, 2)    # 0: human, 1: computer, -1: game over
        self.words = word_bank

    def guess_char(self, char):
        """Append the chosen letter to the current word fragment."""
        self.word = self.word + char

    def proceed(self):
        """Hand the move to the other player."""
        self.turn = 0 if self.turn == 1 else 1

    def challenge(self):
        """Resolve a challenge: the challenger wins iff the fragment is a valid prefix."""
        winner = self.turn if self._check_prefix(self.word) else (0 if self.turn == 1 else 1)
        self.end_round(winner)

    def end_round(self, winner: 'int, 0 or 1'):
        """Credit the round to *winner*, report the score, then finish or reset."""
        self.score[winner] += 1
        print('You ' if winner == 0 else 'AI ', end='')
        print('won, score is\tyou', self.score[0], self.score[1], 'AI')
        if self.score[winner] >= 5:
            self.end_game(winner)
        else:
            self._clear_board()

    def check_complete(self):
        """If the fragment already spells a complete word, the other player wins."""
        if check_complete_word(self.word, self.words):
            self.end_round(0 if self.turn == 1 else 1)

    def end_game(self, winner: 'int, 0 or 1'):
        """Announce the overall winner and mark the game as finished (turn = -1)."""
        self.turn = -1
        print('Game end, the winner is', end='')
        print(' you!!' if winner == 0 else ' AI!')
        print('\nThank you for Playing the Ghost Game.')

    def _clear_board(self):
        """Start a fresh round: empty fragment, random first player."""
        self.word = ''
        self.turn = randrange(0, 2)

    def _check_prefix(self, prefix):
        """True when some word in the bank starts with *prefix*."""
        return find_prefix(prefix, self.words)
| true |
6c9786b04ac3e3fd9613ae2d445b240bd607bec4 | Python | kamtim/executors_scrapping | /database/database_setup.py | UTF-8 | 994 | 2.515625 | 3 | [] | no_license | import sys
# для настройки баз данных
from sqlalchemy import Column, ForeignKey, Integer, String
# для определения таблицы и модели
from sqlalchemy.ext.declarative import declarative_base
# для создания отношений между таблицами
from sqlalchemy.orm import relationship
# для настроек
from sqlalchemy import create_engine
# создание экземпляра declarative_base
Base = declarative_base()
# здесь добавим классы
# создает экземпляр create_engine в конце файла
# engine = create_engine('sqlite:////Users/study_kam/services_scrapping/database/freelancers-collection.db')
class Freelancer(Base):
    """SQLAlchemy ORM model for one scraped freelancer profile."""
    __tablename__ = 'freelancer'
    # surrogate primary key
    id = Column(Integer, primary_key=True)
    # display name shown on the listing (required)
    name_text = Column(String(250), nullable=False)
    # profile URL (required)
    url = Column(String(250), nullable=False)
    # skills summary (optional; exact format is set by the scraper — confirm there)
    skills = Column(String(250))
    # avatar/image URL (optional)
    image = Column(String(250))
06787318059faa7b0308a04c6f5dcd9e5049c9ed | Python | pjok1122/baekjoon-online-judge-practice | /greedy/Matrix(1080).py | UTF-8 | 1,619 | 3.890625 | 4 | [] | no_license | '''
[문제]
3x3 부분행렬의 값을 전부 0->1 ,1->0으로 뒤집을 수 있는 연산을 가지고,
A행렬 -> B행렬로 만드는 최소 연산 횟수를 구하여라.
[문제 풀기 전 생각할 것]
(0,0) (N-1,0), (0, M-1), (N-1,M-1)의 값을 결정할 수 있는 부분행렬은 딱 1개밖에 존재하지 않는다.
즉, (0,0)에서 3x3 매트릭스를 그려서, A[0][0] != B[0][0] 이라면 3x3 매트릭스에 해당하는 값을 전부 뒤집는다.
이제, (0,1)에 영향을 주는 매트릭스는 (0,1)을 꼭지점으로 하는 매트릭스 하나뿐이다. 마찬가지로 A[0][1] != B[0][1]을 확인한다.
위의 예시처럼 → 방향으로 순서대로 확인을 해나간다.
[알고리즘]
1. 3x3매트릭스의 특성을 고려하면 x의 범위는 [0,N-2] 이고 y의 범위는 [0, M-2]이다.
2. [i][j]를 하나씩 늘려가며, flip(x,y)연산을 호출한다.
3. 마지막에는 A행렬과 B행렬이 같은지를 확인하고 같다면 flip 호출 횟수를, 다르다면 -1을 반환한다.
'''
N, M =map(int,input().split())
A = [list(map(int,list(input()))) for _ in range(N)]
B = [list(map(int,list(input()))) for _ in range(N)]
def flip(x, y):
    """Invert every cell of the 3x3 submatrix of A whose top-left corner is (x, y)."""
    for row in range(x, x + 3):
        for col in range(y, y + 3):
            A[row][col] = 1 - A[row][col]
def checkEquality():
    """Return 1 when matrices A and B agree on every cell of the N x M grid, else 0."""
    matches = all(A[row][col] == B[row][col] for row in range(N) for col in range(M))
    return int(matches)
cnt = 0  # number of 3x3 flips performed
# greedy sweep: anchor (i, j) is the only remaining submatrix that can fix
# cell (i, j), so flip exactly when the cell still differs from the target
for i in range(0, N - 2):
    for j in range(0, M - 2):
        if A[i][j] != B[i][j]:
            flip(i, j)
            cnt += 1
# the last two rows/columns can no longer be changed; if A != B now,
# the transformation is impossible
if checkEquality():
    print(cnt)
else:
    print(-1)
2380d2a8484c51e2b1452428c20a1a66e423d24d | Python | KrzysiekPienkowski/PythonProjects | /second.py | UTF-8 | 66 | 3.09375 | 3 | [] | no_license | cars = ["bmw","audi","skoda"]
# print each car make with its first letter capitalised
for car in cars:
    print(car.title())
86f16b98ee5061c25432f5ac61b51170e3ee947d | Python | yujinnnnn/python_practice | /py.script/CH06/restful_insert.py | UTF-8 | 1,163 | 3.140625 | 3 | [] | no_license | import requests
import json
# 호출방식 : response = web_request(method_name='GET', url=url, dict_data=data)
def web_request(method_name, url, dict_data, is_urlencoded=True):
method_name = method_name.upper()
if method_name not in ('GET','POST'):
print("다시 함수 호출해주세요. Method가 다릅니다.")
return 0
if method_name == 'GET':
response = requests.get(url=url, params=dict_data)
if method_name == 'POST':
if is_urlencoded is True:
response = requests.post(url=url, data=dict_data, headers={'content_type' :'application/x-www-form-urlencoded'})
else:
response = requests.post(url=url, data=json.dumps(dict_data), headers={'content_type' :'application/json'})
dict_meta = {'status_code': response.status_code, 'ok':response.ok, 'encoding': response.encoding, 'content_type':response.headers['content_type']}
if 'json' in str(response.headers['content_type']): #JSON 형태의 경우
return {**dict_meta, **response.json()}
else: #문자열 형태의 경우
return {**dict_meta, **{'text':response.text}} | true |
5578a72171bb9e7fba7382881e869fd83a6c48ad | Python | colinveal/hidden_markov_modeling | /TUF_Analysis/TUF_functions2.py | UTF-8 | 9,370 | 2.9375 | 3 | [] | no_license | import statistics, math
def bam_strip(Chr, file, start, stop): # file must be SAM/BAM format #op = operation
    """Collect per-column pileup data for Chr:start-stop from an open pysam file.

    Returns a list of [reference_pos, depth, bases] entries; when indel-aware
    base extraction fails the column is retried without indels and flagged
    with a trailing "*", and columns that fail entirely are skipped and
    counted in `errors`.
    NOTE(review): the bare excepts hide the real pysam error type — consider
    narrowing them.
    """
    errors = 0     # columns whose bases could not be read at all
    adjusted = 0   # columns read without indel info (flagged "*")
    bam_out = [] # Chr, file, op must be type str # Chr must be name of chrom in bam file
    for pileupcolumn in file.pileup(Chr ,start, stop, truncate=True,
                                    ignore_orphans=False):
        temp = []
        temp.append(pileupcolumn.reference_pos)
        temp.append(pileupcolumn.n) # number of reads mapping to this column
        try:
            temp.append(pileupcolumn.get_query_sequences(add_indels=True)) # appending bases
            bam_out.append(temp)
        except:
            try:
                temp.append(pileupcolumn.get_query_sequences(add_indels=False)) # appending bases
                temp.extend("*") # flag to show indels not assessed.
                bam_out.append(temp)
                #print("adjusted base ", "\n", temp)
                if len(temp) == 4:
                    adjusted += 1
            except:
                errors += 1
                #(print(pileupcolumn.reference_pos))
    print("errors = ", errors, "adjusted = ", adjusted)
    return bam_out
def indels(Chr, samfile, start, stop): # find all indels and put in dictionary
    """Map pileup position (as str) -> base strings for every column containing indels.

    Columns are classified by how many of their reads carry an indel:
    all reads (homozygous), at least half (heterozygous), or any at all
    (spontaneous); each matching column is added to the returned dict, so the
    three branches differ only in which case they represent.
    """
    indels = {}
    for i, pileupcolumn in enumerate(samfile.pileup(Chr ,start, stop,
                                                    truncate=True, ignore_orphans=False)):
        indel_count = 0
        for pileupread in pileupcolumn.pileups:
            if (pileupread.indel != 0): # start counting indels when the first indel is encountered
                indel_count +=1
        if (indel_count == pileupcolumn.n) and pileupcolumn.n > 1: # homozygous indels.
            indel_1 = {str(pileupcolumn.pos):
                       (pileupcolumn.get_query_sequences(add_indels=True))}
            indels.update(indel_1)
        elif indel_count >= 0.5* pileupcolumn.n and pileupcolumn.n > 1: # heterozygous indels.
            indel_2 = {str(pileupcolumn.pos):
                       (pileupcolumn.get_query_sequences(add_indels=True))}
            indels.update(indel_2)
        elif (indel_count > 0): # spontaneous indels.
            indel_3 = {str(pileupcolumn.pos):
                       (pileupcolumn.get_query_sequences(add_indels=True))}
            indels.update(indel_3)
    return indels
def is_mapped(start, stop, fasta): # need to make sure all skipped windows are put into cur_wins.
    """Return False when at least half of fasta[start:stop] is 'N', else True.

    Fix: the threshold was computed as 0.5*(start - stop), which is negative
    for any forward range, so the check fired on the first iteration and the
    function effectively always returned False for non-empty windows.
    """
    start, stop = int(start), int(stop)
    threshold = 0.5 * (stop - start)  # half the window length; arbitrarily chosen, can be altered
    n_count = 0
    for x in fasta[start:stop]:  # string.count() may be faster.
        if x == "N":
            n_count += 1
            if n_count >= threshold:
                return False
    return True
#use the fasta sequence to alter the GC counts: if gap or no reads/bases then use the bases in the fasta file for the skipped positions to calculate the gc content of window.
def gc_count(data): # gc_count takes a list as the iterable/data
    """Return the GC fraction of a window, ignoring 'N'/'n' bases.

    Each row of *data* is [position, depth, base].  Edge cases keep the
    original contract: 0 when there are no G/C bases, 1 when there are no
    A/T bases.

    Fix: the AT branch tested `!= "N" or != "n"`, which is always true, so N
    bases were counted toward the AT total contrary to the stated intent of
    excluding N-marked positions.  It now skips N/n.  The unused n_count
    local was removed.
    """
    gc_count = 0
    at_count = 0
    for win_element in data:
        base = win_element[2]
        if base in ("C", "c", "G", "g"):
            gc_count += 1
        elif base not in ("N", "n"):
            # any non-GC, non-N base counts toward the AT total
            at_count += 1
    if gc_count == 0:
        return 0
    if at_count == 0:
        return 1
    return gc_count / (gc_count + at_count)
def common_bases(data, fasta):
    """Reduce each pileup row of *data* (in place) to [pos, depth, most-common-base].

    Rows with no usable base column fall back to the reference *fasta* base.
    NOTE(review): fasta is indexed at x[0] + 1 — presumably compensating for a
    0- vs 1-based offset between the fasta list and the pileup positions;
    confirm against the caller that builds `fasta`.
    """
    from collections import Counter
    for i, x in enumerate(data):
        try:
            if x[1] == 1 and len(x) < 3:
                # depth-1 row that never got a base column: take the reference base
                x.append((fasta[x[0] + 1])) # appending the corresponding base for this position from hg38 ref fasta
                a = 1  # debug marker: which branch ran (printed on failure below)
                print("filling the list")
            elif x[2] == '': # change to searching fasta and appending corresponding base in fasta file instead.
                del(x[2])
                x.insert(2, (fasta[x[0] + 1])) # plus one because of the 0 indexing of fasta list compared to the 1 indexing of the bam positions.
                #print("adding fasta bases")
                a = 2
            else:
                common_count = Counter(x[2]).most_common(1) # provides the element which reaches the highest occurance first.
                del(x[2:])
                if len(common_count[0][0]) > 1: # when the most common mapped base to the position is an indel then all elements of the string are appended to the list (see screenshot on windows staff account).
                    indel = common_count[0][0]
                    x.extend(indel[0])
                else:
                    x.extend(common_count[0][0]) # extend adds the new elements into the list, not into the list as a separate list.
                a = 3 # indexing list to get tuple, indexing tuple to get only first element (base).
        except Exception as e:
            print(e, x)
            # NOTE(review): `a` may be unbound if the first statement of the
            # first branch raises before any marker was assigned
            print(a)
    return
def sum_rd(data, expected_size, net_indel): # we only adjust for indels, not for reference mapped gaps.
    """Sum read depth over a window, skipping 'N' positions.

    With net_indel=None the raw depth sum is returned.  Otherwise the mean
    depth over (non-N positions + net_indel) is rounded and scaled back up to
    expected_size, adjusting the window for indels.

    Fix: the filter tested `!= "N" or != "n"` (always true), so N positions
    were never excluded, contrary to the comments below.  The redundant inner
    `net_indel != None` re-check was also removed.
    """
    sums = []
    winsize = 0
    for x in data:
        if x[2] not in ("N", "n"):
            # exclude positions marked N so the window's GC average and sum RD can be adjusted
            sums.append(x[1])
            # We do not shrink winsize for skipped positions that mapped to a reference
            # base: those may be bases suppressed by TUF, and scaling their depth up
            # would make them look regular.  Windows with Ns *are* scaled up, since Ns
            # are unmapped positions that would otherwise drag the window's depth down.
            winsize += 1
    if net_indel is None:
        return sum(sums)
    # use winsize to adjust for indels without compensating for reference-mapped gaps
    adjusted_rd = (round(sum(sums)/(winsize + net_indel))*expected_size)
    return adjusted_rd
def get_winsize(data):
    """Count the window positions whose base (row[2]) is not 'N'/'n'.

    Fix: the original condition `!= "N" or != "n"` was always true, so every
    position was counted and the else-branch (which printed "N" and returned
    None) was unreachable.  The condition is corrected and the dead early
    return removed, matching how sum_rd/cat_win use this adjusted size.
    """
    adj_winsize = 0
    for x in data:
        if x[2] not in ("N", "n"):
            adj_winsize += 1
    return adj_winsize
def find_r(data, r_len):
    """Estimate the negative-binomial r parameter over consecutive blocks of rows.

    A block closes each time the row index reaches the next multiple of r_len;
    the method-of-moments estimate mean^2 / (variance - mean) of the block's
    depths (row[1]) is appended per block.
    """
    r_values = []
    depths = []
    block = 1
    for index, row in enumerate(data):
        depths.append(row[1])
        if index == r_len * block:
            mu = statistics.mean(depths)
            var = statistics.stdev(depths) ** 2
            r_values.append(mu ** 2 / (var - mu))
            depths = []
            block += 1
    return r_values
def nb_transformation(x, r, decimals): # x = window
    """Anscombe-type variance-stabilising transform of a window's read depth
    under a negative-binomial model with dispersion r (depth scaled by 100),
    rounded to *decimals* places."""
    ratio = (float(x) + 0.25) / (100 * float(r) - 0.5)
    value = 2 * (float(r) ** 0.5) * math.log(ratio ** 0.5 + (1 + ratio) ** 0.5)
    return round(value, decimals)
def poisson_transformation(x, decimals):
    """Square-root variance-stabilising transform of depth x (scaled by 100),
    rounded to *decimals* places."""
    value = 2 * ((float(x) + 0.25) / 100) ** 0.5
    return round(value, decimals)
def cat_json(output_filename, input_filenames):
    """Merge several single-object JSON files into one JSON object file.

    Each input file's outermost braces are stripped and the interiors are
    joined with commas inside a single pair of braces.  With an empty input
    list only '}' is written (original behaviour).
    """
    def strip_outer(text):
        # drop the outermost braces of one serialised object
        return text.strip()[1:-1]

    with open(output_filename, "w") as sink:
        for index, name in enumerate(input_filenames):
            with open(name) as src:
                sink.write('{' if index == 0 else ',')
                sink.write(strip_outer(src.read()))
        sink.write('}')
def cat_win(data, r, gc=None, sum=None): # concatenate data from bam_list/cur_win into windows; gc and sum are optional parameters.
    """Summarise one window of pileup rows into a flat record.

    Layout when both optional args are given:
    [end_pos, sum_rd, poisson_transform, nb_transform, adjusted_winsize, gc].
    NOTE(review): `sum` shadows the builtin, and when `sum` is None the later
    inserts at indices 4 and 5 land at smaller offsets because the list is
    shorter — confirm callers always pass `sum` whenever they pass `gc`.
    """
    # data = cur_win.  When cur_win is passed in, the window range is already
    # fixed by the caller, but the function also works without a specified range.
    window = []
    window.insert(0, data[-1][0]) # the end position of the window.
    if sum != None:
        window.insert(1, (sum))
        window.insert(2, poisson_transformation(window[1], decimals=2))
        window.insert(3, nb_transformation(window[1], r, decimals=2))
    window.insert(4, get_winsize(data)) # winsize with any N positions removed.
    if gc != None:
        window.insert(5, round(gc, 2))
    return window
ccbd93dfafe0a72eb6ab677d44474d777890aad8 | Python | onedx1943/Remark | /Other/toGif.v2.0.py | UTF-8 | 1,931 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | import ffmpy
import os
import json
import math
import subprocess
import time
# Convert every ./mp4/*.mp4 that has no matching ./gif/*.gif yet, then
# shrink any result larger than 10 MB by lowering fps and resolution.
current_dir = os.path.dirname(__file__)
input_paths = os.path.join(current_dir, 'mp4')
output_dir = os.path.join(current_dir, 'gif')
# names (lowercased, no extension) of GIFs that already exist
gif_list = []
mp4_files = os.listdir(input_paths)
gif_files = os.listdir(output_dir)
for file in gif_files:
    gif_name = str(os.path.splitext(file)[0]).lower()
    gif_list.append(gif_name)
for file in mp4_files:
    file_ext = str(os.path.splitext(file)[-1]).lower()
    if file_ext != '.mp4':
        continue
    name_text = str(os.path.splitext(file)[0]).lower()
    if name_text in gif_list:
        continue  # already converted
    print('开始转换MP4:%s' % file)
    file_name = os.path.join(input_paths, file)
    gif_name = name_text + '.gif'
    gif_path = os.path.join(output_dir, gif_name)
    print(file_name, gif_path)
    # first pass: straight ffmpy conversion with default settings
    ff = ffmpy.FFmpeg(
        inputs={file_name: None},
        outputs={gif_path: None}
    )
    ff.run()
    time.sleep(1)
    gif_size = os.path.getsize(gif_path)
    if gif_size > 10 * 1024 * 1024:
        # probe the source resolution so later passes can downscale it
        strCmd = 'ffprobe -v quiet -print_format json -show_format -show_streams -i %s' % file_name
        mystring = os.popen(strCmd).read()
        time.sleep(1)
        streams = json.loads(mystring)
        width = float(streams['streams'][0]['width'])
        height = float(streams['streams'][0]['height'])
        # second pass: drop the frame rate to 15 fps at the original size
        cmd = 'ffmpeg -i %s -r 15 %s -y' % (file_name, gif_path)
        subprocess.call(cmd)
        time.sleep(1)
        # progressively scale down (tightening the target from 9 MB to 1 MB)
        # until the GIF fits under 10 MB
        for i in range(9):
            gif_size = os.path.getsize(gif_path)
            if gif_size > 10 * 1024 * 1024:
                rate = math.sqrt(gif_size / ((9 - i) * 1024 * 1024))
                new_width = math.floor(width / rate)
                new_height = math.floor(height / rate)
                cmd = 'ffmpeg -i %s -r 15 -s %sx%s %s -y' % (file_name, new_width, new_height, gif_path)
                subprocess.call(cmd)
                time.sleep(1)
| true |
0ebd19ea70f5ccb0b57a244af1a8c0b31e1358fa | Python | ypochien/black_csv | /FTPDTA015.py | UTF-8 | 789 | 2.796875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import csv
import HTML
# Inline charset meta + table CSS prepended to the generated HTML report
HTML_HEADER = '''
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head>
<style>
table {
border-collapse: collapse;
align: center;
}
table, td, th {
border: 1px solid black;
font-size: 14px;
}
</style>\r'''
# source CSV is cp950-encoded (Traditional Chinese); materialise all rows
with open('data/FTPDTA015.csv' , mode='r' , encoding='cp950') as csv_file:
    reader = csv.reader(csv_file)
    data015 = list(reader)
# rows 3+ are the tabular data; the first two rows are report title lines
t = HTML.Table(data015[2:])
with open('out/FTPDTA015.html' , mode='w' , encoding='utf-8') as html_file:
    html_file.write(HTML_HEADER)
    html_file.write(data015[0][0] + '<BR>\r\n')
    html_file.write(data015[1][0] + '<BR>\r\n')
    html_file.write(str(t))
| true |
fcae60d3f26f0b4b867b88801810704a25c4c700 | Python | DeepakKumar-Mahadevan/repo01 | /Programs-01/extractBankStatementExcel02.py | UTF-8 | 5,349 | 3.015625 | 3 | [] | no_license | import xlrd
from xlrd.sheet import ctype_text
import datetime
def getCellDetails(row_num,col_num):
    """Return (cell, cell-type name, value) for a cell of the module-global
    `first_sheet` opened further down in this script."""
    cell = first_sheet.cell(row_num,col_num)
    cell_type = ctype_text.get(cell.ctype,'Unknown Type')  # xlrd ctype code -> readable name
    cell_value = cell.value
    return (cell,cell_type,cell_value)
def convertIsoDate(date_str):
    """Convert a d/m/Y-style date string to ISO 'YYYY-MM-DD'.

    Tries the four layouts bank statements use (slash or dash separator,
    two- or four-digit year) and returns the first match; returns
    'Invalid Date format' when none parse.  Replaces the original four-deep
    try/except pyramid with an equivalent loop over the formats.
    """
    for fmt in ("%d/%m/%Y", "%d/%m/%y", "%d-%m-%Y", "%d-%m-%y"):
        try:
            return datetime.datetime.strptime(date_str, fmt).strftime("%Y-%m-%d")
        except ValueError:
            continue
    return 'Invalid Date format'
print(100*'=')
# --- gather inputs -------------------------------------------------------
path = "C:/Users/MRSD/Downloads/"
file_name = input("Enter your input CSV file name:\n")
full_file_name = path + file_name
bank = input("Enter bank name:\n")  # '1' selects the Axis layout, anything else HDFC
# --- open the workbook and report its shape ------------------------------
xl = xlrd.open_workbook(full_file_name)
sheet_names = xl.sheet_names()
print("Sheet Names", sheet_names)
first_sheet = xl.sheet_by_index(0)
print("First Sheet: " + first_sheet.name)
num_rows = first_sheet.nrows
num_cols = first_sheet.ncols
print("No of Rows : " + str(num_rows))
print("No of Columns : " + str(num_cols))
# state machine flags: 'N' = header not seen yet, 'Y' = inside the txn
# section, 'E' = past the end of the section
header_found = 'N'
firt_header_line_found = 'N'  # HDFC only: a 'Date' line precedes the '********' header row
txn_count = 0
# Read through all Rows/Columns/Cells
print(70*'-')
for row_num in range(0,num_rows):
    # Get First Cell details for each row
    current_row = first_sheet.row(row_num)
    first_cell = first_sheet.cell(row_num,0)
    first_cell_type = ctype_text.get(first_cell.ctype,'Unknown Type')
    first_cell_value = first_cell.value
    if bank == '1': #Axis bank
        # Once header is found extract the txn records
        if header_found == 'Y':
            if first_cell_type != 'text' and \
               first_cell_value != '\t':
                #print('Row number ' + str(row_num) + ' : ' + str(current_row))
                txn_count = txn_count + 1
                txn_date = ''
                txn_no = ''
                txn_desc = ''
                debit = ''
                credit = ''
                balance = ''
                # Axis layout: date=col1, txn no=col2, description=col3,
                # debit=col4, credit=col5, balance=col6
                (cell,cell_type,cell_value) = getCellDetails(row_num,1)
                txn_date = convertIsoDate(cell_value)
                (cell,cell_type,cell_value) = getCellDetails(row_num,2)
                txn_no = cell_value.strip()
                (cell,cell_type,cell_value) = getCellDetails(row_num,3)
                txn_desc = cell_value.strip()
                (cell,cell_type,cell_value) = getCellDetails(row_num,4)
                debit = cell_value.strip()
                (cell,cell_type,cell_value) = getCellDetails(row_num,5)
                credit = cell_value.strip()
                (cell,cell_type,cell_value) = getCellDetails(row_num,6)
                balance = cell_value.strip()
                #print('(txn_date,txn_no,txn_desc,debit,credit,balance) = (%s,%s,%s,%s,%s,%s)' % (txn_date,txn_no,txn_desc,debit,credit,balance))
                print('%s|%s|%s|%s|%s|%s' % (txn_date,txn_no,txn_desc,debit,credit,balance))
            # a text '\t' cell marks the end of the txn section
            if first_cell_type == 'text' and \
               first_cell_value == '\t':
                header_found = 'E'
        # Find header record and set flag
        if first_cell_value == 'SRL NO':
            header_found = 'Y'
            print('txn_date|txn_no|txn_desc|debit|credit|balance')
    else: #HDFC bank
        # Once header is found extract the txn records
        if header_found == 'Y':
            if first_cell_type != 'empty':
                #print('Row number ' + str(row_num) + ' : ' + str(current_row))
                txn_count = txn_count + 1
                txn_date = ''
                txn_no = ''
                txn_desc = ''
                debit = ''
                credit = ''
                balance = ''
                # HDFC layout: date=col0, description=col1, txn no=col2,
                # debit=col4, credit=col5, balance=col6 (numeric cells, no strip)
                (cell,cell_type,cell_value) = getCellDetails(row_num,0)
                txn_date = convertIsoDate(cell_value)
                (cell,cell_type,cell_value) = getCellDetails(row_num,2)
                txn_no = cell_value.strip()
                (cell,cell_type,cell_value) = getCellDetails(row_num,1)
                txn_desc = cell_value.strip()
                (cell,cell_type,cell_value) = getCellDetails(row_num,4)
                debit = cell_value
                (cell,cell_type,cell_value) = getCellDetails(row_num,5)
                credit = cell_value
                (cell,cell_type,cell_value) = getCellDetails(row_num,6)
                balance = cell_value
                #print('(txn_date,txn_no,txn_desc,debit,credit,balance) = (%s,%s,%s,%s,%s,%s)' % (txn_date,txn_no,txn_desc,debit,credit,balance))
                print('%s|%s|%s|%s|%s|%s' % (txn_date,txn_no,txn_desc,debit,credit,balance))
            # an empty first cell marks the end of the txn section
            if first_cell_type == 'empty':
                header_found = 'E'
                firt_header_line_found = 'E'
        # Find header record and set flag
        if firt_header_line_found == 'Y':
            if first_cell_value == '********':
                header_found = 'Y'
                print('txn_date|txn_no|txn_desc|debit|credit|balance')
        if first_cell_value == 'Date':
            firt_header_line_found = 'Y'
print(70*'-')
print('No of txn records: ' + str(txn_count))
print(100*'=')
3b440192080f232ad67b475775d143bfe04e555a | Python | awoerp/Seljan-Scheduler | /GUI Development/Server/ServerFunctions.py | UTF-8 | 2,893 | 2.640625 | 3 | [] | no_license | from cPickle import dumps, loads
from Command_Codes import codes
from time import sleep
class ServerFunctions():
    """Request handlers invoked by the socket server; each replies via Send(),
    which pickles the payload and prefixes it with a 4-hex-digit length."""

    def __init__(self, log, users, parameters, currentWorkOrders):
        self.handler = None  # connection handler, attached later via SetHandler()
        self.log = log
        self.users = users
        self.parameters = parameters
        self.currentWorkOrders = currentWorkOrders
        # NOTE(review): 0.1/1000 delays 0.1 s per 1000 characters, not per 5000
        # as the original comment below states — confirm which was intended.
        self.delayRatio = 0.1/1000 #delay 0.1 seconds per 5000 characters

    def SendUserNameList(self):
        """Reply with the list of all known user names."""
        usernameList = []
        for user in self.users.users:
            usernameList.append(user.name)
        self.Send(usernameList)
        self.log.WriteToLog("UserNameList Requested")

    def Login(self, userName, password):
        """Check the password for *userName*; send the User on success, '' otherwise.

        NOTE(review): if no user matches, `targetedUser` is never bound and the
        log call below raises NameError.  The plaintext password is also written
        to the log — a security concern.
        """
        for user in self.users.users:
            if user.name == userName:
                targetedUser = user
        self.log.WriteToLog("LoginRequest for %s" % (targetedUser.name))
        self.log.WriteToLog("Given Password: %s" % (password))
        if targetedUser.password == password:
            self.Send(targetedUser)
            self.log.WriteToLog("Login was successful")
        else:
            self.Send("") # TODO: This should send back a negative response not
            self.log.WriteToLog("Password was incorrect")

    def CreateWorkOrder(self, serializedNewWorkOrder):
        """Unpickle a new work order, assign it the next job number, and store it.

        Replies with codes["True"]/codes["False"].  The bare except treats any
        failure (bad pickle, storage error) the same way.
        """
        self.log.WriteToLog("Work Order Creation Attempt")
        try:
            newWorkOrder = loads(serializedNewWorkOrder)
            newWorkOrder.jobNumber = self.parameters.parameters["jobNumber"]
            self.currentWorkOrders.AddWorkOrder(newWorkOrder)
            self.parameters.parameters["jobNumber"] += 1 # Increase "jobNumber" by 1
            self.Send(codes["True"])
            self.parameters.UpdateParameters() # Saves the new job number in ROM
            self.log.WriteToLog("Successfully created work order: %s" % str(newWorkOrder.jobNumber))
        except:
            self.log.WriteToLog("Error: Could not create work order")
            self.Send(codes["False"])

    def SendCurrentWorkOrdersMessageSize(self, size):
        """Reply with the byte size the client should expect for the next payload."""
        self.Send(size)
        self.log.WriteToLog("Current work orders message size request: %s Bytes" % str(size))

    def SendCurrentWorkOrders(self, workOrdersArray):
        """Reply with the current work-order array."""
        self.Send(workOrdersArray)
        self.log.WriteToLog("Current work orders request")

    def SetHandler(self, handler):
        """Attach the connection handler used for outgoing messages."""
        self.handler = handler

    def Send(self, message):
        """Pickle *message*, frame it as '0x' + 4-hex-digit length + payload, send,
        then sleep proportionally to the message size to throttle output.

        NOTE(review): payloads longer than 0xFFFF bytes overflow the 4-digit
        length field ('04x' does not truncate) and would corrupt the framing.
        """
        serializedMessage = dumps(message)
        loads(serializedMessage)  # round-trip sanity check of the pickle
        messageLength = len(serializedMessage)
        messageSize = format(messageLength, '04x')
        print("messageSize = " + messageSize)
        self.handler.SendMessage("0x" + messageSize + serializedMessage)
        print("Message Length = " + str(messageLength))
        print("Delay Time = " + str(self.delayRatio * messageLength))
        sleep(self.delayRatio * messageLength)
| true |
16d51c0f1e1ee2b1ef0f81fed2d3db32ce57ecea | Python | ido90/Elevators | /Passenger.py | UTF-8 | 1,056 | 2.71875 | 3 | [] | no_license |
import numpy as np
from MyTools import *
class Arrival:
    """One arrival event: n passengers appearing at floor xi, heading to floor xf."""
    def __init__(self, t, n, d, xi, xf):
        # validity: non-negative time/delay/floors, at least one passenger,
        # and distinct origin/destination floors
        assert(t>=0 and n>=1 and d>=0 and xi>=0 and xf>=0 and xi!=xf)
        assert_integers(n,xi,xf)  # n/xi/xf must be integral (helper from MyTools)
        self.t = t # arrival time
        self.n = n # number of passengers
        self.d = d # passengers delay in entrance
        self.xi = xi # initial floor
        self.xf = xf # destination floor

    def print(self, i=None):
        """Pretty-print the arrival, optionally prefixed with its index *i*."""
        if i is None:
            print(f"{self.t:.1f}:\t{self.n:d}\t({self.d:.1f})\t{self.xi:d} -> {self.xf:d}")
        else:
            print(f"({i:03d})\t{self.t:.1f}:\t{self.n:d}\t({self.d:.1f})\t{self.xi:d} -> {self.xf:d}")
class Passenger:
    """Per-passenger simulation state, seeded from an Arrival record."""

    def __init__(self, a):
        # copy the arrival data: arrival time, entrance delay, origin, destination
        self.t0, self.d = a.t, a.d
        self.xi, self.xf = a.xi, a.xf
        # mutable trip state, filled in as the simulation progresses
        self.assigned_el = -1         # no elevator assigned yet
        self.t1 = self.t2 = np.inf    # pick-up time / destination time, both unknown
        self.indirect_motion = 0      # intervals spent moving away from the destination
| true |
682378a90da8bb832070f2b2dfab393d2cd8b897 | Python | julienchurch/flask_austin | /controllers/auth.py | UTF-8 | 759 | 2.578125 | 3 | [] | no_license | from imports import *
from flask.views import MethodView
from models.user import User, query_user
def load_user(email):
    """Fetch a User by e-mail (None if unknown).

    Presumably registered as the Flask-Login user loader — confirm where the
    login manager is configured.
    """
    return query_user(email)
class Login(MethodView):
    """GET renders the login form; POST authenticates and starts a session."""
    def get(self):
        title = 'Log in.'
        return render_template('auth/login.html',title=title)

    def post(self):
        form = request.form
        email = form.get('email', '')
        password = form.get('password', '')
        user = query_user(email)
        if user:
            if user.verify_password(password):
                login_user(user)
                return redirect(url_for('home'))
            else:
                # NOTE(review): distinct messages reveal whether an account
                # exists — consider one uniform failure response
                return 'Bad password'
        return 'That user doesn\'t exist.'
class Logout(MethodView):
    """GET ends the session and redirects home; POST is a placeholder."""
    def get(self):
        logout_user()
        return redirect(url_for('home'))

    def post(self):
        pass  # unimplemented: returns None, which Flask would reject on a real POST
| true |
eb50960cf814ba8b93848a0663df966b123f75a3 | Python | dqii/thesis | /warmup/learning/algs.py | UTF-8 | 1,306 | 3.109375 | 3 | [] | no_license | import numpy as np
# HINGE / MAX MARGIN (treating margin as a confidence)
def get_hinge_score(w, x):
    """Per-class margin scores: the linear responses w @ x."""
    return np.dot(w, x)

def get_hinge_prediction(w, x):
    """Predicted class index: the argmax over the margin scores."""
    margins = get_hinge_score(w, x)
    return np.argmax(margins)
def get_hinge_loss(y, score):
    """Multiclass hinge loss; y is a 0/1 indicator vector mapped to +/-1 signs."""
    signs = y * 2 - 1
    margins = signs * score
    return np.sum(np.maximum(0, 1 - margins))
def get_hinge_gradient(x, y, score):
    """(Sub)gradient of the hinge loss: -sign_c * x per class row, zeroed where
    the margin constraint sign_c * score_c > 1 is already satisfied."""
    signs = y * 2 - 1
    grad = -np.outer(signs, x)
    satisfied = signs * score > 1
    grad[satisfied] = 0
    return grad
hinge = {'predict': get_hinge_prediction, 'score': get_hinge_score,
'loss': get_hinge_loss, 'gradient': get_hinge_gradient}
# LOGISTIC
def get_logistic_prediction(w, x):
    """Predicted class index: argmax of the softmax probabilities."""
    probs = get_logistic_score(w, x)
    return np.argmax(probs)

def get_logistic_score(w, x):
    """Softmax class probabilities of the logits w @ x.

    Fix: the max logit is subtracted before exponentiating.  This is the
    standard numerically stable softmax — mathematically identical, but it
    avoids overflow (and the resulting NaNs) for large logits.
    """
    logits = np.dot(w, x)
    exp_terms = np.exp(logits - np.max(logits))
    return exp_terms / exp_terms.sum()
def get_log_loss(y, score):
    """Cross-entropy: negative dot product of the label vector with log-probabilities."""
    log_probs = np.log(score)
    return -np.dot(y, log_probs)
def get_log_loss_gradient(x, y, score):
    """Gradient of the cross-entropy loss w.r.t. the weight rows: -(y - p) x^T."""
    residual = y - score
    return -np.outer(residual, x)
logistic = {'predict': get_logistic_prediction, 'score': get_logistic_score,
'loss': get_log_loss, 'gradient': get_log_loss_gradient}
# SQUARE
# SQUARE — placeholder stubs so the module's API mirrors hinge/logistic;
# each currently returns None (unimplemented).
def get_square_prediction(w, x):
    return  # TODO: implement squared-loss prediction

def get_square_score(w, x):
    return  # TODO: implement squared-loss scoring

def get_square_loss(y, p):
    return  # TODO: implement squared loss

def get_square_gradient(x, y, p):
    return  # TODO: implement squared-loss gradient
# BINARY
| true |
8047fc5f2bd75861c7b51e49ab0b742f5e457bca | Python | pat-nel87/glucoTools | /archived/CLItools/cliGraph.py | UTF-8 | 4,063 | 2.90625 | 3 | [] | no_license | # graphClass modified
# for CLItools
#
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.dates import DayLocator, HourLocator, DateFormatter
from datetime import date, time, datetime
class graphData:
    """Load a glucose-readings text file, filter readings by date, and
    render/save a matplotlib date plot ("<fileGraph>.png").

    The constructor runs the whole pipeline immediately:
    fileClean() -> filterList() -> timeFilter() -> graphQuery().
    """
    def graphQuery(self,filterList):
        # Plot [datetime, mg/dl] pairs, save to "<fileGraph>.png", then show.
        print("\n")
        graphFile = self.fileGraph
        graphFile = graphFile + ".png"
        myDates = []
        mySugars = []
        for i in range(len(filterList)):
            myDates.append(filterList[i][0])
            mySugars.append(float(filterList[i][1]))
        x = matplotlib.dates.date2num(myDates)
        y = mySugars
        fig = matplotlib.pyplot.figure()
        matplotlib.pyplot.plot_date(x, y, 'o-', label="mg/dl")
        fig.autofmt_xdate()
        fig.savefig(graphFile)
        plt.show()
    def timeFilter(self, filterList):
        # will filter by datetime
        # determines choice by length of array passed in
        # instantiation of the class
        # len(dateFilter) == 1 -> year only, 2 -> year+month, 3 -> full date.
        queryList = []
        choice = int(len(self.dateFilter))
        myYear = self.dateFilter[0]
        if choice == 1:
            for i in range(len(filterList)):
                if filterList[i][0].year == int(myYear):
                    queryList.append(filterList[i])
        elif choice == 2:
            myMonth = self.dateFilter[1]
            for i in range(len(filterList)):
                if filterList[i][0].year == int(myYear):
                    if filterList[i][0].month == int(myMonth):
                        queryList.append(filterList[i])
        elif choice == 3:
            myMonth = self.dateFilter[1]
            myDay = self.dateFilter[2]
            for i in range(len(filterList)):
                if filterList[i][0].year == int(myYear):
                    if filterList[i][0].month == int(myMonth):
                        if filterList[i][0].day == int(myDay):
                            queryList.append(filterList[i])
        return self.graphQuery(queryList)
    def filterList(self):
        # creates new list of lists with 2 indices
        # myList[n][0] datetime object
        # myList[n][1] blood glucose reading
        myList = self.allReadings
        tempList = []
        for i in range(len(myList)):
            # Raw entries hold ISO date/time strings; fuse them into a datetime.
            tempDate = date.fromisoformat(str(myList[i][0]))
            tempTime = time.fromisoformat(str(myList[i][1]))
            tempDateTime = datetime.combine(tempDate, tempTime)
            tempList.append([tempDateTime, myList[i][2]])
        return self.timeFilter(tempList)
    def fileClean(self):
        # Parse the raw readings file by fixed character positions:
        # chars 1-10 = date, 12-19 = time, 23-27 = glucose value.
        file = self.fileIn
        edit = open(file, "r")
        edit.seek(0,0)
        # NOTE(review): iterating the file object AND calling readline()
        # inside the loop advances the file twice per pass, so only every
        # other line is parsed — confirm the input format really alternates
        # data lines before changing this.
        for line in edit:
            lin = edit.readline()
            try:
                reading = [lin[1]]
                for i in range(2,11):
                    reading[0] = reading[0] + lin[i]
                self.dates.append(reading[0])
                reading.append(lin[12])
                for i in range(13,20):
                    reading[1] = reading[1] + lin[i]
                self.times.append(reading[1])
                reading.append(lin[23])
                for i in range(24,28):
                    reading[2] = reading[2] + lin[i]
                self.bloodSugar.append(reading[2])
                self.allReadings.append(reading)
            except IndexError:
                # A short/blank line means we ran past the data section.
                print("Processing Completed")
                #break
        return self.filterList()
    def __init__(self, fileIn, fileGraph, dateFilter):
        # fileIn = name of file with readings
        # fileGraph = select name for graph image file to be generated
        # dateFilter, filtering parameters Year, month ,day in list.
        self.allReadings = []
        self.dates = []
        self.times = []
        self.bloodSugar = []
        self.fileGraph = fileGraph
        self.fileIn = fileIn
        self.dateFilter = dateFilter
        # Kick off the full parse -> filter -> plot pipeline immediately.
        self.fileClean()
#TESTS
# newGraph = graphData(fileIn = "patreading.txt", fileGraph="clitest1", dateFilter=[2021,3,25])
| true |
569147e52257107a5b2938c6922939b553204910 | Python | wangm12/LeetCodeExercise | /prev/SubarraySumEqualsK560.py | UTF-8 | 1,163 | 3.53125 | 4 | [] | no_license | import collections
# Subarray Sum Equals K
# We cannot use sliding window in this question is because there
# might exist negative number
def subarraySum(nums, k):
    """Count contiguous subarrays of nums whose elements sum to k.

    Uses prefix-sum counting: a subarray (i, j] sums to k exactly when
    prefix[j] - prefix[i] == k, so for each running prefix we add the number
    of earlier prefixes equal to (running - k).
    """
    if not nums:
        return 0
    prefix_counts = collections.defaultdict(int)
    prefix_counts[0] = 1  # empty prefix
    running = 0
    total = 0
    for value in nums:
        running += value
        total += prefix_counts[running - k]
        prefix_counts[running] += 1
    return total
def OnlyPositive(nums, k):
    """Count subarrays summing to k, assuming every element is positive.

    Two-pointer sliding window over nums[lo+1 .. hi]: grow on the right
    while the window sum is below k, shrink from the left while above.
    Only valid for all-positive input (sums are then monotone in window
    size); see subarraySum for the general case.
    """
    if not nums:
        return 0
    lo = -1
    hi = -1
    window = 0
    count = 0
    n = len(nums)
    while hi < n:
        if window == k:
            count += 1
            hi += 1
            if hi >= n:
                break
            window += nums[hi]
        elif window < k:
            hi += 1
            if hi >= n:
                break
            window += nums[hi]
        else:
            # Window sum overshot k: drop elements from the left.
            while hi > lo and window > k:
                lo += 1
                window -= nums[lo]
    return count
test = [1,2,3,1,2]
# Use the Python 3 print() function: the original `print OnlyPositive(...)`
# is a Python 2 print statement and a SyntaxError under Python 3.
print(OnlyPositive(test, 3))
| true |
22718091b9722ee35423ce6937ae481bc81a8fcc | Python | yagays/tdd-by-example-py | /chp11/test_money.py | UTF-8 | 461 | 3.46875 | 3 | [] | no_license | import pytest
from money import Money
def test_multiplication():
    """times() scales a dollar amount without mutating the original."""
    five = Money.dollar(5)
    assert Money.dollar(10) == five.times(2)
    assert Money.dollar(15) == five.times(3)
def test_equality():
    """Equality requires both amount and currency to match."""
    assert Money.dollar(5) == Money.dollar(5)
    assert Money.dollar(5) != Money.dollar(6)
    assert Money.franc(5) != Money.dollar(5)
def test_currency():
    """Factory methods tag the money with its ISO currency code."""
    assert "USD" == Money.dollar(1).currency()
    assert "CHF" == Money.franc(1).currency()
| true |
fe1f8bdc2f2acb486fdd95b4f5afb836d1a6646f | Python | Azure/azure-sdk-for-python | /sdk/identity/azure-identity/azure/identity/aio/_credentials/client_assertion.py | UTF-8 | 2,807 | 2.609375 | 3 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from typing import Any, Callable, Optional
from azure.core.credentials import AccessToken
from .._internal import AadClient, AsyncContextManager
from .._internal.get_token_mixin import GetTokenMixin
class ClientAssertionCredential(AsyncContextManager, GetTokenMixin):
    """Authenticates a service principal with a JWT assertion.
    This credential is for advanced scenarios. :class:`~azure.identity.CertificateCredential` has a more
    convenient API for the most common assertion scenario, authenticating a service principal with a certificate.
    :param str tenant_id: ID of the principal's tenant. Also called its "directory" ID.
    :param str client_id: The principal's client ID
    :param func: A callable that returns a string assertion. The credential will call this every time it
        acquires a new token.
    :paramtype func: Callable[[], str]
    :keyword str authority: Authority of an Azure Active Directory endpoint, for example
        "login.microsoftonline.com", the authority for Azure Public Cloud (which is the default).
        :class:`~azure.identity.AzureAuthorityHosts` defines authorities for other clouds.
    :keyword List[str] additionally_allowed_tenants: Specifies tenants in addition to the specified "tenant_id"
        for which the credential may acquire tokens. Add the wildcard value "*" to allow the credential to
        acquire tokens for any tenant the application can access.
    .. admonition:: Example:
        .. literalinclude:: ../samples/credential_creation_code_snippets.py
            :start-after: [START create_client_assertion_credential_async]
            :end-before: [END create_client_assertion_credential_async]
            :language: python
            :dedent: 4
            :caption: Create a ClientAssertionCredential.
    """
    def __init__(self, tenant_id: str, client_id: str, func: Callable[[], str], **kwargs: Any) -> None:
        # `func` is stored, not called: it is invoked lazily on every token
        # request so the assertion can be regenerated each time.
        self._func = func
        self._client = AadClient(tenant_id, client_id, **kwargs)
        super().__init__(**kwargs)
    async def __aenter__(self):
        """Open the underlying AAD client's transport session."""
        await self._client.__aenter__()
        return self
    async def close(self) -> None:
        """Close the credential's transport session."""
        await self._client.close()
    async def _acquire_token_silently(self, *scopes: str, **kwargs: Any) -> Optional[AccessToken]:
        """Return a cached, still-valid token for `scopes`, or None."""
        return self._client.get_cached_access_token(scopes, **kwargs)
    async def _request_token(self, *scopes: str, **kwargs: Any) -> AccessToken:
        """Request a new token from AAD using a freshly generated assertion."""
        assertion = self._func()
        token = await self._client.obtain_token_by_jwt_assertion(scopes, assertion, **kwargs)
        return token
| true |
0f9204859ca9aa5ef557e04cc04c059b6b384807 | Python | pipcet/simulavr | /examples/python/example.py | UTF-8 | 3,639 | 2.625 | 3 | [] | no_license | # -*- coding: UTF-8 -*-
# Python test script as demonstration of using pysimulavr in unit tests
from unittest import TestSuite, TextTestRunner, TestCase, defaultTestLoader
from sys import argv
import pysimulavr
from ex_utils import SimulavrAdapter
class TestBaseClass(TestCase, SimulavrAdapter):
    """unittest suite driving a simulated AVR device via pysimulavr.

    argv[1] must be "<processor>:<elf-file>"; a fresh simulated device is
    loaded for every test and torn down afterwards.
    """
    def setUp(self):
        proc, elffile = argv[1].split(":")
        self.device = self.loadDevice(proc, elffile)
    def tearDown(self):
        del self.device
    def test_01(self):
        "just run 3000 ns + 250 ns"
        n = 3000
        self.doRun(n)
        self.assertEqual(self.getCurrentTime(), n)
        self.doStep()
        self.assertEqual(self.getCurrentTime(), n + self.device.GetClockFreq())
    def test_02(self):
        "just run 2 steps"
        self.doStep()
        self.assertEqual(self.getCurrentTime(), 0)
        self.doStep()
        self.assertEqual(self.getCurrentTime(), self.device.GetClockFreq())
    def test_03(self):
        "check PC and PC size"
        self.assertEqual(self.device.PC_size, 2)
        self.doStep()
        self.doStep()
        # PC counts 16-bit words, hence the byte address divided by 2.
        self.assertEqual(self.device.PC, 0x8c / 2)
    def test_04(self):
        "check address of data symbols"
        # they begin normally at address 0x100
        self.assertEqual(self.device.data.GetAddressAtSymbol("timer2_ticks"), 0x100)
    def addr2word(self, addr):
        # Read a little-endian 16-bit word from the device's data memory.
        d1 = self.device.getRWMem(addr + 1)
        d2 = self.device.getRWMem(addr)
        return d2 + (d1 << 8)
    def test_05(self):
        "access to data by symbol"
        addr = self.device.data.GetAddressAtSymbol("timer2_ticks")
        o = 10000 # duration of interrupt function, about 10us
        d = 2000000 # timer period 2ms
        self.doRun(o * 2) # skip initialisation
        self.assertEqual(self.addr2word(addr), 0)
        self.doRun(d + o)
        self.assertEqual(self.addr2word(addr), 1)
        self.doRun((d * 3) + o)
        self.assertEqual(self.addr2word(addr), 3)
    def test_06(self):
        "write access to data by symbol"
        addr = self.device.data.GetAddressAtSymbol("timer2_ticks")
        o = 10000 # duration of interrupt function, about 10us
        d = 2000000 # timer period 2ms
        self.doRun(o * 2) # skip initialisation
        self.assertEqual(self.addr2word(addr), 0)
        self.device.setRWMem(addr, 2)
        self.doRun(d)
        self.assertEqual(self.addr2word(addr), 2)
        self.doRun(d + o)
        self.assertEqual(self.addr2word(addr), 3)
    def test_07(self):
        "test toggle output pin"
        o = 10000 # duration of interrupt function, about 10us
        d = 2000000 # timer period 2ms
        # Pin starts tristate ("t") before firmware configures it.
        self.assertEqual(self.device.GetPin("A0").toChar(), "t")
        self.doRun(o * 2) # skip initialisation
        # now output should be set to LOW
        self.assertEqual(self.device.GetPin("A0").toChar(), "L")
        self.doRun(d + o * 2) # reaction to timer interrupt about 20us after!
        self.assertEqual(self.device.GetPin("A0").toChar(), "H")
        self.doRun(d * 2 + o * 2)
        self.assertEqual(self.device.GetPin("A0").toChar(), "L")
    def test_08(self):
        "work with breakpoints"
        bpaddr = self.device.Flash.GetAddressAtSymbol("main")
        self.device.BP.AddBreakpoint(bpaddr)
        # run to breakpoint
        self.doRun(10000)
        self.doStep(4) # call to main
        self.assertEqual(self.device.PC, bpaddr)
        self.doStep(4) # 4 steps more, do nothing because of breakpoint
        self.assertEqual(self.device.PC, bpaddr)
        self.device.BP.RemoveBreakpoint(bpaddr)
        self.doStep(2) # push needs 2 steps
        self.assertEqual(self.device.PC, bpaddr + 1)
if __name__ == "__main__":
    # Build a suite from TestBaseClass explicitly (instead of unittest.main())
    # because argv[1] carries the processor:elf argument, not test names.
    allTestsFrom = defaultTestLoader.loadTestsFromTestCase
    suite = TestSuite()
    suite.addTests(allTestsFrom(TestBaseClass))
    TextTestRunner(verbosity = 2).run(suite)
# EOF
| true |
3c6e7bf33df6ac702ebf0108431ab52bf4204d3a | Python | teolopera/PYTHON---Primeros-Pasos | /07-Funciones/main.py | UTF-8 | 444 | 4.5 | 4 | [] | no_license | # Funcion con retorno
def suma(num1, num2):
    """Return the sum of num1 and num2.

    The original body returned ``num1 * num2`` (a product), contradicting
    both the function's name ("suma" = sum) and the "function with a return
    value" example it illustrates.
    """
    return num1 + num2
# The `pass` statement is a no-op: execution simply continues.
def funcion():
    """Empty example function: `pass` gives it a syntactically valid body."""
    pass
resultado = suma(3, 10)
print(resultado)
# Optional parameters (keyword arguments with default values)
def getEmpleado( nombre, dni = None ):
    """Print an employee's name and (optional) national ID number."""
    print(f'El nombre es { nombre }')
    print(f'El dni es { dni }')
# Lambda functions - year -> parameter : return value
getYear = lambda year: f'El año es { year }'
print(getYear(2020))
| true |
4603f55c49185c9268c63845ff0543890966767c | Python | avinsit123/keyphrase-gan | /utils/string_helper.py | UTF-8 | 5,488 | 2.734375 | 3 | [
"MIT"
] | permissive | from nltk.stem.porter import *
stemmer = PorterStemmer()
import pykp
def prediction_to_sentence(prediction, idx2word, vocab_size, oov, eos_idx, unk_idx=None, replace_unk=False, src_word_list=None, attn_dist=None):
    """
    :param prediction: a list of 0 dim tensor
    :param attn_dist: tensor with size [trg_len, src_len]
    :return: a list of words, does not include the final EOS

    NOTE(review): despite the docstring, this currently appends integer
    token ids (vocab index, or oov offset for copied words) — the
    idx2word/oov lookups are commented out below. Confirm downstream code
    expects ids, not strings.
    """
    sentence = []
    for i, pred in enumerate(prediction):
        _pred = int(pred.item())  # convert zero dim tensor to int
        if i == len(prediction) - 1 and _pred == eos_idx:  # ignore the final EOS token
            break
        # NOTE(review): this unconditionally disables UNK replacement,
        # overriding the `replace_unk` argument; the attention-based
        # replacement branch below is therefore dead code.
        replace_unk = False
        if _pred < vocab_size:
            if _pred == unk_idx and replace_unk:
                assert src_word_list is not None and attn_dist is not None, "If you need to replace unk, you must supply src_word_list and attn_dist"
                #_, max_attn_idx = attn_dist[i].max(0)
                # Pick the source token with the strongest attention; fall
                # back to the second-best if the best is out of range.
                _, max_attn_idx = attn_dist[i].topk(2, dim=0)
                if max_attn_idx[0] < len(src_word_list):
                    word = src_word_list[int(max_attn_idx[0].item())]
                else:
                    word = src_word_list[int(max_attn_idx[1].item())]
                #word = pykp.io.EOS_WORD
            else:
                #word = idx2word[_pred]
                #print("The ",_pred," is")
                #word = idx2word[_pred]
                word = _pred
        else:
            # Id beyond the vocabulary refers to a copied OOV token.
            #word = oov[pred - vocab_size]
            #word = oov[_pred - vocab_size]
            word = _pred - vocab_size
        sentence.append(word)
    return sentence
def convert_list_to_kphs(keyphrases):
    """Split each predicted token-id tensor into a list of keyphrases.

    For every sequence in `keyphrases`, ids 4 and 5 act as keyphrase
    separators and id 2 terminates the sequence (presumably separator /
    end-of-sequence tokens — TODO confirm against the project vocabulary).
    Returns a list (one entry per sequence) of lists of keyphrases, where
    each keyphrase is a list of int token ids.
    """
    total = []
    one_list = []
    for i,keyphrase in enumerate(keyphrases):
        # one_keyphrase = []
        # mapper = map(keyphrase,str)
        # mapper = ' '.join(mapper)
        # print(mapper)
        one_keyphrase = []
        for i,word_keyphrase in enumerate(keyphrase):
            #print(word_keyphrase)
            if int(word_keyphrase.item()) == 2:
                # End-of-sequence token: flush the phrase in progress and stop.
                if one_keyphrase != []:
                    one_list.append(one_keyphrase)
                break
            elif int(word_keyphrase.item()) != 4 and int(word_keyphrase.item()) != 5:
                # print("fdedh")
                one_keyphrase.append(int(word_keyphrase.item()))
            else:
                # Separator token: close the current keyphrase (if non-empty).
                if one_keyphrase != []:
                    one_list.append(one_keyphrase)
                one_keyphrase = []
        total.append(one_list)
        one_list = []
    #print(total[0])
    return total
def stem_str_2d_list(str_2dlist):
    """Stem every word in a 2-D structure: a list of lists of word lists."""
    return [[stem_word_list(words) for words in str_list]
            for str_list in str_2dlist]
def stem_str_list(str_list):
    """Stem every word in each word list of `str_list`."""
    return [stem_word_list(words) for words in str_list]
def stem_word_list(word_list):
    """Lowercase, strip, and Porter-stem each word in `word_list`."""
    stemmed = []
    for word in word_list:
        stemmed.append(stemmer.stem(word.strip().lower()))
    return stemmed
"""
def split_concated_keyphrases(word_list, delimiter_word):
tmp_pred_str_list = []
tmp_word_list = []
for word in word_list:
if word != delimiter_word:
tmp_word_list.append(word)
else:
if len(tmp_word_list) > 0:
tmp_pred_str_list.append(tmp_word_list)
tmp_word_list = []
if len(tmp_word_list) > 0: # append the final keyphrase to the pred_str_list
tmp_pred_str_list.append(tmp_word_list)
return tmp_pred_str_list
"""
def split_word_list_by_delimiter(word_list, keyphrase_delimiter, include_present_absent_delimiter=False, present_absent_delimiter=None):
    """
    Convert a word list into a list of keyphrase, each keyphrase is a word list.
    :param word_list: word list of concated keyprhases, separated by a delimiter
    :param keyphrase_delimiter
    :param include_present_absent_delimiter: if it is true, the final list of keyphrase will include the present_absent_delimiter as one of the keyphrase, e.g., [['kp11', 'kp12'], ['<peos>'], ['kp21', 'kp22']]
    :param present_absent_delimiter
    :return: a list of keyphrase, each keyphrase is a word list.
    """
    if include_present_absent_delimiter:
        assert present_absent_delimiter is not None
    tmp_pred_str_list = []
    tmp_word_list = []
    for word in word_list:
        #print("The asf IS",keyphrase_delimiter)
        # NOTE(review): the parameter is overwritten with the literal 4 on
        # every iteration, so the `keyphrase_delimiter` argument is ignored.
        # This looks like a debugging hack — confirm before relying on it.
        keyphrase_delimiter = 4
        if word == keyphrase_delimiter:
            # Delimiter: close the keyphrase in progress (ignore empties).
            if len(tmp_word_list) > 0:
                tmp_pred_str_list.append(tmp_word_list)
                tmp_word_list = []
        elif word == present_absent_delimiter and include_present_absent_delimiter:
            # Present/absent boundary: flush, then emit the marker itself
            # as a one-element keyphrase.
            if len(tmp_word_list) > 0:
                tmp_pred_str_list.append(tmp_word_list)
                tmp_word_list = []
            tmp_pred_str_list.append([present_absent_delimiter])
        else:
            tmp_word_list.append(word)
    if len(tmp_word_list) > 0:  # append the final keyphrase to the pred_str_list
        tmp_pred_str_list.append(tmp_word_list)
    return tmp_pred_str_list
| true |
da3049f562f286c12065a53be0fa1978104e6a04 | Python | eaniyom/python-challenge-solutions | /Aniyom Ebenezer/Phase 2/TUPLE/Day_53_Challenge_Solution/Question 3 Solution.py | UTF-8 | 413 | 4.03125 | 4 | [
"MIT"
] | permissive | '''
Write a Python program to slice a tuple.
'''
tuplex = (2, 4, 3, 5, 4, 6, 7, 8, 6, 1)
slice = tuplex[3:5]
print(slice)
_slice = tuplex[:6]
print(_slice)
_slice = tuplex[5:]
print(_slice)
_slice = tuplex[:]
print(_slice)
_slice = tuplex[-8:-4]
print(_slice)
tuplex = tuple("HELLO WORLD")
print(tuplex)
_slice = tuplex[2:9:2]
print(_slice)
_slice = tuplex[::4]
print(_slice)
_slice = tuplex[9:2:-4]
print(_slice) | true |
5dfce259327f73f9ad690b3c3279921b7bde6584 | Python | ahmaddroobi99/Projects_Python | /GuessTHeSecretNumber.py | UTF-8 | 951 | 4.125 | 4 | [] | no_license | import random
def guess(x):
    """Interactive guess-the-number: the user guesses until correct.

    x is the inclusive upper bound for the secret number.
    """
    random_number = random.randint(1, x)
    # The original printed random_number here, revealing the secret before
    # the game started; that debug print has been removed.
    attempt = 0
    while attempt != random_number:
        attempt = int(input(f'Guess numper betwwen 1 and {x}::'))
        if attempt < random_number:
            print("Sory, guess agin .Too low")
        elif attempt > random_number:
            print("Sorry ,guess..Too hight")
    print(f"yah,Congratulation. you have guessded the number..{random_number} ")
def computer_guess (x):
    """Reverse game: the computer guesses the user's number in [1, x].

    After each guess the user answers 'h' (too high), 'l' (too low) or
    'c' (correct); any other answer simply repeats with the same range.
    """
    low =1
    high =x
    feedback=''
    while feedback != 'c' :
        if low!=high :
            guess = random.randint(low, high)
        else :
            # Only one candidate left — no need to pick randomly.
            guess=low
        feedback = input(f'is {guess} too high (h) ,Too low ,orr correct (c)').lower()
        if feedback == 'h':
            high =guess -1
        elif feedback =="l":
            low =guess+1
    print(f"yah ,the Computer guessed your number ,{guess},correctly")
# guess(50)
computer_guess(500) | true |
1e9b6bacf9cb71af4f36b9777e08a2bd25436a84 | Python | CMUN-Group/Hipertermia | /HornoMemmert/version3/solicitudAtmoWeb.py | UTF-8 | 664 | 3.03125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 8 12:44:03 2019
@author: nixtropy
"""
import requests
from time import sleep
def setTemp(temp, DEBUG):
    """Send a temperature set-point to the oven's AtmoWEB HTTP interface.

    Args:
        temp: target temperature in degrees Celsius, as a string (it is
            concatenated into a message and used as a query-string value).
        DEBUG: when truthy, only print a test message instead of issuing
            the HTTP request.
    """
    if (DEBUG):
        print ("Este es un valor de prueba para " + temp + " grados celcius")
    else:
        URL = "http://192.168.100.100/atmoweb"
        temperatura = temp
        PARAMS = {"TempSet":temperatura}
        try:
            # GET http://192.168.100.100/atmoweb?TempSet=<temp>
            r = requests.get(url = URL, params = PARAMS)
            print (r.url)
        except requests.exceptions.ConnectionError as e:
            # Best-effort: report the failed connection and keep running.
            print ("No se pudo realizar la conexión")
            print (e)
setTemp("50", True)
print("acabe")
| true |
441e6ea3af5ae28b5f109e1841e23bc32aaa7bb2 | Python | jiangyingli/python20190601 | /王小丫/课.py | UTF-8 | 683 | 3.65625 | 4 | [] | no_license | #class定义类 ,一类事物的抽象如:狗,猫,(模板)
#变量:属性
#age=0
#color=""
#函数:功能
#def eat(self):
# from
# cat=Cat()
# cat.age=5
# cat.color()
# print(cat.color)
# print(cat.age)
# cat.eat()
# print(cat.eat())
# cat.name()
# print()
#排序
#list=[2,4,5,3,]
#list.sort(reverse=Ture)
#print(list)
#手动排序
#for
#if (list[i]>list
#
#
#
#
# Bubble sort demo: repeatedly swap adjacent out-of-order elements.
# Renamed from `list`, which shadowed the builtin type.
nums = [2, 4, 1, 6, 3, 7]
for j in range(len(nums) - 1):
    for i in range(len(nums) - 1 - j):
        if nums[i] > nums[i + 1]:
            nums[i], nums[i + 1] = nums[i + 1], nums[i]
# Print the sorted values on one line. The original loop used
# `range(len())` — len() with no argument is an error.
for i in range(len(nums)):
    print(nums[i], end="")
#数据库MySQL(存数据的)
#数据库实例
#
#
#
#
| true |
1325a89d6ac0915855b7c9c8195c8bda5b8ad2f0 | Python | FDUCSLG/PRML-2019Spring-FDU | /assignment-3/16307130215/model.py | UTF-8 | 2,773 | 2.6875 | 3 | [] | no_license | import torch
import torch.nn as nn
from torch.autograd import Variable
class LSTM(nn.Module):
    """Hand-rolled single-layer LSTM built from four Linear gate projections."""
    def __init__(self, input_size, hidden_size):
        super(LSTM, self).__init__()
        self.hidden_size = hidden_size
        # Each gate maps the concatenation [hidden, x_t] to hidden_size.
        self.f_gate = nn.Linear(input_size + hidden_size, hidden_size)
        self.i_gate = nn.Linear(input_size + hidden_size, hidden_size)
        self.o_gate = nn.Linear(input_size + hidden_size, hidden_size)
        self.C_gate = nn.Linear(input_size + hidden_size, hidden_size)
        # self.f_gate.weight.data.fill_(0)
        # self.i_gate.weight.data.fill_(0)
        # self.o_gate.weight.data.fill_(0)
        # self.C_gate.weight.data.fill_(0)
    #input:[seq_len,batch,input_size]
    def forward(self, input, hidden = None, cell = None):
        """Run the LSTM over `input` of shape [seq_len, batch, input_size].

        Returns (output, hidden, cell) where output stacks every timestep's
        hidden state: [seq_len, batch, hidden_size].
        """
        # print(self.f_gate.weight.data)
        seq_len, batch_size, _ = input.shape
        output = []
        if hidden is None:
            # Zero-initialise the recurrent state when none is supplied.
            hidden = input.data.new(batch_size, self.hidden_size).fill_(0).float()
            cell = input.data.new(batch_size, self.hidden_size).fill_(0).float()
        for t in range(seq_len):
            z = torch.cat((hidden, input[t]), 1)
            f = torch.sigmoid(self.f_gate(z))   # forget gate
            i = torch.sigmoid(self.i_gate(z))   # input gate
            o = torch.sigmoid(self.o_gate(z))   # output gate
            C_bar = torch.tanh(self.C_gate(z))  # candidate cell state
            # c_t = f * c_{t-1} + i * c~_t ;  h_t = o * tanh(c_t)
            cell = torch.add(torch.mul(cell, f), torch.mul(C_bar, i))
            hidden = torch.mul(torch.tanh(cell), o)
            output.append(hidden.unsqueeze(0))
        output = torch.cat(output, dim=0)
        return output, hidden, cell
class PoetryModel(nn.Module):
    """Token-level language model: Embedding -> custom LSTM -> vocab logits."""
    def __init__(self, vocab_size, embedding_size, hidden_size):
        super(PoetryModel,self).__init__()
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embeddings = nn.Embedding(vocab_size, embedding_size)
        self.lstm = LSTM(embedding_size, hidden_size)
        # NOTE(review): "transfrom" (and "ceil" below) misspell
        # "transform"/"cell"; renaming would invalidate saved state dicts.
        self.transfrom = nn.Linear(hidden_size, vocab_size)
        # self.embeddings.weight.data.fill_(0)
        # self.transfrom.weight.data.fill_(0)
    #input:[seq_len,batch]
    def forward(self, input, hidden = None, ceil = None):
        """Compute per-position vocabulary logits for a [batch, seq_len] id tensor.

        Returns {"output": [seq_len*batch, vocab_size] logits,
                 "hidden"/"ceil": final recurrent state}.
        """
        input = input.transpose(1,0) #input:[batch,seq_len]->[seq_len,batch]
        seq_len, batch_size = input.shape
        if hidden is None:
            # Zero-initialised state (LSTM.forward would also do this).
            hidden = input.data.new(batch_size, self.hidden_size).fill_(0).float()
            ceil = input.data.new(batch_size, self.hidden_size).fill_(0).float()
        embeds = self.embeddings(input)
        output, hidden, ceil = self.lstm(embeds, hidden, ceil)
        output = output.transpose(1,0).contiguous()
        # Flatten to [seq_len*batch, hidden] before projecting to the vocab.
        output = self.transfrom(output.view(seq_len*batch_size, -1))
        return {"output":output, "hidden":hidden, "ceil":ceil}
| true |
fdaaa7df76a2d0c0c5023756bf87c806d6098494 | Python | kjfdf/PyQT5 | /Event3.py | UTF-8 | 948 | 2.859375 | 3 | [] | no_license | import sys
from PyQt5.QtWidgets import QMainWindow,QPushButton,QApplication
class Exam(QMainWindow):
    """Minimal PyQt5 window: two buttons that report clicks in the status bar."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        btn1=QPushButton("Button 1",self) # create the two buttons in the upper area
        btn1.move(30,50)
        btn2=QPushButton("Button2",self)
        btn2.move(150,50)
        btn1.clicked.connect(self.buttonClicked)
        btn2.clicked.connect(self.buttonClicked) # wire both buttons to the same click handler
        self.statusBar()
        self.setGeometry(300, 100, 900, 900)
        self.setWindowTitle('NCS/EMG')
        self.show()
    def buttonClicked(self): # show which button was pressed in the status bar
        # sender() identifies which of the two connected buttons fired.
        sender=self.sender()
        self.statusBar().showMessage(sender.text()+' was pressed')
if __name__ == '__main__':
    # Standard PyQt bootstrap: create the application, construct the window
    # (which shows itself), then hand control to the Qt event loop.
    app = QApplication(sys.argv)
    w = Exam()
    sys.exit(app.exec_())
95909be7f56c0d6d9304824d8a6bf06fd44352c8 | Python | AsherThomasBabu/Algorithmic-Toolbox | /week2_algorithmic_warmup/4_least_common_multiple/lcm.py | UTF-8 | 265 | 3.375 | 3 | [] | no_license | # Uses python3
import sys
def lcm_naive(a, b):
    """Return the least common multiple of a and b.

    Uses the identity lcm(a, b) = a * b // gcd(a, b). The original scanned
    every multiple of a up to a*b, which is O(a*b) and far too slow for
    large inputs.
    """
    from math import gcd  # local import: keeps the module's import block unchanged
    return a * b // gcd(a, b)
if __name__ == '__main__':
    # Read "a b" from stdin; use a name that does not shadow the builtin input().
    data = sys.stdin.read()
    a, b = map(int, data.split())
    print(lcm_naive(a, b))
| true |
ad500fe672cb3adf9854348df5aa5006c60dd460 | Python | MustafaGokkaya/bot-yazma-egitimi | /teachers/mfatiharslan/browser_navigation.py | UTF-8 | 751 | 3.1875 | 3 | [] | no_license | """
browser_navigation: Tarayıcıda gezinme işlemleri
"""
from selenium import webdriver
import settings
import time
# create the browser (driver) object
driver = webdriver.Chrome(settings.driver_path)
# navigate to an address
driver.get("https://istanbulakademi.meb.gov.tr")
# print the current address
print(driver.current_url)
time.sleep(2)
# navigate to a new address
driver.get("https://istanbulakademi.meb.gov.tr/akademiler.php?pID=615")
print(driver.current_url)
time.sleep(2)
# go back to the previous page
driver.back()
print(driver.current_url)
time.sleep(2)
# go forward to the next page
driver.forward()
print(driver.current_url)
time.sleep(2)
# print the page title
print(driver.title)
time.sleep(2)
# close the browser
driver.close()
| true |
045a0d738d2addeb8915971c1b07d82781d63cb5 | Python | scottkwon/Projects | /Python/flask_fundamentals/TMNT/tmnt.py | UTF-8 | 528 | 2.546875 | 3 | [] | no_license | from flask import Flask,render_template,request, redirect
app = Flask(__name__)
@app.route('/')
def Home():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/<color>')
def Color(color):
    """Render the ninja page for a bandana color; unknown colors go home.

    The original if/elif chain left `src` unbound for any color other than
    the four turtles, producing an UnboundLocalError (HTTP 500).
    """
    sources = {
        "blue": 'img/Leonardo.jpg',
        "orange": 'img/Michelangelo.jpg',
        "red": 'img/Raphael.jpg',
        "purple": 'img/Donatello.jpg',
    }
    src = sources.get(color.lower())
    if src is None:
        return redirect('/')
    return render_template('ninja.html', src=src)
app.run(debug=True) | true |
fb8d3e5ef7ac60ac4651f514a046b5c8f6f68576 | Python | hashtaginfosec/thinkful-unit2 | /argparse/arg-tutorial.py | UTF-8 | 449 | 3.125 | 3 | [] | no_license | import argparse
# Simple argparse demo: -a/--add "creates" accounts, -d/--delete "deletes" them.
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--add", nargs="+", help="Creates a local account")
parser.add_argument("-d", "--delete", nargs="+", help="Deletes a local account")
args = parser.parse_args()
if args.add:
    for each_user in args.add:
        print("Creating user " + each_user)
elif args.delete:
    # BUG FIX: this loop iterated args.add, which is None when only -d is
    # given, so it crashed with a TypeError instead of deleting users.
    for each_user in args.delete:
        print("Deleting user " + each_user)
else:
    parser.print_help()
d74b282ecc965d20ea430de55d804da5e0a30e5c | Python | neo13/Locating-Twitter-Users | /Crawler/file.py | UTF-8 | 656 | 2.59375 | 3 | [
"MIT"
] | permissive | from os import listdir
from os.path import isfile, join, getsize
class File(object):
    """Append-only writer that rolls over to a new numbered file in ./temp/
    once the newest chunk exceeds MAX_BYTES (~15 MiB)."""

    # Rollover threshold in bytes. The original wrapped it in `long(...)`,
    # a Python 2 builtin that raises NameError on Python 3.
    MAX_BYTES = 15728640

    def __init__(self):
        super(File, self).__init__()
    @staticmethod
    def getFile():
        """Open (for appending) the newest chunk in ./temp/, or the next
        numbered chunk if the newest one is already over MAX_BYTES."""
        directory = "./temp/"
        onlyfiles = [f for f in listdir(directory) if isfile(join(directory, f))]
        # Sort numerically: the original lexicographic sort orders "9" after
        # "10", so the next-index computation collided with existing files.
        onlyfiles.sort(key=int)
        if len(onlyfiles) == 0:
            return open(join(directory, str(0)), "a")
        if getsize(join(directory, onlyfiles[-1])) > File.MAX_BYTES:
            return open(join(directory, str(int(onlyfiles[-1]) + 1)), "a")
        else:
            return open(join(directory, onlyfiles[-1]), "a")
    @staticmethod
    def write(value):
        """Append `value` to the current chunk and close it."""
        f = File.getFile()
        f.write(value)
        f.close()
783a5c2a7d07e2e4457c0a8c3c44d0d570517c7b | Python | martakedzior/python-course | /00-intro/Praca_Domowa_1/zad5.py | UTF-8 | 548 | 4.53125 | 5 | [] | no_license | # Zadanie 5
# Ask the user for 2 numbers, then divide one by the other.
# Show how many times one number fits into the other and what the
# remainder of the division is.
# (Task description translated from Polish.)
print("Podaj proszę 2 liczby: ")
print()
user_input1 = int(input("Pierwsza liczba: "))
user_input2 = int(input("Druga liczba: "))
# NOTE(review): this computes how many times the SECOND number fits into
# the FIRST (floor division), while the printed message states the opposite
# direction — confirm which direction the exercise intended.
result = user_input1 // user_input2
modulo = user_input1 % user_input2
print(f"Pierwsza liczba mieści się w drugiej: {result} razy.")
print(f"Reszta dzielenia pierwszej liczby przez drugą to: {modulo}.")
dd5f331add89001d9ff88d9f89c54bb097d13b01 | Python | adbmd/picture_text | /picture_text/src/hac_tools.py | UTF-8 | 8,931 | 3.03125 | 3 | [
"Apache-2.0"
] | permissive | from picture_text.src.utils import flatten_list
from scipy.cluster.hierarchy import to_tree
import matplotlib.pyplot as plt
from scipy.cluster import hierarchy
import fastcluster
import numpy as np
class HAC():
    """Wraps a HAC linkage table (fastcluster/scipy) — or a dict subset of
    one — and exposes helpers to walk the cluster tree and select the
    top-N clusters for a treemap.

    `self.tbl` maps cluster id -> [id, left_id, right_id, distance, count];
    leaves (original datapoints) use '' for both child ids.
    """
    def __init__(self, linkage_table, parent=None):
        """
        Instantiates a class, starting with a fastcluster or scipy HAC linkage table and helping the move to a treemap
        Alternatively this can also receive a ready linkage table or a subset thereof for the cases where only a part of the tree is being analysed
        Args:
            linkage_table (list or dict):
                Linkage table produced as an output of a HAC algorithm (fastcluster or scipy)
                OR
                a dictionary table subset thereof
            parent (int or string, optional): Parent ID value to be used as parent of this dataset
        >>> X=[[x] for x in [1001,1000,1,10,99,100,101]]
        >>> z=fastcluster.single(X)
        >>> hac = HAC(z) # Linkage table case
        >>> hac.tbl
        {0: [0, '', '', 0, 1], 1: [1, '', '', 0, 1], 2: [2, '', '', 0, 1], 3: [3, '', '', 0, 1], 4: [4, '', '', 0, 1], 5: [5, '', '', 0, 1], 6: [6, '', '', 0, 1], 7: [7, 0, 1, 1.0, 2], 8: [8, 5, 6, 1.0, 2], 9: [9, 4, 8, 1.0, 3], 10: [10, 2, 3, 9.0, 2], 11: [11, 9, 10, 89.0, 5], 12: [12, 7, 11, 899.0, 7]}
        >>> hac.tbl_clusters
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
        >>> z2={8: [8, '', '', 0, 1], 9: [9, '', '', 0, 1], 20: [20, 8, 9, 2.0, 2]}
        >>> hac2=HAC(z2) # Dictionary case
        >>> hac2.tbl
        {8: [8, '', '', 0, 1], 9: [9, '', '', 0, 1], 20: [20, 8, 9, 2.0, 2]}
        >>> hac2.tbl_clusters
        [8, 9, 20]
        """
        if parent == None:
            self.parent = -1
        else:
            self.parent = parent
        if not isinstance(linkage_table, dict):
            # Raw linkage matrix: convert to a node tree, then flatten each
            # node into [id, left_id, right_id, distance, count].
            self.linkage_table = linkage_table
            self.rootnode, self.nodelist = to_tree(self.linkage_table, rd=True)
            self.tbl = {i: [i, left_clust(nd), right_clust(nd), nd.dist, nd.count] for (i, nd) in enumerate(self.nodelist)}
        else:
            # Dict input is assumed to already be in `tbl` format.
            self.tbl = linkage_table
        self.tbl_clusters = list(self.tbl.keys())
        self.tbl_clusters.sort()
    def dendrogram(self, **kwargs):
        """
        Create a dendrogram from the data in the class
        """
        # Only valid for instances built from a raw linkage matrix (the
        # dict case does not set self.linkage_table).
        plt.figure()
        dn = hierarchy.dendrogram(self.linkage_table, **kwargs)
    def get_members(self, cluster_id):
        """
        Get list of member and cluster ids for a certain node id
        Args:
            cluster_id (int): Cluster ID to get members for
        Returns:
            members (list): list of original datapoints belonging to this cluster ID
            clusters (list): list of subclusters belonging to this cluster ID
            table (dict): full table with all members both cluster and datapoints
        >>> X=[[x] for x in [1001,1000,1,10,99,100,101]]
        >>> z=fastcluster.single(X)
        >>> hac = HAC(z)
        >>> m, c, t = hac.get_members(3)
        >>> m
        [3]
        >>> c
        []
        >>> t
        {3: [3, '', '', 0, 1]}
        >>> m, c, t = hac.get_members(12)
        >>> m
        [0, 1, 2, 3, 4, 5, 6]
        >>> c
        [7, 8, 9, 10, 11, 12]
        >>> t
        {0: [0, '', '', 0, 1], 1: [1, '', '', 0, 1], 2: [2, '', '', 0, 1], 3: [3, '', '', 0, 1], 4: [4, '', '', 0, 1], 5: [5, '', '', 0, 1], 6: [6, '', '', 0, 1], 7: [7, 0, 1, 1.0, 2], 8: [8, 5, 6, 1.0, 2], 9: [9, 4, 8, 1.0, 3], 10: [10, 2, 3, 9.0, 2], 11: [11, 9, 10, 89.0, 5], 12: [12, 7, 11, 899.0, 7]}
        """
        # Breadth-first walk: repeatedly expand child ids until only leaves
        # (nodes whose left_id is '') remain unexpanded.
        memb=[]
        get_idx=new_memb=[cluster_id]
        while len(new_memb)>0:
            memb = memb + new_memb
            new_memb = []
            for idx in get_idx:
                new_memb = new_memb + [m for m in self.tbl[idx][1:3] if m != '']
            get_idx = new_memb
        memb.sort()
        members = [m for m in memb if self.tbl[m][1] == '']
        clusters = [m for m in memb if not self.tbl[m][1] == '']
        # Contains the full list of members
        table = {m: self.tbl[m] for m in memb}
        return members, clusters, table
    def top_n_clusters(self,nr_clusters):
        """
        >>> X=[[x] for x in [1001,1000,1,10,99,100,101]]
        >>> z=fastcluster.single(X)
        >>> hac = HAC(z)
        >>> child_id, child_size, total_size = hac.top_n_clusters(3)
        >>> child_id
        [2, 3, 9, 7]
        >>> child_size
        [1, 1, 3, 2]
        >>> total_size
        7
        """
        # The table is keyed in merge order, so the last nr_clusters ids are
        # the highest (last-performed) merges; their children — excluding
        # merges that are themselves in the top set — form the cut.
        clust_id=self.tbl_clusters[-nr_clusters:]
        clust_id=[c for c in clust_id if self.tbl[c][1]!='']
        top_n=[self.tbl[c] for c in clust_id if self.tbl[c][1]!='']
        child_id = flatten_list([t[1:3] for t in top_n])
        child_id = [c for c in child_id if c not in clust_id]
        child_size = [self.tbl[c][4] for c in child_id]
        total_size = sum(child_size)
        return child_id, child_size, total_size
    def top_n_good_clusters(self,nr_clusters,min_size=0.1,max_extension=1.0):
        """
        Returns the members, clusters and linkage tables of the top N clusters.
        Extends the number of clusters to a specified limit if very small clusters are found
        Args:
            nr_clusters (int): Number of clusters to return
            min_size (float, optional): Minimal size for a cluster, as a % of total number of observations in X,
                defaults to 0.1 (meaning the smallest cluster should be at least 10% of overall size)
            max_extension (float, optional): Percent extension to nr_splits if min_size not met by all clusters, defaults to 1.0
                Example:
                    - if nr_splits = 3, min_size = 0.1, max_extension=1
                    - max_extension = 1 means up to 100% increase in nr_splits, i.e. up to 6 splits in this case
                    - only 1 out of 3 clusters initially are > 10%
                    - Initially this will add 2 more splits (3 - 1) to a total of 5 which is less then the max_extension allowance of 6
                    - If again 2 of the 5 are under 10%, this would mean increasing number of splits to 7, however, the max is 6 so we end up with 6
        Returns:
            res (list): List of dictionaries containing details (ids, parent, members, table, size) of all relevant clusters found
        >>> X=[[x] for x in [1001,1000,1,10,99,100,101]]
        >>> z=fastcluster.single(X)
        >>> hac = HAC(z)
        >>> res = hac.top_n_good_clusters(3)
        >>> res
        {9: {'cluster_id': 9, 'cluster_parent': -1, 'cluster_members': [4, 5, 6], 'cluster_table': {4: [4, '', '', 0, 1], 5: [5, '', '', 0, 1], 6: [6, '', '', 0, 1], 8: [8, 5, 6, 1.0, 2], 9: [9, 4, 8, 1.0, 3]}, 'cluster_size': 3}, 10: {'cluster_id': 10, 'cluster_parent': -1, 'cluster_members': [2, 3], 'cluster_table': {2: [2, '', '', 0, 1], 3: [3, '', '', 0, 1], 10: [10, 2, 3, 9.0, 2]}, 'cluster_size': 2}, 7: {'cluster_id': 7, 'cluster_parent': -1, 'cluster_members': [0, 1], 'cluster_table': {0: [0, '', '', 0, 1], 1: [1, '', '', 0, 1], 7: [7, 0, 1, 1.0, 2]}, 'cluster_size': 2}}
        """
        # n top merges cut the tree into n+1 clusters, hence the decrement.
        nr_clusters = nr_clusters - 1
        max_num_clusters = int(nr_clusters * (1 + max_extension))
        check = True
        # While number of tiny clusters (< min_size) is non-zero keep increasing
        while check and nr_clusters < max_num_clusters:
            clust_id, clust_size, total_size=self.top_n_clusters(nr_clusters)
            nr_tiny_clusters = sum([c / total_size < min_size for c in clust_size])
            check = nr_tiny_clusters > 0
            nr_clusters = min(nr_clusters + nr_tiny_clusters, max_num_clusters)
        clust_id, clust_size, total_size=self.top_n_clusters(nr_clusters)
        res = {}
        for c in clust_id:
            m,_,t = self.get_members(c)
            res[c]={
                'cluster_id': c,
                'cluster_parent': self.parent,
                'cluster_members': m,
                'cluster_table': t,
                'cluster_size': len(m),
            }
        # Check counts still match and no datapoints lost
        assert(total_size==sum([res[c]['cluster_size'] for c in clust_id]))
        return res
def left_clust(nd):
    """
    Returns the id of the left cluster belonging to a node

    Args:
        nd (node): Node from nodelist in scipy.cluster.hierarchy.to_tree
    Returns:
        id or '' if the node is just a datapoint and not a cluster
    """
    # A leaf node has no children: get_left() returns None. Check for that
    # explicitly instead of the original bare `except:`, which also
    # swallowed unrelated errors (including KeyboardInterrupt).
    left = nd.get_left()
    return '' if left is None else left.get_id()
def right_clust(nd):
    """
    Returns the id of the right cluster belonging to a node

    Args:
        nd (node): Node from nodelist in scipy.cluster.hierarchy.to_tree
    Returns:
        id or '' if the node is just a datapoint and not a cluster
    """
    # A leaf node has no children: get_right() returns None. Check for that
    # explicitly instead of the original bare `except:`, which also
    # swallowed unrelated errors (including KeyboardInterrupt).
    right = nd.get_right()
    return '' if right is None else right.get_id()
11df6c4237ed4afb3fb15c2fa85613dae1812c75 | Python | MarkTiukov/UMLproject | /UML/charts/ClassChart.py | UTF-8 | 1,376 | 3.140625 | 3 | [] | no_license | import tkinter as tk
import Colors
from charts.Chart import Chart
class ClassChart(Chart):
    """A UML class-diagram box: an editable title row plus free-text
    compartments for the fields and the methods."""

    def __init__(self,
                 canvas: tk.Canvas,
                 charts: list,
                 arrows: list,
                 x=0, y=0,
                 width=160,
                 height=200,
                 boundColor=Colors.BLACK,
                 thickness=4,
                 backgroundColor=Colors.LIGHT_LIGHT_GREY,
                 name="Class"):
        super().__init__(canvas, charts, arrows, x, y, width, height,
                         boundColor,
                         thickness, backgroundColor)
        # One-line class name, centered horizontally.
        title = tk.Text(self.frame, width=self.width, height=1,
                        bg=self.backgroundColor, wrap=tk.WORD)
        title.insert(1.0, name)
        title.tag_configure("center", justify='center')
        title.tag_add("center", "1.0", "end")
        self.name = title
        # Attribute and method compartments are plain text areas.
        self.fields = tk.Text(self.frame, width=self.width, height=5,
                              bg=self.backgroundColor)
        self.methods = tk.Text(self.frame, width=self.width,
                               bg=self.backgroundColor)

    def draw(self):
        """Stack the three compartments, place the frame, then defer to Chart."""
        for compartment in (self.name, self.fields, self.methods):
            compartment.pack()
        self.frame.place(x=self.x, y=self.y, width=self.width,
                         height=self.height)
        super().draw()
| true |
f4807e72908c214175d66c3404943dab38f4296e | Python | komo-fr/AtCoder | /abc/088/c.py | UTF-8 | 653 | 3.546875 | 4 | [] | no_license | # https://atcoder.jp/contests/abc088/tasks/abc088_c
# C - Takahashi's Information
#
# Decide whether the 3x3 grid c can be written as c[i][j] = a[i] + b[j].
# Fixing a[0] = 0 forces b[j] = c[0][j] and a[i] = c[i][0] - b[0]; the
# decomposition exists iff that candidate satisfies every cell.
c_matrix = [list(map(int, input().split())) for _ in range(3)]

b_list = c_matrix[0][:]                                  # b[j], with a[0] fixed to 0
a_list = [c_matrix[i][0] - b_list[0] for i in range(3)]  # a[i] = c[i][0] - b[0]

# Verify the candidate against every cell (replaces the manual
# break-flag loops; also drops the unused a_matrix variable).
is_ok = all(
    a_list[i] + b_list[j] == c_matrix[i][j]
    for i in range(3)
    for j in range(3)
)

ans = 'Yes' if is_ok else 'No'
print(ans)
| true |
ca6fd34392eed7f874e09af4752571d6510c708b | Python | jetbridge/jetkit-flask | /jetkit/db/utils.py | UTF-8 | 1,318 | 3.015625 | 3 | [] | no_license | from sqlalchemy.event import listen
from sqlalchemy import Table
from functools import partial
def escape_like(query: str, escape_character: str) -> str:
    """
    Escape special characters that are used in SQL's LIKE and ILIKE.

    Chosen escape character is prepended to each occurrence of a special character.
    Escape character is considered a special character too.

    WARNING: Do not forget to specify escape character to SQLAlchemy when actually
    performing `like`s and `ilike`s:

    >>> escape_character = "~"
    >>> search_query = "99.5%"
    >>> search_query = escape_like(search_query, escape_character)
    >>> Record.query.like(search_query, escape=escape_character) # do not forget `escape`

    Raises:
        ValueError: if `escape_character` is not exactly one character.
    """
    # Raise a real exception instead of `assert`: asserts are stripped under
    # python -O, which would silently let multi-character escapes through.
    if len(escape_character) != 1:
        raise ValueError("escape_character must be a single character")
    # It is crucial that `escape_character` is replaced first, otherwise the
    # escapes added for % and _ would themselves get escaped again.
    for case in (escape_character, "%", "_"):
        query = query.replace(case, escape_character + case)
    return query
def on_table_create(class_, ddl):
    """Run DDL on model class `class_` after creation, whether in migration or in deploy (as in tests)."""
    target_table = class_.__table__.name

    def _after_create(table, bind, **kw):
        # The event fires for every table; only act on the one backing `class_`.
        if table.name == target_table:
            ddl(table, bind, **kw)

    listen(Table, "after_create", _after_create)
| true |
2dbfc39e6c106d1f28727f45df69a04538a5caa9 | Python | Scruf/BluebiteAssignment | /question1.py | UTF-8 | 835 | 3.84375 | 4 | [] | no_license | def base36_to_base10(sample):
    # Convert a base-36 string to a base-10 int.
    # Returns {"data": <int>} on success, or {"error": <message>} for an
    # empty string or a non-alphanumeric character.
    num = 0
    # Empty input cannot be converted; report it as an error dict.
    if not sample:
        return {"error":"Cannot convert empty string"}
    # Digit values 0..35 in order; a character's index is its base-36 value.
    alphabet = ['0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O',
    'P','Q','R','S','T','U','V','W','X','Y','Z']
    # Walk the string left to right; position i has place value 36**(len-i-1).
    for i,s in enumerate(sample):
        # NOTE(review): isalnum() also accepts alphanumerics outside
        # 0-9A-Za-z (e.g. non-ASCII digits), for which alphabet.index()
        # raises ValueError instead of returning the error dict -- confirm.
        if s.isalnum():
            # Accumulate this digit's contribution (case-insensitive).
            num += alphabet.index(s.upper())*(36**(len(sample)-i-1))
        else:
            # Non-alphanumeric character: report instead of raising.
            return {"error":"Invalid character"}
    return {"data":num}
# Manual smoke test when run as a script: exercises the empty-input error path.
if __name__ == '__main__':
    print(base36_to_base10(""))
| true |
02c8ba0209aa75cf237732a607f22ec69836debf | Python | panda1909/beedeeprinting | /core/validators.py | UTF-8 | 433 | 2.796875 | 3 | [] | no_license | def validate_file_extension(value):
    # Field validator: reject uploads whose extension is not in the
    # allow-list below.  `value` is presumably a Django File/FieldFile (only
    # .name is read); raises ValidationError on an unknown extension.
    import os
    from django.core.exceptions import ValidationError
    ext = os.path.splitext(value.name)[1]  # [0] returns path+filename
    # Allowed upload types: images, vector/print formats, archives, office docs.
    valid_extensions = ['.jpg', '.jpeg', '.gif', '.png', '.eps', '.ai', '.pdf', '.zip', '.tar', '.rar', '.cdr', '.psd', '.tif', '.csv', '.xls', '.xlsx']
    # Comparison is case-insensitive on the extension only.
    if not ext.lower() in valid_extensions:
        raise ValidationError('Unsupported file extension.') | true |
ad738fdaa666811e2b8795eb3a7f117f7fac2eba | Python | TahjidEshan/PIXOR | /srcs/testset.py | UTF-8 | 1,672 | 2.609375 | 3 | [
"MIT"
] | permissive | import glob
import pandas as pd
import numpy as np
import os.path
from PIL import Image
class TestSet(object):
    """Loads camera images and velodyne point clouds recorded for one bag.

    Expects the layout <basedir>/<bagname>/{img,pc_data,velo} with image and
    scan files plus CSV timestamp indexes.
    """

    def __init__(self, basedir, bagname):
        self.basedir = basedir
        self.bagname = bagname
        self.data_path = os.path.join(basedir, bagname)
        self.imtype = 'png'
        self._get_file_lists()

    def _get_file_lists(self):
        """Find and list data files for each sensor."""
        self.cam_files = sorted(glob.glob(
            os.path.join(self.data_path, 'img', '*.{}'.format(self.imtype))))
        self.velo_files = sorted(glob.glob(
            os.path.join(self.data_path, 'pc_data', '*.bin')))
        self.cam_stamps = pd.read_csv(os.path.join(self.data_path, 'img', 'imgtimestamps.csv'))
        self.velo_stamps = pd.read_csv(os.path.join(self.data_path, 'velo', 'velotimestamps.csv'))

    def get_cam2(self, idx):
        """Load the image at the specified index as an RGB PIL image."""
        # (Removed the unused local `mode`; the conversion target is given
        # directly to convert().)
        return Image.open(self.cam_files[idx]).convert('RGB')

    def get_velo(self, idx):
        """Read velodyne [x,y,z,reflectance] scan at the specified index."""
        scan = np.fromfile(self.velo_files[idx], dtype=np.float32)
        # One point per row: (N, 4) = x, y, z, reflectance.
        return scan.reshape((-1, 4))
def test():
    """Manual smoke test against a locally mounted recording."""
    base_path = '/mnt/ssd2/od/testset'
    bag = '_2018-10-30-15-25-07'
    ds = TestSet(base_path, bag)
    print(ds.velo_files)
    image = ds.get_cam2(0)
    print(image.width)
    print(image.height)
    points = ds.get_velo(0)
    np.set_printoptions(precision=3)
    print(points.mean(axis=0))
    print(points.max(axis=0))
    print(points.min(axis=0))
# Run the smoke test only when executed directly, not on import.
if __name__=="__main__":
    test()
| true |
71aac90ce2ed4cd19d79a976528c6722e7ea11ec | Python | Jiachengliu1/Machine-Learning-Algorithms-for-Data-Science | /EM_GMM/EM_GMM.py | UTF-8 | 2,578 | 3.078125 | 3 | [] | no_license | import numpy as np
import random
def load_data(filename):
    """Parse a comma-separated text file into a list of float rows.

    Each line of `filename` becomes one list of floats.
    """
    data = []
    # `with` guarantees the handle is closed (the original leaked it).
    with open(filename) as f:
        for line in f:
            data.append([float(element) for element in line.strip('\n').split(',')])
    return data
def Gaussian(data, mean, cov):
    """Multivariate normal density of `data` under N(mean, cov)."""
    dim = np.shape(cov)[0]
    diff = data - mean
    cov_det = np.linalg.det(cov)
    cov_inv = np.linalg.inv(cov)
    # Quadratic form in the exponent: -1/2 (x-mu)^T Sigma^-1 (x-mu).
    exponent = -0.5 * np.dot(np.dot(diff.T, cov_inv), diff)
    # Normalization: det(Sigma)^-1/2 / (2*pi)^(d/2).
    norm_const = np.power(cov_det, -0.5) / np.power(2 * np.pi, dim / 2)
    return norm_const * np.exp(exponent)
def E_step(data, K, mean, cov, amp):
    """Compute responsibilities: weight[i][c] = P(component c | point i).

    data is (d, N); mean/cov/amp are per-component parameter lists of length K.
    Returns an (N, K) responsibility matrix.
    """
    N = data.shape[1]
    # Sized from the actual point/component counts (was hard-coded to
    # 150x3, which broke for any other dataset size or K).
    E_weight = np.zeros((N, K))
    for i in range(N):
        # Unnormalized posterior of each component for point i.
        top = [amp[c] * Gaussian(data[:,i], mean[c], cov[c]) for c in range(K)]
        bottom = np.sum(top)
        for c in range(K):
            E_weight[i][c] = top[c] / bottom
    return E_weight
def M_step(data, K, weight):
    """Re-estimate per-component mean, covariance and amplitude.

    data is (d, N); weight is the (N, K) responsibility matrix from E_step.
    Returns (mean, cov, amp) as lists with one entry per component.
    """
    mean = []
    cov = []
    amp = []
    N = data.shape[1]
    for c in range(K):
        # Effective number of points assigned to component c
        # (list instead of a generator: np.sum on generators is unreliable).
        Nc = np.sum([weight[i][c] for i in range(N)])
        mean.append((1.0 / Nc) * np.sum([weight[i][c] * data[:,i] for i in range(N)], axis = 0))
        # reshape(-1, 1) makes the outer product work for any dimension d
        # (was hard-coded to 2 dimensions).
        cov.append((1.0 / Nc) * np.sum([weight[i][c] * (data[:,i] - mean[c]).reshape(-1,1) * (data[:,i] - mean[c]).reshape(-1,1).T for i in range(N)], axis = 0))
        amp.append(Nc / N)
    return mean, cov, amp
def GMM(data, K):
    """Fit a K-component Gaussian mixture to data (shape (d, N)) via EM.

    Returns (mean, cov, amp, iteration) from the last M-step.
    """
    N = data.shape[1]
    weight = np.random.rand(N, K)
    # Random responsibilities, normalized per point.  (Was hard-coded to
    # exactly three draws, breaking for K != 3; now honors K.  Also dropped
    # the unused local `d`.)
    for i in range(N):
        draws = [random.random() for _ in range(K)]
        total = sum(draws)
        weight[i] = [value / total for value in draws]
    cur_weight = weight
    iteration = 0
    while True:
        mean, cov, amp = M_step(data, K, cur_weight)
        new_weight = E_step(data, K, mean, cov, amp)
        # Stop when every responsibility has converged, or after ~1000 rounds.
        if (np.abs(new_weight-cur_weight) < 0.000001).all() or iteration > 1000:
            break
        iteration += 1
        cur_weight = new_weight.copy()
    return mean, cov, amp, iteration
# Driver: fit a 3-component mixture to the points in clusters.txt.
# NOTE(review): np.mat is deprecated in favor of np.asarray/ndarray.
data = np.mat(load_data('clusters.txt'))
# Transpose to (d, N): one column per data point, as the EM helpers expect.
data = data.T
results = GMM(data, 3)
print('Results of self-implemented Gaussian Mixture Model are:')
print('Mean:', results[0])
print('Covariance:', results[1])
print('Amplitude:', results[2])
print('Iteration counts:', results[3])
| true |
15b275a8ab74282214595af5f8cb5c46bd6821bc | Python | JeromeLee-ljl/leetcode | /001~050/_012_integer_to _roman.py | UTF-8 | 1,246 | 4 | 4 | [] | no_license | class Solution:
Roman = ('I', 'V', 'X', 'L', 'C', 'D', 'M')
def intToRoman(self, num):
"""
:type num: int
:rtype: str
Given an integer, convert it to a roman numeral.
Input is guaranteed to be within the range from 1 to 3999.
"""
if num > 3999:
raise ValueError
decimals = []
roman_str = ''
while num != 0:
n = num % 10
num //= 10
decimals.append(n)
for i in range(len(decimals) * 2 - 1, 0, -2):
n = decimals[i // 2]
if n < 5:
if n < 4:
roman_str += self.Roman[i - 1] * n
else:
roman_str += self.Roman[i - 1] + self.Roman[i]
else:
n -= 5
if n < 4:
roman_str += self.Roman[i] + self.Roman[i - 1] * n
else:
roman_str += self.Roman[i - 1] + self.Roman[i + 1]
return roman_str
def _test():
    """Print conversions for a few sample values."""
    s = Solution()
    for value in (3, 4, 25, 199):
        print(s.intToRoman(value))
# Run the demo prints only when executed directly.
if __name__ == "__main__":
    _test()
| true |
b8bbaeaf7dd06d30cc1fbcd322bfc814c1ad8bb0 | Python | liuwei881/leetcode | /Partition_Equal_Subset_Sum.py | UTF-8 | 785 | 3.59375 | 4 | [] | no_license | # coding=utf-8
"""
Given a non-empty array containing only positive integers,
find if the array can be partitioned into two subsets such that
the sum of elements in both subsets is equal.
Note:
Each of the array element will not exceed 100.
The array size will not exceed 200.
Example 1:
Input: [1, 5, 11, 5]
Output: true
Explanation: The array can be partitioned as [1, 5, 5] and [11].
Example 2:
Input: [1, 2, 3, 5]
Output: false
Explanation: The array cannot be partitioned into equal sum subsets.
"""
class Solution(object):
    def canPartition(self, nums):
        """Return True iff nums can be split into two subsets with equal sums.

        Builds the set of all reachable subset sums, then checks whether
        half of the grand total is among them.
        """
        sums = sum(nums)
        # An odd total can never split evenly.
        if sums & 1:
            return False
        # All subset sums reachable so far; 0 is the empty subset.
        nset = set([0])
        for n in nums:
            # Iterate a snapshot: extending the live set while looping over
            # it would be undefined.
            for m in nset.copy():
                nset.add(m + n)
        # sums / 2 is a float on Python 3 but compares equal to the matching
        # int member, so the membership test is still correct.
        return sums / 2 in nset | true |
9417606a771c90ce10382261aa6d6bcaa31d24e1 | Python | R-H-T/GWLocationAPI | /app.py | UTF-8 | 3,595 | 2.59375 | 3 | [] | no_license | # coding: utf-8
__author__ = "Roberth Hansson-Tornéus"
__copyright__ = "Copyright ©2018 – Roberth Hansson-Tornéus"
from model import User
from flask import Flask, jsonify, request, g
from flask_httpauth import HTTPBasicAuth
from model.database import DatabaseManager
# ENCODING
import sys
import codecs
from controller import IndexController, FindController, LocationsController
# ENCODING: wrap the standard streams for UTF-8.
# NOTE(review): codecs.getwriter/getreader wrapping is a Python 2 idiom, and
# stderr gets a *reader* here, which looks like a typo for getwriter --
# confirm on Python 3.
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
sys.stderr = codecs.getreader('utf8')(sys.stderr)
# APP: Flask application configured from the local `config` module.
app = Flask(__name__)
app.config.from_object('config')
# Template globals available to every Jinja template.
app.jinja_env.globals['app_title'] = 'GW\'s Location API'
# Navigation links rendered by the templates.
link_home = dict(title='Home', href='./')
link_find_location = dict(title='Find location', href='./find/')
app.jinja_env.globals['home_links'] = [link_home, link_find_location]
# DATABASE: DatabaseManager() is called for its side effects; its session is
# used by the views below.
DatabaseManager()
# Security: HTTP Basic auth handler (see verify_password).
auth = HTTPBasicAuth()
# INDEX
@auth.verify_password
def verify_password(email, password):
    """Basic-auth callback: validate credentials and remember the user on g."""
    user = DatabaseManager.session.query(User).filter_by(email=email).first()
    if not user:
        return False
    if not user.verify_password(password):
        return False
    g.user = user
    return True
@app.route('/', methods=['GET'])
def index():
    """Main page: renders the index view with a short intro blurb."""
    controller = IndexController('Welcome to GW Locations!')
    body = """
    This is a RESTful API demo written by {author}.<br />
    Follow the instructions in the README found within this project's directory. <br />
    """.format(author=__author__)
    controller.add_to_args({'content': body})
    return controller.render_view()
# FIND LOCATION
@app.route('/find/<location>', methods=['GET'])
def find(location):
    """Find a location; returns a JSON object with the matching results."""
    find_controller = FindController()
    locations = find_controller.search_location(location)
    status = 'OK'
    # `is 0` tested object identity with an int literal (implementation-
    # dependent and a SyntaxWarning on modern Python); value equality is meant.
    if len(locations) == 0:
        status = 'ZERO_RESULTS'
    return jsonify(results=[i.serialize_excluding_id for i in locations], status=status)
# LOCATIONS
@app.route('/locations', methods=['GET'])
@app.route('/locations/', methods=['GET'])
def get_all_locations():
    """List every stored location as JSON."""
    locations_controller = LocationsController()
    locations = locations_controller.get_all()
    # Report ZERO_RESULTS only when the list is actually empty; it was
    # unconditionally ZERO_RESULTS before, contradicting the returned rows
    # and the convention used by find().
    status = 'OK' if len(locations) != 0 else 'ZERO_RESULTS'
    return jsonify(results=[i.serialize for i in locations], status=status)
@app.route('/locations', methods=['POST'])
@app.route('/locations/', methods=['POST'])
@auth.login_required
def create_location():
    """Create a location from query parameters (requires basic auth)."""
    name = request.args.get('name', '')
    latitude = request.args.get('latitude', '')
    longitude = request.args.get('longitude', '')
    success = LocationsController().create_location(name=name,
                                                    latitude=latitude,
                                                    longitude=longitude)
    if success:
        result, status = "Object created.", "OK"
    else:
        result, status = "Feature not yet configured.", "TODO"
    return jsonify(result=result, status=status)
@app.route("/locations/<int:_id>", methods=['GET'])
def get_location_by_id(_id):
method = request.method
# get by id
if method == 'GET':
return 'Searching for location with id %s' % _id
@app.route("/locations/<int:_id>", methods=['PUT', 'DELETE'])
@auth.login_required
def location_by_id(_id):
method = request.method
# update
if method == 'PUT':
return 'Updating item #%s if authorized...' % _id
# delete
elif method == 'DELETE':
return 'Deleting item #%s...' % _id
# Development server entry point; binds on all interfaces.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
| true |
3de0c5012dcde55118c18d15098191b4962746c5 | Python | chrifl13/is105 | /lab/python/exercises/ex1.py | UTF-8 | 379 | 3.515625 | 4 | [] | no_license | # "#" gjør det mulig for meg å skrive ting på i scriptet som python vil ignorere pga #. NEdenfor vil "print" utløse at teksten innen for " " vil bli printet frem på skjermen.
print "hello world!"
print "hello again"
print "i like typing this"
print "this is fun."
print 'yay printing.'
print "i'd much rather you 'not'."
print 'i "said" do not touch this.'
print "helge"
| true |
4f44c26facbe04d6ad46b12459e4d8a6beb35c82 | Python | ninastoessinger/Suffixer | /Suffixer.roboFontExt/lib/suffixer.py | UTF-8 | 6,738 | 2.640625 | 3 | [
"MIT"
] | permissive | """
RoboFont extension to change/append/replace glyph name suffixes
v1.2 / Nina Stoessinger / February 2020
With thanks to Frederik Berlaen, David Jonathan Ross, Ryan Bugden
"""
from AppKit import NSApp, NSMenuItem, NSAlternateKeyMask, NSCommandKeyMask
from mojo.tools import CallbackWrapper
from mojo.extensions import registerExtensionDefaults, getExtensionDefault, setExtensionDefault
from mojo.UI import Message
from vanilla import *
class Suffixer:
    """RoboFont extension UI for changing, appending or replacing glyph
    name suffixes in the current font."""

    def __init__(self):
        """ Add the "Change Suffixes" menu item to the Font menu. """
        title = "Change Suffixes..."
        fontMenu = NSApp().mainMenu().itemWithTitle_("Font")
        if not fontMenu:
            print("Suffixer: Error, aborting")
            return
        fontMenu = fontMenu.submenu()
        # Already installed (e.g. extension reloaded): do not add twice.
        if fontMenu.itemWithTitle_(title):
            return
        index = fontMenu.indexOfItemWithTitle_("Add Glyphs")
        # Wrapper that dispatches the menu action to openWindow; stored on
        # self so the menu item's target stays referenced.
        self.target = CallbackWrapper(self.openWindow)
        newItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(title, "action:", "S")
        newItem.setKeyEquivalentModifierMask_(NSAlternateKeyMask | NSCommandKeyMask);
        newItem.setTarget_(self.target)
        fontMenu.insertItem_atIndex_(newItem, index+1)
    def openWindow(self, sender=None):
        """ Initialize the input window. """
        # Standard OpenType feature tags offered as suffix presets.
        presets = [
            "case", "dnom", "fina", "hist", "init", "isol", "locl", "lnum", "medi", "numr", "onum", "ordn", "tnum",
            "pcap", "salt", "sinf", "smcp", "ss01", "ss02", "ss03", "ss04", "ss05", "ss06", "ss07", "ss08",
            "ss09", "ss10", "ss11", "ss12", "ss13", "ss14", "ss15", "ss16", "ss17", "ss18", "ss19", "ss20",
            "subs", "sups", "swsh", "titl", "zero" ]
        presetList = " ".join(presets)
        # Seed the defaults on first run, then read the (possibly
        # user-extended) preset list back.
        registerExtensionDefaults({"nl.typologic.suffixer.presetSuffixes" : presetList})
        currentPresets = getExtensionDefault("nl.typologic.suffixer.presetSuffixes").split()
        self.f = CurrentFont()
        if self.f is None:
            print("Suffixer: No font open")
            return
        # Collect every suffix already used in the font, sorted for the popup.
        existingSuffixes = []
        for g in self.f:
            suf = self._findSuffix(g.name)
            if suf != None and suf not in existingSuffixes:
                existingSuffixes.append(suf)
        existingSuffixes.sort()
        # Preselect the suffix of the current glyph, or of the selection.
        currentSuffix = ""
        if CurrentGlyph() is not None:
            currentSuffix = self._findSuffix(CurrentGlyph().name)
        elif self.f.selection is not None:
            for gn in self.f.selection:
                currentSuffix = self._findSuffix(gn)
                if currentSuffix != None:
                    break
        self.w = FloatingWindow((300, 166), "Suffixer")
        # Window layout metrics (padding, row height, row/column positions).
        p = 10
        h = 20
        y1, y2, y3, y4 = 15, 49, 82, 135
        w1, x2 = 160, 180
        self.w.labelTwo = TextBox((p, y1, w1, h), "Add suffix to glyph names:")
        self.w.dotTwo = TextBox((x2, y1, 15, h), ".")
        self.w.newSuffix = ComboBox((x2+p, y1, -p, h), currentPresets)
        self.w.replace = CheckBox((p+2, y2, w1, h), "Replace existing suffix:", callback=self.replaceCheckCallback)
        self.w.dotOne = TextBox((x2, y2, 15, h), ".")
        self.w.oldSuffix = PopUpButton((x2+p, y2, -p, h), existingSuffixes)
        if currentSuffix != "" and currentSuffix != None:
            self.w.oldSuffix.set(existingSuffixes.index(currentSuffix))
        self.w.scope = RadioGroup((p, y3, -p, h*2), ["Target selected glyphs", "Replace all in current font"], isVertical=True)
        self.w.scope.set(0)
        # Start in "replace" mode when a current suffix was detected.
        currentState = 0 if currentSuffix == "" or currentSuffix == None else 1
        self.w.replace.set(currentState)
        self.w.scope.enable(currentState)
        self.w.submit = Button((p, y4, -p, h), "Change suffixes", callback=self.replaceSuffixes)
        self.w.setDefaultButton(self.w.submit)
        self.w.open()
        self.w.makeKey()
    def replaceCheckCallback(self, sender):
        """ Toggle UI options depending on selection whether to replace or append the new suffix. """
        if self.w.replace.get() == False:
            self.w.scope.set(0)
            self.w.scope.enable(0)
        else:
            self.w.scope.enable(1)
    def _findSuffix(self, gname):
        """ Find the suffix (if any) in a given glyph name. """
        i = gname.find(".")
        if i != -1 and i != 0:
            return gname[i+1:]
        else:
            return None
    def replaceSuffixes(self, sender):
        """ Handle replacing/appending of suffixes. """
        mode = "replace" if self.w.replace.get() == 1 else "append"
        oldSuffix = self.w.oldSuffix.getItems()[self.w.oldSuffix.get()]
        enteredSuffix = self.w.newSuffix.get()
        suffixes_in = [oldSuffix, enteredSuffix]
        suffixes = [] # build proper suffixes list
        for s in suffixes_in:
            if s is not None and len(s) > 0:
                if s[0] == ".":
                    s = s[1:] # handle suffixes without periods
            suffixes.append(s)
        if mode == "replace" and suffixes[0] == suffixes[1]:
            Message(u"Cannot replace a suffix with itself.\nI mean I could, but there seems to be little point :)")
        elif mode == "append" and suffixes[1] == "":
            Message(u"Cannot append an empty suffix.\n(Or you could just pretend I've already done it.)")
        else:
            # Operate on the whole font or only on the selected glyph names.
            scope = self.f.keys() if self.w.scope.get() == 1 else self.f.selectedGlyphNames
            if mode == "replace":
                for gname in scope:
                    if gname.endswith(suffixes[0]):
                        sufLen = len(suffixes[0])
                        if len(suffixes[1]) > 0:
                            newName = gname[:-sufLen] + suffixes[1]
                        else:
                            # Empty replacement: also strip the period.
                            sufLenWithPeriod = sufLen+1
                            newName = gname[:-sufLenWithPeriod]
                        self._changeGlyphname(gname, newName)
            elif mode == "append":
                for gname in scope:
                    newName = gname + "." + suffixes[1]
                    self._changeGlyphname(gname, newName)
            self.f.changed()
            # store new values as defaults
            savedPresets = getExtensionDefault("nl.typologic.suffixer.presetSuffixes")
            if enteredSuffix != "" and enteredSuffix not in savedPresets:
                savedPresetsList = savedPresets.split()
                savedPresetsList.append(enteredSuffix)
                savedPresetsList.sort()
                newPresets = " ".join(savedPresetsList)
                setExtensionDefault("nl.typologic.suffixer.presetSuffixes", newPresets)
            self.w.close()
    def _changeGlyphname(self, gname, newName):
        """ Assign a new glyphname to a glyph. """
        print("Suffixer: Changing name of %s to %s" % (gname, newName))
        self.f[gname].prepareUndo("Change Suffix")
        # check if new name is already in use; if so, move the existing glyph
        # aside to <newName>.copy_<n> first
        if newName in self.f.keys():
            i = 1
            while (newName + ".copy_" + str(i)) in self.f.keys():
                i = i+1
            cp = newName + ".copy_"+str(i)
            self.f.renameGlyph(newName, cp, renameComponents=True, renameGroups=True, renameKerning=True)
            self.f[cp].unicode = None
            print("Suffixer: A glyph named %s was already present in the font. It has been renamed to %s." % (newName, cp))
            ### Note for future development:
            ### At this point there is also the question which glyph existing composites should refer to
            ### Think about how to address this
        # actual renaming of targeted glyph
        self.f.renameGlyph(gname, newName, renameComponents=True, renameGroups=True, renameKerning=True)
        self.f[newName].autoUnicodes()
        self.f[newName].performUndo()
# Instantiate on import so loading the extension installs the menu item.
Suffixer()
| true |
a63c3f3442153fe2a8216875dd072e375fcaa635 | Python | mohmutho/fund_python | /superlist.py | UTF-8 | 192 | 3.234375 | 3 | [] | no_license | class superlist(list):
    # Demo subclass: overriding __len__ makes len() report 1000 regardless
    # of how many elements the list actually holds.
    def __len__(self):
        return 1000
# len() goes through the override, so this prints 1000 even while empty.
superlist1 = superlist()
print(len(superlist1))
# Indexing still uses the real list storage, so this prints 5.
superlist1.append(5)
print(superlist1[0])
# superlist is a subclass of list: prints True.
print(issubclass(superlist, list)) | true |
4ff5c75b10fbfa3b5489b55963b009003198fc37 | Python | djfigs1/Scouts-Pancake-Factory | /elements/GameElements/GameOverDoors.py | UTF-8 | 2,493 | 2.625 | 3 | [] | no_license | import pygame, os
import elements.HUDElements.ScaleUtility as SU
from elements.SPFScreenObject import SPFScreenObject
class GameOverDoors(SPFScreenObject):
    """Game-over door animation: two mirrored door images slide in from the
    screen edges after a deploy delay."""
    def __init__(self, surface):
        SPFScreenObject.__init__(self, surface)
        self.surface = surface
        # Milliseconds to wait after deploy() before the doors start moving.
        self.timeTilDeploy = 5000
        self.isDeployed = False
        # Current horizontal extent of each door, in pixels.
        self.width = 0
        self.height = pygame.display.Info().current_h
        # Milliseconds that must accumulate before the doors advance a step.
        self.speed = 15
        # Unscaled pixels the doors advance per step.
        self.increment = 20
        self.deployTime = 0
        self.accumTime = 0
        # Only used by the commented-out debug rectangles in blit().
        self.color = (255,0,0)
        self.imageDoor = pygame.image.load(os.path.join(os.path.dirname(__file__), '../../resource/images/game/spf_gameover_door.png')).convert()
        self.imageDoor = pygame.transform.smoothscale(self.imageDoor, SU.scalePos(self.imageDoor.get_rect().width, self.imageDoor.get_rect().height))
        self.imageLogo = pygame.image.load(os.path.join(os.path.dirname(__file__), '../../resource/images/game/spf_gameover_logo.png'))
        self.imageLogo = pygame.transform.smoothscale(self.imageLogo, SU.scalePos(self.imageLogo.get_rect().width, self.imageLogo.get_rect().height))
        # The right-hand door is the mirror image of the left one.
        self.flippedDoor = pygame.transform.flip(self.imageDoor, True, False)
    def deploy(self):
        # Arm the animation; update() starts counting from the next frame.
        self.isDeployed = True
    def blit(self):
        # Advance the doors until each covers half the (scaled) 1920px screen.
        if (not self.width >= SU.scaleValue(1920) / 2):
            if (self.accumTime >= self.speed):
                self.accumTime = 0
                self.width += SU.scaleValue(self.increment)
                # Play the slam sound once, on the step that closes the doors.
                if (self.width >= SU.scaleValue(1920) / 2):
                    slam = pygame.mixer.Sound(os.path.join(os.path.dirname(__file__), '../../resource/sound/game/mm_door_close.wav'))
                    slam.play()
        #pygame.draw.rect(self.surface, self.color, (0, 0, self.width, self.height))
        #.draw.rect(self.surface, self.color, (SU.scaleValue(1920) - self.width, 0, self.width, self.height))
        self.surface.blit(self.imageDoor, (self.width-self.imageDoor.get_rect().width,0))
        self.surface.blit(self.flippedDoor, (SU.scaleValue(1920) - self.width, 0))
        #self.surface.blit(self.imageLogo, (self.width-self.imageDoor.get_rect().width,SU.scaleValue(1080) / 2 - self.imageLogo.get_rect().height / 2))
    def update(self, clock):
        # Per-frame tick; `clock` supplies the elapsed time via get_time().
        if (self.isDeployed):
            self.accumTime += clock.get_time()
            if (self.deployTime >= self.timeTilDeploy):
                self.blit()
            else:
                # Still inside the pre-deploy delay; keep accumulating.
                self.deployTime += clock.get_time()
| true |
1ce56377b39aab6c7d279b46b41e109a2ac73313 | Python | ddas-04/FP-MRAM-FiPy-Stable | /Uniaxial_Field_only/FP_H_uni_only_Behtash_parameters.py | UTF-8 | 8,332 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python
##################################################
# Fokker-Planck equation Solution using FiPy module #
# Author: Debasis Das #
# Comment: Behtash's parameters are used #
##################################################
print('Import starts')
from fipy import FaceVariable, CellVariable, Gmsh2DIn3DSpace, VTKViewer, TransientTerm, ExplicitDiffusionTerm, DiffusionTerm, ExponentialConvectionTerm, DefaultSolver
from fipy.variables.variable import Variable
from fipy.tools import numerix
import time
import pickle
from shutil import copyfile
print('Import complete')
# ### Uniaxial Anisotropy function
def H_UniaxialAnisotropy(mUnit, uAxis, Ku2, Msat):
    """Uniaxial anisotropy field H = (2*Ku2/(mu0*Msat)) * (m . u) * u, in A/m.

    mUnit: 3xN unit magnetization vectors (one column per mesh cell).
    uAxis: 1x3 easy-axis direction (any magnitude; normalized here).
    Ku2:   anisotropy constant in J/m^3.  Msat: saturation magnetization in A/m.
    """
    global mu0
    # Normalize the easy-axis direction.
    axis_unit = uAxis / numerix.linalg.norm(uAxis)
    # Replicate the 1x3 axis once per cell, then transpose to 3xN so it
    # lines up with the magnetization array.
    axis_per_cell = numerix.transpose(numerix.tile(axis_unit, (len(mUnit[0]), 1)))
    # Projection of each cell's m onto the easy axis.
    projection = numerix.dot(mUnit, axis_per_cell)
    # Per-cell field magnitude, in A/m.
    magnitude = numerix.multiply(projection, (2.0 * Ku2 / (mu0 * Msat)))
    field = numerix.zeros((3, len(magnitude)), 'd')
    for component in range(3):
        field[component] = numerix.multiply(magnitude, axis_per_cell[component])
    return field
# ### Real Time LLG function
def Calculate_dmdt(mAllCell, HeffBase):
    """Landau-Lifshitz-Gilbert right-hand side dm/dt for every cell.

    Combines the precession term m x H with the damping term
    alpha * m x (m x H), scaled by -gamma*mu0/(1 + alpha^2).
    Arrays are 3xN (one column per cell).
    """
    global alphaDamping, gamFac, mu0
    # Precession: m x H, with the vectors laid out along axis 0.
    precession = numerix.transpose(
        numerix.cross(mAllCell, HeffBase, axisa=0, axisb=0))
    # Damping: alpha * m x (m x H).
    damping = alphaDamping * numerix.transpose(
        numerix.cross(mAllCell, precession, axisa=0, axisb=0))
    prefactor = -(gamFac * mu0) / (1 + alphaDamping ** 2)
    return prefactor * (precession + damping)
# ### Mesh section
print('Meshing starts')
#mesh = Gmsh2DIn3DSpace('''
# radius = 1.0;
# cellSize = 0.008;
# // create inner 1/8 shell
# Point(1) = {0, 0, 0, cellSize};
# Point(2) = {-radius, 0, 0, cellSize};
# Point(3) = {0, radius, 0, cellSize};
# Point(4) = {0, 0, radius, cellSize};
# Circle(1) = {2, 1, 3};
# Circle(2) = {4, 1, 2};
# Circle(3) = {4, 1, 3};
# Line Loop(1) = {1, -3, 2} ;
# Ruled Surface(1) = {1};
# // create remaining 7/8 inner shells
# t1[] = Rotate {{0,0,1},{0,0,0},Pi/2} {Duplicata{Surface{1};}};
# t2[] = Rotate {{0,0,1},{0,0,0},Pi} {Duplicata{Surface{1};}};
# t3[] = Rotate {{0,0,1},{0,0,0},Pi*3/2} {Duplicata{Surface{1};}};
# t4[] = Rotate {{0,1,0},{0,0,0},-Pi/2} {Duplicata{Surface{1};}};
# t5[] = Rotate {{0,0,1},{0,0,0},Pi/2} {Duplicata{Surface{t4[0]};}};
# t6[] = Rotate {{0,0,1},{0,0,0},Pi} {Duplicata{Surface{t4[0]};}};
# t7[] = Rotate {{0,0,1},{0,0,0},Pi*3/2} {Duplicata{Surface{t4[0]};}};
# // create entire inner and outer shell
# Surface Loop(100)={1,t1[0],t2[0],t3[0],t7[0],t4[0],t5[0],t6[0]};
#''', order=2).extrude(extrudeFunc=lambda r: 1.00001 * r) # doctest: +GMSH
mesh=pickle.load(open("mesh_details_cellsize_0pt008_extrude_1pt00001.p","rb"))
print('Meshing Done')
#pickle.dump( mesh, open( "mesh_details_cellsize_0pt008_extrude_1pt00001.p", "wb" ) )
gridCoor = mesh.cellCenters
mUnit = gridCoor
mNorm = numerix.linalg.norm(mUnit,axis=0)
print('max mNorm='+str(max(mNorm)))
print('min mNorm='+str(min(mNorm)))
mAllCell = mUnit / mNorm # m values around the sphere surface are normalized
# ### Constant terms
kBoltzmann = 1.38064852e-23 #in J/K
mu0 = 4*numerix.pi * 1.0e-7 #in N/A^2
# ### Parameter values
gamFac = 1.7595e11 # in rad/(s.T)
#gamFac = (1.7595e11)/(2*numerix.pi) # in 1/(s.T)
alphaDamping = 0.027
Temperature = 300 # in K
Msat =1.23e6 # in A/m
thickness=2e-9
#length=50e-9
#width=3*length
magVolume = 1.0e-9 * (40e-9 * 40e-9) * (numerix.pi/4.0) # in m^3
#magVolume=(numerix.pi/4)*length*width*thickness
delta=43.0
Eb=delta*kBoltzmann*Temperature
Ku2=Eb/magVolume
#Ku2 = 2.245e5 # in J/m^3
#print('Ku2 = '+str(Ku2))
D = alphaDamping * gamFac * kBoltzmann * Temperature / ((1+alphaDamping**2)*Msat * magVolume) # unit 1/s
# ### Calculation of uniaxial anisotropy field
th=numerix.pi/6.0
ph=0.0*numerix.pi
ux=numerix.sin(th)*numerix.cos(ph)
uy=numerix.sin(th)*numerix.sin(ph)
uz=numerix.cos(th)
uAxis = numerix.array([[ux,uy,uz]])
#uAxis=numerix.array([[0.0,0.0,1.0]])
HuniaxBase = H_UniaxialAnisotropy(mAllCell, uAxis, Ku2, Msat)
print('Max_Huni='+str(max(HuniaxBase[2,:])))
print('Min_Huni='+str(min(HuniaxBase[2,:])))
#exit()
HeffBase = HuniaxBase #+ HdemagBase # Effective Field
# ### Time section
dexp=-40.0
limit=0.0
incr=0.05
number_of_steps=int((limit-dexp)/incr)+1
max_val_phi=numerix.zeros(number_of_steps+1)
percentage_val=numerix.zeros(number_of_steps+1)
# ### Define cell variable and viewer
phi = CellVariable(name=r"$\Phi$",mesh=mesh,value=0.25/numerix.pi)
max_val_phi[0]=max(phi)
viewer=VTKViewer(vars=phi,datamin=0., datamax=1.)
viewer.plot(filename="trial.vtk")
#exit()
# ### Store the initial .vtk file
t_i=0
filename = str(t_i).zfill(5)
dest_name = './with_axis_0_VTK_files/with_axis_0_img_' + str(filename) + '.vtk' #Path and name of the intermediate file. The Green Part should be changed to your path & name
copyfile('./trial.vtk',dest_name)
t_i=t_i+1
# ### Arguments calculation of Fipy
dmdt_val=Calculate_dmdt(mAllCell,HeffBase)
# Converting into cell variable type
dmdt=CellVariable(mesh=mesh, value=dmdt_val)
Dval=CellVariable(mesh=mesh, value=D)
# ### Fipy Calculation loop starts
time_per_loop=numerix.zeros(number_of_steps)
loop=numerix.zeros(number_of_steps)
while t_i<=number_of_steps:
start_import = time.time()
print('*************************************************************')
percentage=float(float(t_i)/(float(number_of_steps)))*100.0
percentage_val[t_i]=percentage
print('Completed = '+str(percentage)+'%')
print('dexp='+str(dexp))
#timestep=min(1e-3, numerix.exp(dexp))
timestep=numerix.exp(dexp)
print('timestep='+str(timestep))
eqI = (TransientTerm()== DiffusionTerm(coeff=Dval)- ExponentialConvectionTerm(coeff=dmdt))
eqI.solve(var=phi,dt=timestep)
print('Max phi='+str(max(phi)))
max_val_phi[t_i]=max(phi)
if __name__ == "__main__":#Parameters to be changed are in the seciton below.
viewer.plot(filename="trial.vtk") #It will only save the final vtk file. You can change the name
if not t_i == 0:
filename = str(t_i).zfill(5)
dest_name = './with_axis_0_VTK_files/with_axis_0_img_' + str(filename) + '.vtk' #Path and name of the intermediate file. The Green Part should be changed to your path & name
copyfile('./trial.vtk',dest_name) #Specify the path of your trial.vtk file
dexp=dexp+0.05
loop[t_i-1]=t_i
end_import = time.time()
time_per_loop[t_i-1]=end_import-start_import
t_i=t_i+1
#if max(phi)>1:
#exit()
print('*************************************************************')
'''
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size':20})
fig = plt.figure(figsize=(10,8))
ax = fig.add_subplot(1, 1, 1)
line, = ax.plot(percentage_val,max_val_phi)
plt.grid()
plt.xlabel('Code percentage')
plt.ylabel('Max value of rho')
plt.title('cellsize=0.06')
plt.savefig('0pt06_max_rho.png')
plt.show()
'''
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size':20})
fig = plt.figure(figsize=(10,8))
ax = fig.add_subplot(1, 1, 1)
line, = ax.plot(loop, time_per_loop)
plt.grid()
plt.xlabel('number of loops')
plt.ylabel('time per loop(second)')
plt.title('cellsize=0.008')
plt.savefig('time_per_loop_0pt008.png')
plt.show()
| true |
92dd1232feb0a9ffb067a3b7ef80f0aa09650fd5 | Python | ymccarter/flashcard_project | /working_mihon/automationPython/Lesson29.py | UTF-8 | 1,218 | 2.890625 | 3 | [] | no_license | #!/usr/bin/env python3
import re, pyperclip
# Compiled phone-number pattern; with multiple groups, findall() returns one
# tuple per match and element 0 (the outermost group) is the full number.
PhoneRegex=re.compile(r'''
#415-555-0000, 555-0000, (415) 555-0000, 555-0000 ext 12345, ext 12345, x12345
(
((\d\d\d)|(\(\d\d\d\)))? # area code
(\s|-) #first separator
\d\d\d #first 3 digitss
- #separator
\d\d\d\d # last 4 digits
(((ext(\.)?\s)|x) # extension word part
(\d{2,5}))? # extension number
)
''',re.VERBOSE)
# Email pattern: name, @, domain (simple character classes; no TLD check).
EmailRegex =re.compile(r'''
#something.+something@something.com
[a-zA-Z0-9_.+]+ #name part
@ # @symbol part
[a-zA-Z0-9_.+]+ # domain part
''',re.VERBOSE)
#Yukie's version : (\w+.)?\w+(.)?\w+@\w+(\.\w+.)?(\w+)?(\.\w+)?(\.\w+)?(\.\w+)?
# Read the text currently on the clipboard.
text=pyperclip.paste()
# Extract all phone numbers and email addresses from the text.
extractedPhone=PhoneRegex.findall(text)
extractedEmail=EmailRegex.findall(text)
# Keep only the full-number group from each phone match tuple.
allPhoneNumbers=[]
for phonenumber in extractedPhone:
    allPhoneNumbers.append(phonenumber[0])
print(allPhoneNumbers)
#print(extractedPhone)
#'\n'.join(allPhoneNumbers)
#'\n'.join(extractedEmail)
# Put the results back on the clipboard, one entry per line.
results='\n'.join(allPhoneNumbers)+'\n'+'\n'.join(extractedEmail)
pyperclip.copy(results)
| true |
6b5e4c13b7023bc1838325609b2fd685f7f6263f | Python | chen12356/Linux-mysql-orm-tornado | /9-Flask知识点/简单小项目/员工管理/EmpManager/App/views.py | UTF-8 | 1,707 | 2.5625 | 3 | [] | no_license |
from flask import Blueprint, render_template, request, redirect, url_for
from App.models import User, db
blue = Blueprint('blue',__name__)
@blue.route('/index/')
def index():
    """Plain-text placeholder view for /index/."""
    print('1234')  # NOTE(review): leftover debug output; consider removing
    return 'index'
@blue.route('/userList/')
def userList():
    """Show every user currently stored in the database."""
    # Fetch all rows and hand them straight to the template.
    return render_template('userList.html', users=User.query.all())
@blue.route('/userAdd/',methods=['get','post'])
def userAdd():
    """Render the add-user form on GET; create the user on POST."""
    if request.method == 'POST':
        # Build the new record from the submitted form fields.
        new_user = User()
        new_user.name = request.form.get('name')
        new_user.age = request.form.get('age')
        new_user.gender = request.form.get('sex')
        db.session.add(new_user)
        db.session.commit()
        return redirect(url_for('blue.userList'))
    # GET (the only other method this route accepts): show the empty form.
    return render_template('userAdd.html')
@blue.route('/userDelete/')
def userDelete():
    """Delete the user whose primary key arrives as the ?id= query argument."""
    doomed = User.query.get(request.args.get('id'))
    db.session.delete(doomed)
    db.session.commit()
    return redirect(url_for('blue.userList'))
@blue.route('/userUpdate/',methods=['get','post'])
def userUpdate():
    """Show the edit form on GET; persist the submitted edits on POST."""
    if request.method == 'GET':
        # Look the user up by the ?id= query argument and pre-fill the form.
        return render_template('userUpdate.html', user=User.query.get(request.args.get('id')))
    if request.method == 'POST':
        # The form posts the id back along with the edited field values.
        target = User.query.get(request.form.get('id'))
        target.name = request.form.get('name')
        target.age = request.form.get('age')
        target.gender = request.form.get('sex')
        db.session.add(target)
        db.session.commit()
return redirect(url_for('blue.userList')) | true |
d2f39ea5f59b0182d36475cb2cefe520d3a239f0 | Python | regardscitoyens/anpy | /tests/test_date_utils.py | UTF-8 | 502 | 2.90625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
from anpy.utils import extract_datetime
def test_extract_date():
    """extract_datetime parses French date strings, with or without a time."""
    expectations = [
        ('15 mai 2013 à 14 heures 30', datetime(2013, 5, 15, 14, 30)),
        ('lundi 17 juin 2013', datetime(2013, 6, 17)),
        ('mercredi 11 septembre 2013', datetime(2013, 9, 11)),
        ('24 mars 2015 à 17 heures', datetime(2015, 3, 24, 17, 0)),
    ]
    for raw, parsed in expectations:
        assert extract_datetime(raw) == parsed
| true |
ad63a0ee022bda2db39fc0d993f7bcfd34e20874 | Python | ankitkparashar/python | /HackerRank/queensAttacks.py | UTF-8 | 2,259 | 3.25 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 19 15:23:43 2020
@author: Ankit Parashar
"""
#! queensAttack.py
def queensAttack(n, k, r_q, c_q, obstacles):
    """Count the squares a queen can attack on an n x n chessboard.

    The queen at (r_q, c_q) attacks along the 8 compass directions until it
    runs off the board or is stopped by an obstacle (the obstructed square
    itself is not attackable).

    Args:
        n: Board size; rows and columns are numbered 1..n.
        k: Number of obstacles. Unused (obstacles carries the data); kept
           for interface compatibility with the HackerRank signature.
        r_q: Queen's row, 1-based.
        c_q: Queen's column, 1-based.
        obstacles: Iterable of [row, col] pairs (lists or tuples).

    Returns:
        Total number of attackable squares. 0 for a 1x1 board.
    """
    # Set membership is O(1) per probe; the original list scan was O(k)
    # for every square stepped over.
    blocked = {(r, c) for r, c in obstacles}
    # The 8 queen directions: vertical, horizontal and both diagonals.
    directions = ((-1, 0), (1, 0), (0, -1), (0, 1),
                  (-1, -1), (-1, 1), (1, -1), (1, 1))
    attacks = 0
    for dr, dc in directions:
        r, c = r_q + dr, c_q + dc
        # Walk outward until the edge of the board or a blocked square.
        while 1 <= r <= n and 1 <= c <= n and (r, c) not in blocked:
            attacks += 1
            r += dr
            c += dc
    return attacks
#print(queensAttack(1, 0, 1, 1, obstacles=[]))
# HackerRank sample case 2 - expected output: 10.
print(queensAttack(5, 3, 4, 3, obstacles=[[5, 5], [4, 2], [2, 3]]))
#print(queensAttack(8, 1, 4, 4, obstacles=[(3, 5)])) | true |
ff53504f2f98c9a584b510abd9070d5e5832a282 | Python | gurkslask/hamcwebc | /IO_server/models.py | UTF-8 | 1,818 | 2.546875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""Database module, including the SQLAlchemy database object and DB-related utilities."""
from sqlalchemy import Column, Float, String, DateTime, ForeignKey, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.sql import func
# from standard_models import CRUDMixin, SurrogatePK, Model
Base = declarative_base()  # shared declarative base for the ORM models below
class Sensor(Base):
    """ORM model for one sensor: latest value plus its limits and history."""
    __tablename__ = 'sensors'
    id = Column(Integer, primary_key=True)
    name = Column(String(80), unique=True)  # human-readable sensor name
    value = Column(Float)  # most recent reading
    # One-to-many: limit rows and logged samples belonging to this sensor.
    # NOTE(review): backref='sensors' names the child-side attribute in the
    # plural although it refers to a single Sensor - confirm intent.
    limits = relationship('SensorLimit', backref='sensors')
    timedata = relationship('SensorTimeData', backref='sensors')
    # trends_id = Column(Integer, ForeignKey('trend.id'))
    def __repr__(self):
        """Human-readable summary of the sensor."""
        return 'Name: {}, Value: {}'.format(self.name, self.value)
class SensorLimit(Base):
    """A named numeric limit attached to one Sensor."""
    __tablename__ = 'sensorlimits'
    id = Column(Integer, primary_key=True)
    # NOTE(review): unique=True makes the limit name unique across ALL
    # sensors, not per sensor - confirm that is intended.
    name = Column(String, unique=True)
    value = Column(Float)  # the limit's numeric value
    sensor_id = Column(Integer, ForeignKey('sensors.id'))  # owning sensor
    def __repr__(self):
        """Human-readable summary of the limit."""
        return 'Name: {}, Value: {}, sensor_id: {}'.format(self.name, self.value, self.sensor_id)
class SensorTimeData(Base):
    """One logged sensor reading together with its insertion timestamp."""
    __tablename__ = 'sensortimedata'
    id = Column(Integer, primary_key=True)
    data = Column(Float)  # the sampled value
    # Timestamp filled in by the database at insert time (server-side now()).
    time = Column(DateTime(timezone=True), server_default=func.now())
    sensor_id = Column(Integer, ForeignKey('sensors.id'))  # sensor this sample belongs to
    def __repr__(self):
        """Human-readable summary of the sample."""
        return 'Data: {}, Time: {}, sensor_id: {}'.format(self.data, self.time, self.sensor_id)
| true |
61838bb77cb57844a6ddb2bf046907678c5a5fce | Python | axsjo/Distributed-CyberBullying-Detection | /ChatClient.py | UTF-8 | 4,204 | 2.71875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 1 23:36:31 2017
@author: rohit
"""
from socket import *
import threading
import tkinter
from tkinter import *
from functools import partial
from AES_Security import *
# --- module-level state shared between the GUI callbacks and the listener ---
clientSocket = None  # TCP socket to the chat server; assigned near the bottom of the module
clientChatTextDictionary = {}  # peer user name -> Text widget holding that conversation
clientInputTextDictionary = {}  # peer user name -> Text widget used to type a message
clientSendButtonDictionary = {}  # peer user name -> that chat's Send button
private_key ="CyberBullying"  # NOTE(review): hard-coded shared AES key; move to config/secret storage
# --- main window: scrollbar, logged-in banner and a peer-name entry box ---
top = tkinter.Tk()
scrollBar = Scrollbar(top, orient="vertical")
scrollBar.pack(side="right",fill="y")
new_user_frame = Frame(top)
new_user_frame.pack()
user_name_label = Label(new_user_frame, text="Currently Logged in User Name:", relief=RAISED)
user_name_label.pack()
logged_in_user_name = tkinter.StringVar()  # set after the console login prompt below
logged_in_user_name_label = Label(new_user_frame, textvariable=logged_in_user_name, relief=RAISED)
logged_in_user_name_label.pack()
user_name_text = Text(new_user_frame, height=1)  # peer name to open a chat with
user_name_text.pack()
def new_chat_window():
    """Open a chat pane for the peer name typed into user_name_text.

    Creates label/chat/input widgets, registers them in the module-level
    dictionaries, and wires a Send button that encrypts and ships messages.
    """
    user_name = user_name_text.get("1.0","end-1c")
    user_name_text.delete("1.0", "end-1c")
    print("Opening chat window for user: ",user_name)
    new_chat_frame = Frame(top)
    new_chat_frame.pack()
    user_chat_label = Label(new_chat_frame, text=user_name, relief=RAISED)
    user_chat_label.pack()
    user_chat_text = Text(new_chat_frame, height=5)
    user_chat_text.pack()
    clientChatTextDictionary.update({user_name: user_chat_text})
    user_input_text = Text(new_chat_frame, height=1)
    user_input_text.pack()
    clientInputTextDictionary.update({user_name: user_input_text})
    def send_msg(toWhom, user_input_text, user_chat_text):
        """Encrypt '<peer>~~<msg>' with the shared key, send it, echo locally."""
        towhom = toWhom
        msg = user_input_text.get("1.0", "end-1c")
        user_input_text.delete("1.0","end-1c")
        # "~~" separates the addressee from the message body on the wire.
        concatinatedMsg = towhom + "~~" + msg
        #Encrypt data before sending
        aes_algo = AESAlgorithm(private_key)
        encrypted_msg = aes_algo.encrypt(concatinatedMsg)  # presumably bytes - TODO confirm in AES_Security
        clientSocket.send(encrypted_msg)
        # Re-render the conversation with the outgoing line appended.
        user_chat_str = user_chat_text.get("1.0", "end-1c")
        user_chat_text.delete("1.0", "end-1c")
        user_chat_text.insert("end", user_chat_str + "\nYou:" + msg)
        user_chat_text.see(tkinter.END)
        user_chat_text.update()
    #send_button = tkinter.Button(new_chat_frame, text="Send", command=lambda :send_msg(user_name,user_input_text.get("1.0","end-1c")))
    send_button = tkinter.Button(new_chat_frame, text="Send", command=lambda: send_msg(user_name, user_input_text,user_chat_text))
    send_button.pack()
    clientSendButtonDictionary.update({user_name: send_button})
# Button on the main window that opens a new chat pane for the typed name.
new_chat_button = tkinter.Button(new_user_frame, text="Chat", command=new_chat_window)
new_chat_button.pack()
class ServerListener(threading.Thread):
    """Background thread that receives, decrypts and displays server messages."""
    def __init__(self, clientSocket):
        # Owns the receive side of the already-connected socket.
        threading.Thread.__init__(self)
        self.clientSocket = clientSocket
    def run(self):
        """Receive loop: decrypt each payload, append it to the sender's pane."""
        while True:
            # NOTE(review): assumes each recv() yields exactly one whole
            # encrypted message; TCP gives no such framing guarantee -
            # confirm the protocol or add explicit length framing.
            encrypted_msg = self.clientSocket.recv(5000)
            # Decrypt the received payload with the shared symmetric key.
            aes_algo = AESAlgorithm(private_key)
            response = aes_algo.decrypt(encrypted_msg)
            #if response is None:
                #time.sleep(10)
            #    continue
            #else:
            print("Original msg received on client side:",response)
            # Wire format: "<sender>~~<message body>".
            fromUser = response.split("~~")[0]
            responseMsg = response.split("~~")[1]
            print("\nFrom :", fromUser)
            print("\nMsg :", responseMsg)
            # Append the incoming line to the sender's chat pane. Assumes a
            # window for that user is already open - .get() may return None.
            user_chat_text = clientChatTextDictionary.get(fromUser)
            user_chat_str = user_chat_text.get("1.0", "end-1c")
            user_chat_text.delete("1.0", "end-1c")
            user_chat_text.insert("end", user_chat_str + "\n" +fromUser+":" +responseMsg)
            user_chat_text.see(tkinter.END)
            user_chat_text.update()
# --- connection setup: connect, announce the username, start the listener ---
serverName = "149.162.174.254"  # NOTE(review): hard-coded server address; the
serverPort = 12000              # argv lines below suggest CLI args were intended
#serverName = sys.argv[1]
#serverPort = int(sys.argv[2])
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((serverName,serverPort))
username = input ("Enter Username:")  # console login before the GUI appears
logged_in_user_name.set(username)
#TODO add user and password to handle login
clientSocket.send(username.encode())  # first payload identifies this client (sent unencrypted)
thread = ServerListener(clientSocket)
thread.start()
top.mainloop()  # blocks until the window is closed
clientSocket.close()
| true |
dd5968660a447eca77ea6dbd6c444e627ac7f6aa | Python | RickyL-2000/ZJUI-lib | /PHYS211/Calculations_and_Notes/unit12_hw3.py | UTF-8 | 268 | 2.78125 | 3 | [] | no_license | from scipy.optimize import fsolve
# Perfectly inelastic collision: m1 (kg) moving at v1 (m/s) embeds itself in
# an unknown mass m2; the combined pair moves off at vf (m/s).
m1 = 670
v1 = 8.9
vf = 5.3
def f(m2):
    """Momentum-conservation residual: zero when m1*v1 == (m1 + m2)*vf."""
    return m1*v1 - (m1 + m2)*vf
# Solve the residual for m2 numerically (fsolve returns a length-1 array).
m2 = fsolve(f,100)
# Kinetic energy change of the system; negative, since energy is dissipated.
delta_k = 0.5*(m1+m2)*vf**2 - 0.5*m1*v1**2
# Head-on case: m2 approaches at v2; vt is the common velocity afterwards
# and P1_f the final momentum carried by m1.
v2 = -6.4
total_momentum = m1*v1 + m2*v2
combined_mass = m1+m2
vt = total_momentum/combined_mass
P1_f = m1*vt
print(m2)
print(delta_k)
print(vt)
print(P1_f) | true |
83b93c75a49c212011e3dddde08d86bd25d89313 | Python | bistenofficial/BottleWebProject_C822_5_KKM | /BottleWebProject_C822_5_KKM/Check_Monte_Karlo_NEGR.py | UTF-8 | 220 | 3.28125 | 3 | [] | no_license | import re
# Validates that a string is a (possibly empty) non-negative decimal number,
# e.g. numeric form input: digits with an optional fractional part.
# Compiled once at module load instead of on every call.
_NUMBER_PATTERN = re.compile(r"^\d*(\.\d+)?$")
def check_string(str):
    """Return True if `str` is a valid decimal-number string.

    Uses fullmatch() so the whole input must match; unlike the previous
    findall()-based check, input with a trailing newline is now rejected
    (previously '$' could match just before it). The empty string is still
    accepted because every part of the pattern is optional. The parameter
    keeps its original name for backward compatibility even though it
    shadows the builtin.
    """
    return _NUMBER_PATTERN.fullmatch(str) is not None
| true |