seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
36961636463 | import os.path
base_path = 'scispacy_pipeline_output/'
#function to parse the scipacy output(the text files produced)
def parse_output(n):
    """Parse scispacy per-patient output files into a {concept_name: [CUIs]} dict.

    Scispacy output files list one value per line for each detected concept, in
    a fixed 8-line record: name, CUI, linking score, definition, TUI(s),
    aliases, ... . Only the name (line 0 of each record) and CUI (line 1) are
    kept; the two counters jump by 8 to the next record.

    n -- upper bound on patient file numbers to try; missing files are skipped.
    """
    cui_dict = {}
    for j in range(0, n):
        # select a patient text file path
        pat_path = base_path + "patient" + str(j) + ".txt"
        if os.path.isfile(pat_path):
            with open(pat_path, 'r') as f:
                document = f.read().split('\n')
                f.close()  # redundant: the with-block already closes f
            # record-offset counters: line 0 is a concept name, line 1 its CUI
            count_concept_name = 0
            count_concept_cui = 1
            cui_list = []
            current_concept_name = document[0]
            for i in range(len(document)):  # extracting the name of the entity
                if i == count_concept_name:
                    if len(document[i].split()) == 1:  # only save single-word mentions
                        concept_name = document[i]
                        count_concept_name += 8
                        if concept_name != current_concept_name:
                            # new concept: start a fresh CUI list
                            current_concept_name = concept_name
                            cui_list = []
                    else:
                        # multi-word mention: skip this record entirely
                        count_concept_name += 8
                        count_concept_cui += 8
                elif i == count_concept_cui:  # extracting the CUI of the entity
                    # NOTE(review): `concept_name` may be unbound here if the very
                    # first record's name was multi-word — TODO confirm inputs.
                    cui = document[i]
                    count_concept_cui += 8
                    cui_list.append(cui)
                    if concept_name in cui_dict.keys():
                        # already stored: cui_dict holds a reference to cui_list,
                        # so later appends still reach the stored list
                        continue
                    else:
                        cui_dict[concept_name] = cui_list
    return cui_dict
# Parse the output for up to 100000 patients; parse_output itself skips any
# patient number whose file does not exist (see the isfile check there).
cui_dict = parse_output(100000)

# Symbols that must not appear in a concept name (preprocessing step).
FORBIDDEN_CHARS = ('\\', '/', "'", '=', '*', '~', '^')

# Write the concept -> CUIs mapping, keeping at most the first three CUIs.
with open("map_concept_CUIs.txt", 'w', encoding='utf-8') as f:
    for key, value in cui_dict.items():
        # eliminate concepts containing any forbidden symbol in the name
        if any(ch in key for ch in FORBIDDEN_CHARS):
            continue
        if value:
            # ' '.join of up to three CUIs replaces the original
            # len==1 / len==2 / len>2 branches with identical output.
            f.write(key + ' ' + ' '.join(value[:3]) + '\n')
        else:
            # concept with no CUIs collected: just report it
            print(key)
| Sep905/pre-trained_wv_with_kb | conceptExtraction_entityLinking/parsing_scispacy_output.py | parsing_scispacy_output.py | py | 3,761 | python | en | code | 0 | github-code | 36 |
29673546988 | import boto3
import gzip
import json
import os
from math import ceil
from pymongo import MongoClient
def load_file(filepath):
    """Parse a two-column TSV file into a dict of first column -> second column.

    Each line must contain at least two tab-separated fields; later duplicate
    keys overwrite earlier ones.
    """
    documents = {}
    # Use a context manager so the file handle is closed deterministically
    # (the original called open() without ever closing the handle).
    with open(filepath, 'r') as handle:
        for line in handle.read().splitlines():
            columns = line.split('\t')
            documents[columns[0]] = columns[1]
    return documents
def get_document_dict(remote_bucket, remote_filename):
    """Download a gzipped TSV from cloud storage and return it parsed by load_file.

    Relies on the module-global `gcp_client` (an S3-compatible client pointed at
    Google Cloud Storage), which is set in lambda_handler before this is called.
    Uses fixed /tmp paths, which is the writable scratch area in AWS Lambda.
    """
    with open('/tmp/source.gz', 'wb') as dest:
        gcp_client.download_fileobj(remote_bucket, remote_filename, dest)
    # Decompress the downloaded gzip into memory ...
    with gzip.open('/tmp/source.gz', 'rb') as gzfile:
        byte_contents = gzfile.read()
    # ... then write the plain TSV back out so load_file can read it.
    with open('/tmp/source.tsv', 'wb') as tsvfile:
        count = tsvfile.write(byte_contents)  # byte count; currently unused
    return load_file('/tmp/source.tsv')
def check_existence(document_dict):
    """Return {source_filename: [document_ids]} for IDs MISSING from Mongo.

    document_dict maps PubMed document id -> source filename. Queries the
    module-global `collection` (set in lambda_handler) for 'PMID:'-prefixed
    ids and groups the ones that were NOT found by their source file.
    """
    id_list = ['PMID:' + document_id for document_id in document_dict.keys()]
    print(id_list[:10])
    print(len(id_list))
    found_ids = []
    # Query in batches of 10,000 to keep each $in clause a manageable size.
    subs = ceil(len(id_list) / 10000)
    for i in range(subs):
        start = i * 10000
        end = min(start + 10000, len(id_list))
        sublist = [doc['document_id'] for doc in collection.find({'document_id': {'$in': id_list[start:end]}})]
        found_ids.extend(sublist)
        print(f'{len(sublist)} | {len(found_ids)}')
    # Everything we asked about but did not get back is missing.
    unfound_ids = set(id_list) - set(found_ids)
    print(len(unfound_ids))
    missing_dict = {}
    for unfound_id in unfound_ids:
        document_id = unfound_id.replace('PMID:', '')
        if document_id not in document_dict:
            # should not happen since id_list was built from document_dict
            print('not sure what to do with this ID: ' + document_id)
            continue
        filename = document_dict[document_id]
        if filename not in missing_dict:
            missing_dict[filename] = []
        missing_dict[filename].append(document_id)
    return missing_dict
def check_nonexistence(document_dict):
    """Return {source_filename: [document_ids]} for IDs still PRESENT in Mongo.

    Mirror image of check_existence: used for 'deleted' file lists, where a
    document that IS still found in the database is the anomaly. Uses the
    module-global `collection` (set in lambda_handler).
    """
    id_list = ['PMID:' + document_id for document_id in document_dict.keys()]
    print(id_list[:10])
    print(len(id_list))
    found_ids = []
    # Query in batches of 10,000 to keep each $in clause a manageable size.
    subs = ceil(len(id_list) / 10000)
    for i in range(subs):
        start = i * 10000
        end = min(start + 10000, len(id_list))
        sublist = [doc['document_id'] for doc in collection.find({'document_id': {'$in': id_list[start:end]}})]
        found_ids.extend(sublist)
        print(f'{len(sublist)} | {len(found_ids)}')
    print(len(found_ids))
    found_dict = {}
    for found_id in found_ids:
        document_id = found_id.replace('PMID:', '')
        if document_id not in document_dict:
            # should not happen since id_list was built from document_dict
            print('not sure what to do with this ID:' + document_id)
            continue
        filename = document_dict[document_id]
        if filename not in found_dict:
            found_dict[filename] = []
        found_dict[filename].append(document_id)
    return found_dict
def lambda_handler(event, context):
    """AWS Lambda entry point: verify document presence/absence in MongoDB.

    Expects a JSON body with a 'source' object carrying GCS HMAC credentials,
    a bucket, and a filepath to a gzipped document-id TSV. Returns either the
    check result dict or a (message, status-code) tuple on bad input.
    """
    # API Gateway wraps the payload in 'body'; direct invocation does not.
    if 'body' in event:
        body = json.loads(event['body'])
    else:
        body = event
    if os.environ and 'connection_string' in os.environ:
        client = MongoClient(os.environ['connection_string'])
    else:
        return 'Could not get database connection information', 500
    if 'source' not in body:
        return 'No source information provided', 400
    source_info = body['source']
    # Globals consumed by get_document_dict / check_* helpers.
    global gcp_client
    global collection
    # S3-compatible client pointed at Google Cloud Storage via HMAC keys.
    gcp_client = boto3.client(
        's3',
        region_name='auto',
        endpoint_url='https://storage.googleapis.com',
        aws_access_key_id=source_info['hmac_key_id'],
        aws_secret_access_key=source_info['hmac_secret']
    )
    db = client['test']
    collection = db['documentMetadata']
    main_dict = get_document_dict(source_info['bucket'], source_info['filepath'])
    # 'deleted' file lists should NOT be in the DB; everything else should be.
    if 'deleted' in source_info['filepath']:
        return check_nonexistence(main_dict)
    return check_existence(main_dict)
| edgargaticaCU/DocumentMetadataAPI | data_checker.py | data_checker.py | py | 3,767 | python | en | code | 0 | github-code | 36 |
41826934545 | # import sqlite library
import sqlite3

# Create (or open) the database file and make a connection.
conn = sqlite3.connect("first.db")
cursor = conn.cursor()

# Doubled single-quote ('') is SQL escaping for a literal apostrophe: Master's.
sql = """UPDATE programs
SET program_level = 'Master''s'
WHERE program_name IN ('Anthropology', 'Biology')"""
cursor.execute(sql)

# Multi-row insert of students with their program foreign keys.
sql = """INSERT INTO students(student, id_program) VALUES
('Josefina', 3),
('Cecilia', 2),
('Nico', 2),
('Sarah', 1)
"""
cursor.execute(sql)

# Persist both statements.
# NOTE(review): the connection is never closed; consider conn.close().
conn.commit()
| Ngue-Um/DHRI-June2018-Courses-databases | scripts/challenge.py | challenge.py | py | 430 | python | en | code | 0 | github-code | 36 |
70064817065 | import jsonschema
from API.validation import error_format
class Validate_AddChannel():
    """Validates the payload of an add-channel request against a JSON schema."""

    def __init__(self, data):
        self.data = data
        # Schema: 'name' and 'type' are both required string fields.
        self.schema = {
            "type": "object",
            "properties": {
                "name": {"type": "string"},
                "type": {"type": "string"},
            },
            "required": ["name", "type"]
        }

    def validate_data(self):
        """Return True when self.data matches the schema, else a formatted error."""
        try:
            jsonschema.validate(instance=self.data, schema=self.schema)
            return True
        except jsonschema.exceptions.ValidationError as error:
            # If the error is from jsonschema there's been a validation error,
            # so we can give a good error output.
            return error_format.FormatValidationError(error).schema_validation_error()
        except Exception as e:
            # Otherwise something else has happened and we need to figure out what.
            print(e)
            # Bug fix: the method name was duplicated as
            # "unknown_errorunknown_error()", which raised AttributeError and
            # masked the real failure.
            return error_format.UnknownError(str(e)).unknown_error()
| OStillman/ODACShows | API/validation/add_channel_validation.py | add_channel_validation.py | py | 1,067 | python | en | code | 0 | github-code | 36 |
22453632625 | import urllib
from bs4 import BeautifulSoup
import re
def fetch(url):
    """Return the raw body of the given URL."""
    # Python 2 API: urllib.urlopen was removed in Python 3
    # (use urllib.request.urlopen there).
    return urllib.urlopen(url).read()
def extractss(data):
    """Extract storyboard parameters from a YouTube watch-page HTML blob.

    Returns (sigh_token, image_count, images_per_sheet, (width, height)) for
    the highest-resolution storyboard level found in "storyboard_spec".
    """
    # Grab everything after "storyboard_spec": up to the first comma.
    regex = '\"storyboard\_spec\"\:.+'
    code = re.findall(regex, data)[0].split(',')[0]
    # Levels are |-separated; the last entry is the highest resolution.
    highestres = code.split('|')[-1]
    # Fields within a level are #-separated; indices per the observed format:
    # 0=width, 1=height, 2=image count (0-based, hence +1), 3=grid dimension.
    images = int(highestres.split('#')[2]) + 1
    imagespershot = int(highestres.split('#')[3])
    size = (int(highestres.split('#')[0]), int(highestres.split('#')[1]))
    # imagespershot**2: each storyboard sheet is an N x N grid of thumbnails
    # — presumably; verify against actual storyboard responses.
    return code.split('#')[-1].replace('"', ''), images, imagespershot**2, size
def createssurl(sigh, videoid, images, imagespershot):
    """Build the list of storyboard sheet URLs for a video.

    sigh          -- signature token extracted from the storyboard spec
    videoid       -- YouTube video id
    images        -- total number of thumbnail images
    imagespershot -- thumbnails per storyboard sheet
    """
    url = 'https://i.ytimg.com/sb/%s/storyboard3_L2/M%d.jpg?sigh=%s'
    urls = []
    # Floor division: under Python 3, "/" yields a float and range() raises
    # TypeError; "//" keeps the original Python 2 behavior on both versions.
    for i in range(images // imagespershot + 1):
        ssurls = url % (videoid, i, sigh)
        urls.append(ssurls)
    return urls
def processvideourl(vurl):
    """Return the storyboard sheet URLs for a YouTube watch URL."""
    # assumes the video id is the first query-parameter value (watch?v=ID)
    # — breaks if other parameters precede it; TODO confirm callers.
    videoid = vurl.split('=')[1]
    sigh, images, imagespershot, size = extractss(fetch(vurl))
    return createssurl(sigh, videoid, images, imagespershot)
vurl = 'https://www.youtube.com/watch?v=DU5mHVR6IJ4'
#print processvideourl(vurl)
| karthiknrao/videocontext | videositecrawlers.py | videositecrawlers.py | py | 1,042 | python | en | code | 0 | github-code | 36 |
71038363623 | import collections
import numpy as np
from math import pi
def log_gaussian_prob(obs, mu, sig):
    """Log-density of a univariate Gaussian N(mu, sig^2) evaluated at obs.

    log N(obs | mu, sig) = -(obs - mu)^2 / (2 sig^2) - 0.5 * log(2 pi sig^2)
    """
    num = (obs - mu) ** 2
    denum = 2 * sig ** 2
    # norm = 1 / sqrt(2 * pi * sig ** 2); log(norm) = -0.5 * log(2 pi sig^2)
    # Bug fix: the normalization term must be SUBTRACTED; the original added
    # it, flipping the sign of the log-partition and skewing comparisons
    # between labels with different sigmas.
    log_prob = (-num / denum) - 0.5 * (np.log(2) + np.log(pi) + 2 * np.log(sig))
    return log_prob
class GNB(object):
    """Gaussian Naive Bayes classifier for lane-change behavior.

    Observations are 4-tuples ordered [s, d, s_dot, d_dot]; labels are
    'left', 'keep', 'right'. Each feature is modeled per-label as an
    independent Gaussian fitted by train().
    """

    def __init__(self):
        self.possible_labels = ['left', 'keep', 'right']
        self.is_trained = False
        # log P(label); populated by train()
        self._log_prior_by_label = collections.defaultdict(float)
        # per-label feature means/stds, order in the list is [s, d, s_dot, d_dot]
        self._label_to_feature_means = {key: [] for key in self.possible_labels}
        self._label_to_feature_stds = {key: [] for key in self.possible_labels}

    def _get_label_counts(self, labels):
        """Count occurrences of each label."""
        label_to_counts = collections.defaultdict(int)
        for l in labels:
            label_to_counts[l] += 1
        return label_to_counts

    def _group_data_by_label(self, data, labels):
        """Bucket the observations by their label."""
        label_to_data = dict()
        for label in self.possible_labels:
            label_to_data[label] = []
        for label, data_point in zip(labels, data):
            label_to_data[label].append(data_point)
        return label_to_data

    def train(self, data, labels):
        """
        Trains the classifier with N data points and labels.

        INPUTS
        data - array of N observations
          - Each observation is a tuple with 4 values: s, d,
            s_dot and d_dot.
        labels - array of N labels
          - Each label is one of "left", "keep", or "right".
        """
        # prior: p(label); likelihood: per-feature Gaussians given the label
        N = len(labels)
        label_to_counts = self._get_label_counts(labels)
        for key in self.possible_labels:
            # log(count/N) computed as a difference of logs
            self._log_prior_by_label[key] = np.log(label_to_counts[key]) - np.log(N)
        label_to_data = self._group_data_by_label(data, labels)
        for label, data in label_to_data.items():
            data = np.array(data)
            means = np.mean(data, axis=0)
            stds = np.std(data, axis=0)
            self._label_to_feature_means[label] = means
            self._label_to_feature_stds[label] = stds
        self.is_trained = True

    def predict(self, observation):
        """
        Return the most probable label ('left', 'keep' or 'right') for a
        4-tuple observation (s, d, s_dot, d_dot), or None if untrained.
        """
        if not self.is_trained:
            print("Classifier has not been trained! ")
            print("Please train it before predicting!")
            return
        MAP_estimates = dict()
        for label in self.possible_labels:
            # log prior + sum of per-feature log likelihoods
            log_product = self._log_prior_by_label[label]
            for i, feature_val in enumerate(observation):
                log_product += log_gaussian_prob(feature_val,
                                                 self._label_to_feature_means[label][i],
                                                 self._label_to_feature_stds[label][i])
            MAP_estimates[label] = log_product
        # MAP_estimates holds UNnormalized log posteriors; the evidence term
        # is identical for all labels, so the argmax is the MAP label.
        # Bug fix: the original compared against an initial max of 0, but log
        # probabilities are typically negative, so it always returned 'None'.
        prediction = max(MAP_estimates, key=MAP_estimates.get)
        return prediction
| Xiaohong-Deng/algorithms | AIML/gaussianNaiveBayes/classifier.py | classifier.py | py | 3,774 | python | en | code | 0 | github-code | 36 |
8451867993 | from collections import deque
from collections import namedtuple
PairedTasks = namedtuple('PairedTasks', ('task_1', 'task_2'))


def compute_task_assignment(task_durations: list):
    """Pair tasks so each pair's combined duration is minimized at the maximum.

    Greedy optimum: sort, then pair the shortest remaining task with the
    longest (index i with index ~i). Returns a list of PairedTasks.
    Note: sorts task_durations in place, as the original did.

    The original also computed the resulting total time into a deque-driven
    accumulator and discarded it (dead code); that computation is removed.
    """
    task_durations.sort()
    return [
        PairedTasks(task_durations[i], task_durations[~i])
        for i in range(len(task_durations) // 2)
    ]
if __name__ == '__main__':
print(compute_task_assignment([3, 8, 1, 4,])) | kashyapa/coding-problems | epi/revise-daily/6_greedy_algorithms/1_compute_optimum_task_assignment.py | 1_compute_optimum_task_assignment.py | py | 594 | python | en | code | 0 | github-code | 36 |
73290357865 | import json
import numpy as np
import matplotlib
# Select the TkAgg backend; must be set before importing pyplot.
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt

# Read the JSON file of flattened images.
with open('./47_data.json') as f:
    data = json.load(f)

# Convert the data to a NumPy array.
images = np.array(data)

# Create a 2x4 grid of subplots to display 8 images.
fig, axs = plt.subplots(2, 4)

# Display each image in turn.
for i, ax in enumerate(axs.flatten()):
    # Take one image from the array.
    image = images[i]
    # Reshape the flat 1D vector into a 28x28 2D array.
    image = image.reshape(28, 28)
    print(type(image))  # debug output; prints <class 'numpy.ndarray'>
    # Show the image in grayscale without axes.
    ax.imshow(image, cmap='gray')
    ax.axis('off')

# Tighten subplot spacing.
plt.tight_layout()
# Render the figure.
plt.show()
| LazySheeeeep/Trustworthy_AI-Assignments | 1/testing_images_showcase.py | testing_images_showcase.py | py | 696 | python | zh | code | 0 | github-code | 36 |
4193791513 | import pandas as pd
import numpy as np
# # series--->dataframe
# d = {'one': pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
# 'two': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])}
# df = pd.DataFrame(d)
# print(df)
# print(df.index)
# print(df.columns)
# # index:行标签 columns:列标签
# # dict ---> dataframe
# d2 = [{'a': 1, 'b': 2}, {'a': 5, 'b': 10, 'c': 20}]
# print(pd.DataFrame(d2))
#
# # list ---> dataframe
# d3 = {'one': [1., 2., 3., 4.],
# 'two': [4., 3., 2., 1.]}
# print(pd.DataFrame(d3))
#
# # tuple ---> 多层索引dataframe
# d4 = {
# ('a', 'b'): {('A', 'B'): 1, ('A', 'C'): 2},
# ('a', 'a'): {('A', 'C'): 3, ('A', 'B'): 4},
# ('a', 'c'): {('A', 'B'): 5, ('A', 'C'): 6},
# ('b', 'a'): {('A', 'C'): 7, ('A', 'B'): 8},
# ('b', 'b'): {('A', 'D'): 9, ('A', 'B'): 10}
# }
# print(pd.DataFrame(d4))
area = pd.Series({
'wuhan': 100,
'changsha': 220,
'shanghai': 330,
'beijing': 900,
'guangzhou': 110
})
pop = pd.Series({
'wuhan': 38332521,
'changsha': 26448193,
'shanghai': 19651127,
'beijing': 19552860,
'guangzhou': 12882135
})
data = pd.DataFrame({
'area': area,
'pop': pop
})
# print(data)
# 建议使用data['area']
# print(data['area'])
# print(data.area)
data['density'] = data['pop']/data['area']
# print(data.values)
# print(data.values[0])
# print(data.T)
# print(data)
# data.iloc[1, 0] = 250
# print(data)
# print(data.loc[data['area'] > 200, ['area', 'pop']])
# print(data['area'] > 200)
# print(data[data['area'] > 200])
# print(data['pop'])
# print(data['changsha':'beijing'])
# print(data[0:2])
# print(data.iloc[1:3, :2])
# print(data.iloc[0, 2])
# print(data.loc[:'shanghai', :'pop'])
# data = pd.DataFrame(np.random.randint(1, 10, (3, 5)), columns=['a', 'b', 'c', 'd', 'e'])
# print('data:\n', data)
# print('-------------------')
# print(np.power(data, 2))
# x = pd.DataFrame(np.random.randint(1, 6, (2, 3)), columns=list('abc'))
# print('x:\n', x)
# print('-------------------')
# y = pd.DataFrame(np.random.randint(1, 6, (3, 2)), columns=list('ab'))
# print('y:\n', y)
# print('------x+y:含缺失值:Na----------')
# print( x + y)
# print('-------x中所有元素的平均值---------')
# fill = x.stack().mean() # x中所有元素的平均值
# print(fill)
# print('------缺失值填充----------')
# print(x.add(y, fill_value=fill))
# x = pd.DataFrame(np.random.randint(1, 10, (3, 4)), columns=list('abcd'))
# print('x:\n', x)
# print('-------------------')
# gap = x['b']
# print(gap)
# # 每列减列值
# print('-------列运算:axis=0------------')
# print(x.subtract(gap, axis=0))
# # 每列减行值
# print('-------行运算------------')
# delta = x.iloc[0]
# print(delta)
# print('-------------------')
# print(x.subtract(delta))
# x = np.array([1, np.nan, 3, 4])
# print('x:', x)
# print(x.dtype)
# y = np.array([1, 2, 3, 4])
# print(y.dtype)
# x = pd.Series([1, np.nan, 'hello', None])
# print('origional_x:\n', x)
# print('-----series-dropna-------')
# print(x.dropna())
#
# print('***********分割线***************')
#
# y = pd.DataFrame([
# [1, np.nan, 2],
# [2, 3, 5],
# [None, 4, 6]
# ])
# print('origional_y:\n', y)
# print('-------dataframe-dropna-----------')
# # 默认剔除所有含缺失值的行
# print(y.dropna())
# print('-------------------------')
# # 剔除所有含缺失值的列
# print(y.dropna(axis='columns'))
# print('***********分割线***************')
#
# y[3] = [None, None, None]
# print('new_y:\n', y)
# print(y.dropna(axis='columns', how='all'))
# print(y.dropna(axis='columns', thresh=3))
# print('-----------y_T-------------')
# print('y_T:\n', y.T)
# print(y.T.dropna(how='all'))
# print(y.T.dropna(axis='rows', thresh=3))
# Series.fillna demos: constant fill, forward-fill, and back-fill.
x = pd.Series([1, np.nan, 2, None, 3], index=list('abcde'))
print('x:\n', x)
print('------fillna with 0------')
print(x.fillna(0))
print('------fillna with forward-fill------')
print(x.fillna(method='ffill'))
print('------fillna with back-fill------')
print(x.fillna(method='bfill'))
print('***********分割线***************')
# DataFrame.fillna demos: axis selects the fill direction.
y = pd.DataFrame([
    [1, np.nan, 2, None],
    [2, 3, 5, None],
    [None, 4, 6, None]
])
print('y:\n', y)
print('------fillna with 0------')
print(y.fillna(0))
print('------fillna with forward-fill based on rows------')
print(y.fillna(method='ffill', axis=1))
print('------fillna with back-fill based on columns------')
print(y.fillna(method='bfill', axis=0))
| Marcia0526/data_analyst | dataframe_demo.py | dataframe_demo.py | py | 4,426 | python | en | code | 0 | github-code | 36 |
30466827827 | class Solution:
def binaryTreePaths(self, root: TreeNode) -> List[str]:
if not root:
return []
path = []
res = []
self.dfs(root, path, res)
return res
def dfs(self, root, path, res):
path.append(str(root.val))
# base case
if not root.left and not root.right:
res.append(''.join(path))
# general case
if root.left:
path.append('->')
self.dfs(root.left, path, res)
path.pop()
if root.right:
path.append('->')
self.dfs(root.right, path, res)
path.pop()
path.pop()
# 16 叉
import random
class MyTreeNode:
    """A 16-ary tree node holding a value and 16 child slots.

    NOTE: the attribute name 'chlidren' (sic) is kept intentionally —
    sibling code in this file indexes it under that spelling.
    """

    def __init__(self, val):
        self.val = val
        self.chlidren = [None for _ in range(16)]
class TreeNode:
    """Plain binary tree node with a value and left/right children."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
# traverse
class Solution:
    """Variant that first copies the binary tree into a random 16-ary tree
    (MyTreeNode) and then collects root-to-leaf paths from that copy."""

    def binaryTreePaths(self, root: TreeNode) -> List[str]:
        if not root:
            return []
        path = []
        res = []
        new_root = self.copy_tree(root)
        self.dfs(new_root, path, res)
        return res

    def dfs(self, root, path, res):
        # Push the current node, emit the joined path at leaves, and recurse
        # into every occupied child slot; pops restore `path` on the way back.
        path.append(str(root.val))
        # base case: no children at all -> a complete root-to-leaf path
        if not any(root.chlidren) and path:
            res.append(''.join(path))
        # general case
        for child in root.chlidren:
            if child:
                path.append('->')
                self.dfs(child, path, res)
                path.pop()
        path.pop()

    def copy_tree(self, root):
        # Mirror the binary tree into MyTreeNode: the left subtree goes in a
        # random slot 0-7, the right subtree in a random slot 8-15, so the
        # two can never collide.
        if not root:
            return None
        my_root = MyTreeNode(root.val)
        my_root.chlidren[random.randint(0, 7)] = self.copy_tree(root.left)
        my_root.chlidren[random.randint(0, 7) + 8] = self.copy_tree(root.right)
        return my_root
class Solution:
    """Iterative DFS variant using an explicit stack of (node, path-so-far)
    pairs. Note: path order in the result differs from the recursive
    versions because the stack pops right-pushed entries first."""

    def binaryTreePaths(self, root: TreeNode) -> List[str]:
        if not root:
            return []
        paths = []
        stack = [(root, str(root.val))]
        while stack:
            node, path = stack.pop()
            # leaf: the accumulated string is a complete root-to-leaf path
            if not node.left and not node.right:
                paths.append(path)
            if node.left:
                stack.append((node.left, path + '->' + str(node.left.val)))
            if node.right:
                stack.append((node.right, path + '->' + str(node.right.val)))
        return paths
if __name__ == '__main__':
    # Build the sample tree:
    #         10
    #        /  \
    #       5    3
    #      / \    \
    #     3   2    11
    #    / \   \
    #   3  -2   1
    P = TreeNode(10)
    P.left = TreeNode(5)
    P.left.left = TreeNode(3)
    P.left.left.left = TreeNode(3)
    P.left.left.right = TreeNode(-2)
    P.left.right = TreeNode(2)
    P.left.right.right = TreeNode(1)
    P.right = TreeNode(3)
    P.right.right = TreeNode(11)
    # Bug fix: the original instantiated "Solution5()", which is never
    # defined anywhere in this file and raised NameError. Use the last
    # Solution class defined above.
    s = Solution()
    print(s.binaryTreePaths(P))
| dundunmao/LeetCode2019 | 257. binary tree paths.py | 257. binary tree paths.py | py | 3,061 | python | en | code | 0 | github-code | 36 |
27275697128 | """Very simple example using a pair of Lennard-Jones particles.
This script has several pieces to pay attention to:
- Importing the pieces from wepy to run a WExplore simulation.
- Definition of a distance metric for this system and process.
- Definition of the components used in the simulation: resampler,
boundary conditions, runner.
- Definition of the reporters which will write the data out.
- Create the work mapper for a non-parallel run.
- Construct the simulation manager with all the parts.
- Actually run the simulation.
"""
import sys
from copy import copy
import os
import os.path as osp
import numpy as np
import openmm.app as omma
import openmm as omm
import simtk.unit as unit
from openmm_systems.test_systems import LennardJonesPair
import mdtraj as mdj
from wepy.util.mdtraj import mdtraj_to_json_topology
from wepy.sim_manager import Manager
from wepy.resampling.distances.distance import Distance
from wepy.resampling.resamplers.wexplore import WExploreResampler
from wepy.walker import Walker
from wepy.runners.openmm import OpenMMRunner, OpenMMState
from wepy.runners.openmm import UNIT_NAMES, GET_STATE_KWARG_DEFAULTS
from wepy.work_mapper.mapper import Mapper
from wepy.boundary_conditions.receptor import UnbindingBC
from wepy.reporter.hdf5 import WepyHDF5Reporter
from wepy.reporter.dashboard import DashboardReporter
from wepy.reporter.receptor.dashboard import ReceptorBCDashboardSection
from wepy.reporter.wexplore.dashboard import WExploreDashboardSection
from wepy.reporter.openmm import OpenMMRunnerDashboardSection
from scipy.spatial.distance import euclidean
## PARAMETERS
# Platform used for OpenMM which uses different hardware computation
# kernels. Options are: Reference, CPU, OpenCL, CUDA.
# we use the Reference platform because this is just a test
PLATFORM = 'Reference'
# Langevin Integrator
TEMPERATURE= 300.0*unit.kelvin
FRICTION_COEFFICIENT = 1/unit.picosecond
# step size of time integrations
STEP_SIZE = 0.002*unit.picoseconds
# Resampler parameters
# the maximum weight allowed for a walker
PMAX = 0.5
# the minimum weight allowed for a walker
PMIN = 1e-12
# the maximum number of regions allowed under each parent region
MAX_N_REGIONS = (10, 10, 10, 10)
# the maximum size of regions, new regions will be created if a walker
# is beyond this distance from each voronoi image unless there is an
# already maximal number of regions
MAX_REGION_SIZES = (1, 0.5, .35, .25) # nanometers
# boundary condition parameters
# maximum distance between between any atom of the ligand and any
# other atom of the protein, if the shortest such atom-atom distance
# is larger than this the ligand will be considered unbound and
# restarted in the initial state
CUTOFF_DISTANCE = 1.0 # nm
# reporting parameters
# these are the properties of the states (i.e. from OpenMM) which will
# be saved into the HDF5
SAVE_FIELDS = ('positions', 'box_vectors', 'velocities')
## INPUTS/OUTPUTS
# the inputs directory
inputs_dir = osp.realpath('input')
# the outputs path
outputs_dir = osp.realpath('_output/we')
# make the outputs dir if it doesn't exist
os.makedirs(outputs_dir, exist_ok=True)
# inputs filenames
json_top_filename = "pair.top.json"
# outputs
hdf5_filename = 'results.wepy.h5'
dashboard_filename = 'wepy.dash.org'
# normalize the output paths
hdf5_path = osp.join(outputs_dir, hdf5_filename)
dashboard_path = osp.join(outputs_dir, dashboard_filename)
## System and OpenMMRunner
# make the test system
test_sys = LennardJonesPair()
# make the integrator
integrator = omm.LangevinIntegrator(TEMPERATURE, FRICTION_COEFFICIENT, STEP_SIZE)
# make a context and set the positions
context = omm.Context(test_sys.system, copy(integrator))
context.setPositions(test_sys.positions)
# get the data from this context so we have a state to start the
# simulation with
get_state_kwargs = dict(GET_STATE_KWARG_DEFAULTS)
init_sim_state = context.getState(**get_state_kwargs)
init_state = OpenMMState(init_sim_state)
# initialize the runner
runner = OpenMMRunner(test_sys.system, test_sys.topology, integrator, platform=PLATFORM)
## Distance Metric
# we define a simple distance metric for this system, assuming the
# positions are in a 'positions' field
class PairDistance(Distance):
    """Distance metric between two walker states of the LJ pair system.

    The image of a state is its particle positions; the distance between two
    images is the absolute difference of their interparticle separations.
    """

    def __init__(self, metric=euclidean):
        # metric: callable giving the distance between two position vectors
        self.metric = metric

    def image(self, state):
        # the 'positions' field of the OpenMM state (per-particle coordinates)
        return state['positions']

    def image_distance(self, image_a, image_b):
        # interparticle separation within each image (two-particle system:
        # indices 0 and 1 are the only particles)
        dist_a = self.metric(image_a[0], image_a[1])
        dist_b = self.metric(image_b[0], image_b[1])
        return np.abs(dist_a - dist_b)
# make a distance object which can be used to compute the distance
# between two walkers, for our scorer class
distance = PairDistance()
## Resampler
resampler = WExploreResampler(distance=distance,
init_state=init_state,
max_region_sizes=MAX_REGION_SIZES,
max_n_regions=MAX_N_REGIONS,
pmin=PMIN, pmax=PMAX)
## Boundary Conditions
# the mdtraj here is needed for the distance function
mdtraj_topology = mdj.Topology.from_openmm(test_sys.topology)
json_str_top = mdtraj_to_json_topology(mdtraj_topology)
# initialize the unbinding boundary conditions
ubc = UnbindingBC(cutoff_distance=CUTOFF_DISTANCE,
initial_state=init_state,
topology=json_str_top,
ligand_idxs=np.array(test_sys.ligand_indices),
receptor_idxs=np.array(test_sys.receptor_indices))
## Reporters
# make a dictionary of units for adding to the HDF5
units = dict(UNIT_NAMES)
# open it in truncate mode first, then switch after first run
hdf5_reporter = WepyHDF5Reporter(file_path=hdf5_path, mode='w',
save_fields=SAVE_FIELDS,
resampler=resampler,
boundary_conditions=ubc,
topology=json_str_top,
units=units)
wexplore_dash = WExploreDashboardSection(resampler=resampler)
openmm_dash = OpenMMRunnerDashboardSection(runner=runner,
step_time=STEP_SIZE)
ubc_dash = ReceptorBCDashboardSection(bc=ubc)
dashboard_reporter = DashboardReporter(
file_path=dashboard_path,
mode='w',
resampler_dash=wexplore_dash,
runner_dash=openmm_dash,
bc_dash=ubc_dash,
)
reporters = [hdf5_reporter, dashboard_reporter]
## Work Mapper
# a simple work mapper
mapper = Mapper()
## Run the simulation
if __name__ == "__main__":

    # Guard against missing arguments: the original indexed sys.argv[1]
    # unconditionally and raised IndexError when run with no arguments.
    if len(sys.argv) < 4 or sys.argv[1] in ("-h", "--help"):
        print("arguments: n_cycles, n_steps, n_walkers")
    else:
        n_cycles = int(sys.argv[1])
        n_steps = int(sys.argv[2])
        n_walkers = int(sys.argv[3])

        print("Number of steps: {}".format(n_steps))
        print("Number of cycles: {}".format(n_cycles))

        # create the initial walkers with uniform weights
        init_weight = 1.0 / n_walkers
        init_walkers = [Walker(OpenMMState(init_sim_state), init_weight)
                        for i in range(n_walkers)]

        # initialize the simulation manager
        sim_manager = Manager(init_walkers,
                              runner=runner,
                              resampler=resampler,
                              boundary_conditions=ubc,
                              work_mapper=mapper,
                              reporters=reporters)

        # number of integration steps for each cycle; in principle it could
        # be different each cycle, here it is constant
        steps = [n_steps for i in range(n_cycles)]

        # actually run the simulation
        print("Starting run")
        sim_manager.run_simulation(n_cycles, steps)
        print("Finished run")
| ADicksonLab/wepy | info/examples/Lennard_Jones_Pair/source/we.py | we.py | py | 7,753 | python | en | code | 44 | github-code | 36 |
21130491503 | import psycopg2
from config import host, user, password, db_name

# Initialize to None so the finally-block can safely test whether the
# connection was ever established. (The original referenced `connection`
# and `cursor` in `finally`, which raised NameError if connect() failed
# before they were bound.)
connection = None
try:
    # connect to the existing database
    connection = psycopg2.connect(
        host=host,
        user=user,
        password=password,
        database=db_name
    )
    connection.autocommit = True

    # report the server version
    with connection.cursor() as cursor:
        cursor.execute(
            'select version();'
        )
        print(f'Server version: {cursor.fetchone()}')

    # create the size lookup table
    with connection.cursor() as cursor:
        cursor.execute(
            '''CREATE TABLE IF NOT EXISTS size(
            id serial PRIMARY KEY,
            name varchar(50));
            '''
        )
        print(f'Таблица size успешно создана!')

    # create the category lookup table
    # (the original issued this identical CREATE twice; once suffices)
    with connection.cursor() as cursor:
        cursor.execute(
            '''CREATE TABLE IF NOT EXISTS category(
            id serial PRIMARY KEY,
            name varchar(50));
            '''
        )
        print(f'Таблица category успешно создана!')

    # create the discount table
    with connection.cursor() as cursor:
        cursor.execute(
            '''CREATE TABLE IF NOT EXISTS discount(
            id serial PRIMARY KEY,
            value integer);
            '''
        )
        print(f'Таблица discount успешно создана!')

    # goods references category and discount
    with connection.cursor() as cursor:
        cursor.execute(
            '''CREATE TABLE IF NOT EXISTS goods(
            id serial PRIMARY KEY,
            name varchar(50),
            description text,
            calory integer,
            protein integer ,
            fat integer,
            weight integer ,
            price integer ,
            category_id integer references category(id),
            discount_id integer references discount(id));
            '''
        )
        print(f'Таблица goods успешно создана!')

    # many-to-many join table between goods and size
    with connection.cursor() as cursor:
        cursor.execute(
            '''CREATE TABLE IF NOT EXISTS goods_size(
            id serial PRIMARY KEY,
            goods_id integer references goods(id),
            size_id integer references size(id));
            '''
        )
        # Bug fix: the success message wrongly said "discount" here.
        print(f'Таблица goods_size успешно создана!')

except Exception as e:
    print('Ошибка в процессе выполнения PostgresQL', e)
finally:
    # Each `with connection.cursor()` block already closed its cursor,
    # so only the connection itself needs closing here.
    if connection is not None:
        connection.close()
        print('[INFO] PostgreSQL соединение закрыто!')
print('[INFO] PostgreSQL соединение закрыто!') | Tosic48/pizzeria | main2.py | main2.py | py | 3,728 | python | ru | code | 0 | github-code | 36 |
9194727466 | import unittest
import torch
import lightly
class TestNestedImports(unittest.TestCase):
def test_nested_imports(self):
# active learning
lightly.active_learning.agents.agent.ActiveLearningAgent
lightly.active_learning.agents.ActiveLearningAgent
lightly.active_learning.config.sampler_config.SamplerConfig
lightly.active_learning.config.SamplerConfig
lightly.active_learning.scorers.classification.ScorerClassification
lightly.active_learning.scorers.ScorerClassification
lightly.active_learning.scorers.detection.ScorerObjectDetection
lightly.active_learning.scorers.ScorerObjectDetection
lightly.active_learning.utils.bounding_box.BoundingBox
lightly.active_learning.utils.BoundingBox
lightly.active_learning.utils.object_detection_output.ObjectDetectionOutput
lightly.active_learning.utils.ObjectDetectionOutput
# api imports
lightly.api.api_workflow_client.ApiWorkflowClient
lightly.api.ApiWorkflowClient
lightly.api.bitmask.BitMask
# data imports
lightly.data.LightlyDataset
lightly.data.dataset.LightlyDataset
lightly.data.BaseCollateFunction
lightly.data.collate.BaseCollateFunction
lightly.data.ImageCollateFunction
lightly.data.collate.ImageCollateFunction
lightly.data.MoCoCollateFunction
lightly.data.collate.MoCoCollateFunction
lightly.data.SimCLRCollateFunction
lightly.data.collate.SimCLRCollateFunction
lightly.data.imagenet_normalize
lightly.data.collate.imagenet_normalize
# embedding imports
lightly.embedding.BaseEmbedding
lightly.embedding._base.BaseEmbedding
lightly.embedding.SelfSupervisedEmbedding
lightly.embedding.embedding.SelfSupervisedEmbedding
# loss imports
lightly.loss.NTXentLoss
lightly.loss.ntx_ent_loss.NTXentLoss
lightly.loss.SymNegCosineSimilarityLoss
lightly.loss.sym_neg_cos_sim_loss.SymNegCosineSimilarityLoss
lightly.loss.memory_bank.MemoryBankModule
lightly.loss.regularizer.CO2Regularizer
lightly.loss.regularizer.co2.CO2Regularizer
# models imports
lightly.models.ResNetGenerator
lightly.models.resnet.ResNetGenerator
lightly.models.SimCLR
lightly.models.simclr.SimCLR
lightly.models.MoCo
lightly.models.moco.MoCo
lightly.models.SimSiam
lightly.models.simsiam.SimSiam
lightly.models.ZOO
lightly.models.zoo.ZOO
lightly.models.checkpoints
lightly.models.zoo.checkpoints
lightly.models.batchnorm.get_norm_layer
# transforms imports
lightly.transforms.GaussianBlur
lightly.transforms.gaussian_blur.GaussianBlur
lightly.transforms.RandomRotate
lightly.transforms.rotation.RandomRotate
# utils imports
lightly.utils.save_embeddings
lightly.utils.io.save_embeddings
lightly.utils.load_embeddings
lightly.utils.io.load_embeddings
lightly.utils.load_embeddings_as_dict
lightly.utils.io.load_embeddings_as_dict
lightly.utils.fit_pca
lightly.utils.embeddings_2d.fit_pca
# core imports
lightly.train_model_and_embed_images
lightly.core.train_model_and_embed_images
lightly.train_embedding_model
lightly.core.train_embedding_model
lightly.embed_images
lightly.core.embed_images | tibe97/thesis-self-supervised-learning | tests/imports/test_nested_imports.py | test_nested_imports.py | py | 3,548 | python | en | code | 2 | github-code | 36 |
3955175218 | # -*- coding: utf-8 -*-
import argparse
import os, sys
import codecs
from collections import Counter
import json
import numpy as np
import torch
import copy
install_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
print(install_path)
sys.path.append(install_path)
import xslu.Constants as Constants
from xslu.utils import process_sent, process_word
root_dir = '../../../dstc3/'
def get_lack_acts(train_file, test_file):
with codecs.open(train_file, 'r') as f:
lines = f.readlines()
labels = [line.split('\t<=>\t')[1].strip() for line in lines]
train_labels = []
for label in labels:
lis = label.strip().split(';')
if len(lis) > 0:
train_labels.extend(lis)
train_labels = ['-'.join(label.strip().split('-')[0:2]) for label in train_labels]
with codecs.open(test_file, 'r') as f:
lines = f.readlines()
labels = [line.split('\t<=>\t')[1].strip() for line in lines]
test_labels = []
for label in labels:
lis = label.strip().split(';')
if len(lis) > 0:
test_labels.extend(lis)
test_labels = ['-'.join(label.strip().split('-')[0:2]) for label in test_labels]
labels = set(test_labels) - set(train_labels)
labels = list(labels)
print(labels)
labels = []
for label in train_labels:
labels.extend(label.split('-'))
train_labels = labels
labels = []
for label in test_labels:
labels.extend(label.split('-'))
test_labels = labels
labels = set(test_labels) - set(train_labels)
labels = list(labels)
print(labels)
if __name__ == '__main__':
get_lack_acts(root_dir+'manual/dstc2_seed_1.train', root_dir+'manual/test')
#get_lack_acts(root_dir+'manual/test', root_dir+'manual/dstc2_seed_0.train')
| ZiJianZhao/Unaligned-SLU | dstc3/text/stat.py | stat.py | py | 1,886 | python | en | code | 1 | github-code | 36 |
74328503143 | import logging
import pandas as pd
from openfisca_ceq.tools.data import config_parser, year_by_country
from openfisca_ceq.tools.data_ceq_correspondence import (
ceq_input_by_harmonized_variable,
ceq_intermediate_by_harmonized_variable,
data_by_model_weight_variable,
model_by_data_id_variable,
model_by_data_role_index_variable,
non_ceq_input_by_harmonized_variable,
variables_by_entity,
)
log = logging.getLogger(__name__)
missing_revenus_by_country = {
'cote_d_ivoire': [
# 'rev_i_independants',
],
'mali': [
'cov_i_type_ecole',
# 'rev_i_independants_taxe',
# 'rev_i_independants_Ntaxe',
'rev_i_locatifs',
'rev_i_autres_revenus_capital',
'rev_i_pensions',
'rev_i_transferts_publics',
],
'senegal': [
# 'rev_i_independants',
],
}
def build_income_dataframes(country):
year = year_by_country[country]
income_data_path = config_parser.get(country, 'revenus_harmonises_{}'.format(year))
model_variable_by_person_variable = dict()
variables = [
ceq_input_by_harmonized_variable,
ceq_intermediate_by_harmonized_variable,
model_by_data_id_variable,
non_ceq_input_by_harmonized_variable,
]
for item in variables:
model_variable_by_person_variable.update(item)
income = pd.read_stata(income_data_path)
for variable in income.columns:
if variable.startswith("rev"):
assert income[variable].notnull().any(), "{} income variable for {} is all null".format(
variable, country)
assert (
set(model_variable_by_person_variable.keys()).difference(
set(missing_revenus_by_country.get(country, []))
)
<= set(income.columns)
), \
"Missing {} in {} income data source".format(
set(model_variable_by_person_variable.keys()).difference(
set(missing_revenus_by_country.get(country, []))
).difference(set(income.columns)),
country,
)
data_by_model_id_variable = {v: k for k, v in model_by_data_id_variable.items()}
data_by_model_role_index_variable = {v: k for k, v in model_by_data_role_index_variable.items()}
dataframe_by_entity = dict()
for entity, variables in variables_by_entity.items():
data_entity_id = data_by_model_id_variable["{}_id".format(entity)]
data_entity_weight = data_by_model_weight_variable["person_weight"]
filtered_variables = list(
set(variables).difference(
set(missing_revenus_by_country.get(country, [])))
)
data_group_entity_ids = list()
data_group_entity_role_index = list()
if entity == 'person':
for group_entity in variables_by_entity.keys():
if group_entity == 'person':
continue
data_group_entity_ids += [data_by_model_id_variable["{}_id".format(group_entity)]]
data_group_entity_role_index += [data_by_model_role_index_variable["{}_role_index".format(group_entity)]]
dataframe = income[
filtered_variables
+ [
data_entity_id,
data_entity_weight,
]
+ data_group_entity_ids
+ data_group_entity_role_index
].copy()
if entity != 'person':
person_weight_variable = data_by_model_weight_variable["person_weight"]
group_id_variable = data_by_model_id_variable["{}_id".format(group_entity)]
household_weight = dataframe.groupby(group_id_variable)[person_weight_variable].mean()
weight_by_group_ok = dataframe.groupby(group_id_variable)[person_weight_variable].nunique() == 1
problematic_group_id = weight_by_group_ok.reset_index().query(
"~{}".format(person_weight_variable)
)[group_id_variable].tolist()
assert weight_by_group_ok.all(), "Problematic weights:\n{}".format(
dataframe.loc[dataframe[group_id_variable].isin(problematic_group_id)]
)
dataframe = dataframe.groupby(data_by_model_id_variable["{}_id".format(group_entity)]).sum()
del dataframe[data_by_model_weight_variable["person_weight"]]
dataframe['household_weight'] = household_weight.values
dataframe = dataframe.reset_index()
dataframe_by_entity[entity] = dataframe
log.info("For country {}: {} persons and {} households".format(
country, len(dataframe_by_entity["person"]), len(dataframe_by_entity["household"])
))
assert len(dataframe_by_entity["person"]) == dataframe_by_entity["person"].pers_id.nunique()
assert len(dataframe_by_entity["household"]) == dataframe_by_entity["person"].hh_id.nunique()
return dataframe_by_entity["person"], dataframe_by_entity["household"]
if __name__ == "__main__":
# for country in year_by_country.keys():
import sys
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
country = "senegal"
person_dataframe, household_dataframe = build_income_dataframes(country)
| openfisca/openfisca-ceq | openfisca_ceq/tools/data/income_loader.py | income_loader.py | py | 5,243 | python | en | code | 0 | github-code | 36 |
41896592263 |
import random
words = ["skykreeper", "winterland", "starwars"]
chosen_words = random.choice(words)
stages=['''
<====>
| |
O |
/|\ |
/ \ |
|
============
''', '''
<====>
| |
O |
/|\ |
/ |
|
============
''', '''
<====>
| |
O |
/|\ |
|
|
============
''', '''
<====>
| |
O |
/ \ |
|
|
============
''', '''
<====>
| |
O |
\ |
|
|
============
''', '''
<====>
| |
O |
|
|
|
============
''', '''
<====>
| |
|
|
|
|
============
''']
# print(chosen_words,"\n")
lives=6
list1=[]
word_length=len(chosen_words)
for _ in range(word_length):
list1 +="_"
game_end=False
while not game_end:
guess = input("Guess a letter..\n").lower()
for position in range(word_length):
letter= chosen_words[position]
if letter==guess:
list1[position]=letter
if guess not in chosen_words:
lives -= 1
if lives==0:
game_end=True
print("You Lose!!")
print(f"{' '.join(list1)}")
print(f"Remaining Lives : {lives}")
if not "_" in list1:
game_end=True
print("You Win!!")
print(stages[lives])
| Kumar6174/Hangman-Game-Using-Python | Hangman_Game.py | Hangman_Game.py | py | 1,524 | python | en | code | 0 | github-code | 36 |
2861916089 | '''
В файле config.py располагаются
изменяемые значения для
необходимой настройки скрипта
'''
# Для GUI-приложения
# инфо о таблице, "БД" и шаблонах
path_db = 'created_text_docs'
path_db_bta = 'Созданные документы БТА'
path_db_main_KS = f'{path_db}/Круглосуточный стационар'
path_db_main_DS = f'{path_db}/Дневной стационар'
path_db_main_BTA_KS = f'{path_db_bta}/Круглосуточный стационар'
path_db_main_BTA_DS = f'{path_db_bta}/Дневной стационар'
# путь к шаблонам
path_templates = 'templates'
path_templates_bta = 'templates/BT-A'
# путь к папке со списками пациентов
path_lists = 'Списки пациентов'
# значения для диапазона артериального давления (шаг 5)
blood_presure_sist_min = 110
blood_presure_sist_max = 135
blood_presure_diast_min = 70
blood_presure_diast_max = 90
# значения для диапазона ЧСС (шаг 2)
heart_rate_min = 60
heart_rate_max = 80
# значения для диапазона ЧДД (шаг 1)
breath_rate_min = 16
breath_rate_max = 19
# значения для диапазона температуры (шаг 1)
# в генераторе '36,' + str(value)
# т.е. по умолчанию от 36,3 до 36,8
body_temperature_min = 3
body_temperature_max = 8
# значения для диапазона сатурации (шаг 1)
saturation_min = 98
saturation_max = 99
# коэфициент для определения типа дневника на таймлайне
# т.е. после какого процента пройденного тайлайна,
# меняем дневники на следующую генерацию
frontier = 40 # от 0 до 100
# коэфициент на таймлайне для определения ВАШ в дневнике наблюдение
frontier_VAS_1 = 25 # от 0 до 100
frontier_VAS_2 = 70 # от 0 до 100
# словарь для создания полного имени врача
doc_dict = {
'Шилов И.С.': ('Шилов Илья Сергеевич', '3482'),
'Тыричев С.В.': ('Тыричев Сергей Васильевич', '1429'),
'Шадрин А.А.': ('Шадрин Александр Андреевич', '3397'),
'Тимофеев А.П.': ('Тимофеев Андрей Петрович', '3643'),
'Селезнёва С.И.': ('Селезнёва Светлана Игоревна', ''),
'Александрова В.В.': ('Александрова Вероника Васильевна', ''),
}
# списки Членов МДРК
mdrk_members = {
'lfk': ['Девяткова И.В.'],
'psy': ['Мунирова Р.Р.',
'Ивонина Е.А.',
'Субботина О.И.'],
'logo': ['Мельчакова В.М.',
'Кичигина А.А.'],
'physio': ['Мерзлякова Т.Б.']
}
| Spike2250/WoM | wom/settings/config.py | config.py | py | 3,136 | python | ru | code | 0 | github-code | 36 |
35609459703 | import streamlit as st
import cv2
import time
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from streamlit_lottie import st_lottie
# Initialize the parameters
confThreshold = 0.2 #Confidence threshold
nmsThreshold = 0.4 #Non-maximum suppression threshold
inpWidth = 416 #Width of network's input image
inpHeight = 416 #Height of network's input image
def obj_detection(my_img):
st.set_option('deprecation.showPyplotGlobalUse', False)
column1, column2 = st.beta_columns(2)
column1.subheader("Input image")
st.text("")
# plt.figure(figsize=(16, 16))
# plt.imshow(my_img)
# original = Image.open(image)
#col1.header("Original")
if my_img.mode != 'RGB':
my_img = my_img.convert('RGB')
column1.image(my_img, use_column_width=True)
# column1.pyplot(use_column_width=True)
# YOLO model : # load the YOLO network
# net = cv2.dnn.readNet("yolov3_training_last.weights","yolov3_testing.cfg")
# net = cv2.dnn.readNetFromDarknet("yolov4-custom.cfg","yolov4-custom_best.weights" )
net = cv2.dnn.readNet('yolov4-custom_best.weights', 'yolov4-custom.cfg')
# labels = []
# with open("classes.txt", "r") as f:
# labels = [line.strip() for line in f.readlines()]
# loading all the class labels (objects)
classes = []
with open("classes.txt", "r") as f:
classes = f.read().splitlines()
# names_of_layer = net.getLayerNames()
# output_layers = [names_of_layer[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# generating colors for each object for later plotting
font = cv2.FONT_HERSHEY_PLAIN
colors = np.random.uniform(0, 255, size=(100, 3))
# colors = np.random.uniform(0, 255, size=(len(classes), 3))
print("Colors:", colors)
# Image loading
newImage = np.array(my_img.convert('RGB'))
img = cv2.cvtColor(newImage, 1)
height, width, channels = img.shape
# Objects detection (Converting into blobs)
# (image, scalefactor, size, mean(mean subtraction from each layer), swapRB(Blue to red), crop)
# blob = cv2.dnn.blobFromImage(img, 0.00392, (inpWidth, inpHeight), (0, 0, 0), True,
# crop=False)
blob = cv2.dnn.blobFromImage(img, 1/255, (416, 416), (0,0,0), swapRB=True, crop=False)
# sets the blob as the input of the network
net.setInput(blob)
# outputs = net.forward(output_layers)
output_layers_names = net.getUnconnectedOutLayersNames()
# layerOutputs = net.forward(output_layers_names)
# get all the layer names
# ln = net.getLayerNames()
# ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# names_of_layer = net.getLayerNames()
# output_layers = [names_of_layer[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# feed forward (inference) and get the network output
# measure how much it took in seconds
# start = time.perf_counter()
# outputs = net.forward(output_layers)
outputs = net.forward(output_layers_names)
# time_took = time.perf_counter() - start
# print(f"Time took: {time_took:.2f}s")
# The function getPerfProfile returns the overall time for inference(t) and the timings for each of the layers(in layersTimes)
t, _ = net.getPerfProfile()
infLabel = 'Inference time: %.2f ms' % (t * 1000.0 / cv2.getTickFrequency())
# cv2.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
classID = []
confidences = []
boxes = []
# SHOWING INFORMATION CONTAINED IN 'outputs' VARIABLE ON THE SCREEN
# loop over each of the layer outputs
for op in outputs:
for detection in op:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > 0.2:
# OBJECT DETECTED
# Get the coordinates of object: center,width,height
center_x = int(detection[0] * width)
center_y = int(detection[1] * height)
w = int(detection[2] * width) # width is the original width of image
h = int(detection[3] * height) # height is the original height of the image
# use the center (x, y)-coordinates to derive the top and
# and left corner of the bounding box
# RECTANGLE COORDINATES
x = int(center_x - w / 2) # Top-Left x
y = int(center_y - h / 2) # Top-left y
# To organize the objects in array so that we can extract them later
boxes.append([x, y, w, h])
confidences.append(float(confidence))
classID.append(class_id)
# score_threshold = st.sidebar.slider("Confidence_threshold", 0.00, 1.00, 0.5, 0.01)
# nms_threshold = st.sidebar.slider("NMS_threshold", 0.00, 1.00, 0.4, 0.01)
score_threshold = 0.2
st.sidebar.info(f"Confidence_threshold:{ score_threshold }")
nms_threshold = 0.4
st.sidebar.info(f"NMS_threshold :{nms_threshold} ")
st.sidebar.success(infLabel)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, score_threshold, nms_threshold)
print("DNN Index:", indexes)
font = cv2.FONT_HERSHEY_SIMPLEX
items = []
for i in range(len(boxes)):
if i in indexes.flatten():
x, y, w, h = boxes[i]
# To get the name of object
label = str.upper((classes[classID[i]]))
# label = str(classes[class_ids[i]])
confidence = str(round(confidences[i], 2))
print("value of i:", i)
color = colors[i]
cv2.rectangle(img, (x, y), (x + w, y + h), color, 3)
cv2.putText(img, label + " " + confidence, (x, y + 10), font, 0.25, (0, 0, 255), 1)
items.append(label)
st.text("")
st.spinner('Model working....')
column2.subheader("Output image")
st.text("")
# plt.figure(figsize=(15, 15))
# plt.imshow(img)
# column2.pyplot(use_column_width=True)
column2.image(img, use_column_width=True)
if len(indexes) > 1:
st.success("Found {} Objects - {}".format(len(indexes), [item for item in set(items)]))
st.balloons()
elif len(indexes) == 1:
st.success("Found {} Object - {}".format(len(indexes), [item for item in set(items)]))
st.balloons()
else:
st.warning("Found {} Object - {}".format(len(indexes), [item for item in set(items)]))
# with open("custom.css") as f:
# st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)
st.title('Welcome to Vehicle Classification App')
st.markdown("Welcome to this Computer Vision application that classifies Vehicles on Indian Highways. The Vehicles are classified into Seven different classes namely: Bus, Truck, Car, Jeep,Van,miniTruck and Lorry.It will find Person too if present.")
def main():
st.write(
"You can view Vehicle Classification using YOLO model here. Select one of the following options to proceed:")
choice = st.radio("", ("Default", "Choose an image of your choice"))
# st.write()
if choice == "Choose an image of your choice":
# st.set_option('deprecation.showfileUploaderEncoding', False)
image_file = st.file_uploader("Upload", type=['jpg', 'png', 'jpeg'])
if image_file is not None:
my_img = Image.open(image_file)
obj_detection(my_img)
elif choice == "Default":
my_img = Image.open("v999.jpg")
obj_detection(my_img)
if __name__ == '__main__':
main()
| nlkkumar/vehicle-class-yolov4 | nlk-vehi-class-classification.py | nlk-vehi-class-classification.py | py | 7,582 | python | en | code | 1 | github-code | 36 |
41646833738 | import datetime, requests, csv, argparse
class MarketwatchScraper():
def __init__(self, stock: str = "AAPL", timeout: int = 1) -> None:
self.stock = stock
self.timeout = timeout
pass
def scrape(self) -> None:
self.saveToFile(self.getURLS())
def saveToFile(self, urls: list) -> None:
localFile = f"{self.stock.lower()}.csv"
with open(localFile, "w") as f:
f.write("Date,Open,High,Low,Close,Volume\n")
f.close()
for url in urls:
print(f"Getting data from url {url}...")
try:
resp = requests.get(url, timeout=self.timeout, headers={
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 OPR/96.0.0.0 (Edition std-1)"
})
if resp.status_code != 200:
print(f"Error! Status code: {resp.status_code}")
continue
except Exception as e:
print(f"Error! Exception: {e}")
continue
data = resp.content.decode("utf-8")
csvData = csv.reader(data.splitlines(), delimiter=",")
next(csvData)
with open(localFile, "a") as f:
print(f"Writing data to {localFile}...")
writer = csv.writer(f)
for row in csvData:
writer.writerow(row)
f.close()
def getURLS(self) -> list:
urls = []
startDate = datetime.datetime(1970, 1, 1)
endDate = datetime.datetime.today()
if endDate > datetime.datetime.today():
endDate = datetime.datetime.today()
while startDate < endDate:
date1 = startDate.strftime("%m/%d/%Y%%2000:00:00")
date2 = (startDate + datetime.timedelta(days=366)).strftime("%m/%d/%Y%%2000:00:00")
url = f"https://www.marketwatch.com/investing/stock/{self.stock}/downloaddatapartial?startdate={date1}&enddate={date2}&daterange=d30&frequency=p1d&csvdownload=true&downloadpartial=false&newdates=false"
print(f"Added URL for {startDate.strftime('%m/%d/%Y')} to {(startDate + datetime.timedelta(days=366)).strftime('%m/%d/%Y')}")
urls.append(url)
startDate = startDate + datetime.timedelta(days=365)
return urls
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--stock", type=str, required=True)
parser.add_argument("--timeout", type=int, required=False)
args = parser.parse_args()
scraper = MarketwatchScraper(stock=args.stock, timeout=args.timeout if args.timeout else 1)
scraper.scrape() | chaarlottte/MarketWatch-Scraper | scrape.py | scrape.py | py | 2,814 | python | en | code | 6 | github-code | 36 |
18252810201 | from functools import lru_cache
from typing import List
class Solution:
def maxCoins(self, nums: List[int]) -> int:
@lru_cache(None)
def dfs(l, r):
if l > r:
return 0
# if (l, r) in dic:
# return dic[(l, r)]
# dic[(l, r)] = 0
res = 0
for i in range(l, r+1):
cur = nums[l-1] * nums[i] * nums[r+1]
cur += dfs(l, i-1) + dfs(i+1, r)
# dic[(l, r)] = max(cur, dic[(l, r)])
res = max(cur, res)
return res
nums = [1] + nums + [1]
# dic = {}
return dfs(1, len(nums)-2)
solution = Solution()
assert solution.maxCoins([3,1,5,8]) == 167, "Should be 167"
| hujienan/Jet-Algorithm | leetcode/312. Burst Balloons/index.py | index.py | py | 797 | python | en | code | 0 | github-code | 36 |
23380020206 | from tkinter import *
class nutnhan:
def __init__(self,master):
frame=Frame(master)
frame.pack()
self.printbutton=Button(frame,text="print",command=self.printmassage)
self.printbutton.pack()
self.quitbutton=Button(frame,text="quit",command=frame.quit)
self.quitbutton.pack()
def printmassage(self):
print("fuck you")
root=Tk()
b=nutnhan(root);
root.mainloop(); | nguyenbuitk/python-tutorial | 03_LT/Game/Tkinter/tkinter 08 class.py | tkinter 08 class.py | py | 401 | python | en | code | 0 | github-code | 36 |
34181109892 | #tempurature range is 4-95 degrees C
#we use a GEN2 tempurature module
#tolerance on read about 5 degrees for heating (at least with IR thermometer)
# can hold steady at a tempurature really well though would need to test more at differing tempurature
#idling tempurature 55 C can hold within .5 C of tempurature since readout is in integers
#if using capsules/containers for the temp module to use it easier set module to opentrons_24_aluminumblock_nest_1.5ml_snapcap or something similar
#lab has both aluminumblock 24 well and 96 well plates
temp=4
from opentrons import protocol_api
metadata = {
'protocolName': 'Temp_module_test_cooling',
'author': 'parke',
'description':'protocol to run temp_module',
'apiLevel':'2.10'
}
def run(protocol: protocol_api.ProtocolContext):
temp_mod = protocol.load_module('temperature module gen2', '4')
plate = temp_mod.load_labware('corning_96_wellplate_360ul_flat')
temp_mod.set_temperature(celsius=temp)
temp_mod.status # 'holding at target'
temp_mod.deactivate()
temp_mod.status # 'idle'
| MyersResearchGroup/OpenTrons_OT2_Protocols | temperature_module/tempurature_module.py | tempurature_module.py | py | 1,122 | python | en | code | 0 | github-code | 36 |
16172950617 | """This script creates a regression test over metarl-TRPO and baselines-TRPO.
Unlike metarl, baselines doesn't set max_path_length. It keeps steps the action
until it's done. So we introduced tests.wrappers.AutoStopEnv wrapper to set
done=True when it reaches max_path_length. We also need to change the
metarl.tf.samplers.BatchSampler to smooth the reward curve.
"""
import datetime
import multiprocessing
import os.path as osp
import random
from baselines import logger as baselines_logger
from baselines.bench import benchmarks
from baselines.common import set_global_seeds
from baselines.common.tf_util import _PLACEHOLDER_CACHE
from baselines.logger import configure
from baselines.ppo1.mlp_policy import MlpPolicy
from baselines.trpo_mpi import trpo_mpi
import dowel
from dowel import logger as dowel_logger
import gym
import pytest
import tensorflow as tf
import torch
from metarl.envs import normalize
from metarl.experiment import deterministic, LocalRunner
from metarl.np.baselines import LinearFeatureBaseline
from metarl.tf.algos import TRPO
from metarl.tf.baselines import GaussianMLPBaseline
from metarl.tf.envs import TfEnv
from metarl.tf.experiment import LocalTFRunner
from metarl.tf.optimizers import FirstOrderOptimizer
from metarl.tf.policies import GaussianMLPPolicy
from metarl.torch.algos import TRPO as PyTorch_TRPO
from metarl.torch.policies import GaussianMLPPolicy as PyTorch_GMP
from tests import benchmark_helper
from tests.fixtures import snapshot_config
import tests.helpers as Rh
from tests.wrappers import AutoStopEnv
hyper_parameters = {
'hidden_sizes': [64, 32], # following openai/spinning
'max_kl': 0.01,
'gae_lambda': 0.97,
'discount': 0.99,
'max_path_length': 100,
'cg_iters': 10,
'batch_size': 2048,
'n_epochs': 500,
'n_trials': 10,
'training_epochs': 3,
'learning_rate': 1e-3
}
class TestBenchmarkPPO: # pylint: disable=too-few-public-methods
"""Compare benchmarks between metarl and baselines."""
@pytest.mark.huge
def test_benchmark_trpo(self): # pylint: disable=no-self-use
"""Compare benchmarks between metarl and baselines."""
mujoco1m = benchmarks.get_benchmark('Mujoco1M')
timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S-%f')
benchmark_dir = './data/local/benchmarks/trpo/%s/' % timestamp
result_json = {}
for task in mujoco1m['tasks']:
env_id = task['env_id']
env = gym.make(env_id)
baseline_env = AutoStopEnv(env_name=env_id, max_path_length=100)
seeds = random.sample(range(100), hyper_parameters['n_trials'])
task_dir = osp.join(benchmark_dir, env_id)
plt_file = osp.join(benchmark_dir,
'{}_benchmark.png'.format(env_id))
baselines_csvs = []
metarl_tf_csvs = []
metarl_pytorch_csvs = []
for trial in range(hyper_parameters['n_trials']):
_PLACEHOLDER_CACHE.clear()
seed = seeds[trial]
trial_dir = task_dir + '/trial_%d_seed_%d' % (trial + 1, seed)
metarl_tf_dir = trial_dir + '/metarl'
metarl_pytorch_dir = trial_dir + '/metarl_pytorch'
baselines_dir = trial_dir + '/baselines'
# Run metarl algorithms
# env.reset()
# metarl_pytorch_csv = run_metarl_pytorch(
# env, seed, metarl_pytorch_dir)
# pylint: disable=not-context-manager
with tf.Graph().as_default():
env.reset()
metarl_tf_csv = run_metarl(env, seed, metarl_tf_dir)
# Run baseline algorithms
baseline_env.reset()
baselines_csv = run_baselines(baseline_env, seed,
baselines_dir)
metarl_tf_csvs.append(metarl_tf_csv)
# metarl_pytorch_csvs.append(metarl_pytorch_csv)
baselines_csvs.append(baselines_csv)
env.close()
# benchmark_helper.plot_average_over_trials(
# [baselines_csvs, metarl_tf_csvs, metarl_pytorch_csvs],
# [
# 'eprewmean', 'Evaluation/AverageReturn',
# 'Evaluation/AverageReturn'
# ],
# plt_file=plt_file,
# env_id=env_id,
# x_label='Iteration',
# y_label='Evaluation/AverageReturn',
# names=['baseline', 'metarl-TensorFlow', 'metarl-PyTorch'],
# )
benchmark_helper.plot_average_over_trials_with_x(
[baselines_csvs, metarl_tf_csvs],
['EpRewMean', 'Evaluation/AverageReturn'],
['TimestepsSoFar', 'TotalEnvSteps'],
plt_file=plt_file,
env_id=env_id,
x_label='EnvTimeStep',
y_label='Performance',
names=['baseline', 'metarl-TensorFlow'],
)
# Rh.relplot(g_csvs=metarl_tf_csvs,
# b_csvs=baselines_csvs,
# g_x='TotalEnvSteps',
# g_y='Evaluation/AverageReturn',
# g_z='MetaRL',
# b_x='TimestepsSoFar',
# b_y='EpRewMean',
# b_z='Openai/Baseline',
# trials=hyper_parameters['n_trials'],
# seeds=seeds,
# plt_file=plt_file,
# env_id=env_id,
# x_label='EnvTimeStep',
# y_label='Performance')
# result_json[env_id] = benchmark_helper.create_json(
# [baselines_csvs, metarl_tf_csvs, metarl_pytorch_csvs],
# seeds=seeds,
# trials=hyper_parameters['n_trials'],
# xs=['nupdates', 'Iteration', 'Iteration'],
# ys=[
# 'eprewmean', 'Evaluation/AverageReturn',
# 'Evaluation/AverageReturn'
# ],
# factors=[hyper_parameters['batch_size']] * 3,
# names=['baseline', 'metarl-TF', 'metarl-PT'])
Rh.write_file(result_json, 'TRPO')
def run_metarl_pytorch(env, seed, log_dir):
"""Create metarl PyTorch PPO model and training.
Args:
env (dict): Environment of the task.
seed (int): Random positive integer for the trial.
log_dir (str): Log dir path.
Returns:
str: Path to output csv file
"""
env = TfEnv(normalize(env))
deterministic.set_seed(seed)
runner = LocalRunner(snapshot_config)
policy = PyTorch_GMP(env.spec,
hidden_sizes=hyper_parameters['hidden_sizes'],
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = PyTorch_TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_kl_step=hyper_parameters['max_kl'],
max_path_length=hyper_parameters['max_path_length'],
discount=hyper_parameters['discount'],
gae_lambda=hyper_parameters['gae_lambda'])
# Set up logger since we are not using run_experiment
tabular_log_file = osp.join(log_dir, 'progress.csv')
dowel_logger.add_output(dowel.StdOutput())
dowel_logger.add_output(dowel.CsvOutput(tabular_log_file))
dowel_logger.add_output(dowel.TensorBoardOutput(log_dir))
runner.setup(algo, env)
runner.train(n_epochs=hyper_parameters['n_epochs'],
batch_size=hyper_parameters['batch_size'])
dowel_logger.remove_all()
return tabular_log_file
def run_metarl(env, seed, log_dir):
"""Create metarl Tensorflow PPO model and training.
Args:
env (dict): Environment of the task.
seed (int): Random positive integer for the trial.
log_dir (str): Log dir path.
Returns:
str: Path to output csv file
"""
deterministic.set_seed(seed)
with LocalTFRunner(snapshot_config) as runner:
env = TfEnv(normalize(env))
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=hyper_parameters['hidden_sizes'],
hidden_nonlinearity=tf.nn.tanh,
output_nonlinearity=None,
)
# baseline = LinearFeatureBaseline(env_spec=env.spec)
baseline = GaussianMLPBaseline(
env_spec=env.spec,
regressor_args=dict(
hidden_sizes=hyper_parameters['hidden_sizes'],
use_trust_region=False,
# optimizer=FirstOrderOptimizer,
# optimizer_args=dict(
# batch_size=hyper_parameters['batch_size'],
# max_epochs=hyper_parameters['training_epochs'],
# tf_optimizer_args=dict(
# learning_rate=hyper_parameters['learning_rate'],
# ),
# ),
),
)
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=hyper_parameters['max_path_length'],
discount=hyper_parameters['discount'],
gae_lambda=hyper_parameters['gae_lambda'],
max_kl_step=hyper_parameters['max_kl'])
# Set up logger since we are not using run_experiment
tabular_log_file = osp.join(log_dir, 'progress.csv')
dowel_logger.add_output(dowel.CsvOutput(tabular_log_file))
dowel_logger.add_output(dowel.StdOutput())
dowel_logger.add_output(dowel.TensorBoardOutput(log_dir))
runner.setup(algo, env)
runner.train(n_epochs=hyper_parameters['n_epochs'],
batch_size=hyper_parameters['batch_size'])
dowel_logger.remove_all()
return tabular_log_file
def run_baselines(env, seed, log_dir):
    """Create Baseline model and training.
    Args:
        env (dict): Environment of the task.
        seed (int): Random positive integer for the trial.
        log_dir (str): Log dir path.
    Returns:
        str: Path to output csv file
    """
    # Use half of the available cores (at least one) for TF op parallelism.
    ncpu = max(multiprocessing.cpu_count() // 2, 1)
    config = tf.ConfigProto(allow_soft_placement=True,
                            intra_op_parallelism_threads=ncpu,
                            inter_op_parallelism_threads=ncpu)
    # Enter the session context manually so the rest of this function (and
    # the baselines code it calls) runs inside it; it is never explicitly
    # closed here.
    tf.compat.v1.Session(config=config).__enter__()
    # Set up logger for baselines
    configure(dir=log_dir, format_strs=['stdout', 'log', 'csv', 'tensorboard'])
    baselines_logger.info('rank {}: seed={}, logdir={}'.format(
        0, seed, baselines_logger.get_dir()))
    set_global_seeds(seed)
    def policy_fn(name, ob_space, ac_space):
        """Create policy for baselines.
        Args:
            name (str): Policy name.
            ob_space (gym.spaces.Box) : Observation space.
            ac_space (gym.spaces.Box) : Action space.
        Returns:
            baselines.ppo1.mlp_policy: MLP policy for baselines.
        """
        return MlpPolicy(name=name,
                         ob_space=ob_space,
                         ac_space=ac_space,
                         hid_size=hyper_parameters['hidden_sizes'][0],
                         num_hid_layers=len(hyper_parameters['hidden_sizes']))
    # Train baselines' MPI TRPO with the shared hyper-parameter dict so the
    # run is comparable to the metarl version above.
    trpo_mpi.learn(env,
                   policy_fn,
                   timesteps_per_batch=hyper_parameters['batch_size'],
                   max_kl=hyper_parameters['max_kl'],
                   cg_iters=hyper_parameters['cg_iters'],
                   # cg_damping=0.1,
                   max_timesteps=(hyper_parameters['batch_size'] *
                                  hyper_parameters['n_epochs']),
                   gamma=hyper_parameters['discount'],
                   lam=hyper_parameters['gae_lambda'],
                   vf_iters=hyper_parameters['training_epochs'],
                   vf_stepsize=hyper_parameters['learning_rate'])
    # baselines' CSV logger (configured above) writes progress.csv in log_dir.
    return osp.join(log_dir, 'progress.csv')
| icml2020submission6857/metarl | tests/benchmarks/metarl/tf/algos/test_benchmark_trpo.py | test_benchmark_trpo.py | py | 12,332 | python | en | code | 2 | github-code | 36 |
29013837779 | import pickle
import math
if __name__ == '__main__':
    # dict.txt maps symbolic feature values (protocol, service, flag, label)
    # to integer codes and column indices to divisors -- see the commented
    # builder code at the bottom of this file.
    with open('dict.txt', 'rb') as f:
        dict_ = pickle.load(f)
    data = []
    with open('../NSL-KDD/KDDTrain+_20Percent.arff', 'r') as f: #KDDTest-21 KDDTrain+ KDDTest+ KDDTrain+_20Percent
        for line in f.readlines():
            # Lines starting with '@' are ARFF header/metadata, not records.
            if line[0] != '@':
                num = len(data)
                data.append([])
                tem = line.strip().split(',')
                for i in range(len(tem)):
                    if i in dict_:
                        # Categorical column: encode the symbol and scale by
                        # the per-column divisor stored under the column index.
                        data[num].append(dict_[tem[i]]/dict_[i])
                    else:
                        # Numeric column: float if it contains a decimal point.
                        if '.' in tem[i]:
                            data[num].append(float(tem[i]))
                        else:
                            data[num].append(int(tem[i]))
    # Track per-column min and max, seeded from the first record.
    maxa = []
    mina = []
    for x in data[0]:
        maxa.append(x)
        mina.append(x)
    for line in data:
        for i in range(len(line)):
            maxa[i] = max(maxa[i], line[i])
            mina[i] = min(mina[i], line[i])
    # Columns with a huge range (max > 10000) are compressed with log10 first
    # (non-positive entries are left untouched), then min/max are recomputed.
    for i in range(len(maxa)):
        if maxa[i] > 10000:
            maxt = mina[i]
            mint = mina[i]
            for j in range(len(data)):
                if data[j][i] > 0:
                    data[j][i] = math.log10(data[j][i])
                maxt = max(maxt, data[j][i])
                mint = min(mint, data[j][i])
            maxa[i] = maxt
            mina[i] = mint
    # Min-max normalise every column whose max exceeds 1 into [0, 1].
    for i in range(len(maxa)):
        if maxa[i] > 1:
            # maxt = mina[i]
            # mint = mina[i]
            for j in range(len(data)):
                if maxa[i]-mina[i] > 0:
                    data[j][i] = (data[j][i]-mina[i])/(maxa[i]-mina[i])
                # maxt = max(maxt, data[j][i])
                # mint = max(mint, data[j][i])
            # maxa[i] = maxt
            # mina[i] = mint
    # Write the normalised records out as comma-separated lines.
    with open('../data/KDDTrain+_20Percent.txt', 'w') as f: ##KDDTest-21 KDDTrain+ KDDTest+ KDDTrain+_20Percent
        for i in range(len(data)):
            for j in range(len(data[i])):
                f.write(str(data[i][j]))
                if j != len(data[i])-1:
                    f.write(',')
            f.write('\n')
# protocol_type = ['tcp','udp', 'icmp'];
# service = ['aol', 'auth', 'bgp', 'courier', 'csnet_ns', 'ctf', 'daytime', 'discard', 'domain', \
# 'domain_u', 'echo', 'eco_i', 'ecr_i', 'efs', 'exec', 'finger', 'ftp', 'ftp_data', 'gopher', \
# 'harvest', 'hostnames', 'http', 'http_2784', 'http_443', 'http_8001', 'imap4', 'IRC', 'iso_tsap', \
# 'klogin', 'kshell', 'ldap', 'link', 'login', 'mtp', 'name', 'netbios_dgm', 'netbios_ns', \
# 'netbios_ssn', 'netstat', 'nnsp', 'nntp', 'ntp_u', 'other', 'pm_dump', 'pop_2', 'pop_3', \
# 'printer', 'private', 'red_i', 'remote_job', 'rje', 'shell', 'smtp', 'sql_net', 'ssh', 'sunrpc',\
# 'supdup', 'systat', 'telnet', 'tftp_u', 'tim_i', 'time', 'urh_i', 'urp_i', 'uucp', 'uucp_path', \
# 'vmnet', 'whois', 'X11', 'Z39_50'];
# flag = ['OTH', 'REJ', 'RSTO', 'RSTOS0', 'RSTR', 'S0', 'S1', 'S2', 'S3', 'SF', 'SH'];
# Class = ['normal', 'anomaly'];
# dict = {}
# for i in range(len(protocol_type)):
# dict[protocol_type[i]] = i+1;
# for i in range(len(service)):
# dict[service[i]] = i+1;
# for i in range(len(flag)):
# dict[flag[i]] = i+1;
# for i in range(len(Class)):
# dict[Class[i]] = i;
# dict[1] = 3;
# dict[2] = 70;
# dict[3] = 11;
# dict[41] = 1;
#
# with open('dict.txt', 'wb') as f:
# pickle.dump(dict, f);
| MrDuGitHub/NSL-KDD | code/norm.py | norm.py | py | 3,451 | python | en | code | 0 | github-code | 36 |
33521363177 | import os, re, importlib.util
import LogManager
from Module import Module
from ModuleThreadHandler import ModuleThreadHandler, ThreadTask
class ModuleRunner:
    """Discovers module packages under ./modules/, loads the class each one
    exports from its main.py, and schedules their execution on a
    ModuleThreadHandler."""
    def __init__(self, parent):
        self.parent = parent
        self.logger = LogManager.create_logger('MODULES')
        # Set to True by close(); lets owners know dispatching has stopped.
        self.closing = False
        self.thread_handler = ModuleThreadHandler(self.logger)
        self.logger.info('Initialising modules...')
        self.get_modules()
        self.logger.info(f"Currently running {len(self.modules)} module{'s' if len(self.modules) != 1 else ''}...")
    def close(self):
        # Stop the worker threads first, then mark this runner as closing.
        self.thread_handler.close()
        self.closing = True
    def get_modules(self):
        """Scan ./modules/* for packages with a main.py exporting a valid
        Module class; populates self.modules and self.bad_modules."""
        self.modules = []
        # List of (module_dir_name, reason) pairs for modules that failed to load.
        self.bad_modules = []
        # Get possible files that could contain a module
        module_filenames = []
        # Escape the path separator for use inside the regex (Windows '\\').
        x = '/' if os.path.sep == '/' else r'\\'
        # Match only direct children of ./modules (exactly one path component deep).
        pattern = re.compile(f'^.{x}modules{x}[^{x}]+$')
        for subdir, _, files in os.walk(os.path.join(os.curdir, 'modules')):
            if pattern.match(subdir):
                if 'main.py' in files:
                    module_filenames.append(os.path.join(subdir, 'main.py'))
                else:
                    self.bad_modules.append((subdir.split(os.path.sep)[-1], 'No main.py file'))
        # Go through files and try to import a class
        for filename in module_filenames:
            # Module name: relative path with separators replaced by dots
            # ('./' prefix and '.py' suffix stripped via filename[2:-3]).
            spec = importlib.util.spec_from_file_location(filename[2:-3].replace(os.path.sep, '.'), filename)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            if not hasattr(module, 'export'):
                self.bad_modules.append((filename.split(os.path.sep)[-2], 'No exported class in main.py'))
            elif not Module.is_valid(module.export):
                self.bad_modules.append((filename.split(os.path.sep)[-2], 'Exported module is invalid'))
            else:
                self.init_module(module.export)
        for module_name, reason in self.bad_modules:
            self.logger.warning(f'Installed module `{module_name}` cannot be loaded: {reason}')
    def run_modules(self, minutes_past):
        # Queue every module whose schedule matches the elapsed time.
        for module in [m for m in self.modules if m.should_occur(minutes_past)]:
            self.run_module(module)
    def run_module(self, module):
        self.thread_handler.add_task(module, ThreadTask.RUN)
    def init_module(self, base_class):
        # Wrap the exported class and queue its initialisation task.
        module = Module(self, base_class)
        self.thread_handler.add_task(module, ThreadTask.INIT, -1)
        self.modules.append(module)
| gregormaclaine/AutoHome | ModuleRunner.py | ModuleRunner.py | py | 2,346 | python | en | code | 0 | github-code | 36 |
3095634142 | import re
from mparser import *
from mlexer import *
# if re.match(r'.+ while .+', "x = x + 1 if x < 10 while k == 0"):
# print('MATCH')
# else:
# print('NO MATCH')
# Build the lexer/parser pair exercised below.
lex = Lexer()
par = Parser(lex)
# NOTE: `code` is reassigned several times; only the last assignment
# (the if/elsif/else block) is actually matched and parsed below.
code = "x = x + 1 if x < 10 while y < 5"
code = '''
while x > 5
x = 1
y = x - 1
end
'''
code = '''
if x < 5
x = x - 2
y = y + 1
elsif x > 4
z = x * 2
else
y = y / 2
end
'''
# Sanity check: does the snippet look like a `while ... end` block?
# (The current `code` starts with `if`, so this prints NOT MATCH.)
pattern = r'while(.+\n)*end'
if re.match(pattern, code.strip('\n')):
    print('MATCH')
else:
    print('NOT MATCH')
print(par.parseCode(code.strip('\n')))
| ptq204/CS320-Ruby-lexer-parser | main.py | main.py | py | 539 | python | en | code | 0 | github-code | 36 |
30452253568 | import pandas as pd
import requests
from bs4 import BeautifulSoup
u = 'https://www.amazon.in/OnePlus-Nord-Gray-128GB-Storage/product-reviews/B08695ZSP6/ref=cm_cr_arp_d_paging_btm_next_2?ie=UTF8&reviewerType=all_reviews&pageNumber='
def amazon(link, Number_of_pages):
    """Scrape Amazon product review pages and dump them to a CSV file.

    Args:
        link: Base review-page URL ending in '...&pageNumber=' so that a
            page number can be appended directly.
        Number_of_pages: How many review pages to fetch, starting at page 1.

    Side effects:
        Writes the collected reviews (name, title, star rating, body) to a
        hard-coded CSV path.
    """
    allreview1 = pd.DataFrame({})
    name = []
    body = []
    star_rating = []
    review = []
    # Bug fix: page URLs were previously built from the module-level global
    # `u`, silently ignoring the `link` argument.
    urls = [link + str(page) for page in range(1, Number_of_pages + 1)]
    for url in urls:
        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser')
        # Reviewer names. The first two profile entries are skipped --
        # presumably page furniture rather than review authors (TODO confirm
        # against the live page layout).
        name1 = soup.findAll('div', class_=['a-profile-content'])
        ct = 0
        for tag in name1:
            if ct >= 2:
                name.append(tag.find('span', class_=['a-profile-name']).text)
            ct += 1
        # Review titles.
        title1 = soup.findAll('a', attrs={'data-hook': 'review-title'}, class_=['a-size-base a-link-normal review-title a-color-base review-title-content a-text-bold'])
        for tag in title1:
            review.append(tag.find('span').text)
        # Star ratings (e.g. "5.0 out of 5 stars").
        rating = soup.findAll('i', attrs={'data-hook': 'review-star-rating'})
        for tag in rating:
            star_rating.append(tag.find('span', class_=['a-icon-alt']).text)
        # Review bodies.
        body1 = soup.findAll('span', attrs={'data-hook': 'review-body'}, class_=['a-size-base review-text review-text-content'])
        for tag in body1:
            body.append(tag.find('span').text)
    # NOTE(review): if Amazon returns different counts of names/titles/
    # ratings/bodies these assignments raise, because DataFrame columns must
    # have equal length -- verify before relying on this in production.
    allreview1['name'] = name
    allreview1['review'] = review
    allreview1['star_rating'] = star_rating
    allreview1['body'] = body
    # TODO: 'C:\...\allreview1.csv' is a placeholder path; replace it with a
    # real output location (kept as-is to preserve existing behaviour).
    allreview1.to_csv(r'C:\...\allreview1.csv')
amazon(u, 3)
| SHRIKAR5/Amazon-review-webscraping | amazon_review-webscraping.py | amazon_review-webscraping.py | py | 2,060 | python | en | code | 0 | github-code | 36 |
496383977 | from dagster_datadog import datadog_resource
from dagster import ModeDefinition, execute_solid, solid
from dagster.seven import mock
@mock.patch('datadog.statsd.timing')
@mock.patch('datadog.statsd.timed')
@mock.patch('datadog.statsd.service_check')
@mock.patch('datadog.statsd.set')
@mock.patch('datadog.statsd.distribution')
@mock.patch('datadog.statsd.histogram')
@mock.patch('datadog.statsd.decrement')
@mock.patch('datadog.statsd.increment')
@mock.patch('datadog.statsd.gauge')
@mock.patch('datadog.statsd.event')
def test_datadog_resource(
    event,
    gauge,
    increment,
    decrement,
    histogram,
    distribution,
    statsd_set,
    service_check,
    timed,
    timing,
):
    """Exercise every statsd method exposed by the datadog resource and check
    each call is forwarded verbatim to the underlying datadog client.

    NOTE: mock.patch decorators inject mocks bottom-up, so the innermost
    decorator ('datadog.statsd.event') becomes the first parameter.
    """
    @solid(required_resource_keys={'datadog'})
    def datadog_solid(context):
        assert context.resources.datadog
        # event
        context.resources.datadog.event('Man down!', 'This server needs assistance.')
        event.assert_called_with('Man down!', 'This server needs assistance.')
        # gauge
        context.resources.datadog.gauge('users.online', 1001, tags=["protocol:http"])
        gauge.assert_called_with('users.online', 1001, tags=["protocol:http"])
        # increment
        context.resources.datadog.increment('page.views')
        increment.assert_called_with('page.views')
        # decrement
        context.resources.datadog.decrement('page.views')
        decrement.assert_called_with('page.views')
        # histogram / distribution
        context.resources.datadog.histogram('album.photo.count', 26, tags=["gender:female"])
        histogram.assert_called_with('album.photo.count', 26, tags=["gender:female"])
        context.resources.datadog.distribution('album.photo.count', 26, tags=["color:blue"])
        distribution.assert_called_with('album.photo.count', 26, tags=["color:blue"])
        # set (the mock parameter is named statsd_set to avoid shadowing the builtin)
        context.resources.datadog.set('visitors.uniques', 999, tags=["browser:ie"])
        statsd_set.assert_called_with('visitors.uniques', 999, tags=["browser:ie"])
        # service_check
        context.resources.datadog.service_check('svc.check_name', context.resources.datadog.WARNING)
        service_check.assert_called_with('svc.check_name', context.resources.datadog.WARNING)
        # timing
        context.resources.datadog.timing("query.response.time", 1234)
        timing.assert_called_with("query.response.time", 1234)
        # timed decorator usage
        @context.resources.datadog.timed('run_fn')
        def run_fn():
            pass
        run_fn()
        timed.assert_called_with('run_fn')
    # The API keys are never sent anywhere because every statsd call is mocked.
    result = execute_solid(
        datadog_solid,
        environment_dict={
            'resources': {'datadog': {'config': {'api_key': 'NOT_USED', 'app_key': 'NOT_USED'}}}
        },
        mode_def=ModeDefinition(resource_defs={'datadog': datadog_resource}),
    )
    assert result.success
| helloworld/continuous-dagster | deploy/dagster_modules/libraries/dagster-datadog/dagster_datadog_tests/test_resources.py | test_resources.py | py | 2,747 | python | en | code | 2 | github-code | 36 |
5054088480 | from abei.implements.service_basic import ServiceBasic
from abei.implements.util import (
FileLikeWrapper,
LazyProperty,
)
from abei.interfaces import (
IProcedure,
IProcedureLink,
IProcedureFactory,
IProcedureJointFactory,
IProcedureBuilder,
service_entry as _,
)
from .procedure_joint_basic import (
joint_validate_dependents,
)
# Keys recognised in a procedure definition document (JSON/YAML).
keyword_procedure_signature = 'fn'
keyword_procedure_input_signatures = 'args'
keyword_procedure_output_signatures = 'return'
keyword_procedure_outputs = 'out'
keyword_procedure_document = 'doc'
# Keys recognised inside each statement ('joint') entry.
keyword_joints = 'statements'
keyword_joint_name = 'name'
keyword_joint_procedure = 'call'
keyword_joint_inputs = 'in'
def parse_signature(signature):
    """Split a 'name@site' signature into (name, site).

    The site part is optional; when absent, None is returned for it.
    More than one '@' is rejected with ValueError.
    """
    parts = signature.split('@')
    if len(parts) == 1:
        return parts[0], None
    if len(parts) == 2:
        name, site = parts
        return name, site
    raise ValueError('invalid signature {}'.format(signature))
class ProcedureJointBuilder(object):
    """Lazy wrapper around one statement ('joint') entry of a composite
    procedure definition; creates the joint instance on first access."""
    def __init__(
            self,
            procedure_builder,
            procedure_site,
            procedure,
            data,
    ):
        self.procedure_builder = procedure_builder
        self.procedure_site = procedure_site
        # The composite procedure this joint belongs to.
        self.procedure = procedure
        assert isinstance(data, dict)
        # Raw dict parsed from the config document for this statement.
        self.data = data
    @property
    def name(self):
        # Statement name; other joints refer to this joint by it.
        return self.data.get(keyword_joint_name)
    @property
    def inputs(self):
        # Raw input spec list (entries like '$0' or 'other_joint[1]').
        return self.data.get(keyword_joint_inputs)
    @LazyProperty
    def instance(self):
        # Resolve 'signature@site' to the target procedure and create the
        # joint; LazyProperty caches the result so this runs only once.
        joint_procedure_signature = self.data.get(keyword_joint_procedure)
        signature, site = parse_signature(joint_procedure_signature)
        joint_procedure = self.procedure_site.get_procedure(
            signature,
            site=site,
        )
        return self.procedure_builder.procedure_joint_factory.create(
            joint_procedure,
            self.procedure,
            signature=self.name,
        )
class ProcedureBuilder(ServiceBasic, IProcedureBuilder):
    """Builds composite procedures from JSON/YAML configuration documents and
    registers them on a procedure site."""
    def __init__(self, service_site, **kwargs):
        self.service_site = service_site
    @LazyProperty
    def procedure_factory(self):
        # Resolved lazily from the service site and cached.
        return self.service_site.get_service(_(IProcedureFactory))
    @LazyProperty
    def procedure_joint_factory(self):
        return self.service_site.get_service(_(IProcedureJointFactory))
    @classmethod
    def get_dependencies(cls):
        # Third-party packages this service needs (for load_yaml).
        return ['PyYAML']
    def load_procedure_data(self, procedure_site, procedure_data_object):
        # Not supported by this builder.
        raise NotImplementedError()
    def load_procedure(self, procedure_site, procedure_object):
        """Build one composite procedure from its config dict, wire up its
        statements (joints), and register it on the site."""
        if not isinstance(procedure_object, dict):
            raise ValueError(
                'invalid procedure in configuration file')
        def get_full_signature(sig):
            # Resolve a possibly site-qualified data-class signature.
            sig, site = parse_signature(sig)
            data = procedure_site.get_data_class(sig, site=site)
            return data.get_signature()
        input_signatures = procedure_object.get(
            keyword_procedure_input_signatures, [])
        if not isinstance(input_signatures, list):
            raise ValueError(
                'invalid procedure input signatures')
        input_signatures = [get_full_signature(
            sig) for sig in input_signatures]
        output_signatures = procedure_object.get(
            keyword_procedure_output_signatures, [])
        if not isinstance(output_signatures, list):
            raise ValueError(
                'invalid procedure output signatures')
        output_signatures = [get_full_signature(
            sig) for sig in output_signatures]
        # Create the (empty) composite procedure shell first.
        procedure = self.procedure_factory.create(
            'composite',
            signature=str(procedure_object.get(
                keyword_procedure_signature, '')),
            docstring=str(procedure_object.get(
                keyword_procedure_document, '')),
            input_signatures=input_signatures,
            output_signatures=output_signatures,
        )
        assert (
            isinstance(procedure, IProcedure) and
            isinstance(procedure, IProcedureLink)
        )
        # Wrap each statement entry in a lazy joint builder, indexed by name.
        procedure_joints = procedure_object.get(keyword_joints, [])
        procedure_joints = [
            ProcedureJointBuilder(
                self,
                procedure_site,
                procedure,
                jt
            ) for jt in procedure_joints
        ]
        procedure_joints = {jt.name: jt for jt in procedure_joints}
        self.load_joints(
            procedure_site,
            procedure,
            procedure_joints,
        )
        # Wire the procedure's own outputs to joint outputs / direct inputs.
        procedure_output_joints = procedure_object.get(
            keyword_procedure_outputs, [])
        if not isinstance(procedure_output_joints, list):
            raise ValueError('invalid procedure joints')
        output_joints, output_indices = self.load_joint_inputs(
            procedure_output_joints, procedure_joints)
        for j in output_joints:
            # None entries are pass-through procedure inputs; only real
            # joints need dependency validation.
            if j is not None:
                joint_validate_dependents(j)
        procedure.set_joints(output_joints, output_indices)
        procedure_site.register_procedure(procedure)
    def load_joints(self, procedure_site, procedure, joint_objects):
        """Connect each joint's inputs to other joints or procedure inputs."""
        if not isinstance(joint_objects, dict):
            raise ValueError('invalid procedure joints')
        # connect joints
        for joint_signature, joint_object in joint_objects.items():
            joint_inputs = joint_object.inputs
            if not isinstance(joint_inputs, list):
                raise ValueError('invalid procedure joint config')
            joint_object.instance.set_joints(
                *self.load_joint_inputs(joint_inputs, joint_objects))
    @staticmethod
    def load_joint_inputs(joint_inputs, joint_objects):
        """Parse input specs into parallel (joints, indices) lists.

        '$N'       -> (None, N): the procedure's own N-th input.
        'name[N]'  -> (joint, N): output N of the joint called `name`.
        """
        input_joints = []
        input_indices = []
        for joint_input in joint_inputs:
            if not isinstance(joint_input, str):
                raise ValueError('invalid procedure joint input')
            if joint_input.startswith('$'):
                joint_input = joint_input.strip('$')
                if not joint_input.isdigit():
                    raise ValueError('invalid joint input')
                input_joints.append(None)
                input_indices.append(int(joint_input))
            else:
                joint_input_tokens = joint_input.split('[')
                if len(joint_input_tokens) != 2:
                    raise ValueError('invalid joint input')
                joint_input_joint, joint_input_index = joint_input_tokens
                joint_input_joint = joint_input_joint.strip()
                joint_input_index = joint_input_index.strip(']').strip()
                if joint_input_joint not in joint_objects:
                    raise ValueError('invalid joint')
                if not joint_input_index.isdigit():
                    raise ValueError('invalid joint input')
                input_joints.append(
                    joint_objects[joint_input_joint].instance)
                input_indices.append(int(joint_input_index))
        return input_joints, input_indices
    def load_object(self, procedure_site, config_object):
        # A config document is a sequence of procedure definitions.
        if not isinstance(config_object, (tuple, list)):
            raise ValueError('invalid procedure configuration file')
        for config_item in config_object:
            self.load_procedure(procedure_site, config_item)
    def load_json(self, procedure_site, file_or_filename):
        import json
        with FileLikeWrapper(file_or_filename) as file:
            self.load_object(procedure_site, json.loads(file.read()))
    def save_json(self, procedure_site, file_or_filename):
        raise NotImplementedError
    def load_yaml(self, procedure_site, file_or_filename):
        import yaml
        with FileLikeWrapper(file_or_filename) as file:
            self.load_object(procedure_site, yaml.safe_load(file))
    def save_yaml(self, procedure_site, file_or_filename):
        raise NotImplementedError
| mind-bricks/abei | abei/implements/procedure_builder.py | procedure_builder.py | py | 8,029 | python | en | code | 0 | github-code | 36 |
class Solution:
    def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:
        """LeetCode 399 (Evaluate Division).

        Build a weighted graph where edge a->b carries a/b, then answer each
        query a/b as the product of edge weights along a path from a to b.
        Returns -1.0 for unknown variables or unreachable pairs.

        Fix: the original block's class header line was fused with dataset
        metadata, making it a syntax error; logic is otherwise preserved.
        """
        # a/b = v  =>  graph[a][b] = v and graph[b][a] = 1/v.
        graph = defaultdict(defaultdict)
        for (dividend, divisor), value in zip(equations, values):
            graph[dividend][divisor] = value
            graph[divisor][dividend] = 1 / value

        def dfs(current, target, cumulative_product, visited):
            # Depth-first search accumulating the product of edge weights;
            # returns -1.0 when no path reaches `target`.
            visited.add(current)
            if target in graph[current]:
                result = cumulative_product * graph[current][target]
            else:
                result = -1.0
                for neighbor, value in graph[current].items():
                    if neighbor in visited:
                        continue
                    result = dfs(neighbor, target, cumulative_product * value, visited)
                    if result != -1.0:
                        break
            visited.remove(current)
            return result

        results = []
        for dividend, divisor in queries:
            if dividend not in graph or divisor not in graph:
                results.append(-1.0)
            elif dividend == divisor:
                results.append(1.0)
            else:
                results.append(dfs(dividend, divisor, 1, set()))
        return results
15991381065 | import torch.nn as nn
try:
from .resnet import resnet50_v1b
except:
from resnet import resnet50_v1b
import torch.nn.functional as F
import torch
class SegBaseModel(nn.Module):
    r"""Base Model for Semantic Segmentation
    Parameters
    ----------
    backbone : string
        Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50',
        'resnet101' or 'resnet152').
    """
    def __init__(self, nclass, aux, backbone='resnet50', dilated=True, pretrained_base=False, **kwargs):
        super(SegBaseModel, self).__init__()
        self.aux = aux
        self.nclass = nclass
        # NOTE(review): only 'resnet50' is actually wired up here; any other
        # backbone string silently leaves self.pretrained undefined.
        if backbone == 'resnet50':
            self.pretrained = resnet50_v1b(pretrained=pretrained_base, dilated=dilated, **kwargs)
    def base_forward(self, x):
        """forwarding pre-trained network"""
        # Stem: conv -> BN -> ReLU -> maxpool, then the four residual stages.
        x = self.pretrained.conv1(x)
        x = self.pretrained.bn1(x)
        x = self.pretrained.relu(x)
        x = self.pretrained.maxpool(x)
        c1 = self.pretrained.layer1(x)
        c2 = self.pretrained.layer2(c1)
        c3 = self.pretrained.layer3(c2)
        c4 = self.pretrained.layer4(c3)
        # Feature maps from shallow (c1) to deep (c4).
        return c1, c2, c3, c4
class _FCNHead(nn.Module):
    """Simple FCN segmentation head: 3x3 conv -> norm -> ReLU -> dropout ->
    1x1 classifier. Spatial resolution is preserved."""
    def __init__(self, in_channels, channels, norm_layer=nn.BatchNorm2d, **kwargs):
        super(_FCNHead, self).__init__()
        # Bottleneck width is a quarter of the input channels.
        mid_channels = in_channels // 4
        layers = [
            nn.Conv2d(in_channels, mid_channels, 3, padding=1, bias=False),
            norm_layer(mid_channels),
            nn.ReLU(inplace=True),
            nn.Dropout(0.1),
            nn.Conv2d(mid_channels, channels, 1),
        ]
        self.block = nn.Sequential(*layers)
    def forward(self, x):
        """Apply the head to a (N, in_channels, H, W) feature map."""
        return self.block(x)
class _PositionAttentionModule(nn.Module):
    """ Position attention module: spatial self-attention over the H*W
    positions of a feature map (DANet)."""
    def __init__(self, in_channels, **kwargs):
        super(_PositionAttentionModule, self).__init__()
        # Query/key projections reduce channels 8x; the value keeps full width.
        self.conv_b = nn.Conv2d(in_channels, in_channels // 8, 1)
        self.conv_c = nn.Conv2d(in_channels, in_channels // 8, 1)
        self.conv_d = nn.Conv2d(in_channels, in_channels, 1)
        # Learnable residual weight, zero-initialised so the module starts
        # out as an identity mapping.
        self.alpha = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)
    def forward(self, x):
        n, _, h, w = x.size()
        # (N, HW, C') query against (N, C', HW) key -> (N, HW, HW) attention.
        query = self.conv_b(x).view(n, -1, h * w).permute(0, 2, 1)
        key = self.conv_c(x).view(n, -1, h * w)
        attention = self.softmax(torch.bmm(query, key))
        value = self.conv_d(x).view(n, -1, h * w)
        # Aggregate values with the transposed attention map, restore NCHW.
        context = torch.bmm(value, attention.permute(0, 2, 1))
        context = context.view(n, -1, h, w)
        return self.alpha * context + x
class _ChannelAttentionModule(nn.Module):
    """Channel attention module: self-attention across the channels of a
    feature map (DANet)."""
    def __init__(self, **kwargs):
        super(_ChannelAttentionModule, self).__init__()
        # Learnable residual weight, zero-initialised (identity at init).
        self.beta = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)
    def forward(self, x):
        n, _, h, w = x.size()
        flat = x.view(n, -1, h * w)
        # (N, C, C) channel-affinity matrix.
        energy = torch.bmm(flat, flat.permute(0, 2, 1))
        # Subtract each row's max before softmax (same trick as the original
        # code; keeps the exponentials bounded).
        energy = torch.max(energy, dim=-1, keepdim=True)[0].expand_as(energy) - energy
        attention = self.softmax(energy)
        context = torch.bmm(attention, flat).view(n, -1, h, w)
        return self.beta * context + x
class _DAHead(nn.Module):
    """Dual-attention head (DANet): runs a position-attention branch and a
    channel-attention branch in parallel and fuses them by summation."""
    def __init__(self, in_channels, nclass, aux=True, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs):
        super(_DAHead, self).__init__()
        # When True, forward also returns per-branch auxiliary predictions.
        self.aux = aux
        inter_channels = in_channels // 4
        # Branch entry convs: reduce channels before the attention modules.
        self.conv_p1 = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)),
            nn.ReLU(True)
        )
        self.conv_c1 = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)),
            nn.ReLU(True)
        )
        self.pam = _PositionAttentionModule(inter_channels, **kwargs)
        self.cam = _ChannelAttentionModule(**kwargs)
        # Post-attention refinement convs, one per branch.
        self.conv_p2 = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),
            norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)),
            nn.ReLU(True)
        )
        self.conv_c2 = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),
            norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)),
            nn.ReLU(True)
        )
        # Classifier on the fused features.
        self.out = nn.Sequential(
            nn.Dropout(0.1),
            nn.Conv2d(inter_channels, nclass, 1)
        )
        if aux:
            # Auxiliary classifiers on the individual branches.
            self.conv_p3 = nn.Sequential(
                nn.Dropout(0.1),
                nn.Conv2d(inter_channels, nclass, 1)
            )
            self.conv_c3 = nn.Sequential(
                nn.Dropout(0.1),
                nn.Conv2d(inter_channels, nclass, 1)
            )
    def forward(self, x):
        # Position-attention branch.
        feat_p = self.conv_p1(x)
        feat_p = self.pam(feat_p)
        feat_p = self.conv_p2(feat_p)
        # Channel-attention branch.
        feat_c = self.conv_c1(x)
        feat_c = self.cam(feat_c)
        feat_c = self.conv_c2(feat_c)
        # Element-wise sum fusion of the two branches.
        feat_fusion = feat_p + feat_c
        outputs = []
        fusion_out = self.out(feat_fusion)
        outputs.append(fusion_out)
        if self.aux:
            p_out = self.conv_p3(feat_p)
            c_out = self.conv_c3(feat_c)
            outputs.append(p_out)
            outputs.append(c_out)
        # (fused_logits[, position_branch_logits, channel_branch_logits])
        return tuple(outputs)
class DANet(SegBaseModel):
    r"""Pyramid Scene Parsing Network
    Parameters
    ----------
    nclass : int
        Number of categories for the training dataset.
    backbone : string
        Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50',
        'resnet101' or 'resnet152').
    norm_layer : object
        Normalization layer used in backbone network (default: :class:`mxnet.gluon.nn.BatchNorm`;
        for Synchronized Cross-GPU BachNormalization).
    aux : bool
        Auxiliary loss.
    Reference:
        Jun Fu, Jing Liu, Haijie Tian, Yong Li, Yongjun Bao, Zhiwei Fang,and Hanqing Lu.
        "Dual Attention Network for Scene Segmentation." *CVPR*, 2019
    """
    def __init__(self, nclass, backbone='resnet50', aux=False, pretrained_base=False, **kwargs):
        super(DANet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs)
        # The resnet50 layer4 output has 2048 channels; the head expects that.
        self.head = _DAHead(2048, nclass, aux, **kwargs)
    def forward(self, x):
        size = x.size()[2:]
        # Only the deepest backbone features (c4) feed the dual-attention head.
        _, _, c3, c4 = self.base_forward(x)
        outputs = []
        x = self.head(c4)
        # Upsample the head output(s) back to the input resolution.
        x0 = F.interpolate(x[0], size, mode='bilinear', align_corners=True)
        if self.aux:
            x1 = F.interpolate(x[1], size, mode='bilinear', align_corners=True)
            x2 = F.interpolate(x[2], size, mode='bilinear', align_corners=True)
            outputs.append(x0)
            outputs.append(x1)
            outputs.append(x2)
            # [fused, position-branch aux, channel-branch aux]
            return outputs
        return x0
if __name__ == '__main__':
    # Smoke test: report FLOPs / parameter count / FPS for a 6-class DANet.
    from tools.flops_params_fps_count import flops_params_fps
    model = DANet(nclass=6)
    flops_params_fps(model)
| zyxu1996/Efficient-Transformer | models/danet.py | danet.py | py | 7,495 | python | en | code | 67 | github-code | 36 |
74201126823 | import re
import os
def get_ckpt_epoch( checkpoint_dir ):
    """Return the highest epoch number among 'model.<N>' files in
    `checkpoint_dir`, or 0 when no checkpoint is present."""
    pattern = re.compile(r'.*model\.(\d+)')
    epochs = [0]
    for fname in os.listdir(checkpoint_dir):
        match = pattern.match(fname)
        if match:
            epochs.append(int(match.group(1)))
    return max(epochs)
def clear_old_ckpt( checkpoint_dir, keep=5 ):
    """Delete all but the newest `keep` 'model.<N>' checkpoints in
    `checkpoint_dir`.

    Bug fix: files were previously sorted lexicographically, so e.g.
    'model.10' sorted before 'model.9' and the newest checkpoints could be
    deleted while older ones were kept. Sort by the numeric epoch instead.
    """
    pattern = re.compile(r'.*model\.(\d+)')
    checkpoints = []
    for fname in os.listdir(checkpoint_dir):
        mtc = pattern.match(fname)
        if not mtc:
            continue
        checkpoints.append((int(mtc.group(1)), fname))
    if len(checkpoints) > keep:
        # Sort oldest-first by epoch and drop everything except the last `keep`.
        for _, fname in sorted(checkpoints)[:-keep]:
            os.remove(os.path.join(checkpoint_dir, fname))
    return
| cmranieri/flood-detection | src/ml_utils.py | ml_utils.py | py | 673 | python | en | code | 0 | github-code | 36 |
13125376994 | """Implementation of Rectangle class"""
class Rectangle:
    """An axis-aligned rectangle.

    Args:
        width: width of rectangle
        height: height of rectangle
    Both should be positive numbers.
    """
    def __init__(self, width, height):
        self.width = width
        self.height = height
    def area(self):
        """Return the rectangle's area (width times height)."""
        return self.width * self.height
# Demo: build a 100x20 rectangle and print its attributes and area.
# (Output labels are in Polish: height / width / area.)
# Fix: the last print line was fused with dataset metadata junk, which made
# the file a syntax error; the statement itself is unchanged.
rect = Rectangle(100, 20)
print('Wysokość: ', rect.height)
print('Szerokość: ', rect.width)
print('Pole to: ', rect.area())
31453786297 | import os, sys, pygame
class Animation:
    """Full-screen, frame-by-frame animation player for pygame.

    Args:
        names_files: image file names (looked up under ./images) in playback order.
        cd_images: per-frame display durations (seconds); frames beyond this
            list fall back to ``common_cd``.
        screen: pygame display surface to draw on.
        width_screen, height_screen: size every frame is scaled to.
        sounds: sound assets held for the caller (not played by this class).
        colorkey: transparency key forwarded to load_image (-1 = top-left pixel).
        music: optional music file path played while the animation runs.
    """
    def __init__(self, names_files, cd_images, screen, width_screen, height_screen, sounds, colorkey=None, music=None):
        self.images = []
        for elem in names_files:
            self.images.append(pygame.transform.scale(
                self.load_image(elem, colorkey=colorkey),
                (width_screen, height_screen)))
        self.images_cd = cd_images
        # Default frame duration (seconds) when cd_images has no entry.
        self.common_cd = 5
        self.screen = screen
        self.music = music
        self.sounds = sounds
    def load_image(self, name, colorkey=None):
        """Load ./images/<name>; colorkey=-1 uses the top-left pixel as key."""
        fullname = os.path.join('images', name)
        if not os.path.isfile(fullname):
            # Bug fix: previously this only printed a message and then
            # crashed inside pygame.image.load; raise a clear error instead.
            raise FileNotFoundError(f"Файл с изображением '{fullname}' не найден")
        image = pygame.image.load(fullname)
        if colorkey is not None:
            image = image.convert()
            if colorkey == -1:
                colorkey = image.get_at((0, 0))
            image.set_colorkey(colorkey)
        else:
            image = image.convert_alpha()
        return image
    def play(self):
        """Play all frames in order.

        Returns:
            False if the window was closed mid-playback, True when the
            animation finished normally (None when there are no images).
        """
        if self.images != []:
            number = 0
            fps = 10
            clock = pygame.time.Clock()
            running = True
            # Remaining ticks for the current frame (duration * fps).
            cd = self.set_cd(number) * fps
            # Bug fix: guard against music=None (the default) before loading.
            if self.music is not None:
                pygame.mixer.music.load(self.music)
            while running:
                # Bug fix: draw on self.screen; the original referenced the
                # module-global `screen`, which only exists in the demo below.
                self.screen.fill((0, 0, 0))
                for event in pygame.event.get():
                    if event.type == pygame.QUIT:
                        return False
                if cd == 0:
                    number += 1
                    if number < len(self.images):
                        cd = self.set_cd(number) * fps
                    else:
                        return True
                cd -= 1
                self.screen.blit(self.images[number], (0, 0))
                clock.tick(fps)
                pygame.display.update()
        else:
            print('в этой анимвции нету изображений')
    def set_cd(self, number):
        """Duration (seconds) for frame `number`, falling back to common_cd."""
        if 0 <= number < len(self.images_cd):
            return self.images_cd[number]
        return self.common_cd
if __name__ == '__main__':
    # Demo: left-click anywhere in the window to play the animation.
    pygame.init()
    pygame.display.set_caption('свой курсор мыши')
    size = width, height = 300, 450
    screen = pygame.display.set_mode(size)
    # Bug fix: Animation requires a `sounds` argument; the original call
    # omitted it and raised TypeError. Pass an empty list for this demo.
    animation = Animation(['third.png', 'first.jpg', 'second.jpg'], [], screen,
                          width, height, [], colorkey=-1)
    running = True
    while running:
        screen.fill((0, 0, 0))
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            if event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1:
                    animation.play()
        pygame.display.update()
74873018984 | from flask_wtf import Form
from wtforms import StringField,BooleanField,PasswordField,IntegerField,SelectField
from wtforms.validators import DataRequired, ValidationError
from webapp.Models.db_basic import Session
from webapp.Models.prod_cat import Prod_cat
from webapp.Models.prod_sub_cat import Prod_sub_cat
def levelone_not_exists_check(self,field):
    """WTForms validator: reject the value if a top-level (level-one)
    product category with that name already exists."""
    session = Session()
    if not session.query(Prod_cat).filter_by(prod_cat_name=field.data).first() is None:
        raise ValidationError( 'Level One Category %s is already exists !' % field.data )
def leveltwo_not_exists_check(self, field):
    """WTForms validator: reject the value if a level-two (sub) category
    with that name already exists."""
    session = Session()
    if session.query(Prod_sub_cat).filter_by(prod_cat_sub_name=field.data).first() is not None:
        # Bug fix: the message previously said "Level One" for a level-two
        # check, and used broken grammar ("is already exists").
        raise ValidationError('Level Two Category %s already exists !' % field.data)
class DeleteLevelOneForm(Form):
    """Form for deleting a level-one product category by id."""
    prod_cat_id = IntegerField('Level One Category ID', validators = [DataRequired()])
class CreateNewLevelOneForm(Form):
    """Form for creating a new level-one product category; the name must not
    already exist (levelone_not_exists_check)."""
    prod_cat_name = StringField('Level One Category Name', validators = [DataRequired(),levelone_not_exists_check])
    prod_cat_desc = StringField('Level One Category Description')
    prod_cat_order = IntegerField('Level One Order Number')
class UpdateLevelOneForm(Form):
    """Form for editing an existing level-one product category."""
    prod_cat_id = IntegerField('Product Categroy ID',validators = [DataRequired()])
    prod_cat_name = StringField('Level One Category Name', validators = [DataRequired()])
    prod_cat_desc = StringField('Level One Category Description')
    prod_cat_order = IntegerField('Level One Order Number')
    valid_flg = BooleanField('Valid Flag')
class DeleteLevelTwoForm(Form):
    """Form for deleting a level-two (sub) category by id."""
    # Bug fix: the label was copy-pasted from the level-one form and said
    # "Level One Category ID" for the sub-category id field.
    prod_cat_sub_id = IntegerField('Level Two Category ID', validators=[DataRequired()])
class CreateNewLevelTwoForm(Form):
    """Form for creating a level-two (sub) category under a level-one parent."""
    # Bug fixes: the name field was validated with levelone_not_exists_check
    # (which queries the level-one table, so duplicate sub-category names were
    # never caught), and the labels were copy-pasted "Level One" text.
    prod_cat_sub_name = StringField('Level Two Category Name', validators=[DataRequired(), leveltwo_not_exists_check])
    prod_cat_id = SelectField('Product Category ID', choices=[], coerce=int)
    prod_cat_sub_desc = StringField('Level Two Category Description')
class UpdateLevelTwoForm(Form):
    """Form for editing an existing level-two (sub) category."""
    # Bug fixes: "Subcategroy" typo and copy-pasted "Level One" labels; the
    # last line was also fused with dataset metadata junk (syntax error).
    prod_cat_sub_id = IntegerField('Product Subcategory ID', validators=[DataRequired()])
    prod_cat_id = SelectField('Product Category ID', choices=[], coerce=int)
    prod_cat_sub_name = StringField('Level Two Category Name', validators=[DataRequired()])
    prod_cat_sub_desc = StringField('Level Two Category Description')
    valid_flg = BooleanField('Valid Flag')
38642289342 | import nltk
import pickle
from utils import tokenize_document
# Read the raw resume text; `with` guarantees the handle is closed even if
# reading fails (the original closed it manually).
with open('../assets/resume.txt', 'r') as resume_file:
    resume = resume_file.read()

# \w+ keeps only word characters, dropping punctuation tokens.
tokenizer = nltk.RegexpTokenizer(r'\w+')
resume_tokenized = tokenize_document(resume, tokenizer)
print(resume_tokenized)

# Persist the token list. Bug fix: the original passed an anonymous
# open(..., 'wb') straight to pickle.dump and never closed it, so the file
# handle (and potentially buffered data) leaked.
with open('../assets/resume_tokens.p', 'wb') as token_file:
    pickle.dump(resume_tokenized, token_file)
| anishLearnsToCode/stop-words-removal | src/driver.py | driver.py | py | 352 | python | en | code | 0 | github-code | 36 |
32480271471 | from django.urls import include, path
from rest_framework.routers import DefaultRouter
from .views import (IngredientViewSet, LikedRecipeDetailView, RecipeViewSet,
SubscribtionListView, SubscriptionDetailView, TagViewSet)
# DRF router for the viewset-backed resources.
v1_router = DefaultRouter()
# /tags/ -- tag endpoints.
v1_router.register(
    'tags',
    TagViewSet,
    basename='tags-list'
)
# /recipes/ -- recipe CRUD endpoints.
v1_router.register(
    'recipes',
    RecipeViewSet,
    basename='recipes-list'
)
# /ingredients/ -- ingredient endpoints.
v1_router.register(
    'ingredients',
    IngredientViewSet,
    basename='ingredients-list'
)
# Non-viewset routes (subscriptions and favorites) are declared explicitly;
# they must come after the router include in this list.
urlpatterns = [
    path('',
         include(v1_router.urls)),
    path('users/subscriptions/',
         SubscribtionListView.as_view(),
         name='subscription'),
    path('users/<int:id>/subscribe/',
         SubscriptionDetailView.as_view(),
         name='subscribe'),
    path('recipes/<int:id>/favorite/',
         LikedRecipeDetailView.as_view(),
         name='favorite'),
]
| JCoffeeYP/foodgram-project-react | backend/cookbook/urls.py | urls.py | py | 917 | python | en | code | 0 | github-code | 36 |
27016642063 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 1 17:38:34 2019
@author: Martín Márquez Cervantes
"""
print("UNion de las tres compuertas, aun por trabajar")
import matplotlib.pyplot as plt
import numpy as np
class MyNeuron:
    """Single-layer perceptron: train with the classic perceptron rule,
    predict binary classes and report confusion-matrix metrics."""

    # Trained weight vector (bias first); set by training(), used by predic().
    WG = 0
    xTestPredic = 0

    def training(self, X, Y):
        """Fit the perceptron on samples X (one row each) and 0/1 labels Y."""
        # One weight per input column plus one for the bias term.
        # (Generalized from the original hard-coded 3 weights, which only
        # supported inputs with exactly two columns.)
        w = np.random.rand(X.shape[1] + 1)
        # Prepend a column of ones so w[0] acts as the bias.
        X = np.append(np.ones((X.shape[0], 1)), X, axis=1)
        # Keep the initial weights for reporting/plotting.
        wInicial = w
        XN = X.shape[0]
        # Perceptron learning rule, 20 epochs over all samples.
        for i in range(1, 21):
            for j in range(XN):
                if np.dot(w, X[j]) >= 0:
                    y = 1
                else:
                    y = 0
                w = w + (Y[j] - y) * X[j]
        # Save the trained weights for predic().
        self.WG = w
        # Report initial vs. final weights, textually and as a plot.
        print("\n\n")
        print("w Inicial: " + str(wInicial))
        print("w Final: " + str(w))
        plt.plot(wInicial, '.-')
        plt.plot(w, '.-')
        print("\n\n")
        print('Linea azul W Inicial')
        print('Linea naranja W Final aplicando algoritmo')

    def predic(self, i, xTest):
        """Classify row i of the (bias-augmented) matrix xTest: 1 if w.x >= 0, else 0."""
        y = np.dot(self.WG, xTest[i])
        if y >= 0:
            return 1
        return 0

    def comparar(self, XT, Predicciones):
        """Print confusion matrix, accuracy, precision, recall and F-score.

        ``XT`` holds the true 0/1 labels (despite its name) and
        ``Predicciones`` the predicted ones.
        """
        TP = 0
        FP = 0
        FN = 0
        TN = 0
        # Count true/false positives/negatives.
        for i in range(XT.shape[0]):
            if XT[i] == 1 and Predicciones[i] == 1:
                TP += 1
            if XT[i] == 0 and Predicciones[i] == 0:
                TN += 1
            if XT[i] == 1 and Predicciones[i] == 0:
                FN += 1
            if XT[i] == 0 and Predicciones[i] == 1:
                FP += 1
        print("\n\nTP = " + str(TP) + "\nTN = " + str(TN) + "\nFP = " + str(FP) + "\nFN = " + str(FN))
        print("\nMatrix Confussion")
        MatrixConfussion = np.array([TP, TN, FP, FN])
        print(MatrixConfussion.reshape(2, 2))
        # Classification accuracy.
        ClassificationAccuary = ((TP + TN) / (TP + TN + FN + FP)) * 100
        print("\nPrecisión de clasificación: " + str(ClassificationAccuary) + " %")
        # NOTE: the divisions below raise ZeroDivisionError when there are no
        # positive predictions/labels; behavior kept from the original.
        Presicion = TP / (TP + FP) * 100
        print("Precisión : " + str(Presicion) + " %")
        Recall = TP / (TP + FN) * 100
        print("Recall : " + str(Recall) + " %")
        FScore = 2 * ((Presicion * Recall) / (Presicion + Recall))
        print("F-Score : " + str(FScore))
clf = MyNeuron()

# ---- training data -------------------------------------------------------
# "Logic 0" inputs are sampled from [0, 0.4), "logic 1" inputs from [0.75, 0.9).
TotalElementos = 10
ceros = np.random.uniform(0, 0.4, TotalElementos)
unos = np.random.uniform(0.75, 0.9, TotalElementos)
numRenglones = ceros.shape[0] * 4

# Two-input samples; the column-major ('F') reshape yields the row blocks
# (0,0), (0,1), (1,0), (1,1), TotalElementos rows each.
X = np.append(ceros, ceros)
X = np.append(X, unos)
X = np.append(X, unos)
X = np.append(X, ceros)
X = np.append(X, unos)
X = np.append(X, ceros)
X = np.append(X, unos)
# BUGFIX: numpy's reshape order must be 'C'/'F'/'A'; the original passed
# order=True, which is not a valid value.
X = X.reshape(numRenglones, 2, order='F')

# Single-input samples for the NOT gate.
unosNot = np.random.uniform(0.75, 0.9, TotalElementos * 2)
Xnot = np.append(ceros, ceros)
Xnot = np.append(Xnot, unosNot)
Xnot = Xnot.reshape(numRenglones, 1)

# ---- labels (BUGFIX: the original appended to an undefined name ``Y``,
# raising NameError; each gate now builds its own vector) ------------------
# AND: only the final (1,1) block is positive.
YAND = np.append(np.zeros(TotalElementos * 3), np.ones(TotalElementos))
# OR: every block except (0,0) is positive.
YOR = np.append(np.zeros(TotalElementos), np.ones(TotalElementos * 3))
# NOT: split kept as in the original. NOTE(review): for a NOT gate these
# labels look inverted (1-inputs labeled 1) -- confirm intended semantics.
YNOT = np.append(np.zeros(TotalElementos * 2), np.ones(TotalElementos * 2))

clf.training(X, YAND)

# ---- test data: the four crisp input pairs, 5 samples per block ----------
cerosTest = np.zeros(5)
unosTest = np.ones(5)
XT = np.append(cerosTest, cerosTest)
XT = np.append(XT, unosTest)
XT = np.append(XT, unosTest)
XT = np.append(XT, cerosTest)
XT = np.append(XT, unosTest)
XT = np.append(XT, cerosTest)
XT = np.append(XT, unosTest)
XT = XT.reshape(20, 2, order='F')

# Expected AND outputs: only the final (1,1) block is 1.
YT = np.append(np.zeros(15), np.ones(5))

# Add the bias column and classify every test row.
XT = np.append(np.ones((XT.shape[0], 1)), XT, axis=1)
Predicciones = np.array([clf.predic(i, XT) for i in range(XT.shape[0])])

print("\n\n")
for i in range(XT.shape[0]):
    print("Indice " + str(i) + " prediccion " + str(Predicciones[i]))

clf.comparar(YT, Predicciones)
| MarqCervMartin/RedesNeuronales | Laboratorio5/CompuertaAndOrNot.py | CompuertaAndOrNot.py | py | 5,014 | python | es | code | 0 | github-code | 36 |
import structmanager.sol200.output_codes as output_codes_SOL200
def constrain_buckling(panelcomp, eig=1.0):
    """Add a buckling constraint (lowest eigenvalue >= ``eig``) to ``panelcomp``.

    The membrane forces Nxx, Nyy and Nxy of the panel's central element are
    read through DRESP1 entries and fed, together with the thickness design
    variable and the geometry/material design tables, into an external DRESP3
    subroutine that computes the buckling eigenvalue.
    """
    OUTC = output_codes_SOL200.OUTC
    eid = panelcomp.get_central_element().eid
    dcid = panelcomp.constraints['buckling']

    # One DRESP1 per membrane force component of the central element.
    force_codes = OUTC['FORCE']['CQUAD4']
    membrane_dresps = []
    for label, component in (('PCfNxx', 'Membrane force x'),
                             ('PCfNyy', 'Membrane force y'),
                             ('PCfNxy', 'Membrane force xy')):
        dresp1 = DRESP1(label, 'FORCE', 'ELEM', region=None,
                        atta=force_codes[component], attb=None, atti=eid)
        panelcomp.add_dresp(dresp1)
        membrane_dresps.append(dresp1)

    # External DRESP3 subroutine computing the buckling eigenvalue; it needs
    # every desvar, dtable and DRESP1 listed below to run.
    dresp = DRESP3('PCBUCK1', 'PCBUCK', 'BUCK_PC')
    dresp.add_dvar(panelcomp.dvars['PCt'].id)
    for table in ('PCa', 'PCb', 'PCr', 'PCE1', 'PCE2', 'PCG12', 'PCn12', 'PCn21'):
        dresp.add_dtable(panelcomp.dtables[table][0])
    for dresp1 in membrane_dresps:
        dresp.add_dresp1(dresp1.id)
    panelcomp.add_dresp(dresp)

    # Constrain the lowest buckling eigenvalue to be >= eig (no upper bound).
    panelcomp.add_constraint(dcid, dresp, eig, None)
| compmech/structmanager | structmanager/optimization/sol200/elements2d/composite_panel/constraints.py | constraints.py | py | 1,966 | python | en | code | 1 | github-code | 36 |
#!/usr/bin/env python3
import os
import re
import click
import json
import logging
import zipfile
import portalocker
import contextlib
import traceback
import imghdr
import multiprocessing
import functools
import threading
import time
import sys
import ctypes
import psutil
from pathlib import Path
from functools import partial
from tqdm import tqdm
from atomicwrites import atomic_write
from contextlib import contextmanager, nullcontext
from functools import partial
from origami.core.time import elapsed_timer
from origami.batch.core.io import *
from origami.batch.core.utils import Spinner
from origami.batch.core.mutex import DatabaseMutex, FileMutex, DummyMutex
def qt_app():
    """Create a headless Qt GUI application.

    Tries PySide2 first and falls back to PySide6.  QT_QPA_PLATFORM is set to
    "offscreen" before the application is constructed so no display server is
    required (e.g. on batch/compute nodes).
    """
    try:
        from PySide2 import QtGui
    except ImportError:
        from PySide6 import QtGui
    os.environ["QT_QPA_PLATFORM"] = "offscreen"
    return QtGui.QGuiApplication()
class WatchdogState(enum.Enum):
    # Lifecycle of the Watchdog thread: RUNNING while monitored work is in
    # progress, DONE after a clean finish, CANCEL once the worker pool was
    # terminated because of a timeout.
    RUNNING = 0
    DONE = 1
    CANCEL = 2
class StopWatch:
    """Tracks seconds elapsed since construction or the last reset()."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Restart the elapsed-time measurement from now."""
        self._last_reset = time.time()

    @property
    def age(self):
        """Seconds elapsed since the last reset."""
        return time.time() - self._last_reset
class SharedMemoryStopWatch:
    """A stop watch whose reset time lives in shared memory, so the age is
    visible across forked worker processes (1-second resolution)."""

    def __init__(self):
        self._reset_time = multiprocessing.Value('L', int(time.time()))

    def reset(self):
        """Restart the measurement from now (process-safe)."""
        with self._reset_time.get_lock():
            self._reset_time.value = int(time.time())

    @property
    def age(self):
        """Seconds elapsed since the last reset (process-safe)."""
        with self._reset_time.get_lock():
            return time.time() - self._reset_time.value
# Snapshot of one active work item: the page path being processed, the pid of
# the worker handling it, and how long (seconds) it has been running.
WorkSetEntry = collections.namedtuple(
    'WorkSetEntry', ['path', 'pid', 'age'])
class SharedMemoryWorkSet:
    """Fixed-capacity set of currently-processed items, shared across worker
    processes through a locked ``multiprocessing.Array``.

    ``access`` maps a stored integer value back to a page path for reporting;
    ``n`` is the maximum number of concurrent entries (one per worker).
    The Array's built-in lock is an RLock, so the nested acquisition in
    ``_cleanup()`` below is safe.
    """

    def __init__(self, access, n):
        assert n >= 1
        self._array = multiprocessing.Array(
            ctypes.c_int64, n * 4)
        # each slot has 4 integer entries:
        # 0: value
        # 1: pid
        # 2: timestamp
        # 3: not used
        self._n = n
        # -1 marks a free slot / unused field.
        for i in range(self._n * 4):
            self._array[i] = -1
        self._access = access

    def _cleanup(self):
        # Free slots owned by processes that no longer exist (killed workers),
        # so a crashed worker cannot permanently occupy a slot.
        with self._array.get_lock():
            for i in range(self._n):
                pid = self._array[4 * i + 1]
                if pid >= 0 and not psutil.pid_exists(pid):
                    logging.warning(f"removing killed pid {pid} from work set.")
                    self._array[4 * i] = -1
                    self._array[4 * i + 1] = -1
                    self._array[4 * i + 2] = -1

    def add(self, value):
        """Record *value* as being worked on by the calling process."""
        assert value >= 0
        with self._array.get_lock():
            self._cleanup()

            free = None
            for i in range(self._n):
                if self._array[4 * i] == value:
                    # Already registered; nothing to do.
                    return
                elif free is None and self._array[4 * i] < 0:
                    free = i
            if free is None:
                raise RuntimeError(
                    f"no free slots for adding {value}, pid {os.getpid()}: {self.active}")
            self._array[4 * free] = value
            self._array[4 * free + 1] = int(os.getpid())
            self._array[4 * free + 2] = int(time.time())

    def remove(self, value):
        """Remove *value* from the set; it must currently be registered."""
        assert value >= 0
        with self._array.get_lock():
            found = None
            for i in range(self._n):
                if self._array[4 * i] == value:
                    found = i
                    break
            assert found is not None
            self._array[4 * found] = -1
            self._array[4 * found + 1] = -1
            self._array[4 * found + 2] = -1

    @property
    def active(self):
        """Return a list of WorkSetEntry snapshots for all live entries."""
        result = []
        with self._array.get_lock():
            self._cleanup()
            for i in range(self._n):
                if self._array[4 * i] >= 0:
                    result.append(WorkSetEntry(
                        path=self._access(self._array[4 * i]),
                        pid=self._array[4 * i + 1],
                        age=int(time.time() - self._array[4 * i + 2])))
        return result

    def print(self):
        """Log the current entries (used when the watchdog trips)."""
        active = self.active
        if active:
            logging.error(f"{len(active)} entries in work set:")
            for i, entry in enumerate(active):
                logging.error(f"  ({i + 1}) {entry}]")
        else:
            logging.error("no entries in work set.")
# NOTE: "global" statements at module level are no-ops; they are kept from
# the original as documentation of intent.
global global_stop_watch
global_stop_watch = SharedMemoryStopWatch()
# global_stop_watch needs to be global indeed, as pickling
# over the fork in imap_unordered will not work otherwise.
global global_work_set
class Watchdog(threading.Thread):
    """Background thread that terminates the worker pool when no new result
    arrived within ``timeout`` seconds (``stop_watch`` is reset whenever a
    result comes in)."""

    def __init__(self, pool, stop_watch, work_set, timeout):
        threading.Thread.__init__(self)
        self._pool = pool
        self._timeout = timeout
        self._stop_watch = stop_watch
        self._work_set = work_set
        self._state = WatchdogState.RUNNING
        self._cond = threading.Condition()
        stop_watch.reset()

    def _print_work_set(self):
        self._work_set.print()

    def _cancel(self):
        # First timeout: terminate the pool and join it from a helper thread
        # (join may block indefinitely).  Second timeout while still in
        # CANCEL: hard-exit the whole process.
        if self._state != WatchdogState.CANCEL:
            logging.error("no new results after %d s. stopping." % self._stop_watch.age)
            self._print_work_set()
            self._state = WatchdogState.CANCEL
            self._pool.terminate()
            t = threading.Thread(target=lambda: self._pool.join(), args=())
            t.start()
            self._stop_watch.reset()
        elif self._state == WatchdogState.CANCEL:
            logging.error("stopping failed. killing process.")
            self._print_work_set()
            os._exit(1)

    def run(self):
        # Sleep until the remaining budget elapses; exit when flagged DONE,
        # otherwise escalate via _cancel() when the stop watch is too old.
        with self._cond:
            while True:
                self._cond.wait(
                    max(0, self._timeout - self._stop_watch.age))
                if self._state == WatchdogState.DONE:
                    break
                if self._stop_watch.age > self._timeout:
                    self._cancel()

    def set_is_done(self):
        """Signal a clean finish so run() can exit promptly."""
        with self._cond:
            if self._state == WatchdogState.RUNNING:
                self._state = WatchdogState.DONE
                self._cond.notify()

    def is_cancelled(self):
        """True once the watchdog has terminated the pool."""
        return self._state == WatchdogState.CANCEL
def chunks(items, n):
    """Yield successive n-sized slices of *items*; the last may be shorter."""
    for start in range(0, len(items), n):
        yield items[start:start + n]
class Processor:
    """Base class for batch page processors.

    Walks a directory (or a .txt list) of page images, determines per page
    which output artifacts are missing, and runs ``process()`` for each page
    -- optionally across multiple worker processes, with inter-process
    locking and a watchdog that terminates stalled runs.

    Subclasses are expected to provide ``artifacts()`` and override
    ``process()``.
    """

    def __init__(self, options, needs_qt=False):
        self._overwrite = options.get("overwrite", False)
        self._processes = options.get("processes", 1)
        self._timeout = options.get("alive", 600)
        self._name = options.get("name", "")
        self._verbose = False

        self._lock_strategy = options.get("lock_strategy", "DB")
        self._lock_level = options.get("lock_level", "PAGE")
        # NOTE(review): default here is the string "60" while the CLI default
        # is the int 60 -- confirm DatabaseMutex accepts both.
        self._lock_timeout = options.get("lock_timeout", "60")
        self._max_lock_age = options.get("max_lock_age")
        # Pages are locked and processed in chunks of this size.
        self._lock_chunk_size = 25
        self._mutex = None

        if self._lock_strategy == "DB":
            self._lock_database = options.get("lock_database")
        elif self._lock_strategy in ("FILE", "NONE"):
            pass
        else:
            raise ValueError(self._lock_strategy)

        if needs_qt:
            self._qt_app = qt_app()
            if self._processes > 1:
                logging.warning(
                    "this batch does not support multiple processes.")
                self._processes = 1  # cannot safely fork here.
        else:
            self._qt_app = None

        if options.get("profile"):
            from profiling.sampling import SamplingProfiler
            self._profiler = SamplingProfiler()
            self._overwrite = True  # profile implies overwrite
        else:
            self._profiler = None

        self._print_paths = False
        self._plain = options.get("plain")
        if self._plain:
            self._print_paths = True

        self._debug_write = options.get("debug_write", False)
        self._track_changes = options.get("track_changes", False)

    @staticmethod
    def options(f):
        """Attach the shared batch-processing click options to command ``f``."""
        options = [
            click.option(
                '--processes',
                type=int,
                default=1,
                help="Number of parallel processes to employ."),
            click.option(
                '--alive',
                type=int,
                default=600,
                help="Seconds to wait after inactive process is killed."),
            click.option(
                '--name',
                type=str,
                default="",
                help="Only process paths that conform to the given pattern."),
            click.option(
                '--lock-strategy',
                type=click.Choice(['FILE', 'DB', 'NONE'], case_sensitive=False),
                default="DB",
                help="How to implement locking for concurrency."),
            click.option(
                '--lock-level',
                type=click.Choice(['PAGE', 'TASK'], case_sensitive=False),
                default="PAGE",
                help="Lock granularity."),
            click.option(
                '--lock-database',
                type=click.Path(),
                required=False,
                help="Mutex database path used for concurrent processing"),
            click.option(
                '--lock-timeout',
                type=int,
                default=60,
                required=False,
                help="Seconds to wait to acquire locking. NFS volumes might need high values."),
            click.option(
                '--max-lock-age',
                type=int,
                default=600,
                required=False,
                help="Maximum age of a lock in seconds until it is considered invalid."),
            click.option(
                '--overwrite',
                is_flag=True,
                default=False,
                help="Recompute and overwrite existing result files."),
            click.option(
                '--profile',
                is_flag=True,
                default=False,
                help="Enable profiling and show results."),
            click.option(
                '--plain',
                is_flag=True,
                default=False,
                help="Print plain output that is friendly to piping."),
            click.option(
                '--debug-write',
                is_flag=True,
                default=False,
                help="Debug which files are written."),
            click.option(
                '--track-changes',
                type=str,
                default="",
                help="Recompute files and track changes with given tag.")
        ]
        return functools.reduce(lambda x, opt: opt(x), options, f)

    @property
    def processor_name(self):
        """Name used for runtime-info records and TASK-level locks."""
        return self.__class__.__name__

    def is_image(self, path):
        """Heuristically decide whether ``path`` points at a page image."""
        # imghdr might be the perfect tool for this, but
        # it fails to detect some valid images. so we go
        # with extensions for the most part.
        # see https://stackoverflow.com/questions/36870661/
        # imghdr-python-cant-detec-type-of-some-images-image-extension
        if path.suffix.lower() in (".jpg", ".png", ".tif", ".tiff"):
            return True
        return imghdr.what(path) is not None

    def should_process(self, page_path):
        """Subclass hook: return False to skip ``page_path`` entirely."""
        return True

    def prepare_process(self, page_path):
        """Build the kwargs dict of artifact handles for process().

        Returns False when a required input artifact is missing for this
        page, so the page is not queued.
        """
        # self.artifacts() is provided by the subclass.
        artifacts = self.artifacts()

        if self._track_changes:
            file_writer = TrackChangeWriter(self._track_changes)
        else:
            file_writer = AtomicFileWriter(overwrite=self._overwrite)

        if self._debug_write:
            file_writer = DebuggingFileWriter(file_writer)

        kwargs = dict()
        for arg, spec in artifacts:
            f = spec.instantiate(
                page_path=page_path,
                processor=self,
                file_writer=file_writer)
            f.fix_inconsistent()
            if not f.is_ready():
                if self._verbose:
                    print("skipping %s: missing %s" % (page_path, f.missing))
                return False
            kwargs[arg] = f

        return kwargs

    def _trigger_process1(self, p, kwargs, locked):
        """Run process() for one page, recording runtime info and failures."""
        work = locked
        if not locked:
            logging.warning(f"failed to obtain lock for {p}. ignoring.")

        try:
            if work:
                # a concurrent worker might already have done this.
                for f in kwargs.values():
                    if not f.is_ready():
                        work = False
                        break

            if work:
                with elapsed_timer() as elapsed:
                    data_path = find_data_path(p)
                    data_path.mkdir(exist_ok=True)

                    runtime_info = self.process(p, **kwargs)

                if runtime_info is None:
                    runtime_info = dict()
                runtime_info["status"] = "COMPLETED"
                runtime_info["elapsed"] = round(elapsed(), 2)

                self._update_runtime_info(
                    p, {self.processor_name: runtime_info})

        except KeyboardInterrupt:
            logging.exception("Interrupted at %s." % p)
            raise
        except:
            # Record the failure on the page and keep the batch going.
            logging.exception("Failed to process %s." % p)
            runtime_info = dict(
                status="FAILED",
                traceback=traceback.format_exc())
            self._update_runtime_info(p, {
                self.processor_name: runtime_info})
        finally:
            # free memory allocated in cached io.Reader
            # attributes. this can get substantial for
            # long runs.
            kwargs.clear()

    def _trigger_process(self, chunk):
        """Process one chunk of pages under a single lock; yields (i, p)."""
        if self._lock_level == "PAGE":
            lock_actor_name = "page"
        elif self._lock_level == "TASK":
            lock_actor_name = self.processor_name
        else:
            raise ValueError(self._lock_level)

        with self._mutex.lock(
            lock_actor_name,
            [str(p) for _, p, _ in chunk]) as locked:

            for i, p, kwargs in chunk:
                # Track the item in the shared work set so the watchdog can
                # report what was in flight when it trips.
                global_work_set.add(i)
                try:
                    self._trigger_process1(p, kwargs, locked)
                finally:
                    global_work_set.remove(i)
                yield i, p

    def _trigger_process_async(self, chunk):
        """Worker-process entry point: run a chunk, resetting the shared
        stop watch after each page so the watchdog sees progress."""
        results = []
        for i, p in self._trigger_process(chunk):
            results.append((i, p))
            global_stop_watch.reset()
        return results

    def _process_queue(self, queued):
        """Run all queued pages, sequentially or via a process pool."""
        global global_work_set
        global_work_set = SharedMemoryWorkSet(
            lambda i: queued[i][1], max(1, self._processes))

        with self._profiler or nullcontext():
            chunked_queue_gen = chunks(queued, self._lock_chunk_size)

            def iprogress(i):
                # Right-aligned "[  i / total]" prefix for --plain output.
                nd = len(str(len(queued)))
                return f"[{str(i + 1).rjust(nd)} / {len(queued)}]"

            if self._processes > 1:
                with multiprocessing.Pool(self._processes, maxtasksperchild=4) as pool:
                    watchdog = Watchdog(
                        pool=pool,
                        stop_watch=global_stop_watch,
                        work_set=global_work_set,
                        timeout=self._timeout)
                    watchdog.start()

                    with tqdm(total=len(queued), disable=self._print_paths) as progress:
                        for chunk in pool.imap_unordered(
                            self._trigger_process_async, chunked_queue_gen):

                            if self._print_paths:
                                for i, p in chunk:
                                    print(f"{iprogress(i)} {p}", flush=True)
                            else:
                                progress.update(len(chunk))

                            global_stop_watch.reset()

                    if watchdog.is_cancelled():
                        # NOTE(review): Watchdog (a threading.Thread) defines
                        # no kill(); this line would raise AttributeError if
                        # ever reached -- confirm and fix upstream.
                        watchdog.kill()
                        sys.exit(1)
                    else:
                        watchdog.set_is_done()
            else:
                with tqdm(total=len(queued), disable=self._print_paths) as progress:
                    for chunk in chunked_queue_gen:
                        for i, p in self._trigger_process(chunk):
                            if self._print_paths:
                                print(f"{iprogress(i)} {p}", flush=True)
                            else:
                                progress.update(1)

    def _build_queue(self, path):
        """Scan ``path`` (directory or .txt list of paths) and return the
        work queue as a list of (index, page_path, kwargs) tuples."""
        path = Path(path)
        if not path.exists():
            raise FileNotFoundError("%s does not exist." % path)

        queued = []
        counts = dict(images=0)

        def add_path(p):
            # Filter: existing image files matching --name whose required
            # inputs are present and that the subclass wants processed.
            if not p.exists():
                print("skipping %s: path does not exist." % p)
                return
            if self._name and not re.search(self._name, str(p)):
                return
            if not self.is_image(p):
                if self._verbose:
                    print("skipping %s: not an image." % p)
                return
            counts['images'] += 1
            if not self.should_process(p):
                if self._verbose:
                    print("skipping %s: should_process is False" % p)
                return
            kwargs = self.prepare_process(p)
            if kwargs is not False:
                queued.append((len(queued), p, kwargs))

        if not path.is_dir():
            if path.suffix == ".txt":
                with open(path, "r") as f:
                    for line in f:
                        line = line.strip()
                        if line:
                            add_path(Path(line))
            else:
                raise FileNotFoundError(
                    "%s is not a valid path or text file of paths." % path)
        else:
            print(f"scanning {path}... ", flush=True, end="")
            with Spinner(disable=self._plain):
                for folder, dirs, filenames in os.walk(path):
                    folder = Path(folder)
                    # ".out" directories hold derived data, not page images;
                    # don't descend into them.
                    if folder.name.endswith(".out"):
                        dirs.clear()
                        continue
                    else:
                        dirs.sort()
                    for filename in sorted(filenames):
                        add_path(folder / filename)
            print("done.", flush=True)

        print(f"{counts['images']} documents found, {len(queued)} ready to process.")

        return queued

    def traverse(self, path: Path):
        """Top-level entry: build the queue, set up locking, process all."""
        print(f"running {self.processor_name}.", flush=True)
        queued = self._build_queue(path)

        if self._lock_strategy == "DB":
            # Default lock database lives next to (or inside) the data.
            if self._lock_database:
                db_path = Path(self._lock_database)
            elif Path(path).is_dir():
                db_path = Path(path) / "origami.lock.db"
            else:
                db_path = Path(path).parent / "origami.lock.db"
            self._mutex = DatabaseMutex(
                db_path, timeout=self._lock_timeout)
            self._mutex.clear_locks(self._max_lock_age)
        elif self._lock_strategy == "FILE":
            self._mutex = FileMutex()
        elif self._lock_strategy == "NONE":
            self._mutex = DummyMutex()
        else:
            raise ValueError(self._lock_strategy)

        try:
            self._process_queue(queued)
        finally:
            self._mutex = None

        if self._profiler:
            self._profiler.run_viewer()

    def process(self, p: Path):
        """Subclass hook: process one page; may return a runtime-info dict."""
        pass

    def lock_or_open(self, path, mode):
        """Open ``path``; with FILE locking, take an exclusive portalocker
        lock that fails fast when the file is already locked."""
        if self._lock_strategy == "FILE":
            return portalocker.Lock(
                path,
                mode,
                flags=portalocker.LOCK_EX,
                timeout=1,
                fail_when_locked=True)
        else:
            return open(path, mode)

    def _update_json(self, page_path, artifact, updates):
        """Merge ``updates`` into the page's JSON artifact.

        A value of None deletes the key.  The file is written to a temporary
        ".updated" sibling and renamed into place; any error is logged and
        swallowed so a bad update never aborts the batch.
        """
        try:
            data_path = find_data_path(page_path)
            json_path = data_path / artifact.filename()

            new_json_path = json_path.parent / (
                json_path.stem + ".updated" + json_path.suffix)
            if new_json_path.exists():
                os.remove(new_json_path)

            if json_path.exists():
                with open(json_path, "r") as f:
                    file_data = f.read()
                data = json.loads(file_data)
            else:
                data = dict()

            for k, v in updates.items():
                if v is None:
                    del data[k]
                else:
                    data[k] = v

            with open(new_json_path, "w") as f:
                json.dump(data, f)

            if json_path.exists():
                os.remove(json_path)
            os.rename(new_json_path, json_path)
        except:
            logging.error(traceback.format_exc())

    def _update_runtime_info(self, page_path, updates):
        """Record per-processor runtime data in the page's RUNTIME artifact."""
        self._update_json(page_path, Artifact.RUNTIME, updates)
| poke1024/origami | origami/batch/core/processor.py | processor.py | py | 16,273 | python | en | code | 69 | github-code | 36 |
# Simulate a vacation budget: each day starts with 60 (plus whatever was left
# over from the previous day) and purchase prices are read until "Day over".
days_vacation = int(input())

left_money = 0
for day in range(1, days_vacation + 1):
    budget = 60
    bought = 0
    if left_money > 0:
        budget += left_money
    while budget > 0:
        entry = input()
        if entry == "Day over":
            if budget > 0:
                print(f"Money left from today: {budget:.2f}. You've bought {bought} products.")
            break
        price = float(entry)
        # Unaffordable items are silently skipped.
        if price <= budget:
            budget -= price
            bought += 1
            if budget == 0:
                print(f"Daily limit exceeded! You've bought {bought} products.")
    # Leftover only carries over when the day ended with money remaining.
    if budget > 0:
        left_money = budget
| IvayloSavov/Programming-basics | exams/27_28_July/trip_expenses.py | trip_expenses.py | py | 836 | python | en | code | 0 | github-code | 36 |
from urllib.request import urlopen
import json
def E(X):
    """Expected value of a discrete random variable given as [(value, probability), ...]."""
    return sum(value * probability for value, probability in X)
url = 'https://api.blockchain.info/charts/market-price?timespan=1year&format=json'

# Fetch one year of daily BTC market prices and decode the JSON payload.
# (The original leaked the HTTP response and concatenated decoded lines in a
# quadratic loop; a single read/decode is equivalent and closes the handle.)
with urlopen(url) as http_file:
    s = json.loads(http_file.read().decode('utf-8'))
values = s.get('values')

# Daily relative price changes, weighted by quarterly probabilities P
# (each quarter spans ~91.25 days of the 365-day series).
X = []
P = [0.1550018235964546, 0.20615242538328463, 0.27418272575976854, 0.36466302526049216]
for i in range(1, len(values)):
    e2, e1 = values[i], values[i - 1]
    dzeta = e2.get('y') / e1.get('y')  # day-over-day price ratio
    quarter = int(i // 91.25)          # 0..3: which quarter this day falls in
    X.append((dzeta - 1, P[quarter] / 91))

if __name__ == '__main__':
    print(E(X))  # revenue per day

k = (E(X)) * 30  # revenue per month


def kbn(month_sum):
    """Number of months needed to reach ``month_sum`` at the current monthly rate."""
    return month_sum / k


if __name__ == '__main__':
    print(1 / k)
| naurlaunim/other | btc_calc.py | btc_calc.py | py | 922 | python | en | code | 0 | github-code | 36 |
# coding: utf8
# Create by Narata on 2018/4/10
import urllib.request
import re
def get_url_list():
    """Scrape budejie's video page and download every embedded mp4."""
    url = 'http://www.budejie.com/video/'
    req = urllib.request.Request(url)
    # Spoof a desktop browser user agent so the site serves the normal page.
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36')
    response = urllib.request.urlopen(req)
    html = response.read().decode('utf8')
    print(type(html))
    # The mp4 URLs live in data-mp4="..." attributes.
    pattern = r'data-mp4="(.*?)"'
    video_urls = re.findall(pattern, html)
    for video_url in video_urls:
        urllib.request.urlretrieve(video_url, 'mp4/{}.mp4'.format(video_url.split('/')[-1].split('.')[0]))
        print(video_url)


get_url_list()
import random
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageFilter
from torchvision import datasets, transforms
class RandomGaussianBlur(object):
    """With probability 0.5, blur a PIL image with a random-radius Gaussian kernel."""

    def __call__(self, image):
        # radius is drawn from [0, 1); radius 0 leaves the image unchanged.
        if random.random() < 0.5:
            image = image.filter(ImageFilter.GaussianBlur(
                radius=random.random()))
        return image
class RandomSaltPepperBlur(object):
    """Set random pixels to pure black or white (salt-and-pepper noise)."""

    def __call__(self, image, prob=0.05):
        # Work on a numpy copy; image[i][j] is assigned a scalar, which
        # assumes a single-channel (grayscale) image -- TODO confirm for RGB.
        image = np.array(image)
        for i in range(image.shape[0]):
            for j in range(image.shape[1]):
                rdn = random.random()
                if rdn < prob:
                    # round(random()) is 0 or 1, so the pixel becomes 0 or 255.
                    image[i][j] = round(random.random()) * 255
        return Image.fromarray(image)
def Mnist(data_dir="data", input_size=(224, 224), train=True):
    """Return the MNIST dataset resized to ``input_size``.

    Training data gets augmentation (random rotation, Gaussian blur and
    salt-and-pepper noise); evaluation data is only resized.  The dataset is
    downloaded into ``data_dir`` when missing.
    """
    if train:
        tsf = transforms.Compose([
            transforms.RandomRotation(30),
            transforms.Resize(input_size),
            RandomGaussianBlur(),
            RandomSaltPepperBlur(),
            transforms.ToTensor(),
        ])
    else:
        tsf = transforms.Compose([
            transforms.Resize(input_size),
            transforms.ToTensor(),
        ])
    dataset = datasets.MNIST(data_dir,
                             train=train,
                             transform=tsf,
                             download=True)
    return dataset
if __name__ == '__main__':
    # Visual smoke test: display and dump the first few evaluation samples.
    import cv2
    from torch.utils.data import DataLoader
    dataset = Mnist(train=False)
    mm = DataLoader(dataset)
    for i, sample in enumerate(mm):
        if i > 5: break;
        # Drop the batch/channel dims back to a 224x224 grayscale array.
        img = sample[0].numpy().reshape((224, 224))
        plt.imshow(img)
        # ToTensor scales to [0, 1]; rescale for an 8-bit PNG.
        cv2.imwrite('demo/img_{}.png'.format(i), img*255)
        plt.show()
| TWSFar/GhostNet-MNIST | datasets/mnist.py | mnist.py | py | 1,738 | python | en | code | 5 | github-code | 36 |
# -*- coding: utf-8 -*-
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import re
from odoo import api, fields, models
class OFCalculationHeatLossLine(models.Model):
    """Appliance line attached to a heat-loss calculation."""
    _name = 'of.calculation.heat.loss.line'
    _description = u"Appareils compatibles pour la déperdition de chaleur"

    calculation_heat_loss_id = fields.Many2one(
        comodel_name='of.calculation.heat.loss', string=u"Calcul de déperdition de chaleur")
    product_id = fields.Many2one(comodel_name='product.template', string=u"Article", store=True)
    brand_name = fields.Many2one(
        comodel_name='of.product.brand', string=u"Marque", related='product_id.brand_id', store=True)
    list_price = fields.Float(string=u"Prix de vente", related="product_id.list_price")
    power_char = fields.Char(string=u"Puissance nominale", related="product_id.of_puissance_nom", store=True)
    power = fields.Float(string=u"Puissance nominale", compute='_compute_power')
    to_print = fields.Boolean(string=u"Impression", help=u"Activer / Désactiver l'impression")

    @api.depends('power_char')
    def _compute_power(self):
        """Parse the nominal power out of the free-text ``power_char`` field."""
        for line in self:
            try:
                # Extract the first number, accepting a comma decimal separator.
                line.power = re.findall(r"[-+]?(?:\d*\.*\d+)", line.power_char.replace(',', '.'))[0]
            except (AttributeError, IndexError, ValueError):
                # AttributeError: power_char is empty/False (no .replace);
                # IndexError: findall matched nothing (fixes a crash -- the
                # original only caught ValueError); ValueError: the extracted
                # text cannot be converted to a float.
                line.power = 0.0
| odof/openfire | of_calculation_heat_loss/models/of_calculation_heat_loss_line.py | of_calculation_heat_loss_line.py | py | 1,369 | python | en | code | 3 | github-code | 36 |
#
# This file is part of the Robotic Observatory Control Kit (rockit)
#
# rockit is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# rockit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with rockit. If not, see <http://www.gnu.org/licenses/>.
"""Constants and status codes used by domed"""
from rockit.common import TFmt
class CommandStatus:
    """Numeric return codes shared between the dome daemon and its clients."""

    # General error codes
    Succeeded = 0
    Failed = 1
    Blocked = 2
    HeartbeatTimedOut = 3
    HeartbeatCloseInProgress = 4
    HeartbeatUnavailable = 5
    HeartbeatInvalidTimeout = 6
    EngineeringModeRequiresHeartbeatDisabled = 7
    EngineeringModeActive = 8

    InvalidControlIP = 10

    _messages = {
        # General error codes
        1: 'error: command failed',
        2: 'error: another command is already running',
        10: 'error: command not accepted from this IP',

        # dome specific codes
        3: 'error: heartbeat monitor has tripped',
        4: 'error: heartbeat monitor is closing the dome',
        5: 'error: heartbeat monitor is not available',
        6: 'error: heartbeat timeout must be less than 120s',
        7: 'error: heartbeat monitor must be disabled before enabling engineering mode',
        8: 'error: dome is in engineering mode',

        -100: 'error: terminated by user',
        -101: 'error: unable to communicate with dome daemon'
    }

    @classmethod
    def message(cls, error_code):
        """Returns a human readable string describing an error code"""
        return cls._messages.get(
            error_code, f'error: Unknown error code {error_code}')
class DomeShutterStatus:
    """Status of the dome shutters"""
    Closed, Open, PartiallyOpen, Opening, Closing, HeartbeatMonitorForceClosing = range(6)

    _labels = {
        0: 'CLOSED',
        1: 'OPEN',
        2: 'PARTIALLY OPEN',
        3: 'OPENING',
        4: 'CLOSING',
        5: 'FORCE CLOSING',
    }

    _formats = {
        0: TFmt.Red + TFmt.Bold,
        1: TFmt.Green + TFmt.Bold,
        2: TFmt.Cyan + TFmt.Bold,
        3: TFmt.Yellow + TFmt.Bold,
        4: TFmt.Yellow + TFmt.Bold,
        5: TFmt.Red + TFmt.Bold,
    }

    @classmethod
    def label(cls, status, formatting=False):
        """
        Returns a human readable string describing a status
        Set formatting=true to enable terminal formatting characters
        """
        if formatting:
            # BUGFIX: the second membership test used to re-check _formats;
            # it must check _labels, since both dicts are indexed below.
            if status in cls._formats and status in cls._labels:
                return cls._formats[status] + cls._labels[status] + TFmt.Clear
            return TFmt.Red + TFmt.Bold + 'UNKNOWN' + TFmt.Clear

        if status in cls._labels:
            return cls._labels[status]
        return 'UNKNOWN'
class DomeHeartbeatStatus:
    """Status of the dome heartbeat monitoring"""
    Disabled, Active, TrippedClosing, TrippedIdle, Unavailable = range(5)

    _labels = {
        0: 'DISABLED',
        1: 'ACTIVE',
        2: 'CLOSING DOME',
        3: 'TRIPPED',
        4: 'UNAVAILABLE',
    }

    _formats = {
        0: TFmt.Bold,
        1: TFmt.Green + TFmt.Bold,
        2: TFmt.Red + TFmt.Bold,
        3: TFmt.Red + TFmt.Bold,
        4: TFmt.Yellow + TFmt.Bold,
    }

    @classmethod
    def label(cls, status, formatting=False):
        """
        Returns a human readable string describing a status
        Set formatting=true to enable terminal formatting characters
        """
        if formatting:
            # BUGFIX: the second membership test used to re-check _formats;
            # it must check _labels, since both dicts are indexed below.
            if status in cls._formats and status in cls._labels:
                return cls._formats[status] + cls._labels[status] + TFmt.Clear
            return TFmt.Red + TFmt.Bold + 'UNKNOWN' + TFmt.Clear

        if status in cls._labels:
            return cls._labels[status]
        return 'UNKNOWN'
| warwick-one-metre/domed | rockit/dome/constants.py | constants.py | py | 4,220 | python | en | code | 0 | github-code | 36 |
import json
import numpy as np
import matplotlib.pyplot as plt
def read_file():
    """Load the hero-skin JSON file and return its 'hero_skin_data' list."""
    # The original passed an anonymous open(...) to json.load and never
    # closed it; the context manager guarantees the handle is released.
    with open("./file/heroskin.json", 'r', encoding='utf-8') as fp:
        hero_list = json.load(fp)
    return hero_list["hero_skin_data"]
def get_hero_skin_count():
    """Return parallel lists of hero names and their skin counts."""
    hero_skin_data = read_file()
    hero_name_list = []
    hero_skin_count = []
    for hero_skin_item in hero_skin_data:
        hero_name_list.append(hero_skin_item["hero_name"])
        hero_skin_count.append(hero_skin_item["hero_skin_count"])
    # The original had an unreachable "pass" after this return; removed.
    return hero_name_list, hero_skin_count
def drawLine():
    """Plot hero skin counts as a bar chart with an overlaid line and mean."""
    # Render minus signs correctly.
    plt.rcParams['axes.unicode_minus'] = False
    # Use a CJK-capable font so the Chinese labels display.
    plt.rcParams['font.sans-serif'] = ['SimHei']
    # (This rcParams line is duplicated in the original; kept as-is.)
    plt.rcParams['axes.unicode_minus'] = False
    # Hero names and per-hero skin counts.
    hero_name_list, hero_skin_count = get_hero_skin_count()
    # Axis data arrays.
    x_text = hero_name_list
    y_text = hero_skin_count
    # Figure size.
    plt.figure(figsize=(20, 8), dpi=80)
    # Show a dashed grid.
    plt.grid(True, linestyle="--", color='gray', linewidth='0.5', axis='both')
    # Chart title.
    plt.title('英雄皮肤数据折线图')
    # Axis labels.
    plt.xlabel('英雄名称')
    plt.ylabel('皮肤数量')
    # Rotate the x tick labels so long names fit.
    plt.xticks(rotation=60, fontsize=9)
    # Limit the x range.
    plt.xlim(-0.5, 100)
    # Bar chart.
    bar = plt.bar(x=x_text, height=y_text, color='steelblue', alpha=0.8)
    # Line chart over the same data.
    line = plt.plot(x_text, y_text, color='red', linewidth=1, marker='o', markerfacecolor='salmon', markersize=3)
    # Numeric label above each point.
    for x, y in zip(x_text, y_text):
        plt.text(x, y, y, ha='center', va='bottom', fontsize=10)
    # Legend for the line series.
    plt.legend(line, ("英雄皮肤数量",), loc='upper left')
    # Horizontal reference line at the mean skin count.
    avg = np.mean(y_text)
    plt.axhline(y=avg, color="green")
    # Display the chart.
    plt.show()
def main():
    """Entry point: render the hero-skin chart."""
    drawLine()


if __name__ == '__main__':
    main()
from sokoban.fast_sokoban import extract_all_patterns
import numpy as np
class MinimalLocalForwardModel:
    """Predicts the next grid state cell-by-cell from local neighborhood
    patterns, optionally caching pattern -> prediction pairs."""

    def __init__(self, model, mask, span, remember_predictions=False):
        # model: trained classifier exposing predict(); mask: boolean
        # neighborhood mask; span: neighborhood radius in cells;
        # remember_predictions: cache per-pattern predictions across calls.
        self.model = model
        self.mask = mask
        self.span = span
        self.remember_predictions = remember_predictions
        if self.remember_predictions:
            self.known_predictions = dict()
        pass

    def extract_unknown_patterns(self, game_state, action, mask, span):
        """Collect the neighborhood patterns of ``game_state`` not yet cached.

        :param game_state: 2-D grid of the current state
        :param action: action appended to every pattern tuple
        :param mask: boolean neighborhood mask selecting pattern cells
        :param span: neighborhood radius in cells
        :return: (data_set, prediction_mask, result) -- the uncached pattern
            tuples, a boolean grid of cells still needing a model prediction,
            and the partial result grid filled from the cache.
        """
        prediction_mask = np.zeros(game_state.shape, dtype=np.bool)
        # NOTE(review): np.bool / np.int are removed in NumPy >= 1.24; use
        # plain bool/int on newer NumPy.
        result = np.zeros(game_state.shape, dtype=np.int)
        data_set = []
        # only iterate over positions that were affected by the game state's changes
        positions = [(x, y) for x in range(game_state.shape[0]) for y in range(game_state.shape[1])]
        # Pad with 1s so border neighborhoods read as "wall" outside the grid.
        ext_game_state_grid = np.pad(game_state, span, "constant", constant_values=1)
        for i, (x, y) in enumerate(positions):
            el = ext_game_state_grid[span + x - span: span + x + span + 1, span + y - span: span + y + span + 1][
                mask].tolist()
            el.append(action)
            el = tuple(el)
            if el in self.known_predictions:
                result[x, y] = self.known_predictions[el]
            else:
                prediction_mask[x, y] = 1
                data_set.append(el)
        return data_set, prediction_mask, result

    def predict(self, level, action):
        """Return the predicted next grid after applying ``action`` to ``level``."""
        if self.remember_predictions:
            data, prediction_mask, result = self.extract_unknown_patterns(level, action, self.mask, self.span)
            if len(data) > 0:
                # Only query the model for patterns missing from the cache,
                # then remember the new predictions.
                prediction = self.model.predict(data)
                result[prediction_mask] = prediction
                for el, pred in zip(data, prediction):
                    self.known_predictions[el] = pred
            return result
        else:
            data = extract_all_patterns(level, action, self.mask, self.span)
            return self.model.predict(data).reshape(level.shape)
| ADockhorn/Active-Forward-Model-Learning | activestateexploration/simple_lfm.py | simple_lfm.py | py | 2,206 | python | en | code | 0 | github-code | 36 |
23406422554 | # -*- coding: utf-8 -*-
import cottonformation as ctf
from cottonformation.res import iam, awslambda
# create a ``Template`` object to represent your cloudformation template
tpl = ctf.Template(
    Description="Aws Lambda Versioning Example",
)
# IAM execution role the Lambda service assumes; grants DynamoDB full access.
iam_role_for_lambda = iam.Role(
    "IamRoleForLambdaExecution",
    rp_AssumeRolePolicyDocument=ctf.helpers.iam.AssumeRolePolicyBuilder(
        ctf.helpers.iam.ServicePrincipal.awslambda()
    ).build(),
    p_RoleName="lbd-versioning-poc",
    p_ManagedPolicyArns=[
        ctf.helpers.iam.AwsManagedPolicy.AmazonDynamoDBFullAccess,
    ]
)
tpl.add(iam_role_for_lambda)
# Lambda function resource; deployment package is pre-uploaded to S3.
lbd_func = awslambda.Function(
    "LbdFuncVersioningPOC",
    rp_Code=awslambda.PropFunctionCode(
        p_S3Bucket="sanhe-admin-for-everything",
        p_S3Key="lambda/MacHu-GWU/lbd-versioning/066212d310fb9d829154d197be860d0f.zip",
    ),
    rp_Role=iam_role_for_lambda.rv_Arn,
    p_FunctionName="lbd-versioning-poc",
    p_MemorySize=256,
    p_Timeout=3,
    p_Runtime=ctf.helpers.awslambda.LambdaRuntime.python36,
    p_Handler="lbd_handler.main",
    # Ensure the role exists before the function is created.
    ra_DependsOn=iam_role_for_lambda,
    p_Tags=ctf.Tag.make_many(Stage="Dev", Description="Changed"),
)
tpl.add(lbd_func)
if __name__ == "__main__":
import boto3
boto_ses = boto3.session.Session(profile_name="sanhe")
env = ctf.Env(boto_ses=boto_ses)
env.deploy(
template=tpl,
stack_name="lbd-versioning-poc",
bucket_name="sanhe-admin-for-everything",
include_iam=True,
) | MacHu-GWU/Dev-Exp-Share | docs/source/01-AWS/01-All-AWS-Services-Root/01-Compute/02-AWS-Lambda-Root/05-Versioning/deploy.py | deploy.py | py | 1,506 | python | en | code | 3 | github-code | 36 |
1316799012 | # !pip install xlsxwriter
import pandas as pd

# Demo: write a DataFrame to Excel and apply conditional formatting
# (red for values >= 50, green for values < 50).
df = pd.DataFrame([{'foo': i, 'bar': 2 * i, 'baz': 3 * i} for i in range(76)])
writer = pd.ExcelWriter('sample_xl.xlsx', engine='xlsxwriter')
# Convert the dataframe to an XlsxWriter Excel object.
df.to_excel(writer, sheet_name='Sheet1')
# Get the xlsxwriter workbook and worksheet objects.
workbook = writer.book
worksheet = writer.sheets['Sheet1']
# Add some cell formats.
format1 = workbook.add_format({'bg_color': '#FFC7CE',
                               'font_color': '#9C0006'})
format2 = workbook.add_format({'bg_color': '#C6EFCE',
                               'font_color': '#006100'})
# How to add conditional formatting:
# Write a conditional format over a range.
worksheet.conditional_format('B2:D77', {'type': 'cell',
                                        'criteria': '>=',
                                        'value': 50,
                                        'format': format1})
worksheet.conditional_format('B2:D77', {'type': 'cell',
                                        'criteria': '<',
                                        'value': 50,
                                        'format': format2})
# Note: It isn't possible to format any cells that already have a format such
# as the index or headers or any cells that contain dates or datetimes.
# Close the Pandas Excel writer and output the Excel file.
# Fixed: ExcelWriter.save() was deprecated in pandas 1.5 and removed in 2.0;
# close() both writes and closes the workbook.
writer.close()
| rtahmasbi/Applications_Tempalete | P_ExcelWriter.py | P_ExcelWriter.py | py | 1,569 | python | en | code | 0 | github-code | 36 |
17966322508 | """Config files
"""
import logging
import os
import sys
from pathlib import Path
import torch
# --- Project paths (repo root is one level above this file's directory) ----
MAIN_PATH = Path(__file__).resolve().parents[1]
DATA_PATH = MAIN_PATH / "data"
DEPLOY_PATH = MAIN_PATH / "src" / "deploy"
ARTIFACT_PATH = MAIN_PATH / "artifacts"
# Prefer GPU when available.
DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# DataLoader worker count = number of CPU cores.
NUM_WORKERS = os.cpu_count()
# --- Training hyper-parameters ---------------------------------------------
EPOCHS = 20
LEARNING_RATE = 1e-4
EPS = 1e-8
BATCH_SIZE = 32
CLIP = 1.5
ALPHA = 1000.0
HIDDEN_SIZE = 2
### Class Translation
fmnist_classes = [
    "T-shirt/top",
    "Trouser",
    "Pullover",
    "Dress",
    "Coat",
    "Sandal",
    "Shirt",
    "Sneaker",
    "Bag",
    "Ankle boot",
]
cifar10_classes = [
    "airplane",
    "automobile",
    "bird",
    "cat",
    "deer",
    "dog",
    "frog",
    "horse",
    "ship",
    "truck",
]
# Per-dataset mapping of class label -> integer index.
CLASSES = {
    "mnist": {digit: digit for digit in range(10)},
    "fmnist": {label: idx for idx, label in enumerate(fmnist_classes)},
    "cifar10": {label: idx for idx, label in enumerate(cifar10_classes)},
}
### Model Params
# Extra constructor kwargs per model class name (empty dict = defaults).
MODEL_PARAMS = {
    "BaseVAE": {},
    "DeepVAE": {},
    "ConvVAE": {"kernel_size": 3},
    "BaseCVAE": {},
    "DeepCVAE": {},
    "ConvCVAE": {},
    "GAN": {},
    "CGAN": {},
    "ConvGAN": {},
    "ConvCGAN": {},
}
### Logging configurations
# Log to stdout and to artifacts/model_ckpt/logfile.log with one shared format.
LOGGER = logging.getLogger(__name__)
stream_handler = logging.StreamHandler(sys.stdout)
if not (ARTIFACT_PATH / "model_ckpt").exists():
    (ARTIFACT_PATH / "model_ckpt").mkdir(parents=True)
file_handler = logging.FileHandler(
    filename=str(ARTIFACT_PATH / "model_ckpt" / "logfile.log")
)
formatter = logging.Formatter("%(asctime)s:%(levelname)s: %(message)s")
file_handler.setFormatter(formatter)
stream_handler.setFormatter(formatter)
LOGGER.setLevel(logging.INFO)
LOGGER.addHandler(stream_handler)
LOGGER.addHandler(file_handler)
| benjaminlq/Image-Generation | src/config.py | config.py | py | 1,838 | python | en | code | 0 | github-code | 36 |
34181502633 | from typing import Dict, Optional, Type, Union, Callable, Any
from types import TracebackType
from functools import wraps
from threading import Lock
from qiskit_ibm_provider.utils.converters import hms_to_seconds
from qiskit_ibm_runtime import QiskitRuntimeService
from .runtime_job import RuntimeJob
from .utils.result_decoder import ResultDecoder
from .ibm_backend import IBMBackend
from .utils.default_session import set_cm_session
from .utils.deprecation import deprecate_arguments
def _active_session(func): # type: ignore
"""Decorator used to ensure the session is active."""
@wraps(func)
def _wrapper(self, *args, **kwargs): # type: ignore
if not self._active:
raise RuntimeError("The session is closed.")
return func(self, *args, **kwargs)
return _wrapper
class Session:
    """Class for creating a flexible Qiskit Runtime session.
    A Qiskit Runtime ``session`` allows you to group a collection of iterative calls to
    the quantum computer. A session is started when the first job within the session
    is started. Subsequent jobs within the session are prioritized by the scheduler.
    Data used within a session, such as transpiled circuits, is also cached to avoid
    unnecessary overhead.
    You can open a Qiskit Runtime session using this ``Session`` class and submit jobs
    to one or more primitives.
    For example::
        from qiskit.test.reference_circuits import ReferenceCircuits
        from qiskit_ibm_runtime import Sampler, Session, Options
        options = Options(optimization_level=3)
        with Session(backend="ibmq_qasm_simulator") as session:
            sampler = Sampler(session=session, options=options)
            job = sampler.run(ReferenceCircuits.bell())
            print(f"Sampler job ID: {job.job_id()}")
            print(f"Sampler job result: {job.result()}")
    """
    def __init__(
        self,
        service: Optional[QiskitRuntimeService] = None,
        backend: Optional[Union[str, IBMBackend]] = None,
        max_time: Optional[Union[int, str]] = None,
    ): # pylint: disable=line-too-long
        """Session constructor.
        Args:
            service: Optional instance of the ``QiskitRuntimeService`` class.
                If ``None``, the service associated with the backend, if known, is used.
                Otherwise ``QiskitRuntimeService()`` is used to initialize
                your default saved account.
            backend: Optional instance of :class:`qiskit_ibm_runtime.IBMBackend` class or
                string name of backend. An instance of :class:`qiskit_ibm_provider.IBMBackend` will not work.
                If not specified, a backend will be selected automatically (IBM Cloud channel only).
            max_time: (EXPERIMENTAL setting, can break between releases without warning)
                Maximum amount of time, a runtime session can be open before being
                forcibly closed. Can be specified as seconds (int) or a string like "2h 30m 40s".
                This value must be less than the
                `system imposed maximum
                <https://docs.quantum.ibm.com/run/max-execution-time>`_.
        Raises:
            ValueError: If an input value is invalid.
        """
        if service is None:
            # Fall back to the backend's service, then to the process-wide
            # global service, then to the default saved account.
            if isinstance(backend, IBMBackend):
                self._service = backend.service
            else:
                self._service = (
                    QiskitRuntimeService()
                    if QiskitRuntimeService.global_service is None
                    else QiskitRuntimeService.global_service
                )
        else:
            self._service = service
        if self._service.channel == "ibm_quantum" and not backend:
            raise ValueError('"backend" is required for ``ibm_quantum`` channel.')
        self._instance = None
        if isinstance(backend, IBMBackend):
            self._instance = backend._instance
            backend = backend.name
        self._backend = backend
        # Serializes submission of the session-starter job across threads
        # (see run()).
        self._setup_lock = Lock()
        self._session_id: Optional[str] = None
        self._active = True
        self._max_time = (
            max_time
            if max_time is None or isinstance(max_time, int)
            else hms_to_seconds(max_time, "Invalid max_time value: ")
        )
    @_active_session
    def run(
        self,
        program_id: str,
        inputs: Dict,
        options: Optional[Dict] = None,
        callback: Optional[Callable] = None,
        result_decoder: Optional[Type[ResultDecoder]] = None,
    ) -> RuntimeJob:
        """Run a program in the session.
        Args:
            program_id: Program ID.
            inputs: Program input parameters. These input values are passed
                to the runtime program.
            options: Runtime options that control the execution environment.
                See :class:`qiskit_ibm_runtime.RuntimeOptions` for all available options.
            callback: Callback function to be invoked for any interim results and final result.
            result_decoder: A ``ResultDecoder`` subclass used to decode job results.
        Returns:
            Submitted job.
        """
        options = options or {}
        if "instance" not in options:
            options["instance"] = self._instance
        options["backend"] = self._backend
        if not self._session_id:
            # Make sure only one thread can send the session starter job.
            self._setup_lock.acquire()
            # TODO: What happens if session max time != first job max time?
            # Use session max time if this is first job.
            options["session_time"] = self._max_time
        try:
            job = self._service.run(
                program_id=program_id,
                options=options,
                inputs=inputs,
                session_id=self._session_id,
                start_session=self._session_id is None,
                callback=callback,
                result_decoder=result_decoder,
            )
            if self._session_id is None:
                # The first job's ID doubles as the session ID.
                self._session_id = job.job_id()
        finally:
            if self._setup_lock.locked():
                self._setup_lock.release()
        # Record the backend actually used (cloud channel may auto-select).
        if self._backend is None:
            self._backend = job.backend().name
        return job
    def cancel(self) -> None:
        """Cancel all pending jobs in a session."""
        self._active = False
        if self._session_id:
            self._service._api_client.cancel_session(self._session_id)
    def close(self) -> None:
        """Close the session so new jobs will no longer be accepted, but existing
        queued or running jobs will run to completion. The session will be terminated once there
        are no more pending jobs."""
        self._active = False
        if self._session_id:
            self._service._api_client.close_session(self._session_id)
    def backend(self) -> Optional[str]:
        """Return backend for this session.
        Returns:
            Backend for this session. None if unknown.
        """
        return self._backend
    def status(self) -> Optional[str]:
        """Return current session status.
        Returns:
            The current status of the session, including:
            Pending: Session is created but not active.
            It will become active when the next job of this session is dequeued.
            In progress, accepting new jobs: session is active and accepting new jobs.
            In progress, not accepting new jobs: session is active and not accepting new jobs.
            Closed: max_time expired or session was explicitly closed.
            None: status details are not available.
        """
        details = self.details()
        if details:
            state = details["state"]
            accepting_jobs = details["accepting_jobs"]
            if state in ["open", "inactive"]:
                return "Pending"
            if state == "active" and accepting_jobs:
                return "In progress, accepting new jobs"
            if state == "active" and not accepting_jobs:
                return "In progress, not accepting new jobs"
            return state.capitalize()
        return None
    def details(self) -> Optional[Dict[str, Any]]:
        """Return session details.
        Returns:
            A dictionary with the sessions details, including:
            id: id of the session.
            backend_name: backend used for the session.
            interactive_timeout: The maximum idle time (in seconds) between jobs that
            is allowed to occur before the session is deactivated.
            max_time: Maximum allowed time (in seconds) for the session, subject to plan limits.
            active_timeout: The maximum time (in seconds) a session can stay active.
            state: State of the session - open, active, inactive, or closed.
            accepting_jobs: Whether or not the session is accepting jobs.
            last_job_started: Timestamp of when the last job in the session started.
            last_job_completed: Timestamp of when the last job in the session completed.
            started_at: Timestamp of when the session was started.
            closed_at: Timestamp of when the session was closed.
        """
        if self._session_id:
            response = self._service._api_client.session_details(self._session_id)
            if response:
                # Map the raw API field names onto the documented keys.
                return {
                    "id": response.get("id"),
                    "backend_name": response.get("backend_name"),
                    "interactive_timeout": response.get("interactive_ttl"),
                    "max_time": response.get("max_ttl"),
                    "active_timeout": response.get("active_ttl"),
                    "state": response.get("state"),
                    "accepting_jobs": response.get("accepting_jobs"),
                    "last_job_started": response.get("last_job_started"),
                    "last_job_completed": response.get("last_job_completed"),
                    "started_at": response.get("started_at"),
                    "closed_at": response.get("closed_at"),
                }
        return None
    @property
    def session_id(self) -> str:
        """Return the session ID.
        Returns:
            Session ID. None until a job runs in the session.
        """
        return self._session_id
    @property
    def service(self) -> QiskitRuntimeService:
        """Return service associated with this session.
        Returns:
            :class:`qiskit_ibm_runtime.QiskitRuntimeService` associated with this session.
        """
        return self._service
    @classmethod
    def from_id(
        cls,
        session_id: str,
        service: Optional[QiskitRuntimeService] = None,
        backend: Optional[Union[str, IBMBackend]] = None,
    ) -> "Session":
        """Construct a Session object with a given session_id
        Args:
            session_id: the id of the session to be created. This must be an already
                existing session id.
            service: instance of the ``QiskitRuntimeService`` class.
            backend: instance of :class:`qiskit_ibm_runtime.IBMBackend` class or
                string name of backend.
        Returns:
            A new Session with the given ``session_id``
        """
        if backend:
            deprecate_arguments("backend", "0.15.0", "Sessions do not support multiple backends.")
        session = cls(service, backend)
        session._session_id = session_id
        return session
    def __enter__(self) -> "Session":
        set_cm_session(self)
        return self
    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        # Leaving the `with` block closes the session unconditionally.
        set_cm_session(None)
        self.close()
| Qiskit/qiskit-ibm-runtime | qiskit_ibm_runtime/session.py | session.py | py | 11,847 | python | en | code | 106 | github-code | 36 |
def kerulet(r):
    """Return the circumference of a circle with radius ``r`` (pi approximated as 3.14)."""
    pi = 3.14
    diameter = 2 * r
    return diameter * pi
def terulet(r):
    """Return the area of a circle with radius ``r`` (pi approximated as 3.14)."""
    pi = 3.14
    squared = r ** 2
    return squared * pi
# Read the radius from stdin and report circumference and area (Hungarian UI).
sugar = float(input("A kör sugara: "))
K = kerulet(sugar)
T = terulet(sugar)
print("A kör kerülete: {0} egység.".format(K))
print("A kör területe: {0} négyzetegység.".format(T)) | martinez7200/progalapok | kor_kerulet.py | kor_kerulet.py | py | 294 | python | hu | code | 0 | github-code | 36 |
26967199764 | from flask import Flask, jsonify, request, make_response
from functions import calculate_penalty,possession
app = Flask(__name__)
@app.route('/')
def index():
    """Landing page: plain informational text."""
    return 'This is index page'
@app.route('/penalties', methods={'POST'})
def getpenalities():
    """POST /penalties: compute the penalty for the submitted drug class,
    culpability and harm level (form fields); returns a JSON envelope."""
    drug_class=request.form.get('drug_class')
    culpability = request.form.get('culpability')
    harm = request.form.get('harm')
    # NOTE(review): debug print of raw form inputs; consider logging instead.
    print((drug_class,culpability,harm))
    result = calculate_penalty(drug_class,culpability,int(harm))
    return jsonify( response = result, status=200,message="success" )
@app.route('/possession_of_a_drug', methods={'POST'})
def possession_of_a_drugapi():
    """POST /possession_of_a_drug: look up the possession penalty for the
    submitted drug class; returns a JSON envelope."""
    drug_class = request.form.get('drug_class')
    result = possession(drug_class)
    return jsonify(response=result, status=200, message="success")
if __name__ == '__main__':
    # NOTE(review): debug=True is for local development only.
    app.run(debug=True)
| Mubashar2014/penalties | main.py | main.py | py | 899 | python | en | code | 0 | github-code | 36 |
7499185502 |
"""
Created on Sat Oct 20 16:01:38 2018
@author: Varun
"""
import numpy as np
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense
from keras import applications
import shutil
#reads in csv file
df = pd.read_csv("data/train_target.csv")
df1 = df['Id']
# NOTE(review): testLabels/validateLabels/jpg/j are never used below.
testLabels = []
validateLabels = []
jpg = []
i = 0
j = 0
#keeps track of number of files
trainMale = 0
trainFemale = 0
# NOTE(review): validateMale/validateFemale are never updated.
validateMale = 0
validateFemale = 0
leastGender = 0
#separates data into two Male/ Female Directories
# Hard-coded dataset size of 28360 rows -- confirm against train_target.csv.
while i < 28360:
    print(df1.iloc[i])
    imageName = ('data/train/' + df1.iloc[i])
    # Column 1 of the row is the target (1 = male, else female).
    testTarget = df.iloc[i][1]
    if (testTarget == 1):
        trainMale = trainMale +1
        shutil.copy(imageName, "data/Split/Male/Male" + str (trainMale) + ".jpg" )
    else:
        trainFemale = trainFemale +1
        shutil.copy(imageName, "data/Split/Female/Female" + str (trainFemale) + ".jpg" )
    i = i+1
# Balance the classes on the smaller gender count.
if(trainMale > trainFemale):
    leastGender = trainFemale
else:
    leastGender = trainMale
print(leastGender)
k = 1
# First pass: move matched male/female pairs; last 2000 go to validation.
while(k<leastGender +1):
    genderSwitch = 0
    imageName = ("data/Split/")
    if(k < 1 +leastGender - 2000):
        # NOTE(review): the first branch sets genderSwitch to 1, so the second
        # `if` always runs too -- both files are moved every iteration. If an
        # alternation was intended this should be `elif`; confirm intent.
        if(genderSwitch == 0):
            shutil.move(imageName+"Male/Male"+ str (k) + ".jpg", "data/Ultimate_train/Male/Male" + str (k) + ".jpg" )
            genderSwitch = 1
        if(genderSwitch == 1):
            shutil.move(imageName+"Female/Female"+ str (k) + ".jpg", "data/Ultimate_train/Female/Female" + str (k) + ".jpg" )
            genderSwitch = 0
    else:
        if(genderSwitch == 0):
            shutil.move(imageName+"Male/Male"+ str (k) + ".jpg", "data/Ultimate_validation/Male/Male" + str (k) + ".jpg" )
            genderSwitch = 1
        if(genderSwitch == 1):
            shutil.move(imageName+"Female/Female"+ str (k) + ".jpg", "data/Ultimate_validation/Female/Female" + str (k) + ".jpg" )
            genderSwitch = 0
    k = k+1
FemCounter = 1;
# Second pass: pair remaining males with duplicated female images.
while(k<(leastGender*2) +1):
    imageName = ("data/Split/")
    if(k < (leastGender*2) - 2000 +1):
        shutil.move(imageName+"Male/Male"+ str (k) + ".jpg", "data/Ultimate_train/Male/Male" + str (k) + ".jpg" )
        shutil.copy("data/training_set1/Female/Female" + str (FemCounter) + ".jpg", "data/Ultimate_train/Female/Female" + str (k) + ".jpg" )
    else:
        shutil.move(imageName+"Male/Male"+ str (k) + ".jpg", "data/Ultimate_validation/Male/Male" + str (k) + ".jpg" )
        shutil.copy("data/validation_set1/Female/Female" + str (FemCounter) + ".jpg", "data/Ultimate_validation/Female/Female" + str (k) + ".jpg" )
    k = k+1
    FemCounter = FemCounter +1
FemCounter = 1;
# Third pass: remaining males all go to the training split.
while(k < (leastGender*3) - 2000 +1):
    shutil.move(imageName+"Male/Male"+ str (k) + ".jpg", "data/Ultimate_train/Male/Male" + str (k) + ".jpg" )
    shutil.copy("data/training_set1/Female/Female" + str (FemCounter) + ".jpg", "data/Ultimate_train/Female/Female" + str (k) + ".jpg" )
    k = k+1
    FemCounter = FemCounter +1
| varunvenkitachalam/GenderDetector_MachineLearning | UltimateGenderSplit.py | UltimateGenderSplit.py | py | 3,040 | python | en | code | 0 | github-code | 36 |
70970697063 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 27 07:57:10 2020
@author: KANNAN
"""
from flask import Flask, render_template, request
import pandas as pd
#import sklearn
import pickle
# Load the trained random-forest model once at startup. Use a context manager
# so the file handle is closed (the old `pickle.load(open(...))` leaked it).
with open("flight_rf.pkl", "rb") as _model_file:
    model = pickle.load(_model_file)
app = Flask(__name__)
@app.route('/')
def home():
    """Render the empty fare-prediction form."""
    return render_template('Airlines.html')
# One-hot category orderings. The order of each list MUST match the feature
# column order the pickled model was trained with (see feature vector below).
AIRLINE_CATEGORIES = [
    "Air India", "GoAir", "IndiGo", "Jet Airways", "Jet Airways Business",
    "Multiple carriers", "Multiple carriers Premium economy", "SpiceJet",
    "Trujet", "Vistara", "Vistara Premium economy",
]
SOURCE_CATEGORIES = ["Chennai", "Kolkata", "Mumbai", "New Delhi"]
DESTINATION_CATEGORIES = ["Cochin", "Hyderabad", "Kolkata", "New Delhi"]


def _one_hot(value, categories):
    """Return a one-hot list for ``value`` over ``categories`` (all zeros if unknown)."""
    return [1 if value == category else 0 for category in categories]


@app.route('/Predict', methods = ["GET", "POST"])
def Predict():
    """Handle the fare-prediction form.

    GET renders the empty form; POST parses the submitted trip details,
    builds the model's feature vector and renders the predicted fare.
    """
    if request.method == "POST":
        # Departure and arrival timestamps from the HTML datetime-local inputs.
        dep = pd.to_datetime(request.form["Departure_Date"], format="%Y-%m-%dT%H:%M")
        arrival = pd.to_datetime(request.form["Arrival_Date"], format="%Y-%m-%dT%H:%M")
        # Trip duration. Fixed: the old code passed a timedelta to
        # pd.to_datetime(), which raises on current pandas; timedelta.seconds
        # yields the same within-day hour/minute components directly.
        diff = arrival.to_pydatetime() - dep.to_pydatetime()
        duration_hour = diff.seconds // 3600
        duration_minute = (diff.seconds % 3600) // 60
        # Categorical inputs -> one-hot features (replaces the old 300-line
        # if/elif chains; unknown values yield all-zero vectors as before).
        airline_features = _one_hot(request.form["Airline"], AIRLINE_CATEGORIES)
        source_features = _one_hot(request.form["Source"], SOURCE_CATEGORIES)
        destination_features = _one_hot(request.form["Destination"], DESTINATION_CATEGORIES)
        # Total_Stops
        stops = int(request.form["Total_Stops"])
        # Feature vector in the exact order the model expects.
        features = (
            [stops,
             dep.day,
             dep.month,
             dep.hour,
             dep.minute,
             arrival.hour,
             arrival.minute,
             duration_hour,
             duration_minute]
            + airline_features
            + source_features
            + destination_features
        )
        prediction = model.predict([features])
        output = round(prediction[0], 2)
        return render_template('Airlines.html', prediction_text = "Your Flight Fare is {} INR".format(output))
    return render_template('Airlines.html')
if __name__ == '__main__':
    # Development server; use a WSGI server in production.
    app.run()
| GuruYohesh/ML | Flight Fare Prediction/app.py | app.py | py | 9,556 | python | en | code | 0 | github-code | 36 |
12487085880 | """
Lists Basics - More Exercises
Check your code: https://judge.softuni.bg/Contests/Practice/Index/1726#3
SUPyF2 Lists More Exercise - 04. Battle Ships
Problem:
You will be given a number n representing the number of rows of the field.
On the next n lines you will receive each row of the field as a string with numbers separated by a space.
Each number greater than zero represents a ship with a health equal to the number value.
After that you will receive the squares that are being attacked in the format: "{row}-{col} {row}-{col}".
Each time a square is being attacked, if there is a ship there (number greater than 0) you should reduce its value.
After the attacks have ended, print how many ships were destroyed (if its value has reached zero)
Example:
Input:
3
1 0 0 1
2 0 0 0
0 3 0 1
0-0 1-0 2-1 2-1 2-1 1-1 2-1
Output:
2
"""
# Read the field: `rows` lines of space-separated ship health values.
rows = int(input())
all_ships = []
count_destroyed_ships = 0
for row in range(rows):
    all_ships += [[int(ship) for ship in input().split()]]
# Attacks come as "row-col" tokens separated by spaces.
attacks = input().split(" ")
for attack in attacks:
    attack = attack.split("-")
    row = int(attack[0])
    col = int(attack[1])
    # A positive cell is a live ship: reduce its health; count it when it
    # reaches zero (destroyed).
    if all_ships[row][col] > 0:
        all_ships[row][col] -= 1
        if all_ships[row][col] == 0:
            count_destroyed_ships += 1
print(count_destroyed_ships)
| SimeonTsvetanov/Coding-Lessons | SoftUni Lessons/Python Development/Python Fundamentals September 2019/Problems And Files/10 EXERCISE LISTS BASICS - Дата 4-ти октомври, 1430 - 1730/More Exercises/04. Battle Ships.py | 04. Battle Ships.py | py | 1,335 | python | en | code | 9 | github-code | 36 |
27053438479 | import pytest
from subarraySum import Solution
# Parametrized cases: (input array, target sum k, expected subarray count).
@pytest.mark.parametrize("nums, k, expected", [
    ([], 2, 0),
    ([1, 1, 1], 2, 2),
    ([1, 1, 1, 1], 2, 3)
])
def test_subarraySum(nums, k, expected):
    """Solution.subarraySum should count contiguous subarrays summing to k."""
    actual = Solution().subarraySum(nums, k)
    assert actual == expected
| ikedaosushi/leetcode | problems/python/tests/test_subarraySum.py | test_subarraySum.py | py | 280 | python | en | code | 1 | github-code | 36 |
3202338276 | import numpy as np
import sys
import time
sys.path.append("Interface/python/")
from init import NeuronLayerBox
import cv2
def rgb2gray(rgb):
    """Convert an (..., 3+) RGB array to grayscale with BT.601 luma weights."""
    red, green, blue = rgb[..., 0], rgb[..., 1], rgb[..., 2]
    return red * 0.299 + green * 0.587 + blue * 0.114
if __name__ == '__main__':
    # Build the network: 1 ms step, model variant 1, rate-coded (spike=0),
    # no restored checkpoint.
    NLB=NeuronLayerBox(step_ms=1,model=1,spike=0,restore=0)
    input_src=[]
    # Load the stimulus image and convert to integer grayscale.
    img=cv2.imread("load_data/input.bmp")
    img=rgb2gray(img).astype(int)
    input_src.append(img)
    # Warm up, then present the input.
    NLB.step(20)
    NLB.input(input_src)
    for i in range(50):
        NLB.step(5)
        # Normalize layer '11' activity to 0-255 for display; the epsilon in
        # max() avoids division by zero. ('11' is presumably the output layer
        # id -- confirm against the NeuronLayerBox model definition.)
        a=(NLB.output()['11']/max(np.max(NLB.output()['11']),0.0000001))*255
        cv2.imshow("1.jpg",a)
        cv2.waitKey(1)
    time.sleep(10)
    # Persist learned state, then shut down.
    NLB.save()
    NLB.exit()
| Megatron2032/NeuronLayerBox | NeuronLayerBox1.1/main.py | main.py | py | 641 | python | en | code | 0 | github-code | 36 |
18297590625 | import csv
import json
BATCH_ID='4574137'
ROUND='1'
# HIT ids already judged wrong, one per line.
# NOTE(review): .format() here is a no-op -- the literal has no placeholder.
with open('wrong_hit.txt'.format(BATCH_ID)) as f:
    wrong_hit = f.read().splitlines()
# print(len(wrong_hit))
save_data = []
csv_columns = []
# NOTE(review): `all` shadows the builtin; counters for submitted/rejected rows.
all = 0
wrong = 0
with open('csv/Batch_{}_batch_results{}.csv'.format(BATCH_ID, '_'+ROUND if ROUND != '0' else ''), newline='') as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        if row['AssignmentStatus'] != 'Submitted':
            continue
        all += 1
        time_missing = False
        label_missing = False
        # Up to 5 annotations per HIT; each needs a verb/object label pair
        # and a start/end time pair (len > 2 treats "[]"-style empties as blank).
        for i in range(1, 6):
            label_exist = len(row["Answer.verb{}".format(i)]) > 2 and len(row["Answer.object{}".format(i)]) > 2
            time_exist = len(row["Answer.start{}".format(i)]) > 2 and len(row["Answer.end{}".format(i)]) > 2
            # print(row["Answer.verb{}".format(i)], row["Answer.object{}".format(i)], row["Answer.start{}".format(i)], row["Answer.end{}".format(i)])
            # print(label_exist, time_exist)
            if (label_exist == False and time_exist == False):
                continue
            # Label present without times (or vice versa) -> incomplete.
            if (label_exist != time_exist):
                label_missing = True
                break
            # Each annotation may have up to 4 time segments ("morethan" boxes).
            for j in range (1, 5):
                index = ["zero", "one", "two", "three"]
                if j == 1:
                    start = row["Answer.start{}".format(i)]
                    end = row["Answer.end{}".format(i)]
                else:
                    # NOTE(review): bare except silently treats a missing
                    # column as "no more segments" -- intentional but broad.
                    try:
                        if row["Answer.morethan{}{}".format(index[j-1], i)] != "on":
                            continue
                        start = row["Answer.start{}_{}".format(i,j)]
                        end = row["Answer.end{}_{}".format(i,j)]
                    except:
                        break
                if (len(start) > 2) != (len(end) > 2):
                    time_missing = True
                    break
        # Fill MTurk's Approve/Reject review columns.
        if time_missing or label_missing:
            row['Reject']='Incomplete annotations'
            wrong += 1
        elif row['HITId'] in wrong_hit:
            row['Reject']='Wrong annotations'
            wrong += 1
        else:
            row['Approve']='x'
        save_data.append(row)
        csv_columns = row.keys()
print('Wrong annos {}/{}={:.2%}'.format(wrong, all, wrong/all))
# Write the reviewed batch back out for upload.
with open('csv/rev_{}.csv'.format(BATCH_ID+"_"+ROUND if ROUND != '0' else BATCH_ID), 'w') as save_csv:
    writer = csv.DictWriter(save_csv, fieldnames=csv_columns)
    writer.writeheader()
    for row in save_data:
writer.writerow(row) | BernieZhu/Trashbot-Dataset | tools/review_batch.py | review_batch.py | py | 2,552 | python | en | code | 0 | github-code | 36 |
9997089912 | import sys
import codecs
def readFile(tweetFileName,classFileName) :
classMap = dict()
fin1 = codecs.open(tweetFileName)
fin2 = codecs.open(classFileName)
for inputline1 in fin1 :
inputline1 = inputline1.strip('\r\n')
inputline2 = fin2.next().strip('\r\n')
if inputline2 not in classMap :
classMap[inputline2] = []
classMap[inputline2].append(inputline1)
else :
if(len(classMap[inputline2]) < 750) :
classMap[inputline2].append(inputline1)
fin1.close()
fin2.close()
fout1 = codecs.open('newinput_step5-T.txt','w')
fout2 = codecs.open('newclassFileName-T.txt','w')
for key in classMap :
for element in classMap[key] :
fout1.write(element + '\r\n')
fout2.write(key + '\r\n')
fout1.close()
fout2.close()
if __name__ == '__main__' :
    # Default input locations: tweets and their class labels, one per line.
    tweetFileName = '../data/input_step5-T.txt'
    classFileName = '../data/classFileName-T.txt'
    readFile(tweetFileName,classFileName)
| satheeshkumark/NLP | Sentiment_Analysis/scripts/formEqualDistribution.py | formEqualDistribution.py | py | 905 | python | en | code | 0 | github-code | 36 |
17440403303 | from tensorflow.keras import layers, models
import glob
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from datetime import datetime
width = 75
height = 100
channel = 1
def load_data():
images = np.array([]).reshape(0, height, width)
labels = np.array([])
dictionary = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'A': 10,
'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15, 'G': 16, 'H': 17, 'I': 18, 'J': 19, 'K': 20,
'L': 21, 'M': 22, 'N': 23, 'P': 24, 'Q': 25, 'R': 26, 'S': 27, 'T': 28, 'U': 29, 'V': 30,
'W': 31, 'X': 32, 'Y': 33, 'Z': 34}
directories = [directory for directory in glob.glob('datasets/BelgianLicencePlates/TrainLetters/*')]
for directory in directories:
file_list = glob.glob(directory + '/*.jpg')
sub_images = np.array([np.array(Image.open(file_name)) for file_name in file_list])
sub_labels = [dictionary[directory[-1]]] * len(sub_images)
images = np.append(images, sub_images, axis=0)
labels = np.append(labels, sub_labels, axis=0)
x_train, x_test, y_train, y_test = train_test_split(images, labels, test_size=0.2, random_state=42, shuffle=True)
return (x_train, y_train), (x_test, y_test)
(train_images, train_labels), (test_images, test_labels) = load_data()
train_images = train_images.reshape((train_images.shape[0], height, width, channel))
test_images = test_images.reshape((test_images.shape[0], height, width, channel))
train_images, test_images = train_images / 255.0, test_images / 255.0
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(height, width, channel)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(35, activation='softmax'))
start = datetime.now().replace(microsecond=0)
model.summary()
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=8)
end = datetime.now().replace(microsecond=0)
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy: ', test_acc)
print('Test loss: ', test_loss)
print('Training duration: ', (end - start))
model.save('models/character_recognition_cnn.h5')
print('> Saved model to disk <')
| NikolaBrodic/VehicleLicencePlateAndLogoRecognition | character_recognition_cnn.py | character_recognition_cnn.py | py | 2,599 | python | en | code | 1 | github-code | 36 |
41551040241 | import cv2
import os
import numpy as np
import json
import mmcv
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import matplotlib.pyplot as plt
from glob import glob
import ast
from mmrotate.core import poly2obb_np
def poly2obb_np_oc(poly):
"""Convert polygons to oriented bounding boxes.
Args:
polys (ndarray): [x0,y0,x1,y1,x2,y2,x3,y3]
Returns:
obbs (ndarray): [x_ctr,y_ctr,w,h,angle]
"""
bboxps = np.array(poly).reshape((4, 2))
rbbox = cv2.minAreaRect(bboxps)
x, y, w, h, a = rbbox[0][0], rbbox[0][1], rbbox[1][0], rbbox[1][1], rbbox[
2]
if w < 2 or h < 2:
return
while not 0 < a <= 90:
if a == -90:
a += 180
else:
a += 90
w, h = h, w
a = a / 180 * np.pi
assert 0 < a <= np.pi / 2
return x, y, w, h, a
def _get_adaptive_scales(areas, min_area=800, max_area=30000):
"""Get adaptive scales according to areas.
The scale range is [0.5, 1.0]. When the area is less than
``'min_area'``, the scale is 0.5 while the area is larger than
``'max_area'``, the scale is 1.0.
Args:
areas (ndarray): The areas of bboxes or masks with the
shape of (n, ).
min_area (int): Lower bound areas for adaptive scales.
Default: 800.
max_area (int): Upper bound areas for adaptive scales.
Default: 30000.
Returns:
ndarray: The adaotive scales with the shape of (n, ).
"""
scales = 0.5 + (areas - min_area) / (max_area - min_area)
scales = np.clip(scales, 0.5, 1.0)
return scales
def draw_rbboxes(ax, bboxes, color='g', alpha=0.3, thickness=2):
"""Draw oriented bounding boxes on the axes.
Args:
ax (matplotlib.Axes): The input axes.
bboxes (ndarray): The input bounding boxes with the shape
of (n, 5).
color (list[tuple] | matplotlib.color): the colors for each
bounding boxes.
alpha (float): Transparency of bounding boxes. Default: 0.8.
thickness (int): Thickness of lines. Default: 2.
Returns:
matplotlib.Axes: The result axes.
"""
polygons = []
for i, bbox in enumerate(bboxes):
xc, yc, w, h, ag = bbox[:5]
wx, wy = w / 2 * np.cos(ag), w / 2 * np.sin(ag)
hx, hy = -h / 2 * np.sin(ag), h / 2 * np.cos(ag)
p1 = (xc - wx - hx, yc - wy - hy)
p2 = (xc + wx - hx, yc + wy - hy)
p3 = (xc + wx + hx, yc + wy + hy)
p4 = (xc - wx + hx, yc - wy + hy)
poly = np.int0(np.array([p1, p2, p3, p4]))
polygons.append(Polygon(poly))
p = PatchCollection(
polygons,
facecolor='none',
edgecolors=color,
linewidths=thickness,
alpha=alpha)
ax.add_collection(p)
return ax
# arirang json to txt
ann_path = '/data/2_data_server/cv_data/arirang/validate_objects_labeling_json/'
# out_path = '/data/2_data_server/cv_data/arirang/val/annfiles/'
# ann_path = '/data/2_data_server/cv_data/arirang/validate_objects_labeling_json/'
label_test = []
for i in glob(ann_path+'*.json'):
# print(i)
with open(i) as f:
json_data = json.load(f)
img_id = json_data['features'][0]['properties']['image_id']
for j in range(len(json_data['features'])):
bbox_info = json_data['features'][j]['properties']['object_imcoords']
bbox_info = ast.literal_eval(bbox_info)
bbox_info = list(bbox_info)
bbox_label = json_data['features'][j]['properties']['type_name'].replace(" ","-")
bbox_id = json_data['features'][j]['properties']['type_id']
if bbox_label == "military-aircraft":
print(img_id)
exit()
if label_test == []:
# label_test.append(bbox_id)
label_test.append(bbox_label)
if bbox_label not in label_test:
# label_test.append(bbox_id)
label_test.append(bbox_label)
# first [:4] 지운 후
# if j == 0:
# with open(out_path+img_id[:-4]+'.txt',"w") as (fw):
# for k in range(len(bbox_info)):
# fw.write(str(int(bbox_info[k])))
# fw.write(" ")
# # fw.write(bbox_info)
# # fw.write(" ")
# fw.write(bbox_label)
# fw.write(" ")
# fw.write("0\n")
# else:
# with open(out_path+img_id[:-4]+'.txt',"a") as (fw):
# for k in range(len(bbox_info)):
# fw.write(str(int(bbox_info[k])))
# fw.write(" ")
# # fw.write(bbox_info)
# # fw.write(" ")
# fw.write(bbox_label)
# fw.write(" ")
# fw.write("0\n")
# aitod json to txt
# ann_path = '/data/2_data_server/cv_data/ai_todv2/aitodv2_train.json'
# out_path = '/data/2_data_server/cv_data/ai_todv2/train/annfiles/'
# # ann_path = '/data/2_data_server/cv_data/arirang/validate_objects_labeling_json/'
# label_test = []
# for i in glob(ann_path):
# # print(i)
# with open(i) as f:
# json_data = json.load(f)
# img_id = json_data['features'][0]['properties']['image_id']
# for j in range(len(json_data['features'])):
# bbox_info = json_data['features'][j]['properties']['object_imcoords']
# bbox_info = ast.literal_eval(bbox_info)
# bbox_info = list(bbox_info)
# bbox_label = json_data['features'][j]['properties']['type_name'].replace(" ","-")
# # bbox_id = json_data['features'][j]['properties']['type_id']
# # if label_test == []:
# # # label_test.append(bbox_id)
# # label_test.append(bbox_label)
# # if bbox_label not in label_test:
# # # label_test.append(bbox_id)
# # label_test.append(bbox_label)
# # first [:4] 지운 후
# if j == 0:
# with open(out_path+img_id[:-4]+'.txt',"w") as (fw):
# for k in range(len(bbox_info)):
# fw.write(str(int(bbox_info[k])))
# fw.write(" ")
# # fw.write(bbox_info)
# # fw.write(" ")
# fw.write(bbox_label)
# fw.write(" ")
# fw.write("0\n")
# else:
# with open(out_path+img_id[:-4]+'.txt',"a") as (fw):
# for k in range(len(bbox_info)):
# fw.write(str(int(bbox_info[k])))
# fw.write(" ")
# # fw.write(bbox_info)
# # fw.write(" ")
# fw.write(bbox_label)
# fw.write(" ")
# fw.write("0\n")
# min,max 출력
# ann_path = '/data/2_data_server/cv_data/arirang_split/train_ms/annfiles/'
# num_min = 100000
# num_max = 0
# num_total = 0
# for i in glob(ann_path+'*.txt'):
# # print(i)
# num_lines = sum(1 for line in open(i))
# num_min = min(num_lines, num_min)
# num_max = max(num_lines, num_max)
# if num_max == 1891:
# print(i)
# exit()
# print(num_min,num_max)
# # gt 개수
# ann_path = '/data/2_data_server/cv_data/arirang_split/val_ms/annfiles/'
# # CLASSES = ('small-ship', 'large-ship', 'civilian-aircraft', 'military-aircraft', 'small-car', 'bus', 'truck', 'train', 'crane', 'bridge',
# # 'oil-tank', 'dam', 'outdoor-playground', 'helipad', 'roundabout', 'indoor-playground','helicopter','individual-container','grouped-container','swimming-pool','etc')
# CLASSES = ('small-ship', 'large-ship', 'civilian-aircraft', 'military-aircraft', 'small-car', 'bus', 'truck', 'train')
# label_cnt = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
# cls_map = {c: i
# for i, c in enumerate(CLASSES)
# }
# for i in glob(ann_path+'*.txt'):
# # print(i)
# f = open(i,"r")
# lines = f.readlines()
# for line in lines:
# label = line.split()
# cls_name = label[8]
# if cls_name == 'military-aircraft':
# print(i)
# exit()
# # label = cls_map[cls_name]
# # label_cnt[label] = label_cnt[label] + 1
# # print(label_cnt)
##### size check
# ann_path = '/data/2_data_server/cv_data/arirang/validate_objects_labeling_json/'
# # out_path = '/data/2_data_server/cv_data/arirang/val/annfiles/'
# # ann_path = '/data/2_data_server/cv_data/arirang/validate_objects_labeling_json/'
# label_test = []
# c1 = 0
# c2 = 0
# c3 = 0
# c4 = 0
# c5 = 0
# c6 = 0
# c7 = 0
# c8 = 0
# c9 = 0
# c10 = 0
# c11 = 0
# c12 = 0
# c13 = 0
# c14 = 0
# c15 = 0
# c16 = 0
# c17 = 0
# c18 = 0
# c19 = 0
# c20 = 0
# c21 = 0
# c1_num = 0
# c2_num = 0
# c3_num = 0
# c4_num = 0
# c5_num = 0
# c6_num = 0
# c7_num = 0
# c8_num = 0
# c9_num = 0
# c10_num = 0
# c11_num = 0
# c12_num = 0
# c13_num = 0
# c14_num = 0
# c15_num = 0
# c16_num = 0
# c17_num = 0
# c18_num = 0
# c19_num = 0
# c20_num = 0
# c21_num = 0
# for i in glob(ann_path+'*.json'):
# # print(i)
# with open(i) as f:
# json_data = json.load(f)
# for j in range(len(json_data['features'])):
# bbox_info = json_data['features'][j]['properties']['object_imcoords']
# bbox_info = ast.literal_eval(bbox_info)
# poly = np.array(bbox_info,dtype=np.float32)
# poly = poly2obb_np(poly)
# if poly is not None:
# w = poly[2]
# h = poly[3]
# area = w*h
# # area =
# bbox_label = json_data['features'][j]['properties']['type_name'].replace(" ","-")
# if bbox_label =="small-ship":
# c1 += 1
# c1_num += area
# if bbox_label =="large-ship":
# c2 += 1
# c2_num += area
# if bbox_label =="civilian-aircraft":
# c3 += 1
# c3_num += area
# if bbox_label =="military-aircraft":
# c4 += 1
# c4_num += area
# if bbox_label =="small-car":
# c5 += 1
# c5_num += area
# if bbox_label =="bus":
# c6 += 1
# c6_num += area
# if bbox_label =="truck":
# c7 += 1
# c7_num += area
# if bbox_label =="train":
# c8 += 1
# c8_num += area
# if bbox_label =="crane":
# c9 += 1
# c9_num += area
# if bbox_label =="bridge":
# c10 += 1
# c10_num += area
# if bbox_label =="oil-tank":
# c11 += 1
# c11_num += area
# if bbox_label =="dam":
# c12 += 1
# c12_num += area
# if bbox_label =="outdoor-playground":
# c13 += 1
# c13_num += area
# if bbox_label =="helipad":
# c14 += 1
# c14_num += area
# if bbox_label =="roundabout":
# c15 += 1
# c15_num += area
# if bbox_label =="indoor-playground":
# c16 += 1
# c16_num += area
# if bbox_label =="helicopter":
# c17 += 1
# c17_num += area
# if bbox_label =="individual-container":
# c18 += 1
# c18_num += area
# if bbox_label =="grouped-container":
# c19 += 1
# c19_num += area
# if bbox_label =="swimming-pool":
# c20 += 1
# c20_num += area
# print("c1------")
# print(c1,c1_num)
# print("------")
# print("c2------")
# print(c2,c2_num)
# print("------")
# print("c3------")
# print(c3,c3_num)
# print("------")
# print("c4------")
# print(c4,c4_num)
# print("------")
# print("c5------")
# print(c5,c5_num)
# print("------")
# print("c6------")
# print(c6,c6_num)
# print("------")
# print("c7------")
# print(c7,c7_num)
# print("------")
# print("c8------")
# print(c8,c8_num)
# print("------")
# print("c9------")
# print(c9,c9_num)
# print("------")
# print("c10------")
# print(c10,c10_num)
# print("------")
# print("c11------")
# print(c11,c11_num)
# print("------")
# print("c12------")
# print(c12,c12_num)
# print("------")
# print("c13------")
# print(c13,c13_num)
# print("------")
# print("c14------")
# print(c14,c14_num)
# print("------")
# print("c15------")
# print(c15,c15_num)
# print("------")
# print("c16------")
# print(c16,c16_num)
# print("------")
# print("c17------")
# print(c17,c17_num)
# print("------")
# print("c18------")
# print(c18,c18_num)
# print("------")
# print("c19------")
# print(c19,c19_num)
# print("------")
# print("c20------")
# print(c20,c20_num)
# print("------")
| parkyongjun1/rotated_deformabledetr | AO2-DETR/tools/arirang_json_to_txt.py | arirang_json_to_txt.py | py | 12,936 | python | en | code | 0 | github-code | 36 |
19848816787 | import json
no = 0
groups = 0
reserve = []
data = {
"no": no,
"groups": groups,
"reserve": reserve
}
with open("data.json", "w") as f:
json.dump(data, f)
opend = open("./data.json","r")
loaded = json.load(opend)
print(loaded)
print("no: ", loaded["no"], "groups: ", loaded["groups"])
| hmjn023/dev-hmjn | js.py | js.py | py | 318 | python | en | code | 1 | github-code | 36 |
19033935152 | """Module contains functionality that parses main page for RedHat vulnerabilities."""
import time
import lxml
import lxml.etree
from selenium import webdriver
from selenium.common.exceptions import WebDriverException, NoSuchElementException
from selenium.webdriver.chrome.options import Options
from cve_connector.vendor_cve.implementation.parsers.general_and_format_parsers\
.html_parser import HtmlParser
from cve_connector.vendor_cve.implementation.utilities.utility_functions import string_to_date
class RedHatMainPageParser(HtmlParser):
"""
Class providing functionality for parsing RedHat main page.
"""
def __init__(self, url, logger, from_date=None, to_date=None):
super().__init__(url, from_date, to_date)
self.date_format = '%d %b %Y' # 20 Apr 2018
self.driver = None
self.entry_added = False
self.last_added = False
self.first_cve_on_page = ''
self.logger = logger
try:
self.load_content()
except ValueError:
self.logger.error('Unable to load content from {0}'.format(self.url))
def get_content_from_ulr(self):
"""
Gets and returns content from URL.
:return: content
"""
if not self.url:
raise ValueError('Url must not be empty.')
options = Options()
options.add_argument('--headless')
options.add_argument('--no-sandbox')
driver = webdriver.Chrome(chrome_options=options)
driver.get(self.url)
driver.implicitly_wait(10)
content = driver.page_source
self.driver = driver
return content
def parse(self):
"""
Provides parsing functionality.
:return: None
"""
try:
loading = True
while loading:
loading = self.parse_current_page_content()
if loading:
loaded = False
while not loaded:
loaded = self.load_next_page()
except ValueError as val_err:
self.logger.error('Error while parsing RH.')
self.logger.error(val_err)
finally:
self.driver.close()
def do_click(self):
"""
Accomplishes clicking on a web page.
:return: True if successful
"""
try:
elm = self.driver.find_element_by_link_text("›")
elm.click()
return True
except WebDriverException:
return False
def load_next_page(self):
"""
Load web page.
:return: True if successful
"""
driver = self.driver
try:
click = False
start = time.time()
end = time.time()
while not click:
if (end - start) > 120:
raise ValueError('RedHat page could not be loaded.')
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
click = self.do_click()
self.logger.debug("CLICK in RedHat")
end = time.time()
time.sleep(3)
content = driver.page_source
self.data = lxml.etree.HTML(content.replace('<br>', ''))
self.driver = driver
return True
except (NoSuchElementException, WebDriverException):
return False
def parse_current_page_content(self):
"""
Parses current page in property data.
:return: True if next page is needed to load, False otherwise.
"""
table_rows = self.data.xpath(
'.//table[contains(@summary, "Common Vulnerabilities and Exposures")]//tbody/tr')
for row in table_rows:
url_list = row.xpath('.//th//a/@href')
date_str_list = row.xpath('.//td//time/text()')
if len(url_list) != 1 or len(date_str_list) != 1:
raise ValueError('Format of data provided in RH table has changed.')
date = string_to_date(date_str_list[0], self.date_format)
if date < self.from_date:
return False
if self.from_date <= date <= self.to_date:
self.entities.append(url_list[0])
self.entry_added = True
self.last_added = True
else:
self.last_added = False
if not self.last_added and self.entry_added:
return False
return self.last_added or not self.entry_added
| CSIRT-MU/CRUSOE | crusoe_observe/cve-connector/cve_connector/vendor_cve/implementation/parsers/vendor_parsers/redhat_parsers/red_hat_main_page_parser.py | red_hat_main_page_parser.py | py | 4,549 | python | en | code | 9 | github-code | 36 |
36538905715 | import matplotlib.pyplot as plt
from pymol import cmd
from multiprocessing import Pool, cpu_count
from tqdm import tqdm # Import tqdm for progress bar
# Define a function to calculate RMSD for a single frame
def calculate_rmsd(frame_number, object_1, reference_object):
cmd.frame(frame_number)
#cmd.align(object_1, reference_object, frame_number, 1)
return cmd.rms_cur(object_1, reference_object, frame_number, 0)
# Define the process_frame function outside of RMSD_vs_frame
def process_frame(args):
return calculate_rmsd(*args)
# Define a function to plot RMSD vs frame number
def RMSD_vs_frame(object_1, stride=1, num_process=1):
# Initialize an empty list to store RMSDs
RMSDs = []
reference_object = 'ref'
cmd.create(reference_object, object_1, 1, 0)
# Calculate total number of frames
total_frames = cmd.count_states(object_1)
# Set default number of processes if not specified
if num_process == 1:
num_process = min(cpu_count(), total_frames) # Use the number of CPU cores or total_frames, whichever is smaller
# Create a list of frame numbers to process
frames_to_process = range(1, total_frames + 1, stride)
# Create a list of arguments for the process_frame function
args_list = [(frame_number, object_1, reference_object) for frame_number in frames_to_process]
# Create a pool of workers for multiprocessing
with Pool(processes=num_process) as pool, tqdm(total=len(args_list), desc="Calculating RMSD") as pbar:
# Use multiprocessing to calculate RMSD for each frame
for rmsd in pool.imap_unordered(process_frame, args_list):
RMSDs.append(rmsd)
pbar.update(1) # Update the progress bar
# Create x and y data for the plot
x1 = frames_to_process # Frame numbers will be on the x-axis
y1 = RMSDs # RMSDs will be on the y-axis
# Plot the data
plt.figure()
plt.plot(x1, y1, label=object_1, color='blue', linewidth=2)
plt.xlabel('Frame', fontsize=12)
plt.ylabel('Distance', fontsize=12)
plt.title('Distance vs Frame', fontsize=16)
plt.legend(fontsize=10)
plt.grid(True, linestyle='--', linewidth=0.5, alpha=0.7)
plt.tick_params(axis='both', which='major', labelsize=10)
plt.tight_layout() # Adjust the spacing for better layout
plt.show() # Display the plot
# Usage: RMSD_vs_frame('object_name', stride=1, num_process=1)
print("USAGE: RMSD_vs_frame('protein selection', stride=5, num_process=4)")
| raafik980/charmm-md-analysis-in-pymol | 02_rmsd_vs_frame_parallel.py | 02_rmsd_vs_frame_parallel.py | py | 2,523 | python | en | code | 0 | github-code | 36 |
6795368681 | from django.conf import settings
from django.db.models import Q
from django_filters import rest_framework as filters
from ...models import Event
class EventFilterSet(filters.FilterSet):
category = filters.MultipleChoiceFilter(
method='filter_category',
label='Category',
choices=settings.EVENT_TYPE_CHOICES,
)
follow_type = filters.MultipleChoiceFilter(
method='filter_follow_type',
label='Follow Type',
choices=settings.EVENT_FOLLOW_MODE_CHOICES,
)
languages = filters.CharFilter(
method='filter_languages',
)
location = filters.CharFilter(
method='filter_location',
)
class Meta:
model = Event
fields = [
'category',
'follow_type',
'languages',
'location'
]
def filter_category(self, queryset, name, value):
query = Q()
for query_value in value:
query |= Q(category__code=query_value)
return queryset.filter(query)
def filter_follow_type(self, queryset, name, value):
query = Q()
for query_value in value:
query |= Q(follow_type__contains=query_value)
return queryset.filter(query)
def filter_languages(self, queryset, name, value):
query = Q()
for language in value.split(','):
query |= Q(languages__contains=[language])
return queryset.filter(query)
def filter_location(self, queryset, name, value):
query = Q()
for location in value.split(','):
query |= Q(location__icontains=location)
return queryset.filter(query)
| tomasgarzon/exo-services | service-exo-events/event/api/filters/event.py | event.py | py | 1,666 | python | en | code | 0 | github-code | 36 |
14551270733 | from io import StringIO
from itertools import chain
from typing import Any, Dict, List, Union
import simplejson as json
from jubeatools import song as jbt
from jubeatools.formats.filetypes import SongFile
from jubeatools.utils import lcm
from ..tools import make_memon_dumper
from . import schema
def _long_note_tail_value_v0(note: jbt.LongNote) -> int:
dx = note.tail_tip.x - note.position.x
dy = note.tail_tip.y - note.position.y
try:
return schema.X_Y_OFFSET_TO_P_VALUE[dx, dy]
except KeyError:
raise ValueError(
f"memon cannot represent a long note with its tail starting ({dx}, {dy}) away from the note"
) from None
def _get_timing(song: jbt.Song) -> jbt.Timing:
if song.common_timing is not None:
return song.common_timing
else:
return next(
chart.timing for chart in song.charts.values() if chart.timing is not None
)
def _raise_if_unfit_for_v0(song: jbt.Song, version: str) -> None:
"""Raises an exception if the Song object is ill-formed or contains information
that cannot be represented in a memon v0.x.y file (includes legacy)"""
if song.common_timing is None and all(
chart.timing is None for chart in song.charts.values()
):
raise ValueError("The song has no timing information")
chart_timings = [
chart.timing for chart in song.charts.values() if chart.timing is not None
]
if chart_timings:
first_one = chart_timings[0]
if any(t != first_one for t in chart_timings):
raise ValueError(
f"memon:{version} cannot represent a song with per-chart timing"
)
timing = _get_timing(song)
number_of_timing_events = len(timing.events)
if number_of_timing_events != 1:
if number_of_timing_events == 0:
raise ValueError("The song has no BPM")
else:
raise ValueError(f"memon:{version} does not handle BPM changes")
event = timing.events[0]
if event.BPM <= 0:
raise ValueError(f"memon:{version} only accepts strictly positive BPMs")
if event.time != 0:
raise ValueError(f"memon:{version} only accepts a BPM on the first beat")
for difficulty, chart in song.charts.items():
if len(set(chart.notes)) != len(chart.notes):
raise ValueError(
f"{difficulty} chart has duplicate notes, these cannot be represented"
)
def _dump_to_json(memon: dict) -> bytes:
memon_fp = StringIO()
json.dump(memon, memon_fp, use_decimal=True, indent=4)
return memon_fp.getvalue().encode("utf-8")
def _compute_resolution(notes: List[Union[jbt.TapNote, jbt.LongNote]]) -> int:
return lcm(
*chain(
iter(note.time.denominator for note in notes),
iter(
note.duration.denominator
for note in notes
if isinstance(note, jbt.LongNote)
),
)
)
def _dump_memon_note_v0(
note: Union[jbt.TapNote, jbt.LongNote], resolution: int
) -> Dict[str, int]:
"""converts a note into the {n, t, l, p} form"""
memon_note = {
"n": note.position.index,
"t": note.time.numerator * (resolution // note.time.denominator),
"l": 0,
"p": 0,
}
if isinstance(note, jbt.LongNote):
memon_note["l"] = note.duration.numerator * (
resolution // note.duration.denominator
)
memon_note["p"] = _long_note_tail_value_v0(note)
return memon_note
def _dump_memon_legacy(song: jbt.Song, **kwargs: Any) -> SongFile:
_raise_if_unfit_for_v0(song, "legacy")
timing = _get_timing(song)
memon: Dict[str, Any] = {
"metadata": {
"song title": song.metadata.title,
"artist": song.metadata.artist,
"music path": str(song.metadata.audio),
"jacket path": str(song.metadata.cover),
"BPM": timing.events[0].BPM,
"offset": -timing.beat_zero_offset,
},
"data": [],
}
for difficulty, chart in song.charts.items():
resolution = _compute_resolution(chart.notes)
memon["data"].append(
{
"dif_name": difficulty,
"level": chart.level,
"resolution": resolution,
"notes": [
_dump_memon_note_v0(note, resolution)
for note in sorted(
set(chart.notes), key=lambda n: (n.time, n.position)
)
],
}
)
return SongFile(contents=_dump_to_json(memon), song=song)
dump_memon_legacy = make_memon_dumper(_dump_memon_legacy)
def _dump_memon_0_1_0(song: jbt.Song, **kwargs: Any) -> SongFile:
_raise_if_unfit_for_v0(song, "v0.1.0")
timing = _get_timing(song)
memon: Dict[str, Any] = {
"version": "0.1.0",
"metadata": {
"song title": song.metadata.title,
"artist": song.metadata.artist,
"music path": str(song.metadata.audio),
"album cover path": str(song.metadata.cover),
"BPM": timing.events[0].BPM,
"offset": -timing.beat_zero_offset,
},
"data": dict(),
}
for difficulty, chart in song.charts.items():
resolution = _compute_resolution(chart.notes)
memon["data"][difficulty] = {
"level": chart.level,
"resolution": resolution,
"notes": [
_dump_memon_note_v0(note, resolution)
for note in sorted(set(chart.notes), key=lambda n: (n.time, n.position))
],
}
return SongFile(contents=_dump_to_json(memon), song=song)
dump_memon_0_1_0 = make_memon_dumper(_dump_memon_0_1_0)
def _dump_memon_0_2_0(song: jbt.Song, **kwargs: Any) -> SongFile:
_raise_if_unfit_for_v0(song, "v0.2.0")
timing = _get_timing(song)
memon: Dict[str, Any] = {
"version": "0.2.0",
"metadata": {
"song title": song.metadata.title,
"artist": song.metadata.artist,
"music path": str(song.metadata.audio),
"album cover path": str(song.metadata.cover),
"BPM": timing.events[0].BPM,
"offset": -timing.beat_zero_offset,
},
"data": {},
}
if song.metadata.preview is not None:
memon["metadata"]["preview"] = {
"position": song.metadata.preview.start,
"length": song.metadata.preview.length,
}
for difficulty, chart in song.charts.items():
resolution = _compute_resolution(chart.notes)
memon["data"][difficulty] = {
"level": chart.level,
"resolution": resolution,
"notes": [
_dump_memon_note_v0(note, resolution)
for note in sorted(set(chart.notes), key=lambda n: (n.time, n.position))
],
}
return SongFile(contents=_dump_to_json(memon), song=song)
dump_memon_0_2_0 = make_memon_dumper(_dump_memon_0_2_0)
def _dump_memon_0_3_0(song: jbt.Song, **kwargs: Any) -> SongFile:
_raise_if_unfit_for_v0(song, "v0.3.0")
timing = _get_timing(song)
memon: Dict[str, Any] = {
"version": "0.3.0",
"metadata": {
"song title": song.metadata.title,
"artist": song.metadata.artist,
"BPM": timing.events[0].BPM,
"offset": -timing.beat_zero_offset,
},
"data": {},
}
if song.metadata.audio is not None:
memon["metadata"]["music path"] = str(song.metadata.audio)
if song.metadata.cover is not None:
memon["metadata"]["album cover path"] = str(song.metadata.cover)
if song.metadata.preview is not None:
memon["metadata"]["preview"] = {
"position": song.metadata.preview.start,
"length": song.metadata.preview.length,
}
if song.metadata.preview_file is not None:
memon["metadata"]["preview path"] = str(song.metadata.preview_file)
for difficulty, chart in song.charts.items():
resolution = _compute_resolution(chart.notes)
memon["data"][difficulty] = {
"level": chart.level,
"resolution": resolution,
"notes": [
_dump_memon_note_v0(note, resolution)
for note in sorted(set(chart.notes), key=lambda n: (n.time, n.position))
],
}
return SongFile(contents=_dump_to_json(memon), song=song)
dump_memon_0_3_0 = make_memon_dumper(_dump_memon_0_3_0)
| Stepland/jubeatools | jubeatools/formats/memon/v0/dump.py | dump.py | py | 8,590 | python | en | code | 4 | github-code | 36 |
9507730487 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 13 18:52:23 2020
@author: Hasnain Khan
"""
import numpy as np
import cv2
from matplotlib import pyplot as plt
# Convolv function for convolving the kernel with real image matrix
def convolve_np(image, kernel):
X_height = image.shape[0]
X_width = image.shape[1]
F_height = kernel.shape[0]
F_width = kernel.shape[1]
H = int((F_height - 1) / 2)
W = int((F_width - 1) / 2)
out = np.zeros((X_height, X_width))
for i in np.arange(H, X_height-H):
for j in np.arange(W, X_width-W):
sum = 0
for k in np.arange(-H, H+1):
for l in np.arange(-W, W+1):
a = image[i+k, j+l]
w = kernel[H+k, W+l]
sum += (w * a)
out[i,j] = sum
return out # Returning the Convolved Image
def box_blur():
img = cv2.imread('Lenna.png', 0)
# Sobel Operator for Horizontal Edge Detection
Hx = np.array([[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]])
# Sobel Operator for Vertical Edge Detection
Hy = np.array([[-1, -2, -1],
[0, 0, 0],
[1, 2, 1]])
img_x = convolve_np(img, Hx) / 8.0 # Output of Sobel Horizontal
img_y = convolve_np(img, Hy) / 8.0 # Output of Sobel Vertical
if __name__ == '__main__':
box_blur()
| HasnainKhanNiazi/Convolutional-Kernels | Sobel_Operator.py | Sobel_Operator.py | py | 1,479 | python | en | code | 1 | github-code | 36 |
29231023064 | #!/bin/python
import math
import os
import random
import re
import sys
# Complete the workbook function below.
def workbook(n, k, arr):
pages = 0
problemsInPage = 0
specialProblems = 0
pageNumber = 1
listOfProblems = dict()
chapterNumber = 1
for i in arr:
pages = (i + k - 1)/k
for j in xrange(pages):
if pageNumber >=(j*k)+1 and pageNumber <=min((j+1)*k,i):
specialProblems += 1
pageNumber += 1
return specialProblems
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nk = raw_input().split()
n = int(nk[0])
k = int(nk[1])
arr = map(int, raw_input().rstrip().split())
result = workbook(n, k, arr)
fptr.write(str(result) + '\n')
fptr.close()
| rsundar/data-structures | hackerrank solutions/lisas-workbook.py | lisas-workbook.py | py | 851 | python | en | code | 0 | github-code | 36 |
29632863333 | import discord
from discord.ext import commands
from datetime import datetime
from fun_config import *
class Work(commands.Cog):
def __init__(self, client: commands.Bot):
self.client = client
@commands.command()
@commands.cooldown(1, 86400, commands.BucketType.user)
async def sleep(self, ctx):
# Get the data as users
users = await get_inventory_data()
# Check the user's max energy
max_energy = int(users[str(ctx.author.id)]['Stats']['MaxEnergy'])
users[str(ctx.author.id)]['Stats']['Energy'] = max_energy
with open(inventory_json_file, "w") as json_file:
json.dump(users, json_file, indent=1)
await ctx.reply("You slept and refilled your energy!")
async def setup(client: commands.Bot) -> None:
await client.add_cog(Work(client))
| Maghish/HoeX | cogs/work.py | work.py | py | 835 | python | en | code | 1 | github-code | 36 |
15202662310 | def swapFileData():
first=input("pls enter first file name:")
second=input("pls enter second file name:")
data_a=open(second.txt)
data_b=open(first.txt)
print(first)
print(second)
swapFileData()
| siddhant15oo/pro-98 | SwappingFile.py | SwappingFile.py | py | 222 | python | en | code | 0 | github-code | 36 |
import sys

import numpy as np
import plotly.offline as ply
import plotly.graph_objs as go
from scipy import stats as st
from scipy import signal as sg

import source_localization as src
# --- Round-trip test: save a SpecFrame to disk, load it back, compare. ---
file_name = "/home/dima/Projects/Touchdown/Data/test.bin"
ch_total = 6
length = 2**15 + 100000
fs = 100e6
t_pulse_width = 100e-6
t_period = 200e-6
snr = 1000000 # dB
start_period = 1
f0 = 50e6
post_id = 0
serial = 0

# Complex white-noise test signal, one row per channel.
y = st.norm.rvs(size=(ch_total, length), scale=10**(-snr/20)) +\
    1j * st.norm.rvs(size=(ch_total, length), scale=10**(-snr/20))

frame = src.SpecFrame(post_id=post_id, serial=serial, f0=f0, fs=fs, f_res=fs/length, sig=y)

writer = src.SpecFrameSaver(post_id)
writer.open(file_name)
writer.save_title(start_period)
writer.save(frame)
writer.close()
print("Successfully saved frame")

loader = src.SpecFrameLoader(post_id)
success = loader.open(file_name)
if not success:
    print("Unable to open file")  # FIX: message read "Enable to open file"
    exit(1)

i_start_period = loader.read_title()
i_frame = src.SpecFrame()
loader.load(i_frame)
loader.close()

error = 1e-7
failed = False
if abs(start_period - i_start_period) > error:
    print("\n\n\nTest failed. \nSaved start period was: ", start_period, "\nLoaded start period is: ", i_start_period)
    failed = True
# FIX: the original compared with 'is not'.  Identity only "works" for
# CPython's cached small ints and is always True for numpy integers, so
# these checks could mis-report; use value equality instead.
if post_id != i_frame.get_post_id():
    print("\n\n\nTest failed. \nSaved post id was: ", post_id, "\nLoaded post id is: ", i_frame.get_post_id())
    failed = True
if serial != i_frame.get_serial():
    print("\n\n\nTest failed. \nSaved serial was: ", serial, "\nLoaded serial is: ", i_frame.get_serial())
    failed = True
if abs(f0 - i_frame.get_central_frequency()) > error:
    print("\n\n\nTest failed. \nSaved central frequency was: ", f0, "\nLoaded central frequency is: ",
          i_frame.get_central_frequency())
    failed = True
if abs(fs - i_frame.get_sampling_frequency()) > error:
    print("\n\n\nTest failed. \nSaved sampling frequency was: ", fs, "\nLoaded sampling frequency is: ",
          i_frame.get_sampling_frequency())
    failed = True
if abs(fs/length - i_frame.get_frequency_resolution()) > error:
    print("\n\n\nTest failed. \nSaved frequency resolution was: ", fs/length, "\nLoaded frequency resolution is: ",
          i_frame.get_frequency_resolution())
    failed = True

data = i_frame.get_data()
# FIX: 'is not' on a numpy shape element was always True -> spurious failure.
if ch_total != data.shape[0]:
    print("\n\n\nTest failed. \nSaved channels total was: ", ch_total, "\nLoaded channels total is: ",
          data.shape[0])
    failed = True
if abs(length - data.shape[1]) > error:
    print("\n\n\nTest failed. \nSaved samples per channel was: ", length, "\nLoaded samples per channel is: ",
          data.shape[1])
    failed = True

# Element-wise comparison of the complex samples.
for ch in range(ch_total):
    for s in range(length):
        if abs((data[ch, s]) - y[ch, s]) > error:
            print("\n\n\nTest failed. \nData mismatch. \nChannel: ", ch, "\nSample: ", s)
            failed = True

if not failed:
    print("Test passed successfully")
| DimaZhu/libsource_localization | tests/test_specframewriter.py | test_specframewriter.py | py | 2,935 | python | en | code | 0 | github-code | 36 |
29184316598 | import sys
import numpy as np
import pyautogui
import win32api, win32con, win32gui
import cv2
import time
import torch
import time
# Class label order must match the YOLOv5 model's training labels.
class_names = [ 'counter-terrorist', 'terrorist' ]
# Which class is treated as the target; overwritten from argv in __main__.
opponent = 'terrorist'
opponent_color = (255, 0, 0)
ally_color = (0, 128, 255)
model_path = 'best-640.pt'
image_size = 640
# Mouse-movement multiplier per model input size -- presumably hand-tuned
# for one screen resolution; TODO confirm.
scale_map = { 1280:1.7, 640:4.5, 320:8 }
def load_frame():
    """Grab the game window's client area and return it resized to
    image_size x (image_size * 9/16) as a numpy array."""
    hwnd = win32gui.FindWindow(None, 'Counter-Strike: Global Offensive - Direct3D 9')
    rect = win32gui.GetWindowRect(hwnd)
    # +27 / -27 skip the window title bar -- presumably measured for this
    # setup; TODO confirm under other DPI/theme settings.
    region = rect[0], rect[1] + 27, rect[2] - rect[0], rect[3] - rect[1] - 27
    frame = np.array(pyautogui.screenshot(region=region))
    frame = cv2.resize(frame, (image_size, int(image_size / 16 * 9)))
    return frame
def process_frame(frame):
    """Letterbox the 16:9 frame onto a white image_size x image_size canvas,
    vertically centred, so the model sees a square input."""
    h, w = frame.shape[:2]
    offset = int((image_size - h) / 2)
    canvas = np.full((image_size, image_size, 3), 255, dtype=np.uint8)
    canvas[offset:offset + h, :w] = frame
    return canvas
def is_opponent(label):
    # Map a numeric class index from the model to "is this the configured
    # opposing side?".
    return class_names[label] == opponent
def find_closest(detected_boxes):
    """Return the index of the box with the largest positive width score.

    NOTE(review): the score is int(x1 - x2), which is negative for boxes
    with x2 > x1, so such boxes never beat the initial score of 0 and the
    first box wins by default.  Quirk preserved exactly as in the
    original.
    """
    best_index = 0
    best_score = 0
    for index, (left, _, right, _) in enumerate(detected_boxes):
        score = int(left - right)
        if score > best_score:
            best_index = index
            best_score = score
    return best_index
if __name__ == "__main__":
    # CLI: argv[1] = target class name, argv[2] = model input size (320/640/1280).
    opponent = sys.argv[1]
    image_size = int(sys.argv[2])
    model_path = "model/best-%d.pt" % image_size
    model = torch.hub.load('ultralytics/yolov5', 'custom', path=model_path)

    while True:
        frame = load_frame()
        frame = process_frame(frame)
        height, width = frame.shape[:2]
        display_frame = cv2.resize(frame, (500, 500))

        # Detection
        start_time = time.time()
        results = model(frame)
        print(time.time() - start_time)  # per-frame inference latency
        rl = results.xyxy[0].tolist()

        # Check every detected object; keep boxes above 0.5 confidence.
        detected_boxes = []
        color = (0, 0, 0)
        for item in rl:
            x1, y1, x2, y2, confidence, label = item
            if confidence > 0.5:
                if is_opponent(int(label)):
                    detected_boxes.append((x1, y1, x2, y2))
                    color = opponent_color
                else:
                    color = ally_color
                # Boxes are drawn on the 500x500 preview, scaled from model coords.
                cv2.rectangle(display_frame, (int(x1/image_size*500), int(y1/image_size*500)), (int(x2/image_size*500), int(y2/image_size*500)), color, 1)
        print("Detected:", len(detected_boxes), "enemies.")

        # Check Closest
        if len(detected_boxes) >= 1:
            closest_at = find_closest(detected_boxes)
            x1, y1, x2, y2 = detected_boxes[closest_at]
            # Offsets from screen centre to the box centre.
            x = int((x1 + x2) / 2 - width / 2)
            y = int((y1 + y2) / 2 - height / 2) - (y2 - y1) * 0.43 # For head shot
            scale = scale_map[image_size]
            x = int(x * scale)
            y = int(y * scale)

            # Move mouse and shoot
            win32api.mouse_event(win32con.MOUSEEVENTF_MOVE, x, y, 0, 0)
            time.sleep(0.05)
            win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)
            time.sleep(0.1)
            win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)

        # Screenshot was RGB; OpenCV display expects BGR.
        display_frame = cv2.cvtColor(display_frame, cv2.COLOR_BGR2RGB)
        cv2.imshow("frame", display_frame)
        cv2.waitKey(1)
import sys
# Rebind input() to the raw stdin reader (fast-I/O competitive idiom).
input = sys.stdin.readline

# Baekjoon 15486: schedule consultations to maximise total pay.
N = int(input())
T = [0] * N  # T[d]: number of days the consultation starting on day d takes
P = [0] * N  # P[d]: pay for that consultation
for a in range(N):
    t, p = map(int, input().split())
    T[a] = t
    P[a] = p
# dp[d]: best total pay achievable by the start of day d.
dp =[0] * (N+1)
for i in range(N):
    if T[i] <= N-i:
        # Take day i's job: it occupies days i .. i+T[i]-1.
        dp[i+T[i]] = max(dp[i+T[i]], dp[i]+P[i])
    # Skip day i.
    dp[i+1] = max(dp[i+1], dp[i])
print(dp[-1])
1369587477 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from models import Company
def index(request):
    """List companies alphabetically; a POSTed search renders unpaginated
    results (exactly like the original view)."""
    queryset = Company.objects.all().order_by('name')
    paginator = Paginator(queryset, 10)
    search_term = request.POST.get('search') if request.method == 'POST' else None
    if search_term:
        matches = queryset.filter(name__icontains=search_term)
        return render(request, 'index.html', {'data': matches})
    page = request.GET.get('page')
    try:
        page_obj = paginator.page(page)
    except PageNotAnInteger:
        page_obj = paginator.page(1)
    except EmptyPage:
        page_obj = paginator.page(paginator.num_pages)
    return render(request, 'index.html', {'data': page_obj})
| avallete/ft_companysee | company/views.py | views.py | py | 863 | python | en | code | 0 | github-code | 36 |
# Advent of Code 2020 day 10 part 1: joltage adapter chain.
with open('input.txt', 'r') as f:
    jolts = [int(line.strip()) for line in f.readlines()]
# Full chain: wall outlet (0), every adapter sorted, device (max + 3).
sjolts = [0, *sorted(jolts), max(jolts) + 3]
# Gaps between consecutive elements of the chain.
diffs = [b - a for a, b in zip(sjolts[:-1], sjolts[1:])]
a = sum(d == 1 for d in diffs)  # count of 1-jolt gaps
b = sum(d == 3 for d in diffs)  # count of 3-jolt gaps
print(a*b)
3587965918 |
import subprocess
from platform import release
from typing import List
from libqtile import bar, layout, widget, hook
from libqtile.config import Click, Drag, Group, Key, Screen
from libqtile.lazy import lazy
# defaults: modifier key, preferred applications and local paths.
mod = "mod4"  # Super/Windows key
terminal = "termite"
browser = "firefox"
media = "stremio"
fileManager = "nemo"
game="lutris"
ide = "atom"
passmanager="bwmenu"
configFolder = "/home/user/.config/qtile"
scriptFolder = "/home/user/.bin"
def app_or_group(group, app):
    """Go to *group* if it exists, otherwise launch *app*.

    Returns a callable suitable for lazy.function().  The original body
    spawned the app even when the group existed, which contradicted this
    docstring and made the try/except pointless (both branches did the
    same spawn); the spawn now happens only when the group is missing.
    """
    def f(qtile):
        try:
            qtile.groups_map[group].cmd_toscreen()
        except KeyError:
            qtile.cmd_spawn(app)
    return f
keys = [
    # Switch between windows in current stack pane
    Key([mod], "k", lazy.layout.down(),
        desc="Move focus down in stack pane"),
    Key([mod], "j", lazy.layout.up(),
        desc="Move focus up in stack pane"),
    # Move windows up or down in current stack
    Key([mod, "control"], "k", lazy.layout.shuffle_down(),
        desc="Move window down in current stack "),
    Key([mod, "control"], "j", lazy.layout.shuffle_up(),
        desc="Move window up in current stack "),
    Key([], "XF86PowerOff", lazy.spawn(f"{scriptFolder}/sysmenu_full"),),
    Key([mod], "d", lazy.screen.next_group(),),
    Key([mod], "a", lazy.screen.prev_group(),),
    # Switch window focus to other pane(s) of stack
    Key(["mod1"], "Tab", lazy.layout.next(),
        desc="Switch window focus to other pane(s) of stack"),
    # Swap panes of split stack
    Key([mod, "shift"], "space", lazy.layout.rotate(),
        desc="Swap panes of split stack"),
    #fullscreen
    Key([mod], "f", lazy.window.toggle_fullscreen(), desc="Full screen"),
    #programs
    Key([mod], "Return", lazy.spawn(terminal), desc="Launch terminal"),
    Key([mod], "b", lazy.spawn(browser), desc="Launch browser"),
    Key([mod], "e", lazy.spawn(fileManager), desc="Launch file manager"),
    # FIX: `media` is a command string, but lazy.function() expects a
    # callable, so pressing mod+m raised at runtime -- spawn it like the
    # other application bindings.
    Key([mod], "m", lazy.spawn(media), desc="Launch media player"),
    Key([mod], "t", lazy.spawn(ide), desc="Launch IDE"),
    Key([mod], "g", lazy.spawn(game), desc="Launch Games"),
    Key([mod], "p", lazy.spawn(passmanager), desc="Launch passmanager"),
    # Toggle between different layouts as defined below
    Key([mod], "Tab", lazy.next_layout(), desc="Toggle between layouts"),
    Key([mod], "q", lazy.window.kill(), desc="Kill focused window"),
    # Qtile controls
    Key([mod, "control"], "r", lazy.restart(), desc="Restart qtile"),
    Key([mod, "control"], "q", lazy.shutdown(), desc="Shutdown qtile"),
    Key([mod], "l", lazy.spawn("betterlockscreen -l")),
    Key([mod], "s", lazy.spawn("betterlockscreen -s")),
    # dmenu
    Key(["mod1"], "r", lazy.spawn("dmenu_run"),
        desc="Spawn a command using a prompt widget"),
    # rofi
    Key([mod], "r", lazy.spawn("rofi -combi-modi window,drun,ssh -theme solarized -show combi -icon-theme Papirus -show-icons"),
        desc="Spawn a command using a prompt widget"),
    # Sound
    Key([mod], "Up", lazy.spawn("pulsemixer --change-volume +5"),),
    Key([mod], "Down", lazy.spawn("pulsemixer --change-volume -5")),
    Key([mod], "x", lazy.spawn("pulsemixer --toggle-mute")),
    Key([mod], "Right", lazy.spawn("playerctl next")),
    Key([mod], "Left", lazy.spawn("playerctl previous")),
    Key([mod], "z", lazy.spawn("playerctl play-pause")),
    # shutdown
    Key([mod, "shift"], "Escape", lazy.spawn(f"{scriptFolder}/sysmenu_full"), desc="shutdown settings"),
    # printScreen
    Key([], "Print", lazy.spawn("flameshot gui")),
]
# Workspaces; labels are the glyphs shown in the GroupBox bar widget.
groups = [
    Group("1", label="🌐" ),
    Group("2", label="{}"),
    Group("3", label="💬"),
    Group("4", label="🎮"),
    Group("5", label="📁"),
    Group("6"),
    Group("7"),
    Group("8"),
    Group("9"),
    Group("0")
]
# Bind mod+<digit> / mod+shift+<digit> for every group.
for i in groups:
    keys.extend([
        # mod1 + letter of group = switch to group
        Key([mod], i.name, lazy.group[i.name].toscreen(),
            # FIX: the desc strings were missing the f prefix, so the help
            # text showed the literal "{i.name}" instead of the group name.
            desc=f"Switch to group {i.name}"),
        # mod1 + shift + letter of group = move focused window to group
        Key([mod, "shift"], i.name, lazy.window.togroup(i.name),
            desc=f"move focused window to group {i.name}"),
    ])
# All layouts cycled with mod+Tab; the first entry is the default.
layouts = [
    layout.Zoomy(),
    layout.Max(),
    layout.Stack(num_stacks=2),
    layout.Bsp(),
    layout.Columns(),
    layout.Matrix(),
    layout.MonadTall(),
    layout.MonadWide(),
    layout.RatioTile(),
    layout.Tile(),
    layout.TreeTab(),
    layout.VerticalTile(),
]
# Defaults applied to every bar widget unless overridden per widget.
widget_defaults = dict(
    font='xos4 Terminus Regular',
    fontsize=12,
    padding=3,
)
extension_defaults = widget_defaults.copy()
# Single screen with a 20px top bar: group box / window name on the left,
# colour-blocked system widgets (kernel, CPU, memory, network, clock,
# systray) on the right.
screens = [
    Screen(
        top=bar.Bar(
            [
                widget.GroupBox(spacing=3),
                widget.WindowName(),
                widget.Spacer(),
                #Kernel
                widget.TextBox(text=release(),background="#A0C1B9"),
                # CPU
                widget.Image(filename=f"{configFolder}/resources/cpu.png",
                             background="#ff9800"),
                widget.CPU(format="{load_percent}% ", background="#ff9800"),
                # MEM
                widget.Sep(linewidth=4, foreground="#8bc34a",
                           background="#8bc34a"),
                widget.Image(filename=f"{configFolder}/resources/memory.png",
                             background="#8bc34a"),
                widget.Memory(format="{MemUsed}M | {SwapUsed}M ",
                              background="#8bc34a"),
                # NET
                widget.Net(format="{down} ↓↑ {up} ", background="#03a9f4"),
                # CLK
                widget.Clock(format=' ⏲ %I:%M %m-%d (%a) ',
                             background="#e91e63"),
                # SYSTRAY
                widget.Systray(background="#009688")
            ],
            20,
        ),
    ),
]
# Drag floating layouts: mod+left drag moves, mod+right drag resizes,
# mod+middle click raises.
mouse = [
    Drag([mod], "Button1", lazy.window.set_position_floating(),
         start=lazy.window.get_position()),
    Drag([mod], "Button3", lazy.window.set_size_floating(),
         start=lazy.window.get_size()),
    Click([mod], "Button2", lazy.window.bring_to_front())
]
dgroups_key_binder = None
dgroups_app_rules = []  # type: List
main = None  # WARNING: this is deprecated and will be removed soon
follow_mouse_focus = True
bring_front_click = False
cursor_warp = False
# Window classes/names that always float instead of tiling.
floating_layout = layout.Floating(float_rules=[
    # Run the utility of `xprop` to see the wm class and name of an X client.
    {'wmclass': 'confirm'},
    {'wmclass': 'dialog'},
    {'wmclass': 'download'},
    {'wmclass': 'error'},
    {'wmclass': 'file_progress'},
    {'wmclass': 'notification'},
    {'wmclass': 'splash'},
    {'wmclass': 'toolbar'},
    {'wmclass': 'confirmreset'},  # gitk
    {'wmclass': 'makebranch'},  # gitk
    {'wmclass': 'maketag'},  # gitk
    {'wname': 'branchdialog'},  # gitk
    {'wname': 'pinentry'},  # GPG key password entry
    {'wmclass': 'ssh-askpass'},  # ssh-askpass
])
@hook.subscribe.startup_once
def autostart():
    """Run once when qtile first starts: launch the user autostart script.

    The original first statement built a ``lazy.function(...)`` object
    (with a stray trailing comma) and immediately discarded it; lazy
    objects do nothing outside a key/hook binding, so that dead statement
    was removed.
    """
    subprocess.Popen(f"{scriptFolder}/autostart.sh")
auto_fullscreen = True
# "smart": only focus-steal when the activating window is on the current group.
focus_on_window_activation = "smart"
# XXX: Gasp! We're lying here. In fact, nobody really uses or cares about this
# string besides java UI toolkits; you can see several discussions on the
# mailing lists, GitHub issues, and other WM documentation that suggest setting
# this string if your java app doesn't work correctly. We may as well just lie
# and say that we're a working one by default.
#
# We choose LG3D to maximize irony: it is a 3D non-reparenting WM written in
# java that happens to be on java's whitelist.
wmname = "LG3D"
| TalkingPanda0/dotfiles | .config/qtile/qtile/config.py | config.py | py | 7,903 | python | en | code | 0 | github-code | 36 |
11757705834 | import numpy as np
#Solving the first 10 exercises:
#1.Write a NumPy program to get the numpy version and show numpy build configuration:
def exercise1():
    """Print the installed NumPy version (exercise 1)."""
    print("The numpy version is", np.__version__)
#2. Write a NumPy program to get help on the add function
def exercise2():
    """Show the documentation of np.add (exercise 2).

    np.info() prints the help text itself and returns None; the trailing
    print therefore shows None, exactly like the original.
    """
    add_help = np.info(np.add)
    print(add_help)
#3. Write a NumPy program to test whether none of the elements of a given array is zero.
#Give the np.array X:
#You can make a simple for to check each element or just call the np built in function np.all(X)
def exercise3():
    """Return True when no element of the sample 2-D array is zero.

    The original iterated over the *rows* of a 2-D array and evaluated
    ``if i == 0`` on a whole row, which raises ValueError ("the truth
    value of an array ... is ambiguous"); iterate over the scalar
    elements instead.
    """
    X = np.array([[1,2,3],[4,5,6]])
    for element in X.flat:
        if element == 0:
            return False
    return True
def exercise3_1():
    """Same check via np.all: prints False because the array holds a zero."""
    sample = np.array([[1,2,3],[4,5,0]])
    print(np.all(sample))
#4. Write a NumPy program to test whether any of the elements of a given array is non-zero.
#Give the np.array X:
#You can make a simple for to check each element or just call the np built in function np.any(X)
def exercise4():
    """Return True when any element of the sample 2-D array is non-zero.

    Same row-wise truth-value bug as exercise3 in the original
    (``if i != 0`` on a whole row raises ValueError); test individual
    elements instead.
    """
    X = np.array([[1,2,3],[4,5,6]])
    for element in X.flat:
        if element != 0:
            return True
    return False
def exercise4_1():
    """Same check via np.any: prints True because some elements are non-zero."""
    sample = np.array([[1,2,3],[4,5,0]])
    print(np.any(sample))
#5. Write a NumPy program to test a given array element-wise for finiteness (not infinity or not a Number).
#You can define a number infinite using np.inf, and not a number using np.nan.
def exercise5():
    """Element-wise finiteness check (exercise 5): nan and inf are not finite."""
    samples = {"X": np.array([1,np.nan,np.inf]), "Y": np.array([1,2,3])}
    for name, arr in samples.items():
        print("The array element-wise for finiteness in array %s?" % name, np.isfinite(arr))
#6. Write a NumPy program to test element-wise for positive or negative infinity.
def exercise6():
    """Element-wise test for positive or negative infinity (exercise 6)."""
    samples = {"X": np.array([1,np.nan,np.inf]), "Y": np.array([1,2,3])}
    for name, arr in samples.items():
        print("The array element-wise for positive or negative infinity in array %s?" % name, np.isinf(arr))
#7. Write a NumPy program to test element-wise for NaN of a given array.
def exercise7():
    """Element-wise NaN test (exercise 7)."""
    samples = {"X": np.array([1,np.nan,2]), "Y": np.array([1,2,3])}
    for name, arr in samples.items():
        print("The array element-wise for NaN in array %s?" % name, np.isnan(arr))
#8. Write a NumPy program to test element-wise for complex number, real number of a given array. Also test whether a given number is a scalar type or not
def exercise8():
    """Element-wise complex-number test plus a scalar-type test (exercise 8).

    Note: np.isscalar of a one-element *array* is False -- arrays are not
    scalars -- which is what the original printed too.
    """
    complex_sample = np.array([1,1+2j,2])
    single_item = np.array([1])
    print("The array element-wise for complex number in array X?", np.iscomplex(complex_sample))
    print("The array element-wise is a scalar type or not in array Y?", np.isscalar(single_item))
#9. Write a NumPy program to test whether two arrays are element-wise equal within a tolerance.
#To better understand what is the tolerance range, read the documentation.
#You can use np.info(np.allclose) to check the documentation here.
#I'll give the same examples given in the documentation...
def exercise9():
    """np.allclose tolerance comparisons (exercise 9), printed in the same
    order as the documentation examples the original copied."""
    cases = [
        (([1e10,1e-7], [1.00001e10,1e-8]), {}),
        (([1e10,1e-8], [1.00001e10,1e-9]), {}),
        (([1e10,1e-8], [1.0001e10,1e-9]), {}),
        (([1.0, np.nan], [1.0, np.nan]), {}),
        (([1.0, np.nan], [1.0, np.nan]), {"equal_nan": True}),
    ]
    for (a, b), kwargs in cases:
        print(np.allclose(a, b, **kwargs))
#10. Write a NumPy program to create an element-wise comparison (greater, greater_equal, less and less_equal) of two given arrays.
#You can only compare if the arrays has the same size.
def exercise10():
    """Element-wise comparisons of two equal-sized arrays (exercise 10).

    Raises Exception when the arrays differ in size (guard clause instead
    of the original if/else nesting).
    """
    first = np.array([1,2,3]).reshape((1,-1))
    second = np.array([2,2,3]).reshape((1,-1))
    if first.size != second.size:
        raise Exception("Arrays with different sizes")
    print("Greater:", np.greater(first, second))
    print("Greater or equal:", np.greater_equal(first, second))
    print("Less:", np.less(first, second))
    print("Less equal", np.less_equal(first, second))
| mauriciopssantos/NumpyExercises | NumpyBasics/exercise1to10.py | exercise1to10.py | py | 3,750 | python | en | code | 0 | github-code | 36 |
43850231663 | import pandas as pd
import numpy as np
def func23():
    """Return the fixed sample list sorted in descending order."""
    return sorted([1, 34, 5, 100, 2], reverse=True)
def listar():
    """Return the even numbers from the fixed 1..11 sample list."""
    numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
    return [n for n in numbers if n % 2 == 0]
def colors():
    """Print the truncated integer mean of the fixed sample list."""
    values = [22, 33, 44, 55, 66]
    print(int(np.average(values)))
def arregloletras(str):
    """Print *str* fully upper-cased.

    The parameter name shadows the builtin ``str``; kept unchanged for
    call compatibility.
    """
    upper_chars = [ch.upper() for ch in str]
    print(''.join(upper_chars))
def palindrome(str_):
    """Return True when *str_* reads the same forwards and backwards."""
    return str_ == ''.join(reversed(str_))
def mirrow(str_2):
    """Return True when at most two of the first len-2 characters differ
    from the reversed string (an 'almost palindrome' check).

    NOTE(review): the final two positions are never compared -- the loop
    bound faithfully mirrors the original ``range(len(str_2)-2)``.
    """
    reversed_form = str_2[::-1]
    mismatches = sum(
        1 for i in range(len(str_2) - 2) if str_2[i] != reversed_form[i]
    )
    return mismatches <= 2
def matriz():
    """Print every entry of a fixed 3x3 matrix in row-major order."""
    grid = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    for row in grid:
        for value in row:
            print(value)
# Demo call; the commented-out call below was left for manual testing.
matriz()
#colors()
def tri():
    """Print each language prefixed by its index.

    The original incremented ``i`` by hand inside the loop, but
    enumerate() rebinds ``i`` on every iteration, so that statement was
    dead code and has been removed.
    """
    languages = ['Python', 'C', 'C++', 'C#', 'Java']
    for i, language in enumerate(languages):
        print(i, language)
tri()
# Scratch pandas experiments on a tiny two-column frame.
dataframe={ "val":[1,2,3,4],
        "nombre":["al,g)","el,g","il,g","ol,g"]
            }
l= pd.DataFrame(dataframe)
df=pd.DataFrame(dataframe)
# strip('ñ') removes nothing here; l becomes just the 'nombre' Series.
l=l['nombre'].str.strip('ñ')
# Remove the trailing ')' from the name column into a new column.
df['df']= df['nombre'].str.strip(')')
df[['uno','dos']]= df.nombre.str.split("," ,expand=True)
# NOTE(review): stores the bound .agg method without calling it -- likely
# a leftover; confirm whether an aggregation call was intended.
x = df['val'].agg
df[['First','Last']] = df.nombre.str.split(",", expand=True)
answer= df[['val']].agg('max')
#print("este es",answer)
#print(df)
# NOTE(review): 'str' here shadows the builtin str type for the rest of
# the module.
str="anitalavalatina"
str_2="anitalavalatina"
#print(func23())
#print(listar())
#arregloletras(str)
print(mirrow(str_2))
| eloyina/answerpython | answers.py | answers.py | py | 1,611 | python | en | code | 0 | github-code | 36 |
25813329582 | from sys import stdin
def cd():
    """Consume one test case from the global `nums` stack.

    Reads n and m, then n numbers for the first collection and m for the
    second; returns how many of the second collection's numbers also
    appear in the first, or -1 on the "0 0" terminator.
    """
    n = nums.pop()
    m = nums.pop()
    if n == 0 and m == 0:
        return -1
    first_collection = {nums.pop() for _ in range(n)}
    return sum(1 for _ in range(m) if nums.pop() in first_collection)
# All whitespace-separated integers from stdin, reversed so pop() yields
# them in input order.
nums = list(map(int, reversed(stdin.read().split())))
res = cd()
# Process test cases until the "0 0" terminator (cd() returns -1).
while (res != -1):
    print(res)
    res = cd()
| Anders-E/Kattis | cd/cd.py | cd.py | py | 422 | python | en | code | 4 | github-code | 36 |
29265371613 | import turtle
import pandas
# Everything about turtle
screen = turtle.Screen()
screen.title("U.S. states")
# The blank map is registered as a turtle shape so it fills the window.
image = "blank_states_img.gif"
screen.addshape(image)
turtle.shape(image)
screen.setup(width=750, height=520)
# Invisible turtle used only to write guessed state names on the map.
state_name = turtle.Turtle()
state_name.hideturtle()
state_name.penup()
# Everything about panda
data = pandas.read_csv("50_states.csv")
all_states = data.state.to_list()
# Everything about game logic
correct_guesses = []
game_is_on = True
while game_is_on:
    # NOTE(review): textinput() returns None when the dialog is cancelled,
    # so .title() would raise AttributeError there -- confirm intended.
    if not correct_guesses:
        answer = (screen.textinput(title="Guess States", prompt="What's your state name guess?")).title()
    else:
        answer = (screen.textinput(title=f"{len(correct_guesses)}/{len(all_states)} States Correct",
                                   prompt="What's another state name?")).title()
    if answer in all_states:
        # Look up the state's map coordinates and write its name there.
        state_data = data[data.state == answer]
        state_name.goto(int(state_data.x), int(state_data.y))
        state_name.write(answer)
        correct_guesses.append(answer)
    if len(correct_guesses) == 50:
        game_is_on = False
        print("Great! You've guessed all states of America.")
    if answer == "Exit":
        # Dump the not-yet-guessed states for later study, then quit.
        states_left = [state for state in all_states if state not in correct_guesses]
        states_left_dict = {"state": states_left}
        pandas.DataFrame(states_left_dict).to_csv("states_to_learn.csv")
        break
| Sina-Eshrati/US-States-Game | main.py | main.py | py | 1,427 | python | en | code | 0 | github-code | 36 |
19130481877 | from datetime import datetime
import json
class Observation():
    """One sensor reading.

    Instance attribute names double as the JSON field names when the
    object is serialised through RecEdgeMessage.toJSON().
    """

    def __init__(self, observationTime, numericValue, stringValue, booleanValue, sensorId):
        # The timestamp is stored pre-formatted, e.g. "01/31/2020, 13:45:59".
        formatted = observationTime.strftime("%m/%d/%Y, %H:%M:%S")
        self.observationTime = formatted
        self.Value = numericValue
        self.valueString = stringValue
        self.valueBoolean = booleanValue
        self.sensorId = sensorId
class RecEdgeMessage():
    """Envelope for a batch of Observations in the "rec3.2" edge format."""

    def __init__(self, deviceId, observations):
        self.format = "rec3.2"  # fixed protocol version tag
        self.deviceId = deviceId
        self.observations = observations

    def toJSON(self):
        """Serialise this message; nested objects fall back to their __dict__."""
        def _as_dict(obj):
            return obj.__dict__
        return json.dumps(self, default=_as_dict, sort_keys=True, indent=4)
| midcoreboot/RaspberryPiSensors | REC.py | REC.py | py | 675 | python | en | code | 0 | github-code | 36 |
15419782961 | #!/usr/bin/python3
# Dump memory of a process (Linux only).
# Based on Giles's answer from
# https://unix.stackexchange.com/questions/6267/how-to-re-load-all-running-applications-from-swap-space-into-ram/#6271
#
# Error checking added by hackerb9.
import ctypes, re, sys
## Partial interface to ptrace(2), only for PTRACE_ATTACH and PTRACE_DETACH.
c_ptrace = ctypes.CDLL("libc.so.6").ptrace
c_pid_t = ctypes.c_int32 # This assumes pid_t is int32_t
c_ptrace.argtypes = [ctypes.c_int, c_pid_t, ctypes.c_void_p, ctypes.c_void_p]
def ptrace(attach, pid):
    """Attach to (or detach from) process *pid*; raise OSError on failure.

    The original ``raise (SysError, 'ptrace', err)`` was doubly broken:
    SysError is undefined (NameError) and raising a tuple is illegal in
    Python 3, so any ptrace failure crashed with the wrong exception.
    """
    op = ctypes.c_int(16 if attach else 17) #PTRACE_ATTACH or PTRACE_DETACH
    c_pid = c_pid_t(pid)
    null = ctypes.c_void_p()
    err = c_ptrace(op, c_pid, null, null)
    if err != 0:
        raise OSError("ptrace(%d, %d) failed with %d" % (op.value, pid, err))
def maps_line_range(line):
    """Parse one /proc/$pid/maps line.

    Returns [start, end, read_flag, pathname]: mapping boundaries as
    ints, the 'r'/'-' read-permission character, and whatever sits past
    column 73 (the pathname field), whitespace-stripped.
    """
    match = re.match(r'([0-9A-Fa-f]+)-([0-9A-Fa-f]+) ([-r])(.*)', line)
    start, end, readable = match.group(1), match.group(2), match.group(3)
    return [int(start, 16), int(end, 16), readable, line[73:].strip()]
## Dump the readable chunks of memory mapped by a process
def cat_proc_mem(pid):
    """Stream every readable mapped range of process *pid* to stdout.

    pid is a string (taken straight from argv).  Requires permission to
    ptrace the target; the attach is always undone in the finally block.
    """
    ## Apparently we need to ptrace(PTRACE_ATTACH, $pid) to read /proc/$pid/mem
    ptrace(True, int(pid))
    mem_file=None
    try:
        ## Read the memory maps to see what address ranges are readable
        maps_file = open("/proc/" + pid + "/maps", 'r')
        ranges = map(maps_line_range, maps_file.readlines())
        maps_file.close()
        ## Read the readable mapped ranges
        mem_file = open("/proc/" + pid + "/mem", 'rb', 0)
        for r in ranges:
            if r[2] == 'r':
                try:
                    mem_file.seek(r[0])
                except OverflowError as e:
                    # [vsyscall] is located at 2**64 - 10 * 2**20. Why does it fail to seek there?
                    sys.stderr.write("Warning, cannot seek to %X%s: %s\n" % (r[0], " (%s)" % (r[3]) if r[3] else "", e))
                    continue
                try:
                    chunk = mem_file.read(r[1] - r[0])
                except IOError as e:
                    # Some sections may not be readable, e.g., /dev/dri/card0
                    sys.stderr.write("Warning, cannot read %X - %X%s: %s\n" % (r[0],r[1], " (%s)" % (r[3]) if r[3] else "", e))
                    pass
                # Binary output must go through the buffer, not the text layer.
                sys.stdout.buffer.write(chunk)
    ## Cleanup
    finally:
        if mem_file: mem_file.close()
        ptrace(False, int(pid))
if __name__ == "__main__":
    # Dump each PID given on the command line, in order, to stdout.
    for pid in sys.argv[1:]:
        cat_proc_mem(pid)
| hackerb9/memcat | memcat.py | memcat.py | py | 2,573 | python | en | code | 10 | github-code | 36 |
30504364246 | import numpy as np
import cv2
from sklearn.cluster import KMeans
import pdb
from skimage.util import montage
import matplotlib.pyplot as plt
''' K means clustering with 30 classes: 26 letters, 1 blank time, 1 double letter, 1 triple letter, 1 empty'''
# gather images that have been labelled
f = open('labels.txt')
dir = '/Users/Alex/Desktop/Summer 2019/scrabble/data/'
# since scrabble is 15 by 15 i should be divisible by 15
i = 825
# if you divide i by 15 (number of rows and columns in Scrabble) you get the width and height (pixels) of each square
s = int(i/15)
# data to be clustered
data = []
counter = 0
# number of boards to cluster
num_boards = 1
# Each labels.txt line: image filename followed by the 4 board corner points.
for line in f.readlines():
    strr = ''
    # split the line in the text file
    x = line.split()
    # store the image name
    img = dir + x[0]
    # read and resize the image
    img = cv2.imread(img, 0)
    img = cv2.resize(img, (640, 480))
    # store the 4 points in x
    x = x[1:]
    # convert the points to a string
    pts1 = strr.join(x)
    # eval converts the string to an array
    # NOTE(review): eval() on file contents is fine for a trusted local
    # label file but unsafe on untrusted input.
    pts1 = np.float32(eval(pts1))
    # pts1 are the corners and pts2 is the width and height
    pts2 = np.float32([[0, 0], [i, 0], [0, i], [i, i]])
    # M is the perspective matrix
    M = cv2.getPerspectiveTransform(pts1, pts2)
    # dst is the resulting flat image
    dst = cv2.warpPerspective(img, M, (i, i))
    # now we need to extract the tiles (flattened 55x55 squares)
    for j in range(15):
        for k in range(15):
            fname = str(j) + str(k) + ".txt"
            square = np.float32(dst[s * j: s + s * j, s * k: s + s * k])
            square = square.reshape((-1))
            data.append(square)
    counter += 1
    if counter == num_boards:
        break
features = np.asarray(data)
kmeans = KMeans(n_clusters=30, random_state=0, max_iter=500).fit(features)
inds = np.where(kmeans.labels_ == 13)
fs = np.uint8(features)
# 225 tiles per board, each 55 pixels wide -- assumes num_boards == 1.
fs = fs.reshape((225,55,-1))
y = montage(fs, grid_shape=(15, 15))
plt.imshow(y)
plt.show()
# montages = build_montages(features[inds], (128, 196), (7, 3))
# for montage in montages:
#     cv2.imshow("Montage", montage)
#     cv2.waitKey(0)
# # for x in features[inds]:
# #     cv2.imshow("yes", np.uint8(x).reshape((55,-1)))
# #     cv2.waitKey(0)
# pdb.set_trace()
33149847347 | import argparse
import collections
from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import numpy
import sys
import time
import json
import zenoh
# --- Command line argument parsing --- --- --- --- --- ---
parser = argparse.ArgumentParser(
    prog='z_plot',
    description='zenoh plotting example')
parser.add_argument('--mode', '-m', type=str, choices=['peer', 'client'],
                    help='The zenoh session mode.')
parser.add_argument('--connect', '-e', type=str, metavar='ENDPOINT', action='append',
                    help='Endpoints to connect to.')
parser.add_argument('--listen', '-l', type=str, metavar='ENDPOINT', action='append',
                    help='Endpoints to listen on.')
parser.add_argument('-k', '--key', type=str, default='demo/random',
                    help='The key expression to subscribe to.')
parser.add_argument('-i', '--history', type=float, default=10.0,
                    help='The history depth in seconds.')
parser.add_argument('-c', '--config', type=str, metavar='FILE',
                    help='A zenoh configuration file.')
args = parser.parse_args()
# Build the zenoh session config: file-based if given, else defaults, with
# CLI overrides injected as JSON5 values.
conf = zenoh.config_from_file(args.config) if args.config is not None else zenoh.Config()
if args.mode is not None:
    conf.insert_json5(zenoh.config.MODE_KEY, json.dumps(args.mode))
if args.connect is not None:
    conf.insert_json5(zenoh.config.CONNECT_KEY, json.dumps(args.connect))
if args.listen is not None:
    conf.insert_json5(zenoh.config.LISTEN_KEY, json.dumps(args.listen))
# One matplotlib line per observed key expression, keyed by its string form.
lines = {}
fig, ax = plt.subplots()
ax.xaxis.axis_date()
def listener(sample):
    """Zenoh subscriber callback: append the sample to its key's plot line.

    Creates a line per key expression on first sight, then appends
    (timestamp, float payload) and drops points older than the configured
    history window.
    """
    if not str(sample.key_expr) in lines:
        lines[str(sample.key_expr)] = ax.plot([], [], '-o', label=str(sample.key_expr))[0]
    now = time.time()
    xdata, ydata = lines[str(sample.key_expr)].get_data()
    # Fall back to receive time when the sample carries no timestamp.
    xdata = numpy.append(xdata, datetime.fromtimestamp(now if sample.timestamp is None else sample.timestamp.time))
    ydata = numpy.append(ydata, float(sample.payload.decode("utf-8")))
    # NOTE(review): if every point falls outside the history window,
    # zip(*...) yields no arguments and set_data() raises -- confirm
    # whether that can happen in practice.
    lines[str(sample.key_expr)].set_data(zip(*filter(lambda t: t[0].timestamp() > now - args.history, zip(xdata, ydata))))
def update(_):
    """Matplotlib FuncAnimation tick: rescale axes and refresh the legend."""
    if len(lines):
        ax.axes.relim()
        ax.axes.autoscale_view(True,True,True)
        ax.legend(loc=2)
zenoh.init_logger()
print("Openning session...")
z = zenoh.open(conf)
print("Declaring Subscriber on '{}'...".format(args.key))
# Samples arrive on a background task and are appended by listener().
sub = z.declare_subscriber(args.key, listener)
# The animation just keeps rescaling; plt.show() blocks until the window closes.
ani = FuncAnimation(fig, update)
plt.show()
20189362717 | import argparse
from pathlib import Path
from os.path import basename, isfile, isdir, splitext
import glob
def _list_modules(folder_name):
modules = glob.glob(str(Path(__file__).resolve().parent / folder_name / '*'))
return list(
splitext(basename(f))[0]
for f in modules
if (isfile(f) and splitext(f)[1] == '.py' and basename(f) != '__init__.py')
or (isdir(f) and isfile(Path(f) / '__init__.py'))
)
def get_args():
    """Build and parse the pedestrian-monitor command line.

    Sources, sizing, playback, and the pluggable pipeline stages
    (preprocessor / detector / tracker / clusterer) are discovered from
    their sibling package folders via _list_modules().
    """
    parser = argparse.ArgumentParser(
        description='COMP9517 20T1 Project - Pedestrian Detecting, Tracking and Clustering',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # --- input sources -------------------------------------------------
    parser.add_argument(
        '-si',
        '--source-images',
        help='(REQUIRED) Source images, can be either a directory or wildcard files, e.g. sequence/**/*.jpg',
        dest='path',
        default=str(Path(__file__).resolve().parent.parent / 'sequence' / '*.jpg'),
    )
    parser.add_argument(
        '-sv',
        '--source-video',
        help='(REQUIRED) Source video, can be a camera index, a file or a url.', # TODO: Unfinished
        dest='path_video',
        default='',
    )
    # --- output sizing and playback ------------------------------------
    parser.add_argument(
        '-iw',
        '--image-width',
        help='Image width in pixels for resizing. '
             '0: take width from the first image; '
             '-1: same with 0 but keeps aspect ratio and creates black edges.',
        dest='width',
        type=int,
        default=-1,
    )
    parser.add_argument(
        '-ih',
        '--image-height',
        help='Image height in pixels for resizing.'
             '0: take width from the first image; '
             '-1: same with 0 but keeps aspect ratio and creates black edges.',
        dest='height',
        type=int,
        default=-1,
    )
    parser.add_argument(
        '-fps',
        '--frames-per-second',
        help='Playback frames per second.',
        dest='fps',
        type=float,
        default=10,
    )
    parser.add_argument(
        '--frame-skipping',
        help='Enable frame skipping when the processing speed cannot keep up.',
        dest='frame_skipping',
        action='store_true',
    )
    parser.add_argument(
        '--listening',
        help='Enable pulling listener.',
        dest='listening',
        action='store_true',
    )
    # --- pipeline stage selection; choices come from the stage folders --
    parser.add_argument(
        '-pp',
        '--preprocessor',
        help='Use a specific preprocessor',
        dest='preprocessor',
        choices=_list_modules('preprocessors'),
        default='sample_preprocessor',
    )
    parser.add_argument(
        '--no-preprocessor',
        help='Disable image preprocessing',
        dest='no_preprocessor',
        action='store_true',
    )
    parser.add_argument(
        '-dt',
        '--detector',
        help='Use a specific detector',
        dest='detector',
        choices=_list_modules('detectors'),
        default='sample_detector',
    )
    parser.add_argument(
        '--no-detector',
        help='Disable pedestrian detecting',
        dest='no_detector',
        action='store_true',
    )
    parser.add_argument(
        '-tk',
        '--tracker',
        help='Use a specific tracker',
        dest='tracker',
        choices=_list_modules('trackers'),
        default='sample_tracker',
    )
    parser.add_argument(
        '--no-tracker',
        help='Disable pedestrian re-id and path tracking',
        dest='no_tracker',
        action='store_true',
    )
    parser.add_argument(
        '-cl',
        '--clusterer',
        help='Use a specific clusterer',
        dest='clusterer',
        choices=_list_modules('clusterers'),
        default='sample_clusterer',
    )
    parser.add_argument(
        '--no-clusterer',
        help='Disable pedestrian clustering (group detection)',
        dest='no_clusterer',
        action='store_true',
    )
    parser.add_argument(
        '-time',
        '--measure-time',
        help='Measure and print the time consumption of each step',
        dest='measure_time',
        action='store_true',
    )
    # if len(sys.argv) == 1:
    #     parser.print_help(sys.stderr)
    #     sys.exit(1)
    return parser.parse_args()
| patli96/COMP9517_20T1 | pedestrian_monitor/console_arguments.py | console_arguments.py | py | 4,198 | python | en | code | 1 | github-code | 36 |
38715734412 | #!/usr/bin/env python3
import re
gates = {}  # wire id -> gate object, populated while parsing input.txt
cache = {}  # wire id -> memoised evaluated value (rebound to {} for part 2)
def ev(gate_id):
    """Return the value of wire ``gate_id``, memoising results in ``cache``."""
    if gate_id in cache:
        return cache[gate_id]
    value = gates[gate_id].evaluate()
    cache[gate_id] = value
    return value
class ConstantGate:
    """A wire driven either by a literal number or by another wire."""

    def __init__(self, value):
        self.value = value

    def evaluate(self):
        # A decimal literal evaluates to itself; anything else is a wire id.
        if not self.value.isnumeric():
            return ev(self.value)
        return int(self.value)
class NotGate:
    """Bitwise complement of another wire's value.

    Note: Python's ``~`` yields a negative int for non-negative inputs; the
    puzzle treats wires as 16-bit, so downstream masking may be relied on --
    TODO confirm this is harmless for this input.
    """
    def __init__(self, source):
        self.source = source
    def evaluate(self):
        return ~ ev(self.source)
class AndGate:
    """Bitwise AND of two operands, each a numeric literal or a wire id."""

    def __init__(self, a, b):
        self.a = a
        self.b = b

    def _operand(self, ref):
        # Duplicated literal-vs-wire resolution extracted into one helper.
        return int(ref) if ref.isnumeric() else ev(ref)

    def evaluate(self):
        """Return ``a & b`` after resolving both operands."""
        return self._operand(self.a) & self._operand(self.b)
class OrGate:
    """Bitwise OR of two operands, each a numeric literal or a wire id."""

    def __init__(self, a, b):
        self.a = a
        self.b = b

    def _operand(self, ref):
        # Duplicated literal-vs-wire resolution extracted into one helper.
        return int(ref) if ref.isnumeric() else ev(ref)

    def evaluate(self):
        """Return ``a | b`` after resolving both operands."""
        return self._operand(self.a) | self._operand(self.b)
class LShiftGate:
    """Left-shift a wire's value by a fixed number of bits."""
    def __init__(self, source, bits):
        self.source = source
        self.bits = bits
    def evaluate(self):
        return ev(self.source) << self.bits
class RShiftGate:
    """Right-shift a wire's value by a fixed number of bits."""
    def __init__(self, source, bits):
        self.source = source
        self.bits = bits
    def evaluate(self):
        return ev(self.source) >> self.bits
# Groups 1 and 2 capture the two operands of a binary gate line
# such as "x AND y -> z".
args_re = re.compile(r'([a-z0-9]+)\s+[A-Z]+\s+([a-z0-9]+)')
# Parse the circuit: each line is "<expr> -> <dest wire>".
# 'NOT' is tested first because it is unary; the remaining keyword checks
# rely on no gate keyword being a substring of another in this input.
with open('input.txt', 'r') as f:
    for line in f:
        idx = line.index('->')
        dest = line[idx+2:].strip()
        source = line[:idx]
        if 'NOT' in source:
            source = source[4:].strip()
            gates[dest] = NotGate(source)
        elif 'OR' in source:
            m = args_re.match(source)
            gates[dest] = OrGate(m[1], m[2])
        elif 'AND' in source:
            m = args_re.match(source)
            gates[dest] = AndGate(m[1], m[2])
        elif 'RSHIFT' in source:
            m = args_re.match(source)
            gates[dest] = RShiftGate(m[1], int(m[2]))
        elif 'LSHIFT' in source:
            m = args_re.match(source)
            gates[dest] = LShiftGate(m[1], int(m[2]))
        else:
            gates[dest] = ConstantGate(source.strip())
# Part 1: evaluate wire 'a'.
print(ev('a'))
# Part 2: clear the memoisation cache and override wire 'b' with the
# constant 3176 (presumably the part-1 answer), then re-evaluate 'a'.
cache = {}
gates['b'] = ConstantGate('3176')
print(ev('a')) | lvaughn/advent | 2015/7/gates.py | gates.py | py | 2,479 | python | en | code | 1 | github-code | 36 |
27550085200 | import datetime
from imap_tools import EmailAddress
DATA = dict(
subject='double_fields',
from_='kaukinvk@yandex.ru',
to=('aa@aa.ru', 'bb@aa.ru'),
cc=('cc@aa.ru', 'dd@aa.ru'),
bcc=('zz1@aa.ru', 'zz2@aa.ru'),
reply_to=('foma1@company.ru', 'petr1@company.ru', 'foma2@company.ru', 'petr2@company.ru'),
date=datetime.datetime(2019, 5, 1, 12, 20),
date_str='Wed, 01 May 2019 12:20',
text='',
html='<div>double_fields</div>',
headers={'to': ('aa@aa.ru', 'bb@aa.ru', ''), 'cc': ('cc@aa.ru', 'dd@aa.ru'), 'bcc': ('zz1@aa.ru', 'zz2@aa.ru'), 'reply-to': ('=?UTF-8?B?0L/RgNC40LLQtdGC?= <foma1@company.ru>,\r\n =?UTF-8?B?0L/QvtC60LA=?= <petr1@company.ru>', '=?UTF-8?B?0L/RgNC40LLQtdGC?= <foma2@company.ru>,\r\n =?UTF-8?B?0L/QvtC60LA=?= <petr2@company.ru>'), 'from': ('=?utf-8?B?0JrQsNGD0LrQuNC9INCS0LvQsNC00LjQvNC40YA=?= <kaukinvk@yandex.ru>',), 'envelope-from': ('kaukinvk@yandex.ru',), 'subject': ('double_fields',), 'mime-version': ('1.0',), 'date': ('Wed, 01 May 2019 12:20',), 'message-id': ('<8872861556695229@myt5-262fb1897c00.qloud-c.yandex.net>',), 'content-type': ('multipart/mixed;\r\n\tboundary="----==--bound.887287.myt5-262fb1897c00.qloud-c.yandex.net"',), 'return-path': ('kaukinvk@yandex.ru',)},
attachments=[],
from_values=EmailAddress(name='Каукин Владимир', email='kaukinvk@yandex.ru'),
to_values=(EmailAddress(name='', email='aa@aa.ru'), EmailAddress(name='', email='bb@aa.ru')),
cc_values=(EmailAddress(name='', email='cc@aa.ru'), EmailAddress(name='', email='dd@aa.ru')),
bcc_values=(EmailAddress(name='', email='zz1@aa.ru'), EmailAddress(name='', email='zz2@aa.ru')),
reply_to_values=(EmailAddress(name='привет', email='foma1@company.ru'), EmailAddress(name='пока', email='petr1@company.ru'), EmailAddress(name='привет', email='foma2@company.ru'), EmailAddress(name='пока', email='petr2@company.ru')),
) | ikvk/imap_tools | tests/messages_data/double_fields.py | double_fields.py | py | 1,917 | python | en | code | 608 | github-code | 36 |
32889657187 | import math
import os
from pickle import FALSE, TRUE
import nltk
import string
from nltk.stem import PorterStemmer
import json
import tkinter as tk
from nltk import WordNetLemmatizer
lemmatizer=WordNetLemmatizer()
# Maps every punctuation character to a space so that punctuation acts as a
# token separator instead of sticking to words.
remove_punctuation_translator = str.maketrans(string.punctuation, ' '*len(string.punctuation))
def stopWord():
    """Load the stop-word list from ``Stopword-List.txt``.

    Returns:
        list[str]: one stripped stop word per non-empty line, in file order.
    """
    # Fix: the original opened the file without ever closing it; the context
    # manager guarantees the handle is released even if reading fails.
    with open("Stopword-List.txt") as f:
        return [word.strip() for word in f.read().split("\n") if word]
def readFromFileAndMakeIndexes():
    """Read abstracts 1..448 from ``Abstracts/`` and build the index.

    Returns (vectorSpace, docVector, df) where vectorSpace is the vocabulary
    (one entry per first occurrence of a term *per document*, so it contains
    duplicates across documents), docVector is a list of per-document
    {term: tf} dicts, and df maps each term to its document frequency.

    NOTE(review): the function only returns from inside ``if(i==100)``, i.e.
    after processing 101 documents; the remaining 347 files are never read.
    Confirm whether this early cut-off is intentional debugging.
    """
    #getcwd brings the existing path of file
    # path=os.getcwd()
    # path=path+'/Abstracts/'
    #getting all files in path
    # files=os.listdir(path)
    i=0
    vectorSpace=[]
    docVector=[]
    df={}
    stop_words=stopWord()  # NOTE(review): loaded but never used below
    # print(stop_words)
    # for file in files:
    for i in range(448):
        doc_id=i+1
        # NOTE(review): backslash path separator is Windows-only.
        f=open("Abstracts\\"+str(doc_id)+".txt")
        words=[]
        new_words=[]
        #split is a built in function used to break the documents into sentences
        for line in f.read().split("\n")[0:]:
            if line:
                #remove any punctuation in a line
                line=line.translate(remove_punctuation_translator)
                #nltk libarary function used to make sentences into word tokens
                words=nltk.word_tokenize(line)
                for items in words:
                    if len(items)>1:  #only keep tokens longer than one character
                        items=items.translate(remove_punctuation_translator)
                        items=lemmatizer.lemmatize(items)
                        new_words.append(items.lower())
                        # print(line)
                    # new_words.append(word_stemmer.stem(items))
        #patition function is sued to break string at the first occurence of '.'
        # doc_id=(file.partition(".")[0])
        #convert from sting to int
        doc_id=int(doc_id)
        #Creating TermFrequency VECTOR (TF)
        tf={}
        temp=[]  # terms already counted towards df for this document
        # flag=False
        for word in new_words:
            if tf.__contains__(word):
                tf[word]+=1
                # print(word)
            else:
                vectorSpace.append(word)
                tf[word]=1
            if word not in temp:
                if df.__contains__(word):
                    df[word]+=1
                else:
                    df[word]=1
                temp.append(word)
        docVector.append(tf)
        if(i==100):
            # NOTE(review): converts counts to tf-idf and returns after 101
            # docs; also mutates df in place via tfIdfScore.
            docVector,df=tfIdfScore(vectorSpace,docVector,df)
            print(docVector)
            return vectorSpace,docVector,df
def tfIdfScore(vectorSpace, docVector, df):
    """Convert raw term/document counts into tf-idf weights, in place.

    Args:
        vectorSpace: vocabulary terms (may contain duplicates across docs).
        docVector: list of per-document ``{term: raw count}`` dicts.
        df: ``{term: document frequency}`` dict (mutated into idf values).

    Returns:
        tuple: ``(docVector, df)`` with each tf scaled to ``1 + log10(tf)``,
        df converted to ``idf = log10(N / df)``, and each tf multiplied by
        its idf.
    """
    N = len(docVector)
    # Bug fix: vectorSpace holds one entry per (term, document) pair, so a
    # term appearing in several documents occurred several times and both
    # log transforms were re-applied to already-transformed values.
    # Iterating over the unique terms applies each transform exactly once.
    for word in set(vectorSpace):
        for d in docVector:
            if word in d:
                d[word] = 1 + math.log10(d[word] if d[word] > 0 else 1)
        df[word] = math.log10(N / df[word] if df[word] > 0 else 1)
    # Multiply each document's tf by the term's idf.
    for word in df:
        for d in docVector:
            if word in d:
                d[word] = d[word] * df[word]
    return docVector, df
def queryProcess(q,vectorSpace,df,docVector):
    """Build a tf-idf vector for query string ``q`` and score all documents.

    NOTE(review): if a query term exists in the vocabulary but its counted
    frequency stays 0 (e.g. it was filtered as a stop word), the
    ``math.log10(0)`` below raises a ValueError -- confirm intended inputs.
    """
    queryVector={}
    N=len(docVector)  # NOTE(review): assigned but never used here
    # Initialise the query vector over the whole vocabulary.
    for word in vectorSpace:
        queryVector[word]=0
    stop_words=stopWord()
    q=q.lower().split(" ")
    # Count query-term occurrences, skipping stop words.
    for q_word in q:
        # lemmatizer.lemmatize(q_word)
        if q_word not in stop_words:
            if q_word in vectorSpace:
                queryVector[q_word]+=1
        else:
            continue
    # tf scaling: 1 + log10(count).
    for q_word in q:
        if q_word in vectorSpace:
            queryVector[q_word]=1+math.log10(queryVector[q_word])
    # Multiply by the (already idf-transformed) df values.
    for q_word in q:
        if q_word in vectorSpace:
            queryVector[q_word]=queryVector[q_word]*df[q_word]
    similarity(q,docVector,queryVector)
    # print(queryVector)
    # print(q)
def vectorDotProduct(v1, v2):
    """Return the dot product of two equal-length numeric vectors."""
    # Indexing by len(v1) (rather than zip) preserves the original's
    # IndexError behaviour when v2 is shorter than v1.
    return sum(v1[k] * v2[k] for k in range(len(v1)))
def vectorMagnitude(v):
    """Return the Euclidean (L2) norm of a numeric vector."""
    return math.sqrt(sum(component ** 2 for component in v))
def cosineScore(v1, v2):
    """Return the cosine similarity of v1 and v2 (0 when either is zero)."""
    denominator = vectorMagnitude(v1) * vectorMagnitude(v2)
    if denominator == 0:
        return 0
    return vectorDotProduct(v1, v2) / denominator
def similarity(q,docVector,queryVector):
    """Print the cosine similarity between the query vector and every doc.

    Only the components corresponding to query terms present in each
    document are compared. Results are printed, keyed by 1-based doc index.
    """
    docScore={}
    print(queryVector)
    docCount=0
    for i in range(0,len(docVector)):
        v1=[]  # document weights for the matched query terms
        v2=[]  # query weights for the same terms
        for word in q:
            if word in docVector[docCount]:
                v1.append(docVector[docCount][word])
                v2.append(queryVector[word])
                # if word=="ensemble":
                #     print(docVector[docCount][word],queryVector[word])
        docCount+=1
        docScore[docCount]=(cosineScore(v1,v2))
    print(docScore)
# Module-level demo: build the indexes (reads the Abstracts/ corpus at
# import time) and score a single-term query.
vectorSpace,docVector,df=readFromFileAndMakeIndexes()
queryProcess("ensemble",vectorSpace,df,docVector)
| mustafabawani/Vector-Space-Model | ajeeb.py | ajeeb.py | py | 4,976 | python | en | code | 0 | github-code | 36 |
23212333774 | from django.test import TestCase
from .models import User, Routine, RoutineContent, Task, Advice, Appointment, DayWeek
from .get import *
from datetime import datetime, timedelta
# Create your tests here.
class getsFunctionTestCase(TestCase):
    """Unit tests for the accessor helpers in ``get.py``.

    ``setUp`` seeds one user plus a routine containing one task, one advice
    and one appointment; several tests additionally create their own
    fixtures.
    """
    def setUp(self):
        # Shared fixtures: user pk=1, content pk=1, task/advice/appointment pk=1.
        user_a = User.objects.create_user(username='user_a', email='user_a@exp.com', password='user_a')
        content_a = RoutineContent.objects.create(content='content_a')
        task_a = Task.objects.create(routine_content=content_a)
        advice_a = Advice.objects.create(routine_content=content_a)
        appointment_a = Appointment.objects.create(routine_content=content_a)
        routine_a = Routine.objects.create()
        routine_a.routine_tasks.add(task_a)
        routine_a.routine_advices.add(advice_a)
        routine_a.routine_appointments.add(appointment_a)
    def test_getUserRoutine(self):
        user_a = User.objects.get(username='user_a')
        routine_a = Routine.objects.create()
        user_a.user_routine = routine_a
        user_a.save()
        routine = getUserRoutine(user_a)
        self.assertEqual(routine, routine_a)
    def test_getUserByUsername(self):
        user_a = User.objects.get(pk=1)
        user_a_get = getUserByUsername('user_a')
        self.assertEqual(user_a, user_a_get)
    def test_getUserByPK(self):
        user_a = User.objects.get(username='user_a')
        user_a_get = getUserByPK(1)
        self.assertEqual(user_a, user_a_get)
    def test_getUserTasks(self):
        user_a = getUserByUsername('user_a')
        content = RoutineContent.objects.create(content='content')
        task_a = Task.objects.create(routine_content=content)
        task_b = Task.objects.create(routine_content=content)
        routine_a = Routine.objects.create()
        routine_a.routine_tasks.add(task_a, task_b)
        tasks = routine_a.routine_tasks.all()
        user_a.user_routine = routine_a
        user_a.save()
        task_a_get = getUserTasks(user_a)
        # QuerySets are materialised so list equality compares contents.
        tasks = list(tasks)
        task_a_get = list(task_a_get)
        self.assertEqual(tasks, task_a_get)
    def test_getUserAdvices(self):
        user_a = getUserByUsername('user_a')
        content = RoutineContent.objects.create(content='content')
        advice_a = Advice.objects.create(routine_content=content)
        routine_a = Routine.objects.create()
        routine_a.routine_advices.add(advice_a)
        advices = routine_a.routine_advices.all()
        user_a.user_routine = routine_a
        user_a.save()
        advices_get = getUserAdvices(user_a)
        advices = list(advices)
        advices_get = list(advices_get)
        self.assertEqual(advices, advices_get)
    def test_getRoutineContent(self):
        # The same content (pk=1 from setUp) must be reachable from a task,
        # an appointment and an advice alike.
        content = RoutineContent.objects.get(pk=1)
        task_a = Task.objects.get(pk=1)
        appointment_a = Appointment.objects.get(pk=1)
        advice_a = Advice.objects.get(pk=1)
        content_task = getRoutineContent(task_a)
        content_appointment = getRoutineContent(appointment_a)
        content_advice = getRoutineContent(advice_a)
        self.assertEqual(content, content_task)
        self.assertEqual(content, content_advice)
        self.assertEqual(content, content_appointment)
    def test_timeOfContent(self):
        now = datetime.now()
        finish = now + timedelta(days=2)
        content = RoutineContent.objects.create(date_finish=finish)
        task = Task.objects.create(routine_content=content)
        day_finish = getFinishDay(task)
        day_created = getCreatedDay(task)
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(day_created+2, day_finish)
    def test_getDayWeek(self):
        monday = DayWeek.objects.create(day=0)
        wednesday = DayWeek.objects.create(day=2)
        content = RoutineContent.objects.create(content='content')
        content.day_week.add(monday, wednesday)
        content.save()
        array_day = [monday, wednesday]
        task = Task.objects.create(routine_content=content)
        days = getDayWeek(task)
        self.assertEquals(days, array_day)
    def test_getDateCreated(self):
        content = RoutineContent.objects.create(content='content')
        date = content.date_created
        task = Task.objects.create(routine_content=content)
        date_get = getCreatedDate(task)
        self.assertEqual(date, date_get)
    def test_getFinishDate(self):
        content = RoutineContent.objects.create(content='content')
        date = content.date_finish
        task = Task.objects.create(routine_content=content)
        date_get = getFinishDate(task)
        self.assertEqual(date, date_get)
    def test_getContent(self):
        content = RoutineContent.objects.create(content='content')
        content_content = content.content
        task = Task.objects.create(routine_content=content)
        content_get = getContent(task)
        self.assertEqual(content_content, content_get)
    def test_getPriority(self):
        content = RoutineContent.objects.create(content='content', priority=1)
        priority = content.priority
        task = Task.objects.create(routine_content=content)
        priority_get = getPriority(task)
        self.assertEquals(priority, priority_get)
    def test_getIsRoutine(self):
        content = RoutineContent.objects.create(content='content', is_routine=True)
        is_routine = content.is_routine
        task = Task.objects.create(routine_content=content)
        is_routine_get = getIsRoutine(task)
        self.assertEquals(is_routine, is_routine_get)
    def test_getPlace(self):
        content = RoutineContent.objects.create(content='content')
        appointment = Appointment.objects.create(routine_content=content, place='Rue')
        place = appointment.place
        place_get = getPlace(appointment)
        self.assertEquals(place, place_get)
    def test_getActive(self):
        content = RoutineContent.objects.create(content='content')
        advice = Advice.objects.create(routine_content=content, active=True)
        active_get = getActive(advice)
        self.assertTrue(active_get)
    def test_getFinished(self):
        content = RoutineContent.objects.create(content='content')
        task = Task.objects.create(routine_content=content)
        finished_get = getFinished(task)
        self.assertFalse(finished_get)
| eduardofcabrera/CS50-CalendarDay | day_day/tests.py | tests.py | py | 6,307 | python | en | code | 0 | github-code | 36 |
38767218323 | import streamlit as st
import pandas as pd
import openai
import time
import re
openai.api_key = st.secrets["openai"]
def txt(file):
    """Parse a ProQuest-style plain-text export into a DataFrame.

    Documents are separated by a long underscore rule; each document carries
    a "Full text:" section plus metadata lines. Fields missing from a
    document default to None.

    Args:
        file: file-like object whose ``getvalue()`` returns UTF-8 bytes
            (e.g. a Streamlit ``UploadedFile``).

    Returns:
        pandas.DataFrame with one row per document and the columns
        "Document Name", "Document URL", "Publication Date",
        "Publication Title" and "Full Text".
    """
    content = file.getvalue().decode('utf-8')
    documents = content.split("____________________________________________________________")
    # Keep only fragments that look like real documents.
    documents = [doc.strip() for doc in documents if "Full text:" in doc]

    document_names = []
    document_urls = []
    publication_dates = []
    publication_titles = []
    full_texts = []

    for doc in documents:
        doc_name_match = re.search(r"Document \d+ of \d+\n\n(.*?)\n\n", doc)
        document_names.append(doc_name_match.group(1) if doc_name_match else None)

        url_match = re.search(r"http[^\n]+", doc)
        document_urls.append(url_match.group(0) if url_match else None)

        date_match = re.search(r"Publication date: ([^\n]+)", doc)
        publication_dates.append(date_match.group(1) if date_match else None)

        title_match = re.search(r"Publication title: ([^\n]+)", doc)
        publication_titles.append(title_match.group(1) if title_match else None)

        full_text_match = re.search(r"Full text:([\s\S]+)", doc)
        full_texts.append(full_text_match.group(1).strip() if full_text_match else None)

    # Fix: document names were extracted but never added to the DataFrame;
    # expose them as a "Document Name" column (backward-compatible addition).
    return pd.DataFrame({
        "Document Name": document_names,
        "Document URL": document_urls,
        "Publication Date": publication_dates,
        "Publication Title": publication_titles,
        "Full Text": full_texts
    })
def gpt(prompt, text, model="gpt-3.5-turbo-16k", temperature=0.2):
    """Send ``prompt`` (system role) and ``text`` (user role) to the OpenAI
    chat API and return the assistant's reply as a string.

    Raises whatever ``openai.ChatCompletion.create`` raises (rate limits,
    auth errors, etc.); callers are expected to handle those.
    """
    response = openai.ChatCompletion.create(
        model=model,
        messages=[
            {"role": "system", "content": prompt},
            {"role": "user", "content": text}
        ],
        temperature=temperature
    )
    # Unwrap the first choice's message text.
    response = response.choices[0].message['content']
    return response
def process(df, target, prompts):
    """Run each prompt against the ``target`` column of every row of ``df``.

    Results are written in place into one column per prompt name, and a live
    preview is rendered via a Streamlit placeholder. Returns True when done.
    """
    placeholder = st.empty()
    # Ensure that all the columns are present
    for name in prompts.keys():
        if name not in df:
            df[name] = ''
        df[name] = df[name].astype('string')
    # Loop through the dataframe rows
    for i in range(0, len(df)):
        for name, prompt in prompts.items():
            try:
                text = df.loc[i, target][0:6000] # Consider refining this based on GPT's token limits
                output = gpt(prompt, text)
                df.loc[i, name] = output
                subset = df[[target, *prompts.keys()]]
                placeholder.dataframe(subset)
            except Exception as e:
                # Broad catch keeps the batch running past per-row API errors.
                st.write(f"Error encountered at index {i}. Reason: {str(e)}")
        time.sleep(20) # Wait for 20 seconds -- per-row pause, presumably API rate limiting
    return True
# Placeholder texts shown in the prompt inputs (index i -> prompt i+1).
example = ["Summarize the article", "List specific individuals mentioned",
           "Classify article type (op-ed, report, etc.", "Prompt 4", "Prompt 5"]
file = st.file_uploader("Upload a file", type=("csv", "txt"))
if file:
    try:
        df = pd.read_csv(file)
    except:
        # NOTE(review): bare except -- any CSV parse failure falls back to
        # the ProQuest text parser; consider catching a narrower exception.
        df = txt(file)
    column = st.selectbox("Column of interest:", tuple(df.columns))
    prompts = {}
    n = st.number_input('Number of prompts:', min_value = 0, max_value=5)
    for i in range(0,n):
        prompts[f"Column {i+1}"] = st.text_input(f"Prompt {i+1}",
                                                 placeholder=example[i]
                                                 )
    # NOTE(review): with n == 0 the dict is empty, any(...) is False and the
    # button stays enabled with nothing to do -- confirm intended.
    is_any_empty = (any(not val for val in prompts.values()))
    if st.button("Process", disabled=is_any_empty):
        if process(df, column, prompts):
            st.download_button(
                label="Download data as CSV",
                data=df.to_csv().encode('utf-8'),
                file_name='cleaned.csv',
                mime='text/csv',
            )
| skacholia/AnnotateDemo | main.py | main.py | py | 4,539 | python | en | code | 0 | github-code | 36 |
27648052145 | from sqlalchemy import create_engine, text
# SECURITY NOTE(review): database credentials (user and password) are
# hard-coded here and committed to source control. Move them to an
# environment variable or secrets store and rotate the exposed password.
db_connection_string = "mysql+pymysql://zi6id5p25yfq60ih6t1y:pscale_pw_OG991jS2It86MJJqrCYvCmcJ5psfFaYkxyOLA9GoTwy@ap-south.connect.psdb.cloud/enantiomer?charset=utf8mb4"

# TLS is required by the host; the CA bundle path below is platform
# specific -- TODO confirm it exists on the deployment machine.
engine = create_engine(db_connection_string,
                       connect_args={"ssl": {
                           "ssl_ca": "/etc/ssl/cert.pem"
                       }})
def load_jobs_from_db():
    """Fetch every row of the ``jobs`` table.

    Returns:
        list: SQLAlchemy ``Row`` objects, one per job.
    """
    with engine.connect() as conn:
        result = conn.execute(text("select * from jobs"))
        # result.all() already materialises the rows as a list; the manual
        # append loop in the original was a redundant copy.
        return result.all()
24688793907 | from typing import Sequence
class NetStats:
    """Facade over a backend-specific statistics implementation.

    The implementation is selected from ``backend`` unless a custom
    implementation class is supplied, in which case that class is
    instantiated with ``(net, input_shape)`` and used directly.
    """

    def __init__(
        self, net, input_shape: Sequence[int], backend: str = "torch",
        self_defined_imp_class = None
    ):
        # A user-supplied implementation class always wins over the backend.
        if self_defined_imp_class is not None:
            self.imp = self_defined_imp_class(net, input_shape)
            return
        if backend == "torch":
            from reunn.implementation import torch_imp
            self.imp = torch_imp.TorchStatsImp(net, input_shape)
        elif backend == "spikingjelly":
            from reunn.implementation import spikingjelly_imp
            self.imp = spikingjelly_imp.SpikingjellyStatsImp(net, input_shape)
        else:
            raise ValueError(f"{backend} backend not supported!")

    def count_parameter(self):
        """Delegate parameter counting to the implementation."""
        return self.imp.count_parameter()

    def count_mac(self):
        """Delegate MAC counting to the implementation."""
        return self.imp.count_mac()

    def print_summary(self):
        """Delegate summary printing to the implementation."""
        self.imp.print_summary()
| AllenYolk/reusable-nn-code | reunn/stats.py | stats.py | py | 954 | python | en | code | 1 | github-code | 36 |
29947905371 | import requests
class Test_new_joke():
    """Fetch a new Chuck Norris joke from api.chucknorris.io."""
    def __init__(self):
        pass
    def get_categories(self):
        """Fetch the category list; if the module-global ``user_categories``
        names an existing category, fetch and print a random joke from it.

        Sets the module-global ``categories_ok`` flag accordingly.
        NOTE(review): uses ``assert`` for response validation, which is
        stripped under ``python -O``; consider raising instead.
        """
        url_categories = "https://api.chucknorris.io/jokes/categories"
        print("Получение категорий шуток по ссылке - " + url_categories)
        all_categories_get = requests.get(url_categories)
        assert all_categories_get.status_code == 200
        if all_categories_get.status_code == 200:
            print("Статус код: 200\nКатегории получены\n")
        all_categories = all_categories_get.json()
        global categories_ok
        categories_ok = False
        for f in all_categories: # check the entered category and fetch a joke from it
            if f == user_categories:
                url = "https://api.chucknorris.io/jokes/random?category=" + f
                print("Получение шутки по ссылке -" + url)
                result = requests.get(url)
                assert result.status_code == 200
                print("Статус код: 200\nШутка в данной категории получена:")
                joke_get = result.json()
                joke = joke_get.get('value')
                print(joke + "\n")
                categories_ok = True
    def description(self):
        """Fetch and print the list of available joke categories."""
        url_categories = "https://api.chucknorris.io/jokes/categories"
        all_categories_get = requests.get(url_categories)
        assert all_categories_get.status_code == 200
        all_categories = all_categories_get.json()
        for f in all_categories:
            print(f)
# Interactive driver: keep prompting until the user names an existing
# category (get_categories() sets the module-global `categories_ok`).
user_categories = input("Введите название категории из которой хотите получить шутку: ")
cat = Test_new_joke()
cat.get_categories()
while not categories_ok:
    print("Такой категории не существует.\nСписок категорий:")
    cat.description()
    user_categories = input("Введите название категории из этого списка: ")
cat.get_categories() | Grassh-str/Test_api_ChuckNorris | api_chuck.py | api_chuck.py | py | 2,399 | python | ru | code | 0 | github-code | 36 |
41772303500 | import glob
import json
import subprocess
from utils import PathUtils
from plot_sim_results import plot_multiple_results
# NOTE(review): this list appears unused in this script -- the configs are
# discovered via glob in __main__ below; confirm before removing.
EXP_CONFIGS = ['ring_local_config',
               'ring_consensus_config']
if __name__ == '__main__':
    # Discover every ring experiment config and run each one as a subprocess.
    ring_configs = glob.glob(str(PathUtils.exp_configs_folder) + '/ring' + '/*.py')
    for config in ring_configs:
        # Strip directory and extension to obtain the bare config name.
        # NOTE(review): splitting on '/' assumes POSIX path separators.
        config = config.rsplit('/', 1)[1].rsplit('.',1)[0]
        command = ['python',
                   PathUtils.run_ring_file,
                   '--exp_config', config,
                   #'--no_render',
                   '--no_plot_outcome'
                   ]
        process = subprocess.Popen(command,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        # Wait for the run to finish, then mirror its stdout/stderr.
        output, error = process.communicate()
        if output:
            print(output.decode())
        if error:
            print(error.decode())
    with open(str(PathUtils.ring_json_file), 'r') as f:
        params_dict = json.load(f)
        f.close()  # NOTE(review): redundant inside the with-block
plot_multiple_results(params_dict) | matteobettini/Autonomous-Vehicles-Consensus-2021 | run_experiments_ring.py | run_experiments_ring.py | py | 1,084 | python | en | code | 0 | github-code | 36 |
26377040664 | import ast
import os
from django.http import JsonResponse
from django.shortcuts import render, HttpResponse
from django.conf import settings
import json
import commons.helper
# Create your views here.
"""
Google Map
"""
def map_hello_world(request):
    """
    Renders a page with embedded Google map. Passes variables to the associated html template via dictionary
    'context'. The URL associated with this view function is defined in urls.py.
    """
    context = {
        "google_api_key": settings.GOOGLE_MAP_API_KEY,
        # Default map centre -- presumably downtown San Antonio, TX.
        "lat_coord": 29.4190,
        "lng_coord": -98.4836,
        # Serialised for direct consumption by the template's JavaScript.
        "all_routes": json.dumps(commons.helper.getAllActiveRoutesDropDown())
    }
    return render(request, 'map/map_index.html', context)
def getRouteDetailsAJAX(request):
    """AJAX endpoint: return the stops for the route(s) named in ``?data=``.

    The ``data`` query parameter is parsed with ``ast.literal_eval`` (safe
    for literals only; malformed input raises ValueError/SyntaxError).

    Returns:
        HttpResponse: JSON object ``{"all_stops": [...]}`` -- an empty list
        when no stops were found.
    """
    user_data = ast.literal_eval(request.GET.get('data'))
    stops = commons.helper.getRoutesDetails(user_data)
    # Fix: previously a falsy result fell through and the view implicitly
    # returned None, which makes Django raise; now an empty list is returned.
    allStops = {
        'all_stops': list(stops) if stops else []
    }
    return HttpResponse(json.dumps(allStops))
def getBusColorDescriptionAJAX(request):
    """AJAX endpoint: return the bus-icon legend (icon URL plus
    seat-availability description) as a JSON list."""
    static_base_url = settings.STATIC_URL
    if settings.DEBUG:
        # In debug mode, build an absolute URL to the static folder.
        static_base_url = request.build_absolute_uri('/')[:-1].strip("/") + '/static/'
    legend = [
        ("red_bus.png", "No Seats Available"),
        ("yellow_bus.png", "Less than 3 seats Available"),
        ("green_bus.png", "More than 3 seats Available"),
    ]
    result = [
        {
            'icon': f'{static_base_url}map/icons/{icon_file}',
            'description': description
        }
        for icon_file, description in legend
    ]
    return HttpResponse(json.dumps(result))
| TAMUSA-nsf-project/django_smartmap | map/views.py | views.py | py | 1,701 | python | en | code | 2 | github-code | 36 |
73435015143 | import numpy
import statsmodels.regression
import statsmodels.tools
import scipy.optimize as opti
import scipy.interpolate as interp
import scipy.signal as signal
import matplotlib.pyplot as plt
class PVP:
    def __init__(self, sampling_period=0.01):
        """Initialise an empty PVP analysis.

        :param sampling_period: time between trajectory samples, in seconds
        """
        self.sampling_period = sampling_period
        self._kinematic_profile = None  # raw buffer, filled via the setter/_extend
        self.fit_params = [None, None, None]  # [a, b, Omega] of the spline fit
        self._pvp_params = None  # cached dict computed by the pvp_params property
        self._removed_outliers = 0  # running count of trajectories dropped as outliers
    def __repr__(self):
        """Human-readable summary; each statistic is optional.

        Every section is guarded by try/except because the corresponding
        attribute only exists after the matching processing step has run
        (e.g. ``self._kinematic_profile`` is still None before data is
        loaded, so ``.shape`` raises AttributeError).
        """
        _str = f"{self.__class__.__name__}\n"
        try:
            _str += f"Duration: \t {self.timestamps[-1]:.2f} seconds \n"
        except AttributeError:
            pass
        try:
            _str += f"Number of trajectories: \t {self.kinematic_profile.shape[0]}\n"
        except AttributeError:
            pass
        try:
            _str += f"Outliers Removed: \t {self._removed_outliers}\n"
        except AttributeError:
            pass
        try:
            # Skip "tau_index"; report tau, sigma0 and Dtau.
            _str += "PVP stats: \t tau={:.3f}, sigma0={:.2e}, Dtau={:.3f}\n".format(
                *tuple(self._pvp_params.values())[1:]
            )
        except AttributeError:
            pass
        try:
            _str += "PVP fit: \t C={:.2f}, Omega = {:.3f}".format(
                -self.fit_params[1], self.fit_params[2]
            )
        except (AttributeError, TypeError):
            pass
        return _str
    @property
    def timestamps(self):
        """Sample times in seconds, one per time step of the raw buffer."""
        return [
            self.sampling_period * i for i in range(self._kinematic_profile.shape[0])
        ]
    @property
    def pvp_params(self):
        """Key PVP statistics derived from the current profiles.

        tau_index/tau: sample index / time of the peak of the std profile;
        sigma_0: the peak std value; Dtau: the mean profile value at tau.
        Recomputed (and cached in ``_pvp_params``) on every access.
        """
        self._pvp_params = {
            "tau_index": numpy.argmax(self.std_prof),
            "tau": numpy.argmax(self.std_prof) * self.sampling_period,
            "sigma_0": numpy.max(self.std_prof),
            "Dtau": self.mean_prof[numpy.argmax(self.std_prof)],
        }
        return self._pvp_params
    @property
    def kinematic_profile(self):
        """The trajectory buffer with axis order reversed via ``.T`` --
        i.e. (n_trajectories, dimension, time) given the internal
        (time, dimension, n_trajectories) layout built by ``_extend``."""
        return self._kinematic_profile.T
    @kinematic_profile.setter
    def kinematic_profile(self, item):
        # Only 3-D arrays (time, dimension, number of movements) are accepted.
        if len(item.shape) != 3:
            raise ValueError(
                f"The shape of kinematic profiles should be of length 3 (time, dimension, number of movements) but it has length {len(item.shape)}"
            )
        self._kinematic_profile = item
    @property
    def _reference_signal(self):
        """Signal the profiles are computed from; alias of
        ``kinematic_profile``. Presumably an override point for
        subclasses -- TODO confirm."""
        return self.kinematic_profile
    def _remove_outliers(self, remove_outliers_k_sigma_away=3.5):
        """_remove_outliers
        Remove trajectory outliers, by removing all trajectories that are outside of the range (m +- k sigma), where m is the mean trajectory and sigma is the standard deviation of the set of trajectories.
        .. note:
            The formula above is based on the confidence interval for a Gaussian, and we apply it component per component. A true multivariate approach would use the confidence interval for a multivariate Gaussian see e.g. https://stats.stackexchange.com/questions/29860/confidence-interval-of-multivariate-gaussian-distribution
        :param remove_outliers_k_sigma_away: k, defaults to 3.5
        :type remove_outliers_k_sigma_away: float, optional
        :return: (index of removed trajectories in old array, new array)
        :rtype: tuple(list, array)
        """
        _indx = []
        k = remove_outliers_k_sigma_away
        for ncomp in range(self.kinematic_profile.shape[1]):
            # Per-component mean/std across trajectories, at each time step.
            mean = numpy.mean(self.kinematic_profile[:, ncomp, :], axis=0)
            std = numpy.std(self.kinematic_profile[:, ncomp, :], axis=0)
            for n, _traj in enumerate(self.kinematic_profile[:, ncomp, :]):
                if (_traj > mean + k * std).any() or (_traj < mean - k * std).any():
                    _indx.append(n)
        # De-duplicate trajectories flagged on several components.
        _indx = list(set(_indx))
        self._kinematic_profile = numpy.delete(self._kinematic_profile, _indx, axis=2)
        # Profiles are stale after deletion; recompute them.
        self.compute_profiles()
        self._removed_outliers += len(_indx)
        return _indx, self._kinematic_profile
    def plot_std_profiles(self, ax=None, fit=True, prof_kwargs=None, fit_kwargs=None):
        """plot_std_profiles
        Plots the standard deviation profiles on the provided axis. If not provided, will create a new figure from scratch.
        If fit is True, will also compute the spline fit to the second and third phase. Keyword arguments to the std plotter (prof_kwargs) and to the fit plotter (fit_kwargs) can also be given.
        .. note::
            You should have fitted the profiles prior to plotting them.
        :param ax: axis on which to draw, defaults to None. If None, creates a new figure and axis to draw on.
        :type ax: plt.axis, optional
        :param fit: whether to plot the spline fit, defaults to True
        :type fit: bool, optional
        :param prof_kwargs: keyword arguments are passed to plotter for the standard deviation profile, defaults to None
        :type prof_kwargs: dict, optional
        :param fit_kwargs: keyword arguments are passed to plotter for the spline fit, defaults to None
        :type fit_kwargs: dict, optional
        """
        if ax is None:
            _, ax = plt.subplots(1, 1)
        prof_kwargs = {} if prof_kwargs is None else prof_kwargs
        fit_kwargs = {} if fit_kwargs is None else fit_kwargs
        y = self.std_prof
        x = self.timestamps
        # Log-scale y-axis: the profile decays roughly exponentially.
        ax.semilogy(x, y, "k-", lw=3, label="PVP", **prof_kwargs)
        if fit:
            # pvp_fit_x / pvp_fit_y are produced by _fit_profiles.
            ax.semilogy(
                self.pvp_fit_x,
                self.pvp_fit_y,
                "r-",
                lw=3,
                label="Spline fit",
                **fit_kwargs,
            )
        ax.set_title(
            r"$\tau = {:.3f}, C = {{{:.1f}}}, \Omega = {:.1f}$".format(
                self.pvp_params["tau"], -self.fit_params[1], self.fit_params[2]
            )
        )
        ax.grid(visible=True, which="minor", linestyle="--")
        ax.set_xlabel("Time (s)")
        ax.set_ylabel(r"$\sigma\mathrm{(t) (m)}$")
    def _compute_mean_profile(self):
        """Compute the mean trajectory magnitude over time.

        For each component k, averages across trajectories, then combines
        the per-component means as a Euclidean norm. Result is stored in
        ``self.mean_prof`` and returned.
        """
        for k in range(self._reference_signal.shape[1]):
            self._mean_prof_incr = numpy.mean(self._reference_signal[:, k, :], axis=0)
            if k == 0:
                self.mean_prof = self._mean_prof_incr ** 2
            else:
                self.mean_prof += self._mean_prof_incr ** 2
        self.mean_prof = numpy.sqrt(self.mean_prof)
        return self.mean_prof
    def _compute_std_profile(self):
        """Compute the standard-deviation profile over time.

        For each component k, takes the std across trajectories, then
        combines the per-component stds as a Euclidean norm. Result is
        stored in ``self.std_prof`` and returned.
        """
        for k in range(self._reference_signal.shape[1]):
            self._std_prof_incr = numpy.std(self._reference_signal[:, k, :], axis=0)
            if k == 0:
                self.std_prof = self._std_prof_incr ** 2
            else:
                self.std_prof += self._std_prof_incr ** 2
        self.std_prof = numpy.sqrt(self.std_prof)
        return self.std_prof
    def compute_profiles(self):
        """compute_profiles
        Computes the mean and standard deviation profiles for a set of trajectories.
        :return: (mean profile, standard deviation profile)
        :rtype: tuple(array, array)
        """
        # Both helpers also cache their result on self (mean_prof, std_prof).
        self._compute_mean_profile()
        self._compute_std_profile()
        return self.mean_prof, self.std_prof
    def compute_pvp(self, remove_outliers_k_sigma_away=3.5):
        """compute_pvp
        Run the full PVP routine:
            + compute profiles
            + remove outliers k sigma away
            + fit profiles
        :param remove_outliers_k_sigma_away: remove outliers k sigma away, defaults to 3.5
        :type remove_outliers_k_sigma_away: float, optional
        :return: standard deviation profile, x and y values of the fit, kinematic profiles
        :rtype: tuple(array, array, array, array)
        """
        _, std_prof = self.compute_profiles()
        # NOTE(review): _remove_outliers recomputes the profiles internally,
        # so the std_prof captured above is the pre-removal one -- confirm
        # whether returning the pre-removal profile is intentional.
        _, kinematic_profiles = self._remove_outliers(
            remove_outliers_k_sigma_away=remove_outliers_k_sigma_away
        )
        _, fit_x, fit_y = self._fit_profiles()
        return std_prof, fit_x, fit_y, kinematic_profiles
    def _fit_profiles(self, **optim_kwargs):
        """Fit a piecewise-linear "monospline" to log2 of the std profile
        after its peak, then refit the linear phase with OLS.

        Accepted optim_kwargs: ``spline_param_guess`` (initial [a, b, mt])
        and ``basinhopping_n_iter`` (global optimisation iterations).
        Stores pvp_fit_x/pvp_fit_y/fit_params/second_phase_fit/omega on self
        and returns (fit_params, pvp_fit_x, pvp_fit_y).
        """
        ### Define cost function for optimization procedure
        def monospline(THETA, *args):
            # Squared error of y against a line (slope b, intercept a) that
            # becomes a constant plateau after break point mt.
            x = args[0]
            y = args[1]
            a, b, mt = THETA
            out = 0
            for i, v in enumerate(x):
                if v < mt:
                    out += (a + b * v - y[i]) ** 2
                else:
                    out += (a + b * mt - y[i]) ** 2
            return out
        ## Once Omega has been determined, run a classical LR on the second phase to get LR diagnostics
        def get_fit_second_phase(y, indx_omega):
            x = [self.sampling_period * i for i in range(indx_omega)]
            yy = y[:indx_omega]
            xx = statsmodels.tools.add_constant(x)
            model = statsmodels.regression.linear_model.OLS(yy, xx)
            self.second_phase_fit = model.fit()
            return self.second_phase_fit
        indx_tau, tau, sigma0, Dtau = tuple(self.pvp_params.values())
        ### Initialize optimization algorithm - Data and start parameters
        theta0 = optim_kwargs.pop(
            "spline_param_guess", [sigma0, -5, 1]
        )  # should work for most cases
        # Fit only the part of the profile after the peak (time re-based to 0).
        if indx_tau:
            x = self.timestamps[0:-indx_tau]
            y = numpy.log2(self.std_prof[indx_tau:])
        else:
            x = self.timestamps
            y = numpy.log2(self.std_prof)
        ## Global Optimization
        n_iter = optim_kwargs.pop("basinhopping_n_iter", 50)
        res = opti.basinhopping(
            func=monospline,
            x0=theta0,
            niter=n_iter,
            minimizer_kwargs={
                "method": "Nelder-Mead",
                "args": (x, y),
                "options": {"maxiter": 1000, "disp": 0},
            },
        )
        a, b, c = res.x
        # Break point converted from seconds to sample index.
        c0 = int(numpy.ceil(c / self.sampling_period))
        self.omega = c
        # Replace the basinhopping line parameters with the OLS refit.
        a, b = get_fit_second_phase(y, c0).params
        # Linear phase up to c0, constant plateau afterwards (in log2 space).
        _yy = [a + b * i * self.sampling_period for i in range(0, c0)] + [
            a + c * b for i in range(c0, len(y[indx_tau:]))
        ]
        # Back-transform from log2 to the original scale.
        _yy = [2 ** v for v in _yy]
        t_fit = [
            self.pvp_params["tau"] + i * self.sampling_period
            for i in range(0, len(_yy))
        ]
        self.pvp_fit_x = t_fit
        self.pvp_fit_y = _yy
        # Omega is reported in absolute time (offset by tau).
        self.fit_params = [a, b, c + self.pvp_params["tau"]]
        return self.fit_params, self.pvp_fit_x, self.pvp_fit_y
    def _extend(self, trajectory, extend_to=3):
        """_extend extend trajectories
        Extends the self._kinematic_profile buffer with a new trajectory while ensuring the series in the buffer always have the same size as the trajectory. For example, if the buffer has shape (X, Y) and the trajectory series has length (Z):
        + if Z > Y, then the buffer is filled with the last values to reach shape (X, Z)
        + if Z < Y, then the trajectory is filled with the last value to reach shape (1, Y)
        The minimum duration of the series can be set with extend_to.
        :param trajectory: trajectory to add to the self._kinematic_profile buffer
        :type trajectory: array_like
        :param extend_to: minimum duration of the series in seconds, defaults to 3
        :type extend_to: int, optional
        """
        # Promote a single series to a (samples, 1) column so the logic below
        # is uniform.
        if len(trajectory.shape) == 1:
            trajectory = trajectory.reshape(-1, 1)
        if self._kinematic_profile is None:  # First traj
            # Minimum number of samples needed to cover extend_to seconds.
            Nmin = 1 + int(numpy.ceil(extend_to / self.sampling_period))
            if trajectory.shape[0] < Nmin:
                # Pad by repeating the last sample (numpy.full broadcasts the
                # fill row over the requested shape).
                fill = numpy.full(
                    shape=(Nmin - trajectory.shape[0], trajectory.shape[1]),
                    fill_value=trajectory[-1, :],
                )
                trajectory = numpy.concatenate((trajectory, fill), axis=0)
            # Buffer axes: (samples, components, trajectories).
            self._kinematic_profile = numpy.expand_dims(trajectory, axis=2)
        else:
            # assumes trajectory has the same component count as the buffer
            # (shape[1] match) — TODO confirm callers guarantee this.
            if self._kinematic_profile.shape[0] < trajectory.shape[0]:
                # New trajectory is longer: pad the whole buffer by repeating
                # its last time sample.
                fill = numpy.full(
                    shape=(
                        -self._kinematic_profile.shape[0] + trajectory.shape[0],
                        self._kinematic_profile.shape[1],
                        self._kinematic_profile.shape[2],
                    ),
                    fill_value=self._kinematic_profile[-1, :, :],
                )
                self._kinematic_profile = numpy.concatenate(
                    (self._kinematic_profile, fill), axis=0
                )
            elif self._kinematic_profile.shape[0] > trajectory.shape[0]:
                # New trajectory is shorter: pad it with its own last sample.
                fill = numpy.full(
                    shape=(
                        self._kinematic_profile.shape[0] - trajectory.shape[0],
                        self._kinematic_profile.shape[1],
                    ),
                    fill_value=trajectory[-1, :],
                )
                trajectory = numpy.concatenate((trajectory, fill), axis=0)
            # Stack the (now size-matched) trajectory as a new entry on the
            # trajectory axis.
            self._kinematic_profile = numpy.concatenate(
                (self._kinematic_profile, numpy.expand_dims(trajectory, axis=2)), axis=2
            )
        return self._kinematic_profile
def _correct_edges(self, container, method="speed_threshold", edges = ['start', 'stop'], thresholds = [1,5], **kwargs):
"""_find_start correct start
Trajectories may not always be consistently segmented. This function performs a correction for the start point, as indicated by the method.
+ method = 'speed_threshold' :
Computes a threshold for speed as x_percent * max speed. All points the target and the first time when the threshold is crossed are removed.
\*\*kwargs = {'percent' : x_percent}
:param container: output from add_traj
:type container: numpy.ndarray
:param method: method to correct start, defaults to "speed_threshold"
:type method: str, optional
:return: trajectory with correction for speed
:rtype: numpy.ndarray
"""
time, traj, speed = container
indx = 1
stp_index = len(traj)-1
if method == "speed_threshold":
### Removes points until having reached a speed that is 1% of the max speed.
max_speed = numpy.max(numpy.abs(speed[1:]))
if 'start' in edges:
percent = thresholds[0]
while abs(speed[indx]) < max_speed * percent / 100:
indx += 1
if 'stop' in edges:
try:
percent = thresholds[1]
except IndexError:
percent = thresholds[0]
while abs(speed[stp_index]) < max_speed * percent / 100: # find first bump
stp_index -= 1
while abs(speed[stp_index]) > max_speed * percent / 100: # find start of decrease
stp_index -= 1
else:
raise NotImplementedError(
"Only method speed_threshold is implemented for now."
)
container = numpy.concatenate(
(time[indx:stp_index].reshape(1, -1), traj[indx:stp_index].reshape(1, -1)), axis=0
)
container = numpy.concatenate((container, speed[indx:stp_index].reshape(1, -1)), axis=0)
return container, indx, stp_index
    def plot_kinematic_profiles(self, ax=None, **kwargs):
        """plot_kinematic_profiles
        Plots the kinematic profiles on the provided axes. If not provided, will create a new figure from scratch (one subplot per kinematic component).
        :param ax: axes on which to draw, defaults to None. If None, creates a new figure and axes to draw on.
        :type ax: plt.axis, optional
        :param **kwargs: keyword arguments are passed to plt.plot()
        :type **kwargs: key-values
        """
        if ax is None:
            fig, ax = plt.subplots(1, 2)
        x = self.timestamps
        # One subplot per kinematic component (axis 1 of kinematic_profile).
        for k in range(self.kinematic_profile.shape[1]):
            # NOTE(review): this iterates over axis 0 (time samples) of the
            # (time, component, trajectory) slice, so each y holds one value
            # per trajectory; plotting it against the timestamps only works if
            # those lengths match — confirm the intended axis (iterating
            # kinematic_profile[:, k, :].T would plot one curve per trajectory).
            for y in self.kinematic_profile[:, k, :]:
                ax[k].plot(x, y, "-", **kwargs)
            ax[k].set_xlabel("Time (s)")
            ax[k].set_ylabel("Position")
def add_trajectory(self, t, *args, extend_to=3, target=None, correct_edges=False, correct_edges_kwargs = None):
"""Add trajectory to the set from which PVPs are computed
Pass the time series, and any number of positional series. For example in dim3 with x, y, z, you would call (with defaults kwargs)
.. code-block:: python
pvp.add_trajectory(t, x, y, z, extend_to = 3, target = None, correct_start = False)
You control the duration of the PVP (e.g. how far in time trajectories are extended). You also need to specify the target location for each trajectory. You can optionally synchronize the trajectories by pre-processing them (correct_start). Currently, a simple thresholding rule takes care of this synchronization.
:param t: time series
:type t: numpy.array like
:param args: positional series
:type args: numpy.array like
:param extend_to: minimal PVP duration, defaults to 3
:type extend_to: int, optional
:param target: target location, defaults to None. If None, will use the null vector as target.
:type target: iterable, optional
:param correct_start: whether to correct the location of the start of the movement for synchronization, defaults to False
:type correct_start: bool, optional
"""
default_correct_edges_kwargs = dict(method="speed_threshold", edges = ['start'], percent=[2, 5])
if correct_edges_kwargs is not None:
default_correct_edges_kwargs.update(correct_edges_kwargs)
target = [0 for i in args] if target is None else target
projections = self._project(target, *args)
container = self._interp_filt(
numpy.array(t),
*projections,
deriv=0,
resampling_period=self.sampling_period,
)
indx = 0
if correct_edges:
_norm = numpy.sqrt(numpy.sum(container[1, :, :] ** 2, axis=1))
tmp_container = self._interp_filt(
container[0, :, 0],
_norm,
deriv=1,
resampling_period=self.sampling_period,
)
_, indx, stp_indx = self._correct_edges(
tmp_container, **default_correct_edges_kwargs
)
self._extend(container[1, :, :], extend_to)
def _get_orthonormal_basis(self, target, x0):
target = numpy.asarray(target).squeeze()
x0 = numpy.atleast_1d(numpy.asarray(x0).squeeze())
if x0.shape[0] == 1:
return self._bon1(target, x0)
elif x0.shape[0] == 2:
return self._bon2(target, x0)
elif x0.shape[0] == 3:
return self._bon3(target, x0)
else:
raise NotImplementedError("Dimensions above 3 are not supported yet. ")
# below does not reliably produce an orthonormal basis
# switching to a manual cse disjunction up to 3D for now
# def _get_orthonormal_basis(self, target, x0):
# target = numpy.asarray(target).squeeze()
# x0 = numpy.asarray(x0).squeeze()
# random_basis = numpy.array(
# [
# (target - x0),
# *[
# -1 + 2 * numpy.random.random(x0.shape[0])
# for v in range(x0.shape[0] - 1)
# ],
# ]
# ).T
# self.Q, _ = numpy.linalg.qr(random_basis)
# return self.Q
def _bon1(self, target, x0):
return normalize(target - x0).reshape(-1, 1)
    def _bon2(self, target, x0):
        # 2D basis: first axis along target - x0, second its 90-degree rotation.
        v1 = normalize(target - x0)
        v2 = numpy.array([-v1[1], v1[0]])
        # NOTE(review): numpy.array([[v1], [v2]]).T has shape (2, 1, 2), not
        # the (2, 2) matrix with v1/v2 as columns one would expect — confirm
        # the extra axis is intended by the consumers (_project_x, _bon3).
        return numpy.array([[v1], [v2]]).T
    def _bon3(self, target, x0):
        # 3D basis: reuse the 2D construction and complete it with a cross product.
        array = self._bon2(target, x0).T
        # NOTE(review): for a 3D x0, _bon2 builds its second vector from only
        # the first two components of v1, so array[0] and array[1] may not be
        # the 3-vectors the cross product assumes — verify with a 3D call.
        vec3 = numpy.cross(array[0], array[1])
        return numpy.array([[array[0]], [array[1]], [vec3]]).T
def _project_x(self, Q, target, x):
u = (numpy.asarray(x) - numpy.asarray(target)).reshape(-1, 1)
return (Q.T @ u).squeeze()
def _project(self, target, *args):
dim = len(args)
output = numpy.zeros(shape=(dim, len(args[0])))
args = numpy.array(args).T
Q = self._get_orthonormal_basis(target, args[0, :])
for n, arg in enumerate(args):
output[:, n] = self._project_x(Q, target, arg)
return output
def _interp_filt(
self,
t,
*args,
resampling_period=0.01,
filter_kwargs={"filtername": "kaiser", "fc": 10, "rdb": 10, "width": 5},
deriv=2,
):
"""_interp_filt interpolates and filters a 1D trajectory
Takes a trajectory, resamples it with the chosen resampling_period and filters it with the given filter. Also provides the unfiltered derivatives up to order "deriv".
:param t: trajectory time
:type t: array like
:param x: trajectory position
:type x: array like
:param resampling_period: timestep at which the trajectory will be down/over sampled, defaults to 0.01
:type resampling_period: float, optional
:param filter_kwargs: scipy.signal filter description, defaults to {"filtername": "kaiser", "fc": 10, "rdb": 10, "width": 5}
:type filter_kwargs: dict, optional
:param deriv: order for the trajectory derivatives, defaults to 2
:type deriv: int, optional
:return: an array, where the first line is the time vector, and all other lines are the nth derivatives of the trajectory (0 <= n <= deriv).
:rtype: numpy.ndarray
"""
t = numpy.asarray(t)
t = t - t[0] # set null time target
Ts = resampling_period
output_container = None
for n, x in enumerate(args):
x = numpy.asarray(x)
interpolator = interp.interp1d(t, x, fill_value="extrapolate")
resampling_instants = numpy.linspace(
0,
t[-1] + (Ts - t[-1] % Ts),
num=1 + int((t[-1] + (Ts - t[-1] % Ts)) / Ts),
)
x_interp = interpolator(resampling_instants)
if filter_kwargs["filtername"] == "kaiser":
N, beta = signal.kaiserord(
filter_kwargs["rdb"], filter_kwargs["width"] * 2 * Ts
)
taps = signal.firwin(
N, filter_kwargs["fc"] * 2 * Ts, window=("kaiser", beta)
)
filtered_x = signal.filtfilt(taps, 1, x_interp)
else:
b, a = filter_kwargs["b"], filter_kwargs["a"]
filtered_x = signal.filtfilt(b, a, x_interp)
container = numpy.concatenate(
(resampling_instants.reshape(1, -1), filtered_x.reshape(1, -1)), axis=0
)
## Compute derivatives
resampling_instants = numpy.append(
resampling_instants, resampling_instants[-1] + Ts
)
for i in range(deriv):
filtered_x = numpy.concatenate(
(
filtered_x.reshape(-1)[0].reshape(1, -1),
filtered_x.reshape(1, -1),
),
axis=1,
)
filtered_x = numpy.divide(
numpy.diff(filtered_x), numpy.diff(resampling_instants)
)
container = numpy.concatenate((container, filtered_x), axis=0)
if n == 0:
output_container = numpy.expand_dims(container, axis=2)
else:
container = numpy.expand_dims(container, axis=(2))
output_container = numpy.concatenate(
(output_container, container), axis=2
)
return output_container
class PVP_alpha(PVP):
    """PVP variant whose reference signal is the first kinematic component only."""

    @property
    def _reference_signal(self):
        # Slice with a singleton range so the component axis is kept,
        # yielding shape (time, 1, trajectories).
        return self.kinematic_profile[:, 0:1, :]
class PVP_total(PVP):
    """PVP variant whose reference signal is the full kinematic profile."""

    @property
    def _reference_signal(self):
        # Use every kinematic component as the reference signal.
        return self.kinematic_profile
class PVP_generalized(PVP):
    """PVP variant using a generalized std profile across all components."""

    @property
    def _reference_signal(self):
        return self.kinematic_profile

    def _compute_std_profile(self):
        """Compute the profile as det(cov)^(1/(2*K)) over the K components at each step."""
        n_components = self._kinematic_profile.shape[1]
        exponent = 1 / 2 / n_components
        profile = numpy.empty(shape=(self._kinematic_profile.shape[0]))
        for step, sample in enumerate(self._kinematic_profile):
            covariance = numpy.cov(sample)
            if len(covariance.shape) >= 2:
                generalized_variance = numpy.linalg.det(covariance)
            else:
                # A single component yields a scalar covariance.
                generalized_variance = covariance.squeeze()
            profile[step] = generalized_variance ** exponent
        self.std_prof = profile
        return self.std_prof
def normalize(a):
    """Return a scaled to unit Euclidean (L2) norm."""
    magnitude = numpy.linalg.norm(a, 2)
    return a / magnitude
| jgori-ouistiti/PVPlib | pvplib/core.py | core.py | py | 24,949 | python | en | code | 0 | github-code | 36 |
#! /bin/env python
"""Convert a particle scene text file into a Mitsuba XML scene of spheres."""
import sys

# Radius used for every particle sphere in the generated scene.
particleSize = 0.02

if len(sys.argv) != 2:
    print("Usage: " + sys.argv[0] + " <scene txt file>")
else:
    # Read the scene file, skipping the header line; the context manager
    # guarantees the file is closed.
    with open(sys.argv[1]) as scene_file:
        scene = scene_file.readlines()[1:]
    particleString = ''
    for l in scene:
        # Each line holds the x, y, z coordinates of one particle.
        loc = [float(x) for x in l.split()]
        particleString += """
    <shape type="sphere">
        <transform name="toWorld">
            <scale value="%f"/>
            <translate x="%f" y="%f" z="%f"/>
        </transform>
        <ref id="water"/>
    </shape>
    """ % (particleSize, loc[0], loc[1], loc[2])
    # Write the XML next to the input, swapping the .txt extension for .xml.
    fName = sys.argv[1][:-3] + 'xml'
    # The previous version never closed the output file; with-block ensures
    # the data is flushed and the handle released.
    with open(fName, 'w') as of:
        of.write('<scene version="0.5.0">\n')
        of.write(particleString)
        of.write('</scene>')
| bryanpeele/PositionBasedFluids | frames/makeMitsubaFile.py | makeMitsubaFile.py | py | 775 | python | en | code | 2 | github-code | 36 |
8024443031 | """
You are given an n x n 2D matrix representing an image.
Rotate the image by 90 degrees (clockwise).
Follow up:
Could you do this in-place?
"""
class Solution:
    # @param matrix, a list of lists of integers
    # @return a list of lists of integers
    def rotate(self, matrix):
        """Rotate the n x n matrix 90 degrees clockwise, in place, and return it.

        Works layer by layer from the outermost ring inwards, moving four
        elements at a time with a single tuple assignment.
        """
        start, end = 0, len(matrix) - 1
        while start < end:
            # end - start moves per side: each corner is rotated exactly once.
            for i in range(end - start):
                (
                    matrix[start][start + i],
                    matrix[end - i][start],
                    matrix[end][end - i],
                    matrix[start + i][end],
                ) = (
                    matrix[end - i][start],
                    matrix[end][end - i],
                    matrix[start + i][end],
                    matrix[start][start + i],
                )
            start, end = start + 1, end - 1
        return matrix
# Note:
# 1. The inner loop bound is range(end-start), not range(end-start+1): each
#    side contributes end-start elements per layer, so every corner is moved
#    exactly once.
"""
matrix = [[2,29,20,26,16,28],[12,27,9,25,13,21],[32,33,32,2,28,14],[13,14,32,27,22,26],[33,1,20,7,21,7],[4,24,1,6,32,34]]
def rotate(matrix):
return [list(reversed(x)) for x in zip(*matrix)]
print rotate(matrix)
"""
| cyandterry/Python-Study | Ninja/Leetcode/48_Rotate_Image.py | 48_Rotate_Image.py | py | 1,071 | python | en | code | 62 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.