| repo_name (stringclasses, 400 values) | branch_name (stringclasses, 4 values) | file_content (stringlengths, 16–72.5k) | language (stringclasses, 1 value) | num_lines (int64, 1–1.66k) | avg_line_length (float64, 6–85) | max_line_length (int64, 9–949) | path (stringlengths, 5–103) | alphanum_fraction (float64, 0.29–0.89) | alpha_fraction (float64, 0.27–0.89) |
|---|---|---|---|---|---|---|---|---|---|
sayankae/Python-String
|
refs/heads/main
|
#Problem Given
#Convert a string containing only lower case letters to a string with upper case
#It is expected to solve the problem in O(len(str)) time
#Auxiliary space: O(1)
#function to convert a string into upper case
def to_upper(str):
    #temp will store the integer value of the 1st letter of the string
    temp = 0
#loop will run till the end of string
for i in range(len(str)):
#ord converts the char into its equivalent integer value
#ord(str[0]) - 32, so we will get ASCII value of upper case
temp = ord(str[0])-32
#storing string in the same string but removing the first element
str = str[1::]
#chr converts integer into its equivalent char value
#adding or concatenating the str and temp together then storing it in str
str = str+chr(temp)
#return str
return str
if __name__ == "__main__":
n = input()
print(to_upper(n))
|
Python
| 26
| 37.192307
| 85
|
/lower_to_upper.py
| 0.612903
| 0.602823
|
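For reference, the ord/chr arithmetic above performs the same mapping as the built-in str.upper() for all-lowercase ASCII input; a minimal sketch of that equivalence (the sample string is illustrative):

def to_upper_builtin(s: str) -> str:
    # str.upper() applies the same lowercase-to-uppercase mapping in O(len(s))
    return s.upper()

assert to_upper_builtin("hello") == "HELLO"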
seancarverphd/sgt
|
refs/heads/master
|
import pandas as pd
protein_data=pd.read_csv('../data/protein_classification.csv')
X=protein_data['Sequence']
def split(word):
return [char for char in word]
sequences = [split(x) for x in X]
import string
# import sgtdev as sgt
from sgt import Sgt
# Spark
from pyspark import SparkContext
sc = SparkContext("local", "app")
rdd = sc.parallelize(sequences)
sgt_sc = Sgt(kappa = 1, lengthsensitive = False, mode="spark", alphabets=list(string.ascii_uppercase))
rdd_embedding = sgt_sc.fit_transform(corpus=rdd)
sc.stop()
# Multi-processing
sgt_mp = Sgt(kappa = 1, lengthsensitive = False, mode="multiprocessing", processors=3)
mp_embedding = sgt_mp.fit_transform(corpus=sequences)
mp_embedding = sgt_mp.transform(corpus=sequences)
# Default
sgt = Sgt(kappa = 1, lengthsensitive = False)
embedding = sgt.fit_transform(corpus=sequences)
# Spark again
corpus = [["B","B","A","C","A","C","A","A","B","A"], ["C", "Z", "Z", "Z", "D"]]
sc = SparkContext("local", "app")
rdd = sc.parallelize(corpus)
sgt_sc = Sgt(kappa = 1,
lengthsensitive = False,
mode="spark",
alphabets=["A", "B", "C", "D", "Z"],
lazy=False)
s = sgt_sc.fit_transform(corpus=rdd)
print(s)
sc.stop()
|
Python
| 44
| 29.295454
| 102
|
/python/test.py
| 0.656907
| 0.653153
|
TrevorTheAmazing/Capstone
|
refs/heads/master
|
import datetime
import math
import os
import sys
import librosa
from sklearn.model_selection import train_test_split
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.utils import to_categorical
import numpy as np
from tqdm import tqdm
import tensorflow
import tensorboard
PREDICTION_DIRECTORY = sys.argv[1]
#0. start tb
#tensorboard --logdir logs/fit
#I. set the data path
DATA_PATH = "C:\\Users\\Trevor\\Dropbox\\dcc\\capstone\\Capstone\\MLClassifier\\mLprojData\\Data\\"
print('DataPath is set to '+DATA_PATH)
########################
#######Get_Labels#######
########################
# Input: Folder Path #
# Output: Tuple (Label, Indices of the labels, one-hot encoded labels)#
def get_labels(path=DATA_PATH):
labels = os.listdir(path)
label_indices = np.arange(0, len(labels))
return labels, label_indices, to_categorical(label_indices)
#######wav2mfcc#######
# convert .wav to mfcc
def wav2mfcc(file_path, max_len=11):
wave, sr = librosa.load(file_path, mono=True, sr=22050)#sample rate
wave = wave[::3]
mfcc = librosa.feature.mfcc(wave, sr=22050)#sample rate
# If maximum length exceeds mfcc lengths then pad
if (max_len > mfcc.shape[1]):
pad_width = max_len - mfcc.shape[1]
mfcc = np.pad(mfcc, pad_width=((0, 0), (0, pad_width)), mode='constant')
# Else cutoff the remaining parts
else:
mfcc = mfcc[:, :max_len]
return mfcc
################################
#######save_data_to_array#######
################################
def save_data_to_array(path=DATA_PATH, max_len=11):
labels, _, _ = get_labels(path)
for label in labels:
# Init mfcc vectors
mfcc_vectors = []
wavfiles = [path + label + '\\' + wavfile for wavfile in os.listdir(path + '\\' + label)]
for wavfile in tqdm(wavfiles, "Saving vectors of label - '{}'".format(label)):
mfcc = wav2mfcc(wavfile, max_len=max_len)
mfcc_vectors.append(mfcc)
np.save(label + '.npy', mfcc_vectors)
############################
#######get_train_test#######
############################
def get_train_test(split_ratio=0.9, random_state=42):
# Get labels
labels, indices, _ = get_labels(DATA_PATH)
# Getting first arrays
X = np.load(labels[0] + '.npy')
y = np.zeros(X.shape[0])
# Append all of the dataset into one single array, same goes for y
for i, label in enumerate(labels[1:]):
x = np.load(label + '.npy')
X = np.vstack((X, x))
y = np.append(y, np.full(x.shape[0], fill_value=(i + 1)))
assert X.shape[0] == len(y)
return train_test_split(X, y, test_size=(1 - split_ratio), random_state=random_state, shuffle=True)
##########################
#######load_dataset#######
##########################
def load_dataset(path=DATA_PATH):
data = prepare_dataset(path)
dataset = []
for key in data:
for mfcc in data[key]['mfcc']:
dataset.append((key, mfcc))
return dataset[:100]
#II. Second dimension of the feature is dim2
feature_dim_2 = 11
#III. Save data to array file first
save_data_to_array(max_len=feature_dim_2)
# # Loading train set and test set
X_train, X_test, y_train, y_test = get_train_test()
# # Feature dimension
feature_dim_1 = 20
channel = 1
epochs = 100
batch_size = 1
verbose = 1
num_classes = 15
# Reshaping to perform 2D convolution
X_train = X_train.reshape(X_train.shape[0], feature_dim_1, feature_dim_2, channel)
X_test = X_test.reshape(X_test.shape[0], feature_dim_1, feature_dim_2, channel)
y_train_hot = to_categorical(y_train)
y_test_hot = to_categorical(y_test)
def get_model():
print("get_model")
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(feature_dim_1, feature_dim_2, channel)))
model.add(Conv2D(48, kernel_size=(3, 3), activation='relu'))
#model.add(Conv2D(120, kernel_size=(2, 2), activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Dropout(0.25))
model.add(Flatten())
#model.add(Dense(128, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer = keras.optimizers.Adam(),
metrics=['accuracy'])
return model
# Predicts one sample
def predict(filepath, model):
print('predicting '+filepath)
sample = wav2mfcc(filepath)
sample_reshaped = sample.reshape(1, feature_dim_1, feature_dim_2, channel)
return get_labels()[0][np.argmax(model.predict(sample_reshaped))]
def prepare_dataset(path=DATA_PATH):
labels, _, _ = get_labels(path)
data = {}
for label in labels:
print('preparing ' + label + ' dataset')
data[label] = {}
data[label]['path'] = [path + label + '/' + wavfile for wavfile in os.listdir(path + '/' + label)]
vectors = []
for wavfile in data[label]['path']:
wave, sr = librosa.load(wavfile, mono=True, sr=22050)#sample rate
# Downsampling
wave = wave[::3]
mfcc = librosa.feature.mfcc(wave, sr=22050)#sample rate
vectors.append(mfcc)
data[label]['mfcc'] = vectors
return data
#prepare the dataset
prepare_dataset(DATA_PATH)
#tensorboard logs
#file_writer = tensorflow.summary.FileWriter('C:\\Users\\Trevor\\Dropbox\\dcc\\capstone\\capstone\\Capstone\\mLprojData\\Logs\\', sess.graph)
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
logdir="logs\\fit\\" + current_time #tlc
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)
model = get_model()
model.fit(X_train, y_train_hot, batch_size=batch_size, epochs=epochs, verbose=verbose, validation_data=(X_test, y_test_hot), callbacks=[tensorboard_callback])
#predict
#print(predict('C:\\Users\\Trevor\\Dropbox\\dcc\\capstone\\capstone\\Capstone\\mLprojData\\Predict\\an2.wav', model=model))
#print(predict('C:\\Users\\Trevor\\Dropbox\\dcc\\capstone\\capstone\\Capstone\\mLprojData\\Predict\\STE-000.WAV', model=model))
#print(predict('C:\\Users\\Trevor\\Dropbox\\dcc\\capstone\\capstone\\Capstone\\mLprojData\\Predict\\STE-002.WAV', model=model))
#print(predict('C:\\Users\\Trevor\\Dropbox\\dcc\\capstone\\capstone\\Capstone\\mLprojData\\Predict\\STE-003.WAV', model=model))
uploadedFiles = os.listdir(PREDICTION_DIRECTORY)
results = list()
for fileUpload in uploadedFiles:
tempResults = predict(PREDICTION_DIRECTORY+fileUpload, model=model)
print(tempResults)
results.append(tempResults)
|
Python
| 200
| 32.735001
| 158
|
/test/MLClassifier/mLproj.py
| 0.633521
| 0.614997
|
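The pad-or-truncate step inside wav2mfcc above is what makes variable-length clips uniform for the Conv2D input; a minimal standalone sketch of that logic on dummy arrays (the shapes are illustrative):

import numpy as np

def pad_or_truncate(mfcc, max_len=11):
    # Zero-pad the time axis up to max_len, or cut it down to max_len.
    if mfcc.shape[1] < max_len:
        return np.pad(mfcc, ((0, 0), (0, max_len - mfcc.shape[1])), mode='constant')
    return mfcc[:, :max_len]

assert pad_or_truncate(np.ones((20, 5))).shape == (20, 11)
assert pad_or_truncate(np.ones((20, 30))).shape == (20, 11)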
katrii/ohsiha
|
refs/heads/master
|
from django.apps import AppConfig
class OhjelmaConfig(AppConfig):
name = 'ohjelma'
|
Python
| 5
| 16.799999
| 33
|
/ohjelma/apps.py
| 0.752809
| 0.752809
|
katrii/ohsiha
|
refs/heads/master
|
# Generated by Django 3.0.2 on 2020-03-15 16:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ohjelma', '0002_song'),
]
operations = [
migrations.AddField(
model_name='song',
name='release_year',
field=models.IntegerField(default=2000),
),
]
|
Python
| 18
| 19.888889
| 52
|
/ohjelma/migrations/0003_song_release_year.py
| 0.579787
| 0.518617
|
katrii/ohsiha
|
refs/heads/master
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name = 'home'),
path('songs/', views.SongList.as_view(), name = 'song_list'),
path('view/<int:pk>', views.SongView.as_view(), name = 'song_view'),
path('new', views.SongCreate.as_view(), name = 'song_new'),
path('edit/<int:pk>', views.SongUpdate.as_view(), name = 'song_edit'),
path('delete/<int:pk>', views.SongDelete.as_view(), name = 'song_delete'),
path('tracks/', views.TrackView, name = 'track_list'),
path('yearanalysis/', views.YearAnalysis, name = 'year_analysis'),
path('analysis/<int:pk>', views.Analysis.as_view(), name = 'track_detail'),
#url(r'^tracks/(?P<tracksyear>\w+)/$', views.TrackView, name = "TrackView")
path('tracks/<int:tracksyear>', views.TrackView, name = "TrackView")
]
|
Python
| 19
| 46.63158
| 79
|
/ohjelma/urls.py
| 0.630939
| 0.630939
|
katrii/ohsiha
|
refs/heads/master
|
# Generated by Django 3.0.2 on 2020-03-13 17:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ohjelma', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Song',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('song_name', models.CharField(max_length=200)),
('song_artist', models.CharField(max_length=200)),
],
),
]
|
Python
| 21
| 26.285715
| 114
|
/ohjelma/migrations/0002_song.py
| 0.560209
| 0.516579
|
katrii/ohsiha
|
refs/heads/master
|
# Generated by Django 3.0.2 on 2020-03-29 10:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ohjelma', '0004_track'),
]
operations = [
migrations.AlterField(
model_name='track',
name='track_duration',
field=models.CharField(max_length=5),
),
]
|
Python
| 18
| 20.055555
| 49
|
/ohjelma/migrations/0005_auto_20200329_1313.py
| 0.580475
| 0.527704
|
katrii/ohsiha
|
refs/heads/master
|
# Generated by Django 3.0.2 on 2020-04-11 18:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ohjelma', '0006_auto_20200329_1329'),
]
operations = [
migrations.AddField(
model_name='track',
name='track_id',
field=models.CharField(default=0, max_length=30),
preserve_default=False,
),
]
|
Python
| 19
| 21.736841
| 61
|
/ohjelma/migrations/0007_track_track_id.py
| 0.583333
| 0.50463
|
katrii/ohsiha
|
refs/heads/master
|
from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic import ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from ohjelma.models import Song
from ohjelma.models import Track
import json
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
def index(request):
return HttpResponse('Welcome.')
class SongList(ListView):
model = Song
class SongView(DetailView):
model = Song
class SongCreate(CreateView):
model = Song
fields = ['song_name', 'song_artist', 'release_year']
success_url = reverse_lazy('song_list')
class SongUpdate(UpdateView):
model = Song
fields = ['song_name', 'song_artist', 'release_year']
success_url = reverse_lazy('song_list')
class SongDelete(DeleteView):
model = Song
success_url = reverse_lazy('song_list')
#Formatting the duration time
#Takes milliseconds as parameter and returns a string mm:ss
def MsFormat(milliseconds):
dur_s = (milliseconds/1000)%60
dur_s = int(dur_s)
if dur_s < 10:
dur_s = "0{}".format(dur_s)
dur_m = (milliseconds/(1000*60))%60
dur_m = int(dur_m)
dur = "{}:{}".format(dur_m, dur_s)
return dur
def TrackView(request, tracksyear):
Track.objects.all().delete() #Clear old info
query = 'year:{}'.format(tracksyear)
#Spotify developer keys
cid = '8f91d5aff7b54e1e93daa49f123d9ee9'
secret = 'f23421ee54b144cabeab9e2dbe9104a7'
client_credentials_manager = SpotifyClientCredentials(client_id=cid, client_secret=secret)
sp = spotipy.Spotify(client_credentials_manager = client_credentials_manager)
#Lists for counting year averages
l_dance = []
l_en = []
l_aco = []
l_val = []
for i in range(0,100,50):
track_results = sp.search(q=query, type='track', limit=50,offset=i)
for i, t in enumerate(track_results['tracks']['items']):
id = t['id']
artist = t['artists'][0]['name']
song = t['name']
dur_ms = t['duration_ms']
pop = t['popularity']
dur = MsFormat(dur_ms)
trackinfo = sp.audio_features(id)
dance = trackinfo[0]['danceability']
en = trackinfo[0]['energy']
key = trackinfo[0]['key']
loud = trackinfo[0]['loudness']
spee = trackinfo[0]['speechiness']
aco = trackinfo[0]['acousticness']
inst = trackinfo[0]['instrumentalness']
live = trackinfo[0]['liveness']
val = trackinfo[0]['valence']
temp = trackinfo[0]['tempo']
l_dance.append(dance)
l_en.append(en)
l_aco.append(aco)
l_val.append(val)
Track.objects.create(track_id = id, track_artist = artist,
track_name = song, track_duration = dur, track_popularity = pop,
track_danceability = dance, track_energy = en, track_key = key,
track_loudness = loud, track_speechiness = spee,
track_acousticness = aco, track_instrumentalness = inst,
track_liveness = live, track_valence = val, track_tempo = temp)
avgdance = calculate_average(l_dance)*100
avgene = calculate_average(l_en)*100
avgaco = calculate_average(l_aco)*100
avgval = calculate_average(l_val)*100
alltracks = Track.objects.all()
context = {'alltracks': alltracks, 'year': tracksyear, 'avgdance': avgdance, 'avgene': avgene, 'avgaco': avgaco, 'avgval': avgval}
return render(request, 'tracks.html', context)
#View for each track detailed information
class Analysis(DetailView):
model = Track
#Takes a list (of numbers) as parameter, returns the average
def calculate_average(num):
sum_num = 0
for t in num:
sum_num = sum_num + t
avg = sum_num / len(num)
return avg
#View for analytics
def YearAnalysis(request):
#Spotify developer keys
cid = '8f91d5aff7b54e1e93daa49f123d9ee9'
secret = 'f23421ee54b144cabeab9e2dbe9104a7'
client_credentials_manager = SpotifyClientCredentials(client_id=cid, client_secret=secret)
sp = spotipy.Spotify(client_credentials_manager = client_credentials_manager)
#Lists for saving yearly averages
dance = []
en = []
aco = []
val = []
years = []
most_populars = []
most_danceable = ""
best_dance = 0
happiest = ""
best_val = 0
most_acoustic = ""
best_aco = 0
most_energetic = ""
best_en = 0
for year in range (1980, 2020):
bestpop = 0
mostpop = ""
l_dance = []
l_en = []
l_aco = []
l_val = []
for i in range(0,100,50):
query = 'year:{}'.format(year)
track_results = sp.search(q=query, type='track', limit=50, offset=i)
for i, t in enumerate(track_results['tracks']['items']):
#Popularity check
pop = t['popularity']
if pop > bestpop:
mostpop = "{} by {}. Popularity: {}.".format(t['name'], t['artists'][0]['name'], pop)
bestpop = pop
elif pop == bestpop:
mostpop = mostpop + " AND {} by {}. Popularity: {}.".format(t['name'], t['artists'][0]['name'], pop)
id = t['id']
trackinfo = sp.audio_features(id)
d = trackinfo[0]['danceability']
e = trackinfo[0]['energy']
a = trackinfo[0]['acousticness']
v = trackinfo[0]['valence']
l_dance.append(d)
l_en.append(e)
l_aco.append(a)
l_val.append(v)
if d > best_dance:
most_danceable = "{} by {}. ({}) Danceability: {}.".format(t['name'], t['artists'][0]['name'], year, d)
best_dance = d
elif d == best_dance:
most_danceable = most_danceable + " AND {} by {}. ({}) Danceability: {}.".format(t['name'], t['artists'][0]['name'], year, d)
if e > best_en:
most_energetic = "{} by {}. ({}) Energy: {}.".format(t['name'], t['artists'][0]['name'], year, e)
best_en = e
elif e == best_en:
most_energetic = most_energetic + " AND {} by {}. ({}) Energy: {}.".format(t['name'], t['artists'][0]['name'], year, e)
if a > best_aco:
most_acoustic = "{} by {}. ({}) Acousticness: {}.".format(t['name'], t['artists'][0]['name'], year, a)
best_aco = a
elif a == best_aco:
most_acoustic = most_acoustic + " AND {} by {}. ({}) Acousticness: {}.".format(t['name'], t['artists'][0]['name'], year, a)
if v > best_val:
happiest = "{} by {}. ({}) Valence: {}.".format(t['name'], t['artists'][0]['name'], year, v)
best_val = v
elif v == best_val:
happiest = happiest + " AND {} by {}. ({}) Valence: {}.".format(t['name'], t['artists'][0]['name'], year, v)
#Calculate year averages and add to lists
dance.append(calculate_average(l_dance))
en.append(calculate_average(l_en))
aco.append(calculate_average(l_aco))
val.append(calculate_average(l_val))
years.append(year)
most_populars.append(mostpop)
#Zip year and most popular song to a list of 2-valued tuples
yearly_populars = zip(years, most_populars)
context = {"years": years, "danceability": dance, "energy": en,
"acousticness": aco, "valence": val, "yearly_populars": yearly_populars,
"most_acoustic": most_acoustic, "most_energetic": most_energetic,
"most_danceable": most_danceable, "happiest": happiest}
return render(request, 'analysis.html', context)
|
Python
| 240
| 32.212502
| 145
|
/ohjelma/views.py
| 0.562594
| 0.543276
|
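MsFormat above turns a Spotify duration_ms value into an mm:ss string; a quick standalone check of that arithmetic (the input value is illustrative):

def ms_format(milliseconds):
    # Whole seconds and minutes, with seconds zero-padded to two digits.
    dur_s = int((milliseconds / 1000) % 60)
    dur_m = int((milliseconds / (1000 * 60)) % 60)
    return "{}:{:02d}".format(dur_m, dur_s)

assert ms_format(200000) == "3:20"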
katrii/ohsiha
|
refs/heads/master
|
from django.db import models
from django.urls import reverse
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('Date published')
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
class Song(models.Model):
song_name = models.CharField(max_length=200)
song_artist = models.CharField(max_length = 200)
release_year = models.IntegerField(default=2000)
def __str__(self):
return self.song_name
def get_absolute_url(self):
return reverse('song_edit', kwargs={'pk': self.pk})
class Track(models.Model):
track_id = models.CharField(max_length=30)
track_name = models.CharField(max_length=500)
track_artist = models.CharField(max_length = 500)
track_duration = models.CharField(max_length = 10)
track_popularity = models.IntegerField(default=100)
track_danceability = models.FloatField(max_length=10)
track_energy = models.FloatField(max_length=10)
track_key = models.IntegerField(max_length=3)
track_loudness = models.FloatField(max_length=10)
track_speechiness = models.FloatField(max_length=10)
track_acousticness = models.FloatField(max_length=10)
track_instrumentalness = models.FloatField(max_length=10)
track_liveness = models.FloatField(max_length=10)
track_valence = models.FloatField(max_length=10)
track_tempo = models.FloatField(max_length=10)
def __str__(self):
return self.track_name
|
Python
| 44
| 35.840908
| 68
|
/ohjelma/models.py
| 0.714726
| 0.684535
|
katrii/ohsiha
|
refs/heads/master
|
# Generated by Django 3.0.2 on 2020-03-29 10:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ohjelma', '0005_auto_20200329_1313'),
]
operations = [
migrations.AlterField(
model_name='track',
name='track_duration',
field=models.CharField(max_length=10),
),
]
|
Python
| 18
| 20.833334
| 50
|
/ohjelma/migrations/0006_auto_20200329_1329.py
| 0.590331
| 0.506361
|
katrii/ohsiha
|
refs/heads/master
|
# Generated by Django 3.0.2 on 2020-04-11 19:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ohjelma', '0008_track_track_danceability'),
]
operations = [
migrations.AddField(
model_name='track',
name='track_acousticness',
field=models.FloatField(default=0, max_length=10),
preserve_default=False,
),
migrations.AddField(
model_name='track',
name='track_energy',
field=models.FloatField(default=0, max_length=10),
preserve_default=False,
),
migrations.AddField(
model_name='track',
name='track_instrumentalness',
field=models.FloatField(default=0, max_length=10),
preserve_default=False,
),
migrations.AddField(
model_name='track',
name='track_key',
field=models.IntegerField(default=0, max_length=3),
preserve_default=False,
),
migrations.AddField(
model_name='track',
name='track_liveness',
field=models.FloatField(default=0, max_length=10),
preserve_default=False,
),
migrations.AddField(
model_name='track',
name='track_loudness',
field=models.FloatField(default=0, max_length=10),
preserve_default=False,
),
migrations.AddField(
model_name='track',
name='track_speechiness',
field=models.FloatField(default=0, max_length=10),
preserve_default=False,
),
migrations.AddField(
model_name='track',
name='track_tempo',
field=models.FloatField(default=0, max_length=10),
preserve_default=False,
),
migrations.AddField(
model_name='track',
name='track_valence',
field=models.FloatField(default=0, max_length=10),
preserve_default=False,
),
]
|
Python
| 67
| 30.313433
| 63
|
/ohjelma/migrations/0009_auto_20200411_2211.py
| 0.544328
| 0.522879
|
katrii/ohsiha
|
refs/heads/master
|
# Generated by Django 3.0.2 on 2020-03-28 23:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ohjelma', '0003_song_release_year'),
]
operations = [
migrations.CreateModel(
name='Track',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('track_name', models.CharField(max_length=500)),
('track_artist', models.CharField(max_length=500)),
('track_duration', models.IntegerField(default=200000)),
('track_popularity', models.IntegerField(default=100)),
],
),
]
|
Python
| 23
| 30.782608
| 114
|
/ohjelma/migrations/0004_track.py
| 0.573187
| 0.526676
|
ewheeler/nomenklatura
|
refs/heads/master
|
from setuptools import setup, find_packages
setup(
name='nomenklatura',
version='0.1',
description="Make record linkages on the web.",
long_description='',
classifiers=[
],
keywords='data mapping identity linkage record',
author='Open Knowledge Foundation',
author_email='info@okfn.org',
url='http://okfn.org',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
namespace_packages=[],
include_package_data=False,
zip_safe=False,
install_requires=[
],
tests_require=[],
entry_points=\
""" """,
)
|
Python
| 24
| 24.208334
| 70
|
/setup.py
| 0.621488
| 0.618182
|
ewheeler/nomenklatura
|
refs/heads/master
|
from nomenklatura.model.dataset import Dataset
from nomenklatura.model.entity import Entity
from nomenklatura.model.account import Account
from nomenklatura.model.upload import Upload
|
Python
| 4
| 45
| 46
|
/nomenklatura/model/__init__.py
| 0.864865
| 0.864865
|
ewheeler/nomenklatura
|
refs/heads/master
|
import logging
from logging.handlers import RotatingFileHandler
from flask import Flask
from flask import url_for as _url_for
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.oauth import OAuth
from flask.ext.assets import Environment
import certifi
from kombu import Exchange, Queue
from celery import Celery
from nomenklatura import default_settings
logging.basicConfig(level=logging.DEBUG)
app = Flask(__name__)
app.config.from_object(default_settings)
app.config.from_envvar('NOMENKLATURA_SETTINGS', silent=True)
app_name = app.config.get('APP_NAME')
file_handler = RotatingFileHandler('/var/log/nomenklatura/errors.log',
maxBytes=1024 * 1024 * 100,
backupCount=20)
file_handler.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
file_handler.setFormatter(formatter)
app.logger.addHandler(file_handler)
if app.debug is not True:
from raven.contrib.flask import Sentry
sentry = Sentry(app, dsn=app.config.get('SENTRY_DSN'))
db = SQLAlchemy(app)
assets = Environment(app)
queue_name = app_name + '_q'
app.config['CELERY_DEFAULT_QUEUE'] = queue_name
app.config['CELERY_QUEUES'] = (
Queue(queue_name, Exchange(queue_name), routing_key=queue_name),
)
celery = Celery(app_name, broker=app.config['CELERY_BROKER_URL'])
celery.config_from_object(app.config)
oauth = OAuth()
github = oauth.remote_app('github',
base_url='https://github.com/login/oauth/',
authorize_url='https://github.com/login/oauth/authorize',
request_token_url=None,
access_token_url='https://github.com/login/oauth/access_token',
consumer_key=app.config.get('GITHUB_CLIENT_ID'),
consumer_secret=app.config.get('GITHUB_CLIENT_SECRET'))
github._client.ca_certs = certifi.where()
def url_for(*a, **kw):
try:
kw['_external'] = True
return _url_for(*a, **kw)
except RuntimeError:
return None
|
Python
| 66
| 30.40909
| 85
|
/nomenklatura/core.py
| 0.698987
| 0.692716
|
ewheeler/nomenklatura
|
refs/heads/master
|
import os
def bool_env(val):
"""Replaces string based environment values with Python booleans"""
return True if os.environ.get(val, 'False').lower() == 'true' else False
#DEBUG = True
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL',
os.environ.get('SHARED_DATABASE_URL'))
APP_NAME = os.environ.get('APP_NAME', 'nomenklatura')
GITHUB_CLIENT_ID = os.environ.get('GITHUB_CLIENT_ID')
GITHUB_CLIENT_SECRET = os.environ.get('GITHUB_CLIENT_SECRET')
MEMCACHE_HOST = os.environ.get('MEMCACHIER_SERVERS')
S3_BUCKET = os.environ.get('S3_BUCKET', 'nomenklatura')
S3_ACCESS_KEY = os.environ.get('S3_ACCESS_KEY')
S3_SECRET_KEY = os.environ.get('S3_SECRET_KEY')
CELERY_BROKER = os.environ.get('CLOUDAMQP_URL')
SIGNUP_DISABLED = bool_env('SIGNUP_DISABLED')
|
Python
| 25
| 32.400002
| 76
|
/contrib/heroku_settings.py
| 0.698204
| 0.691018
|
ewheeler/nomenklatura
|
refs/heads/master
|
DEBUG = False
APP_NAME = 'nomenklatura'
CELERY_BROKER_URL = 'amqp://guest:guest@localhost:5672//'
ALLOWED_EXTENSIONS = set(['csv', 'tsv', 'ods', 'xls', 'xlsx', 'txt'])
SIGNUP_DISABLED = False
|
Python
| 8
| 23.375
| 69
|
/nomenklatura/default_settings.py
| 0.661538
| 0.641026
|
ewheeler/nomenklatura
|
refs/heads/master
|
import logging
import requests
from flask import url_for, session, Blueprint, redirect
from flask import request
from apikit import jsonify
from werkzeug.exceptions import Forbidden
from nomenklatura import authz
from nomenklatura.core import app, db, github
from nomenklatura.model import Account, Dataset
section = Blueprint('sessions', __name__)
@section.route('/sessions')
def status():
return jsonify({
'logged_in': authz.logged_in(),
'api_key': request.account.api_key if authz.logged_in() else None,
'account': request.account,
'base_url': url_for('index', _external=True)
})
@section.route('/sessions/authz')
def get_authz():
permissions = {}
dataset_name = request.args.get('dataset')
if dataset_name is not None:
dataset = Dataset.find(dataset_name)
permissions[dataset_name] = {
'view': True,
'edit': authz.dataset_edit(dataset),
'manage': authz.dataset_manage(dataset)
}
return jsonify(permissions)
@section.route('/sessions/login')
def login():
callback = url_for('sessions.authorized', _external=True)
return github.authorize(callback=callback)
@section.route('/sessions/logout')
def logout():
logging.info(authz.require(authz.logged_in()))
session.clear()
return redirect('/')
@section.route('/sessions/callback')
@github.authorized_handler
def authorized(resp):
if 'access_token' not in resp:
return redirect(url_for('index', _external=True))
access_token = resp['access_token']
session['access_token'] = access_token, ''
res = requests.get('https://api.github.com/user?access_token=%s' % access_token,
verify=False)
data = res.json()
for k, v in data.items():
session[k] = v
account = Account.by_github_id(data.get('id'))
if account is None:
if app.config.get('SIGNUP_DISABLED'):
raise Forbidden("Sorry, account creation is disabled")
account = Account.create(data)
db.session.commit()
return redirect('/')
|
Python
| 71
| 28.352112
| 84
|
/nomenklatura/views/sessions.py
| 0.654031
| 0.654031
|
ewheeler/nomenklatura
|
refs/heads/master
|
from flask.ext.assets import Bundle
from nomenklatura.core import assets
deps_assets = Bundle(
'vendor/jquery/dist/jquery.js',
'vendor/bootstrap/js/collapse.js',
'vendor/angular/angular.js',
'vendor/angular-route/angular-route.js',
'vendor/angular-bootstrap/ui-bootstrap-tpls.js',
'vendor/ngUpload/ng-upload.js',
filters='uglifyjs',
output='assets/deps.js'
)
app_assets = Bundle(
'js/app.js',
'js/services/session.js',
'js/directives/pagination.js',
'js/directives/keybinding.js',
'js/directives/authz.js',
'js/controllers/app.js',
'js/controllers/import.js',
'js/controllers/home.js',
'js/controllers/docs.js',
'js/controllers/review.js',
'js/controllers/datasets.js',
'js/controllers/entities.js',
'js/controllers/profile.js',
filters='uglifyjs',
output='assets/app.js'
)
css_assets = Bundle(
'vendor/bootstrap/less/bootstrap.less',
'vendor/font-awesome/less/font-awesome.less',
'style/style.less',
filters='less,cssrewrite',
output='assets/style.css'
)
assets.register('deps', deps_assets)
assets.register('app', app_assets)
assets.register('css', css_assets)
|
Python
| 44
| 25.818182
| 52
|
/nomenklatura/assets.py
| 0.680508
| 0.680508
|
ewheeler/nomenklatura
|
refs/heads/master
|
# shut up useless SA warning:
import warnings
warnings.filterwarnings('ignore', 'Unicode type received non-unicode bind param value.')
|
Python
| 3
| 44
| 88
|
/nomenklatura/__init__.py
| 0.792593
| 0.792593
|
ewheeler/nomenklatura
|
refs/heads/master
|
from normality import normalize
from flask.ext.script import Manager
from flask.ext.assets import ManageAssets
from nomenklatura.core import db
from nomenklatura.model import Entity, Dataset
from nomenklatura.views import app
from nomenklatura.assets import assets
manager = Manager(app)
manager.add_command('assets', ManageAssets(assets))
@manager.command
def createdb():
""" Make the database. """
db.engine.execute("CREATE EXTENSION IF NOT EXISTS hstore;")
db.engine.execute("CREATE EXTENSION IF NOT EXISTS fuzzystrmatch;")
db.create_all()
@manager.command
def flush(dataset):
ds = Dataset.by_name(dataset)
for alias in Alias.all_unmatched(ds):
db.session.delete(alias)
db.session.commit()
if __name__ == '__main__':
manager.run()
|
Python
| 31
| 23.935484
| 70
|
/nomenklatura/manage.py
| 0.730919
| 0.730919
|
kgg511/Capstone.py
|
refs/heads/master
|
# Where I am currently: function makeOutfitMyself allows the user to select an outfit choice from each category, adds it to a list, and returns the complete outfit.
# function computerChooses() has not been designed yet
# I plan on later adding in color options or allowing the user to add their own options
import random
gChoices = []
DictionaryClothing = {'head options:': 'baseball cap wig sombrero beret fedora toupee'.split(),
'chest options': 'blouse dress shirt tanktop bikini t-shirt sweater chestplate corset'.split(),
'leg options:':
'leggings skinny-jeans khaki\'s shorts daisy-dukes skirt bike-shorts tutu'.split(),
'feet options:':
'running-shoes tap-dance-shoes clogs stilettos platform-shoes sandals flipflops cowboy-boots'.split(),
'accessory options:':
'belt purse necklace headband hoop-earrings sword bow mustache goatee glasses'.split()}
# def computerChooses():
# The computer selects a random clothing option for each clothing category
# for every keyValues in DictionaryClothing:
# randomIndex = (random.randint(1, len((keyValues)-1)
# Return key[randomIndex]
def makeOutfitMyself():
# The user selects a choice for each category
Choices = []
for item in DictionaryClothing:
print(item)
print(DictionaryClothing[item])
response = ''
while response not in DictionaryClothing[item] and response != 'CC':
print("please select one of the choices, or type ‘CC’ to have the computer do it for you")
response = input()
Choices.append(response)
return Choices
# If input() in values:
# Return input()
# Else:
# randomIndex = (random.randint(1, len((key values)-1)
# Return key[randomIndex]
print("""Everyday most people must choose an outfit to wear.This game, 'Dress My Day', is here to help you design outfits.
Type MC (my choice) to make one yourself, or CC (computer choice) to have the computer make it for you.
If you make it yourself, you will be asked a series of questions about clothing type and color.
Select one of the given options by typing it in.
At any point you can respond to a question by typing “CC” and the computer will make that specific choice.
At the end, you will be told your outfit.""")
response = input()
if response == 'MC':
gChoices = makeOutfitMyself()
# Else:
# Choices.append(ComputerChooses())
# print('The outfit is now done. The outfit is: ')
# print(Choices)
print('Looks like your outfit is: ')
for item in gChoices:
print(item)
print('Hope you enjoyed')
|
Python
| 56
| 47.785713
| 160
|
/capstone1.py
| 0.669107
| 0.667643
|
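The commented-out computerChooses above sketches picking one random option per category; a minimal working version of that idea, reusing the script's DictionaryClothing structure (the function name is hypothetical):

import random

def computer_chooses(clothing):
    # Pick one random option from each category's list of choices.
    return [random.choice(options) for options in clothing.values()]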
Lchet/TensorFlow_Intro
|
refs/heads/master
|
# -*- coding:utf-8 -*-
# @Filename: Tensorflow_flow.py
# Created on: 09/10/21 10:33
# @Author: Luc
import numpy as np
import pandas as pd
import tensorflow as tf
import sys
import matplotlib.pyplot as plt
import datetime
from sklearn.preprocessing import MinMaxScaler, StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split
import seaborn as sns
def exercise_1():
"""
1. Make sure tensorflow is installed on your environment: 'conda install
tensorflow'
    2. import tensorflow as tf
    3. Check your version of tf, use print(tf.__version__)
4. Create a constant node (node1) and a Tensor node (node2) with the values [1,2,3,4,5]
and [1,1,2,3,5]
5. Perform an element-wise multiplication of the two nodes and store it to node3,
Print the value of node3, use the .numpy() method
6. Sum the values of the elements in node3, store the result in node4.
:return:
"""
print(tf.__version__)
node1 = tf.constant([1, 2, 3, 4, 5])
node2 = tf.constant([1, 1, 2, 3, 5])
print(f'node 1: {node1}, node 2: {node2}')
node3 = node1 + node2
print(f'node3 = node1 + node2: {node3}')
node4 = tf.math.reduce_sum(node3)
print(f'node4 = sum elements of node3: {node4}')
def loss(target_y, predicted_y):
return tf.reduce_mean(tf.square(target_y - predicted_y))
def get_dataset_batches(x, y, batch_size):
n_samples = x.shape[0]
num_of_batches = int(n_samples / batch_size)
enough_samples = (num_of_batches > 2)
data_set = tf.data.Dataset.from_tensor_slices((x, y))
    data_set = data_set.shuffle(buffer_size=n_samples)  # shuffle buffer covering every sample
if enough_samples:
data_set = data_set.batch(batch_size)
return data_set
class NN(tf.Module):
"""
    Fully connected Neural Network with TensorFlow
"""
def __init__(self, input_size, layers, loss_f=loss, name=None):
"""
:param input_size: number of features the NN class is getting as input
        :param layers: list of (number of neurons, activation function) tuples, one per layer.
            For example: layers = [(2, tf.nn.leaky_relu), (4, tf.nn.relu)] means the network
            has 2 layers: the first with 2 neurons and leaky_relu activation, and the last layer
            with 4 neurons (its weight matrix has dimension 2x4) and relu activation
:param loss_f: loss function address
:param name: custom name of the network
"""
super(NN, self).__init__(name=name)
self.layers = []
        self.loss = loss_f  # use the loss function supplied by the caller
with self.name_scope:
for n_neurons, f_a in layers:
self.layers.append(Layer(input_dim=input_size, output_dim=n_neurons, f_activation=f_a))
input_size = n_neurons
# @tf.Module.with_name_scope
def __call__(self, x):
# forward pass
for layer in self.layers:
x = layer(x)
return x
def __getitem__(self, item):
return self.layers[item]
def fit(self, x, y, epochs=20, batch_size=32, l_r=0.01):
# slice x,y into batches
dataset = get_dataset_batches(x, y, batch_size)
loss_values = []
for epoch in range(epochs):
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train) in enumerate(dataset):
# Open a GradientTape to record the operations run
with tf.GradientTape() as tape:
# Run the forward pass
# The operations are going to be recorded
# on the GradientTape thanks to tf.Module.variables.
y_hats = self.__call__(x_batch_train)
# Compute the loss value for this batch.
loss_value = self.loss(y_batch_train, y_hats)
# Use the gradient tape to automatically retrieve
# the gradients of the trainable variables with respect to the loss.
grads = tape.gradient(loss_value, self.trainable_variables)
# Run one step of gradient descent by updating
# the value of the variables to minimize the loss.
for d_v, v in zip(grads, self.trainable_variables):
v.assign_sub(l_r * d_v)
                # Log progress every 8 batches.
if step % 8 == 0:
print(
"Training loss (for one batch) at step %d: %.4f"
% (step, float(loss_value))
)
print(f"Seen so far: {((step + 1) * batch_size)} samples")
loss_values.append(loss_value)
print("Epoch: %2d loss=%2.5f" % (epoch, loss_value))
return loss_values
def predict(self, x):
return self.__call__(x)
def __str__(self):
return f"{self.name}, num of layers: {len(self.layers)}"
class Layer(tf.Module):
def __init__(self, input_dim, output_dim, f_activation=tf.nn.leaky_relu, name=None):
"""
init the dimension of Layer class
:param input_dim: represent n_features is first layer otherwise number of neuron in previous layer
:param output_dim: number of neurons in layer
:param f_activation: activation function
:param name:
"""
super(Layer, self).__init__(name=name)
self.output_dim = output_dim
self.input_dim = input_dim
self.f_a = f_activation
# bias
self.b = None
# previous activation @ self.w
self.z = None
# activation(self.z)
self.a = None
# weights (self.input_dim x self.out_dim)
self.w = None
self._build(input_dim, output_dim)
def _build(self, input_dim, output_dim):
"""
        Initialize the layer's weights: self.w is an (input_dim, output_dim) tensor of random
        values drawn from a normal distribution (mean=0, std=0.05), and self.b is a zero bias vector.
        :param input_dim: number of neurons in the previous layer (or number of input features)
        :param output_dim: number of neurons in this layer
        :return: None (sets self.w and self.b)
"""
w_init = tf.random_normal_initializer(mean=0.0, stddev=0.05, seed=None)
b_init = tf.zeros_initializer()
self.w = tf.Variable(w_init([input_dim, output_dim]), name='weights')
self.b = tf.Variable(b_init([output_dim]), name='bias')
def __call__(self, x):
self.z = tf.matmul(x, self.w) + self.b
self.a = self.f_a(self.z)
return self.a
def __str__(self):
rows, cols = self.w.shape
return f"{self.name}, input: {rows}, out: {cols}"
def identity(x):
return x
def exercise_2():
"""
In this exercise you will define a simple Linear Regression model with TensorFlow low-level API.
For a Linear Model y = Wx+b the graph looks like this:
    7. Load the data from the file "data_for_linear_regression_tf.csv".
8. Define a class called MyModel()
9. The class should have two Variables (W and b), and a call method that returns the model's
output for a given input value. call method - def __call__(self, x)
10. Define a loss method that receives the predicted_y and the target_y as
arguments and returns the loss value for the current prediction. Use mean square error loss
11. Define a train() method that does five train cycles of your model (i.e. 5 epochs)
a. use tf.GradientTape() and the loss function that you have defined to record the loss as the linear
operation is processed by the network.
b. use the tape.gradient() method to retrieve the derivative of the loss with respect to W and b (dW, db)
and update W and b accordingly
12. Now, use the data to train and test your model:
a. Train your model for 100 epochs, with learning_rate 0.1
b. Save your model's W and b after each epoch, store results in a list for plotting purposes.
Print the W, b and loss values after training
:return:
"""
data = pd.read_csv('data_for_linear_regression_tf.csv')
x = tf.constant(data[['x']].values, dtype=tf.float32)
y = tf.Variable(data[['y']].values, dtype=tf.float32)
# no need activation as it is a linear problem
    reg_nn = NN(1, [(2, identity), (1, identity)], name="Regression")
loss_history = reg_nn.fit(x, y, epochs=200, batch_size=32, l_r=0.001)
metrics = pd.DataFrame({"Loss": [loss.numpy() for loss in loss_history]})
data['y_pred'] = reg_nn.predict(x).numpy()
# plt.figure()
# gca stands for 'get current axis'
ax = plt.gca()
data.plot(kind='scatter', x='x', y='y', ax=ax)
data.plot(kind='scatter', x='x', y='y_pred', color='red', ax=ax)
metrics.plot()
# data.plot()
plt.show()
print(f"\n exercise_2 w: {reg_nn.trainable_variables}, loss: {reg_nn.loss(y, data['y_pred'].values)}")
def exercise_3():
"""
    In today's exercise we will build a neural network to predict avocado prices from the
following dataset:
Avocado Prices | Kaggle
2. We can use TensorFlow only (no Keras today), and build a neural network of as
many layers as we wish. Use a GradientTape to store the gradients.
3. We can play with the learning rate, and play with any other parameter we want
:return:
"""
def process_data(avocado):
ds = avocado.copy()
# labels
labels = ds[['AveragePrice']]
# drop unnecessary features
ds = ds.drop(columns=['Date', 'AveragePrice'])
# process data - categorical
cat_cols = ds.columns[(ds.dtypes == 'object')]
ds[cat_cols] = ds[cat_cols].astype('category')
# encode categories
for feature in cat_cols:
ds[feature] = ds[feature].cat.codes
# encode year
label_enc = LabelEncoder()
ds['year'] = label_enc.fit_transform(ds['year'])
        # scale numeric features (they are later cast to float32 for the tensor objects)
num_cols = ds.columns[(ds.dtypes == np.number)]
# scaler = MinMaxScaler()
scaler = StandardScaler()
ds[num_cols] = scaler.fit_transform(ds[num_cols])
return ds, labels
avocado = pd.read_csv('avocado.csv')
data, labels = process_data(avocado)
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.12, random_state=42)
    # create tensor objects
x_train = tf.constant(X_train.values, dtype=tf.float32)
y_train = tf.Variable(y_train.values, dtype=tf.float32)
x_test = tf.constant(X_test.values, dtype=tf.float32)
avocado_model = NN(input_size=x_train.shape[-1],
layers=[(128, tf.nn.tanh), (128, tf.nn.leaky_relu), (64, tf.nn.relu), (1, identity)],
name='avocado')
loss_history = avocado_model.fit(x_train, y_train, epochs=150, batch_size=128, l_r=0.0001)
metrics = pd.DataFrame({"Loss": [loss.numpy() for loss in loss_history]})
metrics.plot()
plt.show()
# run predictions on the test
result = y_test.copy()
result['AveragePrice_Predict'] = avocado_model.predict(x_test).numpy()
sns.pairplot(result)
plt.show()
loss_pred = loss(result['AveragePrice'].values, result['AveragePrice_Predict'].values)
print(f"\n model: {avocado_model} loss: {loss_pred}")
if __name__ == '__main__':
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs/gradient_tape/' + current_time + '/train'
test_log_dir = 'logs/gradient_tape/' + current_time + '/test'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
test_summary_writer = tf.summary.create_file_writer(test_log_dir)
exercise_1()
exercise_2()
exercise_3()
plt.show(block=True)
|
Python
| 310
| 37.36129
| 117
|
/Tensorflow_flow.py
| 0.606374
| 0.591995
|
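The layers argument to NN above takes (neurons, activation) tuples; a minimal construction-and-forward-pass sketch using the NN class and identity function defined in this file (the dimensions and name are illustrative):

import tensorflow as tf

# Hypothetical network: 3 input features, one hidden layer of 4 relu units, linear output.
net = NN(input_size=3, layers=[(4, tf.nn.relu), (1, identity)], name="demo")
out = net(tf.zeros([2, 3]))  # forward pass on a dummy batch of 2 samples -> shape (2, 1)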
jaime7981/Arduino_EFI
|
refs/heads/master
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
rpms = np.array([4000,3500,3000,2500,2000,1500,1000,500])
throttle = np.array([0,0,10,20,40,60,80,100,120])
efi_map = np.array([[17.2, 16.8, 15.5, 14.8, 13.8, 13.0, 12.2],
[17.0, 16.5, 15.0, 14.0, 13.4, 13.0, 12.4],
[16.8, 16.0, 14.6, 14.2, 13.6, 13.2, 12.6],
[16.6, 15.8, 14.8, 14.4, 13.8, 13.4, 12.8],
[16.4, 15.5, 15.0, 14.6, 14.0, 13.6, 13.0],
[16.2, 15.6, 15.2, 14.8, 14.2, 13.8, 13.2],
[16.0, 15.8, 15.5, 15.1, 14.6, 14.0, 13.5]])
def ShowEFIMap():
plt.figure(figsize = (6, 6))
ax = plt.subplot(111)
ax.set_ylabel("RPM")
ax.set_xlabel("Throttle")
plt.imshow(efi_map, cmap = "autumn")
ax.set_xticklabels(throttle)
ax.set_yticklabels(rpms)
for a in range(len(efi_map)):
for b in range(len(efi_map[a])):
ax.text(a,b,efi_map[b,a], ha = "center", va = "center", color = "b")
ax.set_title("EFI MAP")
plt.colorbar()
plt.show()
ShowEFIMap()
|
Python
| 35
| 30.771429
| 80
|
/EFI_map.py
| 0.516652
| 0.335734
|
hgarud/Logistic_RF_MNIST
|
refs/heads/master
|
import numpy as np
import gzip
from sklearn.preprocessing import OneHotEncoder
class MNIST_Data(object):
def __init__(self, base_dir, img_size):
self.base_dir = base_dir
self.img_size = img_size
def _load_labels(self, file_name):
file_path = self.base_dir + file_name
with gzip.open(file_path, 'rb') as f:
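            # skip the 8-byte IDX header (magic number + item count)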
labels = np.frombuffer(f.read(), np.uint8, offset=8)
return np.array(labels)
def _load_imgs(self, file_name):
file_path = self.base_dir + file_name
with gzip.open(file_path, 'rb') as f:
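            # skip the 16-byte IDX header (magic number, image count, rows, columns)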
images = np.frombuffer(f.read(), np.uint8, offset=16)
images = images.reshape(-1, self.img_size)
return np.array(images)
if __name__ == '__main__':
mnist_loader = MNIST_Data(base_dir = "/home/hrishi/1Hrishi/ECE542_Neural_Networks/Homeworks/2/Data/", img_size = 784)
train_labels = mnist_loader._load_labels("train-labels-idx1-ubyte.gz")
onehot_encoder = OneHotEncoder(n_values = 10, sparse=False)
onehot_encoded = onehot_encoder.fit_transform(train_labels.reshape(-1,1))
print(train_labels)
print(onehot_encoded)
|
Python
| 34
| 32.941177
| 121
|
/codebase/Data.py
| 0.636915
| 0.620451
|
hgarud/Logistic_RF_MNIST
|
refs/heads/master
|
from Data import MNIST_Data
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import numpy as np
import csv
mnist_loader = MNIST_Data(base_dir = "/home/hrishi/1Hrishi/ECE542_Neural_Networks/Homeworks/2/Data/", img_size = 784)
X_train = mnist_loader._load_imgs("train-images-idx3-ubyte.gz")
y_train = mnist_loader._load_labels("train-labels-idx1-ubyte.gz")
X_test = mnist_loader._load_imgs("t10k-images-idx3-ubyte.gz")
y_test = mnist_loader._load_labels("t10k-labels-idx1-ubyte.gz")
# np.random.seed(1) # Reset random state
# np.random.shuffle(X_train)
# np.random.shuffle(y_train)
# stack the labels onto the images so a single shuffle keeps rows and labels aligned
train_data = np.append(X_train, y_train[:, None], axis=1)
# print(train_data.shape)
np.random.shuffle(train_data)
X_train = train_data[:, 0:784]
y_train = train_data[:, 784]
# X_train, X_test, y_train, y_test = train_test_split(train_images, train_labels, test_size=0.33, shuffle = True, random_state=42)
# from sklearn.preprocessing import StandardScaler
# scaler = StandardScaler()
# X_train = scaler.fit_transform(X_train)
# from sklearn.decomposition import PCA
# pca = PCA(n_components = 256)
# X_train = pca.fit_transform(X_train)
# X_test = pca.fit_transform(X_test)
# l2-sag-ovr = 91.25% acc without standard scaling
# l2-sag-multinomial = 91.91% acc without standard scaling
# l1-saga-ovr = 91.37% acc without standard scaling
# l1-saga-multinomial = 92.29% acc without standard scaling
# logistic_regressor = LogisticRegression(penalty = 'l1', solver = 'saga', tol = 1e-1, multi_class = 'multinomial', verbose = 1, n_jobs = -1)
# logistic_regressor.fit(X_train, y_train)
#
# predictions = logistic_regressor.predict(X_test)
# from sklearn.metrics import accuracy_score
# print(accuracy_score(y_test, predictions))
#
# onehot_encoder = OneHotEncoder(n_values = 10, sparse = False, dtype = np.int8)
# predictions = onehot_encoder.fit_transform(y_train.reshape(-1,1))
# np.savetxt('lr.csv', predictions, delimiter = ',', fmt = '%i')
from sklearn.ensemble import RandomForestClassifier
random_forest_regressor = RandomForestClassifier(criterion = 'entropy', verbose = 1)
random_forest_regressor.fit(X_train, y_train)
predictions = random_forest_regressor.predict(X_test)
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test, predictions))
onehot_encoder = OneHotEncoder(n_values = 10, sparse = False, dtype = np.int8)
predictions = onehot_encoder.fit_transform(y_train.reshape(-1,1))
np.savetxt('rf.csv', predictions, delimiter = ',', fmt = '%i')
|
Python
| 62
| 39.967743
| 141
|
/codebase/Main.py
| 0.739764
| 0.712598
|
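The append-then-shuffle step above keeps each image paired with its label (shuffling X_train and y_train independently, as in the commented-out lines, would destroy that pairing); a minimal sketch of the same idea using a shared permutation index (the array shapes are illustrative):

import numpy as np

X = np.arange(12).reshape(6, 2)
y = np.arange(6)
perm = np.random.permutation(len(X))  # one shuffle order shared by both arrays
X, y = X[perm], y[perm]               # rows stay aligned with their labels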
lukasld/Flask-Video-Editor
|
refs/heads/main
|
import os
from flask import Flask, request, redirect, \
url_for, session, jsonify, send_from_directory, make_response, send_file
from . import api
from . import utils
from .. import VIDEO_UPLOAD_PATH, FRAMES_UPLOAD_PATH, IMG_EXTENSION, VIDEO_EXTENSION, CACHE
from . VideoProcessing import Frame, VideoUploader, VideoDownloader, Filter
from . decorators import parameter_check, url_arg_check, metadata_check
from . errors import InvalidAPIUsage
@api.route('/upload/', methods=['POST'])
@parameter_check(does_return=False, req_c_type='multipart/form-data')
@metadata_check(does_return=False, req_type='video/mp4')
def upload_video():
"""
uploads the video
"""
byteStream = request.files['file']
vu = VideoUploader()
vu.upload_from_bytestream(byteStream)
session['s_id'] = vu.id
f_c = utils.framecount_from_vid_id(vu.id)
session['video_frame_count'] = f_c
session['is_uploaded'] = True
return jsonify({'status' : '201',
'message' : 'video uploaded!'}), 201
@api.route('/preview/', defaults={'frame_idx':1}, methods=['GET'])
@api.route('/preview/<frame_idx>/', methods=['GET', 'POST'])
@parameter_check(does_return=False, req_c_type='application/json')
@url_arg_check(does_return=True, req_type=int, arg='frame_idx', session=session)
def preview_thumbnail(frame_idx):
"""
Preview a frame by index, given filter parameters
"""
if session.get('is_uploaded'):
data = request.get_json()
filter_params = data['filter_params']
session['filter_params'] = filter_params
frame = Frame(session['s_id'])
frame_i = frame.get_by_idx(frame_idx)
filter_frame = Filter(frame_i).run_func(filter_params)
frame.f_save(filter_frame, session['s_id'])
return send_from_directory(directory=f'{FRAMES_UPLOAD_PATH}',
path=f'{session["s_id"]}{IMG_EXTENSION}',
as_attachment=True), 200
raise InvalidAPIUsage('Invalid usage: please upload a video first')
@api.route('/download/', methods=['POST'])
@parameter_check(does_return=True, req_c_type='application/json', session=session)
def download_video(vid_range):
"""
Download a video given filter parameters
"""
if session.get('is_uploaded'):
data = request.get_json()
fps = data['fps']
filter_params = data['filter_params']
frame_count = session['video_frame_count']
vd = VideoDownloader(fps, vid_range)
filter_vid = vd.download(session['s_id'], frame_count, filter_params)
session['is_downloaded'] = True
return send_from_directory(directory=f'{VIDEO_UPLOAD_PATH}',
path=f'{filter_vid}{VIDEO_EXTENSION}',
as_attachment=True), 200
raise InvalidAPIUsage('Invalid usage: please upload a video first')
@api.route('/status/', methods=['GET'])
@parameter_check(req_c_type='application/json')
def status():
"""
    Report the user's progress: upload status and downloaded / total frame count
"""
resp = {}
try:
if session['is_uploaded']:
resp["upload"] = "done"
if CACHE.get(f"{session['s_id']}_d"):
d_status = CACHE.get(f"{session['s_id']}_d")
resp["downloaded_frames"] = f'{d_status}/{session["video_frame_count"]}'
if session["is_downloaded"]:
resp["is_downloaded"] = True
except KeyError:
pass
return jsonify({"status" : resp}), 200
|
Python
| 103
| 33.233009
| 91
|
/app/api/videoApi.py
| 0.620533
| 0.615712
|
lukasld/Flask-Video-Editor
|
refs/heads/main
|
from flask_swagger_ui import get_swaggerui_blueprint
swagger_ui = get_swaggerui_blueprint(
'/docs',
'/static/swagger.json',
config={
"app_name": "videoApi"
}
)
|
Python
| 10
| 21.6
| 52
|
/app/docs/__init__.py
| 0.517699
| 0.517699
|
lukasld/Flask-Video-Editor
|
refs/heads/main
|
from flask import Blueprint
api = Blueprint('videoApi', __name__)
from . import videoApi, errors, help
|
Python
| 5
| 20
| 37
|
/app/api/__init__.py
| 0.72381
| 0.72381
|
lukasld/Flask-Video-Editor
|
refs/heads/main
|
from flask import redirect, url_for, jsonify
from . import main
@main.app_errorhandler(404)
def page_not_found(e):
return jsonify(error=str(e)), 404
@main.app_errorhandler(405)
def method_not_allowed(e):
return jsonify(error=str(e)), 405
|
Python
| 10
| 23.799999
| 44
|
/app/main/errors.py
| 0.72
| 0.672
|
lukasld/Flask-Video-Editor
|
refs/heads/main
|
from flask import request, jsonify
from functools import wraps
from .errors import InvalidAPIUsage, InvalidFilterParams, IncorrectVideoFormat
"""
Almost like an Architect - makes decorations
"""
def decorator_maker(func):
def param_decorator(fn=None, does_return=None, req_c_type=None, req_type=None, arg=None, session=None):
def deco(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
result = func(does_return, req_c_type, req_type, arg, session)
if does_return:
return fn(result)
return fn(*args, **kwargs)
return wrapper
if callable(fn): return deco(fn)
return deco
return param_decorator
"""
Checks if user input is not out of bounds, and also Content-Type
"""
def wrap_param_check(does_return, req_c_type, req_type, arg, session):
check_content_type(req_c_type)
return check_correct_filter_params(session)
def check_content_type(req_c_type):
if not request.content_type.startswith(req_c_type):
raise InvalidAPIUsage(f'Content-Type should be of type: {req_c_type}', 400)
def check_correct_filter_params(session):
    if request.data:
        data = request.get_json()
        if 'filter_params' not in data:
            raise InvalidFilterParams(1)
        f_params = data['filter_params']
        if 'type' not in f_params:
            raise InvalidFilterParams(1)
if 'download' in request.url:
if 'fps' not in data:
raise InvalidFilterParams(1)
if 'max_f' in f_params and 'min_f' in f_params:
max_fr = session['video_frame_count']
min_f_raw = f_params['min_f']
max_f_raw = f_params['max_f']
if min_f_raw == "": min_f_raw = 0
if max_f_raw == "": max_f_raw = max_fr
                min_f = _check_for_req_type(int, min_f_raw, 4)
                max_f = _check_for_req_type(int, max_f_raw, 4)
                a = check_bounds(min_f, max_fr)
                b = check_bounds(max_f, max_fr)
return sorted([a, b])
def _check_for_req_type(req_type, val, ex):
try:
req_type(val)
except Exception:
raise InvalidFilterParams(ex)
return val
parameter_check = decorator_maker(wrap_param_check)
"""
Checks that the URL argument has the required type and is within frame bounds
"""
def wrap_url_arg_check(does_return, req_c_type, req_type, arg, session):
check_arg_urls(req_type, arg)
frame_idx = request.view_args[arg]
return check_bounds(frame_idx, session['video_frame_count'])
def check_arg_urls(req_type, arg):
try:
req_type(request.view_args[arg])
except ValueError:
raise InvalidAPIUsage(f'Content-Type should be of type: {req_type.__name__}', 400)
def check_bounds(frame_idx, max_frames):
f_max = int(max_frames)
f_idx = int(frame_idx)
if f_idx > f_max:
f_idx = f_max-50
elif f_idx < 1:
f_idx = 1
return f_idx
url_arg_check = decorator_maker(wrap_url_arg_check)
"""
Checks Video Metadata
"""
def wrap_metadata_check(does_return, req_c_type, req_type, arg, session):
check_metadata(req_type)
def check_metadata(req_type):
byteStream = request.files['file']
vid_type = byteStream.__dict__['headers'].get('Content-Type')
if vid_type != req_type:
raise IncorrectVideoFormat(1)
metadata_check = decorator_maker(wrap_metadata_check)
"""
Exception handler for non-endpoints
"""
def exception_handler(fn=None, ex=None, type=None, pas=False):
def deco(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
try:
fn(*args, **kwargs)
except Exception:
if not pas:
raise ex(type)
pass
return fn(*args, **kwargs)
return wrapper
if callable(fn): return deco(fn)
return deco
|
Python
| 135
| 27.992592
| 107
|
/app/api/decorators.py
| 0.599898
| 0.595554
|
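decorator_maker above turns a check function into a parameterised decorator that can either forward the check's result to the wrapped view or call it unchanged; a minimal sketch of the pattern in isolation (all names here are hypothetical):

from functools import wraps

def decorator_maker(check):
    def param_decorator(fn=None, does_return=None, **opts):
        def deco(fn):
            @wraps(fn)
            def wrapper(*args, **kwargs):
                result = check(does_return, **opts)
                if does_return:
                    return fn(result)
                return fn(*args, **kwargs)
            return wrapper
        if callable(fn):
            return deco(fn)
        return deco
    return param_decorator

@decorator_maker
def always_42(does_return, **opts):
    # stand-in "check" that just produces a value
    return 42

@always_42(does_return=True)
def show(value):
    return value

assert show() == 42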
lukasld/Flask-Video-Editor
|
refs/heads/main
|
import cv2
import math
import string
import random
import numpy as np
import skvideo.io
from PIL import Image
from .. import VIDEO_EXTENSION, VIDEO_UPLOAD_PATH, \
FRAMES_UPLOAD_PATH, IMG_EXTENSION, CACHE
FPS = 23.98
SK_CODEC = 'libx264'
def create_vid_path(name):
return f'{VIDEO_UPLOAD_PATH}/{name}{VIDEO_EXTENSION}'
def create_frame_path(name):
return f'{FRAMES_UPLOAD_PATH}/{name}{IMG_EXTENSION}'
def framecount_from_vid_id(video_id):
video_path = create_vid_path(video_id)
cap = cv2.VideoCapture(video_path)
    return math.floor(cap.get(cv2.CAP_PROP_FRAME_COUNT))  # total frame count (property id 7)
def id_generator(size, chars=string.ascii_lowercase + string.digits) -> str:
return ''.join(random.choice(chars) for _ in range(size))
def create_sk_video_writer(video_f_path, fps = None):
if not fps : fps = FPS
return skvideo.io.FFmpegWriter(video_f_path,
outputdict={'-c:v':SK_CODEC, '-profile:v':'main',
'-pix_fmt': 'yuv420p', '-r':str(fps)})
def set_cache_f_count(s_id: str, ud: str, fc: str) -> None:
CACHE.set(f'{s_id}_{ud}', fc)
def bgr_to_rgb(frame: np.ndarray) -> np.ndarray:
return frame[:, :, ::-1]
def is_greyscale(frame) -> bool:
return frame.ndim == 2
def is_rgb(frame) -> bool:
return frame.ndim == 3
def img_from_greyscale(frame: np.ndarray) -> Image:
return Image.fromarray(frame).convert("L")
def img_from_bgr(frame: np.ndarray) -> Image:
return Image.fromarray(bgr_to_rgb(frame))
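# Illustrative sketch (not part of the original module): the channel helpers
# above operate purely on array shape and axis order, as this self-check shows.
def _channel_helpers_demo():
    bgr = np.zeros((2, 2, 3), dtype=np.uint8)
    bgr[..., 0] = 255                   # blue channel in BGR order...
    rgb = bgr_to_rgb(bgr)
    assert rgb[0, 0, 2] == 255          # ...lands on the red index after the swap
    assert is_rgb(bgr) and not is_greyscale(bgr)
    assert is_greyscale(np.zeros((2, 2), dtype=np.uint8))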
|
Python
| 59
| 23.932203
| 76
|
/app/api/utils.py
| 0.650611
| 0.639756
|
lukasld/Flask-Video-Editor
|
refs/heads/main
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
"""
"""
SECRET_KEY = os.environ.get('SECRET_KEY')
FLASK_CONFIG = os.environ.get('FLASK_CONFIG')
VIDEO_EXTENSION = os.environ.get('VIDEO_EXTENSION')
VIDEO_WIDTH = os.environ.get('VIDEO_WIDTH')
VIDEO_HEIGHT = os.environ.get('VIDEO_HEIGHT')
IMG_EXTENSION = os.environ.get('IMG_EXTENSION')
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
"""
"""
DEBUG = True
config = {
'development': DevelopmentConfig,
'default': DevelopmentConfig
}
|
Python
| 33
| 17.727272
| 55
|
/config.py
| 0.624595
| 0.624595
|
lukasld/Flask-Video-Editor
|
refs/heads/main
|
from werkzeug.utils import secure_filename
from functools import partial
import subprocess as sp
import time
import skvideo.io
import numpy as np
import threading
import ffmpeg
import shlex
import cv2
import re
from PIL import Image
from werkzeug.datastructures import FileStorage as FStorage
from .. import VIDEO_EXTENSION, VIDEO_WIDTH, VIDEO_HEIGHT, \
VIDEO_UPLOAD_PATH, FRAMES_UPLOAD_PATH, IMG_EXTENSION
from . import utils
from . errors import IncorrectVideoFormat, InvalidFilterParams, InvalidAPIUsage
from . decorators import exception_handler
FRAME_SIZE = VIDEO_WIDTH * VIDEO_HEIGHT * 3
FRAME_WH = (VIDEO_WIDTH, VIDEO_HEIGHT)
FFMPEG_COMMAND = 'ffmpeg -i pipe: -f rawvideo -pix_fmt bgr24 -an -sn pipe: -loglevel quiet'
ID_LEN = 32
class Frame:
def __init__(self, id=None):
self.id = id
@exception_handler(ex=IncorrectVideoFormat, type=2)
def from_bytes(self, in_bytes: bytes) -> np.ndarray:
"""
"""
frame_arr = np.frombuffer(in_bytes, np.uint8)
f_arr = frame_arr.reshape([VIDEO_HEIGHT, VIDEO_WIDTH, 3])
return utils.bgr_to_rgb(f_arr)
def f_save(self, frame: np.ndarray, frame_id: str) -> None:
upload_path = utils.create_frame_path(frame_id)
if utils.is_rgb(frame):
Image.fromarray(frame).save(upload_path)
return
utils.img_from_greyscale(frame).save(upload_path)
return
    def get_by_idx(self, frame_idx):
        vid = utils.create_vid_path(self.id)
        cap = cv2.VideoCapture(vid)
        cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
        _, frame = cap.read()
        cap.release()
        return frame
class VideoUploader(Frame):
def __init__(self):
id = utils.id_generator(ID_LEN)
super().__init__(id)
self.frame_count = 0
def upload_from_bytestream(self, byte_stream: FStorage):
video_f_path = utils.create_vid_path(self.id)
sk_writer = utils.create_sk_video_writer(video_f_path)
sh_command = shlex.split(FFMPEG_COMMAND)
process = sp.Popen(sh_command, stdin=sp.PIPE, stdout=sp.PIPE, bufsize=10**8)
thread = threading.Thread(target=self._writer, args=(process, byte_stream, ))
thread.start()
while True:
in_bytes = process.stdout.read(FRAME_SIZE)
if not in_bytes: break
frame = self.from_bytes(in_bytes)
self.frame_count += 1
if self.frame_count == 1: self.f_save(frame, self.id)
sk_writer.writeFrame(frame)
thread.join()
sk_writer.close()
def _writer(self, process, byte_stream):
for chunk in iter(partial(byte_stream.read, 1024), b''):
process.stdin.write(chunk)
try:
process.stdin.close()
except (BrokenPipeError):
pass
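# Illustrative sketch (not part of the original module): the writer-thread +
# pipe pattern that upload_from_bytestream uses, shown with a trivial child
# process instead of ffmpeg. Unix-only, since it shells out to `cat`.
def _pipe_pattern_demo():
    import io
    proc = sp.Popen(['cat'], stdin=sp.PIPE, stdout=sp.PIPE)
    src = io.BytesIO(b'x' * 4096)
    writer = threading.Thread(
        target=lambda: (proc.stdin.write(src.read()), proc.stdin.close()))
    writer.start()
    out = proc.stdout.read()            # drains stdout until the child exits
    writer.join()
    assert out == b'x' * 4096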
class Filter:
def __init__(self, img=None):
self.img = img
def applyCanny(self, params):
if 'thresh1' in params and 'thresh2' in params:
gs_img = self.applyGreyScale(params)
return cv2.Canny(gs_img,
int(params['thresh1']),
int(params['thresh2']))
raise InvalidFilterParams(3, 'canny')
def applyGauss(self, params):
        if 'ksize_x' in params and 'ksize_y' in params and \
           params['ksize_x'] % 2 != 0 and \
           params['ksize_y'] % 2 != 0:
g_img = self.img.copy()
if np.ndim(g_img) == 3: g_img = utils.bgr_to_rgb(g_img)
return cv2.GaussianBlur(g_img,
(int(params["ksize_x"]), int(params["ksize_y"])), 0)
raise InvalidFilterParams(3, 'gauss')
def applyGreyScale(self, _):
c_img = self.img.copy()
return cv2.cvtColor(c_img, cv2.COLOR_RGB2GRAY)
def applyLaplacian(self, params):
gs_img = self.applyGreyScale(params)
return cv2.Laplacian(gs_img, cv2.CV_8U)
def run_func(self, params):
if params["type"] in self.filter_map:
func = self.filter_map[params["type"]].__get__(self, type(self))
return func(params)
raise InvalidFilterParams(2)
def _default(self, _):
return utils.bgr_to_rgb(self.img)
filter_map = {'canny': applyCanny,
'gauss': applyGauss,
'greyscale': applyGreyScale,
'laplacian': applyLaplacian,
'': _default}
class VideoDownloader(Frame, Filter):
def __init__(self, fps, vid_range=None):
Frame.__init__(self)
Filter.__init__(self)
self.fps = fps
self.vid_range = vid_range
self.curr_f_frame = None
if vid_range:
self.range_min = vid_range[0]
self.range_max = vid_range[1]
def download(self, s_id, tot_video_frames, params):
f_vid_name = f'{s_id}_{params["type"]}'
video_f_path = utils.create_vid_path(f_vid_name)
local_vid = cv2.VideoCapture(utils.create_vid_path(s_id))
vid_writer = utils.create_sk_video_writer(video_f_path, self.fps)
for i in range(tot_video_frames-1):
utils.set_cache_f_count(s_id, 'd', i)
_, curr_frame = local_vid.read()
if curr_frame is None: break
self.img = curr_frame
f_frame = self._filter_apply(i, params)
vid_writer.writeFrame(f_frame)
vid_writer.close()
return f_vid_name
def _filter_apply(self, i, params):
"""
we simply check if a range is given,
then if we get a gs-img from the filter we add three dimensions
"""
if self.vid_range:
            if self.range_min <= i <= self.range_max:
f_frame = self.run_func(params)
if not utils.is_rgb(f_frame):
return np.dstack(3*[f_frame])
return f_frame
else:
return self.run_func({"type":""})
else:
return self.run_func(params)
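# Illustrative sketch (not part of the original module): Filter.run_func
# dispatches on params["type"] through filter_map, and a single-channel
# result is stacked back to three channels exactly as _filter_apply does.
def _filter_dispatch_demo():
    rgb = np.zeros((4, 4, 3), dtype=np.uint8)
    grey = Filter(img=rgb).run_func({"type": "greyscale"})
    assert grey.ndim == 2               # cv2.cvtColor returns one channel
    stacked = np.dstack(3 * [grey])     # same trick as _filter_apply
    assert stacked.shape == (4, 4, 3)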
|
Python
| 189
| 30.989418
| 91
|
/app/api/VideoProcessing.py
| 0.571948
| 0.563844
|
lukasld/Flask-Video-Editor
|
refs/heads/main
|
from flask import jsonify, request, send_from_directory
from . decorators import parameter_check
from . import api
from ..import HELP_MSG_PATH
import json
AV_EP = ["upload", "preview", "download", "stats", "filters"]
AV_FILTERS = ["canny", "greyscale", "laplacian", "gauss"]
@api.route('/help/', methods=['GET'])
@api.route('/help/<endpts>/', methods=['GET'])
@api.route('/help/filters/<filter_type>/', methods=['GET'])
@parameter_check(req_c_type='application/json')
def help(endpts=None, filter_type=None):
if endpts and endpts in AV_EP:
return jsonify(load_json_from_val(endpts)), 200
elif filter_type and filter_type in AV_FILTERS:
return jsonify(load_json_from_val(filter_type)), 200
else:
return jsonify(load_json_from_val('help')), 200
def load_json_from_val(val):
    with open(HELP_MSG_PATH+f'/{val}.json') as f:
        return json.load(f)
|
Python
| 25
| 34.16
| 61
|
/app/api/help.py
| 0.67463
| 0.664391
|
lukasld/Flask-Video-Editor
|
refs/heads/main
|
from flask import Flask
from config import config
from flask_caching import Cache
from flask_swagger_ui import get_swaggerui_blueprint
VIDEO_EXTENSION=None
VIDEO_WIDTH=None
VIDEO_HEIGHT=None
VIDEO_UPLOAD_PATH=None
FRAMES_UPLOAD_PATH=None
IMG_EXTENSION=None
HELP_MSG_PATH=None
CACHE=None
def create_app(config_name):
global VIDEO_EXTENSION
global VIDEO_WIDTH
global VIDEO_HEIGHT
global VIDEO_UPLOAD_PATH
global FRAMES_UPLOAD_PATH
global IMG_EXTENSION
global HELP_MSG_PATH
global CACHE
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
cache = Cache(config={"CACHE_TYPE": "filesystem",
"CACHE_DIR": app.root_path + '/static/cache'})
cache.init_app(app)
CACHE = cache
VIDEO_EXTENSION = app.config["VIDEO_EXTENSION"]
VIDEO_WIDTH = int(app.config["VIDEO_WIDTH"])
VIDEO_HEIGHT = int(app.config["VIDEO_HEIGHT"])
IMG_EXTENSION = app.config["IMG_EXTENSION"]
VIDEO_UPLOAD_PATH = app.root_path + '/static/uploads/videos'
FRAMES_UPLOAD_PATH = app.root_path + '/static/uploads/frames'
HELP_MSG_PATH = app.root_path + '/static/helpmessages'
#TODO: video max dimensions, video max length
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .api import api as api_blueprint
app.register_blueprint(api_blueprint, url_prefix='/videoApi/v1')
from .docs import swagger_ui
app.register_blueprint(swagger_ui, url_prefix="/docs")
return app
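# Illustrative usage sketch (not part of the original package): the factory
# is driven by a config name from config.py; this assumes the VIDEO_* and
# other environment variables consumed above are set.
def _create_app_demo():
    app = create_app('development')     # maps to DevelopmentConfig
    return app                          # e.g. app.run(debug=True) to serve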
|
Python
| 66
| 22.742424
| 72
|
/app/__init__.py
| 0.696235
| 0.695597
|
lukasld/Flask-Video-Editor
|
refs/heads/main
|
import sys
import traceback
from flask import jsonify, request
from . import api
class InvalidAPIUsage(Exception):
status_code = 400
def __init__(self, message='', status_code=None):
super().__init__()
self.message = message
self.path = request.path
        if status_code is None:
            status_code = InvalidAPIUsage.status_code
        self.status_code = status_code
def to_dict(self):
rv = {}
rv['path'] = self.path
rv['status'] = self.status_code
rv['message'] = self.message
return rv
class IncorrectVideoFormat(InvalidAPIUsage):
def __init__(self, message_id):
super().__init__()
self.message = self.msg[message_id]
msg = {1:'Incorrect video type: only RGB - Type=video/mp4 allowed',
2:'Incorrect video dimensions: only 720p supported (1280*720)'}
class InvalidFilterParams(InvalidAPIUsage):
def __init__(self, message_id, filter_name=''):
super().__init__()
self.message = self.msg(message_id, filter_name)
def msg(self, id, filter_name):
        # TODO:Lukas [07252021] messages could be stored in static files as JSON
avail_msg = {1:'Incorrect filter parameters: should be {"fps": "<fps: float>", "filter_params":{"type":"<filter: str>"}} \
or for default preview, {"filter_params":{"type":""}}',
2:f'Incorrect filter parameters: filter does not exist, for more go to /api/v1/help/filters/',
3:f'Incorrect filter parameters: required parameters are missing or invalid, for more go to /api/v1/help/filters/{filter_name}/',
4:f'Incorrect download parameters: for more go to /api/v1/help/download/',
}
return avail_msg[id]
@api.errorhandler(InvalidAPIUsage)
def invalid_api_usage(e):
return jsonify(e.to_dict()), 400
|
Python
| 52
| 34.884617
| 150
|
/app/api/errors.py
| 0.61007
| 0.591859
|
ttruty/SmartWatchProcessing
|
refs/heads/master
|
#!/usr/bin/env python3
"""Module to calculate reliability of samples of raw accelerometer files."""
import pandas as pd
import matplotlib.pyplot as plt
import datetime
import argparse
import os
def main():
"""
Application entry point responsible for parsing command line requests
"""
parser = argparse.ArgumentParser(description='Process accelerometer data.')
parser.add_argument('input_file', metavar='file', type=str, nargs='+',
help='filename for csv accelerometer data')
# parse command line arguments
args = parser.parse_args()
for file in args.input_file:
reliability_score(file)
def reliability_score(input_file):
""" calculate reliability score based on input file
:param str input_file: CSV from provided dataset
:return: New file written to csv output naming convention and new png image of plot
:rtype: void
"""
sampling_rate=20 # Sample rate (Hz) for target device data
# save file name
base_input_name = os.path.splitext(input_file)[0]
# timestamp for filename
now = datetime.datetime.now()
timestamp = str(now.strftime("%Y%m%d_%H-%M-%S"))
df = pd.read_csv(input_file) # read data
    df['Time'] = pd.to_datetime(df['Time'], unit='ms') # convert ms timestamps to datetime
df = df.set_index('Time') #index as timestamp to count
samples_seconds = df.resample('1S').count() # count sample in each 1s time period
# reliability by second
samples_seconds['Reliability']= samples_seconds['Hour'] / sampling_rate
    samples_seconds.loc[samples_seconds['Reliability'] >= 1, 'Reliability'] = 1 #cap reliability at 1 when the sample count exceeds the nominal rate
# save csv of reliability by second
header = ["Reliability"]
samples_seconds.to_csv("reliability_csv_by_seconds_" + base_input_name + "_" + timestamp + ".csv" , columns=header)
print("Reliability for data set = " + str(samples_seconds["Reliability"].mean(axis=0)))
# set and display plot
plot_df = samples_seconds.reset_index() # add index column
plot_df.plot(x='Time', y='Reliability', rot=45, style=".", markersize=5)
# save png image
plt.savefig("reliability_plot_" + base_input_name + "_" + timestamp + ".png", bbox_inches='tight')
#show plot
plt.title("Reliability Score by Second")
plt.show()
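# Illustrative sketch (not part of the original script): the reliability score
# is samples-per-second divided by the nominal rate, clipped at 1, shown here
# on a synthetic 10 Hz stream scored against the 20 Hz target.
def _reliability_demo(sampling_rate=20):
    ts = pd.date_range('2019-11-12 12:00:00', periods=30, freq='100ms')
    sample_df = pd.DataFrame({'Hour': 12}, index=ts)
    per_sec = sample_df.resample('1S').count()
    per_sec['Reliability'] = (per_sec['Hour'] / sampling_rate).clip(upper=1)
    return per_sec['Reliability']       # 0.5 for each fully captured second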
if __name__ == '__main__':
main() # Standard boilerplate to call the main() function to begin the program.
|
Python
| 68
| 35.235294
| 121
|
/ReliabilityScore.py
| 0.665314
| 0.660041
|
ttruty/SmartWatchProcessing
|
refs/heads/master
|
#!/usr/bin/env python3
"""Module to calculate energy and non-wear time of accelerometer data."""
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
import argparse
def main():
"""
Application entry point responsible for parsing command line requests
"""
parser = argparse.ArgumentParser(description='Process Non-wear-time accelerometer data.')
parser.add_argument('input_file', metavar='file', type=str, nargs='+',
help='filename for csv accelerometer data')
# parse command line arguments
args = parser.parse_args()
for file in args.input_file:
energy_calculations(file)
def energy_calculations(input_file):
"""calculate energy and non-wear time stamps based on input file
:param str input_file: CSV from provided dataset
:return: New file written to csv output naming convention and new png image of plot
:rtype: void
"""
df = pd.read_csv(input_file) # read data
df['Time'] = pd.to_datetime(df['Time'], unit='ms') # convert timestamp to datetime object
# save file name
base_input_name = os.path.splitext(input_file)[0]
# timestamp for filename
now = datetime.datetime.now()
timestamp = str(now.strftime("%Y%m%d_%H-%M-%S"))
# Simple smoothing signal with rolling window
# use rolling window of 10 samples ~ .5 second
df['accX'] = df['accX'].rolling(window=10, min_periods=1).mean() # smoothing
df['accY'] = df['accY'].rolling(window=10, min_periods=1).mean() # smoothing
df['accZ'] = df['accZ'].rolling(window=10, min_periods=1).mean() # smoothing
#rolling std
df['stdX'] = df['accX'].rolling(300).std()*1000 # rolling std of 15 seconds is 300 samples
df['stdY'] = df['accY'].rolling(300).std()*1000
df['stdZ'] = df['accZ'].rolling(300).std()*1000 # 1000 X to convert g to mg
# Calculate non-wear time using std if 2 of 3 axes is less than target, point can be marked as non-wear point
target_std=13 # set target std to check against
df["Non_Wear"] = (df['stdX'] < target_std) & (df['stdY'] < target_std) | (df['stdX'] < target_std) & (df['stdZ'] < target_std) | (df['stdY'] < target_std) & (df['stdZ'] < target_std)
    # Vector magnitude used as the energy signal
df["Energy"]= np.sqrt((df['accX']**2) + (df['accY']**2) + (df['accZ']**2)) # energy calculation
# plot the energy expenditure
ax = df.plot(x="Time", y='Energy', rot=45, markersize=5)
ax = plt.gca()
# run gridlines for each hour bar
ax.get_xaxis().grid(True, which='major', color='grey', alpha=0.5)
ax.get_xaxis().grid(True, which='minor', color='grey', alpha=0.25)
# mask the blocks for wear and non_wear time
df['block'] = (df['Non_Wear'].astype(bool).shift() != df['Non_Wear'].astype(bool)).cumsum() # checks if next index label is different from previous
df.assign(output=df.groupby(['block']).Time.apply(lambda x:x - x.iloc[0])) # calculate the time of each sample in blocks
# times of blocks
start_time_df = df.groupby(['block']).first() # start times of each blocked segment
stop_time_df = df.groupby(['block']).last() # stop times for each blocked segment
# lists of times stamps
non_wear_starts_list=start_time_df[start_time_df['Non_Wear'] == True]['Time'].tolist()
non_wear_stops_list=stop_time_df[stop_time_df['Non_Wear'] == True]['Time'].tolist()
# new df from all non-wear periods
data = { "Start": non_wear_starts_list, "Stop": non_wear_stops_list}
df_non_wear=pd.DataFrame(data) # new df for non-wear start/stop times
df_non_wear['delta'] = [pd.Timedelta(x) for x in (df_non_wear["Stop"]) - pd.to_datetime(df_non_wear["Start"])]
# check if non-wear is longer than target
valid_no_wear = df_non_wear["delta"] > datetime.timedelta(minutes=5) # greater than 5 minutes
no_wear_timestamps=df_non_wear[valid_no_wear]
# list of valid non-wear starts and stops
non_wear_start = no_wear_timestamps["Start"]
non_wear_stop = no_wear_timestamps["Stop"]
# calculate total capture time
capture_time_df = df[['Time']].copy()
# capture_time_df = capture_time_df.set_index('Time')
# plot non-wear periods
for non_start, non_stop in zip(non_wear_start, non_wear_stop):
capture_time_df['Non_Wear'] = (capture_time_df['Time'] > non_start ) & (capture_time_df['Time'] < non_stop )
ax.axvspan(non_start, non_stop, alpha=0.5, color='red')
# blocking validated wear and non wear time
capture_time_df['block'] = (capture_time_df['Non_Wear'].astype(bool).shift() != capture_time_df['Non_Wear'].astype(bool)).cumsum() # checks if next index label is different from previous
capture_time_df.assign(output=capture_time_df.groupby(['block']).Time.apply(lambda x: x - x.iloc[0])) # calculate the time of each sample in blocks
# times of blocks
start_time_df = capture_time_df.groupby(['block']).first() # start times of each blocked segment
stop_time_df = capture_time_df.groupby(['block']).last() # stop times for each blocked segment
start_time_df.rename(columns={'Time': 'StartTime'}, inplace=True)
stop_time_df.rename(columns={'Time': 'StopTime'}, inplace=True)
# combine start and stop dataframes
time_marks = pd.concat([start_time_df, stop_time_df], axis=1)
print("Capture Segment Periods:")
print(time_marks)
    #save csv of individual time periods (worn and non-worn timestamps)
time_marks.to_csv("wear_periods_csv_" + base_input_name + "_" + timestamp + ".csv")
# save png image
plt.savefig("non_wear_time_plot_" + base_input_name + "_" + timestamp + ".png", bbox_inches='tight')
#show plot
plt.title("Non-wear Time")
plt.show()
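# Illustrative sketch (not part of the original script): the non-wear rule
# above flags a sample when at least 2 of the 3 axis rolling stds (in mg)
# fall below the 13 mg target.
def _non_wear_rule_demo(target_std=13):
    stds = pd.DataFrame({'stdX': [2.0, 20.0], 'stdY': [3.0, 2.0], 'stdZ': [40.0, 1.0]})
    non_wear = (stds['stdX'] < target_std) & (stds['stdY'] < target_std) | \
               (stds['stdX'] < target_std) & (stds['stdZ'] < target_std) | \
               (stds['stdY'] < target_std) & (stds['stdZ'] < target_std)
    return non_wear                     # [True, True]: two quiet axes per row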
if __name__ == '__main__':
main() # Standard boilerplate to call the main() function to begin the program.
|
Python
| 128
| 44.929688
| 191
|
/EnergyCalculation.py
| 0.65329
| 0.642068
|
ttruty/SmartWatchProcessing
|
refs/heads/master
|
#!/usr/bin/env python3
"""Combine all hours of data into one CSV"""
import pandas as pd
#output combined CSV file
def concat_file(file_list, output_file):
"""concat .csv file according to list of files
:param str file_list: List of CSV from provided dataset
:param str output_file: Output filename to save the concat CSV of files
:return: New file written to <output_file>
:rtype: void
"""
combined_csv = pd.concat([pd.read_csv(f) for f in file_list ]) #combine all files in the list
combined_csv.to_csv( output_file, index=False, encoding='utf-8-sig') #export to csv with uft-8 encoding
# hold paths for each hour
acc_file_locations=[]
gyro_file_locations=[]
# loop to add path hours to list
for hour in range(12, 18):
acc_file_locations.append("Data-raw/Accelerometer/2019-11-12/" + str(hour) + "/accel_data.csv")
gyro_file_locations.append("Data-raw/Gyroscope/2019-11-12/" + str(hour) + "/accel_data.csv")
concat_file(acc_file_locations, 'acc_data.csv')
concat_file(gyro_file_locations, 'gyro_data.csv')
|
Python
| 26
| 40.076923
| 107
|
/ConcatData.py
| 0.69447
| 0.672915
|
CaptainIllidan/yolo
|
refs/heads/master
|
import numpy as np
import cv2
import pyyolo
cap = cv2.VideoCapture('gun4_2.mp4')
meta_filepath = "/home/unknown/yolo/darknet.data"
cfg_filepath = "/home/unknown/yolo/darknet-yolov3.cfg"
weights_filepath = "/home/unknown/yolo/yolov3.weights"
meta = pyyolo.load_meta(meta_filepath)
net = pyyolo.load_net(cfg_filepath, weights_filepath, False)
while(cap.isOpened()):
ret, frame = cap.read()
if not ret:
break
yolo_img = pyyolo.array_to_image(frame)
res = pyyolo.detect(net, meta, yolo_img)
for r in res:
cv2.rectangle(frame, r.bbox.get_point(pyyolo.BBox.Location.TOP_LEFT, is_int=True),
r.bbox.get_point(pyyolo.BBox.Location.BOTTOM_RIGHT, is_int=True), (0, 255, 0), 2)
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
Python
| 32
| 25.8125
| 103
|
/detect.py
| 0.664336
| 0.642191
|
androiddevnotesforks/github-scraper
|
refs/heads/master
|
"""Scrape GitHub data for organizational accounts."""
import argparse
import asyncio
import csv
import json
import sys
import time
from pathlib import Path
from typing import Any, Dict, List, Tuple
import aiohttp
import networkx as nx
class GithubScraper():
"""Scrape information about organizational Github accounts.
Use Github API key and user name to make requests to Github API.
Create spreadsheets named after data type and date.
Attributes:
orgs (List[str]): List of organizational Github accounts to scrape
session (aiohttp.ClientSession): Session using Github user name and API token
"""
def __init__(self, organizations: List[str],
session: aiohttp.ClientSession) -> None:
"""Instantiate object."""
self.orgs = organizations
self.session = session
# Members and repositories of listed organizations. Instantiated as empty dict
# and only loaded if user selects operation that needs this list.
# Saves API calls.
self.members: Dict[str, List[str]] = {}
self.repos: List[Dict[str, Any]] = []
# Directory to store scraped data with timestamp
self.data_directory: Path = Path(
Path.cwd(), 'data', time.strftime('%Y-%m-%d_%H-%M-%S')
)
Path(self.data_directory).mkdir()
async def get_members(self) -> Dict[str, List[str]]:
"""Get list of members of specified orgs.
Returns:
Dict[str, List[str]]: Keys are orgs, values list of members
"""
print("Collecting members of specified organizations...")
members: Dict[str, List[str]] = {}
tasks: List[asyncio.Task[Any]] = []
for org in self.orgs:
url = f"https://api.github.com/orgs/{org}/members"
tasks.append(asyncio.create_task(self.call_api(url, organization=org)))
json_org_members: List[Dict[str, Any]] = await self.load_json(tasks)
# Extract names of org members from JSON data
for org in self.orgs:
members[org] = []
for member in json_org_members:
members[member["organization"]].append(member['login'])
return members
async def load_json(self, tasks: List[asyncio.Task[Any]]) -> List[Dict[str, Any]]:
"""Execute tasks with asyncio.wait() to make API calls.
Args:
tasks (List[asyncio.Task[Any]]): List of awaitable tasks to execute
Returns:
List[Dict[str, Any]]: Full JSON returned by API
"""
full_json: List[Dict[str, Any]] = []
done, pending = await asyncio.wait(
tasks, return_when="ALL_COMPLETED"
)
for task in done:
try:
full_json.extend(await task)
except aiohttp.ContentTypeError:
# If repository is empty, pass
pass
return full_json
async def call_api(self, url: str, **added_fields: str) -> List[Dict[str, Any]]:
"""Load json file using requests.
Makes API calls and returns JSON results.
Args:
url (str): Github API URL to load as JSON
**added_fields (str): Additional information that will be added to each item
in the JSON data
Returns:
List[Dict[str, Any]]: Github URL loaded as JSON
"""
page: int = 1
json_data: List[Dict[str, Any]] = []
# Requesting user info doesn't support pagination and returns dict, not list
if url.split("/")[-2] == 'users':
async with self.session.get(f"{url}?per_page=100") as resp:
member_json: Dict[str, Any] = await resp.json()
for key, value in added_fields.items():
member_json[key] = value
json_data.append(member_json)
return json_data
# Other API calls return lists and should paginate
while True:
async with self.session.get(f"{url}?per_page=100&page={str(page)}") as resp:
json_page: List[Dict[str, Any]] = await resp.json()
if json_page == []:
break
for item in json_page:
for key, value in added_fields.items():
item[key] = value
json_data.extend(json_page)
page += 1
return json_data
def generate_csv(self, file_name: str, json_list: List[Dict[str, Any]],
columns_list: List) -> None:
"""Write CSV file.
Args:
file_name (str): Name of the CSV file
json_list (List[Dict[str, Any]]): JSON data to turn into CSV
columns_list (List): List of columns that represent relevant fields
in the JSON data
"""
with open(
Path(self.data_directory, file_name),
'a+',
encoding='utf-8'
) as file:
csv_file = csv.DictWriter(
file,
fieldnames=columns_list,
extrasaction="ignore"
)
csv_file.writeheader()
for item in json_list:
csv_file.writerow(item)
print(
f"- file saved as {Path('data', self.data_directory.name, file_name)}"
)
async def get_org_repos(self) -> List[Dict[str, Any]]:
"""Create list of the organizations' repositories."""
print("Scraping repositories")
tasks: List[asyncio.Task[Any]] = []
for org in self.orgs:
url = f"https://api.github.com/orgs/{org}/repos"
tasks.append(asyncio.create_task(self.call_api(url, organization=org)))
return await self.load_json(tasks)
async def create_org_repo_csv(self) -> None:
"""Write a CSV file with information about orgs' repositories."""
# Create list of items that should appear as columns in the CSV
table_columns: List[str] = [
'organization',
'name',
'full_name',
'stargazers_count',
'language',
'created_at',
'updated_at',
'homepage',
'fork',
'description'
]
self.generate_csv('org_repositories.csv', self.repos, table_columns)
async def get_repo_contributors(self) -> None:
"""Create list of contributors to the organizations' repositories."""
print("Scraping contributors")
json_contributors_all = []
graph = nx.DiGraph()
table_columns: List[str] = [
'organization',
'repository',
'login',
'contributions',
'html_url',
'url'
]
tasks: List[asyncio.Task[Any]] = []
for org in self.orgs:
for repo in self.repos:
url = f"https://api.github.com/repos/{org}/{repo['name']}/contributors"
tasks.append(
asyncio.create_task(
self.call_api(url, organization=org, repository=repo['name'])
)
)
json_contributors_all = await self.load_json(tasks)
self.generate_csv('contributor_list.csv', json_contributors_all, table_columns)
for contributor in json_contributors_all:
graph.add_node(
contributor['repository'], organization=contributor['organization']
)
graph.add_edge(
contributor['login'],
contributor['repository'],
organization=contributor['organization']
)
nx.write_gexf(
graph,
Path(self.data_directory, 'contributor_network.gexf')
)
print(
"- file saved as "
f"{Path('data', self.data_directory.name, 'contributor_network.gexf')}"
)
async def get_members_repos(self) -> None:
"""Create list of all the members of an organization and their repositories."""
print("Getting repositories of all members.")
json_members_repos: List[Dict[str, Any]] = []
table_columns: List[str] = [
'organization',
'user',
'full_name',
'fork',
'stargazers_count',
'forks_count',
'language',
'description'
]
tasks: List[asyncio.Task[Any]] = []
for org in self.members:
for member in self.members[org]:
url = f"https://api.github.com/users/{member}/repos"
tasks.append(
asyncio.create_task(
self.call_api(url, organization=org, user=member)
)
)
json_members_repos = await self.load_json(tasks)
self.generate_csv('members_repositories.csv',
json_members_repos, table_columns)
async def get_members_info(self) -> None:
"""Gather information about the organizations' members."""
print("Getting user information of all members.")
table_columns: List[str] = [
'organization',
'login',
'name',
'url',
'type',
'company',
'blog',
'location'
]
tasks: List[asyncio.Task[Any]] = []
for org in self.orgs:
for member in self.members[org]:
url = f"https://api.github.com/users/{member}"
tasks.append(asyncio.create_task(
self.call_api(url, organization=org))
)
json_members_info: List[Dict[str, Any]] = await self.load_json(tasks)
self.generate_csv('members_info.csv', json_members_info, table_columns)
async def get_starred_repos(self) -> None:
"""Create list of all the repositories starred by organizations' members."""
print("Getting repositories starred by members.")
json_starred_repos_all: List[Dict[str, Any]] = []
table_columns: List[str] = [
'organization',
'user',
'full_name',
'html_url',
'language',
'description'
]
tasks: List[asyncio.Task[Any]] = []
for org in self.members:
for member in self.members[org]:
url = f"https://api.github.com/users/{member}/starred"
tasks.append(asyncio.create_task(
self.call_api(url, organization=org, user=member))
)
json_starred_repos_all = await self.load_json(tasks)
self.generate_csv('starred_repositories.csv',
json_starred_repos_all, table_columns)
async def generate_follower_network(self) -> None:
"""Create full or narrow follower networks of organizations' members.
Get every user following the members of organizations (followers)
and the users they are following themselves (following). Then generate two
directed graphs with NetworkX. Only includes members of specified organizations
if in narrow follower network.
"""
print('Generating follower networks')
# Create graph dict and add self.members as nodes
graph_full = nx.DiGraph()
graph_narrow = nx.DiGraph()
for org in self.orgs:
for member in self.members[org]:
graph_full.add_node(member, organization=org)
graph_narrow.add_node(member, organization=org)
# Get followers and following for each member and build graph
tasks_followers: List[asyncio.Task[Any]] = []
tasks_following: List[asyncio.Task[Any]] = []
for org in self.members:
for member in self.members[org]:
url_followers = f"https://api.github.com/users/{member}/followers"
tasks_followers.append(
asyncio.create_task(
self.call_api(url_followers, follows=member, original_org=org))
)
url_following = f"https://api.github.com/users/{member}/following"
tasks_following.append(
asyncio.create_task(
self.call_api(
url_following, followed_by=member, original_org=org)
)
)
json_followers = await self.load_json(tasks_followers)
json_following = await self.load_json(tasks_following)
# Build full and narrow graphs
for follower in json_followers:
graph_full.add_edge(
follower['login'],
follower['follows'],
organization=follower['original_org']
)
if follower['login'] in self.members[follower['original_org']]:
graph_narrow.add_edge(
follower['login'],
follower['follows'],
organization=follower['original_org'])
for following in json_following:
graph_full.add_edge(
following['followed_by'],
following['login'],
organization=following['original_org'])
if following['login'] in self.members[following['original_org']]:
graph_narrow.add_edge(
following['followed_by'],
following['login'],
organization=following['original_org'])
# Write graphs and save files
nx.write_gexf(
graph_full,
Path(self.data_directory, "full-follower-network.gexf"))
nx.write_gexf(
graph_narrow,
Path(self.data_directory, "narrow-follower-network.gexf"))
print(
f"- files saved in {Path('data', self.data_directory.name)} as "
"full-follower-network.gexf and narrow-follower-network.gexf"
)
async def generate_memberships_network(self) -> None:
"""Take all the members of the organizations and generate a directed graph.
        This creates a network of the members' organizational memberships.
"""
print("Generating network of memberships.")
graph = nx.DiGraph()
tasks: List[asyncio.Task[Any]] = []
for org in self.members:
for member in self.members[org]:
url = f"https://api.github.com/users/{member}/orgs"
tasks.append(asyncio.create_task(
self.call_api(
url, organization=org, scraped_org_member=member))
)
json_org_memberships = await self.load_json(tasks)
for membership in json_org_memberships:
graph.add_node(membership['scraped_org_member'], node_type='user')
graph.add_edge(
membership['scraped_org_member'],
membership['login'], # name of organization user is member of
node_type='organization')
nx.write_gexf(
graph,
Path(self.data_directory, 'membership_network.gexf')
)
print(
"- file saved as "
f"{Path('data', self.data_directory.name, 'membership_network.gexf')}"
)
def read_config() -> Tuple[str, str]:
"""Read config file.
Returns:
Tuple[str, str]: Github user name and API token
Raises:
KeyError: If config file is empty
"""
try:
with open(Path(Path.cwd(), 'config.json'), 'r', encoding='utf-8') as file:
config = json.load(file)
user: str = config['user_name']
api_token: str = config['api_token']
if user == "" or api_token == "":
raise KeyError
else:
return user, api_token
except (FileNotFoundError, KeyError):
sys.exit("Failed to read Github user name and/or API token. "
"Please add them to the config.json file.")
def read_organizations() -> List[str]:
"""Read list of organizations from file.
Returns:
List[str]: List of names of organizational Github accounts
"""
orgs: List[str] = []
with open(Path(Path.cwd(), 'organizations.csv'), 'r', encoding="utf-8") as file:
reader = csv.DictReader(file)
for row in reader:
orgs.append(row['github_org_name'])
if not orgs:
sys.exit("No organizations to scrape found in organizations.csv. "
"Please add the names of the organizations you want to scrape "
"in the column 'github_org_name' (one name per row).")
return orgs
def parse_args() -> Dict[str, bool]:
"""Parse arguments.
We use the 'dest' value to map args with functions/methods. This way, we
can use getattr(object, dest)() and avoid long if...then list in main().
Returns:
Dict[str, bool]: Result of vars(parse_args())
"""
argparser = argparse.ArgumentParser(
description="Scrape organizational accounts on Github."
)
argparser.add_argument(
"--all",
"-a",
action="store_true",
help="scrape all the information listed below"
)
argparser.add_argument(
"--repos",
"-r",
action='store_true',
dest="create_org_repo_csv",
help="scrape the organizations' repositories (CSV)"
)
argparser.add_argument(
"--contributors",
"-c",
action="store_true",
dest="get_repo_contributors",
help="scrape contributors of the organizations' repositories (CSV and GEXF)"
)
argparser.add_argument(
"--member_repos",
"-mr",
action="store_true",
dest="get_members_repos",
help="scrape all repositories owned by the members of the organizations (CSV)"
)
argparser.add_argument(
"--member_infos",
"-mi",
action="store_true",
dest="get_members_info",
help="scrape information about each member of the organizations (CSV)"
)
argparser.add_argument(
"--starred",
"-s",
action="store_true",
dest="get_starred_repos",
help="scrape all repositories starred by the members of the organizations (CSV)"
)
argparser.add_argument(
"--followers",
"-f",
action="store_true",
dest="generate_follower_network",
help="generate a follower network. Creates full and narrow network graph, the "
"latter only shows how scraped organizations are networked among each "
"other (two GEXF files)"
)
argparser.add_argument(
"--memberships",
"-m",
action="store_true",
dest="generate_memberships_network",
help="scrape all organizational memberships of org members (GEXF)"
)
args: Dict[str, bool] = vars(argparser.parse_args())
return args
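def _dispatch_demo() -> None:
    """Illustrative sketch (not part of the scraper): the dest -> method
    dispatch that parse_args() enables. DemoScraper is hypothetical."""
    class DemoScraper:
        def get_members_info(self) -> None:
            print("would scrape member info")

    args = {'all': False, 'get_members_info': True}
    scraper = DemoScraper()
    for name, wanted in args.items():
        if wanted and name != 'all':
            getattr(scraper, name)()    # the same trick main() relies on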
async def main() -> None:
"""Set up GithubScraper object."""
args: Dict[str, bool] = parse_args()
if not any(args.values()):
sys.exit("You need to provide at least one argument. "
"For usage, call: github_scraper -h")
user, api_token = read_config()
organizations = read_organizations()
# To avoid unnecessary API calls, only get org members and repos if needed
require_members = ['get_members_repos', 'get_members_info', 'get_starred_repos',
'generate_follower_network', 'generate_memberships_network']
require_repos = ['create_org_repo_csv', 'get_repo_contributors']
# Start aiohttp session
auth = aiohttp.BasicAuth(user, api_token)
async with aiohttp.ClientSession(auth=auth) as session:
github_scraper = GithubScraper(organizations, session)
# If --all was provided, simply run everything
if args['all']:
github_scraper.members = await github_scraper.get_members()
github_scraper.repos = await github_scraper.get_org_repos()
for arg in args:
if arg != 'all':
await getattr(github_scraper, arg)()
else:
# Check args provided, get members/repos if necessary, call related methods
called_args = [arg for arg, value in args.items() if value]
if any(arg for arg in called_args if arg in require_members):
github_scraper.members = await github_scraper.get_members()
if any(arg for arg in called_args if arg in require_repos):
github_scraper.repos = await github_scraper.get_org_repos()
for arg in called_args:
await getattr(github_scraper, arg)()
if __name__ == "__main__":
if sys.platform == 'win32':
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
asyncio.run(main())
|
Python
| 541
| 37.693161
| 88
|
/github_scraper.py
| 0.559882
| 0.559213
|
maxiantian/DownloadTaobaoSearchPicture
|
refs/heads/master
|
import urllib.request
import re
import ssl
#Globally disable SSL certificate verification
ssl._create_default_https_context = ssl._create_unverified_context
#Set the Taobao search keyword (毛衣 = "sweater")
keyword = urllib.request.quote("毛衣")
#Disguise the crawler as a Firefox browser
headers=("User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:57.0) Gecko/20100101 Firefox/57.0")
#Create an opener object
opener = urllib.request.build_opener()
#Set the headers on the opener
opener.addheaders = [headers]
#Install the opener globally
urllib.request.install_opener(opener)
#For easier testing, download only two pages of data
for i in range(0,2):
    #Add the keyword to the URL and request the data
url = "https://s.taobao.com/search?q="+ keyword +"&imgfile=&js=1&stats_click=search_radio_all%3A1&initiative_id=staobaoz_20180121&ie=utf8&bcoffset=4&ntoffset=4&p4ppushleft=1%2C48&s="+str(44*i)
data = urllib.request.urlopen(url).read().decode("utf-8","ignore")
    #Check whether the request returned data
print(len(data))
    #Match image URLs with a regular expression
pat = '"pic_url":"//(.*?)"'
imgs = re.compile(pat).findall(data)
for j in range(0,len(imgs)):
        #Build the full URL from each match and download it directly to disk
thisurl = "http://" + imgs[j]
file = "/Users/Rocky1/Desktop/imgs/" + str(i) + "-" + str(j) + ".jpg"
urllib.request.urlretrieve(thisurl,file)
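#Illustrative sketch (not part of the original script): what the pic_url
#regex above extracts from a hypothetical fragment of the search-result JSON
def _pic_url_regex_demo():
    sample = '"pic_url":"//img.example.com/a.jpg","nid":"1"'
    return re.compile('"pic_url":"//(.*?)"').findall(sample)  # ['img.example.com/a.jpg']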
|
Python
| 42
| 26.166666
| 196
|
/DownloadTaobaoSearchPicture.py
| 0.675934
| 0.635969
|
vishnuyar/DS-Unit-3-Sprint-2-SQL-and-Databases
|
refs/heads/master
|
import pandas as pd
import sqlite3
#Import buddymove dataset by reading csv
buddymove_df = pd.read_csv('buddymove_holidayiq.csv')
#Printing the shape of the dataset and first five rows
print(buddymove_df.shape)
print(buddymove_df.head())
#Printing the number of null values in the dataset
print("The number of null values in the dataset are:\n",buddymove_df.isna().sum())
#Opening a sqlite connection and creating a database
database_name = 'buddymove_holidayiq.sqlite3'
conn = sqlite3.connect(database_name)
#Dumping the dataframe to the database
buddymove_df.to_sql('buddymove_tbl',con=conn,if_exists='replace')
#Checking for the first five rows to ensure the database dump was complete
query = 'SELECT * FROM buddymove_tbl LIMIT 5;'
#Query for number of rows in database
query_rows = 'SELECT COUNT("User Id") FROM buddymove_tbl;'
try:
answer = conn.execute(query)
for row in answer:
print(row)
except:
pass
#Getting the number of rows in the table
try:
answer = conn.execute(query_rows)
for row in answer:
print(f'Number of rows in the table buddymove_tbl is :{row[0]}')
except:
pass
#Number of users who rated at least 100 in the Nature and at least 100 in the Shopping category
query_users = 'SELECT COUNT("User Id") FROM buddymove_tbl WHERE\
"Nature" >=100 AND "Shopping" >=100;'
try:
answer = conn.execute(query_users)
for row in answer:
        print(f'Number of users who rated at least 100 in both Nature and Shopping: {row[0]}')
except:
pass
#Query for getting average rating for all categories
query_avg_rating = 'SELECT AVG("Sports"),AVG("Religious"),AVG("Nature"),AVG("Theatre"),AVG("Shopping"),AVG("Picnic") FROM buddymove_tbl;'
try:
answer = conn.execute(query_avg_rating)
for row in answer:
print(f'Avg rating for Sports:{row[0]:.2f}')
print(f'Avg rating for Religious:{row[1]:.2f}')
print(f'Avg rating for Nature:{row[2]:.2f}')
print(f'Avg rating for Theatre:{row[3]:.2f}')
print(f'Avg rating for Shopping:{row[4]:.2f}')
print(f'Avg rating for Picnic:{row[5]:.2f}')
except:
pass
#committing the changes and closing the connection
conn.commit()
conn.close()
|
Python
| 60
| 35.633335
| 137
|
/module1-introduction-to-sql/buddymove_holidayiq.py
| 0.702002
| 0.686988
|
vishnuyar/DS-Unit-3-Sprint-2-SQL-and-Databases
|
refs/heads/master
|
import pymongo
client = pymongo.MongoClient("mongodb+srv://mongoadmin:2BlYV2t3X4jws3XR@cluster0-uosnx.mongodb.net/test?retryWrites=true&w=majority")
db = client.test
|
Python
| 5
| 25.4
| 97
|
/module3-nosql-and-document-oriented-databases/mongoproject.py
| 0.772727
| 0.772727
|
vishnuyar/DS-Unit-3-Sprint-2-SQL-and-Databases
|
refs/heads/master
|
import sqlite3
"""Database operations for the NorthWind data """
def data_operations(conn,query):
""" Function which performs database operations and returns results """
try:
#creating a cursor for connection
cursor = conn.cursor()
#executing the query on the database and get the results
results = cursor.execute(query).fetchall()
except:
return "error in data operations"
    #If no error is encountered, the cursor is closed
cursor.close()
    #Committing the data operations
conn.commit()
#On successful completion return the results
return results
def top_10_expensive_items(connect):
"""
Function to find the Top 10 expensive items per unit price
Expected Output
The 10 Most expensive Items in the database are:
Côte de Blaye
Thüringer Rostbratwurst
Mishi Kobe Niku
Sir Rodney's Marmalade
Carnarvon Tigers
Raclette Courdavault
Manjimup Dried Apples
Tarte au sucre
Ipoh Coffee
Rössle Sauerkraut
"""
#query for ten most expensive items in the database
expensive_query = """ SELECT ProductName
FROM Product
ORDER BY UnitPrice DESC LIMIT 10 """
result = data_operations(connect,expensive_query)
print("The 10 Most expensive Items in the database are:\n")
for row in result:
print(f'{row[0]}')
def avg_age_employee(connect):
"""
Function to find the average age of the employee at the time of hire
Expected Output
    The Average age of the employee at the time of hire is:37.22
"""
#Query for the Avg age of the employee at the time of hiring
avg_age_employee = """ SELECT AVG(HireDate - BirthDate) FROM Employee; """
result = data_operations(connect,avg_age_employee)
print("\n")
for row in result:
        print(f'The Average age of the employee at the time of hire is:{row[0]:0.2f}')
def avg_employee_age_bycity(connect):
"""
Function to find the average age of employees at the time of hire by city
Expected Output
    The Average age of the employee at the time of hire by City is :
Kirkland : 29.0
London : 32.5
Redmond : 56.0
Seattle : 40.0
Tacoma : 40.0
"""
#Query for avg age of the employee by city
avg_employee_age_bycity = """ SELECT city, AVG(HireDate - BirthDate)
FROM Employee GROUP BY City; """
result = data_operations(connect,avg_employee_age_bycity)
print("\nThe Averag age of the employee at the time of hire by City is :")
for row in result:
print(f'{row[0]} : {row[1]}')
def expensive_items_supplier(connect):
"""
Function to find the Top 10 expensive items per unit price and the Name of the Supplier
Expected Output
The 10 Most expensive Items in the database Supplier Names followed by Item Name:
Aux joyeux ecclésiastiques : Côte de Blaye
Plutzer Lebensmittelgroßmärkte AG : Thüringer Rostbratwurst
Tokyo Traders : Mishi Kobe Niku
Specialty Biscuits, Ltd. : Sir Rodney's Marmalade
Pavlova, Ltd. : Carnarvon Tigers
Gai pâturage : Raclette Courdavault
G'day, Mate : Manjimup Dried Apples
Forêts d'érables : Tarte au sucre
Leka Trading : Ipoh Coffee
Plutzer Lebensmittelgroßmärkte AG : Rössle Sauerkraut
"""
    #Query for the Top 10 expensive items along with Supplier names
expensive_query_supplier = """ SELECT CompanyName,ProductName
FROM Product, Supplier
WHERE Supplier.Id = Product.SupplierId
ORDER BY UnitPrice DESC LIMIT 10"""
result = data_operations(connect,expensive_query_supplier)
print("\nThe 10 Most expensive Items in the database with Supplier Names followed by Item Name:\n")
for row in result:
print(f'{row[0]} : {row[1]}')
def largest_category(connect):
"""
Function to find the Top category with largest unique products
Expected Output
The Category with largest unique products is :Confections
"""
#Query for the name of category with largest number of unique products
largest_category = """ SELECT CategoryName FROM Category WHERE id = (
SELECT CategoryId FROM Product
GROUP BY CategoryId
ORDER BY COUNT(CategoryId) DESC LIMIT 1) """
result = data_operations(connect,largest_category)
print("\n")
for row in result:
print(f'The Category with largest unique products is :{row[0]}')
def most_territories_employee(connect):
"""
    Function to find the employee who holds the most territories
Expected Output
The Employee with most territories is :Robert King
"""
#Query for name of the Employee who has the most territories
most_territories_employee = """ SELECT FirstName,LastName FROM Employee WHERE id = (
SELECT EmployeeId FROM EmployeeTerritory
GROUP BY EmployeeId
ORDER BY COUNT(TerritoryId) DESC LIMIT 1
) """
result = data_operations(connect,most_territories_employee)
print("\n")
for row in result:
print(f'The Employee with most territories is :{row[0]} {row[1]}')
#Creating a try catch block to safely operation on database
try:
#Creating a connection to the Northwind database
connect = sqlite3.connect('northwind_small.sqlite3')
top_10_expensive_items(connect)
avg_age_employee(connect)
avg_employee_age_bycity(connect)
expensive_items_supplier(connect)
largest_category(connect)
most_territories_employee(connect)
except :
pass
#closing the connection to the database
connect.close()
|
Python
| 159
| 35.00629
| 103
|
/sprint/northwind.py
| 0.632774
| 0.622467
|
vishnuyar/DS-Unit-3-Sprint-2-SQL-and-Databases
|
refs/heads/master
|
import sqlite3
#Creating a connection to the rpg database
conn = sqlite3.connect('rpg_db.sqlite3')
#creating a cursor for rpg database connection
curs = conn.cursor()
#Query for number of characters in the game
query = 'SELECT COUNT(name) FROM charactercreator_character;'
#Executing the query
answer = curs.execute(query)
for row in answer:
    print(f'There are a total of {row[0]} characters in the game')
no_of_characters = row[0]
#Different classes of character by table name
character_class = ['mage','thief','cleric','fighter']
for subclass in character_class:
query = f'SELECT COUNT(character_ptr_id) FROM charactercreator_{subclass}'
answer = curs.execute(query)
for row in answer:
        print(f'There are {row[0]} characters of the class {subclass}')
#Total items in the armoury
query = 'SELECT COUNT(name) FROM armory_item;'
answer = curs.execute(query)
for row in answer:
    print(f'There are a total of {row[0]} items in the armoury')
no_of_items = row[0]
#Number of weapons in the items
query = f'SELECT COUNT(item_ptr_id) FROM armory_weapon;'
answer = curs.execute(query)
for row in answer:
    print(f'There are a total of {row[0]} weapons among the items')
no_of_weapons = row[0]
#Number of non-weapon items
print(f'There are a total of {(no_of_items-no_of_weapons)} non-weapon items')
#No of items for Top 20 characters by name
query = 'select count(item_id) AS no_of_items,name\
from charactercreator_character_inventory,charactercreator_character\
where charactercreator_character.character_id = charactercreator_character_inventory.character_id\
GROUP BY charactercreator_character_inventory.character_id ORDER BY name ASC LIMIT 20;'
answer = curs.execute(query).fetchall()
print('The Number of items of the top 20 characters by name are')
for row in answer:
print(f'No. of Items:{row[0]}, Name:{row[1]}')
#No of weapons for Top 20 characters by name
query = 'select count(armory_weapon.item_ptr_id) AS no_of_items,name\
from charactercreator_character_inventory,charactercreator_character,armory_weapon\
where charactercreator_character.character_id = charactercreator_character_inventory.character_id\
AND charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id\
GROUP BY charactercreator_character_inventory.character_id ORDER BY name ASC LIMIT 20;'
answer = curs.execute(query).fetchall()
print('The number of weapons of the top 20 characters by name are')
for row in answer:
print(f'No. of Items:{row[0]}, Name:{row[1]}')
#Total Number of items held by characters
query = 'select count(id) from charactercreator_character_inventory'
answer = curs.execute(query)
for row in answer:
total_no_of_items = row[0]
#Average number of items for each character
print(f'The average number of items per character is {total_no_of_items/no_of_characters:0.2f}')
#closing the cursor and connection
curs.close()
conn.close()
|
Python
| 67
| 42.985073
| 106
|
/module1-introduction-to-sql/rpg_queries.py
| 0.742702
| 0.732858
|
vishnuyar/DS-Unit-3-Sprint-2-SQL-and-Databases
|
refs/heads/master
|
import psycopg2
from sqlalchemy import create_engine
import pandas as pd
#Reading titanic file to upload into pandas
titanic = pd.read_csv('titanic.csv')
#Print the shape of titanic and print the top 5 rows
print(titanic.shape)
print(titanic.head())
#Print the columns of the titanic dataframe
print(titanic.columns)
dbname = "rgfajssc"
username = "rgfajssc"
pass_word = "SECRET"
host = "john.db.elephantsql.com"
#Creating the engine and inserting the titanic dataframe into postgres
try:
engine = create_engine(f'postgresql://{username}:{pass_word}@{host}/{username}')
titanic.to_sql('titanic', engine)
except:
pass
# pg_connect = psycopg2.connect(dbname=dbname, user=username,
# password=pass_word, host=host)
# cur = pg_connect.cursor()
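#Illustrative sketch (not part of the original script): reading the table
#back through the same engine to verify the upload
try:
    print(pd.read_sql('SELECT COUNT(*) FROM titanic;', engine))
except:
    pass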
|
Python
| 28
| 29.035715
| 84
|
/module2-sql-for-analysis/insert_titanic.py
| 0.725
| 0.721429
|
vishnuyar/DS-Unit-3-Sprint-2-SQL-and-Databases
|
refs/heads/master
|
import psycopg2
import sqlite3
dbname = "rgfajssc"
username = "rgfajssc"
pass_word = "U0W4kG-Um-Pug_wj8ec9OnbkQ70deuZR"
host = "john.db.elephantsql.com"
pg_connect = psycopg2.connect(dbname=dbname, user=username,
password=pass_word, host=host)
cur = pg_connect.cursor()
#Query for Survived people by class
query = 'SELECT survived,pclass, COUNT(pclass) FROM titanic \
GROUP BY (pclass,survived) order by survived asc ,pclass asc;'
try:
cur.execute(query)
for row in cur:
print(row)
except :
pass
cur.close()
pg_connect.close()
|
Python
| 23
| 23.782608
| 70
|
/module2-sql-for-analysis/titanic_query.py
| 0.667838
| 0.662566
|
vishnuyar/DS-Unit-3-Sprint-2-SQL-and-Databases
|
refs/heads/master
|
import sqlite3
""" Demo Data Query Using Sqlite 3 """
def data_operations(conn,query):
""" Function which performs database operations and returns results """
try:
#creating a cursor for connection
cursor = conn.cursor()
#executing the query on the database and get the results
results = cursor.execute(query).fetchall()
except:
return "error in data operations"
    #If no error is encountered, the cursor is closed
cursor.close()
    #Committing the data operations
conn.commit()
#On successful completion return the results
return results
def count_query(connect):
"""
Function to find the number of rows in the demo table
Expected Output :The number of rows in demo table is 6
"""
#How many rows in the demo table
count_query = """SELECT COUNT(*) FROM demo; """
    #counting all rows in the table
result = data_operations(connect,count_query)
for row in result:
print(f'The number of rows in demo table is {row[0]}')
def xy_query(connect):
"""
    Function for finding the number of rows where x and y are each at least 5
    Expected Output :The number of rows where x and y are at least 5: 4
    """
    #How many rows have both x and y at least 5
    xy_query = """ SELECT COUNT(*) FROM demo WHERE x >= 5 AND y >= 5; """
    #checking rows where both x and y are at least 5
    result = data_operations(connect,xy_query)
    for row in result:
        print(f'The number of rows where x and y are at least 5: {row[0]}')
def y_unique(connect):
"""
Function for finding the Number of Unique values of Y
Expected output : The number of distinct values of y : 2
"""
#Query for unique values of y
y_unique = """ SELECT COUNT(DISTINCT y) FROM demo ; """
#checking for distinct values of y
result = data_operations(connect,y_unique)
for row in result:
print(f'The number of distinct values of y : {row[0]}')
#Create a connection within a try/except block so errors do not raise uncaught exceptions
try:
#Creating a database with name demo_data
connect = sqlite3.connect('demo_data.sqlite3')
#SQL Query for creating the table
create_table_query = """CREATE TABLE "demo" (
"s" TEXT,
"x" INTEGER,
"y" INTEGER
); """
#Creating the table by sending to data operations function
result = data_operations(connect,create_table_query)
#Inserting values into the demo table
insert_query = """INSERT INTO demo (s,x,y) values
("\'g\'",3,9),
("\'v\'",5,7),
("\'f\'",8,7) ;"""
#inserting the values into the demo table
data_operations(connect,insert_query)
#Now checking the demo table for data
count_query(connect) # Number of rows in the table
xy_query(connect) # Number of rows with x y values more than 5
y_unique(connect) # Number of distinct y values in the table
except:
pass
#closing the connection to the database
connect.close()
|
Python
| 97
| 31.804123
| 88
|
/sprint/demo_data.py
| 0.613329
| 0.60547
|
hanhe333/CS186
|
refs/heads/master
|
#!/usr/bin/env python
import sys
from auction import iround
import math
from gsp import GSP
from util import argmax_index
class HHAWbudget:
"""Balanced bidding agent"""
def __init__(self, id, value, budget):
self.id = id
self.value = value
self.budget = budget
self.TOTAL_CLICKS = 0
self.NUMBER_OF_PLAYERS = 0
self.NUMBER_OF_SLOTS = 0
def initialize_parameters(self, t, history):
num_slots = len(history.round(t-1).clicks)
self.NUMBER_OF_SLOTS = num_slots
total_clicks = 0
for time in range(48):
total_clicks += self.clicks_round(time)
self.TOTAL_CLICKS = total_clicks
self.NUMBER_OF_PLAYERS = len(history.round(t-1).bids)
def clicks_round(self, t):
clicks = 0.0
for slot in range(self.NUMBER_OF_SLOTS):
clicks += self.clicks_slot(t, slot)
return clicks
def clicks_slot(self, t, slot):
return iround(iround(30*math.cos(math.pi*t/24) + 50)*(.75**slot))
def clicks_factor(self, t):
return (self.clicks_round(t)/(self.TOTAL_CLICKS/48))**(.33)
def calculate_past_clicks(self, t, history):
past_clicks = 0
for i in range(t-1):
past_clicks += sum(history.round(i).clicks)
return past_clicks
def calculate_budgets(self, t, history):
# sorted from lowest bid to highest
id_to_budget = dict()
ids = list()
for i in xrange(len(history.round(t-1).bids)):
ids.append(history.round(t-1).bids[i][0])
for idx in ids:
for i in xrange(t):
bids = sorted(history.round(i).bids, key=lambda bid: -bid[1])
slot_num = -1
for j in xrange(len(bids)-1):
if bids[j][0] == idx and j < len(history.round(i).slot_payments):
slot_num = j
if not idx in id_to_budget:
id_to_budget[idx] = 0
if slot_num != -1:
id_to_budget[idx] = id_to_budget[idx] + history.round(i).slot_payments[slot_num]
return id_to_budget
def defaults(self, t, history, reserve):
num_zero = 0
if t < 2:
return 0
else:
for i in xrange(self.NUMBER_OF_PLAYERS):
if history.round(t-2).bids[i][1] <= reserve+1 and history.round(t-1).bids[i][1] <= reserve+1 and i != self.id:
num_zero += 1
elif history.round(t-1).bids[i][1] <= reserve+1 and i != self.id:
num_zero += .5
return num_zero
def budget_factor(self, t, history, reserve):
budget = self.calculate_budgets(t, history)[self.id]
past_clicks = self.calculate_past_clicks(t, history)
defaults = self.defaults(t, history, reserve)
budget_value = (1.0 - (budget/60000.0) + (float(past_clicks)/self.TOTAL_CLICKS))**2.5
if budget_value < .7 and defaults >= .5:
return 0
elif budget_value < .8 and defaults >= 1:
return 0
elif budget_value < .85 and defaults >= 1.5 and t < 47:
return 0
return budget_value
def slot_info(self, t, history, reserve):
"""Compute the following for each slot, assuming that everyone else
keeps their bids constant from the previous rounds.
Returns list of tuples [(slot_id, min_bid, max_bid)], where
min_bid is the bid needed to tie the other-agent bid for that slot
in the last round. If slot_id = 0, max_bid is 2* min_bid.
Otherwise, it's the next highest min_bid (so bidding between min_bid
and max_bid would result in ending up in that slot)
"""
prev_round = history.round(t-1)
avr_round = []
if t >= 2:
prev_round2 = history.round(t-2)
# predict other people's values
for i in xrange(len(prev_round.bids)):
if prev_round.bids[i][1] == 0:
avr_round.append(prev_round.bids[i])
elif abs(prev_round.bids[i][1] - prev_round2.bids[i][1]) < 10:
avr_round.append((prev_round.bids[i][0], .5*prev_round.bids[i][1] + .5*prev_round2.bids[i][1]))
else:
avr_round.append(prev_round2.bids[i])
else:
avr_round = prev_round.bids
other_bids = filter(lambda (a_id, b): a_id != self.id, avr_round)
clicks = prev_round.clicks
def compute(s):
(min, max) = GSP.bid_range_for_slot(s, clicks, reserve, other_bids)
if max == None:
max = 2 * min
return (s, min, max)
info = map(compute, range(len(clicks)))
# sys.stdout.write("slot info: %s\n" % info)
return info
def expected_utils(self, t, history, reserve):
"""
Figure out the expected utility of bidding such that we win each
slot, assuming that everyone else keeps their bids constant from
the previous round.
returns a list of utilities per slot.
"""
        clicks = history.round(t-1).clicks
        utilities = [0.0]*(len(clicks))
info = self.slot_info(t, history, reserve)
for i in xrange(len(clicks)):
s_k = self.clicks_slot(t, i)
utilities[i] = s_k*(self.value - info[i][1])
return utilities
def target_slot(self, t, history, reserve):
"""Figure out the best slot to target, assuming that everyone else
keeps their bids constant from the previous rounds.
Returns (slot_id, min_bid, max_bid), where min_bid is the bid needed to tie
the other-agent bid for that slot in the last round. If slot_id = 0,
max_bid is min_bid * 2
"""
i = argmax_index(self.expected_utils(t, history, reserve))
info = self.slot_info(t, history, reserve)
return info[i]
def initial_bid(self, reserve):
bid = 0
if self.value <= 60:
return self.value-.01
else:
return .55*self.value-.01
def bid(self, t, history, reserve):
# The Balanced bidding strategy (BB) is the strategy for a player j that, given
# bids b_{-j},
# - targets the slot s*_j which maximizes his utility, that is,
# s*_j = argmax_s {clicks_s (v_j - p_s(j))}.
# - chooses his bid b' for the next round so as to
# satisfy the following equation:
# clicks_{s*_j} (v_j - p_{s*_j}(j)) = clicks_{s*_j-1}(v_j - b')
# (p_x is the price/click in slot x)
# If s*_j is the top slot, we (arbitrarily) choose
# b' = (v_j + p_0(j)) / 2. We can
# thus deal with all slots uniformly by defining clicks_{-1} = 2 clicks_0.
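# Solving that balance equation for b' (a sketch, in the comment's
# notation): b' = v_j - (clicks_{s*_j}/clicks_{s*_j - 1}) * (v_j - p_{s*_j}(j)).
# For example, with v_j = 50, p_{s*_j}(j) = 20 and a click ratio of 0.75,
# b' = 50 - 0.75 * (50 - 20) = 27.5.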
#
# initialize parameters
if self.NUMBER_OF_PLAYERS == 0 or self.TOTAL_CLICKS == 0 or self.NUMBER_OF_SLOTS == 0:
self.initialize_parameters(t, history)
# print "Number of players: ", self.NUMBER_OF_PLAYERS
# print "Number of slots: ", self.NUMBER_OF_SLOTS
# print "Total clicks", self.TOTAL_CLICKS
prev_round = history.round(t-1)
(slot, min_bid, max_bid) = self.target_slot(t, history, reserve)
num_default = self.defaults(t, history, reserve)
bid = 0
if num_default == 0:
bid = (min_bid + max_bid)/2
elif num_default > 0 and num_default <= 1:
bid = (.25*min_bid + .75*max_bid)
elif num_default > 1:
bid = max_bid
budget_effect = self.budget_factor(t, history, reserve)
click_effect = self.clicks_factor(t)
# print "defaults: ", num_default
# print "bid (pre factors): ", bid, min_bid, max_bid
# print "slot: ", slot
# print "budget: ", budget_effect
# print "click: ", click_effect
bid = bid*budget_effect*click_effect
if bid > max_bid:
bid = max_bid
if bid > self.value:
bid = self.value
if bid < reserve+1.01 and reserve+1.01 < self.value:
return reserve+1.01
if self.value < 75 and budget_effect > 1:
return self.value
return iround(bid)-0.01
def __repr__(self):
return "%s(id=%d, value=%d)" % (
self.__class__.__name__, self.id, self.value)
|
Python
| 245
| 33.746941
| 126
|
/HW3/hw3-code/hhawbudget.py
| 0.547035
| 0.530476
|
aramidetosin/NornirJunos
|
refs/heads/main
|
import requests
from datetime import datetime
import time
import argparse
import getpass
import json
from rich import print
import logging
import urllib3
from netmiko import ConnectHandler
from eve_up import get_nodes, get_links
from ipaddress import *
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
logging.basicConfig(level=logging.DEBUG,
format=f'%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s')
def base_config():
counter = 0
nodes = get_nodes(topo="NornirJunos.unl")
ips = ['192.168.20.191',
'192.168.20.192',
'192.168.20.193',
'192.168.20.194',
'192.168.20.195',
'192.168.20.196',
'192.168.20.197',
'192.168.20.198',
'192.168.20.199']
for key, value in nodes.items():
try:
if value["template"] == "vmx":
## Get the telnet address and port from the node URL
url = value["url"].split(":")
ip = url[1].replace("//", "")
port = (url[2])
node_conn = {
'device_type' : 'juniper_junos_telnet',
'host' : ip,
'port': port,
}
# Initiate connection to EVE
net_connect = ConnectHandler(**node_conn)
hostname = f'vMX{counter + 1}'
# Send commands and view output
config_commands = [ 'set system root-authentication encrypted-password "$1$hBBaQcLY$AZYmNq9VbicPSNbl4KDcf0"',
'delete chassis auto-image-upgrade',
f'set system host-name {hostname}',
'set system domain-name abc.co',
'set system services ssh',
'set system services netconf ssh',
'set system login user cisco class super-user authentication encrypted-password "$1$hBBaQcLY$AZYmNq9VbicPSNbl4KDcf0"',
f'set interfaces ge-0/0/4 unit 0 family inet address {ips[counter]}/24']
output = net_connect.send_config_set(config_commands, exit_config_mode=False)
counter += 1
print(output)
# Commit
output = net_connect.commit(and_quit=True)
print(output)
except Exception as err:
continue
# def ip_addresser(subnet: str = '192.168.20.0/24'):
# subnets = list(ip_network(subnet).subnets(new_prefix = 30))
# print(subnets)
# for subn in subnets:
# print(list(subn.hosts()))
# links = get_links("NornirJunos.unl")
# print(links)
# return subnets
if __name__ == "__main__":
base_config()
|
Python
| 75
| 35.653332
| 117
|
/working/set_ints.py
| 0.534207
| 0.487991
|
swiftcore/ReSQL
|
refs/heads/master
|
#!/bin/env python
# the graphical interface of the ReSQL compiler client
import wx
import wx.grid
from subprocess import *
import sys
# call the client to connect to the server for rewriting the sql statement
def call_clnt(sql):
try:
# under windows
# p1 = Popen(['echo',sql],stdout=PIPE,shell=True)
# p2 = Popen(['python','clnt.py'],stdin=p1.stdout,stdout=PIPE,shell=True)
p1 = Popen(['echo',sql],stdout=PIPE)
p2 = Popen(['python','clnt.py'],stdin=p1.stdout,stdout=PIPE)
fout,ferr = p2.communicate()
return fout
except OSError, e:
pass
# define my application form
class MyApp(wx.App):
def OnInit(self):
frame = MyFrame("ReSQL Client v0.1",(50,60),(460,420))
frame.Show()
self.SetTopWindow(frame)
return True
# define the output table of the query result
class MyTable(wx.grid.PyGridTableBase):
def __init__(self):
wx.grid.PyGridTableBase.__init__(self)
self.data = {}
self.odd = wx.grid.GridCellAttr()
self.odd.SetBackgroundColour("white")
self.even = wx.grid.GridCellAttr()
self.even.SetBackgroundColour("wheat")
self.rows = self.cols = 5
def setData(self,data):
self.data = data
def GetNumberRows(self):
return self.rows
def GetNumberCols(self):
return self.cols
def SetNumberRows(self,rows):
self.rows = rows
def SetNumberCols(self,cols):
self.cols = cols
def IsEmptyCell(self,row,col):
return self.data.get((row,col)) is None
def GetValue(self,row,col):
value = self.data.get((row,col))
if value is not None:
return value
else:
return ''
def SetValue(self,row,col,value):
self.data[(row,col)] = value
def GetAttr(self,row,col,kind):
attr = [self.even,self.odd][row % 2]
attr.IncRef()
return attr
# define the Frame with menus, buttons, output table in it.
class MyFrame(wx.Frame):
def __init__(self,title,post,sizet):
wx.Frame.__init__(self,None,-1,title, pos=post,size=sizet)
menuFile = wx.Menu()
menuFile.Append(1,"&About")
menuFile.AppendSeparator()
menuFile.Append(2,"E&xit")
menuOutput = wx.Menu()
menuOutput.Append(3,"Output rewriting")
menuOutput.Append(4,"Output result")
menuBar = wx.MenuBar()
menuBar.Append(menuFile,"&File")
menuBar.Append(menuOutput,"&Output")
panel = wx.Panel(self,-1)
self.schemaBtn = wx.Button(panel,-1,"S",pos=(20,0),size=(40,30))
self.rewriteBtn = wx.Button(panel,-1,"R",pos=(70,0),size=(40,30))
self.rewriteBtn.Disable()
self.execBtn = wx.Button(panel,-1,"Exec",pos=(120,0),size=(40,30))
self.execBtn.Disable()
self.Bind(wx.EVT_BUTTON,self.OnSchemaClick,self.schemaBtn)
self.Bind(wx.EVT_BUTTON,self.OnRewrite,self.rewriteBtn)
self.Bind(wx.EVT_BUTTON,self.OnExecQuery,self.execBtn)
self.text = wx.TextCtrl(panel,-1,"",size=(440,100),pos=(5,40),style=wx.TE_MULTILINE)
from wx.grid import Grid
self.grid = Grid(panel,pos=(5,140),size=(440,200))
self.table = MyTable()
self.grid.SetTable(self.table)
self.SetMenuBar(menuBar)
self.CreateStatusBar()
self.SetStatusText("Welcome to ReSQL Client")
self.Bind(wx.EVT_MENU,self.OnAbout,id=1)
self.Bind(wx.EVT_MENU,self.OnQuit,id=2)
self.Bind(wx.EVT_MENU,self.OnOutputRewriting,id=3)
self.Bind(wx.EVT_MENU,self.OnOutputResult,id=4)
def OnQuit(self,event):
self.Close()
def OnAbout(self,event):
msg = "The Rewriting SQL client v0.1 !\n\nContact:wanghit2006@gmail.com"
wx.MessageBox(msg,"About ReSQL client", wx.OK | wx.ICON_INFORMATION, self)
def OnSchemaClick(self,event):
schemaStr = 'schema: ' + self.text.GetValue().decode('utf-8').encode('ascii')
back = call_clnt(schemaStr.replace('\n',' '))
wx.MessageBox(back,"Schema",wx.OK | wx.ICON_INFORMATION,self)
self.rewriteBtn.Enable()
def OnRewrite(self,event):
sql = "sql: " + self.text.GetValue().decode('utf-8').encode('ascii')
self.resql = call_clnt(sql.replace('\n',' '))
wx.MessageBox(self.resql,"Rewrite", wx.OK | wx.ICON_INFORMATION,self)
self.execBtn.Enable()
def OnExecQuery(self,event):
#rows,cols,data = self.execQuery(self.resql)
#self.table.SetNumberRows(rows)
#self.table.SetNumberCols(cols)
#self.table.setData(data)
#self.grid.SetTable(self.table)
wx.MessageBox("Exec " + self.resql,"Rewrite", wx.OK | wx.ICON_INFORMATION,self)
def OnOutputRewriting(self,event):
msg = "output writing query"
wx.MessageBox(msg,"Rewriting query",wx.OK | wx.ICON_INFORMATION,self)
def OnOutputResult(self,event):
rows,cols = self.table.GetNumberRows(),self.table.GetNumberCols()
wd = []
for i in range(rows):
tmp = []
for j in range(cols):
tmp.append(self.table.data[(i,j)])
wd.append(tmp)
import csv
writer = csv.writer(open('sample.csv','wb'))
writer.writerows(wd)
msg = "output query result"
wx.MessageBox(msg,"Query result",wx.OK | wx.ICON_INFORMATION,self)
def execQuery(self,sql):
import pymssql
cxn = pymssql.connect(password='61813744',host=r'.\SQLEXPRESS',database='hello')
cur = cxn.cursor()
cur.execute(sql)
data = {}
i = coln = 0
for eachRow in cur.fetchall():
coln = len(eachRow)
for x in range(coln):
data[(i,x)] = eachRow[x]
i += 1
cxn.commit()
cxn.close()
return cur.rownumber,coln,data
if __name__ == '__main__':
app = MyApp(False)
app.MainLoop()
|
Python
| 180
| 32.416668
| 92
|
/showcase.py
| 0.59335
| 0.579717
|
swiftcore/ReSQL
|
refs/heads/master
|
# the rewrite sql compiler server end
import socket
from subprocess import *
HOST = ''
PORT = 9999
BUFSIZE=1024
ADDR = (HOST,PORT)
# call the resql compiler
# pass in the sql statement
# return the rewriting sql result
def rewrite_sql(sql):
print 'In rewrite_sql: ',sql
p1 = Popen(["echo",sql],stdout=PIPE)
p2 = Popen(["./resql"],stdin=p1.stdout,stdout=PIPE)
fout = p2.communicate()[0]
if p2.returncode == 0:
return fout
else:
return "can't rewrite"
# parse a CREATE TABLE statement
# and generate the schema file
# for convenient processing by the compiler written in C.
def parse_query(query):
table = []
keys = []
attrs = []
# parse table name
s = query.find('c')
e = query.find('(')
st = query[s:e]
stab = st.split()
table.append(stab[-1])
s = e+1
e = query.rfind(')')
sat = query[s:e]
sats = sat.split(',')
for attab in sats:
if attab.find("foreign") != -1:
pass
elif attab.find('primary') == -1:
attrs.append((attab.split())[0])
else:
s = attab.find('(')
e = attab.rfind(')')
keys = attab[s+1:e].split(',')
for i in range(len(keys)):
keys[i] = keys[i].strip()
for key in keys:
attrs.remove(key)
table.append(keys)
table.append(attrs)
return table
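# Example (a sketch of the expected input shape):
# parse_query("create table t (a int, b int, primary key (a))")
# returns ['t', ['a'], ['b']] -- [table name, key columns, other columns].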
# read in the schema definitions,
# pass each definition to the parse_query function,
# and write the parsed schema into the schema.txt file
def extract_schema(schemadef):
querys = schemadef.split(';')
tables = []
for query in querys:
query = query.strip()
if query != '':
tables.append(parse_query(query))
fout = open('schema.txt','w')
fout.write(str(len(tables)) + '\n')
for table in tables:
fout.write(table[0]+'\n')
fout.write(str(len(table[1]))+' ')
fout.write(str(len(table[2]))+'\n')
for key in table[1]:
fout.write(key+'\n')
for attr in table[2]:
fout.write(attr+'\n')
fout.write('\n')
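# For the single-table example above, schema.txt would contain (sketch):
# 1    <- number of tables
# t    <- table name
# 1 1  <- key count, non-key attribute count
# a    <- key column
# b    <- non-key column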
# deal with the user request
def deal_with(conn,data):
if not data.rstrip().endswith(';'):
data = data + ';'
if data.startswith("\""):
data = data[1:]
if data.endswith("\";"):
data = data[:-2]
if data.startswith('schema:'):
print 'create schema with'
create_sql = data.split(':')[1]
print create_sql
extract_schema(create_sql)
conn.send('SCHEMA')
elif data.startswith("sql:"):
ts = data.split(':')[1]
print 'try rewriting'
print ts
resql = rewrite_sql(ts)
conn.send(resql)
else:
print 'wrong format'
print data
conn.send('WRONG')
if __name__ == '__main__':
sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock.bind(ADDR)
sock.listen(5)
while True:
print 'waiting... for connection...'
connection,address = sock.accept()
print '...connect from',address
buf = connection.recv(1024)
deal_with(connection,buf)
connection.close()
sock.close()
|
Python
| 127
| 24.338583
| 61
|
/server.py
| 0.557005
| 0.546132
|
canada-poll-location-analysis/General-Election-Official-Voting-Results-Scraper
|
refs/heads/master
|
import logging
import os
import pandas as pd
import wget as wget
from tqdm import tqdm
from sean_logger import setup_logging
from toolbox import make_directory
def scrape_election_results(prov_id=35, base_url=None, results_format=1):
setup_logging()
if results_format == 1:
results_format = "pollbypoll_bureauparbureau"
elif results_format == 2:
results_format = "pollresults_resultatsbureau"
if base_url is None:
base_url = "https://www.elections.ca/res/rep/off/ovr2015app/41/data_donnees/"
num_except_in_a_row = 0
exceptions = []
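# With the default arguments this requests URLs of the form
# .../data_donnees/pollbypoll_bureauparbureau35000.csv -- one CSV per
# federal electoral district (FED) number in the chosen province.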
for fed_num in tqdm(range(prov_id * 1000, ((prov_id + 1) * 1000) - 1)):
logging.info(f"fed num {fed_num}")
try:
url = f"{base_url}{results_format}{fed_num}.csv"
outfile = f"./data_donnees/{results_format}{fed_num}.csv"
logging.debug(url)
logging.debug(outfile)
make_directory(outfile)
wget.download(url, outfile)
num_except_in_a_row = 0
except Exception:
logging.exception(f"Exception!! {fed_num}")
exceptions.append(fed_num)
num_except_in_a_row += 1
if num_except_in_a_row > 10:
logging.info(f"Probably finished at {fed_num - num_except_in_a_row}")
break
logging.info(f"Missed FED Nums:")
for fed in exceptions:
logging.info(fed)
logging.info("")
print('fin')
def combine_result_csvs(folder=None, cols=None):
if folder is None:
folder = "./data_donnees/"
files = os.listdir(folder)
if cols is None:
cols = "Electoral District Number/Numéro de circonscription," \
"Electoral District Name/Nom de circonscription," \
"Polling Station Number/Numéro du bureau de scrutin," \
"Polling Station Name/Nom du bureau de scrutin," \
"Rejected Ballots/Bulletins rejetés," \
"Total Votes/Total des votes," \
"Electors/Électeurs".split(',')
print("Reading...")
frames = [pd.read_csv(folder + file, usecols=cols) for file in tqdm(files)]
print("Combining...")
data = pd.concat(frames)
print("Writing...")
data.to_csv("turnout_data_ontario_42nd_federal.csv", index=False)
print("Fin.")
if __name__ == '__main__':
scrape_election_results()
combine_result_csvs()
|
Python
| 69
| 33.492752
| 85
|
/GeneralElectionOfficialVotingResultsScraper.py
| 0.587995
| 0.576562
|
kaosx5s/OratoricalDecaf
|
refs/heads/master
|
import datetime
from google.appengine.ext import db
from google.appengine.api import users
'''
DATASTORE CLASSES
'''
class Articles(db.Model):
link = db.LinkProperty()
text = db.StringProperty()
votes = db.IntegerProperty()
posted = db.DateTimeProperty()
owner = db.StringProperty()
class Votes(db.Model):
article_id = db.IntegerProperty()
users = db.ListProperty(db.Email)
class Comments(db.Model):
article_id = db.IntegerProperty()
comment_owner = db.EmailProperty()
comment_text = db.StringProperty()
posted = db.DateTimeProperty()
'''
DATASTORE FUNCTIONS
'''
'''
Function: Post Article
Properties:
input:
link = URL link passed from script
text = Article title text passed from script
output:
None
required:
None
'''
def Post_Article(link,text,owner):
article_info = Articles()
#set the article data
article_info.link = link
article_info.text = text
article_info.votes = 0
article_info.posted = datetime.datetime.now()
article_info.owner = owner
#store it!
article_info.put()
'''
Function: Get Article List
Properties:
input:
None
output:
Articles -> list
[0] = database index id
[1] = article link (URL)
[2] = article text
[3] = article vote amount
required:
None
'''
def Get_Articles():
articles = []
result = []
for i in Articles.all().order('-posted'):
result = [i.key().id(),i.link,i.text,i.votes]
articles.append(result)
return(articles)
'''
Function: Post Comment
Properties:
input:
article_id = entity id of article from script
commentor = comment author (username)
comment_text = comment text body passed from script
output:
None
required:
None
'''
def Post_Comment(article_id,commentor,comment_text):
#note that article_id is actually an entity id which can be pulled when we load the comments
new_comment = Comments(Articles().get_by_id(ids = article_id).key())
#setup the comment data
new_comment.article_id = article_id
new_comment.comment_owner = commentor
new_comment.comment_text = comment_text
new_comment.posted = datetime.datetime.now()
new_comment.put()
'''
Function: Article Vote
Properties:
input:
output:
required:
'''
def Vote_Article(username,article_id,vote):
'''
note, vote can only be -1 or 1, 0 IS NOT acceptable
also note this is a two-prong function: we must make sure the user has not voted before; if they have not voted, then
we must add the vote to the Articles() table and then also add an entry to the Votes() table.
'''
new_vote = Votes().all().filter("article_id =",int(article_id))
#we should always have an article that matches its ID; if not, then we are in serious trouble!
article_add_vote = Articles().get_by_id(ids = int(article_id))
email_address = db.Email(username)
#make sure the votes for this article exist, if not create a new entry for them.
if new_vote.get() is None:
#WARNING: we are redefining new_vote!
new_vote = Votes(Articles().get_by_id(ids = int(article_id)).key())
new_vote.article_id = int(article_id)
new_vote.users = [email_address]
article_add_vote.votes = int(vote)
#add the vote to the article first
article_add_vote.put()
#now add the votes entity
new_vote.put()
return
else:
#check to see if we have already voted for this article!
already_voted = Votes.all().filter("article_id =",article_id).filter("users in",[email_address]).get()
if already_voted is None:
return 1
new_vote = Votes().all().filter("article_id =",int(article_id)).get()
new_vote = Votes(Articles().get_by_id(ids = int(article_id)).key()).get_by_id(ids = new_vote.key().id())
new_vote.users.append(email_address)
article_add_vote.votes = int(article_add_vote.votes) + int(vote)
new_vote.put()
article_add_vote.put()
|
Python
| 144
| 25.145834
| 117
|
/Project/datastore.py
| 0.696865
| 0.694474
|
kaosx5s/OratoricalDecaf
|
refs/heads/master
|
'''
Author: Robert Cabral
File Name: Post_Module.py
Purpose: To create an Article Post into the database that has the Article Title and Article URL properties
associated with the Article Post.
Date: 2/16/2013
'''
import datastore
import webapp2
import cgi
from google.appengine.api import users
form = """
<html>
<body>
<form method="post">
<div><h1>Post Page</h1></div>
<div>%(error)s</div>
<div>Title:</div>
<div><textarea name="link_title" rows="2" cols="60">%(link_title)s</textarea></div>
<div>Location/URL:<br></div>
<div><textarea name="link_url" rows="2" cols="60">%(link_url)s</textarea></div>
<div><input type="submit" value="Post"></div>
</form>
</body>
</html>
"""
def escape_html(s):
return cgi.escape(s, quote = True)
class PostPage(webapp2.RequestHandler):
def write_form(self, error="", title="", url=""):
self.response.out.write(form %{"error": error,
"link_title": escape_html(title),
"link_url": escape_html(url)})
def get(self):
#We should check to see if the user is logged in here instead of after our POST.
if users.get_current_user():
self.write_form()
else:
self.redirect(users.create_login_url(self.request.uri))
def post(self):
user = users.get_current_user()
user_link_url = self.request.get('link_url')
user_link_title = self.request.get('link_title')
user_name = user.nickname()
datastore.Post_Article(user_link_url,user_link_title,user_name)
self.redirect("/")
|
Python
| 49
| 29.55102
| 107
|
/Project/articles.py
| 0.637701
| 0.626337
|
kaosx5s/OratoricalDecaf
|
refs/heads/master
|
import cgi
import datetime
import urllib
import webapp2
import datastore
from google.appengine.ext import db
from google.appengine.api import users
class RequestHandler(webapp2.RequestHandler):
def get(self, article_id):
self.response.out.write('<html><body>')
#article_key = self.request.get('article_key')
my_article = datastore.Articles().get_by_id(ids = int(article_id))
article_name = my_article.text
#user login check
user = users.get_current_user()
if not user:
return self.redirect(users.create_login_url(self.request.uri))
#article name
self.response.out.write('Article Name: <b>%s</b>' % article_name)
self.response.out.write('<br><a href="/">Back</a>')
#comment query
comment_list = datastore.Comments().all().filter("article_id =",int(article_id))
#comment submission form
self.response.out.write("""
<form method="post">
<div><textarea name="comment_text" rows="3" cols="60"></textarea></div>
<div><input type="submit" value="Post"></div>
</form>""")
for comments in comment_list:
#sub-note - comments will always have an author
self.response.out.write('<b>%s</b> wrote:' % comments.comment_owner)
self.response.out.write('<blockquote>%s</blockquote>' % cgi.escape(comments.comment_text))
self.response.out.write("""</body></html>""" )
def post(self, article_id):
comment_text = self.request.get('comment_text')
datastore.Post_Comment(int(article_id),users.get_current_user().email(),cgi.escape(comment_text))
self.redirect('/comment/%s'% (article_id))
|
Python
| 51
| 29.215687
| 98
|
/Project/comment.py
| 0.696104
| 0.692857
|
kaosx5s/OratoricalDecaf
|
refs/heads/master
|
# This file contains hardcoded strings and values
main_page="article_list.html"
main_title="Oratorical Decaf"
|
Python
| 4
| 26.75
| 49
|
/Project/config.py
| 0.792793
| 0.792793
|
kaosx5s/OratoricalDecaf
|
refs/heads/master
|
import webapp2
import os
import datastore
import config
import vote
import articles
import comment
import jinja2
from google.appengine.ext import db
from google.appengine.api import users
# jinja2 file loading copied from
# https://github.com/fRuiApps/cpfthw/blob/master/webapp2/views.py
TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), 'templates')
j_env = jinja2.Environment(loader=jinja2.FileSystemLoader(TEMPLATE_DIR))
class MainHandler(webapp2.RequestHandler):
def get(self):
template = j_env.get_template(config.main_page)
self.response.write('''
<a href="/article">Post new article</a>
''')
articles = datastore.Get_Articles()
self.response.write(template.render(title=config.main_title,data = articles))
app = webapp2.WSGIApplication([
('/', MainHandler),
('/vote/(.*)', vote.RequestHandler),
('/article', articles.PostPage),
('/comment/(.*)', comment.RequestHandler)
],
debug=True)
|
Python
| 33
| 27.848484
| 85
|
/Project/main.py
| 0.717437
| 0.709034
|
moniker-dns/icinga-cookbook
|
refs/heads/master
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import socket
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--socket', help="Socket to connect to", type=str,
default="/var/run/pdns.controlsocket")
parser.add_argument('--timeout', help="Socket timeout", type=int, default=5)
args = parser.parse_args()
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.settimeout(args.timeout)
try:
s.connect(args.socket)
s.send('PING\n')
data = s.recv(1024)
except socket.timeout:
print ("CRITICAL: Socket timeout, please investigate!")
sys.exit(2)
except socket.error, e:
print ("CRITICAL: %s" % e)
sys.exit(2)
s.close()
if data != 'PONG':
print('CRITICAL: Ping error, didn\'t receive PONG!')
sys.exit(2)
else:
print("OK: Socket is responding with PONG")
sys.exit(0)
|
Python
| 36
| 23.333334
| 76
|
/files/default/plugins/check_pdns_socket
| 0.670857
| 0.660571
|
jeespinozam/bomberman-ai
|
refs/heads/master
|
from tensorforce.agents import PPOAgent
from serpent.utilities import SerpentError
import numpy as np
import os
# This file is borrowed from SerpentAIsaacGameAgentPlugin:
# https://github.com/SerpentAI/SerpentAIsaacGameAgentPlugin/blob/master/files/helpers/ppo.py
class SerpentPPO:
def __init__(self, frame_shape=None, game_inputs=None):
if frame_shape is None:
raise SerpentError("A 'frame_shape' tuple kwarg is required...")
states_spec = {"type": "float", "shape": frame_shape}
if game_inputs is None:
raise SerpentError("A 'game_inputs' dict kwarg is required...")
self.game_inputs = game_inputs
self.game_inputs_mapping = self._generate_game_inputs_mapping()
actions_spec = {"type": "int", "num_actions": len(self.game_inputs)}
network_spec = [
{"type": "conv2d", "size": 1, "window": 2, "stride": 1},
{"type": "flatten"},
# {"type": "dense", "size": 64},
{"type": "dense", "size": 6}
]
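# Note (sketch): network_spec is only the shared trunk -- one 1-filter
# 2x2 convolution, a flatten, and a 6-unit dense layer; tensorforce's
# PPOAgent appends the categorical action head for the int action spec.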
self.agent = PPOAgent(
states=states_spec,
actions=actions_spec,
network=network_spec,
batched_observe=256,
batching_capacity=1000,
# BatchAgent
#keep_last_timestep=True,
# PPOAgent
step_optimizer=dict(
type='adam',
learning_rate=1e-4
),
optimization_steps=10,
# Model
scope='ppo'
#discount=0.97,
# DistributionModel
#distributions=None,
#entropy_regularization=0.01,
# PGModel
#baseline_mode=None,
#baseline=None,
#baseline_optimizer=None,
#gae_lambda=None,
# PGLRModel
#likelihood_ratio_clipping=None,
#summary_spec=summary_spec,
#distributed_spec=None,
# More info
#device=None,
#session_config=None,
#saver=None,
#variable_noise=None,
#states_preprocessing_spec=None,
#explorations_spec=None,
#reward_preprocessing_spec=None,
#execution=None,
#actions_exploration=None,
#update_mode=None,
#memory=None,
#subsampling_fraction=0.1
)
def generate_action(self, game_frame_buffer):
states = np.stack(
game_frame_buffer,
axis=2
)
# Get prediction from agent, execute
action = self.agent.act(states)
label = self.game_inputs_mapping[action]
return action, label, self.game_inputs[label]
def observe(self, reward=0, terminal=False):
self.agent.observe(reward=reward, terminal=terminal)
def _generate_game_inputs_mapping(self):
mapping = dict()
for index, key in enumerate(self.game_inputs):
mapping[index] = key
return mapping
def save_model(self):
self.agent.save_model(directory=os.path.join(os.getcwd(), "datasets", "bomberman", "ppo_model"), append_timestep=False)
def restore_model(self):
self.agent.restore_model(directory=os.path.join(os.getcwd(), "datasets", "bomberman"))
|
Python
| 106
| 30.716982
| 127
|
/plugins/SerpentBombermanGameAgentPlugin/files/helpers/ppo.py
| 0.550863
| 0.542534
|
jeespinozam/bomberman-ai
|
refs/heads/master
|
# import time
# import os
# import pickle
# import serpent.cv
#
# import numpy as np
# import collections
#
# from datetime import datetime
#
#
# from serpent.frame_transformer import FrameTransformer
# from serpent.frame_grabber import FrameGrabber
# from serpent.game_agent import GameAgent
# from serpent.input_controller import KeyboardKey
# from serpent.sprite import Sprite
# from serpent.sprite_locator import SpriteLocator
# from serpent.sprite_identifier import SpriteIdentifier
#
# # from .helpers.game_status import Game
# from .helpers.terminal_printer import TerminalPrinter
# from .helpers.ppo import SerpentPPO
#
#
# import random
#
# class SerpentBombermanGameAgent(GameAgent):
#
# def __init__(self, **kwargs):
# super().__init__(**kwargs)
#
# self.frame_handlers["PLAY"] = self.handle_play
#
# self.frame_handler_setups["PLAY"] = self.setup_play
#
# self.value = None
# print("Sprites")
# print(type(self.game.sprites))
# print("game")
# print(self.game)
# print("game type")
# print(type(self.game))
# for i,value in enumerate(self.game.sprites):
# if(i==13):
# print(value)
# self.value = value
# self.spriteGO = self.game.sprites.get("SPRITE_GAME_OVER")
# self.spriteWO = self.game.sprites.get("SPRITE_GAME_WON")
# #self.sprite.image_data
# self.printer = TerminalPrinter()
#
# def setup_play(self):
# game_inputs = {
# "Move Up": [KeyboardKey.KEY_UP],
# "Move Down": [KeyboardKey.KEY_DOWN],
# "Move Left": [KeyboardKey.KEY_LEFT],
# "Move Right": [KeyboardKey.KEY_RIGHT],
# "Leave Bomb": [KeyboardKey.KEY_SPACE]
# }
# self.game_inputs = game_inputs
#
# # self.ppo_agent = SerpentPPO(
# # frame_shape=(480, 549, 4),
# # game_inputs=game_inputs
# # )
#
# self.first_run = True
# self.game_over = False
# self.current_attempts = 0
# self.run_reward = 0
# self.started_at = datetime.utcnow().isoformat()
# self.paused_at = None
#
# print("Enter - Auto Save")
# self.input_controller.tap_key(KeyboardKey.KEY_ENTER)
# time.sleep(2)
#
# return
#
# def extract_game_area(self, frame_buffer):
# game_area_buffer = []
#
# for game_frame in frame_buffer.frames:
# game_area = serpent.cv.extract_region_from_image(
# game_frame.grayscale_frame,
# self.game.screen_regions["GAME_REGION"]
# )
#
# frame = FrameTransformer.rescale(game_area, 0.25)
# game_area_buffer.append(frame)
#
# return game_area_buffer
#
# def handle_play(self, game_frame):
# if self.first_run:
# self.current_attempts += 1
# self.first_run = False
# return None
#
# self.printer.add("")
# self.printer.add("BombermanAI")
# self.printer.add("Reinforcement Learning: Training a PPO Agent")
# self.printer.add("")
# self.printer.add(f"Stage Started At: {self.started_at}")
# self.printer.add(f"Current Run: #{self.current_attempts}")
# self.printer.add("")
#
# inputs = [KeyboardKey.KEY_UP,
# KeyboardKey.KEY_DOWN,
# KeyboardKey.KEY_LEFT,
# KeyboardKey.KEY_RIGHT,
# KeyboardKey.KEY_SPACE]
#
# #game over?
# sprite_to_locate = Sprite("QUERY", image_data=self.spriteGO.image_data)
#
# sprite_locator = SpriteLocator()
# locationGO = sprite_locator.locate(sprite=sprite_to_locate, game_frame=game_frame)
# print(locationGO)
#
# #won game?
# sprite_to_locate = Sprite("QUERY", image_data=self.spriteWO.image_data)
# sprite_locator = SpriteLocator()
# locationWO = sprite_locator.locate(sprite=sprite_to_locate, game_frame=game_frame)
# print(locationWO)
#
# print(type(game_frame))
#
# if(locationGO!= None or locationWO!= None):
# #enter clic in both cases
# self.input_controller.tap_key(KeyboardKey.KEY_ENTER)
# else:
# game_frame_buffer = FrameGrabber.get_frames([0, 1, 2, 3], frame_type="PIPELINE")
# game_frame_buffer = self.extract_game_area(game_frame_buffer)
# action, label, value = self.ppo_agent.generate_action(game_frame_buffer)
#
# print(action, label, value)
# self.input_controller.tap_key(value)
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import time
import os
import pickle
import serpent.cv
import numpy as np
import collections
from datetime import datetime
from serpent.frame_transformer import FrameTransformer
from serpent.frame_grabber import FrameGrabber
from serpent.game_agent import GameAgent
from serpent.input_controller import KeyboardKey
from serpent.sprite import Sprite
from serpent.sprite_locator import SpriteLocator
from serpent.sprite_identifier import SpriteIdentifier
import skimage.io
from serpent.visual_debugger.visual_debugger import VisualDebugger
from .helpers.game_status import Game
from .helpers.terminal_printer import TerminalPrinter
from .helpers.ppo import SerpentPPO
from .helpers.dqn import KerasAgent
import random
class MyFrame:
def __init__(self, frame):
self.frame = frame
class SerpentBombermanGameAgent(GameAgent):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.frame_handlers['PLAY'] = self.handle_play
self.frame_handler_setups['PLAY'] = self.setup_play
self.value = None
#print('Sprites')
#print(type(self.game.sprites))
#print('game')
#print(self.game)
#print('game type')
#print(type(self.game))
self.spriteGO = self.game.sprites.get('SPRITE_GAME_OVER')
self.spriteWO = self.game.sprites.get('SPRITE_GAME_WON')
self.spriteGirl = self.game.sprites.get('SPRITE_BETTY_0')
self.printer = TerminalPrinter()
self.visual_debugger = VisualDebugger()
self.gamestate = Game()
def setup_play(self):
game_inputs = {
"MoveUp": [KeyboardKey.KEY_UP],
"MoveDown": [KeyboardKey.KEY_DOWN],
"MoveLeft": [KeyboardKey.KEY_LEFT],
"MoveRight": [KeyboardKey.KEY_RIGHT],
"LeaveBomb": [KeyboardKey.KEY_SPACE],
"None": [0]
}
self.game_inputs = game_inputs
self.game_actions = [
KeyboardKey.KEY_UP,
KeyboardKey.KEY_DOWN,
KeyboardKey.KEY_LEFT,
KeyboardKey.KEY_RIGHT,
KeyboardKey.KEY_SPACE,
None]
##120, 137
self.dqn_agent = KerasAgent(shape=(104, 136, 1), action_size=len(self.game_actions))
#load model
#self.ppo_agent.restore_model()
self.first_run = True
##states trainning
self.epoch = 1
self.total_reward = 0
##state & action
self.prev_state = None
self.prev_action = None
self.prev_reward = 0
print("Enter - Auto Save")
self.input_controller.tap_key(KeyboardKey.KEY_ENTER)
self.gamestate.restartState()
time.sleep(2)
def extract_game_area(self, frame_buffer):
game_area_buffer = []
for game_frame in frame_buffer.frames:
game_area = \
serpent.cv.extract_region_from_image(game_frame.grayscale_frame,self.game.screen_regions['GAME_REGION'])
frame = FrameTransformer.rescale(game_area, 0.25)
game_area_buffer.append(frame)
print(np.array(game_area_buffer).shape)
return np.array(game_area_buffer)
def convert_to_rgba(self, matrix):
#print(matrix)
new_matrix = []
for x in range(0,len(matrix)):
line = []
for y in range(0,len(matrix[x])):
#pixel
pixel = matrix[x][y]
new_pixel = [pixel[0],pixel[1],pixel[2], 255]
line.append(new_pixel)
new_matrix.append(line)
return np.array(new_matrix)
def update_game_state(self, frame):
game_area = \
serpent.cv.extract_region_from_image(frame,self.game.screen_regions['GAME_REGION'])
#game ...
# 0,0
# 32,32
game_squares = [[None for j in range(0,11)] for i in range(0,15)]
const_offset = 8
const = 32
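# The board is scanned as a 15x11 grid of 32x32-px tiles; each query
# region is padded by const_offset (8 px) on every side so sprites that
# straddle a tile boundary are still matched.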
#game variables
self.gamestate.bombs = [] #{x, y}
self.gamestate.enemies = [] #{x,y}
#force girl to die if not found
girl_found = False
for i in range(0,15):
for j in range(0, 11):
izq = ((j+1)*const - const_offset, (i+1)*const - const_offset)
der = ((j+2)*const + const_offset, (i+2)*const + const_offset)
reg = (izq[0], izq[1], der[0], der[1])
square = serpent.cv.extract_region_from_image(game_area, reg)
square = self.convert_to_rgba(square)
sprite_to_locate = Sprite("QUERY", image_data=square[..., np.newaxis])
sprite = self.sprite_identifier.identify(sprite_to_locate, mode="SIGNATURE_COLORS")
game_squares[i][j] = sprite
if("SPRITE_BETTY" in sprite):
self.girl = {"x": i, "y": j}
girl_found = True
elif("SPRITE_GEORGE" in sprite):
self.gamestate.enemies.append({"x": i, "y": j})
elif("SPRITE_BOMB" in sprite):
self.gamestate.bombs.append({"x": i, "y": j})
self.gamestate.girl_alive = girl_found
self.gamestate.done = not girl_found
return game_squares
def handle_play(self, game_frame):
#self.printer.add("")
#self.printer.add("BombermanAI")
#self.printer.add("Reinforcement Learning: Training a PPO Agent")
#self.printer.add("")
#self.printer.add(f"Stage Started At: {self.started_at}")
#self.printer.add(f"Current Run: #{self.current_attempts}")
#self.printer.add("")
#self.check_game_state(game_frame)
#####################CHECK STATE###########################
#game over?
locationGO = None
sprite_to_locate = Sprite("QUERY", image_data=self.spriteGO.image_data)
sprite_locator = SpriteLocator()
locationGO = sprite_locator.locate(sprite=sprite_to_locate, game_frame=game_frame)
#print("Location Game over:",locationGO)
#won game?
locationWO = None
sprite_to_locate = Sprite("QUERY", image_data=self.spriteWO.image_data)
sprite_locator = SpriteLocator()
locationWO = sprite_locator.locate(sprite=sprite_to_locate, game_frame=game_frame)
#print("Location Game won:",locationWO)
self.gamestate.victory = locationWO is not None
self.gamestate.lose = locationGO is not None
self.gamestate.girl_alive = (locationGO is None and locationWO is None)
self.gamestate.done = not self.gamestate.girl_alive
print(f"Is alive? {self.gamestate.girl_alive}")
print(f"Game over? {self.gamestate.lose}")
print(f"Won? {self.gamestate.victory}")
#####################VISUAL DEBUGGER###########################
for i, game_frame in enumerate(self.game_frame_buffer.frames):
self.visual_debugger.store_image_data(
game_frame.frame,
game_frame.frame.shape,
str(i)
)
#####################MODEL###########################
#get buffer
frame_buffer = FrameGrabber.get_frames([0, 1, 2, 3], frame_type="PIPELINE")
game_frame_buffer = self.extract_game_area(frame_buffer)
state = game_frame_buffer.reshape(4, 104, 136, 1)
if(self.gamestate.done):
print(f"Game over, attemp {self.epoch}")
if (self.epoch % 10)== 0:
print("saving model")
self.dqn_agent.save_model(f"bombergirl_epoch_{self.epoch}.model")
self.printer.save_file()
self.printer.add(f"{self.gamestate.victory},{self.gamestate.lose},{self.epoch},{self.gamestate.time},{self.total_reward}")
self.total_reward = 0
self.dqn_agent.remember(self.prev_state, self.prev_action, self.prev_reward, state, True)
self.dqn_agent.replay()
self.input_controller.tap_key(KeyboardKey.KEY_ENTER)
self.epoch += 1
self.total_reward = 0
self.gamestate.restartState()
self.prev_state = None
self.prev_action = None
else:
#update time
self.gamestate.updateTime()
#print(np.stack(game_frame_buffer,axis=1).shape)
#print(game_frame_buffer.shape)
#print(state.shape)
if(not (self.prev_state is None) and not (self.prev_action is None)):
self.dqn_agent.remember(self.prev_state, self.prev_action, self.prev_reward, state, False)
#do something
action_index = self.dqn_agent.act(state)
#get key
action = self.game_actions[action_index]
#get random frame from buffer
game_frame_rand = random.choice(frame_buffer.frames).frame
#update environment according to frame
###################FUN UPDATE STATE#########################################
game_area = \
serpent.cv.extract_region_from_image(game_frame_rand,self.game.screen_regions['GAME_REGION'])
#game ...
# 0,0
# 32,32
game_squares = [[None for j in range(0,11)] for i in range(0,15)]
const_offset = 8
const = 32
#game variables
self.gamestate.bombs = [] #{x, y}
self.gamestate.enemies = [] #{x,y}
#force girl to die if not found
girl_found = False
for i in range(0,15):
for j in range(0, 11):
izq = ((j+1)*const - const_offset, (i+1)*const - const_offset)
der = ((j+2)*const + const_offset, (i+2)*const + const_offset)
reg = (izq[0], izq[1], der[0], der[1])
square = serpent.cv.extract_region_from_image(game_area, reg)
square = self.convert_to_rgba(square)
sprite_to_locate = Sprite("QUERY", image_data=square[..., np.newaxis])
sprite = self.sprite_identifier.identify(sprite_to_locate, mode="SIGNATURE_COLORS")
game_squares[i][j] = sprite
if("SPRITE_BETTY" in sprite):
self.girl = {"x": i, "y": j}
girl_found = True
elif("SPRITE_GEORGE" in sprite):
self.gamestate.enemies.append({"x": i, "y": j})
elif("SPRITE_BOMB" in sprite):
self.gamestate.bombs.append({"x": i, "y": j})
elif("SPRITE_BONUSES" in sprite):
self.gamestate.bonus.append({"x": i, "y": j})
#####################CHECK STATE###########################
#game over?
locationGO = None
sprite_to_locate = Sprite("QUERY", image_data=self.spriteGO.image_data)
sprite_locator = SpriteLocator()
locationGO = sprite_locator.locate(sprite=sprite_to_locate, game_frame=game_frame)
#print("Location Game over:",locationGO)
#won game?
locationWO = None
sprite_to_locate = Sprite("QUERY", image_data=self.spriteWO.image_data)
sprite_locator = SpriteLocator()
locationWO = sprite_locator.locate(sprite=sprite_to_locate, game_frame=game_frame)
#print("Location Game won:",locationWO)
self.gamestate.lose = locationGO is not None
self.gamestate.victory = locationWO is not None
self.gamestate.girl_alive = (locationGO is None and locationWO is None)
self.gamestate.done = not self.gamestate.girl_alive
print(f"Is alive? {self.gamestate.girl_alive}")
print(f"Game over? {self.gamestate.lose}")
print(f"Won? {self.gamestate.victory}")
###################REWARD#########################################
#get reward
reward = self.gamestate.getReward(action_index)
self.total_reward += reward
self.prev_state = state
self.prev_action = action_index
self.prev_reward = reward
if(action):
self.input_controller.tap_key(action, 0.15 if action_index < 4 else 0.01)
print(f"Action: {self.gamestate.game_inputs[action_index]}, reward: {reward}, total_reward: {self.total_reward}")
#action, label, value = self.ppo_agent.generate_action(game_frame_buffer)
#print(action, label, value)
#key, value = random.choice(list(self.game_inputs.items()))
#if(value[0]):
# self.input_controller.tap_key(value[0])
#game_squares = self.extract_game_squares(game_frame.frame)
def check_game_state(self, game_frame):
#game over?
locationGO = None
sprite_to_locate = Sprite("QUERY", image_data=self.spriteGO.image_data)
sprite_locator = SpriteLocator()
locationGO = sprite_locator.locate(sprite=sprite_to_locate, game_frame=game_frame)
print("Location Game over:",locationGO)
#won game?
locationWO = None
sprite_to_locate = Sprite("QUERY", image_data=self.spriteWO.image_data)
sprite_locator = SpriteLocator()
locationWO = sprite_locator.locate(sprite=sprite_to_locate, game_frame=game_frame.frames)
print("Location Game won:",locationWO)
self.gamestate.girl_alive = (locationGO is None and locationWO is None)
self.gamestate.done = not self.gamestate.girl_alive
self.gamestate.victory = locationWO is not None
print(f"Is alive? {self.gamestate.girl_alive}")
print(f"Game over? {self.gamestate.lose}")
print(f"Won? {self.gamestate.victory}")
|
Python
| 476
| 36.39706
| 134
|
/plugins/SerpentBombermanGameAgentPlugin/files/serpent_Bomberman_game_agent.py
| 0.57411
| 0.566614
|
jeespinozam/bomberman-ai
|
refs/heads/master
|
import json
import sys
import random
import os
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import *
from keras.optimizers import *
class KerasAgent:
def __init__(self, shape, action_size):
self.weight_backup = "bombergirl_weight.model"
self.shape = shape
self.action_size = action_size
self.memory = deque(maxlen=2000)
self.learning_rate = 0.001
self.gamma = 0.95
self.exploration_rate = 1.0
self.exploration_min = 0.01
self.exploration_decay = 0.995
self.model = self._build_model()
def _build_model(self):
model = Sequential()
# Convolutions.
model.add(Conv2D(
16,
kernel_size=(3, 3),
strides=(1, 1),
#data_format='channels_first',
input_shape=self.shape
))
model.add(Activation('relu'))
model.add(Conv2D(
32,
kernel_size=(3, 3),
strides=(1, 1),
data_format='channels_first'
))
model.add(Activation('relu'))
# Dense layers.
model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dense(self.action_size))
model.summary()
model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
#model.compile(RMSprop(), 'MSE')
if os.path.isfile(self.weight_backup):
model.load_weights(self.weight_backup)
self.exploration_rate = self.exploration_min
return model
def save_model(self, name):
self.model.save(self.weight_backup)
self.model.save(name)
def act(self, state):
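# Epsilon-greedy action selection: explore with probability
# exploration_rate, otherwise act greedily on the predicted Q-values.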
if np.random.rand() <= self.exploration_rate:
return random.randrange(self.action_size)
act_values = self.model.predict(state)
return np.argmax(act_values[0])
def remember(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
def replay(self, sample_batch_size=256):
if len(self.memory) < sample_batch_size:
sample_batch_size = len(self.memory)
sample_batch = random.sample(self.memory, sample_batch_size)
for state, action, reward, next_state, done in sample_batch:
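# Standard DQN target: r for terminal transitions, otherwise
# r + gamma * max_a' Q(next_state, a') (the Bellman backup).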
target = reward
if not done:
target = (reward + self.gamma *
np.amax(self.model.predict(next_state)[0]))
target_f = self.model.predict(state)
target_f[0][action] = target
self.model.fit(state, target_f, epochs=1, verbose=0)
if self.exploration_rate > self.exploration_min:
self.exploration_rate *= self.exploration_decay
|
Python
| 88
| 31.772728
| 72
|
/plugins/SerpentBombermanGameAgentPlugin/files/helpers/dqn.py
| 0.571429
| 0.555479
|
jeespinozam/bomberman-ai
|
refs/heads/master
|
from serpent.game import Game
from .api.api import BombermanAPI
from serpent.utilities import Singleton
from serpent.game_launchers.web_browser_game_launcher import WebBrowser
class SerpentBombermanGame(Game, metaclass=Singleton):
def __init__(self, **kwargs):
kwargs["platform"] = "web_browser"
kwargs["window_name"] = "Safari"
kwargs["url"] = "http://0.0.0.0:8000"
kwargs["browser"] = WebBrowser.DEFAULT
super().__init__(**kwargs)
self.api_class = BombermanAPI
self.api_instance = None
@property
def screen_regions(self):
dic_offset = {
"WINDOWS_CHROME": {
# "top": 81,
# "left": 5
"top": 0,
"left": 0
}
}
offset = dic_offset["WINDOWS_CHROME"]
regions = {
"GAME_REGION": (offset["top"], offset["left"], 416 + offset["top"], 544 + offset["left"]), #544x416
"GAME_OVER_REGION": (118 + offset["top"], 163 + offset["left"], 151 + offset["top"], 383 + offset["left"]), #220x33 - 163,118
"WIN_REGION": (118 + offset["top"], 171 + offset["left"], 149 + offset["top"], 372 + offset["left"]), # 201x31 - 171,118
}
return regions
@property
def ocr_presets(self):
presets = {
"SAMPLE_PRESET": {
"extract": {
"gradient_size": 1,
"closing_size": 1
},
"perform": {
"scale": 10,
"order": 1,
"horizontal_closing": 1,
"vertical_closing": 1
}
}
}
return presets
# from serpent.game import Game
#
# from .api.api import BombermanAPI
#
# from serpent.utilities import Singleton
#
# from serpent.game_launchers.web_browser_game_launcher import WebBrowser
#
#
# class SerpentBombermanGame(Game, metaclass=Singleton):
#
# def __init__(self, **kwargs):
# kwargs["platform"] = "web_browser"
#
# kwargs["window_name"] = "Safari"
#
# kwargs["url"] = "http://0.0.0.0:8000"
# kwargs["browser"] = WebBrowser.DEFAULT
#
# super().__init__(**kwargs)
#
# self.api_class = BombermanAPI
# self.api_instance = None
#
# @property
# def screen_regions(self):
# regions = {
# "GAME_REGION": (0, 0, 480, 549), ##545x416
# "GAME_OVER_REGION": (160,160, 225, 404),
# "WIN_REGION": (175,130, 220, 421),
# }
#
# return regions
#
# @property
# def ocr_presets(self):
# presets = {
# "SAMPLE_PRESET": {
# "extract": {
# "gradient_size": 1,
# "closing_size": 1
# },
# "perform": {
# "scale": 10,
# "order": 1,
# "horizontal_closing": 1,
# "vertical_closing": 1
# }
# }
# }
#
# return presets
|
Python
| 118
| 22.372881
| 128
|
/plugins/SerpentBombermanGamePlugin/files/serpent_Bomberman_game.py
| 0.532995
| 0.485497
|
jeespinozam/bomberman-ai
|
refs/heads/master
|
#from .memreader import MemoryReader
import time
class Game:
enemies = [] #{x,y}
bombs = [] #{x,y}
bonus = []
girl = {"x": 0, "y": 0}
start_time = 0
time = 0
game_inputs = {
0: "MoveUp",
1: "MoveDown",
2: "MoveLeft",
3: "MoveRight",
4: "LeaveBomb",
5: "None"
}
girl_alive = True
done = False
lose = False
victory = False
##const
TIME_NORM = 10
MOVEMENT_RW = 5
BONUS_RW = 10
ALIVE_RW = 20
ENEMIES_NORM = 5
REWARD_BOMB = 25
REWARD_VICTORY = 100
REWARD_LOSE = 50
MAX_DISTANCE = 8
def restartState(self):
self.girl_alive = True
self.done = False
self.lose = False
self.victory = False
self.time = 0
self.start_time = time.time()
def getCurrentTimeNormalized(self):
return self.time / self.TIME_NORM
def getDistanceNormalized(self, elem1, elem2):
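# Manhattan (grid) distance between two {x, y} cells.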
return abs(elem1['x'] - elem2['x']) + abs(elem1['y'] - elem2['y'])
def updateTime(self):
self.time = time.time() - self.start_time
def getReward(self, action):
reward = 0
# Penalize for the number of enemies still alive
reward -= self.ENEMIES_NORM*len(self.enemies)
# Penalize as time passes
reward -= self.getCurrentTimeNormalized()
# Penalize/reward the girl for being close to/far from a bomb
for bomb in self.bombs:
distance = self.getDistanceNormalized(bomb, self.girl)
if distance < self.MAX_DISTANCE:
reward -= distance
else:
reward += distance
if(action == 4):
# Reward placing a bomb
reward += self.REWARD_BOMB
for enemy in self.enemies:
# Extra reward when the bomb is closer to an enemy
distance = self.getDistanceNormalized(enemy, self.girl)
if distance < self.MAX_DISTANCE:
reward += self.REWARD_BOMB/distance
if(action < 4):
# Reward moving
reward += self.MOVEMENT_RW
# Reward being closer to a bonus
for bonus in self.bonus:
reward += self.BONUS_RW / self.getDistanceNormalized(bonus, self.girl)
# Reward still being in the game
if(self.girl_alive):
reward += self.ALIVE_RW
# Penalize losing
if self.lose:
reward -= self.REWARD_LOSE
# Reward winning
if self.victory:
reward += self.REWARD_VICTORY
return reward
|
Python
| 96
| 27.041666
| 86
|
/plugins/SerpentBombermanGameAgentPlugin/files/helpers/game_status.py
| 0.552749
| 0.539376
|
Jannlk/GLO-2000-TP4
|
refs/heads/master
|
import os.path
import re
from hashlib import sha256
from os.path import getsize
#Method that creates a new account in the server directory
#id : folder name
#mdp : password
#return : "0" if a file problem occurred, "1" if the account was created
def creerCompte(id, mdp):
state = "1"
try:
os.makedirs(id)
file = open(id + "/config.txt", "w")
file.write(sha256(mdp.encode()).hexdigest())
file.close()
except:
state = "0"
return state
#Method that checks whether the account exists
#id : name of the account folder
#return: "1" if the account exists, "0" otherwise
def verifierID(id):
state = "0"
if os.path.exists(id + "/config.txt"):
state = "1"
return state
#Method that checks whether the password meets the requirements
#mdp : the password
#return : "1" if the password meets the requirements, "0" otherwise.
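#e.g. "abc123" is accepted; "abcdef" (no digit) and "12345" (too short) are rejected.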
def veififierMDP(mdp):
state = "0"
if (re.search(r"^[a-zA-Z0-9]{6,12}$", mdp) and re.search(r".*[0-9].*", mdp) and re.search(r".*[a-zA-Z].*",mdp)):
state = "1"
return state
#Method that logs a user in by opening their folder
#id, mdp : the user's login and password
#Return : "-1" if a problem occurs while opening the file
# "0" if the password does not match
# "1" if the login succeeds
def connexion(id, mdp):
state = "1"
try:
file = open(id + "/config.txt", "r")
password = file.readline()
file.close()
if sha256(mdp.encode()).hexdigest() != password:
state = "0"
except:
state = "-1"
return state
#Method that writes an e-mail into a local user's folder
#id, subject, data: the user's login, the subject and the body of the message
#Return : "-1" if a problem occurs while opening the file
# "0" if everything goes well
def courrielLocal(id, subject, data):
state = "0"
try:
file = open(id + "/" + subject + ".txt", "w")
file.write(data)
file.close()
state = "0"
except:
state = "-1"
return state
#Method that opens a local e-mail
#id, filename: the user's folder and the e-mail file name
def ouvrirLocal(id, filename):
try:
file = open( id + "/" + filename, "r")
str_content = file.read()
file.close()
return str_content
except:
print("Fichier introuvable.")
#Method that saves an e-mail addressed to a nonexistent user
#subject, data: subject and body of the e-mail
def courrielDump(subject, data):
try:
if not os.path.exists("DESTERREUR"):
os.makedirs("DESTERREUR")
file = open("DESTERREUR/" + subject + ".txt", "w")
file.write(data)
file.close()
except:
print("Guess somebody fucked up good.")
#Method that returns the size of a directory
#id: the directory
def getSize(id):
try:
size = getsize(id)
return size
except:
print("Mauvais nom de repertoire")
#Method that returns the list sorted by date
#id, liste: the list to sort
def sortDate(id, liste):
liste.sort(key=lambda x: os.path.getmtime(id + "/" + x))
return liste
#Method that returns the list sorted alphabetically
#liste: the list to sort
def sortAlpha(liste):
liste.sort()
return liste
|
Python
| 119
| 27.537815
| 116
|
/TP4_111126561/utilitaires.py
| 0.623969
| 0.612485
|
Jannlk/GLO-2000-TP4
|
refs/heads/master
|
import smtplib, re, socket, optparse, sys
import os.path
import pickle
from email.mime.text import MIMEText
import utilitaires
parser = optparse.OptionParser()
parser.add_option("-a", "--address", action="store", dest="address", default="localhost")
parser.add_option("-p", "--port", action="store", dest="port", type=int, default=1337)
opts = parser.parse_args(sys.argv[1:])[0]
destination = (opts.address, opts.port)
#Create the socket
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind(destination)
serversocket.listen(5)
print("Listening on port " + str(serversocket.getsockname()[1]))
nbConnexions = 0
nbDeconnexions = 0
while True:
#A client connects to the server
(s, address) = serversocket.accept()
nbConnexions += 1
print(str(nbConnexions) + "e connexion au serveur")
#Receive the chosen option from the login menu.
option = s.recv(1024).decode()
#The user chose to log in
if option == "1":
#Check that the account exists and that the password is valid
id = s.recv(1024).decode()
mdp = s.recv(1024).decode()
verificationID = utilitaires.verifierID(id)
s.send(verificationID.encode())
if verificationID != "0":
verificationMDP = utilitaires.connexion(id, mdp)
s.send(verificationMDP.encode())
while verificationID != "1" or verificationMDP != "1":
id = s.recv(1024).decode()
mdp = s.recv(1024).decode()
verificationID = utilitaires.verifierID(id)
s.send(verificationID.encode())
if verificationID != "0":
verificationMDP = utilitaires.connexion(id, mdp)
s.send(verificationMDP.encode())
if verificationMDP == "-1":
continue
#The user chose to create an account
elif option == "2":
#Create the login
id = s.recv(1024).decode()
mdp = s.recv(1024).decode()
verificationID = utilitaires.verifierID(id)
s.send(verificationID.encode())
if verificationID != "1":
verificationMDP = utilitaires.veififierMDP(mdp)
s.send(verificationMDP.encode())
while verificationID != "0" or verificationMDP != "1":
id = s.recv(1024).decode()
mdp = s.recv(1024).decode()
verificationID = utilitaires.verifierID(id)
s.send(verificationID.encode())
if verificationID != "1":
verificationMDP = utilitaires.veififierMDP(mdp)
s.send(verificationMDP.encode())
verificationErreur = utilitaires.creerCompte(id, mdp)
s.send(verificationErreur.encode())
if verificationErreur == "0":
continue
while True:
# Receive the chosen option from the main menu.
option = s.recv(1024).decode()
#Send an e-mail
if option == "1":
# receive the destination address and check that it is valid
emailFrom = s.recv(1024).decode()
emailAddress = s.recv(1024).decode()
while not re.search(r"^[^@]+@[^@]+\.[^@]+$", emailAddress):
msg = "-1"
s.send(msg.encode())
emailAddress = s.recv(1024).decode()
msg = "0"
s.send(msg.encode())
# build the e-mail
subject = s.recv(1024).decode()
data = s.recv(1024).decode()
courriel = MIMEText(data)
courriel["From"] = emailFrom
courriel["To"] = emailAddress
courriel["Subject"] = subject
#External address?
use_smtp_ulaval = False
if re.match(r"^[^@]+@reseauglo\.ca$", emailAddress) is None:
use_smtp_ulaval = True
if use_smtp_ulaval:
# send the e-mail through the school's SMTP server
try:
smtpConnection = smtplib.SMTP(host="smtp.ulaval.ca", timeout=10)
smtpConnection.sendmail(courriel["From"], courriel["To"], courriel.as_string())
smtpConnection.quit()
msg = "0"
s.send(msg.encode())
except:
msg = "-1"
s.send(msg.encode())
else:
chemin_dossier = emailAddress.replace("@reseauglo.ca", "")
verification = utilitaires.courrielLocal(chemin_dossier, courriel['Subject'], courriel.as_string())
if(verification != "0"):
utilitaires.courrielDump(courriel['Subject'], courriel.as_string())
s.send(verification.encode())
elif option == "2":
id = s.recv(1024).decode()
files = os.listdir(id)
files.remove("config.txt")
files = utilitaires.sortDate(id, files)
mails = []
for file in files:
file = file.replace(".txt", "")
mails.append(file)
data_string = pickle.dumps(mails)
s.send(data_string)
email_id = int(s.recv(1024).decode()) - 1
email_content = utilitaires.ouvrirLocal(id, files[email_id])
s.send(email_content.encode())
elif option == "3":
id = s.recv(1024).decode()
filesize = utilitaires.getSize(id)
s.send(str(filesize).encode())
files = os.listdir(id)
files.remove("config.txt")
files = sorted(files, key=str)
mails = []
for file in files:
file = file.replace(".txt", "")
print(file)
mails.append(file)
data_string = pickle.dumps(mails)
s.send(data_string)
elif option == "4":
nbDeconnexions += 1
print(str(nbDeconnexions) + "e deconnexion au serveur")
break
|
Python
| 166
| 35.379517
| 115
|
/TP4_111126561/serveur.py
| 0.55539
| 0.537341
|
Jannlk/GLO-2000-TP4
|
refs/heads/master
|
import smtplib, re, socket, optparse, sys
import os.path
from email.mime.text import MIMEText
from hashlib import sha256
import getpass
import pickle
parser = optparse.OptionParser()
parser.add_option("-a", "--address", action="store", dest="address", default="localhost")
parser.add_option("-p", "--port", action="store", dest="port", type=int, default=1337)
opts = parser.parse_args(sys.argv[1:])[0]
destination = (opts.address, opts.port)
#Connect to the server
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(10)
s.connect(destination)
s.settimeout(None)
while True:
#Login menu: choose an option
option = input("Menu de connexion \n1. Se connecter \n2. Creer un compte \n")
while option != "1" and option != "2":
option = input("Veuillez saisir une option valide:\n")
s.send(option.encode())
#Log in
if option == "1":
id = input("Veuillez saisir votre identifiant:\n")
mdp = getpass.getpass("Veuillez saisir votre mot de passe:\n")
s.send(id.encode())
s.send(mdp.encode())
reponseID = s.recv(1024).decode()
if reponseID != "0":
reponseMDP = s.recv(1024).decode()
while reponseID != "1" or reponseMDP != "1":
if reponseID != "1":
id = input("Veuillez saisir un identifiant valide:\n")
mdp = getpass.getpass("Veuillez saisir votre mot de passe:\n")
elif reponseMDP == "-1":
print("Desole, un probleme est survenu.")
continue
else:
print("Ce n'est pas le bon mot de passe. Veuillez reessayer.")
id = input("Veuillez saisir votre identifiant:\n")
mdp = getpass.getpass("Veuillez saisir votre mot de passe:\n")
s.send(id.encode())
s.send(mdp.encode())
reponseID = s.recv(1024).decode()
if reponseID != "0":
reponseMDP = s.recv(1024).decode()
    # Create an account
elif option == "2":
id = input("Veuillez choisir un identifiant:\n")
        mdp = getpass.getpass("Veuillez choisir un mot de passe contenant de 6 à 12 caracteres, dont au moins une lettre et un chiffre:\n")
s.send(id.encode())
s.send(mdp.encode())
reponseID = s.recv(1024).decode()
if reponseID != "1":
reponseMDP = s.recv(1024).decode()
while reponseID != "0" or reponseMDP != "1":
if reponseID != "0":
id = input("Cet identifiant est deja pris, veuillez en choisir un autre:\n")
mdp = getpass.getpass("Veuillez saisir votre mot de passe:\n")
else:
print("Ce mot de passe ne respecte pas les conditions, veuilelz en choisir un autre.")
id = input("Veuillez saisir votre identifiant a nouveau:\n")
mdp = getpass.getpass("Veuillez saisir votre nouveau mot de passe:\n")
s.send(id.encode())
s.send(mdp.encode())
reponseID = s.recv(1024).decode()
if reponseID != "1":
reponseMDP = s.recv(1024).decode()
reponseCreationCompte = s.recv(1024).decode()
if reponseCreationCompte == "0":
print("Desole, un probleme est survenu")
continue
while True:
option = input("\nMenu principale\n1. Envoi de courriels\n2. Consultation de courriels\n3. Statistiques\n4. Quitter\n")
while option not in ["1", "2", "3", "4"]:
option = input("Veuillez saisir une option valide:\n")
s.send(option.encode())
if option == "1":
email_from = id + "@reseauglo.ca"
s.send(email_from.encode())
response = "-1"
while(response == "-1"):
email_to = input("\nÀ: ")
s.send(email_to.encode())
response = s.recv(1024).decode()
subject = input("\nSujet: ")
s.send(subject.encode())
data = input("\nMessage: ")
s.send(data.encode())
response = s.recv(1024).decode()
if(response == "-1"):
print("\nErreur lors de l'envoie du courriel.")
continue
else:
print("\nCourriel envoyé avec succès!")
elif option == "2":
s.send(id.encode())
data_string = s.recv(1024)
mails = pickle.loads(data_string)
print("\nListe de vos courriels: \n")
        compteur = 1
for mail in mails:
print("\n" + str(compteur) + ". " + mail)
compteur += 1
email_id = input("\nQuel courriel souhaitez-vous visionner? \n")
s.send(email_id.encode())
email_content = s.recv(1024).decode()
print("\n" + email_content)
input("\nAppuyez sur Enter pour continuer...")
continue
elif option == "3":
s.send(id.encode())
filesize = s.recv(1024).decode()
data_string = s.recv(1024)
mails = pickle.loads(data_string)
print("\nNombre de messages: " + str(len(mails)) + "\n")
print("\nTaille du repertoire personnel (en octets): " + filesize + "\n")
print("\nListe de vos courriels: \n")
        compteur = 1
for mail in mails:
print("\n" + str(compteur) + ". " + mail)
compteur += 1
input("\nAppuyez sur Enter pour continuer...")
continue
elif option == "4":
        break
s.close()
exit()
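# Hedged sketch (not part of the original assignment): the mail list above arrives
# as one pickle blob read with a single recv(1024), which silently breaks once the
# payload exceeds 1024 bytes. Length-prefix framing avoids that; send_pickled and
# recv_pickled are hypothetical helpers illustrating the idea.
import struct
def send_pickled(sock, obj):
    payload = pickle.dumps(obj)
    sock.sendall(struct.pack("!I", len(payload)) + payload)  # 4-byte big-endian length prefix
def recv_pickled(sock):
    header = b""
    while len(header) < 4:                       # read exactly 4 header bytes
        header += sock.recv(4 - len(header))
    (size,) = struct.unpack("!I", header)
    payload = b""
    while len(payload) < size:                   # then read exactly `size` payload bytes
        payload += sock.recv(size - len(payload))
    return pickle.loads(payload)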
|
Python
| 152
| 36.157894
| 138
|
/client.py
| 0.542766
| 0.52311
|
junprog/contrastive-baseline
|
refs/heads/main
|
from utils.contrastive_trainer import CoTrainer
from utils.simsiam_trainer import SimSiamTrainer
import argparse
import os
import math
import torch
args = None
def parse_args():
parser = argparse.ArgumentParser(description='Train ')
parser.add_argument('--data-dir', default='/mnt/hdd02/process-ucf',
help='training data directory')
parser.add_argument('--save-dir', default='D:/exp_results',
help='directory to save models.')
parser.add_argument('--cifar10', action='store_true',
help='use cifar10 dataset')
parser.add_argument('--SimSiam', action='store_true',
help='try Simple Siamese Net')
parser.add_argument('--arch', type=str, default='vgg19',
help='the model architecture [vgg19, vgg19_bn, resnet18]')
parser.add_argument('--pattern-feature', type=str, default='conv-512x1x1',
help='the feature to contrast [conv-512x1x1, fc-4096]')
parser.add_argument('--projection', action='store_true',
help='use MLP projection')
parser.add_argument('--prediction', action='store_true',
help='use MLP prediction')
parser.add_argument('--mlp-bn', action='store_true',
help='use MLP Batch Normalization')
parser.add_argument('--lr', type=float, default=1e-2,
help='the initial learning rate')
parser.add_argument('--weight-decay', type=float, default=1e-4,
help='the weight decay')
parser.add_argument('--momentum', type=float, default=0.9,
help='the momentum')
    parser.add_argument('--div-row', type=int, default=3,
                        help='number of patch rows per image')
    parser.add_argument('--div-col', type=int, default=3,
                        help='number of patch columns per image')
    parser.add_argument('--aug', action='store_true',
                        help='use data augmentation')
parser.add_argument('--margin', type=float, default=1.0,
help='the margin of loss function')
parser.add_argument('--resume', default='',
help='the path of resume training model')
    parser.add_argument('--max-model-num', type=int, default=30,
                        help='max number of saved models')
parser.add_argument('--check_point', type=int, default=100,
help='milestone of save model checkpoint')
parser.add_argument('--max-epoch', type=int, default=300,
help='max training epoch')
parser.add_argument('--val-epoch', type=int, default=10,
help='the num of steps to log training information')
parser.add_argument('--val-start', type=int, default=0,
help='the epoch start to val')
parser.add_argument('--batch-size', type=int, default=8,
help='train batch size')
parser.add_argument('--device', default='0', help='assign device')
parser.add_argument('--num-workers', type=int, default=8,
help='the num of training process')
parser.add_argument('--crop-size', type=int, default=224,
help='the crop size of the train image')
parser.add_argument('--visual-num', type=int, default=4,
help='the number of visualize images')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
torch.backends.cudnn.benchmark = True
os.environ['CUDA_VISIBLE_DEVICES'] = args.device.strip('-') # set vis gpu
if args.SimSiam:
trainer = SimSiamTrainer(args)
else:
trainer = CoTrainer(args)
trainer.setup()
trainer.train()
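# Example invocations (hedged: paths are placeholders, not taken from the repo docs):
#   python train.py --cifar10 --SimSiam --arch vgg19 --batch-size 8 --max-epoch 300
#   python train.py --data-dir /path/to/patches --arch resnet18 --projection --prediction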
|
Python
| 86
| 44.313953
| 85
|
/train.py
| 0.56736
| 0.552989
|
junprog/contrastive-baseline
|
refs/heads/main
|
from typing import Callable, Optional
import random
from PIL import Image
import numpy as np
import torch
import torchvision
from torchvision import transforms
from torchvision.datasets import CIFAR10
np.random.seed(765)
random.seed(765)
class SupervisedPosNegCifar10(torch.utils.data.Dataset):
def __init__(self, dataset, phase):
        # split into two streams: 80% anchors, 20% positives/negatives
        # (the second length is the remainder so the split sums to len(dataset))
        lengths = [int(len(dataset) * 0.8), len(dataset) - int(len(dataset) * 0.8)]
        self.anchors, self.posnegs = torch.utils.data.random_split(dataset, lengths)
if phase == 'train':
self.anchor_transform = transforms.Compose([transforms.Resize(64),
transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64),
transforms.RandomHorizontalFlip(0.5),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
self.posneg_transform = transforms.Compose([transforms.Resize(64),
transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64),
transforms.RandomHorizontalFlip(0.5),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
else:
self.anchor_transform = transforms.Compose([transforms.Resize(64),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
self.posneg_transform = transforms.Compose([transforms.Resize(64),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
def __len__(self):
return len(self.anchors)
def __getitem__(self, index):
anchor, label = self.anchors[index]
if self.anchor_transform is not None:
anchor = self.anchor_transform(anchor)
# now pair this up with an image from the same class in the second stream
if random.random() > 0.5:
A = np.where(np.array(self.posnegs.dataset.targets) == label)[0]
posneg_idx = np.random.choice(A[np.in1d(A, self.posnegs.indices)])
posneg, label = self.posnegs[np.where(self.posnegs.indices==posneg_idx)[0][0]]
target = torch.tensor([1]).long()
else:
A = np.where(np.array(self.posnegs.dataset.targets) != label)[0]
posneg_idx = np.random.choice(A[np.in1d(A, self.posnegs.indices)])
posneg, label = self.posnegs[np.where(self.posnegs.indices==posneg_idx)[0][0]]
target = torch.tensor([0]).long()
if self.posneg_transform is not None:
posneg = self.posneg_transform(posneg)
return anchor, posneg, target, label
class PosNegCifar10(torch.utils.data.Dataset):
def __init__(self, dataset, phase):
        # no split here: a positive is the anchor itself, a negative is any other sample
self.dataset = dataset
if phase == 'train':
self.anchor_transform = transforms.Compose([transforms.Resize(64),
transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64),
transforms.RandomHorizontalFlip(0.5),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
self.posneg_transform = transforms.Compose([transforms.Resize(64),
transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64),
transforms.RandomHorizontalFlip(0.5),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
else:
self.anchor_transform = transforms.Compose([transforms.Resize(64),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
self.posneg_transform = transforms.Compose([transforms.Resize(64),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
def __len__(self):
return len(self.dataset)
def __getitem__(self, index):
anchor, label = self.dataset[index]
# now pair this up with an image from the same class in the second stream
if random.random() > 0.5:
posneg = anchor
target = torch.tensor([1]).long()
else:
while True:
neg_idx = random.randint(0, len(self.dataset)-1)
if neg_idx != index:
break
posneg, label = self.dataset[neg_idx]
target = torch.tensor([0]).long()
if self.anchor_transform is not None:
anchor = self.anchor_transform(anchor)
if self.posneg_transform is not None:
posneg = self.posneg_transform(posneg)
return anchor, posneg, target, label
### Simple Siamese code
imagenet_mean_std = [[0.485, 0.456, 0.406],[0.229, 0.224, 0.225]]
class SimSiamTransform():
def __init__(self, image_size, train, mean_std=imagenet_mean_std):
self.train = train
if self.train:
image_size = 224 if image_size is None else image_size # by default simsiam use image size 224
p_blur = 0.5 if image_size > 32 else 0 # exclude cifar
            # the paper didn't specify this value, so feel free to change it;
            # following SimCLR, the gaussian blur is applied with 50% probability,
            # and the 32-pixel threshold disables blur for CIFAR-sized images
self.transform = transforms.Compose([
transforms.RandomResizedCrop(image_size, scale=(0.2, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.RandomApply([transforms.ColorJitter(0.4,0.4,0.4,0.1)], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([transforms.GaussianBlur(kernel_size=image_size//20*2+1, sigma=(0.1, 2.0))], p=p_blur),
transforms.ToTensor(),
transforms.Normalize(*mean_std)
])
else:
self.transform = transforms.Compose([
transforms.Resize(int(image_size*(8/7)), interpolation=Image.BICUBIC), # 224 -> 256
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize(*mean_std)
])
def __call__(self, x):
x1 = self.transform(x)
x2 = self.transform(x)
return x1, x2
def get_simsiam_dataset(args, phase, download=True, debug_subset_size=None):
if phase == 'train':
train = True
transform = SimSiamTransform(args.crop_size, train)
elif phase == 'val':
train = False
transform = SimSiamTransform(args.crop_size, train)
elif phase == 'linear_train':
train = True
transform = transforms.Compose([
transforms.RandomResizedCrop(args.crop_size, scale=(0.08, 1.0), ratio=(3.0/4.0,4.0/3.0), interpolation=Image.BICUBIC),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(*imagenet_mean_std)
])
elif phase == 'linear_val':
train = False
transform = transforms.Compose([
transforms.Resize(int(args.crop_size*(8/7)), interpolation=Image.BICUBIC), # 224 -> 256
transforms.CenterCrop(args.crop_size),
transforms.ToTensor(),
transforms.Normalize(*imagenet_mean_std)
])
dataset = torchvision.datasets.CIFAR10(root="CIFAR10_Dataset", train=train, transform=transform, download=download)
if debug_subset_size is not None:
dataset = torch.utils.data.Subset(dataset, range(0, debug_subset_size)) # take only one batch
dataset.classes = dataset.dataset.classes
dataset.targets = dataset.dataset.targets
return dataset
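# Hedged sketch (not part of the original module): SimSiamTransform returns two
# independently augmented views of the same image, which is what the SimSiam
# trainer consumes as (input1, input2). The demo runs on a random PIL image.
if __name__ == "__main__":
    dummy = Image.fromarray(np.uint8(np.random.rand(96, 96, 3) * 255))
    two_views = SimSiamTransform(image_size=64, train=True)
    x1, x2 = two_views(dummy)
    print(x1.shape, x2.shape)  # torch.Size([3, 64, 64]) twice, with different augmentations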
|
Python
| 183
| 45.284153
| 134
|
/datasets/cifar10.py
| 0.572086
| 0.524029
|
junprog/contrastive-baseline
|
refs/heads/main
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class L2ContrastiveLoss(nn.Module):
"""
Contrastive loss
Takes embeddings of two samples and a target label == 1 if samples are from the same class and label == 0 otherwise
Args :
output1 & output2 : [N, dim]
target : [N]
"""
def __init__(self, margin=1.0):
super().__init__()
self.margin = margin
self.eps = 1e-9
def forward(self, output1, output2, target, size_average=True):
target = target.squeeze()
distances = (output2 - output1).pow(2).sum(1) # squared distances
losses = 0.5 * (target.float() * distances +
(1 + -1 * target).float() * F.relu(self.margin - (distances + self.eps).sqrt()).pow(2))
return losses.mean() if size_average else losses.sum()
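# Hedged sketch (illustration only): with margin 1.0 a positive pair (target 1) is
# penalised by its squared distance, while a negative pair (target 0) is penalised
# only while it still sits inside the margin.
if __name__ == "__main__":
    loss_fn = L2ContrastiveLoss(margin=1.0)
    emb_a, emb_b = torch.randn(4, 128), torch.randn(4, 128)
    target = torch.tensor([1, 0, 1, 0])
    print(loss_fn(emb_a, emb_b, target).item())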
|
Python
| 24
| 34.583332
| 119
|
/models/l2_contrastive_loss.py
| 0.595545
| 0.572098
|
junprog/contrastive-baseline
|
refs/heads/main
|
# in : original image
# out : cropped img1 (anchor)
# cropped img2 (compete)
# target (positive img1 - img2 : 1, negative img1 - img2 : 0)
import os
from glob import glob
import random
import numpy as np
from PIL import Image
from PIL import ImageFilter
import torch
import torch.utils.data as data
import torchvision.transforms.functional as F
from torchvision import transforms
random.seed(765)
def divide_patches(img, row, col):
patche_size_w = int(img.size[0] / col)
patche_size_h = int(img.size[1] / row)
patches = []
for cnt_i, i in enumerate(range(0, img.size[1], patche_size_h)):
if cnt_i == row:
break
for cnt_j, j in enumerate(range(0, img.size[0], patche_size_w)):
if cnt_j == col:
break
box = (j, i, j+patche_size_w, i+patche_size_h)
patches.append(img.crop(box))
return patches
def create_pos_pair(patches):
idx = random.randint(0, len(patches)-1)
img1 = patches[idx]
img2 = patches[idx]
target = np.array([1])
return img1, img2, target
def create_neg_pair(patches):
    idx = random.sample(range(len(patches)), k=2)  # sample over all patches; range end is exclusive
img1 = patches[idx[0]]
img2 = patches[idx[1]]
target = np.array([0])
return img1, img2, target
def random_crop(im_h, im_w, crop_h, crop_w):
res_h = im_h - crop_h
res_w = im_w - crop_w
i = random.randint(0, res_h)
j = random.randint(0, res_w)
return i, j, crop_h, crop_w
class GaussianBlur(object):
"""Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""
def __init__(self, sigma=[.1, 2.]):
self.sigma = sigma
def __call__(self, x):
sigma = random.uniform(self.sigma[0], self.sigma[1])
x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
return x
class PosNegSpatialDataset(data.Dataset):
    # divide_num : (3,3) -> 3x3 = 9 patches
def __init__(self, data_path, crop_size, divide_num=(3,3), aug=True):
self.data_path = data_path
self.im_list = sorted(glob(os.path.join(self.data_path, '*.jpg')))
self.c_size = crop_size
self.d_row = divide_num[0]
self.d_col = divide_num[1]
if aug:
self.aug = transforms.Compose([
transforms.CenterCrop(self.c_size),
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),
transforms.RandomHorizontalFlip()
])
else:
self.aug = transforms.CenterCrop(self.c_size)
self.trans = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
def __len__(self):
return len(self.im_list)
def __getitem__(self, index):
img_path = self.im_list[index]
img = Image.open(img_path).convert('RGB')
patches = divide_patches(img, self.d_row, self.d_col)
if random.random() > 0.5:
img1, img2, target = create_pos_pair(patches)
else:
img1, img2, target = create_neg_pair(patches)
img1 = self.aug(img1)
img2 = self.aug(img2)
target = torch.from_numpy(target).long()
img1 = self.trans(img1)
img2 = self.trans(img2)
        return img1, img2, target, -1  # no class label here; -1 collates cleanly, unlike None
class SpatialDataset(data.Dataset):
    # divide_num : (3,3) -> 3x3 = 9 patches
def __init__(self, phase, data_path, crop_size, divide_num=(3,3), aug=True):
with open(os.path.join(data_path, '{}.txt'.format(phase)), 'r') as f:
im_list = f.readlines()
self.im_list = [im_name.replace('\n', '') for im_name in im_list]
self.c_size = crop_size
self.d_row = divide_num[0]
self.d_col = divide_num[1]
self.trans = transforms.Compose([
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
def __len__(self):
return len(self.im_list)
def __getitem__(self, index):
img_path = self.im_list[index]
img = Image.open(img_path).convert('RGB')
patches = divide_patches(img, self.d_row, self.d_col)
img1, img2, label = create_pos_pair(patches)
assert img1.size == img2.size
wd, ht = img1.size
i, j, h, w = random_crop(ht, wd, self.c_size, self.c_size)
img1 = F.crop(img1, i, j, h, w)
img2 = F.crop(img2, i, j, h, w)
img1 = self.trans(img1)
img2 = self.trans(img2)
imgs = (img1, img2)
return imgs, label
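# Hedged sketch (not part of the datasets module): divide_patches tiles the image
# into row x col patches; patch sizes are floor-divided, so a few right/bottom
# pixels are dropped when the image does not divide evenly.
if __name__ == "__main__":
    dummy = Image.new("RGB", (300, 200))         # width 300, height 200
    patches = divide_patches(dummy, row=2, col=3)
    print(len(patches), patches[0].size)         # 6 patches, each (100, 100)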
|
Python
| 162
| 29.660494
| 92
|
/datasets/spatial.py
| 0.573701
| 0.53826
|
junprog/contrastive-baseline
|
refs/heads/main
|
import torch
import torchvision
from PIL import Image
from matplotlib import pyplot as plt
import random
model = torchvision.models.__dict__['vgg19']()
print(model)
img = torch.rand(1,3,256,256)
out = model.features(img)
print(out.size())
import torchvision.transforms as trans
crop = trans.RandomCrop(224)
img = torch.rand(1,3,256,256)
out = crop(img)
print(out.size())
def divide_patches(img, row, col):
patche_size_w = int(img.size[0] / col)
patche_size_h = int(img.size[1] / row)
patches = []
for cnt_i, i in enumerate(range(0, img.size[1], patche_size_h)):
if cnt_i == row:
break
for cnt_j, j in enumerate(range(0, img.size[0], patche_size_w)):
if cnt_j == col:
break
box = (j, i, j+patche_size_w, i+patche_size_h)
patches.append(img.crop(box))
return patches
def display_images(
images: [Image],
row=3, col=3, width=10, height=4, max_images=15,
label_wrap_length=50, label_font_size=8):
if not images:
print("No images to display.")
return
if len(images) > max_images:
print(f"Showing {max_images} images of {len(images)}:")
images=images[0:max_images]
height = max(height, int(len(images)/col) * height)
plt.figure(figsize=(width, height))
for i, image in enumerate(images):
plt.subplot(row, col, i + 1)
plt.imshow(image)
plt.show()
image = Image.open("/mnt/hdd02/shibuya_scramble/image_000294.jpg").convert("RGB")
p = divide_patches(image, 2, 3)
print(len(p))
display_images(p, row=2, col=3)
def create_pos_pair(patches):
idx = random.randint(0, len(patches)-1)
img1 = patches[idx]
img2 = patches[idx]
label = 1
return img1, img2, label
def create_neg_pair(patches):
    idx = random.sample(range(len(patches)), k=2)  # sample over all patches; range end is exclusive
img1 = patches[idx[0]]
img2 = patches[idx[1]]
label = 0
return img1, img2, label
def get_img(img):
patches = divide_patches(img, 3, 2)
if random.random() > 0.5:
img1, img2, label = create_pos_pair(patches)
else:
img1, img2, label = create_neg_pair(patches)
return img1, img2, label
res = []
for i in range(10):
img1, img2, label = get_img(image)
flag = False
if img1 == img2:
flag = True
res.append([flag, label])
print(res)
|
Python
| 99
| 22.828283
| 81
|
/exp.py
| 0.612807
| 0.577184
|
junprog/contrastive-baseline
|
refs/heads/main
|
import os
from collections import OrderedDict
import torch
import torch.nn as nn
import torchvision.models as models
class LinearEvalModel(nn.Module):
def __init__(self, arch='vgg19', dim=512, num_classes=10):
super().__init__()
        if arch == 'vgg19':
            self.features = models.vgg19().features
        elif arch == 'vgg19_bn':
            self.features = models.vgg19_bn().features
        elif arch == 'resnet18':
resnet18 = models.resnet18(pretrained=False)
self.features = nn.Sequential(*list(resnet18.children())[:-1])
self.avg_pool = nn.AdaptiveAvgPool2d((1,1))
self.fc = nn.Linear(dim, num_classes)
def weight_init(self, weight_path, device, arch):
state_dict = torch.load(os.path.join(weight_path, 'best_model.pth'), device)
new_state_dict = OrderedDict()
if 'resnet' in arch:
for k, v in state_dict.items():
if 'encoder' in k:
k = k.replace('encoder.', '')
new_state_dict[k] = v
self.features.load_state_dict(new_state_dict)
elif 'vgg' in arch:
for k, v in state_dict.items():
if 'encoder' in k:
k = k.replace('encoder.0.', '')
new_state_dict[k] = v
self.features.load_state_dict(new_state_dict)
for m in self.features.parameters():
m.requires_grad = False
def forward(self, x):
x = self.features(x)
x = self.avg_pool(x)
        x = torch.flatten(x, 1)  # flatten to [N, dim]; squeeze() would also drop a batch dim of 1
out = self.fc(x)
return out
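# Hedged sketch (illustration only): weight_init freezes the encoder, so only the
# final linear layer trains during linear evaluation. The shape check below uses an
# untrained resnet18 backbone.
if __name__ == "__main__":
    model = LinearEvalModel(arch="resnet18", dim=512, num_classes=10)
    logits = model(torch.rand(2, 3, 64, 64))
    print(logits.shape)  # torch.Size([2, 10])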
|
Python
| 52
| 29.961538
| 84
|
/models/create_linear_eval_model.py
| 0.545963
| 0.528571
|
junprog/contrastive-baseline
|
refs/heads/main
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def D(p, z, version='simplified'): # negative cosine similarity
if version == 'original':
z = z.detach() # stop gradient
p = F.normalize(p, dim=1) # l2-normalize
z = F.normalize(z, dim=1) # l2-normalize
return -(p*z).sum(dim=1).mean()
elif version == 'simplified':
return - F.cosine_similarity(p, z.detach(), dim=-1).mean()
else:
raise Exception
class CosineContrastiveLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, z1, z2, p1, p2):
if z1.dim() != 2:
z1 = z1.squeeze()
if z2.dim() != 2:
z2 = z2.squeeze()
        if p1 is not None and p2 is not None:
loss = D(p1, z2) / 2 + D(p2, z1) / 2
else:
loss = D(z1, z2)
return loss
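# Hedged sketch (illustration only): D() is a negative cosine similarity with a
# stop-gradient on z, so gradients flow only through the prediction p. Identical
# inputs give -1.0; the class combines the two directions symmetrically.
if __name__ == "__main__":
    z = torch.randn(8, 512)
    print(D(z, z).item())                        # -1.0 for identical directions
    loss_fn = CosineContrastiveLoss()
    z1, z2 = torch.randn(8, 512), torch.randn(8, 512)
    p1, p2 = torch.randn(8, 512), torch.randn(8, 512)
    print(loss_fn(z1, z2, p1, p2).item())        # SimSiam-style symmetric loss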
|
Python
| 33
| 25.848484
| 66
|
/models/cosine_contrastive_loss.py
| 0.528217
| 0.496614
|
junprog/contrastive-baseline
|
refs/heads/main
|
import os
import numpy as np
import torch
def worker_init_fn(worker_id):
np.random.seed(np.random.get_state()[1][0] + worker_id)
class Save_Handle(object):
"""handle the number of """
def __init__(self, max_num):
self.save_list = []
self.max_num = max_num
def append(self, save_path):
if len(self.save_list) < self.max_num:
self.save_list.append(save_path)
else:
remove_path = self.save_list[0]
del self.save_list[0]
self.save_list.append(save_path)
if os.path.exists(remove_path):
os.remove(remove_path)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = 1.0 * self.sum / self.count
def get_avg(self):
return self.avg
def get_count(self):
return self.count
## evaluation-only helper (runs under no_grad, so it cannot drive training)
@torch.no_grad()
def accuracy(meter, output1, output2, target):
    """Computes the accuracy over the predictions"""
for logit in [output1, output2]:
corrects = (torch.max(logit, 1)[1].data == target.squeeze().long().data).sum()
accu = float(corrects) / float(target.size()[0])
meter.update(accu)
return meter
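# Hedged sketch (illustration only): AverageMeter keeps a running weighted average,
# which is how the trainers aggregate per-batch losses weighted by batch size.
if __name__ == "__main__":
    meter = AverageMeter()
    meter.update(2.0, n=4)   # batch of 4 with mean loss 2.0
    meter.update(1.0, n=1)   # batch of 1 with loss 1.0
    print(meter.get_avg())   # (2.0*4 + 1.0*1) / 5 = 1.8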
|
Python
| 58
| 25.862068
| 88
|
/utils/helper.py
| 0.552632
| 0.541078
|
junprog/contrastive-baseline
|
refs/heads/main
|
import os
import numpy as np
from PIL import Image
import torch
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
### Takes a torch tensor (batch) and renders it according to args.div_num
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
def invnorm(img, N):
img = img[N,:,:,:].to('cpu').detach().numpy().copy()
img = img.transpose(1,2,0)
img = img*std+mean
return img
class ImageDisplayer:
    def __init__(self, args, save_dir):
        # N is the number of batch samples to display
        self.args = args
        self.save_dir = save_dir
self.N = args.visual_num
@torch.no_grad()
def __call__(self, epoch, prefix, img1, img2, target):
imgs1 = []
imgs2 = []
targets = []
for n in range(self.N):
imgs1.append(invnorm(img1,n))
imgs2.append(invnorm(img2,n))
if target is not None:
targets.append(target[n].item())
else:
targets = None
self.display_images(epoch, prefix, imgs1, imgs2, targets)
def display_images(self, epoch, prefix, images1: [Image], images2: [Image], targets,
columns=2, width=8, height=8, label_wrap_length=50, label_font_size=8):
if not (images1 and images2):
print("No images to display.")
return
height = max(height, int(len(images1)/columns) * height)
plt.figure(figsize=(width, height))
i = 1
if targets is not None:
for (im1, im2, tar) in zip(images1, images2, targets):
im1 = Image.fromarray(np.uint8(im1*255))
im2 = Image.fromarray(np.uint8(im2*255))
plt.subplot(self.N, 2, i)
plt.title(tar, fontsize=20)
plt.imshow(im1)
i += 1
plt.subplot(self.N, 2, i)
plt.title(tar, fontsize=20)
plt.imshow(im2)
i += 1
else:
for (im1, im2) in zip(images1, images2):
im1 = Image.fromarray(np.uint8(im1*255))
im2 = Image.fromarray(np.uint8(im2*255))
plt.subplot(self.N, 2, i)
plt.imshow(im1)
i += 1
plt.subplot(self.N, 2, i)
plt.imshow(im2)
i += 1
plt.tight_layout()
output_img_name = 'imgs_{}_{}.png'.format(prefix, epoch)
plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
plt.close()
class EmbeddingDisplayer:
    def __init__(self, args, save_dir):
        self.args = args
        self.save_dir = save_dir
self.cifar10_classes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
self.colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
'#bcbd22', '#17becf']
@torch.no_grad()
def __call__(self, embeddings, targets, epoch, prefix, xlim=None, ylim=None):
embeddings = embeddings.to('cpu').detach().numpy().copy()
targets = targets.to('cpu').detach().numpy().copy()
plt.figure(figsize=(10,10))
for i in range(10):
inds = np.where(targets==i)[0]
plt.scatter(embeddings[inds,0], embeddings[inds,1], alpha=0.5, color=self.colors[i])
if xlim:
plt.xlim(xlim[0], xlim[1])
if ylim:
plt.ylim(ylim[0], ylim[1])
plt.legend(self.cifar10_classes)
output_img_name = 'emb_{}_{}.png'.format(prefix, epoch)
plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
plt.close()
class LossGraphPloter:
    def __init__(self, save_dir):
        self.save_dir = save_dir
self.epochs = []
self.losses = []
def __call__(self, epoch, loss, prefix):
self.epochs.append(epoch)
self.losses.append(loss)
output_img_name = '{}_loss.svg'.format(prefix)
plt.plot(self.epochs, self.losses)
plt.title('Loss')
plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
plt.close()
class AccLossGraphPloter:
    def __init__(self, save_dir):
        self.save_dir = save_dir
self.tr_accs = []
self.vl_accs = []
self.tr_losses = []
self.vl_losses = []
self.epochs = []
def __call__(self, epoch, tr_acc, vl_acc, tr_loss, vl_loss, prefix):
self.tr_accs.append(tr_acc)
self.vl_accs.append(vl_acc)
self.tr_losses.append(tr_loss)
self.vl_losses.append(vl_loss)
self.epochs.append(epoch)
output_img_name = '{}_eval.svg'.format(prefix)
fig, (axL, axR) = plt.subplots(ncols=2, figsize=(10,4))
axL.plot(self.epochs, self.tr_accs, label='train')
axL.plot(self.epochs, self.vl_accs, label='val')
axL.set_title('Top-1 Accuracy')
axL.set_xlabel('epoch')
axL.set_ylabel('acc [%]')
axL.legend(loc="lower right")
axR.plot(self.epochs, self.tr_losses, label='train')
axR.plot(self.epochs, self.vl_losses, label='val')
axR.set_title('Loss')
axR.set_xlabel('epoch')
axR.set_ylabel('loss')
axR.legend(loc="upper right")
plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
plt.close()
|
Python
| 162
| 32.092594
| 96
|
/utils/visualizer.py
| 0.539552
| 0.508955
|
junprog/contrastive-baseline
|
refs/heads/main
|
import os
from glob import glob
import numpy as np
import argparse
def parse_args():
parser = argparse.ArgumentParser(description='Test ')
parser.add_argument('--data-dir', default='/mnt/hdd02/shibuya_scramble',
help='original data directory')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
## Random Train-Val split
    im_list = sorted(glob(os.path.join(args.data_dir, '*.jpg')))
tr_im_list = list(np.random.choice(im_list, size=int(len(im_list)*0.8), replace=False))
vl_im_list = list(set(im_list) - set(tr_im_list))
for phase in ['train', 'val']:
with open(os.path.join(args.data_dir, './{}.txt'.format(phase)), mode='w') as f:
if phase == 'train':
f.write('\n'.join(tr_im_list))
elif phase == 'val':
f.write('\n'.join(vl_im_list))
|
Python
| 30
| 30.766666
| 91
|
/train_val_split.py
| 0.581322
| 0.577125
|
junprog/contrastive-baseline
|
refs/heads/main
|
import os
import sys
import time
import logging
import numpy as np
import torch
from torch import optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
import torchvision.models as models
import torchvision.datasets as datasets
from models.simple_siamese_net import SiameseNetwork
from models.cosine_contrastive_loss import CosineContrastiveLoss
from utils.trainer import Trainer
from utils.helper import Save_Handle, AverageMeter, worker_init_fn
from utils.visualizer import ImageDisplayer, LossGraphPloter
from datasets.spatial import SpatialDataset
from datasets.cifar10 import PosNegCifar10, get_simsiam_dataset
class SimSiamTrainer(Trainer):
def setup(self):
"""initialize the datasets, model, loss and optimizer"""
args = self.args
self.vis = ImageDisplayer(args, self.save_dir)
self.tr_graph = LossGraphPloter(self.save_dir)
self.vl_graph = LossGraphPloter(self.save_dir)
if torch.cuda.is_available():
self.device = torch.device("cuda")
self.device_count = torch.cuda.device_count()
logging.info('using {} gpus'.format(self.device_count))
else:
raise Exception("gpu is not available")
if args.cifar10:
self.datasets = {x: get_simsiam_dataset(args, x) for x in ['train', 'val']}
else:
self.datasets = {x: SpatialDataset(x,
args.data_dir,
args.crop_size,
(args.div_row, args.div_col),
args.aug) for x in ['train', 'val']}
self.dataloaders = {x: DataLoader(self.datasets[x],
batch_size=args.batch_size,
shuffle=(True if x == 'train' else False),
num_workers=args.num_workers*self.device_count,
pin_memory=(True if x == 'train' else False),
worker_init_fn=worker_init_fn) for x in ['train', 'val']}
# Define model, loss, optim
self.model = SiameseNetwork(args)
self.model.to(self.device)
self.criterion = CosineContrastiveLoss()
self.criterion.to(self.device)
self.optimizer = optim.SGD(self.model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
#self.scheduler = lr_scheduler.MultiStepLR(self.optimizer, milestones=[80, 120, 160, 200, 250], gamma=0.1)
self.scheduler = lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=args.max_epoch)
self.start_epoch = 0
self.best_loss = np.inf
if args.resume:
suf = args.resume.rsplit('.', 1)[-1]
if suf == 'tar':
checkpoint = torch.load(args.resume, self.device)
self.model.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.start_epoch = checkpoint['epoch'] + 1
elif suf == 'pth':
self.model.load_state_dict(torch.load(args.resume, self.device))
self.save_list = Save_Handle(max_num=args.max_model_num)
def train(self):
"""training process"""
args = self.args
for epoch in range(self.start_epoch, args.max_epoch):
logging.info('-'*5 + 'Epoch {}/{}'.format(epoch, args.max_epoch - 1) + '-'*5)
self.epoch = epoch
self.train_epoch(epoch)
if epoch % args.val_epoch == 0 and epoch >= args.val_start:
self.val_epoch(epoch)
def train_epoch(self, epoch):
epoch_loss = AverageMeter()
epoch_start = time.time()
self.model.train() # Set model to training mode
for step, ((input1, input2), label) in enumerate(self.dataloaders['train']):
input1 = input1.to(self.device)
input2 = input2.to(self.device)
with torch.set_grad_enabled(True):
(z1, z2), (p1, p2) = self.model(input1, input2)
loss = self.criterion(z1, z2, p1, p2)
epoch_loss.update(loss.item(), input1.size(0))
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
            # visualize
            if step == 0:
                self.vis(epoch, 'train', input1, input2, label)
        # CosineAnnealingLR uses T_max in epochs, so step the scheduler once per epoch
        self.scheduler.step()
logging.info('Epoch {} Train, Loss: {:.5f}, lr: {:.5f}, Cost {:.1f} sec'
.format(self.epoch, epoch_loss.get_avg(), self.optimizer.param_groups[0]['lr'], time.time()-epoch_start))
self.tr_graph(self.epoch, epoch_loss.get_avg(), 'tr')
if epoch % self.args.check_point == 0:
model_state_dic = self.model.state_dict()
save_path = os.path.join(self.save_dir, '{}_ckpt.tar'.format(self.epoch))
torch.save({
'epoch': self.epoch,
'optimizer_state_dict': self.optimizer.state_dict(),
'model_state_dict': model_state_dic
}, save_path)
self.save_list.append(save_path) # control the number of saved models
def val_epoch(self, epoch):
epoch_start = time.time()
self.model.eval() # Set model to evaluate mode
epoch_loss = AverageMeter()
for step, ((input1, input2), label) in enumerate(self.dataloaders['val']):
input1 = input1.to(self.device)
input2 = input2.to(self.device)
with torch.set_grad_enabled(False):
(z1, z2), (p1, p2) = self.model(input1, input2)
loss = self.criterion(z1, z2, p1, p2)
epoch_loss.update(loss.item(), input1.size(0))
# visualize
if step == 0:
self.vis(epoch, 'val', input1, input2, label)
logging.info('Epoch {} Val, Loss: {:.5f}, Cost {:.1f} sec'
.format(self.epoch, epoch_loss.get_avg(), time.time()-epoch_start))
self.vl_graph(self.epoch, epoch_loss.get_avg(), 'vl')
model_state_dic = self.model.state_dict()
if self.best_loss > epoch_loss.get_avg():
self.best_loss = epoch_loss.get_avg()
logging.info("save min loss {:.2f} model epoch {}".format(self.best_loss, self.epoch))
torch.save(model_state_dic, os.path.join(self.save_dir, 'best_model.pth'))
|
Python
| 157
| 41.21656
| 127
|
/utils/simsiam_trainer.py
| 0.562698
| 0.550626
|
junprog/contrastive-baseline
|
refs/heads/main
|
import torch
import torch.nn as nn
class projection_MLP(nn.Module):
def __init__(self, in_dim=512, hidden_dim=512, out_dim=512): # bottleneck structure
super().__init__()
self.layers = nn.Sequential(
nn.Linear(in_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, out_dim)
)
def forward(self, x):
if x.dim() != 2:
x = x.squeeze()
x = self.layers(x)
return x
class prediction_MLP(nn.Module):
def __init__(self, in_dim=512, hidden_dim=256, out_dim=512): # bottleneck structure
super().__init__()
self.layer1 = nn.Sequential(
nn.Linear(in_dim, hidden_dim),
nn.ReLU(inplace=True)
)
self.layer2 = nn.Linear(hidden_dim, out_dim)
def forward(self, x):
if x.dim() != 2:
x = x.squeeze()
x = self.layer1(x)
x = self.layer2(x)
return x
class SiameseNetwork(nn.Module):
def __init__(self, model, pattern_feature = 'conv-512x1x1', projection=False, prediction=False):
super(SiameseNetwork, self).__init__()
self.projection = projection
self.prediction = prediction
if pattern_feature == 'conv-512x1x1':
features = model().features
max_pool = nn.AdaptiveAvgPool2d((1,1))
self.encoder = nn.Sequential(features, max_pool)
if projection:
self.projector = projection_MLP(in_dim=512, hidden_dim=512, out_dim=512)
if prediction:
self.predictor = prediction_MLP(in_dim=512, out_dim=512)
elif pattern_feature == 'fc-4096':
            # keep the conv stack up to the first fully-connected (4096-d) layer
            # (the original referenced self.encoder before it was assigned)
            vgg = model()
            self.encoder = nn.Sequential(vgg.features, vgg.avgpool, nn.Flatten(), vgg.classifier[0])
if projection:
self.projector = projection_MLP(in_dim=4096, hidden_dim=4096, out_dim=4096)
if prediction:
self.predictor = prediction_MLP(in_dim=4096, out_dim=4096)
def forward(self, input1, input2):
if self.prediction:
f, h = self.encoder, self.predictor
z1, z2 = f(input1), f(input2)
            if self.projection:
                z1, z2 = self.projector(z1), self.projector(z2)
p1, p2 = h(z1), h(z2)
else:
f = self.encoder
z1, z2 = f(input1), f(input2)
            if self.projection:
                z1, z2 = self.projector(z1), self.projector(z2)
p1, p2 = None, None
return (z1, z2), (p1, p2)
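# Hedged sketch (illustration only): with the default 'conv-512x1x1' pattern the
# encoder is the backbone's conv stack plus a global average pool, so z and p come
# out 512-d once the MLPs squeeze the spatial dims. vgg19 here is untrained and
# only used to check shapes.
if __name__ == "__main__":
    import torchvision.models as models
    net = SiameseNetwork(models.vgg19, projection=True, prediction=True)
    (z1, z2), (p1, p2) = net(torch.rand(2, 3, 64, 64), torch.rand(2, 3, 64, 64))
    print(z1.shape, p1.shape)  # torch.Size([2, 512]) torch.Size([2, 512])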
|
Python
| 85
| 30.047058
| 100
|
/models/simple_siamese_net_tmp.py
| 0.538258
| 0.498485
|
junprog/contrastive-baseline
|
refs/heads/main
|
import os
import sys
import time
import logging
import numpy as np
import torch
from torch import optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
import torchvision.models as models
import torchvision.datasets as datasets
from models.siamese_net import SiameseNetwork
from models.l2_contrastive_loss import L2ContrastiveLoss
from utils.trainer import Trainer
from utils.helper import Save_Handle, AverageMeter, worker_init_fn
from utils.visualizer import ImageDisplayer, EmbeddingDisplayer
from datasets.spatial import SpatialDataset
from datasets.cifar10 import PosNegCifar10
class CoTrainer(Trainer):
def setup(self):
"""initialize the datasets, model, loss and optimizer"""
args = self.args
self.vis = ImageDisplayer(args, self.save_dir)
self.emb = EmbeddingDisplayer(args, self.save_dir)
if torch.cuda.is_available():
self.device = torch.device("cuda")
self.device_count = torch.cuda.device_count()
logging.info('using {} gpus'.format(self.device_count))
else:
raise Exception("gpu is not available")
if args.cifar10:
# Download and create datasets
or_train = datasets.CIFAR10(root="CIFAR10_Dataset", train=True, transform=None, download=True)
or_val = datasets.CIFAR10(root="CIFAR10_Dataset", train=False, transform=None, download=True)
# splits CIFAR10 into two streams
self.datasets = {x: PosNegCifar10((or_train if x == 'train' else or_val),
phase=x) for x in ['train', 'val']}
else:
            self.datasets = {x: SpatialDataset(x,
                                               args.data_dir,
                                               args.crop_size,
                                               (args.div_row, args.div_col),
                                               args.aug) for x in ['train', 'val']}
self.dataloaders = {x: DataLoader(self.datasets[x],
batch_size=args.batch_size,
shuffle=(True if x == 'train' else False),
num_workers=args.num_workers*self.device_count,
pin_memory=(True if x == 'train' else False),
worker_init_fn=worker_init_fn) for x in ['train', 'val']}
# Define model, loss, optim
self.model = SiameseNetwork(models.__dict__[args.arch], pattern_feature = args.pattern_feature)
self.model.to(self.device)
self.criterion = L2ContrastiveLoss(args.margin)
self.criterion.to(self.device)
self.optimizer = optim.SGD(self.model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
self.scheduler = lr_scheduler.MultiStepLR(self.optimizer, milestones=[80, 120, 160, 200, 250], gamma=0.1)
self.start_epoch = 0
self.best_loss = np.inf
if args.resume:
suf = args.resume.rsplit('.', 1)[-1]
if suf == 'tar':
checkpoint = torch.load(args.resume, self.device)
self.model.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.start_epoch = checkpoint['epoch'] + 1
elif suf == 'pth':
self.model.load_state_dict(torch.load(args.resume, self.device))
self.save_list = Save_Handle(max_num=args.max_model_num)
def train(self):
"""training process"""
args = self.args
for epoch in range(self.start_epoch, args.max_epoch):
logging.info('-'*5 + 'Epoch {}/{}'.format(epoch, args.max_epoch - 1) + '-'*5)
self.epoch = epoch
self.train_epoch(epoch)
if epoch % args.val_epoch == 0 and epoch >= args.val_start:
self.val_epoch(epoch)
def train_epoch(self, epoch):
epoch_loss = AverageMeter()
epoch_start = time.time()
self.model.train() # Set model to training mode
for step, (input1, input2, target, label) in enumerate(self.dataloaders['train']):
input1 = input1.to(self.device)
input2 = input2.to(self.device)
target = target.to(self.device)
with torch.set_grad_enabled(True):
output1, output2 = self.model(input1, input2)
loss = self.criterion(output1, output2, target)
epoch_loss.update(loss.item(), input1.size(0))
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
            # visualize
            if step == 0:
                self.vis(epoch, 'train', input1, input2, target)
                self.emb(output1, label, epoch, 'train')
        # MultiStepLR milestones are in epochs, so step the scheduler once per epoch
        self.scheduler.step()
logging.info('Epoch {} Train, Loss: {:.5f}, Cost {:.1f} sec'
.format(self.epoch, epoch_loss.get_avg(), time.time()-epoch_start))
model_state_dic = self.model.state_dict()
save_path = os.path.join(self.save_dir, '{}_ckpt.tar'.format(self.epoch))
torch.save({
'epoch': self.epoch,
'optimizer_state_dict': self.optimizer.state_dict(),
'model_state_dict': model_state_dic
}, save_path)
self.save_list.append(save_path) # control the number of saved models
def val_epoch(self, epoch):
epoch_start = time.time()
self.model.eval() # Set model to evaluate mode
epoch_loss = AverageMeter()
for step, (input1, input2, target, label) in enumerate(self.dataloaders['val']):
input1 = input1.to(self.device)
input2 = input2.to(self.device)
target = target.to(self.device)
with torch.set_grad_enabled(False):
output1, output2 = self.model(input1, input2)
loss = self.criterion(output1, output2, target)
epoch_loss.update(loss.item(), input1.size(0))
# visualize
if step == 0:
self.vis(epoch, 'val', input1, input2, target)
self.emb(output1, label, epoch, 'val')
logging.info('Epoch {} Val, Loss: {:.5f}, Cost {:.1f} sec'
.format(self.epoch, epoch_loss.get_avg(), time.time()-epoch_start))
model_state_dic = self.model.state_dict()
if self.best_loss > epoch_loss.get_avg():
self.best_loss = epoch_loss.get_avg()
logging.info("save min loss {:.2f} model epoch {}".format(self.best_loss, self.epoch))
torch.save(model_state_dic, os.path.join(self.save_dir, 'best_model.pth'))
|
Python
| 155
| 42.625805
| 127
|
/utils/contrastive_trainer.py
| 0.573732
| 0.561012
|
junprog/contrastive-baseline
|
refs/heads/main
|
import os
import argparse
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
import torchvision.models as models
from datasets.cifar10 import get_simsiam_dataset
from models.create_linear_eval_model import LinearEvalModel
from utils.visualizer import AccLossGraphPloter
from utils.logger import setlogger
args = None
def parse_args():
parser = argparse.ArgumentParser(description='Test ')
parser.add_argument('--save-dir', default='/mnt/hdd02/contrastive-learn/0113-193048',
help='model directory')
parser.add_argument('--device', default='0', help='assign device')
parser.add_argument('--arch', default='vgg19', help='model architecture')
parser.add_argument('--max-epoch', default=100, type=int, help='train epoch')
parser.add_argument('--crop-size', default=224, type=int, help='input size')
    parser.add_argument('--batch-size', default=512, type=int, help='batch size')
parser.add_argument('--lr', default=1e-1, type=float, help='learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.device.strip() # set vis gpu
plotter = AccLossGraphPloter(args.save_dir)
setlogger(os.path.join(args.save_dir, 'eval.log')) # set logger
datasets = {x: get_simsiam_dataset(args, x) for x in ['linear_train', 'linear_val']}
dataloaders = {x: DataLoader(datasets[x],
batch_size=(args.batch_size),
shuffle=(True if x == 'linear_train' else False),
num_workers=8,
pin_memory=(True if x == 'linear_train' else False)) for x in ['linear_train', 'linear_val']}
device = torch.device('cuda')
model = LinearEvalModel(arch=args.arch)
model.weight_init(args.save_dir, device, args.arch) ## initialize & freeze
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[40, 60, 80], gamma=0.1)
    ## Training & Test Loop
model.to(device)
for epoch in range(args.max_epoch):
model.train()
losses, acc, step, total = 0., 0., 0., 0.
for data, target in dataloaders['linear_train']:
data, target = data.to(device), target.to(device)
logits = model(data)
optimizer.zero_grad()
loss = criterion(logits, target)
loss.backward()
losses += loss.item()
optimizer.step()
            pred = F.softmax(logits, dim=-1).max(-1)[1]
            acc += pred.eq(target).sum().item()
            step += 1
            total += target.size(0)
        scheduler.step()  # MultiStepLR milestones are in epochs: step once per epoch
        tr_loss = losses / step
tr_acc = acc / total * 100.
logging.info('[Train Epoch: {0:2d}], loss: {1:.3f}, acc: {2:.3f}'.format(epoch, tr_loss, tr_acc))
model.eval()
losses, acc, step, total = 0., 0., 0., 0.
with torch.no_grad():
for data, target in dataloaders['linear_val']:
data, target = data.to(device), target.to(device)
logits = model(data)
loss = criterion(logits, target)
losses += loss.item()
pred = F.softmax(logits, dim=-1).max(-1)[1]
acc += pred.eq(target).sum().item()
step += 1
total += target.size(0)
vl_loss = losses / step
vl_acc = acc / total * 100.
logging.info('[Test Epoch: {0:2d}], loss: {1:.3f} acc: {2:.2f}'.format(epoch, vl_loss, vl_acc))
plotter(epoch, tr_acc, vl_acc, tr_loss, vl_loss, args.arch)
|
Python
| 108
| 36.23148
| 125
|
/linear_eval.py
| 0.591542
| 0.572886
|
junprog/contrastive-baseline
|
refs/heads/main
|
import torch
import torch.nn as nn
class SiameseNetwork(nn.Module):
def __init__(self, model, pretrained=False, simple_model=False):
super(SiameseNetwork, self).__init__()
self.simple_model = simple_model
if simple_model:
self.features = nn.Sequential(nn.Conv2d(3, 32, 5), nn.PReLU(),
nn.MaxPool2d(2, stride=2),
nn.Conv2d(32, 64, 5), nn.PReLU(),
nn.MaxPool2d(2, stride=2),
nn.Conv2d(64, 64, 5), nn.PReLU(),
nn.MaxPool2d(2, stride=2))
self.classifier = nn.Sequential(nn.Linear(64 * 4 * 4, 256),
nn.PReLU(),
nn.Linear(256, 256),
nn.PReLU(),
nn.Linear(256, 2))
else:
if pretrained:
self.encoder = model(pretrained=True)
self.encoder.classifier = nn.Sequential(*[self.encoder.classifier[i] for i in range(6)])
self.encoder.classifier.add_module('out', nn.Linear(4096, 2))
else:
self.encoder = model(num_classes=2)
def forward_once(self, x):
if self.simple_model:
output = self.features(x)
output = output.view(output.size()[0], -1)
output = self.classifier(output)
else:
output = self.encoder(x)
return output
def forward(self, input1, input2):
output1 = self.forward_once(input1)
output2 = self.forward_once(input2)
return output1, output2
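# Hedged sketch (illustration only): the simple_model branch hard-codes a 64*4*4
# flattened feature, which corresponds to a 60x60 input
# (60 -conv5-> 56 -pool-> 28 -conv5-> 24 -pool-> 12 -conv5-> 8 -pool-> 4).
if __name__ == "__main__":
    net = SiameseNetwork(model=None, simple_model=True)
    out1, out2 = net(torch.rand(2, 3, 60, 60), torch.rand(2, 3, 60, 60))
    print(out1.shape, out2.shape)  # torch.Size([2, 2]) torch.Size([2, 2])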
|
Python
| 44
| 38.477272
| 104
|
/models/siamese_net.py
| 0.474654
| 0.440092
|
andrewjschuang/Turing
|
refs/heads/master
|
import time
from datetime import datetime
from flask import (Flask, abort, flash, redirect, render_template, request,
session, url_for)
from sqlalchemy.exc import IntegrityError
from wtforms import (Form, RadioField, StringField, SubmitField, TextAreaField, TextField,
validators)
from models.model import User, Project, Task, Questionnaire, Question, Response
from models.shared import db
class SignUp(Form):
name = TextField('Name:', validators=[validators.required()])
email = TextField('Email:', validators=[
validators.required(), validators.Length(min=6, max=35)])
password = TextField('Password:', validators=[
validators.required(), validators.Length(min=3, max=35)])
class Login(Form):
email = TextField('Email:', validators=[
validators.required(), validators.Length(min=6, max=35)])
password = TextField('Password:', validators=[
validators.required(), validators.Length(min=3, max=35)])
def create_app(config=None):
app = Flask(__name__)
if config:
app.config.from_mapping(config)
else:
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///prod.db'
app.config['SECRET_KEY'] = '7d441f27d441f27567d441f2b6176a'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db.init_app(app)
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.route('/signup', methods=['GET', 'POST'])
def signup():
form = SignUp(request.form)
if request.method == 'POST':
if form.validate():
name = request.form['name']
password = request.form['password']
email = request.form['email']
u = User(email=email, name=name, password=password)
db.session.add(u)
db.session.commit()
session['auth'] = {'name': name,
'email': email, 'timestamp': time.time()}
return redirect(url_for('index'))
else:
flash('All the form fields are required.', category='error')
return render_template('signup.html', form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
form = Login(request.form)
if request.method == 'POST':
if form.validate():
password = request.form['password']
email = request.form['email']
user = User.query.filter_by(email=email).first()
print(user)
if user:
print(user)
if user.password == password:
session['auth'] = {'name': user.name,
'email': user.email,
'timestamp': time.time()
}
return redirect(url_for('index'))
else:
flash('Authentication failed', category='error')
else:
flash('Authentication failed', category='error')
else:
flash('All the form fields are required', category='error')
return render_template('login.html', form=form)
@app.route('/', methods=['GET'])
def index():
auth = session.get('auth')
if auth:
user: User = User.query.filter_by(email=auth.get('email')).first()
if not user:
session['auth'] = {}
return redirect('/login')
info = user.get_index_data()
print(info)
return render_template('index.html', **info)
return redirect('/login')
@app.route('/responses')
def responses():
auth = session.get('auth')
if auth:
            user: User = User.query.filter_by(email=auth.get('email')).first()
if not user:
session['auth'] = {}
return redirect('/login')
quests = Questionnaire.query.all()
return render_template('responses.html', quests=quests)
@app.route('/respond/<int:ref>', methods=['GET', 'POST'])
def respond(ref):
quest = Questionnaire.query.get(ref)
if not quest:
print('no questionnaire found with id %s' % ref)
return abort(404)
if request.method == 'GET':
return render_template('feedback.html', name=quest.name, questions=quest.questions)
elif request.method == 'POST':
for question_id in request.form:
question = Question.query.get(question_id)
resp = Response(question=question.id, rating=request.form.get(question_id))
db.session.add(resp)
db.session.commit()
return render_template('feedback_received.html')
@app.route('/projects', methods=['GET', 'POST'])
def projects():
auth = session.get('auth')
if auth:
user: User = User.query.filter_by(email=auth.get('email')).first()
if not user:
session['auth'] = {}
return redirect('/login')
if request.method == 'POST':
name = request.form['projectName']
description = request.form['projectDescription']
pro = Project(name=name,description=description)
db.session.add(pro)
user.project.append(pro)
db.session.commit()
grid = user.get_project_grid(3)
return render_template('projects.html', projectgrid=grid)
return redirect('/login')
@app.route('/tasks/user')
@app.route('/tasks/user/<int:ref>', methods=['GET', 'POST'])
def user_tasks(ref=None):
auth = session.get('auth')
if auth:
user: User = User.query.filter_by(email=auth.get('email')).first()
if not user:
session['auth'] = {}
return redirect('/login')
if ref:
user: User = User.query.filter_by(id=ref).first()
if not user:
return abort(404)
if request.method == 'POST':
name = request.form.get('taskName')
description = request.form.get('taskDescription')
t_time = request.form.get('taskTime')
if not all((name, description, t_time)):
abort(404)
t_time = datetime.strptime(t_time,'%Y-%m-%dT%H:%M:%S.%fZ')
n_task: Task = Task(name=name, description=description, end_time=t_time)
user.tasks.append(n_task)
db.session.commit()
                return ('', 200)  # abort() only accepts error codes; mirror proj_tasks
else:
return render_template('tasks.html', data=user)
@app.route('/tasks/project/<int:ref>', methods=['GET', 'POST'])
def proj_tasks(ref):
auth = session.get('auth')
if auth:
user: User = User.query.filter_by(email=auth.get('email')).first()
if not user:
session['auth'] = {}
return redirect('/login')
project:Project = Project.query.filter_by(id=ref).first()
if not project:
return abort(404)
if request.method == 'POST':
name = request.form.get('taskName')
description = request.form.get('taskDescription')
t_time = request.form.get('taskDate')
if not all((name, description, t_time)):
abort(404)
t_time = datetime.strptime(t_time,'%Y-%m-%dT%H:%M:%S.%fZ')
n_task: Task = Task(name=name, description=description, end_time=t_time)
project.tasks.append(n_task)
user.tasks.append(n_task)
db.session.commit()
                return ('', 200)
else:
return render_template('tasks.html', data=project)
@app.route('/tasks/task/<int:ref>', methods=['GET', 'POST'])
def task_tasks(ref):
auth = session.get('auth')
if auth:
user: User = User.query.filter_by(email=auth.get('email')).first()
if not user:
session['auth'] = {}
return redirect('/login')
task:Task = Task.query.filter_by(id=ref).first()
if not task:
return abort(404)
if request.method == 'POST':
name = request.form.get('taskName')
description = request.form.get('taskDescription')
t_time = request.form.get('taskDate')
if not all((name, description, t_time)):
abort(404)
t_time = datetime.strptime(t_time,'%Y-%m-%dT%H:%M:%S.%fZ')
n_task: Task = Task(name=name, description=description, end_time=t_time)
db.session.add(n_task)
task.tasks.append(n_task)
db.session.commit()
user.tasks.append(n_task)
db.session.commit()
print(task, task.tasks)
print(n_task, n_task.tasks)
                return ('', 200)
else:
print(task, task.tasks)
return render_template('tasks.html', data=task)
@app.route('/test', methods=['GET'])
def test():
return render_template('newQuestionnaire.html')
@app.route('/questionnaire/<int:ref>', methods=['GET', 'POST'])
def questionnaire(ref):
auth = session.get('auth')
if auth:
user: User = User.query.filter_by(email=auth.get('email')).first()
if not user:
session['auth'] = {}
return redirect('/login')
task:Task = Task.query.filter_by(id=ref).first()
if not task:
return abort(404)
if request.method == 'POST':
name = request.form.get('name')
if not name:
return abort(404)
quest = Questionnaire(name=name,task=task)
task.questionnaires.append(quest)
for key, value in request.form.items():
if not value or key == 'name':
continue
else:
quest.questions.append(Question(text=value,questionnaire=quest))
db.session.commit()
return render_template('newQuestionnaire.html')
@app.route('/logout', methods=['GET'])
def logout():
session.pop('auth')
return redirect(url_for('index'))
return app
if __name__ == '__main__':
app = create_app()
db.create_all(app=app)
app.run(host='localhost', port=3000, debug=True)
|
Python
| 290
| 36.889656
| 95
|
/turing.py
| 0.516017
| 0.510375
|
andrewjschuang/Turing
|
refs/heads/master
|
from flask_testing import TestCase
from models.shared import db
from models.model import User, Task, Project, Question, Response, Questionnaire
from turing import create_app
import unittest
class MyTest(TestCase):
def create_app(self):
config = {
'SQLALCHEMY_DATABASE_URI': 'sqlite:///test.db',
'TESTING': True,
'SECRET_KEY': 'secret',
'SQLALCHEMY_TRACK_MODIFICATIONS': True
}
return create_app(config)
def setUp(self):
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_nothing(self):
assert True
def test_user(self):
user = User(email='em', name='us', password='pass')
db.session.add(user)
db.session.commit()
assert user in db.session
def test_project(self):
project = Project(name='n',description='desc')
db.session.add(project)
db.session.commit()
assert project in db.session
def test_task(self):
task = Task(name='n', description='desc')
db.session.add(task)
db.session.commit()
assert task in db.session
def test_usr_add_tsk2_prj(self):
user = User(email='em', name='us', password='pass')
db.session.add(user)
db.session.commit()
project = Project(name='n',description='desc')
db.session.add(project)
user.project.append(project)
db.session.commit()
project: Project= User.query.filter_by(email='em').first().project[0]
task = Task(name='n', description='desc')
db.session.add(task)
project.tasks.append(task)
db.session.commit()
assert user.project[0].tasks[0] == task
def test_sub_tasks(self):
task = Task(name='n', description='desc')
db.session.add(task)
assert task in db.session
s_task = Task(name='n', description='desc')
db.session.add(s_task)
assert task in db.session
db.session.commit()
task.tasks.append(s_task)
db.session.commit()
assert task.tasks[0] == s_task
def test_questionnaire(self):
questionnaire = Questionnaire(name='Questions')
db.session.add(questionnaire)
question0 = Question(text="ola ?", questionnaire=questionnaire)
question1 = Question(text="tudo bem ?", questionnaire=questionnaire)
questionnaire.questions.append(question0)
questionnaire.questions.append(question1)
for i in range(10):
question0.responses.append(Response(rating=5,question=question0))
for i in range(10):
question1.responses.append(Response(rating=5,question=question1))
rs = [x.rating for x in questionnaire.questions[0].responses]
assert sum(rs)/len(rs) == 5
rs = [x.rating for x in questionnaire.questions[1].responses]
assert sum(rs)/len(rs) == 5
if __name__ == '__main__':
unittest.main()
|
Python
| 113
| 26.168142
| 79
|
/test.py
| 0.596091
| 0.588599
|
andrewjschuang/Turing
|
refs/heads/master
|
functionalities = {
'Login': 'Login page',
'Feedback': 'This feedback form',
'Todo': 'To do module',
'Projects': 'Anything related to projects',
'Code': 'Code editor',
'Forum': 'The forum',
'Profile': 'Your profile page',
}
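# Hypothetical usage (added for illustration only): render the feedback
# form's dropdown options from this map.
# for key, description in functionalities.items():
#     print(key, '-', description)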
|
Python
| 9
| 27
| 47
|
/functionalities.py
| 0.59127
| 0.59127
|
aejontargaryen/conceal-bot
|
refs/heads/master
|
import requests
import json
import time
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from poolModels import pool, poolBase
engine = create_engine('sqlite:///poolData.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
poolBase.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
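# Illustrative sketch (added) of the staging behaviour described above:
# session.add(obj)    # staged in memory only
# session.commit()    # persisted to poolData.db
# session.rollback()  # discards anything staged since the last commit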
session = DBSession()
# Insert a Person in the person table
new_pool = pool(url='http://pool.conceal.network/api/live_stats', name='Official Pool', type="normal", poolurl='https://pool.conceal.network')
session.add(new_pool)
new_pool = pool(url='https://ccx.scecf.org:21001/live_stats', name='SCECF', type="normal", poolurl='https://ccx.scecf.org')
session.add(new_pool)
new_pool = pool(url='https://ccx.bluerockpools.net:8119/live_stats', name='Blue Rock Pool', type="normal", poolurl='https://ccx.bluerockpools.net')
session.add(new_pool)
new_pool = pool(url='http://minexmr24.ru:8124/live_stats', name='CCX Майнинг пул', type="normal", poolurl='http://ccx.minexmr24.ru')
session.add(new_pool)
new_pool = pool(url='https://ccx.go-mine.it/api/pool/stats', name='go mine it!', type="node", poolurl='https://ccx.go-mine.it')
session.add(new_pool)
new_pool = pool(url='https://api.ccx.heigh-ho.funkypenguin.co.nz/live_stats', name='Funky Penguin', type="normal", poolurl='https://ccx.heigh-ho.funkypenguin.co.nz')
session.add(new_pool)
new_pool = pool(url='https://conceal.herominers.com/api/stats', name='herominers', type="normal", poolurl='https://conceal.herominers.com')
session.add(new_pool)
new_pool = pool(url='https://ccx.thepiratemine.nl:2890/live_stats', name='ThePirateMine', type="normal", poolurl='https://ccx.thepiratemine.nl')
session.add(new_pool)
session.commit()
|
Python
| 41
| 52.731709
| 165
|
/poolSetup.py
| 0.749093
| 0.739564
|
aejontargaryen/conceal-bot
|
refs/heads/master
|
import random
import requests
import sys
import discord
import binascii
import json
from collections import deque
from jsonrpc_requests import Server
from models import Transaction, TipJar
config = json.load(open('config.json'))
class CCXServer(Server):
def dumps(self, data):
data['password'] = config['rpc_password']
return json.dumps(data)
rpc = CCXServer("http://{}:{}/json_rpc".format(config['rpc_host'], config['rpc_port']))
daemon = CCXServer("http://{}:{}/json_rpc".format(config['daemon_host'], config['daemon_port']))
CONFIRMED_TXS = []
def get_supply():
lastblock = daemon.getlastblockheader()
bo = daemon.f_block_json(hash=lastblock["block_header"]["hash"])
return float(bo["block"]["alreadyGeneratedCoins"])/1000000
def format_hash(hashrate):
i = 0
byteUnits = [" H", " KH", " MH", " GH", " TH", " PH"]
while (hashrate > 1000):
hashrate = hashrate / 1000
i = i+1
return "{0:,.2f} {1}".format(hashrate, byteUnits[i])
def gen_paymentid(address):
rng = random.Random(address+config['token'])
length = 32
chunk_size = 65535
chunks = []
while length >= chunk_size:
chunks.append(rng.getrandbits(chunk_size * 8).to_bytes(chunk_size, sys.byteorder))
length -= chunk_size
if length:
chunks.append(rng.getrandbits(length * 8).to_bytes(length, sys.byteorder))
result = b''.join(chunks)
return "".join(map(chr, binascii.hexlify(result)))
def get_deposits(session):
# get the current block height
# we only want to insert tx after 10 blocks from the tx
data = daemon.getlastblockheader()
height = int(data["block_header"]["height"])
print("INFO: Current blockchain height is {}".format(height))
# scan for deposits
print("scanning the blockchain for deposits")
print("getting list of payment id's in the tipjar database")
allPID = session.query(TipJar).all()
for jar in allPID:
currentPID = jar.paymentid
print("INFO: checking PID {}".format(currentPID))
params = {"payment_id": currentPID}
data = rpc.get_payments(params)
#go through each transaction and add it to the confirmed-transactions array
for tx in data['payments']:
unlockWindow = int(tx["block_height"]) + 10
if tx['tx_hash'] in CONFIRMED_TXS: # if its already there, ignore it
continue
if unlockWindow < height: # its a confirmed and unlocked transaction
CONFIRMED_TXS.append({'transactionHash': tx['tx_hash'],'amount': tx['amount'], 'ready':True, 'pid':currentPID})
print("CONF: confirmed tx {} for {} ccx at block {}".format(tx['tx_hash'],tx['amount'],tx['block_height']))
else:
toUnlock = unlockWindow - height
print("UNCF: unconfirmed tx {} for {} ccx will unlock in {} blocks".format(tx['tx_hash'],tx['amount'],toUnlock))
for trs in list(CONFIRMED_TXS): #iterate over a snapshot: items are removed from CONFIRMED_TXS below
processed = session.query(Transaction).filter(Transaction.tx == trs['transactionHash']).first()
amount = 0
print("INFO: looking at tx: " + trs['transactionHash'])
if processed: # done already, lets ignore and remove it from the array
print("INFO: already processed: " + trs['transactionHash'])
CONFIRMED_TXS.remove(trs)
continue
likestring = trs['pid']
balance = session.query(TipJar).filter(TipJar.paymentid.contains(likestring)).first() #get the balance from that PID
print("INFO: Balance for pid {} is: {}".format(likestring,balance))
if not balance:
print("user does not exist!")
continue
amount = trs['amount']
change = 0
if trs['pid']==balance.paymentid: # money entering tipjar, add to user balance
print("UPDATE: deposit of {} to PID {}".format(amount,balance.paymentid))
change += amount
try:
balance.amount += change
except:
print("no balance, setting balance to: {}".format(change))
balance.amount = change
print("new balance: {}".format(balance.amount))
session.commit()
if balance:
nt = Transaction(trs['transactionHash'], change, trs['pid'])
CONFIRMED_TXS.remove(trs)
yield nt
def get_fee(amount):
return 100
def build_transfer(amount, transfers, balance):
print("SEND PID: {}".format(balance.paymentid[0:58] + balance.withdraw))
params = {
'fee': get_fee(amount),
'paymentId': balance.paymentid[0:58] + balance.withdraw,
'mixin': 3,
'destinations': transfers
}
return params
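# Illustrative call (added; the address is a placeholder and amounts are in
# atomic units):
# transfers = [{'amount': 1000000, 'address': 'ccx7...'}]
# result = rpc.transfer(build_transfer(1000000, transfers, balance))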
REACTION_AMP_CACHE = deque([], 500)
def reaction_tip_lookup(message):
for x in REACTION_AMP_CACHE:
if x['msg'] == message:
return x
def reaction_tip_register(message, user):
msg = reaction_tip_lookup(message)
if not msg:
msg = {'msg': message, 'tips': []}
REACTION_AMP_CACHE.append(msg)
msg['tips'].append(user)
return msg
def reaction_tipped_already(message, user):
msg = reaction_tip_lookup(message)
if msg:
return user in msg['tips']
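# Illustrative flow (added): register a reaction tip at most once per user.
# if not reaction_tipped_already(message_id, user_id):
#     reaction_tip_register(message_id, user_id)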
|
Python
| 152
| 34.835526
| 143
|
/utils.py
| 0.612264
| 0.603084
|
aejontargaryen/conceal-bot
|
refs/heads/master
|
import asyncio
import discord
from discord.ext.commands import Bot, Context
import requests
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from poolModels import pool, poolBase
from models import Wallet, TipJar, Base, Transaction
from utils import config, format_hash, gen_paymentid, rpc, daemon, \
get_deposits, get_fee, build_transfer, get_supply, \
reaction_tip_register, reaction_tipped_already
HEADERS = {'Content-Type': 'application/json'}
### DATABASE SETUP ###
engine = create_engine('sqlite:///ccxbot.db')
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
### POOL DATABASE SETUP ###
poolEngine = create_engine('sqlite:///poolData.db')
poolBase.metadata.create_all(poolEngine)
poolSession = sessionmaker(bind=poolEngine)
session2 = poolSession()
client = Bot(
description="{} Discord Bot".format(config['symbol']),
command_prefix=config['prefix'],
pm_help=False)
@client.event
async def on_member_join(member):
await send_join_pm(member, client)
async def wallet_watcher():
await client.wait_until_ready()
while not client.is_closed:
for tx in get_deposits(session):
session.add(tx)
try:
session.commit()
except:
session.rollback()
balance = session.query(TipJar).filter(TipJar.paymentid == tx.paymentid).first()
if not balance: # don't announce withdrawals from the jar (basically tips)
continue # a bare return here would kill the watcher task for good
good_embed = discord.Embed(title="Deposit Recieved!",colour=discord.Colour(0xD4AF37))
good_embed.description = "Your deposit of {} {} has now been credited.".format(tx.amount/config['units'], config['symbol'])
print("TRANSACTION PID IS: " + tx.paymentid)
good_embed.add_field(name="New Balance", value="{0:,.2f}".format(balance.amount/config['units']))
user = await client.get_user_info(str(balance.userid))
try:
await client.send_message(user, embed=good_embed)
except:
continue
await asyncio.sleep(119) # just less than the block time
client.loop.create_task(wallet_watcher())
@client.event
async def on_ready():
print("Bot online!")
### TEST COMMANDS ###
# test whether we can get a list of users online,
# then add a rain function that sends money from a separate wallet to everyone,
# and test sending a welcome DM
async def send_join_pm(member, client):
"""
Sends a welcome private message to joined members.
"""
if member.bot:
return
currently_online = ""
for m in member.server.members:
if str(m.status) != "offline":
if discord.utils.get(m.server.roles, name="core team") in m.roles:
currently_online += ":white_small_square: " + m.mention + "\n"
await client.send_message(member,
"**Hey, " + member.name + "! Welcome to the Conceal Discord! :)**\n\n"
"If you're new here and have some questions head over to the **#faq** channel for an introduction to the project and answers to common questions.\n"
"You can also head over to the **#annoucements##** channel and see the latest news on where we are and what we are doing.\n"
"If you have more questions, look for one the admins or devs\n\n"
"**Devs currently online:**\n\n%s\n\n"
"You can also use this bot to get more information:\n"
"Use the command `.help` to get list of commands.\n"
"You can also see current network information with `.stats` or other specific commands like `.hashrate`, `.height`, `.difficulty`, and `.supply`\n"
"Don't forget to register your wallet address with the bot with the command `.registerwallet` so you can recieve tips.\n"
"If you want to send tips then type `.deposit` after you register your wallet address and transfer some funds to your TipJar.\n"
% currently_online)
@client.command(pass_context=True)
async def price(ctx, exchange=None):
err_embed = discord.Embed(title=":x:Error:x:", colour=discord.Colour(0xf44242))
coindata = requests.get("https://maplechange.com/api/v2/tickers/ccxbtc.json")
btc = requests.get("https://www.bitstamp.net/api/ticker/")
try:
to_json = coindata.json()
except ValueError:
err_embed.description = "The MapleChange API is down"
await client.say(embed = err_embed)
return
coindata_embed = discord.Embed(title="Conceal: MapleChange", url="https://maplechange.com/markets/ccxbtc", description="Current pricing of CCX", color=0x7F7FFF)
coindata_embed.set_thumbnail(url=config['logo_url'])
coindata_embed.add_field(name="Sell", value="{0:,.0f} sats".format(round(float(to_json['ticker']['sell'])*100000000)), inline=True)
coindata_embed.add_field(name="Current", value="{0:,.0f} sats".format(round(float(to_json['ticker']['last'])*100000000)), inline=True)
coindata_embed.add_field(name="High", value="{0:,.0f} sats".format(round(float(to_json['ticker']['high'])*100000000)), inline=True)
coindata_embed.add_field(name="{}-USD".format(config['symbol']),
value="${0:,.4f} USD".format(float(to_json['ticker']['sell'])*float(btc.json()['last'])), inline=True)
coindata_embed.add_field(name="BTC-USD", value="${0:,.2f} USD".format(float(btc.json()['last'])), inline=True)
await client.say(embed=coindata_embed)
### NETWORK COMMANDS ###
@client.command()
async def hashrate():
""" .hashrate - Returns network hashrate """
data = daemon.getlastblockheader()
hashrate = format_hash(float(data["block_header"]["difficulty"]) / 120)
await client.say("The current global hashrate is **{}/s**".format(hashrate))
@client.command()
async def difficulty():
""" .difficulty - Returns network difficulty """
data = daemon.getlastblockheader()
difficulty = float(data["block_header"]["difficulty"])
await client.say("The current difficulty is **{0:,.0f}**".format(difficulty))
@client.command()
async def height():
""" .height - Returns the current blockchain height """
data = daemon.getlastblockheader()
height = int(data["block_header"]["height"])
await client.say("The current block height is **{:,}**".format(height))
@client.command()
async def supply():
""" .supply - Returns the current circulating supply """
supply = get_supply()
await client.say("The current circulating supply is **{:0,.2f}** {}".format(supply, config['symbol']))
@client.command()
async def stats():
""" .stats - Returns all network stats """
data = daemon.getlastblockheader()
hashrate = format_hash(float(data["block_header"]["difficulty"]) / 120)
height = int(data["block_header"]["height"])
deposits = int(data["block_header"]["deposits"]) / 1000000
supply = get_supply()
difficulty = float(data["block_header"]["difficulty"])
stats_embed=discord.Embed(title="Conceal", url="https://github.com/TheCircleFoundation/", description="Complete Network Stats", color=0x7F7FFF)
stats_embed.set_thumbnail(url=config['logo_url'])
hashFromPools = 0
allPools = session2.query(pool).all()
for p in allPools:
hashFromPools = hashFromPools + p.hashrate
stats_embed.add_field(name="Hashrate (from Pools)", value="{}KH/s".format(hashFromPools/1000))
stats_embed.add_field(name="Hashrate (from Difficulty)", value="{}/s".format(hashrate))
stats_embed.add_field(name="Height", value="{:,}".format(height))
stats_embed.add_field(name="Difficulty", value="{0:,.0f}".format(difficulty))
stats_embed.add_field(name="Circulating Supply", value="{:0,.2f} CCX".format(supply))
stats_embed.add_field(name="Deposits", value="{:0,.2f}".format(deposits))
stats_embed.set_footer(text="Powered by the Conceal Discord bot. Message @katz for any issues.")
await client.say(embed=stats_embed)
@client.command()
async def pools():
""" .pools - Get a list of pools and current stats """
stats_embed=discord.Embed(title="Conceal", url="https://github.com/TheCircleFoundation/", description="Mining Pool Stats", color=0x7F7FFF)
stats_embed.set_thumbnail(url=config['logo_url'])
hashFromPools = 0
allPools = session2.query(pool).all()
for p in allPools:
hashFromPools = hashFromPools + p.hashrate
stats_embed.add_field(name=p.name, value=p.poolurl, inline=False)
stats_embed.add_field(name="Hashrate", value="{} KH/s".format(p.hashrate/1000))
stats_embed.add_field(name="Miners", value="{:,}".format(p.miners))
stats_embed.add_field(name="Hashrate (from Pools)", value="{}KH/s".format(hashFromPools/1000))
stats_embed.set_footer(text="Powered by the Conceal Discord bot. Message @katz for any issues.")
await client.say(embed=stats_embed)
### WALLET COMMANDS ###
@client.command(pass_context=True)
async def members(ctx):
members = ""
allID = session.query(Wallet).all()
await client.say("List of members:")
for wallet in allID:
memberName = discord.utils.get(client.get_all_members(), id=str(wallet.userid))
members = members + " @" + str(memberName)
await client.say(members)
@client.command(pass_context=True)
async def registerwallet(ctx, address):
""" .registerwallet <addr> - Register your wallet in the database """
err_embed = discord.Embed(title="Error", colour=discord.Colour(0xf44242))
good_embed = discord.Embed(title="{}'s Wallet".format(ctx.message.author.name),colour=discord.Colour(0xD4AF37))
if address is None:
err_embed.description = "Please provide an address"
await client.send_message(ctx.message.author, embed = err_embed)
return
exists = session.query(Wallet).filter(Wallet.userid == ctx.message.author.id).first()
addr_exists = session.query(Wallet).filter(Wallet.address == address).first()
if exists:
good_embed.title = "Your wallet exists!"
good_embed.description = "```{}``` use `{}updatewallet <addr>` to change".format(exists.address, config['prefix'])
await client.send_message(ctx.message.author, embed = good_embed)
return
if addr_exists:
err_embed.description = "Address already registered by another user!"
await client.send_message(ctx.message.author, embed = err_embed)
return
elif not exists and len(address) == 98:
w = Wallet(address, ctx.message.author.id,ctx.message.id)
session.add(w)
session.commit()
good_embed.title = "Successfully registered your wallet"
good_embed.description = "```{}```".format(address)
await client.send_message(ctx.message.author, embed = good_embed)
pid = gen_paymentid(address)
balance = session.query(TipJar).filter(TipJar.paymentid == pid).first()
if not balance:
t = TipJar(pid, ctx.message.author.id, 0)
session.add(t)
else:
balance.paymentid = pid
session.commit()
tipjar_addr = "ccx7Wga6b232eSVfy8KQmBjho5TRXxX8rZ2zoCTyixfvEBQTj1g2Ysz1hZKxQtw874W3w6BZkMFSn5h3gUenQemZ2xiyyjxBR7"
good_embed.title = "Your Tipjar Info"
good_embed.description = "Deposit {} to start tipping! ```transfer 3 {} <amount> -p {}```".format(config['symbol'], tipjar_addr, pid)
balance = session.query(TipJar).filter(TipJar.paymentid == pid).first()
await client.send_message(ctx.message.author, embed = good_embed)
return
elif len(address) > 98:
err_embed.description = "Your wallet address must be 98 characters long; your entry was too long"
elif len(address) < 98:
err_embed.description = "Your wallet address must be 98 characters long; your entry was too short"
await client.say(embed = err_embed)
@registerwallet.error
async def registerwallet_error(error, ctx):
await client.say("Please provide an address: .registerwallet <addr>.")
@client.command(pass_context=True)
async def updatewallet(ctx, address):
""" .updatewallet <addr> - Changes your registred wallet address """
err_embed = discord.Embed(title="Error", colour=discord.Colour(0xf44242))
if address is None:
err_embed.description = "Please provide an address!"
await client.send_message(ctx.message.author, embed=err_embed)
return
address = address.strip()
good_embed = discord.Embed(title="{}'s Updated Wallet".format(ctx.message.author.name),colour=discord.Colour(0xD4AF37))
exists = session.query(Wallet).filter(Wallet.userid == ctx.message.author.id).first()
if not exists:
err_embed.description = "You haven't registered a wallet!"
await client.send_message(ctx.message.author, embed=err_embed)
return
addr_exists = session.query(Wallet).filter(Wallet.address == address).first()
if addr_exists:
err_embed.description = "Address already registered by another user!"
await client.send_message(ctx.message.author, embed = err_embed)
return
elif exists and len(address) == 98:
old_pid = gen_paymentid(exists.address)
old_balance = session.query(TipJar).filter(TipJar.paymentid == old_pid).first()
exists.address = address
pid = gen_paymentid(address)
old_balance.paymentid = pid
good_embed.title = "Successfully updated your wallet"
good_embed.description = "```{}```".format(address)
session.commit()
await client.send_message(ctx.message.author, embed = good_embed)
tipjar_addr = "ccx7Wga6b232eSVfy8KQmBjho5TRXxX8rZ2zoCTyixfvEBQTj1g2Ysz1hZKxQtw874W3w6BZkMFSn5h3gUenQemZ2xiyyjxBR7"
good_embed.title = "Your Tipjar Info"
good_embed.description = "Deposit {} to start tipping! ```transfer 3 {} <amount> -p {}```".format(config['symbol'], tipjar_addr, pid)
await client.send_message(ctx.message.author, embed = good_embed)
good_embed.title = "Balance Update"
good_embed.url = ""
good_embed.description = "New Balance: `{:0,.2f}` {1}".format(old_balance.amount / config['units'], config['symbol'])
await client.send_message(ctx.message.author, embed = good_embed)
return
elif len(address) > 98:
err_embed.description = "Your wallet address must be 98 characters long; your entry was too long"
elif len(address) < 98:
err_embed.description = "Your wallet address must be 98 characters long; your entry was too short"
await client.say(embed=err_embed)
@updatewallet.error
async def updatewallet_error(error, ctx):
await client.say("Please provide an address: .updatewallet <addr>")
@client.command(pass_context=True)
async def wallet(ctx, user: discord.User=None):
""" .wallet - Returns your registered wallet address """
err_embed = discord.Embed(title=":x:Error:x:", colour=discord.Colour(0xf44242))
good_embed = discord.Embed(colour=discord.Colour(0xD4AF37))
if not user:
exists = session.query(Wallet).filter(Wallet.userid == ctx.message.author.id).first()
if not exists:
err_embed.description = "You haven't registered a wallet or specified a user!"
else:
good_embed.title = "Your wallet"
good_embed.description = "Here's your wallet {}! ```{}```".format(ctx.message.author.mention, exists.address)
await client.send_message(ctx.message.author, embed = good_embed)
return
else:
exists = session.query(Wallet).filter(Wallet.userid == user.id).first()
if not exists:
err_embed.description = "{} hasn't registered a wallet!".format(user.name)
else:
good_embed.title = "{}'s wallet".format(user.name)
good_embed.description = "```{}```".format(exists.address)
await client.send_message(ctx.message.author, embed = good_embed)
return
await client.send_message(ctx.message.author, embed = err_embed)
@client.command(pass_context=True)
async def deposit(ctx, user: discord.User=None):
""" .deposit - Get deposit information so you can start tipping """
err_embed = discord.Embed(title=":x:Error:x:", colour=discord.Colour(0xf44242))
good_embed = discord.Embed(title="Your Tipjar Info")
tipjar_addr = "ccx7Wga6b232eSVfy8KQmBjho5TRXxX8rZ2zoCTyixfvEBQTj1g2Ysz1hZKxQtw874W3w6BZkMFSn5h3gUenQemZ2xiyyjxBR7"
exists = session.query(Wallet).filter(Wallet.userid == ctx.message.author.id).first()
if exists:
pid = gen_paymentid(exists.address)
good_embed.description = "Deposit {} to start tipping! ,Send the funds you want to deposit to the address: ``{}`` (Pay to: in the GUI) and put ``{}`` in the Payment ID field. CLI users just send a transfer to the same address and payment ID.".format(config['symbol'], tipjar_addr, pid)
balance = session.query(TipJar).filter(TipJar.paymentid == pid).first()
if not balance:
t = TipJar(pid, ctx.message.author.id, 0)
session.add(t)
session.commit()
await client.send_message(ctx.message.author, embed = good_embed)
else:
err_embed.description = "You haven't registered a wallet!"
err_embed.add_field(name="Help", value="Use `{}registerwallet <addr>` before trying to tip!".format(config['prefix']))
await client.say(embed=err_embed)
@client.command(pass_context=True)
async def balance(ctx, user: discord.User=None):
""" .balance - PMs your tipjar balance """
err_embed = discord.Embed(title=":x:Error:x:", colour=discord.Colour(0xf44242))
good_embed = discord.Embed(title="Your Tipjar Balance is")
exists = session.query(Wallet).filter(Wallet.userid == ctx.message.author.id).first()
if exists:
pid = gen_paymentid(exists.address)
balance = session.query(TipJar).filter(TipJar.paymentid == pid).first()
if not balance:
balance = TipJar(pid, ctx.message.author.id, 0)
session.add(balance)
session.commit()
good_embed.description = "`{0:,.2f}` {1}".format(balance.amount / config['units'], config['symbol'])
good_embed.add_field(name="Withdrawal", value="You can tip yourself to withdraw CCX to your wallet")
await client.send_message(ctx.message.author, embed=good_embed)
else:
err_embed.description = "You haven't registered a wallet!"
err_embed.add_field(name="Help", value="Use `{}registerwallet <addr>` before trying to tip!".format(config['prefix']))
await client.say(embed=err_embed)
EMOJI_MONEYBAGS = "\U0001F4B8"
EMOJI_SOS = "\U0001F198"
EMOJI_ERROR = "\u274C"
@client.command(pass_context=True)
async def tip(ctx, amount, user: discord.User=None):
""" .tip <amount> <@user> - Tips the mentioned user(s) the specified amount """
await _tip(ctx, amount, None, None)
async def _tip(ctx, amount,
sender: discord.User=None,
receiver: discord.User=None):
err_embed = discord.Embed(title=":x:Error:x:", colour=discord.Colour(0xf44242))
good_embed = discord.Embed(title="You were tipped!", colour=discord.Colour(0xD4AF37))
request_desc = "Register with `{}registerwallet <youraddress>` to get started!".format(config['prefix'])
request_embed = discord.Embed(title="{} wants to tip you".format(ctx.message.author.name), description=request_desc)
if not sender: # regular tip
sender = ctx.message.author
if not receiver:
tipees = ctx.message.mentions
else:
tipees = [receiver, ]
try:
amount = int(round(float(amount)*config['units']))
except:
await client.say("Amount must be a number equal or greater than {}".format(10000 / config['units']))
return False
if amount <= 9999:
err_embed.description = "`amount` must be equal or greater than {}".format(10000 / config['units'])
await client.say(embed=err_embed)
return False
fee = get_fee(amount)
self_exists = session.query(Wallet).filter(Wallet.userid == sender.id).first()
if not self_exists:
err_embed.description = "You haven't registered a wallet!"
err_embed.add_field(name="Help", value="Use `{}registerwallet <addr>` before trying to tip!".format(config['prefix']))
await client.send_message(sender, embed=err_embed)
return False
pid = gen_paymentid(self_exists.address)
balance = session.query(TipJar).filter(TipJar.paymentid == pid).first()
if not balance:
t = TipJar(pid, sender.id, 0)
session.add(t)
session.commit()
err_embed.description = "You are not registered, please `{}deposit` to tip".format(config['prefix'])
await client.send_message(sender, embed=err_embed)
return False
if balance.amount < 0:
balance.amount = 0
session.commit()
err_embed.description = "Your balance was negative!"
await client.send_message(sender, embed=err_embed)
katz = discord.utils.get(client.get_all_members(), id='408875878328827916')
err_embed.title = "{} had a negative balance!!".format(sender.name)
err_embed.description = "PID: {}".format(pid)
await client.send_message(katz, embed=err_embed)
return False
if ((len(tipees)*(amount))+fee) > balance.amount:
err_embed.description = "Your balance is too low! Amount + Fee = `{}` {}".format(((len(tipees)*(amount))+fee) / config['units'], config['symbol'])
await client.add_reaction(ctx.message, EMOJI_ERROR)
await client.send_message(sender, embed=err_embed)
return False
destinations = []
actual_users = []
failed = 0
for user in tipees:
user_exists = session.query(Wallet).filter(Wallet.userid == user.id).first()
if user_exists:
destinations.append({'amount': amount, 'address': user_exists.address})
if user_exists.userid != sender.id: # multitip shouldn't tip self.
actual_users.append(user)
else:
failed = failed+1
await client.add_reaction(ctx.message, EMOJI_SOS)
try:
await client.send_message(user, embed = request_embed)
except:
continue
if len(destinations) == 0:
await client.add_reaction(ctx.message, EMOJI_SOS)
return False
transfer = build_transfer(amount, destinations, balance)
print(transfer)
result = rpc.transfer(transfer)
print(result)
await client.add_reaction(ctx.message, EMOJI_MONEYBAGS)
balance.amount -= ((len(actual_users)*amount)+fee)
tx = Transaction(result['tx_hash'], (len(actual_users)*amount)+fee, balance.paymentid)
session.add(tx)
session.commit()
good_embed.title = "Tip Sent!"
good_embed.description = (
"Sent `{0:,.2f}` {1} to {2} users! With Transaction Hash ```{3}```"
.format(amount / config['units'],
config['symbol'],
len(actual_users),
result['tx_hash']))
good_embed.url = (
"http://www.example.com/#?hash={}#blockchain_transaction"
.format(result['tx_hash']))
good_embed.add_field(name="New Balance", value="`{:0,.2f}` {}".format(balance.amount / config['units'], config['symbol']))
good_embed.add_field(name="Transfer Info", value="Successfully sent to {0} users. {1} failed.".format(len(actual_users), failed))
try:
await client.send_message(sender, embed=good_embed)
except:
pass
for user in actual_users:
good_embed = discord.Embed(title="You were tipped!", colour=discord.Colour(0xD4AF37))
good_embed.description = (
"{0} sent you `{1:,.2f}` {2} with Transaction Hash ```{3}```"
.format(sender.mention,
amount / config['units'],
config['symbol'],
result['tx_hash']))
good_embed.url = (
"http://www.example.com/#?hash={}#blockchain_transaction"
.format(result['tx_hash']))
try:
await client.send_message(user, embed=good_embed)
except:
continue
return True
client.run(config['token'])
|
Python
| 550
| 44.098183
| 293
|
/bot.py
| 0.642961
| 0.632277
|
DyanLi/Design-of-Algorithms
|
refs/heads/master
|
#coding=utf-8
'''
use re to count word frequencies
(case-insensitive: 'alice' == 'ALICE')
Changes from the previous version use these functions:
1. with open(...) as f:
2. content = f.read()
3. allwords = finditer( ... content ... )
(finditer returns an iterator, findall returns a list)
4. all_lower_words = imap(str.lower, allwords)
5. count = Counter(all_lower_words)
(much better than building an empty dict by hand)
'''
import re,math
import itertools
import collections
from operator import itemgetter
with open('alice.txt',"rt") as f:
content=f.read()
allwords=re.findall(r'[a-zA-Z]+',content)
#findall returns a list of strings; finditer would yield match objects, so imap(str.lower, ...) is applied to findall's output here
all_lower_words = itertools.imap(str.lower, allwords)
count = collections.Counter(all_lower_words)
#dict sort method 1: change key and value
#cntSorted=dict((v,k) for k,v in cnt.iteritems())
#cntSorted.sort()
#important and not be neglected
#print list(cntSorted.iteritems())[-10:]
#dict sort method 2: use lambda
#cntSorted=sorted(count.iteritems(),key=lambda d:d[1],reverse=True)
#print cntSorted[0:10]
#dict sort method 3: use operator
cntSorted=sorted(count.iteritems(),key=itemgetter(1),reverse=True)
print cntSorted[0:10]
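#added example (illustrative): itemgetter(1) sorts by the count, e.g.
#sorted({'a': 2, 'the': 5}.items(), key=itemgetter(1), reverse=True)
#-> [('the', 5), ('a', 2)]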
#draw a pic
import matplotlib.pyplot as plt
#plt.bar(range(20), [cntSorted[i][1] for i in range(20)])
#plt.xticks(range(20), [cntSorted[i][0] for i in range(20)],rotation=30)
length=len(cntSorted)
plt.plot(range(length), [math.log(cntSorted[i][1],10) for i in range(length)])
plt.title(u"Word Frequency Analysis (Zipf's law)")
plt.show()
|
Python
| 57
| 24.456141
| 78
|
/HW2015/HW1.py
| 0.726207
| 0.702759
|
DyanLi/Design-of-Algorithms
|
refs/heads/master
|
#coding: utf-8
'''
Huarongdao (Klotski) sliding-block puzzle
ver: 3.3
a reconstruction of ver 3.0
draws every state as a tree
by Dyan
Dec 2015
'''
from __future__ import division
import cairo
import colorsys
import copy
import collections
class Block(object):
u'''The Huarongdao puzzle has 10 blocks; the board's top-left cell is (1,1) and its bottom-right is (4,5)'''
def __init__ (self, width, height, name, kind ,x=-1 ,y=-1):
self.width = width
self.height = height
self.name = name
self.kind = kind #1:cao 2:guanyu 3:generals 4:pawns
self.x=x
self.y=y
def move(self, dir):
#move the Block based on dir
if dir=="U": #y-1
if self.y==1:
return False
self.y -= 1
return True
if dir=="D": #y+1
if self.y + self.height > 5:
return False
self.y += 1
return True
if dir=="L": #x-1
if self.x==1:
return False
self.x -= 1
return True
if dir=="R": #x+1
if self.x + self.width > 4:
return False
self.x += 1
return True
assert False
def draw(self,px,py,ctx):
#draw the Block
h = (hash(self.name) & 0x0f)/16.0
r,g,b = colorsys.hsv_to_rgb(h, .75, .9)
ctx.set_source_rgb(r,g,b)
ctx.rectangle( px+self.x, py+self.y ,self.width-.1,self.height-.1)
ctx.fill_preserve()
#stroke the edges
ctx.set_line_width(0.03)
ctx.set_source_rgb(0,0,0)
ctx.stroke()
#give a text
ctx.select_font_face("u微软雅黑", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD)
ctx.set_font_size(0.3)
x_bearing, y_bearing, width, height = ctx.text_extents (self.name)[:4]
ctx.move_to( px+self.x + self.width / 2 - width / 2 - x_bearing, py+self.y + self.height/2 - height / 2 - y_bearing)
ctx.show_text(self.name)
class Board(object):
u'''The game board'''
def __init__ (self,pos):
self.items = [
Block(2,2,u"曹操",1,pos[0][0],pos[0][1]),
Block(2,1,u"关羽",2,pos[1][0],pos[1][1]),
Block(1,2,u"张飞",3,pos[2][0],pos[2][1]),
Block(1,2,u"马超",3,pos[3][0],pos[3][1]),
Block(1,2,u"赵云",3,pos[4][0],pos[4][1]),
Block(1,2,u"黄忠",3,pos[5][0],pos[5][1]),
Block(1,1,u"卒1",4,pos[6][0],pos[6][1]),
Block(1,1,u"卒2",4,pos[7][0],pos[7][1]),
Block(1,1,u"卒3",4,pos[8][0],pos[8][1]),
Block(1,1,u"卒4",4,pos[9][0],pos[9][1])
]
print "OK?", self.is_ok()
def __str__(self):
return self.pattern()
def copy(self):
return copy.deepcopy(self)
def pattern(self):
#use a 2-D array to uniquely represent the board state
mask = [ [0]*5 for i in range(4) ]
for i, b in enumerate(self.items):
for x in range(b.width):
for y in range(b.height):
mask[b.x+x-1][b.y+y-1]= b.kind
#print mask
#flatten the array into a string; each string uniquely identifies one board state
s = ""
for j in range(4):
s += "".join("%d" % n for n in mask[j])
return s
def is_ok(self):
#check that the placement is legal (blocks don't overlap)
return self.pattern().count('0')==2
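#Added note: the 10 blocks cover 18 of the 20 cells on the 4x5 board, so a
#legal (non-overlapping) placement always leaves exactly two '0' cells.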
def is_done(self):
#check whether the board is solved
return self.items[0].x==2 and self.items[0].y==4
def draw(self,px,py,ctx):
#draw current board
#draw every block based on self.pos
for i, b in enumerate(self.items):
b.draw(px,py,ctx)
#draw the board boundary
ctx.set_line_width(0.1)
ctx.rectangle( px+1 -.1, py+1 -.1 ,4 +.1,5 +.1 )
ctx.stroke()
def nextstate(self):
u'''Enumerate the legal moves and return the resulting new boards'''
result = []
for i ,b in enumerate(self.items):
for m in ("U","D","R","L"):
new_board = self.copy()
if new_board.items[i].move(m):
if new_board.is_ok():
result.append(new_board)
return result
def bfs(board):
u'''Breadth-first search for a solution'''
open_list = [board] #Board() instances awaiting expansion
close_list = [] #instances already expanded
searched = {} #discovered nodes; dict k: pattern, v: Board() instance
distance = {} #dict k: pattern, v: number of moves
searched[board.pattern()] = board
distance[board.pattern()] = 0
prev = {} #dict k: pattern, v: parent node's pattern
prev[board.pattern()] = None
found = False
#iteration counter
cnt = 0
while open_list and not found:
#pop a node from open_list and move it into close_list
v = open_list.pop(0)
close_list.append(v)
#v 's distance
vd = distance[v.pattern()]
#print progress every 1000 processed nodes
cnt+=1
if cnt%1000==0:
print "Iter %d, Len(open)=%d, len(close)=%d, Dist=%d" % (cnt, len(open_list), len(close_list), vd)
# expand all legal successor boards of this node
for i, nv in enumerate(v.nextstate()):
np = nv.pattern()
#if this is the solution, stop searching
if nv.is_done():
searched[np] = nv
prev[np] = v.pattern()
found = True
break
#skip states we have already visited
if np in searched:
continue
prev[np] = v.pattern()
open_list.append(nv)
searched[np] = nv
distance[np] = vd + 1
if found:
prev[nv.pattern()] = v.pattern()
close_list.append(nv)
distance[nv.pattern()] = vd + 1
result = [nv]
while prev[nv.pattern()]!=None:
nv = searched[ prev[nv.pattern()] ]
result.append(nv)
print "Done!"
drawtree(close_list,distance,prev,result[::1])
return result[::-1]
return None
def drawtree(close_list,distance,prev,result):
#count how many nodes sit at each depth level
alldst = []
for s in close_list:
alldst.append(distance[s.pattern()])
laycnt = collections.Counter(alldst)
#draw the tree of board states
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,6000,7000)
ctx = cairo.Context(surface)
#ctx.translate(3000,0)
ctx.scale(50, 50)
#x[d] tracks which child of layer d the current state is
x = [0]*20
#s横向的位置
sx = {}
while close_list:
s = close_list.pop(0)
sp = s.pattern()
sd = distance[sp]
factor = 120/(laycnt[sd]+1)
#record the horizontal position of s
x[sd]+=1
sx[sp] = x[sd]*factor
s.draw(sx[sp] ,sd*10, ctx)
if prev[sp]:
ctx.set_line_width(.1)
ctx.move_to(sx[sp]+3,sd*10+1)
ctx.line_to(sx[ prev[sp] ]+3,(sd-1)*10+6)
ctx.stroke()
for i, s in enumerate(result):
sp = s.pattern()
sd = distance[sp]
ctx.set_line_width(0.2)
ctx.set_source_rgb(1,1,0.2)
if prev[sp]:
ctx.move_to(sx[sp]+3,sd*10+1)
ctx.line_to(sx[ prev[sp] ]+3,(sd-1)*10+6)
ctx.stroke()
surface.write_to_png("t1.png")
if __name__ == "__main__":
# starting piece positions
# "Heng Dao Li Ma" (the classic opening)
#init = [(2,1),(2,3),
# (1,1),(1,3),(4,1),(4,3),
# (1,5),(2,4),(3,4),(4,5)]
# easy
init = [(1,3),(3,3),
(1,1),(2,1),(3,1),(4,1),
(1,5),(2,5),(3,4),(4,5)]
# easiest
#init = [(1,4),(1,3),
# (1,1),(2,1),(3,1),(4,1),
# (3,3),(3,5),(4,4),(4,5)]
board=Board(init)
#board.draw()
result = bfs(board)
print "Find a solution after %d moves." % (len(result)-1)
|
Python
| 272
| 26.481617
| 124
|
/HW2015/HW3-dy.py
| 0.486515
| 0.449132
|
DyanLi/Design-of-Algorithms
|
refs/heads/master
|
#coding: utf-8
'''
HW2.py solves the eight queens puzzle;
change the `size` variable to resize the board.
Changes from the previous version:
1. draw pieces with the SOURCE operator instead of XOR
2. long strings can be written as """...."""
3. str.format uses {} placeholders, so use %d here instead
'''
import itertools,cairo,math
size=8 #the size of the board
cnt=0 #number of solutions found
#check whether placing a queen at column nextX conflicts diagonally with the queens placed so far
def conflict(state, nextX):
nextY = len(state)
for i in range(nextY):
if abs(nextX-state[i])== nextY-i:
return True
return False
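#Added note: rows are fixed by index and columns are distinct by
#construction (itertools.permutations below), so conflict() only needs the
#diagonal test. E.g. state=[0], nextX=1 gives |1-0| == 1-0 -> conflict.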
def drawqueen(solution):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,size*100,size*100)
ctx=cairo.Context(surface)
#draw the board
for i in range(0,size):
for j in range(0,size):
if (i+j)%2==1:
ctx.rectangle(i*100,j*100,100,100)
ctx.fill()
#draw the pieces
ctx.set_line_width(10)
ctx.set_source_rgb(1, 1, 0.2)
#change1
ctx.set_operator(cairo.OPERATOR_SOURCE)
for i in range(size):
ctx.arc(solution[i]*100+50,i*100+50,35,0,2*math.pi)
ctx.stroke()
filename="chess"+str(cnt)+".png"
surface.write_to_png(filename)
#itertools.permutations already guarantees distinct rows and columns; filter out diagonal conflicts
for solve in itertools.permutations(range(size)):
flag=0
for i in range(1,size):
if conflict(solve[0:i],solve[i]):
break
else:
flag+=1
if flag==size-1:
cnt+=1
drawqueen(solve)
#write a TeX document so a PDF report can be generated
f=open("mkreport.tex",'w')
#change2
f.write("""\documentclass[twocolumn]{article}
\usepackage{graphicx}
\\title{A Report About Eight Queens Puzzle}\n
\\begin{document}
\maketitle\n
""")
#change3
for i in range(1,cnt+1):
f.write("""
\\begin{figure}[t]
\centering
\includegraphics[width=0.3\\textwidth]{chess%d.png}
\caption{Solution %d of the Eight Queens Puzzle}
\end{figure}\n""" % (i,i) )
if i%6==0:
f.write('\n\clearpage\n')
f.write('\n\end{document}')
f.close()
|
Python
| 88
| 21.636364
| 71
|
/HW2015/HW2.py
| 0.63253
| 0.599398
|
DyanLi/Design-of-Algorithms
|
refs/heads/master
|
#coding: utf-8
'''
Huarongdao (Klotski) sliding-block puzzle
ver: 1.0
the original version
by HF
Dec 2015
'''
from __future__ import division
import cairo
import colorsys
import copy
#define soldiers
#from IPython import embed
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed()
class block(object):
def __init__(self, w, h, name, kind, x=-1, y=-1):
self.x = x
self.y = y
self.w = w
self.h = h
self.name = name
self.kind = kind
def move(self, dir):
if dir=="U": #y-1
if self.y==1:
return False
self.y -= 1
return True
if dir=="D": #y+1
if self.y + self.h > 5:
return False
self.y += 1
return True
if dir=="L": #x-1
if self.x==1:
return False
self.x -= 1
return True
if dir=="R": #x+1
if self.x + self.w > 4:
return False
self.x += 1
return True
assert False
def draw(self, ctx):
h = (hash(self.name) & 0x0f)/16.0
r,g,b = colorsys.hsv_to_rgb(h, 0.6, .9)
ctx.set_source_rgb(r,g,b)
ctx.rectangle(self.x+0.05,self.y+0.05,self.w-0.1,self.h-0.1)
ctx.fill_preserve()
ctx.set_line_width(0.03)
ctx.set_source_rgb(0,0,0)
ctx.stroke()
ctx.select_font_face("u微软雅黑", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD)
ctx.set_font_size(0.3);
x_bearing, y_bearing, width, height = ctx.text_extents (self.name)[:4]
ctx.move_to( self.x + self.w/2 - width / 2 - x_bearing, self.y + self.h/2 - height / 2 - y_bearing)
ctx.show_text(self.name)
class board(object):
def __init__(self):
self.items = [
block(2,2,u"曹操",1,2,1),
block(2,1,u"关羽",3,2,3),
block(1,2,u"张飞",2,1,1),
block(1,2,u"马超",2,1,3),
block(1,2,u"赵云",2,4,1),
block(1,2,u"黄忠",2,4,3),
block(1,1,u"卒1",4,1,5),
block(1,1,u"卒2",4,2,4),
block(1,1,u"卒3",4,3,4),
block(1,1,u"卒4",4,4,5)
]
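# NOTE (added): the list below overrides the opening above with an easier test layout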
self.items = [
block(2,2,u"曹操",1,1,3),
block(2,1,u"关羽",3,1,5),
block(1,2,u"张飞",2,1,1),
block(1,2,u"马超",2,2,1),
block(1,2,u"赵云",2,3,1),
block(1,2,u"黄忠",2,4,1),
block(1,1,u"卒1",4,3,3),
block(1,1,u"卒2",4,4,3),
block(1,1,u"卒3",4,3,4),
block(1,1,u"卒4",4,4,4)
]
print "OK?", self.is_ok()
def copy(self):
return copy.deepcopy(self)
def __str__(self):
return self.pattern()
def pattern(self):
mask = [ [0]*5 for j in range(4) ]
for b in self.items:
for x in range(b.w):
for y in range(b.h):
#print b.x, b.y, b.name, b.kind
mask[x+b.x-1][y+b.y-1] = b.kind
s = ""
for j in range(4):
s += "".join("%d" % n for n in mask[j])
return s
def is_ok(self):
return self.pattern().count('0')==2
def is_done(self):
return self.items[0].x==2 and self.items[0].y==4
def draw(self, ctx):
for i in self.items:
i.draw(ctx)
#draw self
def nextState(self):
result = []
for i, b in enumerate(self.items):
for m in ("U","D","L","R"):
new_board = self.copy()
if new_board.items[i].move(m):
if new_board.is_ok():
result.append(new_board)
return result
def bfs(initboard):
open_list = [initboard]
close_list = []
visited = {}
distance = {}
visited[initboard.pattern()] = initboard
distance[initboard.pattern()] = 0
prev = {}
prev[initboard.pattern()] = None
found = False
cnt = 0
while open_list and not found:
v = open_list.pop(0)
close_list.append(v)
vd = distance[v.pattern()]
if vd >=10:
ipshell()
cnt += 1
if cnt%100==0:
print "Iter %d, Len(open)=%d, len(close)=%d, Dist=%d" % (cnt, len(open_list), len(close_list), vd)
for nv in v.nextState():
np = nv.pattern() # compute the pattern first: it's needed in both branches below
if nv.is_done():
prev[np] = v.pattern()
visited[np] = nv
found = True
break
if np in visited:
continue
open_list.append(nv)
visited[np] = nv
distance[np] = vd + 1
prev[np] = v.pattern()
if found:
result = [nv]
while prev[nv.pattern()]!=None:
nv = visited[ prev[nv.pattern()] ]
result.append(nv)
print "Done"
return result[::-1]
return None
if __name__ == "__main__":
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,500,500)
ctx=cairo.Context(surface)
ctx.scale(50, 50)
bd = board()
bd.draw(ctx)
surface.write_to_png("1.png")
nbd = bd.nextState()
result = bfs(bd)
for i, b in enumerate(result):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,500,500)
ctx=cairo.Context(surface)
ctx.scale(50, 50)
b.draw(ctx)
surface.write_to_png("t%d.png" %i)
|
Python
| 207
| 25.705315
| 110
|
/HW2015/HW3-hf.py
| 0.460954
| 0.423174
|
leopesi/pool_budget
|
refs/heads/master
|
from .dimensao import Dimensao
|
Python
| 1
| 30
| 30
|
/projeto/dimensoes/customclass/estruturas/__init__.py
| 0.866667
| 0.866667
|
leopesi/pool_budget
|
refs/heads/master
|
# Generated by Django 3.0.3 on 2020-06-11 22:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dimensoes', '0017_auto_20200611_1859'),
]
operations = [
migrations.RenameField(
model_name='clientemodel',
old_name='numerocasa',
new_name='numero_casa',
),
migrations.AddField(
model_name='dimensaomodel',
name='status',
field=models.CharField(blank=True, choices=[('Em negociação', 'Em negociação'), ('Contrato', 'Contrato'), ('Encerrado', 'Encerrado')], default='Em negociação', help_text='Status do Orçamento', max_length=15),
),
]
|
Python
| 23
| 30.130434
| 220
|
/projeto/dimensoes/migrations/0018_auto_20200611_1905.py
| 0.596369
| 0.550279
|
leopesi/pool_budget
|
refs/heads/master
|
# Generated by Django 3.0.3 on 2020-03-17 17:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dimensoes', '0004_auto_20200317_0933'),
]
operations = [
migrations.AddField(
model_name='dimensaomodel',
name='data',
field=models.DateTimeField(blank=True, null=True),
),
]
|
Python
| 18
| 21.388889
| 62
|
/projeto/dimensoes/migrations/0005_dimensaomodel_data.py
| 0.598015
| 0.521092
|
leopesi/pool_budget
|
refs/heads/master
|
# Generated by Django 3.0.3 on 2020-06-03 22:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dimensoes', '0011_auto_20200516_1518'),
]
operations = [
migrations.AlterField(
model_name='clientemodel',
name='telefone',
field=models.IntegerField(blank=True),
),
]
|
Python
| 18
| 21
| 50
|
/projeto/dimensoes/migrations/0012_auto_20200603_1916.py
| 0.598485
| 0.520202
|