seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
981584569 | import argparse
import random
def main():
    """Parse CLI arguments and write the random 0/1 pattern file."""
    parsed = parse_args()
    compile_random_pattern(parsed.n, parsed.output)
def parse_args():
    """Build the CLI parser, validate that n is a power of two, return args."""
    parser = argparse.ArgumentParser()
    parser.add_argument('n', type=int, help='dimension of output file (power of 2)')
    parser.add_argument('output', type=argparse.FileType('wb'), help='output file')
    parsed = parser.parse_args()
    # Reject dimensions that are not powers of two before any work is done.
    if not is_power2(parsed.n):
        parser.error('n should be a power of 2')
    return parsed
def is_power2(num):
    """Return True iff *num* is a positive power of two (1, 2, 4, ...)."""
    # A power of two has exactly one set bit, so clearing the lowest set bit
    # (num & (num - 1)) leaves zero; zero and negatives never qualify.
    return num > 0 and not (num & (num - 1))
def compile_random_pattern(n, output):
    """Write n*n random bytes, each 0x00 or 0x01, to binary file *output*.

    The result is an n-by-n pattern of single-byte cells.
    """
    # Draw all n*n values at once and emit a single write instead of n*n
    # one-byte writes -- far fewer syscalls/buffer flushes for large n.
    output.write(bytes(random.choices((0, 1), k=n * n)))
# Script entry point.
if __name__ == '__main__':
    main()
| hjbyt/OS_HW5 | compile_random.py | compile_random.py | py | 725 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 26,
"usage_type": "call"
}
] |
31881700577 | from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
# URL table. NOTE(review): several apps are mounted both under a prefix and
# at the root ('') -- Django tries entries in order, so a root-level URL
# resolves to whichever included app matches first. Confirm the duplicate
# root mounts are intentional before removing them.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('a_home.urls')),
    path('about/', include('a_home.urls')),
    path('portfolio/', include('a_portfolio.urls')),
    path('', include('a_portfolio.urls')),
    path('flower/', include('a_flower.urls')),
    path('', include('a_flower.urls')),
    path('service/', include('a_service.urls')),
    path('', include('a_service.urls')),
    # Fixed: was 'blog' without the trailing slash, inconsistent with every
    # other prefixed route (the prefix would fuse onto included paths).
    path('blog/', include('a_blog.urls')),
]
"""нам будут доступны все медиа файлы.В режиме debug мы сможем перейти
к ним прямо в браузере, когда вылез какой-нибудь трейсбек ошибки,
и вообще это упрощает отладку приложения"""
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
#конфигурация надписи админ в админке джанго
admin.site.site_header = "Моя Админка Flower&Gifts"
admin.site.site_title = "Flowers & Gifts"
admin.site.index_title = "Добро Пожаловать в админку Flowers&Gifts" | V0lodimirV/Flower_site | a_configuration/a_configuration/urls.py | urls.py | py | 1,307 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "dja... |
42099837809 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
print('make pre-encoded tcga data from 2048')
import os
import sys
import csv
import numpy as np
import pickle
from PIL import Image
import tensorflow as tf
import tensorflow_ae_base
from tensorflow_ae_base import *
import tensorflow_util
import myutil
# Pull shared globals (stamp1, stamp2, presumably `sess`, ...) into this
# module's namespace. NOTE(review): exec-style includes make the data flow
# hard to trace, and the stage scripts are exec'd twice (here and below).
exec(open('extern_params.py').read())
# extern stamp1 stamp2
exec(open('tensorflow_ae_stage1.py').read())
exec(open('tensorflow_ae_stage2.py').read())
# extern
ss = 2048  # source tile size (pixels)
ns = 256  # number of tiles to sample
dir_image = '/project/hikaku_db/data/tissue_images'
dir_data = 'dat1'
nx = ss
ny = ss
nl = 3  # RGB channels
# stamp1,stamp2
# NOTE(review): duplicate exec of the stage scripts (already run above).
exec(open('tensorflow_ae_stage1.py').read())
exec(open('tensorflow_ae_stage2.py').read())
# TF1-style graph construction; tf.placeholder and initialize_all_variables
# were removed in TF2 -- this script requires TensorFlow 1.x.
tf_input = tf.placeholder(tf.float32, [None,ny,nx,nl])
tf_encode1 = get_encode1(tf_input)
tf_encode2 = get_encode2(tf_encode1)
# `sess` is presumably created by one of the exec'd scripts -- TODO confirm.
sess.run(tf.initialize_all_variables())
file_imglist = 'typelist.filterd.txt'
fileTable = list(csv.reader(open("dat1/typelist.filterd.txt",'r'), delimiter='\t'))
# Sample `ns` distinct rows of the file table without replacement.
iii_sample = np.random.choice(range(len(fileTable)),size=ns,replace=False)
index = []
qqq_trn = []
yyy_trn = []
for aa in range(ns):
    ii = iii_sample[aa]
    file_src = fileTable[ii][0]
    path_data = os.path.join(dir_image,file_src)
    ## print(path_data)
    img_src = Image.open(path_data,'r')
    mx = img_src.size[0]
    my = img_src.size[1]
    # NOTE(review): the same image is opened twice; img_src above is only
    # used for its size.
    img_tmp = Image.open(path_data,'r')
    # Normalize to [0,1] and add a leading batch axis.
    qqq_tmp = (np.asarray(img_tmp) / 255.0)[np.newaxis,:,:,:]
    # Run the two-stage encoder on the normalized tile.
    qqq_encode2 = tf_encode2.eval({tf_input: qqq_tmp})
    index.append(ii)
    qqq_trn.append(qqq_encode2)
    yyy_trn.append(fileTable[ii][1])
index = np.asarray(index)
qqq_trn = np.vstack(qqq_trn)
yyy_trn = np.asarray(yyy_trn)
# Persist encodings, type labels, and source-row indices keyed by stamp2.
np.save('dat1/tcga_encode2_w512.{}.npy'.format(stamp2),qqq_trn)
np.save('dat1/type_encode2_w512.{}.npy'.format(stamp2),yyy_trn)
np.save('dat1/index_encode2_w512.{}.npy'.format(stamp2),index)
| naono-git/cnncancer | make_tcga_encoded2_2048.py | make_tcga_encoded2_2048.py | py | 1,884 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "tensorflow.placeholder",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "tensorflow.float32",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.initialize_all_variables",
"line_number": 45,
"usage_type": "call"
},
{
... |
28440071535 | import pandas as pd
import os
from datetime import datetime
from sqlalchemy import create_engine
from load_data import load_data
from transform_data import transform_data
from config import DB_NAME,PASSWORD,USER
def insert_data_incremental(df_to_upload, table_name):
    """
    Inserts data from a DataFrame into a PostgreSQL table using incremental loading.
    Args:
        df_to_upload (pandas.DataFrame): DataFrame containing the data to insert.
        table_name (str): Name of the table to insert the data into.
    """
    # Set up the database connection
    # NOTE(review): table_name is interpolated straight into SQL below; only
    # call this with trusted, hard-coded table names (SQL-injection risk).
    # NOTE(review): SQLAlchemy 2.x requires wrapping raw SQL strings in
    # sqlalchemy.text() before connection.execute() -- confirm the pinned
    # SQLAlchemy version.
    engine = create_engine(f'postgresql://{USER}:{PASSWORD}@localhost:5432/{DB_NAME}')
    # Retrieve the last_update value from the database
    existing_data = pd.read_sql_query('SELECT MAX(last_update) FROM "' + table_name + '"', engine)
    last_update_db = existing_data.iloc[0, 0]
    # Check if last_update_db is null
    if pd.isnull(last_update_db):
        last_update_db = pd.to_datetime('1900-01-01')  # Or any other minimum date you want to use
    else:
        last_update_db = pd.to_datetime(last_update_db)  # Convert to datetime
    # Convert the 'last_update' column to datetime format
    df_to_upload['last_update'] = pd.to_datetime(df_to_upload['last_update'])
    # Filter the newer records in the DataFrame
    new_records = df_to_upload[df_to_upload['last_update'] > last_update_db]
    # Check if there are new records to update
    if not new_records.empty:
        # Load the necessary data for transformation
        df_states, df_locations, df_vehicle_types, _ = load_data()
        # Transform the data of the new records
        df_to_upload = transform_data(df_states, df_locations, df_vehicle_types, new_records)
        # Insert the updated data into the database
        with engine.begin() as connection:
            # Avoid duplicates based on id_trip
            df_to_upload.to_sql('temp_table', connection, if_exists='replace', index=False)
            connection.execute(f'''
                INSERT INTO "{table_name}" (id_trip, start, end_trip, state, vehicle_type, name, start_datetime, duration, kilometers_traveled, average_speed, theoretical_consumption, last_update)
                SELECT t.id_trip, t.start, t.end_trip, t.state, t.vehicle_type, t.name, t.start_datetime, t.duration, t.kilometers_traveled, t.average_speed, t.theoretical_consumption, t.last_update
                FROM temp_table t
                LEFT JOIN "{table_name}" f ON t.id_trip = f.id_trip
                WHERE f.id_trip IS NULL
            ''')
        print("The records have been updated in the database.")
    else:
        print("No new records to update in the database.")
    # Update existing records
    # NOTE(review): this block always runs; when there were no new records,
    # df_to_upload is still the raw (untransformed) input DataFrame --
    # confirm that updating from it is the intended behavior.
    with engine.begin() as connection:
        # Update the existing records in the table based on id_trip
        df_to_upload.to_sql('temp_table', connection, if_exists='replace', index=False)
        connection.execute(f'''
            UPDATE "{table_name}"
            SET start = t.start, end_trip = t.end_trip, state = t.state, vehicle_type = t.vehicle_type,
                name = t.name, start_datetime = t.start_datetime, duration = t.duration,
                kilometers_traveled = t.kilometers_traveled, average_speed = t.average_speed,
                theoretical_consumption = t.theoretical_consumption, last_update = t.last_update
            FROM temp_table t
            WHERE "{table_name}".id_trip = t.id_trip
        ''')
    print("The existing records have been updated in the database.")
    # Close the database connection
    engine.dispose()
# Resolve the ../data folder relative to this file's location.
data_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../data")
# Read the trips_1.csv file and store the data in a DataFrame
# NOTE(review): the variable name says trips_1 but the file read is
# trips_2.csv -- confirm which dataset is intended.
df_trips_1 = pd.read_csv(os.path.join(data_folder, "trips_2.csv"), sep=";")
# Call the insert_data_incremental function only if there are new records to update
# (an empty CSV is skipped; actual "newness" is decided inside the function).
if not df_trips_1.empty:
    insert_data_incremental(df_trips_1, 'fact_trip')
else:
    print("No new records to update or insert.")
| manu2492/Data-Transformation-Pipeline-Analyzing-and-Enriching-Trip-Data | etl/update_database.py | update_database.py | py | 4,029 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "config.USER",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "config.PASSWORD",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "config.DB_NAME",... |
2515527479 | import sqlite3
connection = sqlite3.connect("rpg_db.sqlite3")
connection.row_factory = sqlite3.Row
curs = connection.cursor()
# 1). How many total Characters are there
total_characters = """
SELECT COUNT(*) FROM charactercreator_character;
"""
results = curs.execute(total_characters).fetchall()
print("total Characters:", results[0]['COUNT(*)'])
# 2). How many of each specific subclass?
each_subclass = ['charactercreator_cleric', 'charactercreator_fighter',
'charactercreator_mage', 'charactercreator_thief']
for character_class in each_subclass:
subclass = f'SELECT COUNT(character_ptr_id) FROM {character_class}'
results = curs.execute(subclass).fetchall()
print(f'Each specific Subclass {character_class}:', results[0]
['COUNT(character_ptr_id)'])
subclass = "SELECT COUNT(mage_ptr_id) FROM charactercreator_necromancer"
results = curs.execute(subclass).fetchall()
print('Each specific Subclass charactercreator_mage:', results[0]['COUNT(mage_ptr_id)'])
# 3). How many total Items?
items = "SELECT COUNT(item_id) FROM charactercreator_character_inventory"
results = curs.execute(items).fetchall()
print('total Items:', results[0]['COUNT(item_id)'])
total_items = results[0]['COUNT(item_id)']
# 4). How many of the Items are weapons? How many are not?
weapons = 'SELECT COUNT(item_ptr_id) FROM armory_weapon'
results = curs.execute(weapons).fetchall()
print('Items are weapons:', results[0]['COUNT(item_ptr_id)'])
print('Items are NOT weapons:', total_items-results[0]['COUNT(item_ptr_id)'])
# 5). How many Items does each character have? (Return first 20 rows)
items_per_character = """
SELECT
character_id,
COUNT(item_id)
FROM charactercreator_character_inventory
GROUP BY character_id
LIMIT 20;
"""
results = curs.execute(items_per_character).fetchall()
for ii in range(len(results)):
print('Character', results[ii][0], 'has', results[ii][1], 'items.')
# 6).How many Weapons does each character have? (Return first 20 rows)
weapon_per_character = """
SELECT
character_id,
COUNT(item_id)
FROM charactercreator_character_inventory
JOIN armory_weapon
ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id
GROUP BY character_id
LIMIT 20;
"""
results = curs.execute(weapon_per_character).fetchall()
for ii in range(len(results)):
print('Character', results[ii][0], 'has', results[ii][1], 'weapons')
# 7). On average, how many Items does each Character have?
average_item_per_character = """
SELECT COUNT(item_id)
FROM charactercreator_character_inventory
GROUP BY character_id;
"""
results = curs.execute(average_item_per_character).fetchall()
items = 0
for ii in range(len(results)):
items += results[ii][0]
average = items / len(results)
print('Average amount of items per each character:', average)
# 8). On average, how many Weapons does each character have?
average_weapon_per_character = """
SELECT
COUNT(item_id)
FROM charactercreator_character_inventory
Join armory_weapon
ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id
GROUP BY character_id
"""
results = curs.execute(average_weapon_per_character).fetchall()
weapons = 0
for ii in range(len(results)):
weapons += results[ii][0]
average_weapon = weapons / len(results)
print('Average amount of weapons per each character:', average_weapon)
| Edudeiko/DS-Unit-3-Sprint-2-SQL-and-Databases | module1-introduction-to-sql/321_assignment.py | 321_assignment.py | py | 3,321 | python | en | code | null | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sqlite3.Row",
"line_number": 5,
"usage_type": "attribute"
}
] |
31929838287 | import re
from collections import defaultdict
class ReviewSentence():
    """One labeled sentence: a 3-character sentiment tag, a separator
    character, then the sentence text (kept verbatim, newline included)."""
    def __init__(self, line):
        # First three characters carry the sentiment tag; index 3 is the
        # separator and is dropped.
        self.sentiment, self.text = line[:3], line[4:]
class Review():
    """A review: a parsed header plus an ordered list of ReviewSentence
    objects, with list-like iteration/indexing over the sentences."""
    def __init__(self, header):
        self.header = header
        # Header format is <category>_<sentiment>_<id>: exactly three
        # underscore-separated fields (raises ValueError otherwise).
        category, sentiment, _tail = header.split("_")
        self.review_category = category
        self.review_sentiment = sentiment
        self.sentences = []
    def add_sentence(self, line):
        """Parse *line* into a ReviewSentence and append it."""
        self.sentences.append(ReviewSentence(line))
    def __iter__(self):
        return iter(self.sentences)
    def __len__(self):
        return len(self.sentences)
    def __getitem__(self, index):
        return self.sentences[index]
def parse_review_file(filename = "training_data.txt"):
reviews = []
curr_review = None
with open(filename, "r") as f:
for line in f:
if curr_review is not None:
if line == '\n':
reviews.append(curr_review)
curr_review = None
else:
curr_review.add_sentence(line)
else:
if line != '\n':
curr_review = Review(line)
if curr_review is not None:
reviews.append(curr_review)
return reviews
# reviews = parse_review_file()
# r = reviews[0]
def get_pos_neg_words():
    """Load the opinion lexicon and return (positive_words, negative_words).

    Lines whose first character is ';' are lexicon comments and are skipped;
    all other lines are stripped and collected into sets.
    """
    def _load(path):
        # Fix: the original left both file handles open (resource leak);
        # `with` guarantees they are closed.
        with open(path, 'r') as f:
            return {line.strip() for line in f if line[0] != ';'}
    pos = _load('opinion-lexicon-English/positive-words.txt')
    neg = _load('opinion-lexicon-English/negative-words.txt')
    return pos, neg
# Matches single-word (len=1) subjectivity-lexicon entries; captures
# type, word, POS, stemmed flag, and prior polarity.
clue_re = re.compile(r"type=(.*) len=1 word1=(.*) pos1=(.*) stemmed1=(.*) priorpolarity=(.*)")
class ClueDict:
    """Word -> list-of-Clue lookup built from a subjectivity lexicon file."""
    def __init__(self, filename):
        # Fix: the original signature was `__init__(filename)` -- it was
        # missing `self`, so the class could never be instantiated.
        # One word may appear with several POS/polarity entries; keep a list.
        self.clues = defaultdict(list)
        with open(filename, 'r') as f:
            for line in f:
                clue = Clue(line.strip())
                self.clues[clue.word].append(clue)
    def __contains__(self, word):
        return word in self.clues
    def __getitem__(self, word):
        return self.clues[word]
class Clue:
    """One parsed lexicon entry: subjectivity strength, word, POS tag,
    stemmed flag, and prior polarity (truncated to 3 chars: pos/neg/neu)."""
    def __init__(self, line):
        # Fix: the original signature was `__init__(line)` -- it was missing
        # `self`, so constructing a Clue raised at runtime.
        match = clue_re.match(line)
        groups = match.groups()
        self.strong = (groups[0] == 'strongsubj')
        self.word = groups[1]
        self.pos = groups[2]
        self.stemmed = (groups[3] == 'y')
        self.polarity = groups[4][:3]
    def __str__(self):
        return "word: %s, polarity: %s" % (self.word, self.polarity)
    def __repr__(self):
        return str(self)
| hmdavis/NLP-project-2 | NLP-project-3/parsers.py | parsers.py | py | 2,192 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 68,
"usage_type": "call"
}
] |
15791070220 | import Preprocessing as pre
from Classifiers import accuracy, RandomForestClassifier, confusion_matrix
import time
import h5py
import numpy as np
import argparse
import joblib
def RFClassifier(X_train, y_train, X_val, y_val, n_trees, tree_depth, split_metric, name, jobs):
    """Train a random forest over the training chunks, predict on a
    validation chunk, persist predictions to HDF5, plot a confusion matrix,
    and print accuracy. Returns the trained classifier.

    X_train/y_train/X_val/y_val are presumably sequences of data chunks --
    TODO confirm against Preprocessing.preprocess_all.
    """
    clf = RandomForestClassifier(n_trees=n_trees, tree_depth=tree_depth, split_metric=split_metric, n_jobs=jobs)
    # Fit on each chunk in turn; `i` retains the last chunk index and is
    # reused for the validation slice below.
    for i, (x_t, y_t) in enumerate(zip(X_train, y_train)):
        clf.fit(x_t, y_t)
    #option to save classifier
    #print('Saving classifier to ', 'output/'+name)
    #clf.save('final_run'+str(jobs)+'.json')
    y_pred = []
    # predict
    t0 = time.time()
    for x_v in X_val[i]:
        y_pred.append(clf.predict(x_v, n_trees=n_trees))
    print(len(y_pred),' labels predicted')
    print('Prediction time:', time.time()-t0,'s')
    # NOTE(review): 5000 hard-codes the validation chunk size -- confirm.
    results = np.asarray(y_pred).reshape(5000)
    with h5py.File('output/'+'predicted_labels.h5', 'w') as H:
        H.create_dataset('label', data=results)
    # NOTE(review): confusion_matrix slices y_val (the chunk container)
    # while accuracy slices y_val[i] (one chunk) -- verify both are intended.
    confusion_matrix(y_pred[:2000], y_val[:2000], 'output/'+name)
    acc = accuracy(y_pred[:2000], y_val[i][:2000])
    print('Accuracy:', acc)
    return clf
#line below is used to protect __main__, only required when using parallel processing
if __name__=='__main__':
    parser = argparse.ArgumentParser(description='Training Random Forest classifier on complete dataset')
    parser.add_argument('-p',
                        '--parallel',
                        dest='parallel',
                        action='store_true',
                        help='use 3/4 number of cpu cores to speedup runtime')
    parser.add_argument('-o',
                        '--oobe',
                        dest='oobe',
                        action='store_true',
                        help='generate out-of-bag error plot')
    args = parser.parse_args()
    n_jobs = 1
    if args.parallel:
        #use 75% of No. of CPU cores (to insure other external activities can be performed smoothely)
        n_jobs = int(joblib.cpu_count() * 3 / 4)
    training_set, test_set = pre.load_data()
    print('No dimensionality reduction\nnumber of trees = 275\nmax tree depth: 50\nsplit metric: entropy')
    print('Running on',n_jobs,'cpu core(s)')
    # -1 presumably disables dimensionality reduction -- TODO confirm
    # against Preprocessing.preprocess_all.
    X_train, Y_train, X_test, Y_test = pre.preprocess_all(training_set, test_set, -1)
    print('Training a classifier...')
    #training Random Forest using full training set, 275 trees, max tree depth of 50 and entropy as split metric.
    classifier = RFClassifier(X_train, Y_train, X_test, Y_test, 275, 50, 'entropy','final_run.png',n_jobs)
    #Accuracy may vary each run because of the random nature of random forest
    print()
    if args.oobe:
        # Out-of-bag error plot
        classifier.plot_oobe('output/'+'oobe.png',15, -1, 15)
| samedwardsFM/Next-level-random-forest-from-scratch | Final_run.py | Final_run.py | py | 2,841 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Classifiers.RandomForestClassifier",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.asarray"... |
71578930343 | import vtk
def main():
    """Build a CSG difference (box minus sphere) from implicit functions,
    sample and contour it into a mesh, and show it in a VTK window."""
    colors = vtk.vtkNamedColors()
    # create a sphere
    sphere = vtk.vtkSphere()
    sphere.SetRadius(1)
    sphere.SetCenter(1, 0, 0)
    # create a box
    box = vtk.vtkBox()
    box.SetBounds(-1, 1, -1, 1, -1, 1)
    # combine the two implicit functions (box minus sphere)
    boolean = vtk.vtkImplicitBoolean()
    boolean.SetOperationTypeToDifference()
    # boolean.SetOperationTypeToUnion()
    # boolean.SetOperationTypeToIntersection()
    boolean.AddFunction(box)
    boolean.AddFunction(sphere)
    # The sample function generates a distance function from the implicit
    # function. This is then contoured to get a polygonal surface.
    sample = vtk.vtkSampleFunction()
    sample.SetImplicitFunction(boolean)
    sample.SetModelBounds(-1, 2, -1, 1, -1, 1)
    sample.SetSampleDimensions(40, 40, 40)
    sample.ComputeNormalsOff()
    # contour at the zero level set of the sampled distance field
    surface = vtk.vtkContourFilter()
    surface.SetInputConnection(sample.GetOutputPort())
    surface.SetValue(0, 0.0)
    # mapper (scalars off so the actor's solid color is used)
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(surface.GetOutputPort())
    mapper.ScalarVisibilityOff()
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().EdgeVisibilityOn()
    actor.GetProperty().SetColor(colors.GetColor3d('AliceBlue'))
    actor.GetProperty().SetEdgeColor(colors.GetColor3d('SteelBlue'))
    # A renderer and render window
    renderer = vtk.vtkRenderer()
    renderer.SetBackground(colors.GetColor3d('Silver'))
    # add the actor
    renderer.AddActor(actor)
    # render window
    renwin = vtk.vtkRenderWindow()
    renwin.AddRenderer(renderer)
    # An interactor
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(renwin)
    # Start
    interactor.Initialize()
    renwin.Render()
    # renderer.GetActiveCamera().AddObserver('ModifiedEvent', CameraModifiedCallback)
    # Fixed camera pose chosen for a pleasant first view.
    renderer.GetActiveCamera().SetPosition(5.0, -4.0, 1.6)
    renderer.GetActiveCamera().SetViewUp(0.1, 0.5, 0.9)
    renderer.GetActiveCamera().SetDistance(6.7)
    renwin.Render()
    interactor.Start()
# Script entry point.
if __name__ == '__main__':
    main()
| lorensen/VTKExamples | src/Python/ImplicitFunctions/Boolean.py | Boolean.py | py | 2,132 | python | en | code | 319 | github-code | 36 | [
{
"api_name": "vtk.vtkNamedColors",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "vtk.vtkSphere",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "vtk.vtkBox",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "vtk.vtkImplicitBoolean",
... |
37716835129 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates EventCategory and attaches it to
    Event through a many-to-many `categories` field."""
    dependencies = [
        ('events', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='EventCategory',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=50)),
                # Timestamps maintained automatically on insert/update.
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.AddField(
            model_name='event',
            name='categories',
            field=models.ManyToManyField(to='events.EventCategory'),
        ),
    ]
| vinsmokemau/Eventstarter | events/migrations/0002_auto_20151014_0134.py | 0002_auto_20151014_0134.py | py | 833 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 14,
"usage_type": "call"
},
... |
25459718320 | from __future__ import absolute_import, division, print_function
import numpy as np
np.random.seed(1337) # for reproducibility
np.set_printoptions(threshold=np.nan)
import tensorflow as tf
tf.enable_eager_execution()
import os
import matplotlib.pyplot as plt
import pickle
import networkx as nx
import SegEval as ev
from random import randint
import networkx as nx
import csv
# Global experiment configuration.
SIZE = (50, 50, 3)  # image height, width, channels
NUM_SAMPLES = 1  # training images used
NUM_VALID = 1  # validation images used
BATCH_SIZE = 1
NUM_EPOCHS = 1000
def debugImages(im1, im2):
    """Print two label images side by side as ASCII art for eyeballing.

    Positive label ids are folded mod 7 onto a small glyph set; -1 and 0
    have dedicated glyphs. Both images are scanned over im1's shape.
    """
    glyphs = {-1: 'x', 0: '.', 1: '+', 2: '*', 3: '#', 4: '$', 5: '%', 6: '?'}
    def _glyph(value):
        v = int(value)
        if v > 0:
            v = v % 7
        return glyphs[v]
    print("debug:")
    for i in range(im1.shape[0]):
        left = []
        right = []
        for j in range(im1.shape[1]):
            left.append(_glyph(im1[i, j]))
            right.append(_glyph(im2[i, j]))
        print("".join(left) + ' : ' + "".join(right))
####################################################################################################################################
####################################################################################################################################
def getData():
    """Load the synthetic training/validation sets from pickle files.

    Each 100x100 source image is subsampled to 50x50 by taking every other
    pixel ([0:100:2]). Returns (X, Y, XT, YT) as float32 arrays.
    """
    # Training set
    im = list()
    gt = list()
    f = open('../synimage/train.p', 'rb')
    data = pickle.load(f)
    f.close()
    for i in range(NUM_SAMPLES):
        im.append(data[0][i][0:100:2,0:100:2,:])
        gt.append(data[1][i][0:100:2,0:100:2,:])
    X = np.array(im).astype(np.single)
    Y = np.array(gt).astype(np.single)
    # Validation set
    im = list()
    gt = list()
    f = open('../synimage/test.p', 'rb')
    data = pickle.load(f)
    f.close()
    for i in range(NUM_VALID):
        im.append(data[0][i][0:100:2,0:100:2,:])
        gt.append(data[1][i][0:100:2,0:100:2,:])
    XT = np.array(im).astype(np.single)
    YT = np.array(gt).astype(np.single)
    print('Training set')
    print(X.shape)
    print(Y.shape)
    print('Testing set')
    print(XT.shape)
    print(YT.shape)
    return X, Y, XT, YT
def sobel_output_shape(input_shape):
    """Append a trailing axis of size 2 to a 4-component shape tuple."""
    batch, height, width, channels = input_shape[0], input_shape[1], input_shape[2], input_shape[3]
    return (batch, height, width, channels, 2)
def rand_image(model, X):
    """Run the model on X, segment the predicted affinities on a pixel grid
    graph, and return the resulting label image (SIZE[0] x SIZE[1])."""
    YP = model(X)
    G = nx.grid_2d_graph(SIZE[0], SIZE[1])
    for u, v, d in G.edges(data = True):
        # `channel` is computed but unused here; the edge weight is the mean
        # of the two endpoint predictions.
        if u[0] == v[0]: #vertical, dy, channel 0
            channel = 0
        if u[1] == v[1]: #horizontal, dy, channel 1
            channel = 1
        d['weight'] = (YP[0, u[0], u[1], 0] + YP[0, v[0], v[1], 0])/2.0
    L = ev.GetLabelsAtThreshold(G)
    # Materialize the node->label dict as an image.
    img = np.zeros((SIZE[0], SIZE[1]), np.single)
    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            img[i,j] = L[(i,j)]
    return img
def rand_error(YP, Y):
    """Rand error of predicted affinities YP against ground-truth labels Y
    at threshold 0.0, computed on the pixel grid graph."""
    G = nx.grid_2d_graph(SIZE[0], SIZE[1])
    nlabels_dict = dict()
    for u, v, d in G.edges(data = True):
        # `channel` is computed but unused.
        if u[0] == v[0]: #vertical, dy, channel 0
            channel = 0
        if u[1] == v[1]: #horizontal, dy, channel 1
            channel = 1
        d['weight'] = (YP[0, u[0], u[1], 0] + YP[0, v[0], v[1], 0])/2.0
        nlabels_dict[u] = Y[0, u[0], u[1], 0]
        nlabels_dict[v] = Y[0, v[0], v[1], 0]
    error = ev.FindRandErrorAtThreshold(G, nlabels_dict, 0.0)
    return error
def get_rand_weight(YP, Y):
    """Derive per-pixel hinge weights and target signs from Rand counts.

    Builds the grid graph weighted by predicted affinities, runs Rand
    counting over the maximal spanning tree, and spreads each MST edge's
    signed error onto its two endpoint pixels.

    Returns [WY, SY]: WY nonnegative weights (normalized to sum to 1 when
    nonzero), SY accumulated +/-0.5 signs per endpoint.
    """
    G = nx.grid_2d_graph(SIZE[0], SIZE[1])
    nlabels_dict = dict()
    for u, v, d in G.edges(data = True):
        # `channel` is computed but unused.
        if u[0] == v[0]: #vertical, dy, channel 0
            channel = 0
        if u[1] == v[1]: #horizontal, dy, channel 1
            channel = 1
        d['weight'] = (YP[0, u[0], u[1], 0] + YP[0, v[0], v[1], 0])/2.0
        nlabels_dict[u] = Y[0, u[0], u[1], 0]
        nlabels_dict[v] = Y[0, v[0], v[1], 0]
    [posCounts, negCounts, mstEdges, totalPos, totalNeg] = ev.FindRandCounts(G, nlabels_dict)
    # Walk MST edges in order, tracking remaining positive pairs and
    # accumulated negative pairs; WS is the signed benefit of each edge.
    posError = totalPos
    negError = 0.0
    WY = np.zeros((1, SIZE[0], SIZE[1], 1), np.single)
    SY = np.zeros((1, SIZE[0], SIZE[1], 1), np.single)
    for i in range(len(posCounts)):
        posError = posError - posCounts[i]
        negError = negError + negCounts[i]
        WS = posError - negError
        (u,v) = mstEdges[i]
        if u[0] == v[0]: #vertical, dy, channel 0
            channel = 0
        if u[1] == v[1]: #horizontal, dy, channel 1
            channel = 1
        # Split the edge's |WS| evenly between its two endpoint pixels.
        WY[0, u[0], u[1], 0] += abs(WS)/2.0
        WY[0, v[0], v[1], 0] += abs(WS)/2.0
        if WS > 0.0:
            SY[0, u[0], u[1], 0] += 0.5
            SY[0, v[0], v[1], 0] += 0.5
        if WS < 0.0:
            SY[0, u[0], u[1], 0] += -0.5
            SY[0, v[0], v[1], 0] += -0.5
    # Std normalization
    totalW = np.sum(WY)
    if totalW != 0.0:
        WY = np.divide(WY, totalW)
    #SY = np.divide(SY, np.max(SY))
    return [WY, SY]
def get_maximin_weight(YP, Y):
    """Sample one random pixel pair, find the maximin (bottleneck) edge on
    their MST path, and emit weights/signs for that edge only.

    Returns [WY, SY] shaped like the prediction; all zeros except the two
    endpoints of the maximin edge.
    """
    G = nx.grid_2d_graph(SIZE[0], SIZE[1])
    nlabels_dict = dict()
    for u, v, d in G.edges(data = True):
        # `channel` is computed but unused.
        if u[0] == v[0]: #vertical, dy, channel 0
            channel = 0
        if u[1] == v[1]: #horizontal, dy, channel 1
            channel = 1
        d['weight'] = (YP[0, u[0], u[1], 0] + YP[0, v[0], v[1], 0])/2.0
        nlabels_dict[u] = Y[0, u[0], u[1], 0]
        nlabels_dict[v] = Y[0, v[0], v[1], 0]
    WY = np.zeros((1, SIZE[0], SIZE[1], 1), np.single)
    SY = np.zeros((1, SIZE[0], SIZE[1], 1), np.single)
    #build an MST
    mstEdges = ev.mstEdges(G)
    MST = nx.Graph()
    MST.add_edges_from(mstEdges)
    #get a random pair u,v (re-draw until the two pixels differ)
    u = (randint(0, SIZE[0]-1), randint(0, SIZE[1]-1))
    v = (randint(0, SIZE[0]-1), randint(0, SIZE[1]-1))
    while u == v:
        u = (randint(0, SIZE[0]-1), randint(0, SIZE[1]-1))
        v = (randint(0, SIZE[0]-1), randint(0, SIZE[1]-1))
    #find the maximin path between u and v on the MST
    path = nx.shortest_path(MST, source=u, target=v)
    #the maximin edge is the minimum-weight edge along that path
    (us,vs) = min([edge for edge in nx.utils.pairwise(path)], key=lambda e: G.edges[e]['weight'])
    WY[0, us[0], us[1], 0] += 0.5
    WY[0, vs[0], vs[1], 0] += 0.5
    # Sign: +0.5 when the sampled pixels share a ground-truth label, else -0.5.
    if Y[0, u[0], u[1], 0] == Y[0, v[0], v[1], 0]:
        SY[0, us[0], us[1], 0] += 0.5
        SY[0, vs[0], vs[1], 0] += 0.5
    else:
        SY[0, us[0], us[1], 0] += -0.5
        SY[0, vs[0], vs[1], 0] += -0.5
    return [WY, SY]
def conv_block(input_tensor, num_filters):
    """Two (3x3 conv -> batch-norm -> ReLU) stages with *num_filters* filters."""
    x = input_tensor
    for _ in range(2):
        x = tf.keras.layers.Conv2D(num_filters, (3, 3), padding='same')(x)
        x = tf.keras.layers.BatchNormalization()(x)
        x = tf.keras.layers.Activation('relu')(x)
    return x
def encoder_block(input_tensor, num_filters):
    """conv_block followed by 2x2 max-pooling.

    Returns (pooled, pre_pool) so the pre-pool features can feed the
    matching decoder skip connection.
    """
    features = conv_block(input_tensor, num_filters)
    pooled = tf.keras.layers.MaxPooling2D((2, 2), strides=(2, 2))(features)
    return pooled, features
def decoder_block(input_tensor, concat_tensor, num_filters):
    """Upsample 2x, concatenate the skip connection, then BN+ReLU and two
    (3x3 conv -> batch-norm -> ReLU) stages."""
    x = tf.keras.layers.Conv2DTranspose(num_filters, (2, 2), strides=(2, 2), padding='same')(input_tensor)
    x = tf.keras.layers.concatenate([concat_tensor, x], axis=-1)
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.Activation('relu')(x)
    for _ in range(2):
        x = tf.keras.layers.Conv2D(num_filters, (3, 3), padding='same')(x)
        x = tf.keras.layers.BatchNormalization()(x)
        x = tf.keras.layers.Activation('relu')(x)
    return x
def runExp():
    """Train a tiny U-Net on the synthetic data with a Rand-weighted hinge
    loss, periodically log train/val loss and Rand error, and append the
    training history to train_kruskal.log."""
    X, Y, XT, YT = getData()
    print('## Model ')
    # Minimal U-Net: one encoder level, a bottleneck, one decoder level.
    input_image = tf.keras.layers.Input(shape=(50,50,3))
    encoder0_pool, encoder0 = encoder_block(input_image, 64)
    center = conv_block(encoder0_pool, 128)
    decoder0 = decoder_block(center, encoder0, 64)
    out = tf.keras.layers.Conv2D(1, (3,3), padding='same')(decoder0)
    model = tf.keras.Model(inputs=input_image, outputs=out)
    #model.summary()
    def myloss(model, X, Y):
        # Hinge loss with per-pixel weights/signs from Rand counting.
        YP = model(X)
        WY, SY = get_rand_weight(YP, Y)
        loss = tf.losses.hinge_loss(labels=SY, logits=YP, weights=WY, reduction=tf.losses.Reduction.NONE)
        return loss
    def my_maximin_loss(model, X, Y):
        # Alternative loss based on a single sampled maximin edge (unused here).
        YP = model(X)
        WY, SY = get_maximin_weight(YP, Y)
        loss = tf.losses.hinge_loss(labels=SY, logits=YP, weights=WY, reduction=tf.losses.Reduction.NONE)
        return loss
    # TF1-style optimizer API (requires tf.enable_eager_execution at import).
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    loss_kruskal = []
    val_loss_kruskal = []
    rand_kruskal = []
    val_rand_kruskal =[]
    for epoch in range(NUM_EPOCHS):
        #WY, SY = get_rand_weight(model(X),Y)
        with tf.GradientTape() as tape:
            loss = myloss(model, X, Y)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables), global_step=tf.train.get_or_create_global_step())
        # Every 10 epochs: record losses, print an ASCII preview, and
        # compute train/val Rand error.
        if (epoch+1) % 10 == 0:
            loss_kruskal.append(loss)
            v_loss = myloss(model, XT, YT)
            val_loss_kruskal.append(v_loss)
            pred_img = rand_image(model, XT)
            debugImages(YT[0,:,:,0], pred_img)
            err = rand_error(model(X), Y)
            v_err = rand_error(model(XT), YT)
            rand_kruskal.append(err)
            val_rand_kruskal.append(v_err)
            print("Epoch: {}, Loss: {} Err: {}".format(epoch+1, loss, err))
            print("Epoch: {}, Val Loss: {} Val Err: {}".format(epoch+1, v_loss, v_err))
    del model
    # Append the sampled history (one row per 10 epochs) as CSV.
    with open('train_kruskal.log', 'a', newline='') as csvfile:
        fieldnames = ['epoch', 'loss_kruskal', 'val_loss_kruskal', 'rand_kruskal', 'val_rand_kruskal']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for i in range(len(loss_kruskal)):
            writer.writerow({'epoch':(i+1)*10,
                             'loss_kruskal':loss_kruskal[i].numpy(),
                             'val_loss_kruskal':val_loss_kruskal[i].numpy(),
                             'rand_kruskal':rand_kruskal[i],
                             'val_rand_kruskal':val_rand_kruskal[i]})
####################################################################################################################################
####################################################################################################################################
# Script entry point: report TF runtime mode, then train and evaluate.
if __name__ == '__main__':
    print("TensorFlow version: {}".format(tf.__version__))
    print("Eager execution: {}".format(tf.executing_eagerly()))
    runExp()
| remrace/qnguyen5-thesis-tamucc | src/bin/test.py | test.py | py | 11,127 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.random.seed",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "numpy.set_printoptions",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
... |
37220401007 | import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import utils
from TGATlayer import TGATlayer
class TGATML(nn.Module):
    """Multi-task trip-flow model: two temporal graph-attention encoders
    (source and destination node embeddings) plus linear regressors for
    inflow and outflow.

    NOTE(review): this class calls `TCNGATlayer`, but the file only imports
    `TGATlayer` -- one of the two names is wrong; confirm before running.
    """
    def __init__(self, adjm, node_feats,in_dim=1,out_dim=24, residual_channels=2,dilation_channels=2, end_channels=2*10, layers=5, reg_param=0):
        super().__init__()
        self.adjm = adjm
        self.node_feats = node_feats
        self.reg_param = reg_param
        # Two independent encoders: one for source embeddings, one for
        # destination embeddings.
        self.TCNGAT1 = TCNGATlayer(in_dim,out_dim,residual_channels,dilation_channels, end_channels,layers, kernel_size=2)
        self.TCNGAT2 = TCNGATlayer(in_dim,out_dim,residual_channels,dilation_channels, end_channels,layers, kernel_size=2)
        self.in_regressor = nn.Linear(out_dim, 1)
        self.out_regressor = nn.Linear(out_dim, 1)
    def src_embed(self):
        # Encode all nodes, swap axes 1 and 3, and squeeze singleton axes to
        # get a per-node embedding matrix. Assumes batch size 1 -- TODO confirm.
        x = self.TCNGAT1.forward(self.adjm, self.node_feats)
        embedding = x.transpose(1, 3)
        embedding = torch.squeeze(embedding)
        return embedding
    def dst_embed(self):
        x = self.TCNGAT2.forward(self.adjm, self.node_feats)
        embedding = x.transpose(1, 3)
        embedding = torch.squeeze(embedding)
        return embedding
    def est_inflow(self, trip_od, dst_embedding):
        # trip_od: presumably an (E, 2) tensor of (origin, destination) node
        # ids -- verify against the caller.
        in_nodes, in_flows_idx = torch.unique(trip_od[:, 1], return_inverse=True)
        return self.in_regressor(dst_embedding[in_nodes])
    def est_outflow(self, trip_od, src_embedding):
        out_nodes, out_flows_idx = torch.unique(trip_od[:, 0], return_inverse=True)
        return self.out_regressor(src_embedding[out_nodes])
    def get_loss(self, trip_od, scaled_trip_volume,inflows, outflows, edge_est, inflow_est, outflow_est, multitask_weights):
        # Weighted sum of three regression losses plus an L2 penalty on the
        # embeddings. NOTE(review): `MSE` is not defined or imported in this
        # file -- confirm where it comes from.
        out_nodes, out_flows_idx = torch.unique(trip_od[:, 0], return_inverse=True)
        in_nodes, in_flows_idx = torch.unique(trip_od[:, 1], return_inverse=True)
        scaled_outflows = utils.scale(outflows[out_nodes])
        scaled_inflows = utils.scale(inflows[in_nodes])
        edge_est_loss = MSE(edge_est, scaled_trip_volume)
        inflow_est_loss = MSE(inflow_est, scaled_inflows)
        outflow_est_loss = MSE(outflow_est, scaled_outflows)
        reg_loss = 0.5 * (self.regularization_loss(self.src_embed()) + self.regularization_loss(self.dst_embed()))
        total_loss = multitask_weights[0] * edge_est_loss + multitask_weights[1] * inflow_est_loss + multitask_weights[2] * outflow_est_loss + self.reg_param * reg_loss
        return total_loss
    def regularization_loss(self, embedding):
        # Mean squared embedding magnitude (L2 penalty).
        return torch.mean(embedding.pow(2))
class Edge_Regression(nn.Module):
def __init__(self, regfunction):
file = r'.\Distance.csv'
distm = pd.read_csv(file, index_col=0)
self.distm = distm.values
self.edge_regressor = regfunction
def edge_fit(self, trip_od, trip_volume,src_embedding, dst_embedding):
src_emb = src_embedding[trip_od[:,0]]
dst_emb = dst_embedding[trip_od[:,1]]
scaled_distm = self.distm / self.distm.max() * np.max([src_emb.max().detach().numpy(), dst_emb.max().detach().numpy()])
feat_dist = scaled_distm[trip_od[:, 0], trip_od[:, 1]].reshape(-1, 1)
feat_distT = torch.from_numpy(feat_dist).view(-1, 1)
edge_feat = torch.cat((src_emb, feat_distT, dst_emb), dim=1)
edge_feat= edge_feat.detach().numpy()
return(self.edge_regressor.fit(edge_feat,trip_volume))
def edge_estimate(self, trip_od, src_embedding, dst_embedding):
src_emb = src_embedding[trip_od[:,0]]
dst_emb = dst_embedding[trip_od[:,1]]
scaled_distm = self.distm / self.distm.max() * np.max([src_emb.max().detach().numpy(), dst_emb.max().detach().numpy()])
feat_dist = scaled_distm[trip_od[:, 0], trip_od[:, 1]].reshape(-1, 1)
feat_distT = torch.from_numpy(feat_dist).view(-1, 1)
edge_feat = torch.cat((src_emb, feat_distT, dst_emb), dim=1)
edge_feat= edge_feat.detach().numpy()
edge_pre = self.edge_regressor.predict(edge_feat)
return (torch.tensor(edge_pre))
def MSE(y_hat, y):
'''
Root mean square
'''
limit = 20000
if y_hat.shape[0] < limit:
return torch.mean((y_hat - y)**2)
else:
acc_sqe_sum = 0
for i in range(0, y_hat.shape[0], limit):
acc_sqe_sum = acc_sqe_sum + torch.sum((y_hat[i: i + limit] - y[i: i + limit]) ** 2)
return acc_sqe_sum / y_hat.shape[0] | shiql/TGAT-ML | TGATML.py | TGATML.py | py | 4,454 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
11814225329 | import sqlite3
from sqlite3 import Error
from datetime import date
class PatientDataStore():
"""
Stores patient information in sqlite3 database
"""
__instance = None
@staticmethod
def getInstance():
"""
This is a static method to create class as a singleton
"""
if PatientDataStore.__instance == None:
PatientDataStore()
return PatientDataStore.__instance
def __init__(self):
"""
Initialize connection to sqlite datbase
"""
if PatientDataStore.__instance != None:
raise Exception("This class is a singleton")
else:
PatientDataStore.__instance = self
self.__open_connection()
patients_table = "CREATE TABLE IF NOT EXISTS patients(id integer PRIMARY KEY, name text, temperature integer, home_parish text, diagnosis integer, created_at text)"
cursorObj = self.con.cursor()
cursorObj.execute(patients_table)
self.con.commit()
def store_patient(self, name, parish, temp, diagnosis):
"""
Initialize connection to sqlite datbase
Parameters:
name(str): patients name
parish(str): patients home parish
temp(int): patients temperature
diagnosis(int): patients diagnosis percentage chance patient has covid
"""
created_at = date.today().strftime("%d-%m-%Y")
new_patient = f"INSERT INTO patients(name, temperature, home_parish, diagnosis, created_at) VALUES ('{name}', {temp}, '{parish}', {diagnosis}, '{created_at}')"
cursorObj = self.con.cursor()
cursorObj.execute(new_patient)
self.con.commit()
def get_patients(self):
"""
Return all patients stored
"""
cursorObj = self.con.cursor()
cursorObj.execute("SELECT * FROM patients")
rows = list(cursorObj.fetchall())
print(rows)
self.con.commit()
def __open_connection(self):
try:
self.con = sqlite3.connect("covidAi.db")
print("[Connection established]")
except Error:
print(Error)
def __close_connection(self):
self.con.close()
if __name__ == "__main__":
pateintdb = PatientDataStore()
| Rumone/ai-project | patient_data_store.py | patient_data_store.py | py | 2,314 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.date.today",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "sqlite3.Error",
... |
494837227 | from dagster_graphql.test.utils import execute_dagster_graphql
from dagster.core.instance import DagsterInstance
from .utils import define_test_context, sync_execute_get_run_log_data
COMPUTE_LOGS_QUERY = '''
query ComputeLogsQuery($runId: ID!, $stepKey: String!) {
pipelineRunOrError(runId: $runId) {
... on PipelineRun {
runId
computeLogs(stepKey: $stepKey) {
stdout {
data
}
}
}
}
}
'''
COMPUTE_LOGS_SUBSCRIPTION = '''
subscription ComputeLogsSubscription($runId: ID!, $stepKey: String!, $ioType: ComputeIOType!, $cursor: String!) {
computeLogs(runId: $runId, stepKey: $stepKey, ioType: $ioType, cursor: $cursor) {
data
}
}
'''
def test_get_compute_logs_over_graphql(snapshot):
payload = sync_execute_get_run_log_data(
{'executionParams': {'selector': {'name': 'spew_pipeline'}, 'mode': 'default'}}
)
run_id = payload['run']['runId']
result = execute_dagster_graphql(
define_test_context(instance=DagsterInstance.local_temp()),
COMPUTE_LOGS_QUERY,
variables={'runId': run_id, 'stepKey': 'spew.compute'},
)
compute_logs = result.data['pipelineRunOrError']['computeLogs']
snapshot.assert_match(compute_logs)
def test_compute_logs_subscription_graphql(snapshot):
payload = sync_execute_get_run_log_data(
{'executionParams': {'selector': {'name': 'spew_pipeline'}, 'mode': 'default'}}
)
run_id = payload['run']['runId']
subscription = execute_dagster_graphql(
define_test_context(instance=DagsterInstance.local_temp()),
COMPUTE_LOGS_SUBSCRIPTION,
variables={'runId': run_id, 'stepKey': 'spew.compute', 'ioType': 'STDOUT', 'cursor': '0'},
)
results = []
subscription.subscribe(lambda x: results.append(x.data))
assert len(results) == 1
result = results[0]
assert result['computeLogs']['data'] == 'HELLO WORLD\n'
snapshot.assert_match(results)
| helloworld/continuous-dagster | deploy/dagster_modules/dagster-graphql/dagster_graphql_tests/graphql/test_compute_logs.py | test_compute_logs.py | py | 1,983 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "utils.sync_execute_get_run_log_data",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "dagster_graphql.test.utils.execute_dagster_graphql",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "utils.define_test_context",
"line_number": 37,
"us... |
28418729236 | # Implementation of Selenium WebDriver with Python using PyTest
import pytest
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
import sys
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from time import sleep
DELAY = 3
test_users = [
"SELENIUM_TEST",
"Lois_Lane",
"Clark_Kent",
"Jenny_Flex",
]
try:
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get('https://rateer.pythonanywhere.com/')
driver.maximize_window()
except Exception as e:
print(str(e))
try:
driver.find_element(by=By.XPATH, value='//a[@href="'+ "/home/signup/" +'"]').click()
for i in test_users:
driver.find_element(by=By.XPATH, value='//input[@name="'+ "username" +'"]').send_keys(i)
driver.find_element(by=By.XPATH, value='//input[@name="'+ "email" +'"]').send_keys(i+"@seleniummail.com")
driver.find_element(by=By.XPATH, value='//input[@name="'+ "password" +'"]').send_keys(i)
driver.find_element(by=By.XPATH, value='//button[@type="'+ "submit" +'"]').submit()
sleep(DELAY)
print('\n\n\
Feature: {}\n\
Given conditions: {}\n\
When: {}'\
.format(
'https://rateer.pythonanywhere.com/home/signup/',
'Attempting to input',
'Clicking submit'))
try:
ui_res = driver.find_element(by=By.XPATH, value='//h3[contains(text(), \'Registered Successfully!\')]').text
print('\
Then: Test Passed -- {}\n'\
.format(
ui_res))
except Exception as e:
print('\
Then: Test Failed -- {}\n'\
.format(
str(e)))
driver.find_element(by=By.XPATH, value='//a[@href="'+ "/home/" +'"]').click()
except Exception as e:
print('\n\
Feature: {}\n\
Given conditions: {}\n\
When: {}\n\
Then: {}'\
.format(
'https://rateer.pythonanywhere.com/home/signup/',
'Attempting to input',
'Clicking submit',
'Test Failed! Details:'+str(e)))
print(str(e))
sleep(DELAY)
sleep(DELAY)
sleep(DELAY)
sleep(DELAY)
sleep(DELAY)
sleep(DELAY)
sleep(DELAY)
sleep(DELAY)
driver.quit() | syedsair/rateer-automated-tests | UI/signup.py | signup.py | py | 2,374 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "webdriver_manager.chrome.ChromeDriverManager",
"line_number": 20,
"usage_type": "call"
},
... |
26469573974 | import codecs
import hashlib
import base58
from bitcoinaddress.util import checksum
from bitcoinaddress import Wallet
from bitcoinutils.keys import PrivateKey
from bitcoinutils.setup import setup
import ecdsa
from ecdsa.curves import SECP256k1
from ecdsa.util import sigencode_der_canonize
from .base import BaseKey
from ..hash import sha256, sha3
from ..common import parseType
from ..hint import KEY_PRIVATE, KEY_PUBLIC
class Keypair(object):
def __init__(self, priv, useSeed):
if not useSeed:
raw, _ = parseType(priv)
self.priv = BaseKey(KEY_PRIVATE, raw)
self.seed = None
else:
self.priv = None
self.seed = priv
self.generatePrivateKey()
self.generatePublicKey()
def generatePrivateKey(self):
if self.priv != None:
assert isinstance(
self.priv, BaseKey), 'Wrong private key or seed; Keypair.generatePrivateKey'
elif self.seed != None:
assert len(
self.seed) >= 36, 'Seed is too short to create Keypair; Keypair.generatePrivateKey'
sh = sha3(self.seed.encode())
if len(sh.digest) < 44:
shb = base58.b58encode(sh.digest)[:-3]
else:
shb = base58.b58encode(sh.digest)[:-4]
k = str(hex((int.from_bytes(shb, "big") % (SECP256k1.order - 1)) + 1))[2:]
self.priv = BaseKey(KEY_PRIVATE, encodeKey(bytes.fromhex(k)))
def generatePublicKey(self):
wif = base58.b58encode_check(
base58.b58decode_check(self.priv.key)[:-1]).decode()
wallet = Wallet(wif)
self.pub = BaseKey(KEY_PUBLIC, base58.b58encode(codecs.decode(
wallet.address.pubkeyc, "hex")).decode())
@property
def privateKey(self):
return self.priv.typed
@property
def publicKey(self):
return self.pub.typed
def sign(self, b):
assert isinstance(b, bytes), 'Input must be bytes object; Keypair.sign'
setup('mainnet')
hs = sha256(b).digest
wif = self.priv.key
pk = PrivateKey(wif=wif)
sk = ecdsa.SigningKey.from_string(pk.key.to_string(), curve=SECP256k1)
return sk.sign(hs, hashfunc=hashlib.sha256, sigencode=sigencode_der_canonize)
def encodeKey(key):
pk = b'\x80' + key + b'\x01'
return base58.b58encode(pk + checksum(pk)).decode()
def getNewKeypair():
return getKeypairFromPrivateKey(encodeKey(Wallet().key.digest) + KEY_PRIVATE)
def getKeypairFromPrivateKey(priv):
_, type = parseType(priv)
assert type == KEY_PRIVATE, 'Not private key; getKeypairFromPrivateKey'
return Keypair(priv, False)
def getKeypairFromSeed(seed):
assert len(
seed) >= 36, 'Seed is too short to create Keypair; getKeypairFromseed'
return Keypair(seed, True)
| ProtoconNet/mitum-py-util | src/mitumc/key/keypair.py | keypair.py | py | 2,852 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "common.parseType",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "base.BaseKey",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "hint.KEY_PRIVATE",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "base.BaseKey",
... |
15370165912 | import pytest
import sqlalchemy as sa
import sqlalchemy.orm
import jessiql.sainfo.version
from apiens.tools.sqlalchemy.session.session_tracking import TrackingSessionMaker, TrackingSessionCls
@pytest.mark.xfail(jessiql.sainfo.version.SA_13, reason='Session() is not a context manager in SA 1.3', )
def test_tracking_sessionmaker(engine: sa.engine.Engine):
Session = TrackingSessionMaker(bind=engine)
# === Test: use as a context manager
# Check that the session itself works
with Session() as ssn:
assert ssn.query(1).scalar() == 1
# Verify that it's properly closed
Session.assert_no_active_sessions() # does not fail
# === Test: failing
# Open a session
ssn = Session()
# It fails
with pytest.raises(AssertionError) as e:
Session.assert_no_active_sessions()
msg = str(e.value)
assert '1 active' in msg
assert __file__ in msg
# Close it. No more failures.
ssn.close()
Session.assert_no_active_sessions()
def test_tracking_session(engine: sa.engine.Engine):
Session = TrackingSessionCls(weak=False)
# === Test
ssn = Session()
# It fails
with pytest.raises(AssertionError) as e:
Session.assert_no_active_sessions()
# Close it. No more failures
ssn.close()
Session.assert_no_active_sessions()
| kolypto/py-apiens | tests/tools_sqlalchemy/test_session_tracking.py | test_session_tracking.py | py | 1,330 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sqlalchemy.engine",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "apiens.tools.sqlalchemy.session.session_tracking.TrackingSessionMaker",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 26,
"usage_ty... |
5353044388 | import pandas as pd
import ipywidgets as widgets
from ipylabel.templates import Table
class ImageDashboard():
'''
Abstract dashboard class for image data.
'''
def __init__(self, images, format='png'):
if format not in ['png', 'jpg']:
raise ValueError('Format must be either png or jpg')
self.image_format = format
self.images = self.generate_ipyimages(images=images)
self.figure = self.generate_figure()
def generate_ipyimages(self, images):
'''
Create an ipywidgets.Image object for each image.
Args:
images (list): A list of image bytes objects.
Returns:
ipyimges (list): A list of ipywidgets.Image objects.
'''
if isinstance(images, list):
try:
ipyimages = []
for image in images:
assert isinstance(image, bytes)
ipyimage = widgets.Image(value=image, format=self.image_format)
ipyimages.append(ipyimage)
return ipyimages
except AssertionError as e:
raise ValueError(f'images parameter must contain a list of image bytes objects.') from e
else:
raise ValueError(f'images parameter must contain a list of image bytes objects.')
class ImageClassification(ImageDashboard):
def __init__(self, **kwargs):
super.__init__(**kwargs)
self.table = generate_table()
def generate_table(self):
df = pd.DataFrame([1])
return Table(data=df, title='Labels')
| crabtr26/ipylabel | ipylabel/Dashboards.py | Dashboards.py | py | 1,704 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ipywidgets.Image",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "ipylabel.templates.Table",
"line_number": 52,
"usage_type": "call"
}
] |
19793018115 | # -*- coding: utf-8 -*-
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.externals.six import StringIO
from IPython.display import Image
from sklearn.tree import export_graphviz
import pydotplus
from math import sqrt
dataset = pd.read_csv("bank_customer.csv")
df = pd.DataFrame(dataset)
bank_data = df.copy()
def combine_job_poutcome(bank_data):
bank_data['job'] = bank_data['job'].replace(['management', 'admin.'], 'white-collar')
bank_data['job'] = bank_data['job'].replace(['services', 'housemaid'], 'pink-collar')
bank_data['job'] = bank_data['job'].replace(['retired', 'student','unemployed','unknown'], 'other')
bank_data['poutcome'] = bank_data['poutcome'].replace(['other','unknown'], 'unknown')
combine_job_poutcome(bank_data)
def convert_categorical_values(bank_data):
le = LabelEncoder()
bank_data['job'] = le.fit_transform(bank_data['job'])
bank_data['marital'] = le.fit_transform(bank_data['marital'])
bank_data['education'] = le.fit_transform(bank_data['education'])
bank_data['default'] = le.fit_transform(bank_data['default'])
bank_data['housing'] = le.fit_transform(bank_data['housing'])
bank_data['loan'] = le.fit_transform(bank_data['loan'])
bank_data['contact'] = le.fit_transform(bank_data['contact'])
bank_data['month'] = le.fit_transform(bank_data['month'])
bank_data['poutcome'] = le.fit_transform(bank_data['poutcome'])
bank_data['deposit'] = le.fit_transform(bank_data['deposit'])
convert_categorical_values(bank_data)
data_1 = bank_data[['age','job','marital','education','balance','housing','duration','poutcome']]
data_2 = bank_data[['job','marital','education','housing']]
def train_and_test(dataset):
print("Accuracy values of the dataset :")
X = dataset
y = bank_data['deposit'].to_frame()
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3,random_state=100)
return X_train,X_test,y_train,y_test
X_train,X_test,y_train,y_test = train_and_test(data_1)
def with_entropy(depth):
tree_with_entropy = DecisionTreeClassifier(criterion = "entropy", random_state = 100, max_depth = depth, min_samples_leaf = 5)
tree_with_entropy.fit(X_train, y_train)
return tree_with_entropy
def with_gini_index(depth):
tree_with_gini = DecisionTreeClassifier(criterion = "gini", random_state = 100, max_depth = depth, min_samples_leaf = 5)
tree_with_gini.fit(X_train, y_train)
return tree_with_gini
def prediction(X_test, tree):
prediction = tree.predict(X_test)
print(prediction)
return prediction
def calculate_test_accuracy(tree):
tree_score_test = tree.score(X_test, y_test)
print("Test score: ",tree_score_test)
return tree_score_test
def calculate_train_accuracy(tree):
tree_score_train = tree.score(X_train, y_train)
print("Training score: ",tree_score_train)
return tree_score_train
def calculate_confidence_interval(accuracy,dataset):
z = 1.96
interval = z * sqrt((accuracy * (1-accuracy)) / 100)
return interval
def upper_lower_p():
alpha = 0.5 #because %95
lower_p = alpha / 2
upper_p = upper_p = (100 - alpha) + (alpha / 2.0)
print("lower_p :", lower_p,"upper_p :", upper_p)
def plot_tree(tree_name, file_name):
dot_data = StringIO()
export_graphviz(tree_name, out_file=dot_data,
feature_names=X_train.columns,
filled=True, rounded=True,
special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
graph.write_png(file_name)
def display(dataset):
print("--------------","DEPTH 3","--------------------")
gini_tree_depth_3 = with_gini_index(3)
entropy_tree_depth_3 = with_entropy(3)
gini_prediction = prediction(X_test, gini_tree_depth_3)
print("Gini Accuracy is :")
calculate_test_accuracy(gini_tree_depth_3)
calculate_train_accuracy(gini_tree_depth_3)
entropy_prediction = prediction(X_test, entropy_tree_depth_3)
print("Entropy Accuracy is :")
calculate_test_accuracy(entropy_tree_depth_3)
calculate_train_accuracy(entropy_tree_depth_3)
print("Interval of accuracy: ")
interval = calculate_confidence_interval(calculate_test_accuracy(gini_tree_depth_3),data_1)
print(interval)
upper_lower_p()
print("--------------","DEPTH 7","--------------------")
gini_tree_depth_7 = with_gini_index(7)
entropy_tree_depth_7 = with_entropy(7)
gini_prediction = prediction(X_test, gini_tree_depth_7)
print("Gini Accuracy is :")
calculate_test_accuracy(gini_tree_depth_7)
calculate_train_accuracy(gini_tree_depth_7)
entropy_prediction = prediction(X_test, entropy_tree_depth_7)
print("Entropy Accuracy is :")
calculate_test_accuracy(entropy_tree_depth_7)
calculate_train_accuracy(entropy_tree_depth_7)
print("Interval of accuracy: ")
interval = calculate_confidence_interval(calculate_test_accuracy(gini_tree_depth_7),data_1)
print(interval)
upper_lower_p()
plot_tree(gini_tree_depth_7, "gini_data1_7.png")
plot_tree(entropy_tree_depth_7, "entropy_data1_7.png")
plot_tree(gini_tree_depth_3, "gini_data1_3.png")
plot_tree(entropy_tree_depth_3, "entropy_data1_3.png")
| oykuandac/Entropy-Gini-Indexes-Prediction | exercise.py | exercise.py | py | 5,568 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "s... |
34899584843 |
from calendar import weekday
class Employee():
num_of_emps = 0
raise_amount = 1.04
def __init__(self, first, last, pay):
self.first = first
self.last = last
self.pay = pay
self.email = first + '.' + last + '@gmail.com'
Employee.num_of_emps +=1
def fullname(self):
return "{} {}".format(self.first, self.last)
def apply_raise(self):
self.pay = int(self.pay * Employee.raise_amount)# we also use "self instead of Employee here"
@classmethod
def set_raise_amt(cls, amount):
cls.raise_amt = amount
@classmethod
def form_string(cls, emp_str):
first, last, pay = emp_str.split('-')
Employee(first, last, pay)
cls(first, last, pay)
@staticmethod
def is_workingday(day):
if day.weekday() == 5 or day.weekday() == 6:
return False
return True
emp_1 = Employee('Sangram', 'Bahadur', 20000000)
emp_2 = Employee('Vansh', 'Bahadur', 20000000)
import datetime
my_date = datetime.date(2016, 7, 10)
print(Employee.is_workingday(my_date))# it will return True or False based on day.
emp_str_1 = 'John-Doe-70000'
emp_str_2 = 'stive-Smith-30000'
emp_str_3 = 'Jane-Doe-90000'
new_emp_1 = Employee.form_string(emp_str_1)
#print(new_emp_1.email)
#print(new_emp_1.pay)
# first, last, pay = emp_str_1.split('-')
# emp_1.set_raise_amt(1.08)
# print(Employee.raise_amount)
# print(emp_1.raise_amount)
# print(emp_2.raise_amount) | sangramdhurve/Oops_python | Working with Classes.py | Working with Classes.py | py | 1,530 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.date",
"line_number": 41,
"usage_type": "call"
}
] |
71073486183 | #!/usr/bin/env python3
import argparse
import logging
import pathlib
import time
import signal
import shutil
import numpy as np
import yaml
import tensorboardX
import torch
import torch.utils.data
import utils
from tasks.arithmetic import Arithmetic
from models.lstm import LSTM
from models.ntm import NTM
from models.dnc import DNC
def choose_complexity(min_len, max_len, cur_complexity):
rnd = np.random.choice([0, 1, 2], p=[0.1, 0.25, 0.65])
e = np.random.geometric(1/2)
res = 0
if max_len == min_len:
res = np.array([0,])
else:
res = np.zeros(max_len - min_len + 1)
if rnd == 0:
res[:] = 1 / (max_len - min_len + 1)
return res
elif rnd == 1:
max_complexity = min(max_len - min_len + 1, cur_complexity + 1 + e)
res[:max_complexity] = 1
res /= res.sum()
return res
else:
max_complexity = min(max_len - min_len, cur_complexity + e)
res[max_complexity] = 1
return res
def train(model, optimizer, criterion, train_data, validation_data, config):
if config.scheduler is not None:
optimizer, scheduler = optimizer
writer = tensorboardX.SummaryWriter(logdir=str(config.tensorboard))
iter_start_time = time.time()
loss_sum = 0
cost_sum = 0
cur_step = 0
last_curriculum_update = 0
cur_complexity = 0
for i, (x, y, m) in enumerate(train_data, 1):
model.train()
batch_size, seq_len, symbols_amount = x.shape
if config.gpu and torch.cuda.is_available():
x = x.cuda()
y = y.cuda()
m = m.cuda()
optimizer.zero_grad()
pred = model(x)
loss = criterion(pred, y, m)
loss.backward()
# clip gradients
torch.nn.utils.clip_grad_norm_(model.parameters(), config.gradient_clipping)
optimizer.step()
pred_idx = pred.data.max(-1)[1]
true_idx = y.data.max(-1)[1]
cost = ((pred_idx != true_idx).float() * m).sum() / batch_size
loss_sum += loss.item()
cost_sum += cost.item()
val_tensors = []
for (vx, vy, vm), vlength in validation_data:
val_tensors.append((vx.cuda(), vy.cuda(), vm.cuda(), vlength))
cur_step += batch_size
if i % config.verbose_interval == 0:
time_now = time.time()
time_per_iter = (time_now - iter_start_time) / config.verbose_interval * 1000.0
loss_avg = loss_sum / config.verbose_interval
cost_avg = cost_sum / config.verbose_interval
message = f"Iter: {i}, Sequences: {cur_step}, "
message += f"loss: {loss_avg:.2f}, cost: {cost_avg:.2f}, "
message += f"({time_per_iter:.2f} ms/iter)"
logging.info(message)
iter_start_time = time_now
loss_sum = 0
cost_sum = 0
if i % config.checkpoint_interval == 0:
logging.info('Saving checkpoint')
utils.save_checkpoint(
model,
optimizer,
cur_step,
train_data,
config.checkpoints)
logging.info('Validating model on longer sequences')
for vx, vy, vm, vlength in val_tensors:
vpred = model(vx)
vpred_idx = vpred.data.max(-1)[1]
vtrue_idx = vy.data.max(-1)[1]
vcost = ((vpred_idx != vtrue_idx).float() * vm).sum() / 50
if vlength == 20:
vloss = criterion(vpred, vy, vm)
writer.add_scalar(f'val/loss{vlength}', vloss.item(), global_step=cur_step)
writer.add_scalar(f'val/cost{vlength}', vcost.item(), global_step=cur_step)
if config.scheduler is not None and i % config.scheduler.interval == 0:
logging.info('Learning rate scheduler')
scheduler.step(cost.item())
# Write scalars to tensorboard
writer.add_scalar('train/loss', loss.item(), global_step=cur_step)
writer.add_scalar('train/cost', cost.item(), global_step=cur_step)
if config.curriculum is not None:
if loss.item() < config.curriculum.threshold \
and (i - last_curriculum_update) >= config.curriculum.update_step:
cur_complexity += 1
logging.info('complexity is increased. Current complexity is {}'.format(cur_complexity))
last_curriculum_update = i
train_data.distribution = choose_complexity(train_data.min_len, train_data.max_len, cur_complexity)
else:
train_data.distribution = np.ones(config.task.max_len - config.task.min_len + 1) \
/ (config.task.max_len - config.task.min_len + 1)
# Stopping
if not running:
return
if config.exit_after and cur_step > config.exit_after:
return
def setup_model(config):
# Load data
if config.task.name == 'arithmetic':
train_data = Arithmetic(
batch_size=config.task.batch_size,
min_len=config.task.min_len,
max_len=config.task.max_len,
task=config.task.task,
seed=config.seed,
)
np.random.seed(config.seed)
params = [20, 30, 40, 60]
validation_data = []
for length in params:
example = train_data.gen_batch(
batch_size=50,
min_len=length, max_len=length,
distribution=np.array([1,])
)
validation_data.append((example, length))
loss = Arithmetic.loss
else:
logging.info('Unknown task')
exit(0)
# Setup model
torch.manual_seed(config.seed)
if config.model.name == 'lstm':
model = LSTM(
n_inputs=train_data.symbols_amount,
n_outputs=train_data.symbols_amount,
n_hidden=config.model.n_hidden,
n_layers=config.model.n_layers,
)
elif config.model.name == 'ntm':
model = NTM(
input_size=train_data.symbols_amount,
output_size=train_data.symbols_amount,
mem_word_length=config.model.mem_word_length,
mem_cells_count=config.model.mem_cells_count,
n_writes=config.model.n_writes,
n_reads=config.model.n_reads,
controller_n_hidden=config.model.controller_n_hidden,
controller_n_layers=config.model.controller_n_layers,
controller=config.model.controller,
layer_sizes=config.model.layer_sizes,
controller_output=config.model.controller_output,
clip_value=config.model.clip_value,
dropout=config.model.dropout
)
elif config.model.name == 'dnc':
model = DNC(
input_size=train_data.symbols_amount,
output_size=train_data.symbols_amount,
n_cells=config.model.n_cells,
cell_width=config.model.cell_width,
n_reads=config.model.n_reads,
controller_n_hidden=config.model.controller_n_hidden,
controller_n_layers=config.model.controller_n_layers,
clip_value=config.model.clip_value,
)
else:
logging.info('Unknown model')
exit(0)
if config.gpu and torch.cuda.is_available():
model = model.cuda()
logging.info('Loaded model')
logging.info('Total number of parameters %d', model.calculate_num_params())
# Setup optimizer
if config.optimizer == 'sgd':
optimizer = torch.optim.SGD(
model.parameters(),
lr=config.learning_rate,
momentum=config.momentum
)
if config.optimizer == 'rmsprop':
optimizer = torch.optim.RMSprop(
model.parameters(),
lr=config.learning_rate,
momentum=config.momentum,
)
if config.optimizer == 'adam':
optimizer = torch.optim.Adam(
model.parameters(),
lr=config.learning_rate
)
if config.scheduler is not None:
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer,
mode='min',
factor=config.scheduler.factor,
patience=config.scheduler.patience,
verbose=config.scheduler.verbose,
threshold=config.scheduler.threshold,
)
optimizer = (optimizer, scheduler)
if config.load:
model, optimizer, train_data, step = utils.load_checkpoint(
model, optimizer, train_data, config.load,
)
return model, optimizer, loss, train_data, validation_data
def read_config():
parser = argparse.ArgumentParser(
prog='Train/Eval script',
description=('Script for training and evaluating memory models on various bitmap tasks. '
'All parameters should be given throug the config file.'),
)
parser.add_argument(
'-n',
'--name',
type=str,
required=True,
help='Name of the current experiment. Can also provide name/with/path for grouping'
)
parser.add_argument(
'-k',
'--keep',
action='store_true',
help='Keep logs from previous run.'
)
parser.add_argument(
'-l', '--load',
help='Path to checkpoint file to load from',
default=None,
)
args = parser.parse_args()
path = pathlib.Path('experiments')/args.name
assert args.name, f'No such directory: {str(path)}.'
assert (path/'config.yaml').exists(), 'No configuration file found.'
with open(path/'config.yaml') as f:
config = utils.DotDict(yaml.safe_load(f))
if not args.keep:
(path/'tensorboard').exists() and shutil.rmtree(path/'tensorboard')
(path/'checkpoints').exists() and shutil.rmtree(path/'checkpoints')
open(path/'train.log', 'w').close()
(path/'tensorboard').mkdir(exist_ok=True)
(path/'checkpoints').mkdir(exist_ok=True)
config.path = path
config.tensorboard = path/'tensorboard'
config.checkpoints = path/'checkpoints'
config.load = args.load
return config
def signal_handler(signal, frame):
global running
print('You pressed Ctrl+C!')
running = False
def main():
global running
running = True
signal.signal(signal.SIGINT, signal_handler)
config = read_config()
utils.set_logger(config.path/'train.log')
print(config.path)
print(config.tensorboard)
logging.info('Loaded config:\n')
logging.info('=' * 30 + '\n')
with open(config.path/'config.yaml') as conf:
logging.info(conf.read())
logging.info('=' * 30 + '\n')
logging.info('Start training')
model, optimizer, loss, train_data, validation_data = setup_model(config)
train(model, optimizer, loss, train_data, validation_data, config)
if __name__ == "__main__":
main()
| dasimagin/ksenia | train_arithmetic.py | train_arithmetic.py | py | 10,912 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "numpy.random.choice",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.geometric",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.ra... |
16560146527 | """empty message
Revision ID: da088c937095
Revises: ecc7f6cfc777
Create Date: 2022-08-02 16:55:48.199125
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'da088c937095'        # this migration's unique id
down_revision = 'ecc7f6cfc777'   # parent revision this migration builds on
branch_labels = None             # not part of a named branch
depends_on = None                # no cross-branch dependencies
def upgrade():
    """Schema upgrade: add the nullable ``image_withTag_path`` string column to ``user_images``."""
    tagged_path_column = sa.Column('image_withTag_path', sa.String(), nullable=True)
    op.add_column('user_images', tagged_path_column)
def downgrade():
    """Revert upgrade(): drop the image_withTag_path column from user_images."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('user_images', 'image_withTag_path')
    # ### end Alembic commands ###
| nukano0522/flask_apps | image_detection/migrations/versions/da088c937095_.py | da088c937095_.py | py | 685 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "alembic.op.add_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String"... |
12151473251 | import numpy as np
import math
import random
import matplotlib.pyplot as plt
# PSO hyper-parameter defaults.
# NOTE(review): the pso() call at the bottom of this file passes these same
# values as literals, so the constants themselves are currently unused.
NUM_DIMENSIONS = 10         # search-space dimensionality
NUM_PARTICLES = 50          # swarm size
MAX_ITERATIONS = 100        # iteration budget
INERTIA_WEIGHT = 0.729      # velocity inertia coefficient
COGNITIVE_WEIGHT = 1.49445  # pull toward a particle's personal best
SOCIAL_WEIGHT = 1.49445     # pull toward the swarm's global best
MIN_POSITION = -5.12        # lower bound of the search box
MAX_POSITION = 5.12         # upper bound of the search box
def f1(position: np.ndarray):
    """Rastrigin test function: sum(x_i^2 - 10*cos(2*pi*x_i) + 10); global minimum 0 at the origin."""
    squared_term = position ** 2
    cosine_term = 10 * np.cos(2 * math.pi * position)
    return np.sum(squared_term - cosine_term + 10)
class Particle:
    """State of one PSO particle: current position, velocity, and its personal best."""

    def __init__(self, num_dimensions: int = 0):
        # BUG FIX: pso() below constructs particles as ``Particle()`` with no
        # argument, which raised TypeError.  A default of 0 makes that call
        # succeed; initialize_particle() overwrites every field with properly
        # sized arrays before the particle is used.
        self.position = np.zeros(num_dimensions)
        self.velocity = np.zeros(num_dimensions)
        self.personal_best = np.zeros(num_dimensions)
        self.personal_best_fitness = float('inf')
def initialize_particle(particle: "Particle", min_position: float, max_position: float, num_dimensions: int, evaluate_func: callable):
    """Randomize *particle* uniformly inside the box and seed its personal best.

    The annotation is a string forward reference so the function does not
    depend on evaluation order of the Particle class.
    """
    particle.position = np.random.uniform(min_position, max_position, num_dimensions)
    particle.velocity = np.zeros(num_dimensions)
    # BUG FIX: ``personal_best`` used to alias ``position``; the in-place
    # position updates in update_particle() then silently rewrote the
    # recorded personal best too.  Store an independent copy.
    particle.personal_best = particle.position.copy()
    particle.personal_best_fitness = evaluate_func(particle.position)
def update_particle(particle: "Particle", global_best: np.ndarray, num_dimensions: int, min_position: float, max_position: float, inertia_weight: float, cognitive_weight: float, social_weight: float, evaluate_func: callable):
    """Advance one particle a single PSO step and refresh its personal best.

    The new velocity blends inertia, a random pull toward the particle's own
    best, and a random pull toward the swarm best; positions are clamped to
    the [min_position, max_position] box.
    """
    for i in range(num_dimensions):
        r1 = random.random()
        r2 = random.random()
        particle.velocity[i] = (inertia_weight * particle.velocity[i] +
                                cognitive_weight * r1 * (particle.personal_best[i] - particle.position[i]) +
                                social_weight * r2 * (global_best[i] - particle.position[i]))
        particle.position[i] += particle.velocity[i]
        # Clamp position within the valid range
        particle.position[i] = max(min(particle.position[i], max_position), min_position)
    fitness = evaluate_func(particle.position)
    if fitness < particle.personal_best_fitness:
        # BUG FIX: storing ``particle.position`` directly kept an alias, so
        # every later in-place move rewrote the recorded best.  Copy instead.
        particle.personal_best = particle.position.copy()
        particle.personal_best_fitness = fitness
def pso(num_dimensions: int, num_particles: int, max_iterations: int, min_position: float, max_position: float, inertia_weight: float, cognitive_weight: float, social_weight: float, evaluate_func: callable):
    """Run particle swarm optimization of *evaluate_func* over a box domain.

    Prints the best point found and shows a convergence plot; returns None.
    """
    # PSO initialization.  BUG FIX: particles were built with ``Particle()``
    # although __init__ requires num_dimensions, raising TypeError at runtime.
    particles = [Particle(num_dimensions) for _ in range(num_particles)]
    global_best = np.zeros(num_dimensions)
    global_best_fitness = float('inf')

    # Initialize particles and seed the global best.
    for particle in particles:
        initialize_particle(particle, min_position, max_position, num_dimensions, evaluate_func)
        if particle.personal_best_fitness < global_best_fitness:
            # Copy so later in-place particle moves cannot mutate the
            # recorded global best through array aliasing.
            global_best = particle.personal_best.copy()
            global_best_fitness = particle.personal_best_fitness

    # PSO iterations
    convergence_data = []
    for _ in range(max_iterations):
        convergence_data.append(global_best_fitness)
        for particle in particles:
            update_particle(particle, global_best, num_dimensions, min_position, max_position, inertia_weight, cognitive_weight, social_weight, evaluate_func)
            if particle.personal_best_fitness < global_best_fitness:
                global_best = particle.personal_best.copy()
                global_best_fitness = particle.personal_best_fitness

    # Print global minimum
    print("Global Minimum Found:")
    for i, value in enumerate(global_best):
        print(f"x[{i}] = {value}")
    print("Minimum Fitness:", global_best_fitness)

    # Plot convergence graph
    plt.plot(range(max_iterations), convergence_data)
    plt.title("PSO Convergence")
    plt.xlabel("Iteration")
    plt.ylabel("Best Fitness")
    plt.show()
if __name__ == "__main__":
pso(num_dimensions=10, num_particles=50, max_iterations=100, min_position=-5.12, max_position=5.12, inertia_weight=0.729, cognitive_weight=1.49445, social_weight=1.49445, evaluate_func=f1) | dvher/AlgoritmosExactosMetaheuristica | Tarea3/main.py | main.py | py | 3,848 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.ndarray",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 1... |
9092725521 | import helpers
import numpy as onp
import enum
class Methods(enum.Enum):
    """Estimation methods a run can enable; members are compared by identity, values are opaque."""
    Sturm = enum.auto()
    SturmOriginal = enum.auto()
    FactorGraph = enum.auto()
    FactorGraphGT = enum.auto()  # presumably the ground-truth factor-graph variant — confirm
number_samples = 50  # trajectories generated per parameter combination

# Sweep grid for rotational joints (orientation noise and sample length in radians).
parameter_set_rot = {
    "stddev_pos": onp.array([0.001, 0.03, 0.1]),
    "stddev_ori": onp.array([1.0, 3.0, 10.0]) * onp.pi / 180,
    "observation_amount": onp.array([5, 10, 20, 40, 80, 160, 320]),
    "sample_length": onp.array([15.0, 45.0, 90.0]) * onp.pi / 180,
}

# Sweep grid for translational joints (sample_length is a distance here).
parameter_set_trans = {
    "stddev_pos": onp.array([0.001, 0.03, 0.1]),
    "stddev_ori": onp.array([1.0, 3.0, 10.0]) * onp.pi / 180,
    "observation_amount": onp.array([5, 10, 20, 40, 80, 160, 320]),
    "sample_length": onp.array([0.05, 0.20, 0.40]),
}

# NOTE(review): the first assignment is immediately overwritten by the second,
# so only [FactorGraph, SturmOriginal] takes effect.  Looks like a quick
# toggle left in place — confirm which method set is intended.
methods_to_use = [Methods.FactorGraph]
methods_to_use = [Methods.FactorGraph, Methods.SturmOriginal]
def dispatch(motion_type, parameter_set):
    """Print one `only_poses.main` shell command per combination in *parameter_set*.

    Commands are printed rather than executed so they can be piped into a
    scheduler or a shell.
    """
    for combo in helpers.dict_product(parameter_set):
        sd_p = combo["stddev_pos"]
        sd_o = combo["stddev_ori"]
        ob_a = combo["observation_amount"]
        sa_l = combo["sample_length"]
        mt = motion_type
        experiment_name = (
            f"op_sd_p_{sd_p}_sd_o_{sd_o}_ob_a_{ob_a}_sa_l_{sa_l}_mt_{motion_type}"
        )
        cmd = (
            f"python -m only_poses.main --experiment-root-path"
            f" ./experiments/only_poses/ --experiment-name {experiment_name}"
            f" --motion-type {mt} --stddev-pos {sd_p} --stddev-ori {sd_o}"
            f" --observation-amount {ob_a} --sample-length {sa_l}"
            f" --number-samples {number_samples} --create-samples"
        )
        # Add huber
        # cmd += " --all-hubers"
        # cmd += " --huber-delta 1"

        # Disable every method that is not listed in methods_to_use.
        method_flags = (
            (Methods.Sturm, " --no-use-sturm"),
            (Methods.SturmOriginal, " --no-use-sturm-original"),
            (Methods.FactorGraph, " --no-use-fg"),
            (Methods.FactorGraphGT, " --no-use-fg-gt"),
        )
        for method, flag in method_flags:
            if method not in methods_to_use:
                cmd += flag
        print(cmd)
# Emit the full command list for both joint types.
dispatch("TRANS", parameter_set_trans)
dispatch("ROT", parameter_set_rot)
| SuperN1ck/cat-ind-fg | only_poses/run_all.py | run_all.py | py | 2,174 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "enum.auto",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "enum.auto",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "enum.auto",
"line_number": 9,
... |
42406705027 | """A matrix is a collection of scalar values arranged in rows and columns as a rectan-
gular grid of a fixed size. The elements of the matrix can be accessed by specifying
a given row and column index with indices starting at 0.
"""
from typing import Any
from py_ds.arrays.array import Array
from py_ds.arrays.array2d import Array2D
class Matrix():
    """A fixed-size rectangular grid of scalars backed by Array2D.

    Elements are addressed by a (row, col) tuple with 0-based indices.
    """

    def __init__(self, numRows: int, numCols: int):
        """Create a numRows x numCols matrix with every element set to 0."""
        self.__matrix = Array2D(numRows, numCols)
        self.__matrix.zeros()

    @classmethod
    def of(cls, *arrays: "Array"):
        """Build a matrix whose rows are the given arrays (assumed equal length)."""
        nrRows = len(arrays)
        nrCols = len(arrays[0])
        obj = cls(nrRows, nrCols)
        for idx in range(nrRows):
            row = arrays[idx]
            for col_idx in range(len(row)):
                obj.__setitem__((idx, col_idx), row[col_idx])
        return obj

    def getNumOfRows(self) -> int:
        """Return the number of rows in the matrix."""
        return self.__matrix.getNumOfRows()

    def getNumOfColumns(self):
        """Return the number of columns in the matrix."""
        return self.__matrix.getNumOfColumns()

    def __getitem__(self, ndxTuple):
        """Return the value at (row, col); both indices must be within the valid range."""
        return self.__matrix.__getitem__(ndxTuple)

    def __setitem__(self, ndxTuple, scalar):
        """Set the element at (row, col) to scalar; indices must be within the valid range."""
        self.__matrix.__setitem__(ndxTuple, scalar)

    def scaleBy(self, scalar):
        """Multiply every element by *scalar*, in place."""
        for array in self.__matrix:
            for idx in range(len(array)):
                array[idx] *= scalar

    def transpose(self):
        """Transpose this matrix IN PLACE and return None.

        (The previous docstring wrongly claimed a new matrix was returned;
        the method replaces the backing Array2D of *this* matrix.)
        """
        newNrRows = self.getNumOfColumns()
        newNrCols = self.getNumOfRows()
        transposed = Array2D(newNrRows, newNrCols)
        for ridx in range(self.getNumOfRows()):
            for cidx in range(self.getNumOfColumns()):
                value = self.__getitem__((ridx, cidx))
                transposed.__setitem__((cidx, ridx), value)
        self.__matrix = transposed

    def __applyOperator(self, rhsMatrix: 'Matrix', func, row_index: int, column_index: int) -> None:
        # Store func(self[i, j], rhs[i, j]) back into self[i, j].
        value = self.__operator(rhsMatrix, func, row_index, column_index)
        self.__setitem__((row_index, column_index), value)

    def __operator(self, rhsMatrix: 'Matrix', func, row_index: int, column_index: int) -> Any:
        # Combine the two matrices' (i, j) elements with func, without storing.
        return func(self.__getitem__((row_index, column_index)), rhsMatrix.__getitem__((row_index, column_index)))

    def add(self, rhsMatrix: 'Matrix') -> None:
        """Add rhsMatrix into this matrix element-wise, IN PLACE (returns None).

        Both matrices must have identical dimensions.  (The previous
        docstring wrongly claimed a new matrix was created and returned.)
        """
        assert self.getNumOfRows() == rhsMatrix.getNumOfRows(), "Number of rows must be the same"
        assert self.getNumOfColumns() == rhsMatrix.getNumOfColumns(), "Number of columns must be the same"
        for ridx in range(self.getNumOfRows()):
            for cidx in range(self.getNumOfColumns()):
                self.__applyOperator(rhsMatrix, (lambda x, y: x + y), ridx, cidx)

    def subtract(self, rhsMatrix: 'Matrix'):
        """Subtract rhsMatrix from this matrix element-wise, IN PLACE (returns None).

        Both matrices must have identical dimensions.
        """
        assert self.getNumOfRows() == rhsMatrix.getNumOfRows(), "Number of rows must be the same"
        assert self.getNumOfColumns() == rhsMatrix.getNumOfColumns(), "Number of columns must be the same"
        for ridx in range(self.getNumOfRows()):
            for cidx in range(self.getNumOfColumns()):
                self.__applyOperator(rhsMatrix, (lambda x, y: x - y), ridx, cidx)

    def multiply(self, rhsMatrix: 'Matrix') -> 'Matrix':
        """Return a new matrix equal to the product self x rhsMatrix.

        Defined whenever self's column count equals rhsMatrix's row count:
        an (m x n) times (n x p) product yields an (m x p) matrix, where each
        element is the dot product of a row of self with a column of rhs.

        BUG FIX: the old code additionally required n == p (asserting rows
        against the other operand's columns) and sized the result as
        max(m, n) x max(n, p), leaving zero padding outside the m x p block.
        """
        assert self.getNumOfColumns() == rhsMatrix.getNumOfRows(), \
            "Number of columns must equal the other matrix's number of rows"
        multiplied = Matrix(self.getNumOfRows(), rhsMatrix.getNumOfColumns())
        for a_ridx in range(self.getNumOfRows()):
            for b_cidx in range(rhsMatrix.getNumOfColumns()):
                # Accumulate the dot product locally, then store once.
                tmp = multiplied.__getitem__((a_ridx, b_cidx))
                for b_ridx in range(rhsMatrix.getNumOfRows()):
                    tmp += self.__getitem__((a_ridx, b_ridx)) * rhsMatrix.__getitem__((b_ridx, b_cidx))
                multiplied.__setitem__((a_ridx, b_cidx), tmp)
        return multiplied

    def __eq__(self, other):
        """Element-wise equality; False for non-Matrix operands or mismatched shapes.

        BUG FIX: the old guard read ``not A == B and C == D`` which parses as
        ``(not (A == B)) and (C == D)``, so a row-count mismatch slipped past
        the shape check and went on to element comparison.
        """
        if not isinstance(other, Matrix):
            return False
        if (other.getNumOfColumns() != self.getNumOfColumns()
                or other.getNumOfRows() != self.getNumOfRows()):
            return False
        # delegate to Array2D
        return other.__matrix.__eq__(self.__matrix)

    def __str__(self):
        """Render as a multi-line string, one '|'-delimited tab-separated line per row."""
        def print_row(row) -> str:
            token = "|\t"
            for idx in range(len(row)):
                token += str(row[idx])
                if idx < len(row) - 1:
                    token += "\t"
            token += "\t|\n"
            return token
        val = "\nMatrix\n"
        for array in self.__matrix:
            val += print_row(array)
        return val
| jurajzachar/py-ds | py_ds/arrays/matrix.py | matrix.py | py | 6,832 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "py_ds.arrays.array2d.Array2D",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "py_ds.arrays.array.Array",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "py_ds.arrays.array2d.Array2D",
"line_number": 56,
"usage_type": "call"
},
{
... |
17887209605 | from pdb import Pdb
class Powerdb(Pdb):
def precmd(self, line):
if not isinstance(line, str): return line
return super().precmd(line)
def onecmd(self, line):
self.prompt = '--> '
# print('line:', line)
if line == ':r':
self.message('%-15s' % '[Step Out....]')
return self.do_return(None)
if line == ':s':
self.message('%-15s' % '[Step Into...]')
return self.do_step(None)
if line == ':n':
self.message('%-15s' % '[Step Next...]')
return self.do_next(None)
if line == ':c':
self.message('%-15s' % '[Continue....]')
return self.do_continue(None)
if line == ':u':
return self.do_up(None)
if line == ':d':
return self.do_down(None)
if line == ':l':
return self.do_list(None)
if line == ':w':
return self.do_where(None)
if line == ':q':
self.message('%-15s' % '[Step Debug..]')
return self.do_quit(None)
if line == ':m':
return self.refresh_bpmark(None)
if isinstance(line, tuple):
method, name = line
# if method == 'locals':
# self.message((get_locals(self.curframe_locals, name), True))
# self.prompt = None
if method == 'breakpoint':
self.clear_all_breaks()
for file, line in name:
self.set_break(file, line)
self.message(('breakpoint', True))
if self.first:
self.first, self.prompt = False, None
return
self.message('%-15s' % '[Set BreakPoint...]\n')
return 0
_, self.message = self.message, print
self.default(line)
self.message = _
def user_call(self, frame, argument_list):
if self._wait_for_mainpyfile: return
if self.stop_here(frame):
self.interaction(frame, None)
def user_return(self, frame, return_value):
pass
def print_stack_entry(self, frame_lineno, prompt_prefix=''):
frame, lineno = frame_lineno
import linecache
filename = self.canonic(frame.f_code.co_filename)
line = linecache.getline(filename, lineno, frame.f_globals)
{'path': filename, 'no': lineno, 'line': line}
self.message(({'path': filename, 'no': lineno, 'line': line.rstrip()}, True))
if self.first: self.message('--> %-15s' % '[Debugging...]')
self.message('%-15s ln:%-4d || %s' % (
filename.split('\\')[-1], lineno, line))
def debug(self, filename, globals=None, locals=None):
import os, os.path as osp, sys
os.chdir(osp.split(filename)[0])
sys.path.append(osp.split(filename)[0])
self.prompt, self.first = '--> ', True
# import __main__
# __main__.__dict__.clear()
# __main__.__dict__.update({"__name__": "__main__",
# "__file__": filename, "__builtins__": __builtins__, })
# self._wait_for_mainpyfile = True
# self.mainpyfile = self.canonic(filename)
# self._user_requested_quit = False
with open(filename, "rb") as fp:
statement = "exec(compile(%r, %r, 'exec'))" % (fp.read(), self.mainpyfile)
self.set_break(file_path, 7, temporary=False)
# self.run(statement, globals, locals)
# self.run('python3 -u '+file_path, globals, locals)
self._runscript(file_path)
# self.message(({'path': 'end', 'no': 0, 'line': ''}, False))
# self.message(({'path': 'end', 'no': 0, 'line': ''}, False))
# self.message('Debug Completed...\n')
# Ad-hoc driver: run the test script under the custom debugger.
db = Powerdb()
# db.message = self.write
# NOTE(review): hard-coded developer-machine path — this script only runs on that machine as-is.
file_path = '/home/hzy/Documents/Developing/Python/pyminer_dist_debian_deepin/pyminer/pmtoolbox/debug/test2.py'
db.debug(file_path)
| pyminer/pyminer | pyminer/utils/debug/pdbtest.py | pdbtest.py | py | 3,956 | python | en | code | 77 | github-code | 36 | [
{
"api_name": "pdb.Pdb",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "linecache.getline",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line_number": ... |
33434928097 | from django.views.decorators.http import require_http_methods
from django.http import HttpResponse
import os
def get_file(fpath):
    """Return the binary contents of *fpath*, or None if it is not an existing file.

    BUG FIX: the file handle was opened and never closed (a descriptor leak,
    and a locked file on Windows); the context manager releases it on every
    path, including when read() raises.
    """
    if os.path.isfile(fpath):
        with open(fpath, 'rb') as f:
            return f.read()
    return None
# 显示图片
@require_http_methods(["GET"])
def show_image(request):
pic_addr = str(request.path).replace("/showimage/", "/")
content = get_file(pic_addr)
if content is not None:
return HttpResponse(content, content_type="image/png") | XuYiFanHHH/QingXian_Back-end | QingXian/views.py | views.py | py | 489 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.isfile",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.views.decor... |
36287589774 | from django.shortcuts import render, redirect, HttpResponse
from . import functions
# Create your views here.
# The actual credential check lives in home(); this view only renders the form.
def login(request):
    """Show the login page, or jump to /home/ if the session is already authenticated."""
    if request.session.get('is_login', None):  # login state is cached in the session for reuse
        return redirect('/home/')
    return render(request, 'login.html', {'error': False})  # pass the error flag to the template
# Log out: clear the session (if one exists) and return to the login page.
def logout(request):
    if request.session.get('is_login', None):
        request.session.flush()  # drop all session data, not just the login flag
    return redirect('/login/')
# Registration landing page; exists only for navigation to the form.
def register(request):
    if request.session.get('is_login', None):  # logged-in users have no business registering
        return redirect('/home/')
    return render(request, 'register.html')
# Handle the submitted registration form.
def register_confirm(request):
    """Validate the registration POST and create the user.

    Template error codes: 0 = name already taken, 1 = password mismatch,
    2 = user creation failed, 3 = unknown role.
    BUG FIX: a non-POST request used to fall off the end and return None,
    which Django rejects; it now redirects back to the registration form.
    """
    if request.method == 'POST':
        name = request.POST.get('name')
        password1 = request.POST.get('password1')
        password2 = request.POST.get('password2')
        types = request.POST.get('types')
        user = functions.find_user(name)  # truthy when the name already exists
        if user:
            return render(request, 'register.html', {'error': 0})
        if password1 != password2:
            return render(request, 'register.html', {'error': 1})
        # Role must be one of: doctor, head nurse, ward nurse, emergency nurse.
        if types != '医生' and types != '护士长' and types != '病房护士' and types != '急诊护士':
            return render(request, 'register.html', {'error': 3})
        if functions.add_user(name, password1, types):
            return redirect('/login/')
        else:  # insertion failed
            return render(request, 'register.html', {'error': 2})
    return redirect('/register/')
# Main page handler; the real login POST is processed here as well.
def home(request):
    """Render the home page; on POST, check credentials and create the session."""
    if request.session.get('is_login', None):  # already logged in; 'cur' 0 highlights the home tab
        return render(request, 'home.html', {'cur': 0})
    # Login handling.
    if request.method == 'POST':  # credentials submitted from the login form
        name = request.POST.get('name')
        password = request.POST.get('password')
        user = functions.find_name_password(name, password)  # list of matching (name, password, type) tuples
        if user:  # match found: persist identity in the session
            request.session['is_login'] = True
            request.session['name'] = user[0][0]
            request.session['password'] = user[0][1]
            request.session['type'] = user[0][2]
            return render(request, 'home.html', {'cur': 0})
        else:
            return render(request, 'login.html', {'error': True})  # wrong credentials: back to login with the error flag
    else:  # plain GET without a session
        return redirect('/login/')
def patient(request):
    """List the patients visible to the logged-in staff member.

    Doctors and head nurses see their own patients; ward nurses see their
    ward's patients.  Row layout assumed from the indexing below: (id, age,
    gender, level, name, section, status, ward_name, ward_nurse).
    """
    if request.session.get('is_login', None):
        # BUG FIX: `result` was only assigned for doctor/head-nurse/ward-nurse
        # roles, so any other role (e.g. emergency nurse) hit a NameError in
        # the loop below.  Default to an empty result set.
        result = []
        if request.session.get('type') == '医生' or request.session.get('type') == '护士长':
            result = functions.my_patient(request.session.get('name'))
        if request.session.get('type') == '病房护士':
            result = functions.my_patient_ward_nurse(request.session.get('name'))
        patients, idx = [], 1
        for i in result:
            data = {'idx': idx, 'id': i[0], 'age': i[1],
                    'gender': '男' if i[2] else '女', 'level': i[3],
                    'name': i[4], 'section': i[5],
                    'status': i[6], 'ward_name': i[7], 'ward_nurse': i[8]}
            patients.append(data)
            idx += 1
        return render(request, 'patient.html', {'patients': patients, 'cur': 2})
    else:
        return redirect('/login/')
def patient_query(request):
    """Search patients by the single attribute submitted from the patient page."""
    # NOTE(review): a non-POST request returns None here, which Django
    # rejects — consider redirecting like the other views.  This view also
    # skips the is_login check its siblings perform; confirm intended.
    if request.method == 'POST':
        attribute = request.POST.get('attribute')
        patients, result = [], []
        result = functions.find_patient(attribute)
        idx = 1
        for i in result:
            data = {'idx': idx, 'id': i[0], 'age': i[1],
                    'gender': '男' if i[2] else '女', 'level': i[3],
                    'name': i[4], 'section': i[5],
                    'status': i[6], 'ward_name': i[7], 'ward_nurse': i[8]}
            patients.append(data)
            idx += 1
        return render(request, 'patient.html', {'patients': patients, 'cur': 2})
def patient_information(request, name):
    """Show the temperature/test history for the patient called *name*."""
    if request.session.get('is_login', None):
        result = functions.patient_history(name)
        informations, idx = [], 1
        for i in result:
            # Row layout assumed from the indexing: (id, date, name, temperature, positive).
            data = {'idx': idx, 'id': i[0], 'name': i[2],
                    'date': i[1],
                    'temperature': i[3], 'positive': i[4]}
            informations.append(data)
            idx += 1
        return render(request, 'patient_information.html', {'informations': informations})
    else:
        return redirect('/login/')
def nurses(request):
    """List the nurses in the logged-in user's section ('cur' 3 highlights the nurses tab)."""
    if request.session.get('is_login', None):
        result = functions.section_nurses(request.session.get('name'))
        nurses, idx = [], 1
        for i in result:
            # Row layout assumed from the indexing: (name, gender, type).
            data = {'idx': idx, 'name': i[0], 'gender': i[1],
                    'type': i[2]}
            nurses.append(data)
            idx += 1
        return render(request, 'nurses.html', {'nurses': nurses, 'cur': 3})
    else:
        return redirect('/login/')
def information(request):
    """Show the logged-in user's own profile ('cur' 1 highlights the info tab)."""
    if request.session.get('is_login', None):
        result = functions.show_information(request.session.get('name'))
        informations, idx = [], 1
        for i in result:
            # Row layout assumed from the indexing: (name, age, gender, type, section).
            data = {'idx': idx, 'name': i[0], 'age': i[1],
                    'gender': i[2], 'type': i[3], 'section': i[4]}
            informations.append(data)
            idx += 1
        return render(request, 'information.html', {'informations': informations, 'cur': 1})
    else:
        return redirect('/login/')
def modify_information(request):
    """Render the profile-editing form; anonymous users go back to login."""
    if request.session.get('is_login', None):
        return render(request, 'information_modify.html', {'cur': 1})
    else:
        return redirect('/login/')
def modify_information_confirm(request):
    """Apply the submitted profile edits.

    Template error codes: 0 = unknown section, 1 = invalid gender,
    2 = update failed.
    """
    # NOTE(review): a non-POST request returns None, which Django rejects.
    if request.method == 'POST':
        name = request.POST.get('name')
        age = request.POST.get('age')
        gender = request.POST.get('gender')
        section = request.POST.get('section')
        # Section must be one of: mild, severe, critical care areas.
        if section != '轻症区域' and section != '重症区域' and section != '危重症区域':
            return render(request, 'information_modify.html', {'error': 0, 'cur': 1})
        if gender != '男' and gender != '女':
            return render(request, 'information_modify.html', {'error': 1, 'cur': 1})
        if functions.modify_information(request.session.get('name'), name, age, gender, section):
            return redirect('/information/')
        else:  # update did not succeed
            return render(request, 'information_modify.html', {'error': 2, 'cur': 1})
def report(request):
    """Render the test-report form ('cur' 4 highlights the report tab)."""
    if request.session.get('is_login', None):
        return render(request, 'report.html', {'cur': 4})
    else:
        return redirect('/login/')
def report_confirm(request):
    """Save a submitted test report; only doctors may file one.

    Template error codes: 0 = patient not found, 1 = save failed,
    2 = caller is not a doctor.
    """
    # NOTE(review): a non-POST request returns None, which Django rejects.
    # Also, `id` shadows the builtin of the same name.
    if request.method == 'POST':
        id = request.POST.get('id')
        name = request.POST.get('name')
        positive = request.POST.get('positive')
        user = functions.find_patient(name)  # existence check only; the rows are not used
        if not user:
            return render(request, 'report.html', {'error': 0, 'cur': 4})
        if request.session.get('type') != '医生':
            return render(request, 'report.html', {'error': 2, 'cur': 4})
        if functions.new_report(id, name, positive):
            return HttpResponse("提交成功!")
        else:  # save did not succeed
            return render(request, 'report.html', {'error': 1, 'cur': 4})
def patient_add(request):
    """Render the patient-admission form ('cur' 2 highlights the patients tab)."""
    if request.session.get('is_login', None):
        return render(request, 'patient_add.html', {'cur': 2})
    else:
        return redirect('/login/')
def patient_add_confirm(request):
    """Admit a new patient; only emergency nurses may do this.

    Template error codes: 0 = caller is not an emergency nurse,
    1 = insertion failed.
    """
    # NOTE(review): a non-POST request returns None, which Django rejects.
    if request.method == 'POST':
        name = request.POST.get('name')
        age = request.POST.get('age')
        gender = request.POST.get('gender')
        level = request.POST.get('level')
        section = request.POST.get('section')
        ward_name = request.POST.get('ward_name')
        ward_nurse = request.POST.get('ward_nurse')
        if request.session.get('type') != '急诊护士':
            return render(request, 'patient_add.html', {'error': 0, 'cur': 2})
        if functions.new_patient(name, age, gender, level, section, ward_name, ward_nurse):
            return HttpResponse("提交成功!")
        else:  # insertion did not succeed
            return render(request, 'patient_add.html', {'error': 1, 'cur': 2})
| Spetrichor/database_project | app01/views.py | views.py | py | 9,068 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.shortcuts.redirect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 17,
"usage_type": "call"
},
{
"api_na... |
10492838240 | import open3d as o3d
import copy
from modern_robotics import *
# Registration tuning constants, all scaled from the downsampling voxel size
# (units are whatever the point clouds use — presumably millimeters; confirm).
down_voxel_size = 10
icp_distance = down_voxel_size * 15          # max correspondence distance for the coarse point-to-plane ICP
result_icp_distance = down_voxel_size * 1.5  # tighter threshold for the colored-ICP refinement
radius_normal = down_voxel_size * 2          # neighborhood radius for normal estimation
def cal_angle(pl_norm, R_dir):
    """Unsigned angle in radians (range [0, pi/2]) between a plane normal and a 3-vector.

    *pl_norm* exposes .x/.y/.z components; *R_dir* is indexable.  Taking the
    absolute value of the dot product folds opposite directions together.
    """
    dot_product = pl_norm.x * R_dir[0] + pl_norm.y * R_dir[1] + pl_norm.z * R_dir[2]
    return np.arccos(np.abs(dot_product))
def registerLocalCloud(target, source):
    """Estimate the rigid transform aligning *source* onto *target*.

    A coarse point-to-plane ICP from the identity provides the initial
    guess; colored ICP then refines it.  Returns the refined 4x4 transform.
    """
    source_temp = copy.deepcopy(source)
    target_temp = copy.deepcopy(target)
    # NOTE(review): in current Open3D, voxel_down_sample() RETURNS the
    # downsampled copy instead of mutating in place — these two results are
    # discarded, so the clouds stay at full resolution.  Confirm against the
    # Open3D version in use.
    source_temp.voxel_down_sample(down_voxel_size)
    target_temp.voxel_down_sample(down_voxel_size)
    source_temp.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(
        radius=radius_normal, max_nn=30))
    target_temp.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(
        radius=radius_normal, max_nn=30))
    # source_temp.estimate_normals()
    # target_temp.estimate_normals()
    current_transformation = np.identity(4)
    # Coarse alignment: point-to-plane ICP seeded with the identity.
    result_icp_p2l = o3d.pipelines.registration.registration_icp(source_temp, target_temp, icp_distance,
        current_transformation, o3d.pipelines.registration.TransformationEstimationPointToPlane())
    print("----------------")
    print("initial guess from Point-to-plane ICP registeration")
    print(result_icp_p2l)
    print(result_icp_p2l.transformation)
    p2l_init_trans_guess = result_icp_p2l.transformation
    # print('try result_icp')
    # Refinement: colored ICP (uses both geometry and color residuals).
    result_icp = o3d.pipelines.registration.registration_colored_icp(source_temp, target_temp, result_icp_distance,
        p2l_init_trans_guess, o3d.pipelines.registration.TransformationEstimationForColoredICP(),
        )
    print("----------------")
    print("result icp")
    print(result_icp)
    print(result_icp.transformation)
    return result_icp.transformation
if __name__ == '__main__':
with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:
# Load point clouds
pcds = []
for i in range(24):
pcd = o3d.io.read_point_cloud(f"./360degree_pointclouds/test_pointcloud_{i}.pcd")
print(np.mean(np.asarray(pcd.points)[:, 2]))
# Statistical outlier removal
# pcd, _ = pcd.remove_statistical_outlier(nb_neighbors=20,
# std_ratio=2.0)
pcds.append(pcd)
# Visualize the mesh
o3d.visualization.draw_geometries(pcds)
cloud_base = pcds[0]
cloud1 = copy.deepcopy(cloud_base)
detectTransLoop = np.identity(4)
posWorldTrans = np.identity(4)
for cloud2 in pcds[1:]:
posLocalTrans = registerLocalCloud(cloud1, cloud2)
detectTransLoop = np.dot(posLocalTrans, detectTransLoop)
posWorldTrans = np.dot(posWorldTrans, posLocalTrans)
cloud1 = copy.deepcopy(cloud2)
cloud2.transform(posWorldTrans)
cloud_base = cloud_base + cloud2
# downsampling
# cloud_base.voxel_down_sample(down_voxel_size)
# # Statistical outlier removal
# cloud_base, _ = cloud_base.remove_statistical_outlier(nb_neighbors=30,
# std_ratio=3.0)
# Visualize the mesh
o3d.visualization.draw_geometries([cloud_base])
# Save point cloud
o3d.io.write_point_cloud('./merged_pointcloud_{0}.pcd'.format(i), cloud_base) | chansoopark98/3D-Scanning | test_color_icp.py | test_color_icp.py | py | 3,645 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "copy.deepcopy",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "open3d.geometry.KDTreeSearchParamHybrid",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "o... |
1934215882 | import pandas as pd
import matplotlib.pyplot as plt
# Load the raw iris data with no header row.
# NOTE(review): header=-1 was removed in later pandas releases — header=None
# is the supported spelling; confirm the pandas version this script targets.
Data_Raw = pd.read_csv('iris.data',sep=',',header=-1)
Unique_Label = pd.unique(Data_Raw.values[:,4])  # the distinct species names from column 4
# Map each species string to its index within Unique_Label.
# NOTE(review): NUmeric_Label is never used below — dead unless consumed elsewhere.
NUmeric_Label = Data_Raw[4].apply(list(Unique_Label).index)
count = 0
colors = ['red','green','blue','purple']  # one color per class (one spare)
# Scatter the first two feature columns, one colored series per species.
for i in Unique_Label:
    Temp = Data_Raw[(Data_Raw[4]==i)][[0,1]]
    plt.scatter(Temp[:][0],Temp[:][1], marker='^', c=colors[count], label = i)
    count = count +1
plt.legend()
plt.xlabel('Var1')
plt.ylabel('Var2')
plt.show() | melikaknight/Iris-Dataset | P4c.py | P4c.py | py | 513 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pandas.unique",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pypl... |
27501697384 | from datetime import datetime
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
# Minimal DAG: three independent Airtable base downloads fan into a failure notifier.
with DAG(dag_id="airtable-download",
         start_date=datetime(2021, 11, 1),
         concurrency=1) as dag:
    # All downloads share the airtable-api-pool so API rate limits are respected.
    download_time_tracker_base = DummyOperator(task_id="download-time-tracker-base",
                                               pool="airtable-api-pool")
    download_projects_base = DummyOperator(task_id="download-projects-base",
                                           pool="airtable-api-pool")
    download_crm_base = DummyOperator(task_id="download-crm-base",
                                      pool="airtable-api-pool")
    # trigger_rule="one_failed": fires as soon as any upstream download fails.
    notify_failure = DummyOperator(task_id="notify-failure",
                                   trigger_rule="one_failed")
    [download_crm_base, download_projects_base, download_time_tracker_base] >> notify_failure
| ktechboston/kt-public-dags | dags/airtable-example/airtable-download.py | airtable-download.py | py | 908 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "airflow.DAG",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "airflow.operators.dummy_operator.DummyOperator",
"line_number": 10,
"usage_type": "call"
},
{
"api_na... |
74518816744 | # -*- coding: utf-8 -*-
#!/usr/bin/python3
from mitmproxy.options import Options
from mitmproxy.proxy.config import ProxyConfig
from mitmproxy.proxy.server import ProxyServer
from mitmproxy.tools.dump import DumpMaster
import argparse
GOOGLE_URL = 'googleapis.com'
class Addon(object):
    """mitmproxy addon that stamps a bearer token onto Google API requests."""

    def __init__(self, token):
        super().__init__()
        self.token = token

    def request(self, flow):
        """Rewrite the Authorization header of every matching request.

        OAuth token-exchange endpoints are left untouched so the auth flow
        itself keeps working.
        """
        url = flow.request.pretty_url
        if GOOGLE_URL not in url:
            return
        # print(url); print(flow.request.headers)
        if 'oauth2' in url:
            return
        flow.request.headers['authorization'] = 'Bearer %s' % self.token
class ProxyMaster(DumpMaster):
    """DumpMaster variant whose run() treats Ctrl+C as an orderly shutdown."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def run(self):
        try:
            super().run()
        except KeyboardInterrupt:
            self.shutdown()
if __name__ == "__main__":
# Parse args
parser = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="Auth token")
parser.add_argument("--port", type=str, help="Port to listen to")
args = parser.parse_args()
# Set options
options = Options(
listen_host='0.0.0.0',
listen_port=int(args.port),
http2=True
)
config = ProxyConfig(options)
# Create master
master = ProxyMaster(options, with_termlog=False, with_dumper=False)
master.server = ProxyServer(config)
master.addons.add(Addon(args.token))
# Run the proxy
master.run() | ThibaultLengagne/gta | start_mitm_proxy.py | start_mitm_proxy.py | py | 1,655 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mitmproxy.tools.dump.DumpMaster",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "mitmproxy.tools.dump.DumpMaster.run",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "mitmproxy.tools.dump.DumpMaster",
"line_number": 37,
"usage_type": "n... |
15737275688 | import fnmatch
from ftplib import FTP
import ftplib
import io
import os
import requests
from tqdm import tqdm
import os
import time
import gzip
import xml.etree.ElementTree as ET
import csv
directory = "D:/gg/"
filenames = os.listdir(directory)
print(filenames)
ftp_server = ftplib.FTP("ftp.ncbi.nlm.nih.gov", "anonymous", "wanhoyinjoshua@yahoo.com",timeout=300)
# force UTF-8 encoding
ftp_server.encoding = "utf-8"
#if username and password is required.
ftp_server.cwd('/pubmed/baseline')
ftp_server.dir()
files = ftp_server.nlst()
print(files)
local_path = r'D:/gg/' # change this to the path of your choice
import re
for file in files:
if file in filenames:
print("skip")
continue
remote_file_path = file
with ftplib.FTP("ftp.ncbi.nlm.nih.gov", "anonymous", "wanhoyinjoshua@yahoo.com",timeout=30) as ftp:
# Download the file to a BytesIO object
ftp_server.cwd('/pubmed/baseline')
for file in files:
if file in filenames:
print("skip")
continue
file_bytes = io.BytesIO()
local_file = open(local_path + file, 'wb')
try:
print(f"downloading {file}")
ftp_server.retrbinary('RETR ' + file, local_file.write)
print(f"completed download {file}")
except Exception as e:
print('Error during download:', e)
local_file.close()
continue
time.sleep(2)
local_file.close()
# Update the hash object with the file contents
ftp_server.quit()
# Disconnect from FTP server
| wanhoyinjoshua/authorship | nftp.py | nftp.py | py | 1,596 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "ftplib.FTP",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "ftplib.FTP",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 53,
... |
23542375248 | from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
my_url = 'https://www.newegg.com/Video-Cards-Video-Devices/Category/ID-38?Tpk=graphic+cards'
#opening connection and grabbing page
uClient = uReq(my_url)
page_html = uClient.read()
#close the client
uClient.close()
#html parsing
page_soup = soup(page_html, "html.parser")
print(page_soup.h1)
| vallab/hackerrank_dashboard | first_scrap.py | first_scrap.py | py | 380 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "urllib.request.urlopen",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 14,
"usage_type": "call"
}
] |
30843941779 | import SimpleITK as sitk
import numpy as np
#!/usr/bin/python2.6
# -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
from PIL import Image
import pandas as pd
import sys
'''python import模块时, 是在sys.path里按顺序查找的。
sys.path是一个列表,里面以字符串的形式存储了许多路径。
使用A.py文件中的函数需要先将他的文件路径放到sys.path中
'''
sys.path.append('..//')
from Config.Config import Config as conf
'''
读取图像相关数据
输入:
图像数据路径
输出:
图像数据、原点、缩放因子
'''
def load_image(filename):
image=sitk.ReadImage(filename)
numpy_image=sitk.GetArrayFromImage(image)
numpy_origin=np.array(list(reversed(image.GetOrigin())))
numpy_spacing=np.array(list(reversed(image.GetSpacing())))
return numpy_image,numpy_origin,numpy_spacing
'''
读取候选区数据
输入:候选区文件路径
输出:pd的数据类型,可以直接使用pandas进行相关处理
'''
def read_csv(filename):
lines=[]
lines=pd.read_csv(filename)
return lines
'''
坐标转换
输入:候选区坐标、原点、缩放因子
输出:对应的image数组中的index
'''
def coord_convert(worldcood,origin,spacing):
stretched_voxel_coord=np.absolute(worldcood-origin)
voxel_coord=stretched_voxel_coord/spacing
return voxel_coord
#正规化CT图(范围0-1)
def normalize_planes(ct_image):
maxHU=400#人体组织正常的HU应该是在这个范围之下
minHU=-1000#空气的HU
normalized_image=(ct_image-minHU)/(maxHU-minHU)
normalized_image[normalized_image>1]=1
normalized_image[normalized_image<0]=0
return normalized_image
'''
这边是对2D来说的,把候选区的位置在图片上框出来
输入:图片数据,x,y坐标,框的半径、框的厚度
输出:加入框的图片数据
'''
def draw_box(data,y,x,radius=30,pad=2):
data[max(0, y - radius):min(data.shape[0], y + radius),\
max(0, x - radius - pad):max(0, x - radius)] = 3000
data[max(0, y - radius):min(data.shape[0], y + radius),\
min(data.shape[1], x + radius):min(data.shape[1], x + radius + pad)] = 3000
data[max(0, y - radius - pad):max(0, y - radius),\
max(0, x - radius):min(data.shape[1], x + radius)] = 3000
data[min(data.shape[0], y + radius):min(data.shape[0], y + radius + pad),\
max(0, x - radius):min(data.shape[1], x + radius)] = 3000 # 横线
return data
if __name__=='__main__':
#image_path=conf.CT_dir+'1.3.6.1.4.1.14519.5.2.1.6279.6001.105756658031515062000744821260.mhd'
#1.3.6.1.4.1.14519.5.2.1.6279.6001.108197895896446896160048741492
image_path=conf.CT_dir+'1.3.6.1.4.1.14519.5.2.1.6279.6001.108197895896446896160048741492.mhd'
csv_path=conf.scv_dir+'candidates.csv'
image,origin,spacing=load_image(image_path)
#为了后面的batch处理,这边需要对origin、和spacing进行维度处理
origin=origin[np.newaxis]
spacing=spacing[np.newaxis]
print("=======image info=====")
print('size:',image.shape)
print('origin:',origin)
print('spacing:',spacing)
candidates=read_csv(csv_path)
#print('====candidates samples====')
#for i in range(conf.batch_size+1):
# print(candidates[i])
# pass
start=15647
#9313
#16051
cand=candidates.loc[15645:15654]
cand=np.asarray(cand)
world_coord=np.asarray([cand[:,3],cand[:,2],cand[:,1]],dtype=float).T
print(world_coord)
print(coord_convert(world_coord,origin,spacing))
voxel_coord=np.rint(coord_convert(world_coord,origin,spacing)).astype(int)
for i in range(0,conf.batch_size):
#patch=image
plt.clf()
image_no_cut=np.copy(image[voxel_coord[i][0]])#避免引用传参
plt.hist(image_no_cut.flatten(), bins=80, color='c')
plt.show()
new_image=draw_box(image_no_cut,voxel_coord[i][1],voxel_coord[i][2],radius=10,pad=2)
plt.title(str(cand[i][4]))
plt.imshow(new_image,cmap='gray')
plt.show()
#numpyImage, numpyOrigin, numpySpacing = load_image(image_path)
#print(numpyImage.shape)
#print(numpyOrigin)
#print(numpySpacing)
#cands = read_csv(csv_path)
##这边要注意candidate的数据要跟我的读取的文件对应
#for cand in cands[9315:9317]:
# worldCoord = np.asarray([float(cand[3]),float(cand[2]),float(cand[1])])
# voxelCoord = np.rint(coord_convert(worldCoord, numpyOrigin, numpySpacing)).astype(int)
# voxelWidth = 64
# patch = numpyImage[voxelCoord[0],voxelCoord[1]-32:voxelCoord[1]+32,voxelCoord[2]-32:voxelCoord[2]+32]
# patch = normalize_planes(patch)
# print(worldCoord)
# print(voxelCoord)
# print(patch)
# plt.imshow(patch, cmap='gray')
# plt.show()
| JiabinTan/LUNA16 | data_proc/reader_disp.py | reader_disp.py | py | 4,833 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "SimpleITK.ReadImage",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "SimpleITK.GetArrayF... |
74872310504 | import pandas as pd
import plotly.figure_factory as pf
import statistics
data = pd.read_csv('./height-weight.csv')
heigth = data['Height(Inches)'].tolist()
mean = statistics.mean(heigth)
median = statistics.median(heigth)
mode = statistics.mode(heigth)
stan = statistics.stdev(heigth)
# caluculating percentage of data in the first mean range
heigth1 = []
lowermean1 = mean-stan
uppermean1 = mean+stan
for i in heigth:
if(i > lowermean1 and i < uppermean1):
heigth1.append(i)
percentage = (len(heigth1)/len(heigth))*100
print(percentage)
# caluculating percentage of data in the second mean range
heigth2 = []
lowermean2 = mean-(2*stan)
uppermean2 = mean+(2*stan)
for i in heigth:
if(i > lowermean2 and i < uppermean2):
heigth2.append(i)
percentage2 = (len(heigth2)/len(heigth))*100
print(percentage2)
# caluculating percentage of data in the third mean range
heigth3 = []
lowermean3 = mean-(3*stan)
uppermean3 = mean+(3*stan)
for i in heigth:
if(i > lowermean3 and i < uppermean3):
heigth3.append(i)
percentage3 = (len(heigth3)/len(heigth))*100
print(percentage3)
fig = pf.create_distplot([heigth] , ["Heigth"] , show_hist=False)
fig.show()
w = data['Weight(Pounds)'].tolist()
meanw = statistics.mean(w)
medianw = statistics.median(w)
modew = statistics.mode(w)
stanw = statistics.stdev(w)
# caluculating percentage of data in the first mean range
w1 = []
lowermean1w = meanw-stanw
uppermean1w = meanw+stanw
for i in w:
if(i > lowermean1w and i < uppermean1w):
w1.append(i)
percentagew = (len(w1)/len(w))*100
print(percentagew)
# caluculating percentage of data in the second mean range
w2 = []
lowermean2w = meanw-(2*stanw)
uppermean2w = meanw+(2*stanw)
for i in w:
if(i > lowermean2w and i < uppermean2w):
w2.append(i)
percentage2w = (len(w2)/len(w))*100
print(percentage2w)
# caluculating percentage of data in the third mean range
w3 = []
lowermean3w = meanw-(3*stanw)
uppermean3w = meanw+(3*stanw)
for i in w:
if(i > lowermean3w and i < uppermean3w):
w3.append(i)
percentage3w = (len(w3)/len(w))*100
print(percentage3w)
| KARNAMROOPESH/Python13 | distribution.py | distribution.py | py | 2,204 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "statistics.mean",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "statistics.median",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "statistics.mode",
... |
18934606840 | from django import template
from django.conf import settings
from wagtail.images.models import SourceImageIOError
from wagtail.images.templatetags.wagtailimages_tags import ImageNode
from django.utils.safestring import mark_safe
from common.templatetags.string_utils import uid
register = template.Library()
@register.tag(name="responsiveimage")
def responsiveimage(parser, token):
"""Use in place of the native Wagtail image tag to add srcsets to images."""
bits = token.split_contents()[1:]
image_expr = parser.compile_filter(bits[0])
filter_spec = bits[1]
remaining_bits = bits[2:]
if remaining_bits[0][:6] == 'format':
filter_spec = "{}|{}".format(filter_spec, remaining_bits[0])
remaining_bits = remaining_bits[1:]
if remaining_bits[-2] == 'as':
attrs = _parse_attrs(remaining_bits[:-2])
# token is of the form {% responsiveimage self.photo max-320x200 srcset="filter_spec xyzw" [ attr="val" ... ] as img %}
return ResponsiveImageNode(image_expr, filter_spec, attrs=attrs, output_var_name=remaining_bits[-1])
else:
# token is of the form {% responsiveimage self.photo max-320x200 srcset="filter_spec xyzw" [ attr="val" ... ] %}
# all additional tokens should be kwargs, which become attributes
attrs = _parse_attrs(remaining_bits)
return ResponsiveImageNode(image_expr, filter_spec, attrs=attrs)
def _parse_attrs(bits):
"""Parse the attrbutes sent to responsive image tag."""
template_syntax_error_message = (
'"responsiveimage" tag should be of the form '
'{% responsiveimage self.photo max-320x200 srcset="fill-400x120 400w, fill-600x180 600w" sizes="100vw" [ custom-attr="value" ... ] %} or '
'{% responsiveimage self.photo max-320x200 srcset="whatever" as img %}'
)
attrs = {}
for bit in bits:
try:
name, value = bit.split('=')
except ValueError:
raise template.TemplateSyntaxError(template_syntax_error_message)
if value[0] == value[-1] and value[0] in ('"', "'"):
# If attribute value is in quotes, strip the quotes and store the attr as a string.
attrs[name] = value[1:-1]
else:
# This attribute isn't in quotes, so it's a variable name. Send a Variable as the attr, so the
# ResponsiveImageNode can render it based on the context it gets.
attrs[name] = template.Variable(value)
return attrs
class ResponsiveImageNode(ImageNode, template.Node):
"""Node for use by the responsive image tag."""
def render(self, context):
"""Render the responsive image node."""
try:
image = self.image_expr.resolve(context)
except template.VariableDoesNotExist:
return ''
if not image:
return ''
try:
with image.get_willow_image() as willow:
original_format = willow.format_name
conversion = getattr(settings, "WAGTAILIMAGES_FORMAT_CONVERSIONS", None)
if conversion is not None:
output_format = conversion.get(
original_format, original_format
)
else:
output_format = original_format
if output_format not in ['jpeg', 'png', 'gif', 'webp']:
output_format = 'webp'
if output_format != original_format and "format" not in self.filter_spec:
self.filter_spec = "{}|{}".format(self.filter_spec, "format-{}".format(output_format))
rendition = image.get_rendition(self.filter)
except SourceImageIOError:
# It's fairly routine for people to pull down remote databases to their
# local dev versions without retrieving the corresponding image files.
# In such a case, we would get a SourceImageIOError at the point where we try to
# create the resized version of a non-existent image. Since this is a
# bit catastrophic for a missing image, we'll substitute a dummy
# Rendition object so that we just output a broken link instead.
Rendition = image.renditions.model # pick up any custom Image / Rendition classes that may be in use
rendition = Rendition(image=image, width=0, height=0)
rendition.file.name = 'not-found'
output_format = 'webp'
# Parse srcset format into array of renditions.
try:
try:
# Assume it's a Variable object, and try to resolve it against the context.
srcset = self.attrs['srcset'].resolve(context)
except AttributeError:
# It's not a Variable, so assume it's a string.
srcset = self.attrs['srcset']
# Parse each src from the srcset.
raw_sources = srcset.replace('"', '').split(',')
srcset_renditions = []
widths = []
newsrcseturls = []
for source in raw_sources:
flt = source.strip().split(' ')[0]
width = source.strip().split(' ')[1]
'''
Make retina sizes.
This section will extract the sizes and widths,
double them up, and add them to the srcset for retina versions.
This srcset will also be passed to the responsive css filter, to be used for retina versions
in the media queries.
'''
flt_bits = flt.split('-')
flt_retina_values = []
if flt_bits[1].lower().find('x'):
flt_values = flt_bits[1].split('x')
else:
flt_values = flt_bits[1]
for value in flt_values:
flt_retina_values.append(str(int(value) * 2))
flt_retina = '%s-%s' % (flt_bits[0], 'x'.join(flt_retina_values))
width_retina = '%sw' % (str(int(width.replace('w', '')) * 2))
'''
End of retina sizes.
'''
# cache widths to be re-appended after filter has been converted to URL
widths.append(width)
widths.append(width_retina)
try:
srcset_renditions.append(image.get_rendition("{}|{}".format(flt, "format-{}".format(output_format))))
srcset_renditions.append(image.get_rendition("{}|{}".format(flt_retina, "format-{}".format(output_format))))
except SourceImageIOError:
# pick up any custom Image / Rendition classes that may be in use
TmpRendition = image.renditions.model
tmprend = TmpRendition(image=image, width=0, height=0)
tmprend.file.name = 'not-found'
for index, rend in enumerate(srcset_renditions):
newsrcseturls.append(' '.join([rend.url, widths[index]]))
except KeyError:
newsrcseturls = []
pass
if self.output_var_name:
rendition.srcset = ', '.join(newsrcseturls)
# return the rendition object in the given variable
context[self.output_var_name] = rendition
return ''
else:
# render the rendition's image tag now
resolved_attrs = {}
for key in self.attrs:
if key == 'srcset':
resolved_attrs[key] = ','.join(newsrcseturls)
continue
try:
# Assume it's a Variable object, and try to resolve it against the context.
resolved_attrs[key] = self.attrs[key].resolve(context)
except AttributeError:
# It's not a Variable, so assume it's a string.
resolved_attrs[key] = self.attrs[key]
return rendition.img_tag(resolved_attrs)
@register.filter
def responsive_css(image, prefix='ri'):
"""Filter for use with the responsive image node to output scoped css for background images."""
if not image or not image.srcset:
return ''
srcset = [x for x in image.srcset.split(',')]
srcset = [[x.strip().split(' ')[0], x.strip().split(' ')[1]] for x in srcset]
srcset = [[x[0], int(x[1].replace('w', ''))] for x in srcset]
css = '<style scoped>'
# create a counter, as we only want to render css every other time
index = 0
for size in srcset:
# increment counter each time
index += 1
# skip every second item as they are the retina versions, of which we just want the src
if index % 2 == 0:
continue
# set retina src to the next index in the srcset
retina = srcset[index]
# create the kwargs for the css
kwargs = {
'size': size[1],
'prefix': prefix,
'id': image.uid,
'url': size[0],
'url_2x': retina[0]
}
# render the css
css += """
@media all and (min-width: {size}px) {{
#{prefix}{id} {{
background-image: url({url});
background-image: -webkit-image-set(url("{url}") 1x, url("{url_2x}") 2x);
background-image: image-set(url("{url}") 1x, url("{url_2x}") 2x);
background-position-y: center;
}}
}}
""".format(**kwargs)
css += '</style>'
return mark_safe(css)
@register.filter
def responsive_id(image, prefix='ri'):
"""Filter to add a unique id for use with the responsive_css filter."""
if not image:
return ''
image.uid = uid()
html = 'id="%s%s"' % (prefix, image.uid)
return mark_safe(html)
| IATI/IATI-Standard-Website | common/templatetags/responsive.py | responsive.py | py | 9,878 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "django.template.Library",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.template.TemplateSyntaxError",
"line_number": 46,
"usage_type": "call"
},
{
"api_nam... |
27472478696 | from time import sleep
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.mail.message import EmailMultiAlternatives
from django.core.management.base import BaseCommand
from django.core.validators import validate_email
class Command(BaseCommand):
help = 'Send email to Dariusz and you in CC'
def handle(self, *args, **options):
if not settings.EMAIL_HOST:
print("Please add Mailgun first and try again")
return
if not settings.MY_EMAIL:
print("Please set MY_EMAIL environment variable and try again")
return
if not settings.MY_NAME:
print("Please set MY_NAME environment variable and try again")
return
try:
validate_email(settings.MY_EMAIL)
except ValidationError:
print("Aw please...")
print("Did you really expect I will accept {} as an email?")
sleep(2)
print("Really??")
print("Please set MY_EMAIL environment variable correctly and try again")
return
EmailMultiAlternatives(
"Hey, I'm done!",
"My name is {} and I've completed this task!".format(settings.MY_NAME),
settings.FROM_EMAIL,
[settings.DARIUSZ_EMAIL],
cc=[settings.MY_EMAIL],
reply_to=[settings.MY_EMAIL],
).send()
print("Looks like your email was send! Check your email, you're in CC :)") | DariuszAniszewski/codepot-heroku-workshop | codepot/management/commands/send_email.py | send_email.py | py | 1,511 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.EMAIL_HOST",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 14,
"usage_type": "nam... |
13463415841 | import os
import gc
import dill
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning)
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
from sklearn.multioutput import MultiOutputRegressor
import lightgbm as lgb
from utils import correlation_score
from config import *
# data directory
DATA_DIR = os.path.join(PATH_WORKING, 'data_cite_x512')
pipe_y = dill.load(open(os.path.join(DATA_DIR, 'pipe_y.dill'), 'rb'))
# model directory with results
MODEL_DIR = os.path.join(PATH_WORKING, 'lgb_cite_x512')
if not os.path.exists(MODEL_DIR):
os.mkdir(MODEL_DIR)
class CFG:
n_folds = 10
def train_lgbm():
# load data
X = np.load(os.path.join(DATA_DIR, 'X_train.npy'))
y = np.load(os.path.join(DATA_DIR, 'y_train.npy'))
y_true = pd.read_hdf(FP_CITE_TRAIN_TARGETS).values.astype('float32')
scores = []
for k, (train_index, test_index) in enumerate(KFold(n_splits=CFG.n_folds, shuffle=True, random_state=SEED*4).split(X)):
X_train = X[train_index]
y_train = y[train_index]
X_test = X[test_index]
y_test = y[test_index]
init_params = {
'num_iterations': 1000,
'learning_rate': 0.05,
'max_depth': 10,
'num_leaves': 200,
'reg_alpha': 0.03,
'reg_lambda': 0.002,
#'subsample': 0.6,
#'min_data_in_leaf': 263,
'colsample_bytree': 0.8,
'random_seed': 4243,
'early_stopping_round': 100,
'verbosity': -1,
}
models = []
y_pred = np.zeros(y_test.shape)
for j in range(y_train.shape[1]):
fit_params = {
'X': X_train,
'y': y_train[:,j],
'eval_set': [(X_test, y_test[:,j])],
'eval_metric': 'rmse',
"verbose": False,
}
model = lgb.LGBMRegressor(**init_params).fit(**fit_params)
models.append(model)
y_pred[:,j] = model.predict(X_test)
print(f'Feature: {j} MSE: {mean_squared_error(y_test[:,j], y_pred[:,j])}')
dill.dump(models, open(os.path.join(MODEL_DIR, f'models_{k}.dill'), 'wb'))
y_pred = pipe_y.inverse_transform(y_pred)
score = correlation_score(y_true[test_index], y_pred)
scores.append(score)
print('Fold:', k, 'Score:', score)
del X_train, X_test, y_train, y_test, y_pred
gc.collect()
#break
print('CV score:', np.mean(scores))
class ModelWrapper:
def __init__(self, models):
self.models = models
def predict(self, X):
y = np.zeros((X.shape[0], len(self.models)))
for j, model in enumerate(self.models):
y[:,j] = model.predict(X)
return y
def predict_test():
X_test = np.load(os.path.join(DATA_DIR, 'X_test.npy'))
y_pred = None
for k in range(CFG.n_folds):
print('Fold:', k)
trees = dill.load(open(os.path.join(MODEL_DIR, f'models_{k}.dill'), 'rb'))
model = ModelWrapper(trees)
y_fold = model.predict(X_test)
if y_pred is None:
y_pred = y_fold
else:
y_pred += y_fold
y_pred /= CFG.n_folds
y_pred = pipe_y.inverse_transform(y_pred)
assert y_pred.shape == SHAPES['cite']
np.save(os.path.join(MODEL_DIR, 'y_pred.npy'), y_pred)
def predict_folds():
X = np.load(os.path.join(DATA_DIR, 'X_train.npy'))
for k, (train_index, test_index) in enumerate(KFold(n_splits=CFG.n_folds, shuffle=True, random_state=SEED*4).split(X)):
print('Fold:', k)
X_test = X[test_index]
trees = dill.load(open(os.path.join(MODEL_DIR, f'models_{k}.dill'), 'rb'))
model = ModelWrapper(trees)
y_fold = model.predict(X_test)
y_fold = pipe_y.inverse_transform(y_fold)
assert y_fold.shape[1] == SHAPES['cite'][1]
np.save(os.path.join(MODEL_DIR, f'y_fold_{k}.npy'), y_fold)
if __name__ == "__main__":
"""
eval "$(/opt/anaconda_teams/bin/conda shell.bash hook)"
conda activate /home/datashare/envs/rden-py37-tf-cpu
python other_research/kaggle-02-single-cell-integration/train_lgb_cite.py
"""
#train_lgbm()
#predict_test()
predict_folds()
#CV score: 0.8961377694285847 | romden/kaggle | 2022_09_single-cell-integration/train_lgb_cite.py | train_lgb_cite.py | py | 4,566 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "dill.load",
"lin... |
21735015266 | import os
import random
import cv2
from ultralytics import YOLO
from tracker import Tracker
import numpy as np
import copy
from frame import Frame
def Analyser():
video_path = os.path.join(os.getcwd(), 'assets', 'people.mp4')
video_out_path = os.path.join(os.getcwd(), 'assets', 'people_out.mp4')
cap = cv2.VideoCapture(video_path)
video_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
video_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
x1_area = int(0.10 * video_width)
y1_area = int(0.10 * video_height)
x2_area = int(0.90 * video_width)
y2_area = int(0.90 * video_height)
ret, frame = cap.read()
cap_out = cv2.VideoWriter(video_out_path, cv2.VideoWriter_fourcc(*'mp4v'), cap.get(cv2.CAP_PROP_FPS), (frame.shape[1], frame.shape[0]))
model = YOLO('model_data/yolov8n.pt')
tracker = Tracker()
cur_frame = Frame((x1_area, y1_area, x2_area, y2_area))
n_init = 3
colors = [(random.randint(0,255), random.randint(0,255), random.randint(0,255)) for j in range(70)]
while ret:
count = np.array([0,0,0,0])
# count ( out, in, inflow, outflow)
results = model(frame)
for result in results:
detections = []
for r in result.boxes.data.tolist():
x1, y1, x2, y2, score, class_id = r
x1 = int(x1)
y1 = int(y1)
x2 = int(x2)
y2 = int(y2)
score = float(score)
class_id = int(class_id)
if score > 0.35 and class_id == 0:
detections.append([x1, y1, x2, y2, score, class_id])
tracker.update(frame, detections)
if n_init > 0:
n_init -= 1
else:
cur_frame.update(tracker.tracks)
for track in tracker.tracks:
bbox = track.to_tlbr()
x1, y1, x2, y2 = bbox
track_id = track.track_id
cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), colors[track_id%len(colors)], 2)
cv2.putText(frame, f'{track_id}', (int(x1), int(y1)-10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, colors[track_id%len(colors)], 1)
track_state = cur_frame.tracks_state[track_id]
count[track_state[0]] += 1
cv2.putText(frame, f'{cur_frame.state_class[track_state[0]]}', (int(x1), int(y1)-30), cv2.FONT_HERSHEY_SIMPLEX, 0.4, colors[track_id%len(colors)], 1)
cv2.putText(frame, f'{count[1]},{str(" ") if count[2]==0 else count[2]},{count[3]}', (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0,255,0), 2)
cv2.rectangle(frame, (x1_area, y1_area), (x2_area, y2_area), (0, 0xFF, 0), 2)
cv2.imshow('frame', frame)
cv2.waitKey(1)
cap_out.write(frame)
ret, frame = cap.read()
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
Analyser() | KushJoshi16/CrowdInflowOutflow | VideoAnalyser.py | VideoAnalyser.py | py | 2,949 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number":... |
71480593704 | from dotenv import load_dotenv
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from kafka import KafkaProducer
import os
load_dotenv()
access_token = os.environ.get('ACCESS_TOKEN')
access_token_secret = os.environ.get('ACCESS_TOKEN_SECRET')
consumer_key = os.environ.get('CONSUMER_KEY')
consumer_secret = os.environ.get('CONSUMER_SECRET')
producer = KafkaProducer(bootstrap_servers='localhost:9092')
topic_name = 'twitterdata'
class twitterAuth():
"""Set up Twitter Authentication"""
def authenticateTwitterApp(self):
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
return auth
class TwitterStreamer():
"""Set up Streamer"""
def __init__(self):
self.twitterAuth = twitterAuth()
def stream_tweets(self):
while True:
listener = ListenerTS()
auth = self.twitterAuth.authenticateTwitterApp()
stream = Stream(auth, listener)
stream.filter(track=["Covid"], stall_warnings=True, languages= ["en"])
class ListenerTS(StreamListener):
def on_data(self, raw_data):
producer.send(topic_name, str.encode(raw_data))
return True
if __name__ == "__main__":
TS = TwitterStreamer()
TS.stream_tweets() | luciferreeves/KafkaPySpark | producer.py | producer.py | py | 1,350 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
... |
14656471260 | import requests
import json
import logging
import os
import time
import uuid
from collections import Counter
API_URL = os.environ.get('API_URL') or 'https://api.gooee.io'
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)
# Sentry Setup
SENTRY_ENVIRONMENT = 'TMPL_SENTRY_ENVIRONMENT'
SENTRY_RELEASE = 'TMPL_SENTRY_RELEASE'
SENTRY_KEY = 'TMPL_SENTRY_KEY'
SENTRY_PARAMS = (SENTRY_ENVIRONMENT, SENTRY_RELEASE, SENTRY_KEY)
SENTRY_CLIENT = None
if not any(map(lambda p: p.startswith('TMPL_'), SENTRY_PARAMS)):
import raven
SENTRY_CLIENT = raven.Client(
dsn=SENTRY_KEY,
environment=SENTRY_ENVIRONMENT,
release=SENTRY_RELEASE,
transport=raven.transport.http.HTTPTransport,
)
# Allow Devices and Spaces to be powered on/off, dim/brighten, and set to x%
with open('space-template.json') as fp:
SPACE_TEMPLATE = json.load(fp)
with open('device-template.json') as fp:
DEVICE_TEMPLATE = json.load(fp)
# Map of Amazon's device capabilities to Gooee's device meta values
CAPABILITY_TO_META = {
'powerState': ('onoff', lambda val: 'ON' if val else 'OFF'),
'brightness': ('dim', lambda val: val),
'powerLevel': ('dim', lambda val: val),
'percentage': ('dim', lambda val: val),
'connectivity': ('is_online',
lambda val: {"value": "OK" if val else "UNREACHABLE"})
}
class AuthException(Exception):
"""Auth Exception from the Cloud API."""
class BadRequestException(Exception):
"""Got a 400 Bad request from the Cloud API."""
class ParentSpaceException(Exception):
"""Parent Spaces do not have devices causing a ZeroDivisionError"""
class MetaNotAvailableException(Exception):
"""Gooee API not reporting meta for Device/Space"""
def lambda_handler(request: dict, context: dict) -> dict:
"""Main Lambda handler."""
try:
LOGGER.info('Directive:')
LOGGER.info(json.dumps(request, indent=4, sort_keys=True))
header = request['directive']['header']
if header['name'] == 'Discover':
response = handle_discovery(request)
elif header['name'] == 'ReportState':
response = handle_report_state(request)
elif header['namespace'] == 'Alexa.PowerController':
response = handle_power_controller(request)
elif header['namespace'] == 'Alexa.BrightnessController':
response = handle_brightness_controller(request)
elif header['namespace'] == 'Alexa.Authorization':
response = handle_auth(request)
else:
raise Exception
LOGGER.info('Response:')
LOGGER.info(json.dumps(response, indent=4, sort_keys=True))
return response
except Exception as err:
error_response = {
'event': {
'header': {
'namespace': 'Alexa',
'name': 'ErrorResponse',
'messageId': str(uuid.uuid4()),
'payloadVersion': '3',
},
'endpoint': {
'endpointId':
request['directive']['endpoint']['endpointId']
},
'payload': {
'type': 'INTERNAL_ERROR',
'message': 'Unhandled Error',
}
}
}
# Add correlation token to the response only if the directive is not of type `Discover` or
# `AddOrUpdateReport`
if request['directive']['header']['name'] != 'Discover' and \
request['directive']['header']['name'] != 'AddOrUpdateReport' and \
error_response['event']['header'].get('correlationToken'):
error_response['event']['header']['correlationToken'] = \
request['directive']['header']['correlationToken']
if isinstance(err, BadRequestException):
error_response['event']['payload']['type'] = 'NO_SUCH_ENDPOINT'
error_response['event']['payload']['message'] = err.args[0]
elif isinstance(err, AuthException):
error_response['event']['payload']['type'] = \
'INVALID_AUTHORIZATION_CREDENTIAL'
error_response['event']['payload']['message'] = err.args[0]
elif isinstance(err, ParentSpaceException):
error_response['event']['payload']['type'] = 'INVALID_DIRECTIVE'
error_response['event']['payload']['message'] = err.args[0]
return error_response # Skip logging in Sentry
elif isinstance(err, MetaNotAvailableException):
error_response['event']['payload']['type'] = 'ENDPOINT_UNREACHABLE'
error_response['event']['payload']['message'] = err.args[0]
return error_response # Skip logging in Sentry
if SENTRY_CLIENT:
SENTRY_CLIENT.captureException()
return error_response
def g_post_action_request(payload: dict, key: str):
"""Make a POST action request to the Gooee Cloud API"""
headers = {
'Authorization': f'Bearer {key}',
'Content-Type': 'application/json',
}
payload["origin"] = "alexa"
LOGGER.info('POST Request:')
LOGGER.info(headers)
LOGGER.info(json.dumps(payload))
res = requests.post(API_URL + '/actions', json=payload, headers=headers)
if res.status_code in (requests.codes.UNAUTHORIZED, requests.codes.FORBIDDEN):
raise AuthException('Auth error')
if res.status_code in (requests.codes.BAD_REQUEST, requests.codes.NOT_FOUND):
raise BadRequestException('Device or Space not found')
LOGGER.info('Cloud-api response:')
LOGGER.info(res.text)
def g_get_request(endpoint: str, key: str):
    """Make a GET request to the Gooee Cloud API.

    Follows ``Link: rel="next"`` pagination headers, concatenating list
    responses page by page.

    Raises:
        AuthException: on 401/403 responses.
        BadRequestException: on 400/404 responses.

    Returns:
        The concatenated list of all pages, or — for non-list bodies —
        the (last) parsed JSON object.
    """
    headers = {
        'Authorization': f'Bearer {key}',
    }
    LOGGER.info(f'GET Request: {endpoint}')
    LOGGER.info(headers)
    url = API_URL + endpoint
    data = []
    while url:
        res = requests.get(url, headers=headers)
        if res.status_code in (requests.codes.UNAUTHORIZED, requests.codes.FORBIDDEN):
            raise AuthException('Auth error')
        if res.status_code in (requests.codes.BAD_REQUEST, requests.codes.NOT_FOUND):
            raise BadRequestException('Device or Space not found')
        url = res.links.get('next', {}).get('url')
        # Parse the body once; the original re-parsed res.json() up to
        # three times per page (isinstance check + both ternary branches).
        body = res.json()
        data = data + body if isinstance(body, list) else body
    LOGGER.info('Cloud-api response:')
    LOGGER.info(data)
    return data
def g_get_state(type_: str, id_: str, bearer_token: str) -> dict:
    """Fetch the current state of a Space/Device as a {name: value} dict."""
    if type_ == 'device':
        device = g_get_request(f'/{type_}s/{id_}', bearer_token)
        return {meta['name']: meta['value'] for meta in device['meta']}
    # Spaces only support the dim and onoff states, aggregated over the
    # devices the API reports for the space.
    space = g_get_request(
        f'/{type_}s/{id_}/device_states',
        bearer_token,
    )
    states = space['states']
    totals = Counter()
    for device_state in states.values():
        totals.update(device_state)
    if not states:
        # A parent space with only nested spaces: /spaces/{id}/device_states
        # does not report nested spaces' devices, leaving nothing to average.
        raise ParentSpaceException('State Reporting not supported on this '
                                   'Space.')
    mean_dim = int(totals['dim'] / len(states))
    return {
        # Average dim over member devices; onoff is True if any device is on.
        'dim': mean_dim,
        'onoff': bool(totals['onoff']),
        'is_online': True,  # hard code space to be online
    }
def handle_discovery(request: dict) -> dict:
    """Discovery Handler.

    Builds an Alexa ``Discover.Response`` listing the user's scoped Gooee
    Spaces and Devices as endpoints.

    Per the Alexa docs, token or account errors during discovery must NOT
    produce an error response: they yield a response with an empty
    ``endpoints`` array.  (Previously the missing-token path returned a
    bare ``[]``, which is not a valid Discover.Response envelope.)
    """
    endpoints = []
    try:
        bearer_token = request['directive']['payload']['scope']['token']
    except KeyError:
        # As per Alexa docs: if an error such as an expired token occurs
        # during a discovery request, return an empty endpoint array and
        # not an error.
        bearer_token = None
    if bearer_token is not None:
        # Same endpoint-building loop for both Spaces and Devices.
        for path, template in (
            ('/spaces/?_include=id,name', SPACE_TEMPLATE),
            ('/devices/?_include=name,id&type__in=wim,bulb', DEVICE_TEMPLATE),
        ):
            try:
                items = g_get_request(path, bearer_token)
            except AuthException:
                # As per Alexa docs: if an error associated with the
                # customer's account occurs, the skill should return an
                # empty endpoints array.
                continue
            for item in items:
                # .copy() is shallow: only top-level keys are overwritten
                # here, so the nested capability lists stay shared
                # (read-only) between endpoints.
                appliance = template.copy()
                appliance['friendlyName'] = item['name']
                appliance['endpointId'] = item['id']
                endpoints.append(appliance)
    return {
        'event': {
            'header': {
                'namespace': 'Alexa.Discovery',
                'name': 'Discover.Response',
                'payloadVersion': '3',
                'messageId': str(uuid.uuid4())
            },
            'payload': {
                'endpoints': endpoints
            }
        }
    }
def handle_power_controller(request: dict) -> dict:
    """PowerController Handler.

    Translates an Alexa TurnOn/TurnOff directive into a Gooee on/off
    action and echoes the resulting power state back to Alexa.
    """
    directive = request['directive']
    endpoint = directive['endpoint']['endpointId']
    type_ = directive['endpoint']['cookie']['type']
    bearer_token = directive['endpoint']['scope']['token']
    # Any directive name other than TurnOn is treated as TurnOff.
    value = 'ON' if directive['header']['name'] == 'TurnOn' else 'OFF'
    action = {
        'name': f'Alexa {value} request',
        'type': value.lower(),
        'value': {'transition_time': 2},
        type_: endpoint,
    }
    g_post_action_request(action, bearer_token)
    sampled_at = time.strftime(
        '%Y-%m-%dT%H:%M:%S.00Z',
        time.gmtime(),
    )
    return {
        'context': {
            'properties': [
                {
                    'namespace': 'Alexa.PowerController',
                    'name': 'powerState',
                    'value': value,
                    'timeOfSample': sampled_at,
                    'uncertaintyInMilliseconds': 500,
                }
            ]
        },
        'event': {
            'header': {
                'namespace': 'Alexa',
                'name': 'Response',
                'payloadVersion': '3',
                'messageId': str(uuid.uuid4()),
                'correlationToken':
                    directive['header']['correlationToken'],
            },
            'endpoint': {
                'scope': {
                    'type': 'BearerToken',
                    'token': bearer_token,
                },
                'endpointId': endpoint
            },
            'payload': {},
        }
    }
def handle_brightness_controller(request: dict) -> dict:
    """BrightnessController Handler

    Maps Alexa SetBrightness / AdjustBrightness directives onto Gooee
    'dim' / 'adjust' actions and echoes a brightness property back.
    """
    request_name = request['directive']['header']['namespace']
    request_data = request['directive']['payload']
    bearer_token = request['directive']['endpoint']['scope']['token']
    endpoint = request['directive']['endpoint']['endpointId']
    type_ = request['directive']['endpoint']['cookie']['type']
    value = None
    # payload is keyed by the endpoint type ('space'/'device' per cookie)
    payload = {type_: endpoint}
    if 'brightness' in request_data:
        # Absolute brightness (SetBrightness directive)
        value = request_data['brightness']
        payload.update({
            'name': 'Alexa brightness request',
            'type': 'dim',
            'value': {'level': value, 'transition_time': 1},
        })
        g_post_action_request(payload, bearer_token)
    elif 'brightnessDelta' in request_data:
        # Relative change (AdjustBrightness); the delta may be negative
        value = request_data['brightnessDelta']
        payload.update({
            'name': 'Alexa brightnessDelta request',
            'type': 'adjust',
            'value': {'delta': value, 'transition_time': 1},
        })
        g_post_action_request(payload, bearer_token)
    # NOTE(review): if the payload carries neither key, value stays None and
    # abs(None) below raises TypeError (surfacing upstream as INTERNAL_ERROR).
    # NOTE(review): for delta requests abs(value) reports the delta magnitude,
    # not the resulting brightness level — confirm against Alexa expectations.
    response = {
        'context': {
            'properties': [
                {
                    'namespace': request_name,
                    'name': 'brightness',
                    'value': abs(value),
                    'timeOfSample': time.strftime(
                        '%Y-%m-%dT%H:%M:%S.00Z',
                        time.gmtime(),
                    ),
                    'uncertaintyInMilliseconds': 500,
                }
            ]
        },
        'event': {
            'header': {
                'namespace': 'Alexa',
                'name': 'Response',
                'payloadVersion': '3',
                'messageId': str(uuid.uuid4()),
                'correlationToken':
                    request['directive']['header']['correlationToken'],
            },
            'endpoint': {
                'scope': {
                    'type': 'BearerToken',
                    'token': bearer_token,
                },
                'endpointId': endpoint
            },
            'payload': {},
        }
    }
    return response
def handle_auth(request: dict) -> dict:
    """Authorization Handler.

    Handles ``Alexa.Authorization`` directives.  Only ``AcceptGrant`` is
    supported; it is acknowledged with an ``AcceptGrant.Response``.

    Returns:
        The response dict for ``AcceptGrant``, or None for any other
        directive name.  (Previously any other name crashed with
        UnboundLocalError because ``response`` was never assigned.)
    """
    request_name = request['directive']['header']['name']
    response = None
    if request_name == 'AcceptGrant':
        response = {
            'event': {
                'header': {
                    'namespace': 'Alexa.Authorization',
                    'name': 'AcceptGrant.Response',
                    'payloadVersion': '3',
                    'messageId': str(uuid.uuid4()),
                },
                'payload': {},
            }
        }
    return response
def handle_report_state(request: dict) -> dict:
    """ReportState Handler.

    Fetches the endpoint's current state from the Gooee API and returns
    an Alexa StateReport with every retrievable capability property.
    """
    endpoint = request['directive']['endpoint']['endpointId']
    type_ = request['directive']['endpoint']['cookie']['type']
    bearer_token = request['directive']['endpoint']['scope']['token']
    gooee_state = g_get_state(type_, endpoint, bearer_token)
    template = SPACE_TEMPLATE if type_ == 'space' else DEVICE_TEMPLATE
    properties = []
    for capability in template['capabilities']:
        # Skip capabilities that are not marked retrievable (or have no
        # properties section at all, e.g. the plain Alexa interface).
        try:
            retrievable = capability['properties']['retrievable']
        except KeyError:
            continue
        if not retrievable:
            continue
        amz_name = capability['properties']['supported'][0]['name']
        # Translate Gooee meta to how Alexa expects it, for example:
        # if gooee_state was {'onoff': True} transfunc will return 'ON'
        gooee_name, transfunc = CAPABILITY_TO_META[amz_name]
        try:
            reported_value = transfunc(gooee_state[gooee_name])
        except KeyError:  # Meta is not available from gooee response
            raise MetaNotAvailableException('Gooee API not reporting meta for '
                                            'Device/Space')
        properties.append({
            'namespace': capability['interface'],
            'name': amz_name,
            'value': reported_value,
            'timeOfSample': time.strftime(
                '%Y-%m-%dT%H:%M:%S.00Z',
                time.gmtime(),
            ),
            'uncertaintyInMilliseconds': 500,
        })
    return {
        'context': {
            'properties': properties,
        },
        'event': {
            'header': {
                'namespace': 'Alexa',
                'name': 'StateReport',
                'payloadVersion': '3',
                'messageId': str(uuid.uuid4()),
                'correlationToken':
                    request['directive']['header']['correlationToken'],
            },
            'endpoint': {
                'scope': {
                    'type': 'BearerToken',
                    'token': bearer_token,
                },
                'endpointId': endpoint
            },
            'payload': {
            },
        }
    }
| GooeeIOT/cloud-alexa-control-lambda | lambda_function.py | lambda_function.py | py | 16,236 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"li... |
22209550187 | # -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
def setup():
    """Interactively collect SqueezeBox Server connection settings,
    verify them, let the user pick a player, and persist everything to
    ``sbcc.cfg``.

    Returns:
        dict: the collected configuration values.
    """
    import sys
    from socket import error as socketerror
    import configparser

    from module.pysqueezecenter.server import Server
    from module.pysqueezecenter.player import Player

    # Python2 compatibility
    try:
        from urllib.parse import unquote
    except ImportError:
        from urllib import unquote

    print("No config file found, please provide the following details. The default is provided between brackets.")
    config = {}
    config['host'] = input("IP address of Squeezebox Server [127.0.0.1]: ")
    config['port'] = input("Port of Squeezebox Server [9090]: ")
    config['user'] = input("Username of server's CLI []: ")
    config['passwd'] = input("Password of server's CLI []: ")

    # Apply the advertised defaults when the user just presses Enter.
    if not config['host']:
        config['host'] = "127.0.0.1"
    if not config['port']:
        config['port'] = "9090"

    sbs = Server(hostname=config['host'],
                 port=config['port'],
                 username=config['user'],
                 password=config['passwd'])
    print()
    try:
        sbs.connect()
        if sbs.logged_in:
            print("Succesfully connected to Squeezebox Server v%s on %s:%s" % (sbs.get_version(), config['host'], config['port']))
        else:
            print("Could not connect to server, possible wrong credentials, run %s again" % sys.argv[0])
            sys.exit(1)
    except socketerror:
        print("Network is unreachable, check ip/port settings and run %s again." % sys.argv[0])
        sys.exit(1)

    print()
    print("The following players are connected to the server:")
    players = sbs.get_players()
    for i in range(len(players)):
        player = str(players[i]).replace("Player: ", "")
        print("[%i]: %s" % (i + 1, player))
    answer = int(input("Press the number which MAC address you want to use: ")) - 1
    config['mac'] = str(players[answer]).replace("Player: ", "")
    print()

    sq = sbs.get_player(config['mac'])
    print("Path to current song in playlist, use this as a hint for the following question.")
    print(unquote(sq.get_track_path()))
    config['remotefolder'] = input("Remote folder where music resides on Squeezebox Server, e.g. file:///path/to/music []: ")
    config['localfolder'] = input("Local folder where music resides locally, e.g. /path/to/music []: ")
    config['driver'] = input("Music driver to use [alsa]: ")
    config['output'] = input("Audio device to use e.g.plughw:0,0 []: ")

    setupconfig = configparser.RawConfigParser()
    setupconfig.add_section('global')
    setupconfig.set('global', 'host', config['host'])
    setupconfig.set('global', 'port', config['port'])
    setupconfig.set('global', 'user', config['user'])
    setupconfig.set('global', 'passwd', config['passwd'])
    setupconfig.set('global', 'mac', config['mac'])
    setupconfig.set('global', 'remotefolder', config['remotefolder'])
    setupconfig.set('global', 'localfolder', config['localfolder'])
    setupconfig.set('global', 'driver', config['driver'])
    setupconfig.set('global', 'output', config['output'])
    # configparser.write() emits str, so the file must be opened in TEXT
    # mode; the previous 'wb' raised TypeError on Python 3.
    with open('sbcc.cfg', 'w') as configfile:
        setupconfig.write(configfile)
    print("Configuration is setup successfully!")
    return config
| terual/sbcc | module/setup.py | setup.py | py | 3,921 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "module.pysqueezecenter.server.Server",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "socket.er... |
18456187648 | from __future__ import unicode_literals
from udsactor.log import logger
from . import operations
from . import store
from . import REST
from . import ipc
from . import httpserver
from .scriptThread import ScriptExecutorThread
from .utils import exceptionToMessage
import socket
import time
import random
import os
import subprocess
import shlex
import stat
IPC_PORT = 39188  # local TCP port for the IPC channel with client processes
cfg = None  # module-wide actor configuration, populated by initCfg()
def initCfg():
    """Load the stored actor configuration and set up logging.

    Returns the config dict when it contains all mandatory keys
    ('host', 'ssl', 'masterKey'), or None otherwise.  The result is also
    stored in the module-level ``cfg``.
    """
    global cfg  # pylint: disable=global-statement
    cfg = store.readConfig()
    if logger.logger.isWindows():
        # Logs will also go to windows event log for services
        logger.logger.serviceLogger = True
    if cfg is not None:
        logger.setLevel(cfg.get('logLevel', 20000))
    else:
        # No stored config: fall back to the default log level and use an
        # empty dict so the validation loop below runs (it resets cfg to
        # None, which callers treat as "not configured").
        logger.setLevel(20000)
        cfg = {}
    # If ANY var is missing, reset cfg
    for v in ('host', 'ssl', 'masterKey'):
        if v not in cfg:
            cfg = None
            break
    return cfg
class CommonService(object):
    """Base class shared by the platform-specific UDS actor services.

    Implements the broker handshake (interactWithBroker), the IPC channel
    with client processes and the local REST listener.  Platform subclasses
    must implement rename() and joinDomain().

    NOTE(review): this file straddles Python 2/3; several paths use the
    Python-2-only ``Exception.message`` attribute (flagged inline) — they
    would raise AttributeError on Python 3.
    """
    def __init__(self):
        self.isAlive = True  # main-loop flag; cleared to stop the service
        self.api = None  # REST API client, created in interactWithBroker()
        self.ipc = None  # IPC listener, created in initIPC()
        self.httpServer = None  # local REST listener, created in initIPC()
        self.rebootRequested = False
        self.knownIps = []  # becomes a {mac: ip} dict once populated
        # Keep any blocking network call from hanging the service forever.
        socket.setdefaulttimeout(20)
    def reboot(self):
        # Only records the request; the actual reboot happens at the end of
        # interactWithBroker().
        self.rebootRequested = True
    def execute(self, cmdLine, section):
        """Run an external command line; returns True on success.

        The command is only executed if the file exists and has the owner
        execute bit set; failures are logged under the given section name.
        """
        cmd = shlex.split(cmdLine, posix=False)
        if os.path.isfile(cmd[0]):
            if (os.stat(cmd[0]).st_mode & stat.S_IXUSR) != 0:
                try:
                    res = subprocess.check_call(cmd)
                except Exception as e:
                    logger.error('Got exception executing: {} - {}'.format(cmdLine, e))
                    return False
                logger.info('Result of executing cmd was {}'.format(res))
                return True
            else:
                logger.error('{} file exists but it it is not executable (needs execution permission by admin/root)'.format(section))
        else:
            logger.error('{} file not found & not executed'.format(section))
        return False
    def setReady(self):
        # Report every (mac, ip) pair to the broker to flag the host ready.
        self.api.setReady([(v.mac, v.ip) for v in operations.getNetworkInfo()])
    def interactWithBroker(self):
        '''
        Returns True to continue to main loop, false to stop & exit service
        '''
        # If no configuration is found, stop service
        if cfg is None:
            logger.fatal('No configuration found, stopping service')
            return False
        self.api = REST.Api(cfg['host'], cfg['masterKey'], cfg['ssl'])
        # Wait for Broker to be ready
        counter = 0
        while self.isAlive:
            try:
                # getNetworkInfo is a generator function
                netInfo = tuple(operations.getNetworkInfo())
                self.knownIps = dict(((i.mac, i.ip) for i in netInfo))
                ids = ','.join([i.mac for i in netInfo])
                if ids == '':
                    # Wait for any network interface to be ready
                    logger.debug('No valid network interfaces found, retrying in a while...')
                    raise Exception()
                logger.debug('Ids: {}'.format(ids))
                self.api.init(ids)
                # Set remote logger to notify log info to broker
                logger.setRemoteLogger(self.api)
                break
            except REST.InvalidKeyError:
                logger.fatal('Can\'t sync with broker: Invalid broker Master Key')
                return False
            except REST.UnmanagedHostError:
                # Maybe interface that is registered with broker is not enabled already?
                # Right now, we thing that the interface connected to broker is
                # the interface that broker will know, let's see how this works
                logger.fatal('This host is not managed by UDS Broker (ids: {})'.format(ids))
                return False  # On unmanaged hosts, there is no reason right now to continue running
            except Exception as e:
                logger.debug('Exception on network info: retrying')
                # Any other error is expectable and recoverable, so let's wait a bit and retry again
                # but, if too many errors, will log it (one every minute, for
                # example)
                counter += 1
                if counter % 60 == 0:  # Every 5 minutes, raise a log
                    logger.info('Trying to inititialize connection with broker (last error: {})'.format(exceptionToMessage(e)))
                # Wait a bit before next check
                self.doWait(5000)
        # Now try to run the "runonce" element
        runOnce = store.runApplication()
        if runOnce is not None:
            logger.info('Executing runOnce app: {}'.format(runOnce))
            if self.execute(runOnce, 'RunOnce') is True:
                # operations.reboot()
                return False
        # Broker connection is initialized, now get information about what to
        # do
        counter = 0
        while self.isAlive:
            try:
                logger.debug('Requesting information of what to do now')
                info = self.api.information()
                data = info.split('\r')
                if len(data) != 2:
                    logger.error('The format of the information message is not correct (got {})'.format(info))
                    raise Exception
                params = data[1].split('\t')
                if data[0] == 'rename':
                    try:
                        if len(params) == 1:  # Simple rename
                            logger.debug('Renaming computer to {}'.format(params[0]))
                            self.rename(params[0])
                        # Rename with change password for an user
                        elif len(params) == 4:
                            logger.debug('Renaming computer to {}'.format(params))
                            self.rename(params[0], params[1], params[2], params[3])
                        else:
                            logger.error('Got invalid parameter for rename operation: {}'.format(params))
                            return False
                        break
                    except Exception as e:
                        # NOTE(review): e.message is Python-2-only; on
                        # Python 3 this line itself raises AttributeError.
                        logger.error('Error at computer renaming stage: {}'.format(e.message))
                        return None  # Will retry complete broker connection if this point is reached
                elif data[0] == 'domain':
                    if len(params) != 5:
                        logger.error('Got invalid parameters for domain message: {}'.format(params))
                        return False  # Stop running service
                    self.joinDomain(params[0], params[1], params[2], params[3], params[4])
                    break
                else:
                    logger.error('Unrecognized action sent from broker: {}'.format(data[0]))
                    return False  # Stop running service
            except REST.UserServiceNotFoundError:
                logger.error('The host has lost the sync state with broker! (host uuid changed?)')
                return False
            except Exception as err:
                if counter % 60 == 0:
                    logger.warn('Too many retries in progress, though still trying (last error: {})'.format(exceptionToMessage(err)))
                counter += 1
                # Any other error is expectable and recoverable, so let's wait
                # a bit and retry again
                # Wait a bit before next check
                self.doWait(5000)
        if self.rebootRequested:
            try:
                operations.reboot()
            except Exception as e:
                # NOTE(review): e.message is Python-2-only (see above).
                logger.error('Exception on reboot: {}'.format(e.message))
            return False  # Stops service
        return True
    def checkIpsChanged(self):
        """Detect local IP changes and re-announce addresses to the broker."""
        if self.api is None or self.api.uuid is None:
            return  # Not connected
        netInfo = tuple(operations.getNetworkInfo())
        for i in netInfo:
            # If at least one ip has changed
            if i.mac in self.knownIps and self.knownIps[i.mac] != i.ip:
                logger.info('Notifying ip change to broker (mac {}, from {} to {})'.format(i.mac, self.knownIps[i.mac], i.ip))
                try:
                    # Notifies all interfaces IPs
                    self.api.notifyIpChanges(((v.mac, v.ip) for v in netInfo))
                    # Regenerates Known ips
                    self.knownIps = dict(((v.mac, v.ip) for v in netInfo))
                    # And notify new listening address to broker
                    address = (self.knownIps[self.api.mac], self.httpServer.getPort())
                    # And new listening address
                    self.httpServer.restart(address)
                    # sends notification
                    self.api.notifyComm(self.httpServer.getServerUrl())
                except Exception as e:
                    # NOTE(review): e.message / .decode() are Python-2-only.
                    logger.warn('Got an error notifiying IPs to broker: {} (will retry in a bit)'.format(e.message.decode('windows-1250', 'ignore')))
    def clientMessageProcessor(self, msg, data):
        """Dispatch an IPC message (login/logout/information) from a client."""
        logger.debug('Got message {}'.format(msg))
        if self.api is None:
            logger.info('Rest api not ready')
            return
        if msg == ipc.REQ_LOGIN:
            res = self.api.login(data).split('\t')
            # third parameter, if exists, sets maxSession duration to this.
            # First & second parameters are ip & hostname of connection source
            if len(res) >= 3:
                self.api.maxSession = int(res[2])  # Third parameter is max session duration
            msg = ipc.REQ_INFORMATION  # Senf information, requested or not, to client on login notification
        if msg == ipc.REQ_LOGOUT:
            self.api.logout(data)
            self.onLogout(data)
        if msg == ipc.REQ_INFORMATION:
            info = {}
            if self.api.idle is not None:
                info['idle'] = self.api.idle
            if self.api.maxSession is not None:
                info['maxSession'] = self.api.maxSession
            self.ipc.sendInformationMessage(info)
    def initIPC(self):
        # ******************************************
        # * Initialize listener IPC & REST threads *
        # ******************************************
        logger.debug('Starting IPC listener at {}'.format(IPC_PORT))
        self.ipc = ipc.ServerIPC(IPC_PORT, clientMessageProcessor=self.clientMessageProcessor)
        self.ipc.start()
        if self.api.mac in self.knownIps:
            # Pick a random high port for the local REST listener.
            address = (self.knownIps[self.api.mac], random.randrange(43900, 44000))
            logger.info('Starting REST listener at {}'.format(address))
            self.httpServer = httpserver.HTTPServerThread(address, self)
            self.httpServer.start()
            # And notify it to broker
            self.api.notifyComm(self.httpServer.getServerUrl())
    def endIPC(self):
        """Stop the IPC and REST listener threads (best effort)."""
        # Remove IPC threads
        if self.ipc is not None:
            try:
                self.ipc.stop()
            except Exception:
                logger.error('Couln\'t stop ipc server')
        if self.httpServer is not None:
            try:
                self.httpServer.stop()
            except Exception:
                logger.error('Couln\'t stop REST server')
    def endAPI(self):
        """Withdraw the comms URL from the broker and notify the stop."""
        if self.api is not None:
            try:
                self.api.notifyComm(None)
            except Exception:
                logger.error('Couln\'t remove comms url from broker')
        self.notifyStop()
    # ***************************************************
    # Methods that ARE overriden by linux & windows Actor
    # ***************************************************
    def rename(self, name, user=None, oldPassword=None, newPassword=None):
        '''
        Invoked when broker requests a rename action
        MUST BE OVERRIDEN
        '''
        raise NotImplementedError('Method renamed has not been implemented!')
    def joinDomain(self, name, domain, ou, account, password):
        '''
        Invoked when broker requests a "domain" action
        MUST BE OVERRIDEN
        '''
        raise NotImplementedError('Method renamed has not been implemented!')
    # ****************************************
    # Methods that CAN BE overriden by actors
    # ****************************************
    def doWait(self, miliseconds):
        '''
        Invoked to wait a bit
        CAN be OVERRIDEN
        '''
        time.sleep(float(miliseconds) / 1000)
    def notifyStop(self):
        '''
        Overriden to log stop
        '''
        logger.info('Service is being stopped')
    def preConnect(self, user, protocol):
        '''
        Invoked when received a PRE Connection request via REST
        '''
        logger.debug('Pre-connect does nothing')
        return 'ok'
    def onLogout(self, user):
        # Hook invoked after a client logout is forwarded to the broker.
        logger.debug('On logout invoked for {}'.format(user))
| karthik-arjunan/testuds | actors/src/udsactor/service.py | service.py | py | 13,012 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "udsactor.log.logger.logger.isWindows",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "udsactor.log.logger.logger",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "udsactor.log.logger",
"line_number": 30,
"usage_type": "name"
},
... |
11783204177 | #######################
# Imports
#######################
from pandas.tseries.offsets import DateOffset
import streamlit as st
import plotly.graph_objects as go
import plotly.io as pio
from pmdarima import auto_arima
import numpy as np
pio.renderers.default = 'browser'
import pandas as pd
from sqlalchemy import create_engine
import statsmodels.api as sm
from pandas.tseries.offsets import DateOffset
# make any grid with a function
def make_grid(cols, rows):
    """Build a grid of Streamlit column groups.

    Creates `cols` containers, each holding `st.columns(rows)`, and
    returns them as a list indexable as grid[row][col].
    """
    grid = []
    for _ in range(cols):
        with st.container():
            grid.append(st.columns(rows))
    return grid
# set size of streamlit page
st.set_page_config(layout="wide")
# set style of dataframe
# CSS rules applied via pandas Styler.set_table_styles when rendering tables.
styles = [
    dict(selector="th", props=[("color", "#FFFFFF"),
                               ("border", "1px solid #eee"),
                               ("padding", "12px 35px"),
                               ("border-collapse", "collapse"),
                               ("background", "#002366"),
                               ("text-transform", "uppercase"),
                               ("font-size", "18px")
                               ]),
    dict(selector="td", props=[("color", "#FFFFFF"),
                               ("border", "1px solid #eee"),
                               ("padding", "12px 35px"),
                               ("border-collapse", "collapse"),
                               ("font-size", "18px"),
                               ("background", "#002366")
                               ]),
    dict(selector="table", props=[
        ("font-family" , 'Arial'),
        ("margin" , "25px auto"),
        ("border-collapse" , "collapse"),
        ("border" , "1px solid #eee"),
        ("border-bottom" , "2px solid #00cccc"),
    ]),
    dict(selector="caption", props=[("caption-side", "bottom")])
]
#######################
# Connection with DB
#######################
# NOTE(review): credentials are hard-coded in source — move them to
# environment variables or st.secrets before publishing this app.
db_config = {
    'user': 'report',  # user name
    'pwd': 'DFSeew53dfgxz_dsffh6769675D',  # password
    'host': '51.250.69.136',
    'port': 5433,  # connection port
    'db': 'wb_dwh'  # database name
}
connection_string = 'postgresql://{}:{}@{}:{}/{}'.format(
    db_config['user'],
    db_config['pwd'],
    db_config['host'],
    db_config['port'],
    db_config['db'],
)
engine = create_engine(connection_string)
#######################
# Loading orders
#######################
query = ''' SELECT * FROM stg.orders'''
orders = pd.read_sql_query(query, con=engine)
# Keep only the calendar date, dropping the time component.
orders['last_change_date'] = orders['last_change_date'].dt.date
#######################
# Loading stocks
#######################
query = '''SELECT * FROM stg.stocks'''
stocks = pd.read_sql_query(query, con=engine)
stocks['lastchangedate'] = stocks['lastchangedate'].dt.date
#######################
# Creating the buttons
#######################
# Thresholds (in days of stock) used later to flag excess / shortage.
# NOTE(review): these number_input widgets render in the sidebar, so the
# surrounding container appears to have no visual effect — confirm intent.
button_container = st.container()
with button_container:
    number_1 = st.sidebar.number_input('Insert Excess Stock Quantity (days)',
                                       min_value=10,
                                       max_value=1000,
                                       step=1)
    number_2 = st.sidebar.number_input('Insert Insufficient Stock Quantity (days)',
                                       min_value=5,
                                       max_value=1000,
                                       step=1)
#######################
# Stock information
#######################
# Shows current stock per article and per product group (stacked by
# warehouse), plus tables of items that are completely out of stock.
stock_container = st.container()
with stock_container:
    st.markdown('# Stock general info')
    grid_1 = make_grid(2, (3, 1))
    # Getting up-to-date info about stock: use the most recent snapshot date.
    current_stock_date = stocks.sort_values(by=['lastchangedate'], ascending=False)['lastchangedate'].iloc[0]
    # General info about all stock
    grid_1[0][0].markdown(f'**Articles in stock {current_stock_date}**')
    current_stock = stocks.query('lastchangedate == @current_stock_date')
    fig = go.Figure()
    # One stacked bar trace per warehouse, articles with quantity > 0 only.
    for warehouse in current_stock['warehousename'].unique():
        stock_by_articles = current_stock.query('warehousename == @warehouse').groupby('nmid')['quantity'].sum().reset_index().sort_values(by = 'quantity', ascending = False)
        exist_stock_by_article = stock_by_articles.query('quantity >0')
        fig.add_trace(go.Bar(x=exist_stock_by_article['nmid'], y=exist_stock_by_article['quantity'], name = warehouse))
    fig.update_layout(barmode='stack', width=900, height=400)
    fig.update_traces(opacity=0.75)
    fig.update_layout(xaxis_type='category', xaxis={'categoryorder': "total descending"})
    grid_1[0][0].plotly_chart(fig)
    grid_1[1][0].markdown(f'**Product groups in stock {current_stock_date}**')
    fig = go.Figure()
    # Same chart aggregated by product group (subject) instead of article.
    for warehouse in current_stock['warehousename'].unique():
        stock_by_subject = current_stock.query('warehousename == @warehouse').groupby('subject')['quantity'].sum().reset_index().sort_values(by='quantity',
                                                                                                                                            ascending=False)
        exist_stock_by_subject = stock_by_subject.query('quantity >0')
        fig.add_trace(go.Bar(x=exist_stock_by_subject['subject'], y=exist_stock_by_subject['quantity'], name = warehouse))
    fig.update_layout(barmode='stack', width=900, height=400)
    fig.update_traces(opacity=0.75)
    fig.update_layout(xaxis_type='category', xaxis={'categoryorder': "total descending"})
    grid_1[1][0].plotly_chart(fig)
    # Run out of stock
    grid_1[0][1].markdown('**Articles out of stock**')
    stock_by_articles_cat = current_stock.groupby(['nmid', 'subject'])['quantity'].sum().reset_index().sort_values(by='quantity', ascending=False)
    critical_in_stock = stock_by_articles_cat.query('quantity == 0').reset_index(drop=True).set_index('nmid')
    grid_1[0][1].dataframe(critical_in_stock['subject'])
    grid_1[1][1].markdown('**Product groups out of stock**')
    stock_by_subject_cat = current_stock.groupby(['subject'])['quantity'].sum().reset_index().sort_values(
        by='quantity', ascending=False)
    critical_subject_in_stock = stock_by_subject_cat.query('quantity == 0').reset_index(drop=True).set_index('subject')
    grid_1[1][1].dataframe(critical_subject_in_stock.index)
prediction_container = st.container()
with prediction_container:
    st.markdown('# Stock in days')
    st.markdown('### Please wait. Loading information may take a few seconds')
    # @st.experimental_memo(suppress_st_warning=True)
    # NOTE(review): with the memo decorator commented out, this heavy SARIMAX
    # loop re-runs on every call — and it is called several times below.
    def skip_computation():
        """Forecast 30-day orders per article with SARIMAX and join the
        result with current stock to derive stock coverage in days."""
        orders_ = orders.query('is_cancel == False').groupby(['nm_id', 'last_change_date'])['id'].count().reset_index()
        articles = orders_['nm_id'].unique()
        dict_prediction = {}
        for article in articles:
            # Reindex to a full daily range so days without orders count as 0.
            article_sales = orders_.query('nm_id == @article').set_index('last_change_date')
            idx = pd.date_range(orders['last_change_date'].min(), orders['last_change_date'].max())
            article_sales = article_sales.reindex(idx)
            article_sales.loc[article_sales['nm_id'].isna(), 'nm_id'] = article
            article_sales = article_sales.fillna(0)
            article_sales.columns = ['article', 'sales']
            article_sales = article_sales[['sales']]
            model = sm.tsa.statespace.SARIMAX(article_sales['sales'], order=(0, 0, 0), seasonal_order=(1, 1, 1, 12))
            results = model.fit()
            # Forecast the next 29 days and sum the predicted demand.
            future_dates = [article_sales.index[-1] + DateOffset(days=x) for x in range(0, 30)]
            future_dataset_df = pd.DataFrame(index=future_dates[1:], columns=article_sales.columns)
            future_df = pd.concat([article_sales, future_dataset_df])
            future_df['forecast'] = results.predict(start=len(article_sales), end=len(future_df), dynamic=True)
            dict_prediction[article] = future_df['forecast'].sum()
        res = pd.DataFrame(dict_prediction.items(), columns=['nm_id', 'predicted_sales']).astype(int)
        art_stock = current_stock.groupby(['nmid'])['quantity'].sum().reset_index().astype(int)
        res = res.merge(art_stock, right_on = 'nmid', left_on = 'nm_id', how = 'left')[['nm_id', 'predicted_sales', 'quantity']].fillna(0)
        # Coverage: how many days the current stock lasts at predicted demand.
        res['stock_in_days'] = res['quantity']/res['predicted_sales']*30
        res['quantity'] = res['quantity'].astype(int)
        res['stock_in_days'] = res['stock_in_days'].round(0)
        res.columns = ['nm_id', 'predicted orders for the next 30 days', 'current stock', 'stock_in_days']
        return res
    st.dataframe(skip_computation(), use_container_width=True)
    st.markdown('**<NA> in stock_in_days means there is no demand for this product**')
# Flag articles with excessive or insufficient stock coverage using the
# day thresholds from the sidebar inputs (number_1 / number_2).
bad_articles_container = st.container()
with bad_articles_container:
    st.markdown('# Excess goods')
    if number_1:
        st.table(skip_computation().query('stock_in_days >= @number_1').set_index('nm_id').astype(int).style.set_table_styles(styles))
    st.markdown('# Shortage of goods')
    if number_2:
        st.table(skip_computation().query('stock_in_days <= @number_2').set_index('nm_id').astype(int).style.set_table_styles(styles))
# Per-article drill-down: coverage table for the selected articles plus an
# actual-vs-forecast chart for each one.
article_details_container = st.container()
with article_details_container:
    st.markdown('# Article details')
    articles = orders.query('is_cancel == False')['nm_id'].unique()
    articles_selection = st.multiselect('Choose a product article (nmid)',
                                        options=articles,
                                        default=articles[0])
    for_string = [str(i) for i in articles_selection]
    string_articles = ' '.join(for_string)
    st.dataframe(skip_computation().query('nm_id == @articles_selection').set_index('nm_id'), use_container_width=True)
    for article in articles_selection:
        # Same daily-series preparation and SARIMAX fit as skip_computation(),
        # but keeping the full forecast series for plotting.
        orders_ = orders.query('is_cancel == False').groupby(['nm_id', 'last_change_date'])['id'].count().reset_index()
        article_sales = orders_.query('nm_id == @article').set_index('last_change_date')
        idx = pd.date_range(orders['last_change_date'].min(), orders['last_change_date'].max())
        article_sales = article_sales.reindex(idx)
        article_sales.loc[article_sales['nm_id'].isna(), 'nm_id'] = article
        article_sales = article_sales.fillna(0)
        article_sales.columns = ['article', 'sales']
        article_sales = article_sales[['sales']]
        model = sm.tsa.statespace.SARIMAX(article_sales['sales'], order=(0, 0, 0), seasonal_order=(1, 1, 1, 12))
        results = model.fit()
        future_dates = [article_sales.index[-1] + DateOffset(days=x) for x in range(0, 30)]
        future_dataset_df = pd.DataFrame(index=future_dates[1:], columns=article_sales.columns)
        future_df = pd.concat([article_sales, future_dataset_df])
        future_df['forecast'] = results.predict(start=len(article_sales), end=len(future_df), dynamic=True)
        future_df = future_df.reset_index()
        fig = go.Figure()
        fig.add_trace(go.Scatter(x=future_df['index'], y=future_df['sales'], name=f'orders {article}'))
        fig.add_trace(go.Scatter(x=future_df['index'], y=future_df['forecast'], name=f'predicted orders {article}'))
        fig.update_layout(barmode='overlay', width=1300, height=300)
        fig.update_traces(opacity=0.75)
        st.plotly_chart(fig)
| EkaterinaTerentyeva/Streamlit | streamlit_app.py | streamlit_app.py | py | 11,082 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "plotly.io.renderers",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "plotly.io",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "streamlit.container",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "streamlit.colu... |
981587259 | import argparse
import math
def main():
    """Entry point: parse CLI args and decompile the binary pattern file."""
    args = parse_args()
    decompile_pattern(args.input, args.output)
def parse_args(argv=None):
    """Parse command-line arguments for the pattern decompiler.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
            case argparse falls back to sys.argv[1:]; pass an explicit list
            for testing.

    Returns:
        argparse.Namespace with ``input`` (binary file object opened for
        reading) and ``output`` (text file object opened for writing).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('input', type=argparse.FileType('rb'), help='input pattern file')
    parser.add_argument('output', type=argparse.FileType('w'), help='output .cells file')
    return parser.parse_args(argv)
def decompile_pattern(input, output):
    """Convert a packed binary pattern into a plain-text .cells grid.

    The input holds n*n bytes (one per cell: 0 = dead -> '.', 1 = alive
    -> 'O'), so its size must be a power of 4.  Each row of n cells is
    written to *output* as one line of characters.

    Args:
        input: binary file object containing the packed pattern.
        output: text file object that receives the .cells representation.
    """
    content = input.read()
    if not is_power_of(len(content), 4):
        exit('Error, input file size is not a power of 4 (size = %d)' % len(content))
    n = int(math.sqrt(len(content)))
    symbols = {0: '.', 1: 'O'}
    i = 0
    for x in range(n):
        for y in range(n):
            # On Python 3, indexing bytes already yields an int; the
            # original unconditional ord() raised TypeError there.
            cell = content[i]
            if isinstance(cell, str):  # Python 2: bytes indexing yields str
                cell = ord(cell)
            output.write(symbols[cell])
            i += 1
        output.write('\n')


def is_power_of(num, base):
    """Return True iff num is an exact non-negative integer power of base."""
    if base == 1:
        # 1 ** k is always 1.
        return num == 1
    if base == 0:
        # 0 ** 0 == 1; the original crashed in math.log for num == 1.
        return num == 1
    if num <= 0:
        # math.log raises ValueError on non-positive input; no positive
        # base > 1 has a non-positive power.
        return False
    power = int(math.log(num, base) + 0.5)
    return base ** power == num
# Run the decompiler only when executed directly, not when imported.
if __name__ == '__main__':
    main()
| hjbyt/OS_HW5 | decompile_pattern.py | decompile_pattern.py | py | 1,149 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "math.sqrt... |
37584223577 | import numpy as np
import plotly.offline as py
import plotly.graph_objs as go
import pandas as pd
# Load the sensor training data; the first column is the distance feature.
data = pd.read_csv("../data/training_data.csv")
# print(data.head())
X = data.values[:,0]
Y = np.array([])
# NOTE(review): this percentile result is discarded — use it or remove it.
np.percentile(X, [25,50,75])
# Bucket the feature into 3 equal-frequency classes, once with numeric
# labels (training targets) and once with readable names (plot axis).
nd=pd.qcut(X,3, labels=[0,1,2])
nd2=pd.qcut(X,3, labels=["close","not close","far"])
# Build the label vector.  NOTE(review): np.append inside a loop is O(n^2);
# Y = np.asarray(nd, dtype=float) would be equivalent and linear.
for i in range(len(X)):
    # print(X[i],nd2[i])
    Y = np.append(Y,nd[i])
# print(len(X),len(Y))
# print(X[0],Y[0],nd2[0])
#Plots: scatter of the raw distance feature vs. its qcut class name.
trace = go.Scatter(
    x = X,
    y = nd2,
    mode = 'markers'
)
# print(Y.reshape(Y.shape[0], -1),X.shape[0])
# NOTE(review): this rebinds `data`, shadowing the DataFrame loaded above.
data = [trace]
py.plot(data, filename='../outputs/cluster_percentile.html')
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
def p(clf, test_x):
    """Classify a single reading with *clf* and print a short report.

    The numeric qcut classes map to readable names:
    0 -> "Anomalous", 1 -> "not close", anything else -> "far".
    """
    sample = [[test_x]]
    names = {0: "Anomalous", 1: "not close"}
    label = names.get(clf.predict(sample)[0], "far")
    scores = clf.decision_function(sample)
    print("test:", test_x, "\npredict:", label, "\nconfidence out of 3 classes:", scores)
# Fit a linear SVM on the 1-D feature (reshaped to a column) and its labels.
clf = SVC(kernel='linear')
clf.fit(X.reshape(X.shape[0], -1), Y.reshape(Y.shape[0]))
# Spot-check predictions for a few representative distances.
p(clf,405)
p(clf,55)
p(clf,155)
p(clf,5) | lmEshoo/sensors-anomoly-detection | src/cluster_percentile.py | cluster_percentile.py | py | 1,157 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.qcut",
"line_nu... |
23412397140 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated Django migration for the `compras` app: freezes new
    # defaults/help texts captured by `makemigrations` on 2015-11-08.
    # NOTE(review): the datetime defaults below are generation-time
    # snapshots — do not edit generated migrations by hand.

    dependencies = [
        ('compras', '0018_auto_20151108_1208'),
    ]

    operations = [
        migrations.AlterField(
            model_name='compra',
            name='fecha_entrega',
            field=models.DateTimeField(default=datetime.datetime(2015, 11, 8, 20, 47, 36, 986000, tzinfo=utc), help_text=b'Indique la fecha en la que el proveedor debe entregar el pedido.'),
        ),
        migrations.AlterField(
            model_name='compra',
            name='fecha_pedido',
            field=models.DateTimeField(default=datetime.datetime(2015, 11, 8, 20, 47, 36, 985000, tzinfo=utc), help_text=b'Ingrese la fecha en la que se realiza el pedido.'),
        ),
        migrations.AlterField(
            model_name='compra',
            name='numero_compra',
            field=models.IntegerField(default=1, help_text=b'Este dato se genera automaticamente cada vez que se va crear una Orden de Compra.', verbose_name=b'Numero Orden de Compra'),
        ),
        migrations.AlterField(
            model_name='proveedor',
            name='ruc',
            field=models.CharField(help_text=b'RUC del proveedor.', unique=True, max_length=15, verbose_name=b'RUC'),
        ),
        migrations.AlterField(
            model_name='telefonoproveedor',
            name='codigo_ciudad_operadora_telefono',
            field=models.ForeignKey(default=21, to='bar.CodigoCiudadOperadoraTelefono', help_text=b'Seleccione o ingrese el codigo de ciudad u operadora de telefonia movil.'),
        ),
        migrations.AlterField(
            model_name='telefonoproveedor',
            name='contacto',
            field=models.CharField(default=b'Nombre persona', help_text=b'Nombre de la persona a la cual contactar en este numero.', max_length=100, blank=True),
        ),
        migrations.AlterField(
            model_name='telefonoproveedor',
            name='interno',
            field=models.IntegerField(help_text=b'Ingrese el numero de interno.', blank=True),
        ),
    ]
| pmmrpy/SIGB | compras/migrations_1/0019_auto_20151108_1747.py | 0019_auto_20151108_1747.py | py | 2,202 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterField",
"line_number": 16,
"usage_type": "call"
},
{... |
43229153536 | import requests
def buscar_avatar(usuario):
    """Fetch the avatar URL of a GitHub user.

    :param usuario: str with the GitHub username
    :return: str with the avatar link
    """
    resposta = requests.get(f'https://api.github.com/users/{usuario}')
    return resposta.json()['avatar_url']
# Manual smoke test: print the avatar URL for a known GitHub user.
if __name__ == '__main__':
    print(buscar_avatar('wartrax13'))
| wartrax13/libpythonpro | libpythonpro/github_api.py | github_api.py | py | 386 | python | pt | code | 1 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
}
] |
17888210285 | import numpy
import pytest
from pyminer_algorithms import *
def test_transpose():
    """matrix_transpose handles 1-D and 2-D arrays and rejects higher ranks."""
    # A 1-D vector is promoted to a column matrix.
    vector = numpy.ones((3,))
    assert matrix_transpose(vector).shape == (3, 1)
    # A 2-D matrix has its axes swapped.
    matrix = numpy.ones((3, 4))
    assert matrix_transpose(matrix).shape == (4, 3)
    # A 3-D array is rejected with an AssertionError.
    tensor = numpy.ones((3, 4, 5))
    with pytest.raises(AssertionError):
        matrix_transpose(tensor)
| pyminer/pyminer | tests/test_algorithms/test_linear_algebra/test_matrix_transpose.py | test_matrix_transpose.py | py | 406 | python | en | code | 77 | github-code | 36 | [
{
"api_name": "numpy.ones",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 18... |
36635815592 | #Caching Mode
from config import api_id, api_key, account_id, site_ip, get_site_status
import requests
# Imperva API endpoint for changing a site's cache mode.
url_cache_mode = 'https://my.imperva.com/api/prov/v1/sites/performance/cache-mode'


def modify_cache_mode():
    """Disable the Imperva cache mode for every domain listed in ./domain.txt.

    Each non-blank line of domain.txt is a domain name; it is resolved to
    its Imperva site id via get_site_status() and a cache-mode change is
    POSTed for it.

    Returns:
        The parsed JSON body of the last API response, or None when the
        domain file contains no usable lines (the original code raised
        NameError on `response` in that case).
    """
    response = None
    with open('./domain.txt', 'r', encoding="utf-8") as file:
        domain_site = get_site_status()  # maps domain name -> Imperva site id
        for domain in file:
            domain = domain.strip()
            if not domain:
                continue  # tolerate blank lines in the domain file
            site_id = domain_site[domain]
            data_site = {
                'api_id': api_id,
                'api_key': api_key,
                'site_id': site_id,
                'cache_mode': 'disable',
                'dynamic_cache_duration': '',
                'aggressive_cache_duration': ''
            }
            response = requests.post(url_cache_mode, data=data_site)
    return response.json() if response is not None else None
# modify_cache_mode() | coeus-lei/python | imperva/ModifyCacheMode.py | ModifyCacheMode.py | py | 851 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "config.get_site_status",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "config.api_id",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "config.api_key",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "requests.post",
... |
22635877529 | import pathlib
from .. import utils # noqa, pylint: disable=unused-import
from .. import enclosures
class TemplateEnclosurePlugin(enclosures.EnclosurePlugin): # noqa: V102
"""
The default enclosure plugin, expands a template into the target path.
"""
# Default a hierarchy under the feed title and item title
template = str(
pathlib.Path(
"Feeds",
"{utils.quote_sep(feed_parsed.feed.title).strip()}",
"{utils.quote_sep(item_parsed.title).strip()}{enclosure_path.suffix}",
)
)
def load_config(self):
"""
Pre-process and validate the plugin config prior to linking each enclosure.
"""
self.template = self.config.get("template", self.template)
if not isinstance(self.template, str): # pragma: no cover
raise ValueError(
f"Format `template` for plugin must be a string: {self.template!r}"
)
def __call__(
self,
*args,
**kwargs,
): # pylint: disable=too-many-arguments
"""
Link the feed item enclosure to a target path expanded from a template.
"""
# Maybe find a better template format/engine, perhaps Jinja2?
# We need a templating engine that supports a very rich set of operations
# sufficient, for example, to extract data from `ElementTree` objects. This
# means we need to support execution of arbitrary code. This shouldn't be a
# problem since anyone that can run `$ feedarchiver` can also run `$ python`.
# But still, this has a bad code smell.
# https://python-forum.io/thread-24481.html
return [
eval( # nosec: B307, pylint: disable=eval-used
f"f{self.template!r}",
globals(),
dict(locals(), **kwargs),
)
]
| rpatterson/feed-archiver | src/feedarchiver/enclosures/template.py | template.py | py | 1,888 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 14,
"usage_type": "call"
}
] |
30533093290 | import cv2
import numpy as np
import os
from glob import glob
from tqdm import tqdm
#image = cv2.imread(r'C:\Users\mbarut\Desktop\car detection\vehicle-speed-counting\traffic.jpg')
"""
print(image.shape)
area = np.array([[552,605],[560,760],[1211,652],[1127,590]],np.int32)
color = (255, 0, 0)
thickness = 2
image = cv2.polylines(image, [area],
True, color, thickness)
# Displaying the image
while(1):
cv2.imshow('image', image)
if cv2.waitKey(20) & 0xFF == 27:
break
cv2.destroyAllWindows()
"""
def create_dir(path):
try:
if not os.path.exists(path):
os.makedirs(path)
except OSError:
print(f"ERROR: creating directory with name {path}")
def save_frame(video_path, gap=5):
name = video_path.split('\\')[-1]
cap = cv2.VideoCapture(video_path)
fps = cap.get(cv2.CAP_PROP_FPS)
idx = 0
gap = int(fps*gap)
#print(f'FPS of the video : {fps}')
os.chdir(r'C:\Users\mbarut\Desktop\dataset')
while True:
ret, frame = cap.read()
if ret == False:
cap.release()
break
if idx == 0:
cv2.imwrite(f"{name}_{idx}.png", frame)
else:
if idx % gap == 0:
cv2.imwrite(f"{name}_{idx}.png", frame)
#print(f"{save_path}/{name}_{idx}.png")
idx += 1
if __name__ == "__main__":
PATH = r'C:\Users\mbarut\Desktop\Eds kamera'
SAVE_PATH = r'C:\Users\mbarut\Desktop'
video_paths = glob(f'{PATH}/*')
save_dir = 'dataset'
save_path = os.path.join(SAVE_PATH, save_dir)
create_dir(save_path)
for path in tqdm(video_paths,total=len(video_paths)):
save_frame(path, gap=5)
| MehmetBarutcu/ObjectDetectionApp | car detection/vehicle-speed-counting/trial.py | trial.py | py | 1,808 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_... |
28832230499 | from MoocletCreationAutomator.secure import MOOCLET_API_TOKEN
import requests
import json
class MoocletConnector:
    """Minimal client for the MOOClet engine REST API.

    Every ``create_*`` helper POSTs to the corresponding endpoint with a
    token-authenticated request, logs the status code and response body,
    and returns the relevant field of the response (or None on failure).
    """

    BASE_URL = "https://mooclet.canadacentral.cloudapp.azure.com/engine/api/v1/"

    def __init__(self, token=MOOCLET_API_TOKEN):
        # Bug fix: the original assigned MOOCLET_API_TOKEN unconditionally,
        # silently ignoring the `token` argument.
        self.token = token
        self.url = self.BASE_URL

    def _post(self, endpoint, params, failure_message):
        """POST *params* to *endpoint*; return the parsed JSON, or None on failure.

        Centralises the request/logging boilerplate previously duplicated
        across all four create_* methods.
        """
        response = requests.post(
            url=self.url + endpoint,
            data=params,
            headers={'Authorization': f'Token {self.token}'}
        )
        print(response.status_code)
        body = response.json()
        if response.status_code != 201:
            print(failure_message)
            print(body)
            return None
        print(body)
        return body

    def create_mooclet_object(self, params):
        """Create a mooclet; return its id, or None if the request failed."""
        body = self._post("mooclet", params, "unable to create mooclet")
        if body is None:
            # Original indexed body["id"] even on failure, raising KeyError.
            return None
        print(body["id"])
        return body["id"]

    def create_version_object(self, params):
        """Create a version; return its name, or None if the request failed."""
        body = self._post("version", params,
                          f"unable to create version for {params['name']}")
        return None if body is None else body["name"]

    def create_policy_parameter(self, params):
        """Create a policy-parameters record (logs the outcome, returns None)."""
        self._post("policyparameters", params, "unable to create policy parameters")

    def create_variable(self, params):
        """Create a variable (logs the outcome, returns None)."""
        self._post("variable", params, "unable to create variables")
| Intelligent-Adaptive-Interventions-Lab/MturkDeploymentAutomater | MoocletCreationAutomator/MoocletConnector.py | MoocletConnector.py | py | 2,227 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "MoocletCreationAutomator.secure.MOOCLET_API_TOKEN",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "MoocletCreationAutomator.secure.MOOCLET_API_TOKEN",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "requests.post",
"line_number": 14,
"usa... |
8503903259 | import tkinter as tk
from tkinter import ttk
from PIL import Image, ImageTk # import pillow library for images
# Shared font definitions used across all pages.
Title_Font = ("Century Gothic", 15)
MARK_IV_Heading_Font = ("Century Gothic", 25)
Headline_Font = ("Century Gothic", 16)
# Banner image file name.  NOTE(review): not referenced anywhere in this
# file (HomePage loads 'monash-university-malaysia.png') — verify if needed.
imageMonash= "monash-university-malaysia_2.png"
def popMessage(msg):
    """Show a small popup window with *msg* and an Okay button.

    NOTE(review): this creates a second tk.Tk() root and runs its own
    mainloop; a tk.Toplevel on the existing root is the usual pattern.
    """
    pop = tk.Tk()
    pop.geometry('60x60')

    def quit():
        # Close the popup window.
        pop.destroy()

    pop.wm_title("pop up message")
    label = ttk.Label(pop,text=msg, font = ("Century Gothic", 10))
    label.grid(row=0, column=0)
    button1= ttk.Button(pop, text="Okay", command = quit)
    button1.grid(row=1,column=0)
    pop.mainloop()
class Mark(tk.Tk):
    """Root application window: builds the shared container and all page
    frames, and raises the requested page on demand."""

    def __init__(self,*args, **kwargs): # Runs once when the application is created
        tk.Tk.__init__(self, *args, **kwargs)
        #tk.Tk.iconbitmap(self, default = "markiv.ico")
        tk.Tk.wm_title(self, "Marking Software")

        container = tk.Frame(self) # Creating a window
        container.pack(side="top", fill="both", expand=True) # expand fills entire space of pack
        container.grid_rowconfigure(0, weight = 1) # first arg = minimum size
        container.grid_columnconfigure(0, weight = 1) # first arg = minimum size

        # Maps page-class name -> (frame instance, geometry string).
        self.frames = {}

        # Build every page up front, each paired with its own window geometry.
        for F,geometry in zip( (HomePage, StudentPage, MarkingPage), ('475x400','500x200','500x200')):
            page_name = F.__name__
            frame = F(parent=container, controller=self)
            self.frames[page_name] = (frame,geometry)
            frame.config(bg="white")
            # sticky="nsew" stretches each frame to fill the container cell,
            # so all pages share the same grid slot and can be raised.
            frame.grid(row=0, column = 0, sticky="nsew")

        #initialise first page to show
        self.show_frame("HomePage")

    # Raise the named page and resize the window to its preferred geometry.
    def show_frame(self, page_name):
        frame,geometry = self.frames[page_name]
        self.update_idletasks()
        self.geometry(geometry)
        frame.tkraise()
# Class that opens the the first frame
# Inherits all parent classes method and attributes
# Parent class in this case is Mark
class HomePage(tk.Frame):
    """Landing page: banner image, unit-name entry, student-count radio
    buttons, and save/exit buttons."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self,parent)
        ########################################
        #####          CODE GOES HERE     ######
        ########################################
        # Application title and subtitle.
        label1 = tk.Label(self, text = "MARK IV",bg = "white",fg = "black", font = MARK_IV_Heading_Font)
        label1.grid(column =0,row=8, columnspan=5,sticky = "n")
        label2 = tk.Label(self, text = "Monash IT Deparment Marking Software",bg = "white",fg = "black", font = Headline_Font)
        label2.grid(column=0,row=9, columnspan=5, sticky="n")
        # Empty labels used purely as vertical spacers.
        label3 = tk.Label(self, text = "", bg="white")
        label3.grid(column=0, row=10)
        label4 = tk.Label(self, text = "", bg="white")
        label4.grid(column=0, row=14)
        # Unit-name entry and number-of-students selector.
        unit_name = tk.Label(self, text = "Unit Name: ",bg = "white",fg = "black", font = Title_Font)
        students_no = tk.Label(self, text = "Number of Students: " ,bg = "white",fg = "black", font =Title_Font)
        entry_1 = tk.Entry(self) # text box to receive input from user
        unit_name.grid(row = 11, column = 0,sticky = "n")
        students_no.grid(row = 12, column = 0, sticky = "n")
        entry_1.grid(row = 11, column = 0, sticky="e")
        # NOTE(review): `variable=1` is a plain int, not a tk.IntVar — the
        # selected value is not retrievable this way; verify intent.
        radio_1 = tk.Radiobutton(self, text="4 students", variable=1, value=4,fg = "black",bg="white" , font = Title_Font )
        radio_2 = tk.Radiobutton(self, text="5 students", variable=1, value=5,fg = "black",bg="white" , font = Title_Font)
        radio_1.grid(row=12,column=0, sticky="e")
        radio_2.grid(row=13,column=0, sticky="e")
        ttk.Style().configure('white/black.TButton', foreground='Black', background='white')
        # Save: navigate to the student page and confirm with a popup.
        button1 = ttk.Button(self, text = "SAVE RECORDS", style = 'white/black.TButton',
                            command =lambda: (controller.show_frame("StudentPage"), popMessage("Added the record!")))
        button1.grid(row=15,column=0, sticky="n")
        button2 = ttk.Button(self, text = "EXIT", style = 'white/black.TButton',
                            command = controller.destroy)
        button2.grid(row=15,column=0, sticky="ne")
        # Banner image rendered on a canvas sized to the picture.
        self.original = Image.open('monash-university-malaysia.png')
        self.image = ImageTk.PhotoImage(self.original)
        self.display = tk.Canvas(self, height = 130,width = 450,bd=-1, highlightthickness=1) #resizing canvas to img size
        self.display.create_image(0, 0, image=self.image, anchor="n", tags="img")
        self.display.grid(row=1 ,sticky="nwes")
        self.grid(row=0,column=1)
        self.bind("<Configure>", self.resize)

    def resize(self, event):
        # Re-render the banner at a fixed 450x160 whenever the frame resizes.
        # NOTE(review): Image.ANTIALIAS is removed in Pillow 10 (use
        # Image.LANCZOS); also new images are tagged "IMG" while only "img"
        # is deleted, so repeated resizes accumulate canvas items — verify.
        size = (event.width, event.height)
        resized = self.original.resize((450,160),Image.ANTIALIAS)
        self.image = ImageTk.PhotoImage(resized)
        self.display.delete("img")
        self.display.create_image(0, 0, image=self.image, anchor="nw", tags="IMG")
class StudentPage(tk.Frame):
    """Placeholder page for entering student information."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self,parent)
        ########################################
        #####          CODE GOES HERE     ######
        ########################################
        # Page heading.
        label = tk.Label(self, text= "Student Information Page",font=MARK_IV_Heading_Font,bg="white")
        label.pack(pady=10, padx=10)

        # Navigate forward to the marks-allocation page.
        nextButton = ttk.Button(self, text = "Go to marking page",
                            command = lambda: controller.show_frame("MarkingPage"))
        nextButton.pack()
class MarkingPage(tk.Frame):
    """Placeholder page for allocating marks."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self,parent)
        ########################################
        #####          CODE GOES HERE     ######
        ########################################
        # Page heading.
        label = tk.Label(self, text= "Marks allocation page",font=MARK_IV_Heading_Font,bg="white")
        label.pack(pady=10, padx=10)

        # Navigate back to the student-information page.
        backButton = ttk.Button(self, text = "Back to student page",
                            command = lambda: controller.show_frame("StudentPage"))
        backButton.pack()
# Build the application window and enter the Tk event loop.
markingSoftware = Mark()
markingSoftware.mainloop()
| Veinga/FIT-2101 | assignment_1/marking_software.py | marking_software.py | py | 7,332 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tkinter.Tk",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk.Label",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "tkinter.ttk.Button",
"li... |
5048789180 | import os
from flask import Blueprint, flash, redirect, render_template, request, url_for, send_from_directory
from flask_login import current_user, login_required, login_user, logout_user
from auth.methods import AuthMethod
from catalog.methods import CatalogMethod
from category.methods import CategoryMethod
from item.methods import ItemMethod
catalog_bp = Blueprint('catalog_bp', __name__)
@catalog_bp.route('/')
@catalog_bp.route('/catalog')
def index():
    """Render the landing page with every category and, for each item,
    the categories it belongs to."""
    categories = CategoryMethod.get_all_categories('asc')
    items = ItemMethod.get_all_items('asc')
    # Map each item name to all categories that contain it.
    item_categories = {
        item.get_name(): CatalogMethod.get_all_categories_of_item_id(item.get_id())
        for item in items
    }
    return render_template('index.html',
                           title='Catalog Application',
                           subtitle='Python3 + Flask + SQLALchemy',
                           categories=categories,
                           item_categories=item_categories)
@catalog_bp.route('/endpoint')
@login_required
def endpoint():
    """ Run and display various analytics reports """
    # All categories / items in ascending order.
    categories = CategoryMethod.get_all_categories('asc')
    items = ItemMethod.get_all_items('asc')

    # Catalog links grouped two ways:
    #   { category_name: [item_name, ...], ... }
    catalog_links_name_by_category = CatalogMethod.get_all_links_names_by_category()
    #   { item_name: [category_name, ...], ... }
    catalog_links_name_by_item = CatalogMethod.get_all_links_names_by_item()

    # Login/logout session pairs: { user_id: [(login_time, logout_time), ...], ... }
    login_logout_sessions = AuthMethod.get_all()
    print('login_logout_sessions: ', login_logout_sessions)

    return render_template('catalog/endpoint.html',
                           title='EndPoint',
                           categories=categories,
                           items=items,
                           catalog_links_categories=catalog_links_name_by_category,
                           catalog_links_items=catalog_links_name_by_item,
                           login_logout_sessions=login_logout_sessions)
@catalog_bp.route('/favicon')
@catalog_bp.route('/favicon.ico')
def favicon():
    """Serve the site favicon from the static ico directory."""
    icons = os.path.join(os.path.dirname(os.getcwd()), 'userApp',
                         'application', 'static', 'dist', 'img', 'ico')
    print('ico_dir: ', icons)
    return send_from_directory(icons, 'favicon.ico')
@catalog_bp.errorhandler(400)
def key_error(e):
    # Bad request (e.g. malformed form data): show the dedicated 400 page.
    return render_template('catalog/400.html', error=e), 400


@catalog_bp.errorhandler(500)
def internal_server_error(e):
    # Unhandled server-side failure: show the generic error page.
    return render_template('catalog/generic.html', error=e), 500


@catalog_bp.errorhandler(Exception)
def unhandled_exception(e):
    # Catch-all for uncaught exceptions; still reported as HTTP 500.
    # NOTE(review): passes the Exception *class*, not the caught instance
    # `e`, as `exception` — possibly intended to be exception=e; verify.
    return render_template('catalog/generic.html', error=e, exception=Exception), 500
| or73/Catalog_App | application/modules/catalog/views.py | views.py | py | 3,583 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "category.methods.CategoryMethod.get_all_categories",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "category.methods.CategoryMethod",
"line_number": 22,
"usage_type": "na... |
15212822365 | #import libraries
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from dash import Dash, dcc, html, Input, Output
#Connect to Cloud SQL using the Cloud SQL Python connector
#define your app object
app = Dash(__name__)

#--Import and clean data(importing csv into pandas)
#df = pd.read_csv('bees_data.csv') ?
#To get data from Cloud Sql use this file
df = pd.read_csv('~/development/gcp/dashboards/bees/bees_data.csv')

#group data by pctOfColonies: mean per (state, ansi, cause, year, code) group
df = df.groupby(['state', 'ansi', 'affectedBy', 'year', 'stateCode'])[['pctOfColonies']].mean()
df.reset_index(inplace=True)
# Sanity-check the aggregated frame on startup.
print(df[:5])
#App layout: title, year selector, feedback text, and the choropleth graph.
app.layout = html.Div([

    html.H1("Web Application Dashboards with Dash", style={'text-align': 'center'}),

    # Year selector for the bee-colony map (static choices 2015-2018).
    dcc.Dropdown(id="slct_year",
                 options=[
                     {"label": "2015", "value": 2015},
                     {"label": "2016", "value": 2016},
                     {"label": "2017", "value": 2017},
                     {"label": "2018", "value": 2018},
                 ],
                 multi=False,
                 value=2015,
                 style={'width': "40%"}
                 ),

    # Text container updated by the callback with the chosen year.
    html.Div(id='output_container', children=[]),
    html.Br(),

    dcc.Graph(id='my_bee_map', figure={})

])
#Connect the Plotly graphs with Dash Components
@app.callback(
    [Output(component_id='output_container', component_property='children'),
     Output(component_id='my_bee_map', component_property='figure')],
    [Input(component_id='slct_year', component_property='value')]
)
def update_graph(option_slctd):
    """Redraw the US choropleth for the selected year (Varroa mites only)."""
    print(option_slctd)
    print(type(option_slctd))

    container = "The year chose by user was: {}".format(option_slctd)

    # Filter to the chosen year and the Varroa_mites cause in one mask.
    mask = (df["year"] == option_slctd) & (df["affectedBy"] == "Varroa_mites")
    dff = df[mask]

    #Plotly Express
    fig = px.choropleth(
        data_frame = dff,
        locationmode='USA-states',
        locations='stateCode',
        scope="usa",
        color='pctOfColonies',
        hover_data=['state', 'pctOfColonies'],
        color_continuous_scale=px.colors.sequential.YlOrRd,
        labels={'pctOfColonies': '% of Bee Colonies'},
        template='plotly_dark'
    )

    return container, fig
# Start the Dash development server when run as a script.
if __name__ == '__main__':
    app.run_server(debug=False)
| RichardLadson/bees | bees.py | bees.py | py | 2,259 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dash.Dash",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "dash.html.Div",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "dash.html",
"line_number"... |
12607877643 | # 本地Chrome浏览器设置方法
from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup as bs
# from selenium.webdriver.chrome.options import Options # 从options模块中调用Options类
from PIL import Image
from aip import AipOcr
import time
import csv
import os
import requests
import pickle
# Desktop User-Agent for the direct requests-based score query.
# NOTE(review): adjacent literals concatenate with no separating space, so
# the UA reads "...x64)AppleWebKit..." — verify this is intended.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'
                         'AppleWebKit/537.36 (KHTML, like Gecko)'
                         'Chrome/88.0.4321.0 Safari/537.36 Edg/88.0.702.0'}

# Desired capabilities for the Edge WebDriver.
EDGE = {"browserName": "MicrosoftEdge",
        "version": "",
        "platform": "WINDOWS",
        # The important part is the options block below.
        "ms:edgeOptions": {
            'extensions': [],
            'args': [
                # '--headless',
                '--disable-gpu',
            ]}
        }
# def cookies_read():
# cookies_txt = open('cookies.txt', 'r')
# cookies_dict = json.loads(cookies_txt.read())
# cookies = requests.utils.cookiejar_from_dict(cookies_dict)
# return (cookies)
def getCaptcha():
    """OCR the saved captcha image (cap.jpg) via Baidu AipOcr; return the text.

    NOTE(review): API credentials are hard-coded below — move them to a
    config file or environment variables before sharing this script.
    """
    APP_ID = '23602828'
    API_KEY = '8y5HIlvOUmGGr3Y8Pm7uCD4M'
    SECRET_KEY = 'SRYgLRj9aq2sqGx9GUqMsUOU2QzNlV4D'
    client = AipOcr(APP_ID, API_KEY, SECRET_KEY)
    with open('cap.jpg', 'rb') as f:
        img = f.read()
    res = client.basicAccurate(img)  # high-accuracy OCR endpoint
    cap = ''
    if 'words_result' in res.keys():
        # Concatenate every recognised fragment into one captcha string.
        for item in res['words_result']:
            cap += str(item['words'])
    return cap
def printCaptcha():
    """Screenshot the login page and crop the captcha element into cap.jpg."""
    driver.save_screenshot('screenshot.png')
    # The captcha is located as the first <img> tag on the login page.
    img = driver.find_element_by_tag_name('img')
    location = img.location
    size = img.size
    # Bounding box of the captcha element within the full-page screenshot.
    rectangle = (int(location['x']), int(location['y']), int(
        location['x'] + size['width']), int(location['y'] + size['height']))
    i = Image.open('screenshot.png')
    frame4 = i.crop(rectangle)
    frame4 = frame4.convert('RGB')  # JPEG cannot store alpha channels
    frame4.save('cap.jpg')
def login():
    """Fill in the CAS login form (credentials + OCR'd captcha) and submit.

    NOTE(review): student credentials are hard-coded; switch back to the
    commented input() prompts before publishing this script.
    """
    username = driver.find_element_by_id('username')
    username.send_keys("1815200045")
    # username.send_keys(input('Username:'))
    pwd = driver.find_element_by_id('password')
    pwd.send_keys("weishenme123")
    # pwd.send_keys(input('Password:'))
    printCaptcha()
    captcha = driver.find_element_by_id('captcha')
    captcha.send_keys(getCaptcha())
    submit = driver.find_element_by_class_name('btn-submit')
    submit.click()
    try:
        # After a successful login the dashboard tile appears; open it.
        WebDriverWait(driver, 20, 0.5).until(
            EC.presence_of_element_located((By.ID, 'layout_63')))
        driver.find_element_by_id('layout_63').click()
        print('Login success!')
    except:
        driver.quit()
        exit("Time Limit Exceed,Please Retry!")
# driver.get('http://jwxt.gzhu.edu.cn/sso/lyiotlogin')
def toScorePage():
    # Navigate to the score-query page in the newly-opened tab, select the
    # requested academic year/term, run the query, and persist the results.
    """ try:
        # Jump after a successful login
        WebDriverWait(driver, 20, 0.5).until(
            EC.presence_of_element_located((By.ID, 'layout_63')))
        driver.find_element_by_id('layout_63').click()
        print('Login success!')
    except:
        driver.quit()
        exit("Time Limit Exceed,Please Retry!") """
    # Switch to the newly opened portal tab.
    windows = driver.window_handles
    driver.switch_to.window(windows[-1])  # To new Tag
    driver.find_element_by_xpath(
        "/html/body/div[3]/div/nav/ul/li[4]/a").click()
    # Duplicate id attributes on the page force a full-xpath lookup here.
    driver.find_element_by_xpath(
        "/html/body/div[3]/div/nav/ul/li[4]/ul/li[4]/a").click()
    # The score-query view opens in yet another tab; switch to it.
    windows = driver.window_handles
    driver.switch_to.window(windows[-1])  # To new Tag
    # Persist the session cookies so later runs can query via requests.
    # NOTE(review): the file handle from open() is never closed — prefer a
    # with-block here.
    pickle.dump(driver.get_cookies(), open("Scores_Cookies.pkl", "wb"))
    print(driver.get_cookies())
    # Not a real <select>: open the styled dropdown and pick the academic
    # year (1.All 2.2021-2022 3.2020-2021 4.2019-2020 5.2018-2019).
    driver.find_element_by_xpath("//div[@id='xnm_chosen']/a/span").click()
    driver.find_element_by_xpath(
        f"//div[@id='xnm_chosen']/div/ul/li[{year}]").click()
    # Same pattern for the term dropdown (1.All 2.First 3.Second).
    driver.find_element_by_xpath("//div[@id='xqm_chosen']/a/span").click()
    driver.find_element_by_xpath(
        f"//div[@id='xqm_chosen']/div/ul/li[{term}]").click()
    # Real <select>: raise the page size to 100 rows so one page shows all.
    sel = Select(driver.find_element_by_xpath(
        "//*[@id='pager_center']/table/tbody/tr/td[8]/select"))
    sel.select_by_value('100')
    time.sleep(0.5)
    driver.find_element_by_id('search_go').click()  # click the query button
    time.sleep(1)  # wait records display
    save()
def save():
    """Scrape the rendered score table and write scores.csv and scores.txt."""
    li = []
    soup = bs(driver.page_source, 'html.parser')
    # Each jqGrid data row carries this class triple.
    items = soup.find_all('tr', class_='ui-widget-content jqgrow ui-row-ltr')
    for item in items:
        i = item.find_all('td')
        # Columns kept: academic year, term, course, credits, score, GPA point.
        li.append([i[1].text, i[2].text, i[4].text,
                   i[6].text, i[7].text, i[9].text])
    # scores = text.split('\n')
    # for score in scores:
    #     item = score.split()
    #     li.append([item[1],item[2],item[4],item[6],item[7],item[8]])
    with open('scores.csv', 'w', newline='', encoding='utf-8') as c:
        writer = csv.writer(c, dialect='excel')
        writer.writerow(['学年', '学期', '课程', '学分', '成绩', '绩点'])
        for i in li:
            writer.writerow(i)
    # Also dump the raw table text for reference.
    text = driver.find_element_by_xpath("//table[@id='tabGrid']/tbody").text
    with open('scores.txt', 'w', encoding='utf-8') as f:
        f.write(text)
if __name__ == '__main__':
    # Prompt until a valid academic-year choice is entered.
    while True:
        year = int(input(
            "Choose academic year:1.All 2.2021-2022 3.2020-2021 4.2019-2020 5.2018-2019: "))
        ch = ['', '2021', '2020', '2019', '2018']
        xnm = ch[year - 1]  # year code for the POST payload
        if year in [2, 3, 4, 5]:
            year += 6  # maps the choice to the dropdown <li> index selenium clicks
            break
        elif year == 1:
            break
        else:
            print("Error choice, please re-input.")
    # Prompt until a valid term choice is entered.
    while True:
        term = int(input("Choose term:1.All 2.First term 3.Second term: "))
        choice = ['', '3', '12']
        if term in [1, 2, 3]:
            xqm = choice[term - 1]  # term code for the POST payload
            break
        else:
            print("Error choose, please re-input.")
    params = (
        ('doType', 'query'),
        ('gnmkdm', 'N305005'),
        ('su', '1815200045'),
    )
    data = {
        'xnm': xnm,
        'xqm': xqm,
        '_search': 'false',
        'nd': round(time.time() * 1000),
        'queryModel.showCount': '100',
        'queryModel.currentPage': '1',
        'queryModel.sortName': '',
        'queryModel.sortOrder': 'asc',
        'time': '1'
    }
    # Fast path: reuse previously saved cookies and hit the score API directly.
    try:
        My_cookies = pickle.load(open("D:\\MyCode\\Code\\Python\\queryMyScores\\Scores_Cookies.pkl", "rb"))
        cookies = {'SF_cookie_18': My_cookies[0]['value'], 'JSESSIONID': My_cookies[1]['value']}
        response = requests.post('http://jwxt.gzhu.edu.cn/jwglxt/cjcx/cjcx_cxDgXscj.html', params=params,
                                 cookies=cookies, data=data, verify=False)
        res = response.json()
        for i in res['items']:
            print(f"{i['kcmc']}:{i['bfzcj']}")  # course name : percentage score
    # Fallback: drive Edge through the CAS login flow and scrape the page.
    except:
        # driver = webdriver.Edge()
        driver = webdriver.Edge(
            capabilities=EDGE, executable_path=r"C:\Program Files (x86)\Microsoft\Edge\Application\msedgedriver.exe")
        driver.maximize_window()
        driver.get('https://cas.gzhu.edu.cn/cas_server/login')
        # driver.set_window_size(1920,1080)
        login()
        toScorePage()
        driver.quit()  # close the browser
        os.remove('cap.jpg')
        os.remove('screenshot.png')
| Kenny3Shen/CodeShen | Code/Python/queryMyScores/queryScores.py | queryScores.py | py | 7,792 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "aip.AipOcr",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.wait.W... |
70539349223 | import torch
import operator
from sema2insts import Sema2Insts, expr2graph
import json
import random
from time import time
import z3
from z3_exprs import serialize_expr
from synth import synthesize, sigs, check_synth_batched
llvm_insts = [inst for inst in sigs.keys() if inst.startswith('llvm')]
inst_pool = []
with open('instantiated-insts.json') as f:
for inst, imm8 in json.load(f):
inst_pool.append((inst, imm8))
num_insts = len(inst_pool)
model = Sema2Insts(num_insts)
model.load_state_dict(torch.load('sema2insts.model'))
model.eval()
bvmax = lambda a,b : z3.If(a>=b, a, b)
x, y, z = z3.BitVecs('x y z', 32)
liveins = [('x', 32), ('y', 32)]
def p19_impl(x, m, k):
o1 = x >> k;
o2 = x ^ o1;
o3 = o2 & m;
o4 = o3 << k;
o5 = o4 ^ o3;
return o5 ^ x;
one = z3.BitVecVal(1, 32)
zero = z3.BitVecVal(0, 32)
p01 = x & (x-1) # 0.496
p02 = x & (x+1) # 0.496
p03 = x & (-x) # 0.4899
p04 = x ^ (x-1) # 0.728
p05 = x | (x-1) # 1.068
p06 = x | (x+1) # 0.613
p07 = (~x) & (x+1) # 0.57
p08 = (~x) & (x-1) # 0.64
p09 = ((x >> 31) ^ x) - (x >> 31) # 2.6092
p10 = z3.If((x & y) <= (x ^ y), one, zero) # 1.24
p11 = z3.If(x & (~y) > y, one, zero) # no solution
p12 = z3.If(x & (~y) <= y, one, zero) # 4.07898
p13 = (x >>31) | (-x) >> 31 # 0.36
p14 = (x&y) + ((x^y) >> 1) # no solution
p15 = (x|y) - ((x^y) >> 1) # no solution
p16 = (x^y) & (-z3.If(z3.UGE(x,y), one, zero)) ^ y # no solution
p17 = (((x-1) | x) + 1) & x # 1.32
p18 = z3.If(z3.And(((x-1)&x)==0, x!=0), one, zero) # no solution
# p19 = timeout?
o1 = -x
o2 = x & o1
o3 = x + o2
o4 = x ^ o2
o5 = o4 >> 2
o6 = o5 / o2
p20 = o3 | o6 # no solution
#target = bvmax(x, y) * y
liveins = [('x', 32)]
#liveins = [('x', 32), ('y', 32)]
liveins = [('x', 32), ('y', 32), ('z', 32)]
target = p19_impl(x, y, z)
target = p20
target = z3.simplify(target)
#ranges = {'z': (0, 32), 'x': (-10,10) }
#counter_examples = {'x': [0] }
counter_examples = {'x': [1<<3, 0b101 << 3, 1<<4, 1<<31, 1<<15, 1<<11, 0b11 << 23, 0b101 << 15] }
counter_examples = {}
counter_examples = {'z': list(range(32)) }
target_serialized = serialize_expr(target)
g, g_inv, ops, params, _ = expr2graph(target_serialized)
inst_probs = model(g, g_inv, ops, params).softmax(dim=0)
_, ids = inst_probs.topk(30)
insts = [inst_pool[i] for i in ids]
insts = [(inst, None) for inst in sigs if '32' in inst and 'llvm' in inst and 'Trunc' not in inst and
'Ext' not in inst
]
print(insts)
random.shuffle(insts)
begin = time()
out = synthesize(insts, target, liveins, timeout=60 * 60, num_levels=7, test_inputs=counter_examples)
end = time()
print(out)
print('Time elapsed:', end-begin)
| ychen306/upgraded-succotash | test-synth.py | test-synth.py | py | 2,618 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "synth.sigs.keys",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "synth.sigs",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sema2insts.Sema2Insts",
"lin... |
21955928918 | from RepSys import Error, config, layout
from RepSys.svn import SVN
from RepSys.util import execcmd
from RepSys.util import get_output_exec
from io import StringIO
import sys
import os
import os.path
import re
import time
import locale
import glob
import tempfile
import shutil
import subprocess
locale.setlocale(locale.LC_ALL, "C")
def getrelease(pkgdirurl, rev=None, macros=[], exported=None, create=False):
"""Tries to obtain the version-release of the package for a
yet-not-markrelease revision of the package.
Is here where things should be changed if "automatic release increasing"
will be used.
"""
svn = SVN()
pkgcurrenturl = os.path.join(pkgdirurl, "current")
specurl = os.path.join(pkgcurrenturl, "SPECS")
srpmurl = os.path.join(pkgcurrenturl, "SRPMS")
if not create:
if exported is None:
tmpdir = tempfile.mktemp()
svn.export(specurl, tmpdir, rev=rev)
else:
tmpdir = os.path.join(exported, "SPECS")
try:
found = glob.glob(os.path.join(tmpdir, "*.spec"))
if not found:
raise Error("no .spec file found inside %s" % specurl)
specpath = found[0]
options = [("--define", expr) for expr in macros]
command = ["rpm", "-q", "--qf", "%{EPOCH}:%{VERSION}-%{RELEASE}\n",
"--specfile", specpath]
command.extend(options)
status, output = execcmd(*command)
releases = output.split()
try:
epoch, vr = releases[0].split(":", 1)
version, release = vr.split("-", 1)
except ValueError:
raise Error("Invalid command output: %s: %s" % \
(command, output))
#XXX check if this is the right way:
if epoch == "(none)":
ev = version
else:
ev = epoch + ":" + version
return ev, release
finally:
if exported is None and os.path.isdir(tmpdir):
shutil.rmtree(tmpdir)
else:
if exported is None:
tmpdir = tempfile.mktemp()
svn.export(specurl, tmpdir, rev=rev)
else:
tmpdir = os.path.join(exported, "SRPMS")
try:
found = glob.glob(os.path.join(srpmurl, "*.src.rpm"))
if not found:
raise Error("no .src.rpm file found inside %s" % srpmurl)
srpmpath = found[0]
options = [("--define", expr) for expr in macros]
command = ["rpm", "-q", "--qf", "%{EPOCH}:%{VERSION}-%{RELEASE}\n",
"--specfile", specpath]
command.extend(options)
status, output = execcmd(*command)
releases = output.split()
try:
epoch, vr = releases[0].split(":", 1)
version, release = vr.split("-", 1)
except ValueError:
raise Error("Invalid command output: %s: %s" % \
(command, output))
#XXX check if this is the right way:
if epoch == "(none)":
ev = version
else:
ev = epoch + ":" + version
return ev, release
finally:
if exported is None and os.path.isdir(tmpdir):
shutil.rmtree(tmpdir)
class _Revision:
lines = []
date = None
raw_date = None
revision = None
author_name = None
author_email = None
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __repr__(self):
lines = repr(self.lines)[:30] + "...]"
line = "<_Revision %d author=%r date=%r lines=%s>" % \
(self.revision, self.author, self.date, lines)
return line
class _Release(_Revision):
version = None
release = None
revisions = []
release_revisions = []
authors = []
visible = False
def __init__(self, **kwargs):
self.revisions = []
_Revision.__init__(self, **kwargs)
def __repr__(self):
line = "<_Release v=%s r=%s revs=%r>" % \
(self.version, self.release, self.revisions)
return line
unescaped_macro_pat = re.compile(r"(^|[^%])%([^%])")
def escape_macros(text):
escaped = unescaped_macro_pat.sub("\\1%%\\2", text)
return escaped
def format_lines(lines):
first = 1
entrylines = []
perexpr = re.compile(r"([^%])%([^%])")
for line in lines:
if line:
line = escape_macros(line)
if first:
first = 0
line = line.lstrip()
if line[0] != "-":
nextline = "- " + line
else:
nextline = line
elif line[0] != " " and line[0] != "-":
nextline = " " + line
else:
nextline = line
if nextline not in entrylines:
entrylines.append(nextline)
return entrylines
class _Author:
name = None
email = None
revisions = None
visible = False
def group_releases_by_author(releases):
allauthors = []
grouped = []
for release in releases:
# group revisions of the release by author
authors = {}
latest = None
for revision in release.revisions:
authors.setdefault(revision.author, []).append(revision)
# create _Authors and sort them by their latest revisions
decorated = []
for authorname, revs in authors.items():
author = _Author()
author.name = revs[0].author_name
author.email = revs[0].author_email
author.revisions = revs
# #41117: mark those authors without visible messages
author.visible = bool(sum(len(rev.lines) for rev in revs))
revlatest = author.revisions[0]
# keep the latest revision even for completely invisible
# authors (below)
if latest is None or revlatest.revision > latest.revision:
latest = revlatest
if not author.visible:
# only sort those visible authors, invisible ones are used
# only in "latest"
continue
decorated.append((revlatest.revision, author))
decorated.sort(reverse=1)
if release.visible:
release.authors = [t[1] for t in decorated]
firstrel, release.authors = release.authors[0], release.authors[1:]
release.author_name = firstrel.name
release.author_email = firstrel.email
release.release_revisions = firstrel.revisions
else:
# we don't care about other possible authors in completely
# invisible releases
firstrev = release.revisions[0]
release.author_name = firstrev.author_name
release.author_email = firstrev.author_email
release.raw_date = firstrev.raw_date
release.date = firstrev.date
release.date = latest.date
release.raw_date = latest.raw_date
release.revision = latest.revision
grouped.append(release)
return grouped
def group_revisions_by_author(currentlog):
revisions = []
last_author = None
for entry in currentlog:
revision = _Revision()
revision.lines = format_lines(entry.lines)
revision.raw_date = entry.date
revision.date = parse_raw_date(entry.date)
revision.revision = entry.revision
if entry.author == last_author:
revisions[-1].revisions.append(revision)
else:
author = _Author()
author.name, author.email = get_author_name(entry.author)
author.revisions = [revision]
revisions.append(author)
last_author = entry.author
return revisions
emailpat = re.compile("(?P<name>.*?)\s*<(?P<email>.*?)>")
usermap = {}
def get_author_name(author):
found = emailpat.match(config.get("users", author, author))
gold = emailpat.match(usermap.get(author,""))
name = ((found and found.group("name")) or (gold and gold.group("name")) or author)
email = ((found and found.group("email")) or (gold and gold.group("email")) or author+"@mageia.org")
return name, email
def parse_raw_date(rawdate):
return time.strftime("%a %b %d %Y", rawdate)
def filter_log_lines(lines):
# Lines in commit messages beginning with CLOG will be the only shown
# in the changelog. These lines will have the CLOG token and blanks
# stripped from the beginning.
onlylines = None
clogstr = config.get("log", "unignore-string")
if clogstr:
clogre = re.compile(r"(^%s[^ \t]?[ \t])" % clogstr)
onlylines = [clogre.sub("", line)
for line in lines if line.startswith(clogstr)]
if onlylines:
filtered = onlylines
else:
# Lines in commit messages containing SILENT at any position will be
# skipped; commits with their log messages beggining with SILENT in the
# first positionj of the first line will have all lines ignored.
ignstr = config.get("log", "ignore-string", "SILENT")
if len(lines) and lines[0].startswith(ignstr):
return []
filtered = [line for line in lines if ignstr not in line]
return filtered
def make_release(author=None, revision=None, date=None, lines=None,
entries=[], released=True, version=None, release=None):
rel = _Release()
rel.author = author
if author:
rel.author_name, rel.author_email = get_author_name(author)
rel.revision = revision
rel.version = version
rel.release = release
rel.date = (date and parse_raw_date(date)) or None
rel.lines = lines
rel.released = released
rel.visible = False
for entry in entries:
lines = filter_log_lines(entry.lines)
revision = _Revision()
revision.revision = entry.revision
revision.lines = format_lines(lines)
if revision.lines:
rel.visible = True
revision.date = parse_raw_date(entry.date)
revision.raw_date = entry.date
revision.author = entry.author
(revision.author_name, revision.author_email) = \
get_author_name(entry.author)
rel.revisions.append(revision)
return rel
def dump_file(releases, currentlog=None, template=None):
''' Template cheetah suppressed and replaced by hard code. The template is selectable with the "name" in "template" section of config file.'''
templname = template or config.get("template", "name",
"default")
draft=""
releases_author = group_releases_by_author(releases)
revisions_author = group_revisions_by_author(currentlog)
if templname == 'revno':
''' a specific template'''
for rel in releases_author:
if not rel.released:
draft = " (not released yet)\n"
draft = draft + "* {0} {1} <{2}> {3}-{4}\n\n".format(rel.date, rel.author_name, rel.author_email, rel.version, rel.release)
for rev in rel.release_revisions:
first=True
spaces = " " * (len(str(rev.revision)) +3)
for line in rev.lines:
if first:
draft = draft +"[{0}] {1}\n".format(rev.revision, line)
first = False
else:
draft = draft + spaces + line + "\n"
for author in rel.authors:
if not author.visible:
continue
draft += "+ {0} <{1}>\n".format(author.name, author.email)
for rev in author.revisions:
first=True
spaces = " " * (len(str(rev.revision)) + 3)
for line in rev.lines:
if first:
draft = draft +"[{0}] {1}\n".format(rev.revision, line)
first = False
else:
draft = draft + spaces + line + "\n"
if rel is not releases_author[-1]:
draft += "\n"
else:
# default template
if not releases_author[-1].visible:
releases_author = releases_author[:-1]
for rel in releases_author:
if not rel.released:
unreleased = " (not released yet)\n"
else:
unreleased = ""
draft = draft + "* {0} {1} <{2}> {3}-{4}\n{5}+ Revision: {6}\n".format(rel.date, rel.author_name, rel.author_email, rel.version, rel.release, unreleased, rel.revision)
if not rel.visible:
draft = draft + "+ rebuild (emptylog)\n"
for rev in rel.release_revisions:
for line in rev.lines:
draft = draft + line + "\n"
for author in rel.authors:
if not author.visible:
continue
draft += "+ {0} <{1}>\n".format(author.name, author.email)
for rev in author.revisions:
for line in rev.lines:
draft = draft + line + "\n"
if rel is not releases_author[-1]:
draft += "\n"
return draft
class InvalidEntryError(Exception):
pass
def parse_repsys_entry(revlog):
# parse entries in the format:
# %repsys <operation>
# key: value
# ..
# <newline>
# <comments>
#
if len(revlog.lines) == 0 or not revlog.lines[0].startswith("%repsys"):
raise InvalidEntryError
try:
data = {"operation" : revlog.lines[0].split()[1]}
except IndexError:
raise InvalidEntryError
for line in revlog.lines[1:]:
if not line:
break
try:
key, value = line.split(":", 1)
except ValueError:
raise InvalidEntryError
data[key.strip().lower()] = value.strip() # ???
return data
def get_revision_offset():
try:
revoffset = config.getint("log", "revision-offset", 0)
except (ValueError, TypeError):
raise Error("Invalid revision-offset number in configuration "
"file(s).")
return revoffset or 0
oldmsgpat = re.compile(
r"Copying release (?P<rel>[^\s]+) to (?P<dir>[^\s]+) directory\.")
def parse_markrelease_log(relentry):
if not ((relentry.lines and oldmsgpat.match(relentry.lines[0]) \
or parse_repsys_entry(relentry))):
raise InvalidEntryError
from_rev = None
path = None
for changed in relentry.changed:
if changed["action"] == "A" and changed["from_rev"]:
from_rev = changed["from_rev"]
path = changed["path"]
break
else:
raise InvalidEntryError
# get the version and release from the names in the path, do not relay
# on log messages
version, release = path.rsplit(os.path.sep, 3)[-2:]
return version, release, from_rev
def svn2rpm(pkgdirurl, rev=None, size=None, submit=False,
template=None, macros=[], exported=None, create=False):
concat = config.get("log", "concat", "").split()
revoffset = get_revision_offset()
svn = SVN()
pkgreleasesurl = layout.checkout_url(pkgdirurl, releases=True)
pkgcurrenturl = layout.checkout_url(pkgdirurl)
releaseslog = svn.log(pkgreleasesurl, noerror=1)
currentlog = svn.log(pkgcurrenturl, limit=size, start=rev,
end=revoffset)
# sort releases by copyfrom-revision, so that markreleases for same
# revisions won't look empty
releasesdata = []
if releaseslog:
for relentry in releaseslog[::-1]:
try:
(version, release, relrevision) = \
parse_markrelease_log(relentry)
except InvalidEntryError:
continue
releasesdata.append((relrevision, -relentry.revision, relentry,
version, release))
releasesdata.sort()
# collect valid releases using the versions provided by the changes and
# the packages
prevrevision = 0
releases = []
for (relrevision, dummy, relentry, version, release) in releasesdata:
if prevrevision == relrevision:
# ignore older markrelease of the same revision, since they
# will have no history
continue
entries = [entry for entry in currentlog
if relrevision >= entry.revision and
(prevrevision < entry.revision)]
if not entries:
#XXX probably a forced release, without commits in current/,
# check if this is the right behavior
sys.stderr.write("warning: skipping (possible) release "
"%s-%s@%s, no commits since previous markrelease (r%r)\n" %
(version, release, relrevision, prevrevision))
continue
release = make_release(author=relentry.author,
revision=relentry.revision, date=relentry.date,
lines=relentry.lines, entries=entries,
version=version, release=release)
releases.append(release)
prevrevision = relrevision
# look for commits that have been not submitted (released) yet
# this is done by getting all log entries newer (greater revision no.)
# than releasesdata[-1] (in the case it exists)
if releasesdata:
latest_revision = releasesdata[-1][0] # the latest copied rev
else:
latest_revision = 0
notsubmitted = [entry for entry in currentlog
if entry.revision > latest_revision]
if notsubmitted:
# if they are not submitted yet, what we have to do is to add
# a release/version number from getrelease()
version, release = getrelease(pkgdirurl, macros=macros,
exported=exported, create=create)
toprelease = make_release(entries=notsubmitted, released=False,
version=version, release=release)
releases.append(toprelease)
data = dump_file(releases[::-1], currentlog=currentlog, template=template)
return data
def _split_changelog(stream):
current = None
count = 0
def finish(entry):
lines = entry[2]
# strip newlines at the end
for i in range(len(lines)-1, -1, -1):
if lines[i] != "\n":
break
del lines[i]
return entry
for line in stream:
if line.startswith("*"):
if current:
yield finish(current)
fields = line.split()
rawdate = " ".join(fields[:5])
try:
date = time.strptime(rawdate, "* %a %b %d %Y")
except ValueError as e:
raise Error("failed to parse spec changelog: %s" % e)
curlines = [line]
current = (date, count, curlines)
# count used to ensure stable sorting when changelog entries
# have the same date, otherwise it would also compare the
# changelog lines
count -= 1
elif current:
curlines.append(line)
else:
pass # not good, but ignore
if current:
yield finish(current)
def sort_changelog(stream):
entries = _split_changelog(stream)
log = StringIO()
for time, count, elines in sorted(entries, reverse=True):
log.writelines(elines)
log.write("\n")
return log
def split_spec_changelog(stream):
chlog = StringIO()
spec = StringIO()
found = 0
visible = 0
for line in stream:
if line.startswith("%changelog"):
found = 1
elif not found:
spec.write(line)
elif found:
if line.strip():
visible = 1
chlog.write(line)
elif line.startswith("%"):
found = 0
spec.write(line)
spec.seek(0)
if not visible:
# when there are only blanks in the changelog, make it empty
chlog = StringIO()
return spec, chlog
def get_old_log(pkgdirurl):
chlog = StringIO()
oldurl = config.get("log", "oldurl")
if oldurl:
svn = SVN(url=oldurl)
tmpdir = tempfile.mktemp()
try:
if oldurl == '.' or oldurl.startswith('./'):
pkgoldurl = os.path.join(pkgdirurl, oldurl)
else:
pkgname = layout.package_name(pkgdirurl)
pkgoldurl = os.path.join(svn.url, pkgname)
try:
# we're using HEAD here because fixes in misc/ (oldurl) may
# be newer than packages' last changed revision.
svn.export(pkgoldurl, tmpdir)
except Error:
pass
else:
logfile = os.path.join(tmpdir, "log")
if os.path.isfile(logfile):
with open(logfile, 'r', encoding = 'utf-8') as lf:
chlog.write("\n") # TODO needed?
log = lf.read()
log = escape_macros(log)
chlog.write(log)
finally:
if os.path.isdir(tmpdir):
shutil.rmtree(tmpdir)
chlog.seek(0)
return chlog
from html.parser import HTMLParser
from urllib.request import urlopen
class UserTagParser(HTMLParser):
li = False
ahref = False
userpage = None
namepat = re.compile("(?P<name>.*?)\s*\((?P<user>.*?)\)")
usermap = {}
usermapfile = None
def __init__(self, url=None, defaultmail=None, *cmd, **kwargs):
HTMLParser.__init__(self, *cmd, **kwargs)
self.url = url or "http://people.mageia.org/u/"
self.defaultmail = defaultmail or "mageia.org"
def handle_starttag(self, tag, attrs):
if tag == "li":
self.li = True
if self.li and tag == "a":
for att in attrs:
if att[0] == "href":
self.ahref = True
self.userpage = att[1]
def handle_endtag(self, tag):
if self.li and tag == "a":
self.ahref = False
self.userpage = None
if tag == "li":
self.li = False
def handle_data(self, data):
if self.li and self.ahref:
found = self.namepat.match(data)
if found:
user = found.group("user")
name = found.group("name")
if user and name and user+".html" == self.userpage:
self.usermap[user] = "%s <%s@%s>" % (name, user, self.defaultmail)
def add_missing_usermaps(self):
# This user map is from 2013-08-24, so it's rather dated, but some
# users seem to have been removed since, resulting in svn username
# conversion failing due to not finding username in usermap file
f = urlopen("http://gitweb.mageia.org/software/infrastructure/svn-git-migration/plain/metadata/mageia-user-map.txt")
for user in f.read().decode("UTF-8").splitlines():
username, namemail = user.split(" = ")
if username not in self.usermap:
self.usermap[username] = namemail
f.close()
def get_user_map(self):
f = urlopen(self.url)
userhtml = f.read().decode("UTF-8")
f.close()
self.feed(userhtml)
self.add_missing_usermaps()
return self.usermap
def get_user_map_file(self):
if not self.usermap:
self.get_user_map()
self.usermapfile = tempfile.mkstemp(suffix=".txt", prefix="usermap")
f = open(self.usermapfile[0], "w", encoding="UTF-8")
f.writelines("%s = %s\n" % user for user in sorted(self.usermap.items()))
f.close()
return self.usermapfile[1]
def cleanup(self):
if os.path.exists(self.usermapfile[1]):
os.unlink(self.usermapfile[1])
def _map_user_names():
if not usermap:
parser = UserTagParser()
usermap.update(parser.get_user_map())
def get_changelog(pkgdirurl, another=None, svn=True, rev=None, size=None,
submit=False, sort=False, template=None, macros=[], exported=None,
oldlog=False, create=False, fullnames=False):
"""Generates the changelog for a given package URL
@another: a stream with the contents of a changelog to be merged with
the one generated
@svn: enable changelog from svn
@rev: generate the changelog with the changes up to the given
revision
@size: the number of revisions to be used (as in svn log --limit)
@submit: defines whether the latest unreleased log entries should have
the version parsed from the spec file
@sort: should changelog entries be reparsed and sorted after appending
the oldlog?
@template: the path to the cheetah template used to generate the
changelog from svn
@macros: a list of tuples containing macros to be defined when
parsing the version in the changelog
@exported: the path of a directory containing an already existing
checkout of the package, so that the spec file can be
parsed from there
@oldlog: if set it will try to append the old changelog file defined
in oldurl in repsys.conf
@create: if set, will use rpm -qp rpm instead of --specfile to get release number
"""
newlog = StringIO()
if svn:
if fullnames:
if not usermap:
_map_user_names()
rawsvnlog = svn2rpm(pkgdirurl, rev=rev, size=size, submit=submit,
template=template, macros=macros, exported=exported, create=create)
newlog.write(rawsvnlog)
if another:
newlog.writelines(another)
if oldlog:
newlog.writelines(get_old_log(pkgdirurl))
if sort:
newlog.seek(0)
newlog = sort_changelog(newlog)
newlog.seek(0)
return newlog
def specfile_svn2rpm(pkgdirurl, specfile, rev=None, size=None,
submit=False, sort=False, template=None, macros=[], exported=None, create=False, fullnames=False):
with open(specfile, encoding = 'utf-8') as fi:
spec, oldchlog = split_spec_changelog(fi)
another = None
if config.getbool("log", "merge-spec", False):
another = oldchlog
sort = sort or config.getbool("log", "sort", False)
if fullnames:
_map_user_names()
chlog = get_changelog(pkgdirurl, another=another, rev=rev, size=size,
submit=submit, sort=sort, template=template, macros=macros,
exported=exported, oldlog=True, create=create)
with open(specfile, "w", encoding='utf-8') as fo:
fo.writelines(spec)
fo.write("\n%changelog\n")
fo.writelines(chlog)
if __name__ == "__main__":
l = svn2rpm(sys.argv[1], create=True)
print(l)
# vim:et:ts=4:sw=4
| DrakXtools/repsys | RepSys/log.py | log.py | py | 27,147 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "locale.setlocale",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "locale.LC_ALL",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "RepSys.svn.SVN",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.join",
... |
15883229551 | import time
import random
import json
import hashlib
import requests
import mysql.connector as mc
from mysql.connector import Error as mce
import sys
my_key='your key from API'
my_secret='your secret from API'
rand_prefix = str(random.randint(100000,999999))
now = str(int(time.time()))
method_name = 'problemset.problems'
method_param=[]
hash_in = rand_prefix+ '/' + method_name + '?' + f'apiKey={my_key}&time={now}' + '#' + my_secret
hash_code = hashlib.sha512(str(hash_in).encode('utf-8')).hexdigest()
url = f'https://codeforces.com/api/{method_name}?apiKey={my_key}&time={now}&apiSig={rand_prefix}{hash_code}'
res = requests.get(url)
j_dict = json.loads(res.text)
if j_dict['status']=='OK':
tag_set=set()
for prob in j_dict['result']['problems']:
for tag in prob['tags']:
tag_set.add(tag)
tag_list=list(tag_set)
else:
print(j_dict['comment'])
sys.exit()
conn = mc.connect(user='root', password='ineedcoffee', host='127.0.0.1')
cur = conn.cursor()
cur.execute('drop database if exists CAC_db')
cur.execute('create database CAC_db')
cur.execute('use CAC_db')
cur.execute('drop table if exists Prob_set')
set_arg = ','.join([*map(lambda x: "'"+x+"'",tag_list)])
sql=f'''create table Prob_set(
id varchar(50) not null,
name varchar(65) not null,
difficulty int(11) not null,
tags set({set_arg}),
primary key(`id`)
)'''
cur.execute(sql)
for prob in j_dict['result']['problems']:
pid = str(prob['contestId'])+'/'+prob['index']
name = prob['name']
name = name.replace("'","\'")
name = name.replace('''"''','''\'''')
tags = ','.join(prob['tags'])
try:
diff = prob['rating']
except KeyError:
diff = 0
try:
cur.execute(f'''insert into CAC_db.Prob_set values("{pid}","{name}",{diff},"{tags}")''')
except Exception as e:
print(e)
conn.commit()
conn.close()
print('DB generation completed') | ineed-coffee/CAC-Code-Forces-Algorithm-Classifier- | make_db.py | make_db.py | py | 1,915 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random.randint",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "hashlib.sha512",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_numb... |
69981548263 | from collections import defaultdict
import math
import torch
import torch.nn as nn
from algorithm.trainer import SampleBatch, feed_forward_generator, recurrent_generator
def get_gard_norm(it):
sum_grad = 0
for x in it:
if x.grad is None:
continue
sum_grad += x.grad.norm()**2
return math.sqrt(sum_grad)
def huber_loss(e, d):
a = (abs(e) <= d).float()
b = (e > d).float()
return a * e**2 / 2 + b * d * (abs(e) - d / 2)
def mse_loss(e):
return e**2 / 2
class MAPPO:
def __init__(self, args, policy):
self.policy = policy
self.clip_param = args.clip_param
self.ppo_epoch = args.ppo_epoch
self.num_mini_batch = args.num_mini_batch
self.data_chunk_length = args.data_chunk_length
self.value_loss_coef = args.value_loss_coef
self.entropy_coef = args.entropy_coef
self.max_grad_norm = args.max_grad_norm
self.huber_delta = args.huber_delta
self._use_max_grad_norm = args.use_max_grad_norm
self._use_clipped_value_loss = args.use_clipped_value_loss
self._use_huber_loss = args.use_huber_loss
self._use_value_active_masks = args.use_value_active_masks
self._use_policy_active_masks = args.use_policy_active_masks
self.actor_optimizer = torch.optim.Adam(
self.policy.actor.parameters(),
lr=args.lr,
eps=args.opti_eps,
weight_decay=args.weight_decay,
)
self.critic_optimizer = torch.optim.Adam(
self.policy.critic.parameters(),
lr=args.critic_lr,
eps=args.opti_eps,
weight_decay=args.weight_decay,
)
def cal_value_loss(self, values, value_preds_batch, return_batch,
active_masks_batch):
if self.policy.popart_head is not None:
self.policy.update_popart(return_batch)
return_batch = self.policy.normalize_value(return_batch)
value_pred_clipped = value_preds_batch + (
values - value_preds_batch).clamp(-self.clip_param,
self.clip_param)
error_clipped = return_batch - value_pred_clipped
error_original = return_batch - values
if self._use_huber_loss:
value_loss_clipped = huber_loss(error_clipped, self.huber_delta)
value_loss_original = huber_loss(error_original, self.huber_delta)
else:
value_loss_clipped = mse_loss(error_clipped)
value_loss_original = mse_loss(error_original)
if self._use_clipped_value_loss:
value_loss = torch.max(value_loss_original, value_loss_clipped)
else:
value_loss = value_loss_original
if self._use_value_active_masks:
value_loss = (value_loss *
active_masks_batch).sum() / active_masks_batch.sum()
else:
value_loss = value_loss.mean()
return value_loss
def ppo_update(self, sample: SampleBatch, update_actor=True):
# Reshape to do in a single forward pass for all steps
action_log_probs, values, dist_entropy = self.policy.analyze(sample)
# actor update
imp_weights = torch.exp(action_log_probs - sample.action_log_probs)
surr1 = imp_weights * sample.advantages
surr2 = torch.clamp(imp_weights, 1.0 - self.clip_param,
1.0 + self.clip_param) * sample.advantages
assert surr1.shape[-1] == surr2.shape[-1] == 1
if self._use_policy_active_masks:
policy_loss = (-torch.min(surr1, surr2) * sample.active_masks
).sum() / sample.active_masks.sum()
dist_entropy = (dist_entropy * sample.active_masks
).sum() / sample.active_masks.sum()
else:
policy_loss = -torch.min(surr1, surr2).mean()
dist_entropy = dist_entropy.mean()
value_loss = self.cal_value_loss(values, sample.value_preds,
sample.returns, sample.active_masks)
self.actor_optimizer.zero_grad()
if update_actor:
(policy_loss - dist_entropy * self.entropy_coef).backward()
if self._use_max_grad_norm:
actor_grad_norm = nn.utils.clip_grad_norm_(
self.policy.actor.parameters(), self.max_grad_norm)
else:
actor_grad_norm = get_gard_norm(self.policy.actor.parameters())
self.actor_optimizer.step()
self.critic_optimizer.zero_grad()
(value_loss * self.value_loss_coef).backward()
if self._use_max_grad_norm:
critic_grad_norm = nn.utils.clip_grad_norm_(
self.policy.critic.parameters(), self.max_grad_norm)
else:
critic_grad_norm = get_gard_norm(self.policy.critic.parameters())
self.critic_optimizer.step()
return value_loss, critic_grad_norm, policy_loss, dist_entropy, actor_grad_norm, imp_weights
def train(self, storage, update_actor=True):
train_info = defaultdict(lambda: 0)
for _ in range(self.ppo_epoch):
if self.policy.num_rnn_layers > 0:
data_generator = recurrent_generator(storage,
self.num_mini_batch,
self.data_chunk_length)
else:
data_generator = feed_forward_generator(
storage, self.num_mini_batch)
for sample in data_generator:
(value_loss, critic_grad_norm, policy_loss, dist_entropy,
actor_grad_norm,
imp_weights) = self.ppo_update(sample,
update_actor=update_actor)
train_info['value_loss'] += value_loss.item()
train_info['policy_loss'] += policy_loss.item()
train_info['dist_entropy'] += dist_entropy.item()
train_info['actor_grad_norm'] += actor_grad_norm
train_info['critic_grad_norm'] += critic_grad_norm
train_info['ratio'] += imp_weights.mean()
num_updates = self.ppo_epoch * self.num_mini_batch
for k in train_info.keys():
train_info[k] /= num_updates
return train_info
def prep_training(self):
self.policy.actor.train()
self.policy.critic.train()
def prep_rollout(self):
self.policy.actor.eval()
self.policy.critic.eval()
| garrett4wade/revisiting_marl | algorithm/trainers/mappo.py | mappo.py | py | 6,597 | python | en | code | 19 | github-code | 36 | [
{
"api_name": "math.sqrt",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.optim.Adam",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.Adam",
"l... |
74497317225 | from flask import Flask, render_template, request, flash, redirect, url_for, session, g
import os
import secrets
from PIL import Image
from HackSite.deeplearning.classification import predict
def create_app(test_config=None):
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
SECRET_KEY='uPqECIPsjfu7qQ93MwHiyDr73QyhyjUphSnehNAt',
DATABASE=os.path.join(app.instance_path, 'HackSite.sqlite'),
)
if test_config is None:
app.config.from_pyfile('config.py', silent=True)
else:
app.config.from_mapping(test_config)
try:
os.makedirs(app.instance_path)
except OSError:
pass
from . import db
db.init_app(app)
from HackSite.db import get_db
@app.before_request
def load_logged_in_user():
auth = session.get('user_id')
if auth is None:
g.user = None
else:
g.user = {'id':1, 'name':'auth'}
def save_picture(picture):
fname = secrets.token_hex(20)
_, f_ext = os.path.splitext(picture.filename)
picture_fn = fname + f_ext
picture_path = os.path.join(app.root_path, 'static/images', picture_fn)
i = Image.open(picture)
i.save(picture_path)
return picture_path
@app.route('/', methods=('GET', 'POST'))
def index():
if request.method == 'POST':
img = request.files['uploaded_img']
img = save_picture(img)
pred = predict(app, img)
os.remove(img)
if pred == 0:
lol = 'Civilian Aircraft'
elif pred == 1:
lol = 'Military Aircraft'
return render_template('index.html', output=lol)
return render_template('index.html')
@app.route('/questions', methods=('GET', 'POST'))
def questions():
db = get_db()
questions = db.execute('SELECT * FROM question').fetchall()
if request.method == 'POST':
q = request.form['q']
error = None
if len(q) > 255:
error = 'Question should not exceed 255 characters.'
if error is None:
db.execute('INSERT INTO question (body) VALUES (?)', (q,))
db.commit()
return redirect(url_for('questions'))
flash(error)
return render_template('questions.html', questions=questions)
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/auth', methods=('GET', 'POST'))
def auth():
if g.user:
return redirect(url_for('index'))
if request.method == 'POST':
p = request.form['password']
error = None
if p != 'password:)':
error = 'Invalid password.'
if error is None:
session['user_id'] = '1'
return redirect(url_for('index'))
flash(error)
return render_template('auth.html')
@app.route('/logout')
def logout():
session.clear()
return redirect(url_for('index'))
@app.route('/<int:id>', methods=('GET', 'POST'))
def answer(id):
db = get_db()
answers = db.execute('SELECT * FROM answer WHERE question_id = ?', (id,)).fetchall()
if request.method == 'POST':
a = request.form['body']
error = None
if len(a) > 255:
error = 'Max length: 255'
if error is None:
db.execute('INSERT INTO answer (question_id, body) VALUES (?,?)', (id, a))
db.commit()
return redirect(url_for('answer', id=id))
flash(error)
return render_template('answer.html', id=id, answers=answers)
return app | BenVN123/AircraftClassificationDL | HackSite/__init__.py | __init__.py | py | 3,992 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number":... |
44210008243 | # -*- coding: utf-8 -*-
#!usr/bin/evn python3
#这一部分用来理解正规标达式 / 日期运算
#备注:当要搜寻特定文字时可以使用正规表达式
"""
Created on Tue Nov 7 13:56:09 2017
@author: vizance
"""
#===正规表达式与匹配文字组合(资料分析中,字段资料的大小写匹配非常重要)===
#呼叫re模组(正规表达式模组,使用中继字元来匹配各种文字组合)
#中继字元如 |、()、[]、*、+、?、^、$、(?P<name>)
import re #re模组用来创造或搜寻各种文字组合
string = "The quick brown fox jumps over the lazy dog."
string_list = string.split()
pattern = re.compile(r"The",re.I)
#re.compile代表将文字组合编译成正规表达式
#不一定要做但做了可以提升电脑执行速度,re.I可以确保程式区分大小写;
#r确保python不会处理字串的特殊序列,例如\ \t \n
count = 0
for word in string_list:
if pattern.search(word):
count = count + 1
print("output #38: {0:d}".format(count))
#将比对到的单字(The)印出在萤幕上
string = "The quick brown fox jumps over the lazy dog."
string_list = string.split()
pattern = re.compile(r"(?P<match_word>The)",re.I)
#r"(?P<name>The)"透过(?P<name>)符号群组名称<name>来使用匹配的字串,群组中放进The
print("output #39: ")
for word in string_list:
if pattern.search(word):
print("{:s}".format(pattern.search(word).group('match_word')))
#于pattern中搜寻是否匹配The,若找到则印出match_word群组中的值
##练习搜寻dog
pattern2 = re.compile(r"(?P<match_dog>dog)",re.I )
print("practice searching dog: ")
for word in string_list:
if pattern2.search(word):
print("{:s}".format(pattern2.search(word).group("match_dog")))
#将字串的The替换成a
string = "The quick brown fox jumps over the lazy dog."
string_to_find = r"The" #将正规表达式指派给变数,方便程式码阅读
pattern = re.compile(string_to_find,re.I)
print("output #40: {:s}".format(pattern.sub("a", string)))
##练习替换dog为cat
string_to_find2 = r"dog"
pattern2=re.compile(string_to_find2, re.I)
print("practice to replace dog: {:s}".format(pattern2.sub("cat", string)))
#===正规表达式与匹配文字组合结束===
#===日期的运算处理===
from datetime import date, datetime, timedelta #引入datatime模组
##印出今天的年、月及日元素
today = date.today()#date物件只能秀出年、月、日
print("output #41: today:{0!s}".format(today)) #!s表示被传入到print陈述式里面的值都要转换为string
print("output #42: {0!s}".format(today.year))
print("output #43: {0!s}".format(today.month))
print("output #44: {0!s}".format(today.day))
current_time = datetime.today() #使用datetime来表示详细的时间、datetime物件可以秀出年月日、小时、分、秒
print("output #45: {0!s}".format(current_time))
##使用timedelta来计算新日期
one_day = timedelta(days=-1)
yesterday = today + one_day
print("output #46: yesterday:{0!s}".format(yesterday))
eight_hours = timedelta(hours=-8)
print("output #47: {0!s} {1!s} ".format(eight_hours.days,eight_hours.seconds))
#使用timedelta时,括号内的时间会被转换成日、秒、毫秒,秒的计算过程是:24hr * 3,600秒 - 8hr * 3,600秒
date_diff = today - yesterday
print("output #48: {0!s}".format(date_diff)) #计算出来的结果是用datetime呈现
print("output #49: {0!s}".format(str(date_diff).split()[0]))
#将date_diff换成string,利用split取出第[0]个索引,date_diff的内容为 1 day 0:00:00
##使用strftime,来用date物件建立特定格式的字串
print("output #50: {0!s}".format(today.strftime('%m/%d/%Y')))
print("output #51: {0!s}".format(today.strftime('%b %d, %Y')))
##使用strftime,以特定格式的字串建立datetime物件
#产生代表日期的字串
date1 = today.strftime('%m%d%Y')
date2 = today.strftime('%b %d, %Y')
date3 = today.strftime('%Y-%m-%d')
date4 = today.strftime('%B %d, %Y')
#两个datetime物件、两个date物件
###datetime中的strf跟strp的差别:strf用来正规化日期、strp用来将字串转换为datetime格式
print("output #54: {0!s}".format(datetime.strptime(date1, '%m%d%Y')))
###只想取日期
print("output #56: {0!s}".format(datetime.date(datetime.strptime(date3, '%Y-%m-%d'))))
#===日期的运算处理结束=== | vizance/Python_Data_Analysis | 第一章_基礎介紹/基本練習2_正規表達式與日期運算.py | 基本練習2_正規表達式與日期運算.py | py | 4,376 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "re.I",
"line_number": 32,
"us... |
22776411105 | from jira import JIRA
import re
import json
import requests
import browser_cookie3
import creds as creds
#connect to jira instance
jiraOptions = {'server' : creds.url
}
jira = JIRA(server=jiraOptions, token_auth=creds.api_token)
#get list of issues
issues = []
def getIssues():
print('Searhing for issues..')
for singleIssue in jira.search_issues(jql_str='project = SD and issuetype = "Business Systems and Services" AND assignee = "stepan.botalov@dxc.com" and status = Approved'):
issues.append(singleIssue.key)
return issues
#get approval comments
approval_comments = []
def getComments(issues):
print('Getting approval comments..')
for issue in issues:
comments = jira.comments(issue)
for comment in comments:
comment_dict = {}
if comment.author.displayName == 'SD Robot' and 'The request has been sent for approval to' in comment.body:
comment_dict['key'] = issue
comment_dict['body'] = comment.body
approval_comments.append(comment_dict)
return approval_comments
cookies = browser_cookie3.chrome(domain_name='lp-uat.luxoft.com') #get current cookies_dict from the browser
cookies_dict = requests.utils.dict_from_cookiejar(cookies) #convert cookies_dict to dict
#get project code, project role, and username to grant access
code_role_user = []
def getProjectCode(approval_comments):
print('Parsing approval comments..')
for comment in approval_comments:
approval_dict = {}
project_code = re.search(r'([A-Z]){3,10}|([A-Z0-9]){3,10}', comment['body'])
reporter = re.findall(r'[a-zA-Z.]+[0-9]?@dxc.com', comment['body'])
project_role = re.search(r'project-manager|analyst|developer|tester|test-manager|customer|dev-lead|ci-engineer', comment['body'], re.IGNORECASE)
approval_dict['key'] = comment['key']
try:
approval_dict['project_code'] = project_code.group()
except (AttributeError):
continue
try: #adding username to a dict
payload = {"search":reporter[1], #email
"directoryIds":[360449],
"avatarSizeHint":128}
crowd_url = 'https://lp-uat.luxoft.com/crowd/rest/admin/latest/users/search?limit=50&start=0'
r = requests.post(crowd_url, json=payload, cookies=cookies_dict) #search for a user using crowd api
username = json.loads(r.text)['values'][0]['username'] # extracting username from response
approval_dict['reporter'] = username
except (IndexError):
continue
approval_dict['project_role'] = project_role.group()
code_role_user.append(approval_dict)
#print(approval_dict)
return code_role_user
#send post request to add user to a project
console_url = 'https://lp-uat.luxoft.com/console/rest/project/nested-member'
def grantAccess(project_code, reporter, project_role):
payload = {"projectCode":"",
"userProjectRoles":[],
"username":""}
payload["projectCode"] = project_code
payload["userProjectRoles"] = project_role
payload["username"] = reporter
r = requests.post(console_url, json=payload, cookies=cookies_dict)
return r.status_code
def resolveIssue(key):
comment = '''
Access is granted
Please check in 15 minutes.
Regards,
Stepan
'''
jira.transition_issue(issue=key, transition=61, resolution={'id': '1'}, worklog='20', comment=comment)
getIssues()
if len(issues) == 0:
print('There is no requests in Approved status')
else:
print(str(len(issues)) + ' issue(s) in Approved status - ' + str(issues))
getComments(issues)
if len(approval_comments) == 0:
print('No valid approval comments detected')
else:
getProjectCode(approval_comments)
print(code_role_user)
for x in code_role_user:
print('Granting access to ' + x['reporter'] + ' to ' + x['project_code'] + ' project in the scope of ' + x['key'] + ' request')
r = grantAccess(x['project_code'], x['reporter'], x['project_role'])
print(r)
if str(r) == '201':
print('Access to ' + x['reporter'] + ' to ' + x['project_code'] + ' project successfully granted')
resolveIssue(x['key'])
else:
print('post request got failed')
| SBotalov/sd_automation | sd_granting_access.py | sd_granting_access.py | py | 4,477 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "creds.url",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "jira.JIRA",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "creds.api_token",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "jira.search_issues",
... |
23072964772 | from behave import given, when, then
from hamcrest import assert_that, equal_to, raises
from LinkList.src.linkList import LinkList
@given(u'nodes are created')
def add_node(context):
for row in context.table:
context.model.add_node(row["node"])
@given(u'All nodes are reset')
def delete_list(context):
context.model = getattr(context, "model", None)
if context.model is None:
context.model = LinkList()
if context.model is not None:
context.model.delete_list()
@when(u'nodes are counted')
def count_nodes(context):
context.model.actual_number_of_nodes = context.model.get_number_of_node()
@then(u'it matches to below value')
def match_count(context):
for row in context.table:
expected_number_of_row = row["node"]
assert_that(context.model.actual_number_of_nodes, expected_number_of_row, 'number of nodes')
@when(u'search node with data {number:d}')
def search(context, number):
context.model.actual_data = context.model.find(number)
@then(u'below data is returned')
def match_data(context):
for row in context.table:
expected_find_result = row["node"]
assert str(context.model.actual_data) == expected_find_result, "find the element in link list actual_data={} "
"with type{},expected={} with type{} ". \
format(context.model.actual_data, type(context.model.actual_data), expected_find_result,
type(expected_find_result))
@when(u'below nodes are removed')
def remove(context):
for row in context.table:
node_to_be_removed = row["node"]
context.model.remove(node_to_be_removed)
@then(u'link list contains below data')
def print_list(context):
actual_data = context.model.print_list()
expected_data = []
for row in context.table:
expected_data.append(row["node"])
translation = {39: None}
expected_data = str(expected_data).translate(translation)
assert expected_data == str(actual_data), "link list content does not match expected data={} with type={}," \
" actual data ={} with type={}". \
format(expected_data, type(expected_data), actual_data, type(str(actual_data)))
| rushikeshnakhate/HackaThon | python/LinkList/test/bdd/steps/countNumberOfNodesStepsDefinition.py | countNumberOfNodesStepsDefinition.py | py | 2,220 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "behave.given",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "LinkList.src.linkList.LinkList",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "behave.given",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "behave.when",... |
7939048922 | import math
from collections import defaultdict
from fractions import gcd
class Point:
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __sub__(self, p2):
return Point(self.x-p2.x, self.y-p2.y)
def __str__(self):
return "(" + str(int(self.x)) + ", " + str(int(self.y)) +")"
def __repr__(self):
return str(self)
def __hash__(self):
return hash(str(self))
def __eq__(self,other):
return self.x == other.x and self.y == other.y
def length_square(self):
return (self.x ** 2) + (self.y ** 2)
def simplify(self):
common = abs(gcd(self.x, self.y)) or 1
self.x /= common
self.y /= common
return self
def answer(dimensions, your_position, guard_position, distance):
dims = Point(dimensions[0], dimensions[1])
p_captn = Point(your_position[0], your_position[1])
p_guard = Point(guard_position[0], guard_position[1])
r_square = distance ** 2
min_room_coord = Point(
math.floor((p_captn.x - distance)/dims.x),
math.floor((p_captn.y - distance)/dims.y))
max_room_coord = Point(
math.ceil ((p_captn.x + distance)/dims.x),
math.ceil ((p_captn.y + distance)/dims.y))
firing_vectors = defaultdict(set)
captn_vectors = defaultdict(set)
# finding all vectors leading towards either self or target and associated lengths
for i in range(min_room_coord.x, max_room_coord.x+1):
for j in range(min_room_coord.y, max_room_coord.y+1):
if i and j: # i and j not equal to 0
guard_mirror_coord = mirror_coords(p_guard, i, j, dims.x, dims.y)
captn_mirror_coord = mirror_coords(p_captn, i, j, dims.x, dims.y)
firing_vect = guard_mirror_coord - p_captn
captn_vect = captn_mirror_coord - p_captn
firing_vect_len = firing_vect.length_square()
captn_vect_len = captn_vect.length_square()
if firing_vect_len <= r_square:
firing_vectors[firing_vect.simplify()].add(firing_vect_len)
if captn_vect_len <= r_square:
captn_vectors[captn_vect.simplify()].add(captn_vect_len)
for f in captn_vectors:
if f in firing_vectors: # if self is in line of fire
if min(captn_vectors[f]) < min(firing_vectors[f]): # determine who is hit first
firing_vectors.pop(f)
return len(firing_vectors)
# find new coordinates after mirror tiling the plane with "rooms"
def mirror_coords(real_coord, room_x, room_y, w, h):
return Point(mc(real_coord.x, room_x, w), mc(real_coord.y, room_y, h))
def mc(real_coord, room, dim):
m = (-1) if room < 0 else (1)
if room&1:
return m * ( real_coord + dim*(abs(room)-1))
else:
return m * (-real_coord + dim*abs(room))
# -------------------------------------------------------------------------------------------------------------------
dimensions = [3, 2]
captain_position = [1, 1]
badguy_position = [2, 1]
distance = 4
# ouput: 7
dimensions = [42, 59]
captain_position = [34, 44]
badguy_position = [6, 34]
distance = 500
dimensions = [300, 275]
captain_position = [150, 150]
badguy_position = [185, 100]
distance = 500
# ouput: 9
print(answer(dimensions, captain_position, badguy_position, distance))
| deepspacepirate/googlefoobar | L4-bringing_a_gun_to_a_guard_fight.py | L4-bringing_a_gun_to_a_guard_fight.py | py | 3,170 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fractions.gcd",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 45... |
32846353410 | # This program downloads a list of files from an NCBI FTP site that are delineated in a csv file.
# Assumes that the CSV file has a header, and skips the first row.
# ARGUMENT1 - CSV file that contains the URLs of the files to download.
# ARGUMENT2 - The suffix of the files to download. i.e. "_genomic.fna", "_protein.faa", etc. The program will automatically append ".gz" to the url so it can get the files.
# ARGUMENT3 - (optional) Path to directory where downloaded files should be placed.
# ARGUMENT4 - (optional) 0-indexed column number in the csv file that should be used to rename each downloaded file. If none is provided, files will be named "TEMP_Genus_Species_Strain_BioSample"
# ex - python DownloadFromNCBIFTP.py penguins.csv _genomic.fna.gz Downloads\Penguins 0
import sys, os, csv, urllib.request
from pathlib import Path
# FUNCTION LIST
def getName(row, suffix):
nameList = row[0].split(' ')
genus = nameList[0]
species = nameList[1]
strain = row[2]
sample = row[3].strip()
return 'TEMP%' + genus + '%' + species + '%' + strain + '%' + sample + '.fasta.gz'
# Check user-entered arguments
if len(sys.argv) < 2:
raise Exception('You must enter a csv file to parse.')
urlsFile = Path(sys.argv[1])
if not urlsFile.exists():
raise Exception('Could not find the specified file "' + sys.argv[1])
if len(sys.argv) < 3:
raise Exception('You must specify a file suffix.')
outPath = ''
if len(sys.argv) > 3:
outPath = sys.argv[3] + '/'
if not Path(sys.argv[3]).exists():
raise Exception('Could not find output directory "' + sys.argv[3] + '"')
nameCol = -1
if len(sys.argv) > 4:
try:
nameCol = int(sys.argv[4])
except:
raise Exception('Argument 4 must be an integer')
# Get the download urls from the CSV file and determine what each download will be named
urls = {}
hasHeader = True
downloadCol = 14
with urlsFile.open() as csvFile:
reader = csv.reader(csvFile, delimiter=',')
for row in reader:
if hasHeader:
hasHeader = False
continue
if row[downloadCol] == '-' or row[downloadCol] == '':
continue
if len(row) < 2:
break
sampleSignifier = row[downloadCol].split('/')[len(row[downloadCol].split('/')) - 1]
downloadPath = row[downloadCol] + '/' + sampleSignifier + sys.argv[2] + '.gz'
fileName = row[nameCol] + '.fasta.gz' if nameCol != -1 else getName(row, sys.argv[2])
urls[fileName] = downloadPath
# Download the files
for fileName in urls.keys():
print('Downloading ' + urls[fileName])
try:
urllib.request.urlopen(urls[fileName])
print(fileName)
except:
print(f'No {sys.argv[2]} file for {fileName}')
continue
req = urllib.request.Request(urls[fileName])
data = None
with urllib.request.urlopen(req) as response:
data = response.read()
with open(outPath + fileName, 'wb') as newFile:
newFile.write(data)
print('Download script completed successfully.')
| platipenguin/metaproteomics-database-optimization | jupyter_notebooks/DownloadFromNCBIFTPtxt.py | DownloadFromNCBIFTPtxt.py | py | 2,950 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number"... |
12445974610 | from django.http import HttpResponse
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework import status
from rest_framework.decorators import api_view
from django.views.decorators.csrf import csrf_exempt
from djangoTest import settings
from oAuth.models import User, OauthStockData, OauthTodayData, OauthSecretKey, OauthIp, OauthQueryLog
from oAuth.models import stockdata20204, stockdata20214, stockdata20211, stockdata20212, stockdata20213, stockdata20221\
, stockdata20222
from oAuth.models import stockdata20223, stockdata20224, stockdata20231, stockdata20232
from django.db import connection
from rest_framework.response import Response
from django.core.mail import send_mail
from oAuth.serializer import UserSerializer, SecretkeySerializer, IpSerializer
from django.db.models import Q
import datetime
import pytz
from django.utils import timezone
#主要业务逻辑在此处实现
# 节假日处理,方便实现翻页逻辑
holidays = ['2023-04-29', '2023-04-30', '2023-05-01', '2023-05-02', '2023-05-03', '2023-06-22', '2023-06-23',
'2023-06-24', '2023-09-29', '2023-09-30', '2023-10-01', '2023-10-02', '2023-10-03', '2023-10-04',
'2023-10-05', '2023-10-06']
min_date = datetime.datetime(year=2023, month=4, day=24)
#此处为预留的数据分表后根据日期判断表的情况
def date_switch(dt):
if dt.year == 2020:
return stockdata20204
elif dt.year == 2021:
if dt.month < 4:
return stockdata20211
elif dt.month < 7:
return stockdata20212
elif dt.month < 10:
return stockdata20213
else:
return stockdata20214
elif dt.year == 2022:
if dt.month < 4:
return stockdata20221
elif dt.month < 7:
return stockdata20222
elif dt.month < 10:
return stockdata20223
else:
return stockdata20224
else:
if dt.month < 4:
return stockdata20231
else:
return OauthStockData
#utc时间转化为 '%Y-%m-%d %H:%M:%S' 格式的当前时区时间字符串
def convert_to_localtime(utctime):
fmt = '%Y-%m-%d %H:%M:%S'
utc = utctime.replace(tzinfo=pytz.UTC)
localtz = utc.astimezone(timezone.get_current_timezone())
return localtz.strftime(fmt)
#utc时间转化为 '%Y-%m-%d %H:%M:%S.%f' 格式的当前时区时间,带.%f处理microsecond
def convert_to_local_datetime(utctime):
fmt = '%Y-%m-%d %H:%M:%S.%f'
utc = utctime.replace(tzinfo=pytz.UTC)
localtz = utc.astimezone(timezone.get_current_timezone())
str = localtz.strftime(fmt)
date = datetime.datetime.strptime(str, fmt)
return date
# Create your views here.
#drf 实现RESTFUL标准的接口
#继承ViewSet重写的函数名与请求种类的对应关系
#list 对应get请求
#creat 对应post请求
#update 对应put请求和patch请求
#destory 对应delete请求
#pycharm开发时可通过按住ctrl+点击的方式跳转到继承的ViesSet类源码,便于学习
#drf实现的获取用户信息的ViewSet
class UserInfoViewSet(viewsets.ViewSet):
queryset = User.objects.all().order_by('-date_joined')
http_method_names = ['get']#仅允许get请求
def list(self, request, *args, **kwargs):
user_info = User.objects.filter(id=request.user.id).values()[0]
# user = User.objects.filter(id=request.user.id).values()[0]
utctime = user_info['last_login']
fin_utc_time = user_info['fin_time']
cntime = convert_to_localtime(utctime)#时区转换
fin_time = None
dt = datetime.datetime.now()
if fin_utc_time is not None:#处理空值
fin_time = convert_to_localtime(fin_utc_time)
ft = convert_to_local_datetime(fin_utc_time)
if ft < dt:#判断业务是否过期
user = User.objects.get(id=request.user.id)
user.business_activate = 0
user_info['business_activate'] = 0
user.save()
user_info['password'] = ['']#隐藏密码信息
user_info['last_login'] = cntime
user_info['fin_time'] = fin_time
bus_status = user_info['business_activate']
role = request.user.roles
if bus_status == 0:#设置状态文本
user_info['business_activate'] = ['未激活']
else:
user_info['business_activate'] = ['已激活']
if role == 0:#设置类型文本
user_info['roles'] = ['管理员']
else:
user_info['roles'] = ['普通用户']
return Response(user_info)
# ViewSets define the view behavior.
#注册用户业务实现
class UserCreateViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
http_method_names = ['post', 'get']#允许get post
permission_classes = []#注册接口,需要移除权限验证
# list_get creat_post update_put/patch destory_delete
#retrieve对应对单条数据的get,请求链接中应有唯一标识该条数据的值
def retrieve(self, request, *args, **kwargs):
instance = User.objects.get(code=kwargs['pk'])#用户激活,根据code获取用户实例
instance.is_active = True#设置激活
instance.save()#保存
data = {
'status': 'success',
}
return render(request, 'active_success.html')
#用户注册
def create(self, request, *args, **kwargs):
# get real ip address
#获取真实ip地址,此处注意Nginx配置的参数
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[-1].strip()
else:
ip = request.META.get('REMOTE_ADDR')
# get serializer
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user_info = self.perform_create(serializer)
# set sign in ip
user_info.sign_ip = ip
user_info.is_active = False#默认设置非激活
if request.data['password'] == request.data['repassword']:#校验注册两次输入的密码是否相同
user_info.set_password(request.data['password'])#用django原生的set_password设置密码,数据库中存储的是加密后的密码
code = user_info.code
url = request.build_absolute_uri("/api/user/active/" + str(code) + "/")#生成激活链接
username = user_info.username
#构造邮件
subject = '账户激活-' + username#标题
from_email = settings.EMAIL_HOST_USER#邮件来源
to_email = user_info.email#邮件目的
meg_html = '欢迎注册测试版系统,请访问您的专有链接以激活账户:' + url#激活链接
try:
send_mail(subject, meg_html, from_email, [to_email])
user_info.save()#发送成功后保存
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
except Exception as e:
user_info.delete()#异常退出则删掉刚刚创建的用户
data = {
'error': e
}
return Response(data, status.HTTP_400_BAD_REQUEST)
else:
data = {
'error': '密码校验错误,请重试'
}
return Response(data, status=status.HTTP_400_BAD_REQUEST)
def perform_create(self, serializer):
return serializer.save()
#用户修改密码接口
class UserChangeViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
http_method_names = ['patch']#仅允许patch
def update(self, request, *args, **kwargs):#此处的request中的user是根据解析token获得的,此处已完成权限验证
user_id = request.user.id
instance = self.get_object()
if user_id == instance.id:#校验链接中的id与token对应的id是否相同
psw = request.data['password']
instance.set_password(psw)
instance.save()
data = {
'success': '修改成功'
}
return Response(data, status=status.HTTP_202_ACCEPTED)
else:
data = {
'error': '校验错误'
}
return Response(data, status=status.HTTP_400_BAD_REQUEST)
#密钥接口
class SecretKeyViewSet(viewsets.ModelViewSet):
queryset = OauthSecretKey.objects.all()
serializer_class = SecretkeySerializer
http_method_names = ['get', 'post', 'put', 'patch']
#查询密钥
def list(self, request, *args, **kwargs):
queryset = OauthSecretKey.objects.filter(uid=request.user.id)#根据token解析出的用户id做筛选
if len(queryset) > 0:#如果密钥存在
instance = queryset.values()[0]
last_change_time = instance['last_change_time']
last_query_time = instance['last_query_time']
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
data = serializer.data
if last_change_time is not None:
lct = convert_to_localtime(last_change_time)#转换时区
data[0]['last_change_time'] = lct
if last_query_time is not None:
lqt = convert_to_localtime(last_query_time)#转换时区
data[0]['last_query_time'] = lqt
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
else:#空值处理,默认
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
#创建密钥
def create(self, request, *args, **kwargs):
# get real ip address
#获取反向代理前的真实ip地址
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[-1].strip()
else:
ip = request.META.get('REMOTE_ADDR')
query_keys = OauthSecretKey.objects.filter(uid=request.user.id)
if len(query_keys) > 0:#已有密钥无法重复申请,若后续要允许一个用户拥有多个密钥,需修改此处
data = {
'error': '您已拥有密钥,请勿重复申请!'
}
return Response(data, status=status.HTTP_400_BAD_REQUEST)
else:
data = request.data
data['ip'] = ip
data['uid'] = request.user.id
data['username'] = request.user.username
data['last_change_time'] = datetime.datetime.now()#记录上次变更时间
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
#更新密钥状态
def update(self, request, *args, **kwargs):
data = {}
instance = self.get_object()
# get real ip address
query_type = request.query_params['type']
query_key = request.query_params['key']
if query_type == "ip":#更改绑定ip
if instance.uid == request.user:#校验请求的用户与token解析的用户是否一致
last_change_time = instance.last_change_time#获取密钥上次变更时间
dt = datetime.datetime.now()
l_changetime = convert_to_local_datetime(last_change_time)#时区转换
if (dt - l_changetime).seconds < 1800:#时间间隔判断
data['error'] = '修改间隔不能小于30分钟'
return Response(data, status.HTTP_400_BAD_REQUEST)
else:
instance.last_change_time = dt#更新上次变更时间
instance.save()#保存
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[-1].strip()
else:
ip = request.META.get('REMOTE_ADDR')
if instance.ip != ip:#如果ip变更了则修改,否则不修改
instance.ip = ip
instance.save()#保存
return Response(data, status.HTTP_202_ACCEPTED)
else:
data['error'] = '校验错误'
else:#变更密钥状态,主要用于解除锁定
if instance.uid == request.user:#校验用户
if instance.status == 0 or instance.status == 1:#正常 或 警告 状态,直接返回
return Response(data, status.HTTP_202_ACCEPTED)
elif instance.status == 2:#锁定状态转正常
instance.status = 0
instance.save()
return Response(data, status.HTTP_202_ACCEPTED)
elif instance.status == 4:#警告并锁定状态转警告状态
instance.status = 1
instance.save()
return Response(data, status.HTTP_202_ACCEPTED)
elif instance.status == 3:#禁用状态无法解锁
data['error'] = '密钥已被禁用'
return Response(data, status.HTTP_400_BAD_REQUEST)#返回错误信息
#生成链接接口
class LinkViewSet(viewsets.ModelViewSet):
queryset = OauthSecretKey.objects.all()
serializer_class = SecretkeySerializer
http_method_names = ['get']#仅允许get
def list(self, request, *args, **kwargs):
data = {}
queryset = OauthSecretKey.objects.filter(uid=request.user.id)
if len(queryset) > 0:
key_instance = queryset.values()[0]
data['key'] = key_instance['value']#密钥值
data['base_url'] = request.build_absolute_uri("/query")#基本链接,会根据访问链接自动生成
return Response(data)#返回key 和 base_url 由前端自己拼接呈现链接
else:#无密钥时的处理
data['error'] = '您还没有密钥,请申请密钥后重试'
return Response(data, status.HTTP_404_NOT_FOUND)
#预留的ip接口,此处代码与mixins.py内的相同
class IpViewSet(viewsets.ModelViewSet):
queryset = OauthIp.objects.all()
serializer_class = IpSerializer
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
def update(self, request, *args, **kwargs):
partial = kwargs.pop('partial', False)
instance = self.get_object()
serializer = self.get_serializer(instance, data=request.data, partial=partial)
serializer.is_valid(raise_exception=True)
self.perform_update(serializer)
if getattr(instance, '_prefetched_objects_cache', None):
# If 'prefetch_related' has been applied to a queryset, we need to
# forcibly invalidate the prefetch cache on the instance.
instance._prefetched_objects_cache = {}
return Response(serializer.data)
def perform_update(self, serializer):
serializer.save()
def partial_update(self, request, *args, **kwargs):
kwargs['partial'] = True
return self.update(request, *args, **kwargs)
#以下为django原生方法实现的接口,未使用drf
#登出,返回状态码,前端移除token
def user_logout(request):
if request.method == 'GET':
return HttpResponse(status=200)
if request.method == 'POST':
return HttpResponse(status=404)
#预留的广告主页
def ad_home(request):
return render(request, "home.html")
#预留的活动页
def usr_activity(request):
return render(request, "activity.html")
# Endpoint: user requests historical ("replay") chart data
def api_charts(request):
    """Serve historical stock chart data to an API consumer.

    Query params: ``code`` (stock code), ``date`` (optional, YYYY-MM-DD),
    ``secretkey`` (per-user API key).  Validates the key, its bound IP,
    the user's business activation/expiry and a 0.1s rate limit before
    rendering the echarts template.
    """
    code = request.GET.get('code')  # stock code from the query string
    date = request.GET.get('date')  # requested date; may be None
    secretkey = request.GET.get('secretkey')  # the API secret key
    if secretkey is None:  # no key supplied at all
        err_data = {
            'error': '校验错误'
        }
        return render(request, "key_error.html", err_data)
    # Resolve the caller's real IP (honour the reverse-proxy header first).
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        ip = x_forwarded_for.split(',')[-1].strip()
    else:
        ip = request.META.get('REMOTE_ADDR')
    # Look the key up in the database.
    keys = OauthSecretKey.objects.filter(value=secretkey)
    err_data = {}
    # Key exists:
    if len(keys) > 0:
        key_instance = keys.values()[0]
        uid = key_instance['uid_id']
        user = User.objects.get(id=uid)  # owning user record
        if user.business_activate == 0:  # business not activated: bail out immediately
            err_data['error'] = '业务未激活'
            return render(request, "key_error.html", err_data)
        user_fin_time = user.fin_time  # subscription end time
        cur_time = datetime.datetime.now()
        user_ft = convert_to_local_datetime(user_fin_time)
        if cur_time > user_ft:  # has the subscription expired?
            user.business_activate = 0  # flag the account inactive
            user.save()
            err_data['error'] = '业务已过期'  # report expiry
            return render(request, "key_error.html", err_data)
        if ip == key_instance['ip']:  # request IP must match the key's bound IP
            if key_instance['status'] == 0 or key_instance['status'] == 1:  # key state is normal (0) or warned (1)
                if_frequent = False  # flag: was this request rate-limited?
                if key_instance['last_query_time'] is None:  # first ever query: just stamp the time
                    key = OauthSecretKey.objects.get(id=key_instance['id'])
                    key.last_query_time = datetime.datetime.now()
                    key.save()
                else:  # otherwise compute the gap since the previous query
                    last_query_time = key_instance['last_query_time']
                    lq_time = convert_to_local_datetime(last_query_time)  # timezone conversion
                    cur_time = datetime.datetime.now()  # current time
                    days = (cur_time - lq_time).days
                    seconds = (cur_time - lq_time).seconds
                    # Gotcha: timedelta.microseconds only holds the sub-second part,
                    # so days and seconds must be checked too to detect high frequency.
                    if days == 0 and seconds == 0 and (cur_time - lq_time).microseconds < 100000:  # interval under 0.1s
                        key = OauthSecretKey.objects.get(id=key_instance['id'])
                        if key_instance['status'] == 0:  # key was in the normal state
                            key.status = 1  # demote to warning
                            key.last_query_time = cur_time  # update last query time
                            key.save()
                            err_data['error'] = '警告,请求过于频繁,再次频繁请求时密钥将被禁用'
                            if_frequent = True  # mark as rate-limited
                        else:  # already warned: disable the key outright
                            key.status = 3
                            key.last_query_time = cur_time  # update last query time
                            key.save()
                            err_data['error'] = '多次频繁请求,密钥已被禁用'
                            if_frequent = True
                if not if_frequent:  # not rate-limited: serve the data
                    # NOTE(review): when if_frequent is True this view falls through
                    # without returning a response (err_data is set but never
                    # rendered) — confirm whether that is intended.
                    key = OauthSecretKey.objects.get(id=key_instance['id'])
                    key.last_query_time = cur_time  # update last query time
                    key.save()
                    max_date = datetime.datetime.now()
                    if max_date.weekday() == 5:  # today is Saturday
                        max_date = max_date + datetime.timedelta(days=-1)
                    if max_date.weekday() == 6:  # Sunday
                        max_date = max_date + datetime.timedelta(days=-2)
                    if date is None:  # no date param: default to the most recent history
                        dt = max_date
                        lastday = dt + datetime.timedelta(days=-1)  # previous day
                        while lastday.weekday() == 5 or lastday.weekday() == 6 or lastday.strftime(  # skip weekends and holidays
                                '%Y-%m-%d') in holidays:
                            lastday = lastday + datetime.timedelta(days=-1)
                        if lastday.date() <= min_date.date():
                            lastday = min_date
                        dt = lastday
                        date = dt.strftime('%Y-%m-%d')
                        date_value = dt.year * 10000 + dt.month * 100 + dt.day  # int-typed date as stored in the DB
                        date_str = str(dt.year) + '年' + str(dt.month) + '月' + str(dt.day) + '日'
                    else:
                        dt = datetime.datetime.strptime(date, '%Y-%m-%d')  # date supplied: parse to datetime
                        if dt>max_date:
                            dt = max_date
                            date = dt.strftime('%Y-%m-%d')
                        date_value = dt.year * 10000 + dt.month * 100 + dt.day  # int-typed date as stored in the DB
                        date_str = str(dt.year) + '年' + str(dt.month) + '月' + str(dt.day) + '日'
                    log = OauthQueryLog(uid=user, type=1, code=code, date=date_value,
                                        query_time=datetime.datetime.now(), ip=ip)  # audit-log the request
                    log.save()
                    #stockdb = date_switch(dt)  # used when tables are sharded by date
                    #queryres = stockdb.objects.all().values().filter(code=code, date=date_value)  # fetch data (sharded-table variant)
                    queryres = OauthStockData.objects.all().values().filter(code=code, date=date_value)  # fetch the data rows
                    yesterday = dt + datetime.timedelta(days=-1)  # previous day, for paging
                    tomorrow = dt + datetime.timedelta(days=+1)  # next day, for paging
                    while yesterday.weekday() == 5 or yesterday.weekday() == 6 or yesterday.strftime(  # skip weekends and holidays
                            '%Y-%m-%d') in holidays:
                        yesterday = yesterday + datetime.timedelta(days=-1)
                    if yesterday.date() <= min_date.date():
                        yesterday = min_date
                    while tomorrow.weekday() == 5 or tomorrow.weekday() == 6 or tomorrow.strftime(
                            '%Y-%m-%d') in holidays:
                        tomorrow = tomorrow + datetime.timedelta(days=+1)
                    if tomorrow.date() >= max_date.date():
                        tomorrow = max_date
                    yesterday_str = str(yesterday.year) + '-' + str(yesterday.month) + '-' + str(yesterday.day)
                    tomorrow_str = str(tomorrow.year) + '-' + str(tomorrow.month) + '-' + str(tomorrow.day)
                    time_list = []  # timestamps sent separately for the echarts time-type x axis
                    index = 0
                    flag = False
                    index920 = 0  # list position of the first record at/after 09:20
                    for item in queryres:
                        date = item['date']
                        time = item['time']
                        year = int(date / 10000)
                        month = int((date - year * 10000) / 100)
                        day = date % 100
                        hour = int(time / 10000)
                        minute = int((time - hour * 10000) / 100)
                        sec = time % 100
                        if hour == 9 and minute >= 20 and flag is False:
                            index920 = index
                            flag = True
                        dt = datetime.datetime(year=year, month=month, day=day, hour=hour, minute=minute, second=sec)
                        time_list.append(str(dt))  # convert the int timestamp to a string
                        index = index + 1
                    url = request.build_absolute_uri("/query/charts/?secretkey=" + str(key_instance['value']))  # base url used to build paging links
                    context = {
                        'queryset': queryres,  # the data
                        'timelist': time_list,  # the time axis
                        'code': code,
                        'date': date,
                        'date_str': date_str,
                        'yesterday_str': yesterday_str,
                        'tomorrow_str': tomorrow_str,
                        'jmp_str': url,
                        'index_920': index920,
                    }
                    return render(request, "usr_echarts.html", context)
            else:  # key is in an unusable state
                err_data['error'] = '密钥状态异常'
                return render(request, "key_error.html", err_data)
        elif key_instance['status'] == 0 or key_instance['status'] == 2:  # IP check failed
            key = OauthSecretKey.objects.get(id=key_instance['id'])  # set the key state to locked
            key.status = 2
            key.save()
            err_data['error'] = 'ip异常,密钥已被锁定'
        elif key_instance['status'] == 1 or key_instance['status'] == 4:
            key = OauthSecretKey.objects.get(id=key_instance['id'])  # set the key state to warned-and-locked
            key.status = 4
            key.save()
            err_data['error'] = 'ip异常,密钥已被锁定'
        else:  # key has already been disabled
            err_data['error'] = '密钥状态异常'
    else:  # key does not exist
        err_data['error'] = '密钥不存在'
    return render(request, "key_error.html", err_data)
# Endpoint: today's (live) data query; mirrors the replay endpoint above
def api_charts_today(request):
    """Serve today's stock chart data.

    No ``date`` parameter: the trading day is computed server-side.
    Each successful query that actually returns data decrements the
    user's remaining query quota (``user.times``).
    """
    code = request.GET.get('code')
    secretkey = request.GET.get('secretkey')
    # No date parameter needed for the live endpoint.
    if secretkey is None:  # is the key parameter missing?
        err_data = {
            'error': '校验错误'
        }
        return render(request, "key_error.html", err_data)
    # Resolve the caller's real IP (honour the reverse-proxy header first).
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        ip = x_forwarded_for.split(',')[-1].strip()
    else:
        ip = request.META.get('REMOTE_ADDR')
    keys = OauthSecretKey.objects.filter(value=secretkey)
    err_data = {}
    if len(keys) > 0:  # key exists
        key_instance = keys.values()[0]
        uid = key_instance['uid_id']
        user = User.objects.get(id=uid)  # owning user record
        if user.times <= 0:  # check the remaining query quota
            err_data['error'] = '剩余查询次数不足'
            return render(request, "key_error.html", err_data)
        if ip == key_instance['ip']:
            if key_instance['status'] == 0 or key_instance['status'] == 1:
                if_frequent = False  # reserved hook: rate limiting could be added here
                if not if_frequent:
                    key = OauthSecretKey.objects.get(id=key_instance['id'])
                    key.save()
                    max_date = datetime.datetime.now()
                    while max_date.weekday() == 5 or max_date.weekday() == 6 or max_date.strftime(  # skip weekends and holidays
                            '%Y-%m-%d') in holidays:
                        max_date = max_date + datetime.timedelta(days=-1)
                    if max_date.date() <= min_date.date():
                        max_date = min_date
                    dt = max_date
                    date_value = dt.year * 10000 + dt.month * 100 + dt.day
                    log = OauthQueryLog(uid=user, type=0, code=code, date=date_value,
                                        query_time=datetime.datetime.now(), ip=ip)
                    log.save()  # audit-log the request
                    date_str = str(dt.year) + '年' + str(dt.month) + '月' + str(dt.day) + '日'
                    queryres = OauthTodayData.objects.all().values().filter(code=code, date=20230505)  # 20230505 is test data; in production use date_value to query today's data (if the table holds a single day, filtering by code alone suffices)
                    if len(queryres) != 0:  # quota is only charged when data was found
                        user.times -= 1
                        user.save()
                    time_list = []
                    index = 0
                    flag = False
                    index920 = 0
                    for item in queryres:
                        date = item['date']
                        time = item['time']
                        year = int(date / 10000)
                        month = int((date - year * 10000) / 100)
                        day = date % 100
                        hour = int(time / 10000)
                        minute = int((time - hour * 10000) / 100)
                        sec = time % 100
                        if hour == 9 and minute >= 20 and flag is False:
                            index920 = index
                            flag = True
                        dt = datetime.datetime(year=year, month=month, day=day, hour=hour, minute=minute, second=sec)
                        time_list.append(str(dt))
                        index = index + 1
                    context = {
                        'queryset': queryres,
                        'timelist': time_list,
                        'code': code,
                        'date_str': date_str,
                        'index_920': index920,
                    }
                    return render(request, "usr_echart_today.html", context)
            else:  # abnormal states are handled exactly like the replay endpoint
                err_data['error'] = '密钥状态异常'
                return render(request, "key_error.html", err_data)
        elif key_instance['status'] == 0 or key_instance['status'] == 2:
            key = OauthSecretKey.objects.get(id=key_instance['id'])
            key.status = 2
            key.save()
            err_data['error'] = 'ip异常,密钥已被锁定'
        elif key_instance['status'] == 1 or key_instance['status'] == 4:
            key = OauthSecretKey.objects.get(id=key_instance['id'])
            key.status = 4
            key.save()
            err_data['error'] = 'ip异常,密钥已被锁定'
        else:
            err_data['error'] = '密钥状态异常'
    else:
        err_data['error'] = '密钥不存在'
    return render(request, "key_error.html", err_data)
| lff12876/DjangoWeb2023 | oAuth/views.py | views.py | py | 31,531 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "oAuth.models.stockdata20204",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "oAuth.models.stockdata20211",
"line_number": 36,
"usage_type": "name"
},
{
"api_nam... |
19123385662 | #!/usr/bin/env Python3
# Web scraping script for fun
import requests
from bs4 import BeautifulSoup
def scrape_website(url):
    """Fetch *url* and print the href of every anchor tag on the page.

    Prints a failure message for any non-200 response.
    """
    # Fix: a timeout prevents the request from hanging forever on a dead host.
    response = requests.get(url, timeout=10)
    if response.status_code == 200:
        soup = BeautifulSoup(response.content, "html.parser")
        links = soup.find_all("a")
        for link in links:
            print(link.get("href"))
    else:
        print("Failed to retrieve the web page.")
def main():
    """Prompt for a URL on stdin and scrape it."""
    target = input("Enter the URL of the website you want to scrape: ")
    scrape_website(target)
# Run the scraper only when executed as a script, not on import.
if __name__ == "__main__":
    main()
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 12,
"usage_type": "call"
}
] |
13498558067 | import pyttsx3 #pip install pyttsx3
import speech_recognition as sr #pip install speechRecognition
import datetime
import wikipedia #pip install wikipedia
import webbrowser
import os
import smtplib
import cv2
from time import ctime
import time
from requests import get
import sys
import pywhatkit as kit
from googletrans import Translator
from pytube import YouTube
import pyjokes
import phonenumbers
from phonenumbers import timezone
from phonenumbers import carrier
from phonenumbers import geocoder
import speedtest
from bs4 import BeautifulSoup
import PyPDF2
import psutil
import pyautogui
from pywikihow import search_wikihow
import instaloader
import requests
#from dictionary import translate
#from news import speak_news, getNewsUrl
from PIL import Image
# importing the module
from englisttohindi.englisttohindi import EngtoHindi
#import COVID19Py
# Startup banner (ASCII art) shown when the assistant launches.
print("""
--------------------------------------------------------------------------
| __ ______ ______ __ __ _ _____ |
| | | | __ | | __ \ \ \ / / | | / ____] |
| | | | |__| | | | | | \ \ / / | | / / |
| | | | __ | | '--'_/ \ \ / / | | \ \____ |
| __ | | | | | | | _ \ \ \ / / | | \___ \ |
| \ |___| | | | | | | | \ \ \ \_/ / | | ____} | |
| \_______/ |_| |_| |_| \__\ \___/ |_| \_____/ |
| |
--------------------------------------------------------------------------
""")
# Text-to-speech engine setup (Windows SAPI5 backend).
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
# print(voices[0].id) for boy voice
engine.setProperty('voice', voices[1].id)
def silenceTime(query):
    """Parse the requested quiet period (1-10 minutes) out of *query*
    and delegate to silence().

    Accepts either digits ("5 minutes") or spelled-out words ("five
    minutes").  The original chain of substring checks misparsed
    multi-digit phrases (e.g. '1' matched inside "12"); a regex now
    extracts the actual number, clamped to the 10-minute maximum.
    Falls back to 0 (immediate return) when nothing matches, as before.
    """
    import re  # local import: only this helper needs it
    print(query)
    words = {'ten': 10, 'one': 1, 'two': 2, 'three': 3, 'four': 4,
             'five': 5, 'six': 6, 'seven': 7, 'eight': 8, 'nine': 9}
    match = re.search(r'\d+', query)
    if match:
        # Clamp to the supported maximum of 10 minutes.
        minutes = min(int(match.group()), 10)
    else:
        minutes = next((v for w, v in words.items() if w in query), 0)
    silence(minutes * 60)
# Silence: quiet-mode countdown
def silence(k):
    """Stay quiet for *k* seconds, printing a MM:SS countdown in place."""
    speak("Ok okay I will be silent for " + str(k / 60) + " minutes")
    remaining = k
    while remaining:
        minutes, seconds = divmod(remaining, 60)
        print('{:02d}:{:02d}'.format(minutes, seconds), end="\r")
        time.sleep(1)
        remaining -= 1
    speak("okay " + str(k / 60) + " minutes over")
def condition():
    """Announce CPU utilisation and battery charge, with charging advice.

    Bug fixed: the original range checks left gaps (e.g. 31-39% matched
    none of the explicit ranges and fell into the "very low power" else
    branch).  Plain descending elif thresholds now cover every
    percentage exactly once.
    """
    usage = str(psutil.cpu_percent())
    speak("CPU is at" + usage + " percentage")
    print("CPU is at" + usage + " percentage")
    battray = psutil.sensors_battery()
    percentage = battray.percent
    print(percentage)
    speak(f"Boss our system have {percentage} percentage Battery")
    if percentage >= 75:
        speak(f"Boss we could have enough charging to continue our work")
        print(f"Boss we could have enough charging to continue our work")
    elif percentage >= 40:
        speak(f"Boss we should connect out system to charging point to charge our battery")
        print(f"Boss we should connect out system to charging point to charge our battery")
    elif percentage >= 15:
        speak(f"Boss we don't have enough power to work, please connect to charging")
        print(f"Boss we don't have enough power to work, please connect to charging")
    else:
        speak(f"Boss we have very low power, please connect to charging otherwise the system will shutdown very soon")
        print(f"Boss we have very low power, please connect to charging otherwise the system will shutdown very soon")
def pdf_reader():
    """Ask for a PDF name and a page number, then read that page aloud.

    Fix: the original opened the file and never closed it; the 'with'
    block now guarantees the handle is released even if parsing fails.
    """
    speak("okay enter the name of the book which you want to read")
    n = input("Enter the book name: ")
    n = n.strip() + ".pdf"
    with open(n, 'rb') as book_n:
        pdfReader = PyPDF2.PdfFileReader(book_n)
        pages = pdfReader.numPages
        speak(f"okay there are total of {pages} in this book")
        speak("please enter the page number Which I nead to read")
        num = int(input("Enter the page number: "))
        page = pdfReader.getPage(num)
        text = page.extractText()
    print(text)
    speak(text)
def news():
    """Fetch top headlines from NewsAPI and read them out one by one."""
    MAIN_URL_ = "https://newsapi.org/v2/top-headlines?sources=techcrunch&apiKey=YOUR_NEWS_API_KEY"
    MAIN_PAGE_ = get(MAIN_URL_).json()
    articles = MAIN_PAGE_["articles"]
    headings = [ar['title'] for ar in articles]
    # Extend this list if you ever want more than ten headlines.
    seq = ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh', 'eighth', 'ninth', 'tenth']
    # Fix: the API may return fewer than len(seq) articles; iterating the
    # smaller count avoids the original IndexError.
    for i in range(min(len(seq), len(headings))):
        print(f"todays {seq[i]} news is: {headings[i]}")
        speak(f"todays {seq[i]} news is: {headings[i]}")
    speak("Boss I am done, I have read most of the latest news")
def scshot():
    """Take a screenshot and save it to a hard-coded desktop path."""
    #speak("Boss, please tell me the name for this screenshot file")
    # NOTE(review): `name` is captured from the microphone but never used —
    # the file is always saved under the fixed path below; confirm whether
    # naming the file after the spoken input was intended.
    name = takeCommand()
    speak("Please boss hold the screen for few seconds, I am taking screenshot")
    time.sleep(3)
    myScreenshot = pyautogui.screenshot()
    myScreenshot.save(r'C:\Users\ruchi\OneDrive\Desktop\screenshot1.png')
    speak("I am done boss, the screenshot is saved in main folder.")
def Instagram_Pro():
    """Open an Instagram profile in the browser and optionally download
    its profile picture via instaloader."""
    speak("Boss please enter the user name of Instagram: ")
    username = input("Enter username here: ")
    webbrowser.open(f"www.instagram.com/{username}")
    time.sleep(5)
    speak("Boss would you like to download the profile picture of this account.")
    answer = takeCommand()
    if 'download' not in answer:
        return
    loader = instaloader.Instaloader()
    loader.download_profile(username, profile_pic_only=True)
    speak("I am done boss, profile picture is saved in your main folder. ")
def Cal_day():
    """Print and return today's weekday name (e.g. 'Monday')."""
    names = ('Monday', 'Tuesday', 'Wednesday', 'Thursday',
             'Friday', 'Saturday', 'Sunday')
    # weekday() is already 0-based Monday..Sunday, so it indexes directly.
    day_of_the_week = names[datetime.datetime.today().weekday()]
    print(day_of_the_week)
    return day_of_the_week
# Interactive "how to" lookup backed by WikiHow.
def How():
    """Loop: listen for a topic, read out the matching WikiHow summary.

    Saying "exit" or "close" leaves the mode; any lookup failure is
    reported verbally rather than raised.
    """
    speak("How to do mode is is activated")
    while True:
        speak("Please tell me what you want to know")
        topic = takeCommand()
        try:
            if ("exit" in topic) or ("close" in topic):
                speak("Ok sir how to mode is closed")
                break
            results = search_wikihow(topic, 1)
            assert len(results) == 1
            results[0].print()
            speak(results[0].summary)
        except Exception as e:
            speak("Sorry sir, I am not able to find this")
def speak(audio):
    """Vocalize *audio* through the pyttsx3 engine (blocks until spoken)."""
    engine.say(audio)
    engine.runAndWait()
def wishMe():
    """Greet the user according to the current hour, then offer help."""
    hour = datetime.datetime.now().hour
    if hour < 12:
        greeting = "Good Morning!"
    elif hour < 18:
        greeting = "Good Afternoon!"
    else:
        greeting = "Good Evening!"
    speak(greeting)
    speak("I am Jarvis Sir. Please tell me how may I help you")
def takeCommand():
    """Listen on the microphone and return the recognized sentence.

    Returns the literal string "None" when recognition fails — callers
    test for substrings, so that value simply matches nothing.
    """
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        recognizer.adjust_for_ambient_noise(source, duration=1)
        print("Listening...")
        recognizer.pause_threshold = 1
        audio = recognizer.listen(source, phrase_time_limit=4)
    try:
        print("Recognizing...")
        query = recognizer.recognize_google(audio, language='en-in')
        print(f"User said: {query}\n")
        return query
    except Exception:
        speak("Say that again please...")
        return "None"
def InternetSpeed():
    """Measure and announce download/upload speed via speedtest-cli.

    Speedtest's download()/upload() return bits per second; dividing by
    1e6 therefore yields megabits.  Fix: the spoken message previously
    claimed "megabytes", overstating the speed by a factor of eight.
    """
    speak("Wait a few seconds okay, checking your internet speed")
    st = speedtest.Speedtest()
    dl = st.download() / 1000000  # bits/s -> Mbit/s
    up = st.upload() / 1000000    # bits/s -> Mbit/s
    print(dl, up)
    speak(f"okay, we have {dl} megabits per second downloading speed and {up} megabits per second uploading speed")
def verifyMail():
    """Dictate an email body and recipient, then hand off to sendEmail().

    Any failure (recognition or SMTP) is reported verbally, not raised.
    """
    try:
        speak("what should I say?")
        content = takeCommand()
        speak("To whom do u want to send the email?")
        recipient = takeCommand()
        sendEmail(recipient, content)
        speak("Email has been sent to " + str(recipient))
    except Exception as e:
        print(e)
        speak("Sorry sir I am not not able to send this email")
def sendEmail(to, content):
    """Send *content* to address *to* through Gmail's SMTP relay on port 587.

    Fix: the 'with' block guarantees the connection is closed even when
    login/sendmail raises (the original leaked the socket on error).
    NOTE(review): credentials are hard-coded placeholders — move them to
    configuration before real use.
    """
    with smtplib.SMTP("smtp.gmail.com", 587) as server:
        server.ehlo()
        server.starttls()
        server.login("your@gmail.com", "yourpassword")
        server.sendmail("sender@gmail.com", to, content)
def whatsapp():
    """Schedule a WhatsApp message via pywhatkit.

    'bro' and 'papa' map to stored contact numbers; any other name asks
    for a phone number.  Fixes two bugs in the original: the dictated
    number was concatenated onto '+91' as an int (TypeError), and the
    resulting `ph` was never used — the hard-coded contact number was
    messaged instead.
    """
    speak("tell me the name of the person!")
    name = takeCommand()
    if 'bro' in name:
        number = "+919876543210"
    elif 'papa' in name:
        number = "+919876543210"
    else:
        speak("tell me the number !")
        # takeCommand() returns a string; int() validates the digits,
        # str() makes it concatenable again.
        number = '+91' + str(int(takeCommand()))
    _send_whatsapp(number)

def _send_whatsapp(number):
    """Collect message text and send time, then schedule delivery to *number*."""
    speak("tell me the message!")
    msg = takeCommand()
    speak("tell me the time!")
    speak("time in hour !")
    hour = int(takeCommand())
    speak("time in minute !")
    minute = int(takeCommand())
    kit.sendwhatmsg(number, msg, hour, minute, 20)
    speak("ok ,sending whatsapp message!")
def track():
    """Print region, carrier and timezone info for a hard-coded number."""
    parsed = phonenumbers.parse("+919876543210", None)
    print(geocoder.description_for_number(parsed, "en"))
    print(carrier.name_for_number(parsed, "en"))
    print(timezone.time_zones_for_number(parsed))
def temperature():
    """Announce the current temperature for the city of this machine's
    public IP, scraped from a Google search result page."""
    public_ip = get('https://api.ipify.org').text
    geo = get('https://get.geojs.io/v1/ip/geo/' + public_ip + '.json').json()
    city = geo['city']
    search = f"temperature in {city}"
    page = get(f"https://www.google.com/search?q={search}")
    parsed = BeautifulSoup(page.text, "html.parser")
    # Google's result snippet div; fragile, breaks if the markup changes.
    temp = parsed.find("div", class_="BNeawe").text
    speak(f"current {search} is {temp}")
def location():
    """Geolocate this machine from its public IP and announce the result."""
    speak("Wait boss, let me check")
    try:
        IP_Address = get('https://api.ipify.org').text  # public IP
        print(IP_Address)
        url = 'https://get.geojs.io/v1/ip/geo/'+IP_Address+'.json'  # geojs geolocation endpoint
        print(url)
        geo_reqeust = get(url)
        geo_data = geo_reqeust.json()
        city = geo_data['city']
        state = geo_data['region']
        country = geo_data['country']
        tZ = geo_data['timezone']
        longitude = geo_data['longitude']
        latidute = geo_data['latitude']
        org = geo_data['organization_name']  # network/ISP name
        print(city+" "+state+" "+country+" "+tZ+" "+longitude+" "+latidute+" "+org)
        speak(f"Boss i am not sure, but i think we are in {city} city of {state} state of {country} country")
        # NOTE(review): "latitude os our location" looks like a typo for "of".
        speak(f"and boss, we are in {tZ} timezone the latitude os our location is {latidute}, and the longitude of our location is {longitude}, and we are using {org}\'s network ")
    except Exception as e:
        # Best-effort: any network or parse failure is reported, not raised.
        speak("Sorry boss, due to network issue i am not able to find where we are.")
        pass
if __name__ == "__main__":
wishMe()
while True:
# command = takeCommand() #Every time taking command after a task is done
# print(command)
# if 1:
query = takeCommand().lower()
# Logic for executing tasks based on query
if ('wikipedia' in query) or ('open wikipedia'):
speak('Searching Wikipedia...')
query = query.replace("wikipedia", "")
results = wikipedia.summary(query, sentences=2)
speak("According to Wikipedia")
print(results)
speak(results)
elif ('open youtube' in query) or ('youtube' in query):
speak("opening the youtube !")
webbrowser.open("youtube.com")
elif ('open google' in query) or ('google' in query):
speak("opening the google !")
speak("sir,what should i search in google")
cm=takeCommand().lower()
webbrowser.open(f"{cm}")
elif ('open stackoverflow' in query) or ('stackoverflow' in query):
webbrowser.open("stackoverflow.com")
elif ('play music' in query) or ('music' in query):
speak("dude! music is playing.... !")
music_dir = 'C:\\Music'
songs = os.listdir(music_dir)
print(songs)
os.startfile(os.path.join(music_dir, songs[0]))
elif ('the time' in query) or ('time' in query):
strTime = datetime.datetime.now().strftime("%H:%M:%S")
speak(f"Sir, the time is {strTime}")
elif ('open notepad' in query) or ('notepad' in query):
speak("opening the notepad !")
npath="C:\\Windows\\system32\\notepad.exe"
os.startfile(npath)
elif ('open command prompt' in query) or ('command prompt' in query):
speak("opening the Command prompt !")
os.system("start cmd")
elif ('open camera' in query) or ('camera' in query):
speak('opening the camera')
speak('press space to capture the picture')
speak('press esc to close the camera')
# press esc to close & space to click photo at current location
cam = cv2.VideoCapture(0)
cv2.namedWindow("test")
img_counter = 0
while True:
ret, frame = cam.read()
if not ret:
print("failed to grab frame")
break
cv2.imshow("test", frame)
k = cv2.waitKey(1)
if k%256 == 27:
# ESC pressed
print("Escape hit, closing...")
break
elif k%256 == 32:
# SPACE pressed
img_name = "opencv_frame_{}.png".format(img_counter)
cv2.imwrite(img_name, frame)
print("{} written!".format(img_name))
img_counter += 1
cam.release()
cv2.destroyAllWindows()
elif ('how are you' in query) or('how you being' in query) :
speak("I am fine")
elif ('hi'in query) or('hai'in query) or ('hey'in query) or ('hello' in query) :
speak("Hello okay what can I help for u")
elif ('ip address' in query) or ('ip' in query):
ip = get('https://api.ipify.org').text
print(f"your IP address is {ip}")
speak(f"your IP address is {ip}")
elif ('open facebook' in query) or ('facebook' in query):
speak("opening the Facebook !")
webbrowser.open("www.facebook.com")
elif ('send message' in query) or ('whatsapp' in query) or ('send message to vivek' in query):
kit.sendwhatmsg("+919876543210", "this is for testing",10,50)
elif ('play songs on youtube' in query) or ('fav song on youtube' in query):
kit.playonyt("kya mujhe pyar hai")
elif ('translator' in query) or ('in hindi' in query) or('speak in hindi' in query):
tran()
elif ('microsoft teams' in query) or ('teams' in query):
speak("opening the Microsoft teams !")
teampath="C:\\Users\\ruchi\\AppData\\Local\\Microsoft\\Teams\\Update"
os.startfile(teampath)
elif ('word' in query) or ('Microsoft word' in query) or ('open word' in query):
speak("opening the Microsoft word !")
wordpath="C:\\Program Files\\Microsoft Office\\root\\Office16\\WINWORD"
os.startfile(wordpath)
elif ('powerpoint' in query) or ('microsoft powerpoint' in query ) or ('open powerpoint' in query):
speak("opening the Microsoft powerpoint !")
pointpath="C:\\Program Files\\Microsoft Office\\root\\Office16\\POWERPNT"
os.startfile(pointpath)
elif ('code blocks' in query) or ('open codeblocks' in query) or ('codeblocks' in query):
speak("opening the codeblocks !")
blockpath="C:\\Program Files\\CodeBlocks\\codeblocks"
os.startfile(blockpath)
elif ('visual studio' in query)or('open studio' in query) or ('open visual studio' in query):
speak("opening the Visualstudio !")
studiopath="C:\\Users\\ruchi\\AppData\\Local\\Programs\\Microsoft VS Code\\Code"
os.startfile(studiopath)
elif ('anaconda' in query) or ('conda' in query):
speak("opening the Anaconda !")
condapath="C:\\OneDriveTemp\\Anaconda\\pythonw"
os.startfile(condapath)
elif ('calculator' in query) or ('calc' in query) or ('cal' in query):
speak("opening the calculator !")
calpath="C:\\Windows\\System32\\calc.exe"
os.startfile(calpath)
elif 'email to vivek' in query:
try:
speak("What should I say?")
content = takeCommand().lower()
to = "your@gmail.com"
sendEmail(to, content)
except Exception as e:
print(e)
speak("Sorry ,my friend . I am not able to send this email")
elif ('hotstar' in query) or ('open hotstar' in query):
speak('opening your disney plus hotstar')
webbrowser.open('https://www.hotstar.com/in')
elif ('prime' in query) or ('open prime' in query):
speak('opening your amazon prime videos')
webbrowser.open('https://www.primevideo.com/')
elif ('netflix' in query) or ('open netflix' in query):
speak('opening Netflix videos')
webbrowser.open('https://www.netflix.com/')
elif 'close notepad' in query:
speak("okay , closeing notepad!")
os.system("taskkill /f /im notepad.exe")
elif ('slides' in query) or ('open slides' in query):
speak('opening your google slides')
webbrowser.open('https://docs.google.com/presentation/')
elif ('canva' in query) or ('open canva' in query):
speak('opening your canva')
webbrowser.open('https://www.canva.com/')
elif ('github' in query) or ('open github' in query):
speak('opening your github')
webbrowser.open('https://github.com/')
elif ('gitlab' in query) or ('open gitlab' in query):
speak('opening your gitlab')
webbrowser.open('https://gitlab.com/-/profile')
elif ('play youtube' in query) or ('any song' in query) or ('search in youtube' in query):
speak("okay can you please say what to search in youtube")
search =takeCommand()
if "play" in search:
search = search.replace("play","")
speak('playing '+search)
print(f'playing {search}')
kit.playonyt(search)
print('playing')
elif "download youtube video" in query:
speak("okay please enter the youtube video link which you want to download")
link = input("Enter the YOUTUBE video link: ")
yt=YouTube(link)
yt.streams.get_highest_resolution().download()
speak(f"okay downloaded {yt.title} from the link you given into the main folder")
elif ('edge' in query) or ('open edge' in query):
speak('opening your Miscrosoft edge')
os.startfile('..\\..\\MicrosoftEdge.exe')
elif ('flipkart' in query) or ('open flipkart' in query):
speak('Opening flipkart online shopping website')
webbrowser.open("https://www.flipkart.com/")
elif ('amazon' in query) or ('open amazon' in query):
speak('Opening amazon online shopping website')
webbrowser.open("https://www.amazon.in/")
elif ('temperature' in query) or ('today temperature' in query):
speak('the temperature is ')
temperature()
elif ('today schedule' in query) or ('schedule' in query):
day = Cal_day().lower()
speak("okay today's shedule is")
Week = {"monday" : "okay from 9:00 to 9:50 you have python class, from 10:00 to 11:50 you have data structure class, from 12:00 to 2:00 you have brake, and today you have sensors lab from 2:00",
"tuesday" : "okay from 9:00 to 9:50 you have system analysis class, from 10:00 to 10:50 you have break,from 11:00 to 12:50 you have networking class, from 1:00 to 2:00 you have brake, and today you have python lab from 2:00",
"wednesday" : "okay today you have a full day of classes from 9:00 to 10:50 you have Data structures class, from 11:00 to 11:50 you have advance database class, from 12:00 to 12:50 you have data structure class, from 1:00 to 2:00 you have brake, and today you have Data structures lab from 2:00",
"thrusday" : "okay today you have a full day of classes from 9:00 to 10:50 you have Maths class, from 11:00 to 12:50 you have networking class, from 1:00 to 2:00 you have brake, and today you have advance database lab from 2:00",
"friday" : "okay today you have a full day of classes from 9:00 to 9:50 you have Biology class, from 10:00 to 10:50 you have data structures class, from 11:00 to 12:50 you have Elements of computing class, from 1:00 to 2:00 you have brake, and today you have Electronics lab from 2:00",
"saturday" : "okay today you have a full day of classes from 9:00 to 11:50 you have maths lab, from 12:00 to 12:50 you have english class, from 1:00 to 2:00 you have brake, and today you have elements of computing lab from 2:00",
"sunday":"okay today is holiday but we can't say anything when they will bomb with any assisgnments"}
if day in Week.keys():
speak(Week[day])
elif 'your name' in query:
speak("My name is jarvis")
elif 'my name' in query:
speak("your name is Ruchi")
elif 'university name' in query:
speak("you are studing in charusat university, pursuing bachelors in computer application ")
elif 'what can you do' in query:
speak("I speak with you until you want to stop, I can say time, open your social media accounts,your open source accounts, open google browser,and I can also open your college websites, I can search for some thing in google and I can tell jokes")
elif 'your age' in query:
speak("I am very young that u")
elif 'date' in query:
speak('Sorry not intreseted, I am having headache, we will catch up some other time')
elif 'are you single' in query:
speak('No, I am in a relationship with wifi')
elif 'joke' in query:
speak(pyjokes.get_joke())
elif 'are you there' in query:
speak('Yes , I am here')
elif 'gmail' in query:
speak('opening your google gmail')
webbrowser.open('https://mail.google.com/mail/')
elif ' google maps' in query:
speak('opening google maps')
webbrowser.open('https://www.google.co.in/maps/')
elif 'google news' in query:
speak('opening google news')
webbrowser.open('https://news.google.com/')
elif 'google calender' in query:
speak('opening google calender')
webbrowser.open('https://calendar.google.com/calendar/')
elif 'google photos' in query:
speak('opening your google photos')
webbrowser.open('https://photos.google.com/')
elif 'google documents' in query:
speak('opening your google documents')
webbrowser.open('https://docs.google.com/document/')
elif 'google spreadsheet' in query:
speak('opening your google spreadsheet')
webbrowser.open('https://docs.google.com/spreadsheets/')
elif 'close calculator' in query:
speak("okay , closeing calculator!")
os.system("taskkill /f /im calc.exe")
elif 'close powerpoint' in query:
speak("okay , closeing powerpoint!")
os.system("taskkill /f /im POWERPNT")
elif 'close word' in query:
speak("okay , closeing word!")
os.system("taskkill /f /im WINWORD")
elif 'close microsoft teams' in query:
speak("okay , closeing Microsoft teams!")
os.system("taskkill /f /im Update")
elif 'close visual studio' in query:
speak("okay , closeing visual studio!")
os.system("taskkill /f /im Code")
elif 'close anaconda' in query:
speak("okay , closeing anaconda!")
os.system("taskkill /f /im pythonw")
elif ('shutdown the system' in query) or ('down the system' in query):
speak("okay shutting down the system in 10 seconds")
time.sleep(10)
os.system("shutdown /s /t 5")
elif ("you can sleep" in query) or ("sleep now" in query):
speak("Okay okay, I am going to sleep you can call me anytime.")
break
#command for waking the jarvis from sleep
#jarvis wake up
elif ("wake up" in query) or ("get up" in query):
speak("okay, I am not sleeping, I am in online, what can I do for u")
elif 'send email' in query:
verifyMail()
#command for checking the temperature in surroundings
#jarvis check the surroundings temperature
elif "temperature" in query:
temperature()
#command for checking internet speed
#Eg: jarvis check my internet speed
elif "internet speed" in query:
InternetSpeed()
#command if you don't want the JARVIS to spack until for a certain time
#Note: I can be silent for max of 10mins
# Eg: JARVIS keep quiet for 5 minutes
elif ('silence' in query) or ('silent' in query) or ('keep quiet' in query) or ('wait for' in query) :
silenceTime(query)
#command for restarting the system
#Eg: jarvis restart the system
elif ('restart the system' in query) or ('restart' in query):
speak("Boss restarting the system in 10 seconds")
time.sleep(10)
os.system("shutdown /r /t 5")
#command for make the system sleep
#Eg: jarvis sleep the system
elif ('sleep the system' in query) or ('sleep' in query):
speak("Boss the system is going to sleep")
os.system("rundll32.exe powrprof.dll, SetSuspendState 0,1,0")
#Command for reading PDF
#EG: Jarvis read pdf
elif ("read pdf" in query) or ("pdf" in query):
pdf_reader()
elif ('system condition' in query) or ('condition of the system' in query):
speak("checking the system condition")
condition()
elif ('take screenshot' in query)or ('screenshot' in query) or("take a screenshot" in query):
scshot()
elif ("volume mute" in query) or ("mute the sound" in query) :
speak('muting the sound')
pyautogui.press("volumemute")
speak('volume muted')
elif ("volume down" in query) or ("decrease volume" in query):
pyautogui.press("volumedown")
speak('volume decreased')
#command for searching for a procedure how to do something
#Eg:jarvis activate mod
# jarvis How to make a cake (or) jarvis how to convert int to string in programming
elif "activate mod" in query:
How()
#command for increaing the volume in the system
#Eg: jarvis increase volume
elif ("volume up" in query) or ("increase volume" in query):
pyautogui.press("volumeup")
speak('volume increased')
elif ('where i am' in query) or ('where we are' in query):
location()
elif ('yes' in query) or ('yup' in query) or ('yeah' in query):
speak("ok friend what should i do")
elif ('tell me news' in query) or ("the news" in query) or ("todays news" in query):
speak("Please wait boss, featching the latest news")
news()
#Command for opening an instagram profile and downloading the profile pictures of the profile
#Eg: jarvis open a profile on instagram
elif ('instagram profile' in query) or("profile on instagram" in query):
Instagram_Pro()
elif ('instagram' in query) or ('open instagram' in query):
speak('opening your instagram')
webbrowser.open('https://www.instagram.com/')
elif ('twitter' in query) or ('open twitter' in query):
speak('opening your twitter')
webbrowser.open('https://twitter.com/')
elif ('discord' in query) or ('open discord' in query):
speak('opening your discord')
webbrowser.open('https://discord.com/channels/@me')
elif 'it\'s my birthday today' in query:
print(" Wow! Wish you a very Happy Birthday")
speak(" Wow! Wish you a very Happy Birthday")
elif ("where is" in query) or ('current location' in query):
data = query.split(" ")
location = data[2]
speak("Hold on, I will show you where " + location + " is.")
os.system('cmd /k "start chrome https://www.google.nl/maps/place/"'+ location)
elif 'remember that' in query:
speak("what should i remember sir")
rememberMessage = takeCommand()
speak("you said me to remember"+rememberMessage)
remember = open('data.txt', 'w')
remember.write(rememberMessage)
remember.close()
elif ('do you remember anything' in query) or ('remember anything' in query):
remember = open('data.txt', 'r')
speak("you said me to remember that" + remember.read())
elif ("switch the window" in query) or ("switch window" in query) or ('window' in query):
speak("Okay sir, Switching the window")
pyautogui.keyDown("alt")
pyautogui.press("tab")
time.sleep(1)
pyautogui.keyUp("alt")
elif ("track" in query) or ("track a mobile number" in query):
track()
elif ('no thanks' in query) or ('no' in query):
speak("thank you! sir goodbyie have a nice day")
sys.exit()
elif ("goodbye" in query) or ("get lost" in query) or ('by' in query) or ('talk to you later' in query):
speak("Thanks for using me , have a good day")
sys.exit()
speak("sir,do you want any other work")
| ruchiparmar7/jarvis | jarvis.py | jarvis.py | py | 32,964 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "pyttsx3.init",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "psutil.cpu_percent",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "psutil.sensors_battery",
... |
10589524750 | from pathlib import Path
from enum import Enum
DEFAULT_FILE_CONTENT_VALUE = ""
DEFAULT_CURRENT_LINE_CONTENT_VALUE = []
PARAMETER_OPEN = "<"
PARAMETER_CLOSE = ">"
START_KEYWORD = "$"
KEYWORDS_TABLE = \
(
"$CREATE_FILE",
"$LINK",
"$FILE",
"$TEXT",
"$TARGET"
)
class KEYWORDS_ENUM(Enum):
    # Enumeration of the recognised script keywords.  The integer values
    # mirror the positions of the entries in KEYWORDS_TABLE, so
    # KEYWORDS_ENUM(i) maps a table index back to its keyword.
    CREATE_FILE = 0
    LINK = 1
    FILE = 2
    TEXT = 3
    TARGET = 4
class INVALID_KEYWORD(Exception):
    """Raised when a word in keyword position is not in KEYWORDS_TABLE."""

    ErrorMessage = ""

    def __init__(self, LineID, Keyword):
        self.ErrorMessage = 'Invalid keyword "{}" in line {}'.format(Keyword, LineID)
        # Also hand the message to Exception so str(exc) and exc.args
        # carry it; the original stored it only on ErrorMessage, leaving
        # the raised exception silent when printed or logged generically.
        super().__init__(self.ErrorMessage)
class MISSING_SYNTAX(Exception):
    """Raised when a required piece of syntax (e.g. '<' or '$') is absent."""

    ErrorMessage = ""

    def __init__(self, LineID, Syntax):
        self.ErrorMessage = 'Missing "{}" in line {}'.format(Syntax, LineID)
        # Also hand the message to Exception so str(exc) and exc.args
        # carry it instead of being empty.
        super().__init__(self.ErrorMessage)
class INVALID_ARGUMENT(Exception):
    """Raised when a parameter value is malformed or too short."""

    ErrorMessage = ""

    def __init__(self, LineID, Argument):
        self.ErrorMessage = 'Invalid argument "{}" in line {}'.format(Argument, LineID)
        # Also hand the message to Exception so str(exc) and exc.args
        # carry it instead of being empty.
        super().__init__(self.ErrorMessage)
class INVALID_SYNTAX(Exception):
    """Raised when a line carries unexpected trailing tokens."""

    ErrorMessage = ""

    def __init__(self, LineID, Syntax):
        self.ErrorMessage = 'Invalid syntax "{}" in line {}'.format(Syntax, LineID)
        # Also hand the message to Exception so str(exc) and exc.args
        # carry it instead of being empty.
        super().__init__(self.ErrorMessage)
def CheckIsKeyword(Word, LineID):
    """Return the KEYWORDS_ENUM member matching Word.

    Raises MISSING_SYNTAX when Word is empty or does not start with
    START_KEYWORD, and INVALID_KEYWORD when it matches no known keyword.
    LineID is only used to build the diagnostic message.
    """
    # Guard the empty string too: the original's Word[0] access raised a
    # bare IndexError instead of a parser diagnostic.
    if not Word or Word[0] != START_KEYWORD:
        raise MISSING_SYNTAX(LineID, START_KEYWORD)
    # enumerate() replaces the hand-maintained index counter; the table
    # position doubles as the enum value (see KEYWORDS_ENUM).
    for Index, Keyword in enumerate(KEYWORDS_TABLE):
        if Word == Keyword:
            return KEYWORDS_ENUM(Index)
    raise INVALID_KEYWORD(LineID, Word)
def CheckParameterCorrectness(Parameter, LineID, MinimumLength):
    """Validate that Parameter is at least MinimumLength characters and
    is wrapped in PARAMETER_OPEN/PARAMETER_CLOSE brackets.

    Raises INVALID_ARGUMENT or MISSING_SYNTAX on failure; returns None.
    """
    if len(Parameter) < MinimumLength:
        raise INVALID_ARGUMENT(LineID, Parameter)
    if Parameter[0] != PARAMETER_OPEN:
        raise MISSING_SYNTAX(LineID, PARAMETER_OPEN)
    if Parameter[-1] != PARAMETER_CLOSE:
        raise MISSING_SYNTAX(LineID, PARAMETER_CLOSE)
def ReadParameter(Parameter):
    """Strip the surrounding bracket characters and return the argument."""
    return Parameter[1:-1]
class Parser:
    """Line-oriented parser for the merge-script format.

    Reads the whole input file up front; ReadLine() advances over it one
    line at a time, splitting the line into words (text inside <...> is
    kept intact), and InterpretLine() turns the current line into a
    command list: [keyword, argument, ...].
    """

    # Class-level defaults kept for backward compatibility.  Real state
    # is created per instance in __init__: the original reused the
    # single DEFAULT_CURRENT_LINE_CONTENT_VALUE list as CurrentLine, so
    # every Parser instance shared (and corrupted) the same word list.
    FileContent = DEFAULT_FILE_CONTENT_VALUE
    CurrentLine = DEFAULT_CURRENT_LINE_CONTENT_VALUE
    CurrentLineID = 0
    CurrentSeek = 0
    OpenedScope = False

    def __init__(self, File):
        self.FileContent = Path(File).read_text()
        # Fresh per-instance state (see class-level comment).
        self.CurrentLine = []
        self.CurrentLineID = 0
        self.CurrentSeek = 0
        self.OpenedScope = False

    def ReadLine(self):
        """Advance to the next '\\n'-terminated line and tokenise it.

        Returns True when a line was read, False at end of input.
        """
        self.CurrentLine.clear()
        LocalLine = ""
        # Scan forward from the current seek position to the next
        # newline; the slice includes the newline itself.
        for i in range(self.CurrentSeek, len(self.FileContent)):
            if self.FileContent[i] == '\n':
                self.CurrentLineID = self.CurrentLineID + 1
                i += 1
                LocalLine = self.FileContent[self.CurrentSeek: i]
                self.CurrentSeek = i
                break
        if not len(LocalLine):
            return False
        # Split the line into words on spaces/newline, but treat
        # everything between PARAMETER_OPEN and PARAMETER_CLOSE as a
        # single word so arguments may contain spaces.
        FirstWordCharacter = 0
        for i in range(FirstWordCharacter, len(LocalLine)):
            if LocalLine[i] == PARAMETER_OPEN:
                self.OpenedScope = True
            elif LocalLine[i] == PARAMETER_CLOSE:
                self.OpenedScope = False
            if self.OpenedScope:
                continue
            if LocalLine[i] == ' ' or LocalLine[i] == '\n':
                LocalWord = LocalLine[FirstWordCharacter: i]
                # The original called replace() and discarded the
                # result; keep the (no-op) cleanup but assign it.  The
                # slice excludes the delimiter, so this never changes
                # the word in practice.
                LocalWord = LocalWord.replace('\n', '').replace(' ', '')
                if len(LocalWord) >= 1:
                    self.CurrentLine.append(LocalWord)
                i += 1
                FirstWordCharacter = i
        return True

    def ControlBadSyntax(self, RangeEnd, Threshold):
        """Raise INVALID_SYNTAX if the line has more than Threshold words.

        RangeEnd bounds how many of the trailing words are quoted in the
        diagnostic (0 means "up to the end of the line").
        """
        EndOfRange = RangeEnd if RangeEnd < len(self.CurrentLine) and RangeEnd != 0 else len(self.CurrentLine)
        if len(self.CurrentLine) > Threshold:
            CollectedSyntax = ""
            for i in range(Threshold, EndOfRange):
                CollectedSyntax += self.CurrentLine[i]
            raise INVALID_SYNTAX(self.CurrentLineID, CollectedSyntax)

    def InterpretLine(self):
        """Turn the current tokenised line into a command list.

        Returns [] for blank lines / '#' comments, otherwise
        [KEYWORDS_ENUM, argument(s)...].  Raises the module's parser
        exceptions on malformed input.
        """
        Command = []
        # Blank (<=1 word) lines and comment lines produce no command.
        if len(self.CurrentLine) <= 1:
            return Command
        elif self.CurrentLine[0][0] == "#":
            return Command
        Keyword = CheckIsKeyword(self.CurrentLine[0], self.CurrentLineID)
        Command.append(Keyword)
        if Keyword == KEYWORDS_ENUM.CREATE_FILE:
            CheckParameterCorrectness(self.CurrentLine[1], self.CurrentLineID, 3)
            Command.append(ReadParameter(self.CurrentLine[1]))
            self.ControlBadSyntax(0, 2)
        elif Keyword == KEYWORDS_ENUM.TARGET:
            CheckParameterCorrectness(self.CurrentLine[1], self.CurrentLineID, 3)
            Command.append(ReadParameter(self.CurrentLine[1]))
            self.ControlBadSyntax(0, 2)
        elif Keyword == KEYWORDS_ENUM.LINK:
            # $LINK takes a sub-keyword ($FILE or $TEXT) plus one argument.
            LKeyword = CheckIsKeyword(self.CurrentLine[1], self.CurrentLineID)
            Command.append(LKeyword)
            if LKeyword == KEYWORDS_ENUM.FILE:
                CheckParameterCorrectness(self.CurrentLine[2], self.CurrentLineID, 3)
                Command.append(ReadParameter(self.CurrentLine[2]))
            elif LKeyword == KEYWORDS_ENUM.TEXT:
                # $TEXT allows the empty argument "<>", hence min length 2.
                CheckParameterCorrectness(self.CurrentLine[2], self.CurrentLineID, 2)
                Command.append(ReadParameter(self.CurrentLine[2]))
            self.ControlBadSyntax(0, 3)
        return Command
| Cijei03/TextFilesMerger | Parser.py | Parser.py | py | 5,126 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 91,
"usage_type": "call"
}
] |
40152665258 | """Edit a 'rotomap' series of images.
In all modes:
Press 'q' to quit.
Press 'Q' to quit with exit code 1.
Press left for previous image, right for next image.
Press up for previous map, down for next map.
Ctrl-click on a point to zoom in on it.
Press 'z' or 'x' to adjust the zoom level.
Press space to restore original zoom.
Mode selection:
Press '1' for mole edit mode (the starting mode).
Press '2' for mask edit mode.
Press '3' for bounding area mode.
Press '4' for mole marking mode.
Press '0' for auto-mole debug mode.
In 'mole edit' mode:
Click on a point to add or move a mole there and save.
Shift-click on a point to delete it.
Shift-right-click on a point to randomize the uuid.
Alt-Shift-click on a point to copy it's uuid.
Also, press 'end' or '+' when over a point to copy it's uuid.
Alt-Shift-right-click over a point to make it canonical.
Alt-click on a point to paste the copied uuid.
Press 'o' to toggle follow mode.
Press 'm' to toggle move mode.
Press 'i' to 'rotomap identify' in the current image.
Press 'c' to to confirm all moles in the current image.
Press enter to toggle mole markers.
In 'mask edit' mode:
Click on a point to draw masking there.
Shift-click on a point to remove masking there.
Press '<' to decrease the size of the mask tool.
Press '>' to increase the size of the mask tool.
Press '.' to reset the size of the mask tool to the default.
In 'mole marking' mode:
Click on a point to add or move a mole there and save.
Shift-click on a point to delete it.
Press 'a' to accentuate marked moles, for considering removal.
"""
import argparse
import os.path
import numpy
import mel.lib.common
import mel.lib.fs
import mel.lib.fullscreenui
import mel.lib.image
import mel.lib.math
import mel.lib.ui
import mel.rotomap.display
import mel.rotomap.mask
import mel.rotomap.moles
import mel.rotomap.relate
# Radius within which we should look for moles, in later work perhaps we'll
# make this configurable by the user.
_MAGIC_MOLE_FINDER_RADIUS = 50
def setup_parser(parser):
    """Register rotomap-edit's command-line arguments on `parser`."""
    parser.add_argument(
        "ROTOMAP",
        type=mel.rotomap.moles.make_argparse_rotomap_directory,
        nargs="+",
        help="A list of paths to rotomaps.",
    )
    parser.add_argument(
        "--follow",
        type=str,
        default=None,
        help="UUID of a mole to follow, try to jump to it in the first set.",
    )
    parser.add_argument(
        "--copy-to-clipboard",
        action="store_true",
        help="Copy UUID to the clipboard, as well as printing. Mac OSX only.",
    )
    parser.add_argument(
        "--advance-n-frames",
        "--skip",
        type=int,
        metavar="N",
        default=None,
        help="Start with the image with the specified index, instead of 0.",
    )
    parser.add_argument(
        "--visit-list-file",
        # argparse.FileType() opens the file for reading when args are parsed.
        type=argparse.FileType(),
        metavar="PATH",
        help=(
            "Use keys to jump through this list of this form: "
            "'path/to/jpg:hash:optional co-ords'."
        ),
    )
class MoveController:
    """Sub-controller that drags the nearest mole to the clicked point."""

    def __init__(self):
        # Label shown in the editor's status line while this mode is active.
        self.status = "Move mode"

    def on_lbutton_down_noflags(self, editor, mouse_x, mouse_y):
        """Move the closest mole to (mouse_x, mouse_y) and claim the click."""
        editor.move_nearest_mole(mouse_x, mouse_y)
        return True

    def on_key(self, editor, key):
        """Key presses are ignored in move mode."""
class FollowController:
    """Sub-controller for 'follow mode': track one mole across images.

    The followed mole's uuid lives in the shared single-element list
    `mole_uuid_list`, so the owning MoleEditController sees updates too.
    """

    def __init__(self, editor, follow, mole_uuid_list):
        self.mole_uuid_list = mole_uuid_list
        if follow:
            # Jump straight to the requested mole on startup.
            self.mole_uuid_list[0] = follow
            editor.skip_to_mole(self.mole_uuid_list[0])
            editor.follow(self.mole_uuid_list[0])
        self.is_paste_mode = False
        self.update_status()

    def on_lbutton_down_noflags(self, editor, mouse_x, mouse_y):
        # Create/update/delete ("crud") the followed mole at the click
        # point, then re-centre the view on it.  Returning True tells
        # the parent controller the click was consumed.
        editor.crud_mole(self.mole_uuid_list[0], mouse_x, mouse_y)
        editor.follow(self.mole_uuid_list[0])
        return True

    def pre_key(self, editor, key):
        # Snapshot the mole list before the key is handled, so on_key()
        # can guess the followed mole's new position after an image change.
        self._prev_moles = editor.moledata.moles

    def on_key(self, editor, key):
        # Import pygame as late as possible, to avoid displaying its
        # startup-text where it is not actually used.
        import pygame

        arrows = [
            pygame.K_UP,
            pygame.K_DOWN,
            pygame.K_LEFT,
            pygame.K_RIGHT,
        ]

        if key in arrows:
            # The parent controller has already switched image/map by
            # the time we run; try to locate the mole in the new frame.
            update_follow(
                editor,
                self.mole_uuid_list[0],
                self._prev_moles,
                self.is_paste_mode,
            )
        elif key == pygame.K_p:
            # Toggle paste mode: when on, a guessed position is also
            # recorded as a new mole (see update_follow).
            self.is_paste_mode = not self.is_paste_mode
            self.update_status()
            editor.set_status(self.status)
            editor.show_current()

    def update_status(self):
        # Refresh the status-line text from the paste-mode flag.
        if self.is_paste_mode:
            self.status = "follow paste mode"
        else:
            self.status = "follow mode"
class MoleEditController:
    """Controller for 'mole edit' mode (mode 1, the default).

    Dispatches mouse clicks by modifier keys (see the module docstring
    for the user-facing cheat-sheet) and optionally delegates plain
    left-clicks to a sub-controller (follow mode or move mode).
    """

    def __init__(self, editor, follow, copy_to_clipboard):
        # Single-element list so the "copied" uuid is shared by
        # reference with the follow sub-controller.
        self.mole_uuid_list = [None]

        self.follow_controller = FollowController(
            editor, follow, self.mole_uuid_list
        )
        self.move_controller = MoveController()
        # Active sub-controller, or None for plain editing.
        self.sub_controller = None

        # Last seen mouse position, kept for keyboard-driven actions.
        self.mouse_x = 0
        self.mouse_y = 0

        self.copy_to_clipboard = copy_to_clipboard

    def on_mouse_event(self, editor, event):
        # Import pygame as late as possible, to avoid displaying its
        # startup-text where it is not actually used.
        import pygame

        self.mouse_x, self.mouse_y = pygame.mouse.get_pos()
        if event.type == pygame.MOUSEBUTTONDOWN:
            # Button 1 is left-click, button 3 is right-click.
            if event.button == 1:
                self.on_lbutton_down(editor, self.mouse_x, self.mouse_y)
            elif event.button == 3:
                self.on_rbutton_down(editor, self.mouse_x, self.mouse_y)

    def on_lbutton_down(self, editor, mouse_x, mouse_y):
        # Import pygame as late as possible, to avoid displaying its
        # startup-text where it is not actually used.
        import pygame

        key_mods = pygame.key.get_mods()
        if key_mods & pygame.KMOD_ALT:
            if key_mods & pygame.KMOD_SHIFT:
                # Alt-Shift-click: copy the clicked mole's uuid.
                self.mole_uuid_list[0] = editor.get_mole_uuid(mouse_x, mouse_y)
                print(self.mole_uuid_list[0])
                if self.copy_to_clipboard:
                    mel.lib.ui.set_clipboard_contents(self.mole_uuid_list[0])
            else:
                # Alt-click: paste the copied uuid onto the clicked mole.
                editor.set_mole_uuid(mouse_x, mouse_y, self.mole_uuid_list[0])
        elif key_mods & pygame.KMOD_SHIFT:
            # Shift-click: delete the clicked mole.
            editor.remove_mole(mouse_x, mouse_y)
        else:
            # Plain click: let the active sub-controller claim it first.
            if self.sub_controller:
                if self.sub_controller.on_lbutton_down_noflags(
                    editor, mouse_x, mouse_y
                ):
                    return
            editor.add_mole(mouse_x, mouse_y)

    def on_rbutton_down(self, editor, mouse_x, mouse_y):
        # Import pygame as late as possible, to avoid displaying its
        # startup-text where it is not actually used.
        import pygame

        key_mods = pygame.key.get_mods()
        if key_mods & pygame.KMOD_ALT:
            if key_mods & pygame.KMOD_SHIFT:
                # Alt-Shift-right-click: mark the mole as canonical.
                editor.confirm_mole(mouse_x, mouse_y)
        elif key_mods & pygame.KMOD_SHIFT:
            # Shift-right-click: give the mole a fresh random uuid.
            editor.set_mole_uuid(
                mouse_x,
                mouse_y,
                mel.rotomap.moles.make_new_uuid(),
                is_canonical=False,
            )

    def pre_key(self, editor, key):
        # Forward to the sub-controller if it implements pre_key
        # (MoveController does not).
        if self.sub_controller:
            try:
                self.sub_controller.pre_key(editor, key)
            except AttributeError:
                pass

    def on_key(self, editor, key):
        # Import pygame as late as possible, to avoid displaying its
        # startup-text where it is not actually used.
        import pygame

        if key == pygame.K_o:
            # 'o': toggle follow mode (needs a copied uuid to enter).
            is_follow = self.sub_controller is self.follow_controller
            if not is_follow and self.mole_uuid_list[0]:
                self.sub_controller = self.follow_controller
                editor.set_status(self.sub_controller.status)
            else:
                self.sub_controller = None
                editor.set_status("")
            editor.show_current()
        elif key == pygame.K_m:
            # 'm': toggle move mode.
            if not self.sub_controller == self.move_controller:
                self.sub_controller = self.move_controller
                editor.set_status(self.sub_controller.status)
            else:
                self.sub_controller = None
                editor.set_status("")
            editor.show_current()
        elif key == pygame.K_f:
            editor.toggle_faded_markers()
        elif key == pygame.K_RETURN:
            editor.toggle_markers()
        elif key == pygame.K_PLUS:
            # '+': copy the uuid of the mole under the last mouse position.
            self.mole_uuid_list[0] = editor.get_mole_uuid(
                self.mouse_x, self.mouse_y
            )
            print(self.mole_uuid_list[0])
            if self.copy_to_clipboard:
                mel.lib.ui.set_clipboard_contents(self.mole_uuid_list[0])
        elif key == pygame.K_i:
            # Auto-identify
            #
            # Import mel.rotomap.identifynn as late as possible, because it has
            # some expensive dependencies.
            import mel.rotomap.identifynn

            identifier = mel.rotomap.identifynn.make_identifier()
            target = editor.moledata.current_image_path()
            frame = mel.rotomap.moles.RotomapFrame(os.path.abspath(target))
            new_moles = identifier.get_new_moles(frame)
            mel.rotomap.moles.save_image_moles(new_moles, str(frame.path))
            # Pick up the freshly-saved moles and redraw.
            editor.moledata.reload()
            editor.show_current()
        elif key == pygame.K_c:
            editor.confirm_all()

        # Finally, give the sub-controller its turn if it handles keys.
        if self.sub_controller:
            try:
                sub_controller_onkey = self.sub_controller.on_key
            except AttributeError:
                pass
            else:
                sub_controller_onkey(editor, key)
class MaskEditController:
    """Controller for 'mask edit' mode (mode 2): paint/erase the mask.

    Dragging with the left button paints mask; holding shift erases.
    '<' / '>' resize the mask tool, '.' resets it (see module docstring).
    """

    def __init__(self):
        pass

    def on_mouse_event(self, editor, event):
        # Import pygame as late as possible, to avoid displaying its
        # startup-text where it is not actually used.
        import pygame

        key_mods = pygame.key.get_mods()
        # Shift switches from painting mask to erasing it.
        enable = not (key_mods & pygame.KMOD_SHIFT)
        mouse_x, mouse_y = pygame.mouse.get_pos()
        # Paint while dragging, not just on the initial press.
        if event.type in (pygame.MOUSEBUTTONDOWN, pygame.MOUSEMOTION):
            # is_mouse_button_pressed = pygame.mouse.get_pressed(num_buttons=3)
            is_mouse_button_pressed = pygame.mouse.get_pressed()
            if is_mouse_button_pressed[0]:
                editor.set_mask(mouse_x, mouse_y, enable)

    def pre_key(self, editor, key):
        pass

    def on_key(self, editor, key):
        # Import pygame as late as possible, to avoid displaying its
        # startup-text where it is not actually used.
        import pygame

        key_mods = pygame.key.get_mods()
        shift = key_mods & pygame.KMOD_SHIFT
        if shift:
            # Shift+comma is '<' (smaller), shift+period is '>' (larger).
            if key == pygame.K_COMMA:
                editor.set_smaller_masker()
            elif key == pygame.K_PERIOD:
                editor.set_larger_masker()
        elif key == pygame.K_PERIOD:
            # Bare '.' resets the tool to the default size.
            editor.set_default_masker()
class MoleMarkController:
    """Controller for 'mole marking' mode (mode 4).

    Left-click adds/removes moles; with alt it re-labels the nearest
    mole's "kind".  Right-click sets the kind/looks_like pair directly:
    plain = mole/mole, shift = non-mole/non-mole, alt marks looks_like
    as "unsure".  Changes are saved immediately.
    """

    def __init__(self):
        pass

    def on_mouse_event(self, editor, event):
        # Import pygame as late as possible, to avoid displaying its
        # startup-text where it is not actually used.
        import pygame

        if event.type != pygame.MOUSEBUTTONDOWN:
            return

        key_mods = pygame.key.get_mods()
        mouse_x, mouse_y = pygame.mouse.get_pos()

        if event.button == 1:
            if key_mods & pygame.KMOD_SHIFT:
                if key_mods & pygame.KMOD_ALT:
                    # Alt-Shift-click: actually a non-mole that looks
                    # like a mole.
                    nearest_mole = editor.get_nearest_mole(mouse_x, mouse_y)
                    if nearest_mole is not None:
                        nearest_mole["kind"] = "non-mole"
                        nearest_mole["looks_like"] = "mole"
                        editor.moledata.save_moles()
                        editor.show_current()
                else:
                    # Shift-click: delete the mark.
                    editor.remove_mole(mouse_x, mouse_y)
            else:
                if key_mods & pygame.KMOD_ALT:
                    # Alt-click: a real mole that looks like a non-mole.
                    nearest_mole = editor.get_nearest_mole(mouse_x, mouse_y)
                    if nearest_mole is not None:
                        nearest_mole["kind"] = "mole"
                        nearest_mole["looks_like"] = "non-mole"
                        editor.moledata.save_moles()
                        editor.show_current()
                else:
                    # Plain click: add or move a mark here.
                    editor.add_mole(mouse_x, mouse_y)
        elif event.button == 3:
            # Right-click: set kind/looks_like on the nearest mark.
            nearest_mole = editor.get_nearest_mole(mouse_x, mouse_y)
            if nearest_mole is not None:
                if key_mods & pygame.KMOD_ALT:
                    # Alt: appearance is "unsure"; shift picks the kind.
                    if key_mods & pygame.KMOD_SHIFT:
                        nearest_mole["kind"] = "non-mole"
                        nearest_mole["looks_like"] = "unsure"
                    else:
                        nearest_mole["kind"] = "mole"
                        nearest_mole["looks_like"] = "unsure"
                else:
                    # No alt: kind and appearance agree; shift = non-mole.
                    if key_mods & pygame.KMOD_SHIFT:
                        nearest_mole["kind"] = "non-mole"
                        nearest_mole["looks_like"] = "non-mole"
                    else:
                        nearest_mole["kind"] = "mole"
                        nearest_mole["looks_like"] = "mole"
                editor.moledata.save_moles()
                editor.show_current()

    def pre_key(self, editor, key):
        pass

    def on_key(self, editor, key):
        # Import pygame as late as possible, to avoid displaying its
        # startup-text where it is not actually used.
        import pygame

        if key == pygame.K_a:
            # 'a': toggle accentuation of marked moles (for reviewing
            # candidates for removal).
            is_alt = editor.marked_mole_overlay.is_accentuate_marked_mode
            editor.marked_mole_overlay.is_accentuate_marked_mode = not is_alt
            editor.show_current()
class BoundingAreaController:
    """Placeholder controller: bounding-area mode has no interactions yet."""

    def __init__(self):
        """Nothing to initialise."""

    def on_mouse_event(self, editor, event):
        """Mouse input is ignored in this mode."""

    def pre_key(self, editor, key):
        """No per-key preparation is needed."""

    def on_key(self, editor, key):
        """Key input is ignored in this mode."""
class AutomoleDebugController:
    """Placeholder controller: auto-mole debug mode only changes the view."""

    def __init__(self):
        """Nothing to initialise."""

    def on_mouse_event(self, editor, event):
        """Mouse input is ignored in this mode."""

    def pre_key(self, editor, key):
        """No per-key preparation is needed."""

    def on_key(self, editor, key):
        """Key input is ignored in this mode."""
class VisitList:
    """A cyclic cursor over a list of locations to visit.

    back()/forward() step the cursor with wrap-around and return the
    newly-current item; truthiness reflects whether any items exist.
    """

    def __init__(self, items):
        self._items = items
        self._index = 0

    def back(self):
        """Step to the previous item, wrapping around, and return it."""
        self._index = (self._index - 1) % len(self._items)
        return self.current()

    def forward(self):
        """Step to the next item, wrapping around, and return it."""
        self._index = (self._index + 1) % len(self._items)
        return self.current()

    def current(self):
        """Return the item under the cursor."""
        return self._items[self._index]

    def __bool__(self):
        """True when the list holds at least one item."""
        return len(self._items) > 0
class Controller:
    """Top-level controller: owns the per-mode sub-controllers.

    Handles mode switching (keys 0-4), image/map navigation, zoom, and
    the visit list; everything else is forwarded to the controller for
    the current mode.
    """

    def __init__(self, editor, follow, copy_to_clipboard, visit_list, logger):
        self._visit_list = VisitList(visit_list)
        self._logger = logger
        self._melroot = mel.lib.fs.find_melroot()

        self.moleedit_controller = MoleEditController(
            editor, follow, copy_to_clipboard
        )
        self.maskedit_controller = MaskEditController()
        self.molemark_controller = MoleMarkController()
        self.boundingarea_controller = BoundingAreaController()
        self.automoledebug_controller = AutomoleDebugController()

        # Start in mole-edit mode, matching the module docstring.
        self.current_controller = self.moleedit_controller
        logger.reset(mode="editmole")

        # Zoom levels cycled by 'z' (forwards) and 'x' (backwards).
        self.zooms = [1.0, 0.75, 0.5, 0.25, 2.0, 1.75, 1.5]
        self.zoom_index = 0

        self._reset_logger_new_image(editor)

    def on_mouse_event(self, editor, event):
        # Import pygame as late as possible, to avoid displaying its
        # startup-text where it is not actually used.
        import pygame

        if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
            key_mods = pygame.key.get_mods()
            # Ctrl-click zooms in on the clicked point in every mode.
            if key_mods & pygame.KMOD_CTRL:
                mouse_x, mouse_y = pygame.mouse.get_pos()
                editor.show_zoomed(mouse_x, mouse_y)
                return
        self.current_controller.on_mouse_event(editor, event)

    def _reset_logger_new_image(self, editor):
        # Record the current image (relative to the mel root) on the
        # time-logger so log entries identify what was being edited.
        self._logger.reset(
            path=os.path.relpath(
                os.path.abspath(editor.moledata.image_path),
                start=self._melroot,
            )
        )

    def on_key(self, editor, key):
        # Import pygame as late as possible, to avoid displaying its
        # startup-text where it is not actually used.
        import pygame

        # Give the current mode a chance to snapshot state first.
        self.current_controller.pre_key(editor, key)

        if key == pygame.K_LEFT:
            editor.show_prev()
            self._reset_logger_new_image(editor)
        elif key == pygame.K_RIGHT:
            editor.show_next()
            self._reset_logger_new_image(editor)
        elif key == pygame.K_UP:
            editor.show_prev_map()
            self._reset_logger_new_image(editor)
        elif key == pygame.K_DOWN:
            editor.show_next_map()
            self._reset_logger_new_image(editor)
        elif key == pygame.K_SPACE:
            # Restore the original (fitted) zoom.
            editor.show_fitted()
        elif key == pygame.K_0:
            # Switch to automole debug mode
            self.current_controller = self.automoledebug_controller
            editor.set_automoledebug_mode()
            self._logger.reset(mode="debug")
        elif key == pygame.K_1:
            # Switch to mole edit mode
            self.current_controller = self.moleedit_controller
            editor.set_editmole_mode()
            self._logger.reset(mode="editmole")
        elif key == pygame.K_2:
            # Switch to mask edit mode
            self.current_controller = self.maskedit_controller
            editor.set_editmask_mode()
            self._logger.reset(mode="editmask")
        elif key == pygame.K_3:
            # Switch to bounding area mode
            self.current_controller = self.boundingarea_controller
            editor.set_boundingarea_mode()
            self._logger.reset(mode="boundingarea")
        elif key == pygame.K_4:
            # Switch to mole marking mode
            self.current_controller = self.molemark_controller
            self._logger.reset(mode="molemark")
            editor.set_molemark_mode()
        elif key == pygame.K_b:
            # Go back in the visit list
            if self._visit_list:
                to_visit = self._visit_list.back()
                editor.visit(to_visit)
        elif key == pygame.K_n:
            # Go to the next in the visit list
            if self._visit_list:
                to_visit = self._visit_list.forward()
                editor.visit(to_visit)
        elif key == pygame.K_z:
            # Cycle forwards through the zoom levels.
            self.zoom_index += 1
            self.zoom_index %= len(self.zooms)
            zoom = self.zooms[self.zoom_index]
            editor.set_status(f"Zoom {zoom}")
            editor.set_zoom_level(zoom)
        elif key == pygame.K_x:
            # Cycle backwards (adding len-1 avoids a negative index).
            self.zoom_index += len(self.zooms) - 1
            self.zoom_index %= len(self.zooms)
            zoom = self.zooms[self.zoom_index]
            editor.set_status(f"Zoom {zoom}")
            editor.set_zoom_level(zoom)

        self.current_controller.on_key(editor, key)
def process_args(args):
    """Entry point: run the editor UI until the user quits.

    Builds the editor and controller from the parsed command-line
    arguments, then pumps pygame events, dispatching key and mouse
    events to the controller.
    """
    # Import pygame as late as possible, to avoid displaying its
    # startup-text where it is not actually used.
    import pygame

    visit_list = []
    if args.visit_list_file:
        visit_list = args.visit_list_file.read().splitlines()

    with mel.lib.common.timelogger_context("rotomap-edit") as logger:
        with mel.lib.fullscreenui.fullscreen_context() as screen:
            editor = mel.rotomap.display.Editor(args.ROTOMAP, screen)

            if args.advance_n_frames:
                editor.show_next_n(args.advance_n_frames)

            controller = Controller(
                editor, args.follow, args.copy_to_clipboard, visit_list, logger
            )

            for event in mel.lib.fullscreenui.yield_events_until_quit(screen):
                if event.type == pygame.KEYDOWN:
                    controller.on_key(editor, event.key)
                elif event.type in (
                    pygame.MOUSEBUTTONDOWN,
                    pygame.MOUSEMOTION,
                ):
                    controller.on_mouse_event(editor, event)
def update_follow(editor, follow_uuid, prev_moles, is_paste_mode):
    """Re-locate the followed mole after the displayed image changed.

    If the mole is absent from the new frame, guess its position from
    `prev_moles`, refine the guess with ellipse detection, and zoom the
    display to it.  In paste mode the guessed position is also recorded
    as a new mole.  Returns the guessed position, or None when the mole
    was found (or no guess could be made).
    """
    guess_pos = None
    editor.follow(follow_uuid)

    if (
        mel.rotomap.moles.uuid_mole_index(editor.moledata.moles, follow_uuid)
        is None
    ):
        guess_pos = mel.rotomap.relate.guess_mole_pos(
            follow_uuid, prev_moles, editor.moledata.moles
        )
        if guess_pos is not None:
            # Refine the guess by looking for a mole-like ellipse near it.
            # NOTE(review): mel.lib.moleimaging is not among this module's
            # visible imports — confirm it is pulled in transitively.
            ellipse = mel.lib.moleimaging.find_mole_ellipse(
                editor.moledata.get_image().copy(),
                guess_pos,
                _MAGIC_MOLE_FINDER_RADIUS,
            )
            if ellipse is not None:
                guess_pos = numpy.array(ellipse[0], dtype=int)
            editor.show_zoomed_display(guess_pos[0], guess_pos[1])
            if is_paste_mode:
                editor.add_mole_display(
                    guess_pos[0], guess_pos[1], follow_uuid
                )

    return guess_pos
# -----------------------------------------------------------------------------
# Copyright (C) 2015-2018 Angelos Evripiotis.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| aevri/mel | mel/cmd/rotomapedit.py | rotomapedit.py | py | 22,318 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "mel.lib.common.rotomap",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "mel.lib.common",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "argparse.FileType",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "pygame.... |
28632705196 | import time
import traceback
import functools
from functools import update_wrapper
from flask import request, make_response, current_app
from datetime import timedelta
from densefog import config
from densefog import logger
from densefog.common import jsonable
from densefog.common import local
from densefog.error_code import *
class HandleError(Exception):
    """Error raised by request handlers to report an API-level failure.

    `message` becomes the response message, `ret_code` the API retCode
    and `data` (optional) the extra payload for the response body.
    """

    def __init__(self, message, ret_code=None, data=None):
        # Also pass the message to Exception so exc.args is populated;
        # the original left args empty, which breaks generic logging and
        # pickling of the exception.  Py2-style super() call kept because
        # this module still uses unicode/basestring.
        super(HandleError, self).__init__(message)
        self.message = message
        self.ret_code = ret_code
        self.data = data

    def __unicode__(self):
        return unicode(self.message)

    def __str__(self):
        return str(self.message)

    def __repr__(self):
        return repr(self.message)
def handle(method):
    """Decorator wrapping a handler into the standard JSON envelope.

    Runs `method`, then responds with {'data', 'retCode', 'message'} as
    JSON.  HandleError maps to its own retCode/message; any other
    exception becomes ErrCodeServerError with a generic message.  The
    HTTP status is always 200 — clients must inspect retCode.
    """
    @functools.wraps(method)
    def wrap(*args, **kwargs):
        # Per-request logging context; its id doubles as the tag handed
        # back to clients for correlating error reports.
        local.start_context()
        logger.info('Begin handle request.')
        start = time.time()
        try:
            data = method(*args, **kwargs)
            ret_code = 0
            message = None
        except HandleError as ex:
            logger.error('Getting HandleError on grand.handle.')
            data = ex.data or {}
            ret_code = ex.ret_code
            message = ex.message
            data['exceptionTag'] = local.get_context_id()
        except Exception as ex:
            # normally we should not come to here. lower api should
            # handle every exception.
            stack = traceback.format_exc()
            logger.stacktrace(stack)
            logger.error('Getting Exception on grand.handle.')
            data = {}
            # Only leak the traceback to the client in debug builds.
            if config.CONF.debug:
                data['exceptionStr'] = stack
            data['exceptionTag'] = local.get_context_id()
            ret_code = ErrCodeServerError
            message = 'An error occurred while processing your request.'

        ret = {
            'data': data,
            'retCode': ret_code,
            'message': message
        }
        try:
            # '?pretty' in the query string requests indented JSON.
            if 'pretty' in request.args:
                json_data = jsonable.dumps(ret,
                                           indent=4,
                                           sort_keys=True,
                                           str=True)
            else:
                json_data = jsonable.dumps(ret, str=True)
        except Exception as ex:
            # Serialisation itself failed; fall back to a minimal
            # retCode-5000 envelope so the client still gets valid JSON.
            stack = traceback.format_exc()
            logger.stacktrace(stack)
            logger.error('jsonable.dumps catch exception: %s' % ex)
            data = {}
            if config.CONF.debug:
                data['exceptionStr'] = stack
            data['exceptionTag'] = local.get_context_id()
            json_data = jsonable.dumps({
                'data': data,
                'retCode': 5000,
                'message': message
            }, str=True)

        # we always response 200 HTTP code... we use different retCode.
        status_code = 200
        response = make_response(json_data, status_code)
        response.headers['Content-Type'] = 'application/json; charset=utf-8'

        # Access-log line: addr, retCode, HTTP status, method, path, cost.
        cost = time.time() - start
        message = '%s\t%s\t%s\t%s\t%s\t%s' % (request.remote_addr,
                                              ret_code,
                                              status_code,
                                              request.method,
                                              request.path,
                                              cost)
        logger.info(message)
        logger.info('End handle request. end context.')
        local.clear_context()
        return response
    return wrap
def crossdomain(origin=None, methods=None, headers=None,
                max_age=21600, attach_to_all=True,
                automatic_options=True):
    """Decorator factory adding CORS headers to a Flask view.

    `origin`/`methods`/`headers` may be a string or an iterable of
    strings (iterables are joined into a comma-separated header value).
    With `automatic_options`, OPTIONS preflights are answered with
    Flask's default options response; `attach_to_all` controls whether
    non-OPTIONS responses also get the headers.
    NOTE(review): uses Py2-only `basestring` — this module targets
    Python 2.
    """
    if methods is not None:
        methods = ', '.join(sorted(x.upper() for x in methods))
    if headers is not None and not isinstance(headers, basestring):
        headers = ', '.join(x.upper() for x in headers)
    if not isinstance(origin, basestring):
        origin = ', '.join(origin)
    if isinstance(max_age, timedelta):
        max_age = max_age.total_seconds()

    def get_methods():
        # Prefer the explicitly-configured methods; otherwise ask Flask
        # what it would allow for this endpoint.
        if methods is not None:
            return methods
        options_resp = current_app.make_default_options_response()
        return options_resp.headers['allow']

    def decorator(f):
        def wrapped_function(*args, **kwargs):
            if automatic_options and request.method == 'OPTIONS':
                resp = current_app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))
            if not attach_to_all and request.method != 'OPTIONS':
                return resp

            h = resp.headers
            h['Access-Control-Allow-Origin'] = origin
            h['Access-Control-Allow-Methods'] = get_methods()
            h['Access-Control-Max-Age'] = str(max_age)
            h['Access-Control-Allow-Credentials'] = 'true'
            # Default allow-list, overridden below when `headers` is set.
            h['Access-Control-Allow-Headers'] = \
                "Origin, X-Requested-With, Content-Type, Accept, Authorization"
            if headers is not None:
                h['Access-Control-Allow-Headers'] = headers
            return resp

        # Tell Flask not to add its own automatic OPTIONS handling.
        f.provide_automatic_options = False
        return update_wrapper(wrapped_function, f)
    return decorator
| hashipod/densefog | densefog/web/grand.py | grand.py | py | 5,280 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "densefog.common.local.start_context",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "densefog.common.local",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "densefog.logger.info",
"line_number": 37,
"usage_type": "call"
},
{
"ap... |
69928483943 | print('Loading...')
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator, MultipleLocator, AutoMinorLocator
from matplotlib.axes import Axes
import pandas as pd
import numpy as np
import time
import math
import seaborn as sns
from IPython.display import clear_output, display, IFrame
from chord import Chord
# UI
import ipywidgets as widgets
from tqdm import tnrange, tqdm_notebook
# other fuctions
from functions import get_discogs
from functions import get_lyrics
from functions import plot_wordcloud
from functions import sentiment_analysis
from functions import advanced_analytics
# word processing
import re
import os
import string
import nltk
nltk.download('punkt')
from nltk.corpus import stopwords
nltk.download('stopwords')
stop_words = stopwords.words('english')
clear_output()
import warnings
warnings.filterwarnings('ignore')
#-------------------------------------------------------------------------------
# GLOBAL VARIABLES
# default selected tab in the UI
selected_section = 0
selection_tab_of_section_1 = 0
selection_tab_of_section_2 = 0
selection_tab_of_section_3 = 0
selection_tab_of_section_4 = 0
selection_tab_of_section_5 = 0
# global var for discog
discog = ''
discog_store = []
#global var for artist
artist = ''
colour_palette = {'blue': '#4878D0',
'orange': '#EE854A',
'purple': '#956CB4',
'red': '#D9363E',
'green': '#6ACC64',
'purple': '#956CB4',
'brown': '#8C613C',
'pink': '#DC7EC0',
'light grey': '#c4bba5',
'grey': '#797979',
'tan': '#D5BB67'}
descriptions_colour = colour_palette.get('grey')
#plt.gca()
plt.style.use('seaborn')
def no_selections_warning():
    """Show a red warning that no artist/discography is selected, reset the
    section state, and redraw the UI (UI() is defined elsewhere in the file)."""
    # clear previous output
    clear_output()
    display(
        widgets.HTML(
            value=f'''
            <b><font color="red">
            No artist/discography selected. Make sure selections
            are made before running visualisations.
            </b>''',
            layout=widgets.Layout(width="100%"))
    )
    global selected_section
    selected_section = 0
    # set selected tab
    # jump to the album-selection sub-tab only once an artist is known
    global selection_tab_of_section_1
    selection_tab_of_section_1 = 0 if artist == '' else 1
    # display UI
    UI()
#-------------------------------------------------------------------------------
# UI SECTION 1 variables and functions
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# SECTION 1 | TAB 1 variables and functions
#global widget to type artist name
artist_input = widgets.Text()  # free-text box the user types the artist name into
# define function to overwrite artist from user input
def set_artist(x):
    """Overwrite the module-level `artist` with `x`."""
    global artist
    artist = x
# function to adapt long texts for display in charts
def adapt_title(text):
    """Wrap `text` into lines of at most 21 characters for chart labels.

    Each 21-character slice is broken at its last space when one exists;
    the remainder continues on the next line. A 21+-character run with no
    space is hard-broken at the slice boundary.
    (Fixes a bug where ``find(' ')`` returning -1 for a space-less slice
    caused all but the first character of the slice to be dropped.)

    Returns the wrapped text joined with newline characters.
    """
    adapted_text = []
    while len(text) > 0:
        text_slice = text[:21]
        if text_slice == text:
            # the remainder fits on a single line
            last_char = 0
        else:
            # distance from the end of the slice back to its last space
            last_char = text_slice[::-1].find(' ')
            if last_char == -1:
                # no space in the slice: hard-break at the 21-char boundary
                last_char = 0
        adapted_text.append(
            text_slice[:None if last_char == 0 else -last_char]
        )
        text = text[21 - last_char:]
    return '\n'.join(adapted_text)
# function to run at click of the button
def get_discography(x):
    """Button callback: resolve the typed name to the best Discogs artist
    match, load that artist's discography (from the 'discog_store.csv'
    cache when available, otherwise the Discogs API), derive the
    YEAR_ALBUM display columns, and redraw the UI."""
    #clear previous output and print message
    clear_output()
    print("Looking for best match for '" + str(artist_input.value) +"'")
    time.sleep(2)
    # if already exists, load data from discog_store.csv
    global discog_store
    try:
        discog_store = pd.read_csv('discog_store.csv')
    except:
        pass
    # try overwrite artist with artist found as best match from DISCOGS
    global artist
    try:
        set_artist(
            get_discogs.getArtistID(artist_input.value)[1]
        )
    # if no match found from discogs, stop and print a message
    except:
        display(
            widgets.HTML(
                value=f'''<b><font color="red">
                No match found,\
                please try again
                </b>''',
                layout=widgets.Layout(width="100%"))
        )
        #print(widgets"No match found, please try again")
        return UI()
    # if match found, print the corrected artist name
    print("Retrieving discography for '" + artist + "'")
    time.sleep(1)
    # get discogs - check if in csv, else use DISCOGS API
    global discog
    if len(discog_store) == 0:
        discog = get_discogs.getArtistData(artist)
    elif artist in discog_store['ARTIST_NAME'].unique():
        discog = discog_store[discog_store['ARTIST_NAME'] == artist].copy()
    else:
        discog = get_discogs.getArtistData(artist)
    #-------------------------------------------------------------------------------
    # label used throughout the charts, e.g. "[1994] Album Title"
    discog['YEAR_ALBUM'] = "[" + discog['YEAR'].astype(str) + "] " \
                            + discog['ALBUM']
    # look for duplicates and overwrite their names (issue #51)
    # distinct releases sharing year+title get " (1)", " (2)", ... suffixes
    for album in discog['YEAR_ALBUM']:
        unique_album_ids = discog[discog['YEAR_ALBUM'] == album]\
                           ['DISCOGS_ALBUM_ID'].unique().tolist()
        if len(unique_album_ids) > 1:
            counter = 1
            for album_id in unique_album_ids:
                discog.loc[discog['DISCOGS_ALBUM_ID'] == album_id, 'YEAR_ALBUM'] = \
                discog[discog['DISCOGS_ALBUM_ID'] == album_id]\
                ['YEAR_ALBUM'] + " (" + str(counter) + ")"
                counter += 1
    # make version for displaying in charts (with line breaks)
    discog['YEAR_ALBUM_DISPLAY'] = discog['YEAR_ALBUM'].astype(str).apply(adapt_title)
    # clear previous output
    clear_output()
    # set selected section
    global selected_section
    selected_section = 0
    # set selected tab
    global selection_tab_of_section_1
    selection_tab_of_section_1 = 1
    # set album selector content
    # overwrite album_filter with current selection
    global album_filter
    set_album_filter(
        discog[discog['EXCLUDE_ALBUM'] != True]['YEAR_ALBUM'].unique().tolist()
    )
    global album_selector
    set_album_selector(
        discog['YEAR_ALBUM'].unique().tolist(),
        album_filter
    )
    # display UI
    UI()
#-------------------------------------------------------------------------------
# SECTION 1 | TAB 2 variables and functions
#create album_filter
# currently selected albums (list of 'YEAR_ALBUM' strings)
album_filter = []
# func to set album filter
def set_album_filter(x):
    """Overwrite the module-level `album_filter` with `x`."""
    global album_filter
    album_filter = x
# discography restricted to the current artist/album selection
discog_filtered = []
options = []
def multi_checkbox_widget(albums, albums_filter):
    """Build a scrollable VBox with one checkbox per album; entries listed
    in `albums_filter` start ticked."""
    checkbox_layout = {'margin' : '-2px',
                       'width' : 'initial'}
    # keep a dict keyed by album name, then materialise in input order
    options_dict = {album: widgets.Checkbox(description=album,
                                            value=False,
                                            layout=checkbox_layout)
                    for album in albums}
    options = [options_dict[album] for album in albums]
    # tick whatever the caller pre-selected
    for option in options:
        option.value = option.description in albums_filter
    scroll_box = widgets.VBox(options,
                              layout = {'overflow': 'scroll',
                                        'max_height': '300px',
                                        'width' : 'initial'})
    return widgets.VBox([scroll_box], layout=widgets.Layout(padding = "20px"))
def set_album_selector(options, options_filter):
    """Rebuild the module-level `album_selector` checkbox widget from
    `options`, pre-ticking the entries in `options_filter`."""
    global album_selector
    album_selector = multi_checkbox_widget(options,options_filter)
# start with an empty selector; repopulated once a discography is loaded
album_selector = multi_checkbox_widget([],[])
import time  # NOTE(review): redundant — `time` is already imported at the top of the file
def select_deselect_all(x):
    """Toggle the whole album list: if the first checkbox is ticked, clear
    every selection; otherwise tick every album. Then redraw the UI."""
    #clear previous output
    clear_output()
    global album_selector
    # the first checkbox's state decides the direction of the toggle
    if album_selector.children[0].children[0].value == True:
        set_album_selector(discog['YEAR_ALBUM'].unique().tolist(), [])
    else:
        set_album_selector(discog['YEAR_ALBUM'].unique().tolist(),
                           discog['YEAR_ALBUM'].unique().tolist())
    # select this tab
    global selected_section
    selected_section = 0
    # select sub tab
    global selection_tab_of_section_1
    selection_tab_of_section_1 = 1
    # display UI
    UI()
def apply_selection(x):
    """Button callback: persist the user's album selection, fetch any
    missing lyrics, run sentiment analysis, derive the cleaned-lyrics
    columns, and rebuild `discog_filtered` before redrawing the UI.

    Reads/writes the module-level `discog`, `discog_store`, `album_filter`,
    `album_selector`, `discog_filtered` and `sentiment_dropdown2` globals,
    and persists the store to 'discog_store.csv'.
    """
    global discog
    global discog_store
    # reload the persisted store if present; a missing file is fine
    try:
        discog_store = pd.read_csv('discog_store.csv')
    except:
        pass
    #apply user selections, overwrite album_filter with current selection
    global album_selector
    global album_filter
    selected = []
    # read user input
    for album in album_selector.children[0].children:
        # overwrite EXCLUDE_ALBUM flag
        discog.loc[discog['YEAR_ALBUM'] == album.description,
                   'EXCLUDE_ALBUM'] = not album.value
        #make a list of selected albums
        if album.value == True:
            selected.append(album.description)
    # set filter = list of selected albums
    set_album_filter(selected)
    # reset selector to keep the actual selections
    set_album_selector(discog['YEAR_ALBUM'].unique().tolist(), selected)
    #clear previous output
    clear_output()
    # overwrite .CSV and update flags
    if len(discog_store) == 0:
        discog_store = discog.copy(deep=True)
    else:
        if artist not in discog_store['ARTIST_NAME'].unique():
            # NOTE(review): DataFrame.append is deprecated in newer pandas;
            # pd.concat would be the drop-in replacement
            discog_store = discog_store.append(discog,
                                               ignore_index=True,
                                               sort=False)
        else:
            for i,r in discog.iterrows():
                # overwrite EXCLUDE_ALBUM flag
                discog_store.loc[discog_store['YEAR_ALBUM'] == r['YEAR_ALBUM'],
                                 'EXCLUDE_ALBUM'] = r['EXCLUDE_ALBUM']
    # first write-back; abort with a warning if the CSV is locked (e.g. open in Excel)
    try:
        discog_store.to_csv('discog_store.csv', index = False)
    except PermissionError:
        clear_output()
        display(
            widgets.HTML(
                value=f'''<b><font color="red">
                Permission denied: \
                make sure 'discog_store.csv' is closed and try again
                </b>''',
                layout=widgets.Layout(width="100%")
            )
        )
        return UI()
    #get lyrics
    print('Getting the lyrics, please hold on')
    # if there are any albums and songs to be included
    if len(discog_store[(discog_store['ARTIST_NAME'] == artist)
                        & (discog_store["EXCLUDE_ALBUM"] == False)
                        & (discog_store["EXCLUDE_SONG"] == False)]) > 0:
        # and LYRICS column does not exist yet
        if "LYRICS" not in discog_store.columns:
            # get lyrics for all records of the artist (flags considered)
            lyrics_data = get_lyrics.getLyrics(
                discog_store[discog_store['ARTIST_NAME'] == artist])
        # otherwise if there are any lyrics in current selection that are empty
        elif len(discog_store[(discog_store['ARTIST_NAME'] == artist)
                              & (discog_store["EXCLUDE_ALBUM"] == False)
                              & (discog_store["EXCLUDE_SONG"] == False)
                              &(~discog_store['LYRICS'].notnull())]) > 0:
            # look only for lyrics that have not yet been collected
            lyrics_data = get_lyrics.getLyrics(
                discog_store[(discog_store['ARTIST_NAME'] == artist) \
                            &(~discog_store['LYRICS'].notnull())])
        else:
            lyrics_data = []
    else:
        lyrics_data = []
    # if any new lyrics were retrieved
    if len(lyrics_data) != 0:
        # do sentiment analysis
        print('Analyzing lyric sentiment...')
        sentiment_data = sentiment_analysis.sentimentAnalyser(
            lyrics_data, artist
        )
        print('Processing and cleaning, please hold on')
        # iterate through results and populate the columns in main discog store
        # add lyrics
        for i,r in lyrics_data.iterrows():
            discog_store.loc[
                (discog_store['ARTIST_NAME'] == r['ARTIST_NAME']) &
                (discog_store['TRACK_TITLE'] == r['TRACK_TITLE']),
                "LYRICS"] = r['LYRICS']
        # add sentiment data (matched on artist + track title)
        for i,r in sentiment_data.iterrows():
            discog_store.loc[
                (discog_store['ARTIST_NAME'] == r['ARTIST_NAME']) &
                (discog_store['TRACK_TITLE'] == r['TRACK_TITLE']),
                "SENTIMENT_PCT_NEGATIVE"] = r['SENTIMENT_PCT_NEGATIVE']
            discog_store.loc[
                (discog_store['ARTIST_NAME'] == r['ARTIST_NAME']) &
                (discog_store['TRACK_TITLE'] == r['TRACK_TITLE']),
                "SENTIMENT_PCT_NEUTRAL"] = r['SENTIMENT_PCT_NEUTRAL']
            discog_store.loc[
                (discog_store['ARTIST_NAME'] == r['ARTIST_NAME']) &
                (discog_store['TRACK_TITLE'] == r['TRACK_TITLE']),
                "SENTIMENT_PCT_POSITIVE"] = r['SENTIMENT_PCT_POSITIVE']
            discog_store.loc[
                (discog_store['ARTIST_NAME'] == r['ARTIST_NAME']) &
                (discog_store['TRACK_TITLE'] == r['TRACK_TITLE']),
                "SENTIMENT_COMPOUND_SCORE"] = r['SENTIMENT_COMPOUND_SCORE']
            discog_store.loc[
                (discog_store['ARTIST_NAME'] == r['ARTIST_NAME']) &
                (discog_store['TRACK_TITLE'] == r['TRACK_TITLE']),
                "SENTIMENT_GROUP"] = r['SENTIMENT_GROUP']
        # add column with lyrics with removed stopwords
        discog_store["LYRICS_CLEAN"] = discog_store['LYRICS'].astype(str).apply(
            lambda x: ' '.join(list(word for word in re.findall(
                r"[\w]+|[^\s\w]",x.lower()
            ) if word not in stop_words
        )))
        # remove punctuation
        discog_store["LYRICS_CLEAN"] = discog_store[
            'LYRICS_CLEAN'
        ].str.replace('[^\w\s] ','')
        #list unique clean words
        discog_store["LYRICS_CLEAN_UNIQUE"] = discog_store[
            'LYRICS_CLEAN'
        ].astype(str).apply(lambda x: list(set(x.split())))
        #count unique clean words
        discog_store["LYRICS_CLEAN_UNIQUE_COUNT"] = discog_store[
            'LYRICS_CLEAN_UNIQUE'
        ].apply(lambda x: len(x))
    #-------------------------------------------------------------------------------
    # write the updated content
    try:
        discog_store.to_csv('discog_store.csv', index = False)
    except PermissionError:
        clear_output()
        display(
            widgets.HTML(
                value=f'''<b><font color="red">
                Permission denied: \
                make sure 'discog_store.csv' is closed and try again
                </b>''',
                layout=widgets.Layout(width="100%")
            )
        )
        return UI()
    clear_output()
    # select tab
    global selected_section
    selected_section = 0
    # select sub tab
    global selection_tab_of_section_1
    selection_tab_of_section_1 = 2
    #update discog_filtered variable
    ### read latest saved discog_store (reformats datatypes)
    discog_store = pd.read_csv('discog_store.csv')
    ### overwrite discog_filtered
    global discog_filtered
    discog_filtered = discog_store[
        (discog_store['ARTIST_NAME']==artist)\
        &(discog_store['YEAR_ALBUM'].isin(album_filter))
    ].copy()
    if len(discog_filtered) == 0:
        no_selections_warning()
    else:
        # populate sentiment chart dropdown with selected albums
        global sentiment_dropdown2
        sentiment_dropdown2 = widgets.Dropdown(
            options=discog_filtered['YEAR_ALBUM'].unique(),
            value=discog_filtered['YEAR_ALBUM'].unique()[0],
            description='Select Album:',
            disabled=False,)
        # display UI
        UI()
#-------------------------------------------------------------------------------
# SECTION 1 | TAB 3 variables and functions
# set bin_size var with default 10
# number of years grouped into one period bin (1..10, default 10 = decades)
bin_size = 10
#defining a slider for bin_size selector
period_selection_slider = widgets.IntSlider(
    value=bin_size,
    min=1,
    max=10,
    step=1,
    description='Period Size (# of years)',
    style = {'description_width': 'initial'},
    disabled=False,
    continuous_update=True,
    orientation='horizontal',
    readout=True,
    readout_format='d'
)
# define function to overwrite bin_size from slider input
def set_bin_size(x):
    """Read the slider value into the module-level `bin_size`, advance to
    section 2, and redraw the UI."""
    global bin_size
    bin_size = period_selection_slider.value
    #clear previous output
    clear_output()
    # select next section
    global selected_section
    selected_section = 1
    # select tab
    global selection_tab_of_section_2
    selection_tab_of_section_2 = 0
    # restart UI
    UI()
#-------------------------------------------------------------------------------
# UI SECTION 2 variables and functions
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# SECTION 2 | TAB 1 variables and functions
def generate_period_bins(discog, bin_size):
    '''returns list of period bins starting 0th year of the decade
    of first release

    bin_size == 1 yields plain year strings ('1991', '1992', ...);
    otherwise labels are 'YYYY-YYYY' ranges anchored at the decade of
    the earliest release.
    '''
    if bin_size == 1:
        first = int(discog['YEAR'].min())
        final = int(discog['YEAR'].max())
        return [str(y) for y in range(first, final + 1)]
    # multi-year bins: start at the 0th year of the first release's decade
    first = int(str(discog['YEAR'].min())[:3] + '0')
    # pad the last year so the final bin covers the most recent release
    final = int(discog['YEAR'].max()) \
        + int(str(100 - int(str(discog['YEAR'].max())[2:]))[-1])
    return [str(y) + '-' + str(y + bin_size - 1)
            for y in range(first, final + 1, bin_size)]
def unique_per_period(discog, column, bin_size):
    '''returns a dataframe with count of unique values within a column
    by defined bin size

    One row per period bin (from generate_period_bins) with the count of
    unique `column` values in that bin; leading/trailing runs of empty
    bins are then trimmed so at most one empty bin remains adjacent to
    the first and last non-empty bins.
    '''
    data = {'period' : generate_period_bins(discog, bin_size),
            column : []}
    # count unique values per bin; single-year bins compare YEAR directly,
    # multi-year bins parse the 'YYYY-YYYY' label boundaries
    if bin_size == 1:
        for period in data['period']:
            data[column].append(
                len(
                    discog[
                        discog['YEAR'].astype(int) == int(period)
                    ][column].unique()
                )
            )
    else:
        for period in data['period']:
            data[column].append(
                len(
                    discog[
                        (discog['YEAR'].astype(int) >= int(period[:4])) \
                        & (discog['YEAR'].astype(int) <= int(period[5:]))
                    ][column].unique()
                )
            )
    # filter out all but one period empty before first and after last record
    output = pd.DataFrame.from_dict(data)
    # flag record with and without data
    output.loc[output[column] == 0, 'data_flag'] = 0
    output.loc[output[column] > 0, 'data_flag'] = 1
    # chronological counter: accumulate data flags with a one-row lookahead,
    # so counter1 becomes positive one bin BEFORE the first non-empty bin
    counter1=[]
    counter = 0
    for i in range(output.shape[0]):
        if i == output.shape[0]-1:
            counter = counter + output.iloc[i]['data_flag']
            counter1.append(counter)
        elif i == 0:
            counter = counter \
                      + output.iloc[i]['data_flag'] \
                      + output.iloc[i+1]['data_flag']
            counter1.append(counter)
        else:
            counter = counter + output.iloc[i+1]['data_flag']
            counter1.append(counter)
    output['counter1'] = pd.Series(counter1)
    # reverse - anti chronological counter (same lookahead trick from the
    # other end, to trim trailing empty bins)
    output = output.iloc[::-1].reset_index(drop=True)
    counter2=[]
    counter = 0
    for i in range(output.shape[0]):
        if i == output.shape[0]-1:
            counter = counter + output.iloc[i]['data_flag']
            counter2.append(counter)
        elif i == 0:
            counter = counter \
                      + output.iloc[i]['data_flag'] \
                      + output.iloc[i+1]['data_flag']
            counter2.append(counter)
        else:
            counter = counter + output.iloc[i+1]['data_flag']
            counter2.append(counter)
    output['counter2'] = pd.Series(counter2)
    # reverse back
    output = output.iloc[::-1].reset_index(drop=True)
    # return filtered data
    return output[(output.counter1 > 0) & (output.counter2 > 0)]
def album_song_count_per_period(discog, bin_size):
    '''returns a merged dataframe by period
    with counts of albums and songs per period'''
    # one row per period: album counts joined with song counts
    return unique_per_period(discog, 'YEAR_ALBUM', bin_size).merge(
        unique_per_period(discog, 'TRACK_TITLE', bin_size),
        on = 'period')
def add_period_column(discog, bin_size):
    '''adds column with period info without any other modifications

    Mutates `discog` in place (adds a 'period' column) and returns it.
    '''
    bins = generate_period_bins(discog, bin_size)
    if bin_size == 1:
        # single-year bins: the period is simply the release year
        labels = [row['YEAR'] for _, row in discog.iterrows()]
    else:
        # match each row's year against the 'YYYY-YYYY' bin boundaries
        labels = []
        for _, row in discog.iterrows():
            year = int(row['YEAR'])
            for period in bins:
                if int(period[:4]) <= year <= int(period[5:]):
                    labels.append(period)
    discog['period'] = labels
    return discog
def plot_albums_songs_per_period_bar(discog, bin_size):
    '''plots the number of albums and songs per period

    Dual-axis bar chart: song counts on the left axis (blue), album
    counts on the right axis (orange), one bar pair per period bin.
    '''
    width = 0.2
    data = album_song_count_per_period(
        discog,
        bin_size
    ).set_index('period')
    fig, ax1 = plt.subplots(figsize=(8,5))
    ax2 = ax1.twinx() # instantiate a second axes sharing the same x-axis
    ax2.set_axisbelow(True)
    color = colour_palette.get('blue')
    ax1.set_ylabel('Number of songs', color=color)
    #ax1.plot(data.period, data.album, color=color)
    data['TRACK_TITLE'].plot(kind='bar',
                             color=color,
                             ax=ax1,
                             width=width,
                             position=1)
    ax1.tick_params(axis='y', labelcolor=color)
    # rotate labels only when there are many periods to fit
    ax1.tick_params(axis='x', labelrotation=45 if len(data.index) > 5 else 0)
    ax1.set_xlabel('Year' if bin_size == 1 else str(bin_size) + '-year period')
    # ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
    color = colour_palette.get('orange')
    ax2.grid(False)
    ax2.set_ylabel('Number of albums', color=color) # x-label handled with ax1
    #ax2.plot(data.index.tolist(), data.track_title, color=color)
    ax2.tick_params(axis='y', labelcolor=color)
    fig.tight_layout() # otherwise the right y-label may be slightly clipped
    data['YEAR_ALBUM'].plot(kind='bar',
                            color=color,
                            ax=ax2,
                            width=width,
                            position=0
    )
    # counts are integers, so force integer ticks on both axes
    ax1.yaxis.set_major_locator(MaxNLocator(integer=True))
    ax2.yaxis.set_major_locator(MaxNLocator(integer=True))
    plt.title('Albums and Song Count by Period', fontsize=18)
    ax2.grid(False, axis='both')
    ax1.grid(False, axis='x')
    plt.show()
# function to run at click of the button_3
def show_basic_charts(x):
    """Button callback: redraw the UI on section 2 / tab 1 and plot the
    album & song counts per period for the current selection."""
    if len(discog_filtered) == 0:
        no_selections_warning()
    else:
        global bin_size
        #clear previous output
        clear_output()
        # select current tab
        global selected_section
        selected_section = 1
        # select sub tab
        global selection_tab_of_section_2
        selection_tab_of_section_2 = 0
        # display UI
        UI()
        #display chart using the bin_size
        #plot_albums_songs_per_period(discog_filtered, bin_size)
        plot_albums_songs_per_period_bar(discog_filtered, bin_size)
#-------------------------------------------------------------------------------
# SECTION 2 | TAB 2 variables and functions
def violin_plot(discog, bin_size):
    """Violin plot of unique-word counts per song (lexical diversity),
    one violin per period bin."""
    data = add_period_column(discog, bin_size)
    # Draw Plot
    plt.figure(figsize=(8,5), dpi= 80)
    violinplot = sns.violinplot(x='period',
                                y='LYRICS_CLEAN_UNIQUE_COUNT',
                                data=data,
                                scale='width',
                                inner='quartile',
                                linewidth = 1.5,
                                cut=0,
                                palette=colour_palette.values())
    # rotate x axis labels when there are many periods
    if len(data['period'].unique()) > 5:
        for item in violinplot.get_xticklabels():
            item.set_rotation(45)
    # Decoration
    plt.title('Lexical Diversity', fontsize=18)
    plt.ylabel('Number of unique words (excl. stopwords)')
    plt.xlabel('Year' if bin_size == 1 else str(bin_size) + '-year period')
    plt.show()
def show_lexical_diversity(x):
    """Button callback: redraw the UI on section 2 / tab 2 and show the
    lexical-diversity violin plot for the current selection."""
    if len(discog_filtered) == 0:
        no_selections_warning()
    else:
        global bin_size
        #clear previous output
        clear_output()
        # select current tab
        global selected_section
        selected_section = 1
        # select sub tab
        global selection_tab_of_section_2
        selection_tab_of_section_2 = 1
        # display UI
        UI()
        #pirate_plot(discog_filtered, bin_size)
        violin_plot(discog_filtered, bin_size)
#-------------------------------------------------------------------------------
# SECTION 2 | TAB 3 variables and functions
# function to run at click of the button_show_wordclouds
def show_wordclouds(x):
    """Button callback: redraw the UI on section 2 / tab 3 and render
    word clouds grouped by period or by album, per the dropdown."""
    if len(discog_filtered) == 0:
        no_selections_warning()
    else:
        #clear previous output
        clear_output()
        print("WordClouds are rolling in...")
        global bin_size
        # select current tab
        global selected_section
        selected_section = 1
        # select sub tab
        global selection_tab_of_section_2
        selection_tab_of_section_2 = 2
        data = add_period_column(discog_filtered, bin_size)
        clear_output()
        # display UI
        UI()
        # rows without cleaned lyrics are excluded from the clouds
        if wordcloud_by_selection_dropdown.value == 'period':
            plot_wordcloud.createWordCloud(
                data[~data['LYRICS_CLEAN'].isnull()], 'period'
            )
        elif wordcloud_by_selection_dropdown.value == 'album':
            plot_wordcloud.createWordCloud(
                data[~data['LYRICS_CLEAN'].isnull()], 'YEAR_ALBUM'
            )
# dropdown choosing whether word clouds are grouped by period or by album
wordcloud_by_selection_dropdown = widgets.Dropdown(options=['period', 'album',],
                                                   value='period',
                                                   description='Display by:',
                                                   disabled=False,)
#-------------------------------------------------------------------------------
# UI SECTION 3 variables and functions
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# SECTION 3 | TAB 1 variables and functions
def plot_albums_discogs_popularity(discog):
    '''plots Discogs registered owners and average ratings

    Dual-axis bar chart per album: owner counts (orange, left axis) and
    average user ratings (blue, right axis).
    '''
    width = 0.2
    data = pd.pivot_table(discog,
                          index = 'YEAR_ALBUM_DISPLAY',
                          values = ['DISCOGS_PPL_HAVING', 'DISCOGS_AVG_RATING'],
                          aggfunc = 'max')
    # figure width scales with album count, clamped to a readable range
    fig, ax1 = plt.subplots(figsize=(max(8,min(15,len(data)+2)),5))
    color = colour_palette.get('orange')
    ax1.set_axisbelow(True)
    #ax1.yaxis.grid(True, which = 'major', linestyle = '--', color = '#d3d3d3')
    ax1.yaxis.grid(True)
    ax1.set_ylabel('Discogs owners', color=color)
    #ax1.plot(data.period, data.album, color=color)
    data['DISCOGS_PPL_HAVING'].plot(kind='bar',
                                    color=color,
                                    ax=ax1,
                                    width=width,
                                    position=1)
    ax1.tick_params(axis='y', labelcolor=color)
    ax1.tick_params(axis='x', labelrotation=45)
    ax1.set_xlabel('') # no label but adds spacing between charts
    xlabels = data.index.tolist()
    ax1.set_xticklabels(xlabels, ha='right')
    ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
    color = colour_palette.get('blue')
    ax2.set_ylabel('Discogs average rating', color=color)
    #ax2.plot(data.index.tolist(), data.track_title, color=color)
    ax2.tick_params(axis='y', labelcolor=color)
    fig.tight_layout() # otherwise the right y-label may be slightly clipped
    data['DISCOGS_AVG_RATING'].plot(kind='bar',
                                    color=colour_palette.get('blue'),
                                    ax=ax2,
                                    width=width,
                                    position=0)
    #ax1.yaxis.set_major_locator(MaxNLocator(integer=True))
    #ax2.yaxis.set_major_locator(MaxNLocator(integer=True))
    #ax1.xticks(ha='right')
    ax2.grid(None)
    plt.title('Discogs Users - Owners and Average Ratings\n', fontsize=18)
    plt.show()
    print('\n')
def plot_albums_ratings(discog):
    """Bar chart of the average Discogs user rating per album.

    A dashed horizontal line marks the mean rating across the selected
    albums, so each bar can be read against that benchmark.
    (Removed an unused `values = np.array(...)` local that was computed
    and immediately discarded.)
    """
    threshold = discog['DISCOGS_AVG_RATING'].mean()
    data = pd.pivot_table(discog,index='YEAR_ALBUM_DISPLAY',
                          values=['DISCOGS_AVG_RATING'],
                          aggfunc = 'max')
    # figure width scales with album count, clamped to a readable range
    fig, ax = plt.subplots(figsize=(max(8,min(15,len(data)+2)),5))
    data['DISCOGS_AVG_RATING'].plot(kind='bar',
                                    color=colour_palette.get('purple'),
                                    ax=ax,
                                    width=0.5,
                                    position=1)
    ax.set_ylabel('Avg. Discogs user rating')
    ax.set_xlabel('') # no label but adds spacing between charts
    ax.tick_params(axis='x', labelrotation=45)
    xlabels = data.index.tolist()
    ax.set_xticklabels(xlabels, ha='right')
    # horizontal dashed line indicating the mean-rating threshold
    ax.plot([-1, len(data)], [threshold, threshold], "k--") #, color = 'b')
    plt.title('Average Discogs Users Rating by Album vs Index\n', fontsize=18)
    plt.show()
    print('\n')
def plot_albums_ratings_indexing(discog):
    """Diverging bar chart per album: rating minus the mean rating, green
    above average and red below, with a dashed zero line.

    NOTE(review): the title string is identical to plot_albums_ratings's
    title — likely a copy-paste leftover; confirm the intended wording.
    """
    threshold = discog['DISCOGS_AVG_RATING'].mean()
    data = pd.pivot_table(discog,index='YEAR_ALBUM_DISPLAY',
                          values=['DISCOGS_AVG_RATING'],
                          aggfunc = 'max')
    # center each album's rating on the mean
    values = (np.array(data['DISCOGS_AVG_RATING'])-threshold).tolist()
    data['indexing'] = values
    # plot it
    fig, ax = plt.subplots(figsize=(max(8,min(15,len(data)+2)),5))
    data['positive'] = data['indexing'] > 0
    data['indexing'].plot(kind='bar',
                          color=data.positive.map(
                              {True: colour_palette.get('green'),
                               False: colour_palette.get('red')}
                          ),
                          ax=ax, width=0.5, position=1)
    ax.set_ylabel('Album Discogs rating vs. average')
    ax.set_xlabel('') # no label but adds spacing between charts
    ax.tick_params(axis='x', labelrotation=45)
    xlabels = data.index.tolist()
    ax.set_xticklabels(xlabels, ha='right')
    # horizontal line indicating the threshold
    ax.plot([-1, len(data)], [0, 0], "k--") #, color = 'b')
    plt.title('Average Discogs Users Rating by Album vs Index\n',
              fontsize=18)
    plt.show()
# function to run at click of the button
def show_discogs_users_charts(x):
    """Button callback: redraw the UI on section 3 / tab 1 and show the
    three Discogs-users charts (owners+ratings, ratings, diverging)."""
    if len(discog_filtered) == 0:
        no_selections_warning()
    else:
        global bin_size
        # select current tab
        global selected_section
        selected_section = 2
        # select sub tab
        global selection_tab_of_section_3
        selection_tab_of_section_3 = 0
        #clear previous output
        clear_output()
        # display UI
        UI()
        plot_albums_discogs_popularity(discog_filtered)
        plot_albums_ratings(discog_filtered)
        plot_albums_ratings_indexing(discog_filtered)
#-------------------------------------------------------------------------------
# SECTION 3 | TAB 2 variables and functions
def create_chord_diag(df, column1, column2):
    '''
    Render a Chord diagram linking Charted/Uncharted status to a grouping
    column, using the module-level `bin_size` for period binning.

    df      - discography DataFrame for a single artist
    column1 - Billboard rank column deciding Charted vs Uncharted, either
              'BILLBOARD_TRACK_RANK' (counts unique TRACK_TITLE values)
              or 'BILLBOARD_ALBUM_RANK' (counts unique ALBUM values)
    column2 - grouping axis: 'period' or 'SENTIMENT_GROUP'

    Writes the diagram via Chord(...).to_html() and displays it through an
    IFrame pointing at "./out.html".
    '''
    # Creating period column
    global bin_size
    data = add_period_column(df, bin_size)
    artist_nam = df['ARTIST_NAME'].iloc[0]
    # Flag Charted/Uncharted tracks/albums (NaN rank falls into Uncharted)
    data.loc[data[column1] >= 1.0
             , "Charted_Uncharted"
            ] = 'Charted'
    data.loc[~(data[column1] >= 1.0)
             , "Charted_Uncharted"
            ] = 'Uncharted'
    #Group By period and Chart_Unchart
    df_groupby = pd.pivot_table(
        data,
        index = [column2, 'Charted_Uncharted'],
        values = 'TRACK_TITLE' if column1 == 'BILLBOARD_TRACK_RANK' else 'ALBUM',
        aggfunc = lambda x: len(x.unique())
    ).reset_index()
    # Converting period to string to solve issue when using bin size 1
    df_groupby[column2] = df_groupby[column2].astype(str)
    # df_groupby = data[[column2
    #                 , column1
    #                 , "Charted_Uncharted"]
    #            ].groupby([column2, 'Charted_Uncharted']).nunique().reset_index()
    # Creating two pivot data
    # two mirrored pivots are concatenated to form a square co-occurrence
    # matrix (groups x statuses) for the chord layout
    pivot_data = df_groupby.pivot(
        index = column2,
        columns = 'Charted_Uncharted',
        values = 'TRACK_TITLE' if column1 == 'BILLBOARD_TRACK_RANK' else 'ALBUM'
    )
    pivot_data2 = df_groupby.pivot(
        index = 'Charted_Uncharted',
        columns = column2,
        values = 'TRACK_TITLE' if column1 == 'BILLBOARD_TRACK_RANK' else 'ALBUM'
    )
    # Appending two dataFrame
    df_final = pd.concat(
        [pivot_data, pivot_data2],
        sort = True).fillna(0).astype(int).sort_index()
    matrix = df_final.values.tolist()
    ls_col_nam = [col for col in df_final.columns]
    # {artist}
    # calculate whether to wrap labels or not
    # anticipate all albums/tracks may be charted or uncharted
    # labels are not wrapped when the charted/uncharted split is very
    # lopsided (<=20% either way) or there are many groups
    wrap_index = True
    if 'Uncharted' in pivot_data.columns and 'Charted'in pivot_data.columns:
        wrap_index = False if (
            (pivot_data['Uncharted'].sum() / pivot_data['Charted'].sum()) <= 0.2 or
            (pivot_data['Charted'].sum() / pivot_data['Uncharted'].sum()) <= 0.2
        ) else True
    if len(data[column2].unique()) > 5:
        wrap_index = False
    #if len(data[column2].unique()) < 5
    plot = Chord(matrix
                 , ls_col_nam
                 , padding=0.05
                 , width = 600 if wrap_index == True else 500
                 , margin= 10 if wrap_index == True else 80
                 , wrap_labels= wrap_index
                ).to_html()
    display(
        widgets.HTML(
            value=f'''<h3><center><font color="black">
            Artist: {artist_nam}</center>
            </h3>''',
            layout=widgets.Layout(width="100%")
        )
    )
    display(
        IFrame(
            src="./out.html",
            width=1000,
            height=700
        )
    )
#------------------------------------------------------------------------------------------
# function to run at click of the button
def show_billboard_100_charts(x):
    """Button callback: redraw the UI on section 3 / tab 2 and show the
    chord diagram of track chart placement by period."""
    if len(discog_filtered) == 0:
        no_selections_warning()
    else:
        global bin_size
        # select current tab
        global selected_section
        selected_section = 2
        # select sub tab
        global selection_tab_of_section_3
        selection_tab_of_section_3 = 1
        #clear previous output
        clear_output()
        # display UI
        UI()
        display(widgets.HTML(value=f'''<h2><center><font color="black">
        Songs placement in Billboard 100 charts</center></h2>''',
                             layout=widgets.Layout(width="100%")))
        create_chord_diag(discog_filtered, column1 = 'BILLBOARD_TRACK_RANK', column2 ='period')
#-------------------------------------------------------------------------------
# SECTION 3 | TAB 3 variables and functions
#TODO make chord diagram for albums
# function to run at click of the button
def show_billboard_album_charts(x):
    """Button callback: redraw the UI on section 3 / tab 3 and show the
    chord diagram of album chart placement by period."""
    if len(discog_filtered) == 0:
        no_selections_warning()
    else:
        global bin_size
        # select current tab
        global selected_section
        selected_section = 2
        # select sub tab
        global selection_tab_of_section_3
        selection_tab_of_section_3 = 2
        #clear previous output
        clear_output()
        # display UI
        UI()
        display(
            widgets.HTML(
                value=f'''
                <h2><center><font color="black">
                Album placement in Billboard Albums charts
                </center></h2>''',
                layout=widgets.Layout(width="100%")
            )
        )
        create_chord_diag(
            discog_filtered,
            column1 = 'BILLBOARD_ALBUM_RANK',
            column2 ='period')
#-------------------------------------------------------------------------------
# # SECTION 4 | TAB 1 variables and functions
# dropdown choosing whether sentiment bars are shown per album or per track
sentiment_dropdown1 = widgets.Dropdown(options=['albums', 'tracks by album',],
                                       value='albums',
                                       description='Display by:',
                                       disabled=False,)
# album picker for the per-track view; repopulated once a selection exists
sentiment_dropdown2 = widgets.Dropdown(options=[''],
                                       value='',
                                       description='',
                                       disabled=False,)
# function to run at click of the button_show_wordclouds
def show_sentiment_graphs(x):
    """Button callback: redraw the UI on section 4 / tab 1 and plot
    diverging sentiment bars — per album, or per track of the album
    chosen in `sentiment_dropdown2`."""
    if len(discog_filtered) == 0:
        no_selections_warning()
    else:
        #clear previous output
        clear_output()
        print("Analysing the sentiment of the lyrics...")
        # select current tab
        global selected_section
        selected_section = 3
        # select sub tab
        global selection_tab_of_section_4
        selection_tab_of_section_4 = 0
        clear_output()
        UI()
        if sentiment_dropdown1.value == 'albums':
            # display label
            display(
                widgets.HTML(
                    value=f'''<h3><center><font color="black">
                    Sentiment Analysis - diverging bars</center></h3>
                    <h4><center><font color="black">
                    Artist: {artist} </center></h4>''',
                    layout=widgets.Layout(width="100%"))
            )
            # display plot of scores by album, sorted chronologically
            advanced_analytics.plotDivergingBars(
                discog_filtered.reset_index(),
                'SENTIMENT_COMPOUND_SCORE',
                'YEAR_ALBUM',
                green=colour_palette.get('green'),
                red=colour_palette.get('red'),
                sort_by_values = False)
        else:
            # display more specific label
            # YEAR_ALBUM is formatted "[YYYY] Title": [1:5] is the year,
            # [7:] is the title
            display(
                widgets.HTML(
                    value=f'''<h3><center><font color="black">
                    Sentiment Analysis - diverging bars</center></h3>
                    <h4><center><font color="black">
                    Artist: {artist} |
                    Album: {sentiment_dropdown2.value[7:]} |
                    Year: {sentiment_dropdown2.value[1:5]}
                    </center></h4>''',
                    layout=widgets.Layout(width="100%"))
            )
            # display plot by song in a selected album, sorted by sentiment score
            advanced_analytics.plotDivergingBars(
                discog_filtered[
                    discog_filtered.YEAR_ALBUM == sentiment_dropdown2.value
                ].reset_index(),
                'SENTIMENT_COMPOUND_SCORE',
                'TRACK_TITLE',
                green=colour_palette.get('green'),
                red=colour_palette.get('red')
            )
# inner function to be triggered with a change of dropdown1 value
def adapt_UI(x):
    """Dropdown-change callback: rebuild `sentiment_dropdown2` from the
    currently selected albums and redraw the UI on section 4 / tab 1."""
    global discog_filtered, sentiment_dropdown2
    global selected_section
    selected_section = 3
    # select sub tab
    global selection_tab_of_section_4
    selection_tab_of_section_4 = 0
    clear_output()
    sentiment_dropdown2 = widgets.Dropdown(options=discog_filtered['YEAR_ALBUM'].unique(),
                                           value=discog_filtered['YEAR_ALBUM'].unique()[0],
                                           description='Select Album:',
                                           disabled=False,)
    UI()
#-------------------------------------------------------------------------------
# SECTION 4 | TAB 2
def show_sentiment_vs_charts_song(x):
    """Button callback: redraw the UI on section 4 / tab 2 and show the
    chord diagram linking track sentiment groups to chart placement."""
    if len(discog_filtered) == 0:
        no_selections_warning()
    else:
        # select current tab
        global selected_section
        selected_section = 3
        # select sub tab
        global selection_tab_of_section_4
        selection_tab_of_section_4 = 1
        #clear previous output
        clear_output()
        # display UI
        UI()
        display(
            widgets.HTML(
                value=f'''<h2><center><font color="black">
                Tracks sentiment vs placement in Billboard 100 charts
                </center></h2>''',
                layout=widgets.Layout(width="100%")
            )
        )
        create_chord_diag(
            discog_filtered,
            column1 = 'BILLBOARD_TRACK_RANK',
            column2 ='SENTIMENT_GROUP'
        )
#-------------------------------------------------------------------------------
# SECTION 4 | TAB 3 Sentiment score over time
def sntm_scr_ovr_time(data):
    """Plot tracks grouped by lyric sentiment over time.

    Draws a 2x2 figure: the full-width top axis shows all tracks, the
    bottom-left only Billboard-charted tracks and the bottom-right only
    uncharted ones.  For each year, positive counts stack upwards (green),
    negative counts downwards (red), and neutral counts are split half
    up / half down (grey), forming a diverging bar.  All three axes share
    the limits computed from the full dataset so they compare directly.

    NOTE(review): mutates *data* in place by adding a 'charted_uncharted'
    column -- confirm callers do not rely on the frame staying unchanged.

    Parameters
    ----------
    data : pandas.DataFrame
        Must contain at least the columns YEAR, SENTIMENT_GROUP,
        TRACK_TITLE and BILLBOARD_TRACK_RANK.
    """
    import matplotlib.gridspec as gridspec

    expect_cols = ['Negative', 'Neutral', 'Positive']

    def _pivot(frame):
        # One row per YEAR, one column per sentiment group, counting tracks.
        df = pd.pivot_table(frame,
                            index = 'YEAR',
                            columns = 'SENTIMENT_GROUP',
                            values = 'TRACK_TITLE',
                            aggfunc = 'count').fillna(0)
        # Guarantee all three sentiment columns exist even if absent in data.
        for col in expect_cols:
            if col not in df.columns:
                df[col] = 0
        return df

    def _series(df):
        # Negatives plot below zero; neutral counts split half down / half up
        # (floor below, ceil above, so odd counts are not lost).
        y_neg = [0 if math.isnan(v) else v
                 for v in [v * -1 for v in df['Negative'].values.tolist()]]
        y_ntr1 = [0 if math.isnan(v) else v
                  for v in [math.floor(v / 2) * -1 for v in df['Neutral'].values.tolist()]]
        y_ntr2 = [0 if math.isnan(v) else v
                  for v in [math.ceil(v / 2) for v in df['Neutral'].values.tolist()]]
        y_pos = [0 if math.isnan(v) else v for v in df['Positive'].values.tolist()]
        return y_neg, y_ntr1, y_ntr2, y_pos

    def _draw(ax, x, y_neg, y_ntr1, y_ntr2, y_pos):
        # Stacked diverging bars: grey neutral halves, green positive on top,
        # red negative below.
        ax.bar(x, y_ntr2, width = 0.9, color = colour_palette.get('light grey'))
        ax.bar(x, y_pos, width = 0.9, bottom = y_ntr2, color = colour_palette.get('green'))
        ax.bar(x, y_ntr1, width = 0.9, color = colour_palette.get('light grey'))
        ax.bar(x, y_neg, width = 0.9, bottom = y_ntr1, color = colour_palette.get('red'))
        # Gridline every 5 tracks (major) and every single track (minor).
        ax.set_axisbelow(True)
        ax.yaxis.set_major_locator(MultipleLocator(5))
        ax.yaxis.set_minor_locator(AutoMinorLocator(5))
        ax.yaxis.grid(True, which = 'major', linestyle = '--', color = '#d3d3d3')
        ax.yaxis.grid(True, which = 'minor', linestyle = ':', color = '#d1d1d1')
        # Hide y tick labels -- counts are conveyed by the gridlines.
        ax.yaxis.set_ticklabels([])
        # Years must be whole numbers on the x axis.
        ax.xaxis.set_major_locator(MaxNLocator(integer=True))

    ##-----------------------------------------------------------------------##
    # Figure: one full-width axis on top, two half-width axes below.
    fig = plt.figure(figsize=(16, 9))
    gs = gridspec.GridSpec(2, 2)
    ax1 = fig.add_subplot(gs[0, :])
    ax2 = fig.add_subplot(gs[1, 0])
    ax3 = fig.add_subplot(gs[1, 1])

    ##-----------------------------------------------------------------------##
    # All tracks (top axis) -- this dataset also defines the shared limits.
    df = _pivot(data)
    x = df.index
    y_neg, y_ntr1, y_ntr2, y_pos = _series(df)

    # Make sure the chart always displays at least 12 years on the x axis.
    min_ticks = 12
    x_min = int(x.min()) - 1
    x_max = int(x.max()) + 1
    diff = x_max - x_min
    if diff < min_ticks:
        padding = (min_ticks - diff) / 2
        x_max = math.ceil(x_max + padding)
        x_min = math.floor(x_min - padding)
    # Symmetric y limits with 10% headroom, shared by all three axes.
    ylimit = round(max(max(y_pos) + max(y_ntr2),
                       abs(min(y_neg) + min(y_ntr1))) * 1.1)
    ylimit_final = [-ylimit, ylimit]
    xlimit_final = [x_min, x_max]

    _draw(ax1, x, y_neg, y_ntr1, y_ntr2, y_pos)
    ax1.set_title('Song lyrics by Sentiment over time', fontsize=14)
    ax1.set(ylim = ylimit_final, xlim = xlimit_final)

    ##-----------------------------------------------------------------------##
    # Split the discography into charted vs uncharted tracks (in place).
    data.loc[~data['BILLBOARD_TRACK_RANK'].isnull(), 'charted_uncharted'] = 'charted'
    data.loc[data['BILLBOARD_TRACK_RANK'].isnull(), 'charted_uncharted'] = 'uncharted'

    # Charted tracks (bottom left).
    df_charted = _pivot(data[data.charted_uncharted == 'charted'])
    _draw(ax2, df_charted.index, *_series(df_charted))
    ax2.set(ylim = ylimit_final, xlim = xlimit_final)
    ax2.title.set_text('Charted Tracks')

    ##-----------------------------------------------------------------------##
    # Uncharted tracks (bottom right).
    df_uncharted = _pivot(data[data.charted_uncharted == 'uncharted'])
    _draw(ax3, df_uncharted.index, *_series(df_uncharted))
    ax3.set(ylim = ylimit_final, xlim = xlimit_final)
    ax3.title.set_text('Uncharted Tracks')

    plt.show()
#-------------------------------------------------------------------------------
# SECTION 4 | TAB 3 Sentiment score over time Charted Vs Uncharted Tracks
## Not in use ###
# def sntm_scr_ovr_cht_unchta(data):
# mycolors1 = ['tab:grey', 'tab:green']
# mycolors2 = ['tab:grey', 'tab:red']
# f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 4.5))
# f.suptitle('Sentiment score over time, charted vs uncharted tracks', fontsize=14)
# columns = data.columns[1:]
# labs = columns.values.tolist()
# data.loc[~data['BILLBOARD_TRACK_RANK'].isnull(), 'charted_uncharted'] = 'charted'
# data.loc[data['BILLBOARD_TRACK_RANK'].isnull(), 'charted_uncharted'] = 'uncharted'
# # charted
# df_charted = pd.pivot_table(data[data.charted_uncharted == 'charted'],
# index = 'YEAR',
# columns = 'SENTIMENT_GROUP',
# values = 'TRACK_TITLE',
# aggfunc = 'count')
# x = df_charted.index
# y_neg = [0 if math.isnan(x) else x for x in [x*-1 for x in df_charted['Negative'].values.tolist()]]
# y_ntr1 = [0 if math.isnan(x) else x for x in [x/2 *-1 for x in df_charted['Neutral'].values.tolist()]]
# y_ntr2 = [0 if math.isnan(x) else x for x in [x/2 for x in df_charted['Neutral'].values.tolist()]]
# y_pos = [0 if math.isnan(x) else x for x in df_charted['Positive'].values.tolist()]
# y_1 = np.vstack([y_ntr2, y_pos])
# y_2 = np.vstack([y_ntr1, y_neg,])
# # plot bars
# ax1.bar(x, y_ntr2, width = 0.9, color = '#b2b2b2')
# ax1.bar(x, y_pos, width = 0.9, bottom=y_ntr2, color = '#198c19')
# ax1.bar(x, y_ntr1, width = 0.9, color = '#b2b2b2')
# ax1.bar(x, y_neg, width = 0.9, bottom=y_ntr1, color = '#ff4c4c')
# # add gridline
# ax1.set_axisbelow(True)
# ax1.yaxis.grid(True, which = 'major', linestyle = '--', color = '#d3d3d3')
# # calculate axis limits
# xlimit_1 = [df_charted.index.min()-1, df_charted.index.max()+1]
# ylimit_1 = round(max(max(y_pos) + max(y_ntr2), abs(min(y_neg) + min(y_ntr1))) *1.1)
# # labels
# # ax1.set_yticklabels([abs(x) for x in ax1.get_yticks()])
# # set title
# ax1.title.set_text('Charted Tracks')
# # uncharted
# df_uncharted = pd.pivot_table(data[data.charted_uncharted == 'uncharted'],
# index = 'YEAR',
# columns = 'SENTIMENT_GROUP',
# values = 'TRACK_TITLE',
# aggfunc = 'count')
# x = df_uncharted.index
# y_neg = [0 if math.isnan(x) else x for x in [x*-1 for x in df_uncharted['Negative'].values.tolist()]]
# y_ntr1 = [0 if math.isnan(x) else x for x in [x/2 *-1 for x in df_uncharted['Neutral'].values.tolist()]]
# y_ntr2 = [0 if math.isnan(x) else x for x in [x/2 for x in df_uncharted['Neutral'].values.tolist()]]
# y_pos = [0 if math.isnan(x) else x for x in df_uncharted['Positive'].values.tolist()]
# y_1 = np.vstack([y_ntr2, y_pos])
# y_2 = np.vstack([y_ntr1, y_neg,])
# # plot bars
# ax2.bar(x, y_ntr2, width = 0.9, color = '#b2b2b2')
# ax2.bar(x, y_pos, width = 0.9, bottom=y_ntr2, color = '#198c19')
# ax2.bar(x, y_ntr1, width = 0.9, color = '#b2b2b2')
# ax2.bar(x, y_neg, width = 0.9, bottom=y_ntr1, color = '#ff4c4c')
# # add gridline
# ax2.set_axisbelow(True)
# ax2.yaxis.grid(True, which = 'major', linestyle = '--', color = '#d3d3d3')
# # calculate axis limits
# xlimit_2 = [df_uncharted.index.min()-1, df_uncharted.index.max()+1]
# ylimit_2 = round(max(max(y_pos) + max(y_ntr2), abs(min(y_neg) + min(y_ntr1))) *1.1)
# # labels
# # ax2.set_yticklabels([abs(x) for x in ax2.get_yticks()])
# # set title
# ax2.title.set_text('Uncharted Tracks')
# # set final limtits for both subplots
# xlimit_final = [min(xlimit_1[0], xlimit_2[0]), max(xlimit_1[1], xlimit_2[1])]
# ylimit_final = max(ylimit_1, ylimit_2)
# ax1.set(ylim=[-ylimit_final, ylimit_final], xlim=xlimit_final)
# ax2.set(ylim=[-ylimit_final, ylimit_final], xlim=xlimit_final)
# plt.show()
def show_sentiment_score_ovr_time(x):
    """Button callback: render the 'Sentiment Over Time' charts.

    Warns and bails out when no albums are selected; otherwise records
    the active accordion section/tab, redraws the widget UI and plots
    the sentiment-over-time figure for the filtered discography.

    Parameters
    ----------
    x : the ipywidgets Button passed in by ``on_click`` (unused).
    """
    global selected_section, selection_tab_of_section_4
    # guard clause: nothing selected -> warn and stop
    if len(discog_filtered) == 0:
        no_selections_warning()
        return
    # remember which accordion section / sub tab is open so the
    # rebuilt UI reopens in the same place
    selected_section = 3
    selection_tab_of_section_4 = 2
    # wipe previous output, rebuild the widget tree, then plot
    clear_output()
    UI()
    sntm_scr_ovr_time(discog_filtered)
#-------------------------------------------------------------------------------
# def oldUI():
# #---------------------------------------------------------------------------
# # SECTION 1 "Get Data" (Get discography, select albums, get lyrics)
# #---------------------------------------------------------------------------
# # SECTION 1 | TAB 1 "Artist" (Get discography)
# # global variable (input box)
# global artist_input
# # button = get artist discography
# button_get_discography = widgets.Button(description="Get discography")
# button_get_discography.on_click(get_discography)
# # wrap tab
# SECTION_1_TAB_1 = widgets.VBox([artist_input, button_get_discography,])
# #---------------------------------------------------------------------------
# # SECTION 1 | TAB 2 "Albums" (select albums to include in the analysis)
# # selector to include/exclude albums
# global album_selector
# # show current artist
# label_current_artist = widgets.HTML(value=f'''<b><font size = "+1">Selected\
# discography for <u>{artist}</u></b>''',
# layout=widgets.Layout(width="100%"))
# # button to select/deselect
# text_select_deselect_all = widgets.Label(
# 'Use checkboxes to toggle selection:',
# layout=widgets.Layout(width="80%"))
# button_select_deselect_all = widgets.Button(
# description="Select/deselect all")
# button_select_deselect_all.on_click(select_deselect_all)
# # button = confirm
# button_apply_selection = widgets.Button(description="Apply")
# button_apply_selection.on_click(apply_selection)
# # wrap elements
# SECTION_1_TAB_2 = widgets.VBox([label_current_artist,
# text_select_deselect_all,
# button_select_deselect_all,
# album_selector,
# button_apply_selection,],
# layout=widgets.Layout(width="80%",
# padding = "10px"))
# #---------------------------------------------------------------------------
# # SECTION 1 | TAB 3 "Time period" ( select time intervals for charts)
# # period size selection slider
# global period_selection_slider
# # button to update the period selection
# button_update_period_selection = widgets.Button(description="Confirm")
# button_update_period_selection.on_click(set_bin_size)
# # vertical block
# SECTION_1_TAB_3 = widgets.VBox([period_selection_slider,
# button_update_period_selection,])
# #---------------------------------------------------------------------------
# # SECTION 1 build
# section_1_children = [SECTION_1_TAB_1, SECTION_1_TAB_2,SECTION_1_TAB_3]
# section_1 = widgets.Tab(children=section_1_children)
# section_1.set_title(0, 'Artist')
# section_1.set_title(1, 'Albums')
# section_1.set_title(2, 'Time periods')
# section_1.selected_index = selection_tab_of_section_1
# #---------------------------------------------------------------------------
# #---------------------------------------------------------------------------
# # SECTION 2 "Visualisations - single artist"
# #---------------------------------------------------------------------------
# # SECTION 2 | TAB 1 "Basic Charts"
# # button to update chart
# button_show_basic_charts = widgets.Button(description="Show/refresh charts")
# button_show_basic_charts.on_click(show_basic_charts)
# # vertical block
# SECTION_2_TAB_1 = widgets.VBox([button_show_basic_charts,])
# #---------------------------------------------------------------------------
# # SECTION 2 | TAB 2 "Wordclouds"
# # dropdown
# global wordcloud_by_selection_dropdown
# # button to update chart
# button_show_wordclouds = widgets.Button(description="Show")
# button_show_wordclouds.on_click(show_wordclouds)
# # vertical block
# SECTION_2_TAB_2 = widgets.VBox([wordcloud_by_selection_dropdown,
# button_show_wordclouds,])
# #---------------------------------------------------------------------------
# # SECTION 2 | TAB 3 "Users and Ratings"
# # button to show charts
# button_users_ratings_charts = widgets.Button(description="Show")
# button_users_ratings_charts.on_click(show_users_ratings_charts)
# # vertical block
# SECTION_2_TAB_3 = widgets.VBox([button_users_ratings_charts,])
# #---------------------------------------------------------------------------
# # SECTION 2 | TAB 4 "Sentiment Analysis"
# global sentiment_dropdown1, sentiment_dropdown2
# # button to show charts
# button_sentiment_analysis = widgets.Button(description="Show")
# button_sentiment_analysis.on_click(show_sentiment_graphs)
# # vertical block
# # define tab depending on the value of the global variable
# if sentiment_dropdown1.value == 'tracks by album':
# SECTION_2_TAB_4 = widgets.VBox([sentiment_dropdown1,sentiment_dropdown2,button_sentiment_analysis, ])
# else:
# SECTION_2_TAB_4 = widgets.VBox([sentiment_dropdown1,button_sentiment_analysis])
# # trigger inner function when value of the dropdown1 changes
# sentiment_dropdown1.observe(adapt_UI, names='value')
# #---------------------------------------------------------------------------
# # SECTION 2 build
# section_2_children = [SECTION_2_TAB_1,
# SECTION_2_TAB_2,
# SECTION_2_TAB_3,
# SECTION_2_TAB_4,]
# section_2 = widgets.Tab()
# section_2.children = section_2_children
# section_2.set_title(0, 'Basic Charts')
# section_2.set_title(1, 'Wordclouds')
# section_2.set_title(2, 'Users and ratings')
# section_2.set_title(3, 'Sentiment analysis')
# section_2.selected_index = selection_tab_of_section_2
# section_2_wrapper_label = widgets.HTML(
# value=f'''Current artist: <b>{artist}</b>''',
# layout=widgets.Layout(width="100%"))
# section_2_wrapper = widgets.VBox([section_2_wrapper_label,
# section_2,])
# #---------------------------------------------------------------------------
# #---------------------------------------------------------------------------
# # SECTION 3 "Visualisations - compare artists"
# #---------------------------------------------------------------------------
# # SECTION 3 | TAB 1 "Select artists"
# SECTION_3_TAB_1 = widgets.VBox()
# #---------------------------------------------------------------------------
# # SECTION 3 | TAB 1 "Basic charts"
# SECTION_3_TAB_2 = widgets.VBox()
# #---------------------------------------------------------------------------
# # SECTION 3 build
# section_3_children = [SECTION_3_TAB_1,
# SECTION_3_TAB_2,]
# section_3 = widgets.Tab()
# section_3.children = section_3_children
# section_3.set_title(0, 'Select Artists')
# section_3.set_title(1, 'Basic Charts')
# #---------------------------------------------------------------------------
# #---------------------------------------------------------------------------
# # FINAL UI compiler
# #---------------------------------------------------------------------------
# UI = widgets.Accordion(children=[section_1,
# section_2_wrapper,
# section_3])
# UI.set_title(0, 'Configuration')
# UI.set_title(1, 'Visualisations - single artist')
# UI.set_title(2, 'Visualisations - compare artists')
# UI.selected_index = selected_section
# display(UI)
# #---------------------------------------------------------------------------
def UI():
    """Compose and display the full application UI.

    Builds four accordion sections -- Configuration, Overview/Lexical
    Diversity, Ratings/Success and Sentiment Analysis -- each holding a
    Tab widget, then displays the accordion.  Reads many module-level
    globals (artist, artist_input, album_selector, the selection_tab_*
    indices, descriptions_colour, colour_palette, ...) so the function
    can be re-invoked after each interaction to redraw the UI in the
    same state the user left it in.
    """
    # select font for charts
    plt.rcParams["font.family"] = "DejaVu Sans"
    #---------------------------------------------------------------------------
    # SECTION 1 "Configuration"
    #---------------------------------------------------------------------------
    # SECTION 1 | TAB 1 "Artist" (Get discography)
    # global variable (input box)
    global artist_input
    # button = get artist discography
    button_get_discography = widgets.Button(description="Get discography")
    button_get_discography.on_click(get_discography)
    # wrap tab
    SECTION_1_TAB_1 = widgets.VBox([artist_input, button_get_discography,])
    #---------------------------------------------------------------------------
    # SECTION 1 | TAB 2 "Albums" (select albums to include in the analysis)
    # selector to include/exclude albums
    global album_selector
    # show current artist
    # NOTE(review): getArtistID is called here on every UI redraw just to
    # build the source link -- presumably hits the Discogs API; confirm
    # the lookup is cached.
    label_current_artist = widgets.HTML(value=f'''
    <p><font size = "+1">
    Selected discography for <b><u>{artist}</u></b>
    </p>
    <p><font size = "-1"><i>
    Source <a href="https://www.discogs.com/artist/{get_discogs.getArtistID(artist_input.value)[0]}" target="_blank">link</a></i>
    </p><br>''',
                                        layout=widgets.Layout(width="100%"))
    # button to select/deselect
    text_select_deselect_all = widgets.Label(
        'Use checkboxes to toggle selection:',
        layout=widgets.Layout(width="80%"))
    button_select_deselect_all = widgets.Button(
        description="Select/deselect all")
    button_select_deselect_all.on_click(select_deselect_all)
    # button = confirm
    button_apply_selection = widgets.Button(description="Apply")
    button_apply_selection.on_click(apply_selection)
    # wrap elements
    SECTION_1_TAB_2 = widgets.VBox([label_current_artist,
                                    text_select_deselect_all,
                                    button_select_deselect_all,
                                    album_selector,
                                    button_apply_selection,],
                                   layout=widgets.Layout(width="80%",
                                                         padding = "10px"))
    #---------------------------------------------------------------------------
    # SECTION 1 | TAB 3 "Time period" ( select time intervals for charts)
    # period size selection slider
    global period_selection_slider
    # button to update the period selection
    button_update_period_selection = widgets.Button(description="Confirm")
    button_update_period_selection.on_click(set_bin_size)
    # vertical block
    SECTION_1_TAB_3 = widgets.VBox([period_selection_slider,
                                    button_update_period_selection,])
    #---------------------------------------------------------------------------
    # SECTION 1 build
    section_1_children = [SECTION_1_TAB_1,
                          SECTION_1_TAB_2,
                          SECTION_1_TAB_3]
    section_1 = widgets.Tab(children=section_1_children)
    section_1.set_title(0, 'Artist')
    section_1.set_title(1, 'Albums')
    section_1.set_title(2, 'Time periods')
    # reopen the sub tab the user had selected before the redraw
    section_1.selected_index = selection_tab_of_section_1
    section_1_wrapper_label = widgets.HTML(
        value=f'''<font color='{descriptions_colour}'><font size = "-2">
    <i>Choose the ARTIST to load the discography, select ALBUMS to be
    included in the analysis and choose the TIME PERIOD length for time
    based visualisations.</i>
    <br>''',
        layout=widgets.Layout(width="100%"))
    section_1_wrapper = widgets.VBox([section_1_wrapper_label,
                                      section_1,])
    #---------------------------------------------------------------------------
    #---------------------------------------------------------------------------
    # SECTION 2 "Visualisations - Overview and Lexical Diversity"
    #---------------------------------------------------------------------------
    #---------------------------------------------------------------------------
    # SECTION 2 | TAB 1 "Overview"
    # button to update chart
    s2t1_desc = widgets.HTML(
        value=f'''<font color='{descriptions_colour}'><font size = "-2">
    <i>Displays Number of Albums and Tracks in the selected Artist's
    discography over time, arranged in periods of chosen size.</i>
    <br>''',
        layout=widgets.Layout(width="80%"))
    button_show_basic_charts = widgets.Button(description="Show/refresh charts")
    button_show_basic_charts.on_click(show_basic_charts)
    # vertical block
    SECTION_2_TAB_1 = widgets.VBox([s2t1_desc,
                                    button_show_basic_charts,])
    #---------------------------------------------------------------------------
    # SECTION 2 | TAB 2 "Lexical Diversity"
    # button to update chart
    s2t2_desc = widgets.HTML(
        value=f'''<font color='{descriptions_colour}'><font size = "-2">
    <i>Displays the lexical diversity of the Artist's lyrics over time,
    arranged in periods of chosen size.</i>
    <br>''',
        layout=widgets.Layout(width="80%"))
    button_show_lexical_diversity = widgets.Button(description="Show/refresh charts")
    button_show_lexical_diversity.on_click(show_lexical_diversity)
    # vertical block
    SECTION_2_TAB_2 = widgets.VBox([s2t2_desc,
                                    button_show_lexical_diversity,])
    #---------------------------------------------------------------------------
    # SECTION 2 | TAB 3 "Wordclouds"
    s2t3_desc = widgets.HTML(
        value=f'''<font color='{descriptions_colour}'><font size = "-2">
    <i>Displays Artist's lyrics in Wordclouds, either by selected period
    size or by individual Album.</i>
    <br>''',
        layout=widgets.Layout(width="80%"))
    # dropdown
    global wordcloud_by_selection_dropdown
    # button to update chart
    button_show_wordclouds = widgets.Button(description="Show")
    button_show_wordclouds.on_click(show_wordclouds)
    # vertical block
    SECTION_2_TAB_3 = widgets.VBox([s2t3_desc,
                                    wordcloud_by_selection_dropdown,
                                    button_show_wordclouds,])
    #---------------------------------------------------------------------------
    # SECTION 2 build
    section_2_children = [SECTION_2_TAB_1,
                          SECTION_2_TAB_2,
                          SECTION_2_TAB_3,]
    section_2 = widgets.Tab()
    section_2.children = section_2_children
    section_2.set_title(0, 'Basic Charts')
    section_2.set_title(1, 'Lexical Diversity')
    section_2.set_title(2, 'Wordclouds')
    section_2.selected_index = selection_tab_of_section_2
    section_2_wrapper_label = widgets.HTML(
        value=f'''Current artist: <b>{artist}</b>''',
        layout=widgets.Layout(width="100%"))
    section_2_wrapper = widgets.VBox([section_2_wrapper_label,
                                      section_2,])
    #---------------------------------------------------------------------------
    #---------------------------------------------------------------------------
    # SECTION 3 "Visualisations - Ratings and Success"
    #---------------------------------------------------------------------------
    #---------------------------------------------------------------------------
    # SECTION 3 | TAB 1 "Discogs Ratings"
    # button to show charts
    s3t1_desc = widgets.HTML(
        value=f'''<font color='{descriptions_colour}'><font size = "-2">
    <i>Visualises data retrieved from DISCOGS.COM, such as number of
    registered record owners or average album ratings.
    </i>''',
        layout=widgets.Layout(width="80%"))
    button_discogs_users_charts = widgets.Button(description="Show")
    button_discogs_users_charts.on_click(show_discogs_users_charts)
    # vertical block
    SECTION_3_TAB_1 = widgets.VBox([s3t1_desc,
                                    button_discogs_users_charts,])
    #---------------------------------------------------------------------------
    # SECTION 3 | TAB 2 "Billboard 100"
    s3t2_desc = widgets.HTML(
        value=f'''<font color='{descriptions_colour}'><font size = "-2">
    <i>Visualises the proportions of tracks in the selected Artist's
    discography that reached a position in the official Billboard 100 music
    charts vs those that didn't, by period.</i>
    <br>''',
        layout=widgets.Layout(width="80%"))
    # button to show charts
    button_billboard_100_charts = widgets.Button(description="Show")
    button_billboard_100_charts.on_click(show_billboard_100_charts)
    # vertical block
    SECTION_3_TAB_2 = widgets.VBox([s3t2_desc,
                                    button_billboard_100_charts,])
    #---------------------------------------------------------------------------
    # SECTION 3 | TAB 3 "Billboard Albums"
    s3t3_desc = widgets.HTML(
        value=f'''<font color='{descriptions_colour}'><font size = "-2">
    <i>Visualises the proportions of Albums in the selected Artist's
    discography that reached a position in the official Billboard music
    Album charts vs those that didn't, by period.</i>
    <br>''',
        layout=widgets.Layout(width="80%"))
    # button to show charts
    button_billboard_albums_charts = widgets.Button(description="Show")
    button_billboard_albums_charts.on_click(show_billboard_album_charts)
    # vertical block
    SECTION_3_TAB_3 = widgets.VBox([s3t3_desc,
                                    button_billboard_albums_charts,])
    #---------------------------------------------------------------------------
    # SECTION 3 build
    section_3_children = [SECTION_3_TAB_1,
                          SECTION_3_TAB_2,
                          SECTION_3_TAB_3,]
    section_3 = widgets.Tab()
    section_3.children = section_3_children
    section_3.set_title(0, 'Discogs Ratings')
    section_3.set_title(1, 'Billboard 100')
    section_3.set_title(2, 'Billboard Albums')
    section_3.selected_index = selection_tab_of_section_3
    section_3_wrapper_label = widgets.HTML(
        value=f'''Current artist: <b>{artist}</b>''',
        layout=widgets.Layout(width="100%"))
    section_3_wrapper = widgets.VBox([section_3_wrapper_label,
                                      section_3,])
    #---------------------------------------------------------------------------
    #---------------------------------------------------------------------------
    # SECTION 4 "Visualisations - Sentiment Analysis"
    #---------------------------------------------------------------------------
    #---------------------------------------------------------------------------
    # SECTION 4 | TAB 1 "Albums and Songs Sentiment"
    s4t1_desc = widgets.HTML(
        value=f'''<font color='{descriptions_colour}'><font size = "-2">
    <i>Visualises the sentiment score of individual lyrics. Choose to
    display by Album or individually by Tracks of a selected Album.</i>
    <br>''',
        layout=widgets.Layout(width="80%"))
    global sentiment_dropdown1, sentiment_dropdown2
    # button to show charts
    button_sentiment_analysis = widgets.Button(description="Show")
    button_sentiment_analysis.on_click(show_sentiment_graphs)
    # vertical block
    # define tab depending on the value of the global variable
    # (the second dropdown is only shown in per-album track mode)
    if sentiment_dropdown1.value == 'tracks by album':
        SECTION_4_TAB_1 = widgets.VBox([s4t1_desc,
                                        sentiment_dropdown1,
                                        sentiment_dropdown2,
                                        button_sentiment_analysis, ])
    else:
        SECTION_4_TAB_1 = widgets.VBox([s4t1_desc,
                                        sentiment_dropdown1,
                                        button_sentiment_analysis])
    # trigger inner function when value of the dropdown1 changes
    sentiment_dropdown1.observe(adapt_UI, names='value')
    #---------------------------------------------------------------------------
    # SECTION 4 | TAB 2 "Sentiment and Charts"
    s4t2_desc = widgets.HTML(
        value=f'''<font color='{descriptions_colour}'><font size = "-2">
    <i>Shows a chord diagram comparing the sentiment of the lyrics with
    whether the track reached a position in the official Billboard 100 music
    charts or not.</i>
    <br>''',
        layout=widgets.Layout(width="80%"))
    # button to show Sentiment Vs Charts
    button_sentiment_vs_charts_song = widgets.Button(description="Show")
    button_sentiment_vs_charts_song.on_click(show_sentiment_vs_charts_song)
    # vertical block
    SECTION_4_TAB_2 = widgets.VBox([s4t2_desc,
                                    button_sentiment_vs_charts_song,])
    # SECTION 4 | TAB 3 "Sentiment Score over time"
    s4t3_desc = widgets.HTML(
        value=f'''<font color='{descriptions_colour}'><font size = "-2">
    <i>Displays tracks grouped by sentiment score over time. Shown as all
    tracks together as well as charted and uncharted tracks individually.<br>
    <font color='{colour_palette['green']}'><b>GREEN</b>
    <font color='{descriptions_colour}'> represents tracks with Positive,
    <font color='{colour_palette['grey']}'><b>GREY</b>
    <font color='{descriptions_colour}'> with Neutral, and
    <font color='{colour_palette['red']}'><b>RED</b>
    <font color='{descriptions_colour}'> with Negative sentiment score. Each
    gridline represents a single track.
    </i>
    <br>''',
        layout=widgets.Layout(width="80%"))
    # button to show Sentiment score over time
    button_sentiment_over_time = widgets.Button(description="Show")
    button_sentiment_over_time.on_click(show_sentiment_score_ovr_time)
    # vertical block
    SECTION_4_TAB_3 = widgets.VBox([s4t3_desc,
                                    button_sentiment_over_time,])
    #---------------------------------------------------------------------------
    # SECTION 4 build
    section_4_children = [SECTION_4_TAB_1,
                          SECTION_4_TAB_2,
                          SECTION_4_TAB_3,]
    section_4 = widgets.Tab()
    section_4.children = section_4_children
    section_4.set_title(0, 'Lyrics Sentiment')
    section_4.set_title(1, 'Sentiment vs Charts')
    section_4.set_title(2, 'Sentiment Over Time')
    section_4.selected_index = selection_tab_of_section_4
    section_4_wrapper_label = widgets.HTML(
        value=f'''Current artist: <b>{artist}</b>''',
        layout=widgets.Layout(width="100%"))
    section_4_wrapper = widgets.VBox([section_4_wrapper_label,
                                      section_4,])
    #---------------------------------------------------------------------------
    # FINAL UI compiler
    #---------------------------------------------------------------------------
    # NOTE: the local name UI shadows this function inside its own body;
    # harmless (the module-level def is unchanged), but a rename would be
    # clearer.
    UI = widgets.Accordion(children=[section_1_wrapper,
                                     section_2_wrapper,
                                     section_3_wrapper,
                                     section_4_wrapper])
    UI.set_title(0, 'Configuration')
    UI.set_title(1, 'Visualisations - Overview and Lexical Diversity')
    UI.set_title(2, 'Visualisations - Ratings and Success')
    UI.set_title(3, 'Visualisations - Sentiment Analysis')
    # reopen the accordion section the user had selected before the redraw
    UI.selected_index = selected_section
    display(UI)
    #---------------------------------------------------------------------------
{
"api_name": "nltk.download",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "nltk.download",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.s... |
20803043179 | from django.contrib import messages
def info(request, msg):
"""
Log the message to the current page template if request is not None. Log msg
to stdout also.
"""
assert len(msg) > 0
if request is not None:
messages.info(request, msg, extra_tags="alert alert-secondary", fail_silently=True)
print(msg)
def warning(request, msg):
"""
Log the message as a warning (orange) appearance on the page iff request is not None.
Also log to stdout.
"""
assert len(msg) > 0
if request is not None:
messages.warning(request, msg, extra_tags="alert alert-warning", fail_silently=True)
print("WARNING: {}".format(msg))
def add_messages(request, context):
assert context is not None
as_at = context.get('most_recent_date', None)
sector = context.get('sector', None)
if as_at:
info(request, 'Prices current as at {}.'.format(as_at))
if sector:
info(request, "Only stocks from {} are shown.".format(sector))
| Robinqiuau/asxtrade | src/viewer/app/messages.py | messages.py | py | 999 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.messages.info",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.contrib.messages.warning",
"line_number": 21,
"usage_type": "call"
},
{
... |
14715992978 | from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String, Numeric, text
engine = create_engine('postgresql://postgres:1@localhost/news_db', echo = True)
meta = MetaData() # box
students = Table(
'students' , meta ,
Column('id' , Integer , primary_key=True),
Column('first_name' , String),
Column('last_name' , String)
)
user = Table(
'users' , meta ,
Column('id' , Integer , primary_key=True),
Column('fullname' , String),
Column('money' , Numeric)
)
meta.create_all(engine)
# -----------------------------------------------------
conn = engine.connect()
student2 = text("insert into students(first_name , last_name) values ('jahongir' , 'Abdushukurov')")
# student1 = students.insert().values(first_name = 'Botirjon' , last_name = 'Botiraliyev')
# user1 = user.insert().values(fullname = 'Absaitov Dilshod' , money = 10_000)
conn.execute(student2)
| devabsaitov/self_study | sqlalchemy_lesson/Basic/3_insert _expression.py | 3_insert _expression.py | py | 912 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.MetaData",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Table",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sqlalchemy... |
23403038536 | #1
import mysql.connector
connection = mysql.connector.connect(
host = '127.0.0.1',
port = 3306,
database = 'flight_game',
user = 'dbuser',
password = 'pass_word'
)
def showairport(icao):
    """Print the ident, name and country of the airport matching *icao*.

    Security fix: the previous version concatenated the raw user input into
    the SQL string (``... WHERE ident='" + icao + "'"``), which allowed SQL
    injection. The ident is now bound as a query parameter, and the debug
    ``print(sql)`` of the interpolated statement is dropped.
    """
    sql = "select ident, name, iso_country from airport WHERE ident = %s"
    cursor = connection.cursor()
    # Let the driver quote/escape the user-supplied value.
    cursor.execute(sql, (icao,))
    result = cursor.fetchall()
    if cursor.rowcount > 0:
        for row in result:
            print(f"The ICAO {row[0]} corresponds to {row[1]} and the country is {row[2]}.")
    return
icao = input("Enter ICAO of the airport to search: ")
showairport(icao)
#2
import mysql.connector
connection = mysql.connector.connect(
host = '127.0.0.1',
port = 3306,
database = 'flight_game',
user = 'dbuser',
password = 'pass_word')
def list_airports_by_type(country_code):
    """Print how many airports of each type exist in the given country.

    *country_code* is an ISO country code (e.g. 'FI'); output is one line
    per airport type, ordered by type.
    """
    # Parameterized aggregate query: count airports per type in the country.
    query = """
    SELECT type, COUNT(*) as count
    FROM airport
    WHERE iso_country = %s
    GROUP BY type
    ORDER BY type
    """
    cur = connection.cursor()
    cur.execute(query, (country_code,))
    rows = cur.fetchall()
    if cur.rowcount > 0:
        for row in rows:
            print(f"{row[1]} {row[0]} airports")
    else:
        print(f"No airports found for country code: {country_code}")
country_code = input("Enter the country code (e.g., FI for Finland): ")
list_airports_by_type(country_code)
#3
import mysql.connector
from geopy.distance import geodesic
connection = mysql.connector.connect(
host='127.0.0.1',
port=3306,
database='flight_game',
user='dbuser',
password='pass_word'
)
def get_airport_coordinates(icao):
    """Return the (latitude_deg, longitude_deg) row for *icao*, or None.

    Prints a not-found message when the ident has no row in the database.
    """
    query = "SELECT latitude_deg, longitude_deg FROM airport WHERE ident = %s"
    cur = connection.cursor()
    cur.execute(query, (icao,))
    row = cur.fetchone()
    if not row:
        print(f"Airport with ICAO code {icao} not found in the database.")
        return None
    return row
def calculate_distance_between_airports(icao1, icao2):
    """Return the geodesic distance in kilometres between two airports.

    Returns None when either ICAO code cannot be resolved to coordinates.
    """
    first = get_airport_coordinates(icao1)
    second = get_airport_coordinates(icao2)
    # Guard clause: bail out if either lookup failed.
    if not (first and second):
        return None
    # geopy's geodesic takes (lat, lon) tuples.
    point_a = (first[0], first[1])
    point_b = (second[0], second[1])
    return geodesic(point_a, point_b).kilometers
icao1 = input("Enter ICAO code of the first airport: ")
icao2 = input("Enter ICAO code of the second airport: ")
distance = calculate_distance_between_airports(icao1, icao2)
if distance is not None:
print(f"The distance between {icao1} and {icao2} is approximately {distance:.2f} kilometers.")
else:
print("Distance calculation failed. Please check the ICAO codes and ensure they exist in the database.")
connection.close()
| nguyenhis/MODULE8 | main.py | main.py | py | 3,024 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mysql.connector.connector.connect",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 3,
"usage_type": "name"
},
{
"api... |
40965965237 | from django.urls import path
from django.views.decorators.cache import cache_page, never_cache
from catalog.apps import CatalogConfig
from catalog.views import HomeView, ContactsView, ProductDetailView, ProductListView, ProductDeleteView, \
ProductCreateView, \
ProductUpdateView, BlogRecordListView, \
BlogRecordDetailView, BlogRecordCreateView, BlogRecordUpdateView, BlogRecordDeleteView, \
BlogRecordDeactivatedListView, toggle_activity, VersionDetailView, VersionListView, VersionCreateView, CategoryListView
# URL namespace for reversing, e.g. "<app_name>:product_detail".
app_name = CatalogConfig.name
urlpatterns = [
    path('', HomeView.as_view(), name='homepage'),
    path('products/', ProductListView.as_view(), name='products_list'),
    path('contacts/', ContactsView.as_view(), name='contacts'),
    # Product detail is cached for 60s; every mutating view is never_cache'd.
    path('product/<int:pk>/', cache_page(60)(ProductDetailView.as_view()), name='product_detail'),
    path('product/create/', never_cache(ProductCreateView.as_view()), name='product_create'),
    path('product/update/<int:pk>/', never_cache(ProductUpdateView.as_view()), name='product_update'),
    path('product/delete/<int:pk>/', never_cache(ProductDeleteView.as_view()), name='product_delete'),
    path('category_list/', CategoryListView.as_view(), name='category_list'),
    # Product versions.
    path('version/create/', never_cache(VersionCreateView.as_view()), name='version_create'),
    path('version/<int:pk>/', VersionDetailView.as_view(), name='version_detail'),
    # Blog records are addressed by slug; a separate list shows deactivated ones.
    path('blog_records/', BlogRecordListView.as_view(), name='blog_records'),
    path('blog_records_deactivated/', BlogRecordDeactivatedListView.as_view(), name='blog_records_deactivated'),
    path('blog_records/<slug:slug>/', BlogRecordDetailView.as_view(), name='blog_record_detail'),
    path('blog_record/create/', never_cache(BlogRecordCreateView.as_view()), name='blog_record_create'),
    path('blog_record/update/<slug:slug>/', never_cache(BlogRecordUpdateView.as_view()), name='blog_record_update'),
    path('blog_record/delete/<slug:slug>/', never_cache(BlogRecordDeleteView.as_view()), name='blog_record_delete'),
    path('blog_record/toggle/<slug:slug>/', toggle_activity, name='toggle_activity'),
]
| DSulzhits/06_2_3 | catalog/urls.py | urls.py | py | 2,135 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "catalog.apps.CatalogConfig.name",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "catalog.apps.CatalogConfig",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"... |
19141473384 | import matplotlib.pyplot as plt
import sys
from numpy import *
def _poly(a, x):
"""Returns the function value of a polynomial with coefficients a and variable x.
Parameters
----------
a : list
a list of coefficients
x : number
the variable of the polynomial
Returns
-------
number
the function value of the polynomial
"""
y = 0
for i in range(len(a)):
y = y + a[i]*x**i
return y
def _powers(vector, first_power, second_power):
"""Returns a matrix of a given vector and the same vector raised to the first and second power given as parameters.
Parameters
----------
vector : list
a list of numbers
first_power : number
the first power to raise the vector to
second_power : number
the second power to raise the vector to
Returns
-------
array
a compact representation of the powers matrix
"""
if not len(vector):
print("Error in power: vector is empty")
return array([])
if not (first_power >= 0 and second_power >= 0):
print("Error in power: powers must be larger than 0")
return array([[]])
#return matrix
power_matrix = []
#vector of all powers
power_vector = []
#make sure array contains the vector of all unique powers ranging from first_power to second_power
for i in range(first_power, second_power+1):
power_vector.append(i)
#create matrix of powers
for component in vector:
power_matrix.append([component**i for i in power_vector])
return array(power_matrix)
def _non_linear_regression(X, Y, n):
    """Fit a degree-*n* polynomial to the points (X, Y) by least squares.

    Builds the Vandermonde (design) matrix of X and solves the normal
    equations (X'X)^-1 X'Y.

    Returns
    -------
    array
        coefficient vector, constant term first
    """
    design = _powers(X, 0, n)
    targets = _powers(Y, 1, 1)
    design_t = design.transpose()
    coefficients = matmul(linalg.inv(matmul(design_t, design)), matmul(design_t, targets))
    return coefficients[:, 0]
def main():
    """Read data points from a file, fit a polynomial and plot the result.

    Parameters
    ----------
    sys.argv[1] : str
        the path of the data file (read with numpy.loadtxt)
    sys.argv[2] : int
        the degree of the polynomial to fit
    """
    data_set = loadtxt(sys.argv[1])
    # Transpose so row 0 holds all x values and row 1 all y values.
    data = transpose(data_set)
    X = data[0]
    Y = data[1]
    a = _non_linear_regression(X, Y, int(sys.argv[2]))
    # Sample the fitted polynomial over the observed x-range in ~0.2 steps.
    x_start = X[0]
    x_end = X[len(X)-1]
    X2 = linspace(x_start,x_end,int((x_end-x_start)/0.2)).tolist()
    Y2 = [ _poly(a, x) for x in X2 ]
    # Raw points as red dots, fitted curve as a line.
    plt.plot(X,Y,'ro')
    plt.plot(X2,Y2)
    plt.show()
if __name__ == "__main__":
    main()
{
"api_name": "sys.argv",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot... |
3207468200 | from __future__ import annotations
import re
from typing import Any, Callable, NamedTuple
def parse(rows: list[str]) -> Board:
    """Build a Board from the raw input lines.

    All but the last two rows form the map; each map row is padded with a
    one-cell border of spaces so neighbour lookups never go out of range.
    The final row holds the commands: alternating step counts and L/R turns,
    starting with a number.
    """
    width = max(len(row) for row in rows)
    board_rows = [" " * (width + 2)]
    for index in range(len(rows) - 2):
        row = rows[index]
        board_rows.append(f" {row}" + " " * (width - len(row) + 1))
    board_rows += [" " * (width + 2)]
    commands = []
    parse_int = True
    # Tokens alternate number, letter, number, ... so flip the flag each time.
    for field in re.findall(r"[A-Z]+|\d+", rows[-1]):
        commands.append(int(field) if parse_int else field)
        parse_int = not parse_int
    return Board(board_rows, commands, get_face(width))
def get_face(width: int) -> Face:
    """Pick the cube wiring: the small example layout for narrow maps,
    the puzzle-input layout otherwise."""
    if width < 20:
        return get_face_example()
    return get_face_input()
def get_face_example() -> Face:
    """Hard-coded face layout and edge transitions for the example map.

    Each lambda maps a position on a face's edge to
    (destination face, destination position, new direction).
    Returns face 1, which contains the starting tile.
    """
    face1 = Face(1, Position(1, 9), Position(4, 12))
    face2 = Face(2, Position(5, 1), Position(8, 4))
    face3 = Face(3, Position(5, 5), Position(8, 8))
    face4 = Face(4, Position(5, 9), Position(8, 12))
    face5 = Face(5, Position(9, 9), Position(12, 12))
    face6 = Face(6, Position(9, 13), (Position(12, 16)))
    face1.right = lambda p: (face6, Position(13 - p.row, 16), Position(0, -1))
    face1.down = lambda p: (face4, Position(5, p.column), Position(1, 0))
    face1.left = lambda p: (face3, Position(5, 4 + p.row), Position(1, 0))
    face1.up = lambda p: (face2, Position(5, 13 - p.column), Position(1, 0))
    face2.right = lambda p: (face3, Position(p.row, 5), Position(0, 1))
    face2.down = lambda p: (face5, Position(
        16, 13 - p.column), Position(-1, 0))
    face2.left = lambda p: (face6, Position(
        16, 12 + p.column), Position(-1, 0))
    face2.up = lambda p: (face1, Position(1, 13 - p.column), Position(1, 0))
    face3.right = lambda p: (face4, Position(p.row, 9), Position(0, 1))
    face3.down = lambda p: (face5, Position(17 - p.column, 9), Position(0, 1))
    face3.left = lambda p: (face2, Position(p.row, 4), Position(0, -1))
    face3.up = lambda p: (face1, Position(p.column - 4, 9), Position(0, 1))
    face4.right = lambda p: (face6, Position(9, 21 - p.row), Position(1, 0))
    face4.down = lambda p: (face5, Position(9, p.column), Position(1, 0))
    face4.left = lambda p: (face3, Position(p.row, 8), Position(0, -1))
    face4.up = lambda p: (face1, Position(4, p.column), Position(-1, 0))
    face5.right = lambda p: (face6, Position(p.row, 13), Position(0, 1))
    face5.down = lambda p: (face2, Position(8, 13 - p.column), Position(-1, 0))
    face5.left = lambda p: (face3, Position(8, 17 - p.row), Position(-1, 0))
    face5.up = lambda p: (face4, Position(8, p.column), Position(-1, 0))
    face6.right = lambda p: (face1, Position(13 - p.row, 12), Position(0, -1))
    face6.down = lambda p: (face2, Position(21 - p.column, 1), Position(0, 1))
    face6.left = lambda p: (face5, Position(p.row, 12), Position(0, -1))
    face6.up = lambda p: (face4, Position(21 - p.column, 12), Position(0, -1))
    return face1
def get_face_input() -> Face:
    """Hard-coded face layout and edge transitions for the puzzle input.

    Same contract as get_face_example: each lambda maps an edge position to
    (destination face, destination position, new direction). Returns face 1.
    """
    face1 = Face(1, Position(1, 51), Position(50, 100))
    face2 = Face(2, Position(1, 101), Position(50, 150))
    face3 = Face(3, Position(51, 51), Position(100, 100))
    face4 = Face(4, Position(101, 1), Position(150, 50))
    face5 = Face(5, Position(101, 51), Position(150, 100))
    face6 = Face(6, Position(151, 1), Position(200, 50))
    face1.right = lambda p: (face2, Position(p.row, 101), Position(0, 1))
    face1.down = lambda p: (face3, Position(51, p.column), Position(1, 0))
    face1.left = lambda p: (face4, Position(151 - p.row, 1), Position(0, 1))
    face1.up = lambda p: (face6, Position(100 + p.column, 1), Position(0, 1))
    face2.right = lambda p: (face5, Position(
        151 - p.row, 100), Position(0, -1))
    face2.down = lambda p: (face3, Position(
        p.column - 50, 100), Position(0, -1))
    face2.left = lambda p: (face1, Position(p.row, 100), Position(0, -1))
    face2.up = lambda p: (face6, Position(
        200, p.column - 100), Position(-1, 0))
    face3.right = lambda p: (face2, Position(50, p.row + 50), Position(-1, 0))
    face3.down = lambda p: (face5, Position(101, p.column), Position(1, 0))
    face3.left = lambda p: (face4, Position(101, p.row - 50), Position(1, 0))
    face3.up = lambda p: (face1, Position(50, p.column), Position(-1, 0))
    face4.right = lambda p: (face5, Position(p.row, 51), Position(0, 1))
    face4.down = lambda p: (face6, Position(151, p.column), Position(1, 0))
    face4.left = lambda p: (face1, Position(151 - p.row, 51), Position(0, 1))
    face4.up = lambda p: (face3, Position(50 + p.column, 51), Position(0, 1))
    face5.right = lambda p: (face2, Position(
        151 - p.row, 150), Position(0, -1))
    face5.down = lambda p: (face6, Position(
        p.column + 100, 50), Position(0, -1))
    face5.left = lambda p: (face4, Position(p.row, 50), Position(0, -1))
    face5.up = lambda p: (face3, Position(100, p.column), Position(-1, 0))
    face6.right = lambda p: (face5, Position(
        150, p.row - 100), Position(-1, 0))
    face6.down = lambda p: (face2, Position(1, p.column + 100), Position(1, 0))
    face6.left = lambda p: (face1, Position(1, p.row - 100), Position(1, 0))
    face6.up = lambda p: (face4, Position(150, p.column), Position(-1, 0))
    return face1
class Position:
    """A (row, column) grid coordinate supporting ``+`` and element-wise ``%``.

    Instances are hashable and compare equal only to other Position objects
    with the same row and column.
    """

    def __init__(self, row: int, column: int) -> None:
        self.row = row
        self.column = column

    def _as_tuple(self) -> "tuple[int, int]":
        # Canonical value used for repr, equality and hashing.
        return (self.row, self.column)

    def __repr__(self) -> str:
        return repr(self._as_tuple())

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Position):
            return False
        return self._as_tuple() == other._as_tuple()

    def __hash__(self) -> int:
        return hash(self._as_tuple())

    def __add__(self, other: "Position") -> "Position":
        return Position(self.row + other.row, self.column + other.column)

    def __mod__(self, other: "Position") -> "Position":
        # Component-wise modulo, used for wrapping around the board.
        return Position(self.row % other.row, self.column % other.column)
class Face:
    """One face of the cube: a named rectangular region of the board.

    ``start``/``end`` are the top-left and bottom-right corners (inclusive).
    The ``right``/``down``/``left``/``up`` attributes are wired up after
    construction by ``get_face_example``/``get_face_input``; each maps a
    position on this face's edge to
    (destination face, destination position, new direction).
    """

    def __init__(self, name: int, start: Position, end: Position) -> None:
        self.name = name
        self.start = start
        self.end = end
        # Edge-transition callbacks, assigned externally (see get_face_*).
        self.right: Callable[[Position], tuple[Face, Position, Position]]
        self.down: Callable[[Position], tuple[Face, Position, Position]]
        self.left: Callable[[Position], tuple[Face, Position, Position]]
        self.up: Callable[[Position], tuple[Face, Position, Position]]

    def __repr__(self) -> str:
        return f"{self.name}"

    def move_off_edge(self, position: Position, direction: Position) -> tuple[Face, Position, Position]:
        """Follow the edge transition for a step off this face.

        Bug fix: the previous version compared *direction* (a Position)
        against plain tuples such as ``(0, 1)``; ``Position.__eq__`` returns
        False for non-Position operands, so no branch could ever match and
        the method always raised. Compare against Position instances, the
        same way Board.cube_move does.
        """
        if direction == Position(0, 1) and position.column == self.end.column:
            return self.right(position)
        elif direction == Position(1, 0) and position.row == self.end.row:
            return self.down(position)
        elif direction == Position(0, -1) and position.column == self.start.column:
            return self.left(position)
        elif direction == Position(-1, 0) and position.row == self.start.row:
            return self.up(position)
        raise ValueError(
            f"Cannot move off edge on face {self.name} at {position} in direction {direction}")
class Board:
    """The padded map grid, the command list and the current walker state.

    ``rows`` are space-padded (see ``parse``) so indices are 1-based and
    neighbour lookups never go out of range. ``position``/``direction`` are
    Position instances; ``face`` is the cube face currently occupied.
    """
    def __init__(self, rows: list[str], commands: list[Any], face: Face) -> None:
        self.rows = rows
        self.commands = commands
        # Start on the first open tile of the top map row, facing right.
        self.position = Position(1, rows[1].index("."))
        self.direction = Position(0, 1)
        self.width = len(rows[0])
        self.height = len(rows)
        self.face = face
    def __getitem__(self, position: Position) -> str:
        # Character at the given position: ' ' (void), '.' (open) or '#' (wall).
        return self.rows[position.row][position.column]
    def go(self, part: int) -> None:
        """Run every command; part 1 wraps the flat map, part 2 walks the cube."""
        for command in self.commands:
            if isinstance(command, int):
                self.move(command) if part == 1 else self.cube_move(command)
            elif command == "L":
                self.left()
            elif command == "R":
                self.right()
            else:
                raise ValueError(f"Unexpected command: {command}.")
    def move(self, steps: int) -> None:
        """Walk up to *steps* tiles, wrapping around the flat map and
        stopping early at the first wall."""
        for _ in range(steps):
            new_position = self.position + self.direction
            new_position %= Position(self.height, self.width)
            # Skip across the void/padding until re-entering the map.
            while self[new_position] == ' ':
                new_position += self.direction
                new_position %= Position(self.height, self.width)
            if self[new_position] == "#":
                return
            self.position = new_position
    def cube_move(self, steps: int) -> None:
        """Walk up to *steps* tiles treating the map as a folded cube:
        stepping past a face edge follows that face's transition callback,
        which may change face, position and direction at once."""
        for _ in range(steps):
            if self.direction == Position(0, 1) and self.position.column == self.face.end.column:
                new_face, new_position, new_direction = self.face.right(
                    self.position)
            elif self.direction == Position(1, 0) and self.position.row == self.face.end.row:
                new_face, new_position, new_direction = self.face.down(
                    self.position)
            elif self.direction == Position(0, -1) and self.position.column == self.face.start.column:
                new_face, new_position, new_direction = self.face.left(
                    self.position)
            elif self.direction == Position(-1, 0) and self.position.row == self.face.start.row:
                new_face, new_position, new_direction = self.face.up(
                    self.position)
            else:
                # Still inside the current face: plain one-tile step.
                new_face = self.face
                new_position = self.position + self.direction
                new_direction = self.direction
            if self[new_position] == ' ':
                raise ValueError("Fell of the cube.")
            if self[new_position] == "#":
                return
            self.face = new_face
            self.position = new_position
            self.direction = new_direction
    def right(self) -> None:
        # Rotate the direction vector 90 degrees clockwise (row axis points down).
        self.direction = Position(self.direction.column, -self.direction.row)
    def left(self) -> None:
        # Rotate the direction vector 90 degrees counter-clockwise.
        self.direction = Position(-self.direction.column, self.direction.row)
    def password(self) -> int:
        """1000*row + 4*column + facing index (right/down/left/up = 0..3)."""
        directions = [Position(0, 1), Position(1, 0), Position(0, -1), Position(-1, 0)]
        return 1000 * self.position.row + 4 * self.position.column + directions.index(self.direction)
| heijp06/AoC-2022 | day22/board.py | board.py | py | 10,253 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.findall",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"l... |
29296183032 | from django.views import generic # 2nd
from other.models import Ramadan, Feature
#==========================================================
class RamadanListView(generic.ListView):
    """Paginated list of Ramadan entries, 4 per page."""
    model = Ramadan
    paginate_by = 4
class RamadanDetailView(generic.DetailView):
    """Detail page for a single Ramadan entry."""
    model = Ramadan
#==========================================================
class FeatureListView(generic.ListView):
    """Paginated list of Feature entries, 2 per page."""
    model = Feature
    paginate_by = 2
class FeatureDetailView(generic.DetailView):
    """Detail page for a single Feature entry."""
    model = Feature
| anowar143/django-news-frontend | src/other/views.py | views.py | py | 509 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.views.generic.ListView",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "other.models.Ramadan",
"line_number": 8,
"usage_type": "name"
},
{
"api_nam... |
24854285401 | import requests
# Watched instrument (Alpha Vantage symbol) and the news search term.
STOCK_NAME = "TSLA"
COMPANY_NAME = "Tesla Inc"
STOCK_ENDPOINT = "https://www.alphavantage.co/query"
NEWS_ENDPOINT = "https://newsapi.org/v2/everything"
# NOTE(review): API keys committed in source — rotate them and load from
# environment variables instead.
api_key = "6KPNON1CEDUUZNPO"
news_api_key = "9738bfb7f9b648b197ca544a5d4da261"
stock_parameters = {
    "function": "TIME_SERIES_DAILY",
    "symbol": STOCK_NAME,
    "apikey": api_key,
}
# Daily series: a dict keyed by date; the code treats the first entry as the
# latest close and the second as the previous close.
response = requests.get(STOCK_ENDPOINT, params=stock_parameters)
data = response.json()["Time Series (Daily)"]
data_list = [value for (key, value) in data.items()]
yesterday_data = data_list[0]['4. close']
day_before_yesterday_data = data_list[1]['4. close']
# Absolute day-over-day change as a percentage of the latest close.
difference = abs(float(yesterday_data) - float(day_before_yesterday_data))
diff_percent = (difference / float(yesterday_data)) * 100
# NOTE(review): abs() makes diff_percent >= 0, so this condition fires on any
# price change at all — presumably a larger threshold (e.g. 5) was intended.
if diff_percent > 0:
    news_params = {
        "apiKey": news_api_key,
        "qInTitle": COMPANY_NAME,
    }
    news_response = requests.get(NEWS_ENDPOINT, params=news_params)
    articles = news_response.json()["articles"]
    # Keep only the first three articles returned.
    three_articles = articles[:3]
    print(three_articles)
| rkhidesh/100-Days-Of-Code | 100 Days/day36/main.py | main.py | py | 1,035 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 30,
"usage_type": "call"
}
] |
39946543852 | import json
import pathlib
from function import handler
from testing.mock import LambdaContext
current_dir = pathlib.Path(__file__).parent.resolve()
def test_message_ept_good_request() -> None:
    """The handler returns 200 and a Hello-World JSON body for a
    well-formed recorded API Gateway event."""
    # Fixture: a recorded API Gateway proxy event stored next to the tests.
    with open(
        f"{current_dir}/data/test_message_ept_good_request.json", "r", encoding="utf-8"
    ) as file:
        request = json.load(file)
    result = handler(request, LambdaContext())
    # The handler serializes the body itself, hence the json.dumps comparison.
    assert result["statusCode"] == 200 and result["body"] == json.dumps(
        {"message": "Hello World!"}
    )
def test_message_ept_no_headers() -> None:
    """The handler returns 400 with a parse-error body when the recorded
    event is missing its headers."""
    # Fixture: an API Gateway event deliberately missing headers.
    with open(
        f"{current_dir}/data/test_message_ept_no_headers.json", "r", encoding="utf-8"
    ) as file:
        request = json.load(file)
    result = handler(request, LambdaContext())
    assert result["statusCode"] == 400 and result["body"] == json.dumps(
        {"error": "Unable to parse body request"}
    )
| AlgoWolf-com/aw2-api-backend | api-gateway/user-api/tests/function_test.py | function_test.py | py | 984 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "function.handler",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "testing.mock.LambdaContext",
... |
42022013353 | import pandas as pd
from docx import Document
def get_lst():
    """Collect (question, tags, answer) triples from two source documents.

    Returns
    -------
    list of tuple
        Triples from the compliance spreadsheet
        (question, synonyms/tags, answer), followed by triples harvested
        from the legal Word document's tables in the layout
        (question, '', answer).
    """
    # Spreadsheet rows: one triple per row, columns addressed by header name.
    df = pd.read_excel('Sample_Questions_Compliance.xls')
    lst = []
    for index, row in df.iterrows():
        lst.append((row['Model Question'], row['Additional Tags / Synonyms for questions (from QnA Chatbot)'],row['Model Answer']))
    # Word tables: walk every cell of every table row.
    wordDoc = Document('Sample_Questions_Legal.docx')
    for table in wordDoc.tables:
        for row in table.rows:
            sub = [0,'',0]
            i = 0
            for cell in row.cells:
                try:
                    # Purely numeric cells (e.g. row numbers) parse fine and
                    # are skipped; only non-numeric text is collected.
                    int(cell.text)
                except Exception as e:
                    # First non-numeric cell -> question (sub[0]);
                    # second -> answer (stored in sub[2], sub[1] stays '').
                    if i in [0, 1]:
                        if i == 0:
                            sub[i] = cell.text
                        if i == 1:
                            sub[i+1] = cell.text
                    if cell.text != '':
                        i = i + 1
            lst.append(tuple(sub))
    return lst
def str_in_lst(quest, lst):
    """Return the answer for the first entry whose question or tags contain *quest*.

    Each entry of *lst* is a triple (question, tags, answer); the answer
    (index 2) is returned when *quest* is a substring of the question
    (index 0) or of the stringified tags (index 1). Returns False when no
    entry matches (including for an empty list).

    Bug fix: the previous version had ``else: return False`` inside the
    loop, so it gave up after inspecting only the first entry.
    """
    for entry in lst:
        if quest in entry[0]:
            return entry[2]
        # Tags may be non-string (e.g. NaN from pandas), hence str().
        elif quest in str(entry[1]):
            return entry[2]
    return False
#print(get_lst())
| silasalberti/gpt3-comprehendum | backend/intelligent_parse.py | intelligent_parse.py | py | 1,145 | python | en | code | 17 | github-code | 36 | [
{
"api_name": "pandas.read_excel",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "docx.Document",
"line_number": 11,
"usage_type": "call"
}
] |
28295707377 | #!/bin/env python3
import nltk
# Load the ATIS grammar and the test sentences, then print, for each
# sentence, the sentence followed by the number of parse trees a bottom-up
# chart parser finds for it (tab-separated).
grammar = nltk.data.load("grammars/atis-grammar-original.cfg")
sents = nltk.data.load("grammars/atis-test-sentences.txt")
sents = nltk.parse.util.extract_test_sentences(sents)
parser = nltk.parse.BottomUpChartParser(grammar)
for sent, _ in sents:
    try:
        no_trees = len(list(parser.parse(sent)))
    except Exception:
        # Unparseable sentences (e.g. words outside the grammar) count as 0.
        no_trees = 0
    print(' '.join(sent), '\t', no_trees, sep='')
{
"api_name": "nltk.data.load",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "nltk.data",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "nltk.data.load",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "nltk.data",
"line_numbe... |
26822270635 | import requests
import json
import openpyxl
import pandas as pd
from dotenv import load_dotenv
load_dotenv()
import os
import time
import asyncio
import aiohttp
api_key_binance = os.environ.get('API_B')
api_secret_binance = os.environ.get('SECRET_B')
async def get_binance_futures_tickers():
    """Return every Binance futures symbol whose name contains 'USDT'.

    Fetches the 24h ticker list from the public futures API; no auth needed.
    """
    url = 'https://fapi.binance.com/fapi/v1/ticker/24hr'
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            data = await response.json()
            futures_tickers = [ticker['symbol'] for ticker in data if 'USDT' in ticker['symbol']]
            return futures_tickers
async def get_data(symbol, period, limit):
    """Fetch up to *limit* historical funding-rate records for *symbol*.

    Parameters are forwarded as query params to the /fapi/v1/fundingRate
    endpoint, authenticated via the API-key header.
    NOTE(review): 'period' is passed through as-is — confirm the endpoint
    actually honours it.
    """
    endpoint = 'https://fapi.binance.com/fapi/v1/fundingRate'
    headers = {
        'X-MBX-APIKEY': api_key_binance
    }
    params = {
        'symbol': symbol,
        'period': period,
        'limit': limit
    }
    async with aiohttp.ClientSession() as session:
        async with session.get(endpoint, headers=headers, params=params) as response:
            data = await response.json()
            return data
async def main():
    """Download funding-rate history for every USDT futures symbol, compute
    annualized average rates over several windows, export them to
    funding.xlsx and print the per-window extremes."""
    start = time.time()
    tickers = await get_binance_futures_tickers()
    print(tickers)
    data = []
    tasks = []
    # Fan out one request per symbol and await them all concurrently.
    for symbol in tickers:
        task = asyncio.ensure_future(get_data(symbol, '1h', 300))
        tasks.append(task)
    responses = await asyncio.gather(*tasks)
    # Flatten the per-symbol record lists into one list of rows.
    for symbol_data in responses:
        print(symbol_data)
        for row in symbol_data:
            # row["symbol"] = symbol
            data.append(row)
    df = pd.DataFrame(data)
    df["fundingTime"] = pd.to_datetime(df["fundingTime"], unit='ms')
    df = df.set_index("fundingTime")
    df["fundingRate"] = df["fundingRate"].astype(float)
    print(df)
    # The *365*3 factor annualizes an 8-hourly rate (3 periods/day); *100 -> %.
    avg_funding_rates = round(df.groupby("symbol")["fundingRate"].mean() * 365 * 3 * 100, 2)
    # NOTE(review): variable names lag the tail() sizes — 90 rows (~30 days of
    # 8h periods) ends up in the "30 days" column below; verify the windows.
    thirteen_days = round(df.groupby("symbol").apply(lambda x: x.tail(90)["fundingRate"].mean() * 365 * 3 * 100), 2)
    # Most recent funding rate per symbol, annualized the same way.
    last_funding_rates = round(df.groupby("symbol")["fundingRate"].last() * 365 * 3 * 100, 2)
    seven_days = round(df.groupby("symbol").apply(lambda x: x.tail(22)["fundingRate"].mean() * 365 * 3 * 100), 2)
    three_days = round(df.groupby("symbol").apply(lambda x: x.tail(10)["fundingRate"].mean() * 365 * 3 * 100), 2)
    one_day = round(df.groupby("symbol").apply(lambda x: x.tail(3)["fundingRate"].mean() * 365 * 3 * 100), 2)
    # One column per averaging window, plus the latest rate.
    result = pd.concat([avg_funding_rates, thirteen_days, seven_days, three_days, one_day, last_funding_rates], axis=1)
    result.columns = ["90 days", '30 days', '7 days', '3 days', '1day', "last_funding_rate"]
    result.to_excel('funding.xlsx', sheet_name='Funding_management')
    print(result)
    # Print the 15 highest/lowest symbols by the 7-day and latest rates.
    print("Top 15 symbols with highest 7 days funding rate:")
    print(result.nlargest(15, "7 days"))
    print("Top 15 symbols with lowest 7 days funding rate:")
    print(result.nsmallest(15, "7 days"))
    print("Top 15 symbols with highest last funding rate:")
    print(result.nlargest(15, "last_funding_rate"))
    print("Top 15 symbols with lowest last funding rate:")
    print(result.nsmallest(15, "last_funding_rate"))
    print(f' done in {time.time() - start} seconds')
if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
    loop.close()
| BobbyAxer/Binance_funding_calc | main.py | main.py | py | 3,519 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
... |
8758398775 | # -*- coding: utf-8 -*-
from odoo import api, fields, models
import base64
from cStringIO import StringIO
import xlsxwriter
from xlsxwriter.utility import xl_range, xl_rowcol_to_cell
class OFRapportGestionStockWizard(models.TransientModel):
    """Wizard that exports a stock management report as an Excel file:
    one line per product, one stock column per selected location, plus
    delivery/receipt details and a forecast-stock formula column."""
    _name = "of.rapport.gestion.stock.wizard"
    # Explicit product selection; when empty, products are discovered from
    # the brand/category/price/activity filters below.
    product_ids = fields.Many2many('product.product', string=u"Articles")
    # Generated xlsx payload (base64) and its download name.
    file = fields.Binary(string='Fichier')
    file_name = fields.Char(string='Nom du fichier', size=64, default='articles.xlsx')
    # Internal locations to report on; each gets its own stock column.
    location_ids = fields.Many2many(
        'stock.location', string="Emplacement", required=True, domain=[('usage', '=', 'internal')])
    # Stock quantities are computed as of this date.
    date_stock = fields.Date(string='Date stock', required=True)
    # Minimum sale price filter ("PV" = prix de vente).
    prix = fields.Float(string="PV", help="Prix de vente minimum")
    brand_ids = fields.Many2many('of.product.brand', string=u"Marques")
    categ_ids = fields.Many2many('product.category', string=u"Catégories")
    # When set, also include products present in in-progress pickings.
    active_product = fields.Boolean(
        string="Articles actifs", help=u"Articles en stock ou présents dans un BL/BR en cours")
    @api.multi
    def action_generate_excel_file(self):
        """Build the report workbook in memory, store it base64-encoded in
        ``file``/``file_name`` and return the window action that re-opens
        this wizard so the user can download the attachment."""
        self.ensure_one()
        display_client_order_ref = self._context.get('display_client_order_ref')
        # Initialize the in-memory workbook (Python 2: cStringIO buffer).
        fp = StringIO()
        workbook = xlsxwriter.Workbook(fp, {'in_memory': True})
        worksheet = workbook.add_worksheet()
        # Cell formats for the column titles and data cells.
        style_title_col = workbook.add_format({
            'bold': True,
        })
        style_text_align = workbook.add_format({
            'align': 'center',
        })
        format_default = workbook.add_format({
            'bold': False,
        })
        # Report header: companies, file creation date, stock date.
        worksheet.write(0, 0, u"Société(s) :", format_default)
        worksheet.write(0, 1, ';'.join(self.location_ids.mapped('company_id').mapped('display_name')), format_default)
        date_file_create = fields.Date.context_today(self)
        worksheet.write(1, 0, u"Date de création\ndu fichier :", format_default)
        worksheet.write(1, 1, date_file_create, format_default)
        worksheet.write(2, 0, u"Date stock(s) :", format_default)
        worksheet.write(2, 1, self.date_stock, format_default)
        # Initialize row/column cursors; one extra column when the final
        # customer reference must be displayed.
        row = 4
        col = 2
        col_nb = 8
        if display_client_order_ref:
            col_nb = 9
        worksheet.set_column(0, 0, 20)
        worksheet.set_column(1, 1, 40)
        worksheet.set_column(2, col_nb + len(self.location_ids), 20)
        # Column titles: reference, designation, one column per location,
        # then customer/sales/delivery/receipt/forecast columns.
        worksheet.write(row, 0, u"Référence", style_title_col)
        worksheet.write(row, 1, u"Désignation", style_title_col)
        for location in self.location_ids:
            worksheet.write(row, col, u"%s" % location.display_name, style_title_col)
            col += 1
        worksheet.write(row, col, u"Client", style_title_col)
        if display_client_order_ref:
            worksheet.write(row, col + 1, u"Client final", style_title_col)
            col += 1
        worksheet.write(row, col + 1, u"Commercial", style_title_col)
        worksheet.write(row, col + 2, u"BL", style_title_col)
        worksheet.write(row, col + 3, u"Qté en commande", style_title_col)
        worksheet.write(row, col + 4, u"BR", style_title_col)
        worksheet.write(row, col + 5, u"Qté à Réceptionner", style_title_col)
        worksheet.write(row, col + 6, u"Stock prévisionnel", style_title_col)
        # Report body: one line per product.
        picking_obj = self.env['stock.picking']
        move_obj = self.env['stock.move']
        sale_order_obj = self.env['sale.order']
        row += 1
        # Date filter for forecast stock moves: created before the stock
        # date and either still pending, or done after the stock date.
        virtual_date_filter = [('create_date', '<=', self.date_stock),
                               '|', ('state', 'in', ('waiting', 'confirmed', 'assigned')),
                               '&', ('state', '=', 'done'), ('date', '>', self.date_stock)]
        products = self.product_ids
        if not products:
            # No explicit selection: build the product domain from filters.
            domain = []
            if self.brand_ids:
                domain += [('brand_id', 'in', self.brand_ids._ids)]
            if self.categ_ids:
                domain += [('categ_id', 'in', self.categ_ids._ids)]
            if self.prix:
                domain += [('list_price', '>=', self.prix)]
            if self.active_product:
                # Also include products moved by in-progress pickings
                # touching the selected locations.
                pickings = picking_obj.search(
                    [('state', 'in', ['waiting', 'confirmed', 'partially_available', 'assigned']),
                     '|', ('location_id', 'in', self.location_ids.ids),
                     ('location_dest_id', 'in', self.location_ids.ids)])
                additionnal_products = pickings.mapped('move_lines').mapped('product_id')
                products |= self.env['product.product'].search(domain + [('id', 'in', additionnal_products._ids)])
            domain += [('qty_available', '>', 0)]
            products |= self.env['product.product'].search(domain)
        for product in products:
            worksheet.write(row, 0, product.default_code, format_default)
            worksheet.write(row, 1, product.name, format_default)
            # Forecast quantities in/out of the selected locations.
            products_virtual_in = sum(move_obj.search([('product_id', '=', product.id),
                                                       ('location_dest_id', 'in',
                                                        self.location_ids.ids)] + virtual_date_filter)
                                      .mapped('product_qty'))
            products_virtual_out = sum(move_obj.search([('product_id', '=', product.id),
                                                        ('location_id', 'in',
                                                         self.location_ids.ids)] + virtual_date_filter)
                                       .mapped('product_qty'))
            col = 2
            for location in self.location_ids:
                # Stock at the location as of date_stock = done moves in
                # minus done moves out.
                nb_products_in = sum(move_obj.search([('product_id', '=', product.id),
                                                      ('location_dest_id', '=', location.id),
                                                      ('state', '=', 'done'),
                                                      ('date', '<=', self.date_stock)]).mapped('product_qty'))
                nb_products_out = sum(move_obj.search([('product_id', '=', product.id),
                                                       ('location_id', '=', location.id),
                                                       ('state', '=', 'done'),
                                                       ('date', '<=', self.date_stock)]).mapped('product_qty'))
                stock_location_qty = nb_products_in - nb_products_out
                worksheet.write(row, col, stock_location_qty, style_text_align)
                col += 1
            # In-progress outgoing pickings (BL) and their customers.
            delivery_pickings = picking_obj.search([('product_id', '=', product.id),
                                                    ('location_id', 'in', self.location_ids.ids),
                                                    ('state', 'not in', ('draft', 'cancel', 'done'))])
            delivery_names = "\n".join([picking.name or '-' for picking in delivery_pickings])
            customer_names = "\n".join([picking.partner_id.name or '-' for picking in delivery_pickings])
            final_customer_names = ""
            if display_client_order_ref:
                orders = sale_order_obj.search([('product_id', '=', product.id)])
                final_customer_names = "\n".join([order.client_order_ref for order in orders if order.client_order_ref])
            vendor_names = ""
            # Collect the salespeople linked to the delivery orders.
            for picking in delivery_pickings:
                orders = sale_order_obj.search([('procurement_group_id', '=', picking.group_id.id)]) \
                    if picking.group_id else []
                vendor_names = "\n".join([order.user_id.name or '-' for order in orders])
            # In-progress incoming pickings (BR).
            receipt_pickings = picking_obj.search([('product_id', '=', product.id),
                                                   ('location_dest_id', 'in', self.location_ids.ids),
                                                   ('state', 'not in', ('draft', 'cancel', 'done'))])
            receipt_names = "\n".join([picking.name or '-' for picking in receipt_pickings])
            worksheet.write(row, col, customer_names, format_default)
            col2 = col
            col3 = col
            if display_client_order_ref:
                worksheet.write(row, col + 1, final_customer_names, format_default)
                col += 1
                col3 += 1
            worksheet.write(row, col + 1, vendor_names, format_default)
            worksheet.write(row, col + 2, delivery_names, format_default)
            worksheet.write(row, col + 3, products_virtual_out, style_text_align)
            worksheet.write(row, col + 4, receipt_names, format_default)
            worksheet.write(row, col + 5, products_virtual_in, style_text_align)
            # Forecast stock as an Excel formula: sum of the location stock
            # cells, minus quantity on order, plus quantity to receive.
            worksheet.write(row, col + 6,
                            "=%s - %s + %s" % (" + ".join([xl_rowcol_to_cell(row, c) for c in xrange(2, col2)]),
                                               xl_rowcol_to_cell(row, col3 + 3),
                                               xl_rowcol_to_cell(row, col3 + 5),
                                               ), style_text_align)
            row += 1
        worksheet.write(row+1, 0, u"Total")
        # Reset the column index used to fill the final "Total" row.
        j = 2
        for _ in self.location_ids:
            worksheet.write(row+1, j, '=SUM(%s)' % (xl_range(5, j, row-1, j)), style_text_align)
            j += 1
        if display_client_order_ref:
            j += 1
        worksheet.write(row+1, j+3, '=SUM(%s)' % (xl_range(5, j+3, row-1, j+3)), style_text_align)
        worksheet.write(row+1, j+5, '=SUM(%s)' % (xl_range(5, j+5, row-1, j+5)), style_text_align)
        worksheet.write(row+1, j+6, '=SUM(%s)' % (xl_range(5, j+6, row-1, j+6)), style_text_align)
        workbook.close()
        fp.seek(0)
        data = fp.read()
        fp.close()
        # Python 2 API; base64.encodestring is removed in Python 3 (b64encode).
        self.file = base64.encodestring(data)
        self.file_name = 'rapport_gestion_stock.xlsx'
        action = self.env.ref('of_sale_stock.action_of_rapport_gestion_stock').read()[0]
        action['views'] = [(self.env.ref('of_sale_stock.of_rapport_gestion_stock_view_form').id, 'form')]
        action['res_id'] = self.ids[0]
        return action
| odof/openfire | of_sale_stock/wizard/of_report_tableur_wizard.py | of_report_tableur_wizard.py | py | 10,613 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "odoo.models.TransientModel",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "odoo.fields.Many2many",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "odoo.... |
40838375848 | import re, os, sys
pathjoin = os.path.join
try:
import configparser
except ImportError:
# Python 2
import ConfigParser as configparser
from .keplerian import keplerian
import pysyzygy as ps
from .utils import need_model_setup, get_planet_mass, get_planet_semimajor_axis,\
percentile68_ranges, percentile68_ranges_latex
import io
import matplotlib.pyplot as plt
try:
from PySide.QtGui import QApplication, QImage
except ImportError:
# from PyQt5.QtCore import QApp
from PyQt5.Qt import QApplication, QImage
def add_clipboard_to_figures():
# use monkey-patching to replace the original plt.figure() function with
# our own, which supports clipboard-copying
oldfig = plt.figure
def newfig(*args, **kwargs):
fig = oldfig(*args, **kwargs)
def clipboard_handler(event):
if event.key == 'ctrl+c':
# store the image in a buffer using savefig(), this has the
# advantage of applying all the default savefig parameters
# such as background color; those would be ignored if you simply
# grab the canvas using Qt
buf = io.BytesIO()
fig.savefig(buf)
QApplication.clipboard().setImage(QImage.fromData(buf.getvalue()))
buf.close()
fig.canvas.mpl_connect('key_press_event', clipboard_handler)
return fig
plt.figure = newfig
add_clipboard_to_figures()
import numpy as np
import corner
try:
from astroML.plotting import hist_tools
hist_tools_available = True
except ImportError:
hist_tools_available = False
colors = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"]
class KimaResults(object):
def __init__(self, options, data_file=None,
fiber_offset=None, hyperpriors=None, trend=None, GPmodel=None,
posterior_samples_file='posterior_sample.txt'):
self.options = options
debug = False # 'debug' in options
pwd = os.getcwd()
path_to_this_file = os.path.abspath(__file__)
top_level = os.path.dirname(os.path.dirname(path_to_this_file))
if debug:
print()
print('running on:', pwd)
print('top_level:', top_level)
print()
setup = configparser.ConfigParser()
s = setup.read('kima_model_setup.txt')
if len(s) == 0:
need_model_setup()
sys.exit(0)
if data_file is None:
data_file = setup['kima']['file']
print('Loading data file %s' % data_file)
self.data_file = data_file
self.data_skip = int(setup['kima']['skip'])
self.units = setup['kima']['units']
if debug:
print('--- skipping first %d rows of data file' % self.data_skip)
self.data = np.loadtxt(self.data_file,
skiprows=self.data_skip, usecols=(0,1,2))
# to m/s
if self.units == 'kms':
self.data[:, 1] *= 1e3
self.data[:, 2] *= 1e3
self.posterior_sample = np.atleast_2d(np.loadtxt(posterior_samples_file))
try:
self.sample = np.loadtxt('sample.txt')
except IOError:
self.sample = None
start_parameters = 0
self.extra_sigma = self.posterior_sample[:, start_parameters]
# find trend in the compiled model
if trend is None:
self.trend = setup['kima']['trend'] == 'true'
else:
self.trend = trend
if debug:
print('trend:', self.trend)
if self.trend:
n_trend = 1
i1 = start_parameters + 1
i2 = start_parameters + n_trend + 1
self.trendpars = self.posterior_sample[:, i1:i2]
else:
n_trend = 0
# find fiber offset in the compiled model
if fiber_offset is None:
self.fiber_offset = setup['kima']['obs_after_HARPS_fibers'] == 'true'
else:
self.fiber_offset = fiber_offset
if debug:
print('obs_after_fibers:', self.fiber_offset)
if self.fiber_offset:
n_offsets = 1
offset_index = start_parameters+n_offsets+n_trend
self.offset = self.posterior_sample[:, offset_index]
else:
n_offsets = 0
# find GP in the compiled model
if GPmodel is None:
self.GPmodel = setup['kima']['GP'] == 'true'
else:
self.GPmodel = GPmodel
if debug:
print('GP model:', self.GPmodel)
if self.GPmodel:
n_hyperparameters = 4
for i in range(n_hyperparameters):
name = 'eta' + str(i+1)
ind = start_parameters + n_trend + n_offsets + 1 + i
setattr(self, name, self.posterior_sample[:, ind])
else:
n_hyperparameters = 0
start_objects_print = start_parameters + n_offsets + \
n_trend + n_hyperparameters + 1
# how many parameters per component
self.n_dimensions = int(self.posterior_sample[0, start_objects_print])
# maximum number of components
self.max_components = int(self.posterior_sample[0, start_objects_print+1])
# find hyperpriors in the compiled model
if hyperpriors is None:
self.hyperpriors = setup['kima']['hyperpriors'] == 'true'
else:
self.hyperpriors = hyperpriors
# number of hyperparameters (muP, wP, muK)
n_dist_print = 3 if self.hyperpriors else 0
# if hyperpriors, then the period is sampled in log
self.log_period = self.hyperpriors
# the column with the number of planets in each sample
self.index_component = start_objects_print + 1 + n_dist_print + 1
# build the marginal posteriors for planet parameters
self.get_marginals()
allowed_options = {'1': [self.make_plot1, {}],
'2': [self.make_plot2, {}],
'3': [self.make_plot3, {}],
'4': [self.make_plot4, {}],
'5': [self.make_plot5, {}],
'6': [self.plot_random_planets,
{'show_vsys':True, 'show_trend':True}],
'7': [(self.hist_vsys,
self.hist_extra_sigma), {}],
}
for item in allowed_options.items():
if item[0] in options:
methods = item[1][0]
kwargs = item[1][1]
if isinstance(methods, tuple):
[m() for m in methods]
else:
methods(**kwargs)
def get_marginals(self):
"""
Get the marginal posteriors from the posterior_sample matrix.
They go into self.T, self.A, self.E, etc
"""
max_components = self.max_components
index_component = self.index_component
# periods
i1 = 0*max_components + index_component + 1
i2 = 0*max_components + index_component + max_components + 1
s = np.s_[i1 : i2]
self.T = self.posterior_sample[:,s]
self.Tall = np.copy(self.T)
# amplitudes
i1 = 1*max_components + index_component + 1
i2 = 1*max_components + index_component + max_components + 1
s = np.s_[i1 : i2]
self.A = self.posterior_sample[:,s]
self.Aall = np.copy(self.A)
# phases
i1 = 2*max_components + index_component + 1
i2 = 2*max_components + index_component + max_components + 1
s = np.s_[i1 : i2]
self.phi = self.posterior_sample[:,s]
self.phiall = np.copy(self.phi)
# eccentricities
i1 = 3*max_components + index_component + 1
i2 = 3*max_components + index_component + max_components + 1
s = np.s_[i1 : i2]
self.E = self.posterior_sample[:,s]
self.Eall = np.copy(self.E)
# omegas
i1 = 4*max_components + index_component + 1
i2 = 4*max_components + index_component + max_components + 1
s = np.s_[i1 : i2]
self.Omega = self.posterior_sample[:,s]
self.Omegaall = np.copy(self.Omega)
# times of periastron
self.T0 = self.data[0,0] - (self.T*self.phi)/(2.*np.pi)
self.T0all = np.copy(self.T0)
which = self.T != 0
self.T = self.T[which].flatten()
self.A = self.A[which].flatten()
self.E = self.E[which].flatten()
def get_medians(self):
""" return the median values of all the parameters """
if self.posterior_sample.shape[0] % 2 == 0:
print('Median is not a solution because number of samples is even!!')
self.medians = np.median(self.posterior_sample, axis=0)
self.means = np.mean(self.posterior_sample, axis=0)
return self.medians, self.means
def get_posterior_statistics(self, N=None):
""" print the maximum likelihood estimate of the parameters and the posterior median """
N = 2
if N is None:
i = self.posterior_sample[:, -1].argmax()
pars = self.posterior_sample[i, :]
else:
mask = self.posterior_sample[:, self.index_component]==N
self.mask = mask
i = self.posterior_sample[mask, -1].argmax()
pars = self.posterior_sample[mask][i, :]
print('maximum likelihood ')
print(pars[:5])
print(pars[pars != 0])
sort_periods = False
if sort_periods:
# sort the periods (this works quite well with 2 planets...)
periods = np.exp(self.Tall)
amplitudes = self.Aall
eccentricities = self.Eall
sorted_periods = apply_argsort(periods, periods, axis=1)
sorted_amplitudes = apply_argsort(periods, amplitudes, axis=1)
sorted_eccentricities = apply_argsort(periods, eccentricities, axis=1)
P1, P2 = sorted_periods.T
K1, K2 = sorted_amplitudes.T
e1, e2 = sorted_eccentricities.T
assert P1.shape == P2.shape
if N == 2:
periods = np.exp(self.Tall[mask,:2])
amplitudes = self.Aall[mask, :2]
eccentricities = self.Eall[mask, :2]
sorted_periods = apply_argsort(periods, periods, axis=1)
sorted_amplitudes = apply_argsort(periods, amplitudes, axis=1)
sorted_eccentricities = apply_argsort(periods, eccentricities, axis=1)
P1, P2 = sorted_periods.T
K1, K2 = sorted_amplitudes.T
e1, e2 = sorted_eccentricities.T
else:
pass
print()
print('medians:')
print()
a = '$%7.5f\,^{+\,%7.5f}_{-\,%7.5f}$' % percentile68_ranges(P1)
b = ' & $%4.3f$' % P1.std()
print('%-40s' % a, b)
a, b = '$%3.2f\,^{+\,%3.2f}_{-\,%3.2f}$' % percentile68_ranges(K1), ' & $%4.3f$' % K1.std()
print('%-40s' % a, b)
a, b = '$%4.3f\,^{+\,%4.3f}_{-\,%4.3f}$' % percentile68_ranges(e1), ' & $%4.3f$' % e1.std()
print('%-40s' % a, b)
a, b = '$%7.5f\,^{+\,%7.5f}_{-\,%7.5f}$' % percentile68_ranges(P2), ' & $%4.3f$' % P2.std()
print('%-40s' % a, b)
a, b = '$%3.2f\,^{+\,%3.2f}_{-\,%3.2f}$' % percentile68_ranges(K2), ' & $%4.3f$' % K2.std()
print('%-40s' % a, b)
a, b = '$%4.3f\,^{+\,%4.3f}_{-\,%4.3f}$' % percentile68_ranges(e2), ' & $%4.3f$' % e2.std()
print('%-40s' % a, b)
############################################################
mjup2mearth = 317.828
star_mass = 0.913
m_mj = 4.919e-3 * star_mass**(2./3) * P1**(1./3) * K1 * np.sqrt(1-e1**2)
m_me = m_mj * mjup2mearth
# a = ((system.star_mass + m_me*mearth2msun)/(m_me*mearth2msun)) * sqrt(1.-ecc**2) * K * (P*mean_sidereal_day/(2*np.pi)) / au2m
print('b - $%4.2f\,^{+\,%4.2f}_{-\,%4.2f}$ [MEarth]' % percentile68_ranges(m_me))
# print '%8s %11.4f +- %7.4f [AU]' % ('a', a.n, a.s)
m_mj = 4.919e-3 * star_mass**(2./3) * P2**(1./3) * K2 * np.sqrt(1-e2**2)
m_me = m_mj * mjup2mearth
# a = ((system.star_mass + m_me*mearth2msun)/(m_me*mearth2msun)) * sqrt(1.-ecc**2) * K * (P*mean_sidereal_day/(2*np.pi)) / au2m
print('c - $%4.2f\,^{+\,%4.2f}_{-\,%4.2f}$ [MEarth]' % percentile68_ranges(m_me))
# print '%8s %11.4f +- %7.4f [AU]' % ('a', a.n, a.s)
def make_plot1(self):
""" Plot the histogram of the posterior for Np """
_, ax = plt.subplots(1,1)
# n, _, _ = plt.hist(self.posterior_sample[:, self.index_component], 100)
bins = np.arange(self.max_components+2)
nplanets = self.posterior_sample[:, self.index_component]
n, _ = np.histogram(nplanets, bins=bins)
ax.bar(bins[:-1], n)
ax.set(xlabel='Number of Planets',
ylabel='Number of Posterior Samples',
xlim=[-0.5, self.max_components+.5],
xticks=np.arange(self.max_components+1),
title='Posterior distribution for $N_p$'
)
nn = n[np.nonzero(n)]
print('Np probability ratios: ', nn.flat[1:] / nn.flat[:-1])
def make_plot2(self, bins=None):
"""
Plot the histogram of the posterior for orbital period P.
Optionally provide the histogram bins.
"""
if self.max_components == 0:
print('Model has no planets! make_plot2() doing nothing...')
return
if self.log_period:
T = np.exp(self.T)
# print('exponentiating period!')
else:
T = self.T
fig, ax = plt.subplots(1, 1)
# mark 1 year and 0.5 year
year = 365.25
ax.axvline(x=year, ls='--', color='r', lw=3, alpha=0.6)
# ax.axvline(x=year/2., ls='--', color='r', lw=3, alpha=0.6)
# plt.axvline(x=year/3., ls='--', color='r', lw=3, alpha=0.6)
# mark the timespan of the data
ax.axvline(x=self.data[:,0].ptp(), ls='--', color='b', lw=3, alpha=0.5)
# by default, 100 bins in log between 0.1 and 1e7
if bins is None:
bins = 10 ** np.linspace(np.log10(1e-1), np.log10(1e7), 100)
ax.hist(T, bins=bins, alpha=0.5)
ax.legend(['1 year', 'timespan'])
ax.set(xscale="log",
xlabel=r'(Period/days)',
ylabel='Number of Posterior Samples',
title='Posterior distribution for the orbital period(s)')
# plt.show()
def make_plot3(self, points=True):
"""
Plot the 2d histograms of the posteriors for semi-amplitude
and orbital period and eccentricity and orbital period.
If `points` is True, plot each posterior sample, else plot hexbins
"""
if self.max_components == 0:
print('Model has no planets! make_plot3() doing nothing...')
return
if self.log_period:
T = np.exp(self.T)
# print('exponentiating period!')
else:
T = self.T
A, E = self.A, self.E
_, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
if points:
ax1.semilogx(T, A, '.', markersize=2)
else:
ax1.hexbin(T, A, gridsize=50,
bins='log', xscale='log', yscale='log',
cmap=plt.get_cmap('afmhot_r'))
if points:
ax2.semilogx(T, E*T+self.data[0,0], '.', markersize=2)
else:
ax2.hexbin(T, E, gridsize=50, bins='log', xscale='log',
cmap=plt.get_cmap('afmhot_r'))
ax1.set(ylabel=r'$R_p / R_s$',
title='Joint posterior RpRs $-$ orbital period')
ax2.set(ylabel=r'$T_c$',
xlabel='Period [days]',
title='Joint posterior Tc $-$ orbital period',
# ylim=[0, 1],
# xlim=[1, 50]
)
def make_plot4(self):
""" Plot histograms for the GP hyperparameters """
if not self.GPmodel:
print('Model does not have GP! make_plot4() doing nothing...')
return
available_etas = [v for v in dir(self) if v.startswith('eta')]
labels = [r'$\eta_%d$' % (i+1) for i,_ in enumerate(available_etas)]
units = ['m/s', 'days', 'days', None]
xlabels = []
for label, unit in zip(labels, units):
xlabels.append(label + ' (%s)' % unit
if unit is not None else label)
fig, axes = plt.subplots(2, len(available_etas)//2)
fig.suptitle('Posterior distributions for GP hyperparameters')
for i, eta in enumerate(available_etas):
ax = np.ravel(axes)[i]
ax.hist(getattr(self, eta), bins=40)
ax.set(xlabel=xlabels[i], ylabel='posterior samples')
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
def make_plot5(self, show=True, save=False):
""" Corner plot for the GP hyperparameters """
if not self.GPmodel:
print('Model does not have GP! make_plot5() doing nothing...')
return
self.pmin = 10.
self.pmax = 40.
available_etas = [v for v in dir(self) if v.startswith('eta')]
labels = [r'$s$'] + [r'$\eta_%d$' % (i+1) for i,_ in enumerate(available_etas)]
units = ['m/s', 'm/s', 'days', 'days', None]
xlabels = []
for label, unit in zip(labels, units):
xlabels.append(label + ' (%s)' % unit
if unit is not None else label)
### color code by number of planets
# self.corner1 = None
# for N in range(6)[::-1]:
# mask = self.posterior_sample[:, self.index_component] == N
# if mask.any():
# self.post_samples = np.vstack((self.extra_sigma, self.eta1, self.eta2, self.eta3, self.eta4)).T
# self.post_samples = self.post_samples[mask, :]
# # self.post_samples = np.vstack((self.extra_sigma, self.eta1, self.eta2, self.eta3, self.eta4, self.eta5)).T
# print self.post_samples.shape
# # print (self.pmin, self.pmax)
# # labels = ['$\sigma_{extra}$', '$\eta_1$', '$\eta_2$', '$\eta_3$', '$\eta_4$', '$\eta_5$']
# self.corner1 = corner.corner(self.post_samples, fig=self.corner1, labels=labels, show_titles=True,
# plot_contours=False, plot_datapoints=True, plot_density=False,
# # fill_contours=True, smooth=True,
# # contourf_kwargs={'cmap':plt.get_cmap('afmhot'), 'colors':None},
# hexbin_kwargs={'cmap':plt.get_cmap('afmhot_r'), 'bins':'log'},
# hist_kwargs={'normed':True, 'color':colors[N]},
# range=[1., 1., 1., (self.pmin, self.pmax), 1],
# shared_axis=True, data_kwargs={'alpha':1, 'color':colors[N]},
# )
# ax = self.corner1.axes[3]
# ax.plot([2,2.1], color=colors[N], lw=3)
# else:
# print 'Skipping N=%d, no posterior samples...' % N
# ax.legend([r'$N_p=%d$'%N for N in range(6)[::-1]])
### all Np together
variables = [self.extra_sigma]
for eta in available_etas:
variables.append(getattr(self, eta))
self.post_samples = np.vstack(variables).T
ranges = [1.]*(len(available_etas)+1)
ranges[3] = (self.pmin, self.pmax)
c = corner.corner
self.corner1 = c(self.post_samples, labels=xlabels, show_titles=True,
plot_contours=False, plot_datapoints=True, plot_density=False,
# fill_contours=True, smooth=True,
# contourf_kwargs={'cmap':plt.get_cmap('afmhot'), 'colors':None},
hexbin_kwargs={'cmap':plt.get_cmap('afmhot_r'), 'bins':'log'},
hist_kwargs={'normed':True},
range=ranges, data_kwargs={'alpha':1},
)
self.corner1.suptitle('Joint and marginal posteriors for GP hyperparameters')
if show:
self.corner1.tight_layout(rect=[0, 0.03, 1, 0.95])
if save:
self.corner1.savefig(save)
def get_sorted_planet_samples(self):
# all posterior samples for the planet parameters
# this array is nsamples x (n_dimensions*max_components)
# that is, nsamples x 5, nsamples x 10, for 1 and 2 planets for example
try:
self.planet_samples
except AttributeError:
self.planet_samples = \
self.posterior_sample[:, self.index_component+1:-2].copy()
if self.max_components == 0:
return self.planet_samples
# here we sort the planet_samples array by the orbital period
# this is a bit difficult because the organization of the array is
# P1 P2 K1 K2 ....
samples = np.empty_like(self.planet_samples)
n = self.max_components * self.n_dimensions
mc = self.max_components
p = self.planet_samples[:, :mc]
ind_sort_P = np.arange(np.shape(p)[0])[:,np.newaxis], np.argsort(p)
for i,j in zip(range(0, n, mc), range(mc, n+mc, mc)):
samples[:,i:j] = self.planet_samples[:,i:j][ind_sort_P]
return samples
def apply_cuts_period(self, samples, pmin=None, pmax=None, return_mask=False):
""" apply cuts in orbital period """
too_low_periods = np.zeros_like(samples[:,0], dtype=bool)
too_high_periods = np.zeros_like(samples[:,0], dtype=bool)
if pmin is not None:
too_low_periods = samples[:,0] < pmin
samples = samples[~too_low_periods, :]
if pmax is not None:
too_high_periods = samples[:,1] > pmax
samples = samples[~too_high_periods, :]
if return_mask:
mask = ~too_low_periods & ~too_high_periods
return samples, mask
else:
return samples
def corner_planet_parameters(self, pmin=None, pmax=None):
""" Corner plot of the posterior samples for the planet parameters """
labels = [r'$P$', r'$R_p/R_*$', r'a/R_*', r'$\phi$']
samples = self.get_sorted_planet_samples()
samples = self.apply_cuts_period(samples, pmin, pmax)
# samples is still nsamples x (n_dimensions*max_components)
# let's separate each planets' parameters
data = []
for i in range(self.max_components):
data.append(samples[:, i::self.max_components])
# separate bins for each parameter
bins = None
# set the parameter ranges to include everythinh
def r(x, over=0.2):
return x.min() - over*x.ptp(), x.max() + over*x.ptp()
ranges = []
for i in range(self.n_dimensions):
i1, i2 = self.max_components*i, self.max_components*(i+1)
ranges.append( r(samples[:, i1:i2]) )
#
c = corner.corner
fig = None
colors = plt.rcParams["axes.prop_cycle"]
for i, (datum, colorcycle) in enumerate(zip(data, colors)):
fig = c(datum, fig=fig, labels=labels, show_titles=len(data)==1,
plot_contours=False, plot_datapoints=True, plot_density=False,
range=ranges, color=colorcycle['color'],
# fill_contours=True, smooth=True,
# contourf_kwargs={'cmap':plt.get_cmap('afmhot'), 'colors':None},
#hexbin_kwargs={'cmap':plt.get_cmap('afmhot_r'), 'bins':'log'},
hist_kwargs={'normed':True},
# range=[1., 1., (0, 2*np.pi), (0., 1.), (0, 2*np.pi)],
data_kwargs={'alpha':1, 'ms':3, 'color':colorcycle['color']},
)
plt.show()
def plot_random_planets(self, ncurves=50, over=0.1, pmin=None, pmax=None,
show_vsys=False, show_trend=False):
"""
Display the RV data together with curves from the posterior predictive.
A total of `ncurves` random samples are chosen,
and the Keplerian curves are calculated covering 100 + `over`%
of the timespan of the data.
"""
samples = self.get_sorted_planet_samples()
if self.max_components > 0:
samples, mask = \
self.apply_cuts_period(samples, pmin, pmax, return_mask=True)
else:
mask = np.ones(samples.shape[0], dtype=bool)
t = self.data[:,0].copy()
tt = np.linspace(t[0]-over*t.ptp(), t[-1]+over*t.ptp(),
10000+int(100*over))
y = self.data[:,1].copy()
yerr = self.data[:,2].copy()
# select random `ncurves` indices
# from the (sorted, period-cut) posterior samples
ii = np.random.randint(samples.shape[0], size=ncurves)
_, ax = plt.subplots(1,1)
## plot the Keplerian curves
for i in ii:
f = np.zeros_like(tt)
pars = samples[i, :].copy()
nplanets = pars.size / self.n_dimensions
for j in range(int(nplanets)):
P = pars[j + 0*self.max_components]
if P==0.0:
continue
RpRs = pars[j + 1*self.max_components]
aRs = pars[j + 2*self.max_components]
phi = pars[j + 3*self.max_components]
Tc = t[0] + (P*phi)
try:
f += ps.Transit(per=P, aRs=aRs, RpRs=RpRs, t0=Tc)(tt) - 1.0
except:
print('Failed for:', P, RpRs, aRs, Tc)
# v += keplerian(tt, P, K, ecc, w, t0, 0.)
f0 = self.posterior_sample[mask][i, -1]
f += f0
ax.plot(tt, f, alpha=0.2, color='k')
# if show_vsys:
# ax.plot(t, vsys*np.ones_like(t), alpha=0.2, color='r', ls='--')
## plot the data
ax.errorbar(t, y, yerr, fmt='.')
ax.set(xlabel='Time [days]', ylabel='Flux []')
plt.tight_layout()
# plt.show()
def hist_offset(self):
""" Plot the histogram of the posterior for the fiber offset """
if not self.fiber_offset:
print('Model has no fiber offset! hist_offset() doing nothing...')
return
units = ' (m/s)' if self.units=='ms' else ' (km/s)'
estimate = percentile68_ranges_latex(self.offset) + units
_, ax = plt.subplots(1,1)
ax.hist(self.offset)
title = 'Posterior distribution for fiber offset \n %s' % estimate
ax.set(xlabel='fiber offset (m/s)', ylabel='posterior samples',
title=title)
def hist_vsys(self):
""" Plot the histogram of the posterior for the systemic velocity """
vsys = self.posterior_sample[:,-1]
# units = ' (m/s)' if self.units=='ms' else ' (km/s)'
estimate = percentile68_ranges_latex(vsys) # + units
_, ax = plt.subplots(1,1)
ax.hist(vsys)
title = 'Posterior distribution for $F_0$ \n %s' % estimate
ax.set(xlabel=r'$F_0$', ylabel='posterior samples',
title=title)
def hist_extra_sigma(self):
""" Plot the histogram of the posterior for the additional white noise """
# units = ' (m/s)' if self.units=='ms' else ' (km/s)'
estimate = percentile68_ranges_latex(self.extra_sigma) #+ units
_, ax = plt.subplots(1,1)
ax.hist(self.extra_sigma)
title = 'Posterior distribution for extra white noise $s$ \n %s' % estimate
ax.set(xlabel='extra sigma', ylabel='posterior samples',
title=title) | j-faria/kima-light | pykimalight/display.py | display.py | py | 28,337 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "io.BytesI... |
42147496534 | from pydantic import BaseModel
class ServiceInfo(BaseModel):
name: str
service_type: str
namespace: str
classification: str = "None"
deleted: bool = False
def get_service_key(self) -> str:
return f"{self.namespace}/{self.service_type}/{self.name}"
def __eq__(self, other):
if not isinstance(other, ServiceInfo):
return NotImplemented
return (
self.name == other.name
and self.service_type == other.service_type
and self.namespace == other.namespace
and self.classification == other.classification
and self.deleted == other.deleted
)
| m8e/robusta | src/robusta/core/model/services.py | services.py | py | 670 | python | en | code | null | github-code | 36 | [
{
"api_name": "pydantic.BaseModel",
"line_number": 4,
"usage_type": "name"
}
] |
12598914400 | # Write a program where only the process with number zero reports on how many processes there are in total.
from mpi4py import MPI
comm = (
MPI.COMM_WORLD
) # Default communicator in MPI. Groups processes together all are connected.
proc_nom = comm.Get_rank() # Current process number.
nom_procs = comm.Get_size() # How many processors there are.
if proc_nom == 0:
print("Total processors:" + str(nom_procs))
| GMW99/mpi-examples | exercises/2.5.py | 2.5.py | py | 423 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 6,
"usage_type": "name"
}
] |
12939715941 | """
AlexNet Keras Implementation
BibTeX Citation:
@inproceedings{krizhevsky2012imagenet,
title={Imagenet classification with deep convolutional neural networks},
author={Krizhevsky, Alex and Sutskever, Ilya and Hinton, Geoffrey E},
booktitle={Advances in neural information processing systems},
pages={1097--1105},
year={2012}
}
"""
# Import necessary packages
import argparse
# Import necessary components to build LeNet
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.regularizers import l2
def alexnet_model(img_shape=(240, 320, 1), n_classes=3, l2_reg=0.,
weights=None):
# Initialize model
# alexnet = Sequential()
model = Sequential()
# 1st Convolutional Layer
model.add(Conv2D(filters=96, input_shape=img_shape, kernel_size=(11,11), strides=(4,4), padding='valid'))
model.add(Activation('relu'))
# Max Pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
# 2nd Convolutional Layer
model.add(Conv2D(filters=256, kernel_size=(11,11), strides=(1,1), padding='valid'))
model.add(Activation('relu'))
# Max Pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
# 3rd Convolutional Layer
model.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='valid'))
model.add(Activation('relu'))
# 4th Convolutional Layer
model.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='valid'))
model.add(Activation('relu'))
# 5th Convolutional Layer
model.add(Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding='valid'))
model.add(Activation('relu'))
# Max Pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
# Passing it to a Fully Connected layer
model.add(Flatten())
# 1st Fully Connected Layer
model.add(Dense(4096, input_shape=(224*224*3,)))
model.add(Activation('relu'))
# Add Dropout to prevent overfitting
model.add(Dropout(0.4))
# 2nd Fully Connected Layer
model.add(Dense(4096))
model.add(Activation('relu'))
# Add Dropout
model.add(Dropout(0.4))
# 3rd Fully Connected Layer
model.add(Dense(1000))
model.add(Activation('relu'))
# Add Dropout
model.add(Dropout(0.4))
# Output Layer
model.add(Dense(n_classes))
model.add(Activation('softmax'))
if weights is not None:
model.load_weights(weights)
return model
def parse_args():
"""
Parse command line arguments.
Parameters:
None
Returns:
parser arguments
"""
parser = argparse.ArgumentParser(description='AlexNet model')
optional = parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
optional.add_argument('--print_model',
dest='print_model',
help='Print AlexNet model',
action='store_true')
parser._action_groups.append(optional)
return parser.parse_args()
if __name__ == "__main__":
# Command line parameters
args = parse_args()
# Create AlexNet model
model = alexnet_model()
# Print
if args.print_model:
model.summary() | vedantbhatia/xAI-image-classifiers | AlexNet.py | AlexNet.py | py | 3,154 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.keras.models.Sequential",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.Conv2D",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.layers.Activation",
"line_number": 35,
"usage_type": "... |
28890469001 | """Public interface to top-level pytype functions."""
import contextlib
import dataclasses
import functools
import logging
import os
import sys
import traceback
from typing import Optional

import libcst

from pytype import __version__
from pytype import analyze
from pytype import config
from pytype import constant_folding
from pytype import context
from pytype import errors
from pytype import load_pytd
from pytype import utils
from pytype.directors import directors
from pytype.imports import builtin_stubs as pytd_builtins
from pytype.imports import pickle_utils
from pytype.pyc import pyc
from pytype.pyi import parser
from pytype.pytd import optimize
from pytype.pytd import pytd
from pytype.pytd import pytd_utils
from pytype.pytd import serialize_ast
from pytype.pytd import visitors
log = logging.getLogger(__name__)
# Webpage explaining the pytype error codes
ERROR_DOC_URL = "https://google.github.io/pytype/errors.html"
@dataclasses.dataclass
class AnalysisResult:
  """Preserve all state and results from running pytype."""

  # The configuration this run was executed with.
  options: config.Options
  # The loader used to resolve imported modules' type stubs.
  loader: load_pytd.Loader
  # The analysis context object used for the run.
  context: context.Context
  # All errors collected while checking/inferring.
  errorlog: errors.ErrorLog
  # The generated .pyi source text; None in check-only mode.
  pyi: Optional[str]
  # The inferred pytd AST; None in check-only mode.
  ast: Optional[pytd.TypeDeclUnit]
def read_source_file(input_filename, open_function=open):
  """Return the contents of input_filename, decoded as UTF-8.

  Args:
    input_filename: Path of the source file to read.
    open_function: Callable used to open the file; injectable for testing.

  Returns:
    The file contents as a string.

  Raises:
    utils.UsageError: If the file cannot be opened or read.
  """
  try:
    with open_function(input_filename, "r", encoding="utf8") as handle:
      contents = handle.read()
  except OSError as e:
    raise utils.UsageError(f"Could not load input file {input_filename}") from e
  else:
    return contents
def _set_verbosity_from(posarg):
  """Decorator to set the verbosity for a function that takes an options arg.

  Assumes that the function has an argument named `options` that is a
  config.Options object.

  Arguments:
    posarg: The index of `options` in the positional arguments.

  Returns:
    The decorator.
  """
  def decorator(f):
    # functools.wraps preserves f's __name__/__doc__/__module__, so decorated
    # functions still introspect and log correctly.
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
      # `options` may be passed either positionally or by keyword.
      options = kwargs.get("options", args[posarg])
      with config.verbosity_from(options):
        return f(*args, **kwargs)
    return wrapper
  return decorator
@_set_verbosity_from(posarg=2)
def _call(analyze_types, src, options, loader, *, ctx=None):
  """Helper function to call analyze.check/infer_types.

  Args:
    analyze_types: Either analyze.check_types or analyze.infer_types.
    src: The source code text to analyze.
    options: config.Options object (also supplies the input filename).
    loader: A load_pytd.Loader, or None to create one from the options.
    ctx: An optional context, passed through to the analyzer.

  Returns:
    Whatever `analyze_types` returns.
  """
  if loader is None:
    loader = load_pytd.create_loader(options)
  # 'deep' tells the analyzer whether to analyze functions not called from main.
  analyze_all = not options.main_only
  return analyze_types(
      src=src,
      filename=options.input,
      options=options,
      loader=loader,
      ctx=ctx,
      deep=analyze_all)
def check_py(src, options=None, loader=None, ctx=None):
  """Check the types of a string of source code.

  Args:
    src: The source code text.
    options: config.Options object; a default one is created if omitted.
    loader: An optional load_pytd.Loader instance.
    ctx: An optional context.

  Returns:
    The errors.ErrorLog produced by the check.
  """
  if options is None:
    options = config.Options.create()
  with config.verbosity_from(options):
    result = _call(analyze.check_types, src, options, loader, ctx=ctx)
  return result.errorlog
def generate_pyi(src, options=None, loader=None, ctx=None):
  """Run the inferencer on a string of source code, producing output.

  Args:
    src: The source code.
    options: config.Options object; a default one is created if omitted.
    loader: A load_pytd.Loader instance.
    ctx: A context

  Returns:
    A tuple, (errors.ErrorLog, PYI Ast as string, TypeDeclUnit).

  Raises:
    CompileError: If we couldn't parse the input file.
    UsageError: If the input filepath is invalid.
  """
  if options is None:
    options = config.Options.create()
  with config.verbosity_from(options):
    ret = _call(analyze.infer_types, src, options, loader, ctx=ctx)
    unit = ret.ast
    # Sanity-check the inferred AST before transforming it.
    unit.Visit(visitors.VerifyVisitor())
    unit = optimize.Optimize(
        unit,
        ret.builtins,
        lossy=False,
        use_abcs=False,
        max_union=7,
        remove_mutable=False)
    unit = pytd_utils.CanonicalOrdering(unit)
    pyi_text = pytd_utils.Print(unit)
    log.info("=========== pyi optimized =============")
    log.info("\n%s", pyi_text)
    log.info("========================================")
    pyi_text += "\n"
    if options.quick:
      pyi_text = "# (generated with --quick)\n\n" + pyi_text
    return ret.errorlog, pyi_text, unit
@_set_verbosity_from(posarg=0)
def check_or_generate_pyi(options, loader=None, ctx=None) -> AnalysisResult:
  """Returns results from running pytype.

  Args:
    options: config.Options object.
    loader: load_pytd.Loader object.
    ctx: A context

  Returns:
    An AnalysisResult bundling the options, loader, context and errorlog; in
    infer mode it also carries the pyi string and AST (both None when
    options.check is set).
  """
  deep = not options.main_only
  loader = loader or load_pytd.create_loader(options)
  ctx = ctx or analyze.make_context(options, loader, deep)
  errorlog = errors.ErrorLog()
  # Placeholder output, returned as-is when analysis is skipped (skip-file)
  # or fails under --nofail.
  result = pytd_builtins.DEFAULT_SRC
  ast = pytd_builtins.GetDefaultAst(
      parser.PyiOptions.from_toplevel_options(options))
  try:
    src = read_source_file(options.input, options.open_function)
    if options.check:
      errorlog = check_py(src=src, options=options, loader=loader, ctx=ctx)
      result, ast = None, None
    else:
      errorlog, result, ast = generate_pyi(
          src=src, options=options, loader=loader, ctx=ctx)
  except utils.UsageError:
    # Usage errors are fatal; let the caller report them.
    raise
  # The following branches turn compilation/parsing failures into errorlog
  # entries. Ordering matters: IndentationError is a SyntaxError subclass and
  # must be caught first, and the libcst error carries different attributes
  # (raw_line/message instead of lineno/msg).
  except pyc.CompileError as e:
    errorlog.python_compiler_error(options.input, e.lineno, e.error)
  except constant_folding.ConstantError as e:
    errorlog.python_compiler_error(options.input, e.lineno, e.message)
  except IndentationError as e:
    errorlog.python_compiler_error(options.input, e.lineno, e.msg)
  except libcst.ParserSyntaxError as e:
    # TODO(rechen): We can get rid of this branch once we delete
    # directors.parser_libcst.
    errorlog.python_compiler_error(options.input, e.raw_line, e.message)
  except SyntaxError as e:
    errorlog.python_compiler_error(options.input, e.lineno, e.msg)
  except directors.SkipFileError:
    result += "# skip-file found, file not analyzed"
  except Exception as e:  # pylint: disable=broad-except
    if options.nofail:
      # --nofail: log the failure and embed the traceback as comments in the
      # generated pyi instead of crashing.
      log.warning("***Caught exception: %s", str(e), exc_info=True)
      if not options.check:
        result += (
            "# Caught error in pytype: " + str(e).replace("\n", "\n#")
            + "\n# " + "\n# ".join(traceback.format_exc().splitlines()))
    else:
      # Prepend the input filename to the exception message before
      # re-raising, so the user can tell which file was being analyzed.
      prefix = str(e.args[0]) if e.args else ""
      e.args = (f"{prefix}\nFile: {options.input}",) + e.args[1:]
      raise
  return AnalysisResult(options, loader, ctx, errorlog, result, ast)
def _write_pyi_output(options, contents, filename):
assert filename
if filename == "-":
sys.stdout.write(contents)
else:
log.info("write pyi %r => %r", options.input, filename)
with options.open_function(filename, "w") as fi:
fi.write(contents)
@_set_verbosity_from(posarg=0)
def process_one_file(options):
  """Check a .py file or generate a .pyi for it, according to options.

  Args:
    options: config.Options object.

  Returns:
    An error code (0 means no error).
  """
  log.info("Process %s => %s", options.input, options.output)
  loader = load_pytd.create_loader(options)
  try:
    ret = check_or_generate_pyi(options, loader)
  except utils.UsageError:
    logging.exception("")
    return 1
  if not options.check:
    # When pickling, the pyi text is only written if verify_pickle asks
    # for a file to cross-check against.
    if options.pickle_output:
      pyi_output = options.verify_pickle
    else:
      pyi_output = options.output
    # Write out the pyi file.
    if pyi_output:
      _write_pyi_output(options, ret.pyi, pyi_output)
    # Write out the pickle file.
    if options.pickle_output:
      log.info("write pickle %r => %r", options.input, options.output)
      write_pickle(ret.ast, options, loader)
  exit_status = handle_errors(ret.errorlog, options)
  # Touch output file upon success.
  if options.touch and not exit_status:
    with options.open_function(options.touch, "a"):
      os.utime(options.touch, None)
  return exit_status
@_set_verbosity_from(posarg=1)
def write_pickle(ast, options, loader=None):
  """Dump a pickle of the ast to a file.

  Args:
    ast: The AST to pickle.
    options: config.Options object.
    loader: Optional load_pytd.Loader; created from options when omitted.

  Raises:
    parser.ParseError: If export preparation fails and options.nofail is off.
    AssertionError: If options.verify_pickle is set and the pickled AST does
      not round-trip equal to the reference file.
  """
  loader = loader or load_pytd.create_loader(options)
  try:
    ast = serialize_ast.PrepareForExport(options.module_name, ast, loader)
  except parser.ParseError as e:
    if options.nofail:
      # Best-effort mode: fall back to the default AST rather than failing.
      ast = serialize_ast.PrepareForExport(
          options.module_name, loader.get_default_ast(), loader)
      log.warning("***Caught exception: %s", str(e), exc_info=True)
    else:
      raise
  if options.verify_pickle:
    # Normalize both ASTs (class pointers cleared) before comparing.
    ast1 = ast.Visit(visitors.LateTypeToClassType())
    ast1 = ast1.Visit(visitors.ClearClassPointers())
    ast2 = loader.load_file(options.module_name, options.verify_pickle)
    ast2 = ast2.Visit(visitors.ClearClassPointers())
    if not pytd_utils.ASTeq(ast1, ast2):
      raise AssertionError()
  pickle_utils.StoreAst(ast, options.output, options.open_function,
                        src_path=options.input,
                        metadata=options.pickle_metadata)
def print_error_doc_url(errorlog):
  """Print a pointer to the online error docs for the logged error classes."""
  error_names = {err.name for err in errorlog}
  if not error_names:
    return
  link = f"\nFor more details, see {ERROR_DOC_URL}"
  if len(error_names) == 1:
    # A single error class gets a direct anchor into the documentation.
    link += "#" + error_names.pop()
  print(link, file=sys.stderr)
@_set_verbosity_from(posarg=1)
def handle_errors(errorlog, options):
  """Handle the errorlog according to the given options."""
  if not options.report_errors:
    # Reporting disabled: always succeed.
    return 0
  if options.output_errors_csv:
    with options.open_function(options.output_errors_csv, "w") as f:
      errorlog.print_to_csv_file(f)
  errorlog.print_to_stderr(color=options.color)
  print_error_doc_url(errorlog)
  # Exit code: nonzero only for real errors, unless success is forced.
  if options.return_success:
    return 0
  return 1 if errorlog.has_error() else 0
@_set_verbosity_from(posarg=0)
def parse_pyi(options):
  """Tries parsing a PYI file.

  Args:
    options: config.Options object; options.input is the pyi file.

  Returns:
    The parsed and verified AST.  If options.output is set, a pretty-printed
    version is also written out (or to stdout for "-").
  """
  loader = load_pytd.create_loader(options)
  ast = loader.load_file(options.module_name, options.input)
  ast = loader.finish_and_verify_ast(ast)
  if options.output:
    result = "# Internal AST parsed and postprocessed from {}\n\n{}".format(
        options.input, pytd_utils.Print(ast))
    _write_pyi_output(options, result, options.output)
  return ast
def get_pytype_version():
  """Return the version string of the installed pytype package."""
  return __version__.__version__
@contextlib.contextmanager
def wrap_pytype_exceptions(exception_type, filename=""):
  """Catch pytype errors and reraise them as a single exception type.

  NOTE: This will also wrap non-pytype errors thrown within the body of the
  code block; it is therefore recommended to use this to wrap a single function
  call.

  Args:
    exception_type: The class to wrap exceptions in.
    filename: A filename to use in error messages.

  Yields:
    nothing, just calls the code block.
  """
  try:
    yield
  except utils.UsageError as e:
    raise exception_type(f"Pytype usage error: {e}") from e
  except pyc.CompileError as e:
    raise exception_type("Error reading file %s at line %s: %s" %
                         (filename, e.lineno, e.error)) from e
  except libcst.ParserSyntaxError as e:
    # TODO(rechen): We can get rid of this branch once we delete
    # directors.parser_libcst.
    raise exception_type("Error reading file %s at line %s: %s" %
                         (filename, e.raw_line, e.message)) from e
  except SyntaxError as e:
    raise exception_type("Error reading file %s at line %s: %s" %
                         (filename, e.lineno, e.msg)) from e
  except directors.SkipFileError as e:
    raise exception_type("Pytype could not analyze file %s: "
                         "'# skip-file' directive found" % filename) from e
  except pickle_utils.LoadPickleError as e:
    raise exception_type(f"Error analyzing file (unknown): Could not load "
                         f"serialized dependency {e.filename}") from e
  except Exception as e:  # pylint: disable=broad-except
    # Guard against exceptions raised with no args (e.g. ``raise ValueError()``),
    # which previously crashed this handler with an IndexError on e.args[0].
    detail = e.args[0] if e.args else str(e)
    msg = f"Pytype error: {e.__class__.__name__}: {detail}"
    # Chain explicitly with ``from e``, matching every other branch above.
    raise exception_type(msg).with_traceback(e.__traceback__) from e
| google/pytype | pytype/io.py | io.py | py | 11,659 | python | en | code | 4,405 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pytype.config.Options",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "pytype.config",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "pytype.loa... |
73599802024 | from typing import List
def insert_at(original: List, value: int, target_index: int) -> List:
    """Return a new list equal to *original* with *value* inserted at *target_index*.

    The input list is not modified.  Follows list.insert semantics for the
    insertion point (an index >= len(original) appends at the end).
    """
    # Slicing builds the result in one pass.  This replaces the element-by-
    # element copy loop (and removes its leftover debug print), matching the
    # "python version" sketched in the original's comment.
    return original[:target_index] + [value] + original[target_index:]
def insert(original: List, value: int) -> List:
    """Insert *value* into the sorted list *original*, keeping it sorted."""
    # First position whose element exceeds value; default to the end so equal
    # elements keep the new value after them.
    position = next(
        (idx for idx, element in enumerate(original) if element > value),
        len(original),
    )
    return insert_at(original, value, position)
def remove_at(original: List, i: int) -> List:
    """Return a new list equal to *original* with the element at index *i* removed."""
    head, tail = original[:i], original[i + 1:]
    return head + tail
{
"api_name": "typing.List",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 29,
"usage_type": "name"
}
] |
25430599066 |
import requests
class Github:
    """Thin wrapper around the public GitHub REST API for user statistics."""

    def __init__(self):
        self.name = "Github"
        self.base_url = 'https://api.github.com'
        self.description = "This service is all about Github."
        self.actions = ["detect_new_repository", "detect_new_follower", "detect_new_following"]
        self.reactions = []

    def _get_user_field(self, username, field):
        # Shared helper: all public getters read a single field from the
        # /users/<username> endpoint.  Deduplicates the previous three
        # copy-pasted request/parse bodies.
        r = requests.get(url=self.base_url + "/users/" + username)
        return r.json()[field]

    def get_user_followers_number(self, username):
        """Return the number of followers of *username*."""
        return self._get_user_field(username, 'followers')

    def get_user_repository_number(self, username):
        """Return the number of public repositories of *username*."""
        return self._get_user_field(username, 'public_repos')

    def get_user_following_number(self, username):
        """Return the number of accounts *username* follows."""
        return self._get_user_field(username, 'following')
| arkea-tech/DEV_area_2019 | server/Components/github_service.py | github_service.py | py | 855 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 25,
"usage_type": "call"
}
] |
27942071843 | import argparse
from src.screen import Screen
def check_difficulty():
    """Interactively prompt for a difficulty and start the game.

    Accepts either the menu number or the difficulty name; recurses until
    the user enters a valid choice.
    """
    difficulty = input("What difficulty do you want to play?\n 1-)easy 2-)medium\n 3-)hard\n")
    if difficulty == '1' or difficulty=='easy':
        main('easy')
    elif difficulty == '2' or difficulty=='medium':
        main('medium')
    elif difficulty == '3' or difficulty=='hard':
        main('hard')
    else:
        print("Please select a valid difficulty")
        # Invalid input: ask again (recursion rather than a loop).
        check_difficulty()
def main(difficulty):
    """Launch the game window configured for the given difficulty.

    Args:
        difficulty: 'easy', 'medium' or 'hard'; selects the board size
            (columns, rows) and the fraction of cells that are bombs.
    """
    if difficulty == 'easy':
        size = 8,10
        bombs = 0.1
    if difficulty == 'medium':
        size = 18,14
        bombs = 0.16
    if difficulty == 'hard':
        size = 24,20
        bombs = 0.2
    # NOTE(review): any other difficulty string would leave size/bombs unbound
    # and raise UnboundLocalError below; callers only pass the three values.
    screen = Screen(600,600, size, bombs)
    screen.run()
if __name__ == '__main__':
    # Command-line flags let the player skip the interactive difficulty prompt.
    parser = argparse.ArgumentParser()
    parser.add_argument('-e','--easy', default=False, action='store_true',
                        help='start th game in easy mode')
    parser.add_argument('-m','--medium', default=False, action='store_true',
                        help='start th game in medium mode')
    parser.add_argument('-a','--hard', default=False, action='store_true',
                        help='start th game in hard mode')
    args = parser.parse_args()
    if args.easy:
        main('easy')
    elif args.medium:
        main('medium')
    elif args.hard:
        main('hard')
    else:
        # No flag given: fall back to the interactive menu.
        check_difficulty()
{
"api_name": "src.screen.Screen",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 33,
"usage_type": "call"
}
] |
9817005327 | import os,torch,warnings,sys
import numpy as np
from Schrobine import *
import matplotlib.pyplot as plt
# Ad-hoc test script: majority-vote over the last `args.length` label-update
# rounds to derive pseudo-labels for a subset of unlabeled training samples,
# then visualize a few of them.
global args  # NOTE(review): no-op at module level; `args` is simply a global.
warnings.filterwarnings('ignore')
SetSeed()
args = Parse()
labelupdateindex=100  # index of the most recent label-update round
TureLabel = torch.from_numpy(np.load('Label1024.npy')).long()  # [sic] "Ture"
ActuralLabel = torch.from_numpy(np.load('Label1024.npy')).long()  # unused below
LabelUpdate = np.load('LabelUpdate4)25)16)38.npy') #6400*100*8
Unlabeledtrainlabel = np.load('Unlabeledtrainlabel.npy')
LabelIndex = Unlabeledtrainlabel[:48]
# Per-sample history of the last `args.length` rounds of class scores.
TempLabelUpdate = LabelUpdate[LabelIndex][:,labelupdateindex-args.length:labelupdateindex]
selected,pseudolabel,test = [],[],[]
for i in range(len(LabelIndex)):
    # n = how often the most frequent argmax class appears across rounds.
    n = np.max(np.bincount(np.argmax(TempLabelUpdate[i],1)))
    # Keep the sample only when one class wins more than 75% of the rounds.
    if np.max(np.bincount(np.argmax(TempLabelUpdate[i],1))) > 0.75*args.length:
        pseudolabel.append(np.where(np.bincount(np.argmax(TempLabelUpdate[i],1))==n)[0][0])
        selected.append(LabelIndex[i])
        test.append(i)
pseudolabel = np.array(pseudolabel)
for i in range(10):
    # NOTE(review): pseudolabel[i] is a scalar, so pseudolabel[i][0] below
    # looks like it would raise IndexError — confirm against a real run.
    plt.subplot(5,2,i+1),plt.imshow(np.transpose(TempLabelUpdate[test[i]],(1,0)))
    plt.title(str(pseudolabel[i][0])+'-'+str(TureLabel[selected[i]]))
unlabeledlogits = LabelUpdate[:16,99]
{
"api_name": "warnings.filterwarnings",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",... |
43509231022 | import socket
import threading
from ConnectionHandler import ConnectionHandler
from enum import Enum
class Role(Enum):
    # Which side of the connection this process plays.
    SERVER = 1
    CLIENT = 2
class NetworkInterface:
    """Facade over TCP sockets that can act as a listening server and/or an
    outgoing client, delegating per-connection bookkeeping to ConnectionHandler."""

    def __init__(self):
        # Threads running listen(); joined in quit().
        self.listeners = []
        self.connectionHandler = ConnectionHandler()
        self.running = True

    def start_server(self, ip, port, callbackHandler=None):
        """Bind to (ip, port) and accept connections on a background thread.

        callbackHandler is invoked with each newly registered connection.
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((ip, port))
        listener = threading.Thread(target=self.listen, args=(sock, callbackHandler))
        listener.start()
        self.listeners.append(listener)
        return True

    def listen(self, sock=None, callBackHandler=None):
        """Accept loop run on a listener thread until quit() clears running."""
        while self.running:
            # Set socket to listen for incoming connections, then block waiting for a connection
            sock.listen()
            conn, addr = sock.accept()
            conn.setblocking(False)
            connection = self.connectionHandler.add_connection(conn)
            callBackHandler(connection)

    def start_client(self, ip, port, duration=20, retries=30):
        """Connect to (ip, port), retrying until the server is available.

        Args:
            duration: per-attempt timeout in seconds.
            retries: maximum number of connection attempts.

        Returns:
            The registered connection, or None when every attempt failed.
        """
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Variables to track the connection attempts
        connected = False
        attempts = 0
        # Each connect attempt gives the server `duration` seconds to answer.
        conn.settimeout(duration)
        while not connected and attempts < retries:
            try:
                conn.connect((ip, port))
                connected = True
            except socket.error:
                attempts += 1
        if connected:
            return self.connectionHandler.add_connection(conn)
        return None

    def get_message(self, ip=None, port=None):
        # Delegate to the connection handler.
        return self.connectionHandler.get_message(ip, port)

    def push_message(self, message, ip=None, port=None):
        # Delegate to the connection handler (note the reordered arguments).
        return self.connectionHandler.push_message(ip, port, message)

    def has_client(self):
        return self.connectionHandler.has_client()

    def get_clients(self):
        return self.connectionHandler.get_clients()

    def client_exists(self, ip, port):
        return self.connectionHandler.client_exists(ip,port)

    def quit(self):
        """Shut down: stop the accept loops and join all listener threads."""
        self.connectionHandler.quit()
        self.running = False
        for listener in self.listeners:
            listener.join()
| ChrisWindmill/NWS | 01 Network Examples/18 Wait for server to be available/NetworkInterface.py | NetworkInterface.py | py | 2,518 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "ConnectionHandler.ConnectionHandler",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "socket.AF_... |
74050346024 | import os
import random
from collections import defaultdict
from parlai.tasks.md_gender.build import build
"""
Gender utilities for the multiclass gender classification tasks.
"""
MASK_TOKEN = '[MASK]'
MASC = 'male'
FEM = 'female'
NEUTRAL = 'gender-neutral'
NONBINARY = 'non-binary'
UNKNOWN = 'unknown'
SELF_UNKNOWN_LABELS = [f'SELF:{MASC}', f'SELF:{FEM}']
PARTNER_UNKNOWN_LABELS = [f'PARTNER:{MASC}', f'PARTNER:{FEM}']
UNKNOWN_LABELS = {'self': SELF_UNKNOWN_LABELS, 'partner': PARTNER_UNKNOWN_LABELS}
PUNCTUATION_LST = [
(' .', '.'),
(' !', '!'),
(' ?', '?'),
(' ,', ','),
(" ' ", "'"),
(" . . . ", "... "),
(" ( ", " ("),
(" ) ", ") "),
(" ; ", "; "),
]
SELF_CANDS = [f'SELF:{MASC}', f'SELF:{FEM}']
PARTNER_CANDS = [f'PARTNER:{NEUTRAL}', f'PARTNER:{MASC}', f'PARTNER:{FEM}']
ABOUT_CANDS = [
f'ABOUT:{NEUTRAL}',
f'ABOUT:{FEM}',
f'ABOUT:{MASC}',
f'ABOUT:{NONBINARY}',
]
ALL_CANDS = {'self': SELF_CANDS, 'partner': PARTNER_CANDS, 'about': ABOUT_CANDS}
EMPTY_LABELS = {'self': 'SELF:{}', 'partner': 'PARTNER:{}', 'about': 'ABOUT:{}'}
def get_data_stats(data, key='label', lst=True):
    """Print a per-label count summary of *data*, sorted by label.

    When *lst* is True each example's label is taken as ex[key][0],
    otherwise as ex[key].
    """
    counts = defaultdict(int)
    for example in data:
        label = example[key][0] if lst else example[key]
        counts[label] += 1
    print('Total dataset counts:')
    for label in sorted(counts):
        print(f'{label}: {counts[label]}')
def add_common_args(parser):
    """
    Add arguments common across all of the datasets.

    Args:
        parser: a ParlAI argument parser; the string 'bool' type below is
            the framework's boolean-flag convention.
    """
    agent = parser.add_argument_group('Gender Multiclass args')
    agent.add_argument(
        '--balance',
        type='bool',
        default=False,
        help='Whether to balance the data between classes during training',
    )
    agent.add_argument(
        '--balance-valid',
        type='bool',
        default=False,
        help='Whether to balance the validation data',
    )
    agent.add_argument(
        '--add-unknown-classes',
        type='bool',
        default=False,
        help='Add unknown classes as neutral',
    )
    agent.add_argument(
        '--unknown-temp',
        type=float,
        default=1.0,
        help='Rate at which to sample examples from the unknown class',
    )
    return parser
def balance_data(data_list, key='labels', shuffle=True, exclude_labels=None):
    """
    Given a list of acts, balance the list by label.

    Every label class is upsampled (by repetition plus random sampling) to
    the size of the largest class.  Examples whose label is in
    *exclude_labels* bypass balancing and are appended unchanged.
    """
    if len(data_list) == 0:
        # empty set
        return data_list

    def get_lst_sample(lst, sample_size):
        # Upsample lst to exactly sample_size elements: whole repetitions
        # first, then a random sample to make up the remainder.
        if len(lst) == sample_size:
            return lst
        sampled = []
        sample_times = sample_size // len(lst)
        for _ in range(sample_times):
            sampled += lst
        differential = sample_size - len(sampled)
        if differential > 0:
            extra_examples = random.sample(lst, differential)
            sampled += extra_examples
        return sampled

    separate_data = {}
    excluded_data = []
    for x in data_list:
        label = x[key]
        if isinstance(label, list):
            label = label[0]
        if exclude_labels is not None and label in exclude_labels:
            # exclude this from the balancing, but
            # add it later
            excluded_data.append(x)
        else:
            separate_data.setdefault(label, [])
            separate_data[label].append(x)
    max_len = max(len(value) for value in separate_data.values())
    new_data = []
    for _, data in separate_data.items():
        exs = get_lst_sample(data, max_len)
        new_data += exs
    assert len(new_data) == max_len * len(separate_data)
    # now add back data that was excluded from balancing
    new_data += excluded_data
    if shuffle:
        random.shuffle(new_data)
    return new_data
def get_inferred_about_data(task, opt, threshold=0.8):
    """
    Load examples whose ABOUT label was inferred by the ABOUT classifier.

    Labels whose classifier confidence falls below *threshold* are replaced
    with the gender-neutral class.  Lines are read from the task- and
    datatype-specific file under the md_gender data directory.
    """
    root = os.path.join(
        opt['datapath'], 'md_gender', 'data_to_release', 'inferred_about'
    )
    task_str = task.split(':')[-1]
    dt = opt['datatype'].split(':')[0]
    with open(os.path.join(root, f'{task_str}_{dt}_binary.txt'), 'r') as f:
        lines = f.read().splitlines()
    examples = []
    for line in lines:
        text, label, score = line.split('\t')
        if threshold is not None and float(score) < threshold:
            # replace label with NEUTRAL
            label = f'ABOUT:{NEUTRAL}'
        if not text or not label:
            continue
        examples.append(
            {
                'text': text,
                'labels': [label],
                'class_type': 'about',
                'label_candidates': ABOUT_CANDS,
                'episode_done': True,
            }
        )
    return examples
def format_text(text, lower=True):
    """
    Add spaces around punctuation (optionally lowercasing the text first).
    """
    if lower:
        text = text.lower()
    for spaced, plain in PUNCTUATION_LST:
        text = text.replace(plain, spaced)
    return text
def unformat_text(text):
    """
    Remove the spaces that format_text added around punctuation.
    """
    for spaced, plain in PUNCTUATION_LST:
        text = text.replace(spaced, plain)
    return text
def get_explicitly_gendered_words(opt):
    """
    Load list of explicitly gendered words from.
    <https://github.com/uclanlp/gn_glove/blob/main/wordlist/>.

    Examples include brother, girl, actress, husbands, etc.

    Returns:
        A (male_words, female_words) pair of lists, one word per line of the
        downloaded word-list files.
    """
    build(opt)  # ensure the data files are downloaded
    folder = os.path.join(opt['datapath'], 'md_gender', 'data_to_release', 'word_list')
    male_words = os.path.join(folder, 'male_word_file.txt')
    female_words = os.path.join(folder, 'female_word_file.txt')
    with open(male_words, 'r') as f:
        male = f.read().splitlines()
    with open(female_words, 'r') as f:
        female = f.read().splitlines()
    return male, female
def mask_gendered_words(text, gendered_list, mask_token=MASK_TOKEN):
    """
    Replace every word of *text* found in *gendered_list* with *mask_token*.

    Matching is case-insensitive; the casing of unmasked words is preserved.
    """
    spaced = format_text(text, lower=False)
    masked = [
        mask_token if word.lower() in gendered_list else word
        for word in spaced.split(' ')
    ]
    return unformat_text(' '.join(masked))
CONTRACTIONS_LIST = [
("i am", "i'm"),
("you are", "you're"),
("we are", "we're"),
("they are", "they're"),
("who are", "who're"),
("i have", "i've"),
("you have", "you've"),
("we have", "we've"),
("could have", "could've"),
("would have", "would've"),
("should have", "should've"),
("might have", "might've"),
("who have", "who've"),
("there have", "there've"),
("he is", "he's"),
("she is", "she's"),
("it is", "it's"),
("what is", "what's"),
("that is", "that's"),
("who is", "who's"),
("there is", "there's"),
("here is", "here's"),
("one is", "one's"),
("i will", "i'll"),
("you will", "you'll"),
("she will", "she'll"),
("he will", "he'll"),
("it will", "it'll"),
("we will", "we'll"),
("they will", "they'll"),
("that will", "that'll"),
("there will", "there'll"),
("this will", "this'll"),
("what will", "what'll"),
("who will", "who'll"),
("i would", "i'd"),
("you would", "you'd"),
("he would", "he'd"),
("she would", "she'd"),
("we would", "we'd"),
("they would", "they'd"),
("it would", "it'd"),
("there would", "there'd"),
("what would", "what'd"),
("who would", "who'd"),
("that would", "that'd"),
("let us", "let's"),
("cannot", "can't"),
("do not", "don't"),
("is not", "isn't"),
("will not", "won't"),
("should not", "shouldn't"),
("could not", "couldn't"),
("would not", "wouldn't"),
("are not", "aren't"),
("does not", "doesn't"),
("was not", "wasn't"),
("were not", "weren't"),
("has not", "hasn't"),
("have not", "haven't"),
("had not", "hadn't"),
("must not", "mustn't"),
("did not", "didn't"),
("might not", "mightn't"),
("need not", "needn't"),
]
CONTRACTION_SPACES = [(" " + x[0] + " ", " " + x[1] + " ") for x in CONTRACTIONS_LIST]
CONTRACTION_LEFT_SPACES = [(" " + x[0], " " + x[1]) for x in CONTRACTIONS_LIST]
CONTRACTION_RIGHT_SPACES = [(x[0] + " ", x[1] + " ") for x in CONTRACTIONS_LIST]
| facebookresearch/ParlAI | parlai/tasks/md_gender/utils.py | utils.py | py | 8,435 | python | en | code | 10,365 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "random.sample",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "os.path.join",... |
25873666341 | from flask import Flask,render_template,request,redirect,url_for
import tweepy
import textblob
import pandas as pd
import numpy as np
app= Flask(__name__)
@app.route('/', methods = ['POST', 'GET'])
def data():
    """Render the search form (GET) or run Twitter sentiment analysis (POST).

    On POST, fetches tweets matching the submitted topic and buckets each
    tweet by TextBlob polarity, returning an HTML table of the grouped
    tweet texts.
    """
    # NOTE(review): credentials are placeholders and the API client is rebuilt
    # on every request; normally these would come from configuration.
    consumer_key= "*********"
    consumer_secret= "**********"
    access_token= "**********"
    access_token_secret= "**********"
    auth = tweepy.OAuthHandler(consumer_key,consumer_secret)
    auth.set_access_token(access_token,access_token_secret)
    api= tweepy.API(auth)
    if request.method == "POST":
        topic= request.form.get("username")
        number_of_tweets= int(request.form.get("number"))
        public_tweets = api.search(topic,count=number_of_tweets)
        # Buckets of tweet texts keyed by sentiment class.
        sentimentDict = {
            'Positive' : [],
            'Negative' : [],
            'Neutral' : []
        }
        # NOTE(review): the three counters are incremented but never read.
        positive_count = 0
        negative_count = 0
        neutral_count = 0
        for tweet in public_tweets:
            analysis = textblob.TextBlob(tweet.text)
            if(analysis.sentiment.polarity > 0.0):
                sentimentDict['Positive'].append(tweet.text)
                positive_count +=1
            elif (analysis.sentiment.polarity < 0.0):
                sentimentDict['Negative'].append(tweet.text)
                negative_count +=1
            elif (analysis.sentiment.polarity == 0.0):
                sentimentDict['Neutral'].append(tweet.text)
                neutral_count +=1
        # Building each column as a Series pads unequal lengths with NaN.
        df= pd.DataFrame({key:pd.Series(value) for key, value in sentimentDict.items()})
        return(df.to_html())
    return render_template("ment.html")
app.run(host='localhost', port=6060) | harman4498/Tweet_Sentiment_Analysis | sentiment.py | sentiment.py | py | 1,694 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "tweepy.OAuthHandler",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tweepy.API",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
... |
8555906265 | import torch
import torch.nn as nn
import torch.autograd as autograd
import torch.nn.functional as F
import numpy as np
torch.set_num_threads(8)
class Model(nn.Module):
    """CNN scoring head: maps a [1, 9, 50, 50] status tensor plus a [1, 3]
    property vector to num_of_action + 1 action scores in (-1, 1)."""

    def __init__(self,
                 hidden_dim = 512,
                 representation_len=300,
                 dropout = .5,
                 property_len=3,
                 num_of_action=4
                 ):
        super(Model, self).__init__()
        # Convolutional feature extractor.  Attribute names are part of the
        # checkpoint format (state_dict keys) and must not change.
        self.conv1 = nn.Conv2d(9, 16, kernel_size=(3, 3), padding=1)
        self.BN1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(True)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        # Pads right/bottom by one so the 25x25 map pools evenly to 13x13.
        self.padding = nn.ZeroPad2d((0, 1, 0, 1))
        self.conv2 = nn.Conv2d(16, 32, kernel_size=(3, 3), padding=1)
        self.BN2 = nn.BatchNorm2d(32)
        # Fully connected head; the property vector joins before fc2.
        self.fc1 = nn.Linear(32*13*13, hidden_dim)
        self.dropout = nn.Dropout(dropout)
        self.fc2 = nn.Linear(hidden_dim + property_len, representation_len)
        self.sigmoid = nn.Sigmoid()
        self.hidden2action = nn.Linear(representation_len, num_of_action + 1)
        self.tanh = nn.Tanh()

    def forward(self, status, prop):
        """Score actions for a single (status, prop) sample.

        Args:
            status: tensor of shape [1, 9, 50, 50].
            prop: tensor of shape [1, 3].

        Returns:
            Tensor of shape [1, num_of_action + 1] with values in (-1, 1).

        Raises:
            Exception: if either input has an unexpected shape.
        """
        if status.size() != torch.Size([1, 9, 50, 50]):
            raise Exception("Status Size Error!", status.size())
        if prop.size() != torch.Size([1, 3]):
            raise Exception("Property Size Error!", prop.size())
        # Conv block 1: [1, 9, 50, 50] -> [1, 16, 50, 50] -> [1, 16, 25, 25]
        features = self.pool(self.relu(self.BN1(self.conv1(status))))
        # Conv block 2: [1, 16, 25, 25] -> [1, 32, 25, 25]
        features = self.relu(self.BN2(self.conv2(features)))
        # Pad to 26x26, then pool down to [1, 32, 13, 13].
        features = self.pool(self.padding(features))
        # Flatten and run the fully connected head.
        flat = features.view(1, -1)
        hidden = self.dropout(self.relu(self.fc1(flat)))
        rep = self.sigmoid(self.fc2(torch.cat([hidden, prop], 1)))
        return self.tanh(self.hidden2action(rep))
if __name__ == "__main__":
pass
| mrwangyou/IDSD | repLearning/cnn.py | cnn.py | py | 2,495 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "torch.set_num_threads",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.