seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2505498450 |
import psycopg2

# Connection parameters for the local demo Postgres instance.
hostname = 'localhost'
database = 'demo'
username = 'postgres'
pwd = '12345'
port_id = 5432

# SQL statements defined up front so the try block covers only DB work.
# NOTE: CREATE TABLE (without IF NOT EXISTS) fails on a second run once
# T_employee exists; kept as-is to preserve the script's behavior.
create_script = ''' CREATE TABLE T_employee (
id int PRIMARY KEY,
name varchar(40) NOT NULL,
salary int,
dept_id varchar(30)) '''
insert_script = 'INSERT INTO T_employee (id, name, salary, dept_id) VALUES (%s,%s,%s,%s)'
insert_value = (1, 'James', 12000, 'D1')

conn = None
cur = None
try:
    conn = psycopg2.connect(host=hostname,
                            port=port_id,
                            dbname=database,
                            user=username,
                            password=pwd)
    cur = conn.cursor()
    cur.execute(create_script)
    cur.execute(insert_script, insert_value)
    conn.commit()
finally:
    # Fix: the original never closed the cursor or the connection
    # (resource leak, and an un-committed transaction on error).
    if cur is not None:
        cur.close()
    if conn is not None:
        conn.close()
| ELFAHIM96/Python-and-PostgreSQL | Postgre2python.py | Postgre2python.py | py | 722 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "psycopg2.connect",
"line_number": 14,
"usage_type": "call"
}
] |
34855863470 | from abc import abstractmethod
from .base_autoencoder import BaseAutoencoder
import tensorflow as tf
import numpy as np
import time
from .utils import compute_mmd
class BaseInfoVariationalAutoencoder(BaseAutoencoder):
    """Base InfoVAE: a VAE trained with an MMD penalty instead of a KL term.

    Subclasses supply the concrete encoder/decoder networks by implementing
    make_inference_net() and make_generative_net().
    """
    def __init__(self, input_dims, latent_dim, hidden_dim=1024, alpha=0.1):
        # alpha is the LeakyReLU negative slope used by the subclass networks.
        super(BaseInfoVariationalAutoencoder, self).__init__(input_dims, latent_dim)
        self.hidden_dim = hidden_dim
        self.alpha = alpha
        self.inference_net = self.make_inference_net()
        self.generative_net = self.make_generative_net()
    @abstractmethod
    def make_inference_net(self):
        # Must return a model whose output has 2 * latent_dim units
        # (mean and log-variance concatenated; see encode()).
        raise NotImplementedError()
    @abstractmethod
    def make_generative_net(self):
        # Must return a model mapping latent vectors to output logits.
        raise NotImplementedError()
    @tf.function
    def encode(self, x):
        """Split the inference net's output into (mean, logvar) halves."""
        mean, logvar = tf.split(self.inference_net(x), num_or_size_splits=2, axis=1)
        return mean, logvar
        # return self.inference_net(x)
    def reparameterize(self, mean, logvar):
        # Reparameterization trick: z = mean + eps * exp(logvar / 2).
        eps = tf.random.normal(mean.shape)
        return eps * tf.exp(logvar * 0.5) + mean
    def decode(self, z, apply_sigmoid=False):
        """Map latents to logits; optionally squash to probabilities."""
        logits = self.generative_net(z)
        if apply_sigmoid:
            probs = tf.nn.sigmoid(logits)
            return probs
        return logits
    @tf.function
    def compute_loss(self, x):
        """Reconstruction MSE plus MMD between sampled latents and N(0, I)."""
        mean, logvar = self.encode(x)
        z = self.reparameterize(mean, logvar)
        # z = self.encode(x)
        probs = self.decode(z, apply_sigmoid=True)
        # loss_nll = tf.reduce_mean(tf.reduce_sum(tf.square(x + 0.0 - probs), axis=[1, 2, 3]))
        loss_nll = tf.reduce_mean(tf.square(x + 0.0 - probs))
        true_samples = tf.random.normal(z.shape)
        loss_mmd = compute_mmd(true_samples, z)
        return loss_nll + loss_mmd
    def reconstruct(self, x):
        """Encode x and decode it back to pixel-space probabilities."""
        mean, logvar = self.encode(x)
        z = self.reparameterize(mean, logvar)
        # z = self.encode(x)
        return self.decode(z, apply_sigmoid=True)
class MnistInfoVariationalAutoencoder(BaseInfoVariationalAutoencoder):
    """InfoVAE for 28x28 inputs (7*7*128 decoder reshape, two stride-2 deconvs)."""
    def make_inference_net(self):
        """Conv encoder producing a concatenated (mean, logvar) vector."""
        return tf.keras.Sequential([
            tf.keras.layers.InputLayer(input_shape=self.input_dims),
            tf.keras.layers.Conv2D(64, kernel_size=3, strides=(2, 2)),
            tf.keras.layers.LeakyReLU(alpha=self.alpha),
            tf.keras.layers.Conv2D(128, kernel_size=3, strides=(2, 2)),
            tf.keras.layers.LeakyReLU(alpha=self.alpha),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(self.hidden_dim),
            tf.keras.layers.LeakyReLU(alpha=self.alpha),
            # No activation: raw (mean, logvar) outputs.
            tf.keras.layers.Dense(2 * self.latent_dim)
        ])
    def make_generative_net(self):
        """Deconv decoder mapping a latent vector back to image logits."""
        return tf.keras.Sequential([
            # Fix: input_shape must be a shape tuple; (self.latent_dim) is
            # just an int in parentheses, not a 1-tuple.
            tf.keras.layers.InputLayer(input_shape=(self.latent_dim,)),
            tf.keras.layers.Dense(self.hidden_dim, activation='relu'),
            tf.keras.layers.Dense(7*7*128, activation='relu'),
            tf.keras.layers.Reshape(target_shape=(7, 7, 128)),
            tf.keras.layers.Conv2DTranspose(filters=64, kernel_size=3, strides=(2, 2), padding='SAME', activation='relu'),
            # No activation: decode() applies sigmoid when requested.
            tf.keras.layers.Conv2DTranspose(filters=self.input_dims[-1], kernel_size=3, strides=(2, 2), padding='SAME')
        ])
class Cifar10InfoVariationalAutoencoder(BaseInfoVariationalAutoencoder):
    """InfoVAE for 32x32 inputs (8*8*128 decoder reshape, two stride-2 deconvs)."""
    def make_inference_net(self):
        # Conv encoder producing a concatenated (mean, logvar) vector.
        return tf.keras.Sequential([
            tf.keras.layers.InputLayer(input_shape=self.input_dims),
            tf.keras.layers.Conv2D(64, kernel_size=3, strides=(2, 2)),
            tf.keras.layers.LeakyReLU(alpha=self.alpha),
            tf.keras.layers.Conv2D(128, kernel_size=3, strides=(2, 2)),
            tf.keras.layers.LeakyReLU(alpha=self.alpha),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(self.hidden_dim),
            tf.keras.layers.LeakyReLU(alpha=self.alpha),
            # No activation
            tf.keras.layers.Dense(2 * self.latent_dim)
        ])
    def make_generative_net(self):
        # Deconv decoder mapping a latent vector back to image logits.
        return tf.keras.Sequential([
            # NOTE(review): (self.latent_dim) is an int, not a 1-tuple;
            # input_shape should likely be (self.latent_dim,) -- confirm.
            tf.keras.layers.InputLayer(input_shape=(self.latent_dim)),
            tf.keras.layers.Dense(self.hidden_dim, activation='relu'),
            tf.keras.layers.Dense(8*8*128, activation='relu'),
            tf.keras.layers.Reshape(target_shape=(8, 8, 128)),
            tf.keras.layers.Conv2DTranspose(filters=64, kernel_size=3, strides=(2, 2), padding='SAME', activation='relu'),
            # No activation
            tf.keras.layers.Conv2DTranspose(filters=self.input_dims[-1], kernel_size=3, strides=(2, 2), padding='SAME')
]) | KienMN/Autoencoder-Experiments | autoencoders/info_vae.py | info_vae.py | py | 4,251 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "base_autoencoder.BaseAutoencoder",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": ... |
3476101470 | # -*- coding: utf-8 -*-
"""
Trains and tests a Rolling Bayesian Ridge Regression model on data
@author: Nick
"""
import warnings
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import VarianceThreshold
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import Lasso
from sklearn.metrics import r2_score
from plots import parity_plot, series_plot
# In[1]: Train the model
# load dataset
Y = pd.read_csv('Y crimes.csv').iloc[:,[2]]
X = pd.read_csv('X crimes.csv')
# size the training and forecasting data sets
train_size = 52 # weeks
test_size = 8 # weeks
# define the initial train and test sets
train_idx = np.arange(train_size)
test_idx = np.arange(train_size, train_size + test_size)
# train the model on all the training data
pipeline = Pipeline([
('var', VarianceThreshold()),
('scale', MinMaxScaler()),
('model', Lasso(alpha=1)),
])
pipeline.fit(X.iloc[train_idx, :], Y.iloc[train_idx, :])
# produce a rolling forecast on all the testing data
predictions = pd.DataFrame()
actual = pd.DataFrame()
for i in range(X.shape[0] - train_size - test_size):
# forecast
pred = pd.DataFrame(pipeline.predict(X.iloc[test_idx, :])).T
pred.columns = ["t" + str(i + 1) for i in range(pred.shape[1])]
predictions = pd.concat([predictions, pred], axis=0).reset_index(drop=True)
# actual
true = Y.iloc[test_idx, :].copy().T
true.columns = ["t" + str(i + 1) for i in range(true.shape[1])]
actual = pd.concat([actual, true], axis=0).reset_index(drop=True)
# roll forecast forward
train_idx = train_idx + 1
test_idx = test_idx + 1
# train a new model
with warnings.catch_warnings():
warnings.simplefilter('ignore')
pipeline.fit(X.iloc[train_idx, :], Y.iloc[train_idx, :])
# report forecast
y_pred = pred["t1"]
y_true = Y.iloc[train_idx[-1],:][0]
print('predicted=%f, expected=%f' % (y_pred, y_true))
# compute r2 for the predictions
r2 = []
for j in predictions.columns:
r2.append(r2_score(actual[j], predictions[j]))
# In[2]: Visualize the predictions
save_plot = False
# choose a time horizon
t = 1
# series plot
series_plot(predict=predictions.iloc[:,t-1], actual=actual.iloc[:,t-1],
title="Lasso " + str(t) + "-Step Ahead Rolling Forecast - Test",
save=save_plot)
# parity plot
parity_plot(predict=predictions.iloc[:,t-1], actual=actual.iloc[:,t-1],
title="Lasso Parity Plot - Test - R2: " + str(r2[t-1]), save=save_plot)
| N-ickMorris/Time-Series | crime_rolling_lasso.py | crime_rolling_lasso.py | py | 2,646 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line... |
8699137278 | # -*- coding: utf-8 -*-
import json
import pymongo
import re
import scrapy
from scrapy import Request, FormRequest
import logging
import redis
from sqlalchemy import create_engine
import pandas as pd
from sandbox.items import SXRItem,XZCFItem
from sandbox.utility import get_header
# get
class WebGetSpider(scrapy.Spider):
    """GET-based spider for szcredit.com.cn company credit detail pages.

    Record IDs are read from MongoDB (spider/credit_sz_v5); each detail page
    yields dishonest-debtor (SXRItem) and/or administrative-penalty (XZCFItem)
    records. Companies with neither are recorded in credit_sz_no_info.
    """
    name = 'website1'
    # headers = {
    #     "User-Agent": "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko)",
    # }
    URL = 'https://www.szcredit.com.cn/xy2.outside/gspt/newGSPTDetail3.aspx?ID={}'
    def __init__(self):
        super(WebGetSpider, self).__init__()
        # Browser-like headers reused on every detail-page request.
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip,deflate,br', 'Accept-Language': 'zh,en;q=0.9,en-US;q=0.8',
            'Cache-Control': 'no-cache', 'Connection': 'keep-alive',
            # 'Cookie': '__jsluid=a11ca0bac7aecb2a827c4750ce0f4eea;UM_distinctid=16874b135911e6-0504ab86b1e3e3-6114147a-144000-16874b1359234b;ASP.NET_SessionId=frvl0vvxq3rdpu5w1d2ekdmh;CNZZDATA1260729719=1354580564-1548143276-%7C1552008357',
            'Host': 'www.szcredit.com.cn', 'Pragma': 'no-cache', 'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0(;WOW64)AppleWebKit/537.36(KHTML,likeGecko)Chrome/67.0.3396.99Safari/537.36'
        }
        self.db=pymongo.MongoClient('10.18.6.26',27018)
        user = '********'
        password = '********'
        host = '10.18.6.105'
        port = 3306
        db = 'creditreport_sz'
        # MySQL engine only feeds the commented-out dedup lookup below.
        engine = create_engine(
            'mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8'.format(user, password, host, port, db))
        # df = pd.read_sql('sz_credit_spider_sx', con=engine)
        # self.name_list = list(set(list(df['ent_name'].values)))
        # self.r = redis.StrictRedis('10.18.6.26', decode_responses=True)
        self.record_ent = self.db['spider']['credit_sz_no_info']
    def start_requests(self):
        # TO DO
        # One request per RecordID found in the crawl-result collection.
        ent_info = self.db['spider']['credit_sz_v5'].find({'result':'1'})
        crawl_ent = self.record_ent.find({})
        crawl_ent_list = []
        for c_ent in crawl_ent:
            crawl_ent_list.append(c_ent.get('ent_name'))
        for each_item in ent_info:
            for sub_item in each_item.get('resultlist'):
                # print(sub_item)
                rec_id = sub_item.get('RecordID')
                # name=sub_item.get('EntName')
                ent_name=each_item.get('entname')
                # if (ent_name not in self.name_list) and (ent_name not in crawl_ent_list):
                # print(ent_name)
                yield Request(url=self.URL.format(rec_id),
                              headers=self.headers,
                              meta={'name':ent_name}
                              )
    def parse(self, response):
        """Extract dishonest-debtor and penalty tables from one detail page."""
        # logging.info(response.text)
        # logging.info('pass')
        # ret_data = json.loads(response.body_as_unicode())
        content = response.text
        ent_name=response.meta['name']
        if '最高法院失信被执行人名单' in content:
            case_no = response.xpath('//td[contains(text(),"执行案号")]/following-sibling::*[1]/text()').extract_first()
            court = response.xpath('//td[contains(text(),"执行法院")]/following-sibling::*[1]/text()').extract_first()
            current_state = response.xpath('//td[contains(text(),"当前状态")]/following-sibling::*[1]/text()').extract_first()
            according_no=''
            mediation_date=''
            sxrItem = SXRItem()
            # eval(field) copies the same-named local variable (above) into
            # each item field; missing locals are logged and skipped.
            for field in sxrItem.fields:
                try:
                    sxrItem[field] = eval(field)
                except Exception as e:
                    logging.warning('can not find define of {}'.format(field))
                    logging.warning(e)
            yield sxrItem
        if '市场监管行政处罚信息' in content:
            xzcfItem = XZCFItem()
            punish_no = response.xpath('//td[contains(text(),"处罚文号")]/following-sibling::*[1]/text()').extract_first()
            punish_gist = response.xpath('//td[contains(text(),"处罚依据")]/following-sibling::*[1]/text()').extract_first()
            punish_dept = ''
            punish_date = response.xpath('//td[contains(text(),"处罚日期")]/following-sibling::*[1]/text()').extract_first()
            for field in xzcfItem.fields:
                try:
                    xzcfItem[field] = eval(field)
                except Exception as e:
                    logging.warning('can not find define of {}'.format(field))
                    logging.warning(e)
            yield xzcfItem
        if ('最高法院失信被执行人名单' not in content) and ('市场监管行政处罚信息' not in content):
            # Companies with no data: upsert the name so it is not re-crawled.
            # NOTE(review): Collection.update() is deprecated in PyMongo 3+;
            # update_one(..., upsert=True) is the modern equivalent.
            self.record_ent.update({'ent_name':ent_name},{'$set':{'ent_name':ent_name}},True,False)
# post
class WebPostSpider(scrapy.Spider):
    """POST-based spider: looks up bank-card (BIN) details on cha.zfzj.cn,
    paging through results 500 rows at a time. Card numbers to query are
    popped from the redis list 'cardbin0925'."""
    name = 'website2'
    headers = {
        "Host": "cha.zfzj.cn",
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "X-Requested-With": "XMLHttpRequest",
        "User-Agent": "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko)",
        "Content-Type": "application/x-www-form-urlencoded",
        "Referer": "https://cha.zfzj.cn/mainPage.html",
    }
    post_url = 'https://cha.zfzj.cn/bankCardDetail/select'
    def start_requests(self):
        # TO DO
        # Hit the landing page first (presumably to pick up session cookies
        # before posting queries -- confirm).
        URL = 'https://cha.zfzj.cn/'
        yield Request(url=URL, callback=self.query)
    def query(self, response):
        # TO DO
        # Drain card numbers from redis and POST one search per card.
        rds = redis.StrictRedis('10.18.6.102', db=7, decode_responses=True)
        data = {
            "limit": "500",
            "offset": '1',
            "sortOrder": "asc",
            "inputValue": '',
        }
        while 1:
            card = rds.lpop('cardbin0925')
            if not card:
                break
            logging.info('query card >>>> {}'.format(card))
            data['inputValue'] = card
            yield FormRequest(url=self.post_url, formdata=data, headers=self.headers, callback=self.parse,
                              meta={'card': card, 'page': 1})
    def parse(self, response):
        """Parse one result page: yield one item per row, then page forward."""
        # logging.info(response.text)
        # logging.info('pass')
        # NOTE(review): body_as_unicode() is deprecated in Scrapy;
        # response.text is the modern equivalent.
        ret_data = json.loads(response.body_as_unicode())
        card = response.meta['card']
        rows = ret_data['rows']
        if not rows:
            return
        for row in rows:
            accountLength = row['accountLength']
            cardName = row['cardName']
            cardType = row['cardType']
            mainAccount = row['mainAccount']
            mainValue = row['mainValue']
            orgName = row['orgName']
            # NOTE(review): SpiderItem is never imported -- this module only
            # imports SXRItem and XZCFItem -- so this line raises NameError
            # the first time it runs. Confirm which item class was intended.
            spiderItem = SpiderItem()
            for field in spiderItem.fields:
                try:
                    # eval(field) copies the same-named local above into the item.
                    spiderItem[field] = eval(field)
                except Exception as e:
                    logging.warning('can not find define of {}'.format(field))
                    logging.warning(e)
            yield spiderItem
        total = ret_data['total']
        # 500 rows per page; keep requesting until all pages are fetched.
        pages = int(total / 500) if total % 500 == 0 else int(total / 500) + 1
        current_page = response.meta['page']
        if pages > current_page:
            current_page += 1
            data = {
                "limit": "500",
                "offset": str(current_page),
                "sortOrder": "asc",
                "inputValue": card,
            }
            yield FormRequest(url=self.post_url, headers=self.headers, formdata=data,
                              meta={'page': current_page, 'card': card})
| Rockyzsu/image_recognise | xinyong_shenzhen/sandbox/sandbox/spiders/website.py | website.py | py | 7,699 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "scrapy.Spider",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "scrap... |
6022783824 | import time
import multiprocessing
from functools import partial
from app import App
CPU_COUNT = 4 # multiprocessing.cpu_count() -- pinned to 4 for a fixed-size run
TEST_TIME = 60 * 1  # seconds each App instance is left running
def run_application(index: int, test_time: int) -> int:
    """Run one App instance for roughly ``test_time`` seconds.

    Args:
        index: worker number, used only in the startup log line.
        test_time: how many seconds to let the app run before stopping it.

    Returns:
        The number of messages the app processed during the run.
    """
    print("Starting application:", index)
    app = App()
    app.start()
    # Sleep in one-second ticks so the total run is ~test_time seconds.
    # (Removed: a dead every-10-iterations branch that only executed `pass`,
    # left over from commented-out queue-size reporting.)
    for _ in range(test_time):
        time.sleep(1)
    app.stop()
    return app.processed_messages
def main() -> int:
    """Fan out one App per worker process and report total throughput."""
    worker = partial(run_application, test_time=TEST_TIME)
    with multiprocessing.Pool(CPU_COUNT) as pool:
        results = pool.map(worker, range(CPU_COUNT))

    total = sum(results)
    print(f"{CPU_COUNT} apps processed {total} messages in {TEST_TIME} seconds")
    return 0
if __name__ == "__main__":
main()
| EvgeniiTitov/naive-events-processing | run_app_multicore.py | run_app_multicore.py | py | 889 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "app.App",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "app.start",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "app.stop",
"line_number": 22,
"u... |
28147315416 | import os
import sys
import json
import unittest
sys.path.append("../get_job/")
import jobs
class TestDB(unittest.TestCase):
    """Unit tests for jobs.JobDB persistence against the data_test.json fixture."""
    def setUp(self):
        # Fresh JobDB pointed at the checked-in fixture file for every test.
        self.job_db = jobs.JobDB()
        self.job_db.dbFile = "data_test.json"
    def test_readData(self):
        """ Test loading data """
        self.job_db.readData()
        self.assertEqual(self.job_db.adds[0].id,
                         "39f99f99f6e69e98b1f6de0815a262b9")
    def test_readData2(self):
        """ Test only loading new data """
        # Reading twice must not duplicate the three fixture adds.
        self.job_db.readData()
        self.job_db.readData()
        self.assertEqual(len(self.job_db.adds), 3, "adds list should be 3")
    def test_writeData(self):
        """ Test writing a new db file """
        self.job_db.readData()
        self.job_db.dbFile = "data_testingwrite.json"
        self.job_db.writeData()
        # A read/write round trip must reproduce the fixture line-for-line.
        f1 = open("data_test.json")
        f2 = open(self.job_db.dbFile)
        self.assertListEqual(list(f1), list(f2), "Files should be equal")
        # Cleanup
        f1.close()
        f2.close()
        os.remove("./"+self.job_db.dbFile)
    def test_writeData2(self):
        """ Test writing to existing db file """
        from shutil import copyfile
        test_hashes = []
        output_file = "data_testingwrite.json"
        # Load data
        self.job_db.readData()
        # Store old hashes for later use
        for add in self.job_db.adds:
            test_hashes.append(add.id)
        # Make loaded data unique by changing one prop and rehashing
        for add in self.job_db.adds:
            add.company = "change"
            add.hash()
            test_hashes.append(add.id)
        # Copy dbfile so writeData() appends to an existing store
        copyfile(self.job_db.dbFile, output_file)
        self.job_db.dbFile = output_file
        self.job_db.writeData()
        with open(self.job_db.dbFile) as json_file:
            data = json.load(json_file)
        # Three originals plus three modified adds -> six stored records.
        self.assertEqual(len(data), 6, "Should be 6")
        # Load hashes from file and check with the ones previously stored
        new_hashes = []
        for add in data:
            new_hashes.append(add["id"])
        self.assertListEqual(test_hashes, new_hashes, "Hashes should be equal")
        os.remove("./"+self.job_db.dbFile)
    def test_getLoadedHashes(self):
        # In-memory hash list after a read must match the fixture's IDs.
        self.job_db.readData()
        hashes = self.job_db._getLoadedHashes()
        expected_hashes = ["39f99f99f6e69e98b1f6de0815a262b9",
                           "cc69355f68edfda7d962d80dc179c441",
                           "c0cf364c725d24b1bf97aa46bdf9467c"]
        self.assertEqual(hashes, expected_hashes)
    def test_getStoredHashes(self):
        # Hashes read straight from disk, without loading adds first.
        hashes = self.job_db._getStoredHashes()
        expected_hashes = ["39f99f99f6e69e98b1f6de0815a262b9",
                           "cc69355f68edfda7d962d80dc179c441",
                           "c0cf364c725d24b1bf97aa46bdf9467c"]
        self.assertEqual(hashes, expected_hashes)
if __name__ == '__main__':
unittest.main()
| SV3A/Jobbi | tests/jobs_tests.py | jobs_tests.py | py | 2,972 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "jobs.JobDB",
"l... |
11404152952 | import pandas as pd
from tqdm import tqdm
from gensim.models import Doc2Vec
from sklearn import utils
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from gensim.models.doc2vec import TaggedDocument
import matplotlib.pyplot as plt
import nltk
import multiprocessing
tqdm.pandas(desc="progress-bar")
cores = multiprocessing.cpu_count()
plt.style.use('ggplot')
if __name__=='__main__':
# Read in the data, dropping nulls and columns that won't help predictions
df = pd.read_csv('../data/train-balanced-sarcasm.csv')
df.drop(['author','ups','downs','date'], axis=1, inplace=True)
df.dropna(inplace=True)
# Re-index our dataframe after dropping nulls
df.shape
df.index = range(1010773)
# Train-Test split our data in a 70-30 split, then tokenize the comment and parents comment columns
train, test = train_test_split(df, test_size=0.3, random_state=42)
def tokenize_text(text):
tokens = []
for sent in nltk.sent_tokenize(text):
for word in nltk.word_tokenize(sent):
if len(word) < 2:
continue
tokens.append(word.lower())
return tokens
train_tagged = train.apply(
lambda r: TaggedDocument(words=tokenize_text(r['comment']), tags=[r.comment]), axis=1)
test_tagged = test.apply(
lambda r: TaggedDocument(words=tokenize_text(r['comment']), tags=[r.comment]), axis=1)
train_tagged = train.apply(
lambda r: TaggedDocument(words=tokenize_text(r['parent_comment']), tags=[r.parent_comment]), axis=1)
test_tagged = test.apply(
lambda r: TaggedDocument(words=tokenize_text(r['parent_comment']), tags=[r.parent_comment]), axis=1)
# Build our vocab
model_dbow = Doc2Vec(dm=0, vector_size=300, negative=5, hs=0, min_count=2, sample = 0, workers=cores)
model_dbow.build_vocab([x for x in tqdm(train_tagged.values)])
# Train our doc2vec model in gensim with 30 epochs
for epoch in range(10):
model_dbow.train(utils.shuffle([x for x in tqdm(train_tagged.values)]), total_examples=len(train_tagged.values), epochs=1)
model_dbow.alpha -= 0.002
model_dbow.min_alpha = model_dbow.alpha
def vec_for_learning(model, tagged_docs):
sents = tagged_docs.values
targets, regressors = zip(*[(doc.tags[0], model.infer_vector(doc.words, steps=20)) for doc in sents])
return targets, regressors
# Attempt a logistic regression on our data
y_train, X_train = vec_for_learning(model_dbow, train_tagged)
y_test, X_test = vec_for_learning(model_dbow, test_tagged)
logreg = LogisticRegression(n_jobs=1, C=1e5)
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
from sklearn.metrics import accuracy_score, f1_score
print('Testing accuracy %s' % accuracy_score(y_test, y_pred))
print('Testing F1 score: {}'.format(f1_score(y_test, y_pred, average='weighted')))
| kschutter/SarcasmDetection | src/logisticReg.py | logisticReg.py | py | 2,982 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tqdm.tqdm.pandas",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "multiprocessing.cpu_count",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplo... |
70365065314 | from flask import Flask, jsonify, request
from flask_cors import CORS
from note import models as note_model
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False
CORS(app, supports_credentials=True)
@app.before_request
def __db_connect():
    # Open the model-layer database connection before handling each request.
    note_model.db.connect()
@app.teardown_request
def _db_close(exc):
    # Close the model-layer database connection after each request, if open.
    if not note_model.db.is_closed():
        note_model.db.close()
@app.route("/", methods=["GET"])
def hello():
resp = jsonify({"hello": "world"})
return resp
@app.route("/api/marknote/notes", methods=["GET"])
def note_get():
note_title = request.args.get("note_title")
note_dict = note_model.get_note(note_title)
note_label = note_model.get_labels(note_title)
note_dict["labels"] = note_label
return jsonify(note_dict)
@app.route("/api/marknote/notes", methods=["POST"])
def note_post():
post_data = request.json
return_msg = note_model.save_note(post_data)
resp_data = {"status": return_msg}
return jsonify(resp_data)
@app.route("/api/marknote/notes", methods=["PUT"])
def note_put():
put_data = request.json
return_msg = note_model.update_note(put_data)
resp_data = {"status": return_msg}
return jsonify(resp_data)
@app.route("/api/marknote/notes", methods=["DELETE"])
def note_delete():
note_title = request.args.get("note_title")
return_msg = note_model.delete_note(note_title)
resp_data = {"status": return_msg}
return jsonify(resp_data)
@app.route("/api/marknote/labels", methods=["GET"])
def labels_get():
note_title = request.args.get("note_title")
if note_title is None:
return jsonify(note_model.get_all_labels())
else:
return jsonify(note_model.get_labels(note_title))
@app.route("/api/marknote/labels", methods=["POST"])
def labels_post():
post_data = request.json
return_msg = note_model.save_label(post_data)
resp_data = {"status": return_msg}
return jsonify(resp_data)
@app.route("/api/marknote/labels", methods=["PUT"])
def label_update():
put_data = request.json
return jsonify(note_model.update_label(put_data))
@app.route("/api/marknote/labels", methods=["DELETE"])
def labels_delete():
label_name = request.args.get("label_name")
return_msg = note_model.delete_label(label_name)
resp_data = {"status": return_msg}
return jsonify(resp_data)
@app.route("/api/marknote/menus", methods=["GET"])
def menus_get():
return jsonify(note_model.get_menu())
@app.route("/api/marknote/search", methods=["GET"])
def search_note():
search_str = request.args.get("search_str")
return jsonify(note_model.search(search_str)) | HyperionD/api | api.py | api.py | py | 2,625 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "note.models.db.connect",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "note.models.db",
... |
36921040940 | import requests, os, json, time, argparse, threading, urllib
from bs4 import BeautifulSoup
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def create_job(result):
    """Flatten one Glassdoor JSON-LD job record into a single-row DataFrame.

    Columns: title, company, location ("city , region"), summary, link.
    Records missing location/title fields fall back to a mostly-empty row
    that still carries the listing URL (matching the original behavior).
    """
    # Fix: pandas.io.json.json_normalize is deprecated; use the top-level export.
    from pandas import json_normalize
    try:
        address = result['jobLocation']['address']['addressLocality']+' , '+result['jobLocation']['address']['addressRegion']
        return json_normalize({'title': result['title'], 'company': result['hiringOrganization']['name'], 'location': address, 'summary': result['description'], "link": result['FinalURL']})
    except (KeyError, TypeError, IndexError):
        # Fix: was a bare except, which also swallowed unrelated errors
        # (e.g. KeyboardInterrupt). These are the lookup failures produced by
        # gatherJob's placeholder records (list values / missing keys).
        return json_normalize({'title': [''], 'company': [''], 'location': 'no city , no state', 'summary': [''], "link": result['FinalURL']})
def getListing(pages,job,level):
    """Scrape up to ``pages`` Glassdoor search result pages for ``job`` at
    ``level`` and return a DataFrame with one row per listing found."""
    import pandas as pd
    import time  # shadows the module-level time import inside this function
    page = 1
    jobListings = []
    contentURL = 'https://www.glassdoor.com/Job/jobs.htm?sc.keyword=' + job + "&jobType=" + level
    while (page <= pages):
        jl = {}
        webPage = requests.get(contentURL, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'})
        if (page == 1):
            # The first response embeds the canonical search URL, which is
            # used to build the ..._IP<n>.htm pagination links below.
            try:
                pageUrl = str(webPage.content.decode("utf-8").split("untranslatedUrl' : '")[1].split('.htm')[0])
            except:
                # NOTE(review): bare except; on a parse failure this sleeps
                # ~17 minutes (rate-limit backoff?) and retries once, which
                # will still raise if parsing fails again -- confirm intent.
                time.sleep(1000)
                pageUrl = str(webPage.content.decode("utf-8").split("untranslatedUrl' : '")[1].split('.htm')[0])
        contentURL = pageUrl +'_IP'+ str(page + 1) + '.htm?jobType=' + level
        soup = BeautifulSoup(webPage.content, 'html.parser')
        # Every job listing carries a data-job-id attribute on a span.
        soup = soup.select('span[data-job-id]')
        for span in soup:
            jl = {"jobID":span['data-job-id']}
            jobListings.append(jl)
        page += 1
    results = getRealURL(jobListings)
    return pd.concat([create_job(x) for x in results])
def getRealURL(jobListings):
    """Resolve each job ID to its final (post-redirect) Glassdoor URL, then
    fetch and return the parsed job records via gatherJob().

    Mutates each dict in ``jobListings`` in place, adding 'url' and 'FinalURL'.
    """
    for line in jobListings:
        jobID = line['jobID']
        line['url'] = "https://www.glassdoor.com/partner/jobListing.htm?jobListingId="+jobID
        try:
            # requests follows redirects, so page.url is the listing's real URL.
            page = requests.get(line['url'], headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'})
            line['FinalURL'] = page.url
        except:
            # NOTE(review): bare except leaves FinalURL empty on any error.
            line['FinalURL'] = ""
        # The scrape itself (gatherJob) uses the JV.htm listing view.
        line['url'] = "https://www.glassdoor.com/job-listing/JV.htm?jl="+jobID
    return [gatherJob(jobListing) for jobListing in jobListings]
def gatherJob(listing):
    """Fetch one listing page and return its JSON-LD job record as a dict.

    On any failure, returns a placeholder record that still carries a URL so
    downstream code (create_job) can keep a link column.
    """
    page = requests.get(listing['url'], headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}, verify=False)
    soup = BeautifulSoup(page.content, 'html.parser')
    try:
        # Extract the <script type="application/ld+json"> payload.
        # NOTE(review): the replace("<","<")/replace(">",">") calls are no-ops
        # as written; presumably they were replace("&lt;","<")/("&gt;",">")
        # before an HTML-unescape mangled this source -- confirm.
        soup = str(soup).split('<script type="application/ld+json">')[1].split('</script>')[0].replace("<","<").replace(">",">").replace("\n","").replace("\r","")
        listingJSON = json.loads(soup)
        listingJSON['FinalURL'] = listing['FinalURL']
        return(listingJSON)
    except Exception:
        return {'FinalURL': listing['url'],'hiringOrganization':['None'],'title':['None'],'description':['None'],'jobLocation':['None']}
| sdf94/jobsearchtool | jobsearch/jobsearch/glassdoor.py | glassdoor.py | py | 3,299 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "urllib3.disable_warnings",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "urllib3.exceptions",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "pandas.io.json.json_normalize",
"line_number": 10,
"usage_type": "call"
},
{
"api_... |
13043003078 | from iotile.core.utilities.paths import settings_directory
import sqlite3
import os.path
import sys
import os
class SQLiteKVStore:
    """A simple string - string persistent map backed by sqlite for concurrent access

    The KeyValueStore can be made to respect python virtual environments if desired.
    """

    DefaultFolder = settings_directory()

    def __init__(self, name, folder=None, respect_venv=False):
        """Open (creating it if necessary) the backing database file.

        Args:
            name (str): file name of the sqlite database inside ``folder``.
            folder (str): directory for the database; defaults to DefaultFolder.
            respect_venv (bool): when True and running inside a virtualenv or
                python 3 venv, store the database inside that environment.
        """
        if folder is None:
            folder = SQLiteKVStore.DefaultFolder

        # If we are relative to a virtual environment, place the registry into
        # that virtual env.  Support both virtualenv and python 3 venv.
        if respect_venv and hasattr(sys, 'real_prefix'):
            folder = sys.prefix
        elif respect_venv and hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:
            folder = sys.prefix

        if not os.path.exists(folder):
            os.makedirs(folder, 0o755)

        dbfile = os.path.join(folder, name)

        self.connection = sqlite3.connect(dbfile)
        self.cursor = self.connection.cursor()
        self.file = dbfile

        self._setup_table()

    def _setup_table(self):
        """Create the KVStore table if it does not already exist."""
        query = 'create table if not exists KVStore (key TEXT PRIMARY KEY, value TEXT);'
        self.cursor.execute(query)
        self.connection.commit()

    def close(self):
        """Close the underlying sqlite connection.

        Fix: the original class had no way to release the connection.
        """
        self.connection.close()

    def __enter__(self):
        # Context-manager support so the connection is always closed.
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
        return False

    def size(self):
        """Return the number of stored key/value pairs."""
        query = 'select count(*) from KVStore'
        self.cursor.execute(query)
        return self.cursor.fetchone()[0]

    def get_all(self):
        """Return every (key, value) pair as a list of tuples."""
        query = 'select key, value from KVStore'
        self.cursor.execute(query)
        return self.cursor.fetchall()

    def get(self, my_id):
        """Return the value stored for ``my_id``; raise KeyError when absent."""
        query = 'select value from KVStore where key is ?'
        self.cursor.execute(query, (my_id,))
        val = self.cursor.fetchone()
        if val is None:
            raise KeyError("id not in key-value store: %s" % str(my_id))
        return val[0]

    def remove(self, key):
        """Delete ``key`` if present (no error when the key is missing)."""
        query = "delete from KVStore where key is ?"
        self.cursor.execute(query, (key,))
        self.connection.commit()

    def try_get(self, my_id):
        """Return the value for ``my_id`` or None when it is not stored."""
        try:
            return self.get(my_id)
        except KeyError:
            return None

    def set(self, key, value):
        """Insert or overwrite ``key``; the value is stored as str(value)."""
        query = "insert or replace into KVStore values (?, ?)"
        self.cursor.execute(query, (key, str(value)))
        self.connection.commit()

    def clear(self):
        """Drop and recreate the table, removing all entries."""
        query = 'drop table KVStore'
        self.cursor.execute(query)
        self.connection.commit()
        self._setup_table()
| iotile/coretools | iotilecore/iotile/core/utilities/kvstore_sqlite.py | kvstore_sqlite.py | py | 2,544 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "iotile.core.utilities.paths.settings_directory",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.prefix",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "sys.base_prefix",
"line_number": 24,
"usage_type": "attribute"
},
{
... |
37919203191 | from django.utils import timezone
import math
from rest_framework.response import Response
from typing import List
from discordoauth2.models import User
from ranked.models import EloHistory, GameMode, Match, PlayerElo
from .elo_constants import N, K, R, B, C, D, A
def validate_post_match_req_body(body: dict, players_per_alliance: int):
    """Validate a create-match request body.

    Returns a 400 Response describing the first problem found, or None when
    the body is valid.
    """
    def reject(message):
        return Response(status=400, data={'error': message})

    required = ('red_alliance', 'blue_alliance', 'red_score', 'blue_score')
    if body is None or not all(field in body for field in required):
        return reject('Missing required fields in request body.')

    if not (isinstance(body['red_alliance'], list) and isinstance(body['blue_alliance'], list)):
        return reject('red_alliance and blue_alliance must be arrays.')

    if len(body['red_alliance']) != players_per_alliance or len(body['blue_alliance']) != players_per_alliance:
        return reject('Invalid number of players per alliance.')

    if not (isinstance(body['red_score'], int) and isinstance(body['blue_score'], int)):
        return reject('Score must be integers.')

    if body['red_score'] < 0 or body['blue_score'] < 0:
        return reject('Score cannot be negative.')

    return None
def validate_patch_match_req_body(body: dict):
    """Validate an edit-match request body.

    Returns a 400 Response describing the first problem found, or None when
    the body is valid.
    """
    def reject(message):
        return Response(status=400, data={'error': message})

    if body is None or 'red_score' not in body or 'blue_score' not in body:
        return reject('Missing required fields in request body.')

    scores = (body['red_score'], body['blue_score'])
    if not all(isinstance(score, int) for score in scores):
        return reject('Score must be integers.')

    if any(score < 0 for score in scores):
        return reject('Score cannot be negative.')

    return None
def get_match_player_info(red_alliance: List[User], blue_alliance: List[User], game_mode: GameMode):
# Get players
red_players = []
for player_id in red_alliance:
try:
player = User.objects.get(id=player_id)
except User.DoesNotExist:
return Response(status=404, data={
'error': f'Player {player_id} does not exist.'
}), None, None, None, None
red_players.append(player)
blue_players = []
for player_id in blue_alliance:
try:
player = User.objects.get(id=player_id)
except User.DoesNotExist:
return Response(status=404, data={
'error': f'Player {player_id} does not exist.'
}), None, None, None, None
blue_players.append(player)
# Get player elos
red_player_elos = []
for player in red_players:
player_elo = PlayerElo.objects.get_or_create(
player=player, game_mode=game_mode)[0]
red_player_elos.append(player_elo)
blue_player_elos = []
for player in blue_players:
player_elo = PlayerElo.objects.get_or_create(
player=player, game_mode=game_mode)[0]
blue_player_elos.append(player_elo)
return None, red_players, blue_players, red_player_elos, blue_player_elos
def update_player_elos(match: Match, red_player_elos: List[PlayerElo], blue_player_elos: List[PlayerElo]):
red_elo = match.red_starting_elo
blue_elo = match.blue_starting_elo
red_odds = 1 / (1 + 10 ** ((blue_elo - red_elo) / N))
blue_odds = 1 / (1 + 10 ** ((red_elo - blue_elo) / N))
elo_changes = []
for player in red_player_elos + blue_player_elos:
num_played = player.matches_played
EloHistory.objects.create(
player_elo=player,
match_number=match.match_number,
elo=player.elo,
)
if player in red_player_elos:
score_diff = match.red_score - match.blue_score
odds = red_odds
player.total_score += match.red_score
else:
score_diff = match.blue_score - match.red_score
odds = blue_odds
player.total_score += match.blue_score
if (score_diff > 0):
odds_diff = 1 - odds
player.matches_won += 1
elif (score_diff == 0):
odds_diff = 0.5 - odds
player.matches_drawn += 1
else:
odds_diff = 0 - odds
player.matches_lost += 1
elo_change = ((
K / (1 + 0) + 2 * math.log(math.fabs(score_diff) + 1, 8)) * (
odds_diff)) * (((B - 1) / (A ** num_played)) + 1)
elo_changes.append(elo_change)
player.elo += elo_change
player.matches_played += 1
player.last_match_played_time = timezone.now()
player.last_match_played_number = match.match_number
player.save()
red_elo_changes = elo_changes[:len(red_player_elos)]
blue_elo_changes = elo_changes[len(red_player_elos):]
return red_elo_changes, blue_elo_changes
def revert_player_elos(match: Match, red_elo_history: List[EloHistory], blue_elo_history: List[EloHistory]):
# Revert the elo on PlayerElo to the elo on EloHistory
for elo_history_entry in red_elo_history:
elo_history_entry.player_elo.elo = elo_history_entry.elo
elo_history_entry.player_elo.matches_played -= 1
elo_history_entry.player_elo.total_score -= match.red_score
if match.red_score > match.blue_score:
elo_history_entry.player_elo.matches_won -= 1
elif match.red_score < match.blue_score:
elo_history_entry.player_elo.matches_lost -= 1
else:
elo_history_entry.player_elo.matches_drawn -= 1
elo_history_entry.player_elo.save()
elo_history_entry.delete()
for elo_history_entry in blue_elo_history:
elo_history_entry.player_elo.elo = elo_history_entry.elo
elo_history_entry.player_elo.matches_played -= 1
elo_history_entry.player_elo.total_score -= match.blue_score
if match.red_score < match.blue_score:
elo_history_entry.player_elo.matches_won -= 1
elif match.red_score > match.blue_score:
elo_history_entry.player_elo.matches_lost -= 1
else:
elo_history_entry.player_elo.matches_drawn -= 1
elo_history_entry.player_elo.save()
elo_history_entry.delete()
| SecondRobotics/SecondWebsite | ranked/api/lib.py | lib.py | py | 6,502 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "rest_framework.response.Response",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 23,
"usage_type": "ca... |
11735587857 | import pyrebase
import matplotlib.pyplot as plt
firebaseConfig = {"apiKey": "AIzaSyCfuQ46q09FozGesUxT3ZakA_7XhGrnrUM",
"authDomain": "fir-course-56a13.firebaseapp.com",
"projectId": "fir-course-56a13",
"storageBucket": "fir-course-56a13.appspot.com",
"messagingSenderId": "447378702514",
"appId": "1:447378702514:web:f15e3623cec16a1d3949ab",
"databaseURL": "https://fir-course-56a13-default-rtdb.firebaseio.com/"
}
firebase = pyrebase.initialize_app(firebaseConfig)
# auth = firebase.auth()
#
# #SignIn
# email = input("Enter your email.")
# password = input("Enter your password")
# try:
# auth.sign_in_with_email_and_password(email, password)
# print('hmm thk ase')
# except:
# print('fuck off')
#
#
# #SignUp
# email = input("Email den")
# password = input("password lekhen ekta")
# confirmpass=input("Abar lekhen password")
# if password==confirmpass:
# try:
# auth.create_user_with_email_and_password(email,password)
# print("Success")
# except:
# print("Email ase already eda")
#Storage
# storage = firebase.storage()
# # filename=input("Enter filename you want to upload")
# cloudfilename=input("enter name of the file on the cloud")
#
# # storage.child(cloudfilename).put(filename)
#
# #download
# print(storage.child(cloudfilename).get_url(None))
# storage.child(cloudfilename).download("","xmennn.txt")
#Database
db = firebase.database()
# data = {'age':40,'address':'dhaka','employed':True}
# db.push(data)
# db.child("People").push(data)
#db.child("people").child("-MVIRPe7QGX69B6d1Fby").update({'age':51})
# elapsedtime=db.child("People").child("Abdomen_1").get()
# abdomen=db.child("People").child("Abdomen_1").get()
# print(elapsedtime)
# for person in people.each():
# print(person.val())
# print(person.key())
# if person.val()['address'] == "dhaka":
# db.child("People").child(person.key()).update({'employed': True})
#Delete
# db.child("people").remove();
#Read
# list1 = []
# people=db.child("People").child(2).child("Abdomen_1").get()
# #print(people.val())
elapsedtime = []
abdomenlist = []
ECG1 = db.child("ECG1").get()
for ecg in ECG1.each():
ecgkey = ecg.key()
x = db.child("ECG1").child(ecgkey).child("Elapsed time").get()
y = db.child("ECG1").child(ecgkey).child("Abdomen_1").get()
elapsedtime.append(x.val())
abdomenlist.append(y.val())
plt.title("ECG graph")
plt.xlabel("Elapsed Time")
plt.ylabel("Abdomen")
plt.plot(elapsedtime, abdomenlist, color="blue")
plt.show()
#print(y.val())
#list1.append(y.val())
#print(list1)
| ifran-rahman/Python-Firebase | pythonProject/main.py | main.py | py | 2,608 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pyrebase.initialize_app",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "mat... |
30335073768 | from django.shortcuts import render
from django.template import loader
from django.http import HttpResponse
import psycopg2
# Create your views here.
def init(request):
try:
conn = psycopg2.connect(
database = 'djangotraining',
host = 'localhost',
user = 'djangouser',
password = 'secret'
)
curr = conn.cursor()
curr.execute(''' CREATE TABLE IF NOT EXISTS ex02_movies (
title varchar(64) UNIQUE NOT NULL,
episode_nb int PRIMARY KEY,
opening_crawl text,
director varchar(32) NOT NULL,
producer varchar(128) NOT NULL,
release_date date NOT NULL
)
''')
conn.commit()
conn.close()
return HttpResponse("OK")
except Exception as e:
return HttpResponse(e)
def _make_insert(movie):
return f'''INSERT INTO ex02_movies (episode_nb, title, director, producer, release_date)
VALUES
({movie[0]}, '{movie[1]}', '{movie[2]}', '{movie[3]}', '{movie[4]}')
'''
def populate(request):
try:
conn = psycopg2.connect(
database = 'djangotraining',
host = 'localhost',
user = 'djangouser',
password = 'secret'
)
curr = conn.cursor()
inserts = {}
inserts['OK'] = []
inserts['NOK'] = []
movies = [
(1, 'The Phantom Menace', 'George Lucas', 'Rick McCallum', '1999-05-19'),
(2, 'Attack of the Clones', 'George Lucas', 'Rick McCallum', '2002-05-16'),
(3, 'Revenge of the Sith', 'George Lucas', 'Rick McCallum', '2005-05-19'),
(4, 'A New Hope', 'George Lucas', 'Gary Kurtz, Rick McCallum', '1977-05-25'),
(5, 'The Empire Strikes Back', 'Irvin Kershner', 'Gary Kurtz, Rick McCallum', '1980-05-17'),
(6, 'Return of the Jedi', 'Richard Marquand', 'Howard G. Kazanjian, George Lucas, Rick McCallum', '1983-05-25'),
(7, 'The Force Awakens', 'J. J. Abrams', 'Kathleen Kennedy, J. J. Abrams, Bryan Burk', '2015-12-11')
]
for movie in movies:
try:
curr.execute(_make_insert(movie))
conn.commit()
inserts['OK'].append(movie[1])
except Exception as e:
inserts['NOK'].append(f"{movie[1]}: error {e}")
conn.close()
return render(request, 'ex02/index.html', {'inserts_ok': inserts['OK'], 'inserts_nok': inserts['NOK']})
except Exception as e:
return HttpResponse(e)
def display(request):
pass | RickBadKan/42-mini-piscina | list05/ex02/views.py | views.py | py | 2,598 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "psycopg2.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "psy... |
29966016525 |
import types
import pickle
import os
import numpy as np
import pathlib
import pdb
import scipy.io as sio
import copy
from do_mpc.tools import load_pickle, save_pickle
import types
import logging
from inspect import signature
from typing import Union
class DataHandler:
"""Post-processing data created from a sampling plan.
Data (individual samples) were created with :py:class:`do_mpc.sampling.Sampler`.
The list of all samples originates from :py:class:`do_mpc.sampling.SamplingPlanner` and is used to
initiate this class (``sampling_plan``).
The class can be created with optional keyword arguments which are passed to :py:meth:`set_param`.
**Configuration and retrieving processed data:**
1. Initiate the object with the ``sampling_plan`` originating from :py:class:`do_mpc.sampling.SamplingPlanner`.
2. Set parameters with :py:meth:`set_param`. Most importantly, the directory in which the individual samples are located should be passe with ``data_dir`` argument.
3. (Optional) set one (or multiple) post-processing functions. These functions are applied to each loaded sample and can, e.g., extract or compile important information.
4. Load and return samples either by indexing with the :py:meth:`__getitem__` method or by filtering with :py:meth:`filter`.
**Example:**
::
sp = do_mpc.sampling.SamplingPlanner()
# Plan with two variables alpha and beta:
sp.set_sampling_var('alpha', np.random.randn)
sp.set_sampling_var('beta', lambda: np.random.randint(0,5))
plan = sp.gen_sampling_plan(n_samples=10)
sampler = do_mpc.sampling.Sampler(plan)
# Sampler computes the product of two variables alpha and beta
# that were created in the SamplingPlanner:
def sample_function(alpha, beta):
return alpha*beta
sampler.set_sample_function(sample_function)
sampler.sample_data()
# Create DataHandler object with same plan:
dh = do_mpc.sampling.DataHandler(plan)
# Assume you want to compute the square of the result of each sample
dh.set_post_processing('square', lambda res: res**2)
# As well as the value itself:
dh.set_post_processing('default', lambda res: res)
# Query all post-processed results with:
dh[:]
"""
def __init__(self, sampling_plan, **kwargs):
self.flags = {
'set_post_processing' : False,
}
# Parameters that can be set for the DataHandler:
self.data_fields = [
'data_dir',
'sample_name',
'save_format'
]
self.data_dir = './'
self.sample_name = 'sample'
self.save_format = 'pickle'
self.sampling_plan = sampling_plan
self.sampling_vars = list(sampling_plan[0].keys())
self.post_processing = {}
self.pre_loaded_data = {'id':[], 'data':[]}
if kwargs:
self.set_param(**kwargs)
@property
def data_dir(self):
"""Set the directory where the results are stored.
"""
return self._data_dir
@data_dir.setter
def data_dir(self, val):
self._data_dir = val
def __getitem__(self, ind):
""" Index results from the :py:class:`DataHandler`. Pass an index or a slice operator.
"""
try:
if isinstance(ind, int):
samples = [self.sampling_plan[ind]]
elif isinstance(ind, (tuple, slice, list)):
samples = self.sampling_plan[ind]
else:
raise Exception('ind must be of type int, tuple, slice or list. You have {}'.format(type(ind)))
except IndexError:
print('---------------------------------------------------------------')
print('Trying to access a non-existent element from the sampling plan.')
print('---------------------------------------------------------------')
raise
return_list = []
# For each sample:
for sample in samples:
# Check if this result was previously loaded. If not, add it to the dict of pre_loaded_data.
result = self._lazy_loading(sample)
result_processed = self._post_process_single(sample,result)
return_list.append(result_processed)
return return_list
def _lazy_loading(self,sample):
""" Private method: Checks if data is already loaded to reduce the computational load.
"""
if sample['id'] in self.pre_loaded_data['id']:
ind = self.pre_loaded_data['id'].index(sample['id'])
result = self.pre_loaded_data['data'][ind]
else:
result = self._load(sample['id'])
self.pre_loaded_data['id'].append(sample['id'])
self.pre_loaded_data['data'].append(result)
return result
def _post_process_single(self,sample,result):
""" Private method: Applies all post processing functions to a single sample and stores them.
"""
result_processed = copy.copy(sample)
# Post process result
if self.flags['set_post_processing']:
for key in self.post_processing:
if result is not None:
# post_processing function is either just a function of the result or of the sample and the result.
if self.post_processing[key]['n_args'] == 1:
result_processed[key] = self.post_processing[key]['function'](result)
elif self.post_processing[key]['n_args'] == 2:
result_processed[key] = self.post_processing[key]['function'](sample, result)
else:
result_processed[key] = None
# Result without post processing
else:
result_processed['res'] = result
return result_processed
def filter(self,
input_filter:Union[types.FunctionType,types.BuiltinFunctionType]=None,
output_filter:Union[types.FunctionType,types.BuiltinFunctionType]=None
)->list:
""" Filter data from the DataHandler. Filters can be applied to inputs or to results that were obtained with the post-processing functions.
Filtering returns only a subset from the created samples based on arbitrary conditions.
**Example**:
::
sp = do_mpc.sampling.SamplingPlanner()
# SamplingPlanner with two variables alpha and beta:
sp.set_sampling_var('alpha', np.random.randn)
sp.set_sampling_var('beta', lambda: np.random.randint(0,5))
plan = sp.gen_sampling_plan()
...
dh = do_mpc.sampling.DataHandler(plan)
dh.set_post_processing('square', lambda res: res**2)
# Return all samples with alpha < 0 and beta > 2
dh.filter(input_filter = lambda alpha, beta: alpha < 0 and beta > 2)
# Return all samples for which the computed value square < 5
dh.filter(output_filter = lambda square: square < 5)
Args:
input_filter: Function to filter the data.
output_filter: Function to filter the data
Raises:
assertion: No post processing function is set
assertion: filter_fun must be either Function of BuiltinFunction_or_Method
Returns:
Returns the post processed samples that satisfy the filter
"""
assert isinstance(input_filter, (types.FunctionType, types.BuiltinFunctionType, type(None))), 'input_filter must be either Function or BuiltinFunction_or_Method, you have {}'.format(type(input_filter))
assert isinstance(output_filter, (types.FunctionType, types.BuiltinFunctionType, type(None))), 'output_filter must be either Function or BuiltinFunction_or_Method, you have {}'.format(type(output_filter))
return_list = []
# Wrapper to ensure arbitrary arguments are accepted
def wrap_fun_in(**kwargs):
if input_filter is None:
return True
else:
return input_filter(**{arg_i: kwargs[arg_i] for arg_i in input_filter.__code__.co_varnames})
# Wrapper to ensure arbitrary arguments are accepted
def wrap_fun_out(**kwargs):
if output_filter is None:
return True
else:
return output_filter(**{arg_i: kwargs[arg_i] for arg_i in output_filter.__code__.co_varnames})
# For each sample:
for sample in self.sampling_plan:
if wrap_fun_in(**sample)==True:
# Check if this result was previously loaded. If not, add it to the dict of pre_loaded_data.
result = self._lazy_loading(sample)
result_processed = self._post_process_single(sample,result)
# Check if the computed post-processing value satsifies the output_filter condition:
if wrap_fun_out(**result_processed)==True:
return_list.append(result_processed)
return return_list
def _load(self, sample_id):
""" Private method: Load data generated from a sampling plan, either '.pkl' or '.mat'
"""
name = '{sample_name}_{id}'.format(sample_name=self.sample_name, id=sample_id)
if self.save_format == 'pickle':
load_name = self.data_dir + name + '.pkl'
elif self.save_format == 'mat':
load_name = self.data_dir + name+'.mat'
try:
if self.save_format == 'pickle':
result = load_pickle(load_name)
elif self.save_format == 'mat':
result = sio.loadmat(load_name)
except FileNotFoundError:
logging.warning('Could not find or load file: {}. Check data_dir parameter and make sure sample has already been generated.'.format(load_name))
result = None
return result
def set_param(self, **kwargs)->None:
"""Set the parameters of the DataHandler.
Parameters must be passed as pairs of valid keywords and respective argument.
For example:
::
datahandler.set_param(overwrite = True)
Args:
data_dir(bool): Directory where the data can be found (as defined in the :py:class:`do_mpc.sampling.Sampler`).
sample_name(str): Naming scheme for samples (as defined in the :py:class:`do_mpc.sampling.Sampler`).
save_format(str): Choose either ``pickle`` or ``mat`` (as defined in the :py:class:`do_mpc.sampling.Sampler`).
"""
for key, value in kwargs.items():
if not (key in self.data_fields):
print('Warning: Key {} does not exist for DataHandler.'.format(key))
else:
setattr(self, key, value)
def set_post_processing(self, name:str, post_processing_function:Union[types.FunctionType,types.BuiltinFunctionType])->None:
"""Set a post processing function.
The post processing function is applied to all loaded samples, e.g. with :py:meth:`__getitem__` or :py:meth:`filter`.
Users can set an arbitrary amount of post processing functions by repeatedly calling this method.
The ``post_processing_function`` can have two possible signatures:
1. ``post_processing_function(case_definition, sample_result)``
2. ``post_processing_function(sample_result)``
Where ``case_definition`` is a ``dict`` of all variables introduced in the :py:class:`do_mpc.sampling.SamplingPlanner`
and ``sample_results`` is the result obtained from the function introduced with :py:class:`do_mpc.sampling.Sampler.set_sample_function`.
Note:
Setting a post processing function with an already existing name will overwrite the previously set post processing function.
**Example:**
::
sp = do_mpc.sampling.SamplingPlanner()
# Plan with two variables alpha and beta:
sp.set_sampling_var('alpha', np.random.randn)
sp.set_sampling_var('beta', lambda: np.random.randint(0,5))
plan = sp.gen_sampling_plan(n_samples=10)
sampler = do_mpc.sampling.Sampler(plan)
# Sampler computes the product of two variables alpha and beta
# that were created in the SamplingPlanner:
def sample_function(alpha, beta):
return alpha*beta
sampler.set_sample_function(sample_function)
sampler.sample_data()
# Create DataHandler object with same plan:
dh = do_mpc.sampling.DataHandler(plan)
# Assume you want to compute the square of the result of each sample
dh.set_post_processing('square', lambda res: res**2)
# As well as the value itself:
dh.set_post_processing('default', lambda res: res)
# Query all post-processed results with:
dh[:]
Args:
name: Name of the output of the post-processing operation
post_processing_function: The post processing function to be evaluted
Raises:
assertion: name must be string
assertion: post_processing_function must be either Function of BuiltinFunction
"""
assert isinstance(name, str), 'name must be str, you have {}'.format(type(name))
assert isinstance(post_processing_function, (types.FunctionType, types.BuiltinFunctionType)), 'post_processing_function must be either Function or BuiltinFunction_or_Method, you have {}'.format(type(post_processing_function))
# Check signature of function for number of arguments.
sig = signature(post_processing_function)
n_args = len(sig.parameters.keys())
self.post_processing.update({name: {'function': post_processing_function, 'n_args': n_args}})
self.flags['set_post_processing'] = True | do-mpc/do-mpc | do_mpc/sampling/_datahandler.py | _datahandler.py | py | 14,004 | python | en | code | 729 | github-code | 1 | [
{
"api_name": "copy.copy",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "types.FunctionType",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "types.BuiltinFuncti... |
18638555644 | # reference -https://towardsdatascience.com/machine-learning-nlp-text-classification-using-scikit-learn-python-and-nltk-c52b92a7c73a
from sklearn.feature_extraction.text import CountVectorizer,TfidfTransformer
count_vect = CountVectorizer(lowercase = False, ngram_range = (1,2), max_df=0.95)
tfidf_transformer = TfidfTransformer(use_idf=False) #Count the frequency of words and divide it by total no. of words. deletes most common words like a ,is.
train_data = []
test_data = []
train_y = [] # data store for target variables for training, list of A(a)pples
def predict():
# training set text processing
X_train_counts = count_vect.fit_transform(train_data)
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
# training set text processing
X_new_counts = count_vect.transform(test_data)
X_new_tfidf = tfidf_transformer.transform(X_new_counts)
# initializing model
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB() #94
# model fitting
clf = clf.fit(X_train_tfidf, train_y)
# predicting answers for test set
predicted = clf.predict(X_new_tfidf)
for results in predicted:
print(results)
# reading trainig data from text files
for line in open('apple-computers.txt'):
if len(line.strip())>0: #skip empty lines
train_data.append(line.strip().strip('. '))
train_y.append('computer-company')
for line in open('apple-fruit.txt'):
if len(line.strip())>0: #skip empty lines
train_data.append(line.strip('. '))
train_y.append('fruit')
N = int(input())
for n in range(N):
inp = input()
test_data.append(inp)
predict() | devanshi16/hackerRank-NLP | byte-the-correct-apple.py | byte-the-correct-apple.py | py | 1,728 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sklearn.feature_extraction.text.CountVectorizer",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfTransformer",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sklearn.naive_bayes.MultinomialNB",
"line_numb... |
3553966386 | from __future__ import print_function
import pyaudio
from ibm_watson import SpeechToTextV1
from ibm_watson.websocket import RecognizeCallback, AudioSource
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from threading import Thread
import configparser
import time
import json
import requests
from requests.auth import HTTPBasicAuth
try:
from Queue import Queue, Full
except ImportError:
from queue import Queue, Full
#carrega configuracoes
config = configparser.ConfigParser()
config.read('config.ini')
###############################################
#### inicia fila para gravar as gravacoes do microfone ##
###############################################
CHUNK = 1024
# Nota: gravacoes sao descartadas caso o websocket nao consuma rapido o suficiente
# Caso precise, aumente o max size conforme necessario
BUF_MAX_SIZE = CHUNK * 10
# Buffer para guardar o audio
q = Queue(maxsize=int(round(BUF_MAX_SIZE / CHUNK)))
# Cria o audio source com a fila
# audio_source = AudioSource(q, True, True)
#configura o speech2text
authenticator = IAMAuthenticator(config['SPEECH2TEXT']['API_KEY'])
speech_to_text = SpeechToTextV1(
authenticator=authenticator)
speech_to_text.set_service_url(config['SPEECH2TEXT']['URL'])
texto = ''
# classe de callback para o servico de reconhecimento de voz
class MyRecognizeCallback(RecognizeCallback):
def __init__(self):
RecognizeCallback.__init__(self)
def on_transcription(self, transcript):
global texto
print('transcript: ', transcript[0]['transcript'])
texto = transcript[0]['transcript']
pass
def on_connected(self):
print('Conexão OK')
def on_error(self, error):
print('Erro recebido: {}'.format(error))
def on_inactivity_timeout(self, error):
print('Timeout de inatividade: {}'.format(error))
def on_listening(self):
print('Serviço está ouvindo, aperte q + Enter para finalizar')
def on_hypothesis(self, hypothesis):
pass
def on_data(self, data):
#global resultado
print('Texto detectado: ')
#for result in data['results']:
# resultado = (result['alternatives'][0]['transcript']) #Como gravar essa saída para uso na função start_reading em tsf.py?
def on_close(self):
print("Conexão fechada")
# inicia o reconhecimento usando o audio_source
def recognize_using_websocket(recorded):
audio_source = AudioSource(recorded, False, False)
mycallback = MyRecognizeCallback()
speech_to_text.recognize_using_websocket(audio=audio_source,
content_type='audio/webm; codecs=opus',
recognize_callback=mycallback,
model='pt-BR_NarrowbandModel',
interim_results=False)
###############################################
#### Prepara gravacao usando pyaudio ##
###############################################
# Config do pyaudio para as gravacoes
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
# define callback para gravar o audio na fila
def pyaudio_callback(in_data, frame_count, time_info, status):
try:
q.put(in_data)
except Full:
pass # discard
return (None, pyaudio.paContinue)
def start_stream(recorded):
print('<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>')
print(type(recorded[0]))
#auth_var = HTTPBasicAuth('WATSON_STT_APIKEY', config['SPEECH2TEXT']['API_KEY'])
auth_var = HTTPBasicAuth('apikey', config['SPEECH2TEXT']['API_KEY'])
#?model=en-US_NarrowbandModel pt-BR_NarrowbandModel
x = requests.post(
(config['SPEECH2TEXT']['URL'] + '/v1/recognize?model=pt-BR_NarrowbandModel'),
auth=auth_var,
data = recorded[0],
headers = {"Content-Type": "audio/webm; codecs=opus"})
print('texto:')
json_resposta = x.text
print(json_resposta)
print('interpretacao')
interpretacao = json.loads(json_resposta, encoding='utf-8')['results'][0 ]['alternatives'][0]['transcript']
print(interpretacao)
auth_var = HTTPBasicAuth('apikey', config['TEXT2SPEECH']['API_KEY'])
#?model=en-US_NarrowbandModel pt-BR_NarrowbandModel
print("como ficou:")
print("{\"text\":\"" + interpretacao + "\"}")
fala = requests.post(
(config['TEXT2SPEECH']['URL'] + '/v1/synthesize?voice=pt-BR_IsabelaV3Voice'), #pt-BR_IsabelaV3Voice
auth=auth_var,
data = "{\"text\":\"" + interpretacao + "\"}",
#data = "\{\"text\":\"" + interpretacao + "\"}",
#data = interpretacao,
#headers = {"Content-Type": "application/json", "Accept":"audio/wav"})
headers = {"Content-Type": "application/json", "Accept":"audio/mp3; codecs=opus"})
#headers = {"Content-Type": "text/plain", "Accept":"audio/ogg; codecs=opus"})
#headers = {"Content-Type": "audio/ogg; codecs=opus"})
print("resposta do text2speech - tipo: ")
print(type(fala))
#print(type(fala.content))
print(type(fala.encoding))
#print(fala.text)
print(fala.headers)
print(fala.status_code)
with open("fala.mp3", "wb") as f:
f.write(fala.content)
| omboido/telefone_sem_fio | dic.py | dic.py | py | 5,238 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "queue.Queue",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "ibm_cloud_sdk_core.authenticators.IAMAuthenticator",
"line_number": 36,
"usage_type": "call"
},
... |
7898200713 | from django.shortcuts import render
from .models import Product, OrderProduct, Department
from django.http import HttpResponse
from django.template import loader
import heapq
from operator import itemgetter
# server functions
def ticket_promedio():
order_products = OrderProduct.objects.all()
orders = {}
for order_product in order_products:
product_of_order = order_product.product_id
orders[order_product.order_id] = orders.get(
order_product.order_id, 0) + order_product.quantity*product_of_order.price
promedio = sum(orders.values())/len(orders)
return promedio
def margen_promedio():
order_products = OrderProduct.objects.all()
orders = {}
for order_product in order_products:
product_of_order = order_product.product_id
orders[order_product.order_id] = orders.get(
order_product.order_id, 0) + product_of_order.margin
promedio = sum(orders.values())/len(orders)
return promedio
def cantidad_promedio():
order_products = OrderProduct.objects.all()
orders = {}
for order_product in order_products:
product_of_order = order_product.product_id
orders[order_product.order_id] = orders.get(
order_product.order_id, 0) + order_product.quantity
promedio = sum(orders.values())/len(orders)
return promedio
def get_ganancia_producto(pquantity, price, margin):
return quantity*price*margin
def dic_productos_departamentos():
order_products = OrderProduct.objects.all()
departments_with_products = {}
for order_product in order_products:
product_of_order = order_product.product_id
product_department = product_of_order.department_id
dic_products = departments_with_products.get(
product_department.department, {})
dic_products[order_product.product_id] = dic_products.get(
order_product.product_id, 0) + order_product.quantity
departments_with_products[product_of_order] = dic_products
for department_products in departments_with_products:
departments_with_products[department_products] = dict(heapq.nlargest(
5, departments_with_products[department_products].items(), key=itemgetter(1)))
for department in departments_with_products:
departments_with_products[department] = list(
departments_with_products[department])
return departments_with_products
def indicadores(request):
context = {
'ticket': ticket_promedio(),
'margin': margen_promedio(),
'quantity': cantidad_promedio(),
}
return render(request, 'salesHistory/indicadores.html', context)
def tablas(request):
context = {
'top_dep': dic_productos_departamentos().items(),
'n': range(5),
}
return render(request, 'salesHistory/tablas.html', context)
def visualizador(request):
return render(request, 'salesHistory/lineaTiempo.html', context)
| PaulaGonzalez01/SalesHistory | sales_history/views.py | views.py | py | 2,968 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "models.OrderProduct.objects.all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.OrderProduct.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "models.OrderProduct",
"line_number": 11,
"usage_type": "name"
},
{
... |
39409212934 | import cv2
import face_recognition
import put_chinese_text
import time
VIDEO_DIR = 'hamilton_clip.mp4'
resize_ratio = 0.5
input_video = cv2.VideoCapture(VIDEO_DIR) # 读取视频文件
length = int(input_video.get(cv2.CAP_PROP_FRAME_COUNT)) #视频帧数
fourcc = cv2.VideoWriter_fourcc(*'mp4v') # 视频编码器
output_video = cv2.VideoWriter('hamilton_clip_recognition_face.mp4', fourcc, 29.97, (640, 360)) # 打开一个视频写入文件
# encoding两张需要识别的人脸
lmm_image = face_recognition.load_image_file("lin-manuel-miranda.png")
lmm_face_encoding = face_recognition.face_encodings(lmm_image)[0]
al_image = face_recognition.load_image_file("alex-lacamoire.png")
al_face_encoding = face_recognition.face_encodings(al_image)[0]
# 已知人脸encodings
known_face_encodings = [
lmm_face_encoding,
al_face_encoding
]
known_face_names = [
'lin',
'alex'
]
frame_number = 0
while True:
start_time = time.time()
# 读取视频流的每一帧
ret, frame = input_video.read()
# 放缩每一帧,加速
small_frame = cv2.resize(frame, (0, 0), fx=resize_ratio, fy=resize_ratio)
frame_number += 1
# 视频结束
if not ret:
break
# BGR2RGB,face_recognition库需要此格式
rgb_frame = small_frame[:, :, ::-1]
# 获取当前帧所有人脸及其encodings
face_locations = face_recognition.face_locations(rgb_frame, model='cnn')
face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
face_names = []
for encoding in face_encodings:
# 为每一个encoding对比已知的encodings
matches = face_recognition.compare_faces(known_face_encodings, encoding)
name = 'unknown'
# 找出结果为True所对应的名字
if True in matches:
matchIds = [i for (i, b) in enumerate(matches) if b]
name = known_face_names[matchIds[0]]
face_names.append(name)
# 标记人脸和名字
for (top, right, bottom, left), name in zip(face_locations, face_names):
# 如果使用了放缩帧,从新计算原始坐标位置
top, right, bottom, left = int(top / resize_ratio), int(right / resize_ratio), int(bottom / resize_ratio), int(left / resize_ratio)
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
top_text = top - 16 if top - 16 > 16 else top + 16
cv2.rectangle(frame, (left, top_text), (right, top), (0, 0, 255), cv2.FILLED)
frame = put_chinese_text.draw_text(frame, name, (left, top_text), (0, 0, 0))
end_time = time.time()
fps = '%.2f FPS'% (1 / (end_time - start_time))
# cv2.putText(frame, fps, (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)
cv2.putText(frame, fps, (0, 20), cv2.FONT_HERSHEY_DUPLEX, 0.7, (0, 255, 0), 1)
# 将该帧写入文件
output_video.write(frame)
# 显示
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
input_video.release()
cv2.destroyAllWindows()
if output_video is not None:
output_video.release() | Mikoto10032/FaceRecognition | face_recognition_in_video_file.py | face_recognition_in_video_file.py | py | 3,094 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_FRAME_COUNT",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoWriter_fourcc",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "... |
8306566749 | import sys, os, pickle
DIR = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
sys.path.append(DIR)
from Dataset.mnist import load_mnist
from functions import *
from PIL import Image
def img_show(img):
    """Display a numpy image array in the system's default image viewer."""
    Image.fromarray(np.uint8(img)).show()
def get_data():
    """Load MNIST and return only the flattened, normalized test split."""
    (_, _), (x_test, t_test) = load_mnist(flatten=True, normalize=True, one_hot_label=False)
    return x_test, t_test
def init_network():
    """Deserialize and return the pretrained weight dictionary from disk."""
    with open(DIR + "/Dataset/sample_weight.pkl", 'rb') as f:
        return pickle.load(f)
def predict(network, x):
    """Forward pass of the 3-layer MLP; returns softmax class probabilities."""
    weights = (network['W1'], network['W2'], network['W3'])
    biases = (network['b1'], network['b2'], network['b3'])
    # Two sigmoid hidden layers followed by a softmax output layer.
    activation = x
    for W, b in zip(weights[:2], biases[:2]):
        activation = sigmoid(np.dot(activation, W) + b)
    return softmax(np.dot(activation, weights[2]) + biases[2])
if __name__ == "__main__":
x, t = get_data()
network = init_network()
accuracy_cnt = 0
for i in range(len(x)):
y = predict(network, x[i])
p = np.argmax(y) # 확률이 가장 높은 원소의 인덱스를 얻는다.
if p == t[i]:
accuracy_cnt += 1
print("Accuracy:" + str(float(accuracy_cnt) / len(x)))
""" (x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=False)
# flatten parameter makes input images simple array
# normalize parameter normalizes input image's pixels to between 0.0 and 1.0 (or it can be 0~255)
print(x_train.shape)
print(t_train.shape)
print(x_test.shape)
print(t_test.shape)
img = x_train[0]
label = t_train[0]
print(label)
print(img.shape)
img = img.reshape(28, 28)
print(img.shape)
img_show(img) """ | PresentJay/Deep-Learning-from-Scratch | [03]신경망/03_MNIST/01_inference-with-forward-propagation.py | 01_inference-with-forward-propagation.py | py | 1,815 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line... |
70572585953 | from django.forms import ModelForm
from django.utils.translation import gettext_lazy as _
from . import models
class LivreForm(ModelForm):
    """ModelForm for creating and editing ``Livre`` (book) instances."""

    class Meta:
        model = models.Livre
        fields = ('titre', 'auteur', 'date_parution', 'nombre_pages','resume')
        # Translatable, human-readable labels for the rendered form fields.
        labels = {
            'titre' : _('Titre'),
            'auteur' : _('Auteur') ,
            'date_parution' : _('date de parution'),
            'nombre_pages' : _('nombres de pages'),
            'resume' : _('Résumé'),
        }
        # Render the publication date according to the active locale's format.
        localized_fields = ('date_parution',)
class CategorieForm(ModelForm):
    """ModelForm for creating and editing ``Categorie`` instances."""

    class Meta:
        model = models.Categorie
        fields = ('nom', 'description')
        # Translatable label; 'description' falls back to the model's verbose name.
        labels = {
            'nom' : _('Nom de la catégorie'),
        }
| arnauldAlbert/django-model | modele/bibliotheque/forms.py | forms.py | py | 751 | python | fr | code | 0 | github-code | 1 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 11,
"usage_type": "cal... |
7425876986 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import logging
from ..json_struct_patch import JsonStructPatch
def test_properties_diff():
    """A local field that exists in the template yields an empty diff."""
    document = {'rsyslog': {'facility': 2}}
    schema = { "rsyslog": {
        "properties": {
            "facility": {
                "index": "not_analyzed",
                "type": "string"
            }}}}
    delta = JsonStructPatch(schema).diff_template(document)
    logging.debug('delta == %s', delta)
    assert delta == []
def test_nested_properties_diff():
    """Nested local fields covered by nested template properties -> empty diff."""
    document = {'rsyslog': {'facility': {'local': 1}}}
    schema = { "rsyslog": {
        "properties": {
            "facility": {
                "properties": {
                    "local": {
                        "index": "not_analyzed",
                        "type": "string"
                    }}}}}}
    delta = JsonStructPatch(schema).diff_template(document)
    logging.debug('delta == %s', delta)
    assert delta == []
def test_neg_properties_diff():
    """A local field absent from the template is reported in the diff."""
    document = {'rsyslog': {'facility': 'sss', 'fromhost': 'openstack'}}
    schema = { "rsyslog": {
        "properties": {
            "facility": {
                "properties": {
                    "local": {
                        "index": "not_analyzed",
                        "type": "string"
                    }}}}}}
    delta = JsonStructPatch(schema).diff_template(document)
    logging.debug('delta == %s', delta)
    assert delta == [(".rsyslog.fromhost", "not_in_template")]
def test_2array_simple_diff():
    """With several top-level mappings, only the untemplated field shows up."""
    document = {'CEE': {}, 'systemd': {}, 'rsyslog': {'facility': 'll', 'protocol-version': 11}}
    schema = {
        "CEE": {
            "type": "object"
        },
        "rsyslog": {
            "properties": {
                "facility": {
                    "index": "not_analyzed",
                    "type": "string"
                }}},
        "systemd": {
            "properties": {
                "k": {
                    "properties": {
                        "KERNEL_DEVICE": {
                            "index": "not_analyzed",
                            "type": "string"
                        }}}}}}
    delta = JsonStructPatch(schema).diff_template(document)
    logging.debug('delta == %s', delta)
    assert delta == [(".rsyslog.protocol-version", "not_in_template")]
if __name__ == '__main__':
    # Allow running this module's tests directly via the nose test runner.
    import nose
    nose.runmodule()
| t0ffel/validate-es-documents | src/json_diff/test/test_template.py | test_template.py | py | 2,732 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json_struct_patch.JsonStructPatch",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "json_struct_patch.JsonStructPatch",
"line_number": 31,
"usage_type": "call"
},
{
... |
11726393436 | import side_by_side
import convolution
import numpy as np
from PIL import Image
from sys import argv
import math
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
def canny_img(im_init):
    """Canny edge detector: Gaussian blur, gradient estimation, non-maximum
    suppression and hysteresis thresholding.

    :param im_init: 2-D grayscale image array.
    :return: binary edge map (float array of 0.0 / 1.0, same shape).
    """
    def gaussiano(im):
        # 5x5 Gaussian smoothing kernel (integer approximation, sum 273).
        # Bug fix: ``np.float`` was removed in NumPy 1.24 -- use builtin float.
        gaussian = np.ones((5,5), dtype=float)
        gaussian[:,:] = [
            [1,4,7,4,1],
            [4,16,26,16,4],
            [7,26,41,26,7],
            [4,16,26,16,4],
            [1,4,7,4,1]]
        gaussian[:,:] *= 1./273
        return convolution.convolution(im, gaussian)

    def edge_detection(im, kernel):
        robertsx = np.zeros((2,2), dtype=float)
        robertsx[:,:] = [[1,0],
                         [0,-1]]
        robertsy = np.zeros((2,2), dtype=float)
        robertsy[:,:] = [[0,1],
                         [-1,0]]
        prewittx = np.zeros((3,3), dtype=float)
        prewittx[:,:] = [[-1, 0, 1],
                         [-1, 0, 1],
                         [-1, 0, 1]]
        prewitty = np.zeros((3,3), dtype=float)
        prewitty[:,:] = [[1, 1, 1],
                         [0, 0, 0],
                         [-1, -1, -1]]
        sobelx = np.zeros((3,3), dtype=float)
        sobelx[:,:] = [[1, 0, -1],
                       [2, 0, -2],
                       [1, 0, -1]]
        sobely = np.zeros((3,3), dtype=float)
        sobely[:,:] = [[1, 2, 1],
                       [0, 0, 0],
                       [-1, -2, -1]]

        def border_detection(img, kernelx, kernely):
            imx = convolution.convolution(img, kernelx)
            imx = imx.astype(float)
            imy = convolution.convolution(img, kernely)
            imy = imy.astype(float)
            # Normalized gradient magnitude and orientation.
            r = np.power(np.power(imx, 2)+np.power(imy, 2), 0.5) / np.sqrt(2.0)
            t = np.arctan(imy / (imx + 0.00001))
            return r, t

        # Bug fix: the selected kernels were applied to the module-level
        # ``im1`` instead of the ``im`` parameter, so the Gaussian-blurred
        # image passed by the caller was silently ignored.
        if kernel == "roberts":
            return border_detection(im, robertsx, robertsy)
        elif kernel == "prewitt":
            return border_detection(im, prewittx, prewitty)
        elif kernel == "sobel":
            return border_detection(im, sobelx, sobely)

    def supresion_de_no_maximos(jo, jm):
        # Keep a pixel only if it is a local maximum along its gradient
        # direction (orientation quantized into four sectors).
        jn = np.zeros(jm.shape, dtype=float)
        k1, k2 = jm.shape
        for i in range(1, k1-1):
            for j in range(1, k2-1):
                angulo = jo[i,j]
                if -np.pi / 8 < angulo <= np.pi / 8:
                    if jm[i,j] >= jm[i, j-1] and jm[i,j] >= jm[i, j+1]:
                        jn[i,j] = jm[i,j]
                elif np.pi / 8 < angulo <= 3 * np.pi / 8:
                    if jm[i,j] >= jm[i-1, j+1] and jm[i,j] >= jm[i+1, j-1]:
                        jn[i,j] = jm[i,j]
                elif -np.pi / 8 >= angulo > -3 * np.pi / 8:
                    if jm[i,j] >= jm[i-1, j-1] and jm[i,j] >= jm[i+1, j+1]:
                        jn[i,j] = jm[i,j]
                elif angulo > 3 * np.pi / 8 or angulo <= -3 * np.pi / 8:
                    if jm[i,j] >= jm[i-1, j] and jm[i,j] >= jm[i+1, j]:
                        jn[i,j] = jm[i,j]
        return jn / np.max(jn)

    # High/low hysteresis thresholds on the normalized magnitude.
    U_MAX = 0.14
    U_MIN = 0.01

    def umbral_por_histeresis(im, jo):
        # ``im`` (suppressed magnitude) has the same shape as the magnitude
        # map, so size the result from it instead of the enclosing ``jm``.
        r = np.zeros(im.shape, dtype=float)
        k1, k2 = im.shape
        for i in range(1, k1-1):
            for j in range(1, k2-1):
                if im[i, j] >= U_MIN:
                    r[i, j] = im[i,j]
                if im[i, j] >= U_MAX:
                    # Strong edge: promote weak neighbours along the edge
                    # direction (perpendicular to the gradient).
                    angulo = jo[i,j]
                    if -np.pi / 8 < angulo <= np.pi / 8:
                        if im[i-1, j] >= U_MIN:
                            r[i-1, j] = U_MAX
                        if im[i+1, j] >= U_MIN:
                            r[i+1, j] = U_MAX
                    elif np.pi / 8 < angulo <= 3 * np.pi / 8:
                        if im[i-1, j-1] >= U_MIN:
                            r[i-1, j-1] = U_MAX
                        if im[i+1, j+1] >= U_MIN:
                            r[i+1, j+1] = U_MAX
                    elif -np.pi / 8 >= angulo > -3 * np.pi / 8:
                        if im[i-1, j+1] >= U_MIN:
                            r[i-1, j+1] = U_MAX
                        if im[i+1, j-1] >= U_MIN:
                            r[i+1, j-1] = U_MAX
                    elif angulo > 3 * np.pi / 8 or angulo <= -3 * np.pi / 8:
                        if im[i, j-1] >= U_MIN:
                            r[i, j-1] = U_MAX
                        if im[i, j+1] >= U_MIN:
                            r[i, j+1] = U_MAX
        # Binarize: pixels promoted to U_MAX become edges, the rest background.
        idx = r >= U_MAX
        r[idx] = 1.0
        idx = r < U_MAX
        r[idx] = 0.0
        return r

    im_gauss = gaussiano(im_init)
    jm, jo = edge_detection(im_gauss, "sobel")
    return umbral_por_histeresis(supresion_de_no_maximos(jo, jm), jo)
# Load the input image (grayscale), run Canny, then a simple Hough transform
# to find and draw straight lines over the detected edge map.
im1 = np.asarray(Image.open(argv[1]).convert('L'))
im2 = canny_img(im1)
k1, k2 = im2.shape
# Sampled angles (theta) for the Hough accumulator.
ts = np.arange(-np.pi / 2, np.pi / 2, np.pi / 20)
max_mod = np.power(k1*k1 + k2*k2, 0.5)
min_mod = -max_mod
pasos_mod = (max_mod - min_mod) / 20
# cuenta[theta][rho] counts edge pixels on the line (theta, rho).
cuenta = {}
for angulo in ts:
    cuenta_angulo = {}
    for i in range(k1):
        for j in range(k2):
            if im2[i][j] == 0.0: continue
            d = int(i * np.sin(angulo) + j * np.cos(angulo))
            # d = int(pasos_mod * int(d / pasos_mod))
            # NOTE(review): the first hit for a given rho is recorded as 0
            # instead of 1, so every accumulator cell is off by one -- confirm
            # whether this undercount is intentional.
            if d not in cuenta_angulo.keys():
                cuenta_angulo[d] = 0
            else:
                cuenta_angulo[d] += 1
    cuenta[angulo] = cuenta_angulo
print(cuenta)
# Keep (theta, rho) pairs whose accumulator count exceeds the vote threshold.
lineas_rectas = []
for angulo in ts:
    for modulo in cuenta[angulo]:
        if cuenta[angulo][modulo] > 50:
            lineas_rectas.append((angulo, modulo))
            print(cuenta[angulo][modulo])
print(lineas_rectas)
# Replicate the edge map into 3 channels so lines can be drawn in red.
stacked_img = np.stack((im2,)*3, axis=-1)
print(stacked_img)
for angulo, modulo in lineas_rectas:
    # Near-vertical cos(theta) ~ 0: draw a horizontal row instead of dividing.
    if -0.001 < np.cos(angulo) < 0.001:
        # NOTE(review): ``i`` here is a stale loop variable from above, not
        # this line's coordinate -- looks like a leftover debug print.
        print(i)
        for j in range(k2):
            stacked_img[abs(modulo),j,:] = [1.0, 0, 0]
        continue
    for i in range(k1):
        j = int((modulo - i * np.sin(angulo)) / (np.cos(angulo) + 0.0001))
        if 0 <= j < k2:
            stacked_img[i,j,:] = [1.0, 0, 0]
plt.imshow(stacked_img, cmap='gray') #, vmin=0, vmax=255)
plt.show()
| gciruelos/imagenes-practicas | practica7/ejclase.py | ejclase.py | py | 6,141 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.ones",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "convolution.convolution",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
... |
17422411853 | from pathlib import Path
from typing import Any, Dict, List, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import yaml
from torch import Tensor
from torchmetrics import Dice, JaccardIndex
from transformers import EvalPrediction
class ComputeMetrics(object):
    """Segmentation metrics callback computing Dice/IoU per object-size group.

    Relative-area thresholds (from config key ``compute_metrics.thresholds``)
    split samples into groups; metrics are reported per group and overall.
    """

    def __init__(self, resource: Union[Path, str, Dict[str, Any]]) -> None:
        """Accept either a ready config dict or a path to a YAML config file."""
        config = resource
        if isinstance(resource, (Path, str)):
            resource = Path(resource)
            with open(resource, "r") as file:
                config = yaml.safe_load(file)
        assert "compute_metrics" in config, f"wrong resource {resource} construction"
        self.thresholds = config["compute_metrics"]["thresholds"]
        self.dice = Dice(num_classes=1)
        self.iou = JaccardIndex(task="binary")

    def compute_group_area_by_relarea(self, label: float) -> int:
        """Return the index of the threshold interval containing *label*.

        Falls back to the last group's index when no interval matches.
        """
        for i, interval in enumerate(self.thresholds):
            if interval[0] <= label < interval[1]:
                return i
        return i

    def compute_group_area_by_target(self, label: Union[Tensor, np.ndarray]) -> int:
        """Group index for a mask, from the object's area relative to the image."""
        object_area = label.sum()
        # Bug fix for NumPy >= 2.0: ``np.product`` was removed; use ``np.prod``.
        image_area = np.prod(label.shape[-2:])
        relative_area = object_area / image_area
        for i, interval in enumerate(self.thresholds):
            if interval[0] <= relative_area < interval[1]:
                return i
        return i

    def __call__(self, eval_pred: EvalPrediction) -> Dict[str, float]:
        """Compute per-group and overall Dice/IoU over a batch of predictions."""
        predictions, labels = eval_pred
        b, _, _, _ = predictions.shape
        area_groups = {i: {"dice": [], "iou": []} for i in range(len(self.thresholds))}
        for i in range(b):
            pred = Tensor(predictions[i : i + 1]).float()
            label = Tensor(labels[i : i + 1]).long()
            pred = F.softmax(pred, dim=1)
            group_area = self.compute_group_area_by_target(label)
            # Dice is evaluated on the foreground-class probabilities.
            area_groups[group_area]["dice"].append(
                self.dice(pred[:, 1].flatten(), label.flatten())
            )
            pred = pred.argmax(dim=1)
            area_groups[group_area]["iou"].append(self.iou(pred, label))
        metrics_result = {}
        dice_all = []
        iou_all = []
        for gr in area_groups:
            dice_all += area_groups[gr]["dice"]
            iou_all += area_groups[gr]["iou"]
            metrics_result[f"dice_group_{gr}"] = np.mean(area_groups[gr]["dice"])
            metrics_result[f"iou_group_{gr}"] = np.mean(area_groups[gr]["iou"])
        metrics_result["dice"] = np.mean(dice_all)
        metrics_result["iou"] = np.mean(iou_all)
        return metrics_result
| GerasimovIV/kvasir-seg | src/metrics.py | metrics.py | py | 2,666 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.Union",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number":... |
19818986748 | import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt

# Load the image and run Shi-Tomasi corner detection on its grayscale version
# (up to 10 corners, quality level 0.05, minimum distance 0.25 px).
img = cv.imread("imagens/hospital2.jpg")
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
corners = cv.goodFeaturesToTrack(gray, 10, 0.05, 0.25)

# Mark each detected corner with a filled red dot.
for item in corners:
    x, y = item[0]
    # Bug fix: goodFeaturesToTrack returns float32 coordinates, while
    # cv.circle requires an integer center point (raises on floats).
    cv.circle(img, (int(x), int(y)), 4, (0, 0, 255), -1)

fig = plt.figure(figsize=(10,8))
plt.imshow(img)
plt.show() | vitormnoel/opencv | visao-comp/extracao-goodcorners.py | extracao-goodcorners.py | py | 363 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "cv2.goodFeaturesToTrack"... |
26850595638 | import logging
from celery import shared_task
logger = logging.getLogger('error')  # dedicated logging channel for error reports
@shared_task
def error_logger(err, exc_info=None):
    """Asynchronously log *err*; with ``exc_info``, prefix file name and line."""
    if not exc_info:
        logger.error(f'{str(err)}')
    else:
        exc_type, exc_obj, exc_tb = exc_info
        logger.error(f'{exc_tb.tb_frame.f_code.co_filename} {exc_tb.tb_lineno} {str(err)}')
| praneshsaminathan/Django-Multi-Tenant | multiten/tasks.py | tasks.py | py | 336 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "celery.shared_task",
"line_number": 7,
"usage_type": "name"
}
] |
15429008868 | import discord
from discord.ext import commands, tasks
import requests
import json
import html
import random
# Commands for Trivia game
class triviaCommands(commands.Cog):
    """Cog providing a trivia command backed by the Open Trivia Database.

    Each invocation fetches one random question, posts it (with shuffled
    answers for multiple choice) as an embed, waits for the invoking user's
    reply, and reports whether the answer was correct.
    """

    def __init__(self, bot):
        # Keep the bot reference so the command can await bot.wait_for() later.
        self.bot = bot

    @commands.command()
    async def trivia(self, ctx):
        """Run one round of trivia with the command's author."""
        mention = ctx.author.mention
        # Gets one random question as JSON from the Open Trivia Database API
        response = requests.get('https://opentdb.com/api.php?amount=1')
        triviaResponse = json.loads(response.text)
        triviaResponseInformation = triviaResponse['results'][0]
        question = triviaResponseInformation['question']
        # The API HTML-escapes its text (e.g. &quot;), so unescape for display.
        question = html.unescape(question)
        questionType = triviaResponseInformation['type']
        questionCategory = triviaResponseInformation['category']
        questionDifficulty = triviaResponseInformation['difficulty']
        questionAnswer = triviaResponseInformation['correct_answer']
        questionAnswer = html.unescape(questionAnswer)
        questionInformation = ""
        # Displays different formats for the answers depending on if the question is multiple choice or true or false
        if questionType == 'multiple':
            questionType = 'Multiple Choice'
            questionsArray = []
            # Gets all the possible answers in an array and randomizes the array
            decodedCorrectAnswer = html.unescape(triviaResponseInformation['correct_answer'])
            questionsArray.append(decodedCorrectAnswer)
            for incorrectQuestions in triviaResponseInformation['incorrect_answers']:
                decodedIncorrectQuestions = html.unescape(incorrectQuestions)
                questionsArray.append(decodedIncorrectQuestions)
            random.shuffle(questionsArray)
            # 97 is ord('a'); label answers a), b), c), d) in order.
            questionAlphabet = 97
            individualQuestionsString = ""
            for individualQuestions in questionsArray:
                individualQuestionsString += f'{chr(questionAlphabet)}) {individualQuestions}\n'
                questionAlphabet = questionAlphabet + 1
            questionInformation += f'{mention}\n\nType: {questionType}\nCategory: {questionCategory}\nDifficulty: {questionDifficulty}\nQuestion: {question}\n\nAnswers: \n{individualQuestionsString}\nPlease type your answer(a/b/c/d) in the chat:'
            embedVar = discord.Embed(title='Trivia', description=questionInformation, color=0xFFA500)
            await ctx.channel.send(embed=embedVar)
            # Ensures the author that initially started the trivia, is the one answering so other user messages aren't taken into account and they answer with 'a', 'b', 'c' or 'd'
            def check(m):
                return m.author == ctx.author and (m.content.lower() == 'a' or m.content.lower() == 'b' or m.content.lower() == 'c' or m.content.lower() == 'd')
            # Gets the users answer based off their letter input
            userAnswer = await self.bot.wait_for('message', check=check)
            answerValue = questionsArray[ord(userAnswer.content.lower())-97]
            # Checks if the users answer is correct
            if answerValue.lower() == questionAnswer.lower():
                embedVar = discord.Embed(description=f'You chose the correct answer {mention}', color=0xFFA500)
                await ctx.channel.send(embed=embedVar)
            else:
                embedVar = discord.Embed(description=f'You chose the incorrect answer, the correct answer was \'{questionAnswer}\' {mention}', color=0xFFA500)
                await ctx.channel.send(embed=embedVar)
        else:
            questionType = 'True or False'
            questionInformation += f'{mention}\n\nType:{questionType}\nCategory: {questionCategory}\nDifficulty: {questionDifficulty}\nQuestion: {question}\n\nAnswers: \na) True\nb) False\n\nPlease type your answer(a/b) in the chat:'
            embedVar = discord.Embed(title='Trivia', description=questionInformation, color=0xFFA500)
            await ctx.channel.send(embed=embedVar)
            # Ensures the author that initially started the trivia, is the one answering so other user messages aren't taken into account
            def check(m):
                return m.author == ctx.author and (m.content.lower() == 'a' or m.content.lower() == 'b')
            # Gets the users answer
            userAnswer = await self.bot.wait_for('message', check=check)
            # Converts the users alphabetical answer to True or False
            if userAnswer.content.lower() == 'a':
                userAnswer = "True"
            elif userAnswer.content.lower() == 'b':
                userAnswer = "False"
            # Checks if the users answers is correct
            if userAnswer.lower() == questionAnswer.lower():
                embedVar = discord.Embed(description=f'You chose the correct answer {mention}', color=0XFFA500)
                await ctx.channel.send(embed=embedVar)
            else:
                embedVar = discord.Embed(description=f'You chose the incorrect answer, the correct answer was \'{questionAnswer}\' {mention}', color=0xFFA500)
                await ctx.channel.send(embed=embedVar)
# Sets up the trivia bot command above
def setup(bot):
bot.add_cog(triviaCommands(bot)) | brandenphan/Pami-Bot | Commands/trivia.py | trivia.py | py | 5,230 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "json.... |
39285707049 | from PIL import Image, ImageEnhance, ImageOps
import PIL.ImageDraw as ImageDraw
import numpy as np
import random
class RandAugmentPolicy(object):
    """RandAugment: apply N randomly chosen transforms, each at magnitude M.

    Example:
    >>> policy = RandAugmentPolicy()
    >>> transformed = policy(image)

    Example as a PyTorch Transform:
    >>> transform=transforms.Compose([
    >>>     transforms.Resize(256),
    >>>     RandAugmentPolicy(),
    >>>     transforms.ToTensor()])
    """

    # I change fill color from (128, 128, 128) to (0, 0, 0)
    # NOTE(review): ``fillcolor`` is accepted but unused here -- SubPolicy
    # applies its own default fill color.
    def __init__(self, fillcolor=(0, 0, 0), N=1, M=5):
        # Pool of candidate operations; N of them are sampled per image.
        self.policies = [
            "invert",
            "autocontrast",
            "equalize",
            "rotate",
            "solarize",
            "color",
            "posterize",
            "contrast",
            "brightness",
            "sharpness",
            "shearX",
            "shearY",
            "translateX",
            "translateY",
            "cutout",
        ]
        self.N = N
        self.M = M

    def __call__(self, img):
        """Applies RandAugment on input image.

        Args:
            img: Input PIL image

        Returns:
            PIL.Image.Image: Augmented image based on the N and M hyper-parameters.
        """
        choosen_policies = np.random.choice(self.policies, self.N)
        for policy in choosen_policies:
            subpolicy_obj = SubPolicy(operation=policy, magnitude=self.M)
            img = subpolicy_obj(img)
        return img
        # return self.policies[policy_idx](img)

    def __repr__(self):
        return f"RandAugment Policy with Cutout where N: {self.N} and M: {self.M}"
class SubPolicy(object):
    """A single augmentation operation sampled at a discretized magnitude.

    ``magnitude`` is an index (0..MAX_PARAM-1) into a per-operation range of
    concrete parameter values; calling the instance applies the operation to
    a PIL image.
    """

    def __init__(self, operation, magnitude, fillcolor=(128, 128, 128), MAX_PARAM=10):
        # Lookup tables: magnitude level index -> concrete parameter value.
        ranges = {
            "shearX": np.linspace(0, 0.3, MAX_PARAM),
            "shearY": np.linspace(0, 0.3, MAX_PARAM),
            "translateX": np.linspace(0, 150 / 331, MAX_PARAM),
            "translateY": np.linspace(0, 150 / 331, MAX_PARAM),
            "rotate": np.linspace(0, 30, MAX_PARAM),
            "color": np.linspace(0.0, 0.9, MAX_PARAM),
            # Bug fix for NumPy >= 1.24: the deprecated alias ``np.int`` was
            # removed; cast with the builtin ``int`` instead.
            "posterize": np.round(np.linspace(8, 4, MAX_PARAM), 0).astype(int),
            "solarize": np.linspace(256, 0, MAX_PARAM),
            "contrast": np.linspace(0.0, 0.9, MAX_PARAM),
            "sharpness": np.linspace(0.0, 0.9, MAX_PARAM),
            "brightness": np.linspace(0.0, 0.9, MAX_PARAM),
            "autocontrast": [0] * MAX_PARAM,
            "equalize": [0] * MAX_PARAM,
            "invert": [0] * MAX_PARAM,
            "cutout": np.linspace(0.0, 0.8, MAX_PARAM),
        }

        # from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
        def rotate_with_fill(img, magnitude):
            # Rotate via RGBA and composite over transparent background so the
            # exposed corners are filled instead of left undefined.
            rot = img.convert("RGBA").rotate(magnitude)
            return Image.composite(
                rot, Image.new("RGBA", rot.size, (0,) * 4), rot
            ).convert(img.mode)

        def Cutout(img, v):  # [0, 60] => percentage: [0, 0.2]
            # v is the cutout size as a fraction of the image width.
            assert 0.0 <= v <= 0.8
            if v <= 0.0:
                return img
            v = v * img.size[0]
            return CutoutAbs(img, v)

        def CutoutAbs(img, v):  # [0, 60] => percentage: [0, 0.2]
            # Black out a v-by-v square centered at a random position.
            if v < 0:
                return img
            w, h = img.size
            x0 = np.random.uniform(w)
            y0 = np.random.uniform(h)
            x0 = int(max(0, x0 - v / 2.0))
            y0 = int(max(0, y0 - v / 2.0))
            x1 = min(w, x0 + v)
            y1 = min(h, y0 + v)
            xy = (x0, y0, x1, y1)
            # color = (125, 123, 114)
            color = (0, 0, 0)
            img = img.copy()
            ImageDraw.Draw(img).rectangle(xy, color)
            return img

        # Dispatch table mapping operation name to the PIL transform.
        func = {
            "shearX": lambda img, magnitude: img.transform(
                img.size,
                Image.AFFINE,
                (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
                Image.BICUBIC,
                fillcolor=fillcolor,
            ),
            "shearY": lambda img, magnitude: img.transform(
                img.size,
                Image.AFFINE,
                (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
                Image.BICUBIC,
                fillcolor=fillcolor,
            ),
            "translateX": lambda img, magnitude: img.transform(
                img.size,
                Image.AFFINE,
                (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),
                fillcolor=fillcolor,
            ),
            "translateY": lambda img, magnitude: img.transform(
                img.size,
                Image.AFFINE,
                (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),
                fillcolor=fillcolor,
            ),
            "rotate": lambda img, magnitude: rotate_with_fill(img, magnitude),
            # "rotate": lambda img, magnitude: img.rotate(magnitude * random.choice([-1, 1])),
            "color": lambda img, magnitude: ImageEnhance.Color(img).enhance(
                1 + magnitude * random.choice([-1, 1])
            ),
            "posterize": lambda img, magnitude: ImageOps.posterize(img, magnitude),
            "solarize": lambda img, magnitude: ImageOps.solarize(img, magnitude),
            "contrast": lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
                1 + magnitude * random.choice([-1, 1])
            ),
            "sharpness": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
                1 + magnitude * random.choice([-1, 1])
            ),
            "brightness": lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
                1 + magnitude * random.choice([-1, 1])
            ),
            "autocontrast": lambda img, magnitude: ImageOps.autocontrast(img),
            "equalize": lambda img, magnitude: ImageOps.equalize(img),
            "invert": lambda img, magnitude: ImageOps.invert(img),
            "cutout": lambda img, magnitude: Cutout(img, magnitude),
        }

        self.operation = func[operation]
        self.magnitude = ranges[operation][magnitude]

    def __call__(self, img):
        """Apply the configured operation at the configured magnitude."""
        # if random.random() < self.p1: img = self.operation1(img, self.magnitude1)
        img = self.operation(img, self.magnitude)
        return img
class SplitAugmentPolicy(object):
    """Split an image into slices, RandAugment each slice independently, then
    stitch the slices back together.

    NOTE(review): the slice boundaries are hard-coded at 16 px, which assumes
    32x32 inputs (CIFAR-sized) -- confirm before reusing on other sizes.
    """

    def __init__(self, N=1, M=5, slices=4):
        # Pool of candidate operations; N of them are sampled per slice.
        self.policies = [
            "invert",
            "autocontrast",
            "equalize",
            "rotate",
            "solarize",
            "color",
            "posterize",
            "contrast",
            "brightness",
            "sharpness",
            "shearX",
            "shearY",
            "translateX",
            "translateY",
            "cutout",
        ]
        self.N = N
        self.M = M
        self.slices = slices  # supported values: 4 (quadrants) or 2 (halves)
        # print(f"Split Augment Object initialized: N: {self.N}, M: {self.M} and slices: {self.slices}")

    def __call__(self, img):
        """Augment each slice of *img* independently and return a PIL image."""
        # policy_idx = random.randint(0, len(self.policies) - 1)
        w, h = img.size
        img = np.array(img)
        if self.slices == 4:
            # Four 16x16 quadrants.
            img_1 = Image.fromarray(img[0:16, 0:16, :])
            img_2 = Image.fromarray(img[0:16, 16:, :])
            img_3 = Image.fromarray(img[16:, 0:16, :])
            img_4 = Image.fromarray(img[16:, 16:, :])
            imgess = [img_1, img_2, img_3, img_4]
        elif self.slices == 2:
            # Two vertical halves.
            img_1 = Image.fromarray(img[:, 0:16, :])
            img_2 = Image.fromarray(img[:, 16:, :])
            imgess = [img_1, img_2]
        else:
            raise NotImplementedError
        # Apply N sampled operations at magnitude M to each slice separately.
        for i, img in enumerate(imgess):
            choosen_policies = np.random.choice(self.policies, self.N)
            for policy in choosen_policies:
                subpolicy_obj = SubPolicy(operation=policy, magnitude=self.M)
                img = subpolicy_obj(img)
            imgess[i] = np.array(img)
        # Reassemble the augmented slices into one image.
        # NOTE(review): the buffer is allocated as (w, h, 3) while numpy image
        # arrays are indexed (rows=h, cols=w) -- harmless only for square
        # inputs; confirm if non-square images are ever passed.
        temp_img = np.zeros(shape=(w, h, 3))
        if self.slices == 4:
            temp_img[0:16, 0:16, :] = imgess[0]
            temp_img[0:16, 16:, :] = imgess[1]
            temp_img[16:, 0:16, :] = imgess[2]
            temp_img[16:, 16:, :] = imgess[3]
        elif self.slices == 2:
            temp_img[:, 0:16, :] = imgess[0]
            temp_img[:, 16:, :] = imgess[1]
        else:
            raise NotImplementedError
        temp_img = Image.fromarray(temp_img.astype(np.uint8))
        return temp_img

    def __repr__(self):
        return "SplitAugment CIFAR Policy with Cutout"
| PrateekMunjal/TorchAL | al_utils/autoaugment.py | autoaugment.py | py | 8,697 | python | en | code | 56 | github-code | 1 | [
{
"api_name": "numpy.random.choice",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",... |
8404044280 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Класс управления картой 2GIS.
"""
import jinja2
from . import double_gis_util
from ic.utils import extfunc
__version__ = (0, 1, 1, 1)
# Template of the resulting HTML document
HTML_TEMPLATE = '''<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>{{ title }}</title>
<script src="https://maps.api.2gis.ru/2.0/loader.js?pkg=full"></script>
{{ map }}
</head>
<body>
<div id="map" style="width:{{ width[0] }}{{ width[1] }}; height:{{ height[0] }}{{ height[1] }}"></div>
</body>
</html>
'''
# Template of the map script
MAP_TEMPLATE = '''
<script type="text/javascript">
var map;
DG.then(function () {
map = DG.map('map', {
center: {{ location }},
zoom: {{ zoom }}
});
{% for marker in markers %}
{{ marker.render() }}
{% endfor %}
});
</script>
'''
class ic2GISMap(object):
    """
    2GIS map management class.

    Renders a 2GIS map (and its markers) through Jinja2 templates and can
    save the result as a standalone HTML page.
    """
    # Pre-compiled Jinja2 templates for the map script and the HTML page.
    _map_template = jinja2.Template(MAP_TEMPLATE)
    _html_template = jinja2.Template(HTML_TEMPLATE)

    def __init__(self, location=None,
                 width='2000px', height='1300px',
                 left='0%', top='0%',
                 position=None,
                 tiles='OpenStreetMap',
                 attr=None,
                 min_zoom=0,
                 max_zoom=18,
                 zoom_start=10,
                 min_lat=-90,
                 max_lat=90,
                 min_lon=-180,
                 max_lon=180,
                 max_bounds=False,
                 crs='EPSG3857',
                 control_scale=False,
                 prefer_canvas=False,
                 no_touch=False,
                 disable_3d=False,
                 png_enabled=False,
                 zoom_control=True,
                 **kwargs):
        """
        Constructor. Create the map.

        :param location: Geolocation point (latitude, longitude) of the map center.
            When omitted, the map is centered at (0, 0) fully zoomed out.
        :param width: Map width (CSS size, e.g. '2000px').
        :param height: Map height (CSS size).
        :param left: Horizontal offset (CSS size).
        :param top: Vertical offset (CSS size).
        :param zoom_start: Initial zoom level for the map.

        NOTE(review): the remaining folium-style parameters (position, tiles,
        attr, min/max zoom, lat/lon bounds, crs, control_scale, etc.) are
        accepted for interface compatibility but are not stored or used by
        this implementation.
        """
        if location is None:
            # If location is not passed we center and zoom out.
            self.location = [0, 0]
            zoom_start = 1
        else:
            self.location = double_gis_util.validate_location(location)
        # Sizes (parsed into (value, unit) pairs).
        self.width = double_gis_util.parse_size(width)
        self.height = double_gis_util.parse_size(height)
        self.left = double_gis_util.parse_size(left)
        self.top = double_gis_util.parse_size(top)
        # self.position = position
        # Zoom level.
        self.zoom = zoom_start
        # Markers placed on the map.
        self.markers = list()

    def add_marker(self, marker):
        """
        Add a marker to the map.

        :param marker: Marker object (must provide a ``render()`` method).
        :return: True/False.
        """
        self.markers.append(marker)
        return True

    def render(self, **kwargs):
        """
        Generate the HTML/JS representation of the map element.

        :return: Generated HTML representation of the map.
        """
        return self._map_template.render(location=self.location,
                                         zoom=self.zoom,
                                         markers=self.markers,
                                         **kwargs)

    def save(self, html_filename):
        """
        Save the map as an HTML file.

        :param html_filename: Full path of the target HTML file.
        :return: True/False.
        """
        map_html = self.render()
        html = self._html_template.render(map=map_html,
                                          width=self.width,
                                          height=self.height)
        return extfunc.save_file_text(html_filename, html)
# To support rendering, the class is re-exported under the conventional name.
Map = ic2GISMap
| XHermitOne/defis3 | analitic/analitic/double_gis/double_gis_map.py | double_gis_map.py | py | 5,543 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "jinja2.Template",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "jinja2.Template",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "ic.utils.extfunc.save_file_text",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "ic.u... |
40214308408 | import requests
import json
import unicodedata
from bs4 import BeautifulSoup
import os
from dotenv import load_dotenv
import time
def find_env_file(folder):
    """Return the path of the first ``*.env`` file in *folder*, or None."""
    candidates = (name for name in os.listdir(folder) if name.endswith(".env"))
    hit = next(candidates, None)
    return os.path.join(folder, hit) if hit is not None else None
def normalize_text(text):
    """NFKD-normalize *text* and map curly double quotes to ASCII apostrophes."""
    folded = unicodedata.normalize('NFKD', text)
    for curly in ('“', '”'):
        folded = folded.replace(curly, "'")
    return folded
def scrape_quotes(url, output_file):
    """Scrape quotes from *url*, following 'next' pagination links.

    Each quote is appended to *output_file* as an indented JSON object
    with its text, author name and tags.

    :param url: URL of the first page to scrape.
    :param output_file: Path of the file quotes are appended to.
    """
    load_dotenv(find_env_file(os.getcwd()))
    page_num = 1
    while True:
        response = requests.get(url)
        content = response.text
        soup = BeautifulSoup(content, 'html.parser')
        # The quotes are embedded as a JS array inside a <script> tag.
        target_script = None
        for script in soup.find_all('script'):
            if 'text' in script.text:
                target_script = script.text
                break
        if target_script is None:
            break
        start_index = target_script.find('[')
        end_index = target_script.find('];') + 1
        quotes_data = target_script[start_index:end_index]
        # BUG FIX: `quotes` used to stay unbound (first page) or stale (later
        # pages) when JSON parsing failed; initialize it before the attempt.
        quotes = []
        try:
            quotes = json.loads(quotes_data)
            for quote in quotes:
                quote['text'] = normalize_text(quote['text'])
        except json.JSONDecodeError as e:
            print("JSON parse invalid:", e)
        with open(output_file, "a", encoding='utf-8') as file:
            for quote in quotes:
                quote_data = {
                    "text": quote['text'],
                    "by": quote['author']['name'],
                    "tags": quote['tags']
                }
                json.dump(quote_data, file, ensure_ascii=False, indent=2)
                file.write("\n")
        next_page = soup.find('li', class_='next')
        if not next_page:
            break
        url = next_page.find('a')['href']
        url = f'http://quotes.toscrape.com{url}'
        page_num += 1
        time.sleep(2)  # be polite to the server between page fetches
    print(f"Quotes from {page_num} pages have been saved in the output file.")
# Script entry: pull the scraper configuration from the first .env file found
# in the current working directory.
load_dotenv(find_env_file(os.getcwd()))
input_url = os.getenv("INPUT_URL")
output_file = os.getenv("OUTPUT_FILE")
{
"api_name": "os.listdir",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "unicodedata.normalize",
"lin... |
71720555874 | import pandas as pd
import streamlit as st
from st_aggrid import AgGrid, GridOptionsBuilder
from st_aggrid.shared import GridUpdateMode
# Upstream component used for the interactive grid.
STREAMLIT_AGGRID_URL = "https://github.com/PablocFonseca/streamlit-aggrid"
# Page chrome: layout, icon, title and introductory copy.
st.set_page_config(
    layout="centered", page_icon="🖱️" , page_title="Interactive table app"
)
st.title("🖱️ Interactive table app" )
st.write(
    """Indiana Births and Infant Mortality."""
)
st.write("Go ahead, click on a row in the table below!")
def aggrid_interactive_table(df: pd.DataFrame):
    """Render *df* as an interactive st-aggrid table with single-row selection.

    Args:
        df (pd.DataFrame): Source dataframe.

    Returns:
        dict: The grid state, including the selected row.
    """
    builder = GridOptionsBuilder.from_dataframe(
        df, enableRowGroup=True, enableValue=True, enablePivot=True
    )
    builder.configure_side_bar()
    builder.configure_selection("single")
    grid_state = AgGrid(
        df,
        enable_enterprise_modules=True,
        gridOptions=builder.build(),
        theme="light",
        update_mode=GridUpdateMode.MODEL_CHANGED,
        allow_unsafe_jscode=True,
    )
    return grid_state
##"https://raw.githubusercontent.com/mwaskom/seaborn-data/master/iris.csv"
# NOTE(review): the variable is named `iris` but the URL serves the Indiana
# births dataset referenced in the page copy — consider renaming.
iris = pd.read_csv(
    "http://www.indybiosystems.com/datasets/ibid_5000.txt"
)
selection = aggrid_interactive_table(df=iris)
if selection:
    st.write("You selected:")
    st.json(selection["selected_rows"])
st.write("## Code")
{
"api_name": "streamlit.set_page_config",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "streamlit.title",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "streamlit.wr... |
30171139606 | from functools import partial, wraps
from usage_model import Redis
def init_redis(func=None, *, redis: Redis = None):
    """Decorator that opens the *redis* connection around a call to *func*.

    Usable bare (``@init_redis``) or parameterized (``@init_redis(redis=r)``);
    the connection is established before the call and closed afterwards.

    :param func: Callable to wrap (None when used with keyword arguments).
    :param redis: Redis client exposing ``is_connected``/``connect``/``dis_connected``.
    :return: Async wrapper around *func*.
    """
    if func is None:
        # Parameterized usage: return a decorator bound to `redis`.
        return partial(init_redis, redis=redis)

    @wraps(func)
    async def wrapper(*args, **kwargs):
        if not redis.is_connected:
            await redis.connect()
        try:
            result = func(*args, **kwargs)
            # BUG FIX: await coroutine results while the connection is still
            # open; previously an async `func` returned an un-awaited coroutine
            # and the connection was closed before its body ever ran.
            if inspect.isawaitable(result):
                result = await result
        finally:
            if redis.is_connected:
                await redis.dis_connected()
        return result

    return wrapper
| ruicore/python | 02-usecase/redis/__init__.py | __init__.py | py | 511 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "usage_model.Redis",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "functools.partial",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "functools.wraps",
"line_number": 10,
"usage_type": "call"
}
] |
23549497570 | import os
import re
import shutil
import subprocess
import sys
import toml
# Matches markdown anchor links like [text](#frag); group 1 keeps the link text.
MD_ANCHOR_LINKS = r"\[(.+)\]\(#.+\)"
def slugify(s):
    """
    From: http://blog.dolphm.com/slugify-a-string-in-python/
    Simplifies ugly strings into something URL-friendly.

    >>> slugify("[Some] _ Article's Title--")
    'some-articles-title'
    """
    # Lowercase, then funnel the common separators into one placeholder.
    s = s.lower()
    for c in [' ', '-', '.', '/']:
        s = s.replace(c, '_')
    # BUG FIX: raw strings — '\W' and '\s' are invalid escape sequences that
    # raise SyntaxWarning (future SyntaxError) on Python >= 3.12.
    s = re.sub(r'\W', '', s)
    # Collapse runs of placeholders and join the words with hyphens.
    s = s.replace('_', ' ')
    s = re.sub(r'\s+', ' ', s)
    s = s.strip()
    return s.replace(' ', '-')
# Accumulates human-readable validation errors across all themes so they can
# be reported together at the end of the run.
errors = []
class Theme(object):
    # Represents one theme directory: loads theme.toml + README.md and shells
    # out to git for the repository URL and commit dates.
    def __init__(self, name, path):
        print("Loading %s" % name)
        self.name = name
        self.path = path
        try:
            with open(os.path.join(self.path, "theme.toml")) as f:
                self.metadata = toml.load(f)
        except Exception as e:
            # Record the failure and leave metadata=None so callers can skip us.
            error_message = f"Theme '{self.name}' encountered a TOML parsing issue: {str(e)}"
            errors.append(error_message)
            self.metadata = None
            return # exit the constructor early
        with open(os.path.join(self.path, "README.md")) as f:
            self.readme = f.read()
        # Escape Tera/Jinja delimiters so the README renders literally in Zola.
        self.readme = self.readme.replace("{{", "{{/*").replace("}}", "*/}}").replace("{%", "{%/*").replace("%}", "*/%}")
        # Strip intra-page anchor links (they would be dead on the themes site).
        self.readme = re.sub(MD_ANCHOR_LINKS, r"\1", self.readme)
        self.repository = self.get_repository_url()
        self.initial_commit_date, self.last_commit_date = self.get_commit_dates()
    def get_repository_url(self):
        # First fetch remote of the theme's git checkout, normalized to HTTPS.
        command = "git -C {} remote -v".format(self.path)
        (_, git_remotes) = subprocess.getstatusoutput(command)
        cleaned = (
            git_remotes
            .split("\n")[0]
            .split("\t")[1]
            .replace(" (fetch)", "")
        )
        if cleaned.startswith("git@"):
            cleaned = cleaned.replace("git@github.com:", "https://github.com/").replace(".git", "")
        return cleaned
    def get_commit_dates(self):
        # Returns (last_commit_date, initial_commit_date) as ISO-8601 strings.
        command = 'git log --pretty=format:"%aI" {}'.format(self.path)
        (_, date) = subprocess.getstatusoutput(command)
        dates = date.split("\n")
        # last, first
        return dates[0], dates[len(dates) - 1]
    def to_zola_content(self):
        """
        Returns the page content (front-matter + README) for Gutenberg/Zola.
        """
        return """
+++
title = "{title}"
description = "{description}"
template = "theme.html"
date = {updated}
[extra]
created = {created}
updated = {updated}
repository = "{repository}"
homepage = "{homepage}"
minimum_version = "{min_version}"
license = "{license}"
demo = "{demo}"
[extra.author]
name = "{author_name}"
homepage = "{author_homepage}"
+++
{readme}
""".format(
            title=self.metadata["name"],
            description=self.metadata["description"],
            created=self.initial_commit_date,
            updated=self.last_commit_date,
            repository=self.repository,
            homepage=self.metadata.get("homepage", self.repository),
            min_version=self.metadata["min_version"],
            license=self.metadata["license"],
            author_name=self.metadata["author"]["name"],
            author_homepage=self.metadata["author"].get("homepage", ""),
            demo=self.metadata.get("demo", ""),
            readme=self.readme,
        )
    def to_zola_folder(self, container):
        """
        Creates the page folder containing the screenshot and the info in
        content/themes
        """
        page_dir = os.path.join(container, self.name)
        os.makedirs(page_dir)
        with open(os.path.join(page_dir, "index.md"), "w") as f:
            print("Writing theme info as zola content: {}".format(self.name))
            f.write(self.to_zola_content())
        shutil.copyfile(
            os.path.join(self.path, "screenshot.png"),
            os.path.join(page_dir, "screenshot.png"),
        )
def read_themes():
    """Scan the working directory and return the list of valid Theme objects.

    Hidden entries, virtualenvs and the local build folder are skipped; themes
    missing required files or metadata are recorded in the module-level
    ``errors`` list instead of being returned.
    """
    base = "./"
    collected = []
    for entry in sorted(os.listdir(base)):
        entry_path = os.path.join(base, entry)
        if entry in ("env", "venv"):
            continue
        # "themes" is the name used locally for the build output folder.
        if entry.startswith(".") or not os.path.isdir(entry_path) or entry == "themes":
            continue
        # Both files are mandatory; report only the first one found missing.
        missing_file = None
        for required_file in ("README.md", "screenshot.png"):
            if not os.path.exists(os.path.join(entry_path, required_file)):
                missing_file = required_file
                break
        if missing_file is not None:
            errors.append(f"Theme '{entry}' is missing {missing_file}.")
            continue
        theme = Theme(entry, entry_path)
        # Constructor flags a TOML parsing failure by leaving metadata unset.
        if theme.metadata is None:
            continue
        required_metadata = ['name']
        absent = [key for key in required_metadata if key not in theme.metadata]
        if absent:
            errors.append(
                f"Theme '{theme.name}' is missing required metadata: {', '.join(absent)}."
            )
            continue
        collected.append(theme)
    return collected
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception("Missing destination folder as argument!")
    destination = sys.argv[1]
    all_themes = read_themes()
    # Delete everything first in this folder
    if os.path.exists(destination):
        shutil.rmtree(destination)
    os.makedirs(destination)
    # Section front-matter so Zola sorts the generated theme pages by date.
    with open(os.path.join(destination, "_index.md"), "w") as f:
        f.write("""
+++
template = "themes.html"
sort_by = "date"
+++
""")
    for t in all_themes:
        t.to_zola_folder(destination)
    # Display errors.
    if errors:
        print("\n\n" + "="*60)
        print("ERROR SUMMARY:")
        print("-"*60)
        for error in errors:
            print(error)
        print("-"*60)
        print("="*60 + "\n")
    # Print summary of themes processed.
    print(f"\nThemes successfully processed: {len(all_themes)}")
    print(f"Themes with errors: {len(errors)}")
| getzola/themes | generate_docs.py | generate_docs.py | py | 6,271 | python | en | code | 53 | github-code | 1 | [
{
"api_name": "re.sub",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usag... |
30243853001 | import collections
import datetime
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
from PIL import Image
class AverageMeter(object):
    """Tracks the latest value and running average of a metric.

    Adapted from https://github.com/keras-team/keras
    """
    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()
    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
    def return_avg(self):
        """Return the current running average."""
        return self.avg
class Progbar(object):
    '''
    Taken from:
    https://github.com/keras-team/keras
    '''
    """Displays a progress bar.
    # Arguments
        target: Total number of steps expected, None if unknown.
        width: Progress bar width on screen.
        verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
        stateful_metrics: Iterable of string names of metrics that
            should *not* be averaged over time. Metrics in this list
            will be displayed as-is. All others will be averaged
            by the progbar before display.
        interval: Minimum visual progress update interval (in seconds).
    """
    def __init__(self, target, width=30, verbose=1, interval=0.05,
                 stateful_metrics=None):
        self.target = target
        self.width = width
        self.verbose = verbose
        self.interval = interval
        if stateful_metrics:
            self.stateful_metrics = set(stateful_metrics)
        else:
            self.stateful_metrics = set()
        # In-place redraw is possible on a TTY or inside a Jupyter kernel.
        self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and
                                  sys.stdout.isatty()) or
                                 'ipykernel' in sys.modules)
        self._total_width = 0
        self._seen_so_far = 0
        # Metric name -> [weighted sum, step count] for running averages.
        self._values = collections.OrderedDict()
        self._start = time.time()
        self._last_update = 0
    def update(self, current, values=None):
        """Updates the progress bar.
        # Arguments
            current: Index of current step.
            values: List of tuples:
                `(name, value_for_last_step)`.
                If `name` is in `stateful_metrics`,
                `value_for_last_step` will be displayed as-is.
                Else, an average of the metric over time will be displayed.
        """
        values = values or []
        for k, v in values:
            if k not in self.stateful_metrics:
                # Weight each reported value by the number of steps it covers.
                if k not in self._values:
                    self._values[k] = [v * (current - self._seen_so_far),
                                       current - self._seen_so_far]
                else:
                    self._values[k][0] += v * (current - self._seen_so_far)
                    self._values[k][1] += (current - self._seen_so_far)
            else:
                # Stateful metrics output a numeric value. This representation
                # means "take an average from a single value" but keeps the
                # numeric formatting.
                self._values[k] = [v, 1]
        self._seen_so_far = current
        now = time.time()
        info = ' - %.0fs' % (now - self._start)
        if self.verbose == 1:
            # Throttle redraws to `interval` except for the final step.
            if (now - self._last_update < self.interval and
                    self.target is not None and current < self.target):
                return
            prev_total_width = self._total_width
            if self._dynamic_display:
                sys.stdout.write('\b' * prev_total_width)
                sys.stdout.write('\r')
            else:
                sys.stdout.write('\n')
            if self.target is not None:
                # e.g. " 42/100 [=========>....................]"
                numdigits = int(np.floor(np.log10(self.target))) + 1
                barstr = '%%%dd/%d [' % (numdigits, self.target)
                bar = barstr % current
                prog = float(current) / self.target
                prog_width = int(self.width * prog)
                if prog_width > 0:
                    bar += ('=' * (prog_width - 1))
                    if current < self.target:
                        bar += '>'
                    else:
                        bar += '='
                bar += ('.' * (self.width - prog_width))
                bar += ']'
            else:
                bar = '%7d/Unknown' % current
            self._total_width = len(bar)
            sys.stdout.write(bar)
            if current:
                time_per_unit = (now - self._start) / current
            else:
                time_per_unit = 0
            if self.target is not None and current < self.target:
                eta = time_per_unit * (self.target - current)
                if eta > 3600:
                    eta_format = ('%d:%02d:%02d' %
                                  (eta // 3600, (eta % 3600) // 60, eta % 60))
                elif eta > 60:
                    eta_format = '%d:%02d' % (eta // 60, eta % 60)
                else:
                    eta_format = '%ds' % eta
                info = ' - ETA: %s' % eta_format
            else:
                # Run finished (or unknown target): report speed instead of ETA.
                if time_per_unit >= 1:
                    info += ' %.0fs/step' % time_per_unit
                elif time_per_unit >= 1e-3:
                    info += ' %.0fms/step' % (time_per_unit * 1e3)
                else:
                    info += ' %.0fus/step' % (time_per_unit * 1e6)
            for k in self._values:
                info += ' - %s:' % k
                if isinstance(self._values[k], list):
                    avg = np.mean(
                        self._values[k][0] / max(1, self._values[k][1]))
                    if abs(avg) > 1e-3:
                        info += ' %.4f' % avg
                    else:
                        info += ' %.4e' % avg
                else:
                    info += ' %s' % self._values[k]
            self._total_width += len(info)
            # Pad with spaces so a shorter line fully overwrites the previous one.
            if prev_total_width > self._total_width:
                info += (' ' * (prev_total_width - self._total_width))
            if self.target is not None and current >= self.target:
                info += '\n'
            sys.stdout.write(info)
            sys.stdout.flush()
        elif self.verbose == 2:
            # Semi-verbose mode: a single summary line at completion.
            if self.target is None or current >= self.target:
                for k in self._values:
                    info += ' - %s:' % k
                    avg = np.mean(
                        self._values[k][0] / max(1, self._values[k][1]))
                    if avg > 1e-3:
                        info += ' %.4f' % avg
                    else:
                        info += ' %.4e' % avg
                info += '\n'
                sys.stdout.write(info)
                sys.stdout.flush()
        self._last_update = now
    def add(self, n, values=None):
        # Convenience wrapper: advance the bar by `n` steps.
        self.update(self._seen_so_far + n, values)
class Memory(object):
    """Fixed-size memory bank of exponentially averaged feature vectors.

    One row per training example; used to draw negative samples for the
    noise-contrastive loss.
    """
    def __init__(self, device, size=2000, weight=0.5, dim=128):
        """
        :param device: torch device representations are moved to/from.
        :param size: number of rows (training examples) in the bank.
        :param weight: momentum weight of the exponential average.
        :param dim: feature dimensionality of each stored representation.
        """
        # `memory` holds the count-normalized averages; `weighted_sum` the raw
        # exponential sums they are derived from.
        self.memory = np.zeros((size, dim))
        self.weighted_sum = np.zeros((size, dim))
        self.weighted_count = 0
        self.weight = weight
        self.device = device
    def initialize(self, net, train_loader):
        """Fill the bank with representations computed by *net*.

        Assumes each batch is a dict with 'original' images and integer
        'index' row positions — TODO confirm against the dataset definition.
        """
        self.update_weighted_count()
        print('Saving representations to memory')
        bar = Progbar(len(train_loader), stateful_metrics=[])
        for step, batch in enumerate(train_loader):
            with torch.no_grad():
                images = batch['original'].to(self.device)
                index = batch['index']
                output = net(images=images, mode=0)
                self.weighted_sum[index, :] = output.cpu().numpy()
                self.memory[index, :] = self.weighted_sum[index, :]
                bar.update(step, values=[])
    def update(self, index, values):
        """Blend *values* into rows *index* with momentum (1 - weight)."""
        self.weighted_sum[index, :] = values + (1 - self.weight) * self.weighted_sum[index, :]
        self.memory[index, :] = self.weighted_sum[index, :] / self.weighted_count
    def update_weighted_count(self):
        """Advance the normalization constant of the exponential average."""
        self.weighted_count = 1 + (1 - self.weight) * self.weighted_count
    def return_random(self, size, index):
        """Return *size* rows sampled uniformly, excluding row index[0].

        BUG FIX: the candidate range previously hard-coded 2000 rows; it now
        follows the actual bank size so non-default sizes work correctly.
        """
        if isinstance(index, torch.Tensor):
            index = index.tolist()
        bank_size = self.memory.shape[0]
        allowed = [x for x in range(index[0])] + [x for x in range(index[0] + 1, bank_size)]
        chosen = random.sample(allowed, size)
        return self.memory[chosen, :]
    def return_representations(self, index):
        """Return the stored representations at *index* as a torch.Tensor."""
        if isinstance(index, torch.Tensor):
            index = index.tolist()
        return torch.Tensor(self.memory[index, :])
class ModelCheckpoint():
    """Persists model weights whenever a monitored metric improves.

    The checkpoint directory is wiped and recreated on construction.
    """
    def __init__(self, mode, directory):
        self.directory = directory
        if mode == 'min':
            self.best, self.monitor_op = np.inf, np.less
        elif mode == 'max':
            self.best, self.monitor_op = 0, np.greater
        else:
            print('\nChose mode \'min\' or \'max\'')
            raise Exception('Mode should be either min or max')
        # Start from an empty checkpoint directory on every run.
        if os.path.isdir(self.directory):
            shutil.rmtree(self.directory)
        os.mkdir(self.directory)
    def save_model(self, model, current_value, epoch):
        """Save *model* weights if *current_value* beats the best value so far."""
        if not self.monitor_op(current_value, self.best):
            return
        print('\nSave model, best value {:.3f}, epoch: {}'.format(current_value, epoch))
        self.best = current_value
        torch.save(model.state_dict(), os.path.join(self.directory, 'epoch_{}'.format(epoch)))
class NoiseContrastiveEstimator():
    # Noise-contrastive (InfoNCE-style) loss against negatives drawn from a
    # memory bank; the positive pair always occupies class index 0.
    def __init__(self, device):
        self.device = device
    def __call__(self, original_features, path_features, index, memory, negative_nb=1000):
        """Average the per-example NCE loss over the batch.
        :param original_features: features of the unmodified images
        :param path_features: features of the transformed counterparts
        :param index: per-example memory-bank indices, excluded from negatives
        :param memory: Memory bank providing `return_random` negative samples
        :param negative_nb: number of negatives drawn per example
        """
        loss = 0
        for i in range(original_features.shape[0]):
            temp = 0.07  # softmax temperature
            cos = torch.nn.CosineSimilarity()
            criterion = torch.nn.CrossEntropyLoss()
            negative = memory.return_random(size=negative_nb, index=[index[i]])
            negative = torch.Tensor(negative).to(self.device).detach()
            image_to_modification_similarity = cos(original_features[None, i, :], path_features[None, i, :])/temp
            matrix_of_similarity = cos(path_features[None, i, :], negative) / temp
            # Positive similarity is concatenated first, hence target label 0.
            similarities = torch.cat((image_to_modification_similarity, matrix_of_similarity))
            loss += criterion(similarities[None, :], torch.tensor([0]).to(self.device))
        return loss / original_features.shape[0]
def pil_loader(path):
    """Load the image at *path* and return it converted to RGB."""
    with open(path, 'rb') as handle:
        # Convert while the file handle is still open (PIL loads lazily).
        return Image.open(handle).convert('RGB')
class Logger:
    """Minimal CSV training logger writing '<file_name>.csv' rows of
    Epoch,Loss,Time."""
    def __init__(self, file_name):
        """Create (or truncate) '<file_name>.csv' and write the header row."""
        self.file_name = file_name
        # (The unused `index = ['Epoch']` placeholder was removed.)
        with open('{}.csv'.format(self.file_name), 'w') as file:
            file.write('Epoch,Loss,Time\n')
    def update(self, epoch, loss):
        """Append one row: epoch number, loss to 4 decimals, and a timestamp."""
        now = datetime.datetime.now()
        with open('{}.csv'.format(self.file_name), 'a') as file:
            file.write('{},{:.4f},{}\n'.format(epoch, loss, now))
{
"api_name": "sys.stdout",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.isatty",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "sys.modules",
"... |
75065182433 | __all__ = [
"calc",
]
import copy
import json
import logging
from pathlib import Path
import pickle as pk
from typing import NoReturn, Optional, Tuple
import numpy as np
import torch
from torch import Tensor
from torch.utils.data import DataLoader
from . import _config as config
from . import dirs
from . import influence_utils
from .influence_utils import InfluenceMethod
from .influence_utils.nn_influence_utils import InfFuncTensors
from .losses import TORCH_DEVICE
from . import tracin_utils
from .types import CustomTensorDataset, LearnerParams
from . import utils
# Presumably the default recursion/repeat count for s_test estimation — unused
# in this module; confirm before removing.
DEFAULT_R = 10
# Influence-results directory; set lazily by calc() once the block is known.
INF_RES_DIR = None
@influence_utils.log_time(res_type=InfluenceMethod.INF_FUNC)
def calc(block: utils.ClassifierBlock, tr_dl: DataLoader, te_x: Tensor,
         te_y: Tensor, ex_id: Optional[int] = None, use_precompute: bool = False):
    r"""
    Calculate influence-function values of the training set on the test example(s).
    :param block: Block of interest
    :param tr_dl: \p DataLoader used to train the learners
    :param te_x: Test example feature tensor(s)
    :param te_y: Test example label tensor(s)
    :param ex_id: Optional test-example identifier used in result filenames
    :param use_precompute: If True, reuse a previously serialized s_test vector
    :return:
    """
    global INF_RES_DIR
    # Results land under <RES_DIR>/<dataset>/inf_func/<block name>/
    INF_RES_DIR = dirs.RES_DIR / config.DATASET.name.lower() / "inf_func" / block.name().lower()
    INF_RES_DIR.mkdir(exist_ok=True, parents=True)
    # Use the tensors to create a supervised training set. Supports optional test transform
    kw_args = {}
    if config.has_tfms():
        kw_args = {"transform": config.get_test_tfms()}
    ds = CustomTensorDataset([te_x, te_y], **kw_args)
    te_dl = DataLoader(ds, drop_last=False, shuffle=False, num_workers=utils.NUM_WORKERS,
                       batch_size=1)
    # Do not use the original training dataloader since it may require transforms or drop part of
    # the dataset. Create temporary Dataloader that overcomes those issues
    assert isinstance(tr_dl.dataset, CustomTensorDataset), "Code supports certain dataset type"
    # Each learner will have different influence function values
    _calc_block_inf(block=block, tr_dl=tr_dl, te_dl=te_dl, ex_id=ex_id,
                    use_precompute=use_precompute)
def _calc_block_inf(block: utils.ClassifierBlock, tr_dl: DataLoader, te_dl: DataLoader,
                    ex_id: Optional[int],
                    use_precompute: bool) -> InfFuncTensors:
    r"""
    Encapsulates calculating the influence of a single block: runs the
    influence-function estimation, pickles the result, then logs per-method
    statistics (and optionally W&B artifacts).
    :param block: Block for which influence is calculated
    :param tr_dl: Train \p DataLoader
    :param te_dl: Test \p DataLoader
    :param ex_id: Optional test-example identifier used in result filenames
    :param use_precompute: If True, load a previously serialized s_test vector
    :return: Influence result tensors (also serialized to disk)
    """
    block.eval()
    wd = config.get_learner_val(block.name(), LearnerParams.Attribute.WEIGHT_DECAY)
    bl_x, bl_y, bd_ids, ds_ids = _build_learner_tensors(block=block, train_dl=tr_dl)
    n_tr = bl_y.shape[0]
    batch_tr_dl, instance_tr_dl = _build_block_dataloaders(bl_x=bl_x, bl_y=bl_y)
    flds = [block.name(), "inf-fin"]
    prefix = "-".join(flds).lower()
    # noinspection PyTypeChecker
    filename = utils.construct_filename(prefix=prefix, out_dir=INF_RES_DIR, ex_id=ex_id,
                                        file_ext="pkl", add_ds_to_path=False)
    # if not filename.exists():
    msg = f"calculation of {InfluenceMethod.INF_FUNC.value} influence"
    logging.info(f"Starting {msg}")
    # NOTE(review): `if True:` looks like a disabled cache check (see the
    # commented-out `filename.exists()` guard above) — results are always
    # recomputed and overwritten.
    if True:
        precomputed_s_test = None
        if use_precompute:
            assert filename.exists(), "Precomputed s_test file does not exist"
            with open(filename, "rb") as f_in:
                precomputed_s_test = pk.load(f_in).s_test
        # Order of res: influences, train_inputs_collections, s_test
        res = influence_utils.compute_influences(model=block,
                                                 n_gpu=1 if torch.cuda.is_available() else 0,
                                                 device=TORCH_DEVICE,
                                                 f_loss=block.loss.calc_train_loss,
                                                 test_dl=te_dl,
                                                 batch_train_data_loader=batch_tr_dl,
                                                 instance_train_data_loader=instance_tr_dl,
                                                 weight_decay=wd,
                                                 s_test_damp=config.DAMP,
                                                 s_test_scale=config.SCALE,
                                                 s_test_num_samples=config.R_DEPTH,
                                                 s_test_iterations=config.T_REPEATS,
                                                 precomputed_s_test=precomputed_s_test)
        # Extract the result fields
        res.ds_ids, res.bd_ids = ds_ids.clone(), bd_ids.clone()
        with open(filename, "wb+") as f_out:
            pk.dump(res, f_out)
        logging.info(f"COMPLETED {msg}")
    with open(filename, "rb") as f_in:
        res = pk.load(f_in)  # type: InfFuncTensors
    # Report both the base and renormalized influence variants.
    flds = (
        (res.inf_base, InfluenceMethod.INF_FUNC),
        (res.inf_sim, InfluenceMethod.INF_FUNC_RENORM),
    )
    for influence_vals, method in flds:
        # Convert the estimated loss
        est_loss = -1 / n_tr * influence_vals
        est_loss_sorted, helpful = torch.sort(est_loss, dim=0, descending=True)
        tmp_bd_ids, tmp_ds_ids = res.bd_ids[helpful], res.ds_ids[helpful]
        tracin_utils.results.generate_epoch_stats(ep=None, subepoch=None, method=method,
                                                  block=block, inf_vals=est_loss_sorted,
                                                  bd_ids=tmp_bd_ids, ds_ids=tmp_ds_ids,
                                                  ex_id=ex_id, log_cutoff=True)
        if config.USE_WANDB:
            train_dl = tracin_utils.configure_train_dataloader(train_dl=tr_dl)
            tracin_utils.generate_wandb_results(block=block, inf_vals=est_loss_sorted.cpu(),
                                                ds_ids=tmp_ds_ids.cpu(), bd_ids=tmp_bd_ids.cpu(),
                                                method=method, train_dl=train_dl, ex_id=ex_id)
    return res
def _log_influence_results(block: utils.ClassifierBlock, est_loss: Tensor, helpful: Tensor,
                           ds_ids: Tensor, bl_y: Tensor) -> NoReturn:
    r"""
    Log the five most helpful and five most harmful training examples.
    :param block: Block whose influence results are reported
    :param est_loss: Estimated change in loss if training example is removed
    :param helpful: Training examples numbered from 0 to (# training examples - 1),
                    ordered most helpful first
    :param ds_ids: Dataset ID numbers for the training examples used by the block
    :param bl_y: Labels for the training examples used by the block
    """
    b_name = block.name()
    influence_utils.check_duplicate_ds_ids(ds_ids=ds_ids)
    # The harmful ordering is simply the helpful ordering reversed
    harmful = torch.flip(helpful, dims=[0])
    # BUG FIX: explicit pairing replaces the fragile `locals()[name]` lookup,
    # which silently depends on local variable names never being refactored.
    for name, ordering in (("helpful", helpful), ("harmful", harmful)):
        top_ord = ordering[:5]
        top_ids = ds_ids[top_ord]
        logging.info(f"{b_name}: Top {name} IDs: {top_ids.tolist()}")
        logging.info(f"{b_name}: Top {name} Est. Change Loss: {est_loss[top_ord].tolist()}")
        logging.info(f"{b_name}: Top {name} Labels: {bl_y[top_ord].tolist()}")
def _build_inf_results_file(block: utils.ClassifierBlock, helpful: Tensor, influence_vals: Tensor,
                            est_loss_vals: Tensor, ds_ids: Tensor, bl_y: Tensor) -> NoReturn:
    r"""
    Constructs the influence results file (JSON, ordered most helpful first)
    :param block: Block whose results are serialized
    :param helpful: Training-example ordering, most helpful first
    :param influence_vals: Raw influence values per training example
    :param est_loss_vals: Estimated change in loss per training example
    :param ds_ids: Dataset ID numbers for the training examples used by the block
    :param bl_y: Labels for the training examples used by the block
    :return:
    """
    assert ds_ids.shape == helpful.shape, "Helpful tensor shape does not match the ID tensor"
    assert bl_y.shape == helpful.shape, "Helpful tensor shape does not match the y tensor"
    # Reorder every per-example tensor into "most helpful first" order
    ord_ids, ord_y = ds_ids[helpful], bl_y[helpful]
    influence_utils.check_duplicate_ds_ids(ds_ids=ds_ids)
    ord_inf, ord_est_loss = influence_vals[helpful], est_loss_vals[helpful]
    inf_res = {"block_name": block.name(),
               "dataset": config.DATASET.name,
               "hvp_batch_size": config.HVP_BATCH_SIZE,
               "damp": config.DAMP,
               "scale": config.SCALE,
               "helpful-ids": ord_ids.tolist(),
               "helpful-ord-loss": ord_est_loss.tolist(),
               "helpful-y": ord_y.tolist(),
               "helpful-influence": ord_inf.tolist(),
               "test_id": config.TARG_IDX,
               "test-targ-cls": config.TARG_CLS,
               "test-pois-cls": config.POIS_CLS
               }
    res_path = _build_inf_res_filename(block)
    with open(res_path, "w+") as f_out:
        json.dump(inf_res, f_out)
def _build_inf_res_filename(block: utils.ClassifierBlock) -> Path:
    r""" Construct the (timestamped) JSON filename for the influence results """
    prefix = "inf-{}-t-id={}".format(block.name(), config.TARG_IDX)
    return utils.construct_filename(prefix, out_dir=dirs.RES_DIR, file_ext="json",
                                    add_timestamp=True)
def _build_learner_tensors(block: utils.ClassifierBlock,
                           train_dl: DataLoader) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
    r"""
    Construct the X/y/IDs tensors based on any example filtering by the block.
    Examples are re-read without transforms and sorted by dataset ID.
    :param block: Block whose `organize_batch` filtering is applied
    :param train_dl: Training \p DataLoader whose dataset is re-iterated
    :return: Tuple of X, y, backdoor IDs, and dataset IDs tensors respectively
    """
    # Shallow-copy the dataset so the transform can be disabled without
    # affecting the caller's DataLoader.
    tmp_ds = copy.copy(train_dl.dataset)
    tmp_ds.transform = None
    cp_tr_dl = DataLoader(tmp_ds, batch_size=config.BATCH_SIZE,
                          drop_last=False, shuffle=False, num_workers=utils.NUM_WORKERS)
    verify_contents = True  # ToDo Revert to false to speed up final version
    all_x, all_y, all_bd_ids, all_ds_ids = [], [], [], []
    for batch_tensors in cp_tr_dl:
        batch = block.organize_batch(batch_tensors, process_mask=True,
                                     verify_contents=verify_contents)
        if batch.skip():
            continue
        all_x.append(batch.xs.cpu())
        all_y.append(batch.lbls.cpu())
        all_bd_ids.append(batch.bd_ids.cpu())
        all_ds_ids.append(batch.ds_ids.cpu())
    all_ds_ids, id_ordering = torch.sort(torch.cat(all_ds_ids, dim=0), dim=0)
    # Consolidate into tensors and sort by the image IDs
    n_tr = id_ordering.shape[0]  # Number of training examples
    # Strange count for number of training examples
    assert n_tr == id_ordering.numel(), "Weird size mismatch"
    # Persist the sorted ID ordering next to the influence results for auditing.
    filename = "_".join([block.name().lower(), "inf-func", block.start_time])
    # noinspection PyUnresolvedReferences
    np.savetxt(INF_RES_DIR / (filename + "_ord-id.csv"), all_ds_ids.numpy(), fmt='%d',
               delimiter=',')
    # Combine the subtensors and ensure the order aligns with all_ds_ids
    tr_x, tr_y = torch.cat(all_x, dim=0)[id_ordering], torch.cat(all_y, dim=0)[id_ordering]
    all_bd_ids = torch.cat(all_bd_ids, dim=0)[id_ordering]
    return tr_x, tr_y, all_bd_ids, all_ds_ids
def _build_block_dataloaders(bl_x: Tensor, bl_y: Tensor) -> Tuple[DataLoader, DataLoader]:
    r"""
    Constructs two separate dataloaders over the same data. Separate loaders
    allow different properties when estimating the Hessian vector product
    (HVP) versus when estimating influence.
    :param bl_x: Block's X tensor
    :param bl_y: Blocks y (i.e. label) tensor
    :return: Tuple of the batch \p DataLoader (used for generating the HVP) and
             the instance \p DataLoader used when estimating influence.
    """
    # ToDo Determine whether to use transforms
    hvp_ds = CustomTensorDataset((bl_x, bl_y), transform=config.get_train_tfms())
    batch_tr_dl = DataLoader(hvp_ds, batch_size=config.HVP_BATCH_SIZE, shuffle=True,
                             drop_last=True, num_workers=utils.NUM_WORKERS)
    inst_ds = CustomTensorDataset((bl_x, bl_y), transform=config.get_test_tfms())
    instance_tr_dl = DataLoader(inst_ds, batch_size=1, shuffle=False, drop_last=False,
                                num_workers=utils.NUM_WORKERS)
    return batch_tr_dl, instance_tr_dl
| ZaydH/target_identification | fig01_cifar_vs_mnist/poison/influence_func.py | influence_func.py | py | 12,090 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "typing.Optional... |
32259577603 | import copy
import threading
import socketserver
import json
from typing import List
from mcsu_data import *
# Maximum number of bytes read per socket recv() call.
_BUFFER_SIZE = 1024
# Hard cap on simultaneously connected clients.
_CLIENTS_MAX = 8
class State:
    """Thread-safe shared game state: player roster, free-UID pool and turn
    counter."""
    def __init__(self):
        self.userdata = DataGame(0, [])
        self.uid_free = []  # recycled UIDs of players who left
        self.uid = 0        # next fresh UID (also advanced by next_turn)
        # BUG FIX: reentrant lock instead of Semaphore — del_uid() calls
        # next_turn() while already holding the lock, which deadlocked with
        # the original non-reentrant Semaphore.
        self.lock = threading.RLock()
    def get_userdata(self):
        """Return a deep copy of the game state (safe to use unlocked)."""
        self.lock.acquire()
        mycopy: DataGame = copy.deepcopy(self.userdata)
        self.lock.release()
        return mycopy
    def set_userdata(self, player: DataPlayer):
        """Insert *player*, replacing any existing entry with the same uid."""
        self.lock.acquire()
        found = False
        for i, curplayer in enumerate(self.userdata.players):
            if curplayer.uid == player.uid:
                self.userdata.players[i] = player
                found = True
                break
        if not found:
            self.userdata.players.append(player)
        self.lock.release()
    def next_turn(self):
        """Advance the turn counter, wrapping around the player count."""
        self.lock.acquire()
        # BUG FIX: guard against an empty roster (ZeroDivisionError).
        if self.userdata.players:
            self.uid = (self.uid + 1) % len(self.userdata.players)
        self.lock.release()
    def del_uid(self, uid: int):
        """Remove the player with *uid* and recycle the identifier."""
        self.lock.acquire()
        for pos, player in enumerate(self.userdata.players):
            if uid == player.uid:
                self.uid_free.append(uid)
                # we can't remain in this turn anymore
                # NOTE(review): the turn is tracked both as userdata.turn and
                # as self.uid — presumably the same counter; confirm upstream.
                if self.userdata.turn == player.uid:
                    self.next_turn()
                # BUG FIX: delete by list position, not `players[uid]` — uid
                # and position diverge once any earlier player has left.
                del self.userdata.players[pos]
                break
        self.lock.release()
    def new_uid(self) -> int:
        """Hand out a recycled UID when available, otherwise a fresh one."""
        self.lock.acquire()
        if len(self.uid_free) > 0:
            uid = self.uid_free.pop(0)
            self.lock.release()
            return uid
        uid = self.uid
        self.uid += 1
        self.lock.release()
        return uid
class McsuServer:
def __init__(self, host: str, port: int):
self.host = host
self.port = port
self.state: State = State()
self.turn: int = 0
def run(self):
class Handler(socketserver.ThreadingTCPServer):
thisref = self
def handle(self):
this = Handler.thisref
uid = -1
# DC if too many clients
if len(self.state) >= _CLIENTS_MAX:
print("Too many clients!")
return
self.request.setblocking(True)
while True:
data = self.receive()
if data is None:
if uid != -1: self.state.del_uid(uid)
print("No data")
break
command = data.get('_command')
if command is None:
if uid != -1: self.state.del_uid(uid)
print("No command")
break
elif command == 'register':
uid = self.state.new_uid()
units = data.get('units')
if not units is None:
data = DataPlayer(uid, units)
self.state.set_userdata(data)
data = DataRegister(uid)
self.send(data)
elif command == 'status':
if uid == -1:
print("No status uid")
break
data: DataGame = self.state.get_userdata()
self.send(data)
elif command == 'finish':
if uid == -1:
print("No finish uid")
break
self.state.next_turn()
elif command == 'quit':
if uid != -1:
print("No quit uid")
self.state.del_uid(uid)
self.state.del_uid(uid)
break
elif command == 'move':
if uid == -1:
print("No move uid")
break
# get commanded position
selected_unit = data.get('mover')
x = data.get('x')
y = data.get('y')
if selected_unit is None or x is None or y is None:
print("No selected unit/x/y")
break
state: DataGame = self.state.get_userdata()
need_to_break = False
for player in state.players:
if not uid == player.uid: continue
# find data unit from selected unit
for unit in player.units:
if unit.x == selected_unit.x and unit.y == selected_unit.y:
# move it!
unit.x = x
unit.y = y
if need_to_break: break
elif command == 'attack':
pass
def send(self, obj):
myobj = obj if type(obj) is dict else obj.__dict__
try:
message = json.dumps(myobj)
except:
print('Failed to dumps')
return
try:
self.request.sendall(message)
except:
print('Failed to sendall')
return
def receive(self):
try:
self.message = self.request.recv(_BUFFER_SIZE)
except:
print("Failed to receive")
return None
# disconnected
if len(self.message) == 0: return None
try:
data: dict = json.loads(self.message)
except:
print("Failed to loads")
return None
return data
with socketserver.ThreadingTCPServer((self.host, self.port), Handler) as server:
server.serve_forever()
def main(args):
server = McsuServer('0.0.0.0', 20000)
server.run()
return 0
if __name__ == '__main__':
import sys
exit(main(sys.argv))
| JacobLondon/mcsu2 | mcsu_server.py | mcsu_server.py | py | 6,444 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "threading.Semaphore",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "socketserver.ThreadingTCPServer",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name":... |
27214350964 | import numpy as np
import time
import uuid
from models.Basic import Basic
from gurobipy import *
class FairIR(Basic):
"""Fair paper matcher via iterative relaxation.
"""
def __init__(self, loads, loads_lb, coverages, weights, thresh=0.0):
"""Initialize.
Args:
loads - a list of integers specifying the maximum number of papers
for each reviewer.
loads_lb - a list of ints specifying the minimum number of papers
for each reviewer.
coverages - a list of integers specifying the number of reviews per
paper.
weights - the affinity matrix (np.array) of papers to reviewers.
Rows correspond to reviewers and columns correspond to
papers.
Returns:
initialized makespan matcher.
"""
self.n_rev = np.size(weights, axis=0)
self.n_pap = np.size(weights, axis=1)
self.loads = loads
self.loads_lb = loads_lb
self.coverages = coverages
self.weights = weights
self.id = uuid.uuid4()
self.m = Model("%s : FairIR" % str(self.id))
self.makespan = thresh
self.solution = None
self.m.setParam('OutputFlag', 0)
self.load_ub_name = 'lib'
self.load_lb_name = 'llb'
self.cov_name = 'cov'
self.ms_constr_prefix = 'ms'
self.round_constr_prefix = 'round'
# primal variables
start = time.time()
self.lp_vars = []
for i in range(self.n_rev):
self.lp_vars.append([])
for j in range(self.n_pap):
self.lp_vars[i].append(self.m.addVar(ub=1.0,
name=self.var_name(i, j)))
self.m.update()
print('#info FairIR:Time to add vars %s' % (time.time() - start))
start = time.time()
# set the objective
obj = LinExpr()
for i in range(self.n_rev):
for j in range(self.n_pap):
obj += self.weights[i][j] * self.lp_vars[i][j]
self.m.setObjective(obj, GRB.MAXIMIZE)
print('#info FairIR:Time to set obj %s' % (time.time() - start))
start = time.time()
# load upper bound constraints.
for r, load in enumerate(self.loads):
self.m.addConstr(sum(self.lp_vars[r]) <= load,
self.lub_constr_name(r))
# load load bound constraints.
if self.loads_lb is not None:
for r, load in enumerate(self.loads_lb):
self.m.addConstr(sum(self.lp_vars[r]) >= load,
self.llb_constr_name(r))
# coverage constraints.
for p, cov in enumerate(self.coverages):
self.m.addConstr(sum([self.lp_vars[i][p]
for i in range(self.n_rev)]) == cov,
self.cov_constr_name(p))
# makespan constraints.
for p in range(self.n_pap):
self.m.addConstr(sum([self.lp_vars[i][p] * self.weights[i][p]
for i in range(self.n_rev)]) >= self.makespan,
self.ms_constr_name(p))
self.m.update()
print('#info FairIR:Time to add constr %s' % (time.time() - start))
def ms_constr_name(self, p):
"""Name of the makespan constraint for paper p."""
return '%s%s' % (self.ms_constr_prefix, p)
def lub_constr_name(self, r):
"""Name of load upper bound constraint for reviewer r."""
return '%s%s' % (self.load_ub_name, r)
def llb_constr_name(self, r):
"""Name of load lower bound constraint for reviewer r."""
return '%s%s' % (self.load_lb_name, r)
def cov_constr_name(self, p):
"""Name of coverage constraint for paper p."""
return '%s%s' % (self.cov_name, p)
def change_makespan(self, new_makespan):
"""Change the current makespan to a new_makespan value.
Args:
new_makespan - the new makespan constraint.
Returns:
Nothing.
"""
for c in self.m.getConstrs():
if c.getAttr("ConstrName").startswith(self.ms_constr_prefix):
self.m.remove(c)
# self.m.update()
for p in range(self.n_pap):
self.m.addConstr(sum([self.lp_vars[i][p] * self.weights[i][p]
for i in range(self.n_rev)]) >= new_makespan,
self.ms_constr_prefix + str(p))
self.makespan = new_makespan
self.m.update()
def sol_as_mat(self):
if self.m.status == GRB.OPTIMAL or self.m.status == GRB.SUBOPTIMAL:
solution = np.zeros((self.n_rev, self.n_pap))
for v in self.m.getVars():
i, j = self.indices_of_var(v)
solution[i, j] = v.x
self.solution = solution
return solution
else:
raise Exception(
'You must have solved the model optimally or suboptimally '
'before calling this function.')
def integral_sol_found(self):
"""Return true if all lp variables are integral."""
sol = self.sol_as_dict()
return all(sol[self.var_name(i, j)] == 1.0 or
sol[self.var_name(i, j)] == 0.0
for i in range(self.n_rev) for j in range(self.n_pap))
def fix_assignment(self, i, j, val):
"""Round the variable x_ij to val."""
self.lp_vars[i][j].ub = val
self.lp_vars[i][j].lb = val
def find_ms(self):
"""Find an the highest possible makespan.
Perform a binary search on the makespan value. Each time, solve the
makespan LP without the integrality constraint. If we can find a
fractional value to one of these LPs, then we can round it.
Args:
None
Return:
Highest feasible makespan value found.
"""
mn = 0.0
mx = np.max(self.weights) * np.max(self.coverages)
ms = mx
best = None
self.change_makespan(ms)
start = time.time()
self.m.optimize()
print('#info FairIR:Time to solve %s' % (time.time() - start))
for i in range(10):
print('#info FairIR:ITERATION %s ms %s' % (i, ms))
if self.m.status == GRB.INFEASIBLE:
mx = ms
ms -= (ms - mn) / 2.0
else:
assert(best is None or ms > best)
assert(self.m.status == GRB.OPTIMAL)
best = ms
mn = ms
ms += (mx - ms) / 2.0
self.change_makespan(ms)
self.m.optimize()
return best
def solve(self):
"""Find a makespan and solve the ILP.
Run a binary search to find an appropriate makespan and then solve the
ILP. If solved optimally or suboptimally then save the solution.
Args:
mn - the minimum feasible makespan (optional).
mx - the maximum possible makespan( optional).
itr - the number of iterations of binary search for the makespan.
log_file - the string path to the log file.
Returns:
The solution as a matrix.
"""
if self.makespan <= 0:
print('#info FairIR: searching for fairness threshold')
ms = self.find_ms()
else:
print('#info FairIR: config fairness threshold: %s' % self.makespan)
ms = self.makespan
self.change_makespan(ms)
self.round_fractional(np.ones((self.n_rev, self.n_pap)) * -1)
sol = {}
for v in self.m.getVars():
sol[v.varName] = v.x
def sol_as_dict(self):
"""Return the solution to the optimization as a dictionary.
If the matching has not be solved optimally or suboptimally, then raise
an exception.
Args:
None.
Returns:
A dictionary from var_name to value (either 0 or 1)
"""
if self.m.status == GRB.OPTIMAL or self.m.status == GRB.SUBOPTIMAL:
_sol = {}
for v in self.m.getVars():
_sol[v.varName] = v.x
return _sol
else:
raise Exception(
'You must have solved the model optimally or suboptimally '
'before calling this function.\nSTATUS %s\tMAKESPAN %f' % (
self.m.status, self.makespan))
def round_fractional(self, integral_assignments=None, count=0):
"""Round a fractional solution.
This is the meat of the iterative relaxation approach. First, if the
solution to the relaxed LP is integral, then we're done--return the
solution. Otherwise, here's what we do:
1. if a variable is integral, lock it's value to that integer.
2. find all papers with exactly 2 or 3 fractionally assigned revs and
drop the makespan constraint on that reviewer.
3. if no makespan constraints dropped, find a reviewer with exactly two
fraction assignments and drop the load constraints on that reviewer.
Args:
integral_assignments - np.array of revs x paps (initially None).
log_file - the log file if exists.
count - (int) to keep track of the number of calls to this function.
Returns:
Nothing--has the side effect or storing an assignment matrix in this
class.
"""
if integral_assignments is None:
integral_assignments = np.ones((self.n_rev, self.n_pap)) * -1
self.m.optimize()
if self.m.status != GRB.OPTIMAL and self.m.status != GRB.SUBOPTIMAL:
assert False, '%s\t%s' % (self.m.status, self.makespan)
if self.integral_sol_found():
return
else:
frac_assign_p = {}
frac_assign_r = {}
sol = self.sol_as_dict()
fractional_vars = []
# Find fractional vars.
for i in range(self.n_rev):
for j in range(self.n_pap):
if j not in frac_assign_p:
frac_assign_p[j] = []
if i not in frac_assign_r:
frac_assign_r[i] = []
if sol[self.var_name(i, j)] == 0.0 and \
integral_assignments[i][j] != 0.0:
self.fix_assignment(i, j, 0.0)
integral_assignments[i][j] = 0.0
elif sol[self.var_name(i, j)] == 1.0 and \
integral_assignments[i][j] != 1.0:
self.fix_assignment(i, j, 1.0)
integral_assignments[i][j] = 1.0
elif sol[self.var_name(i, j)] != 1.0 and \
sol[self.var_name(i, j)] != 0.0:
frac_assign_p[j].append(
(i, j, sol[self.var_name(i, j)]))
frac_assign_r[i].append(
(i, j, sol[self.var_name(i, j)]))
fractional_vars.append((i, j, sol[self.var_name(i, j)]))
integral_assignments[i][j] = sol[self.var_name(i, j)]
# First try to elim a makespan constraint.
removed = False
for (paper, frac_vars) in frac_assign_p.items():
if len(frac_vars) == 2 or len(frac_vars) == 3:
for c in self.m.getConstrs():
if c.ConstrName == self.ms_constr_name(paper):
self.m.remove(c)
removed = True
# If necessary remove a load constraint.
if not removed:
for (rev, frac_vars) in frac_assign_r.items():
if len(frac_vars) == 2:
for c in self.m.getConstrs():
if c.ConstrName == self.lub_constr_name(rev) or \
c.ConstrName == self.llb_constr_name(rev):
self.m.remove(c)
self.m.update()
return self.round_fractional(integral_assignments, count + 1)
if __name__ == "__main__":
init_makespan = 0.7
ws = np.array([
np.array([0.9, 0.9, 0.5, 0.5]),
np.array([0.9, 0.9, 0.6, 0.6]),
np.array([0.1, 0.1, 0.2, 0.2]),
np.array([0.2, 0.1, 0.2, 0.3])
])
print(ws)
a = np.array([2, 2, 2, 2])
b = np.array([2, 2, 2, 2])
x = FairIR(a, [0, 0, 0, 0], b, ws)
s = time.time()
x.solve()
print(x.sol_as_mat())
print(x.objective_val())
print(time.time() - s)
print("[done.]")
| iesl/fair-matching | src/models/FairIR.py | FairIR.py | py | 12,878 | python | en | code | 11 | github-code | 1 | [
{
"api_name": "models.Basic.Basic",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "numpy.size",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.size",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number... |
71187185633 | import gym
class GymSpec:
def __init__(self, name, env_id):
self.name = name
self.env_id = env_id
GYM_ENVS = [
GymSpec('gym_CartPole-v0', 'CartPole-v0'),
GymSpec('gym_CartPolev-1', 'CartPole-v1'),
]
def gym_env_by_name(name):
for cfg in GYM_ENVS:
if cfg.name == name:
return cfg
raise Exception('Unknown Gym env')
def make_gym_env(env_name, cfg, **kwargs):
mujoco_spec = gym_env_by_name(env_name)
env = gym.make(mujoco_spec.env_id)
return env
| Garytoner/Asynchronous-Reinforcement-Learning | Asynchronous_Reinforcement_Learning/envs/gym/gym_utils.py | gym_utils.py | py | 518 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "gym.make",
"line_number": 24,
"usage_type": "call"
}
] |
69890623394 | import requests
from django.shortcuts import redirect, render
from animal.models import Siliao,Zhongzhu,Peizhong,Renjian,Fenmian,Caijing,Xingweiy
from django.http import JsonResponse
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect
from animal.models import Site_Info, User
from django.contrib import auth
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic.edit import CreateView
import os
# Create your views here.
def index(request):
return render(request, "animal/index.html")
def xinxiyulan(request):
return render(request, "animal/xinxiyulan.html")
def xiputu(request):
return render(request, "animal/xiputu.html")
def shengchantishi(request):
data1_list = Zhongzhu.objects.filter(Q(peizhong__contains='否')).count()
data2_list = Zhongzhu.objects.filter(Q(zaizhu__contains='是')).count()
data3_list = Zhongzhu.objects.filter(pinzhong__contains='种公猪',yimiao__contains='否').count()
data4_list = Zhongzhu.objects.filter(pinzhong__contains='种母猪', yimiao__contains='否').count()
return render(request, "animal/shengchantishi.html", {"data1_list": data1_list,"data2_list": data2_list,"data3_list": data3_list,"data4_list": data4_list})
def siliaokucun(request):
data_list = Siliao.objects.all()
return render(request, "animal/siliaokucun.html",{'data_list':data_list})
def xingweijilu(request):
data1_list = Peizhong.objects.all()
data2_list = Renjian.objects.all()
data3_list = Fenmian.objects.all()
data4_list = Caijing.objects.all()
data5_list = Xingweiy.objects.all()
return render(request, "animal/xingweijilu.html", {'data1_list': data1_list,'data2_list':data2_list,'data3_list':data3_list,'data4_list':data4_list,'data5_list':data5_list})
def peiadd(request):
if request.method == "GET":
return render(request,"animal/peiadd.html")
bianhao = request.POST.get("bianhao")
jiluneirong = request.POST.get("jiluneirong")
Peizhong.objects.create(bianhao=bianhao,jiluneirong=jiluneirong)
return redirect("/xingweijilu")
def peidelete(request):
#删除饲料库存
nid = request.GET.get('nid')
Peizhong.objects.filter(id=nid).delete()
return redirect("/xingweijilu")
def renadd(request):
if request.method == "GET":
return render(request,"animal/renadd.html")
bianhao = request.POST.get("bianhao")
jiluneirong = request.POST.get("jiluneirong")
Renjian.objects.create(bianhao=bianhao,jiluneirong=jiluneirong)
return redirect("/xingweijilu")
def rendelete(request):
nid = request.GET.get('nid')
Renjian.objects.filter(id=nid).delete()
return redirect("/xingweijilu")
def fenadd(request):
if request.method == "GET":
return render(request,"animal/fenadd.html")
bianhao = request.POST.get("bianhao")
jiluneirong = request.POST.get("jiluneirong")
Fenmian.objects.create(bianhao=bianhao,jiluneirong=jiluneirong)
return redirect("/xingweijilu")
def fendelete(request):
nid = request.GET.get('nid')
Fenmian.objects.filter(id=nid).delete()
return redirect("/xingweijilu")
def caiadd(request):
if request.method == "GET":
return render(request,"animal/caiadd.html")
bianhao = request.POST.get("bianhao")
jiluneirong = request.POST.get("jiluneirong")
Caijing.objects.create(bianhao=bianhao,jiluneirong=jiluneirong)
return redirect("/xingweijilu")
def caidelete(request):
nid = request.GET.get('nid')
Caijing.objects.filter(id=nid).delete()
return redirect("/xingweijilu")
def yadd(request):
if request.method == "GET":
return render(request,"animal/yadd.html")
bianhao = request.POST.get("bianhao")
jiluneirong = request.POST.get("jiluneirong")
Xingweiy.objects.create(bianhao=bianhao,jiluneirong=jiluneirong)
return redirect("/xingweijilu")
def ydelete(request):
nid = request.GET.get('nid')
Xingweiy.objects.filter(id=nid).delete()
return redirect("/xingweijilu")
def siliaoadd(request):
if request.method == "GET":
return render(request,"animal/siliaoadd.html")
sname = request.POST.get("sname")
kucun = request.POST.get("kucun")
Siliao.objects.create(sname=sname,kucun=kucun)
return redirect("/siliaokucun")
def sdelete(request):
#删除饲料库存
nid = request.GET.get('nid')
Siliao.objects.filter(id=nid).delete()
return redirect("/siliaokucun")
def edit(request,nid):
if request.method =="GET":
row_object = Siliao.objects.filter(id=nid).first()
print(row_object.sname,row_object.kucun)
return render(request,'animal/edit.html',{"row_object": row_object})
#获取
sname = request.POST.get("sname")
Siliao.objects.filter(id=nid).update(sname=sname)
kucun = request.POST.get("kucun")
Siliao.objects.filter(id=nid).update(kucun=kucun)
return redirect("/siliaokucun")
def chart_bar(request):
data1_list = Zhongzhu.objects.filter(Q(zhushe__contains='1')).count()
data2_list = Zhongzhu.objects.filter(Q(zhushe__contains='2')).count()
data3_list = Zhongzhu.objects.filter(Q(zhushe__contains='3')).count()
data4_list = Zhongzhu.objects.filter(Q(zhushe__contains='4')).count()
data5_list = Zhongzhu.objects.filter(Q(zhushe__contains='5')).count()
legend = ["数量"]
series_list =[
{
"name": '数量',
"type": 'bar',
"data": [data1_list, data2_list, data3_list, data4_list, data5_list]
}
]
x_axis = ['一号猪舍','二号猪舍','三号猪舍','四号猪舍','五号猪舍']
result ={
"status":True,
"data":{
'legend':legend,
'series_list':series_list,
'x_axis':x_axis,
}
}
return JsonResponse(result)
def chart_bing(request):
data1_list = Zhongzhu.objects.filter(Q(pinzhong__contains='种公猪')).count()
data2_list = Zhongzhu.objects.filter(Q(pinzhong__contains='种母猪')).count()
data =[
{ "value": data1_list, "name": '种公猪' },
{ "value": data2_list, "name": '种母猪' },
]
result ={
"status":True,
'data':data,
}
return JsonResponse(result)
def chart_zong(request):
data_list = Zhongzhu.objects.all().count()
data =[
{
"value": data_list,
"name": '种猪数'
}
]
result ={
"status":True,
'data':data
}
return JsonResponse(result)
def chart_ku(request):
data1_list = Siliao.objects.filter(sname='麸皮').values_list('kucun').first()
data2_list = Siliao.objects.filter(sname='蛋白质类').values_list('kucun').first()
data3_list = Siliao.objects.filter(sname='矿物质类').values_list('kucun').first()
data4_list = Siliao.objects.filter(sname='维生素类').values_list('kucun').first()
data5_list = Siliao.objects.filter(sname='大麦').values_list('kucun').first()
data6_list = Siliao.objects.filter(sname='小麦').values_list('kucun').first()
data7_list = Siliao.objects.filter(sname='玉米').values_list('kucun').first()
data8_list = Siliao.objects.filter(sname='油脂').values_list('kucun').first()
data =[
{"value": data1_list, "name": '麸皮'},
{"value": data2_list, "name": '蛋白质类'},
{"value": data3_list, "name": '矿物质类'},
{"value": data4_list, "name": '维生素类'},
{"value": data5_list, "name": '大麦'},
{"value": data6_list, "name": '小麦'},
{"value": data7_list, "name": '玉米'},
{"value": data8_list, "name": '油脂'}
]
result ={
"status":True,
'data':data
}
return JsonResponse(result)
def chart_graph(request):
data_list =[
{
"name":1,
"category": '种公猪',
"draggable": "true",
}, {
"name": 2,
"category": '种母猪',
"draggable": "true",
}, {
"name": 3,
"category": '种公猪',
"draggable": "true",
}, {
"name": 4,
"category": '种母猪',
"draggable": "true",
}, {
"name": 5,
"category": '种公猪',
"draggable": "true",
}, {
"name": 6,
"category": '种公猪',
"draggable": "true",
}, {
"name": 7,
"category": '种母猪',
"draggable": "true",
},
]
links =[
{
"source": '1',
"target": '5',
"value": '父系',
}, {
"source": '2',
"target": '5',
"value": '母系',
}, {
"source": '3',
"target": '7',
"value": '父系',
}, {
"source": '4',
"target": '7',
"value": '母系',
}, {
"source": '5',
"target": '6',
"value": '父系',
}, {
"source": '7',
"target": '6',
"value": '母系',
},
]
result ={
"status":True,
'data':{
'data_list':data_list,
'links':links,
}
}
return JsonResponse(result)
| yurooc/Breed-pigs-Management-system | animalManage/views.py | views.py | py | 9,322 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.shortcuts.render",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 19,
"usage_type": "call"
},
{
"api_name"... |
24887452366 | import os
import json
import aloe
from werkzeug.datastructures import MultiDict
from nose.tools import assert_equals
import flask_login
import sqlalchemy
from app import app
from app.database import db
from app.models.university import University, UniversityPending
from ..steps import fieldname_with_language
from pprint import pprint
def university_step_data_to_post_data(step_data):
post_data = MultiDict()
languages = []
for data_dict in step_data:
language = data_dict.pop("language")
languages.append(language)
for k, v in data_dict.items():
if University.is_translatable(k):
k = k + "[" + language + "]"
post_data.add(k, v)
if len(languages) == 0:
languages = ["en", "fr"]
for l in languages:
post_data.setdefault(fieldname_with_language("university_name", l), "")
post_data.setdefault(fieldname_with_language("university_intro", l), "")
post_data.setdefault("university_latlong", "")
post_data.setdefault("university_web_address", "")
post_data["languages"] = ",".join(languages)
return post_data
@aloe.step(u'the user pends addition of university \"([\w\d ]*)\"')
def the_user_pends_addition_of_university(step, university_name):
with app.test_request_context():
aloe.world.response = aloe.world.app.post(
'/university/create',
data={
fieldname_with_language("university_name", aloe.world.language):university_name,
fieldname_with_language("university_intro", aloe.world.language):"A new university",
"university_latlong":"0.0,0.0",
"university_web_address": "www.some_university.com",
'languages': [aloe.world.language]
}
)
@aloe.step(u'the user pends those courses to that university')
def the_user_pends_addition_of_courses_to_university(step):
with app.app_context():
university = University.get_single(university_id = aloe.world.university_ids[0])
university_data = university.request_dict()
university_data.setlist("courses[]", [str(c) for c in aloe.world.course_ids])
pprint(university_data)
with app.test_request_context():
aloe.world.response = aloe.world.app.post(
"/university/" + str(aloe.world.university_ids[0]) + "/edit",
data=university_data
)
@aloe.step(u'the user approves pending changes to university \"([\w\d ]*)\"')
def the_user_approves_pending_changes_to_university(step, university_name):
with app.test_request_context():
university = UniversityPending.get_by_name(university_name=university_name, language=aloe.world.language)
aloe.world.response = aloe.world.app.post(
'/university/pending/approve',
data={
'data_id':university.pending_id
}
)
@aloe.step(u'the following university details are correct in response')
def the_following_university_details_are_returned(step):
returned_json = json.loads(aloe.world.response.data.decode("utf-8"))["data"]
expected_json = University.json_skeleton()
expected_json = json.loads(step.multiline)
for k, v in expected_json.items():
assert_equals(v, returned_json[k])
@aloe.step(u'the following data is pending for that university')
def the_following_university_details_are_pending(step):
with app.app_context():
if aloe.world.last_pending_university_id is None:
aloe.world.last_pending_university_id = UniversityPending.get_single(
university_id=aloe.world.university_ids[0]
)
actual_json = aloe.world.last_pending_university_id.json()
expected_json = University.json_skeleton()
expected_json = json.loads(step.multiline)
for k, v in expected_json.items():
assert_equals(v, actual_json[k])
@aloe.step(u'the university \"([\w\d ]*)\" should exist in \"([\w\d ]*)\"')
def the_university_should_exist(step, university_name, language):
with app.app_context():
university = University.get_by_name(university_name=university_name, language=language)
assert_equals(university.translations[language].university_name, university_name)
@aloe.step(u"the university \"([\w\d ]*)\" exists")
def the_university_exists(step, university_name):
with app.app_context():
try:
uni = University.create({aloe.world.language: {"university_name": university_name}})
aloe.world.university_ids.append(uni.university_id)
except sqlalchemy.exc.IntegrityError:
db.session.rollback() # University already in the system
@aloe.step(u"the university \"([\w\d ]*)\" is pending for addition")
def the_university_is_pending_for_addition(step, university_name):
with app.app_context():
try:
aloe.world.last_pending_university_id = UniversityPending.addition(
{aloe.world.language: {"university_name": university_name}},
)
except sqlalchemy.exc.IntegrityError:
db.session.rollback() # University already in the system
@aloe.step(u"the university \"([\w\d ]*)\" is pending for edit")
def the_university_is_pending_for_edit(step, university_name):
with app.app_context():
try:
university = University.get_single(university_name=university_name, language=aloe.world.language)
aloe.world.last_pending_university_id = UniversityPending.edit(university)
except sqlalchemy.exc.IntegrityError:
db.session.rollback() # University already in the system
@aloe.step(u"the translation \"([\w\d ]*)\" in \"([\w\d ]*)\" of university \"([\w\d ]*)\" is pending")
def the_translation_of_university_is_pending(step, translation, language, university_name):
with app.app_context():
try:
university = University.get_single(university_name=university_name, language=aloe.world.language)
aloe.world.last_pending_university_id = UniversityPending.edit(university)
aloe.world.last_pending_university_id.set_translations(
{language: {"university_name": translation}}
)
aloe.world.last_pending_university_id.save()
except sqlalchemy.exc.IntegrityError:
db.session.rollback() # University already in the system
@aloe.step(u'the university \"([\w\d ]*)\" should have the following data')
def the_university_has_data(step, university_name):
with app.test_request_context():
university = University.get_by_name(university_name=university_name, language="en")
expected_data = json.loads(step.multiline)
assert_equals(university.json(), expected_data)
@aloe.step(u'the user pends the following data to it')
def the_user_pends_university_data_for_edit(step):
with app.test_request_context():
aloe.world.response = aloe.world.app.post(
"/university/" + str(aloe.world.university_ids[0]) + "/edit",
data=university_step_data_to_post_data(step.hashes)
)
@aloe.step(u'those courses should be pending for addition')
def courses_should_be_pending_for_addition(step):
with app.app_context():
university = UniversityPending.get_single(university_id = aloe.world.university_ids[0])
for course_id in aloe.world.course_ids:
assert(university.has_course(course_id))
| jamesfowkes/golden-futures-site | aloe-test/features/university-features/university_steps.py | university_steps.py | py | 7,482 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "werkzeug.datastructures.MultiDict",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "app.models.university.University.is_translatable",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "app.models.university.University",
"line_number": 31,
... |
34498271094 | """add geographic name table
Revision ID: 4d1ddc1ec574
Revises: 36d2a94e6894
Create Date: 2020-11-09 13:45:37.277092
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4d1ddc1ec574'
down_revision = '36d2a94e6894'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('geo_name',
sa.Column('id', sa.String(length=255), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('type_', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_geo_name_name'), 'geo_name', ['name'], unique=False)
op.create_index(op.f('ix_geo_name_type_'), 'geo_name', ['type_'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_geo_name_type_'), table_name='geo_name')
op.drop_index(op.f('ix_geo_name_name'), table_name='geo_name')
op.drop_table('geo_name')
# ### end Alembic commands ###
| ThreeSixtyGiving/360insights | migrations/versions/4d1ddc1ec574_add_geographic_name_table.py | 4d1ddc1ec574_add_geographic_name_table.py | py | 1,135 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Strin... |
25224482568 | import logging
# import gevent
import asyncio
from typing import Optional
from xbox.sg.crypto import PKCS7Padding
from xbox.sg.utils.events import Event
from xbox.auxiliary import packer
from xbox.auxiliary.packet import aux_header_struct, AUX_PACKET_MAGIC
from xbox.auxiliary.crypto import AuxiliaryStreamCrypto
from xbox.sg.utils.struct import XStructObj
log = logging.getLogger(__name__)
class AuxiliaryPackerException(Exception):
pass
class ConsoleConnection(object):
BUFFER_SIZE = 2048
def __init__(self, address, port, crypto):
self.address = address
self.port = port
self.crypto = crypto
self._reader: Optional[asyncio.StreamReader] = None
self._writer: Optional[asyncio.StreamWriter] = None
self._recv_task: Optional[asyncio.Task] = None
self.on_message = Event()
def start(self):
self._reader, self._writer = asyncio.open_connection(
self.address, self.port
)
self._recv_task = asyncio.create_task(self._recv())
def stop(self):
self._recv_task.cancel()
def handle(self, data):
try:
msg = packer.unpack(data, self.crypto)
# Fire event
self.on_message(msg)
except Exception as e:
log.exception("Exception while handling Console Aux data, error: {}".format(e))
async def _recv(self):
while True:
data = await self._reader.read(4)
header = aux_header_struct.parse(data)
if header.magic != AUX_PACKET_MAGIC:
raise Exception('Invalid packet magic received from console')
payload_sz = header.payload_size + PKCS7Padding.size(
header.payload_size, 16
)
remaining_payload_bytes = payload_sz
while remaining_payload_bytes > 0:
tmp = await self._reader.read(remaining_payload_bytes)
remaining_payload_bytes -= len(tmp)
data += tmp
data += await self._reader.read(32)
self.handle(data)
async def send(self, msg):
packets = packer.pack(msg, self.crypto)
if not packets:
raise Exception('No data')
for packet in packets:
self._writer.write(packet)
class LocalConnection(asyncio.Protocol):
data_received_event = Event()
connection_made_event = Event()
def connection_made(self, transport: asyncio.BaseTransport) -> None:
self.transport = transport
self.connection_made(transport)
def data_received(self, data: bytes) -> None:
self.data_received(data)
def close_connection(self) -> None:
print('Close the client socket')
self.transport.close()
class AuxiliaryRelayService(object):
    """Relay between a local TCP client and the console's auxiliary stream.

    Listens on *listen_port*; local-client bytes are encrypted and sent to
    the console, console packets are decrypted and pushed back to the
    local client.
    """

    def __init__(
        self,
        loop: asyncio.AbstractEventLoop,
        connection_info: XStructObj,
        listen_port: int
    ):
        # Only a single advertised endpoint is supported.
        if len(connection_info.endpoints) > 1:
            raise Exception(
                'Auxiliary Stream advertises more than one endpoint!'
            )
        self._loop = loop
        self.crypto = AuxiliaryStreamCrypto.from_connection_info(
            connection_info
        )
        self.target_ip = connection_info.endpoints[0].ip
        self.target_port = connection_info.endpoints[0].port
        self.console_connection = ConsoleConnection(
            self.target_ip,
            self.target_port,
            self.crypto
        )
        # NOTE(review): loop.create_server is a coroutine; it is only
        # entered via `async with` in run() -- confirm this awaits it
        # correctly on the targeted Python version.
        self.server = self._loop.create_server(
            lambda: LocalConnection(),
            '0.0.0.0', listen_port
        )
        self.client_transport = None

    async def run(self):
        """Start listening and relay until cancelled."""
        async with self.server as local_connection:
            # NOTE(review): `local_connection` is the listening server
            # object here, not a LocalConnection instance -- verify these
            # event subscriptions reach the protocol class as intended.
            local_connection.data_received_event += self._handle_client_data
            local_connection.connection_made_event += self.connection_made
            while True:
                # HACK / FIXME
                await asyncio.sleep(10000)

    def connection_made(self, transport):
        # Remember the client transport so console data can be relayed back,
        # then bring up the console-side connection.
        self.client_transport = transport
        peername = transport.get_extra_info('peername')
        print('Connection from {}'.format(peername))
        self.console_connection.on_message += self._handle_console_data
        self.console_connection.start()

    def _handle_console_data(self, data):
        # Data from console gets decrypted and forwarded to aux client
        # NOTE(review): asyncio transports expose write(), not send() --
        # confirm client_transport actually provides send().
        if self.client_transport:
            self.client_transport.send(data)

    def _handle_client_data(self, data):
        # Data from aux client gets encrypted and sent to console
        # NOTE(review): ConsoleConnection.send is a coroutine and is not
        # awaited here -- confirm delivery is actually scheduled.
        self.console_connection.send(data)
| OpenXbox/xbox-smartglass-core-python | xbox/auxiliary/relay.py | relay.py | py | 4,650 | python | en | code | 71 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "asyncio.StreamReader",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "typing.Op... |
69905287713 | """
Entry point for the DB Load dispatcher based on scheduler events.
"""
import json
import queue
import uuid
from concurrent import futures
from typing import Any, Callable, Dict, List
from google.cloud import pubsub_v1
from common import settings as CFG
from common.data_representation.config import ConfigException
from common.data_representation.config.participant import ParticipantConfig
from common.dispatcher import is_cloud_function_exists
from common.elapsed_time import elapsed_timer
from common.logging import Logger
from common.thread_pool_executor import run_thread_pool_executor
from dispatcher.base_dispatcher import BaseDispatcher
DEFAULT_WORKER_REPLICA_AMOUNT = 20


class DbLoadDispatcher(BaseDispatcher):
    """Read participant configs from GCS buckets and dispatch dbload tasks
    to the DB load cloud function via Pub/Sub."""

    __description__ = "DB LOAD DISPATCHER"
    __local_cfg_name_tmpl__ = "dbload_{participant_id}_payload.json"

    def __init__(self, env_tz_info=CFG.ENVIRONMENT_TIME_ZONE) -> None:
        super().__init__(env_tz_info=env_tz_info)
        # Pub/Sub topic the DB load cloud function is subscribed to
        self._topic_path = self._publisher.topic_path(  # pylint: disable=no-member
            CFG.PROJECT, CFG.DB_LOAD_FUNCTION_NAME
        )
        # Queue of per-participant payloads produced by config parsing
        self._payloads = queue.Queue()

    def _get_payload(self, raw_cfg: dict, participant_id: int) -> dict:
        """Generate connector payload

        Args:
            raw_cfg (dict): Participant full configuration
            participant_id (int): Numeric participant identifier

        Returns:
            dict: Connector payload with one entry per configured meter
        """
        with elapsed_timer() as elapsed:
            payload = {
                "meters": [],
                "extra": {"participant_id": participant_id},
            }
            for cnctr_cfg in raw_cfg.get("connectors", []):
                for meter_name, meter_info in cnctr_cfg.get("meters", {}).items():
                    payload["meters"].append(
                        {
                            "meter_name": meter_name,
                            "meter_id": meter_info["meter_id"],
                            "meter_uri": meter_info["meter_uri"],
                            "type": meter_info["type"],
                            "extra": {
                                "timezone": meter_info.get("timezone", ""),
                                "standardized": {
                                    "bucket": meter_info.get(
                                        "meteredDataLocation", {}
                                    ).get("bucket", ""),
                                    "path": meter_info.get(
                                        "meteredDataLocation", {}
                                    ).get("path", ""),
                                },
                            },
                        }
                    )
            self._logger.debug(
                "The payload has been created.",
                extra={"labels": {"elapsed_time": elapsed()}},
            )
        return payload

    def _parse_participant_config(self, bucket) -> Dict:
        """Read and parse a single participant's config from its bucket."""
        with elapsed_timer() as elapsed:
            cfg = ParticipantConfig()
            cfg.read_from_bucket(
                bucket=bucket.name,
                subdirectory=CFG.CONFIG_BASE_PATH,
                filename=CFG.PARTICIPANT_CONFIG_NAME,
                binary_mode=False,
            )
            json_cfg = cfg.as_json()
            self._logger.debug(
                f"Completed `{bucket.name}` config parsing.",
                extra={"labels": {"elapsed_time": elapsed()}},
            )
        return json_cfg

    def _parse_participant_configs(self, buckets: List[Any]) -> None:
        """Parse each participant bucket config and queue dbload payloads."""
        self._logger.info("Processing participants configuration.")
        with elapsed_timer() as elapsed:
            for bucket in buckets:
                # FIX: str.lstrip strips a *character set*, not a prefix,
                # and could eat leading characters of the participant id.
                # Slice off the known prefix length instead.
                participant_id = int(bucket.name[len(CFG.BUCKET_PREFFIX):])
                self._logger.debug(
                    f"Processing participant {participant_id} configuration"
                )
                try:
                    json_cfg = self._parse_participant_config(bucket)
                except ConfigException as err:
                    self._logger.error(
                        f"Cannot parse {bucket.name}/{CFG.CONFIG_BASE_PATH}/"
                        f"{CFG.PARTICIPANT_CONFIG_NAME} due to the error "
                        f"'{err}'. Skipping."
                    )
                    continue
                if not json_cfg.get("connectors", []):
                    self._logger.warning(
                        f"The participant {participant_id} configuration does "
                        "not contain connectors. Skipping"
                    )
                    continue

                # TODO: OPENWEATHER ISSUE. DEBUG CODE. REMOVE AFTER COMPLETION
                payload = self._get_payload(json_cfg, participant_id)
                message_json = json.dumps(
                    {
                        "data": payload,
                    }
                )
                payload_data = message_json.encode("utf-8")
                self._logger.debug("=" * 40)
                self._logger.debug(f"payload_data - '{payload_data}'")
                self._logger.debug("=" * 40)
                # ============================================================

                self._payloads.put(payload)
            self._logger.debug(
                "Processed participants configuration.",
                extra={"labels": {"elapsed_time": elapsed()}},
            )

    @staticmethod
    def _pub_sub_done_callback(
        publish_future: pubsub_v1.publisher.futures.Future,  # pylint:disable=unused-argument
        payload: str,
        description: str,
        timeout: int = 120,
    ) -> Callable[[pubsub_v1.publisher.futures.Future], None]:
        """Build a completion callback that logs the publish result."""

        def callback(publish_future: pubsub_v1.publisher.futures.Future) -> None:
            call_logger = Logger(
                name=description,
                level="DEBUG",
                description=description,
                trace_id=uuid.uuid4(),
            )
            try:
                # Wait timeout seconds for the publish call to succeed.
                call_logger.info(
                    f"Result of call dbload for participant "
                    f"{payload['extra']['participant_id']} is"
                    f"{publish_future.result(timeout=timeout)}"
                )
            except futures.TimeoutError:
                call_logger.error(
                    f"Publishing {payload['extra']['participant_id']} timed out."
                )

        return callback

    @staticmethod
    def _call_dbload_worker(  # pylint:disable=too-many-arguments
        publisher: pubsub_v1.PublisherClient,
        topic_path: str,
        payload_queue: queue.Queue,
        futures_queue: queue.Queue,
        done_callback: Callable,
        description: str,
        worker_idx: int,  # pylint:disable=unused-argument
    ) -> None:
        """Drain payload_queue, publishing each payload to Pub/Sub.

        Gives up after observing an empty queue three times.
        """
        empty_run_count = 0
        while True:
            if payload_queue.empty():
                if empty_run_count == 3:
                    break
                empty_run_count += 1
            else:
                payload = payload_queue.get()
                message_json = json.dumps(
                    {
                        "data": payload,
                    }
                )
                payload_data = message_json.encode("utf-8")
                publish_future = publisher.publish(topic_path, data=payload_data)
                publish_future.add_done_callback(
                    done_callback(publish_future, payload, description=description)
                )
                futures_queue.put(publish_future)
                payload_queue.task_done()

    def _save_payload_locally(self):
        """Dump each queued payload to a local JSON file (DEBUG mode)."""
        with elapsed_timer() as elapsed:
            self._logger.debug("Saving payload locally.")
            if self._payloads.empty():
                # FIX: this fragment was missing its f-prefix, so the
                # function-name placeholder was logged literally.
                self._logger.warning(
                    "There is no payload required for the call "
                    f"`{CFG.DB_LOAD_FUNCTION_NAME}` function. Exit."
                )
                return None
            while not self._payloads.empty():
                payload = self._payloads.get()
                json_cfg = {
                    "data": payload,
                }
                filename = self.__local_cfg_name_tmpl__.format(
                    participant_id=json_cfg["data"]["extra"]["participant_id"]
                )
                file_path = self.__local_path__.joinpath(filename)
                with open(file_path, "w", encoding="UTF-8") as cfg_fl:
                    json.dump(json_cfg, cfg_fl, indent=4)
            self._logger.debug(
                "Saved payload locally.", extra={"labels": {"elapsed_time": elapsed()}}
            )

    def _run_dbload(self, workers_replica: int = DEFAULT_WORKER_REPLICA_AMOUNT) -> None:
        """Publish all queued payloads to the DB load function topic."""
        with elapsed_timer() as elapsed:
            self._logger.debug("Running db load.")
            if self._payloads.empty():
                self._logger.warning(
                    f"The data for call `{CFG.DB_LOAD_FUNCTION_NAME}` function "
                    "absent. Exit."
                )
                return None
            is_connector_exists = is_cloud_function_exists(CFG.DB_LOAD_FUNCTION_NAME)
            if not is_connector_exists:
                self._logger.error(
                    f"The `{CFG.DB_LOAD_FUNCTION_NAME}` connector is not "
                    f"deployed in the '{CFG.PROJECT}' environment. Exit"
                )
                return None
            result_queue = queue.Queue()
            pub_futures = run_thread_pool_executor(
                workers=[
                    (
                        self._call_dbload_worker,
                        [
                            self._publisher,
                            self._topic_path,
                            self._payloads,
                            result_queue,
                            self._pub_sub_done_callback,
                            self.__description__,
                        ],
                    )
                ],
                worker_replica=workers_replica,
            )
            for res in pub_futures:
                try:
                    res.result()
                except Exception as err:  # pylint: disable=broad-except
                    self._logger.error(
                        f"Received error '{err}' during call "
                        f"{CFG.DB_LOAD_FUNCTION_NAME}"
                    )
            pub_sub_futures = []
            while not result_queue.empty():
                pub_sub_futures.append(result_queue.get())
                result_queue.task_done()
            # Wait for all the publish futures to resolve before exiting.
            futures.wait(pub_sub_futures, return_when=futures.ALL_COMPLETED)
            self._logger.debug(
                "Completed db load run.", extra={"labels": {"elapsed_time": elapsed()}}
            )

    def run(self) -> None:
        """Run dispatcher loop."""
        with elapsed_timer() as elapsed:
            self._logger.debug("Run db load from scratch")
            buckets = self.get_buckets()
            self._parse_participant_configs(buckets)
            if not CFG.DEBUG:
                self._run_dbload()
            else:
                self._save_payload_locally()
            self._logger.debug(
                "Completed load run.", extra={"labels": {"elapsed_time": elapsed()}}
            )
def main():
    """Entry point: build the dispatcher and execute a single run."""
    dispatcher = DbLoadDispatcher()
    dispatcher.run()
# TODO: Commented code below should be removed after restore logic completion
# def main1():
# """Entry point logic."""
# publisher = pubsub_v1.PublisherClient()
# restore_function_names_to_call = [
# func_name
# for func_name in list_cloud_functions()
# if func_name.startswith(CFG.DB_LOAD_RESTORE_FUNCTION_NAME_TEMPLATE)
# ]
# print(
# f"DEBUG: DB LOAD DISPATCHER: RESTORE FUNCTIONS TO CALL {restore_function_names_to_call}."
# )
# with elapsed_timer() as global_ellapsed:
# with elapsed_timer() as bckts_elpsd:
# buckets = get_buckets_list(
# project=CFG.PROJECT, preffix=CFG.BUCKET_PREFFIX
# )
# print(
# f"DEBUG: DB LOAD DISPATCHER: Loaded buckets list. Ellapsed time {bckts_elpsd()}."
# )
# for bucket in buckets:
# print(f"DEBUG: DB LOAD DISPATCHER: Processing {bucket.name} configuration")
# if DEBUG and bucket.name not in ALLOWED_BUCKETS:
# print(
# f"WARNING: DB LOAD DISPATCHER: Enabled DEBUG mode. "
# f"Disabled {bucket.name} processing. Skipping"
# )
# continue
# full_config_name = (
# f"{bucket.name}/{CFG.CONFIG_BASE_PATH}/{CFG.PARTICIPANT_CONFIG_NAME}"
# )
# # Entry Point for bucket operation multiprocessing
# participant_id = int(bucket.name.lstrip(CFG.BUCKET_PREFFIX))
# with elapsed_timer() as cnfg_prs_elpsd:
# try:
# cfg = ParticipantConfig()
# cfg.read_from_bucket(
# bucket=bucket.name,
# subdirectory=CFG.CONFIG_BASE_PATH,
# filename=CFG.PARTICIPANT_CONFIG_NAME,
# binary_mode=False,
# )
# json_cfg = cfg.as_json()
# print(
# f"DEBUG: DB LOAD DISPATCHER: Completed "
# f"{full_config_name} config parsing. "
# f"Ellapsed time {cnfg_prs_elpsd()}."
# )
# except ConfigException as err:
# print(
# f"ERROR: DB LOAD DISPATCHER: Cannot parse "
# f"{full_config_name} due "
# f"to the error {err}. Skipping"
# )
# continue
# if not json_cfg.get("connectors", []):
# print(
# f"WARNING: DB LOAD DISPATCHER: The participant "
# f'configuration "{full_config_name}" does not contain '
# "defined connectors"
# )
# with elapsed_timer() as payload_ellapsed:
# payload = _get_payload(json_cfg, participant_id)
# print(
# "DEBUG: DB LOAD DISPATCHER: The function "
# f"{CFG.DB_LOAD_FUNCTION_NAME} payload formed. "
# f"Ellapsed time {payload_ellapsed()}."
# )
# print(
# f"INFO: DB LOAD DISPATCHER: Calling {CFG.DB_LOAD_FUNCTION_NAME} to load data."
# )
# with elapsed_timer() as gcp_func_ex_elpsd:
# is_connector_exists = is_cloud_function_exists(
# CFG.DB_LOAD_FUNCTION_NAME
# )
# print(
# f"DEBUG: DB LOAD DISPATCHER: Completed check of cloud function "
# f"{CFG.DB_LOAD_FUNCTION_NAME} deployment. "
# f"Ellapsed time {gcp_func_ex_elpsd()}."
# )
# if not is_connector_exists:
# print(
# "ERROR: DB LOAD DISPATCHER: The given db load "
# f'connector "{CFG.DB_LOAD_FUNCTION_NAME}" '
# f'is not deployed to the "{CFG.PROJECT}" environment. '
# f"Skipping"
# )
# continue
# with elapsed_timer() as gcp_func_call_ellapsed:
# topic_path = publisher.topic_path( # pylint: disable=no-member
# CFG.PROJECT, CFG.DB_LOAD_FUNCTION_NAME
# )
# message_json = json.dumps(
# {
# "data": payload,
# }
# )
# data = message_json.encode("utf-8")
# print(f"DEBUG: TOPIC PATH {topic_path}")
# future = publisher.publish(topic_path, data=data)
# result = future.result()
# print(
# f"DEBUG: DISPATCHER: Data pushed to gcp topic "
# f"{CFG.DB_LOAD_FUNCTION_NAME}. "
# f"{gcp_func_call_ellapsed()}, with result {result}"
# )
# if restore_function_names_to_call:
# print(
# "DEBUG: DISPATCHER: Start calling DW restore cloud "
# f"functions {restore_function_names_to_call}. "
# )
# for func in restore_function_names_to_call:
# topic_path = publisher.topic_path( # pylint: disable=no-member
# CFG.PROJECT, func
# )
# function_id = func.split(
# CFG.DB_LOAD_RESTORE_FUNCTION_NAME_TEMPLATE
# )[-1]
# payload["function_id"] = function_id
# if not DEBUG:
# message_json = json.dumps(
# {
# "data": payload,
# }
# )
# data = message_json.encode("utf-8")
# print(f"DEBUG: TOPIC PATH {topic_path}")
# future = publisher.publish(topic_path, data=data)
# result = future.result()
# print(
# f"DEBUG: DISPATCHER: Data pushed to gcp topic {func}. "
# f"with result {result}"
# )
# else:
# json_cfg = {
# "data": payload,
# }
# with open(
# f"/tmp/participant_db_load_payload_{bucket[-1]}.json",
# "w",
# encoding="UTF-8",
# ) as prtcpnt_fl:
# json.dump(json_cfg, prtcpnt_fl, indent=4)
# print(
# "DEBUG: DISPATCHER: Finish calling DW restore cloud "
# f"functions {restore_function_names_to_call}. "
# )
# print(
# "DEBUG: DB LOAD DISPATCHER: Completed loop. "
# f"Execution time {global_ellapsed()}."
# )
# return "Prototype scheduler executed successfully"
# def _get_payload(raw_cfg: dict, participant_id: int) -> dict:
# """Generate connector payload
# Args:
# raw_mtr_cfg (dict): Meter full configuration
# Returns:
# dict: Connector payload
# """
# payload = {
# "meters": [],
# "extra": {"participant_id": participant_id},
# }
# for cnctr_cfg in raw_cfg.get("connectors", []):
# for meter_name, meter_info in cnctr_cfg.get("meters", {}).items():
# payload["meters"].append(
# {
# "meter_name": meter_name,
# "meter_id": meter_info["meter_id"],
# "meter_uri": meter_info["meter_uri"],
# "type": meter_info["type"],
# "extra": {
# "timezone": meter_info.get("timezone", ""),
# "standardized": {
# "bucket": meter_info.get("meteredDataLocation", {}).get(
# "bucket", ""
# ),
# "path": meter_info.get("meteredDataLocation", {}).get(
# "path", ""
# ),
# },
# },
# }
# )
# return payload
if __name__ == "__main__":
    # In general, to be able to run this code locally a few steps must be
    # completed first:
    # 1. Set up GCP authentication on the local environment
    #    1.1 Make sure the shell command 'gcloud auth application-default login'
    #        has been run beforehand
    # 2. Set up the project environment variables PROJECT and BUCKET_PREFFIX
    # NOTE: If the variables aren't defined in your ~/.bashrc file or directly
    # via the export command, the following values will be used:
    #   PROJECT = 'develop-epbp'
    #   BUCKET_PREFFIX = 'prototype_develop-epbp_participant_'
    # import debugpy
    # debugpy.listen(CFG.DEBUG_PORT)
    # debugpy.wait_for_client()  # blocks execution until client is attached
    # debugpy.breakpoint()
    main()
| JarosBaumBolles/platform | dispatcher/db_load_meters_data_dispatcher.py | db_load_meters_data_dispatcher.py | py | 20,905 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dispatcher.base_dispatcher.BaseDispatcher",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "common.settings.ENVIRONMENT_TIME_ZONE",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "common.settings",
"line_number": 30,
"usage_type": "... |
12624694803 | from flask import Blueprint
from flask_restful import Api
from .resources import PersonResource, PersonIdResource, PostUserResource, PostAccountResource, GetAccountResource, GetAccountIdResource
# Blueprint grouping every REST endpoint under the /api prefix
bp = Blueprint("restapi", __name__, url_prefix="/api")
api = Api(bp)


def init_app(app):
    """Register every REST resource on the blueprint and attach it to *app*."""
    routes = (
        (PersonResource, "/person/"),
        (PersonIdResource, "/person/<person_id>"),
        (PostUserResource, "/person/post/"),
        (GetAccountResource, "/account/get/"),
        (PostAccountResource, "/account/post/"),
        (GetAccountIdResource, "/account/get/<account_id>"),
    )
    for resource, route in routes:
        api.add_resource(resource, route)
    app.register_blueprint(bp)
| danielfernandow/Ies-Bank | bank/blueprints/restapi/__init__.py | __init__.py | py | 678 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask_restful.Api",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "resources.PersonResource",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "resource... |
class employee:
    """A basic employee record with payroll helpers."""

    no_of_emp = 0      # how many employee objects have been constructed
    percent = 1.04     # default pay-raise multiplier

    def __init__(self, first, last, pay):
        self.fname = first
        self.lname = last
        self.pay = pay
        self.mail = self.fname + self.lname + '@veoneer.com'
        employee.no_of_emp += 1

    def raise_amount(self):
        """Multiply pay by the raise factor (instance/class lookup order)."""
        self.pay = int(self.pay * self.percent)

    def fullname(self):
        """Return the employee's full name as 'first.last'."""
        return f'{self.fname}.{self.lname}'

    def __repr__(self):
        return f'{self.fname} {self.lname} {self.pay} '

    def __str__(self):
        return f'{self.fullname()}-{self.mail}'

    def __add__(self, other):
        """Combined pay of two employees."""
        return self.pay + other.pay

    @classmethod
    def amount_raise(cls, amount):
        """Set the class-wide raise multiplier."""
        cls.percent = amount

    @classmethod
    def from_string(cls, emp_str):
        """Alternate constructor from a 'first-last-pay' string."""
        first, last, pay = emp_str.split('-')
        return cls(first, last, pay)

    @staticmethod
    def work_day(day):
        """Return True when *day* falls on a weekend (Saturday/Sunday)."""
        return day.weekday() in (5, 6)


class developer(employee):
    """Employee specialised with a programming language; bigger raises."""

    percent = 1.10

    def __init__(self, first, last, pay, p_lang):
        super().__init__(first, last, pay)
        self.p_lang = p_lang


class manager(employee):
    """Employee that supervises a list of other employees."""

    def __init__(self, first, last, pay, Employees=None):
        super().__init__(first, last, pay)
        # Build a fresh list per instance (avoids the mutable-default pitfall)
        self.Employees = [] if Employees is None else Employees

    def add_emp(self, emp):
        """Add *emp* to the managed list if not already present."""
        if emp not in self.Employees:
            self.Employees.append(emp)

    def remove_emp(self, emp):
        """Remove *emp* from the managed list if present."""
        if emp in self.Employees:
            self.Employees.remove(emp)

    def print_emp(self):
        """Print the full name of every managed employee."""
        for emp in self.Employees:
            print('---->', emp.fullname())
# --- Demo / smoke test of the employee class hierarchy ---
e1 = developer('subhash', 'regati', 30000, 'python')
e2 = developer('sam', 'jack', 50000, 'java')
e3 = manager('test', 'engineer', 90000, [e1])
e3.add_emp(e2)
e3.print_emp()
print(e1.pay)
# Raise the class-wide multiplier, then apply it to e1
employee.amount_raise(1.10)
e1.raise_amount()
# Alternate constructor: parse "first-last-pay" (pay stays a string here)
new_emp_1 = employee.from_string("subhash-regati1-32123")
print(e1.pay)
print(e2.mail)
k = e2.fullname()
print(k)
print(new_emp_1.pay)
print(e1.no_of_emp)
import datetime
my_date = datetime.date(2022, 9, 1)
# True only for weekend dates (Sat/Sun)
work_day = employee.work_day(my_date)
print(work_day)
print(e1.p_lang)
print(isinstance(e1, employee))
print(issubclass(manager, employee))
print(e1)
# Dunder-method demonstrations
print(int.__add__(1, 2))
print(str.__add__('1', '2'))
print(e1+e2) | subhash-regati/practice_project | practice1.py | practice1.py | py | 2,310 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.date",
"line_number": 74,
"usage_type": "call"
}
] |
29727773984 | """
Various utility functions, probably mostly for plotting.
"""
from collections import OrderedDict
import pandas as pd
from .pymcGrowth import GrowthModel
def reformatData(dfd, alldoses, alldrugs, drug, params):
"""
Sample nsamples number of points from sampling results,
Reformat dataframe so that columns of the dataframe are params
Returns dataframe of columns: parameters and dose
"""
# Set up ordered dictionary for dose:idx
doseidx = OrderedDict()
flag = True
# Iterate from the last condition to the first condition
for i in range(len(alldrugs) - 1, -1, -1):
# Condition matches drug of interest
if alldrugs[i] == drug:
doseidx[alldoses[i]] = i
# Include the first control after drug conditions
elif alldrugs[i] == "Control" and flag and bool(doseidx):
doseidx[alldoses[i]] = i
flag = False
# Put dictionary items in order of increasing dosage
doseidx = OrderedDict(reversed(list(doseidx.items())))
# Reshape table for violinplot
# Columns: div, deathRate, apopfrac, dose
# Initialize a dataframe
dfplot = pd.DataFrame()
# Interate over each dose
# Columns: div, d, deathRate, apopfrac, dose
for dose in doseidx:
dftemp = pd.DataFrame()
for param in params:
dftemp[param] = dfd[param + "__" + str(doseidx[dose])]
dftemp["dose"] = dose
if "Data Type" in dfd.columns:
dftemp["Data Type"] = dfd["Data Type"]
dfplot = pd.concat([dfplot, dftemp], axis=0)
return dfplot
def violinplot(filename, swapDrugs=False):
    """
    Takes in a list of drugs
    Makes 1*len(parameters) violinplots for each drug

    NOTE(review): despite the name, this function only fits the model and
    reformats data per drug -- no plots are produced here; confirm intent.
    """
    # Load model and dataset
    # Read in dataframe
    classM = GrowthModel(filename)
    classM.performFit()
    # Get a list of drugs, de-duplicated while preserving order
    drugs = list(OrderedDict.fromkeys(classM.drugs))
    drugs.remove("Control")
    if swapDrugs:
        drugs = list(reversed(drugs))
    params = ["div", "deathRate", "apopfrac"]
    dfdict = {}
    # Reshape the fit results once per drug, keyed by drug name
    for drug in drugs:
        dfdict[drug] = reformatData(classM.df, classM.doses, classM.drugs, drug, params)
    return dfdict
| meyer-lab/ps-growth-model | grmodel/utils.py | utils.py | py | 2,238 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "collections.OrderedDict",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pand... |
3945339864 | #!/usr/bin/python
import web
import smbus
import math
# web.py URL routing table: every request to '/' is handled by class `index`
urls = (
    '/', 'index'
)
import time
# Hard iron offsets
x_offset = -618.954
y_offset = 733.05
# Power management registers
power_mgmt_1 = 0x6b
power_mgmt_2 = 0x6c
# Raw-reading scale divisors -- presumably LSB-per-unit values from the
# MPU-6050 datasheet; confirm against the configured full-scale ranges
gyro_scale = 131.0
accel_scale = 16384.0
address = 0x68  # This is the address value read via the i2cdetect command
bus = smbus.SMBus(1)  # or bus = smbus.SMBus(1) for Revision 2 boards
now = time.time()
# Complementary-filter weights: K for the gyro path, K1 for the accelerometer
K = 0.85
K1 = 1 - K
# Assumed interval between samples in seconds -- TODO confirm actual rate
time_diff = 0.01
def read_byte(adr):
    """Read a single byte from device register *adr* over I2C."""
    return bus.read_byte_data(address, adr)


def read_word(adr):
    """Read a big-endian 16-bit unsigned word starting at register *adr*."""
    high = bus.read_byte_data(address, adr)
    low = bus.read_byte_data(address, adr + 1)
    val = (high << 8) + low
    return val


def read_word_2c(adr):
    """Read a 16-bit word and interpret it as two's complement (signed)."""
    val = read_word(adr)
    if (val >= 0x8000):
        # Map the upper half of the unsigned range onto negative values
        return -((65535 - val) + 1)
    else:
        return val
def read_all():
    """Read and scale gyro and accelerometer values for all three axes.

    Returns a 6-tuple (gyro_x, gyro_y, gyro_z, accel_x, accel_y, accel_z).
    Units depend on the scale divisors above -- presumably deg/s and g;
    confirm against the sensor configuration.
    """
    raw_gyro_data = bus.read_i2c_block_data(address, 0x43, 6)
    raw_accel_data = bus.read_i2c_block_data(address, 0x3b, 6)
    # Each axis is a big-endian signed 16-bit value spread across two bytes
    gyro_scaled_x = twos_compliment((raw_gyro_data[0] << 8) + raw_gyro_data[1]) / gyro_scale
    gyro_scaled_y = twos_compliment((raw_gyro_data[2] << 8) + raw_gyro_data[3]) / gyro_scale
    gyro_scaled_z = twos_compliment((raw_gyro_data[4] << 8) + raw_gyro_data[5]) / gyro_scale
    accel_scaled_x = twos_compliment((raw_accel_data[0] << 8) + raw_accel_data[1]) / accel_scale
    accel_scaled_y = twos_compliment((raw_accel_data[2] << 8) + raw_accel_data[3]) / accel_scale
    accel_scaled_z = twos_compliment((raw_accel_data[4] << 8) + raw_accel_data[5]) / accel_scale
    return (gyro_scaled_x, gyro_scaled_y, gyro_scaled_z, accel_scaled_x, accel_scaled_y, accel_scaled_z)
def twos_compliment(val):
    """Convert an unsigned 16-bit value to its signed two's-complement form."""
    return val - 0x10000 if val >= 0x8000 else val
def dist(a, b):
    """Magnitude of the 2-D vector (a, b)."""
    return math.sqrt((a * a) + (b * b))


def get_x_rotation(x, y, z):
    """Roll angle in degrees derived from accelerometer components."""
    return math.degrees(math.atan2(y, dist(x, z)))


def get_y_rotation(x, y, z):
    """Pitch angle in degrees (negated) derived from accelerometer components."""
    return -math.degrees(math.atan2(x, dist(y, z)))


def get_z_rotation(x, y, z):
    # Yaw cannot be derived from the accelerometer alone; a constant
    # placeholder is returned instead.
    return 0.5
# Seed the complementary filter with one accelerometer-derived attitude and
# record the current gyro rates as at-rest zero offsets.
(gyro_scaled_x, gyro_scaled_y, gyro_scaled_z, accel_scaled_x, accel_scaled_y, accel_scaled_z) = read_all()
last_x = get_x_rotation(accel_scaled_x, accel_scaled_y, accel_scaled_z)
last_y = get_y_rotation(accel_scaled_x, accel_scaled_y, accel_scaled_z)
last_z = get_z_rotation(accel_scaled_x, accel_scaled_y, accel_scaled_z)
gyro_offset_x = gyro_scaled_x
gyro_offset_y = gyro_scaled_y
gyro_offset_z = gyro_scaled_z
# Accumulated gyro angles start from the seeded attitude minus the offsets
gyro_total_x = (last_x) - gyro_offset_x
gyro_total_y = (last_y) - gyro_offset_y
gyro_total_z = (last_z) - gyro_offset_z
class index:
    """web.py handler: each GET returns the current fused orientation as text."""

    def GET(self):
        # Filter state lives in module globals so it persists across requests
        global time_diff
        global gyro_scaled_x
        global gyro_scaled_y
        global gyro_scaled_z
        global accel_scaled_x
        global accel_scaled_y
        global accel_scaled_z
        global gyro_x_delta
        global gyro_y_delta
        global gyro_z_delta
        global rotation_x
        global rotation_y
        global rotation_z
        global last_x
        global last_y
        global last_z
        global gyro_total_x
        global gyro_total_y
        global gyro_total_z
        # Fresh sensor sample, with the at-rest gyro offsets removed
        (gyro_scaled_x, gyro_scaled_y, gyro_scaled_z, accel_scaled_x, accel_scaled_y, accel_scaled_z) = read_all()
        gyro_scaled_x -= gyro_offset_x
        gyro_scaled_y -= gyro_offset_y
        gyro_scaled_z -= gyro_offset_z
        # Integrate gyro rates over the (assumed) sample interval
        gyro_x_delta = (gyro_scaled_x * time_diff)
        gyro_y_delta = (gyro_scaled_y * time_diff)
        gyro_z_delta = (gyro_scaled_z * time_diff)
        gyro_total_x += gyro_x_delta
        gyro_total_y += gyro_y_delta
        gyro_total_z += gyro_z_delta
        rotation_x = get_x_rotation(accel_scaled_x, accel_scaled_y, accel_scaled_z)
        rotation_y = get_y_rotation(accel_scaled_x, accel_scaled_y, accel_scaled_z)
        # Complementary filter: blend integrated gyro with accelerometer angle
        last_x = K * (last_x + gyro_x_delta) + (K1 * rotation_x)
        last_y = K * (last_y + gyro_y_delta) + (K1 * rotation_y)
        rotation_z = get_z_rotation(last_x, last_y, gyro_total_z)
        last_z = K * (last_z + gyro_z_delta) + (K1 * rotation_z)
        # NOTE(review): z_angle is computed but unused (only referenced by the
        # commented-out print below) -- confirm intent
        z_angle = (gyro_total_z * 6) % 360
        # print str(last_x)+" "+str(last_y)+" "+str(z_angle)
        return str(last_x) + " " + str(last_y) + " " + str(get_z_rotation(accel_scaled_x, accel_scaled_y, accel_scaled_z))
if __name__ == "__main__":
    # Now wake the 6050 up as it starts in sleep mode
    bus.write_byte_data(address, power_mgmt_1, 0)
    # Serve the orientation readings over HTTP via web.py
    app = web.application(urls, globals())
    app.run()
| brewerdaniel/Minerva | arduino/Orientation/server.py | server.py | py | 4,542 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "smbus.SMBus",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "math.atan2",
"line_number": 75,
... |
20255659902 | import numpy as np
from PIL import Image
from progress.bar import Bar
# (parameter), point
def ikeda(u):
    """Return the Ikeda-map step function for parameter *u*.

    The returned callable maps a point (x, y) to its next iterate.
    """
    def step(point):
        x, y = point
        theta = 6.0 / ((x ** 2) + (y ** 2) + 1)
        cos_t = np.cos(theta)
        sin_t = np.sin(theta)
        return (1 + u * ((x * cos_t) - (y * sin_t)), u * ((x * sin_t) + (y * cos_t)))
    return step
# size, range, point
def toidx(size, R, p):
    """Map a point *p* inside region *R* onto integer pixel indices.

    size -- (width, height) in pixels
    R    -- ((xmin, xmax), (ymin, ymax)) coordinate bounds
    p    -- (x, y) point, assumed to lie inside R
    """
    x, y = p
    (xmin, xmax), (ymin, ymax) = R
    # Coordinate extent covered by one pixel along each axis
    step_x = (xmax - xmin) / (size[0] - 1)
    step_y = (ymax - ymin) / (size[1] - 1)
    return (int(np.round((x - xmin) / step_x)), int(np.round((y - ymin) / step_y)))
# (factor), value
def invscale(f):
    """Return an inverting rescaler: 0 maps to 255 (white), *f* maps to 0."""
    def scale(x):
        return 255 - x * (255 / f)
    return scale
# size, range, N, point, function
def frame(size, R, N, p, func):
    """Render one frame: iterate *func* N times from point *p*, histogram the
    visited points over region *R*, and return a greyscale PIL image."""
    T = np.zeros(size)
    for _ in range(N):
        new_p = func(p)
        p = new_p
        # Only accumulate points that fall inside the viewing region
        if (p[0] >= R[0][0] and p[0] <= R[0][1] and p[1] >= R[1][0] and p[1] <= R[1][1]):
            xi, yi = toidx(size, R, p)
            T[yi][xi] += 1
    # Invert the histogram so dense regions render dark on a white background
    f = np.max(T)
    # NOTE(review): indexing T[y][x] while iterating np.ndindex(T.shape) only
    # visits every cell because the frames used here are square -- confirm
    # for non-square sizes.
    for x, y in np.ndindex(T.shape):
        T[y][x] = invscale(f)(T[y][x])
    return Image.fromarray(T).convert('L').convert('P')
# size, range, N, point, parameter range, func
def generate(size, R, N, p, U, func):
    """Render one frame per parameter in *U* and save them as 'ikeda.gif'.

    size -- image (width, height); R -- plot region; N -- iterations per
    frame; p -- initial point; U -- iterable of map parameters; func --
    parameterised map factory (e.g. ``ikeda``).
    """
    imgs = []
    with Bar('Rendering', max=np.shape(U)[0]) as bar:
        for u in U:
            imgs.append(frame(size, R, N, p, func(u)))
            bar.next()
    # Animated GIF: first frame plus the rest appended, looping forever
    imgs[0].save('ikeda.gif', save_all=True, append_images=imgs[1:], duration=30, loop=0)
if __name__ == "__main__":
generate((1000, 1000), ((-1.5, 2.5),(-1.5, 3.5)), 1000000, (0, 0), np.linspace(0.6, 0.999, 100), ikeda) | OneAndZero24/ikeda | ikeda.py | ikeda.py | py | 1,572 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.cos",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 23,
... |
11053554578 | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from environments.mc_model.mc_environment import MonteCarloEnv
from collections import defaultdict
import pickle
def heatmap_Q(Q_tab, file_path=None, skip_T=False):
    """
    generates a heatmap based on Q_tab

    Parameters
    ----------
    Q_tab : dictionary
        a dictionary with values for all state-action pairs
    file_path : str
        where the files should be saved
    skip_T : bool
        whether or not the final time step should be included

    Returns
    -------
    None
    """
    optimal_bid = dict()
    optimal_ask = dict()
    optimal_MO = dict()
    plt.figure()
    # Greedy action per state; action tuples are (bid, ask, MO) indices with
    # bid/ask shifted by +1 to express depth levels
    for state in list(Q_tab.keys()):
        optimal_action = np.unravel_index(Q_tab[state].argmax(), Q_tab[state].shape)
        optimal_bid[state] = optimal_action[0] + 1
        optimal_ask[state] = optimal_action[1] + 1
        optimal_MO[state] = optimal_action[2]
    # States appear to be (inventory, t) tuples (cf. axis labels) -- unstack
    # into a 2-D grid for the heatmap
    ser = pd.Series(
        list(optimal_bid.values()), index=pd.MultiIndex.from_tuples(optimal_bid.keys())
    )
    df = ser.unstack().fillna(0)
    if skip_T:
        # Drop the terminal time-step column
        df = df.iloc[:, :-1]
    fig = sns.heatmap(df)
    fig.set_title("Optimal bid depth")
    fig.set_xlabel("t (grouped)")
    fig.set_ylabel("inventory (grouped)")
    if file_path == None:
        plt.show()
    else:
        plt.savefig(file_path + "opt_bid_heat")
        plt.close()
    plt.figure()
    ser = pd.Series(
        list(optimal_ask.values()), index=pd.MultiIndex.from_tuples(optimal_ask.keys())
    )
    df = ser.unstack().fillna(0)
    if skip_T:
        df = df.iloc[:, :-1]
    fig = sns.heatmap(df)
    fig.set_title("Optimal ask depth")
    fig.set_xlabel("t (grouped)")
    fig.set_ylabel("inventory (grouped)")
    if file_path == None:
        plt.show()
    else:
        plt.savefig(file_path + "opt_ask_heat")
        plt.close()
    plt.figure()
    ser = pd.Series(
        list(optimal_MO.values()), index=pd.MultiIndex.from_tuples(optimal_MO.keys())
    )
    df = ser.unstack().fillna(0)
    if skip_T:
        df = df.iloc[:, :-1]
    fig = sns.heatmap(df)
    fig.set_title("Market order")
    fig.set_xlabel("t (grouped)")
    fig.set_ylabel("inventory (grouped)")
    if file_path == None:
        plt.show()
    else:
        plt.savefig(file_path + "opt_mo_heat")
        plt.close()
def heatmap_Q_std(Q_std, file_path=None):
    """
    Plots a heatmap of the standard deviation of the q-value of the optimal actions

    Parameters
    ----------
    Q_std : defaultdict
        a defaultdict with states as keys and standard deviations as values
    file_path : str
        where the files should be saved

    Returns
    -------
    None
    """
    plt.figure()
    # State tuples become a MultiIndex, then unstack to a 2-D grid
    ser = pd.Series(list(Q_std.values()), index=pd.MultiIndex.from_tuples(Q_std.keys()))
    df = ser.unstack().fillna(0)
    fig = sns.heatmap(df)
    fig.set_title("Standard deviation of optimal actions")
    fig.set_xlabel("t (grouped)")
    fig.set_ylabel("inventory (grouped)")
    if file_path == None:
        plt.show()
    else:
        plt.savefig(file_path + "heatmap_of_std")
        plt.close()
def heatmap_Q_n_errors(Q_mean, Q_tables, n_unique=True, file_path=None):
    """
    Plots a heatmap of the difference in optimal actions between runs. Can show number of
    unique actions or number of actions not agreeing with mean optimal.

    Parameters
    ----------
    Q_mean : defaultdict
        a defaultdict with states as keys and mean q-values as values
    Q_tables : list
        a list with defaultdicts with states as keys and q-values as values
    n_unique : bool
        whether or not number of unique actions should be used or not. If False,
        errors compared to mean optimal will be used
    file_path : str
        where the files should be saved

    Returns
    -------
    None
    """
    # FIX: previously `Q_n_errors = Q_mean` aliased the caller's dict, and
    # the loops below overwrote its q-value arrays with integer counts,
    # corrupting Q_mean for any later use.  Use a fresh dict instead.
    Q_n_errors = {}
    if n_unique:
        # ----- CALCULATE THE NUMBER OF UNIQUE ACTIONS -----
        title = "Number of unique of optimal actions"
        vmin = 1
        for state in list(Q_mean.keys()):
            opt_action_array = []
            for Q_tab in Q_tables:
                opt_action = np.unravel_index(Q_tab[state].argmax(), Q_tab[state].shape)
                opt_action_array.append(opt_action)
            n_unique_opt_actions = len(set(opt_action_array))
            Q_n_errors[state] = n_unique_opt_actions
    else:
        # ----- CALCULATE THE NUMBER ERRORS COMPARED TO MEAN OPTIMAL -----
        title = "Number of actions not agreeing with mean optimal action"
        vmin = 0
        for state in list(Q_mean.keys()):
            num_errors = 0
            for Q_tab in Q_tables:
                # True (counts as 1) when this run's greedy action differs
                # from the greedy action of the mean q-values
                error = np.unravel_index(
                    Q_tab[state].argmax(), Q_tab[state].shape
                ) != np.unravel_index(Q_mean[state].argmax(), Q_mean[state].shape)
                num_errors += error
            Q_n_errors[state] = num_errors
    plt.figure()
    ser = pd.Series(
        list(Q_n_errors.values()), index=pd.MultiIndex.from_tuples(Q_n_errors.keys())
    )
    df = ser.unstack().fillna(0)
    fig = sns.heatmap(df, vmin=vmin, vmax=len(Q_tables))
    fig.set_title(title)
    fig.set_xlabel("t (grouped)")
    fig.set_ylabel("inventory (grouped)")
    if file_path == None:
        plt.show()
    else:
        if n_unique:
            plt.savefig(file_path + "n_unique_opt_actions")
            plt.close()
        else:
            plt.savefig(file_path + "n_errors_compared_to_mean")
            plt.close()
def _plot_depth_curve(optimal_depths, side, filename, file_path=None):
    """Plot the optimal depth per (grouped) time for every inventory level.

    Parameters
    ----------
    optimal_depths : dict
        mapping from (t, inventory) state tuples to the optimal depth
        for one side of the book
    side : str
        'bid' or 'ask', used in the plot title
    filename : str
        file name (without extension) used when saving
    file_path : str
        where the file should be saved; if None the figure is shown
    """
    ser = pd.Series(
        list(optimal_depths.values()),
        index=pd.MultiIndex.from_tuples(optimal_depths.keys()),
    )
    # Transpose so rows are time and each inventory level becomes a column.
    df = ser.unstack().T
    df.columns = "q=" + df.columns.map(str)
    df.plot.line(title="Optimal " + side + " depth", style=".-")
    plt.legend(loc="upper right")
    plt.xlabel("t (grouped)")
    plt.ylabel("depth")
    plt.xticks(np.arange(1, df.shape[0] + 1))
    if file_path == None:
        plt.show()
    else:
        plt.savefig(file_path + filename)
        plt.close()


def show_Q(Q_tab, file_path=None):
    """
    plotting the optimal depths from Q_tab

    Parameters
    ----------
    Q_tab : dictionary
        a dictionary with values for all state-action pairs
    file_path : str
        where the files should be saved

    Returns
    -------
    None
    """
    optimal_bid = dict()
    optimal_ask = dict()
    for state in list(Q_tab.keys()):
        # argmax over the flattened table -> (bid_idx, ask_idx, ...) action.
        optimal_action = np.array(
            np.unravel_index(Q_tab[state].argmax(), Q_tab[state].shape)
        )
        # +1 converts 0-based indices to 1-based depths.
        [optimal_bid[state], optimal_ask[state]] = optimal_action[0:2] + 1
    # The bid and ask plots were previously duplicated inline; one helper
    # now renders both sides.
    _plot_depth_curve(optimal_bid, "bid", "opt_bid_strategy", file_path)
    _plot_depth_curve(optimal_ask, "ask", "opt_ask_strategy", file_path)
def load_Q(filename, default=True):
    """
    loads a Q table from a pkl file

    Parameters
    ----------
    filename : str
        name of the pickle file; it is looked up as "Q_tables/<filename>.pkl"
    default : bool
        if a defaultdictionary or a plain dictionary should be returned

    Returns
    -------
    If default is True, a 4-tuple:
        Q : defaultdict
            states -> Q tables; unseen states map to an all-zero table
        args : dict
            the model parameters
        n : int
            the number of episodes the Q-learning was run for
        rewards : list
            the saved rewards during training
    If default is False, only the raw dict of Q tables is returned
    (note the asymmetric return shape, kept for backward compatibility).
    """
    # Load the file; the context manager closes the handle even on errors
    # (the original left the file open).
    with open("Q_tables/" + filename + ".pkl", "rb") as file:
        Q_raw, args, n, rewards = pickle.load(file)

    # If we don't want a defaultdict, just return a dict
    if not default:
        return Q_raw

    # Find the action-grid dimension from one entry.
    # NOTE(review): assumes state (0, 0) is always present -- confirm.
    dim = Q_raw[(0, 0)].shape[0]

    # Transform to a default_dict so unseen states yield an all-zero table
    Q_loaded = defaultdict(lambda: np.zeros((dim, dim)))
    Q_loaded.update(Q_raw)

    return Q_loaded, args, n, rewards
| KodAgge/Reinforcement-Learning-for-Market-Making | code/utils/mc_model/plotting.py | plotting.py | py | 8,201 | python | en | code | 85 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "numpy.unravel_index",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pandas... |
37441104113 | from random import randint
from time import sleep
import colorama
from colorama import Fore, Style
from multiprocessing import Process
print(Fore.BLUE + Style.BRIGHT)
game_open = """
------------------------------------------------------------------------
------------------------------------------------------------------------
--------------------------/ \---\-----/---|-----------------------------
-------------------------/___\---\---/----|-----------------------------
------------------------/ \---\-/-----|-----------------------------
------------------------------------------------------------------------
--------------------Has a Game for YOU to Play!!------------------------
------------------------------------------------------------------------
"""
game_over = """
------------------------------------------------------------------------
--------------------GAME OVER YA WEIRDO...GO HOME!!---------------------
------------------------------------------------------------------------
"""
print(game_open)
print(Fore.CYAN)

# (prompt, caster, expected_answer) for each of the ten questions.
# String answers are compared case-insensitively; int answers are cast
# from the raw input (a non-numeric reply raises ValueError, as before).
QUESTIONS = [
    ('What year was I born? ', int, 2013),
    ('What\'s my favorite color? ', str, 'cyan'),
    ('How many toes does a Panda have? ', int, 6),
    ('What is the longest word in English? ', str,
     'pneumonoultramicroscopicsilicovolcanoconiosis'),
    ('Does Avi hate Football? T/F: ', str, 't'),
    ('Does cat urine glow in black-light? T/F ', str, 't'),
    ('Do more people drown in Fresh or Salt water? ', str, 'fresh'),
    # BUG FIX: the answer was the string '20' but the input was cast to
    # int, so this question could never be answered correctly.
    ('What percentage of your body\'s oxygen does the brain use? ', int, 20),
    ('What year did DiVinci finish his last major artwork? ', int, 1515),
    ('What percentage of your body weight is blood!!?? ', int, 8),
]

score = 0
# The original wrapped everything in `while question_list < 10:` but broke
# out unconditionally at the end, so it ran exactly once; the vestigial
# loop is removed here.
play = input('Are you interested? Y/N: ').lower()
if play == "y":
    print('Get ready for your first question!')
    for prompt, caster, expected in QUESTIONS:
        sleep(1)
        print('')
        answer = caster(input(prompt))
        if caster is str:
            answer = answer.lower()
        if answer == expected:
            score += 1
            print(f'Correct! Your score is now {score}/10')
        else:
            print('Sorry thats wrong!')
    sleep(1)
    print(game_over)
else:
    print(f'Thanks anyway...Your final score was {score}/10')
print(f"Your final score was {score}/10")
| PrairieWaltz/AviGame | avigame.py | avigame.py | py | 3,779 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "colorama.Fore.BLUE",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "colorama.Style.BRIGHT",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "coloram... |
39366146128 | import json, os
from PyQtX import QtWebKitWidgets, QtCore, QtWidgets, QtWebKit
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, view):
super(MainWindow, self).__init__()
self.installEventFilter(self)
self.view = view
self.setCentralWidget(view)
def eventFilter(self, object, event):
if event.type() == QtCore.QEvent.WindowActivate:
self.view.evtHandler("focusin", [])
elif event.type() == QtCore.QEvent.WindowDeactivate:
self.view.evtHandler("focusout", [])
return False
def gen_event_handler(self, event):
def func():
self.view.evtHandler(event, [])
return func
def build_menu(self, data):
self.statusBar()
menubar = self.menuBar()
for menu_d in data:
menu = menubar.addMenu(menu_d['name'])
for entry in menu_d['items']:
tmp = QtWidgets.QAction(entry['name'], self)
tmp.setShortcut(entry['shortcut'])
tmp.setStatusTip(entry['statustip'])
tmp.triggered.connect(
self.gen_event_handler(entry['trigger'])
)
menu.addAction(tmp)
class Viewer(QtWebKitWidgets.QWebView):
def __init__(self, args):
QtWebKitWidgets.QWebView.__init__(self)
self.page().mainFrame().loadFinished.connect(self.loadFinished)
self.page().mainFrame().javaScriptWindowObjectCleared.connect(
self.javaScriptWindowObjectCleared
)
self.file = QtCore.QUrl('file:///' + os.path.abspath(args['file']))
self.load(self.file)
if args['verbose']:
QtWebKit.QWebSettings.globalSettings().setAttribute(
QtWebKit.QWebSettings.DeveloperExtrasEnabled,
True
)
self.web_inspector = QtWebKitWidgets.QWebInspector()
self.web_inspector.setPage(self.page())
self.web_inspector.show()
def evtHandler(self, key, args):
args = json.dumps(args)
key = json.dumps(key)
print("Events.__pyTrigger("+key+", "+args+")")
self.page().mainFrame().evaluateJavaScript(
"Events.__pyTrigger("+key+", "+args+")"
)
def keyPressEvent(self, e):
self.evtHandler("keypress", [str(e.key())])
QtWebKitWidgets.QWebView.keyPressEvent(self, e)
def keyReleaseEvent(self, e):
self.evtHandler("keyrelease", [str(e.key())])
QtWebKitWidgets.QWebView.keyReleaseEvent(self, e)
def loadFinished(self, ok):
self.show()
def set_js_interface(self, jsi):
self.js_interface = jsi
def javaScriptWindowObjectCleared(self):
self.page().mainFrame().addToJavaScriptWindowObject(
"PyInterface",
self.js_interface
)
| kpj/WebWrapper | python/gui.py | gui.py | py | 2,402 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "PyQtX.QtWidgets.QMainWindow",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "PyQtX.QtWidgets",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "PyQtX.QtCore.QEvent",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name"... |
28672285057 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import platform
import sys
import typing
import numpy as np
import pandas as pd
import tensorflow as tf
sys.path.append(".")
import delay
import delay.agents
import delay.core
tf.disable_eager_execution()
keras = tf.keras
if typing.TYPE_CHECKING:
from typing import *
print('Python', platform.python_version())
print('Tensorflow', tf.VERSION)
print('Keras', keras.__version__)
# Configuration
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
H = 3600
D = 24 * H
FIT_PREDICT_KWARGS = dict(
batch_size=10000,
verbose=0,
callbacks=None,
)
DATA_PATH = './data/TencentCVR_base.feather'
COLUMN_CONFIG = delay.core.ColumnConfig(
click_ts='clickTime',
convert_ts='conversionTime',
features={
# name: (shape, dtype, categorical_size + 1, embedding_size)
'creativeID': ((), np.int32, 48836, 256),
'positionID': ((), np.int16, 21488, 256),
'connectionType': ((), np.int8, 5, 8),
'telecomsOperator': ((), np.int8, 4, 8),
'adID': ((), np.int16, 29726, 256),
'camgaignID': ((), np.int16, 6583, 128),
'advertiserID': ((), np.int16, 639, 32),
'appID': ((), np.int16, 465, 32),
'appPlatform': ((), np.int8, 2, 8),
'appCategory': ((), np.int8, 28, 8),
'sitesetID': ((), np.int8, 3, 8),
'positionType': ((), np.int8, 6, 8),
'age': ((), np.int8, 81, 16),
'gender': ((), np.int8, 3, 8),
'education': ((), np.int8, 8, 8),
'marriageStatus': ((), np.int8, 4, 8),
'haveBaby': ((), np.int8, 7, 8),
'hometown': ((), np.int16, 365, 32),
'residence': ((), np.int16, 405, 32),
},
other_embedding_size=8,
)
MIN_TS, MAX_TS, STEP_TS, EVAL_TS = 0, 12 * D, 1 * H, 7 * D
def intermediate_layers_config_fn():
return [
(keras.layers.Dense, dict(units=128,
activation=keras.layers.LeakyReLU(),
kernel_regularizer=keras.regularizers.L1L2(l2=1e-7),
name='hidden_1')
),
(keras.layers.Dense, dict(units=128,
activation=keras.layers.LeakyReLU(),
kernel_regularizer=keras.regularizers.L1L2(l2=1e-7),
name='hidden_2')
),
]
# Load data
data_provider = delay.core.DataProvider(df=pd.read_feather(DATA_PATH), cc=COLUMN_CONFIG,
fast_index=(MIN_TS, MAX_TS, STEP_TS))
print(len(data_provider.df), data_provider.df[COLUMN_CONFIG.convert_ts].notnull().mean())
# Experiments
def run_exp(method: 'delay.core.MethodABC'):
if isinstance(method, delay.core.Methods.FTP):
agent_class = delay.agents.FTPAgent
else:
agent_class = delay.agents.SimpleDNNAgent
agent = agent_class(method=method,
data_provider=data_provider,
intermediate_layers_config_fn=intermediate_layers_config_fn,
fit_predict_kwargs=FIT_PREDICT_KWARGS)
result = delay.core.run_streaming(agent, min_ts=MIN_TS, max_ts=MAX_TS, step_ts=STEP_TS, eval_ts=EVAL_TS)
print(method.description, result)
return {'method': method.description, **result}
result_df = pd.DataFrame()
result_df = result_df.append(run_exp(delay.core.Methods.Prophet(
description='Prophet*',
)), ignore_index=True)
result_df = result_df.append(run_exp(delay.core.Methods.Waiting(
window_size=4 * H,
description='Waiting(4h)',
)), ignore_index=True)
result_df = result_df.append(run_exp(delay.core.Methods.Waiting(
window_size=6 * H,
description='Waiting(6h)',
)), ignore_index=True)
result_df = result_df.append(run_exp(delay.core.Methods.Waiting(
window_size=12 * H,
description='Waiting(12h)',
)), ignore_index=True)
result_df = result_df.append(run_exp(delay.core.Methods.Waiting(
window_size=24 * H,
description='Waiting(24h)',
)), ignore_index=True)
result_df = result_df.append(run_exp(delay.core.Methods.Waiting(
window_size=48 * H,
description='Waiting(48H)',
)), ignore_index=True)
result_df = result_df.append(run_exp(delay.core.Methods.FTP(
task_window_sizes=[1 * H, 6 * H, 24 * H, 48 * H],
n_more_shared_layers=1,
fp_train_min_ts=1 * D,
ema_loss_init=0.10,
enable_gradients=[1, 1, 1, 1, 1],
description='FTP',
)), ignore_index=True)
print(result_df)
| kmdict/FollowTheProphet | examples/delay_tencent2017.py | delay_tencent2017.py | py | 4,611 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.disable_eager_execution",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tens... |
14778728267 | import sqlalchemy
from sqlalchemy.orm import sessionmaker
import json
from models import create_tables, Publisher, Book, Shop, Stock, Sale
if __name__ == '__main__':
    # NOTE(review): credentials are hard-coded -- consider moving them to
    # environment variables or a config file.
    DSN = 'postgresql://postgres:postgres@localhost:5432/netology_db'
    engine = sqlalchemy.create_engine(DSN)

    create_tables(engine)

    Session = sessionmaker(bind=engine)
    session = Session()
    try:
        # Load the fixture data and insert one ORM object per JSON record.
        with open('data.json') as f:
            data = json.load(f)
        models = {'publisher': Publisher,
                  'book': Book,
                  'shop': Shop,
                  'stock': Stock,
                  'sale': Sale
                  }
        for element in data:
            session.add(models[element['model']](id=element['pk'], **element['fields']))
        session.commit()

        # Shops that stock books of the requested publisher.
        query = session.query(Shop)
        query = query.join(Stock, Stock.id_shop == Shop.id)
        query = query.join(Book, Book.id == Stock.id_book)
        query = query.join(Publisher, Publisher.id == Book.id_publisher)

        pub = input('Введите имя издателя или его id: ')
        try:
            publ = int(pub)
            # BUG FIX: filter on the converted integer `publ`; the original
            # discarded the conversion and filtered on the raw string `pub`.
            records = query.filter(Publisher.id == publ)
        except ValueError:
            records = query.filter(Publisher.name == pub)

        for shop in records:
            print(shop)
    finally:
        # Always release the DB session, even if loading or querying fails.
        session.close()
| Stelihon/SQL6 | main.py | main.py | py | 1,254 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "models.create_tables",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 13,
"usage_type": "call"
},
{
"api_name... |
30003130370 | #!/usr/bin/env python3
import os, sys, logging, subprocess, json, traceback
def main():
s = Setup()
s.interpret_arguments(sys.argv)
s.ensure_java_maven_exists()
s.ensure_conda_exists()
s.recreate_conda_environment()
s.install_via_pip('clingo==5.5.0.post3 jpype1==1.2.1')
s.reclone_hexlite('v1.4.0')
s.build_hexlite_java_api()
s.install_hexlite()
s.build_this_plugin()
# either use full classpath provided by maven (that is slow)
#s.get_classpath()
# or use only jar with dependencies (created by maven-shade-plugin, faster than asking mvn for classpath)
# the "./" is for being able to put log4j2.xml into ./
cwd = os.getcwd()
s.config['classpath'] = '%s/:%s/plugin/target/owlapiplugin-1.1.0.jar' % (cwd, cwd)
s.run_example('koala', ['querykoala1.hex'])
s.run_example('koala', ['querykoala2.hex'])
s.run_example('factory', ['domain.hex', 'query_allpainted.hex'])
s.run_example('factory', ['domain.hex', 'query_deactivatable.hex'])
s.run_example('factory', ['domain.hex', 'query_skippable.hex'])
class Setup:
PYTHONVER='3.7'
# for public access
#HEXLITE_CLONE_SOURCE='https://github.com/hexhex/hexlite.git'
# for developer access (including push possibility)
HEXLITE_CLONE_SOURCE='git@github.com:hexhex/hexlite.git'
def __init__(self):
self.config = {}
def __run_shell_get_stdout(self, cmd, allow_fail=False, wd=None):
logging.info("running %s", cmd)
env = os.environ.copy()
if 'classpath' in self.config:
# the first path is for the built plugin
# the second path is for the logging configuration
env['CLASSPATH'] = self.config['classpath'] + ':plugin/target/classes/:plugin/'
cwd = os.getcwd()
if wd is not None:
cwd = wd
p = subprocess.Popen('bash -c "%s"' % cmd, env=env, cwd=cwd, shell=True, stdout=subprocess.PIPE, stderr=sys.stderr)
stdout = p.communicate()[0].decode('utf-8')
if not allow_fail and p.returncode != 0:
raise Exception("failed program: "+cmd)
return stdout
def interpret_arguments(self, argv):
if len(argv) != 2:
raise Exception("expect exactly the following arguments: <conda-environment>")
self.config['env'] = argv[1]
logging.info("config is %s", self.config)
def ensure_conda_exists(self):
ver = self.__run_shell_get_stdout('conda --version')
logging.info("found conda version %s", ver)
def ensure_java_maven_exists(self):
ver = self.__run_shell_get_stdout('java --version')
logging.info("found java version %s", ver)
ver = self.__run_shell_get_stdout('mvn --version')
logging.info("found maven version %s", ver)
def recreate_conda_environment(self):
env = self.config['env']
assert(env != 'base')
self.__run_shell_get_stdout('conda env remove -y --name %s >&2' % env, allow_fail=True)
self.__run_shell_get_stdout('conda create -y --name %s python=%s pandas >&2' % (env, self.PYTHONVER))
self.__run_shell_get_stdout('source activate %s' % env)
def install_jpype_via_build(self, github_ref):
env = self.config['env']
logging.info('cloning jpype')
self.__run_shell_get_stdout("rm -rf jpype")
self.__run_shell_get_stdout("git clone https://github.com/jpype-project/jpype.git >&2 && cd jpype && git checkout %s >&2" % github_ref)
self.__run_shell_get_stdout("source activate %s && cd jpype && python setup.py sdist >&2" % (env,))
logging.info('building jpype into conda env')
# $ is interpreted by outer shell, but we want it to be interpreted by inner shell (the 'bash' started by __run_shell_get_stdout)
path = self.__run_shell_get_stdout('source activate %s && echo \\$CONDA_PREFIX' % env).strip()
logging.info('got path %s', path)
ld_orig = path+'/compiler_compat/ld'
ld_temp = path+'/compiler_compat/ld_temp'
logging.info("hiding %s as %s", ld_orig, ld_temp)
os.rename(ld_orig, ld_temp) # conda bug, see readme in hexlite repo
try:
self.__run_shell_get_stdout('source activate %s && cd jpype && pip install dist/* >&2' % env)
finally:
os.rename(ld_temp, ld_orig) # restore conda env
def install_jpype_via_conda(self, version=None):
env = self.config['env']
ver = ''
if version is not None:
ver = '='+version
self.__run_shell_get_stdout("source activate %s && conda install -y -c conda-forge jpype1%s >&2" % (env, ver))
def install_via_pip(self, what='"clingo>=5.5.0" "jpype1>=1.2.1"'):
env = self.config['env']
self.__run_shell_get_stdout("source activate %s && pip3 install %s >&2" % (env, what))
def reclone_hexlite(self, github_ref):
logging.info('cloning hexlite')
self.__run_shell_get_stdout("rm -rf hexlite")
self.__run_shell_get_stdout("git clone %s >&2 && cd hexlite && git checkout %s >&2" % (self.HEXLITE_CLONE_SOURCE,github_ref) )
def build_hexlite_java_api(self):
logging.info('building and installing hexlite Java API')
env = self.config['env']
self.__run_shell_get_stdout("source activate %s && cd hexlite/ && mvn clean compile package install >&2" % env)
def install_hexlite(self):
logging.info('installing hexlite')
env = self.config['env']
self.__run_shell_get_stdout("source activate %s && cd hexlite/ && python setup.py install >&2" % env)
def build_this_plugin(self):
logging.info('building OWLAPI Plugin')
env = self.config['env']
self.__run_shell_get_stdout("source activate %s && cd plugin && mvn clean compile package >&2" % env)
def get_classpath(self):
env = self.config['env']
self.config['classpath'] = self.__run_shell_get_stdout("source activate %s && cd plugin && mvn dependency:build-classpath -Dmdep.outputFile=/dev/stdout -q" % env)
logging.info("got classpath %s", self.config['classpath'])
def run_example(self, directory, hexfiles):
env = self.config['env']
cwd = os.getcwd()
call = "hexlite --pluginpath %s/hexlite/plugins/ --plugin javaapiplugin at.ac.tuwien.kr.hexlite.OWLAPIPlugin --number 33" % cwd
# call += ' --verbose'
# call += ' --debug'
call += ' --stats'
#call += ' --noeatomlearn'
logging.warning("TODO fix bug in FLP checker (parser?)")
call += ' --flpcheck=none'
execdir = os.path.join('examples', directory)
stdout = self.__run_shell_get_stdout("cd %s && source activate %s && %s -- %s" % (execdir, env, call, ' '.join(hexfiles)))
sys.stdout.write(stdout)
logging.basicConfig(level=logging.INFO)
try:
main()
except Exception:
logging.error(traceback.format_exc())
# vim:noet:nolist:
| hexhex/hexlite-owlapi-plugin | setup_and_test_within_conda.py | setup_and_test_within_conda.py | py | 6,303 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.environ.copy",
"line_numbe... |
36043600775 | import numpy as np
np.random.seed(1)
from keras.layers import Dense
from keras.models import Sequential
import matplotlib.pyplot as plt
# Synthetic 1-D regression data: y = 0.3 * x + 2 plus Gaussian noise.
X = np.linspace(-1,1,200)
Y = X*0.3 +2 +np.random.normal(0,0.05,(200,))
# First 160 samples train the model, the remaining 40 are held out for testing.
X_train = X[:160]
X_test = X[160:]
Y_train = Y[:160]
Y_test = Y[160:]
# Single-neuron linear model: one Dense layer learns slope and intercept.
model = Sequential(
    [
        Dense(1,input_dim=1),
    ]
)
# Mean-squared-error loss with plain SGD is the standard setup for linear regression.
model.compile(loss='mse', optimizer='sgd')
print("train---------")
print('Training -----------')
# Train for 301 full-batch steps, reporting the loss every 100 steps.
for step in range(301):
    cost = model.train_on_batch(X_train, Y_train)
    if step % 100 == 0:
        print('train cost: ', cost)
# test: evaluate the fitted model on the held-out 40 samples
print('\nTesting ------------')
cost = model.evaluate(X_test, Y_test, batch_size=40)
print('test cost:', cost)
# The learned weight/bias should approach the true slope 0.3 and offset 2.
W, b = model.layers[0].get_weights()
print('Weights=', W, '\nbiases=', b)
# plotting the prediction against the test targets
Y_pred = model.predict(X_test)
plt.scatter(X_test, Y_test)
plt.plot(X_test, Y_pred)
plt.show()
| ByronGe/machine-Learning | keras/classifier_exampleTest.py | classifier_exampleTest.py | py | 895 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.random.seed",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",... |
44011211302 | import numpy as np
import matplotlib.pyplot as plt
import sys
# Define the van Leer flux limiter function
def vanLeerFunc(X):
return (X + np.abs(X)) / (1 + X)
# Define required constants
A = 4e3
R = 8.314
NA = 6.022e23
VmMolar = 1.65e-5 # m^3 mol^-1, molar volume of monomer
rmMolar = (3/4*np.pi) * pow(VmMolar, 1/3) # effective radius of monomer (from the molar volume)
Vm = VmMolar / NA #(4/3)*np.pi*pow(rm,3)
rm = 1.87e-10 # pow((3*Vm) / (4*np.pi), (1/3))
D = 5e-11
Vtot = 1e-4 #0.0252-3 # m^3
MInf = 4e-3 #mol m^-3 #
u = 0.48 # coagulation parameter
gamma = 1.1 #0.4 # J m^-2
kB = 1.38e-23
#k = 1e12 # J mol^-1 m^-1 distributio n exponent constant
kr = 1.6e-9
density = 5.816e3 # kg m^-3
MW = 0.09586 # kg mol^-1
Q = (4*np.pi*density) / (3 * MW * Vtot * MInf)
PMEnergy = 1
# Define timesteps to iterate through & create an array
tmax = 1 # s
tdiff = 5e-6
tmin = tdiff
timeArray = np.linspace(tdiff, tmax, int((tmax-tmin+tdiff)/tdiff))
timeCount = 0 # Used to track number of time iterations
# Define radius bins
rdiff = 3e-11 # m
rmax = 8.1e-9
rmin = 5e-10
rBins = np.linspace(rmin,rmax,int((rmax-rmin+rdiff)/rdiff))
# Define temperature variable & heating rate
Temp = 573 # 453
#Tf = 567
#HR = 0.025 # K s^-1 (1.5 K min^-1)
# Define precursor population variable & array
PPop = 1e24
PConc = PPop / (NA * Vtot)
PArray = [PConc]
# Define supersaturation variable & array
SS = 8e3
SSArray = [SS]
# Monomers
MConc = 32 # mol m^-3
NArraysArray = [np.zeros(len(rBins))]
NNumArray = [np.sum(NArraysArray[0])]
"""
# Create an array of arrays to hold number of nuclei in each radius bin for different timesteps
NArraysArray = [[]]
# Give number of nanopartices initially
NNumMole = 60e-6 # mol
NNum = NNumMole * NA # number
print("Initial number of nanoparticles = " + str(NNum))
# Create the input distribution of nanoparticles
distributionArray = []
sum = 0
for r in rBins:
g = (1 / (1e-10 * np.sqrt((2*np.pi)))) * np.exp(-0.5*(np.power((r - 1e-9)/(1e-10), 2))) # Normal distribution with mean radius 1e-9 m & standard deviation 1e-10 m
gN = NNum * g * rdiff
sum += gN # Calculated to check this sums to NNum (approximately)
distributionArray.append(g)
NArraysArray[0].append(gN)
print("Sum of nanoparticles in initial distribution = " + str(sum))
# Plot the initial probability density function
plt.plot(rBins, distributionArray)
plt.title("Nanoparticle radius distribution t = 0")
plt.ylim(0, 4.5e9)
plt.show()
"""
# Plot the initial distribution of nanoparticles
plt.plot(rBins, NArraysArray[0])
plt.title("Nanoparticles number against radius at t = 0")
plt.ylim(0, 4.5e18)
plt.show()
# Create an array to hold the mean radius values
NArrayAvgR = [0] #[np.average(rBins, weights = NArraysArray[0])]
print("Initial avg N = " + str(NArrayAvgR[0]))
# Main loop steps forward in time
for time in timeArray:
timeCount += 1
# Calculate the change to the precursor population and add the result to an array
PConc -= (A*np.exp(-PMEnergy/Temp) * PConc * tdiff)
PArray.append(PConc)
#print("temp " +str(Temp))
# Calculate the critical radius
rCrit = (2*gamma*VmMolar) / (R*Temp*np.log(SS))
# Plot the nanoparticle distribution every few hundred iterations
if timeCount % 400 == 0:
plt.plot(rBins, NArraysArray[0])
plt.title("Nanoparticles number against radius at t = " + f'{(time-tdiff):.3g}') # Take away tdiff since we are actually plotting the graph from the previous here
#plt.ylim(0, 4.5e18)
#plt.plot(rCrit,1e9,'ro')
plt.show()
# Calculate the de-dimensionalised variables
phi = (R * Temp) / (2 * gamma *VmMolar)
psi = phi**2 * D * VmMolar * MInf
zeta = 1e5 # (D * phi) / kr
tau = time * psi
# Calculate p value
p = np.power((rCrit/rm), 3) # Need actual value of rm not molar value
# Calculate nucleation rate
RNuc = 8*np.pi*rm*D*NA * pow(SS, (p*u+1)) * pow(MInf, 2) * np.exp((-4*np.pi*pow(rCrit, 2)*gamma)/(3*kB*Temp)) #Need actual value of rm not molar value
# Add a new empty array to the N array of arrays to hold the newly calculated N values
NArraysArray.append([])
# Create empty arrays for the SS integral elements and the N values at radii values of (i+1/2) and (i-1/2) (reset each time iteration)
SSSumsArray = []
NHalfPosList = []
NHalfNegList = []
rCount = -1 # Keep track of the number of radius iterations (resets each time iteration)
for r in rBins:
rCount += 1
# Calculate standard deviation
sigma = np.sqrt((kB*Temp)/2) # (kB*Temp/2)
# Calculate value of distribution for current r value
g = (1 / (sigma * np.sqrt((2*np.pi)))) * np.exp((-np.power((r - rCrit), 2)) / (2*np.power(sigma, 2)))
#print("g = " + str(g))
#print("radius = " + str(r))
# Calculate the intermediate value of N, i.e. the nucleated N (for this radius value)
Nprime = NArraysArray[0][rCount] + (tdiff * RNuc * g)
# Deltapos & deltaneg use number of nanoparticles at radii (i+1) & (i-1), which don't exist at the boundaries, so set NHalfPos and NHalfNeg to previous N for these edge cases
if r == rBins[0] or r == rBins[len(rBins) - 1]:
NHalfPos = NArraysArray[0][rCount]
NHalfNeg = NArraysArray[0][rCount]
NHalfPosList.append(NHalfPos)
NHalfNegList.append(NHalfNeg)
else:
# Calculate deltapos and deltaneg
deltapos = NArraysArray[0][rCount+1] - NArraysArray[0][rCount]
deltaneg = NArraysArray[0][rCount] - NArraysArray[0][rCount-1]
# If one or both of the deltas = 0, one or both of their ratios will diverge, so include a condition to avoid this
if deltapos == 0 or deltaneg == 0:
NHalfPos = NArraysArray[0][rCount] # Set NHalfPos and NHalfNeg values equal to the previous N value
NHalfNeg = NArraysArray[0][rCount]
NHalfPosList.append(NHalfPos)
NHalfNegList.append(NHalfNeg)
else:
# Calculate NHalfPos and NHalfNeg using the van Leer function
NHalfPos = NArraysArray[0][rCount] + 0.5*vanLeerFunc(deltapos/deltaneg)*deltaneg
NHalfPosList.append(NHalfPos)
NHalfNeg = NArraysArray[0][rCount] - 0.5*vanLeerFunc(deltaneg/deltapos)*deltapos
NHalfNegList.append(NHalfNeg)
# Find beta values for radii values (i+1/2) and (i-1/2)
betaPos = (r+0.5*rdiff) * phi # de-dimensionalised radius
betaNeg = (r-0.5*rdiff) * phi # de-dimensionalised radius
# Calculate the growth rates using these beta values & then redimensionalise them
growthRatePosDD = (SS - np.exp(1/betaPos))/(betaPos + zeta) # De-dimensionalised
growthRateNegDD = (SS - np.exp(1/betaNeg))/(betaNeg + zeta) # De-dimensionalised
growthRatePos = growthRatePosDD * (psi/phi) # Re-dimensionalised
growthRateNeg = growthRateNegDD * (psi/phi) # Re-dimensionalised
# Check Courant condition & stop the program if not satisfied
if (np.abs(growthRatePos) * tdiff/rdiff) > 1 or (np.abs(growthRateNeg) * tdiff/rdiff) >1:
print("Courant condition not satisfied.")
print("Courant value for growthratepos " + str(np.abs(growthRatePos) * tdiff/rdiff))
print("Courant value for growthrateneg " + str(np.abs(growthRateNeg) * tdiff/rdiff))
sys.exit()
# Calculate new N & add to the array
NNew = Nprime - (tdiff/rdiff) * ((growthRatePos*NHalfPos) - (growthRateNeg*NHalfNeg)) # Calculate new N, i.e. N for current timestep
NArraysArray[1].append(NNew)
# Calculate element of SS integral for current r & add to array
SSIntegralElement = pow(r,3) * (NArraysArray[1][rCount] - NArraysArray[0][rCount])
SSSumsArray.append(SSIntegralElement)
#print("Q * r^3 = " + str(Q*pow(r,3)))
#print("SS Element = " + str(SSIntegralElement))
# Add mean radius of N to the array
NArrayAvgR.append(np.average(rBins, weights = NArraysArray[1]))
#print("Avg N radius = " + str(NArrayAvgR[timeCount]))
# Delete N array for previous timestep (to conserve memory, since it is no longer needed)
del NArraysArray[0]
# Calculate SS value for this timestep & add to the array
SS = SS - (Q * np.sum(SSSumsArray)) + ((A*PConc*tdiff/MInf)*np.exp(-PMEnergy/(R*Temp))) # Sum all integral element values and multiply by Q to approximate the integral (no precursor part to consider for growth only case)
SSArray.append(SS)
#print("SS " + str(SS))
#print(" ")
NNumArray.append(np.sum(NArraysArray[0]))
"""
# Increase the temperature by the heating rate up to the maximum
if (Temp < Tf):
Temp += tdiff*HR
"""
# Create new time array to include t = 0
timeArrayFull = np.linspace(0, tmax, int(((tmax-tmin+tdiff)/tdiff) + 1))
# Plot the final distribution of nanoparticles
plt.plot(rBins, NArraysArray[0])
plt.title("Final nanoparticle size distribution")
#plt.ylim(0, 4.5e18)
plt.plot(rCrit,1e9,'ro')
plt.show()
# Plot the change in precursor population over time
plt.plot(timeArrayFull, PArray)
plt.title('Precursor concentration against time')
plt.show()
print(NArrayAvgR)
# Plot mean radius against time
plt.plot(timeArrayFull, NArrayAvgR)
plt.title("Mean radius against time")
plt.xscale('log')
plt.show()
# Plot supersaturation against time
plt.plot(timeArrayFull, SSArray)
plt.title("Supersaturation against time")
plt.xscale('log')
plt.yscale('log')
plt.show()
# Plot the total number of nanoparticles over time
plt.plot(timeArrayFull, NNumArray)
plt.title("Number of nanoparticles over time")
plt.xscale('log')
plt.show()
| renjygit/FYP | Nucleation and Growth.py | Nucleation and Growth.py | py | 10,150 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.abs",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_numbe... |
21029279523 | from django.urls import path
from . import views
urlpatterns = [
path("", views.index, name="index"),
path("prices/", views.get_price, name="get_price"),
path("home/", views.trending_tickers_view, name="trending_tickers"),
path("new_stock/", views.new_user_stock, name="new_stock"),
]
| AirFryedCoffee/mystocksite | stocks/urls.py | urls.py | py | 309 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
22630967569 | """ Collection of general utilities """
import contextlib
import json
import logging
import os
from pathlib import Path
import shutil
import subprocess
from typing import Union
import yaml
from datalad.distribution.dataset import require_dataset
from datalad.support.exceptions import NoDatasetFound
class ConfigError(Exception):
"""Raised when there are missing parameters in the configuration"""
class NotPossible(Exception):
""" Raised when the requested action is not possible """
class UsageError(Exception):
""" Raised when the action is not possible due to wrong usage """
def get_config(filename: str) -> dict:
""" Read config from file.
Args:
filename: File to read the config from.
Returns:
The configuration as dictionary.
"""
config_file = Path(filename)
with config_file.open('r') as config_f:
config = yaml.safe_load(config_f)
return config
def write_config(config: dict, filename: str):
""" Write config to to file.
Args:
config: The configuration to write.
filename: File to write the config to.
"""
config_file = Path(filename)
with config_file.open('w') as config_f:
yaml.dump(config, config_f)
def setup_logging(log_level=logging.DEBUG):
""" Set up a logger.
Args:
log_level: The logging level to end in the log handler.
"""
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
formatter = logging.Formatter(
'[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s'
# default:
# '%(levelname)s:%(name)s:%(message)s'
)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(log_level)
return logger
def read_subjects(filename: str) -> Union[list, dict]:
""" Read in a json file
Args:
filename: The file to read.
Returns:
The contant of the file as
"""
print("Reading file", filename)
sub_file = Path(filename)
with sub_file.open("r") as my_file:
data = my_file.read()
subjects = json.loads(data)
for i in subjects:
print(i)
return subjects
def write_subjects(subjects: list, filename: str):
""" Write subject in a file.
Args:
subjects: list of subject dictionaries
e.g. [{"anon-sub": "20","acq": "cn85_3942"0},
{"anon-sub": "21","acq": "test"}]
filename: The filename where to write the subjects to.
"""
print("Write", filename)
with open(filename, "w") as my_file:
json.dump(subjects, my_file, indent=4, sort_keys=True)
def read_spec(file_name: Union[str, Path]) -> list:
""" Reads a datalad spec file and converts it into proper python objects
Args:
file_name: the studyspec file name.
"""
# allow string
file_name = Path(file_name)
# strip: file may contain empty lines
lines = file_name.read_text().strip().split("\n")
return list(map(json.loads, lines))
def copy_template(template: Union[str, Path], target: Union[str, Path],
this_file_path: Path = Path(__file__)):
""" Copies the template file to the target path
Args:
template: The path of the template file. Can be either absolute or
relative inside the current toolbox.
target: The file path where the template should be copied to. If the
target file does already exist it is not overwritten.
this_file_path: Optional; The path of the file in which this function
was called. In case the template dir is in a differnent path than
utils.
"""
template = Path(template)
target = Path(target)
if not target.exists():
# account for relative paths for template file
if not template.is_absolute():
template = this_file_path.parent.absolute()/template
shutil.copy(template, target)
class ChangeWorkingDir(contextlib.ContextDecorator):
""" Change the working directory temporaly """
def __init__(self, new_wd):
self.current_wd = None
self.new_wd = new_wd
def __enter__(self):
self.current_wd = Path.cwd()
os.chdir(self.new_wd)
def __exit__(self, exc_type, exc_val, exc_tb):
os.chdir(self.current_wd)
# signal that the exception was handled and the program should continue
return True
def get_logger_name() -> str:
""" Returns a common logger name
This should make it easier to identify the modules as part of the
data-pipeline tool set.
"""
return "data-pipeline"
def get_logger(my_class, postfix: str = None) -> logging.Logger:
"""Return a logger with a name corresponding to the tool set.
Will return a logger of the the name data-pipeline.<class_module>.<class>
or if postfix is set data-pipeline.<class_module>.<class><postfix>
Args:
my_class: class descriptor opject __class__
postfix: optional postfix to be added after the class logger name
"""
name = "{}.{}.{}".format(get_logger_name(),
my_class.__module__,
my_class.__name__)
if postfix is not None:
name += postfix
return logging.getLogger(name)
def run_cmd(cmd: list, log: logging.Logger, error_message: str = None,
raise_exception: bool = True, env: dict = None,
suppress_output: bool = False) -> str:
""" Runs a command via subprocess and returns the output
Args:
cmd: A list of strings definied as for subpocess.run method
log: a logging logger
error_message: Message to user when an error occures
raise_exception: Optional; If an exception should be raised or not in
case something went wrong during command execution.
env: Optional; In case the command should be exectued in a special
environment
suppress_output: Optional; In case the calling application want to
control the output separately, it can be disabled.
"""
try:
# pylint: disable=subprocess-run-check
proc = subprocess.run(cmd, capture_output=True, env=env)
except Exception:
if error_message:
log.error(error_message)
else:
log.error("Something went wrong when calling subprocess run",
exc_info=True)
raise
# capture return code explicitely instead of useing subprocess
# parameter check=True to be able to log error message
if proc.returncode:
if not suppress_output:
if proc.stdout:
log.info(proc.stdout.decode("utf-8"))
log.debug("cmd: %s", " ".join(cmd))
if error_message:
log.error("%s, error was: %s", error_message,
proc.stderr.decode("utf-8"))
raise Exception(error_message)
if raise_exception:
log.error("Command failed with error %s",
proc.stderr.decode("utf-8"))
raise Exception()
return proc.stdout.decode("utf-8")
def run_cmd_piped(cmds: list,
log: logging.Logger,
error_message: str = None) -> str:
""" Runs piped commands via subprocess and return the output
Args:
cmds: A list of commands, where a command is a list of strings definied
as for subpocess.run method
log: a logging logger
error_message: Message to user when an error occures
"""
if not cmds:
return ""
proc = subprocess.Popen(cmds[0], stdout=subprocess.PIPE)
try:
# pylint: disable=subprocess-run-check
for cmd in cmds:
proc = subprocess.Popen(cmd, stdin=proc.stdout,
stdout=subprocess.PIPE)
output, errors = proc.communicate()
except Exception:
if error_message:
log.error(error_message)
else:
log.error("Something went wrong when calling subprocess "
"communicate", exc_info=True)
raise
# capture return code explicitely instead of useing subprocess
# parameter check=True to be able to log error message
if errors:
if output:
log.info(output.decode("utf-8"))
log.debug("cmds: %s", " ".join(cmds))
if error_message:
log.error("%s, error was: %s", error_message,
errors.decode("utf-8"))
raise Exception(error_message)
log.error("Command failed with error %s", errors.decode("utf-8"))
raise Exception()
return output.decode("utf-8")
def check_cmd(cmd: list) -> bool:
""" Runs the command and checks if it runs through
Args:
cmd: The command to run in subprocess syntax, i.e. as a list of
strings.
Returns:
True if the command worked, False if not.
"""
try:
subprocess.run(cmd, check=True, capture_output=False,
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
return True
except subprocess.CalledProcessError:
return False
def show_side_by_side(left: list, right: list) -> str:
""" Shows two lists side by side
For example:
left = ["first line left"]
right = ["first line right", second line right]
will result in
"llllllllllllllllll rrrrrrrrrrrrrr\n
llllllll r\n
rrrrrrrrrr\n"
Args:
left: Content of the left column
right: Content of the right column
Return:
A string where both lists are printed side by side.
"""
col_width = max(len(line) for line in left) + 2 # padding
max_len = max(len(left), len(right))
left.extend([""] * (max_len - len(left)))
right.extend([""] * (max_len - len(right)))
result = ""
for row in zip(left, right):
result += "".join(word.ljust(col_width) for word in row) + "\n"
return result
def get_dataset(dataset_path: Union[str, Path], log):
""" Check if a dataset exist and get it
Args:
dataset_path: The path of the dataset
Returns:
The datalad dataset corresponding to the Path.
Raises:
UsageError: If the dataset was not created by the user.
"""
try:
dataset = require_dataset(dataset=dataset_path, check_installed=True)
except (ValueError, NoDatasetFound) as excp:
log.error("Dataset %s does not exist. Please run configuration "
"first.", dataset_path)
raise UsageError("Dataset does not exist. Please run configuration "
"first.") from excp
return dataset
| cbbs-md/data-pipeline | src/data_pipeline/utils.py | utils.py | py | 10,755 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "yaml.safe_load",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "yaml.dump",
"line_number... |
28838376515 | from flask import Flask, jsonify, request
weather_data = {
'San Francisco': {'temperature': 14, 'weather': 'Cloudy'},
'New York': {'temperature': 20, 'weather': 'Sunny'},
'Los Angeles': {'temperature': 24, 'weather': 'Sunny'},
'Seattle': {'temperature': 10, 'weather': 'Rainy'},
'Austin': {'temperature': 32, 'weather': 'Hot'},
}
# crud app
def create_app():
app = Flask(__name__)
@app.route('/weather/<string:city>', methods=['GET'])
def get_weather(city):
if city in weather_data:
return jsonify({'city': city, **weather_data[city]})
else:
return jsonify({'error': 'City not found'}), 404
@app.route('/weather', methods=['POST'])
def add_weather():
data = request.get_json()
city = data.get('city')
temperature = data.get('temperature')
weather = data.get('weather')
if not city or not temperature or not weather:
return jsonify({'error': 'Invalid data'}), 400
weather_data[city] = {'temperature': temperature, 'weather': weather}
return jsonify({'city': city, 'temperature': temperature, 'weather': weather}), 201
@app.route('/weather/<string:city>', methods=['PUT'])
def update_weather(city):
if city in weather_data:
data = request.get_json()
temperature = data.get('temperature')
weather = data.get('weather')
if temperature:
weather_data[city]['temperature'] = temperature
if weather:
weather_data[city]['weather'] = weather
return jsonify({'city': city, **weather_data[city]})
else:
return jsonify({'error': 'City not found'}), 404
@app.route('/weather/<string:city>', methods=['DELETE'])
def delete_weather(city):
if city in weather_data:
del weather_data[city]
return '', 204
else:
return jsonify({'error': 'City not found'}), 404
return app
if __name__ == '__main__':
app = create_app()
app.run()
| 9802HEMENSAN/GPT-3.0 | sprint-2/pytest/pytest-whether-2/app.py | app.py | py | 2,081 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
... |
44755585332 | from django.contrib import admin
from django.urls import path
from .views import *
urlpatterns = [
path('',MaincourseListView.as_view(),name='main-course-list'),
path('explore/',courseListView.as_view(),name='course-list'),
path('create/',courseCreateView.as_view(),name='course-create'),
# path('',courseListView.as_view(),name='course-list'),
path('dashboard/',teacher_dashboard,name='course-dashboard'),
path('dashboard/list',dashboardCourseListView.as_view(),name='course-dashboard-list'),
path('dashboard/course/create',courseCreateView.as_view(),name='course-dashboard-create'),
path('dashboard/course/module/create',moduleCreateView.as_view(),name='course-dashboard-module-create'),
path('dashboard/course/module/content/create',contentCreateView.as_view(),name='course-dashboard-content-create'),
path('dashboard/course/<int:pk>',dashboardCourseDetailView.as_view(),name='course-dashboard-detail'),
path('dashboard/course/module/<int:pk>',dashboardModuleDetailView.as_view(),name='course-dashboard-module-detail'),
path('detail/<int:pk>/',courseDetailView.as_view(),name='course-detail'),
path('dashboard/course/update/<int:pk>',courseUpdate.as_view(),name='course-dashboard-update'),
path('dashboard/course/module/update/<int:pk>',modeleUpdate.as_view(),name='module-dashboard-update'),
path('dashboard/course/module/content/update/<int:pk>',contentUpdate.as_view(),name='content-dashboard-update'),
path('course/<int:pk>/',enrollCourse.as_view(),name='enroll-course'),
path('course/module/<int:pk>/',enrollModule.as_view(),name='enroll-module'),
path('course/module/content/<int:pk>/',content.as_view(),name='enroll-content'),
path('course/enroll/<int:pk>/',Enroll,name='enroll'),
path('course/module/content/completed/<int:pk>/',completeContent,name='completed-content'),
path('course/module/completed/<int:pk>/',completedModule,name='completed-module'),
path('course/completed/<int:pk>/',completedCourse,name='completed-course'),
path('course/review/',review.as_view(),name='review'),
path('course/discussoin/',discussionListView.as_view(),name='discuss'),
path('course/discussoin/create',discussionCreateView.as_view(),name='discuss-create'),
path('course/discussoin/<int:pk>',discussionDetailView.as_view(),name='discuss-det'),
path('course/likes/<int:pk>',like_main_post, name='di-like_main_post'),
path('course/comment/',comment, name='di-comments'),
path('search/',search,name="search"),
] | Floran-Github/CoderHifi-Code | backend/course/urls.py | urls.py | py | 2,548 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
24535880130 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 12 15:10:04 2021
@author: davidfordjour
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
def Read_Two_Column_File(file_name):
"""Reading in the data for time and date."""
with open(file_name, 'r') as data:
x = [] # Creating empty lists for date and running times.
times = []
count = 0 # A line counter that starts at the top of the page
for line in data:
count+=1 # Moving to the next line
d = line.split(" ") #Indicating how columns are separated
times.append((d[0])) #Data in first column goes into times list
x.append(d[1]) #Data in second column goes into x-axis
return x, times
x, times = Read_Two_Column_File('times.txt') #Reading times.txt file
def String_To_Minutes(time):
"""Converting strings in times-list to minutes."""
if type(time) == bytes:
time = time.decode()
t = time.split(":") #Times are split with ":"
minutes = (float(t[0])) + (float(t[1])*0.05/3)
return minutes
y_axis = []
for y in times:
print(String_To_Minutes(y))
y_axis.append(String_To_Minutes(y))
print(x)
#print(y)
plt.plot(x,y_axis)
plt.xlabel("Date")
plt.ylabel("Mins")
plt.title("jogging times")
| davidfordjour/jog-times | jogtracker.py | jogtracker.py | py | 1,357 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "mat... |
31904766774 | from pyxavi.config import Config
from pyxavi.logger import Logger
from janitor.lib.system_info import SystemInfo
from janitor.lib.system_info_templater import SystemInfoTemplater
from janitor.lib.publisher import Publisher
from janitor.lib.mastodon_helper import MastodonHelper
from janitor.objects.queue_item import QueueItem
from janitor.objects.message import Message, MessageType
from flask import Flask
from flask_restful import Resource, Api, reqparse
app = Flask(__name__)
api = Api(app)
DEFAULT_MESSAGE_TYPE = str(MessageType.NONE)
class ListenSysInfo(Resource):
'''
Listener from remote SysInfo Report requests to log
'''
def __init__(self):
self._config = Config()
self._logger = Logger(self._config).getLogger()
self._sys_info = SystemInfo(self._config)
self._parser = reqparse.RequestParser()
self._parser.add_argument(
'sys_data', type=dict, required=True, help='No sys_data provided', location='json'
)
self._logger.info("Init SysInfo Listener")
super(ListenSysInfo, self).__init__()
def post(self):
"""
This is going to receive the POST request
"""
self._logger.info("Run SysInfo Listener app")
# Get the data
args = self._parser.parse_args()
if "sys_data" in args:
sys_data = args["sys_data"]
else:
return {
"error": "Expected dict under a \"sys_data\" variable was not present."
}, 400
# If there is no issue, just stop here.
if not self._sys_info.crossed_thresholds(sys_data, ["hostname"]):
self._logger.info("No issues found. Ending here.")
return 200
# Make it a message
message = SystemInfoTemplater(self._config).process_report(sys_data)
# Publish the queue
mastodon = MastodonHelper.get_instance(self._config)
publisher = Publisher(self._config, mastodon)
self._logger.info("Publishing one message")
publisher.publish_one(QueueItem(message))
self._logger.info("End.")
return 200
class ListenMessage(Resource):
'''
Listener from remote Message requests to log
'''
def __init__(self):
self._config = Config()
self._logger = Logger(self._config).getLogger()
self._parser = reqparse.RequestParser()
self._parser.add_argument(
'summary',
# type = str,
# required = True,
# help = 'No message provided',
location='form'
)
self._parser.add_argument(
'message',
# type = str,
# required = True,
# help = 'No message provided',
location='form'
)
self._parser.add_argument(
'hostname',
# type = str,
# required = True,
# help = 'No hostname provided',
location='form'
)
self._parser.add_argument(
'message_type',
# type = str,
# required = True,
# help = 'No message provided',
location='form'
)
self._logger.info("Init Message Listener Listener")
super(ListenMessage, self).__init__()
def post(self):
"""
This is going to receive the POST request
"""
self._logger.info("Run Message Listener app")
# Get the data
args = self._parser.parse_args()
if "summary" in args and args["summary"] is not None:
summary = args["summary"]
else:
summary = None
if "message_type" in args and args["message_type"] is not None:
message_type = args["message_type"]
else:
message_type = DEFAULT_MESSAGE_TYPE
if "message" in args and args["message"] is not None:
text = args["message"]
else:
return {
"error": "Expected string under a \"message\" variable was not present."
}, 400
if "hostname" in args and args["hostname"] is not None:
hostname = args["hostname"]
else:
return {
"error": "Expected string under a \"hostname\" variable was not present."
}, 400
# Build the message
icon = MessageType.icon_per_type(message_type)
if not summary:
message = Message(text=f"{icon} {hostname}:\n\n{text}")
else:
message = Message(summary=f"{icon} {hostname}:\n\n{summary}", text=f"{text}")
# Publish the queue
mastodon = MastodonHelper.get_instance(self._config)
publisher = Publisher(self._config, mastodon)
self._logger.info("Publishing one message")
publisher.publish_one(QueueItem(message))
self._logger.info("End.")
return 200
api.add_resource(ListenSysInfo, '/sysinfo')
api.add_resource(ListenMessage, '/message')
if __name__ == '__main__':
app.run(
host=Config().get("app.service.listen.host"),
port=Config().get("app.service.listen.port")
)
| XaviArnaus/janitor | listen.py | listen.py | py | 5,150 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask_restful.Api",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "janitor.objects.message.MessageType.NONE",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_n... |
25290296184 | from dataclasses import dataclass
import cv2
import math
import numpy as np
import base64
import struct
from numpy import int16, int8, uint16, uint8, uint32, int32
def drawHor(frame, theta, phi):
widAngle = 65
heiAngle = widAngle*3.0/4.0
height, width, channels = frame.shape
pm = (width/2, height/2)
pm = (pm[0], pm[1]+theta/widAngle*height)
r = width * .25
p1 = (int(pm[0]-r*math.cos(phi/57.0)), int(pm[1]+r*math.sin(phi/57.0)))
p2 = (int(pm[0]+r*math.cos(phi/57.0)), int(pm[1]-r*math.sin(phi/57.0)))
cv2.line(frame,p1,p2,(0,255,0),3)
return frame
def drawRange(frame, range):
height, width, channels = frame.shape
L = 12
R = 28
B = 50
Um = height - B
min = 50
max = 1000
if (range > 1000):
range=1000
if (range < 50):
range=50
U = int((Um-B)*range/(max-min))
color = (0, int(range/(max-min)*255), 255-int(range/(max-min)*255))
frame = cv2.rectangle(frame, (L,B), (R, U), color, -1)
return frame
def drawState(frame,state):
height, width, channels = frame.shape
text = ''
if (state == 0x01):
text = 'MAN'
font = cv2.FONT_HERSHEY_SIMPLEX
org = (width-150, height-50)
fontScale = 1
color = (255, 0, 0)
thickness = 2
frame = cv2.putText(frame, text, org, font, fontScale, color, thickness, cv2.LINE_AA)
return frame
@dataclass
class tlmAHRS:
time: uint32 = 0
phi: int16 = 0
theta: int16 = 0
psi: int16 = 0
P: int16 = 0
Q: int16 = 0
R: int16 = 0
accX: int16 = 0
accY: int16 = 0
accZ: int16 = 0
smiec: int16 = 0 #?????
@dataclass
class dataAHRS:
time: float = 0.0
phi: float = 0.0
theta: float = 0.0
psi: float = 0.0
P: float = 0.0
Q: float = 0.0
R: float = 0.0
accX: float = 0.0
accY: float = 0.0
accZ: float = 0.0
def encodeTlm(line):
bytes = base64.b64decode(line)
tlm = struct.unpack('@Lhhhhhhhhhh',bytes)
tlm = tlmAHRS(tlm[0], tlm[1], tlm[2], tlm[3], tlm[4], tlm[5], tlm[6], tlm[7], tlm[8], tlm[9], tlm[10])
return tlm | ksklorz/ITproj | src/cam/hud.py | hud.py | py | 2,095 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "math.cos",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 17,
"usa... |
14005775602 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 12 15:42:52 2018
@author: liuyang
"""
from datetime import datetime
import numpy as np
import pandas as pd
import MYSQLPD as sql
import math
def DatetoDigit(date):
digitdate=date.year*10000+date.month*100+date.day
return digitdate
#读取交易日期列表
def GetTradeDates(BegT,EndT,Freq='D'):
TDs=pd.read_csv(r"D:\Sam\PYTHON\Tradedates.csv",encoding='utf_8_sig',parse_dates=[0])
BTTD=TDs[(TDs['TRADEDATE']>=BegT)&(TDs['TRADEDATE']<=EndT)]
if Freq=='W':
TDDF=BTTD[['TRADEDATE','IsWeekEnd']]
TDList=TDDF[TDDF['IsWeekEnd']==1].TRADEDATE.tolist()
return TDList
elif Freq=='D':
TDList=BTTD[['TRADEDATE']].TRADEDATE.tolist()
return TDList
"""得到每日收益率数据的函数"""
def GetDayReturn(date):
digitdate=DatetoDigit(date)
dbname1='tyb_stock'
#SQLstrHead1='select * from stockdaily_basic where TRADEDATE='
SQLstrHead1='select TRADEDATE,STOCKID,BFQCLOSE,HFQCLOSE,DAYRETURN,TRADABLE from stockdaily_basic where TRADEDATE='
SQLstr1=SQLstrHead1+str(digitdate)
#SQLstr2=SQLstrHead2+str(digitPD)
DF=sql.toDF(SQLstr1,dbname1)
DF=DF.rename(columns={'DAYRETURN':'RETURN'})
#DF=DF[['TRADEDATE','STOCKID','BFQCLOSE','DAYRETURN','TRADABLE']]
#DF=DF.loc[(DF['SWLV1']!=0)&(DF['TRADABLE']==1)]
print(date)
return DF
"""得到指定间隔区间收益率的函数"""
def GetIntervalReturn(d1,d2):
DF1=GetDayReturn(d1)
DF2=GetDayReturn(d2)
DFL=DF1[['STOCKID','HFQCLOSE']]
DFL=DFL.rename(columns={'HFQCLOSE':'LASTCLOSE'})
DFM=pd.merge(DF2, DFL, how='left',on='STOCKID')
DFM['RETURN']=(DFM['HFQCLOSE']-DFM['LASTCLOSE'])*100/DFM['HFQCLOSE']
DFR=DFM[['TRADEDATE','STOCKID','HFQCLOSE','LASTCLOSE','TRADABLE','RETURN']]
print(d2)
return DFR
"""得到某一日的因子数据"""
def GetFactor(date):
digitdate=DatetoDigit(date)
dbname1='tyb_stock'
#SQLstrHead1='select * from stockdaily_basic where TRADEDATE='
SQLstrHead1='select TRADEDATE,STOCKID,MARKETVALUE,SWLV1,SWLV2,TRADABLE,BP from stockdaily_factor1 where TRADEDATE='
SQLstr1=SQLstrHead1+str(digitdate)
#SQLstr2=SQLstrHead2+str(digitPD)
DF=sql.toDF(SQLstr1,dbname1)
print(date)
#DF=DF.rename(columns={'DAYRETURN':'RETURN'})
#DF=DF[['TRADEDATE','STOCKID','BFQCLOSE','DAYRETURN','TRADABLE']]
#DF=DF.loc[(DF['SWLV1']!=0)&(DF['TRADABLE']==1)]
return DF
starttime = datetime.now()
#设定回测起止日期
BegT=datetime(2007,1,1)
EndT=datetime(2017,12,31)
TDList=GetTradeDates(BegT,EndT,Freq='D')
TDNum=len(TDList)
AllData=pd.DataFrame()
#Data=[GetDayReturn(TDList[i]) for i in range(0,TDNum)] #每日收益率数据
#Data=[GetIntervalReturn(TDList[i-1],TDList[i]) for i in range(1,TDNum)] #每日收益率数据
Data=[GetFactor(TDList[i]) for i in range(0,TDNum)]
AllData=pd.concat(Data,ignore_index=True)
"""
for i in range(0,TDNum):
d0=TDList[i]
ASDF=GetDayReturn(d0)
AllData=pd.concat([AllData,ASDF])
print (d0)
"""
"""
for i in range(1,TDNum):
d0=TDList[i-1]
d1=TDList[i]
ASDF=GetIntervalReturn(d0,d1)
AllData=pd.concat([AllData,ASDF])
print (d1)
"""
stoptime = datetime.now()
print(stoptime-starttime) | SamLiuYang/MFM | SFData.py | SFData.py | py | 3,509 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "MYSQLPD.toDF",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "MYSQLPD.toDF",
"line_nu... |
41287150445 | from django.db import models, transaction
from django.dispatch import receiver
from django.db.models.signals import post_delete
from api.espo_api_client import EspoClientMixin
from django.conf import settings
from petuni_main.celery import app
CRM_DO_NOTHING = 0
CRM_SOFT_DELETE = 1
CRM_TRUE_DELETE = 2
class CRMSignalMixin(EspoClientMixin, models.Model):
crm_id = models.CharField(max_length=18, null=True, blank=True, unique=True)
crm_api_path = None
serializer_class = None
request_type = None
queryset = None
sync_delete = CRM_DO_NOTHING # 0 не делаем ничего, 1 - шлем PATCH, 2 - шлем DELETE
def get_crm_api_action(self):
"""
Возвращает action для запроса EspoApi
"""
assert self.crm_api_path is not None, f'crm_api_path for {self.__class__.__name__} is undefined'
if self.crm_id:
return f'{self.crm_api_path}/{self.crm_id}'
return self.crm_api_path
@classmethod
def get_queryset(cls):
if cls.queryset is None:
return cls.objects.all()
return cls.queryset
@classmethod
def get_crm_serializer_class(cls):
"""
Возвращает сериализатор для запроса EspoApi
"""
assert cls.serializer_class is not None, f'serializer for {cls.__name__} is undefined'
return cls.serializer_class
def get_request_type(self):
"""
Возвращает method для запроса EspoApi
"""
if self.crm_id:
return 'PATCH'
return 'POST'
def get_sync_delete(self):
"""
Если sync_delete == True, будет отправлена таска на удаление в crm,
иначе обновление (для неудаляемых записей с safe-delete)
"""
assert self.sync_delete is not None, f'sync_delete for {self.__class__.__name__} is undefined'
return self.sync_delete
def crm_sync_delete(self):
"""
Синхронизация удаления
"""
method = 'DELETE'
data = {}
action = self.get_crm_api_action()
response = self.client.request(method, action, data)
def send_espo_request(self, *args, **kwargs):
"""
Синхронизация сохранения модели с crm
"""
queryset = self.get_queryset()
queryset = queryset.select_for_update()
with transaction.atomic():
instance = queryset.get(pk=self.pk) # TODO теперь мы берем инстанс в самой таске.
# можно переделать через селф
serializer_ = instance.get_crm_serializer_class()
data = serializer_(instance).data
data['skipDuplicateCheck'] = True
method = instance.get_request_type()
action = instance.get_crm_api_action()
response = self.client.request(method, action, data)
if not instance.crm_id:
instance.crm_id = response.get('id')
instance.save(*args, dont_sync=True, **kwargs)
def crm_request_related_link(self, category_id, link):
"""
Для создания связи между сущностями в срм. link - имя сущности,
category_id - айди записи этой сущности.
"""
self.refresh_from_db()
assert self.crm_id is not None, "impossible to relate without crm_id"
params = {'id': category_id}
action = self.get_crm_api_action()
link_action = f'{action}/{link}'
self.client.request('POST', link_action, params=params)
def crm_push(self, *args, **kwargs):
"""
Вызов синхронизации с crm
"""
self.send_espo_request(*args, **kwargs)
def save(self, *args, **kwargs):
dont_sync = kwargs.pop('dont_sync', False)
with transaction.atomic():
super().save(*args, **kwargs)
if (dont_sync != True and settings.CRM_ENABLED and
self.get_queryset().filter(pk=self.pk).exists()):
s = app.signature(
'crm.crm_sync',
kwargs={
'instance_id': self.pk,
'class_name': self.__class__.__name__
}
)
transaction.on_commit(lambda: s.apply_async())
class Meta:
abstract = True
@receiver(post_delete)
def crm_sync_delete(sender, instance, **kwargs):
if isinstance(instance, CRMSignalMixin) and instance.sync_delete == CRM_TRUE_DELETE:
with transaction.atomic():
s = app.signature(
'crm.crm_sync_delete',
kwargs={
'instance': instance
}
)
transaction.on_commit(lambda:s.apply_async()) | sdmitrievlolx/code_samples | crm/models.py | models.py | py | 5,118 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "api.espo_api_client.EspoClientMixin",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 14,
"usage_type": "name"
},
{
"... |
26308582150 | from flask.views import MethodView
from flask_jwt_extended import jwt_required
from flask_smorest import Blueprint, abort
from db import db
from Models import NoteModel, CategoryModel
from schemas import NoteSchema, NoteQuerySchema, CategorySchema
from sqlalchemy.exc import IntegrityError
blp = Blueprint("note", __name__, description="Operations on note")
@blp.route("/note")
class NoteList(MethodView):
@jwt_required()
@blp.arguments(NoteQuerySchema, location="query", as_kwargs=True)
@blp.response(200, NoteSchema(many=True))
def get(self, kwargs):
user_id = kwargs.get("user_id")
category_id = kwargs.get("category_id")
if user_id and category_id:
query = NoteModel.query.filter_by(user_id=user_id, category_id=category_id)
return query
if user_id:
query = NoteModel.query.filter_by(user_id=user_id)
return query
if category_id:
query = NoteModel.query.filter_by(category_id=category_id)
return query
return NoteModel.query.all()
@jwt_required()
@blp.arguments(NoteSchema)
@blp.response(200, NoteSchema)
def post(self, request_data):
note = NoteModel(**request_data)
category_id = request_data.get("category_id")
category_owner_id = CategoryModel.query.with_entities(CategoryModel.owner_id).filter_by(id=category_id).scalar()
if category_owner_id == request_data["user_id"] or category_owner_id is None:
try:
db.session.add(note)
db.session.commit()
except IntegrityError:
abort(400, message="Error when creating note")
return note
abort(403, message="User has no access to this category") | NATASHKAS/backlab1 | resources/NOTES.py | NOTES.py | py | 1,775 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask_smorest.Blueprint",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.views.MethodView",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "Models.NoteModel.query.filter_by",
"line_number": 22,
"usage_type": "call"
},
{
"a... |
24049590178 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__authors__ = 'Bruno Adelé <bruno@adele.im>'
__copyright__ = 'Copyright (C) 2013 Bruno Adelé'
__description__ = """A metar domolib"""
__license__ = 'GPL'
__version__ = '0.0.1'
# Require metar
# pip install git+https://github.com/tomp/python-metar.git
import re
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
from metar import Metar
from domolib.commons.decorator import command
from domolib.commons import cache
class dlmetar():
    """Download and decode the latest METAR report for one weather station."""

    def getresults(self, station):
        """Return decoded weather values for *station* as a dict.

        Fetches the station's METAR report from NOAA and extracts
        temperature, dew point, pressure, visibility and wind speed,
        deriving relative humidity and wind chill where possible.
        Returns ``None`` when the report cannot be fetched or parsed;
        keys are present only for values the report contains.
        """
        results = {}

        # Fetch the raw METAR report.
        url = 'http://weather.noaa.gov/pub/data/observations/metar/stations'
        r = urlopen('%s/%s.TXT' % (url, station))
        # A missing status code also fails this test (None != 200), so the
        # original's separate ``is None`` check was redundant.
        if r.getcode() != 200:
            return None

        # Keep only the report line that starts with the station id.
        # re.escape hardens the pattern against regex metacharacters.
        m = re.search(
            '%s .*' % re.escape(station),
            r.read().decode('utf-8')
        )
        if not m:
            return None

        # Decode METAR informations.
        code = m.group(0)
        decode = Metar.Metar(code)

        # Temperature (deg C).
        if decode.temp:
            results['temp'] = decode.temp.value()

        # Dew point temperature (deg C).
        if decode.dewpt:
            results['dewpt'] = decode.dewpt.value()

        # Pressure.
        if decode.press:
            results['pressure'] = decode.press.value()

        # Visibility.
        if decode.vis:
            results['visibility'] = int(decode.vis.value())

        # Wind speed, converted from knots to km/h.
        if decode.wind_speed:
            results['wind_speed'] = decode.wind_speed.value() * 1.852

        # Relative humidity approximated from temperature and dew point.
        if decode.temp and decode.dewpt:
            temp = decode.temp.value()
            dewpt = decode.dewpt.value()
            results['humidity'] = round(
                100 * ((112 - 0.1 * temp + dewpt) / (112 + 0.9 * temp)) ** 8,
                2
            )

        # Wind chill (km/h-based formula).
        if decode.temp and decode.wind_speed:
            speed = decode.wind_speed.value() * 1.852
            temp = decode.temp.value()
            results['wind_chill'] = round(
                13.12 + 0.6215 * temp +
                (0.3965 * temp - 11.37) * speed ** 0.16,
                2
            )

        return results
@command
def get_metarinfo(oaci_station):
"""
Get a weather from metar report
:param oaci_station:
:return:
"""
# Get result from cache
mycache = cache.cache(cachefile='dlmetar.get_metarinfo')
result = mycache.getcachedresults()
if result:
return result
# Compute the result and store in the cache file
obj = dlmetar()
result = obj.getresults(station=oaci_station)
mycache.setcacheresults(result)
return result | badele/domolib | domolib/plugins/weather/dlmetar/__init__.py | __init__.py | py | 2,849 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "urllib.urlopen",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "metar.Metar.Metar",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "metar.Metar",
"line_nu... |
# Simple checkout: read a product catalog and a request list from CSV,
# then print an itemized receipt with tax.
import csv
import datetime
from os import read

# Product catalog: product number -> [product name, retail price].
prod_dict = {
}

# Customer request: product number -> quantity (string as read from CSV).
req_dict = {
}

# Reserved for future itemized-receipt bookkeeping (currently unused).
receipt_dict = {
}

number_of_items = 0
subtotal = 0

current_date_and_time = datetime.datetime.now()

# Format the current date and time to include only
# the day of the week, the hour, and the minute.
formatted_dt = current_date_and_time.strftime("%A %I:%M %p")

# Load the product catalog.
with open("/Users/elijah/BYUI/Programming with functinos/Week3/products.csv") as products:
    reader = csv.reader(products)
    # Skip the header row.
    next(reader)
    for row in reader:
        # 0 - Product Number, 1 - Product Name, 2 - Retail Price
        prod_num = row[0]
        prod_name = row[1]
        prod_cost = row[2]
        prod_dict[prod_num] = [prod_name, prod_cost]

# Load the requested items.
with open("/Users/elijah/BYUI/Programming with functinos/Week3/request.csv") as request:
    reader = csv.reader(request)
    next(reader)
    for item in reader:
        # 0 - Product Number, 1 - Quantity
        prod_num = item[0]
        prod_quant = item[1]
        req_dict[prod_num] = prod_quant

print("\nRequested Items : \n")

for req_item in req_dict:
    name = prod_dict[req_item][0]
    cost = prod_dict[req_item][1]
    # BUG FIX: the quantity is a string; indexing [0] kept only its first
    # character, so multi-digit quantities were undercounted in the total.
    count = req_dict[req_item]
    subtotal += (float(cost) * int(count))
    print(f"{name:.15} : {count} @ ${cost:.5} \n")

tax = subtotal * 0.06
total = subtotal + tax

print("------------------------------\n")
print(f"Subtotal : {subtotal:.2f}")
print(f"Tax : {tax:.2f}")
print(f"Total : {total:.2f}")
print("\n------------------------------\n")
print(f"Date and time of checkout : {formatted_dt}\n\n")
| Elijah3502/CSE110 | Programming with functinos/Week3/03Prove.py | 03Prove.py | py | 2,087 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "csv.reader",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "csv.reader",
... |
4565998185 |
import json
from math import e
import stat
from typing import Dict
from data.google.google_email_repository import GooleCalendarEventRepository
from services.google_auth_service import GoogleAuthService
from framework.logger import get_logger
from googleapiclient.discovery import build
from google.oauth2.credentials import Credentials
from framework.serialization import Serializable
from framework.crypto.hashing import sha256
from datetime import datetime, timedelta
from dateutil import parser
logger = get_logger(__name__)

# Read-only scope: the service in this module only lists calendar events.
GOOGLE_CALENDAR_SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']
class CalendarEvent(Serializable):
    """Serializable snapshot of a single Google Calendar event."""

    def __init__(
        self,
        id: str,
        status: str,
        link: str,
        created_date,
        updated_date,
        summary: str,
        description: str,
        event_type: str,
        location: str,
        creator: str,
        organizer: str,
        start_date: Dict,
        end_date: Dict,
        visibility: str,
        attendees: list,
        reminders: list,
        extended_properties,
        recurring_event_id: str
    ):
        # Plain attribute copies; serialization behavior comes from the
        # Serializable base class.
        self.id = id
        self.status = status
        self.link = link
        self.created_date = created_date
        self.updated_date = updated_date
        self.summary = summary
        self.description = description
        self.event_type = event_type
        self.location = location
        self.creator = creator
        self.organizer = organizer
        self.start_date = start_date
        self.end_date = end_date
        self.visibility = visibility
        self.attendees = attendees
        self.reminders = reminders
        self.extended_properties = extended_properties
        self.recurring_event_id = recurring_event_id

    def generate_hash_key(
        self
    ):
        """Return a SHA-256 hash of the serialized event (change detection)."""
        # default=str stringifies any value json cannot encode natively.
        data = json.dumps(self.to_dict(), default=str)
        return sha256(data)

    @staticmethod
    def from_entity(data: Dict):
        """Rebuild an event from a stored entity dict (snake_case keys)."""
        return CalendarEvent(
            id=data.get('id'),
            status=data.get('status'),
            link=data.get('link'),
            created_date=data.get('created_date'),
            updated_date=data.get('updated_date'),
            summary=data.get('summary'),
            description=data.get('description'),
            event_type=data.get('event_type'),
            location=data.get('location'),
            creator=data.get('creator'),
            organizer=data.get('organizer'),
            start_date=data.get('start_date'),
            end_date=data.get('end_date'),
            visibility=data.get('visibility'),
            attendees=data.get('attendees'),
            reminders=data.get('reminders'),
            extended_properties=data.get('extended_properties'),
            recurring_event_id=data.get('recurring_event_id'))

    @staticmethod
    def from_event(data: Dict):
        """Build an event from a raw Google Calendar API item (camelCase keys)."""
        # Start/end arrive as nested objects with dateTime/timeZone fields.
        start_date = {
            'datetime': data.get('start', dict()).get('dateTime'),
            'timezone': data.get('start', dict()).get('timeZone')
        }

        end_date = {
            'datetime': data.get('end', dict()).get('dateTime'),
            'timezone': data.get('end', dict()).get('timeZone')
        }

        return CalendarEvent(
            id=data.get('id'),
            status=data.get('status'),
            link=data.get('htmlLink'),
            created_date=data.get('created'),
            updated_date=data.get('updated'),
            summary=data.get('summary'),
            description=data.get('description'),
            event_type=data.get('eventType'),
            location=data.get('location'),
            creator=data.get('creator', dict()).get('email'),
            organizer=data.get('organizer', dict()).get('email'),
            start_date=start_date,
            end_date=end_date,
            visibility=data.get('visibility'),
            attendees=data.get('attendees'),
            reminders=data.get('reminders'),
            extended_properties=data.get('extendedProperties'),
            recurring_event_id=data.get('recurringEventId'))
def ensure_datetime(
value: datetime | str
) -> datetime:
if isinstance(value, datetime):
return value
return parser.parse(value)
class CalendarService:
    def __init__(
        self,
        auth_service: GoogleAuthService,
        repository: GooleCalendarEventRepository
    ):
        """Store the auth-service and event-repository collaborators."""
        # Name-mangled (double underscore) to keep the collaborators private.
        self.__auth_service = auth_service
        self.__repository = repository
    async def get_calendar_client(
        self
    ) -> Credentials:
        """Build a Google Calendar API client with read-only scopes.

        NOTE(review): the return annotation says Credentials, but
        ``build()`` returns a googleapiclient service resource — the
        annotation looks wrong; confirm and correct upstream.
        """
        logger.info(f'Fetching calendar client')

        auth = await self.__auth_service.get_auth_client(
            scopes=GOOGLE_CALENDAR_SCOPES)

        return build("calendar", "v3", credentials=auth)
    async def get_calendar_events(
        self,
        start_date: str | datetime,
        end_date: str | datetime
    ):
        """List primary-calendar events between *start_date* and *end_date*.

        Dates may be datetimes or parseable date strings; recurring
        events are expanded (singleEvents=True) and results are ordered
        by start time.  Returns a list of CalendarEvent objects.
        """
        logger.info(
            f'Fetching calendar events from {start_date} to {end_date}')

        start_date = ensure_datetime(start_date)
        end_date = ensure_datetime(end_date)

        service = await self.get_calendar_client()

        # NOTE(review): appending a literal 'Z' assumes the datetimes are
        # naive UTC — confirm callers never pass tz-aware values.
        events_result = service.events().list(
            calendarId='primary',
            timeMin=f'{start_date.isoformat()}Z',
            timeMax=f'{end_date.isoformat()}Z',
            singleEvents=True,
            orderBy='startTime'
        ).execute()

        events = events_result.get('items', [])

        events = [CalendarEvent.from_event(event) for event in events]

        return events
async def sync_calendar_events(
self
):
logger.info(f'Syncing calendar events')
end_date = datetime.now().strftime('%Y-%m-%d')
start_date = (datetime.now() - timedelta(days=90)).strftime('%Y-%m-%d')
| danleonard-nj/kube-tools-api | services/kube-tools/services/calendar_service.py | calendar_service.py | py | 5,734 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "framework.logger.get_logger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "framework.serialization.Serializable",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 36,
"usage_type": "name"
},
{
"api_... |
70735133154 | import os
import pandas as pd
from joblib import Parallel, delayed
from util import get_interfaces_path, iter_cdd
NUM_WORKERS = 20
def get_counts(sfam_id):
path = os.path.join(get_interfaces_path(dataset_name), "by_superfamily",
str(int(sfam_id)), "{}_bsa.h5".format(int(sfam_id)))
store = pd.HDFStore(path)
obs = store.get("/observed")
obs_sizes = obs.groupby("ppi_type").size().reset_index(name='observed').T
del obs
inf = store.get("/inferred")
inf_sizes = inf.groupby("ppi_type").size().reset_index(name='inferred').T
sizes = pd.concat([obs_size, inf_sizes], axis=1)
changed = inf.groupby(["ppi_type", "ppi_type_obs"]).size().reset_index(name='count')
#changed = changed[changed["ppi_type"]!=changed["ppi_type_obs"]]
changed = changed.apply(lambda r: pd.Series({
"{}->{}".format(r.ppi_type_obs, r.ppi_type):r.count}, index="inferred"), axis=1)
del inf
store.close()
del store
import pdb; pdb.set_trace()
sizes = pd.concat([sizes, changed], axis=1)
def plot_ppi_types(superfams=None):
    """Gather PPI-type counts for every CDD superfamily in parallel."""
    # NOTE(review): this bails out when superfams IS given — the condition
    # looks inverted (restricting to the requested superfamilies was
    # probably intended); confirm against callers.
    if superfams is not None:
        return
    # NOTE(review): ``sizes`` is collected but never plotted or returned.
    sizes = Parallel(n_jobs=NUM_WORKERS)(delayed(get_counts)(sfam_id) \
        for _, sfam_id in iter_cdd(use_id=True, group_superfam=True))
| bouralab/Prop3D | Prop3D/visualize/plot_ppi_types.py | plot_ppi_types.py | py | 1,264 | python | en | code | 16 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "util.get_interfaces_path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.HDFStore",
... |
9204198406 | from django.conf.urls.defaults import *
from django.contrib.auth.models import User
from django.views.generic.simple import direct_to_template
from board.feeds import LatestPosts
from board.rpc import rpc_post, rpc_lookup, rpc_preview, rpc_ban
from board.views import *
# Feed registry consumed by django.contrib.syndication's feed view below.
feeds = {'latest': LatestPosts}

# Apps whose translations the JavaScript i18n catalog should include.
js_info_dict = {
    'packages': ('board',),
}

# URL map for the board app (legacy Django patterns() style).
urlpatterns = patterns('',
    (r'^$', thread_index, {}, 'board_index'),
    (r'search-results/$', search),
    (r'feedback/sent/$', direct_to_template, {'template':
        'board/feedback_sent.html'}),
    (r'^private/$', private_index, {}, 'board_private_index'),
    (r'^categories/$', category_index, {}, 'board_category_index'),
    (r'^favorites/$', favorite_index, {}, 'board_favorite_index'),
    (r'^edit_post/(?P<original>\d+)/$', edit_post, {}, 'board_edit_post'),
    (r'^threads/$', thread_index, {}, 'board_thread_index'),
    (r'^threads/id/(?P<thread_id>\d+)/$', thread, {}, 'board_thread'),
    (r'^threads/category/(?P<cat_id>\d+)/$', category_thread_index, {}, 'board_category_thread_index'),
    (r'^threads/category/(?P<cat_id>\d+)/newtopic/$', new_thread, {}, 'board_new_thread'),
    (r'^threads/post/(?P<post_id>\d+)/$', locate_post, {}, 'board_locate_post'),
    (r'^settings/$', edit_settings, {}, 'board_edit_settings'),

    # Groups
    (r'^groups/(?P<group_id>\d+)/manage/$', manage_group, {}, 'board_manage_group'),
    (r'^groups/(?P<group_id>\d+)/invite/$', invite_user_to_group, {}, 'board_invite_user_to_group'),
    (r'^groups/(?P<group_id>\d+)/remuser/$', remove_user_from_group, {}, 'board_remove_user_from_group'),
    (r'^groups/(?P<group_id>\d+)/grant_admin/$', grant_group_admin_rights, {}, 'board_grant_group_admin_rights'),
    (r'^del_thread/(?P<the_id>\d+)/$', 'board.views.del_thread'),

    # Invitations
    (r'invitations/(?P<invitation_id>\d+)/discard/$', discard_invitation, {}, 'board_discard_invitation'),
    (r'invitations/(?P<invitation_id>\d+)/answer/$', answer_invitation, {}, 'board_answer_invitation'),

    # RPC
    (r'^rpc/action/$', rpc, {}, 'board_rpc_action'),
    (r'^rpc/postrev/$', rpc_post, {}, 'board_rpc_postrev'),
    (r'^rpc/preview/$', rpc_preview, {}, 'board_rpc_preview'),
    (r'^rpc/user_lookup/$', rpc_lookup,
        {
            'queryset':User.objects.all(),
            'field':'username',
        }, 'board_rpc_user_lookup'
    ),
    (r'^rpc/ban/$', rpc_ban),

    # feeds
    (r'^feeds/(?P<url>.*)/$', 'django.contrib.syndication.views.feed', {'feed_dict': feeds}, 'board_feeds'),

    # javascript translations
    (r'^jsi18n/$', 'django.views.i18n.javascript_catalog', js_info_dict, 'board_js_i18n'),
)
| bawaaaaah/django-torrent-tracker | board/urls.py | urls.py | py | 2,676 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "board.feeds.LatestPosts",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.views.generic.simple.direct_to_template",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "board.rpc.rpc_post",
"line_number": 47,
"usage_type": "name"
},
... |
14314103404 | import numpy as np
import pandas as pd
import tensorflow as tf
import json
from keras.models import Model, Sequential
from keras.layers import Input, Activation, Dense
from keras.optimizers import SGD
from keras.utils.np_utils import to_categorical
from flask import Flask, render_template, jsonify
app = Flask(__name__, template_folder ='templates',static_url_path='/static')

# Load the training data.
sf_train = pd.read_csv('p5_training_data.csv')

# Correlation of every feature with the target column.
corr_matrix = sf_train.corr()
print(corr_matrix['type'])

# Drop weakly-correlated columns.
sf_train.drop(sf_train.columns[[5, 12, 14, 21, 22, 23]], axis=1, inplace=True)
print(sf_train.head())
# (The original loaded and pruned the training CSV a second time here;
# the duplicate pass produced an identical frame and was removed.)

# Load the validation data and drop the same columns.
sf_val = pd.read_csv('p5_val_data.csv')
sf_val.drop(sf_val.columns[[5, 12, 14, 21, 22, 23]], axis=1, inplace=True)

# Convert the DataFrames to NumPy arrays.
train_data = sf_train.values
val_data = sf_val.values

# Columns 2..end are the input features.
train_x = train_data[:,2:]
val_x = val_data[:,2:]

# Column 1 is the target, one-hot encoded.
train_y = to_categorical( train_data[:,1] )
val_y = to_categorical( val_data[:,1] )

# Build the network.
inputs = Input(shape=(16,))
h_layer = Dense(10, activation='sigmoid')(inputs)

# Softmax activation for multi-class classification.
outputs = Dense(3, activation='softmax')(h_layer)
model = Model(inputs=inputs, outputs=outputs)

# Optimizer / update rule.
sgd = SGD(lr=0.001)

# Compile the model with cross-entropy loss.
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

# Train, validating against the hold-out set.
model.fit(train_x, train_y, batch_size=10, epochs=5000, verbose=1, validation_data=(val_x, val_y))
model.save_weights('weights.h5')

# Predict all validation data.
predict = model.predict(val_x)

# Visualisation of the predictions (kept for reference):
#df = pd.DataFrame(predict)
#df.columns = [ 'Strength', 'Agility', 'Intelligent' ]
#df.index = val_data[:,0]

# Flask web server renders the templates below.
@app.route('/index')
@app.route('/')
def index():
    """Serve the landing page."""
    landing_page = "index.html"
    return render_template(landing_page)
@app.route('/dota2', methods=['GET'])
def dota():
    """Render the hero-attribute predictions as an HTML table.

    Builds a DataFrame from the module-level ``predict`` array (one row
    per validation hero, columns Strength/Agility/Intelligent) and
    renders it via the record.html template.
    """
    # (A dead, triple-quoted JSON variant of this handler was removed here.)
    df = pd.DataFrame(predict)
    df.columns = [ 'Strength', 'Agility', 'Intelligent' ]
    df.index = val_data[:,0]
    temp = df.to_dict('records')
    columnNames = df.columns.values
    rowwNames = df.index
    return render_template('record.html', records=temp, colnames=columnNames,len=len(rowwNames), rownames=rowwNames)
if __name__ == '__main__':
    # Listen on all interfaces so the dev server is reachable externally.
    app.run(host='0.0.0.0', port=5000)
| qualityassurancetools/JSTbackprop | dota2.py | dota2.py | py | 2,924 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"li... |
22743479426 | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
# Project-level URL map: admin plus each app's own urls module.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('monitoramento.urls')),
    path('users/', include('users.urls')),
    path('clientes/', include('clientes.urls')),
    path('veiculos/', include('veiculos.urls')),
    path('', include('grupoveiculos.urls')),
    path('', include('escritorio.urls')),
    path('', include('tecnico.urls')),
    path('cadastro/', include('cadastros.urls')),
]

# Serve uploaded media and collected static files (development setup).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| joaaovitorrodrigues/trackerreine | TrackerReine/urls.py | urls.py | py | 724 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "dja... |
16953372863 | from classifiers.decision_tree import DecisionTree
from collections import Counter
from utils.utility import pick_result
from random import randrange
import numpy
__author__ = 'Simon & Oskar'
class RandomForest:
    """Bagging ensemble of DecisionTree classifiers.

    Each tree is fitted on ``sample_size`` rows drawn with replacement
    from the training set; class labels are predicted by majority vote
    and probabilities by averaging the trees' estimates.
    """

    def __init__(self, max_depth = None, min_samples_leaf = 1, n_estimators = 10, sample_size = 200, max_features = None):
        self.criterion = "gini"
        self.max_features = max_features
        self.max_depth = max_depth
        self.min_samples_leaf = min_samples_leaf
        self.laplace = 0
        self.n_estimators = n_estimators
        self.bagging = 0
        self.sample_size = sample_size
        self.n_classes = None
        self.decision_trees = list()

    def fit(self, X, y):
        """Fit ``n_estimators`` trees on bootstrap samples of (X, y)."""
        if self.n_classes is None:
            self.n_classes = numpy.unique(y)
        if self.sample_size is None:
            # FIX: use integer division — numpy array shapes must be ints.
            self.sample_size = X.shape[0] // self.n_estimators

        # FIX: numpy.float was removed in NumPy 1.24; plain float is the
        # documented equivalent.
        rand_features = numpy.zeros(shape = (self.n_estimators, self.sample_size, X.shape[1]),
                                    dtype = float)
        rand_classes = numpy.zeros((self.n_estimators, self.sample_size))

        # Draw one bootstrap sample (rows with replacement) per tree.
        for i in range(self.n_estimators):
            temp = numpy.zeros(shape = (self.sample_size, X.shape[1]),
                               dtype = float)
            temp_class = numpy.zeros(shape = self.sample_size,
                                     dtype = float)
            for j in range(self.sample_size):
                r = randrange(0, X.shape[0])
                temp[j] = X[r]
                temp_class[j] = y[r]
            rand_features[i] = temp
            rand_classes[i] = temp_class

        for i in range(self.n_estimators):
            decision_tree = DecisionTree(max_features = self.max_features,
                                         max_depth = self.max_depth,
                                         min_samples_leaf = self.min_samples_leaf,
                                         laplace = self.laplace)
            decision_tree.fit(X = rand_features[i],
                              y = rand_classes[i])
            self.decision_trees.append(decision_tree)

    def predict(self, X):
        """Return the majority-vote class prediction for each row of X."""
        final_result = numpy.zeros((X.shape[0]))
        for row in range(X.shape[0]):
            result = numpy.zeros((self.n_estimators, 1))
            for i in range(self.n_estimators):
                result[i] = self.decision_trees[i].predict(numpy.array([X[row]]))
            result_flattened = result.ravel()
            c = Counter(result_flattened).most_common(numpy.unique(result_flattened).size)
            final_result[row] = pick_result(c)
        return final_result

    def predict_proba(self, X):
        """Return per-class probabilities averaged over all trees."""
        final_result = numpy.zeros((X.shape[0], len(self.n_classes)), float)
        for row in range(X.shape[0]):
            result = numpy.zeros((self.n_estimators, len(self.n_classes)), float)
            for i in range(self.n_estimators):
                result[i] = self.decision_trees[i].predict_proba(numpy.array([X[row]]))
            # (A no-op ``result.astype(...)`` whose return value was
            # discarded has been removed here.)
            for r in result:
                for i in range(final_result.shape[1]):
                    final_result[row, i] += float(r[i]) / float(self.n_estimators)
        return final_result
| lazi3b0y/RandomActsOfPizza | classifiers/random_forest.py | random_forest.py | py | 3,293 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.unique",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_num... |
38948168626 | from copy import copy
import itertools
from typing import List, Union
import pytest # type: ignore[import]
from testlib import on_time
from cmk.base.plugins.agent_based import job
from cmk.base.plugins.agent_based.agent_based_api.v1 import (
clusterize,
Result,
State as state,
Metric,
type_defs,
)
# Fixture: one finished job (SHREK) and one failed job with several
# concurrently running instances (SNOWWHITE).
SECTION_1: job.Section = {
    'SHREK': {
        'running': False,
        'start_time': 1547301201,
        'exit_code': 0,
        'metrics': {
            'real_time': 120.0,
            'user_time': 1.0,
            'system_time': 0.0,
            'reads': 0,
            'writes': 0,
            'max_res_bytes': 1234000,
            'avg_mem_bytes': 1000,
            'invol_context_switches': 12,
            'vol_context_switches': 23,
        },
    },
    'SNOWWHITE': {
        'running': False,
        'start_time': 1557301201,
        'exit_code': 1,
        'running_start_time': [
            1557301261, 1557301321, 1557301381, 1557301441, 1537301501, 1557301561
        ],
        'metrics': {
            'real_time': 360.0,
            'user_time': 0.0,
            'system_time': 0.0,
            'reads': 0,
            'writes': 0,
            'max_res_bytes': 2224000,
            'avg_mem_bytes': 0,
            'invol_context_switches': 1,
            'vol_context_switches': 2,
        },
    },
}
# Fixture: two finished jobs, one of which (backup.sh) also has a
# currently running instance.
SECTION_2: job.Section = {
    'backup.sh': {
        'running': False,
        'start_time': 1415204091,
        'exit_code': 0,
        'running_start_time': [1415205713],
        'metrics': {
            'real_time': 281.65,
            'user_time': 277.7,
            'system_time': 32.12,
            'reads': 0,
            'writes': 251792,
            'max_res_bytes': 130304000,
            'avg_mem_bytes': 0,
            'invol_context_switches': 16806,
            'vol_context_switches': 32779,
        },
    },
    'cleanup_remote_logs': {
        'running': False,
        'start_time': 1415153430,
        'exit_code': 0,
        'metrics': {
            'real_time': 9.9,
            'user_time': 8.85,
            'system_time': 0.97,
            'reads': 96,
            'writes': 42016,
            'max_res_bytes': 11456000,
            'avg_mem_bytes': 0,
            'invol_context_switches': 15,
            'vol_context_switches': 274,
        },
    },
}
TIME = 1594300620.0, "CET"
def _modify_start_time(
    j: job.Job,
    start_time: Union[float, List[int]],
) -> job.Job:
    """Return a shallow copy of *j* with its start time replaced.

    A list of timestamps replaces ``running_start_time``; a single
    float replaces ``start_time``.
    """
    modified: job.Job = copy(j)
    key = 'running_start_time' if isinstance(start_time, list) else 'start_time'
    modified[key] = start_time
    return modified
@pytest.mark.parametrize("timestr,expected_result", [
('0:00.00', 0.),
('1:02.00', 62.),
('35:30:2.12', 35 * 60**2 + 30 * 60 + 2.12),
])
def test_job_parse_real_time(timestr, expected_result):
assert job._job_parse_real_time(timestr) == expected_result
@pytest.mark.parametrize("string_table,expected_parsed_data", [
(
[
['==>', 'SHREK', '<=='],
['start_time', '1547301201'],
['exit_code', '0'],
['real_time', '2:00.00'],
['user_time', '1.00'],
['system_time', '0.00'],
['reads', '0'],
['writes', '0'],
['max_res_kbytes', '1234'],
['avg_mem_kbytes', '1'],
['invol_context_switches', '12'],
['vol_context_switches', '23'],
['==>', 'SNOWWHITE', '<=='],
['start_time', '1557301201'],
['exit_code', '1'],
['real_time', '6:00.00'],
['user_time', '0.00'],
['system_time', '0.00'],
['reads', '0'],
['writes', '0'],
['max_res_kbytes', '2224'],
['avg_mem_kbytes', '0'],
['invol_context_switches', '1'],
['vol_context_switches', '2'],
['==>', 'SNOWWHITE.27997running', '<=='],
['start_time', '1557301261'],
['==>', 'SNOWWHITE.28912running', '<=='],
['start_time', '1557301321'],
['==>', 'SNOWWHITE.29381running', '<=='],
['start_time', '1557301381'],
['==>', 'SNOWWHITE.30094running', '<=='],
['start_time', '1557301441'],
['==>', 'SNOWWHITE.30747running', '<=='],
['start_time', '1537301501'],
['==>', 'SNOWWHITE.31440running', '<=='],
['start_time', '1557301561'],
],
SECTION_1,
),
(
[
['==>', 'backup.sh', '<=='],
['start_time', '1415204091'],
['exit_code', '0'],
['real_time', '4:41.65'],
['user_time', '277.70'],
['system_time', '32.12'],
['reads', '0'],
['writes', '251792'],
['max_res_kbytes', '130304'],
['avg_mem_kbytes', '0'],
['invol_context_switches', '16806'],
['vol_context_switches', '32779'],
['==>', 'backup.sh.running', '<=='],
['start_time', '1415205713'],
['==>', 'cleanup_remote_logs', '<=='],
['start_time', '1415153430'],
['exit_code', '0'],
['real_time', '0:09.90'],
['user_time', '8.85'],
['system_time', '0.97'],
['reads', '96'],
['writes', '42016'],
['max_res_kbytes', '11456'],
['avg_mem_kbytes', '0'],
['invol_context_switches', '15'],
['vol_context_switches', '274'],
],
SECTION_2,
),
])
def test_parse(string_table, expected_parsed_data):
assert job.parse_job(string_table) == expected_parsed_data
# Expected check output (Results interleaved with Metrics) for the SHREK
# fixture when evaluated at the frozen TIME.
RESULTS_SHREK: List[Union[Metric, Result]] = [
    Result(state=state.OK, summary='Latest exit code: 0'),
    Result(state=state.OK, summary='Real time: 2 minutes 0 seconds'),
    Metric('real_time', 120.0),
    Result(state=state.OK, notice='Latest job started at Jan 12 2019 14:53:21'),
    Metric('start_time', 1547301201.0),
    Result(state=state.OK, summary='Job age: 1 year 178 days'),
    Result(state=state.OK, notice='Avg. memory: 1000 B'),
    Metric('avg_mem_bytes', 1000.0),
    Result(state=state.OK, notice='Invol. context switches: 12'),
    Metric('invol_context_switches', 12.0),
    Result(state=state.OK, notice='Max. memory: 1.18 MiB'),
    Metric('max_res_bytes', 1234000.0),
    Result(state=state.OK, notice='Filesystem reads: 0'),
    Metric('reads', 0.0),
    Result(state=state.OK, notice='System time: 0 seconds'),
    Metric('system_time', 0.0),
    Result(state=state.OK, notice='User time: 1 second'),
    Metric('user_time', 1.0),
    Result(state=state.OK, notice='Vol. context switches: 23'),
    Metric('vol_context_switches', 23.0),
    Result(state=state.OK, notice='Filesystem writes: 0'),
    Metric('writes', 0.0),
]
def _aggr_shrek_result(node: str) -> Result:
    """Aggregate RESULTS_SHREK into the per-node cluster Result for *node*."""
    node_details = clusterize.aggregate_node_details(node, RESULTS_SHREK)
    kwargs = dict(zip(("state", "notice"), node_details))  # type: ignore
    return Result(**kwargs)
@pytest.mark.parametrize(
    "job_data, age_levels, exit_code_to_state_map, expected_results",
    [
        (
            SECTION_1['SHREK'],
            (0, 0),
            {
                0: state.OK
            },
            RESULTS_SHREK,
        ),
        (
            SECTION_1['SHREK'],
            (1, 2),
            {
                0: state.OK
            },
            itertools.chain(
                RESULTS_SHREK[0:5],
                [
                    Result(
                        state=state.CRIT,
                        summary='Job age: 1 year 178 days (warn/crit at 1 second/2 seconds)',
                    ),
                ],
                RESULTS_SHREK[6:],
            ),
        ),
        (
            SECTION_1['SHREK'],
            (0, 0),
            {
                0: state.WARN
            },
            itertools.chain(
                [
                    Result(
                        state=state.WARN,
                        summary='Latest exit code: 0',
                    ),
                ],
                RESULTS_SHREK[1:],
            ),
        ),
        (
            _modify_start_time(
                SECTION_1['SHREK'],
                [1557301261, 1557301321, 1557301381, 1557301441, 1537301501, 1557301561],
            ),
            (1, 2),
            {
                0: state.OK
            },
            itertools.chain(
                RESULTS_SHREK[:3],
                [
                    Result(
                        state=state.OK,
                        notice=('6 jobs are currently running, started at'
                                ' May 08 2019 09:41:01, May 08 2019 09:42:01,'
                                ' May 08 2019 09:43:01, May 08 2019 09:44:01,'
                                ' Sep 18 2018 22:11:41, May 08 2019 09:46:01'),
                    ),
                    Result(
                        state=state.CRIT,
                        summary=('Job age (currently running): '
                                 '1 year 63 days (warn/crit at 1 second/2 seconds)'),
                    ),
                ],
                RESULTS_SHREK[6:],
            ),
        ),
    ],
)
def test_process_job_stats(
    job_data,
    age_levels,
    exit_code_to_state_map,
    expected_results,
):
    """Age levels and exit-code mapping alter only the affected results."""
    with on_time(*TIME):
        assert list(job._process_job_stats(
            job_data,
            age_levels,
            exit_code_to_state_map,
        )) == list(expected_results)
@pytest.mark.parametrize(
    "item, params, section, expected_results",
    [
        (
            'SHREK',
            type_defs.Parameters({'age': (0, 0)},),
            SECTION_1,
            RESULTS_SHREK,
        ),
        (
            'item',
            type_defs.Parameters({'age': (0, 0)},),
            {
                'item': {}
            },
            [Result(state=state.UNKNOWN, summary='Got incomplete information for this job')],
        ),
        (
            'cleanup_remote_logs',
            type_defs.Parameters({'age': (0, 0)},),
            SECTION_2,
            [
                Result(state=state.OK, summary='Latest exit code: 0',
                       details='Latest exit code: 0'),
                Result(
                    state=state.OK, summary='Real time: 9 seconds', details='Real time: 9 seconds'),
                Metric('real_time', 9.9),
                Result(state=state.OK, notice='Latest job started at Nov 05 2014 03:10:30'),
                Metric('start_time', 1415153430.0),
                Result(state=state.OK, summary='Job age: 5 years 248 days'),
                Result(state=state.OK, notice='Avg. memory: 0 B'),
                Metric('avg_mem_bytes', 0.0),
                Result(state=state.OK, notice='Invol. context switches: 15'),
                Metric('invol_context_switches', 15.0),
                Result(state=state.OK, notice='Max. memory: 10.9 MiB'),
                Metric('max_res_bytes', 11456000.0),
                Result(state=state.OK, notice='Filesystem reads: 96'),
                Metric('reads', 96.0),
                Result(state=state.OK, notice='System time: 970 milliseconds'),
                Metric('system_time', 0.97),
                Result(state=state.OK, notice='User time: 8 seconds'),
                Metric('user_time', 8.85),
                Result(state=state.OK, notice='Vol. context switches: 274'),
                Metric('vol_context_switches', 274.0),
                Result(state=state.OK, notice='Filesystem writes: 42016'),
                Metric('writes', 42016.0),
            ],
        ),
        (
            'backup.sh',
            type_defs.Parameters({'age': (1, 2)},),
            SECTION_2,
            [
                Result(state=state.OK, summary='Latest exit code: 0'),
                Result(state=state.OK, summary='Real time: 4 minutes 41 seconds'),
                Metric('real_time', 281.65),
                Result(state=state.OK,
                       notice='1 job is currently running, started at Nov 05 2014 17:41:53'),
                Result(
                    state=state.CRIT,
                    summary=
                    'Job age (currently running): 5 years 247 days (warn/crit at 1 second/2 seconds)'
                ),
                Result(state=state.OK, notice='Avg. memory: 0 B'),
                Metric('avg_mem_bytes', 0.0),
                Result(state=state.OK, notice='Invol. context switches: 16806'),
                Metric('invol_context_switches', 16806.0),
                Result(state=state.OK, notice='Max. memory: 124 MiB'),
                Metric('max_res_bytes', 130304000.0),
                Result(state=state.OK, notice='Filesystem reads: 0'),
                Metric('reads', 0.0),
                Result(state=state.OK, notice='System time: 32 seconds'),
                Metric('system_time', 32.12),
                Result(state=state.OK, notice='User time: 4 minutes 37 seconds'),
                Metric('user_time', 277.7),
                Result(state=state.OK, notice='Vol. context switches: 32779'),
                Metric('vol_context_switches', 32779.0),
                Result(state=state.OK, notice='Filesystem writes: 251792'),
                Metric('writes', 251792.0),
            ],
        ),
        (
            'missing',
            type_defs.Parameters({'age': (1, 2)},),
            SECTION_2,
            [],
        ),
    ],
)
def test_check_job(item, params, section, expected_results):
    """check_job yields the expected results (empty for unknown items)."""
    with on_time(*TIME):
        assert list(job.check_job(item, params, section)) == expected_results
@pytest.mark.parametrize(
"item, params, section, expected_results",
[
(
'SHREK',
type_defs.Parameters({'age': (0, 0)},),
{
'node1': SECTION_1
},
[
_aggr_shrek_result('node1'),
Result(
state=state.OK,
summary=
'1 node in state OK, 0 nodes in state WARN, 0 nodes in state CRIT, 0 nodes in state UNKNOWN',
),
],
),
(
'SHREK',
type_defs.Parameters({'age': (0, 0)},),
{
'node1': SECTION_1,
'node2': SECTION_1,
},
[
_aggr_shrek_result('node1'),
_aggr_shrek_result('node2'),
Result(
state=state.OK,
summary=
'2 nodes in state OK, 0 nodes in state WARN, 0 nodes in state CRIT, 0 nodes in state UNKNOWN',
),
],
),
(
'SHREK',
type_defs.Parameters({
'age': (3600, 7200),
'outcome_on_cluster': 'best',
},),
{
'node1': SECTION_1,
'node2': {
'SHREK': _modify_start_time(
SECTION_1['SHREK'],
1594293430.9147654,
),
},
},
[
Result(state=state.OK,
notice=('[node1]: Latest exit code: 0\n'
'[node1]: Real time: 2 minutes 0 seconds\n'
'[node1]: Latest job started at Jan 12 2019 14:53:21\n'
'[node1]: Job age: 1 year 178 days (warn/crit at 1 hour'
' 0 minutes/2 hours 0 minutes)(!!)\n'
'[node1]: Avg. memory: 1000 B\n'
'[node1]: Invol. context switches: 12\n'
'[node1]: Max. memory: 1.18 MiB\n'
'[node1]: Filesystem reads: 0\n'
'[node1]: System time: 0 seconds\n'
'[node1]: User time: 1 second\n'
'[node1]: Vol. context switches: 23\n'
'[node1]: Filesystem writes: 0')),
Result(state=state.OK,
notice=('[node2]: Latest exit code: 0\n'
'[node2]: Real time: 2 minutes 0 seconds\n'
'[node2]: Latest job started at Jul 09 2020 13:17:10\n'
'[node2]: Job age: 1 hour 59 minutes (warn/crit at 1 hour'
' 0 minutes/2 hours 0 minutes)(!)\n'
'[node2]: Avg. memory: 1000 B\n'
'[node2]: Invol. context switches: 12\n'
'[node2]: Max. memory: 1.18 MiB\n'
'[node2]: Filesystem reads: 0\n'
'[node2]: System time: 0 seconds\n'
'[node2]: User time: 1 second\n'
'[node2]: Vol. context switches: 23\n'
'[node2]: Filesystem writes: 0')),
Result(state=state.WARN,
summary=('0 nodes in state OK, 1 node in state WARN,'
' 1 node in state CRIT, 0 nodes in state UNKNOWN')),
],
),
(
'missing',
type_defs.Parameters({'age': (0, 0)},),
{
'node1': SECTION_1,
'node2': SECTION_2,
},
[
Result(
state=state.UNKNOWN,
summary='Received no data for this job from any of the nodes',
),
],
),
],
)
def test_cluster_check_job(item, params, section, expected_results):
with on_time(*TIME):
assert list(job.cluster_check_job(item, params, section)) == expected_results
| superbjorn09/checkmk | tests/unit/cmk/base/plugins/agent_based/test_job.py | test_job.py | py | 17,925 | python | en | code | null | github-code | 1 | [
{
"api_name": "cmk.base.plugins.agent_based.job.Section",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "cmk.base.plugins.agent_based.job",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "cmk.base.plugins.agent_based.job.Section",
"line_number": 55,
... |
41206628067 | # Library and Modules used
import os
import cv2
import numpy as np
import customtkinter as ctk
from tkinter import *
from rembg import remove
from pathlib import Path
from threading import Thread
from datetime import datetime
from deepface import DeepFace
from PIL import Image ,ImageTk
from tkinter import ttk, filedialog
from pdf2image import convert_from_path
from tkinter.messagebox import showerror, showinfo
# Creating functions whihch used to provide fnctionality to pages
def Imgo( file, w, h) :
# Image processing
img = Image.open( file )
pht = ImageTk.PhotoImage( img.resize( (w,h), Image.Resampling.LANCZOS))
return pht
def change( can, page) :
# Switching canvas
can.destroy()
page()
def mistake( message) :
# Pop up window for errors
showerror( title = "Error Occured", message = message )
def inform( message) :
# Pop up window for info
showinfo( title = "Done", message = message )
def analysis1( path1, path2, sample ) :
# First method for image analysis
result_1 = DeepFace.verify( img1_path = path1 , img2_path = path2,
enforce_detection = False, model_name = "VGG-Face",
distance_metric = "cosine", detector_backend = "opencv" )
if result_1["verified"] :
# Add image if match
matches.add( sample )
def analysis2( path1, path2, sample ) :
# Second method for image analysis
result_2 = DeepFace.verify( img1_path = path1 , img2_path = path2,
enforce_detection = False, model_name = "VGG-Face",
distance_metric = "euclidean_l2", detector_backend = "ssd" )
if result_2["verified"] :
# Add image if match
matches.add( sample )
def sortingImages() :
# Sorting Images and Saving them in folders
if ( values[2] != "" and values[4] != "" ) :
main_img = {}
check_img = {}
# Processing First folder
for img in os.listdir( values[2] ) :
main_path = values[2] + '/' + str(img)
main_img[img] = cv2.imread( main_path )
# Processing Second folder
for img in os.listdir( values[4] ) :
check_path = values[4] + '/' + str(img)
check_img[img] = cv2.imread( check_path )
# Analysing both the folders together
for x in main_img.keys() :
for y in check_img.keys() :
# Both at same time
Thread(target = analysis1( main_img[x], check_img[y], y )).start()
Thread(target = analysis2( main_img[x], check_img[y], y )).start()
new_dir = os.path.join( values[2], str( x[ : len(x)-4] + "match") )
os.mkdir( new_dir )
for i in matches :
# Saving Images to new directory
cv2.imwrite( os.path.join( new_dir, i), check_img[i] )
# Clearing the previous data from set
matches.clear()
# Message of completion
inform( "Sorted And Saved" )
values[2] = ""
values[4] = ""
else :
# Showing error due to empty credientials
mistake( "Empty Fields" )
def findingImages( label ) :
# Finding similar images
if ( values[0] != "" and values[2] != "" ) :
# Processing image
values[1] = cv2.imread( values[0] )
# Getting into images of given folder
for img in os.listdir( values[2] ) :
img2_path = values[2] + '/' + str(img)
values[3] = cv2.imread( img2_path )
# Both at same time
Thread(target = analysis1( values[1], values[3], img )).start()
Thread(target = analysis2( values[1], values[3], img )).start()
# For searching folder, method 2
# df = DeepFace.find( img_path = values[0], db_path = values[2], enforce_detection = False )
# res = pd.DataFrame( df )
# for i in res["identity"] :
# print(i)
# Show data
if matches == set() :
# Message in the frame
label.configure( text = "No File Found")
else :
# Showing results in order
x = 50
output = ""
for i in matches :
if ( len(output + i) > x ) :
x = x + 50
output = output + os.linesep
output = str(output + i + ", ")
label.configure( text = output[:len(output)-2],
text_font = (ft[1], 20, "bold"))
label.place_configure( x = 50, y = 50, anchor = "nw" )
# Clearing previously processed data
values[0] = ""
values[1] = np.array([0,0,0])
values[2] = ""
values[3] = np.array([0,0,0])
matches.clear()
def convertFile( can, formate ) :
if ( values[0] == "" ) :
mistake("Add or Enter File Path")
return
# Coverting files to another specified formate
convert_to = formate.get()
if (convert_to == "Select Type ") :
mistake("Select File Format")
return
file_types = { "Select Type " : False,
" PDF" : [ "PDF file", "*.pdf"],
" PNG" : [ "PNG file", "*.png"],
" JPG" : [ "JPG file", "*.jpg"],
" JPEG" : [ "JPEG file", "*.jpeg"] }
# Formate to which we have to convert
convert_to = file_types[convert_to]
try :
# Checking Entrybox
if( values[0][-3:] == convert_to[1][2:] ) :
# Error due to converting to same formate
mistake("SAME FORMATE")
elif ( values[0] != "" ) and convert_to :
# Converting pdf files
if ( values[0][-3:] == 'pdf' ) :
# Finding address to save file
dirc = filedialog.askdirectory( initialdir = os.getcwd(), title = "Save file")
# Poppler's Path
poppler_path = os.path.join( os.getcwd(), r"poppler-23.01.0\Library\bin")
# Processing page(s) of pdf
pages = convert_from_path( pdf_path = values[0], poppler_path = poppler_path )
# Getting into page(s) of pdf
for page in pages :
file = datetime.now().strftime('%Y%m%d%H%M%S')
file = dirc + '/' + str(file) + convert_to[1][1:]
page.save( Path(file), convert_to[1][2:])
# Converting to pdf formate
elif ( convert_to[1][2:] == "pdf" ) :
# Finding address to save file
file = filedialog.asksaveasfile( initialdir = os.getcwd(),
title = "Save file",
defaultextension = f"{convert_to[1]}",
filetypes =[( convert_to[0], f"{convert_to[1]}" )] )
# Saving file
let = Image.open( values[0] )
to_pdf = let.convert('RGB')
to_pdf.save( file.name )
file.close()
# For other types of formate conversion
else :
# Finding address to save file
file = filedialog.asksaveasfile( initialdir = os.getcwd(), title = "Save file",
defaultextension = f"{convert_to[1]}",
filetypes =[( convert_to[0], f"{convert_to[1]}" )] )
# Saving file
values[1] = cv2.imread( values[0] )
cv2.imwrite( file.name, values[1])
file.close()
# Clearing previously processed data
values[0] = ""
values[1] = np.array([0,0,0])
# Message of complition and return back
inform( "FILE SAVED" )
change( can, menuPage)
else :
# Showing error due empty credientials
mistake( "ENTER FILE NAME!" )
except :
mistake("Invalid Entry")
def removeBackground( click ) :
# Removing the background of given Image
# Checking entry
if values[0] == "" :
# Showing error due to empty credientials
mistake( "FILE NOT FOUND!")
else :
# Removing the background of the Image
original = cv2.imread( values[0] )
values[1] = remove(original)
values[0] = ""
# Disabling the button after removing
click.configure( state = DISABLED)
def savingFile( can ) :
# Saving the file to a particular address
if values[1].any() != 0 :
# Finding address to save file
file = filedialog.asksaveasfile( initialdir = os.getcwd(), title = "Save file",
defaultextension = "*.png",
filetypes = [( "PNG file", "*.png"), ( "JPG file", "*.jpg")] )
# Saving file
cv2.imwrite( file.name, values[1])
values[1] = np.array([0,0,0])
file.close()
# Showing message of complition and returning back
inform( "FILE SAVED" )
change( can, menuPage)
else :
# Showing error due to empty credientials
mistake( "ENTER FILE NAME!" )
def openingFolder( folder_path ) :
# Opening Folder using filedialog
if ( folder_path.get() != "" ) :
# Getting path of folder from entry box
open_folder = folder_path.get()
else :
# Getting path of folder from dialog
open_folder = filedialog.askdirectory( initialdir = os.getcwd() ,title = "Browse Folder")
# Checking for empty address
if ( open_folder != "" ) :
values[2] = open_folder
if ( folder_path.get() != "" ) :
folder_path.delete( 0, END)
folder_path.insert( 0, open_folder )
else :
# Showing error due to empty credientials
mistake( "FIELD EMPTY!" )
def openingFolder2( folder_path ) :
# Opening Folder using filedialog
if ( folder_path.get() != "" ) :
# Getting path of folder from entry box
open_folder = folder_path.get()
else :
# Getting path of folder from dialog
open_folder = filedialog.askdirectory( initialdir = os.getcwd(), title = "Browse Folder")
# Checking for empty address
if ( open_folder != "" ) :
values[4] = open_folder
if ( folder_path.get() != "" ) :
folder_path.delete( 0, END)
folder_path.insert( 0, open_folder )
else :
# Showing error due to empty credientials
mistake( "FIELD EMPTY!" )
def openingFile( file_path, file_formate ) :
# Opening File
if ( file_path.get() != "" ) :
# Getting path of file from entry box
open_file = file_path.get()
else :
# Getting path of file from filedialog
open_file = filedialog.askopenfilename( initialdir = os.getcwd(), title = "Open file", filetypes = file_formate )
# Checking for empty address
if ( open_file != "" ) :
values[0] = open_file
if ( file_path.get() != "" ) :
file_path.delete( 0, END)
file_path.insert( 0, open_file )
else :
# Showing error due to empty credientials
mistake( "FIELD EMPTY!" )
def checkLogin( can, page, user, pwrd ) :
# Checking the login credientials
if ( user.get() != "" and pwrd.get() != "" ) :
if ( user.get() == "JustFunfact" and pwrd.get() == "star is burning" ) :
# Login, same entry found
change( can, page )
else :
# Showing error due to no match
mistake("ENTRY DOESN'T MATCH")
else :
# Showing error due to empty credientials
mistake( "EMPTY FIELDS")
# Designing pages of respective functionality
def clearBack() :
# Image Background removing
# Defining Structure
third_page = Canvas( root,
width = wid, height = hgt,
bg = "black", highlightcolor = "#3c5390",
borderwidth = 0 )
third_page.pack( fill = "both", expand = True )
# Background Image
back_image = Imgo( os.path.join( os.getcwd(), "Background\Clear_Back_Page.jpg" ), 1498, 875)
third_page.create_image( 0, 0, image = back_image , anchor = "nw")
# Heading
third_page.create_text( 500, 120, text = "Remove Background",
font = ( ft[0], 45, "bold"), fill = "#1c54df" )
# Return Button
ret = Imgo( os.path.join( os.getcwd(), r'Design\arrow.png' ), 45, 35)
ret_bt = ctk.CTkButton( master = root, image = ret, text = None,
width = 60, height = 40, corner_radius = 23,
bg_color = "#d3eafc", fg_color = "red",
hover_color = "#ff5359", border_width = 0,
command = lambda : change( third_page, menuPage) )
ret_bt_win = third_page.create_window( 30, 20, anchor = "nw", window = ret_bt )
# Accessing the file
file_path = ctk.CTkEntry( master = root,
placeholder_text = "Enter Path", text_font = ( ft[4], 20 ),
width = 580, height = 30, corner_radius = 14,
placeholder_text_color = "#494949", text_color = "#242424",
fg_color = "#c3c3c3", bg_color = "#d3eafc",
border_color = "white", border_width = 3)
file_path_win = third_page.create_window( 125, 200, anchor = "nw", window = file_path )
file_formate = [( "PNG file", "*.png"), ( "JPG file", "*.jpg")]
# Adding file path
add_bt = ctk.CTkButton( master = root,
text = "Add..", text_font = ( ft[1], 20 ),
width = 60, height = 40, corner_radius = 14,
bg_color = "#d3eafc", fg_color = "red", text_color = "white",
hover_color = "#ff5359", border_width = 0,
command = lambda : openingFile( file_path, file_formate) )
add_bt_win = third_page.create_window( 860, 200-2, anchor = "nw", window = add_bt )
#Design to display
img_to_rem = Imgo( os.path.join( os.getcwd(), 'Design\Clear_back_design.png' ), 370, 370)
third_page.create_image( 600-20, 350, image = img_to_rem , anchor = "nw")
# Background removing button
rem_bt = ctk.CTkButton( master = root,
text = "Remove", text_font = ( ft[4], 25 ),
width = 170, height = 50, corner_radius = 14,
bg_color = "#98e2fe", fg_color = "red", text_color = "white",
hover_color = "#ff5359", border_width = 0,
text_color_disabled = "#a4a4a4",
command = lambda : removeBackground(rem_bt) )
rem_bt_win = third_page.create_window( 300, 500, anchor = "nw", window = rem_bt )
# Saving Image button
save_bt = ctk.CTkButton( master = root,
text = "Save Image", text_font = ( ft[4], 25 ),
width = 220, height = 50, corner_radius = 14,
bg_color = "#98e2fe", fg_color = "red", text_color = "white",
hover_color = "#ff5359", border_width = 0,
command = lambda : savingFile( third_page) )
save_bt_win = third_page.create_window( 1000, 500, anchor = "nw", window = save_bt )
root.mainloop()
def convertImage() :
# Defining Structure
fourth_page = Canvas( root,
width = wid, height = hgt,
bg = "black", highlightcolor = "#3c5390",
borderwidth = 0 )
fourth_page.pack( fill = "both", expand = True )
# Background Image
back_image = Imgo( os.path.join( os.getcwd(), "Background\Convert_Img_Page.jpg" ), 1498, 875)
fourth_page.create_image( 0, 0, image = back_image , anchor = "nw")
# Heading
fourth_page.create_text( 500, 120, text = "Convert Images",
font = ( ft[0], 45, "bold" ), fill = "#1c54df" )
# Return Button
ret = Imgo( os.path.join( os.getcwd(), r'Design\arrow.png' ), 45, 35)
ret_bt = ctk.CTkButton( master = root,
image = ret, text = None,
width = 60, height = 40, corner_radius = 23,
bg_color = "#d3eafc", fg_color = "red",
hover_color = "#ff5359", border_width = 0,
command = lambda : change( fourth_page, menuPage) )
ret_bt_win = fourth_page.create_window( 30, 20, anchor = "nw", window = ret_bt )
# Accessing the file
file_path = ctk.CTkEntry( master = root,
placeholder_text = "Enter Path", text_font = ( ft[4], 20 ),
width = 580, height = 30, corner_radius = 14,
placeholder_text_color = "#494949", text_color = "#242424",
fg_color = "#c3c3c3", bg_color = "#d3eafc",
border_color = "white", border_width = 3)
file_path_win = fourth_page.create_window( 300, 210, anchor = "nw", window = file_path )
file_formate = [( "PNG file", "*.png"), ( "JPG file", "*.jpg"), ( "JPEG file", "*.jpeg"), ( "PDF file", "*.pdf") ]
# Adding file path
add_bt = ctk.CTkButton( master = root,
text = "Add..", text_font = ( ft[1], 20 ),
width = 60, height = 40, corner_radius = 14,
bg_color = "#d3eafc", fg_color = "red", text_color = "white",
hover_color = "#ff5359", border_width = 0,
command = lambda : openingFile( file_path, file_formate) )
add_bt_win = fourth_page.create_window( 1035, 210-2, anchor = "nw", window = add_bt )
#Design to display
img_to_con = Imgo( os.path.join( os.getcwd(), "Design\Convert_img_design.png" ), 390, 390)
fourth_page.create_image( 600-50, 350, image = img_to_con , anchor = "nw")
# Option menu
opt = ctk.StringVar(value = "Select Type " )
com = ctk.CTkOptionMenu( master = root, variable = opt,
values = [ " PDF", " PNG", " JPG", " JPEG"],
text_font = ( ft[4], 20),
width = 170, height = 40, corner_radius = 15,
bg_color = "#9ae2fe", fg_color = "red", text_color = "white",
button_color = "#363fc8", button_hover_color = "#676fe8",
dropdown_color = "#ff6c3d", dropdown_hover_color = "red",
dropdown_text_color = "white", dropdown_text_font = ( ft[4], 16),
dynamic_resizing = True )
# com.set("Select File")
com_win = fourth_page.create_window( 1000, 400, anchor = "nw", window = com )
# Saving Image button
save_bt = ctk.CTkButton( master = root,
text = "Save Image", text_font = ( ft[4], 22 ),
width = 200, height = 40, corner_radius = 14,
bg_color = "#98e2fe", fg_color = "red", text_color = "white",
hover_color = "#ff5359", border_width = 0,
command = lambda : convertFile( fourth_page, com) )
save_bt_win = fourth_page.create_window( 1005, 620, anchor = "nw", window = save_bt )
root.mainloop()
def findImage() :
# Defining Structure
fifth_page = Canvas( root, width =
wid, height = hgt,
bg = "black", highlightcolor = "#3c5390",
borderwidth = 0 )
fifth_page.pack( fill = "both", expand = True )
# Background Image
back_image = Imgo( os.path.join( os.getcwd(), "Background\Find_Img_Page.jpg" ), 1498, 875)
fifth_page.create_image( 0, 0, image = back_image , anchor = "nw")
# Heading
fifth_page.create_text( 400, 120, text = "Find Image", font = ( ft[0], 45, "bold" ), fill = "#1c54df" )
# Return Button
ret = Imgo( os.path.join( os.getcwd(), r'Design\arrow.png' ), 45, 35 )
ret_bt = ctk.CTkButton( master = root,
image = ret, text = None,
width = 60, height = 40, corner_radius = 23,
bg_color = "#d3eafc", fg_color = "red",
hover_color = "#ff5359", border_width = 0,
command = lambda : change( fifth_page, menuPage) )
ret_bt_win = fifth_page.create_window( 30, 20, anchor = "nw", window = ret_bt )
# Accessing the image
file_path = ctk.CTkEntry( master = root,
placeholder_text = "Enter Image Path", text_font = ( ft[4], 20 ),
width = 603, height = 30, corner_radius = 14,
placeholder_text_color = "#494949", text_color = "#242424",
fg_color = "#c3c3c3", bg_color = "#d3eafc",
border_color = "white", border_width = 3)
file_path_win = fifth_page.create_window( 300, 210, anchor = "nw", window = file_path )
file_formate = [( "PNG file", "*.png"), ( "JPG file", "*.jpg") ]
# Adding image path
add_bt = ctk.CTkButton( master = root,
text = "Add..", text_font = ( ft[1], 20 ),
width = 60, height = 40, corner_radius = 14,
bg_color = "#d3eafc", fg_color = "red", text_color = "white",
hover_color = "#ff5359", border_width = 0,
command = lambda : openingFile( file_path, file_formate) )
add_bt_win = fifth_page.create_window( 1065, 210-2, anchor = "nw", window = add_bt )
# Accessing the folder
folder_path = ctk.CTkEntry( master = root,
placeholder_text = "Enter Folder Path", text_font = ( ft[4], 20 ),
width = 580, height = 30, corner_radius = 14,
placeholder_text_color = "#494949", text_color = "#242424",
fg_color = "#c3c3c3", bg_color = "#d3eafc",
border_color = "white", border_width = 3)
folder_path_win = fifth_page.create_window( 300, 295, anchor = "nw", window = folder_path )
# Browse folder button
browse_bt = ctk.CTkButton( master = root,
text = "Browse", text_font = ( ft[1], 20 ),
width = 100, height = 40, corner_radius = 14,
bg_color = "#d3eafc", fg_color = "red", text_color = "white",
hover_color = "#ff5359", border_width = 0,
command = lambda : openingFolder( folder_path ) )
browse_bt_win = fifth_page.create_window( 1035, 295-2, anchor = "nw", window = browse_bt )
# Frame
mess = ctk.CTkFrame( master = fifth_page,
width = 780, height = 300, corner_radius = 30,
bg_color = "#d5eafd", fg_color = "#97e1fe",
border_color = "#4d89eb", border_width = 6)
mess.place_configure( x = 280, y = 480, anchor = "nw")
# Label in frame
frm_label = ctk.CTkLabel( master = mess,
text = "Insert Values", text_font = (ft[0], 45, "bold"),
width = 200, height = 50, corner_radius = 15,
bg_color = "#97e1fe", fg_color = "#97e1fe", text_color = "#1c54df" )
frm_label.place_configure( x = 220, y = 120, anchor = "nw" )
# Image Finding button
find_bt = ctk.CTkButton( master = root,
text = "Find", text_font = ( ft[1], 24 ),
width = 160, height = 45, corner_radius = 14,
bg_color = "#d3eafc", fg_color = "red", text_color = "white",
hover_color = "#ff5359", border_width = 0,
command = lambda : findingImages( frm_label ) )
find_bt_win = fifth_page.create_window( 650, 400, anchor = "nw", window = find_bt )
root.mainloop()
def sortImage() :
# Defining Structure
sixth_page = Canvas( root,
width = wid, height = hgt,
bg = "black", highlightcolor = "#3c5390",
borderwidth = 0 )
sixth_page.pack( fill = "both", expand = True )
# Background Image
back_image = Imgo( os.path.join( os.getcwd(), "Background\Sort_Image_Page.jpg" ), 1498, 875)
sixth_page.create_image( 0, 0, image = back_image , anchor = "nw")
# Heading
sixth_page.create_text( 400, 120, text = "Sort Images",
font = ( ft[0], 45, "bold" ), fill = "#1c54df" )
# Return Button
ret = Imgo( os.path.join( os.getcwd(), r'Design\arrow.png' ), 45, 35)
ret_bt = ctk.CTkButton( master = root,
image = ret, text = None,
width = 60, height = 40, corner_radius = 23,
bg_color = "#d3eafc", fg_color = "red",
hover_color = "#ff5359", border_width = 0,
command = lambda : change( sixth_page, menuPage) )
ret_bt_win = sixth_page.create_window( 30, 20, anchor = "nw", window = ret_bt )
# Accessing the folder1
folder1_path = ctk.CTkEntry( master = root,
placeholder_text = "Enter Folder 1 Path", text_font = ( ft[4], 20 ),
width = 580, height = 30, corner_radius = 14,
placeholder_text_color = "#494949", text_color = "#242424",
fg_color = "#c3c3c3", bg_color = "#d3eafc",
border_color = "white", border_width = 3)
folder1_path_win = sixth_page.create_window( 300, 210, anchor = "nw", window = folder1_path )
# Browse folde1 button
browse_bt_1 = ctk.CTkButton( master = root,
text = "Browse", text_font = ( ft[1], 20 ),
width = 100, height = 40, corner_radius = 14,
bg_color = "#d3eafc", fg_color = "red", text_color = "white",
hover_color = "#ff5359", border_width = 0,
command = lambda : openingFolder( folder1_path ) )
browse_bt_1_win = sixth_page.create_window( 1035, 210-2, anchor = "nw", window = browse_bt_1 )
# Accessing the folder2
folder2_path = ctk.CTkEntry( master = root,
placeholder_text = "Enter Folder 2 Path", text_font = ( ft[4], 20 ),
width = 580, height = 30, corner_radius = 14,
placeholder_text_color = "#494949", text_color = "#242424",
fg_color = "#c3c3c3", bg_color = "#d3eafc",
border_color = "white", border_width = 3)
folder2_path_win = sixth_page.create_window( 300, 295, anchor = "nw", window = folder2_path )
# Browse folder2 button
browse_bt_2 = ctk.CTkButton( master = root,
text = "Browse", text_font = ( ft[1], 20 ),
width = 100, height = 40, corner_radius = 14,
bg_color = "#d3eafc", fg_color = "red", text_color = "white",
hover_color = "#ff5359", border_width = 0,
command = lambda : openingFolder2( folder2_path ) )
browse_bt_2_win = sixth_page.create_window( 1035, 295-2, anchor = "nw", window = browse_bt_2 )
# Sorting button
sort_bt = ctk.CTkButton( master = root,
text = "Start Sorting", text_font = ( ft[0], 24 ),
width = 250, height = 45, corner_radius = 14,
bg_color = "#99e2fe", fg_color = "red", text_color = "white",
hover_color = "#ff5359", border_width = 0,
command = lambda : sortingImages() )
sort_bt_win = sixth_page.create_window( 610, 540, anchor = "nw", window = sort_bt )
root.mainloop()
def menuPage() :
# Defining Structure
second_page = Canvas( root,
width = wid, height = hgt,
bg = "black", highlightcolor = "#3c5390",
borderwidth = 0 )
second_page.pack( fill = "both", expand = True )
# Background Image
back_image = Imgo( os.path.join( os.getcwd(), "Background\Menu_Page.jpg" ), 1498, 875)
second_page.create_image( 0, 0, image = back_image , anchor = "nw")
# Heading
second_page.create_text( 350, 120, text = "Content...",
font = ( ft[0], 45, "bold" ), fill = "#1c54df" )
# Back Ground remover page window
backRem = Imgo( os.path.join( os.getcwd(), "Design\Clear_Back_logo.png" ), 220, 200 )
backRem_bt = ctk.CTkButton( master = root,
image = backRem, compound = "top",
text = "Clear Back", text_font = ( ft[0], 22, "bold" ),
text_color = "white",
width = 230, height = 240, corner_radius = 10,
bg_color = "#d3eafc", fg_color = "#2d435b",
hover_color = "#fdbf38", border_width = 0,
command = lambda : change( second_page, clearBack ))
backRem_bt_win = second_page.create_window( 190, 250, anchor = "nw", window = backRem_bt)
# Image converter page window
imgConvert = Imgo( os.path.join( os.getcwd(), "Design\Convert_Img_logo.png" ), 220, 200 )
imgConvert_bt = ctk.CTkButton( master = root,
image = imgConvert, compound = "top",
text = "Converter", text_font = ( ft[0], 22, "bold" ),
text_color = "white",
width = 230, height = 240, corner_radius = 10,
bg_color = "#d3eafc", fg_color = "#2d435b",
hover_color = "#fdbf38", border_width = 0,
command = lambda : change( second_page, convertImage ))
imgConvert_bt_win = second_page.create_window( 500, 400, anchor = "nw", window = imgConvert_bt )
# Search image page window
findImg = Imgo( os.path.join( os.getcwd(), "Design\Find_Img_logo.png" ), 220, 200 )
findImg_bt = ctk.CTkButton( master = root,
image = findImg, compound = "top",
text = "Find Image", text_font = ( ft[0], 22, "bold" ),
text_color = "white",
width = 230, height = 240, corner_radius = 10,
bg_color = "#d3eafc", fg_color = "#2d435b",
hover_color = "#fdbf38", border_width = 0,
command = lambda : change( second_page, findImage ))
findImg_bt_win = second_page.create_window( 810, 250, anchor = "nw", window = findImg_bt )
# Sort image page window
imgSort =Imgo( os.path.join( os.getcwd(), "Design\Sort_Img_logo.png" ), 220, 200 )
imgSort_bt = ctk.CTkButton( master = root,
image = imgSort, compound = "top",
text = "Sort Images", text_font = ( ft[0], 22, "bold" ),
text_color = "white",
width = 230, height = 240, corner_radius = 10,
bg_color = "#d3eafc", fg_color = "#2d435b",
hover_color = "#fdbf38", border_width = 0,
command = lambda : change( second_page, sortImage ))
imgSort_bt_win = second_page.create_window( 1120, 400, anchor = "nw", window = imgSort_bt )
# Logout button
log = Imgo( os.path.join( os.getcwd(), "Design\logout.png" ), 35, 35 )
log_bt = ctk.CTkButton( master = root,
image = log, text = None,
width = 45, height = 45, corner_radius = 23,
bg_color = "#357adf", fg_color = "red",
hover_color = "#ff5359", border_width = 0,
command = lambda : change( second_page, loginPage ))
log_bt_win = second_page.create_window( 1420, 20, anchor = "nw", window = log_bt )
root.mainloop()
def loginPage() :
global user, pwrd, first_page
# Defining Structure
first_page = Canvas( root,
width = wid, height = hgt,
bg = "black", highlightcolor = "#3c5390",
borderwidth = 0 )
first_page.pack( fill = "both", expand = True )
# Background Image
back_image = Imgo( os.path.join( os.getcwd(), "Background\Login_Page.jpg" ), 1498, 875)
design_image = Imgo( os.path.join( os.getcwd(), "Design\Login_Design.png" ), 600, 400)
first_page.create_image( 0, 0, image = back_image , anchor = "nw")
first_page.create_image( 350, 325, image = design_image, anchor = "nw")
# Heading
first_page.create_text( 450, 150, text = "Image Sorter",
font = ( ft[0], 45, "bold" ), fill = "#1c54df" )
first_page.create_text( 1150, 380, text = "Welcome\n Back",
font = ( ft[0], 26, "bold" ), fill = "#0b4bf5" )
# Entry of username and password
user = ctk.CTkEntry( master = root,
placeholder_text = "Username", text_font = ( ft[1], 20 ),
width = 220, height = 30, corner_radius = 14,
placeholder_text_color = "#666666", text_color = "#191919",
fg_color = "#e1f5ff", bg_color = "#9ae2fe",
border_color = "white", border_width = 3)
user_win = first_page.create_window( 1015, 470, anchor = "nw", window = user )
pwrd = ctk.CTkEntry( master = root,
placeholder_text = "Password", text_font = ( ft[1], 20 ),
width = 220, height = 30, corner_radius = 14,
placeholder_text_color = "#666666", text_color = "#191919",
fg_color = "#e1f5ff", bg_color = "#9ae2fe",
border_color = "white", border_width = 3, show = "*" )
pwrd_win = first_page.create_window( 1015, 550, anchor = "nw", window = pwrd )
# Login button
log_bt = ctk.CTkButton( master = root,
text = "Login", text_font = ( ft[0], 22 ),
text_color = "white",
width = 50, height = 25, corner_radius = 15,
bg_color = "#9ae2fe", fg_color = "red",
hover_color = "#ff5359", border_width = 0,
command = lambda : checkLogin( first_page, menuPage, user, pwrd ))
log_bt_win = first_page.create_window( 1090, 650, anchor = "nw", window = log_bt )
root.mainloop()
if __name__ == "__main__" :
# Defining Main theme of all widgets
ctk.set_appearance_mode( "dark" )
ctk.set_default_color_theme( "dark-blue" )
wid = 1200
hgt = 700
global root
root = ctk.CTk()
root.title( "Image Sorter" )
root.iconbitmap( os.path.join( os.getcwd(), "Design\Project_Icon.ico" ))
root.geometry( "1200x700+200+80" )
root.resizable( False, False )
ft = [ "Tahoma", "Seoge UI", "Heloia", "Book Antiqua", "Microsoft Sans Serif"]
values = [ "", np.array([0,0,0]), "", np.array([0,0,0]), ""]
matches = set()
loginPage() | Raghvendra5448/Image_Sorter | image_sorter.py | image_sorter.py | py | 37,326 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "PIL.Image.open",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"li... |
26987174906 | # Enter your code here. Read input from STDIN. Print output to STDOUT
if __name__ == '__main__':
n = int(input())
#print(n)
input_string = input()
numbers = list(map(int, input_string.split()))
#mean
numbersum = sum(numbers)
# print (numbersum)
mean = numbersum/n
print (mean)
#meadian
numbers.sort()
#print (numbers)
if (n %2 ==1):
median = numbers[int(n/2)-1]
else:
middle = [int(n/2)-1,int(n/2)]
denominator = [numbers[i] for i in middle]
#print (denominator)
donominatorsum = sum(denominator)
#print (donominatorsum)
median = donominatorsum/2
print (median)
from collections import Counter
c = Counter(numbers)
print (c.most_common(1)[0][0]) | Seppel1985/HackerRank | 10_Days_of_Statistics/s10-basic-statistics.py | s10-basic-statistics.py | py | 773 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.Counter",
"line_number": 27,
"usage_type": "call"
}
] |
5281842157 |
import shutil
import gc
import copy
import numpy
import random
import cv2
from PIL import Image, ImageDraw
import os
from functools import partial
from scipy.ndimage.filters import gaussian_filter
import time
import pickle
import re
from sklearn import preprocessing
import scipy.io as sio
from Att_BiLSTM_training import test_att_BiLSTM
from keras.models import *
from statistics import mode
from scipy import interpolate
from scipy.io import loadmat
import sys
import matplotlib.pyplot as plt
from SMGaccessSample import GestureSample
from scipy.special import softmax
'''
load SMG training dataset
'''
def loader_SMG(parsers):
# start counting tim
time_tic = time.time()
#counting feature number
HMM_state_feature_count = 0
n_HMM_state_feature_count = 0
#how many joints are used
njoints = parsers['njoints']
used_joints = parsers['used_joints']
#HMM temporal steps
Time_step_NO = parsers['STATE_NUM']#3
#feature dimension of the LSTM input
featurenum = int((njoints*(njoints-1)/2 + njoints**2)*3)
#get sample list
Sample_list = sorted(os.listdir(parsers['data']))
dictionaryNum = parsers['class_count']*parsers['STATE_NUM']+1
'''pre-allocating the memory '''
#gesture features
Feature_all_states = numpy.zeros(shape=(400000, featurenum), dtype=numpy.float32)
Targets_all_states = numpy.zeros(shape=(400000, dictionaryNum), dtype=numpy.uint8)
n_Feature_all_states = numpy.zeros(shape=(400000, featurenum), dtype=numpy.float32)
n_Targets_all_states = numpy.zeros(shape=(400000, dictionaryNum), dtype=numpy.uint8)
# HMM pror and transition matrix
Prior = numpy.zeros(shape=(dictionaryNum))
Transition_matrix = numpy.zeros(shape=(dictionaryNum,dictionaryNum))
#start traversing samples
for sampleID in Sample_list[0:35]:
print("\t Processing file " + str(sampleID))
smp=GestureSample(os.path.join(parsers['data'],sampleID),sampleID)
gesturesList=smp.getGestures()
Transition_matrix[-1, -1] += smp.getNumFrames()
MG_flag = smp.getMGFlag()
'''traverse all ges in samples'''
#get the skeletons of the gesture
for ges_info in gesturesList:
gestureID, startFrame, endFrame = ges_info
# print(endFrame - startFrame)
'''
1. extract skeleton features of ges
'''
Skeleton_matrix, valid_skel = Extract_feature_UNnormalized(smp, used_joints, startFrame, endFrame)
# to see we actually detect a skeleton:
### extract the features according to the CVPR2014 paper
Feature = Extract_feature_Realtime(Skeleton_matrix, njoints)
'''generatre the corresponding labels'''
sample_label = extract_HMMstate_label(Time_step_NO, Feature.shape[0], dictionaryNum, gestureID)
#assign seg_length number of features to current gesutre
Feature_all_states[HMM_state_feature_count:HMM_state_feature_count + Feature.shape[0], :] = Feature
#assign seg_length number of labels to corresponding features
Targets_all_states[HMM_state_feature_count:HMM_state_feature_count + Feature.shape[0], :] = sample_label
#update feature count
HMM_state_feature_count = HMM_state_feature_count + Feature.shape[0]
'''
2. extract skeleton features of non ges
'''
'''non movement before 5 frames'''
if 1 not in MG_flag[startFrame-5:startFrame]:
n_startFrame = startFrame-5
n_endFrame = startFrame
Skeleton_matrix, valid_skel = Extract_feature_UNnormalized(smp, used_joints, n_startFrame, n_endFrame)
Feature = Extract_feature_Realtime(Skeleton_matrix, njoints)
'''generatre the corresponding labels'''
#generatre the corresponding labels
sample_label = extract_nonActionlabel(Feature.shape[0], dictionaryNum)
#assign seg_length number of features to current gesutre
n_Feature_all_states[n_HMM_state_feature_count:n_HMM_state_feature_count + Feature.shape[0], :] = Feature
#assign seg_length number of labels to corresponding features
n_Targets_all_states[n_HMM_state_feature_count:n_HMM_state_feature_count + Feature.shape[0], :] = sample_label
#update feature count
n_HMM_state_feature_count = n_HMM_state_feature_count + Feature.shape[0]
'''non movement after 5 frames'''
if 1 not in MG_flag[endFrame:endFrame+5]:
n_startFrame = endFrame
n_endFrame = endFrame+5
Skeleton_matrix, valid_skel = Extract_feature_UNnormalized(smp, used_joints, n_startFrame, n_endFrame)
Feature = Extract_feature_Realtime(Skeleton_matrix, njoints)
'''generatre the corresponding labels'''
#generatre the corresponding labels
sample_label = extract_nonActionlabel(Feature.shape[0], dictionaryNum)
#assign seg_length number of features to current gesutre
n_Feature_all_states[n_HMM_state_feature_count:n_HMM_state_feature_count + Feature.shape[0], :] = Feature
#assign seg_length number of labels to corresponding features
n_Targets_all_states[n_HMM_state_feature_count:n_HMM_state_feature_count + Feature.shape[0], :] = sample_label
#update feature count
n_HMM_state_feature_count = n_HMM_state_feature_count + Feature.shape[0]
'''
3. extract HMM transition info of this ges
'''
for frame in range(endFrame-startFrame+1-4):
# print(gestureID)
state_no_1,state_no_2 = HMMmatrix(gestureID, frame, endFrame-startFrame+1-4, Time_step_NO)
# print(state_no_2)
## we allow first two states add together:
Prior[state_no_1] += 1
Transition_matrix[state_no_1, state_no_2] += 1
Transition_matrix[-1, -1] -= 1
if frame<2:
Transition_matrix[-1, state_no_1] += 1
Prior[-1] += 1
if frame> (endFrame-startFrame+1-4-2):
Transition_matrix[state_no_2, -1] += 1
Prior[-1] += 1
Feature_all_states = Feature_all_states[:HMM_state_feature_count, :]
#assign seg_length number of labels to corresponding features
Targets_all_states = Targets_all_states[:HMM_state_feature_count, :]
print('gesture')
print(HMM_state_feature_count)
n_Feature_all_states = n_Feature_all_states[:n_HMM_state_feature_count, :]
#assign seg_length number of labels to corresponding features
n_Targets_all_states = n_Targets_all_states[:n_HMM_state_feature_count, :]
print('non gesture')
print(n_HMM_state_feature_count)
num_samples = 20000
idx = numpy.random.randint(0,len(n_Feature_all_states),size=(num_samples))
# print(idx)
n_Feature_all_states = n_Feature_all_states[idx]
n_Targets_all_states = n_Targets_all_states[idx]
# save the feature file:
Feature_all_states = numpy.concatenate((Feature_all_states, n_Feature_all_states))
Targets_all_states = numpy.concatenate((Targets_all_states, n_Targets_all_states))
HMM_state_feature_count +=num_samples
Feature_all,Targets_all,SK_normalizationfilename = process_feature(parsers,Feature_all_states,Targets_all_states,HMM_state_feature_count)
print ("Processing data done with consuming time %d sec" % int(time.time() - time_tic))
return Prior, Transition_matrix, Feature_all,Targets_all,SK_normalizationfilename
'''
sparse data list
'''
def process_feature(parsers, Feature_all,Targets_all,action_feature_count):
# save the feature file:
print ('total training samples: ' + str(action_feature_count))
Feature_all = Feature_all[0:action_feature_count, :]
Targets_all = Targets_all[0:action_feature_count, :]
#random the samples
rand_num = numpy.random.permutation(Feature_all.shape[0])
Feature_all = Feature_all[rand_num]
Targets_all = Targets_all[rand_num]
#[train_set_feature_normalized, Mean1, Std1] = preprocessing.scale(train_set_feature)
scaler = preprocessing.StandardScaler().fit(Feature_all)
Mean1 = scaler.mean_
Std1 = scaler.scale_
Feature_all = normalize(Feature_all,Mean1,Std1)
# save the normalization files
SK_normalizationfilename = parsers['outpath']+ parsers['experi_ID'] +'SK_normalization.pkl'
f = open(SK_normalizationfilename,'wb')
pickle.dump( {"Mean1": Mean1, "Std1": Std1 },f)
f.close()
return Feature_all,Targets_all, SK_normalizationfilename
'''
using DNN to get HMM emission probability
'''
def emission_prob(modelname, parsers, Feature, Mean1, Std1,best_threshold,modeltype,AES=True,mu=0.0,lamd=1.0):
Feature_normalized = normalize(Feature, Mean1, Std1)
#print Feature_normalized.max()
#print Feature_normalized.min()
# feed to Network
y_pred_4 = test_att_BiLSTM(modelname, parsers, Feature_normalized, best_threshold)
#y_pred_4 = test_torch_BiLSTM(modelname, parsers, Feature_normalized,best_threshold, modeltype, iflog)
if AES:
y_pred_4 = y_pred_4.T
observ_likelihood = y_pred_4
# print(y_pred_4)
atten_value = y_pred_4[:-1, :]* y_pred_4[:-1, :]/9
# print(atten_value)softmax(x, axis=0)
SFMX = softmax(atten_value, axis=0)
#
# print(SFMX)
observ_likelihood[:-1, :] = SFMX* y_pred_4[:-1, :]*mu + y_pred_4[:-1, :]
# print(observ_likelihood)
observ_likelihood= numpy.log(observ_likelihood)
# print(observ_likelihood)
observ_likelihood[-1, :] = observ_likelihood[-1, :] *lamd
else:
observ_likelihood = numpy.log(y_pred_4.T)
return observ_likelihood
'''
save features
'''
def datasaver(parsers, Prior, Transition_matrix,Feature_all,Targets_all):
'''define name'''
Prior_Transition_matrix_filename = parsers['outpath']+ parsers['experi_ID'] + str(parsers['STATE_NUM']) + 'Prior_Transition_matrix.mat'
Feature_filename = parsers['outpath'] + parsers['experi_ID'] + str(parsers['STATE_NUM']) + 'feature'+ str(parsers['featureNum']) +'Feature_all.pkl'
'''HMM transition state'''
#save HMM transition matrix
sio.savemat( Prior_Transition_matrix_filename, {'Transition_matrix':Transition_matrix, 'Prior': Prior})
'''feature storage'''
# save the skeleton file:
f = open(Feature_filename, 'wb')
pickle.dump({"Feature_all": Feature_all, "Targets_all": Targets_all }, f,protocol=4)
f.close()
return Prior_Transition_matrix_filename, Feature_filename
'''
package Parameters into one file
'''
def packagePara(parsers, model1name, norm_name,HMM_file):
Paras = {'model1':model1name,
'norm_para':norm_name,
'HMM_model':HMM_file,
}
path = parsers['outpath'] + parsers['experi_ID'] + '/Paras.pkl'
afile = open(path, 'wb')
pickle.dump(Paras, afile)
afile.close()
'''
test result
'''
def tester_SMG(parsers,best_threshold):
used_joints = parsers['used_joints']
dictionaryNum = parsers['class_count']*parsers['STATE_NUM']+1
MODEL4 = load_model(parsers['outpath']+ parsers['experi_ID'] +'/my_model.h5')
#print(ges_info_list)
correct_count = 0.0
total_count = 0.0
acc_total = 0.0
time_tic = time.time()
datacheck = []
path = parsers['outpath']+ parsers['experi_ID'] + '/Paras.pkl'
file2 = open(path, 'rb')
Paras = pickle.load(file2)
file2.close()
### load the pre-store normalization constant
f = open(Paras['norm_para'],'rb')
SK_normalization = pickle.load(f)
Mean1 = SK_normalization ['Mean1']
Std1 = SK_normalization['Std1']
## Load networks
modelname1 = Paras['model1']
## Load Prior and transitional Matrix
dic=sio.loadmat(Paras['HMM_model'])
Transition_matrix = dic['Transition_matrix']
Transition_matrix[-1, -1]= Transition_matrix[-1, -1]
Prior = dic['Prior']
## Load trained networks
njoints = parsers['njoints']
#get sample list
Sample_list = sorted(os.listdir(parsers['data']))
total_F1 = 0.0
total_acc = 0.0
total_rec = 0.0
recall_len = 0
acc_len = 0
correct_count = 0.0
threshold_alpha = parsers['threshold_alpha']
#start traversing samples
for sampleID in Sample_list[35:]:
print(sampleID)
time_single = time.time()
'''1 extract skeleton features of this ges'''
'''process the gesture parts'''
smp=GestureSample(os.path.join(parsers['data'],sampleID),sampleID)
gesturesList=smp.getGestures()
frame_count =smp.getNumFrames()
MG_flag = smp.getMGFlag()
Skeleton_matrix, valid_skel = Extract_feature_UNnormalized(smp, used_joints, 1, frame_count - 1)
# to see we actually detect a skeleton:
### extract the features according to the CVPR2014 paper
Feature = Extract_feature_Realtime(Skeleton_matrix, njoints)
#ratio = 0.8
#visiblenumber = int(ratio* (frame_count))
# sample_feature1 = copy.copy(sample_feature)
# observ_likelihood1 = emission_prob(modelname1, parsers, sample_feature1, Mean1, Std1, best_threshold, parsers['netmodel'],False)
sample_feature2 = copy.copy(Feature)
log_observ_likelihood1 = emission_prob(MODEL4, parsers, sample_feature2, Mean1, Std1, best_threshold, parsers['netmodel'], True,parsers['mu'],parsers['lambda'] )#parsers['AES'])
log_observ_likelihood1[-1, 0:5] = 0
log_observ_likelihood1[-1, -5:] = 0
#
log_observ_likelihood = log_observ_likelihood1#[:,mask]# + log_observ_likelihood1
#print("\t Viterbi path decoding " )
#do it in log space avoid numeric underflow
[path, _, global_score] = viterbi_path_log(numpy.log(Prior), numpy.log(Transition_matrix), log_observ_likelihood)
pred_label, begin_frame, end_frame, Individual_score, frame_length = viterbi_colab_MES(path, global_score, state_no = parsers['STATE_NUM'], threshold=-3, mini_frame=15, cls_num = parsers['class_count'])#viterbi_colab_clean_straight(parsers['STATE_NUM'], parsers['class_count'], path, global_score,)
MG_flag = numpy.zeros(smp.getNumFrames())
#
# if True:
# # global_score=global_score[:,0:1000]
# im = imdisplay(global_score)
# plt.imshow(im, cmap='Greys')## cmap='gray')
# plt.plot(range(global_score.shape[-1]), path, color='c',linewidth=2.0)
# plt.xlim((0, 2000))#global_score.shape[-1]))
# plt.ylim((0, 100))#global_score.shape[-2]))
# # plot ground truth
# for gesture in gesturesList:
# # Get the gesture ID, and start and end frames for the gesture
# gestureID,startFrame,endFrame=gesture
# frames_count = numpy.array(range(startFrame, endFrame+1))
# pred_label_temp = ((gestureID-1) *5+3) * numpy.ones(len(frames_count))
# plt.plot(frames_count, pred_label_temp, color='r', linewidth=5.0)
#
# # plot clean path
# for i in range(len(begin_frame)):
# frames_count = numpy.array(range(begin_frame[i], end_frame[i]+1))
# pred_label_temp = ((pred_label[i]-1) *5+3) * numpy.ones(len(frames_count))
# plt.plot(frames_count, pred_label_temp, color='b', linewidth=2.0)
#
# plt.show()
# #
pred_len = len(pred_label)
# if True:
# # fig = plt.figure()
# # fig.set_facecolor("antiquewhite")
# im = imdisplay(global_score)
# plt.imshow(im, cmap = 'antiquewhite')
# plt.plot(range(global_score.shape[-1]), path, color='c',linewidth=2.0)
# plt.xlim((0, 1000))#global_score.shape[-1]))
# plt.ylim((0, 100))#global_score.shape[-2]))
# # plot ground truth
# for gesture in gesturesList[:10]:
# # Get the gesture ID, and start and end frames for the gesture
# gestureID,startFrame,endFrame=gesture
# frames_count = numpy.array(range(startFrame, endFrame+1))
# pred_label_temp = ((gestureID-1) *10 +5) * numpy.ones(len(frames_count))
# plt.plot(frames_count, pred_label_temp, color='r', linewidth=5.0)
#
# # plot clean path
# for i in range(len(begin_frame)):
# frames_count = numpy.array(range(begin_frame[i], end_frame[i]+1))
# pred_label_temp = ((pred_label[i]-1) *10 +5) * numpy.ones(len(frames_count))
# plt.plot(frames_count, pred_label_temp, color='b', linewidth=2.0)
#
# plt.show()
current_correct_count = 0.0
for ges in gesturesList:
gt_label, gt_begin_frame, gt_end_frame = ges
for pred_i in range(len(begin_frame)):
# print(pred_i)
if pred_label[pred_i] == gt_label:
alpha = (min(end_frame[pred_i],gt_end_frame)-max(begin_frame[pred_i],gt_begin_frame))/(max(end_frame[pred_i],gt_end_frame)-min(begin_frame[pred_i],gt_begin_frame))
# print(pred_i)
if alpha> threshold_alpha:
# print(pred_label)
numpy.delete(pred_label, pred_i)
numpy.delete(begin_frame, pred_i)
numpy.delete(end_frame, pred_i)
#print(pred_label)
correct_count +=1
current_correct_count +=1
break
recall_len =recall_len + len(gesturesList)
acc_len = acc_len + pred_len
print(len(gesturesList))
print(pred_len)
# pred_label
'''recall'''
current_recall = current_correct_count/len(gesturesList)
'''precise'''
current_precision = current_correct_count/pred_len
current_F1_score = 2*current_recall* current_precision/(current_recall + current_precision+0.0000001)
print("Used time %d sec, processing speed %f fps, F1 score%f, Recall %f, precision%f" %(int(time.time() - time_single),frame_count/float(time.time() - time_single),current_F1_score, current_recall, current_precision))
'''recall'''
recall = correct_count/recall_len
'''precise'''
precision = correct_count/acc_len
total_acc += precision
total_rec += recall
F1_score = 2*recall* precision/(recall + precision+0.0000001)
total_F1 += F1_score
print ("Processing testing data done with consuming time %d sec" % int(time.time() - time_tic))
#F1_total = total_F1/len(Sample_list[35:])
print(parsers['experi_ID']+". The rec for this prediction is " + "{:.12f}".format(total_rec))
print(parsers['experi_ID']+". The acc for this prediction is " + "{:.12f}".format(total_acc))
print(parsers['experi_ID']+". The score for this prediction is " + "{:.12f}".format(total_F1))
print('mu'+str(parsers['mu'])+'lambda'+str(parsers['lambda']))
numpy.savetxt(parsers['outpath']+ parsers['experi_ID'] +'_score_'+ str(acc_total) +'.txt', [])
def extract_temporal_movingPose(skeleton_all, frame_compens , LSTM_step, njoints):
frame_count = skeleton_all.shape[0] - frame_compens*2
feature_all = Extract_moving_pose_Feature(skeleton_all, njoints)
feature_dim = feature_all.shape[1]
feature_n = numpy.zeros(shape=(frame_count, feature_dim*LSTM_step))
for frame in range(frame_count):
for step in range(LSTM_step):
feature_n[frame, step*feature_dim:(step+1)*feature_dim] = feature_all[frame+step,:]
return feature_n, feature_n.shape[0]
def extract_HMMstate_label(STATE_NO, action_count, dictionaryNum, gestureID):
# label the features
target = numpy.zeros(shape=(action_count, dictionaryNum))
# HMM states force alignment
for i in range(STATE_NO):
# get feature index of the current time step
begin_feature_index = int(numpy.round(action_count * i / STATE_NO) + 1)
end_feature_index = int(numpy.round(action_count * (i + 1) / STATE_NO))
# get feature length of the current time step
seg_length = end_feature_index - begin_feature_index + 1
labels = numpy.zeros(shape=(dictionaryNum, 1))
# assign the one hot labels
try:
labels[ i + STATE_NO*gestureID] = 1
except:
print(labels.shape)
print( i + STATE_NO*gestureID)
target[begin_feature_index-1:end_feature_index,:] = numpy.tile(labels.T, (seg_length, 1))
return target
def extract_nonActionlabel (action_count, dictionaryNum):
target_n = numpy.zeros(shape=(action_count, dictionaryNum))
target_n[:,-1] = 1
return target_n
'''
record HMM transition matrix
'''
def HMMmatrix(gestureID, frame, frame_count,STATE_NO):
state_no_1 = numpy.floor(STATE_NO*(frame*1.0/(frame_count+3)))
state_no_1 = int(state_no_1+STATE_NO*(gestureID))
state_no_2 = numpy.floor(STATE_NO*((frame+1)*1.0/(frame_count+3)))
state_no_2 = int(state_no_2+STATE_NO*(gestureID))
return state_no_1,state_no_2
def normalize(Data, Mean, Std):
# print(Data.shape)
# print(Mean.shape)
Data -= Mean
Data /= Std
return Data
def Extract_feature_UNnormalized(smp, used_joints, startFrame, endFrame):
"""
Extract original features
"""
frame_num = 0
Skeleton_matrix = numpy.zeros(shape=(endFrame-startFrame+1, len(used_joints)*3))
for numFrame in range(startFrame,endFrame+1):
# Get the Skeleton object for this frame
skel=smp.getSkeleton(numFrame)
for joints in range(len(used_joints)):
# print((skel.joins[used_joints[joints]][0]))
# print(Skeleton_matrix[frame_num, joints*3: (joints+1)*3])
print(skel.joins[used_joints[joints]][0])
Skeleton_matrix[frame_num, joints*3: (joints+1)*3] =skel.joins[used_joints[joints]][0]
frame_num += 1
if numpy.allclose(sum(sum(numpy.abs(Skeleton_matrix))),0):
valid_skel = False
else:
valid_skel = True
return Skeleton_matrix, valid_skel
def Extract_moving_pose_Feature(Skeleton_matrix_Normalized, njoints):
#pose
F_pose = Skeleton_matrix_Normalized
#velocity
F_velocity = Skeleton_matrix_Normalized[2:,:] - Skeleton_matrix_Normalized[0:-2,:]
#accelerate
F_accelerate = Skeleton_matrix_Normalized[4:,:] + Skeleton_matrix_Normalized[0:-4,:] - 2 * Skeleton_matrix_Normalized[2:-2,:]
#absolute pose
FeatureNum = 0
F_abs = numpy.zeros(shape=(Skeleton_matrix_Normalized.shape[0], int(njoints * (njoints-1)/2)))
for joints1 in range(njoints-1):
for joints2 in range(joints1+1,njoints):
all_X = Skeleton_matrix_Normalized[:, joints1*3] - Skeleton_matrix_Normalized[:, joints2*3]
all_Y = Skeleton_matrix_Normalized[:, joints1*3+1] - Skeleton_matrix_Normalized[:, joints2*3+1]
all_Z = Skeleton_matrix_Normalized[:, joints1*3+2] - Skeleton_matrix_Normalized[:, joints2*3+2]
Abs_distance = numpy.sqrt(all_X**2 + all_Y**2 + all_Z**2)
F_abs[:, FeatureNum] = Abs_distance
FeatureNum += 1
Features = numpy.concatenate((F_pose[2:-2, :], F_velocity[1:-1,:], F_accelerate, F_abs[2:-2, :]), axis = 1)
return Features
def viterbi_path_log(prior, transmat, observ_likelihood):
""" Viterbi path decoding
Wudi first implement the forward pass.
Future works include forward-backward encoding
input: prior probability 1*N...
transmat: N*N
observ_likelihood: N*T
"""
T = observ_likelihood.shape[-1]
N = observ_likelihood.shape[0]
path = numpy.zeros(T, dtype=numpy.int32)
global_score = numpy.zeros(shape=(N,T))
predecessor_state_index = numpy.zeros(shape=(N,T), dtype=numpy.int32)
t = 1
global_score[:, 0] = observ_likelihood[:, 0]
# print(global_score.shape)
# need to normalize the data
for t in range(1, T):
for j in range(N):
# print(global_score[:, t-1].shape)
# print(prior.shape)
temp = global_score[:, t-1] + transmat[:, j] + observ_likelihood[j, t]-prior[0]
global_score[j, t] = max(temp)
predecessor_state_index[j, t] = temp.argmax()
path[T-1] = global_score[:, T-1].argmax()
for t in range(T-2, -1, -1):
path[t] = predecessor_state_index[ path[t+1], t+1]
return [path, predecessor_state_index, global_score]
def viterbi_colab_MES(path, global_score, state_no = 5, threshold=-3, mini_frame=15, cls_num = 10):
"""
Clean the viterbi path output according to its global score,
because some are out of the vocabulary
"""
# just to accommodate some frame didn't start right from the begining
all_label = state_no * cls_num # 20 vocabularies
start_label = numpy.concatenate((range(0,all_label,state_no), range(1,all_label,state_no),range(2,all_label,state_no)))
end_label = numpy.concatenate((range(state_no-3,all_label,state_no), range(state_no-2,all_label,state_no),range(state_no-1,all_label,state_no)))
begin_frame = []
end_frame = []
pred_label = []
frame = 1
while(frame < path.shape[-1]-1):
if path[frame-1]==all_label and path[frame] in start_label:
begin_frame.append(frame)
# python integer divsion will do the floor for us :)
pred_label.append( int(path[frame]/state_no))
while(frame < path.shape[-1]-1):
if path[frame] in end_label and path[frame+1]==all_label:
end_frame.append(frame)
break
else:
frame += 1
frame += 1
end_frame = numpy.array(end_frame)
begin_frame = numpy.array(begin_frame)
pred_label= numpy.array(pred_label)
if len(begin_frame)> len(end_frame):
begin_frame = begin_frame[:-1]
pred_label = pred_label[:-1]
elif len(begin_frame)< len(end_frame):# risky hack! just for validation file 668
end_frame = end_frame[1:]
## First delete the predicted gesture less than 15 frames
frame_length = end_frame - begin_frame
## now we delete the gesture outside the vocabulary by choosing
## frame number small than mini_frame
mask = frame_length > mini_frame
begin_frame = begin_frame[mask]
end_frame = end_frame[mask]
pred_label = pred_label[mask]
Individual_score = []
for idx, g in enumerate(begin_frame):
score_start = global_score[path[g], g]
score_end = global_score[path[end_frame[idx]], end_frame[idx]]
Individual_score.append(score_end - score_start)
## now we delete the gesture outside the vocabulary by choosing
## score lower than a threshold
Individual_score = numpy.array(Individual_score)
frame_length = end_frame - begin_frame
# should be length independent
Individual_score = Individual_score/frame_length
order = Individual_score.argsort()
ranks = order.argsort()
mask = Individual_score > threshold
begin_frame = begin_frame[mask]
end_frame = end_frame[mask]
pred_label = pred_label[mask]
Individual_score = Individual_score[mask]
return [pred_label, begin_frame, end_frame, Individual_score, frame_length]
def Extract_feature_Realtime(Pose, njoints):
#Fcc
FeatureNum = 0
Fcc = numpy.zeros(shape=(Pose.shape[0], int(njoints * (njoints-1)/2*3)))
for joints1 in range(njoints-1):
for joints2 in range(joints1+1,njoints):
Fcc[:, FeatureNum*3:(FeatureNum+1)*3] = Pose[:, joints1*3:(joints1+1)*3]-Pose[:, joints2*3:(joints2+1)*3];
FeatureNum += 1
#F_cp
FeatureNum = 0
Fcp = numpy.zeros(shape=(Pose.shape[0]-1, njoints **2*3))
for joints1 in range(njoints):
for joints2 in range(njoints):
Fcp[:, FeatureNum*3: (FeatureNum+1)*3] = Pose[1:,joints1*3:(joints1+1)*3]-Pose[0:-1,joints2*3:(joints2+1)*3]
FeatureNum += 1
#Instead of initial frame as in the paper Eigenjoints-based action recognition using
#naive-bayes-nearest-neighbor, we use final frame because it's better initiated
# F_cf
Features = numpy.concatenate( (Fcc[0:-1, :], Fcp), axis = 1)
return Features
def imdisplay(im):
""" display grayscale images
"""
im_min = im.min()
im_max = im.max()
return (im - im_min) / (im_max -im_min)
| mikecheninoulu/SMG | online recognition/SMG/THDutils.py | THDutils.py | py | 28,848 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "time.time",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 5... |
32939752267 | # coding=utf-8
from glob import glob
import re
from time import time
import requests
import os
# from xml.dom import minidom
from lxml import etree
import base64
import traceback
import html2text as ht #
from selenium.webdriver.chrome.options import Options
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import configparser
## 配置文件名称
configFileName = "config.ini"
res = "res" # 资源目录名称
head={"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36 Edg/84.0.522.52"}
base64ImgHead = "data:image/png;base64,"
request_url = ""
bypass_tables = False
chromeDriverName = ""
# 初始化配置信息
def init_config():
global request_url
global bypass_tables
global chromeDriverName
config = configparser.ConfigParser()
config.read(configFileName)
request_url = config['base_config']['url']
bypass_tables = config['base_config'].getboolean('bypass_tables')
chromeDriverName = config['base_config']['config_fileName']
## 判断当前协议
def http_protocol(url):
if url.startswith('https://'):
return "https:"
elif url.startswith('http://'):
return "http:"
def get_image_type(Url,Number):
if Url.find(base64ImgHead) != -1:
return ("png_base64", str(Number)+".png")
else:
if Url.find(".jpg") != -1:
return("url", str(Number)+".jpg")
elif Url.find(".png") != -1:
return ("url", str(Number)+".png")
else:
return ("url", str(Number)+".png")
def init_browser(url):
chrome_options = webdriver.ChromeOptions()
# # 禁用浏览器弹窗
prefs = {"profile.default_content_setting_values.notifications": 2}
chrome_options.add_experimental_option("prefs", prefs)
# 设置浏览器不显示
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
driver = webdriver.Chrome(os.path.join(os.getcwd(),chromeDriverName) ,chrome_options=chrome_options)
driver.maximize_window()
driver.get(url)
return driver
# 创建无头浏览器,来处理浏览器执行 跑 n 秒后的页面状态返回回来
def browser_request(url, loadmore = False, waittime = 2):
browser = init_browser(url)
time.sleep(waittime)
if loadmore:
while True:
try:
next_button = browser.find_element_by_class_name("ant-btn")
next_button.click()
time.sleep(waittime)
except Exception as e:
# traceback.print_exc()
# info = traceback.format_exc()
# print(info)
break
else:
time.sleep(waittime)
html = browser.page_source
browser.quit()
return html
def request(url):
Result=requests.get(url,headers=head)
Result.encoding = 'utf-8'
return Result.text
def save_file(fileName, text):
fileRef = open(fileName, "w+", encoding='utf-8') # win 环境必须指定编码类型,否则默认会使用系统的 gbk编码
fileRef.write(text)
fileRef.close()
def get_csdn_title(root):
r = root.xpath('//h1[@id="articleContentId"][@class="title-article"]/text()')[0].replace("/","%")
return r
def get_jianshu_title(root): # //*[@id="__next"]/div[1]/div/div[1]/section[1]/article
r = root.xpath('//section/h1[@class="_1RuRku"]/text()')[0].replace("/","%")
return r
# 下载资源到本地
def download_res(img_url,srcType,name):
# print(img_url)
if srcType == "png_base64" :
imageBase64Str = img_url.replace(base64ImgHead,"")
binary_img_data = base64.b64decode(imageBase64Str)
with open(name,'wb') as fp:
fp.write(binary_img_data)
# print(name,'资源下载成功!!!')
elif srcType == "url":
img_data = requests.get(url=img_url,headers=head).content
with open(name,'wb') as fp:
fp.write(img_data)
# print(name,'资源下载成功!!!')
# 初始化目录
def init_file(dirName):
import shutil
if not os.path.isdir(dirName):
# shutil.rmtree(titleName)
os.mkdir(dirName)
resPath = os.path.join(dirName,res)
if not os.path.isdir(resPath):
os.mkdir(resPath)
return resPath
# 转换为 makedown 并存盘 titleName/titleName.md 文件
def generate_makedown(root, titleName):
allItem = root.xpath('*')
need_str = ""
for p_item in allItem:
get_str = etree.tostring(p_item,encoding="utf-8")
need_str += get_str.decode("utf-8")
text = makedown(need_str)
save_file(os.path.join(titleName,titleName+".md"), text)
def makedown(need_str):
text_maker = ht.HTML2Text()
text_maker.bypass_tables = bypass_tables # 是否使用表格翻译
text = text_maker.handle(need_str)
return text
# 处理好 图片的下载逻辑
def downLoadImgMgr(root, resPath):
imgL = root.xpath('descendant::img')
record_list = {}
Number = 0
for img in imgL:
DownloadUrl = ""
if 'data-original-src' in img.attrib and img.attrib['data-original-src'] != "" :
DownloadUrl = img.attrib['data-original-src']
elif 'src' in img.attrib and img.attrib['src'] != "" :
DownloadUrl = img.attrib['src']
else:
continue
if DownloadUrl.startswith('//'):
DownloadUrl = http_protocol(request_url) + DownloadUrl
if DownloadUrl in record_list: # 已经下载过了不处理
img.attrib['src']=record_list[DownloadUrl]
else:
srcType,ImageName = get_image_type(DownloadUrl, Number)
DownloadImgLocalPath = os.path.join(resPath, ImageName)
ImageNewSrc = os.path.join(res, ImageName)
download_res(DownloadUrl,srcType, DownloadImgLocalPath)
img.attrib['src'] = ImageNewSrc
record_list[DownloadUrl] = ImageNewSrc
# if srcType == "png_base64":
Number+=1
# 请求回一个根目录的节点实例
def request_html(url):
htmlContent = request(url)
return etree_html(htmlContent)
# 请求回一个根目录的节点实例,通过虚拟一个无头浏览器处理,拿到让js跑完后的结构
def request_html_by_browser(url):
htmlContent = browser_request(url, True)
return etree_html(htmlContent)
def etree_html(htmlContent):
return etree.HTML(htmlContent, parser=etree.HTMLParser(encoding='utf-8'))
def scoll_down(html_page):# 滚轮下拉到最底部
html_page.send_keys(Keys.END)
time.sleep(1)
def generate_article(user, content1, content2, AccHtmlContent):
userHtml = user.get_attribute('outerHTML')
content1Html = content1.get_attribute('outerHTML')
content2Html = ""
if len(content2) != 0 :
content2Html = content2[0].get_attribute('outerHTML')
saveHtml = "<div>"+ userHtml + "\n" + content1Html + "\n" + content2Html + "\n" + "</div> \n"
AccHtmlContent += saveHtml
return AccHtmlContent
# csnd 的相关处理
def start_make_csdn():
tree = request_html(request_url)
titleName = get_csdn_title(tree)
resPath = init_file(titleName)
makeRoot = tree.xpath('//div[@id="content_views"]')[0]
downLoadImgMgr(makeRoot, resPath)
generate_makedown(makeRoot, titleName)
# 处理简书
def make_jianshu():
tree = request_html_by_browser(request_url) # 拿到html结构
titleName = get_jianshu_title(tree) # 获取标题
makeRoot = tree.xpath('//section/article')[0]
resPath = init_file(titleName) # 初始化所需目录
downLoadImgMgr(makeRoot, resPath) # 处理资源下载
generate_makedown(makeRoot, titleName) # 处理makedown转换
def start():
init_config()
print(request_url)
if request_url == "" :
print("url配置为空")
return
if request_url.find("jianshu") != -1:
make_jianshu()
elif request_url.find("csdn") != -1:
start_make_csdn()
else:
print("url 错误 不属于csdn也不属于简书")
print("处理 完成")
start() | DuanShaoCheng/csdn_jianshu_to_makedown | csdn_jianshu_to_makedown.py | csdn_jianshu_to_makedown.py | py | 8,178 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 64,
"usage_type": "name"
},
{
"api... |
15235514484 | import win32serviceutil
import win32service
import win32event
import servicemanager
import configparser
import os
import inspect
from multiprocessing import Process, Pipe
from db.sqlitemanager import SQLiteManager
from proc.node_client_process import NodeClientProcess
import utils.script_manager as sm
import utils.logging as logutils
def pipe_recv_handler(node_process, parent_pipe):
node_process._logger.info("Node Pipe Recv Handler Spawned. Listening For Messages")
while True:
command = parent_pipe.recv()
node_process._logger.info("Received Command: " + str(command))
message_for = command["to"]
if message_for == "NODE":
answer = node_process.handle_node_requests(command)
# send the answer back wherever it came (most likely the http)
# send answer if it is not None
if answer is not None:
parent_pipe.send(answer)
else:
node_process._logger.warning("Could Not Determine What Message Is For. Can't Forward Appropriatly")
def bootstrapper(wrapper_object, initialization_tuple):
instance = wrapper_object(initialization_tuple)
instance.start()
exit(0)
class AppServerSvc(win32serviceutil.ServiceFramework):
_svc_name_ = "VesselNode"
_svc_display_name_ = "Vessel Service Node"
_config = configparser.ConfigParser()
_log_dir = None
_role = None
_node_process = None
_script_dir = None
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
self._config.read(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
+ '/conf/service.ini')
self._log_dir = self._config["LOGGING"]["log_dir"]
self._root_dir = self._config["DEFAULT"]["root_dir"]
self._script_dir = self._config["DEFAULT"].get("scripts_dir", self._root_dir + "/scripts")
logutils.initialize_all_logging_configuration(self._log_dir)
self._logger = logutils.node_logger
def SvcStop(self):
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(self.hWaitStop)
def SvcDoRun(self):
self.ReportServiceStatus(win32service.SERVICE_RUNNING)
servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,
servicemanager.PYS_SERVICE_STARTED,
(self._svc_name_, ''))
self._logger.info("Service Is Starting")
self.main()
def handle_node_requests(self, command):
if command["command"] == "SYS" and command["param"] == "SHUTDOWN":
self._logger.info("Shutdown Request Received. Terminating Node")
self.SvcStop()
return None
def main(self):
self._logger.info("Service Is Initializing...")
# setup database
sqlite_manager = SQLiteManager(self._config, self._logger)
# catalogue all the scripts in the system
self._logger.info("Catalogueing Engines On The System")
sm.catalogue_local_engines(sqlite_manager, self._logger)
self._logger.info("Catalogueing Scripts On The System")
sm.catalogue_local_scripts(sqlite_manager, self._script_dir, self._logger)
# create process for listening for node connections
# READ through parent_pipe, WRITE through child_pipe
try:
self._logger.info("Now Creating Pipe")
parent_pipe, child_pipe = Pipe()
self._logger.info("Now Creating NodeClientProcess Class")
# node_listener = NodeListenerProcess(to_parent_pipe, to_child_pipe, self._config)
self._logger.info("Now Creating Process With BootStrapper")
self._node_process = Process(target=bootstrapper,
args=(NodeClientProcess, (child_pipe, self._config, logutils.logging_queue)))
self._logger.info("Now Starting Process")
self._node_process.start()
self._logger.info("Node Process Has Started Running")
except Exception as e:
self._logger.exception("An Exception Was Thrown Starting The Node Listener Process")
self._logger.error("Later - An Exception Was Thrown")
return
# create process for listening for http connections
# start logging thread
l_thread = logutils.start_logging_thread()
rc = None
while rc != win32event.WAIT_OBJECT_0:
self._logger.info("Service Is Now Running")
# hang for 1 minute or until service is stopped - whichever comes first
rc = win32event.WaitForSingleObject(self.hWaitStop, (1 * 60 * 1000))
self._node_process.terminate()
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(AppServerSvc) | bensoer/vessel | node.py | node.py | py | 4,918 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "win32serviceutil.ServiceFramework",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "configparser.ConfigParser",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "win32serviceutil.ServiceFramework.__init__",
"line_number": 52,
"usage_t... |
22123154667 | import unittest
from typing import List
class Solution:
def letterCombinations(self, digits: str) -> List[str]:
if digits is None or len(digits) == 0:
return []
res = [""]
alph = ["", "", "abc", "def", "ghi", "jkl", "mno", "pqrs", "tuv", "wxyz"]
def combine(res, str):
temp = []
for i in range(0, len(str)):
for j in range(0, len(res)):
temp.append(res[j] + str[i])
return temp
for digit in digits:
res = combine(res, alph[int(digit)])
return res
class UnitTest(unittest.TestCase):
def test(self):
digits = "23"
self.assertEqual(["ad","ae","af","bd","be","bf","cd","ce","cf"], Solution().letterCombinations(digits))
if __name__ == "__main__":
unittest.main() | AllieChen02/LeetcodeExercise | String/P17LetterCombinationsOfAPhoneNumber/LetterCombinationsOfAPhoneNumber.py | LetterCombinationsOfAPhoneNumber.py | py | 842 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "unittest.TestCase",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "unittest.main",
"line_number": 27,
"usage_type": "call"
}
] |
23319819904 | from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404, render
from main.models.models import Devices, Like
@login_required
def add_like(request, device_id):
user=request.user
device = get_object_or_404(Devices, pk=device_id)
# Sprawdź, czy użytkownik ma już niepolubienie dla tego urządzenia
existing_dislike = Like.objects.filter(user_id=user, devices_id=device, dislike=True).first()
if existing_dislike:
existing_dislike.delete() # Usuń dislike
like = Like(user_id=user, devices_id=device, like=True, dislike=False)
like.save()
return HttpResponseRedirect(request.META['HTTP_REFERER'])
@login_required
def add_dislike(request, device_id):
user=request.user
device = get_object_or_404(Devices, pk=device_id)
# Sprawdź, czy użytkownik ma już like dla tego urządzenia
existing_like = Like.objects.filter(user_id=user, devices_id=device, like=True).first()
if existing_like:
existing_like.delete() # Usuń like
dislike = Like(user_id=user, devices_id=device, like=False, dislike=True)
dislike.save()
return HttpResponseRedirect(request.META['HTTP_REFERER'])
@login_required
def remove_like(request, device_id):
device = get_object_or_404(Devices, pk=device_id)
user=request.user
like = Like.objects.filter(user_id=user, devices_id=device, like=True).first()
if like:
like.delete()
return HttpResponseRedirect(request.META['HTTP_REFERER'])
@login_required
def remove_dislike(request, device_id):
device = get_object_or_404(Devices, pk=device_id)
user=request.user
dislike = Like.objects.filter(user_id=user, devices_id=device, dislike=True).first()
if dislike:
dislike.delete()
return HttpResponseRedirect(request.META['HTTP_REFERER']) | MarcinzNS/mdvos-priv | mdvos/main/views/like_devices.py | like_devices.py | py | 1,887 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "main.models.models.Devices",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "main.models.models.Like.objects.filter",
"line_number": 14,
"usage_type... |
20382049470 | import time
import Adafruit_GPIO.SPI as SPI
import Adafruit_SSD1306
import Adafruit_LSM303
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
# Raspberry Pi pin configuration:
RST = 24
DC = 23
SPI_PORT = 0
SPI_DEVICE = 0
disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST)
# Note you can change the I2C address by passing an i2c_address parameter like:
disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, i2c_address=0x3d)
# Create a LSM303 instance.
lsm303 = Adafruit_LSM303.LSM303()
disp.begin()
disp.clear()
disp.display()
width = disp.width
height = disp.height
image = Image.new('1', (width, height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
x = 2
top = 2
padding = 12
# Load default font.
font = ImageFont.load_default()
dot_arr = []
dot_max = 50
def map_range(a,b, s):
(a1, a2), (b1, b2) =a,b
return b1 + ((s -a1) * (b2 -b1) / (a2 - a1))
while True:
draw.rectangle((0,0,width,height), outline=0, fill=0)
# draw graph
draw.line((padding, height-padding, padding, -height+padding), fill=255)
draw.line((padding, height-padding, padding+width, height-padding), fill=255)
# Read the X, Y, Z axis acceleration values and print them.
accel, mag = lsm303.read()
# Grab the X, Y, Z components from the reading and print them out.
accel_x = accel[0]
if len(dot_arr) <= 115:
dot_arr.append(accel_x)
else:
del dot_arr[0]
dot_arr.append(accel_x)
x_pos = 15
for i in range(1,len(dot_arr)):
y_pos = map_range((520, -520), (0 , height - padding), dot_arr[i])
last_y_pos = map_range((520, -520), (0 , height - padding), dot_arr[i-1])
y_pos = round(y_pos)
draw.point((x_pos, y_pos), fill=255)
draw.line((x_pos,y_pos,x_pos-1,last_y_pos),fill=255)
x_pos += 1
disp.image(image)
disp.display()
# Wait half a second and repeat.
time.sleep(0.5)
| pguiffr62/Engineering_4_Notebook | Python/headless.py | headless.py | py | 1,901 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "Adafruit_SSD1306.SSD1306_128_32",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "Adafruit_SSD1306.SSD1306_128_64",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "Adafruit_LSM303.LSM303",
"line_number": 21,
"usage_type": "call"
},
{... |
17324810580 | '''
Created on 2012/03/20
@author: hogelog
'''
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
import threading
import logging
class HttpControlerHandler(SimpleHTTPRequestHandler):
def do_GET(self):
mapping = self.mapping()
path = self.path
if path in mapping:
content = mapping[path]()
if content:
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(content)
else:
self.send_error(500, "Server Error")
self.end_headers()
else:
self.send_error(404, "Not Found")
self.end_headers()
def mapping(self):
return {}
class BaseHttpControlerHandler(HttpControlerHandler):
def index(self):
return "Hello Http Controler!"
def mapping(self):
return {
"/": self.index,
}
class HttpControler(threading.Thread, HTTPServer):
def __init__(self, host, port, handler):
self.address = (host, port)
self.handler = handler
threading.Thread.__init__(self)
HTTPServer.__init__(self, self.address, self.handler)
def run(self):
logging.info("start server: %s", self.address)
self.serve_forever()
def __stop(self):
threading.Thread.__stop(self)
logging.info("stop server: %s", self.address)
if __name__ == '__main__':
class Handler(BaseHttpControlerHandler):
def start(self):
return "Start!"
def stop(self):
return "Stop!"
def mapping(self):
mapping = BaseHttpControlerHandler.mapping(self)
mapping["/start"] = self.start
mapping["/stop"] = self.stop
return mapping
server = HttpControler("0.0.0.0", 12345, Handler)
server.start()
# server.shutdown()
| hogelog/real-lamp | src/controler.py | controler.py | py | 1,977 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "SimpleHTTPServer.SimpleHTTPRequestHandler",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "threading.Thread",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "BaseHTTPServer.HTTPServer",
"line_number": 45,
"usage_type": "name"
},
... |
42042144582 | import sqlite3
name = input("Name of the pdgm project you are working on: ")
db = sqlite3.connect(name + ".sqlite")
cursor = db.cursor()
print("""Please enter source and target of your dependencies.
After each entry you have to confirm the entry. The question can be answered in four ways:
y - yes: Valid entry, continue with next entry
n - no: Invalid entry, continue with next entry
d - done: Valid entry, submit and stop the program
q - quit: Invalid entry, stop without submitting
The answers are case-insensitive. The default (empty) answer means [y]es.
Every other answer is a [n]o.
""")
source = ""
def newlink():
global source, cursor
source = input("Source[" + source + "]: ") or source
target = input("Target: ")
check = input("Do you want to add link (" + source + ", " + target + ")? [y]ndq: ") or "y"
check = check.lower()
if check == "y":
cursor.execute("INSERT INTO links VALUES (?,?)", (source, target))
print("Added link (" + source + ", " + target + ")")
return 1
elif check == "d":
cursor.execute("INSERT INTO links VALUES (?,?)", (source, target))
print("Added link (" + source + ", " + target + ")")
return -1
elif check == "q":
return -2
else:
return 0
additions = 0
while True:
v = newlink()
if v == -1:
additions += 1
break
elif v == 1:
additions += 1
elif v == -2:
break
print("Added %d links to the database." % (additions,))
db.commit()
db.close()
| grietje/proof-dependency-graph-maker | fill.py | fill.py | py | 1,438 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlite3.connect",
"line_number": 5,
"usage_type": "call"
}
] |
29443696453 | ########################################################################################################
#
# Determinado banco possui os dados de histórico de empréstimo, vistos na tabela abaixo. Com esse dados,
# o banco solicitou que fosse construído um modelo que fornecendo os dados de entrada, indique se deverá
# fornecer ou não o empréstimo
#
#########################################################################################################
# Importando as bibliotecas
import pandas as pd
from sklearn.naive_bayes import GaussianNB
from sklearn import preprocessing
# Gerando o dataset
dados = {'Tempo': ['Ensolarado', 'Ensolarado', 'Nublado', 'Chuvoso', 'Chuvoso', 'Chuvoso',
'Nublado', 'Ensolarado', 'Ensolarado', 'Chuvoso'],
'Umidade': ['Alta', 'Alta', 'Alta', 'Alta', 'Normal', 'Normal', 'Normal', 'Alta',
'Normal', 'Normal'],
'Vento': ['Fraco', 'Forte', 'Fraco','Fraco', 'Fraco', 'Forte', 'Forte', 'Fraco', 'Fraco', 'Fraco'],
'Treinou': ['Não', 'Não', 'Sim', 'Sim', 'Sim', 'Não', 'Sim', 'Não', 'Sim','Sim']}
dados = pd.DataFrame(data = dados)
# Criando o LabelEncoder
tempo_lbencoder = preprocessing.LabelEncoder()
umidade_lbencoder = preprocessing.LabelEncoder()
vento_lbencoder = preprocessing.LabelEncoder()
treinou_lbencoder = preprocessing.LabelEncoder()
# Usando o LabelEncoder para atribuir números às variáveis qualitativas
tempo_lbencoder.fit(dados['Tempo'].unique())
umidade_lbencoder.fit(dados['Umidade'].unique())
vento_lbencoder.fit(dados['Vento'].unique())
treinou_lbencoder.fit(dados['Treinou'].unique())
# Transformando o dataset de variáveis qualitativas para variáveis quantitativas
dados['Tempo'] = tempo_lbencoder.transform(dados['Tempo'])
dados['Umidade'] = umidade_lbencoder.transform(dados['Umidade'])
dados['Vento'] = vento_lbencoder.transform(dados['Vento'])
dados['Treinou'] = treinou_lbencoder.transform(dados['Treinou'])
# Separando o nosso dataset nos atributos previsores e na classe objetivo
previsor = dados[['Tempo','Umidade','Vento']]
classe = dados['Treinou']
# Criando o classificador NaiveBayes
gnb = GaussianNB()
gnb.fit(previsor, classe)
# Verificando a precisão
print("\n Precisão = ", gnb.score(previsor, classe)*100,"%")
# Inserindo novos dados para serem previstos
previsao = {'Tempo': ['Ensolarado', 'Nublado', 'Nublado','Chuvoso'],
'Umidade': ['Normal', 'Alta','Normal','Alta'],
'Vento':['Forte', 'Forte', 'Fraco', 'Forte']}
previsao = pd.DataFrame(data = previsao)
previsao['Tempo'] = tempo_lbencoder.transform(previsao['Tempo'])
previsao['Umidade'] = umidade_lbencoder.transform(previsao['Umidade'])
previsao['Vento'] = vento_lbencoder.transform(previsao['Vento'])
# Verificando o resultado
print("\n", gnb.predict(previsao))
print("\n", treinou_lbencoder.inverse_transform(gnb.predict(previsao)))
# Verificando as probabilidades
print("\n", gnb.predict_proba(previsao))
| Grinduim/Bosch-2022.2 | Bosch/InnoHub/Treinamento de IA/materiais/Exemplos_1/NAIVE_BAYES_EXAMPLES/TREINO_DE_FUTEBOL.py | TREINO_DE_FUTEBOL.py | py | 2,980 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "pandas.DataFrame",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing",
"line_number": 25,
"usage_type": "name"
},
{
"api_nam... |
73034299553 | # _*_ coding: utf-8 _*_
import os
from datetime import datetime
from scrapy import Spider, Request
from pyquery import PyQuery as pq
class Itjuzi(Spider):
name = 'itjuzi_spider'
def start_requests(self):
meta = {}
meta['page'] = 0
meta['url'] = 'http://itjuzi.com/company?page=%d'
url = 'http://itjuzi.com/company?page=%d' % meta['page']
yield Request(url, callback=self.parse_list, meta=meta)
def parse_list(self, response):
meta = response.meta
html = response.body_as_unicode()
html_pq = pq(html)
divs = html_pq('div.company-list.clearfix.childen-hover div.company-list-item')
for div in divs:
url = pq(div)('a:eq(0)').attr['href']
#url = 'http://itjuzi.com/company/21253'
yield Request(url, callback=self.parse_detail)
#return [Request(url, callback=self.parse_detail)]
if u'下一页' in html_pq('div.pagination.pagination-right').text():
meta['page'] += 1
url = meta['url'] % meta['page']
yield Request(url, callback=self.parse_list, meta=meta)
def parse_detail(self, response):
meta = response.meta
html = response.body_as_unicode()
html_pq = pq(html)
keys = [u'网址', u'公司', u'时间', u'地点', u'状态', u'阶段', u'行业', u'子行业', u'TAG', u'简介']
values = ['' for _ in keys]
team = []
products = []
fund_demand = fund_status = ''
funds = []
records = []
project_name = html_pq('#com_id_value').text()
divs = html_pq('div.normal-box, div.normal-box-no-pad')
for div in divs:
h2_text = pq(div)('h2:eq(0)').text()
if u'基础信息' in h2_text:
lis = pq(div)('ul.detail-info li')
for li in lis:
key = pq(li).contents()[0]
for i in range(len(keys)):
if key.startswith(keys[i]):
contents = map(lambda x: pq(x).text().replace(',', u',').replace('\r', '').replace('\n', '').strip() if hasattr(x, 'text') \
else x.replace(',', u',').replace('\r', '').replace('\n', '').strip(), pq(li).contents())
values[i] = ''.join(contents[1:])
elif u'团队介绍' in h2_text:
trs = html_pq('#company-member-list-tbl tr')
for tr in trs:
td = pq(tr)('td')
name = td.eq(1).text().strip()
position = td.eq(2).text().strip()
member = '|'.join([name, position]).replace(',', u',').replace('\r', '').replace('\n', '')
team.append(member)
elif u'产品介绍' in h2_text:
product_divs = pq(div)('div.company-product-item')
for product_div in product_divs:
product_name = pq(product_div)('h3').text().strip()
product_brief = pq(product_div)('p').text().strip()
product = '|'.join([product_name, product_brief]).replace(',', u',').replace('\r', '').replace('\n', '')
products.append(product)
elif u'融资需求' in h2_text:
fund_demand = pq(div)('#company-fund-status').text()
elif u'获投状态' in h2_text:
fund_status = pq(div)('#company-fund-status').text()
fund_divs = pq(div)('div.company-fund-item')
for fund_div in fund_divs:
fund_time = ''
fund_stage = pq(fund_div)('h3 b').text()
tmp = pq(fund_div)('h3').contents()
if len(tmp) > 1:
fund_time = tmp[1].strip()
money = pq(fund_div)('p.company-fund-item-money').text()
investor = pq(fund_div)('p:eq(1)').text()
fund = '|'.join([fund_time, fund_stage, money, investor]).replace(',', u',').replace('\r', '').replace('\n', '')
funds.append(fund)
elif u'里程碑' in h2_text:
record_lis = pq(div)('#company-mile li')
for li in record_lis:
time = pq(li)('b').text()
description = pq(li)('p').text()
record = '|'.join([time, description]).replace(',', u',').replace(';', u'。').replace('\r', '').replace('\n', '')
records.append(record)
today = datetime.today().strftime('%Y-%m-%d')
info = [project_name, ','.join(values), response.url, today, ';'.join(team), ';'.join(products), \
fund_demand, fund_status, ';'.join(funds), ';'.join(records)]
result_path = os.path.abspath(__file__).replace('python/itjuzi/itjuzi/spiders/itjuzi_spider.py', 'result/itjuzi.txt')
fw = open(result_path, 'a')
fw.write(','.join(info).encode('utf8') + '\n')
fw.close()
| shineforever/ops | vdian_spider/python/itjuzi/itjuzi/spiders/itjuzi_spider.py | itjuzi_spider.py | py | 5,060 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "scrapy.Spider",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "scrapy.Request",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pyquery.PyQuery",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pyquery.PyQuery",
"l... |
22837266278 | # %% [markdown]
# # Extracting attributes from timedelta columns via a ColumnTransformationPlugin
#
# inspired by Sailu and the following stackoverflow question:
# - https://stackoverflow.com/questions/38355816/pandas-add-timedelta-column-to-datetime-column-vectorized
#
# __Goal:__ extract the number of weeks as float based on the timedelta column
# %%
import pandas as pd
import numpy as np
import bamboolib as bam
# %%
df = pd.DataFrame()
# %%
df["date"] = ["2016-01-10", "2016-05-11", "2016-02-23", "2015-12-08"]
df["date"] = pd.to_datetime(df["date"])
# %%
df["days"] = [28, 7, 15, 30]
df["days"] = pd.to_timedelta(df["days"], "d")
# %%
# # solution:
# df['weeks'] = df['days'] / np.timedelta64(1, 'W')
# %%
import ipywidgets as widgets
from bamboolib.plugins import ColumnTransformationPlugin, DF_OLD, SelectizeDropdown
class TimedeltaExtractAttribute(ColumnTransformationPlugin):
name = "Timedelta: extract attribute"
def __init__(self, *args, column=None, **kwargs):
super().__init__(*args, **kwargs)
self.column = column
# based on https://docs.scipy.org/doc/numpy/reference/arrays.datetime.html#datetime-units
self.attribute = SelectizeDropdown(
options=[
("years", "Y"),
("months", "M"),
("weeks", "W"),
("days", "D"),
("hours", "h"),
("minutes", "m"),
("seconds", "s"),
],
value="D",
focus_after_init=True,
)
self.new_column_name = widgets.Text(value=self.column)
def render(self):
self.set_title("Extract attribute")
self.set_content(
widgets.HTML(f"Convert <b>{self.column}</b> to"),
self.attribute,
widgets.HTML("New column name:"),
self.new_column_name,
)
def get_description(self):
return (
f"Extract timedelta attribute {self.attribute.label} from '{self.column}'"
)
def get_code(self):
return f"{DF_OLD}['{self.new_column_name.value}'] = {DF_OLD}['{self.column}'] / np.timedelta64(1, '{self.attribute.value}')"
# %% [markdown]
# __Hint:__ The plugin is shown in bamboolib when clicking on the column header of 'days' and searching for the transformation
# %%
df
# %%
| hussien-hussien/bamboolib | plugins/examples/timedelta_extract_attributes.py | timedelta_extract_attributes.py | py | 2,343 | python | en | code | null | github-code | 1 | [
{
"api_name": "pandas.DataFrame",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.to_timedelta",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "bamboolib.plu... |
12335503668 | import subprocess
import os
import csv
import sys
from sys import platform as _platform
import traceback
import argparse
import re
# Platform
os_platform = ""
if _platform == "linux" or _platform == "linux2":
os_platform = "linux"
elif _platform == "darwin":
os_platform = "macos"
elif _platform == "win32":
os_platform = "windows"
# Win32 Imports
if os_platform == "windows":
try:
import wmi
import win32api
from win32com.shell import shell
import win32file
except Exception as e:
print("Linux System - deactivating process memory check ...")
os_platform = "linux" # crazy guess
if os_platform == "":
print("Unable to determine platform - LOKI is lost.")
sys.exit(1)
class Node:
def __init__(self, pid, ppid, image_file_name):
self.pid = pid
self.ppid = ppid
self.image_file_name = image_file_name
self.children = []
class AutoVol3(object):
normal_proc = {}
suspect_proc = {}
regex_patterns = []
whitelist_paths = []
normal_paths =[]
normal_paths_x86 =[] # The normal path for a process if x86
normal_process_path = [] # The normal path for a process
process_path = {} # The path for the running process from dlllist
normal_sids = {}
suspecious_proc_sids = {}
suspect_cmdlines = {}
suspect_netscan = {}
dll_stacking = {}
process_tree = {}
pslist = {}
psscan ={}
dlllist = {}
cmdline = {}
netscan ={}
getsids ={}
handles = {}
baseline_proc = {}
evidence_bag = []
score = {}
plugins = ['pslist', 'psscan', 'pstree', 'dlllist', 'cmdline', 'netstat', 'netscan', 'handle', 'getsids']
analyze = ['List suspecious processes from EPROCESS', 'List suspecious processes with memory scanning', 'List suspecious cmdlines', 'List suspecious DLLs',
'List suspecious communications from EPROCESS', 'List suspecious communications with memory scanning', 'Compare the suspect image with baseline',
'List processes running with suspecious account']
csv_files = ['pslist.csv', 'psscan.csv', 'pstree.csv', 'dlllist.csv', 'cmdline.csv', 'netstat.csv', 'netscan.csv', 'handles.csv', 'getsids.csv']
def __init__(self, image):
self.image = image
self.app_path = get_application_path()
self.csvgen(os.path.join(self.app_path, args.p))
self.initialize_normal_proc(os.path.join(self.app_path, 'normal_proc.txt'))
self.initialize_normal_paths(os.path.join(self.app_path, 'normal_paths.txt'))
self.initialize_normal_sids(os.path.join(self.app_path, 'normal_sids.txt'))
self.initialize_whitelist_paths('whitelist.txt')
self.initialize_regex_patterns('regex_patterns.txt')
# Read the required file from running the volatility plugins.
self.csv_reader('pslist.csv')
self.csv_reader('psscan.csv')
self.csv_reader('dlllist.csv')
self.csv_reader('cmdline.csv')
self.csv_reader('netscan.csv')
self.csv_reader('getsids.csv')
self.csv_reader('handles.csv')
self.malhandles()
# Analyzing memory for suspicious processes
self.procTree(os.path.join(self.app_path, args.p) + '/'+'pslist.csv')
self.malproc(self.process_tree['4'])
self.malpath()
self.malcmdline()
self.malcomm('blacklist.txt')
self.baseline('proc_baseline.txt')
self.malgetsids()
self.find_processes_without_parents()
# The following function must be run the last one
self.malicious_weight()
header = ['Source Name', 'PID', 'PPID','Process Name', 'Path', 'Timestamps', 'Long Description']
self.evidence_bag.append(','.join(header))
def run_plugin(self, memory_image_path, output_file):
# if output_file == 'netscan.csv':
# command = ["vol2.py", "--profile", "Win10x64_15063", "-f", memory_image_path, output_file[:-4]]
# else:
command = ["vol", "-r", "csv", "-f", memory_image_path, 'windows.'+output_file[:-4]]
try:
result = subprocess.run(command, capture_output=True, text=True, check=True)
output = result.stdout
# Save the result to the specified CSV file
with open(os.path.join(self.app_path, args.p)+'/'+output_file, 'w', encoding='utf-8') as csv_file:
csv_file.write(output)
except subprocess.CalledProcessError as e:
print("Error:", e)
print("Return Code:", e.returncode)
print("Command Output:", e.output)
except Exception as e:
print("An error occurred:", str(e))
def csvgen(self, memory_image_path):
# Specify the directory path you want to list files from
#directory_path = "./memory"
try:
# Use the os.listdir() function to get a list of files in the directory
files = os.listdir(memory_image_path)
# Check if there are any .img files in the directory
img_files = [file for file in files if file.endswith(".img")]
if not img_files:
print(f"No '.img' files found in '{memory_image_path}'.")
else:
# Loop through and print the names of .img fil
for file in self.csv_files:
if file not in files:
print('File not found:', file)
print('Generate file:', file,'...')
self.run_plugin(memory_image_path+'/'+img_files[0],file)
# if file == 'dlllist.csv':
# result = list_dlls(img_files[0])
# elif file == 'cmdline.csv':
except FileNotFoundError:
print(f"The directory '{memory_image_path}' does not exist.")
except PermissionError:
print(f"You do not have permission to access '{memory_image_path}'.")
except Exception as e:
print(f"An error occurred: {str(e)}")
def baseline(self, baseline_file):
try:
# Use the os.listdir() function to get a list of files in the directory
fpath = os.path.join(self.app_path, args.p)
files = os.listdir(fpath)
# If proc_baseline.txt is not found, uncomment the next two lines to create it. It will takkke sometime. Be patient
if baseline_file not in files:
# Define the command as a list of strings
print('proc_baseline.txt file was found, generating this file ...')
command = ["python3", "/opt/memory-baseliner/baseline.py", "-proc", "-i", "/cases/memory/base-rd01-memory.img",
"--loadbaseline", '--jsonbaseline', '/cases/precooked/memory/Win10x64_proc.json', '-o', fpath+'/'+'proc_baseline.txt']
# Run the command and capture its output
completed_process = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
except FileNotFoundError:
print(f"The directory '{fpath}' does not exist.")
except PermissionError:
print(f"You do not have permission to access '{fpath}'.")
except Exception as e:
print(f"An error occurred: {str(e)}")
# Read the content of the input file and replace '|' with ','
with open(fpath+'/'+'proc_baseline.txt', "r") as input_file:
content = input_file.read()
modified_content = content.replace("|", ",")
modified_content = modified_content.strip().split('\n')[1:]
for row in modified_content:
columns = row.split(',')
pid = columns[0]
if '.exe' in columns[6]:
# if pid not in self.baseline_proc:
# self.baseline_proc[pid] = []
self.baseline_proc[pid.strip('"')] = [''.join(row)]
#print(self.baseline_proc)
# Write the modified content back to the output file
# with open(fpath+'/'+'suspecious_proc.txt', "w") as output_file:
# output_file.write(modified_content)
# print("Replacement complete. Output written to", 'suspecious_proc.txt')
def initialize_normal_proc(self, normal_proc_file):
# Open and read the "normal_proc.txt" file
with open(normal_proc_file, 'r') as file:
for line in file:
# Split each line into parent and child using ':' as the separator
parent, child = line.strip().split(':')
# If the parent is not in the dictionary, add it with an empty list of children
if parent not in self.normal_proc:
self.normal_proc[parent] = []
# Append the child to the parent's list of children
self.normal_proc[parent].append(child)
# The resulting dictionary is parsed_normal_proc
def initialize_whitelist_paths(self, whitelist_file):
# Open and read the "normal_proc.txt" file
# You need to change this list "normal_proc.txt", because it is not clean, missing a lot of system files and dlls.
with open(whitelist_file, 'r') as file:
for line in file:
# Split each line into parent and child using ':' as the separator
self.whitelist_paths.append(line.strip('\n').split(',')[0].lower())
#print(self.whitelist_paths)
def initialize_normal_sids(self, normal_sids_file):
# Open and read the "normal_proc.txt" file
with open(normal_sids_file, 'r') as file:
for line in file:
# Split each line into parent and child using ':' as the separator
parent, child = line.strip().split(':')
# If the parent is not in the dictionary, add it with an empty list of children
if parent not in self.normal_sids:
self.normal_sids[parent] = []
# Append the child to the parent's list of children
self.normal_sids[parent].append(child)
def initialize_normal_paths(self, fpath):
try:
with open(fpath, newline='') as file:
for line in file:
# self.normal_process_path.append(line.strip('\n').split('\\')[-1])
if 'program files (x86)' in line:
self.normal_paths_x86.append(line.strip('\n'))
else:
self.normal_paths.append(line.strip('\n'))
except FileNotFoundError:
print(f"The directory '{fpath}' does not exist.")
except PermissionError:
print(f"You do not have permission to access '{fpath}'.")
except Exception as e:
print(f"An error occurred: {str(e)}")
def initialize_blacklist_addresses(self, file_path):
# Read the blacklist addresses from a file and return them as a set
with open(file_path, mode='r') as blacklist_file:
return set(line.strip() for line in blacklist_file)
def initialize_regex_patterns(self, regex_file):
# Read regex patterns from the file
with open(regex_file, 'r') as patterns_file:
for line in patterns_file:
if line.startswith('#') or not line:
continue # Skip this line and move to the next one
line = line.strip() # Remove leading/trailing whitespace
if line:
self.regex_patterns.append(line)
#print(self.regex_patterns)
def procTree(self, proc_file):
    """Build self.process_tree (pid -> Node) from a pslist-style CSV with
    PID, PPID and ImageFileName columns, linking each node to its parent
    via add_child()."""
    try:
        with open(proc_file, newline='') as csvfile:
            for record in csv.DictReader(csvfile):
                pid = record['PID']
                ppid = record['PPID']
                image = record['ImageFileName']
                # Create nodes on first sight; a parent seen only as a PPID
                # gets a placeholder whose name is filled in when (if) its
                # own row arrives.
                if pid not in self.process_tree:
                    self.process_tree[pid] = Node(pid, ppid, image)
                if ppid not in self.process_tree:
                    self.process_tree[ppid] = Node(ppid, 0, '')
                self.process_tree[pid].image_file_name = image
                add_child(self.process_tree[ppid], self.process_tree[pid])
    except FileNotFoundError:
        print(f"The directory '{proc_file}' does not exist.")
    except PermissionError:
        print(f"You do not have permission to access '{proc_file}'.")
    except Exception as e:
        print(f"An error occurred: {str(e)}")
def get_parent(self, pid):
    """Return the Node that is the parent of *pid*, or None when either
    the pid or its parent is not in self.process_tree."""
    node = self.process_tree.get(pid)
    if node is None:
        return None
    return self.process_tree.get(node.ppid)
def find_parent_recursive(self, child_pid):
    """Walk up the process tree from *child_pid*: every not-yet-scored
    ancestor inherits the child's score and has its evidence collected.
    Recursion stops at svchost.exe or when no parent is known."""
    parent = self.get_parent(child_pid)
    if not parent:
        return
    if parent.pid not in self.score:
        # score tracks pids while evidence_bag does not; the parent's
        # score is set equal to the child's.
        self.score[parent.pid] = self.score[child_pid]
        self.collect_evidence(parent.pid)
    if parent.image_file_name != 'svchost.exe':
        self.find_parent_recursive(parent.pid)
def malproc(self, node):
    """Depth-first scan of the process tree rooted at *node*: every
    parent/child pair not present in the known-good self.normal_proc
    mapping is flagged into self.suspect_proc.

    Other checks (zero-parent processes, weird paths, hidden processes,
    impersonation) are handled by sibling methods.
    """
    if not node:
        return
    for child in node.children:
        allowed = (node.image_file_name in self.normal_proc
                   and child.image_file_name in self.normal_proc[node.image_file_name])
        if not allowed:
            self.suspect_proc[child.pid] = self.pslist[child.pid][2:] + ', Suspecious parent-child relationship'
        self.malproc(child)
# 2. Should find zero parent processes
def find_processes_without_parents(self):
    """Flag processes whose PPID is unknown to the process tree.

    "Zero-parent" processes (other than the idle pid 0) often indicate a
    parent that exited or was hidden; the first child of each orphaned
    placeholder node is recorded in self.suspect_proc.  Now guards
    against placeholder nodes with no children, which used to raise
    IndexError on ``node.children[0]``.
    """
    for pid, node in self.process_tree.items():
        if node.ppid in self.process_tree:
            continue
        if pid == '0':
            continue  # the idle/root pseudo-process legitimately has no parent
        if not node.children:
            continue  # placeholder node without children: nothing to report
        child = node.children[0]
        self.suspect_proc[child.pid] = self.pslist[child.pid][2:] + ', Has a zero root parent'
# 3. Should find processes running from weird paths
def malpath(self):
    """Mark every pslist process whose on-disk path (lower-cased) is in
    neither known-good path list as suspicious."""
    for pid in self.pslist:
        raw = self.process_path.get(pid)
        if raw is None:
            continue
        path = raw.lower()
        if path not in self.normal_paths and path not in self.normal_paths_x86:
            self.suspect_proc[pid] = self.pslist[pid]
def find_cmd_child(self, path):
    """Return the PID of the process whose command line contains *path*
    but is not itself a cmd.exe invocation (i.e. the child that a
    cmd.exe launch produced), or None when no such process exists."""
    for record in self.cmdline.values():
        args = record[2]
        if path in args and 'cmd.exe' not in args:
            return record[0]
    return None
def malcmdline(self):
    """Scan every captured command line against self.regex_patterns and
    record matching processes in self.suspect_cmdlines.  For cmd.exe
    launches, the child process the shell spawned is looked up and
    recorded as well."""
    for key in self.cmdline:
        record = self.cmdline[key]
        args = record[2]
        pid = record[0]
        # Try each pattern in turn against the command-line arguments.
        for pattern in self.regex_patterns:
            hit = re.search(pattern, args)
            if not hit:
                continue
            path_executed = hit.group(0)
            if pid not in self.suspect_cmdlines:
                self.suspect_cmdlines[pid] = record
            # When cmd.exe executed, it will create a process: search for that
            # child and record it too.  (Similar handling is still needed for
            # powershell.exe, wscript.exe, wmiprvse.exe, rundll32.exe,
            # dllhost.exe, ...; falls back to psscan data if absent from
            # cmdline.)
            if 'cmd\\.exe' in pattern:
                pid = self.find_cmd_child(path_executed)
                if pid:
                    if pid not in self.suspect_cmdlines:
                        self.suspect_cmdlines[pid] = self.cmdline[pid]
                else:
                    print(f"No match found for {path_executed}")
def malcomm(self, blacklist_file_path):
    """Inspect every netscan connection and collect the suspicious ones
    into self.suspect_netscan (pid -> list of annotated CSV rows).

    Heuristics applied per connection:
      1. non-browser process talking over 80/443/8080
      2. browser process talking over a non-web port
      3. RDP (3389) reached directly from an external source address
         (external RDP is normally routed through a VPN concentrator)
      4. destination address present in the blacklist file
      5. workstation-to-workstation traffic (possible lateral movement —
         workstations normally talk to servers, not to each other)
      6. WinRM (5985/5986) to a non-local destination

    The previous implementation duplicated the entire classification
    chain for the single-connection case; both paths are now unified,
    which also applies the WinStore.App.e exclusion consistently.
    """
    # TODO: add an option describing the site's VPN concentrator addresses.
    blacklist_addresses = self.initialize_blacklist_addresses(blacklist_file_path)
    # Browser processes excluded from the "web ports" heuristic.
    browsers = ["chrome.exe", "firefox.exe", "iexplore.exe", "edge.exe"]  # Add more if needed
    for pid in self.netscan:
        suspect_comm = []
        for item in self.netscan[pid]:
            if 'WinStore.App.e' in item:
                continue
            row = item.split(',')
            # Skip unbound/listening placeholder sockets.
            if (':::0' in row[2]) or ('0.0.0.0:0' in row[2]) or ('::1:0' in row[2]):
                continue
            local = row[2].split(':')
            src_ip, local_port = local[0], local[1]
            foreign = row[3].split(':')
            dst_ip, foreign_port = foreign[0], foreign[1]
            owner = row[6].lower()  # case-insensitive process-name comparison
            # 1. Any process communicating over port 80, 443, or 8080 that is not a browser
            if foreign_port in ["80", "443", "8080"] and not any(b in owner for b in browsers):
                suspect_comm.append(','.join(row) + ' A process that is not browser using port: ' + foreign_port)
            # 2. Any browser process not communicating over port 80, 443, or 8080
            elif any(b in owner for b in browsers) and foreign_port not in ["80", "443", "8080"]:
                suspect_comm.append(','.join(row) + ' A browser communicating over unusual port: ' + foreign_port)
            # 3. RDP connections (port 3389) originating from odd IP addresses.
            elif foreign_port == "3389" and not src_ip.startswith(("*", "::", "0.0.0.0", "127.0.0.1", "172.16.", "192.168.")):
                suspect_comm.append(','.join(row) + ' External IP communicating directly with RDP port')
            # 4. Connections to blacklisted internal or external IP addresses.
            elif dst_ip in blacklist_addresses:
                suspect_comm.append(','.join(row))
            # 5. Workstation to workstation connections (lateral movement).
            elif ((src_ip.startswith("172.16.") and dst_ip.startswith("172.16.")) or
                  (src_ip.startswith("192.168.") and dst_ip.startswith("192.168."))):
                suspect_comm.append(','.join(row) + ' Workstation to workstation communication')
            # 6. WinRM to a non-local destination.
            elif foreign_port in ["5985", "5986"] and not dst_ip.startswith(("0.0.0.0", "127.0.0.1")):
                suspect_comm.append(','.join(row))
        # suspect_comm is a per-pid accumulator; record only non-empty results.
        if suspect_comm:
            if pid not in self.suspect_netscan:
                self.suspect_netscan[pid] = []
            self.suspect_netscan[pid].append(','.join(suspect_comm))
def malgetsids(self):
    """Flag processes whose SID/process pairing deviates from the
    known-good self.normal_sids mapping (SID -> allowed process names).

    Two cases are recorded in self.suspecious_proc_sids:
      * a known system process running under an unknown (user) SID, and
      * an unknown process running under a known (system) SID.

    Bug fix: the original compared the process against the allow-list of
    the *unknown* SID (``self.normal_sids.get(sid, [])`` with sid known
    to be absent — always empty), so the "system process under a user
    account" annotation could never fire.  It now checks the process
    name against all known SIDs' allow-lists.
    """
    for pid in self.getsids:
        row = self.getsids[pid][0].split(',')
        process = row[2]
        sid = row[3]
        if sid not in self.normal_sids:
            # Unknown SID: extra-suspicious when the binary is one we
            # normally only see under a known (system) SID.
            if any(process in allowed for allowed in self.normal_sids.values()):
                row.append(' --> Malicious Process: System process running with user account')
            self.suspecious_proc_sids[pid] = ','.join(row)
        elif process not in self.normal_sids[sid]:
            # Known SID but a process name not on its allow-list.
            row.append('Malicious Process: Uknown process running with system account')
            self.suspecious_proc_sids[pid] = ','.join(row)
def malhandles(self):
    """Exploratory scan of self.handles for interesting handle names.

    Currently prints (does not record) any handle row whose name matches
    a .dll reference (excluding .dll.mui), an .msi reference, or a named
    pipe.  NOTE(review): this is a work-in-progress — results are not
    added to any suspect collection yet.  The analyst notes below
    describe the intended full triage methodology.
    """
    # 1. Identify Anomalous Handles:
    # Look for handles with unusual or unexpected types, such as file handles for system files or handles to uncommon objects.
    # Pay attention to handles associated with processes that are not part of the normal system configuration.
    # 2. Examine Handle Counts:
    # Check for processes with an unusually high number of handles. This could indicate a process that is performing a lot of file or resource operations, which may be suspicious.
    # 3. Investigate Suspicious File Handles:
    # Focus on file handles and look for processes that have handles to sensitive files, such as system binaries or critical configuration files.
    # Cross-reference file handles with file paths to determine if any files are accessed from suspicious locations.
    # 4. Analyze Network-Related Handles:
    # Investigate handles related to network resources, such as sockets or network connections. Look for processes communicating with unusual IP addresses or ports.
    # 5. Correlate with Process Information:
    # Correlate the handles with other process information like process names, PIDs, and parent-child relationships to identify any processes that appear suspicious or out of place.
    # 6. Check for Code Injection:
    # Look for processes that have handles to memory sections with Execute (X) permissions. This could indicate code injection or process hollowing, common techniques used by malware.
    # 7. Review Timestamps:
    # Analyze the timestamps associated with handles. Suspicious processes may have handles with timestamps that don't align with the system's normal behavior.
    # 8. Cross-Reference with Known Malware Indicators:
    # Compare the handles and associated process information with known indicators of compromise (IOCs) and malware signatures to identify matches.
    # 9. Analyze Parent-Child Relationships:
    # Look for unusual parent-child relationships between processes. Malware often spawns child processes to perform malicious activities, so identifying such relationships can be crucial.
    # 10. Employ YARA Rules:
    # Use YARA rules to scan the memory and handles for specific patterns or signatures associated with known malware.
    # 11. Leverage Threat Intelligence:
    # Consult threat intelligence feeds and databases to check if any of the handles or processes are associated with known malware campaigns or threat actors.
    # 12. Behavioral Analysis:
    # Consider the overall behavior of processes with suspicious handles. Look for patterns that deviate from normal system behavior, such as excessive file manipulation or network communication.
    # Handles represent "pointers" to objects and it is not unusual for a process to have a pointer to a file it is reading or writing to. However,
    # this is not the common way code is loaded into a process, so it is unusual to see a DLL file referenced in a process handle (recall that loaded
    # DLLs are typically referenced in the Process Environment Block and displayed via the windows.dlllist plugin).
    # CLASSES Key Registry
    # Storing scripts and data in randomly named registry keys has become a common technique for "fileless" malware, so it pays to keep an eye out strange key
    # The interesting handle in the "msiexec.exe" PID 6192 process is a little more obvious. This process only had 16 file handles. If you think about the
    # purpose of "msiexec.exe" (installing .msi files), finding a reference to "flash_installer.msi" likely indicates what was installed on the system.
    # The name and folder location are of particular interest and ultimately led to recovery of the original infection vector.
    # Named pipes can be identified via "File" handles in Volatility and thus when narrowing your focus, you can filter your output on that handle type
    # (this was not done in the example on the slide since output was filtered with grep for the text "pipe").
    # However, sometimes things stick out with some connected pipes appending IP addresses or process identifiers as part of the pipe name.
    # Malware commonly sets mutants as a way of "marking" a compromised system to prevent reinfection. The malware will first check for the absence of
    # its mutant before performing an infection. During code analysis, reverse engineers can identify what mutant name is set and checked for by the malware.
    # Match ".dll" but not ".dll.mui" (negative lookahead).
    pattern = r'\.dll(?!\.mui)'
    for pid in self.handles:
        for row in self.handles[pid]:
            # row[6] is the handle's name column (see csv_reader, which stores row[1:]).
            if re.search(pattern, row[6]):
                print(row)
            if '.msi' in row[6]:
                print(row)
            if 'NamedPipe' in row[6]:
                print(row)
    print('Here will goes handles function')
def csv_reader(self, csv_file):
    """Parse one Volatility CSV export under the analysis directory into
    the matching in-memory structure.

    Dispatches on *csv_file*:
      netscan.csv  -> self.netscan  (pid -> list of CSV strings; the raw
                      whitespace-separated output is re-packed into 8
                      comma-joined columns)
      pslist.csv   -> self.pslist   (pid -> CSV string)
      psscan.csv   -> self.psscan   (pid -> CSV string)
      cmdline.csv  -> self.cmdline  (pid -> list of columns, minus col 0)
      dlllist.csv  -> self.dlllist, self.process_path, self.dll_stacking
      getsids.csv  -> self.getsids  (pid -> list of CSV strings)
      handles.csv  -> self.handles  (pid -> list of column lists,
                      File/Key/Mutant types only)
    """
    # Create an empty dictionary to store data with "PID" as the index
    # Read and process the CSV file
    filePath = os.path.join(self.app_path, args.p) + '/'+csv_file
    if csv_file == 'netscan.csv':
        # netscan output is whitespace-delimited, not true CSV; the header
        # contains multi-word column names that must be stitched back
        # together before indexing.
        try:
            with open(filePath, 'r', newline='') as file:
                header = next(file).split()
                # Re-join the two-word "Local Addr" / "Foreign Addr" headers
                # and shift the remaining columns down to 8 fields total.
                header[2] = ' '.join(header[2:4])
                header[3] = ' '.join(header[4:6])
                header[4] = header[6]
                header[5] = header[7]
                header[6] = header[8]
                header[7] = header[9]
                header = header[0:8]
                pid_index = header.index('Pid')
                for line in file:
                    row = line.split()
                    if len(row) < 10:
                        # Short rows are missing the state column; pad it.
                        row.insert(4, '')
                    # Everything from column 7 on is the (possibly spaced)
                    # creation timestamp; re-join it into one field.
                    row[7] = ' '.join(row[7:])
                    row = row[0:8]
                    if len(row) > pid_index:
                        pid = row[pid_index]
                        if pid not in self.netscan:
                            self.netscan[pid] = []
                        self.netscan[pid].append(','.join(row))
        except FileNotFoundError:
            print(f"File '{csv_file}' not found.")
    else:
        try:
            with open(filePath, 'r', newline='') as csvfile:
                csvreader = csv.reader(csvfile)
                header = next(csvreader)  # Read the header
                # Find the index of the "PID" column
                pid_index = header.index('PID')
                path_index = 0
                if csv_file == 'dlllist.csv':
                    path_index = header.index('Path')
                if csv_file == 'pslist.csv':
                    # Store the row as a comma-separated string in the dictionary
                    for row in csvreader:
                        if len(row) > pid_index:
                            pid = row[pid_index]
                            self.pslist[pid] = ','.join(row)
                elif csv_file == 'psscan.csv':
                    for row in csvreader:
                        if len(row) > pid_index:
                            pid = row[pid_index]
                            self.psscan[pid] = ','.join(row)
                elif csv_file == 'cmdline.csv':
                    for row in csvreader:
                        # NOTE(review): row[3] is indexed under a len >= 2
                        # guard, so rows with exactly 2-3 columns raise
                        # IndexError — the guard should be len(row) > 3.
                        if len(row) >= 2:
                            if 'process exited' in row[3]:
                                continue
                            pid = row[pid_index]
                            self.cmdline[pid] = row[1:]
                elif csv_file == 'dlllist.csv':
                    for row in csvreader:
                        if len(row) > pid_index:
                            pid = row[pid_index]
                            if pid not in self.dlllist:
                                self.dlllist[pid] = []  # Every process PID has more than one DLL. Keep all dlls for every process in the same pid
                            self.dlllist[pid].append(','.join(row))
                            if len(row) > path_index:
                                path = row[path_index]
                                if '.exe' in path:  # Skip process path: record it separately
                                    self.process_path[pid] = path
                                    continue
                                # Frequency count per DLL path for "stacking"
                                # analysis (rare DLLs are more suspicious).
                                if path not in self.dll_stacking:
                                    self.dll_stacking[path] = 0
                                self.dll_stacking[path] += 1
                elif csv_file == 'getsids.csv':
                    for row in csvreader:
                        if len(row) == 0:
                            break
                        if len(row) > pid_index:
                            pid = row[pid_index]
                            if pid not in self.getsids:
                                self.getsids[pid] = []
                            self.getsids[pid].append(','.join(row))
                elif csv_file == 'handles.csv':
                    # Keep only handle types of interest.
                    filter = ['File', 'Key', 'Mutant']
                    for row in csvreader:
                        if len(row) == 0:
                            continue
                        if row[5] not in filter or row[7] == '':
                            continue
                        if len(row) > pid_index:
                            pid = row[pid_index]
                            if pid not in self.handles:
                                self.handles[pid] = []  # Every process PID has a lot of handles. Keep all handles for every process in the same pid
                            self.handles[pid].append(row[1:])
        except FileNotFoundError:
            print(f"File '{csv_file}' not found.")
def collect_evidence(self, pid):
    """Collect evidence rows for *pid* from every parsed plugin output
    (pslist, psscan, dlllist, cmdline, netscan, getsids) and append them
    to self.evidence_bag as CSV strings of the shape:
    source,pid,ppid,name,path,timestamp[,reason]."""
    # Earlier design kept a per-pid dict-of-plugin-rows, e.g.:
    # ioc_collection = [pid: {<plugin1>: <row>}, {<plugin2>: <row>}]
    # self.evidence_bag[4] = [{'pslist':'0,2876,868,WmiPrvSE.exe,...'}]
    # if pid not in self.evidence_bag:
    #     self.evidence_bag[pid] = []
    # Collect evidence from PsList
    # reason = 'High privilege account SID: '
    # if pid in self.getsids:
    #     if len(self.getsids[pid]) > 1:
    #         for item in self.getsids[pid]:
    #             if ('Everyone' in item) or ('Users' in item) or ('This Organization' in item):
    #                 continue
    #             columns = item.split(',')
    #             if 'Local System' == columns[4]:
    #                 reason += ' '.join(columns[3:])
    #             # Don't forget to add the enrichment from malgetsids function.
    #             elif '544' in columns[3]:
    #                 reason += 'The process spawned from a user context with Administrator privileges: '+' '.join(columns[3:])
    # if pid in self.suspecious_proc_sids:
    #     reason2 = self.suspecious_proc_sids[pid].split(',')
    #     if len(reason2) == 6:
    #         reason += reason2[5]
    # ppid is captured from the pslist row (if any) and reused by the
    # later plugin sections.
    ppid = ''
    if pid in self.pslist:
        row = []
        row.append('pslist')
        columns = self.pslist[pid].split(',')
        row.append(columns[1])   # PID
        ppid = columns[2]
        row.append(columns[2])   # PPID
        row.append(columns[3])   # image name
        path = ''
        if pid in self.process_path:
            path = self.process_path[pid]
        # You need to change this list because it is not clean, missing a lot of system files and dlls.
        # if path in self.whitelist_paths and self.score[pid] < 40:
        #     return
        row.append(path)
        row.append(columns[9])   # create time
        if pid in self.suspect_proc:
            # NOTE(review): assumes the suspect_proc value has >= 12
            # comma-separated fields (reason appended at index 11 by
            # malproc) — verify for entries added by other heuristics.
            reason = self.suspect_proc[pid].split(',')[11]
            row.append(reason)
        self.evidence_bag.append(','.join(row))
    # Collect evidence from PsScan — only for pids absent from pslist
    # (candidates for hidden/unlinked processes).
    if (pid in self.psscan) and (pid not in self.pslist):
        row = []
        row.append('psscan')
        columns = self.psscan[pid].split(',')
        row.append(columns[1])
        row.append(columns[2])
        row.append(columns[3])
        path = ''
        if pid in self.process_path:
            path = self.process_path[pid]
        row.append(path)
        row.append(columns[9])
        # 4. Should hidden/Unlinked processes like processes found in psscan, but not in pslist
        # Hidden processes: Also add more details such as number of threads, parent process, ....
        if row[1] not in self.pslist:
            row.append('Hidden/Unlinked: The process found in psscan, but not in pslist')
        self.evidence_bag.append(','.join(row))
    # Collect evidence from DllList
    if pid in self.dlllist:
        if len(self.dlllist[pid]) > 1:
            for item in self.dlllist[pid]:
                row = []
                row.append('dlllist')
                columns = item.split(',')
                if '.exe' in columns[6]:
                    continue  # the process image itself, not a DLL
                # We can add option to use this filtering or not. If we want to find malicious dlls we can use filtering, but
                # if we want to understand the malicious process capabilities, we ignore filtering because used libraries can tell
                # you what the malware can do
                if columns[6].lower() in self.whitelist_paths:
                    continue
                if self.dll_stacking[columns[6]] > 2:  # Only keep dlls with low frequency as malicious dlls always rare
                    continue
                row.append(columns[1])
                row.append(ppid)
                row.append(columns[2])
                row.append(columns[6])
                row.append(columns[7])
                row.append('Whitelist the clean DLLs')
                self.evidence_bag.append(','.join(row))
        else:
            row = []
            row.append('dlllist')
            # NOTE(review): bug — self.dlllist[pid] is a *list* (csv_reader
            # appends strings to it), so .split(',') raises AttributeError;
            # this was probably meant to be self.dlllist[pid][0].split(',').
            columns = self.dlllist[pid].split(',')
            if self.dll_stacking[columns[6]] > 5:  # Only keep dlls with low frequency as malicious dlls always rare
                return
            row.append(columns[1])
            row.append(ppid)
            row.append(columns[2])
            row.append(columns[6])
            row.append(columns[7])
            row.append('Whitelist the clean DLLs')
            self.evidence_bag.append(','.join(row))
    # Collect evidence from CmdLine
    if pid in self.cmdline:
        row = []
        row.append('cmdline')
        row.append(self.cmdline[pid][0])  # pid
        row.append(ppid)
        row.append(self.cmdline[pid][1])  # process name
        # Extract an executable path from the raw command line.
        pattern = r'"?([C|c]:\\[a-zA-Z0-9%\\ \(\)\.]*\.[eE][xX][eE]|[%\\]SystemRoot[%\\][a-zA-Z0-9%\\]*\.exe)\b'
        paths = re.findall(pattern, self.cmdline[pid][2])
        if paths:
            paths = paths[0].strip('"')
        # NOTE(review): bug — this appends the stale `path` local from the
        # pslist/psscan branch (NameError if neither ran); the regex
        # result `paths` computed just above is never used.  Probably
        # meant row.append(paths).
        row.append(path)
        row.append('')
        row.append(self.cmdline[pid][2].replace('"', ''))
        self.evidence_bag.append(','.join(row))
    # Collect evidence from NetScan
    if pid in self.netscan:
        if len(self.netscan[pid]) > 1:
            for item in self.netscan[pid]:
                row = []
                row.append('netscan')
                columns = item.split(',')
                row.append(columns[5])
                row.append(ppid)
                row.append(columns[6])
                # local --> foreign endpoints plus state as the "path" field
                row.append(columns[1]+' ' + ' --> '.join(columns[2:4])+' '+columns[4])
                row.append(columns[7])
                row.append('I Will add info later')
                self.evidence_bag.append(','.join(row))
        else:
            row = []
            row.append('netscan')
            columns = self.netscan[pid][0].split(',')
            row.append(columns[5])
            row.append(ppid)
            row.append(columns[6])
            row.append(columns[1]+' ' + ' --> '.join(columns[2:4])+' '+columns[4])
            row.append(columns[7])
            row.append('I Will add info later')
            self.evidence_bag.append(','.join(row))
    # Collect evidence from GetSIDs — skip the common noise SIDs and
    # annotate high-privilege accounts.
    if pid in self.getsids:
        if len(self.getsids[pid]) > 1:
            local_system = False
            for item in self.getsids[pid]:
                if any(keyword in item for keyword in ['Power', 'Logon Session', 'Authentication Authority Asserted Identity', 'PlugPlay', 'Users',
                                                       'This Organization', 'Everyone', 'DcomLaunch', 'High Mandatory Level']):
                    continue
                row = []
                row.append('getsids')
                columns = item.split(',')
                if columns[4] == '-':
                    continue  # no account name resolved
                row.append(columns[1])
                row.append(ppid)
                row.append(columns[2])
                row.append(columns[3])
                row.append('')
                # Don't forget to add the enrichment from malgetsids function.
                reason = columns[4]
                if '544' in columns[3]:
                    # RID 544 == built-in Administrators group
                    reason = 'The process spawned from a user context with high privileges account: ' + reason
                if pid in self.suspecious_proc_sids:
                    reason2 = self.suspecious_proc_sids[pid].split(',')
                    if len(reason2) == 6:
                        reason = reason2[5] +': '+ reason
                row.append(reason)
                self.evidence_bag.append(','.join(row))
def create_SuperMemoryParser(self):
    """Write the collected evidence rows to SuperMemoryParser.csv.

    Fixes the original bug where the header row was built but never
    written to the file, and terminates the last record with a newline
    so the output is a well-formed CSV.
    """
    header = ['Source Name', 'PID', 'Process Name', 'Path', 'Timestamps', 'Long Description']
    with open('SuperMemoryParser.csv', 'w', newline='') as mparser:
        mparser.write(','.join(header) + '\n')
        if self.evidence_bag:
            mparser.write('\n'.join(self.evidence_bag) + '\n')
def malicious_weight(self):
    """Aggregate per-process suspicion weights across the individual
    heuristics and keep only processes scoring above 30 in self.score.

    Each hit (suspect parent/child, suspicious SID, suspicious command
    line, suspicious network activity, baseline deviation) adds 10;
    a zero-root parent adds an extra 30.
    """
    if not self.pslist:
        return
    for pid in self.pslist:
        weight = 0
        if pid in self.suspect_proc:
            weight += 10
            if 'Has a zero root parent' in self.suspect_proc[pid]:
                weight += 30
        if pid in self.suspecious_proc_sids:
            weight += 10
        if pid in self.suspect_cmdlines:
            weight += 10
        if pid in self.suspect_netscan:
            weight += 10
        if pid in self.baseline_proc:
            weight += 10
        if weight > 30:
            self.score[pid] = weight
def add_child(parent, child):
    """Attach *child* to *parent* by appending it to parent.children."""
    parent.children.append(child)
# Recursively pretty-print the process tree, one '*' per depth level.
def print_process_tree(node, depth=0):
    """Print the process tree rooted at *node*.

    The header row is emitted once at the root call; the pid-0
    pseudo-process itself is skipped (its children still print).
    """
    if not node:
        return
    if depth == 0:
        print(f"{'':<12}{'PID':<12}{'PPID':<12}{'Image File Name'}")
    if node.pid != '0':
        marker = "*" * depth + ' ' * (12 - depth)
        print(marker + f"{node.pid:<12}{node.ppid:<12}{node.image_file_name}")
    for child in node.children:
        print_process_tree(child, depth + 1)
def get_application_path():
    """Return the directory the application is running from.

    Handles PyInstaller-frozen executables (sys.frozen set) as well as
    plain scripts, and expands Windows 8.3 short paths (containing '~')
    to long paths via win32api.  On failure the error is printed and,
    when args.debug is set, the process exits.
    """
    try:
        frozen = getattr(sys, 'frozen', False)
        base = sys.executable if frozen else __file__
        application_path = os.path.dirname(os.path.realpath(base))
        # Translate DOS short names to long Windows paths.
        if "~" in application_path and os_platform == "windows":
            application_path = win32api.GetLongPathName(application_path)
        return application_path
    except Exception:
        print("Error while evaluation of application path")
        traceback.print_exc()
        if args.debug:
            sys.exit(1)
def main(memory):
    """Interactive analysis console.

    First collects evidence for every pre-scored pid (walking each one's
    ancestry) and writes SuperMemoryParser.csv, then loops on a text
    menu letting the analyst browse each plugin's data, view only the
    suspicious entries, and collect additional evidence by PID.
    """
    # for pid in memory.score:
    #     print(pid, ': ', memory.score[pid])
    # child_pid = '8260'  # Replace with the PID of the child you want to find the parent for
    # # parent_node = memory.get_parent(child_pid)
    # memory.find_parent_recursive(child_pid)
    # parent_node = memory.get_parent(child_pid)
    # while parent_node.image_file_name != 'svchost.exe':
    #     parent_node = memory.get_parent(child_pid)
    #     if parent_node:
    #         print(f"Parent PID: {parent_node.pid}, Image File Name: {parent_node.image_file_name}")
    #     else:
    #         print(f"No parent found for PID {child_pid}.")
    #     child_pid = parent_node.pid
    # Snapshot the keys: collect_evidence/find_parent_recursive may add
    # new entries to memory.score while we iterate.
    pid_list = [key for key in memory.score]
    for pid in pid_list:
        memory.collect_evidence(pid)
        # parent = memory.pslist[pid].split(',')[2]
        memory.find_parent_recursive(pid)
        # if parent not in memory.score:
        #     memory.collect_evidence(parent)
    memory.create_SuperMemoryParser()
    # Top-level menu loop: one iteration per plugin selection.
    while True:
        print("Available plugins to explore memory data:")
        for index, command in enumerate(memory.plugins, start=1):
            print(f"{index}. {command}")
        print("0. Exit")
        try:
            choice = int(input("Enter the plugin number you want to select (or 0 to quit): "))
            if choice > len(memory.plugins):
                print("\nInvalid input. Please enter a valid number.")
                continue
            if choice == 0:
                print(memory.score)
                break
            selected_command = memory.plugins[choice - 1]
            corresponding_file = memory.csv_files[choice - 1]
            print(f"Selected command: {selected_command}")
            print(f"Corresponding CSV file: {corresponding_file}")
            # Dump the raw data of the chosen plugin.
            if selected_command == 'pslist':
                for pid in memory.pslist:
                    print(memory.pslist[pid])
            if selected_command == 'psscan':
                for pid in memory.psscan:
                    print(memory.psscan[pid])
            if selected_command == 'pstree':
                print_process_tree(memory.process_tree['0'])
            if selected_command == 'dlllist':
                for pid in memory.dlllist:
                    print(memory.dlllist[pid])
            if selected_command == 'cmdline':
                for pid in memory.cmdline:
                    print(memory.cmdline[pid])
            if selected_command == 'netscan':
                for pid in memory.netscan:
                    print(memory.netscan[pid])
            if selected_command == 'getsids':
                for pid in memory.getsids:
                    print(memory.getsids[pid])
            # Secondary menu: narrow to suspicious entries or keep browsing.
            print("1. Display only suspecious processes")
            print("2. Continue with full list")
            print("0. Back")
            try:
                choice = int(input("Select one option (or 0 go back): "))
                if choice == 0:
                    continue
                elif choice == 1:
                    # Per-heuristic "suspicious only" views.
                    while True:
                        print("Display only suspecious processes:")
                        for index, command in enumerate(memory.analyze, start=1):
                            print(f"{index}. {command}")
                        print("0. Back")
                        try:
                            choice = int(input("Enter the number corresponding to the analysis you want to select (or 0 go back): "))
                            if choice == 0:
                                break
                            elif choice <= len(memory.analyze):
                                if choice == 1:
                                    # suspect_proc = memory.malproc(memory.process_tree['4'])
                                    for pid in memory.suspect_proc:
                                        print(memory.suspect_proc[pid])
                                    print('\n\nThe above processes are more likely the most suspecious processes. Select one of them by PID\n\n')
                                elif choice == 3:
                                    for pid in memory.suspect_cmdlines:
                                        print(memory.suspect_cmdlines[pid])
                                elif choice == 5:
                                    # NOTE(review): option 5 is a stub — the
                                    # code after `continue` is unreachable.
                                    print('\nThis function have not been implemted, please do it!')
                                    continue
                                    suspect_proc = memory.malcomm('blacklist.txt')
                                    print('\n'.join(suspect_proc))
                                elif choice == 6:
                                    for pid in memory.suspect_netscan:
                                        print(memory.suspect_netscan[pid])
                                elif choice == 7:
                                    for pid in memory.baseline_proc:
                                        print(memory.baseline_proc[pid].replace('"', ''))
                                elif choice == 8:
                                    memory.malgetsids()
                                    for pid in memory.suspecious_proc_sids:
                                        print(memory.suspecious_proc_sids[pid])
                                # Let the analyst pull evidence for any pid
                                # seen in the listing just printed.
                                while True:
                                    # Prompt for the PID to search for
                                    pid_to_search = input("\n\nEnter the suspecious process PID that you want to collect as evidence (or type 'back' to go back to plugins selection): ")
                                    if pid_to_search.lower() == 'back':
                                        break
                                    memory.collect_evidence(pid_to_search)
                                    # print(memory.pslist[pid_to_search])
                            else:
                                print("Invalid choice. Please select a valid number.")
                        except ValueError:
                            print("Invalid input. Please enter a valid number.")
                elif choice == 2:
                    while True:
                        # Prompt for the PID to search for
                        pid_to_search = input("\n\nEnter the suspecious process PID that you want to collect as evidence (or type 'back' to go back to plugins selection): ")
                        if pid_to_search.lower() == 'back':
                            break
                        memory.collect_evidence(pid_to_search)
                        # print(memory.pslist[pid_to_search])
                        # print(memory.evidence_bag)
                else:
                    print("Invalid choice. Please select a valid number.")
            except ValueError:
                print("Invalid input. Please enter a valid number.")
            # for key, value in memory.evidence_bag.items():
            #     formatted_output = []
            #     for item in value:
            #         for entry_type, entry_data in item.items():
            #             if entry_type == 'dlllist':
            #                 formatted_output.append(f'{entry_type}:')
            #                 formatted_output.extend([f'  {line}' for line in entry_data])
            #             elif entry_type == 'netscan':
            #                 formatted_output.append(f'{entry_type}:')
            #                 formatted_output.extend([f'  {line}' for line in entry_data])
            #             else:
            #                 formatted_output.append(f'{entry_type}: {entry_data}')
            #     print("\n".join(formatted_output))
            #     print('\n')
        except ValueError:
            print("Invalid input. Please enter a valid number.")
if __name__ == "__main__":
# Argument parsing
# Parse Arguments
parser = argparse.ArgumentParser(description='AutVol3 - Simple Memoray Image Analyzer')
parser.add_argument('-p', help='Path to memory image and csv files like pslist, netscan ...', metavar='path', default='')
parser.add_argument('--version', action='store_true', help='Shows welcome text and version of AutoVol3, then exit', default=False)
# Add option for baseline
# Add option for image profile if using vol2
# Add option for blacklist IPs
# Add option for chosing memory image file
# Add option to add VPN concentrator
args = parser.parse_args()
if not args.p:
print('Must specify memory image path!')
parser.print_help()
sys.exit(1)
autovol3 = AutoVol3(args)
# Show version
if args.version:
sys.exit(0)
#suspect_proc = autovol3.malcomm('blacklist.txt')
#autovol3.malgetsids()
main(autovol3)
# print('This is the last push')
| ibrahim0x20/AutoVol3 | autovol3.py | autovol3.py | py | 60,175 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.platform",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "sys.platform",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "sys.platform",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": ... |
8862670450 | import numpy as np
import matplotlib.pyplot as plt
black1 = np.zeros((50,50))
black2 = black1.copy()
white1 = np.ones((50,50))
white2 = white1.copy()
row1 = np.hstack((black1,white1))
row2 = np.hstack((white2, black2))
full = np.vstack((row1,row2))
plt.figure()
plt.imshow(full, cmap='gray')
plt.show() | MatanBuljubasic/OsnoveStrojnogUcenja_LV | LV2/zad4.py | zad4.py | py | 303 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.zeros",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number": 9,... |
19957710129 | import random
import sqlite3
def sql_create():
global db, cursor
db = sqlite3.connect("bot.sqlite3")
cursor = db.cursor()
if db:
print("База данных подключена!")
db.execute("CREATE TABLE IF NOT EXISTS anketa "
"(id INTEGER PRIMARY KEY, "
"name VARCHAR (100), "
"name VARCHAR (100), "
"direction VARCHAR(100), "
"age INTEGER (20), "
"group VARCHAR(10), ")
db.commit()
async def sql_command_insert(state):
async with state.proxy() as data:
cursor.execute("INSERT INTO anketa VALUES (?, ?, ?, ?, ?, ?, ?)",
tuple(data.values()))
db.commit()
async def sql_command_random():
result = cursor.execute("SELECT * FROM anketa").fetchall()
random_user = random.choice(result)
return random_user
async def sql_command_all():
return cursor.execute("SELECT * FROM anketa").fetchall()
async def sql_command_delete(id):
cursor.execute("DELETE FROM anketa WHERE id = ?", (id,))
db.commit() | solvur/home-works | database/bot_db.py | bot_db.py | py | 1,134 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "sqlite3.connect",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 32,
"usage_type": "call"
}
] |
64006031 | import cv2
from pykuwahara import kuwahara
image = cv2.imread('selfie.jpg')
image = (image / 255).astype('float32') # pykuwahara supports float32 as well
lab_image = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
l, a, b = cv2.split(lab_image)
hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv_image)
filt1 = kuwahara(image, method='gaussian', radius=5, sigma=2., image_2d=l)
filt2 = kuwahara(image, method='gaussian', radius=5, sigma=2., image_2d=v)
#filt = kuwahara(lab_image, method='gaussian', radius=5, sigma=2., image_2d=l)
cv2.imwrite('selfie-kfilt-gaus1.jpg', filt1 * 255)
cv2.imwrite('selfie-kfilt-gaus2.jpg', filt2 * 255)
#cv2.imwrite('selfie-kfilt-gaus.jpg', cv2.cvtColor(filt, cv2.COLOR_Lab2BGR) * 255)
| yoch/pykuwahara | examples/selfie.py | selfie.py | py | 741 | python | en | code | 14 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2Lab",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "cv2.split",
"line_num... |
11001989881 | # Support Vector Machine (SVM)
# from https://www.superdatascience.com/machine-learning/
# Part 1 - Data Preprocessing
# Importing the libraries
import os
import glob
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
train_dir = os.path.join(os.path.curdir, "data/train")
validation_dir = os.path.join(os.path.curdir, "data/validate")
train_dirs = []
train_dirs.append(os.path.join(train_dir, "cima"))
train_dirs.append(os.path.join(train_dir, "baixo"))
train_dirs.append(os.path.join(train_dir, "esq"))
train_dirs.append(os.path.join(train_dir, "dir"))
validation_dirs = []
validation_dirs.append(os.path.join(validation_dir, "cima"))
validation_dirs.append(os.path.join(validation_dir, "baixo"))
validation_dirs.append(os.path.join(validation_dir, "esq"))
validation_dirs.append(os.path.join(validation_dir, "dir"))
# Preparando os dados de treinamento
X = []
y = []
for dir in train_dirs:
classe = dir.rsplit('/')[-1]
if classe == 'cima':
classe_num = 0
if classe == 'baixo':
classe_num = 1
if classe == 'esq':
classe_num = 2
if classe == 'dir':
classe_num = 3
files = os.listdir(dir)
for file in files:
image = cv.imread(dir+"/"+file, 0)
# flatten transforma o array 2D de imagem em 1D
X.append(image.flatten())
y.append(classe_num)
X_train = np.array(X)
y_train = np.array(y)
print(X_train.shape)
print(y_train.shape)
# Preparando os dados de validação
X = []
y = []
for dir in validation_dirs:
classe = dir.rsplit('/')[-1]
if classe == 'cima':
classe_num = 0
if classe == 'baixo':
classe_num = 1
if classe == 'esq':
classe_num = 2
if classe == 'dir':
classe_num = 3
files = os.listdir(dir)
for file in files:
image = cv.imread(dir+"/"+file, 0)
# flatten transforma o array 2D de imagem em 1D
X.append(image.flatten())
y.append(classe_num)
X_val = np.array(X)
y_val = np.array(y)
print(X_val.shape)
print(y_val.shape)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_val = sc.transform(X_val)
# Fitting SVM to the Training set
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
#classifier = SVC(kernel = 'linear', random_state = 0)
classifier = Pipeline((
("scaler", StandardScaler()),
("svm_clf", SVC(kernel="rbf", gamma=0.01, C=1000000))
))
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_val)
print(y_val[0:35])
print(y_pred[0:35])
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_val, y_pred)
# matriz de confusão
print(cm)
print("Score da validação: ", classifier.score(X_val, y_val))
# Salvando modelo
import joblib as jl
model_file = "svm_eyes.pkl"
jl.dump(classifier, model_file)
| amelco/webcam-eyetracking | svm.py | svm.py | py | 2,928 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 1... |
22377442955 | # Testzone para o apêndice
import apendice as ap
import matplotlib.pyplot as plt
#%% Plotando os sharp_test
## Sinal 1
vector1 = ap.sharpe_test_1(combined = combined, tam_p = 0.1, qtde = 200, inverso = True)
plt.hist(vector1.iloc[:,0], bins = 40)
plt.ylabel('Quantas vezes')
plt.xlabel('Sharpe')
plt.title('Para o Sinal 1')
plt.show()
## Sinal 2
vector2 = ap.sharpe_test_2(combined = combined, tam_p = 0.1, qtde = 200, inverso = True)
plt.hist(vector2.iloc[:,0], bins = 40)
plt.ylabel('Quantas vezes')
plt.xlabel('Sharpe')
plt.title('Para o Sinal 2')
plt.show()
## Sinal 3
vector3 = ap.sharpe_test_3(combined = combined, tam_p = 0.1, qtde = 200, inverso = True, dias = 17)
plt.hist(vector3.iloc[:,0], bins = 40)
plt.ylabel('Quantas vezes')
plt.xlabel('Sharpe')
plt.title('Para o Sinal 3')
plt.show()
## Sinal 4
vector4 = ap.sharpe_test_4(combined = combined, tam_p = 0.1, qtde = 200, inverso = True, dias = 21)
plt.hist(vector4.iloc[:,0], bins = 40)
plt.ylabel('Quantas vezes')
plt.xlabel('Sharpe')
plt.title('Para o Sinal 4')
plt.show()
#%% Avaliando a normalização via correlação média
## Se quer ver a avaliação, insira 1
ap.av_corr_med(insert = 0)
| Insper-Data/Data_Fin_2021.1 | testzone_apendice.py | testzone_apendice.py | py | 1,176 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "apendice.sharpe_test_1",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.hist",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matpl... |
9417647037 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 17 05:15:30 2018
@author: LeeMH
"""
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
import requests
import re
from utils.processor_checker import timeit
from gobble.module.crawler import Crawler
class NaverRealtimeCrawler(Crawler):
'''
네이버 증권 실시간 속보 크롤러
'''
def __init__(self):
super().__init__()
# func (기능): all (오늘 날짜의 모든 뉴스를 크롤링), new (1page만 크롤링)
@timeit
def create_url_list(self, func):
req = self.request_get(self.real_time_list.format(self.rt_today, 1), self.user_agent)
print(self.real_time_list.format(self.rt_today, 1))
soup = self.html_parser(req)
url_list = []
if func == 'new':
url_data = self.find_navernews_url(soup, url_list)
elif func == 'all':
pgRR = self.soup_find(soup, 'td', {'class':'pgRR'})
last_page = self.soup_find(pgRR, 'a')['href'][-3:]
last_page = re.findall("\d+", last_page)[0]
sub_list = []
for i in range(1,int(last_page)+1):
req = self.request_get(self.real_time_list.format(self.rt_today, i), self.user_agent)
sub_soup = self.html_parser(req)
url_list += self.find_navernews_url(sub_soup, sub_list)
url_data = url_list
else:
print("Choose between 'all' and 'new'")
url_data = list(set(url_data))
print(len(url_data))
real_time_url = [self.soup_find(sub, 'a')['href'].replace('§', '§') for sub in url_data]
return real_time_url
@timeit
def get_data(self, url_list, checklist):
url_list = url_list
data_list = []
for url in url_list:
self.req = self.request_get(self.fin_nhn.format(url), self.user_agent)
soup = self.html_parser(self.req)
url = self.fin_nhn.format(url)
title = self.soup_find(soup, 'h3').text.replace('\n','').replace('\t','').strip()
checkurl = re.sub(r'&page=\d+$', '', url)
if checkurl in checklist:
print('Already up-to-date.')
continue
upload_time = self.soup_find(soup, 'span', {'class':'article_date'}).text
media = self.soup_find(soup, 'span', {'class':'press'}).img['title']
data_dict = {'title':title, 'media':media, 'url':checkurl, 'data_type':'R', 'upload_time': upload_time}
data_list.append(data_dict)
print(len(data_list))
return data_list
| veggieavocado/Gobble-v.1 | gobble/module/naver_rt_crawler.py | naver_rt_crawler.py | py | 2,620 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "gobble.module.crawler.Crawler",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "re.findall",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "utils.processor_checker.timeit",
"line_number": 23,
"usage_type": "name"
},
{
"api_name"... |
36473075239 | from wsgiref.simple_server import make_server
import psutil,datetime
import sqlite3
def interview_scores(environ, start_response):
conn_result=sqlite3.connect("results.sqlite")
cursor_result = conn_result.cursor()
print_results = cursor_result.execute("select * from interview_results")
status = '200 OK'
headers = [('Content-type', 'html; charset=utf-8')]
start_response(status, headers)
message = "<h1>Interview Scores</h1>"
message +="<table style=\"width:100%\">"
message += "<tr>"
message +="<td style=\"background-color:Aqua\"><strong>Username</strong></td>"
message +="<td style=\"background-color:Aqua\"><strong>Correct Answers</td>"
message +="<td style=\"background-color:Aqua\"><strong>Total Questions</td>"
message +="<td style=\"background-color:Aqua\"><strong>Result</td></tr>"
for data in print_results:
message += "<tr>"
message += "<td><strong>"+(str(data[0]))+"</td>"
message += "<td>"+(str(data[1]))+"</td>"
message += "<td>"+(str(data[2]))+"</td>"
message += "<td><strong>"+(str(data[3]))+"</td>"
message += "<tr>"
return[bytes(message,'utf-8')]
httpd = make_server('', 8000,interview_scores)
print("Serving on port 8000...")
httpd.serve_forever()
| avikrb/PROGRAMS | Python/GUI interview program/check_scores_htlm.py | check_scores_htlm.py | py | 1,286 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlite3.connect",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "wsgiref.simple_server.make_server",
"line_number": 30,
"usage_type": "call"
}
] |
10085711322 | #!/usr/bin/python3
import os
import argparse
BASE_CMD = 'gtimeout 60 stack exec -- hplus \"{query}\" --stop-refine\
--stop-threshold=10 --cnt=5'
def main():
parser = argparse.ArgumentParser(description='Run a single hoogle+ query')
parser.add_argument('query', help='the signature to be searched')
args = parser.parse_args()
os.system(BASE_CMD.format(query=args.query))
if __name__ == "__main__":
main()
| TyGuS/hoogle_plus | scripts/run_query.py | run_query.py | py | 442 | python | en | code | 56 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 14,
"usage_type": "call"
}
] |
8529990174 | #!/bin/python3
import os
import pandas
import argparse
import traceback
import numpy as np
from datetime import datetime, timedelta
# lisflood
import lisf1
from lisflood.global_modules.decorators import Cache
from liscal import hydro_model, templates, config, subcatchment, calibration, objective
class ScalingModel():
def __init__(self, cfg, subcatch, lis_template, lock_mgr, objective):
self.cfg = cfg
self.subcatch = subcatch
self.lis_template = lis_template
self.lock_mgr = lock_mgr
self.objective = objective
self.start = cfg.forcing_start.strftime('%d/%m/%Y %H:%M')
self.end = cfg.forcing_end.strftime('%d/%m/%Y %H:%M')
def init_run(self):
# dummy Individual, doesn't matter here
param_ranges = self.cfg.param_ranges
Individual = 0.5*np.ones(len(param_ranges))
cfg = self.cfg
gen = self.lock_mgr.get_gen()
run_id = str(gen)
out_dir = os.path.join(self.subcatch.path_out, run_id)
os.makedirs(out_dir, exist_ok=True)
parameters = self.objective.get_parameters(Individual)
prerun_file, run_file = self.lis_template.write_init(run_id, self.start, self.end, self.start, self.end, cfg.param_ranges, parameters)
# -i option to exit after initialisation, we just load the inputs map in memory
lisf1.main(prerun_file, '-i')
lisf1.main(run_file, '-i')
# store lisflood cache size to make sure we don't load anything else after that
self.lisflood_cache_size = Cache.size()
def run(self, Individual):
cfg = self.cfg
gen = self.lock_mgr.get_gen()
run = self.lock_mgr.increment_run()
print('Generation {}, run {}'.format(gen, run))
run_id = '{}_{}'.format(gen, run)
out_dir = os.path.join(self.subcatch.path_out, run_id)
os.makedirs(out_dir, exist_ok=True)
parameters = self.objective.get_parameters(Individual)
prerun_file, run_file = self.lis_template.write_template(run_id, self.start, self.end, self.start, self.end, cfg.param_ranges, parameters)
lisf1.main(prerun_file, '-v')
lisf1.main(run_file, '-v')
def scaling_subcatchment(cfg, obsid, subcatch, n_runs):
lis_template = templates.LisfloodSettingsTemplate(cfg, subcatch)
lock_mgr = calibration.LockManager(cfg.num_cpus)
lock_mgr.set_gen(cfg.num_cpus)
obj = objective.ObjectiveKGE(cfg, subcatch, read_observations=False)
model = ScalingModel(cfg, subcatch, lis_template, lock_mgr, obj)
# load forcings and input maps in cache
# required in front of processing pool
# otherwise each child will reload the maps
model.init_run()
dummy_params = [0.5*np.ones(len(cfg.param_ranges)) for i in range(n_runs)]
scaling_map, pool = lock_mgr.create_mapping()
mapped = list(scaling_map(model.run, dummy_params))
if pool:
pool.close()
class ConfigScaling(config.Config):
def __init__(self, settings_file, num_cpus):
super().__init__(settings_file)
self.num_cpus = int(num_cpus)
# paths
self.subcatchment_path = self.parser.get('Path','subcatchment_path')
# Date parameters
self.forcing_start = datetime.strptime(self.parser.get('Main','forcing_start'),"%d/%m/%Y %H:%M") # Start of forcing
self.forcing_end = datetime.strptime(self.parser.get('Main','forcing_end'),"%d/%m/%Y %H:%M") # Start of forcing
self.timestep = int(self.parser.get('Main', 'timestep')) # in minutes
self.prerun_timestep = 360 # in minutes
if self.prerun_timestep != 360 and self.prerun_timestep != 1440:
raise Exception('Pre-run timestep {} not supported'.format(self.prerun_timestep))
# Load param ranges file
self.param_ranges = pandas.read_csv(self.parser.get('Path','param_ranges'), sep=",", index_col=0)
# template
self.lisflood_template = self.parser.get('Templates','LISFLOODSettings')
# pcraster commands
self.pcraster_cmd = {}
for execname in ["pcrcalc", "map2asc", "asc2map", "col2map", "map2col", "mapattr", "resample", "readmap"]:
self.pcraster_cmd[execname] = execname
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('settings', help='Settings file')
parser.add_argument('stations_data', help='Stations metadata CSV file')
parser.add_argument('obsid', help='Station obsid')
parser.add_argument('n_cpus', help='Station obsid')
parser.add_argument('n_runs', help='Station obsid')
args = parser.parse_args()
print(' - obsid: {}'.format(args.obsid))
print(' - settings file: {}'.format(args.settings))
obsid = int(args.obsid)
cfg = ConfigScaling(args.settings, args.n_cpus)
print(">> Reading stations.csv file...")
stations = pandas.read_csv(args.stations_data, sep=",", index_col=0)
try:
station_data = stations.loc[obsid]
except KeyError as e:
print(stations)
raise Exception('Station {} not found in stations file'.format(obsid))
subcatch = subcatchment.SubCatchment(cfg, obsid, station_data, create_links=False)
scaling_subcatchment(cfg, obsid, subcatch, int(args.n_runs)) | ec-jrc/lisflood-calibration | bin/scaling.py | scaling.py | py | 5,307 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "liscal.objective",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "numpy.ones",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number":... |
36170474621 | # This file is Copyright 2020 Volatility Foundation and licensed under the Volatility Software License 1.0
# which is available at https://www.volatilityfoundation.org/license/vsl-v1.0
import json
import logging
import ntpath
import os
import re
from typing import List, Dict, Union
from volatility3.framework import (
renderers,
interfaces,
objects,
exceptions,
constants,
layers,
)
from volatility3.framework.configuration import requirements
from volatility3.framework.renderers import format_hints
from volatility3.framework.symbols.windows.extensions import registry
from volatility3.plugins.windows import pslist
from volatility3.plugins.windows.registry import hivelist
vollog = logging.getLogger(__name__)
def find_sid_re(
sid_string, sid_re_list
) -> Union[str, interfaces.renderers.BaseAbsentValue]:
for reg, name in sid_re_list:
if reg.search(sid_string):
return name
return renderers.NotAvailableValue()
class GetSIDs(interfaces.plugins.PluginInterface):
"""Print the SIDs owning each process"""
_version = (1, 0, 0)
_required_framework_version = (2, 0, 0)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for plugin_dir in constants.PLUGINS_PATH:
sids_json_file_name = os.path.join(
plugin_dir, os.path.join("windows", "sids_and_privileges.json")
)
if os.path.exists(sids_json_file_name):
break
else:
vollog.log(
constants.LOGLEVEL_VVV,
"sids_and_privileges.json file is missing plugin error",
)
raise RuntimeError(
"The sids_and_privileges.json file missed from you plugin directory"
)
# Get all the sids from the json file.
with open(sids_json_file_name, "r") as file_handle:
sids_json_data = json.load(file_handle)
self.servicesids = sids_json_data["service sids"]
self.well_known_sids = sids_json_data["well known"]
# Compile all the sids regex.
self.well_known_sid_re = [
(re.compile(c_list[0]), c_list[1])
for c_list in sids_json_data["sids re"]
]
@classmethod
def get_requirements(cls) -> List[interfaces.configuration.RequirementInterface]:
return [
requirements.ModuleRequirement(
name="kernel",
description="Windows kernel",
architectures=["Intel32", "Intel64"],
),
requirements.ListRequirement(
name="pid",
description="Filter on specific process IDs",
element_type=int,
optional=True,
),
requirements.PluginRequirement(
name="pslist", plugin=pslist.PsList, version=(2, 0, 0)
),
requirements.PluginRequirement(
name="hivelist", plugin=hivelist.HiveList, version=(1, 0, 0)
),
]
def lookup_user_sids(self) -> Dict[str, str]:
"""
Enumerate the registry for all the users.
Returns:
An dictionary of {sid: user name}
"""
key = "Microsoft\\Windows NT\\CurrentVersion\\ProfileList"
val = "ProfileImagePath"
kernel = self.context.modules[self.config["kernel"]]
sids = {}
for hive in hivelist.HiveList.list_hives(
context=self.context,
base_config_path=self.config_path,
layer_name=kernel.layer_name,
symbol_table=kernel.symbol_table_name,
filter_string="config\\software",
hive_offsets=None,
):
try:
for subkey in hive.get_key(key).get_subkeys():
sid = str(subkey.get_name())
path = ""
for node in subkey.get_values():
try:
value_node_name = node.get_name() or "(Default)"
except (
exceptions.InvalidAddressException,
layers.registry.RegistryFormatException,
) as excp:
continue
try:
value_data = node.decode_data()
if isinstance(value_data, int):
value_data = format_hints.MultiTypeData(
value_data, encoding="utf-8"
)
elif (
registry.RegValueTypes(node.Type)
== registry.RegValueTypes.REG_BINARY
):
value_data = format_hints.MultiTypeData(
value_data, show_hex=True
)
elif (
registry.RegValueTypes(node.Type)
== registry.RegValueTypes.REG_MULTI_SZ
):
value_data = format_hints.MultiTypeData(
value_data, encoding="utf-16-le", split_nulls=True
)
else:
value_data = format_hints.MultiTypeData(
value_data, encoding="utf-16-le"
)
if value_node_name == val:
path = str(value_data).replace("\\x00", "")[:-1]
user = ntpath.basename(path)
sids[sid] = user
except (
ValueError,
exceptions.InvalidAddressException,
layers.registry.RegistryFormatException,
) as excp:
continue
except (KeyError, exceptions.InvalidAddressException):
continue
return sids
def _generator(self, procs):
user_sids = self.lookup_user_sids()
# Go all over the process list, get the token
for task in procs:
# Make sure we have a valid token
try:
token = task.Token.dereference().cast("_TOKEN")
except exceptions.InvalidAddressException:
token = False
if not token or not isinstance(token, interfaces.objects.ObjectInterface):
yield (
0,
[
int(task.UniqueProcessId),
str(task.ImageFileName),
"Token unreadable",
"",
],
)
continue
# Go all over the sids and try to translate them with one of the tables we have
for sid_string in token.get_sids():
if sid_string in self.well_known_sids:
sid_name = self.well_known_sids[sid_string]
elif sid_string in self.servicesids:
sid_name = self.servicesids[sid_string]
elif sid_string in user_sids:
sid_name = user_sids[sid_string]
else:
sid_name_re = find_sid_re(sid_string, self.well_known_sid_re)
if sid_name_re:
sid_name = sid_name_re
else:
sid_name = ""
yield (
0,
(
task.UniqueProcessId,
objects.utility.array_to_string(task.ImageFileName),
sid_string,
sid_name,
),
)
def run(self):
filter_func = pslist.PsList.create_pid_filter(self.config.get("pid", None))
kernel = self.context.modules[self.config["kernel"]]
return renderers.TreeGrid(
[("PID", int), ("Process", str), ("SID", str), ("Name", str)],
self._generator(
pslist.PsList.list_processes(
context=self.context,
layer_name=kernel.layer_name,
symbol_table=kernel.symbol_table_name,
filter_func=filter_func,
)
),
)
| volatilityfoundation/volatility3 | volatility3/framework/plugins/windows/getsids.py | getsids.py | py | 8,864 | python | en | code | 1,879 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "volatility3.framework.renderers.NotAvailableValue",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "volatility3.framework.renderers",
"line_number": 34,
"usage_type": "n... |
2494095628 | import bson
import ujson
from bson import ObjectId
from .utils import db
def retrieve_info(object_id):
# Retrieve info from DB
mongo = db.MongoDBConnection()
result = list()
with mongo:
database = mongo.connection['mydb']
collection = database['registrations']
if object_id is not None:
# Retrieve particular object
try:
single_object = collection.find_one({"_id": ObjectId(object_id)})
if single_object is None:
return {"error": "Object id does not exist!"}
except bson.errors.InvalidId:
# catch the exception calling ObjectId
return {"err": "Invalid object_id"}
return {
'id': object_id,
'first_name': single_object['first_name'],
'last_name': single_object['last_name'],
'email': single_object['email']
}
else:
# Retrieve all info
for user in collection.find():
result.append({
'id': str(user["_id"]),
'first_name': user['first_name'],
'last_name': user['last_name'],
'email': user['email']
})
return result
def lambda_handler(event, context):
try:
print(event)
object_id = event['pathParameters']['id']
# print("#######OBJECT_ID#########\n" + object_id)
except TypeError:
object_id = None
except KeyError:
object_id = None
try:
return {
"statusCode": 200,
"body": ujson.dumps({
"message": "Success",
"data": retrieve_info(object_id)
})
}
except Exception as err:
return {
"statusCode": 400,
"body": ujson.dumps({
"message": "Something went wrong. Unable to parse data !",
"error": str(err)
})
}
| zouhanrui/AWS_Serverless_CRUD_MongoDB | Organizations_pkg/user/read/app.py | app.py | py | 2,040 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "utils.db.MongoDBConnection",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "utils.db",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "bson.ObjectId",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "bson.errors",
"... |
552754902 | from flask import Flask, render_template, request, redirect, flash, session, config
from flask_debugtoolbar import DebugToolbarExtension
from surveys import satisfaction_survey as survey
app = Flask(__name__)
app.config['SECRET_KEY'] = "austin"
debug = DebugToolbarExtension(app)
responses = []
currentNum = 0
RESPONSES_KEY = "responses"
@app.route('/')
def show_home():
"""Main home page viewing."""
global responses
answers = responses
responses = []
instruction_text = str(survey.instructions)
return render_template('base.html', instruction_text=instruction_text, answers=answers)
@app.route('/questions/1')
def question1():
if len(responses) == 0:
answers = responses
global currentNum
currentNum = 1
question = survey.questions[0].question
choices = survey.questions[0].choices
return render_template('questions.html', question=question, num=currentNum, choices=choices, answers=answers)
else:
flash(f"Invalid question ID. Please take the survey in sequential order.")
return redirect("/")
@app.route('/questions/2')
def question2():
if len(responses) == 1:
answers = responses
global currentNum
currentNum = 2
question = survey.questions[1].question
choices = survey.questions[1].choices
return render_template('questions.html', question=question, num=currentNum, choices=choices, answers=answers)
else:
flash(f"Invalid question ID. Please take the survey in sequential order.")
return redirect("/")
@app.route('/questions/3')
def question3():
if len(responses) == 2:
answers = responses
global currentNum
currentNum = 3
question = survey.questions[2].question
choices = survey.questions[2].choices
return render_template('questions.html', question=question, num=currentNum, choices=choices, answers=answers)
else:
flash(f"Invalid question ID. Please take the survey in sequential order.")
return redirect("/")
@app.route('/questions/4')
def question4():
if len(responses) == 3:
answers = responses
global currentNum
currentNum = 4
question = survey.questions[3].question
choices = survey.questions[3].choices
return render_template('questions.html', question=question, num=currentNum, choices=choices, answers=answers)
else:
flash(f"Invalid question ID. Please take the survey in sequential order.")
return redirect("/")
@app.route('/questions/5')
def question5():
return redirect('/thankyou')
@app.route('/thankyou')
def thankyou_page():
answers = responses
session[RESPONSES_KEY] = responses
return render_template('thankyou.html', answers=answers)
@app.route("/answer", methods=["POST"])
def post_answer():
choice = request.form['answer']
responses.append(choice)
global currentNum
newPageNum = currentNum + 1
# return redirect('/questions/{newPageNum}')
return redirect(f"/questions/{newPageNum}")
| austindreosch/springboard | exercises/section2/flask-session/app.py | app.py | py | 3,081 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask_debugtoolbar.DebugToolbarExtension",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "surveys.satisfaction_survey.instructions",
"line_number": 21,
"usage_type": "attribute... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.