seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
41873337753 | import sys
from firebaseConfig import firebase
from config import CLAN_CODE, CLAN_INFO_FILE_PATH, CLAN_QUEST_INFO_FILE_PATH
from clanDatabase import ClanDatabase
def main():
    """Build the ClanDatabase handle from config values and enter the command loop."""
    firebase_db = firebase.database()
    clan_db = ClanDatabase(
        CLAN_CODE,
        CLAN_INFO_FILE_PATH,
        CLAN_QUEST_INFO_FILE_PATH,
        firebase_db,
    )
    process_command(clan_db)
def process_command(db):
    """Read commands from stdin and dispatch them to the clan database.

    Prints the available commands, then loops until one recognised command
    has executed successfully. Unknown commands re-prompt; exceptions raised
    by a command are printed and the prompt is retried.

    :param db: a ClanDatabase instance whose methods perform the real work.
    """
    # Map each accepted command name to the database operation it triggers.
    commands = {
        "initial_load": db.initial_load_clan_info,
        "update_stats": db.update_everyone_stats,
        "update_damages": db.update_everyone_damage,
        "weekly_reset": db.weekly_reset,
    }
    # Renamed from ``help`` to avoid shadowing the builtin of the same name.
    help_text = (
        "Available commands:\n\n" +
        "1. (initial_load): populates the database with clan info (note: no clan quests), if your clan is not in the database. Else, it fails and does nothing.\n\n" +
        "2. (update_stats): updates all clan members' stats in the database (note: no clan quest damage).\n\n" +
        "3. (update_damages): updates all clan members' damages in the database (adds damage to an array of existing damages).\n\n" +
        "4. (weekly_reset): move current week's stats and damages to last week and reset current week.\n\n"
    )
    print(help_text)
    done = False
    while not done:
        try:
            command = input()
            action = commands.get(command)
            if action is None:
                print("Sorry command not understood. Try Again.")
            else:
                action()
                done = True
        except Exception as error:
            # Keep the loop alive on failure so the user can try again.
            print(error)
# Entry point when executed as a script.
if __name__ == "__main__":
    main()
| ygongdev/FishBotScripts | main.py | main.py | py | 1,424 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "clanDatabase.ClanDatabase",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "config.CLAN_CODE",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "config.CLAN_INFO_FILE_PATH",
"line_number": 9,
"usage_type": "argument"
},
{
"api_na... |
36193733670 | from graftm.graftm_package import GraftMPackage, GraftMPackageVersion3
import dendropy
import logging
import tempfile
from Bio import SeqIO
import extern
from .singlem_package import SingleMPackageVersion2
import shutil
import os
import tempdir
class PackageCreator:
    """Converts a GraftM package into a SingleM package."""

    def create(self, **kwargs):
        """Create a SingleM package from a GraftM package.

        Keyword arguments (all required):
            input_graftm_package: path to the source GraftM package.
            output_singlem_package: path where the SingleM package is written.
            hmm_position: position within the alignment HMM of the window.
            window_size: length of the window.
            force: if True, remove any existing output package first.

        Raises Exception on unexpected kwargs, duplicate tree tips/sequences,
        or a mismatch between tree tips and the unaligned sequence database.
        """
        input_graftm_package_path = kwargs.pop('input_graftm_package')
        output_singlem_package_path = kwargs.pop('output_singlem_package')
        hmm_position = kwargs.pop('hmm_position')
        window_size = kwargs.pop('window_size')
        force = kwargs.pop('force')
        if len(kwargs) > 0:
            raise Exception("Unexpected arguments detected: %s" % kwargs)

        # Overwrite semantics: only remove pre-existing output when forced.
        if force and os.path.exists(output_singlem_package_path):
            shutil.rmtree(output_singlem_package_path)

        # For protein packages, remove sequences from diamond database that are
        # not in the tree so that hits can be mapped onto the tree and used for
        # alpha and beta diversity metrics.
        gpkg = GraftMPackage.acquire(input_graftm_package_path)
        is_protein_package = SingleMPackageVersion2.graftm_package_is_protein(gpkg)
        logging.info("Detected package type as %s" %
                     ('protein' if is_protein_package else 'nucleotide'))
        if is_protein_package:
            # Collect the set of tree tip names, rejecting duplicates.
            tree_leaves = set()
            for node in dendropy.Tree.get(
                    path=gpkg.reference_package_tree_path(),
                    schema='newick').leaf_node_iter():
                # need to replace here because otherwise they don't line up with the
                # diamond database IDs
                node_name = node.taxon.label.replace(' ', '_')
                if node_name in tree_leaves:
                    raise Exception("Found duplicate tree leaf name in graftm package "
                                    "tree. Currently this case is not handled, sorry")
                tree_leaves.add(node_name)
            for name in tree_leaves:  # I don't think there is a 'peek' ?
                eg_name = name
                break
            logging.info("Read in %i tree tip names e.g. %s" % (
                len(tree_leaves), eg_name))

            # Make a new fasta file of all the sequences that are leaves.
            found_sequence_names = set()
            num_seqs_unaligned = 0
            filtered_aligned_tempfile = tempfile.NamedTemporaryFile(
                prefix='singlem_package_creator',
                suffix='.fasta',
                mode='w')
            for s in SeqIO.parse(gpkg.unaligned_sequence_database_path(), "fasta"):
                num_seqs_unaligned += 1
                if s.id in tree_leaves:
                    if s.id in found_sequence_names:
                        raise Exception("Found duplicate sequence names in graftm unaligned"
                                        " sequence fasta file. Currently this case is not handled,"
                                        " sorry")
                    SeqIO.write([s], filtered_aligned_tempfile, "fasta")
                    found_sequence_names.add(s.id)
            filtered_aligned_tempfile.flush()

            # Every tree tip must have a matching unaligned sequence.
            if len(tree_leaves) != len(found_sequence_names):
                for t in tree_leaves:
                    if t not in found_sequence_names:
                        raise Exception("Found some sequences that were in the tree but not the"
                                        " unaligned sequences database e.g. %s. Something is"
                                        " likely amiss with the input GraftM package" % t)
                raise Exception("Programming error, shouldn't get here")
            logging.info("All %i sequences found in tree extracted successfully from unaligned"
                         " sequences fasta file, which originally had %i sequences" % (
                             len(found_sequence_names), num_seqs_unaligned))

            # Create a new diamond database from the filtered sequences.
            dmnd_tf = tempfile.NamedTemporaryFile(prefix='singlem_package_creator', suffix='.dmnd')
            cmd = "diamond makedb --in '%s' -d '%s'" % (filtered_aligned_tempfile.name, dmnd_tf.name)
            logging.info("Creating DIAMOND database")
            extern.run(cmd)

        # Compile the final graftm/singlem package.
        # If the only search HMM is the alignment HMM, omit the search HMM list.
        if len(gpkg.search_hmm_paths()) == 1 and \
                gpkg.search_hmm_paths()[0] == gpkg.alignment_hmm_path():
            search_hmms = None
        else:
            search_hmms = gpkg.search_hmm_paths()
        with tempdir.TempDir() as tmpdir:
            # Intermediate GraftM package is built in a temp dir then discarded.
            gpkg_name = os.path.join(
                tmpdir,
                os.path.basename(
                    os.path.abspath(input_graftm_package_path)).replace('.gpkg', ''))
            GraftMPackageVersion3.compile(gpkg_name,
                                          gpkg.reference_package_path(),
                                          gpkg.alignment_hmm_path(),
                                          dmnd_tf.name if is_protein_package else None,
                                          gpkg.maximum_range(),
                                          filtered_aligned_tempfile.name if is_protein_package else
                                          gpkg.unaligned_sequence_database_path(),
                                          gpkg.use_hmm_trusted_cutoff(),
                                          search_hmms)
            logging.debug("Finished creating GraftM package for conversion to SingleM package")
            SingleMPackageVersion2.compile(output_singlem_package_path,
                                           gpkg_name, hmm_position, window_size)
            shutil.rmtree(gpkg_name)
        if is_protein_package:
            # Closing the NamedTemporaryFile objects deletes the temp files.
            filtered_aligned_tempfile.close()
            dmnd_tf.close()
        logging.info("SingleM-compatible package creation finished")
| ye00ye/singlem | singlem/package_creator.py | package_creator.py | py | 5,879 | python | en | code | null | github-code | 6 | [
{
"api_name": "os.path.exists",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "graftm.graftm_package.Graft... |
8331020218 | import os
import datetime
import mysql.connector
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph
from reportlab.lib import colors
from reportlab.lib.styles import getSampleStyleSheet
# pip install mysql-connector-python
# pip install reportlab
def get_current_date():
    """Return today's local date formatted as ``YYYY-MM-DD``."""
    today = datetime.datetime.now()
    return today.strftime("%Y-%m-%d")
def get_data_from_mysql():
    """Fetch today's attendance rows from the ``registro`` table.

    Returns a list of (id, fecha, nombre, hora) tuples for every row whose
    ``fecha`` equals the current date.
    """
    # Connect to the MySQL database.
    # NOTE(review): credentials are hard-coded; consider environment variables.
    connection = mysql.connector.connect(
        host="localhost",
        user="root",
        password="root",
        database="asistencia_del_cia"
    )
    try:
        cursor = connection.cursor()
        # Get the current date in "YYYY-MM-DD" format.
        current_date = get_current_date()
        # Parameterized query instead of f-string interpolation: the driver
        # escapes the value, avoiding SQL-injection-style breakage.
        query = "SELECT id, fecha, nombre, hora FROM registro WHERE fecha = %s"
        cursor.execute(query, (current_date,))
        data = cursor.fetchall()
        cursor.close()
        return data
    finally:
        # Always release the connection, even if the query fails.
        connection.close()
def create_pdf(data):
    """Render the day's attendance rows into a PDF table.

    The output file is named ``lista_asistencia_<date>.pdf``.
    """
    filename = f"lista_asistencia_{get_current_date()}.pdf"
    doc = SimpleDocTemplate(filename, pagesize=letter)

    # Title paragraph at the top of the document.
    title = f"Attendance list for the day ({get_current_date()})"
    title_paragraph = Paragraph(title, getSampleStyleSheet()['Title'])

    # Header row followed by the raw records.
    data_table = [['ID', 'Date', 'Name', 'Time']] + data
    table = Table(data_table)
    table.setStyle(TableStyle([
        # ... (table styling goes here)
    ]))

    # Assemble the flowables and write the PDF.
    elements = [title_paragraph, table]
    doc.build(elements)
# Entry point: pull today's records and render them as a PDF.
if __name__ == "__main__":
    # Get the data from MySQL
    data = get_data_from_mysql()
    # Create the PDF with the table data
    create_pdf(data)
| IgnaciodeJesusMedinaUrrunaga/Attendance-Registration-with-Facial-Recognition | Attendance_Registration_CIA/Attendance-list-with-facial-recognition-using-Python/transfer_today_records_to_pdf.py | transfer_today_records_to_pdf.py | py | 2,145 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector.connector.connect",
"line_number": 18,
"usage_type": "call"
},
{
"ap... |
3520676139 | import json
import os
import time
import pytest
from src.rss_config_io import RssConfigIO
from src.rss_configuration import RssConfiguration
class TestRssConfiguration:
    """Pytest suite for RssConfiguration persistence and timestamp handling."""

    # Fixed fixture values shared by every test.
    rss = "http://g1.globo.com/dynamo/rss2.xml"
    timestamp = [2019, 8, 24, 2, 56, 52, 5, 236, 0]
    temp = "missing_file.json"
    # Serialized baseline configuration, restored to disk after each test.
    base_content = json.dumps({rss: {RssConfiguration._timestamp_field: timestamp}})

    @pytest.fixture(autouse=True)
    def before_after_all(self):
        # Before each test: make sure the stray temp file is gone.
        try:
            os.remove(self.temp)
        except:
            pass
        yield
        # After each test: restore the baseline configuration file.
        with open(RssConfigIO()._file, 'w') as f:
            f.write(self.base_content)

    def test_is_callable(self):
        assert RssConfiguration(self.rss) is not None

    def test_config_is_json(self):
        # The parsed config must be a dict that round-trips through json.
        json_config = RssConfiguration(self.rss)._config
        assert (type(json_config) is dict)
        assert json.dumps(json_config) is not None

    def test_config_is_consistent(self):
        # Two reads of the same feed must yield identical configs.
        json_config = str(RssConfiguration(self.rss)._config)
        json_config2 = str(RssConfiguration(self.rss)._config)
        assert (json_config == json_config2)

    def test_update_on_destroy(self):
        # Pending changes in _future_config are persisted when the object dies.
        config = RssConfiguration(self.rss)
        assert "x" not in config._future_config
        config._future_config["x"] = "y"
        del config
        new_config = RssConfiguration(self.rss)._config
        assert type(new_config) is dict
        assert new_config["x"] == "y"

    def test_default(self):
        sample = RssConfiguration('b')._get_default()
        assert 'timestamp' in sample

    def test_default_values(self):
        # Unknown feeds start with the default timestamp (epoch + 1 second).
        config = RssConfiguration('non_existing')._config
        assert config['timestamp'] is not None
        assert type(config['timestamp']) is list
        assert config['timestamp'] == list(time.gmtime(1))

    def test_get_timestamp(self):
        timestamp = RssConfiguration(self.rss).get_timestamp()
        assert timestamp is not None

    def test_set_timestamp(self):
        # A newer timestamp replaces the stored one.
        config = RssConfiguration(self.rss)
        timestamp = [2022, 8, 24, 2, 56, 52, 5, 236, 0]
        assert timestamp > config._future_config[config._timestamp_field]
        config.set_timestamp(timestamp)
        assert timestamp == config._future_config[config._timestamp_field]

    def test_set_timestamp_no_downgrade(self):
        # An older timestamp must never overwrite a newer stored one.
        config = RssConfiguration(self.rss)
        old_timestamp = config.get_timestamp()
        assert old_timestamp == self.timestamp
        new_timestamp = list(time.gmtime(200))
        assert old_timestamp > new_timestamp
        config.set_timestamp(new_timestamp)
        assert config.get_timestamp() == old_timestamp
| maxpeixoto/rss_filterer | test/test_rss_configuration.py | test_rss_configuration.py | py | 2,655 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.dumps",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "src.rss_configuration.RssConfiguration._timestamp_field",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "src.rss_configuration.RssConfiguration",
"line_number": 15,
"usag... |
28914738818 | import os
import unittest
import json
from functools import wraps
from flask_sqlalchemy import SQLAlchemy
from app import create_app
from models import setup_db, Movie, Actor
class CapstoneTestCase(unittest.TestCase):
    """Integration tests for the capstone movies/actors API."""

    def setUp(self):
        """Define test variables and initialize app."""
        self.app = create_app()
        self.client = self.app.test_client
        self.database_name = "capstone1"
        # NOTE(review): database_path is built but never passed to setup_db;
        # confirm setup_db picks up its connection string elsewhere.
        self.database_path = "postgres://{}:{}@{}/{}".format(
            'postgres',
            '123456',
            'localhost:5432',
            self.database_name)
        setup_db(self.app)
        # NOTE(review): hard-coded bearer token will expire; consider loading
        # it from an environment variable instead.
        self.header = {'Content-Type': 'application/json', 'Authorization': "Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6InRQVVJiUEVPYTBVZWt4YmE0MVhjayJ9.eyJpc3MiOiJodHRwczovL2Rldi11ZGFjaXR5LWNhcHN0b25lMjAyMS51cy5hdXRoMC5jb20vIiwic3ViIjoiYXV0aDB8NjEwMjBlMzZjNjFmZDcwMDc3ZDA1OWEzIiwiYXVkIjoiaHR0cDovL2xvY2FsaG9zdDo1MDAwL2FwaSIsImlhdCI6MTYzMDIwMTUyMCwiZXhwIjoxNjMwMjg3OTIwLCJhenAiOiJTZWxNZ3U5RUdWRVBjNzZCdW9DaWZ1cklkOGxkendFQiIsInNjb3BlIjoiIiwicGVybWlzc2lvbnMiOlsiZGVsZXRlOmFjdG9yIiwiZGVsZXRlOm1vdmllIiwiZ2V0OmFjdG9ycyIsImdldDptb3ZpZXMiLCJwYXRjaDphY3RvciIsInBhdGNoOm1vdmllIiwicG9zdDphY3RvciIsInBvc3Q6bW92aWUiXX0.NncE9PLAGT1t0hvoZTKqeKqEYwe8SgbV-5KN-D61CNMMt4k16Dkw-nVi_0V0VzxnI3dFgRzNFZ-XnbFeej_lV583pGURGYjr8n362NI7AeumnC8ONO7na0rAgSzx-IrQ-eE9ANcNjcvOCBq_S2e6KBHbDNJLQ19kC9AhHOA6QmVzg_fmyDUkWOiVybOzaj6Zn2UaDnviRYRINWaL_jR-_PqrNCP3k6XcxA5p38y73tXAqj2TWHqGw99oQLyRBrPH2n8PQc5HC3HSFn-ZEPJUYhK0gOnBApTqADVstGSrahkgKG3pVDiyI2hE2FSxB0h4jfxNAgUmcuweeJ8_ajVyhQ"}
        # Bind the app to the current context and create all tables.
        with self.app.app_context():
            self.db = SQLAlchemy()
            self.db.init_app(self.app)
            self.db.create_all()

    def tearDown(self):
        """Executed after each test."""
        pass

    # ---- GET /movies ----
    def test_get_movies(self):
        res = self.client().get('http://localhost:5000/api/movies', headers=self.header)
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertTrue(len(data['movies']))
        self.assertTrue(data['total_movies'])

    def test_404_if_movies_doesnt_exist(self):
        # Misspelled endpoint must 404.
        res = self.client().get('http://localhost:5000/api/moviss', headers=self.header)
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 404)
        self.assertEqual(data['success'], False)
        self.assertEqual(data['message'], 'resource not found')

    # ---- POST /movies ----
    def test_post_new_movie(self):
        res = self.client().post(
            'http://localhost:5000/api/movies/create',
            json={
                'title': 'Ocean Eyes',
                'release_date': '2018-10-01'
            },
            headers=self.header)
        movies = Movie.query.all()
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertTrue(len(movies))

    def test_422_if_new_movie_is_unprocessable(self):
        res = self.client().post(
            'http://localhost:5000/api/movies/create',
            json={'title': ""},
            headers=self.header)
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 422)
        self.assertEqual(data['success'], False)
        self.assertEqual(data['message'], 'unprocessable')

    # ---- DELETE /movies ----
    def test_delete_movie(self):
        # NOTE(review): depends on a row with id 43 existing in the test DB.
        res = self.client().delete('http://localhost:5000/api/movies/43', headers=self.header)
        data = json.loads(res.data)
        movie = Movie.query.filter(Movie.id == 43).one_or_none()
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertEqual(movie, None)

    def test_404_if_movie_delete_doesnt_exist(self):
        res = self.client().delete('http://localhost:5000/api/movies/1000', headers=self.header)
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 404)
        self.assertEqual(data['success'], False)
        self.assertEqual(data['message'], 'resource not found')

    # ---- PATCH /movies ----
    def test_patch_movie(self):
        res = self.client().patch(
            'http://localhost:5000/api/movies/11',
            json={
                'title': 'The Gifteddddddddddddddd',
                'release_date': '2000-02-01'
            },
            headers=self.header)
        movies = Movie.query.all()
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertTrue(len(movies))

    def test_404_if_movie_patch_doesnt_exist(self):
        res = self.client().patch(
            'http://localhost:5000/api/movies/8000',
            json={
                'title': '',
                'release_date': ''
            },
            headers=self.header)
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 404)
        self.assertEqual(data['success'], False)
        self.assertEqual(data['message'], 'resource not found')

    def test_422_if_patch_movie_is_unprocessable(self):
        res = self.client().patch(
            'http://localhost:5000/api/movies/6',
            json={'title': ""},
            headers=self.header)
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 422)
        self.assertEqual(data['success'], False)
        self.assertEqual(data['message'], 'unprocessable')

    # ---- GET /actors ----
    def test_get_actors(self):
        res = self.client().get('/api/actors', headers=self.header)
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertTrue(len(data['actors']))
        self.assertTrue(data['total_actors'])

    def test_404_if_actors_doesnt_exist(self):
        res = self.client().get('http://localhost:5000/api/actores', headers=self.header)
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 404)
        self.assertEqual(data['success'], False)
        self.assertEqual(data['message'], 'resource not found')

    # ---- POST /actors ----
    def test_post_new_actor(self):
        res = self.client().post(
            'http://localhost:5000/api/actors/create',
            json={
                'name': 'Viola Davis',
                'age': 51,
                'gender': 'F'
            },
            headers=self.header)
        # FIX: the original queried Movie here (copy-paste error); this test
        # creates an actor, so it must verify the Actor table.
        actors = Actor.query.all()
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertTrue(len(actors))

    def test_422_if_new_actor_is_unprocessable(self):
        res = self.client().post(
            'http://localhost:5000/api/actors/create',
            json={},
            headers=self.header)
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 422)
        self.assertEqual(data['success'], False)
        self.assertEqual(data['message'], 'unprocessable')

    # ---- DELETE /actors ----
    def test_delete_actor(self):
        # NOTE(review): depends on a row with id 88 existing in the test DB.
        res = self.client().delete('http://localhost:5000/api/actors/88', headers=self.header)
        data = json.loads(res.data)
        actor = Actor.query.filter(Actor.id == 88).one_or_none()
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertEqual(actor, None)

    def test_404_if_actor_delete_doesnt_exist(self):
        res = self.client().delete('http://localhost:5000/api/actors/10000', headers=self.header)
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 404)
        self.assertEqual(data['success'], False)
        self.assertEqual(data['message'], 'resource not found')

    # ---- PATCH /actors ----
    def test_patch_actor(self):
        res = self.client().patch(
            'http://localhost:5000/api/actors/19',
            json={
                'name': 'Steve Carrell',
                'age': 58,
                'gender': 'M'
            },
            headers=self.header)
        actors = Actor.query.all()
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertTrue(len(actors))

    def test_404_if_actor_patch_doesnt_exist(self):
        res = self.client().patch(
            'http://localhost:5000/api/actors/8000',
            json={
                'name': 'pepe grillo',
                'age': '',
                'gender': ''
            },
            headers=self.header)
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 404)
        self.assertEqual(data['success'], False)
        self.assertEqual(data['message'], 'resource not found')

    def test_422_if_patch_actor_is_unprocessable(self):
        res = self.client().patch(
            'http://localhost:5000/api/actors/8',
            json={},
            headers=self.header)
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 422)
        self.assertEqual(data['success'], False)
        self.assertEqual(data['message'], 'unprocessable')
# Make the tests conveniently executable
if __name__ == "__main__":
    unittest.main()
| steffaru/FSND_CaptionProject | test_flaskr.py | test_flaskr.py | py | 9,453 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "app.create_app",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "models.setup_db",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchem... |
# Olist e-commerce review analysis: merge the raw datasets, engineer
# aggregate features, select the most informative ones with chi2, and
# compare several classifiers on predicting the review score.

# Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# Import all datasets
df_customers = pd.read_csv('olist_customers_dataset.csv')
df_geolocation = pd.read_csv('olist_geolocation_dataset.csv')
df_order_items = pd.read_csv('olist_order_items_dataset.csv')
df_order_pay = pd.read_csv('olist_order_payments_dataset.csv')
df_order_reviews = pd.read_csv('olist_order_reviews_dataset.csv')
df_orders = pd.read_csv('olist_orders_dataset.csv')
df_products = pd.read_csv('olist_products_dataset.csv')
df_sellers = pd.read_csv('olist_sellers_dataset.csv')
df_product_cat = pd.read_csv('product_category_name_translation.csv')

# Merge datasets into one wide frame with all information
df = pd.merge(df_orders, df_order_items, on='order_id', how='right')
df = df.merge(df_products, on='product_id')
df = df.merge(df_order_reviews, on='order_id')
df = df.merge(df_sellers, on='seller_id')
df = df.merge(df_customers, on='customer_id')
df = df.rename(columns={'price': 'product_price', 'order_item_id': 'quantity'})
df = df.drop(['review_id', 'review_creation_date', 'review_answer_timestamp',
              'review_comment_title', 'review_comment_message', 'customer_id'], axis=1)

# Take a look at the distribution of order status; only delivered products
# are relevant for the review rating.
print(df.groupby(by='order_status').count())
df = df[df['order_status'] == 'delivered']

# Feature engineering: product avg score, product avg price, seller avg score
product_scored = df.groupby(by='product_id')['review_score'].mean()
product_avg_price = df.groupby(by='product_id')['product_price'].mean()
df_product_calc = pd.concat([product_scored, product_avg_price], axis=1)
df_product_calc = df_product_calc.reset_index()
df_product_calc = df_product_calc.rename(columns={'review_score': 'score_product_avg',
                                                  'product_price': 'product_price_avg'})

seller_scored = df.groupby(by='seller_id')['review_score'].mean()
df_seller_scored = pd.DataFrame(data=seller_scored)
df_seller_scored = df_seller_scored.reset_index()
df_seller_scored = df_seller_scored.rename(columns={'review_score': 'seller_score_avg'})

# Merge the new features into the major dataset
df = df.merge(df_product_calc, on='product_id')
df = df.merge(df_seller_scored, on='seller_id')

# FIX: the original compared against the literal string 'nan', which never
# matches missing values; use isna() to select rows with a missing name length.
dfnull = df[df.product_name_lenght.isna()]

# Dimension reduction - feature selection with the chi2 statistic
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2

X = df[['product_price', 'freight_value', 'product_name_lenght',
        'product_description_lenght', 'product_photos_qty', 'product_weight_g',
        'product_length_cm', 'product_height_cm', 'product_width_cm',
        'score_product_avg', 'product_price_avg', 'seller_score_avg',
        'review_score']]
X = X.dropna()
y = X['review_score']
X = X.drop(['review_score'], axis=1)

# FIX: fit_transform() returns a plain ndarray, which has no `scores_`
# attribute, and `X_new` was never defined in the original. Keep the fitted
# selector to read the scores, and select k=4 features to match the
# feat1..feat4 columns used below.
selector = SelectKBest(chi2, k=4)
X_new = selector.fit_transform(X, y)
scores = selector.scores_

df_feat = pd.DataFrame(X_new)
# FIX: assigning the Series `y` directly would align on the original frame's
# index and produce NaNs; use the raw values instead.
df_feat['reviews'] = y.values
df_feat = df_feat.rename(columns={0: 'feat1', 1: 'feat2', 2: 'feat3', 3: 'feat4'})
# sns.lmplot(x='feat1', y='feat2', data=df_feat, hue='reviews', palette='cubehelix',
#            scatter_kws={'s': 10})

# Train/test split (70% training, 30% test; random_state fixes the shuffle seed)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    df_feat[['feat1', 'feat2', 'feat3', 'feat4']], y, test_size=0.30, random_state=101)

# Decision tree (the original trained this model twice; once is enough)
from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier()
dtree.fit(X_train, y_train)
predictions_trees = dtree.predict(X_test)

# Support vector machine
# NOTE: finding good hyperparameters (C, gamma) is fiddly; a grid of candidate
# values can be searched automatically with sklearn's GridSearchCV
# ("CV" = cross-validation), which takes a parameter dict and a model.
from sklearn.svm import SVC
svc_model = SVC()
svc_model.fit(X_train, y_train)
predictions_svm = svc_model.predict(X_test)

# Random forest
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=10)
rfc.fit(X_train, y_train)
predictions_r_forest = rfc.predict(X_test)

# KNN (4 neighbors, euclidean distance)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=4, algorithm='auto', metric='euclidean')
knn.fit(X_train, y_train)
predictions_knn = knn.predict(X_test)

# Evaluation: classification metrics for every model
from sklearn.metrics import classification_report, confusion_matrix
matrix_svm = confusion_matrix(y_test, predictions_svm)
report_svm = classification_report(y_test, predictions_svm)
matrix_trees = confusion_matrix(y_test, predictions_trees)
report_trees = classification_report(y_test, predictions_trees)
matrix_r_forest = confusion_matrix(y_test, predictions_r_forest)
report_r_forest = classification_report(y_test, predictions_r_forest)
matrix_knn = confusion_matrix(y_test, predictions_knn)
report_knn = classification_report(y_test, predictions_knn)
| ThePeziBear/MyPythonLibrary | Masterthesis/Olist/Data_Manipulation_V2.py | Data_Manipulation_V2.py | py | 6,260 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"... |
37303345220 | # Superposition of 2 spirals
import tkinter
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)
from matplotlib.figure import Figure
import matplotlib.animation as animation
import numpy as np
from matplotlib.patches import Circle
import mpl_toolkits.mplot3d.art3d as art3d
def change_k1(value):
    # Spinbox callback: update wave number k1 (value arrives as a string).
    global k1
    k1 = float(value)


def change_k2(value):
    # Spinbox callback: update wave number k2.
    global k2
    k2 = float(value)


def change_o1(value):
    # Spinbox callback: update angular frequency omega1.
    global omega1
    omega1 = float(value)


def change_o2(value):
    # Spinbox callback: update angular frequency omega2.
    global omega2
    omega2 = float(value)


def change_a1(value):
    # Spinbox callback: update amplitude a1.
    global a1
    a1 = float(value)


def change_a2(value):
    # Spinbox callback: update amplitude a2.
    global a2
    a2 = float(value)
def reset_parameter():
    # Reset all wave parameters to their defaults and sync the spinbox widgets.
    global k1, k2, omega1, omega2, a1, a2
    k1 = 1.
    k2 = 1.
    omega1 = omega_max
    omega2 = omega_min
    a1 = a_max
    a2 = a_max
    # Push the defaults back into the spinbox variables (initial values).
    var_a1.set(a1)
    var_k1.set(k1)
    var_o1.set(omega1)
    var_a2.set(a2)
    var_k2.set(k2)
    var_o2.set(omega2)
def set_axis():
    # Configure limits, titles and labels on both 3D axes.
    # Called from update() after cla() wipes the axes each frame.
    ax1.set_xlim(x_min, x_max)
    ax1.set_ylim(y_min, y_max)
    ax1.set_zlim(z_min, z_max)
    ax1.set_title('2 spirals ')
    ax1.set_xlabel('x * pi')
    ax1.set_ylabel('y')
    # ax1.set_zlabel('z * i')
    ax1.grid()
    ax2.set_xlim(x_min, x_max)
    ax2.set_ylim(y_min, y_max)
    ax2.set_zlim(z_min, z_max)
    ax2.set_title('Superposed spiral')
    ax2.set_xlabel('x * pi')
    ax2.set_ylabel('y')
    ax2.set_zlabel('z * i')
    ax2.grid()
def update(f):
    # Animation callback: redraw both axes for frame f (f plays the role of time t).
    ax1.cla()  # Clear ax
    ax2.cla()  # Clear ax
    set_axis()
    ax1.text(x_min, y_max, z_max * 1.3, "Step(as t)=" + str(f))
    # Annotate the phase velocities (only defined for non-zero wave numbers).
    if k1 != 0:
        vp1 = omega1 / k1
        ax1.text(x_min, y_max, z_max * 1.0, "Phase velocity1(omega1/k1)=" + str(vp1))
    if k2 != 0:
        vp2 = omega2 / k2
        ax1.text(x_min, y_max, z_max * 0.7, "Phase velocity2(omega2/k2)=" + str(vp2))
    # Group velocity of the superposition requires distinct wave numbers.
    if (k1 - k2) != 0:
        vg = (omega1 - omega2) / (k1 - k2)
        ax2.text(x_min, y_max, z_max * 1.3, "Group velocity(d_omega/dk)")
        ax2.text(x_min, y_max, z_max * 1.0, "=(omega1-omega2)/(k1-k2)=" + str(vg))
    else:
        ax2.text(x_min, y_max, z_max * 1.3, "None group velocity")
    # Draw a unit circle in the y-z plane on each axis.
    c1 = Circle((0, 0), 1, ec='gray', fill=False)
    ax1.add_patch(c1)
    art3d.pathpatch_2d_to_3d(c1, z=0, zdir="x")
    c2 = Circle((0, 0), 1, ec='gray', fill=False)
    ax2.add_patch(c2)
    art3d.pathpatch_2d_to_3d(c2, z=0, zdir="x")
    # Draw a center line along the x axis.
    line1 = art3d.Line3D([x_min, x_max], [0, 0], [0, 0], color='gray', ls="-.", linewidth=1)
    ax1.add_line(line1)
    line2 = art3d.Line3D([x_min, x_max], [0, 0], [0, 0], color='gray', ls="-.", linewidth=1)
    ax2.add_line(line2)
    # Draw the sine projections and their superposition (on the z floor).
    y1 = a1 * np.sin((k1 * x - omega1 * f) * np.pi)  # Note: np.pi so the x axis reads as x * pi
    ax1.plot(x, y1, z_min, color='gray', ls="-", linewidth=1)
    y2 = a2 * np.sin((k2 * x - omega2 * f) * np.pi)
    ax1.plot(x, y2, z_min, color='gray', ls="--", linewidth=1)
    ax2.plot(x, y1 + y2, z_min, color='gray', ls="-", linewidth=1)
    # Draw the cosine projections and their superposition (on the y wall).
    y = x * 0. + y_max
    z1 = a1 * np.cos((k1 * x - omega1 * f) * np.pi)
    ax1.plot(x, y, z1, color='gray', ls="-", linewidth=1)
    z2 = a2 * np.cos((k2 * x - omega2 * f) * np.pi)
    ax1.plot(x, y, z2, color='gray', ls="--", linewidth=1)
    ax2.plot(x, y, z1 + z2, color='gray', ls="-", linewidth=1)
    # Draw spokes from the axis to the superposed curve at regular intervals.
    inter = abs(x_max - x_min) / num_additional_lines
    for i in range(num_additional_lines):
        xx = i * inter
        yy1 = a1 * np.sin((k1 * xx - omega1 * f) * np.pi)
        zz1 = a1 * np.cos((k1 * xx - omega1 * f) * np.pi)
        yy2 = a2 * np.sin((k2 * xx - omega2 * f) * np.pi)
        zz2 = a2 * np.cos((k2 * xx - omega2 * f) * np.pi)
        line = art3d.Line3D([xx, xx], [0, yy1 + yy2], [0, zz1 + zz2], color='gray', ls="--", linewidth=1)
        ax2.add_line(line)
    # Draw the two source spirals on ax1.
    y1 = a1 * np.sin((k1 * x - omega1 * f) * np.pi)
    z1 = a1 * np.cos((k1 * x - omega1 * f) * np.pi)
    ax1.plot(x, y1, z1, label="A1=" + str(a1) + ", k1=" + str(k1) + ", omega1=" + str(omega1))
    y2 = a2 * np.sin((k2 * x - omega2 * f) * np.pi)
    z2 = a2 * np.cos((k2 * x - omega2 * f) * np.pi)
    ax1.plot(x, y2, z2, label="A2=" + str(a2) + ", k2=" + str(k2) + ", omega2=" + str(omega2))
    ax1.legend(prop={"size": 8}, loc="best")
    # Draw the superposed spiral on ax2.
    ax2.plot(x, y1 + y2, z1 + z2)
# Global variables: plot ranges
x_min = 0.
x_max = 10.
y_min = -2.
y_max = 2.
z_min = -2.
z_max = 2.
num_additional_lines = 100

# Parameters of the spirals: amplitude a, wave number k, angular frequency omega
k1 = 1.
k2 = 1.
k_min = 0.
k_max = 20.
k_step = 1.
omega1 = 0.1
omega2 = - 0.1
omega_min = -0.25
omega_max = 0.25
omega_step = 0.01
a1 = 1.
a2 = 1.
a_min = 0.
a_max = 1.
a_step = 0.1

# Generate the x sample points
x = np.arange(x_min, x_max, 0.005)

# Generate tkinter root window
root = tkinter.Tk()
root.title("Spiral")

# Generate figure and axes (two 3D subplots side by side)
fig = Figure(figsize=(10, 6))
ax1 = fig.add_subplot(121, projection='3d')
ax1.set_box_aspect((2, 1, 1))
ax2 = fig.add_subplot(122, projection='3d')
ax2.set_box_aspect((2, 1, 1))

# Embed the figure in a canvas
canvas = FigureCanvasTkAgg(fig, root)
canvas.get_tk_widget().pack()

# Animation: update() is called every 50 ms
anim = animation.FuncAnimation(fig, update, interval=50)

# Toolbar
toolbar = NavigationToolbar2Tk(canvas, root)
canvas.get_tk_widget().pack()

# Label and spinbox for a1
label_a1 = tkinter.Label(root, text="A1")
label_a1.pack(side='left')
var_a1 = tkinter.StringVar(root)  # variable for spinbox-value
var_a1.set(a1)  # Initial value
s_a1 = tkinter.Spinbox(root, textvariable=var_a1, format="%.1f", from_=a_min, to=a_max,
                       increment=a_step, command=lambda: change_a1(var_a1.get()), width=5)
s_a1.pack(side='left')

# Label and spinbox for k1
label_k1 = tkinter.Label(root, text="k1")
label_k1.pack(side='left')
var_k1 = tkinter.StringVar(root)
var_k1.set(k1)  # Initial value
s_k1 = tkinter.Spinbox(root, textvariable=var_k1, format="%.1f", from_=k_min, to=k_max,
                       increment=k_step, command=lambda: change_k1(var_k1.get()), width=5)
s_k1.pack(side='left')

# Label and spinbox for omega1
label_o1 = tkinter.Label(root, text="omega1")
label_o1.pack(side='left')
var_o1 = tkinter.StringVar(root)
var_o1.set(omega1)  # Initial value
s_o1 = tkinter.Spinbox(root, textvariable=var_o1, format="%.2f", from_=omega_min, to=omega_max,
                       increment=omega_step, command=lambda: change_o1(var_o1.get()), width=5)
s_o1.pack(side='left')

# Label and spinbox for a2
label_a2 = tkinter.Label(root, text=", A2")
label_a2.pack(side='left')
var_a2 = tkinter.StringVar(root)
var_a2.set(a2)  # Initial value
s_a2 = tkinter.Spinbox(root, textvariable=var_a2, format="%.1f", from_=a_min, to=a_max,
                       increment=a_step, command=lambda: change_a2(var_a2.get()), width=5)
s_a2.pack(side='left')

# Label and spinbox for k2
label_k2 = tkinter.Label(root, text="k2")
label_k2.pack(side='left')
var_k2 = tkinter.StringVar(root)
# FIX: the original seeded this spinbox with k1; use k2 so the widget always
# reflects its own parameter (the two values only coincide by default).
var_k2.set(k2)  # Initial value
s_k2 = tkinter.Spinbox(root, textvariable=var_k2, format="%.1f", from_=k_min, to=k_max,
                       increment=k_step, command=lambda: change_k2(var_k2.get()), width=5)
s_k2.pack(side='left')

# Label and spinbox for omega2
label_o2 = tkinter.Label(root, text="omega2")
label_o2.pack(side='left')
var_o2 = tkinter.StringVar(root)
var_o2.set(omega2)  # Initial value
s_o2 = tkinter.Spinbox(root, textvariable=var_o2, format="%.2f", from_=omega_min, to=omega_max,
                       increment=omega_step, command=lambda: change_o2(var_o2.get()), width=5)
s_o2.pack(side='left')

# Reset button
b_reset = tkinter.Button(root, text="Reset", command=reset_parameter)
b_reset.pack(side='left')

# main loop
set_axis()
tkinter.mainloop()
| marukatsutech/superposition_of_2_spirals | superposition_of_2_spirals.py | superposition_of_2_spirals.py | py | 8,121 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.patches.Circle",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "mpl_toolkits.mplot3d.art3d",
"line_number": 98,
"usage_type": "na... |
3828632722 | from collections import Counter
from data import SentimentDataset
import json
class Preprocessor:
    """Builds a bounded vocabulary from a SentimentDataset and converts
    text samples to/from zero-padded integer-encoded sequences.

    Encoding scheme: id 0 is reserved for padding, ids 1..max_vocab are the
    most frequent words, and id max_vocab+2 marks out-of-vocabulary words
    (id max_vocab+1 is intentionally unused by the original scheme).
    """
    def __init__(self, max_vocab):
        # max_vocab: number of most-frequent words kept in the vocabulary.
        self.max_vocab = max_vocab
        self.vocab2enc = None  # word -> integer id (1-based), built by fit()
        self.enc2vocab = None  # integer id -> word, built by fit()
        self.max_len = 0       # longest sequence seen so far by pad()

    def fit(self, dataset):
        """Count word frequencies over the dataset and build the
        encode/decode lookup tables. Samples with text None are skipped."""
        words = list()
        for i in range(len(dataset)):
            item = dataset.getitem(i)
            if item[1] is not None:
                words.extend(item[1].split(' '))
        vocab = Counter(words).most_common(self.max_vocab)
        self.vocab2enc = {word: i+1 for i, (word, _) in enumerate(vocab)}
        self.enc2vocab = {i+1: word for i, (word, _) in enumerate(vocab)}
        self.enc2vocab[0] = ''
        self.enc2vocab[self.max_vocab+2] = 'OOV'

    def encode(self, dataset):
        """Return a new SentimentDataset with each text replaced by a list of
        integer ids; unknown words map to the OOV id (max_vocab+2)."""
        encoded = list()
        for i in range(len(dataset)):
            item = dataset.getitem(i)
            encoding = list()
            for word in item[1].split(' '):
                encoding.append(self.vocab2enc.get(word, self.max_vocab+2))
            encoded.append(list([item[0], encoding]))
        return SentimentDataset(data=encoded, data_from_file=False)

    def decode(self, dataset):
        """Inverse of encode(): map id sequences back to space-joined words.
        Unknown ids become 'NAN'.

        NOTE(review): after load(), enc2vocab keys are JSON strings, not ints,
        so decode() would emit 'NAN' for every id — confirm intended usage.
        """
        encoded = list()
        for i in range(len(dataset)):
            item = dataset.getitem(i)
            encoding = list()
            for word in item[1]:
                encoding.append(self.enc2vocab.get(word, 'NAN'))
            encoded.append(list([item[0], ' '.join(encoding).strip()]))
        return SentimentDataset(data=encoded, data_from_file=False)

    def pad(self, dataset):
        """Pad every encoded sequence in place with zeros up to the longest
        sequence length, and return a dataset of the padded samples."""
        for i in range(len(dataset)):
            item = dataset.getitem(i)
            if len(item[1]) > self.max_len:
                self.max_len = len(item[1])
        padded_data = list()
        for i in range(len(dataset)):
            item = dataset.getitem(i)
            # BUG FIX: list.extend() returns None, so the original appended
            # [label, None]. Extend in place first (transform() relies on the
            # in-place mutation), then append the padded list itself.
            item[1].extend([0 for _ in range(self.max_len-len(item[1]))])
            padded_data.append([item[0], item[1]])
        return SentimentDataset(data=padded_data, data_from_file=False)

    def transform(self, dataset):
        """Encode then pad (in place) a dataset; returns the encoded dataset."""
        dataset = self.encode(dataset)
        self.pad(dataset)
        return dataset

    def fit_transform(self, dataset):
        """Convenience: fit the vocabulary, then transform the same dataset."""
        self.fit(dataset)
        return self.transform(dataset)

    def save(self, file_name='./prepro_vocab.json'):
        """Persist the lookup tables and max_len to a JSON file."""
        with open(file_name, 'w') as f_out:
            json.dump({
                'vocab2enc': self.vocab2enc,
                'enc2vocab': self.enc2vocab,
                'max_len': self.max_len,
            }, f_out)

    def load(self, file_name='./prepro_vocab.json'):
        """Restore the lookup tables and max_len from a JSON file."""
        with open(file_name, 'r') as f_in:
            data = json.load(f_in)
            self.vocab2enc = data['vocab2enc']
            self.enc2vocab = data['enc2vocab']
            self.max_len = data['max_len']
# if __name__ == '__main__':
# p = Preprocessor(500)
# s = SentimentDataset(data='./train.csv')
# p.fit(s)
#
# s_e = p.encode(s)
# p.pad(s_e)
# s_d = p.decode(s_e)
#
# idx = 2
# print(s.getitem(idx))
# print(s_e.getitem(idx))
# print(s_d.getitem(idx))
| yuvalofek/NLP | DeepLearning/prepro.py | prepro.py | py | 3,090 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.Counter",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "data.SentimentDataset",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "data.SentimentDataset",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "data.... |
27984628392 | from scipy.io.wavfile import read, write
import io
import matplotlib.pyplot as plt
## This may look a bit intricate/useless, considering the fact that scipy's read() and write() function already return a
## numpy ndarray, but the BytesIO "hack" may be useful in case you get the wav not through a file, but trough some websocket or
## HTTP Post request. This should obviously work with any other sound format, as long as you have the proper decoding function
with open("gn.wav", "rb") as wavfile:
    input_wav = wavfile.read()

# here, input_wav is a bytes object representing the wav object
rate, data = read(io.BytesIO(input_wav))

# data is a numpy ND array representing the audio data. Let's do some stuff with it
reversed_data = data[::-1]  # reverse along the first axis (audio frames)

print(reversed_data.shape)
plt.plot(reversed_data)
plt.show()

# then, let's save it to a BytesIO object, which is a buffer for bytes object
bytes_wav = bytes()
byte_io = io.BytesIO(bytes_wav)
write(byte_io, rate, reversed_data)
# BUG FIX: write() leaves the stream position at the end of the buffer, so
# read() without rewinding returned b"". Seek back to the start first.
byte_io.seek(0)
output_wav = byte_io.read()
{
"api_name": "scipy.io.wavfile.read",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.py... |
29867399693 | import os
import csv
import sqlite3
# Folder holding the downloaded sensor CSV files (one subfolder per sensor).
DATA_DIR="data"
# Target SQLite database file created/updated by main().
DATABASE="database.db"

# Per-sensor import configuration: target table name plus a mapping from
# CSV column names (keys) to database column names (values).
sensors = {
    "dht22": {
        "table": "temperaturUndLuftdruck",
        "mapping": {
            "sensor_id": "SensorID",
            "timestamp": "datetime",
            "humidity": "luftwert",
            "temperature": "tempwert"
        }
    },
    "sds011": {
        "table": "feinstaubsensor",
        "mapping": {
            "sensor_id": "SensorID",
            "timestamp": "datetime",
            "P1": "p1wert",
            "P2": "p2wert"
        }
    }
}
def main():
    """Import the downloaded sensor CSV files into the SQLite database.

    For every configured sensor the matching schema script is applied
    (ignoring "table already exists" errors) and each CSV file under
    DATA_DIR/<sensor>/ is bulk-inserted using the configured column mapping.
    Rows that violate constraints are silently skipped (INSERT OR IGNORE).
    """
    if not os.path.isdir(DATA_DIR):
        print("Daten wurden nicht heruntergeladen")
        return

    con = sqlite3.connect(DATABASE)
    try:
        for sensor, conf in sensors.items():
            table = conf["table"]
            mapping = conf["mapping"]

            with open("schema/{}.sql".format(table), "r") as schema:
                schema = schema.read()

            cur = con.cursor()
            try:
                cur.executescript(schema)
            except sqlite3.OperationalError:
                # Schema was already applied on a previous run.
                pass

            # Build an INSERT with named placeholders matching the CSV columns.
            TABLE_PLACEHOLDERS = ", ".join(mapping.values())
            VALUE_PLACEHOLDER = ", ".join([":{}".format(key) for key in mapping.keys()])
            QUERY = """
            INSERT OR IGNORE
            INTO {0}({1})
            VALUES ({2})
            """.format(table, TABLE_PLACEHOLDERS, VALUE_PLACEHOLDER)

            for root, _dirs, files in os.walk("{}/{}".format(DATA_DIR, sensor)):
                for name in files:
                    if not name.endswith(".csv"):
                        continue
                    full_name = "{}/{}".format(root, name)
                    with open(full_name, "r") as raw_data:
                        data = list(csv.DictReader(raw_data, delimiter=";"))
                    cur = con.cursor()
                    cur.executemany(QUERY, data)

        con.commit()
    finally:
        # Always release the connection, even if an import step fails
        # (the original leaked it on any exception).
        con.close()
if __name__ == "__main__":
main() | Jan200101/feinstaub-projekt | import.py | import.py | py | 1,971 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.isdir",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.connect",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sqlite3.OperationalError",... |
def set_font():
    """Return a matplotlib font family suitable for Korean text on the
    current operating system (Windows / macOS / Linux).

    Returns None on any other platform (implicit fall-through).
    """
    import platform
    import matplotlib.font_manager as fm

    system_name = platform.system()
    if system_name == 'Windows':
        return 'Malgun Gothic'
    elif system_name == 'Darwin':
        return 'AppleGothic'
    elif system_name == 'Linux':
        # BUG FIX: system fonts live under /usr/share/fonts (plural),
        # not /usr/share/font — the original path could never exist.
        path = '/usr/share/fonts/truetype/nanum/NanumMyeongjo.ttf'
        # NOTE(review): this branch returns a FontProperties object while the
        # other branches return a family-name string; a caller doing
        # plt.rc('font', family=set_font()) may need .get_name() here — confirm.
        font_name = fm.FontProperties(fname=path, size=12)
        return font_name
if __name__ == "__main__":
set_font()
# usage: plc.rt('font', family=set_font())
# to present minus sign: plt.rcParams['axes.unicode_minus'] = False
| cheesecat47/ML_DL_Jan2020 | Jan16/matplot_font.py | matplot_font.py | py | 588 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "platform.system",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.font_manager.FontProperties",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.font_manager",
"line_number": 12,
"usage_type": "name"
}
] |
19523144511 | from celery import shared_task
from time import sleep
from .models import Movie
@shared_task
def increase_ranking():
    """Celery task: bump the ranking of every 'upcoming' movie by 10.

    Fetches all Movie rows with status 'upcoming' and adds 10 to each
    movie's ``ranking`` field, saving every row back to the database.
    Intended to be scheduled periodically (e.g. via celery beat).
    """
    upcoming_movies = Movie.objects.filter(status='upcoming')
    for movie in upcoming_movies:
        movie.ranking += 10
        movie.save()
@shared_task
def add(x, y):
    """Demo Celery task: sleep for 5 seconds to simulate long-running work,
    then return the sum of ``x`` and ``y``."""
    sleep(5)  # Simulate a long-running task
    return x + y
| Optimustprime/cinema_program | app/movies/tasks.py | tasks.py | py | 853 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "models.Movie.objects.filter",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "models.Movie.objects",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "models.Movie",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "ce... |
12634573747 | import pygame
import sys
from snake_object import Snake
from setting import Setting
from apple import Apple
def press_keydown(snake, event):
    """Handle one KEYDOWN event: steer the snake, quit on 'q', or stop
    tail deletion (grow) on space."""
    direction_by_key = {
        pygame.K_LEFT: (-1, 0),
        pygame.K_RIGHT: (1, 0),
        pygame.K_UP: (0, -1),
        pygame.K_DOWN: (0, 1),
    }
    if event.key in direction_by_key:
        snake.go_x, snake.go_y = direction_by_key[event.key]
    elif event.key == pygame.K_q:
        sys.exit()
    elif event.key == pygame.K_SPACE:
        snake.del_tail = False
def show_apple(sn_setting):
    """Draw every apple currently tracked by the settings object."""
    for current_apple in sn_setting.mass_apple:
        current_apple.blitme()
def add_apple(sn_setting, screen):
    """Spawn a new apple every ``speed_apple`` ticks.

    Increments the tick counter on the settings object; once it reaches
    speed_apple, resets the counter and appends a freshly placed Apple
    to sn_setting.mass_apple.
    """
    sn_setting.new_apple += 1
    if sn_setting.new_apple == sn_setting.speed_apple:
        sn_setting.new_apple = 0
        new_apple = Apple(sn_setting, screen)
        sn_setting.mass_apple.append(new_apple)
def play_game():
    """Initialise pygame and run the main game loop (only returns via sys.exit)."""
    pygame.init()
    sn_setting = Setting()
    screen = pygame.display.set_mode((sn_setting.screen_width, sn_setting.screen_heigth))
    pygame.display.set_caption("Snake ))")
    snake = Snake(sn_setting)
    # Start the game with a single apple on the board.
    new_apple = Apple(sn_setting, screen)
    sn_setting.mass_apple.append(new_apple)
    while True:
        # Input handling.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            elif event.type == pygame.KEYDOWN:
                press_keydown(snake, event)
        # Update the world state and redraw the frame.
        screen.fill(sn_setting.bg_color)
        snake.update_snake()
        add_apple(sn_setting, screen)
        snake.test_tail()
        snake.eat_apple()
        snake.flip_tail(screen)
        snake.flip_head(screen)
        show_apple(sn_setting)
        pygame.display.flip()
play_game()
| BarSerhey/Python | snake/snake.py | snake.py | py | 1,770 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.K_LEFT",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_RIGHT",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_UP",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_DOWN"... |
24276436490 | #!/usr/bin/env python3
import os
import requests
from fmiopendata.wfs import download_stored_query
import datetime
import json
import pandas as pd
import json
def give_prediction(stationId, month, day, hour):
    """Fetch an FMI HIRLAM surface forecast for the weather area mapped to a
    railway station and return selected weather features.

    Returns a list: [rain, celcius, windGustSpeed, windSpeed, station_name,
    weather_station, time_of_day, weather_area].

    NOTE(review): the requested date always uses the *current* UTC year, so a
    request spanning New Year targets the wrong year — confirm intent.
    """
    place = "latlon=60.3267,24.95675" # default place is Veromiehenkylä
    stationShortCode = ''
    weather_area = 0
    station_name = ''
    # Resolve the station id to its short code and display name.
    with open("utils/stations.json", 'r', encoding="utf-8") as f:
        stations = json.load(f)
    for station in stations:
        if station['stationShortCodeCategory'] == stationId:
            stationShortCode = station['stationShortCode']
            station_name = station['stationName']
            break
    if stationShortCode != '':
        # Map the station to its weather area and that area's coordinates.
        with open("utils/weather_stations.json", 'r', encoding="utf-8") as f:
            weather_stations = json.load(f)
        weather_area = weather_stations.get(stationShortCode)
        with open("utils/weather-locations.json", 'r', encoding="utf-8") as f:
            places = json.load(f)
        # NOTE(review): if stationShortCode or weather_area is missing from
        # the JSON maps this raises (no fallback to the default place) — confirm.
        place = places.get(str(weather_area))['latlon']

    now = datetime.datetime.utcnow()
    end_time = datetime.datetime(now.year, month, day, hour)
    start_time = end_time - datetime.timedelta(hours=1)

    # Convert times to properly formatted strings
    start_time = start_time.isoformat(timespec="seconds") + "Z"
    # -> 2020-07-07T12:00:00Z
    end_time = end_time.isoformat(timespec="seconds") + "Z"
    # -> 2020-07-07T13:00:00Z

    # One-hour forecast window ending at the requested time.
    obs = download_stored_query("fmi::forecast::hirlam::surface::point::multipointcoverage",
                                args=["starttime=" + start_time, "endtime=" + end_time, place])
    print(obs.location_metadata)
    print(obs.data)
    # Use the latest timestamp and the first weather station in the response.
    time_of_day = max(obs.data.keys())
    print('timestamp', time_of_day)
    weather_station = list(obs.data[time_of_day].keys())[0]
    print(weather_station)
    data = obs.data[time_of_day][weather_station]
    rain = data['Precipitation amount 1 hour']['value']
    celcius = data['Air temperature']['value']
    windGustSpeed = data['Wind gust']['value']
    windSpeed = data['Wind speed']['value']
    weather = [rain, celcius, windGustSpeed, windSpeed, station_name, weather_station, time_of_day, weather_area]
    return weather
| millalin/Train-predictor | application/helpers/weather_for_model.py | weather_for_model.py | py | 2,255 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_nu... |
29841696751 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 8 16:09:50 2022
@author: Owner
"""
from Hamiltonian import Hamiltonian
import numpy as np
import math
import matplotlib.pyplot as plt
import copy
from Hamiltonian import Hamiltonian
from Fock_vector import fock_vector
import Ryser_Algorithm as ryser
import config as configs
from numpy import linalg as la
from decimal import Decimal
from Fock_vector import fock_vector
from Disc_Hamiltonian import disc_Hamiltonian
# Global matplotlib rc settings: large fonts and figure size for every plot
# produced by this module.
params = {
    'axes.labelsize': 35,
    'font.size': 35,
    'legend.fontsize': 35,
    'lines.linewidth' : 2,
    'lines.markersize' : 35,
    'xtick.labelsize': 35,
    'ytick.labelsize': 35,
    'figure.figsize': [40, 20]
}
plt.rcParams.update(params)
def Dirac_Delta(a, b):
    '''
    Kronecker-style delta: 1 when the two arguments compare equal, else 0.
    '''
    return 1 if a == b else 0
class non_rotatingHamiltonian(Hamiltonian):
    '''
    Core class for implementing Full-Configuration Interaction
    Hamiltonians in a bosonic Fock space basis for non-rotating pancakes.
    '''
    def __init__(self, N, M, S, length_ratio=10, L=0):
        # N: particle number; M: number of single-particle orbitals;
        # S: index of the lowest orbital (M == 2S+1 is enforced);
        # length_ratio: trap length / scattering length (a_z/a_s);
        # L: total angular momentum sector the basis is restricted to.
        super().__init__(N,M)
        self.tolerance = 1e-10  # threshold for treating eigenvalues as degenerate
        self.L = L # Restrict total angular momentum for each Fock vector
        self.S = S
        assert M == 2*S + 1
        self.M = M
        # Set interaction energy scale to 1
        self.V0 = 1
        self.lengthratio = length_ratio # = (a_z/a_s: trap length/scattering length)
        # Scale kinetic energy scale accordingly
        self.T0 = (np.pi**2)*self.lengthratio

        self.condensate_fraction = None # No. excitations in lowest SP state
        self.GP_amplitude = None # Weight of Gross-Pitaevskii (fully condensed) permanent
        self.GP_index = None     # basis index of the fully condensed permanent
        self.MF_perm = None # Amplitude of dominant permanent in FCI expansion
        self.MF_energy = None # Energy content of dominant permanent in FCI expansion
        self.MF_amplitude = None # Amplitude of Gross-Pitaevskii permanent

    def generate_basis(self):
        '''
        Generate many-body basis states from repeated combinations
        and index them.  Configurations are produced by
        configs.sphere_config_fast and read back from its output file.
        '''
        print('Basis generation...')
        configs.sphere_config_fast(int(self.N), int(self.M), int(self.L), int(self.S))
        index = 0
        file='Sphere_Configurations_N%dM%dL%dS%d.txt'%(self.N, self.M, self.L, self.S)
        print('Reading in configurations...')
        with open(file, 'r') as f:
            for line in f:
                split_line = line.split()
                # Skip the header line of the configuration file.
                if (split_line[0]=='N,'):
                    continue
                N = split_line[0]
                M = split_line[1]
                L = split_line[2]
                S = split_line[3]
                basis = []
                config = split_line[4:]  # occupation numbers of this configuration
                for item in config:
                    basis.append(int(item))
                assert int(N) == self.N and int(M) == self.M and int(M) == 2*(self.S) +1
                vector = fock_vector(int(N), int(M), np.array(basis), int(S))
                assert vector.ang_mom() == self.L  # enforce the requested L sector
                vector = fock_vector(int(N), int(M), np.array(basis), S= int(S), index=index)
                self.basis.append(vector)
                # Remember the index of the fully condensed (Gross-Pitaevskii)
                # permanent, i.e. all N particles in orbital S.
                if (self.S in vector.occup_basis):
                    if (vector.occups[self.S] == self.N):
                        self.GP_index = index
                index += 1
                if (index % 100 == 0):
                    print('No. basis read-in ', index)
        print('Basis generation complete')
        print('Fock space size: ', self.fock_size)
        self.basis = np.array(self.basis)
        self.fock_size = index
        self.many_body_H = np.zeros((self.fock_size, self.fock_size))

    def matrix_overlap(self, i, j, k, l):
        '''
        Construct many-body overlap matrix for disc Hamiltonian:
        contact-interaction element, non-zero only when angular
        momentum is conserved (i + j == k + l).
        '''
        self.additionalfactor = 1
        if (i+j != k+l):
            return 0
        else:
            return self.V0*self.additionalfactor

    def kineticterm(self, i):
        # Single-particle kinetic energy of orbital i (effective m = i - S).
        return self.T0*((i-self.S)**2)

    def diag_entry(self, basis):
        '''
        Returns diagonal entry for contact repulsion Hamiltonian
        (kinetic term plus same-orbital and pairwise interaction terms).
        '''
        assert len(self.basis) == self.fock_size # Check if basis generation has not been invoked
        diag_element = 0
        occup_basis = np.sort(basis.occup_basis)
        for index in range(len(occup_basis)):
            i = occup_basis[index]
            diag_element += self.kineticterm(i)*basis.occups[i]
            if basis.occups[i] > 1:
                # Half factor comes from Hamiltonian definition
                diag_element += 0.5*self.matrix_overlap(i, i, i, i)\
                    *basis.occups[i]*(basis.occups[i]-1)
            # we only have to consider non-equal i, j pairs as symmetry
            # gives equal elements for ijij jiij, ijji, jiji basis indices
            for jndex in range(index+1, len(occup_basis)):
                j = occup_basis[jndex]
                diag_element += 2*self.matrix_overlap(i, j, i, j)\
                    *basis.occups[i]*(basis.occups[j])
        return diag_element

    def construct_off_diag_entries(self, basis):
        # Fill in the off-diagonal elements generated from `basis` by moving
        # two particles from orbitals (i, j) into (k, l) with i + j == k + l.
        off_diag_element = 0
        occup_basis = np.sort(basis.occup_basis)
        new_occups = np.zeros(self.M)
        for index in range(len(occup_basis)):
            i = occup_basis[index]
            for jndex in range(index, len(occup_basis)):
                j = occup_basis[jndex]
                for k in range(self.M):
                    new_occups = np.zeros(self.M)
                    new_basis_index = None
                    l = i + j - k  # angular momentum conservation fixes l
                    if (l >= self.M or l < 0):
                        continue
                    # Purely off-diagonal: skip moves touching the same orbitals.
                    if (k == i or k == j or l == k or l == i or l == j):
                        continue
                    if (i != j):
                        # Copy basis occupation
                        for q in basis.occup_basis:
                            new_occups[q] = basis.occups[q]
                        # Construct basis with non-zero entry
                        new_occups[i] = basis.occups[i] - 1
                        new_occups[j] = basis.occups[j] - 1
                        if (k in basis.occups):
                            new_occups[k] = basis.occups[k] + 1
                        else:
                            new_occups[k] = 1
                        if (l in basis.occups):
                            new_occups[l] = basis.occups[l] + 1
                        else:
                            new_occups[l] = 1
                        new_fock = fock_vector(self.N, self.M, new_occups)
                        new_basis_index = None
                        # Search newly constructed basis index (linear scan).
                        for basis2 in self.basis:
                            if basis2.occups == new_fock.occups:
                                if (basis2.index != basis.index):
                                    new_basis_index = basis2.index
                                    break
                        if (new_basis_index is None):
                            print('New basis not in Hamiltonian space')
                            print(new_fock.print_info)
                            self.show_basis()
                            assert 0
                        # Assign matrix element (bosonic ladder-operator factors).
                        self.many_body_H[basis.index, new_basis_index] = \
                            2*np.sqrt(basis.occups[i]*basis.occups[j]*new_occups[k]*new_occups[l])*self.matrix_overlap(i, j, k, l)
                        self.many_body_H[new_basis_index, basis.index] = self.many_body_H[basis.index, new_basis_index]
                    else:
                        # Both particles leave the same orbital: need at least 2 there.
                        if (basis.occups[i] < 2):
                            continue
                        # Construct basis with non-zero entry for i = j
                        for q in basis.occup_basis:
                            new_occups[q] = basis.occups[q]
                        # See Wilkin paper for angular momentum transfer rules
                        new_occups[i] = basis.occups[i] - 2
                        if (k in basis.occups):
                            new_occups[k] = basis.occups[k] + 1
                        else:
                            new_occups[k] = 1
                        if (l in basis.occups):
                            new_occups[l] = basis.occups[l] + 1
                        else:
                            new_occups[l] = 1
                        new_fock = fock_vector(self.N, self.M, new_occups)
                        new_basis_index = None
                        # Search newly constructed basis index
                        for basis2 in self.basis:
                            if basis2.occups == new_fock.occups:
                                if (basis2.index != basis.index):
                                    new_basis_index = basis2.index
                                    break
                        if (new_basis_index is None):
                            print('New basis not in Hamiltonian space')
                            print(new_fock.print_info)
                            self.show_basis()
                            assert 0
                        # Assign matrix element
                        self.many_body_H[basis.index, new_basis_index] = \
                            np.sqrt(basis.occups[i]*(basis.occups[i]-1)*new_occups[k]*new_occups[l])*self.matrix_overlap(i, j, k, l)
                        self.many_body_H[new_basis_index, basis.index] = self.many_body_H[basis.index, new_basis_index]

    def construct_Hamiltonian_fast(self):
        # Wilkin exact eigenstates paper prescription:
        # diagonal entries plus symmetric off-diagonal fill, basis by basis.
        assert len(self.basis) == self.fock_size # Check if basis generation has not been invoked
        print('Hamiltonian construction...')
        print('Fock size: ', self.fock_size)
        counter = 1
        for basis in self.basis:
            self.many_body_H[basis.index, basis.index] = self.diag_entry(basis)
            self.construct_off_diag_entries(basis)
            if (counter % 100 == 0):
                print('Fast Hamiltonian construction progress [%] ', (counter/self.fock_size)*100)
            counter += 1

    def ground_state_analysis(self):
        '''
        Analyse the FCI ground state: locate the dominant (mean-field)
        permanent, report the Gross-Pitaevskii amplitude and compute the
        condensate fraction.  Returns (MF_amplitude, GP_amplitude,
        condensate_fraction).
        '''
        # Index of MF permanent (largest-|amplitude| coefficient).
        max_index = np.where(max(self.e_vector_ground[0], key=abs) == self.e_vector_ground[0])[0][0]
        print('max index', max_index)
        self.MF_perm = self.basis[max_index]
        self.MF_amplitude = self.e_vector_ground[0][max_index]
        print('Mean-field permanent info')
        print(self.MF_perm.print_info())
        print('Amplitude: ', self.MF_amplitude)

        self.GP_amplitude = self.e_vector_ground[0][self.GP_index]
        print('Gross-Pitaevskii permanent amplitude: ', self.GP_amplitude)

        # Calculating condensate fraction:
        # sum over permanents of |amplitude|^2 * n_S / N.
        self.condensate_fraction = 0
        for index, amplitude in enumerate(self.e_vector_ground.squeeze()):
            if (self.S not in self.basis[index].occup_basis):
                continue
            else:
                self.condensate_fraction += \
                    abs(amplitude)**2 * self.basis[index].occups[self.S]/self.N
        print('Expected condensate fraction: ', self.condensate_fraction)
        print('Condensate depletion: ', 1-self.condensate_fraction)

        return self.MF_amplitude, self.GP_amplitude, self.condensate_fraction

    def check_degeneracy(self):
        '''
        Find degeneracies within spectrum (eigenvalues closer than
        self.tolerance) and save diagnostic plots of the degeneracy
        structure and the degenerate ground-state configurations.
        '''
        self.degen_evalues = []
        self.degen_evectors = []
        for i in range(len(self.evalues)):
            self.degen_evalues.append([i])
            self.degen_evectors.append([self.evectors.T[i]])
            for j in range(i+1, len(self.evalues)):
                if abs(self.evalues[i] - self.evalues[j]) <= self.tolerance:
                    self.degen_evalues[-1].append(j)
                    self.degen_evectors[-1].append(self.evectors.T[j])

        degeneracy = np.zeros(len(self.evalues))
        for i in range(len(self.evalues)):
            degeneracy[i] = len(self.degen_evalues[i])

        # Bar chart: degeneracy of each sorted eigenstate.
        plt.title('Degeneracy of spectrum\n'+\
                  'Disc geometry\nN = %d M = %d L = %d'%(self.N, self.M, self.L))
        plt.bar(x=np.arange(1, self.fock_size+1), height=degeneracy)
        plt.xlabel('Sorted eigenstate index')
        plt.ylabel('Degeneracy')
        plt.grid()
        plt.legend()
        plt.savefig('BEC_Degeneracy_N%d_M%d_S%d_L%d.jpeg'%(self.N, self.M, self.S, self.L))
        plt.close()

        # Histogram of the eigenvalue spectrum.
        plt.title('Eigenvalue spectrum\n'+\
                  'Disc geometry\nN = %d M = %d L = %d'%(self.N, self.M, self.L))
        nT, binsT, patchesT = plt.hist(x=self.evalues, bins=15, color='red',
                                       alpha=0.7, rwidth=0.85, label='FCI Spectrum')
        plt.xlabel('Eigenvalues [$V_0$]')
        plt.ylabel('Degeneracy')
        plt.legend()
        plt.grid()
        plt.savefig('BEC_Spectrum_N%d_M%d_S%d_L%d.jpeg'%(self.N, self.M, self.S, self.L))
        plt.close()

        assert (self.evalues.min() == self.evalues[0])
        assert (self.fock_size == len(self.evalues))

        # One bar chart per degenerate ground-state configuration.
        for ground_index in range(len(self.degen_evalues[0])):
            print(len(self.degen_evectors[0]), len(self.degen_evalues[0]))
            assert len(self.degen_evectors[0]) == len(self.degen_evalues[0])
            print(len(self.degen_evectors[0][ground_index]))
            print(ground_index)
            plt.figure(ground_index)
            plt.title('Degenerate ground state configuration index %d \n'%(ground_index)+\
                      'Disc geometry\nN = %d M = %d L = %d'%(self.N, self.M, self.L))
            plt.bar(x=np.arange(1, self.fock_size+1), height=self.degen_evectors[0][ground_index][0])
            plt.xlabel('Many-body basis index')
            plt.ylabel('Amplitude')
            plt.grid()
            plt.legend()
            plt.savefig('BEC_Ground_Config_i%d_N%d_M%d_S%d_L%d.jpeg'%(ground_index, self.N, self.M,self.S, self.L))
            plt.close()
'''
H = non_rotatingHamiltonian(N=3,S=1,M=3)
H.generate_basis()
H.construct_Hamiltonian_fast()
#H.print_matrix(H.many_body_H)
evalues, evecs = H.diagonalise()
print('Hamiltonian eigenvalues [V0]')
print(evalues)
print('Ground state energy [V0] ', H.e_ground)
print('Ground state configuration', H.e_vector_ground)
#H.show_basis()
MF_amp, GP_amp = H.ground_state_analysis()
print(MF_amp, GP_amp)
#H.check_sign_problem()
H.check_degeneracy()
'''
| ahadriaz99/MSci-Project | NonRotatingDisc.py | NonRotatingDisc.py | py | 16,186 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.rcParams.update",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
... |
17419658730 | import json
import os
from pyui.geom import Size
from .base import View
# Absolute path to the package's bundled "data" directory (two levels above
# this module, i.e. <package_root>/data).
DATA_DIR = os.path.join(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "data"
)
class Text(View):
    """Leaf view that renders a string with the themed font.

    Measurement results are cached; the content-size cache is invalidated
    whenever the available width changes, and the wrapped-line layout cache
    is rebuilt lazily by draw().
    """
    def __init__(self, text, **options):
        super().__init__(**options)
        self.text = str(text)
        self._min_cache = None    # cached minimum_size() result
        self._width_cache = None  # width for which _size_cache was computed
        self._size_cache = None   # cached content_size() result
        self._line_cache = None   # cached wrapped-line layout used by draw()

    def reuse(self, other):
        # A view instance can be reused when both the text and the
        # resolved font are unchanged.
        return self.text == other.text and self._font == other._font

    @property
    def _font(self):
        # Font resolved from the current environment's theme settings.
        return self.env.theme.font(self.env.font, self.env.font_size)

    def minimum_size(self):
        # Unconstrained measurement, computed once.
        if self._min_cache is None:
            self._min_cache = self._font.measure(self.text)
        return self._min_cache

    def content_size(self, available: Size):
        # Re-measure only when the available width changed; that also
        # invalidates the line layout cache.
        if self._size_cache is None or self._width_cache != available.w:
            self._width_cache = available.w
            self._size_cache = self._font.measure(self.text, width=available.w)
            self._line_cache = None
        return self._size_cache

    def draw(self, renderer, rect):
        super().draw(renderer, rect)
        # The font returns the computed line layout so it can be reused
        # on subsequent draws.
        self._line_cache = self._font.draw(
            renderer, self.text, rect, self.env.blended_color, lines=self._line_cache
        )
class Icon(Text):
    """Text view specialized to render a glyph from the bundled icon font."""

    # Icon metadata is loaded once at class-definition time.
    # BUG FIX: the original json.load(open(...)) leaked the file handle;
    # open it in a with-block instead.
    with open(os.path.join(DATA_DIR, "icons.json")) as _icons_file:
        data = json.load(_icons_file)
    del _icons_file  # don't keep the closed handle as a class attribute

    def __init__(self, name, style=None, size=None):
        # Look up the glyph and pick the requested (or first available) style set.
        info = Icon.data["icons"][name]
        fontname = "{}/{}.otf".format(Icon.data["font"], style or info["sets"][0])
        super().__init__(info["text"])
        self.font(fontname, size)
| dcwatson/pyui | pyui/views/text.py | text.py | py | 1,649 | python | en | code | 21 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9... |
74918974586 | from dataclasses import dataclass
from collections import defaultdict
import math
@dataclass
class Punto:
    # Grid point with the id of its closest control point
    # (owner == -1 means "no owner yet" or a tie between two control points).
    x: int
    y: int
    owner: int
def distancia(p1, p2):
    """Manhattan (taxicab) distance between two points with .x/.y attributes."""
    dx = abs(p1.x - p2.x)
    dy = abs(p1.y - p2.y)
    return dx + dy
def day6(file):
    """Advent of Code 2018, day 6.

    Reads "x,y" control points from `file` and returns a tuple
    (max_finite_area, region_size):
    - max_finite_area: size of the largest finite closest-point region
      (Manhattan metric, ties owned by nobody);
    - region_size: number of cells whose summed distance to all control
      points is below 10000.
    """
    with open(file) as f:
        lines = f.readlines()
    puntosControl = list()
    xlist = list()
    ylist = list()
    for i,line in enumerate(lines):
        l = line.split(",")
        xlist.append(int(l[0]))
        ylist.append(int(l[1]))
        puntosControl.append(Punto(x=int(l[0]),y=int(l[1]),owner=i))
    # Bounding box of all control points (top-left / bottom-right corners).
    esquinaSuperiorIzquierda = Punto(x=min(xlist),y=min(ylist),owner=-1)
    esquinaInferiorDerecha = Punto(x=max(xlist),y=max(ylist),owner=-1)
    # Owners whose region touches the ring one cell outside the bounding box
    # extend to infinity and are excluded automatically.
    excluidos = set()
    world = defaultdict(lambda: -1)
    world_total = 0
    for i in range(esquinaSuperiorIzquierda.x-1,esquinaInferiorDerecha.x+2):
        for j in range(esquinaSuperiorIzquierda.y-1,esquinaInferiorDerecha.y+2):
            punto = Punto(x=i,y=j,owner=-1)
            distanciaMin = math.inf
            total = 0
            for p in puntosControl:
                # A tie at the current minimum leaves the cell unowned.
                if distancia(punto,p) == distanciaMin:
                    punto.owner = -1
                if distancia(punto,p) < distanciaMin:
                    distanciaMin = distancia(punto,p)
                    punto.owner = p.owner
                total += distancia(punto,p)
            if total < 10000:
                world_total += 1
            # Cells on the outer border mark their owner's region as infinite.
            if i == esquinaSuperiorIzquierda.x-1 or i == esquinaInferiorDerecha.x+1 or j == esquinaSuperiorIzquierda.y-1 or j == esquinaInferiorDerecha.y+1:
                excluidos.add(punto.owner)
            if punto.owner > -1:
                world[(i,j)] = punto.owner
    # Count the area of each finite region.
    conteo = defaultdict(lambda: 0)
    for p in world:
        if not world[p] in excluidos:
            conteo[world[p]] += 1
    max_finite_area = max(conteo.values())
    region_size = world_total
    return max_finite_area,region_size
| aarroyoc/advent-of-code-2018 | python/day6/day6.py | day6.py | py | 2,152 | python | es | code | 1 | github-code | 6 | [
{
"api_name": "dataclasses.dataclass",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "math.inf",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "collection... |
19072626002 | import requests
import json
import logging
import sys
import json
import pandas as pd
from pathlib import Path
from requests_html import HTMLSession
def parse_and_download_files(servicetags_public, msftpublic_ips, officeworldwide_ips):
    """Scrape the download links for the Microsoft public IP feeds and write
    the three feed files to the given output paths.

    Parameters are destination paths for, respectively: Azure service tags
    (JSON), Microsoft public IPs (CSV) and Office 365 worldwide endpoints (JSON).
    """
    # URL for Feeds (download landing pages plus the O365 endpoints API).
    azurepublic = "https://www.microsoft.com/en-us/download/confirmation.aspx?id=56519"
    msftpublic = "https://www.microsoft.com/en-us/download/confirmation.aspx?id=53602"
    officeworldwide = "https://endpoints.office.com/endpoints/worldwide?clientrequestid=b10c5ed1-bad1-445f-b386-b919946339a7"

    session = HTMLSession()
    # The landing pages embed the actual .json/.csv download links; scrape them.
    azure_resp = session.get(azurepublic)
    links = azure_resp.html.links
    json_link = [link for link in links if ".json" in link]
    msft_resp = session.get(msftpublic)
    links = msft_resp.html.links
    csv_link = [link for link in links if ".csv" in link]

    # Download JSON link
    azure_json = requests.get(json_link[0])
    msft_csv = requests.get(csv_link[0], stream=True)
    o365_json = requests.get(officeworldwide, stream=True)

    # Write output file
    logging.info("Writing ServiceTags_Public.json file to output directory")
    with open(servicetags_public, "w") as f:
        json.dump(azure_json.json(), f, indent=4)

    logging.info("Writing MSFT_PublicIPs.csv file to output directory")
    with open(msftpublic_ips, "wb") as f:
        # Re-add newlines: iter_lines() strips the line terminators.
        for line in msft_csv.iter_lines():
            f.write(line + "\n".encode())

    logging.info("Writing OfficeWorldWide-IPRanges.json file to output directory")
    with open(officeworldwide_ips, "w") as f:
        json.dump(o365_json.json(), f, indent=4)
def main():
    """Configure logging, ensure the output folder exists, then download the
    three Microsoft IP-range feeds into it."""
    logging.basicConfig(
        stream=sys.stdout,
        level=logging.DEBUG,
        format="%(asctime)s:%(levelname)s: %(message)s",
    )

    # All feed files share the same output directory.
    out_path = Path.cwd() / "master" / "PublicFeeds" / "MSFTIPRanges"
    try:
        out_path.mkdir(parents=True, exist_ok=False)
    except FileExistsError:
        logging.info("Folder is already present")
    else:
        logging.info(f"{out_path} Folder was created")

    servicetags_public = out_path / "ServiceTags_Public.json"
    msftpublic_ips = out_path / "MSFT_PublicIPs.csv"
    officeworldwide_ips = out_path / "OfficeWorldWide-IPRanges.json"

    logging.info(f"Writing json file to output directory : {servicetags_public}")
    logging.info(f"Writing csv file to output directory : {msftpublic_ips}")
    logging.info(f"Writing json file to output directory : {officeworldwide_ips}")

    parse_and_download_files(servicetags_public, msftpublic_ips, officeworldwide_ips)
if __name__ == "__main__":
main()
| microsoft/mstic | .script/get-msftpubliip-servicetags.py | get-msftpubliip-servicetags.py | py | 2,873 | python | en | code | 87 | github-code | 6 | [
{
"api_name": "requests_html.HTMLSession",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "requests.get",
... |
73252316348 | """"
Для заданного набора N точек на плоскости найти прямоугольник минимальной площади,
содержащий все указанные точки.
Стороны прямоугольника не обязаны быть параллельными координатным осям
"""
# important functions: MinimumBoundingBox
from scipy.spatial import ConvexHull
from math import sqrt,atan2
import numpy as np
from math import atan2, cos, sin, pi
from collections import namedtuple
import matplotlib.pyplot as plt
from min_square import Point
def unit_vector(pt0, pt1):
    """Return the unit-length direction vector pointing from pt0 to pt1."""
    dx = pt1[0] - pt0[0]
    dy = pt1[1] - pt0[1]
    length = sqrt(dx ** 2 + dy ** 2)
    return dx / length, dy / length
def orthogonal_vector(vector):
    """Rotate a 2D vector by +90 degrees: (x, y) -> (-y, x)."""
    x_component, y_component = vector
    return -1 * y_component, x_component
def bounding_area(index, hull):
    """Describe the bounding rectangle aligned with the hull edge (index, index+1).

    Projects every hull point onto the edge direction and its orthogonal,
    and returns a dict with the rectangle's area, side lengths, center
    (in the rotated frame) and the edge's unit vector.
    """
    axis_parallel = unit_vector(hull[index], hull[index + 1])
    axis_orthogonal = orthogonal_vector(axis_parallel)
    proj_parallel = [np.dot(axis_parallel, pt) for pt in hull]
    proj_orthogonal = [np.dot(axis_orthogonal, pt) for pt in hull]
    min_p = min(proj_parallel)
    min_o = min(proj_orthogonal)
    len_p = max(proj_parallel) - min_p
    len_o = max(proj_orthogonal) - min_o
    return {
        'area': len_p * len_o,
        'length_parallel': len_p,
        'length_orthogonal': len_o,
        'rectangle_center': (min_p + len_p / 2, min_o + len_o / 2),
        'unit_vector': axis_parallel,
    }
def to_xy_coordinates(unit_vector_angle, point):
    """Convert *point* from the rotated rectangle frame back to x-y coordinates."""
    angle_orthogonal = unit_vector_angle + pi / 2
    x = point[0] * cos(unit_vector_angle) + point[1] * cos(angle_orthogonal)
    y = point[0] * sin(unit_vector_angle) + point[1] * sin(angle_orthogonal)
    return x, y
def rotate_points(center_of_rotation, angle, points):
    """Rotate *points* by *angle* radians (counter-clockwise) about a center.

    Parameters:
        center_of_rotation: (x, y) pivot point.
        angle: rotation angle in radians.
        points: iterable of (x, y) points.

    Returns a list of rotated (x, y) tuples, in input order.

    Fix: the original also accumulated every point's polar angle in a local
    list that was never read; that dead code is removed.
    """
    rot_points = []
    for pt in points:
        # offset of the point relative to the pivot
        diff = tuple([pt[d] - center_of_rotation[d] for d in range(2)])
        # rotate in polar form: add the angle, keep the radius
        diff_angle = atan2(diff[1], diff[0]) + angle
        diff_length = sqrt(sum([d**2 for d in diff]))
        rot_points.append((center_of_rotation[0] + diff_length * cos(diff_angle),
                           center_of_rotation[1] + diff_length * sin(diff_angle)))
    return rot_points
def rectangle_corners(rectangle):
    """Return the four corner points of a rectangle description.

    *rectangle* is the dict produced by bounding_area, after the caller has
    attached a 'unit_vector_angle' entry; corners are rotated into x-y space.
    """
    center_x, center_y = rectangle['rectangle_center']
    len_par = rectangle['length_parallel']
    len_ort = rectangle['length_orthogonal']
    corner_points = []
    for i1 in (.5, -.5):
        for i2 in (i1, -1 * i1):
            corner_points.append((center_x + i1 * len_par,
                                  center_y + i2 * len_ort))
    return rotate_points(rectangle['rectangle_center'], rectangle['unit_vector_angle'], corner_points)
# Result record for a minimum bounding rectangle: its area, the two side
# lengths (parallel/orthogonal to unit_vector), the center, the direction
# of the parallel side (as a vector and as an angle), and the corner points.
BoundingBox = namedtuple('BoundingBox', ('area',
                         'length_parallel',
                         'length_orthogonal',
                         'rectangle_center',
                         'unit_vector',
                         'unit_vector_angle',
                         'corner_points'
                         )
                         )
# use this function to find the listed properties of the minimum bounding box of a point cloud
def MinimumBoundingBox(points):
    """Compute the minimum-area bounding rectangle of a 2D point cloud.

    Requires: points to be a list or tuple of 2D points, e.g.
    ((5, 2), (3, 4), (6, 8)); more than two points are needed.

    Returns a BoundingBox namedtuple with fields:
      area             -- area of the rectangle
      length_parallel  -- side length parallel to unit_vector
      length_orthogonal-- side length orthogonal to unit_vector
      rectangle_center -- x-y coordinates of the rectangle center
      unit_vector      -- direction of the length_parallel side
      unit_vector_angle-- angle of the unit vector (radians)
      corner_points    -- set of the rectangle's corner points
    """
    if len(points) <= 2:
        raise ValueError('More than two points required.')

    # Order the points along the convex hull and close the polygon by
    # repeating the first vertex at the end.
    hull_ordered = [points[index] for index in ConvexHull(points).vertices]
    hull_ordered.append(hull_ordered[0])
    hull_ordered = tuple(hull_ordered)

    # A minimum-area rectangle has one side collinear with a hull edge,
    # so testing every edge and keeping the smallest area is sufficient.
    min_rectangle = min(
        (bounding_area(edge, hull_ordered) for edge in range(len(hull_ordered) - 1)),
        key=lambda rect: rect['area'])

    min_rectangle['unit_vector_angle'] = atan2(min_rectangle['unit_vector'][1], min_rectangle['unit_vector'][0])
    min_rectangle['rectangle_center'] = to_xy_coordinates(min_rectangle['unit_vector_angle'], min_rectangle['rectangle_center'])

    return BoundingBox(
        area=min_rectangle['area'],
        length_parallel=min_rectangle['length_parallel'],
        length_orthogonal=min_rectangle['length_orthogonal'],
        rectangle_center=min_rectangle['rectangle_center'],
        unit_vector=min_rectangle['unit_vector'],
        unit_vector_angle=min_rectangle['unit_vector_angle'],
        corner_points=set(rectangle_corners(min_rectangle))
    )
if __name__ =='__main__':
    # Read N points from stdin, compute the minimum bounding rectangle and plot it.
    points = []
    for _ in range(int(input('Count points\n'))):
        # points.append(Point.create_point()) # for manual entry of polygon points
        points.append(Point().get_tuple_style())
    rectangle = MinimumBoundingBox(points).corner_points
    # sort corners by polar angle so the polygon is drawn without self-crossings
    rectangle = sorted(rectangle, key=lambda p: atan2(p[1], p[0]))
    print(rectangle)
    print([Point(p[0], p[1]).__str__('blue') for p in points])
    plt.fill(
        [p[0] for p in rectangle],
        [p[1] for p in rectangle],
        fill=False
    )
plt.show() | ded-evsey/TandACG | 21.py | 21.py | py | 6,002 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "math.sqrt",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 47,
"u... |
4930731104 | # -*- coding: utf-8 -*-
"""
Description: Deep Patch Learning Model
Author: wondervictor
"""
import math
import torch
import torch.nn as nn
import numpy as np
import layers
import basenet
class PatchHeadNetwork(nn.Module):
    """Head network of the Deep Patch Learning model.

    ROI-aligns patch features from the backbone feature map, encodes them,
    and produces two classification scores: one from spatial-pyramid-pooled
    patch codes (cls_score1) and one from max-pooled per-patch class scores
    (cls_score2); the raw per-patch class scores are returned as det_scores.
    """

    def __init__(self, use_cuda, num_classes, use_relation=False):
        super(PatchHeadNetwork, self).__init__()
        self.roi_align = layers.ROIAlign(out_size=7, spatial_scale=0.0625)
        self.fc = nn.Sequential(
            nn.Linear(512*7*7, 4096),
            nn.LeakyReLU(negative_slope=0.02, inplace=True),
            nn.Dropout(0.5),
            nn.Linear(4096, 4096),
            nn.LeakyReLU(negative_slope=0.02, inplace=True),
            nn.Dropout(0.5)
        )
        self.patch_encoder = nn.Linear(4096, 256)
        self.cls_score1 = nn.Linear(256*8, num_classes)
        self.cls_score2 = nn.Linear(4096, num_classes)
        self.patch_pooling = layers.MaxPatchPooling(use_cuda)
        self.spm_pooling = layers.SPMMaxPooling()
        # Gaussian init for linear weights, uniform init for biases.
        for m in self.modules():
            if isinstance(m, nn.Linear):
                m.weight.data.normal_(0.0, 0.01)
                m.bias.data.uniform_(-0.5, 0.5)

    def forward(self, features, shapes, rois):
        # N denotes the num_rois, B denotes the batchsize
        # features: B*C*H*W
        # rois: N*5 (first column is the batch index of each roi)
        # shapes: B*2
        batch_size = features.size()[0]
        roi_output = self.roi_align(features, rois)
        # N*512*7*7
        num_rois = rois.size()[0]
        output_batch_id = np.zeros(num_rois, dtype=np.int32)
        # BUG FIX: the original used Python-2-only `xrange`, which raises
        # NameError on Python 3; `range` behaves identically here on both.
        for i in range(num_rois):
            batch_id = int(rois[i].data[0])
            output_batch_id[i] = batch_id
        patch_features = roi_output.view(-1, 512*7*7)
        # patch_features: N * (512*7*7)
        patch_features = self.fc(patch_features)
        # patch_features: N * 4096
        encoded_features = self.patch_encoder(patch_features)
        spm_features = self.spm_pooling(encoded_features, shapes, rois)
        spm_features = spm_features.view(batch_size, 256 * 8)
        cls_score1 = self.cls_score1(spm_features)
        cls_score2_features = self.cls_score2(patch_features)
        cls_score2 = self.patch_pooling(cls_score2_features, batch_size, output_batch_id)
        det_scores = cls_score2_features
        return cls_score1, cls_score2, det_scores
class DPL(nn.Module):
    """Deep Patch Learning model: a CNN backbone followed by the patch head."""

    def __init__(self, use_cuda, num_classes=20, enable_base_grad=False, base='vgg', pretrained=True, use_relation=False):
        super(DPL, self).__init__()
        # Select the convolutional backbone.
        # NOTE(review): the `pretrained` argument is not forwarded to the
        # resnet variants (hard-coded True) nor to VGG16 -- confirm intended.
        if base == 'vgg':
            self.cnn = basenet.VGG16()
        elif base == 'resnet50':
            self.cnn = basenet.ResNet50(pretrained=True)
        elif base == 'resnet34':
            self.cnn = basenet.ResNet34(pretrained=True)
        if not enable_base_grad:
            print("Not Enable Base Model Gradient")
            # BUG FIX: the attribute is `requires_grad`; the original set a
            # non-existent `require_grad`, so the backbone was never frozen.
            for param in self.cnn.parameters():
                param.requires_grad = False
        self.use_cuda = use_cuda
        self.head_network = PatchHeadNetwork(use_cuda=use_cuda, num_classes=num_classes, use_relation=use_relation)

    def freeze_bn(self):
        """Put every BatchNorm layer of the backbone into eval mode."""
        for layer in self.cnn.modules():
            if isinstance(layer, nn.BatchNorm2d):
                layer.eval()

    def forward(self, images, shapes, rois):
        """Extract backbone features from `images` and score patches for `rois`.

        Returns (cls_score1, cls_score2, det_scores) from the head network.
        """
        features = self.cnn(images)
        cls_score1, cls_score2, det_scores = self.head_network(features, shapes, rois)
        return cls_score1, cls_score2, det_scores
| wondervictor/dpl.pytorch | models/dpl.py | dpl.py | py | 3,460 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "layers.ROIAlign",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn.Sequential",
... |
10036308173 | """
Input pipeline (tf.dataset and input_fn) for GQN datasets.
Adapted from the implementation provided here:
https://github.com/deepmind/gqn-datasets/blob/acca9db6d9aa7cfa4c41ded45ccb96fecc9b272e/data_reader.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import tensorflow as tf
# ---------- ad-hoc data structures ----------
# Static description of one dataset version: folder name, shard counts per
# split, frame resolution, and number of views per recorded sequence.
DatasetInfo = collections.namedtuple(
    'DatasetInfo',
    ['basepath', 'train_size', 'test_size', 'frame_size', 'sequence_size']
)
# Context: the K conditioning views (frames + camera poses).
Context = collections.namedtuple('Context', ['frames', 'cameras'])
# Query: context plus the camera pose to predict from.
Query = collections.namedtuple('Query', ['context', 'query_camera'])
# TaskData: a query paired with the ground-truth target frame.
TaskData = collections.namedtuple('TaskData', ['query', 'target'])
# ---------- dataset constants ----------
_DATASETS = dict(
jaco=DatasetInfo(
basepath='jaco',
train_size=3600,
test_size=400,
frame_size=64,
sequence_size=11),
mazes=DatasetInfo(
basepath='mazes',
train_size=1080,
test_size=120,
frame_size=84,
sequence_size=300),
rooms_free_camera_with_object_rotations=DatasetInfo(
basepath='rooms_free_camera_with_object_rotations',
train_size=2034,
test_size=226,
frame_size=128,
sequence_size=10),
rooms_ring_camera=DatasetInfo(
basepath='rooms_ring_camera',
train_size=2160,
test_size=240,
frame_size=64,
sequence_size=10),
# super-small subset of rooms_ring for debugging purposes
rooms_ring_camera_debug=DatasetInfo(
basepath='rooms_ring_camera_debug',
train_size=1,
test_size=1,
frame_size=64,
sequence_size=10),
rooms_free_camera_no_object_rotations=DatasetInfo(
basepath='rooms_free_camera_no_object_rotations',
train_size=2160,
test_size=240,
frame_size=64,
sequence_size=10),
shepard_metzler_5_parts=DatasetInfo(
basepath='shepard_metzler_5_parts',
train_size=900,
test_size=100,
frame_size=64,
sequence_size=15),
shepard_metzler_7_parts=DatasetInfo(
basepath='shepard_metzler_7_parts',
train_size=900,
test_size=100,
frame_size=64,
sequence_size=15)
)
_NUM_CHANNELS = 3
_NUM_RAW_CAMERA_PARAMS = 5
_MODES = ('train', 'test')
# ---------- helper functions ----------
def _convert_frame_data(jpeg_data):
  """Decodes one JPEG string tensor into a float32 image scaled to [0, 1]."""
  decoded_frames = tf.image.decode_jpeg(jpeg_data)
  return tf.image.convert_image_dtype(decoded_frames, dtype=tf.float32)
def _get_dataset_files(dataset_info, mode, root):
  """Returns the full paths of all tfrecord shards for one dataset split."""
  split_dir = os.path.join(root, dataset_info.basepath, mode)
  num_files = dataset_info.train_size if mode == 'train' else dataset_info.test_size
  # shard file names are zero-padded to the width of the shard count
  digits = len(str(num_files))
  template = '{:0%d}-of-{:0%d}.tfrecord' % (digits, digits)
  # shard indexing runs from 1 to num_files
  return [os.path.join(split_dir, template.format(idx, num_files))
          for idx in range(1, num_files + 1)]
def _get_randomized_indices(context_size, dataset_info, seed):
  """Generates randomized indices into a sequence of a specific length."""
  # context views plus one target view
  example_size = context_size + 1
  indices = tf.range(0, dataset_info.sequence_size)
  # TF1-style op; shuffles the view order, then keeps the first example_size
  indices = tf.random_shuffle(indices, seed=seed)
  indices = tf.slice(indices, begin=[0], size=[example_size])
  return indices
def _parse(raw_data, dataset_info):
  """Parses one serialized tf.Example into its 'frames' and 'cameras' features."""
  feature_map = {
      'frames': tf.FixedLenFeature(
          shape=dataset_info.sequence_size, dtype=tf.string),
      'cameras': tf.FixedLenFeature(
          shape=[dataset_info.sequence_size * _NUM_RAW_CAMERA_PARAMS],
          dtype=tf.float32)
  }
  # single-example parsing; batching happens later in the pipeline
  example = tf.parse_single_example(raw_data, feature_map)
  return example
def _preprocess(example, indices, context_size, custom_frame_size, dataset_info):
  """Preprocesses the parsed data.

  Decodes the selected JPEG frames into float images and expands the raw
  5-value camera parameters (x, y, z, yaw, pitch) into the 7-dim pose
  [x, y, z, sin(yaw), cos(yaw), sin(pitch), cos(pitch)].
  """
  # frames: gather the randomly selected context+target views and decode them
  example_size = context_size + 1
  frames = tf.concat(example['frames'], axis=0)
  frames = tf.gather(frames, indices, axis=0)
  frames = tf.map_fn(
      _convert_frame_data, tf.reshape(frames, [-1]),
      dtype=tf.float32, back_prop=False)
  dataset_image_dimensions = tuple(
      [dataset_info.frame_size] * 2 + [_NUM_CHANNELS])
  frames = tf.reshape(
      frames, (example_size, ) + dataset_image_dimensions)
  # optionally resize the frames to a custom square resolution
  if (custom_frame_size and
      custom_frame_size != dataset_info.frame_size):
    frames = tf.reshape(frames, dataset_image_dimensions)
    new_frame_dimensions = (custom_frame_size,) * 2 + (_NUM_CHANNELS,)
    frames = tf.image.resize_bilinear(
        frames, new_frame_dimensions[:2], align_corners=True)
    frames = tf.reshape(
        frames, (-1, example_size) + new_frame_dimensions)
  # cameras: slice position and angles, expand angles to sin/cos pairs
  raw_pose_params = example['cameras']
  raw_pose_params = tf.reshape(
      raw_pose_params,
      [dataset_info.sequence_size, _NUM_RAW_CAMERA_PARAMS])
  raw_pose_params = tf.gather(raw_pose_params, indices, axis=0)
  pos = raw_pose_params[:, 0:3]
  yaw = raw_pose_params[:, 3:4]
  pitch = raw_pose_params[:, 4:5]
  cameras = tf.concat(
      [pos, tf.sin(yaw), tf.cos(yaw), tf.sin(pitch), tf.cos(pitch)], axis=-1)
  # return preprocessed tuple
  preprocessed_example = {}
  preprocessed_example['frames'] = frames
  preprocessed_example['cameras'] = cameras
  return preprocessed_example
def _prepare(preprocessed_example):
  """Packs preprocessed data into the (features, labels) tuples an Estimator expects."""
  frames = preprocessed_example['frames']
  cameras = preprocessed_example['cameras']
  # every view except the last forms the context; the last is the query/target
  context = Context(cameras=cameras[:-1], frames=frames[:-1])
  query = Query(context=context, query_camera=cameras[-1])
  data = TaskData(query=query, target=frames[-1])
  return data, data.target
# ---------- input_fn ----------
def gqn_input_fn(
    dataset_name,
    root,
    mode,
    context_size,
    batch_size=1,
    num_epochs=1,
    # optionally reshape frames
    custom_frame_size=None,
    # queue params
    num_threads=4,
    buffer_size=256,
    seed=None):
  """
  Creates a tf.data.Dataset based op that returns data.
  Args:
    dataset_name: string, one of ['jaco', 'mazes', 'rooms_ring_camera',
      'rooms_free_camera_no_object_rotations',
      'rooms_free_camera_with_object_rotations', 'shepard_metzler_5_parts',
      'shepard_metzler_7_parts'].
    root: string, path to the root folder of the data.
    mode: one of tf.estimator.ModeKeys.
    context_size: integer, number of views to be used to assemble the context.
    batch_size: (optional) batch size, defaults to 1.
    num_epochs: (optional) number of times to go through the dataset,
      defaults to 1.
    custom_frame_size: (optional) integer, required size of the returned
      frames, defaults to None.
    num_threads: (optional) integer, number of threads used to read and parse
      the record files, defaults to 4.
    buffer_size: (optional) integer, capacity of the underlying prefetch or
      shuffle buffer, defaults to 256.
    seed: (optional) integer, seed for the random number generators used in
      the dataset.
  Returns:
    tf.data.dataset yielding tuples of the form (features, labels)
    shapes:
      features.query.context.cameras: [N, K, 7]
      features.query.context.frames: [N, K, H, W, 3]
      features.query.query_camera: [N, 7]
      features.target (same as labels): [N, H, W, 3]
  Raises:
    ValueError: if the required version does not exist; if the required mode
      is not supported; if the requested context_size is bigger than the
      maximum supported for the given dataset version.
  """
  # map estimator mode key to dataset internal mode strings
  if mode == tf.estimator.ModeKeys.TRAIN:
    str_mode = 'train'
  else:
    str_mode = 'test'
  # check validity of requested dataset and split
  if dataset_name not in _DATASETS:
    raise ValueError('Unrecognized dataset {} requested. Available datasets '
                     'are {}'.format(dataset_name, _DATASETS.keys()))
  if str_mode not in _MODES:
    raise ValueError('Unsupported mode {} requested. Supported modes '
                     'are {}'.format(str_mode, _MODES))
  # retrieve dataset parameters
  dataset_info = _DATASETS[dataset_name]
  if context_size >= dataset_info.sequence_size:
    raise ValueError(
        'Maximum support context size for dataset {} is {}, but '
        'was {}.'.format(
            dataset_name, dataset_info.sequence_size-1, context_size))
  # collect the paths to all tfrecord files
  record_paths = _get_dataset_files(dataset_info, str_mode, root)
  # create TFRecordDataset
  dataset = tf.data.TFRecordDataset(
      filenames=record_paths, num_parallel_reads=num_threads)
  # parse the data from tfrecords
  dataset = dataset.map(
      lambda raw_data: _parse(raw_data, dataset_info),
      num_parallel_calls=num_threads)
  # preprocess into context and target
  # NOTE(review): the random index op is created once here and captured by the
  # map closure -- confirm whether per-example view randomization was intended.
  indices = _get_randomized_indices(context_size, dataset_info, seed)
  dataset = dataset.map(
      lambda example: _preprocess(example, indices, context_size, custom_frame_size, dataset_info),
      num_parallel_calls=num_threads)
  # parse into tuple expected by tf.estimator input_fn
  dataset = dataset.map(_prepare, num_parallel_calls=num_threads)
  # shuffle data (training split only)
  if mode == tf.estimator.ModeKeys.TRAIN:
    dataset = dataset.shuffle(buffer_size=(buffer_size * batch_size), seed=seed)
  # set up batching
  dataset = dataset.repeat(num_epochs)
  dataset = dataset.batch(batch_size)
  dataset = dataset.prefetch(buffer_size)
  return dataset
| ogroth/tf-gqn | data_provider/gqn_provider.py | gqn_provider.py | py | 9,919 | python | en | code | 189 | github-code | 6 | [
{
"api_name": "collections.namedtuple",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "... |
5522014624 | import openpyxl
def get_exl(file,Sheet):
    """Read a rectangular region of cell values from one sheet of an .xlsx workbook.

    Returns a list of rows, each row a list of cell values.
    NOTE(review): the loops skip the first row, the first three columns and the
    last column (cell() is 1-indexed, hence the +1 offsets) -- presumably this
    matches the project's test-data layout; confirm against the spreadsheet.
    """
    exl = openpyxl.load_workbook(file)
    table = exl[Sheet]
    max_rows = table.max_row
    max_column = table.max_column
    # print(max_rows,max_column)
    data = []
    for row in range(1, max_rows):
        rowdata = []
        for column in range(3, max_column-1):
            rowdata.append(table.cell(row+1, column+1).value)
        data.append(rowdata)
    return data
if __name__ == '__main__':
    # Smoke test: dump one sheet of the sample workbook to stdout.
    run = get_exl('../TestData/data/testdata.xlsx','查询终端设备')
    print(run)
| commiting/TEST | Tools/getexcel.py | getexcel.py | py | 541 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "openpyxl.load_workbook",
"line_number": 5,
"usage_type": "call"
}
] |
23920525234 | import threading
import tkinter as tk
from motor import Motor
motor = Motor()
# Run the motor simulation on a daemon thread so it doesn't block the Tk
# main loop and dies automatically when the window is closed.
thread_motor = threading.Thread(target=motor.inicia_motor, args=(True,))
thread_motor.daemon = True
thread_motor.start()
def update_value():
    """Refresh the RPM label from the motor's current speed and reschedule itself."""
    global value
    value = motor.rotacoes
    value_label.config(text=f'{value:0.2f} RPM')
    # poll again in 70 ms
    value_label.after(70, update_value)
def slider_callback(value):
    """Tk Scale callback: forward the selected current (amperes) to the motor."""
    motor.amperes = int(value)
# Create the main window
window = tk.Tk()
window.title("Atualização de Valor")
# Initial value shown in the label
value = 0
# Label that displays the current RPM value
value_label = tk.Label(window, text=str(value), font=("Arial", 24), height=5, width=20)
value_label.pack(pady=20)
# Slider controlling the motor current (amperes)
slider = tk.Scale(window, from_=motor.amperes, to=100, orient=tk.HORIZONTAL, command=slider_callback)
slider.pack(pady=10)
# "Desligar" (shut down) button
desligar_button = tk.Button(window, text="Desligar", command=motor.desliga_motor)
desligar_button.pack(pady=10)
# Start the periodic label refresh
update_value()
# Enter the Tk main loop
window.mainloop()
| PotatoMexicano/sistema-controle-eletrico | sistema_controle_eletrico/screen.py | screen.py | py | 1,354 | python | pt | code | 0 | github-code | 6 | [
{
"api_name": "motor.Motor",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "motor.inicia_motor",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "motor.rotacoes",
... |
15598838122 | import torch
from torch import nn
import torch.nn.functional as F
from ssd.utils_ssd.box_utils import match, log_sum_exp
# evaluate conf_loss and loc_loss
class MultiBoxLoss(nn.Module):
    """SSD MultiBox loss: smooth-L1 localization loss plus cross-entropy
    confidence loss with hard negative mining at a fixed neg:pos ratio.
    Both losses are normalized by the number of matched (positive) priors.
    """
    def __init__(self, cfg):
        super(MultiBoxLoss, self).__init__()
        self.num_classes = cfg.num_classes    # number of classes (incl. background at index 0)
        self.threshold = cfg.overlap_thresh   # IoU threshold used by match()
        self.negpos_ratio = cfg.neg_pos       # negatives kept per positive
        self.variance = cfg.variance          # box-encoding variances
    def forward(self, preds, targets):
        """Compute (loc_loss, conf_loss) for a batch.

        preds: (loc_data [B,P,4], conf_data [B,P,C], priors [P,4])
        targets: per-image tensors of [x1,y1,x2,y2,label] rows
        """
        loc_data, conf_data, priors = preds
        num = loc_data.size(0)
        num_priors = priors.size(0)
        # match priors (priors->nearest target); fills loc_t/conf_t per image
        loc_t = torch.Tensor(num, num_priors, 4)
        conf_t = torch.LongTensor(num, num_priors)
        if loc_data.is_cuda:
            loc_t, conf_t = loc_t.cuda(), conf_t.cuda()
        for idx in range(num):
            truths = targets[idx][:, :-1]
            labels = targets[idx][:, -1]
            defaults = priors
            match(self.threshold, truths, defaults, self.variance, labels, loc_t, conf_t, idx)
        # priors with a non-background match are positives
        pos = conf_t > 0
        # location loss: smooth L1 over positive priors only
        # NOTE(review): size_average= is deprecated in newer torch (use reduction=)
        pos_idx = pos.unsqueeze(2).expand_as(loc_data)
        loc_p = loc_data[pos_idx].view(-1, 4)
        loc_t = loc_t[pos_idx].view(-1, 4)
        loss_l = F.smooth_l1_loss(loc_p, loc_t, size_average=False)
        # evaluate each priors's loss (the same as the paper):
        # softmax cross-entropy written as logsumexp(x) - x[target]
        batch_conf = conf_data
        loss_c = (log_sum_exp(batch_conf) - batch_conf.gather(2, conf_t.unsqueeze(2))).squeeze(2)
        # hard negative mining: keep only the highest-loss background priors
        # (note: the number of positives differs per image in the batch)
        loss_c[pos] = 0  # filter out pos boxes
        # double argsort trick: idx_rank[i] is the rank of prior i by loss
        _, loss_idx = loss_c.sort(1, descending=True)
        _, idx_rank = loss_idx.sort(1)
        num_pos = pos.long().sum(1, keepdim=True)
        num_neg = torch.clamp(self.negpos_ratio * num_pos, max=pos.size(1) - 1)  # size: [num, 1]
        neg = idx_rank < num_neg.expand_as(idx_rank)
        # confidence loss over positives plus mined negatives (pos:neg=1:3)
        pos_idx = pos.unsqueeze(2).expand_as(conf_data)
        neg_idx = neg.unsqueeze(2).expand_as(conf_data)
        conf_p = conf_data[(pos_idx + neg_idx).gt(0)].view(-1, self.num_classes)
        targets_weightd = conf_t[(pos + neg).gt(0)]
        loss_c = F.cross_entropy(conf_p, targets_weightd, size_average=False)
        # normalize both losses by the total number of positives
        return loss_l / num_pos.sum(), loss_c / num_pos.sum()
| AceCoooool/detection-pytorch | ssd/utils_ssd/multiloss.py | multiloss.py | py | 2,399 | python | en | code | 24 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.LongTensor",
"line... |
12570656588 | import discord
import requests
client = discord.Client()
# Read the bot token from the secrets file. Fixes two issues in the original:
# the file handle was never closed, and readline() keeps the trailing
# newline, which would be passed on to client.run().
with open('secret.secret', 'r') as tokenFile:
    token = tokenFile.readline().strip()
@client.event
async def on_message(msg):
    """Reply to "$$$$<summoner>" messages with that summoner's ARAM MMR."""
    if not msg.content.startswith('$$$$'):
        return
    name = msg.content[4:]
    response = requests.get('https://na.whatismymmr.com/api/v1/summoner?name=' + name)
    if response.status_code != 200:
        await msg.channel.send('RIP LMAO.')
        return
    aram = response.json()['ARAM']
    await msg.channel.send(
        f'{name}\'s ARAM MMR: {aram["avg"]}\n'
        f'{aram["percentile"]}th percentile, about {aram["closestRank"]}'
    )
##test push
client.run(token) | gpulia/kitchenSync | server.py | server.py | py | 790 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "discord.Client",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 14,
"usage_type": "call"
}
] |
31014267679 | from transformers import (
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallTokenizer,
BlenderbotForConditionalGeneration,
BlenderbotTokenizer,
)
from transformers import GPT2LMHeadModel, GPT2Tokenizer
from transformers import AutoTokenizer, AutoModelForCausalLM
import sys
# Which model family to pre-download into the local HF cache; selected by the
# first CLI argument: 'dialogpt', 'gptneo' or 'blender'.
download_type = sys.argv[1]
if download_type == 'dialogpt':
    #------dialogpt samll------#
    model = GPT2LMHeadModel.from_pretrained("microsoft/DialoGPT-small")
    tokenizer = GPT2Tokenizer.from_pretrained("microsoft/DialoGPT-small")
    #------dialogpt medium------#
    # (both sizes are fetched so both end up cached; the variables simply
    # keep the last one loaded)
    model = GPT2LMHeadModel.from_pretrained("microsoft/DialoGPT-medium")
    tokenizer = GPT2Tokenizer.from_pretrained("microsoft/DialoGPT-medium")
    print("dialogpt is done!")
elif download_type == 'gptneo':
    #------gptneo small------#
    model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")
    tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
    #------gptneo large------#
    #model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")
    #tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
    print("gptneo is done!")
elif download_type == 'blender':
    #------blender small------#
    model = BlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
    tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
    #------blender medium------#
    model = BlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
    tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
    print("blender is done!")
else:
    # Robustness fix: the original silently did nothing on an unknown argument.
    print("unknown download type: %r (expected 'dialogpt', 'gptneo' or 'blender')" % download_type)
| DariuszO/openchat | model_download.py | model_download.py | py | 1,690 | python | en | code | null | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "transformers.GPT2LMHeadModel.from_pretrained",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "transformers.GPT2LMHeadModel",
"line_number": 15,
"usage_type": "name"
},
... |
34346397693 | """
Exercises of the book "Think python"
2.10 Exercises
"""
import math
import datetime
import decimal
# Exercise 2
# Using Python as a calculator
# 1. The volume of a sphere with radius r is (4/3) * pi * r^3.
#    What is the volume of a sphere with radius 5?
radius = 5
print("The volume of a sphere: ", (4 / 3 * math.pi * radius**3))
# 2. Suppose the cover price of a book is $24.95, but bookstores get a 40% discount.
# Shipping costs $3 for the first copy and 75 cents for each additional copy.
# What is the total wholesale cost for 60 copies?
cover_price = decimal.Decimal('24.95')
discount = decimal.Decimal('0.4')
copies_amount = decimal.Decimal(60)
s_cost_first = decimal.Decimal(3)  # Shipping_cost for the first copy
s_cost_add = decimal.Decimal('0.75')  # Shipping_cost for each additional copy
# Total wholesale cost = discounted price for every copy + shipping.
# (renamed from `sum`, which shadowed the built-in function)
total_cost = (
    (cover_price * (1 - discount)) * copies_amount
    + (s_cost_add * (copies_amount - 1))
    + s_cost_first
)
print("Total wholesale cost for 60 copies: ", round(total_cost, 2))
# 3. If I leave my house at 6:52 am and run 1 mile at an easy pace (8:15 per mile),
# then 3 miles at tempo (7:12 per mile) and 1 mile at easy pace again,
# what time do I get home for breakfast?
# Paces, in "MM:SS per mile" format
easy_pace = "08:15"
tempo = "07:12"
def time_to_seconds(time: str):
    """Convert a time string in "MM:SS" format to a total number of seconds."""
    minutes, seconds = time.split(":")
    return int(minutes) * 60 + int(seconds)
# Convert both paces to seconds per mile
easy_pace = time_to_seconds(easy_pace)
tempo = time_to_seconds(tempo)
# Start time of the run (today's date at 06:52)
start_time = datetime.datetime.strptime("6:52", "%H:%M")
# Duration of the run in seconds: 2 easy miles + 3 tempo miles
whole_time = 2 * easy_pace + 3 * tempo
# Clock time at the end of the run
home_time = (start_time + datetime.timedelta(seconds=whole_time)).time()
print("You will get home at: ", home_time)
| LiliiaMykhaliuk/think-python | chapter2/2.10.2.py | 2.10.2.py | py | 1,916 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "math.pi",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "decimal.Decimal",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"l... |
74888826427 | from rest_framework.serializers import CharField, ModelSerializer
from .models.base import CanadianCommonCv
from .models.employment import AcademicWorkExperience, Employment
from .models.personal_information import Identification, Email, Website
from .models.recognitions import AreaOfResearch
from .models.user_profile import UserProfile
class AreaOfResearchSerializer(ModelSerializer):
    """Serializes an AreaOfResearch record (area / sector / field)."""
    class Meta:
        model = AreaOfResearch
        fields = [
            'area',
            'sector',
            'field'
        ]
class WebsiteSerializer(ModelSerializer):
    """Serializes a Website as its URL only."""
    class Meta:
        model = Website
        fields = ['url']
class EmailSerializer(ModelSerializer):
    """Serializes an Email as its address only."""
    class Meta:
        model = Email
        fields = ['address']
class IdentificationSerializer(ModelSerializer):
    """Serializes an Identification with its nested email and website lists."""
    # reverse relations exposed read-only via their *_set managers
    email = EmailSerializer(many=True, read_only=True, source='email_set')
    website = WebsiteSerializer(many=True, read_only=True, source="website_set")
    class Meta:
        model = Identification
        fields = [
            'email',
            'title',
            'website',
            'family_name',
            'first_name',
            'middle_name',
            'previous_family_name',
            'previous_first_name'
        ]
class AcademicWorkExperienceSerializer(ModelSerializer):
    """Serializes one AcademicWorkExperience entry (department + title)."""
    class Meta:
        model = AcademicWorkExperience
        fields = [
            'department',
            'position_title'
        ]
class EmploymentSerializer(ModelSerializer):
    """Serializes an Employment record with its nested academic work experiences."""
    academic_work_experience = AcademicWorkExperienceSerializer(many=True, read_only=True)
    class Meta:
        model = Employment
        fields = ['academic_work_experience']
class UserProfileSerializer(ModelSerializer):
    """Serializes a UserProfile, renaming the model fields for the API."""
    # model field `research_interest` is exposed as `research_description`
    research_description = CharField(source='research_interest', read_only=True)
    research_interests = AreaOfResearchSerializer(many=True, read_only=True, source='user_aor')
    class Meta:
        model = UserProfile
        fields = [
            'research_description',
            'research_interests'
        ]
class CanadianCommonCvSerializer(ModelSerializer):
    """Top-level CCV serializer; flattens user_profile fields into the root."""
    identification = IdentificationSerializer(read_only=True)
    employment = EmploymentSerializer(read_only=True)
    user_profile = UserProfileSerializer(read_only=True)
    def to_representation(self, instance):
        # Hoist research_description/research_interests out of the nested
        # user_profile object and drop the now-redundant wrapper key.
        ret = super().to_representation(instance)
        ret['research_description'] = ret['user_profile']['research_description']
        ret['research_interests'] = ret['user_profile']['research_interests']
        ret.pop('user_profile')
        return ret
    class Meta:
        model = CanadianCommonCv
        fields = [
            'identification',
            'employment',
            'user_profile'
        ]
| c3g/ccv_api | ccv/serializers.py | serializers.py | py | 2,733 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "models.recognitions.AreaOfResearch",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 20,... |
33810339763 | import os
from random import shuffle
import tensorflow as tf
import glob
from config import config
# all functions except init and create_iterators should be empty
class Preprocessing:
def __init__(self):
print('preprocessing instance creation started')
self.dir_name = config['data_dir']
self.input_len = config['input_len']
#maybe add later noise augmentation
def create_iterators(self):
# get the filenames split into train test validation
test_files = self.get_files_from_txt('testing_list.txt')
val_files = self.get_files_from_txt('validation_list.txt')
filenames = glob.glob(os.path.join(self.dir_name, '*/**.wav'), recursive=True)
filenames = [filename for filename in filenames if 'background_noise' not in filename]
train_files = list(set(filenames) - set(val_files) - set(test_files))
shuffle(train_files)
# get the commands and some prints
self.commands = self.get_commands()
self.num_classes = len(self.commands)
print('len(train_data)', len(train_files))
print('prelen(test_data)', len(test_files))
print('len(val_data)', len(val_files))
print('commands: ', self.commands)
print('number of commands: ', len(self.commands))
# make tf dataset object
train_dataset = self.make_tf_dataset_from_list(train_files)
val_dataset = self.make_tf_dataset_from_list(val_files, is_validation = True)
test_dataset = self.make_tf_dataset_from_list(test_files)
return train_dataset, val_dataset, test_dataset
def get_files_from_txt(self, which_txt):
assert which_txt == 'testing_list.txt' or which_txt == 'validation_list.txt', 'wrong argument'
path = os.path.join(self.dir_name, which_txt)
with open(path) as f:
paths = f.readlines()
paths = [os.path.join(self.dir_name, path[:len(path) - 1]) for path in paths]
shuffle(paths)
return paths
def get_commands(self):
dirs = glob.glob(os.path.join(self.dir_name, "*", ""))
commands = [os.path.split(os.path.split(dir)[0])[1] for dir in dirs if 'background' not in dir]
return commands
@staticmethod
def decode_audio(audio_binary):
audio, _ = tf.audio.decode_wav(audio_binary)
return tf.squeeze(audio, axis=-1)
def get_label(self, file_path):
parts = tf.strings.split(file_path, os.path.sep)
label = parts[-2]
label_id = tf.argmax(label == self.commands)
label = tf.one_hot(label_id, self.num_classes)
return label
def make_tf_dataset_from_list(self, filenames_list, is_validation = False):
    """Build a batched tf.data pipeline: decode -> pad -> shuffle -> batch.

    When is_validation is True the dataset repeats indefinitely so it can be
    consumed every epoch without exhausting.
    """
    files = tf.data.Dataset.from_tensor_slices(filenames_list)
    dataset = files.map(self.get_waveform_and_label, num_parallel_calls=tf.data.AUTOTUNE)
    dataset = dataset.map(self.pad_map_func, num_parallel_calls=tf.data.AUTOTUNE)
    # NOTE(review): shuffling is applied to validation data too — confirm intended
    dataset = dataset.shuffle(buffer_size=5000, reshuffle_each_iteration=True)
    if is_validation:
        dataset = dataset.repeat()
    dataset = dataset.batch(config['train_params']['batch_size']).prefetch(tf.data.AUTOTUNE)
    return dataset
def get_waveform_and_label(self, file_path):
    """tf.data map fn: load one wav file and return (waveform, one_hot_label)."""
    label = self.get_label(file_path)
    audio_binary = tf.io.read_file(file_path)
    waveform = self.decode_audio(audio_binary)
    return waveform, label
def pad_map_func(self, audio, label):
    """tf.data map fn: pad the waveform to fixed length, pass the label through."""
    return [self.add_paddings(audio), label]
def add_paddings(self, wav):
    """Right-pad a waveform with zeros up to self.input_len samples.

    NOTE(review): clips longer than input_len are returned unchanged
    (no trimming) — confirm downstream batching tolerates that.
    """
    len_wav = len(wav)
    if len_wav < self.input_len:
        paddings = tf.zeros((self.input_len - len_wav))
        wav = tf.concat([wav, paddings], axis=0)
    return wav
| ashnik777/Audio-Classification | preprocessing.py | preprocessing.py | py | 3,788 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "config.config",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "config.config",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "glob.glob",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number... |
import numpy as np
import cv2

# Foreground photo and the replacement background image (background is 3750x2500).
img = cv2.imread('/Users/macbookair/PycharmProjects/PR/homework2/unpro.jpg')
bg = cv2.imread('/Users/macbookair/PycharmProjects/PR/homework2/back2.png')#---->3750*2500

# GrabCut working state: per-pixel mask plus the background/foreground GMM buffers.
mask = np.zeros(img.shape[:2],np.uint8)
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)

# Rough (x, y, w, h) rectangle known to contain the foreground subject.
rect = (1250,25,2000,2325)
# rect = (1,1,l-2,w-2)
#1250 25 2400 2450
cv2.grabCut(img,mask,rect,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_RECT)

# Certain/probable background (mask values 0 and 2) -> 0, foreground -> 1.
mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
# Erode then dilate to clean ragged edges of the foreground mask.
mask2 = cv2.erode(mask2,None,iterations=2)
mask2 = cv2.dilate(mask2,None,iterations=1)
mask_inv = np.where((mask2==0),1,0).astype('uint8')

# Composite: foreground pixels from the photo, everything else from the new background.
img1 = img*mask2[:,:,np.newaxis]
img2 = bg*mask_inv[:,:,np.newaxis]
dst=cv2.addWeighted(img1,1,img2,1,0)
cv2.imshow('output',dst)
cv2.imwrite('output.jpg',dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
| xubinchen-very6/Pattern-recognition | prml/homework2/背景变变变.py | 背景变变变.py | py | 855 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 7,
... |
30938963931 | import pandas as pd
from config import CONFIG_DICT
import networkx as nx
import matplotlib.pyplot as plt
import random
import cv2
import numpy as np
import math
from MplCanvas import MplCanvas
import Equirec2Perspec as E2P
# Target range for normalized map coordinates used throughout the graph/plots.
new_min = -100
new_max = 100

# Geographic bounding box of the dataset (used to rescale lat/long).
lat_min = 40.42524817 ## this is for first 500 in pittsburg need to generalize this for all places
lat_max = 40.44497464
long_min = -80.03468568
long_max = -79.98858816
class DataHelper():
    """Graph-backed helper over street-view route data.

    Loads the route CSV into a networkx graph (one node per panorama keyed by
    scaled (lat, long), edges between consecutive shots of the same route) and
    provides navigation, plotting and panorama-cropping utilities on top of it.
    """

    def __init__(self):
        self.G = nx.Graph()
        self.end_points = []
        # Create canvas for plot rendering:
        self.canvas = MplCanvas(self, width=5, height=4, dpi=100)
        self.bev_graph = MplCanvas(self, width=5, height=4, dpi=100)
        self.xdata = []
        self.ydata = []
        # Set of visited locations.
        self.visited_locations = set()
        self.read_routes(CONFIG_DICT["csv_file"])

    def new_lat_scale(self, x):
        """Map a latitude into the [new_min, new_max] plotting range."""
        normalized_new_val = ((x - lat_min) / (lat_max - lat_min) * (new_max - new_min)) + new_min
        return normalized_new_val

    def new_long_scale(self, x):
        """Map a longitude into the [new_min, new_max] plotting range."""
        normalized_new_val = ((x - long_min) / (long_max - long_min) * (new_max - new_min)) + new_min
        return normalized_new_val

    def image_name(self, pos):
        """Return the panorama image name stored for a scaled (lat, long) node."""
        return self.image_names[pos]

    def panorama_split(self, theta, image, resolution = (720, 1080)):
        """Crop a perspective view at angle `theta` out of an equirectangular panorama."""
        print("\n")
        print("Showing image: ", image + ".jpg")
        print("Current viewing angle: ", theta)
        print("\n")
        equ = E2P.Equirectangular("data/Pittsburgh/"+image+".jpg") # Load equirectangular image
        #
        # FOV unit is degree
        # theta is z-axis angle(right direction is positive, left direction is negative)
        # phi is y-axis angle(up direction positive, down direction negative)
        # height and width is output image dimension
        #
        img = equ.GetPerspective(90, -theta, 0, *resolution) # Specify parameters(FOV, theta, phi, height, width)
        return img

    def build_graph(self, data):
        """Populate self.G from the route dataframe and plot each route as it goes."""
        i = 0
        prev_route = -1
        prev_pos = (-1,-1)
        prev_name = "abc"
        x = []
        y = []
        print("\n")
        print("Building graph. \n")
        for index, row in data.iterrows():
            route_no = row['route_no']
            lat = row['latitude']
            long = row['longitude']
            scaled_lat = self.new_lat_scale(lat)
            scaled_long = self.new_long_scale(long)
            image_name = row['image_name']
            current_node = (scaled_lat,scaled_long)
            if (image_name not in self.G):
                self.G.add_node((scaled_lat,scaled_long),image_name = image_name, latitude = lat, longitude=long, yaw =row['yaw'], ) # saves a node as image name
                #print((scaled_lat,scaled_long), image_name)
            if route_no == prev_route and prev_pos != (-1,-1): # Why is prev_pos only compares to one integer while it is a tuple?
                # So the edges only connect nodes of the same route?
                #print("adding edge")
                x.append(scaled_lat) # What are these x, y lists for? Look at the elif below.
                y.append(scaled_long)
                self.G.add_edge(prev_pos, current_node) # need to add something like a direction on this edge like right left straight...
            elif route_no != prev_route: ## going to a new route
                plt.plot(x, y, '-o', linewidth=1, markersize=2) # x and y used to plot the previous route.
                if(prev_pos!= (-1,-1)): # end_points mean the end point of each route.
                    self.end_points.append(prev_pos)
                x=[]
                y=[]
            prev_pos = current_node
            prev_route = route_no
        #save the graph as a json?
        self.image_names = nx.get_node_attributes(self.G, 'image_name')
        plt.savefig("filename5.png")

    def read_routes(self, csv_file = "data/pittsburg_500.csv" ):
        """Load the route CSV and build the navigation graph from it."""
        data = pd.read_csv(csv_file, keep_default_na=False)
        self.build_graph(data)

    def find_adjacent(self, pos, action = "next"):
        """Return the list of nodes adjacent to `pos` in the graph.

        NOTE(review): implicitly returns None for any action other than
        "next" — confirm callers only pass the default.
        """
        #print("Finding next position based on action/direction and position \n")
        if action == "next":
            #print(self.end_points)
            #print("Current node: \n", pos)
            #print("Adjacent nodes and edges: \n", (self.G.adj[pos])) # Finding adjacent nodes and edges to pos node.
            #adj_nodes_list = [keys for keys,value in self.G.adj[pos].items()]
            #print("Coordinate of the adjacent nodes: \n", adj_nodes_list)
            return list([keys for keys,value in self.G[pos].items()]) # Return list of keys of nodes adjacent to node with key pos.

    def reset(self):
        """Return a fixed start position (route end point #1000)."""
        # reset the position of the agent
        print("Resets the position to a start \n")
        #i = random.choice(range(len(self.end_points)))
        i = 1000
        return self.end_points[i]

    def sample_location(self):
        """Return a uniformly random node (scaled lat/long tuple) from the graph."""
        location_list = [keys for keys, values in self.G.nodes.items()]
        location = random.choice(location_list)
        return location

    # Function to find the distances to adjacent nodes.
    # This is used to check to see if the node found is actually the nearest node.
    def find_distances(self, pos, adj_nodes_list):
        """Euclidean distances from `pos` to each node in `adj_nodes_list`."""
        distance_list = []
        for node in adj_nodes_list:
            distance_list.append(np.linalg.norm(np.array(pos) - np.array(node)))
        return distance_list

    def fix_angle(self, angle):
        """Wrap an angle in degrees into [0, 360)."""
        if angle < 0:
            angle = 360 + angle
        elif angle >= 360:
            angle = angle - 360
        return angle

    # This function should also convert from triangular to abosulute angle?
    def get_angle(self, curr, prev):
        """Absolute heading (degrees, [0, 360)) of the vector prev -> curr.

        NOTE(review): returns 0 when the two x-coordinates coincide
        (vertical move) — confirm that is the intended convention.
        """
        if (curr[0] - prev[0]) != 0 :
            slope = (curr[1] - prev[1]) / (curr[0] - prev[0])
        else:
            return 0
        #print(slope)
        angle = math.degrees(math.atan(slope))
        # The direction is from the second to the fourth quadrant.
        # So angle is negative.
        if (curr[0] > prev[0] and curr[1] < prev[1]):
            angle = 360 + angle
        # Direction: from fourth to second quadrant.
        # Angle is negative.
        elif (curr[0] < prev[0] and curr[1] > prev[1]):
            angle = 180 + angle
        # Direction: from first to third.
        # Angle is positive.
        elif (curr[0] < prev[0] and curr[1] < prev[1]):
            angle = 180 + angle
        #angle = fix_angle(angle)
        return angle

    # Convention we are using: in the angle_range, the first value always represent the right boundary of the cone.
    # While the second value represent the left boundary of the cone.
    # This function return 1 if angle is in range, 0 if not.
    def angle_in_range(self, angle, angle_range):
        """Return 1 if `angle` lies inside the (right, left) cone, else 0."""
        # This is the case where the fix_angle adjusted the angle to be only from 0 to 360.
        if angle_range[0] > angle_range[1]:
            if angle < angle_range[1] or angle > angle_range[0]:
                return 1
            else:
                return 0
        # This is the regular case:
        else:
            if angle > angle_range[0] and angle < angle_range[1]:
                return 1
            else:
                return 0

    # Note on the process of finding the nearest node:
    # My speculation:
    # 1. Find the current angle cone of the agent, which is where the agent is looking in absolute angles.
    # 2. Then get the adjacent nodes' absolute angles. Note: adjacent is defined as being connected by an edge.
    # 3. Filter the adjacent nodes by fov cone using the abosolute angles.
    # 4. Move to the nearest node within the cone.
    # Note: Process of graph creation: Dynamic_plot.py called build_graph. Build_graph go through every line
    # of the csv file then add all the nodes. What about edges?
    def find_nearest(self, curr_pos, prev_pos,curr_angle, direction):
        """Pick the nearest adjacent node inside the forward/backward search cone.

        Returns (decision, image_name, center_angle); `decision` stays at
        curr_pos when no adjacent node falls inside the cone.
        """
        print("\n")
        # This is the view angle.
        center_angle = self.fix_angle(curr_angle)
        # The search angle is based on positions. Independent of viewing angle.
        search_angle = self.get_angle(curr_pos, prev_pos)
        left_bound = self.fix_angle(search_angle+90)
        right_bound = self.fix_angle(search_angle-90)
        # Check the current view angle against the search angle range:
        if direction == "forward":
            if self.angle_in_range(curr_angle, (right_bound, left_bound)) :
                search_angle_range = (right_bound , left_bound)
            else:
                search_angle_range = (left_bound, right_bound)
        elif direction == "backward":
            if self.angle_in_range(curr_angle, (right_bound, left_bound)) :
                search_angle_range = (left_bound , right_bound)
            else:
                search_angle_range = (right_bound, left_bound)
        print("Current center angle: ", center_angle)
        next_pos_list = self.find_adjacent(curr_pos) # This is a list of adjacent nodes to node agents_pos_1
        decision = curr_pos
        image_name = self.image_name(curr_pos)
        print("Current node: ", curr_pos)
        print("Possible next nodes: ", len(next_pos_list))
        print("List of adjacent nodes: ", next_pos_list)
        print("Distances from current node to the adjacent nodes: ", self.find_distances(curr_pos, next_pos_list))
        print("Search angle range: ", search_angle_range)
        filtered_pos_list = []
        # Filtering the adjacent nodes by angle cone.
        for pos in next_pos_list:
            # Getting the angle between the current nodes and all adjacent nodes.
            angle = self.get_angle(pos, curr_pos)
            print("Angle from ", curr_pos,"to ", pos, "is ", angle)
            if self.angle_in_range(angle, search_angle_range):
                filtered_pos_list.append(pos)
        print("Filtered adjacent nodes list: ", filtered_pos_list)
        if (len(filtered_pos_list) == 0):
            print("\n")
            print("No nodes found. Agent standing still.")
        else:
            filtered_distances_list = self.find_distances(curr_pos, filtered_pos_list)
            print("Distances from current node to the filtered adjacent nodes: ", filtered_distances_list)
            print("Index of min value: ", (min(filtered_distances_list)))
            decision = filtered_pos_list[filtered_distances_list.index(min(filtered_distances_list))]
            print("The nearest node within the angle cone is: " , decision)
            print("Found a node within the angle cone. New node position: ", decision)
            image_name = self.image_name(decision)
            print("Showing new node's image: ", image_name)
            self.panorama_split(center_angle, image_name)
        return decision, image_name, center_angle

    # The next two functions help in the render method.
    def draw_angle_cone(self, curr_pos, angle, color = 'm'):
        """Draw the +/-45 degree viewing cone from `curr_pos` on the main canvas."""
        x = curr_pos[0]
        y = curr_pos[1]
        angle_range = [self.fix_angle(angle - 45), self.fix_angle(angle + 45)]
        line_length = 50
        for angle in angle_range:
            end_y = y + line_length * math.sin(math.radians(angle))
            end_x = x + line_length * math.cos(math.radians(angle))
            self.canvas.axes.plot([x, end_x], [y, end_y], ':' + color)
        self.canvas.draw()

    def update_plot(self, curr_pos, prev_pos, curr_angle):
        """Redraw the route canvas: travelled trail, adjacency, nodes, view cone."""
        y_prev = prev_pos[1]
        x_prev = prev_pos[0]
        y = curr_pos[1]
        x = curr_pos[0]
        self.ydata = self.ydata + [y]
        self.xdata = self.xdata + [x]
        self.canvas.axes.cla()  # Clear the canvas.
        self.canvas.axes.plot(self.xdata, self.ydata, '-ob')
        adj_nodes_list = [keys for keys, values in self.G.adj[(x,y)].items()]
        num_adj_nodes = len(adj_nodes_list)
        adj_nodes_list = np.array( [[x_coor, y_coor] for x_coor, y_coor in adj_nodes_list])
        x_pos_list = np.array([x] * num_adj_nodes)
        y_pos_list = np.array([y] * num_adj_nodes)
        self.canvas.axes.plot([x_pos_list,adj_nodes_list[:,0]], [y_pos_list, adj_nodes_list[:,1]], '--or')
        self.canvas.axes.plot(x, y, color = 'green', marker = 'o')
        self.canvas.axes.text(x, y, '({}, {})'.format(x, y))
        self.canvas.axes.plot(x_prev, y_prev, color = 'purple', marker = 'o')
        # Current view of the agent.
        self.draw_angle_cone(curr_pos, curr_angle, color = 'g')
        self.canvas.axes.set_xlim([new_min, new_max])
        self.canvas.axes.set_ylim([new_min, new_max])
        self.canvas.draw()
        self.canvas.show()

    def bird_eye_view(self, curr_pos, radius):
        """Subgraph of adjacent nodes within `radius` of curr_pos (None if empty)."""
        adjacent_pos_list = self.find_adjacent(curr_pos)
        distances_list = self.find_distances(curr_pos, adjacent_pos_list)
        in_range_nodes_list = []
        for distance, pos in zip(distances_list, adjacent_pos_list):
            if distance <= radius:
                in_range_nodes_list.append(pos)
        if len(in_range_nodes_list) == 0:
            print("No nodes found in range for bird eye's view.")
            return None
        bird_eye_graph = self.G.subgraph(in_range_nodes_list)
        return bird_eye_graph

    def draw_bird_eye_view(self, curr_pos, radius, graph):
        """Render `graph` (a bird-eye subgraph) around curr_pos with a radius circle."""
        #self.bev_graph.axes.cla()
        nodes_list = [keys for keys, values in graph.nodes().items()]
        num_nodes = len(nodes_list)
        nodes_list = np.array([[x_coor, y_coor] for x_coor, y_coor in nodes_list])
        x = curr_pos[0]
        y = curr_pos[1]
        x_pos_list = np.array([x] * num_nodes)
        y_pos_list = np.array([y] * num_nodes)
        self.bev_graph.axes.plot([x_pos_list,nodes_list[:,0]], [y_pos_list, nodes_list[:,1]], '--or')
        self.bev_graph.axes.plot(x, y, color = 'green', marker = 'o')
        self.bev_graph.axes.text(x, y, '({}, {})'.format(x, y))
        self.bev_graph.axes.set_xlim([new_min, new_max])
        self.bev_graph.axes.set_ylim([new_min, new_max])
        # Draw a circle to see if the BEV is done correctly.
        draw_circle= plt.Circle(curr_pos, radius = radius, fill = False)
        self.bev_graph.axes.add_artist(draw_circle)
        self.bev_graph.draw()
        self.bev_graph.show()

    def distance_to_goal(self, curr_pos, goal):
        """Euclidean distance between the current position and the goal node."""
        return np.linalg.norm(np.array(curr_pos) - np.array(goal))
| klekkala/usc_navigate | src/data_helper.py | data_helper.py | py | 14,529 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "networkx.Graph",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "MplCanvas.MplCanvas",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "MplCanvas.MplCanvas",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "config.CONFIG_... |
13239474097 | from __future__ import division
from __future__ import print_function
from builtins import range
from past.utils import old_div
from numpy import *
from matplotlib.pyplot import *
import sys
def solver(I, a, T, dt, theta):
    """Solve u'=-a*u, u(0)=I, for t in (0,T] with the theta-rule.

    theta=0 gives Forward Euler, theta=1 Backward Euler, theta=0.5
    Crank-Nicolson.  T is adjusted to the nearest multiple of dt.

    Returns:
        (u, t): arrays of the N+1 solution values and mesh points.
    """
    dt = float(dt)            # ensure float time step (true division below)
    N = int(round(T/dt))      # no of time intervals (old_div shim removed: dt is float)
    T = N*dt                  # adjust T to fit time step dt
    u = zeros(N+1)            # array of u[n] values
    t = linspace(0, T, N+1)   # time mesh
    u[0] = I                  # assign initial condition
    for n in range(0, N):     # n=0,1,...,N-1
        u[n+1] = (1 - (1-theta)*a*dt)/(1 + theta*dt*a)*u[n]
    return u, t
def exact_solution(t, I, a):
    """Exact solution u(t) = I*exp(-a*t) of u'=-a*u, u(0)=I."""
    decay = exp(-a*t)
    return I*decay
def explore(I, a, T, dt, theta=0.5, makeplot=True):
    """Run one simulation, return the discrete L2 error norm E.

    When makeplot is True, also plot numerical vs exact solution and save
    the figure as <method>_<dt>.png.
    """
    u, t = solver(I, a, T, dt, theta)   # numerical solution
    e = exact_solution(t, I, a) - u     # pointwise error
    E = sqrt(dt*sum(e**2))
    if not makeplot:
        return E
    figure()                            # create new plot
    plot(t, u, 'r--o')                  # red dashes w/circles
    t_fine = linspace(0, T, 1001)       # very fine mesh for the exact curve
    plot(t_fine, exact_solution(t_fine, I, a), 'b-')
    legend(['numerical', 'exact'])
    xlabel('t')
    ylabel('u')
    title('Method: theta-rule, theta=%g, dt=%g' % (theta, dt))
    theta2name = {0: 'FE', 1: 'BE', 0.5: 'CN'}
    savefig('%s_%g.png' % (theta2name[theta], dt))
    show()
    return E
def define_command_line_options():
    """Create and return the argparse parser for the decay-simulation CLI."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--I', '--initial_condition', type=float, default=1.0,
        help='initial condition, u(0)', metavar='I')
    parser.add_argument(
        '--a', type=float, default=1.0,
        help='coefficient in ODE', metavar='a')
    parser.add_argument(
        '--T', '--stop_time', type=float, default=1.0,
        help='end time of simulation', metavar='T')
    parser.add_argument(
        '--makeplot', action='store_true',
        help='display plot or not')
    parser.add_argument(
        '--dt', '--time_step_values', type=float, default=[1.0],
        help='time step values', metavar='dt',
        nargs='+', dest='dt_values')
    return parser
def read_command_line(use_argparse=True):
    """Return (I, a, T, makeplot, dt_values) parsed from the command line.

    Uses argparse by default; otherwise reads the legacy positional form
    `prog I a T on/off dt1 dt2 ...` straight from sys.argv.
    """
    if use_argparse:
        parser = define_command_line_options()
        args = parser.parse_args()
        print('I={}, a={}, makeplot={}, dt_values={}'.format(
            args.I, args.a, args.makeplot, args.dt_values))
        return args.I, args.a, args.T, args.makeplot, args.dt_values

    if len(sys.argv) < 6:
        print('Usage: %s I a on/off dt1 dt2 dt3 ...' % sys.argv[0])
        sys.exit(1)
    I, a, T = (float(v) for v in sys.argv[1:4])
    makeplot = sys.argv[4] in ('on', 'True')
    dt_values = [float(v) for v in sys.argv[5:]]
    return I, a, T, makeplot, dt_values
def main():
    """Run the convergence study for theta in {0, 0.5, 1} over all dt values.

    Reads parameters from the command line, computes the error E for each
    (theta, dt) pair, then prints and returns the pairwise convergence rates
    r = ln(E[i-1]/E[i]) / ln(dt[i-1]/dt[i])  (dict: theta -> list of rates).
    """
    I, a, T, makeplot, dt_values = read_command_line()
    r = {}
    for theta in 0, 0.5, 1:
        E_values = []
        for dt in dt_values:
            E = explore(I, a, T, dt, theta, makeplot=False)
            E_values.append(E)
        # Compute convergence rates (old_div shim removed: operands are floats)
        m = len(dt_values)
        r[theta] = [log(E_values[i-1]/E_values[i])/
                    log(dt_values[i-1]/dt_values[i])
                    for i in range(1, m, 1)]
    for theta in r:
        print('\nPairwise convergence rates for theta=%g:' % theta)
        print(' '.join(['%.2f' % r_ for r_ in r[theta]]))
    return r
def verify_convergence_rate():
    """Check the final convergence rates against theory (FE/BE: 1, CN: 2).

    Returns True when every theta's last pairwise rate is within 0.1 of the
    expected order, False otherwise.
    """
    rates = main()
    tol = 0.1
    expected_rates = {0: 1, 1: 1, 0.5: 2}
    return all(
        abs(expected_rates[theta] - rates[theta][-1]) <= tol
        for theta in rates
    )
if __name__ == '__main__':
    # A bare 'verify_rates' argument switches to self-test mode;
    # it is removed before argparse sees sys.argv.
    if 'verify_rates' in sys.argv:
        sys.argv.remove('verify_rates')
        # Rate verification needs several dt values to compare pairwise.
        if not '--dt' in sys.argv:
            print('Must assign several dt values through the --dt option')
            sys.exit(1) # abort
        if verify_convergence_rate():
            pass
        else:
            print('Bug in the implementation!')
    else:
        # Perform simulations
        main()
| hplgit/doconce | doc/src/slides/src/solver.py | solver.py | py | 4,682 | python | en | code | 305 | github-code | 6 | [
{
"api_name": "past.utils.old_div",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "builtins.range",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "sys.argv",
... |
23391816430 | import os
import shutil
import subprocess
import time
import pylab
import imageio
import numpy as np
from tqdm import tqdm
from skimage.io import imread, imsave
def create_options(model, epoch):
    """Assemble the CycleGAN test-phase option dictionary for a model/epoch.

    Key order matters: create_bash_cmd_test serializes the dict in insertion
    order when building the shell command.
    """
    return {
        "loadSize": 512,
        "fineSize": 512,
        "how_many": 'all',
        "phase": 'test',
        "name": model,
        "checkpoints_dir": './checkpoints',
        "results_dir": '.temp_output',
        "which_epoch": str(epoch),
        "which_direction": 'AtoB',
        "resize_or_crop": '"scale_width"',
    }
def create_bash_cmd_test(opts_test):
    """Build the shell command string that runs CycleGAN's test phase
    with the given option dictionary (env-var style key=value pairs)."""
    parts = ["DATA_ROOT=.temp_input"]
    parts.extend(key + "=" + str(value) for key, value in opts_test.items())
    parts.extend(['th', 'test.lua'])
    return " ".join(parts)
def check_correct_directory():
    """Raise ValueError unless this script lives in the CycleGAN base directory."""
    script_path = os.path.realpath(__file__)
    parent_name = os.path.basename(os.path.dirname(script_path))
    if parent_name != "CycleGAN":
        raise ValueError("Script should be run from CycleGAN base directory.")
def prep_directories():
    """Recreate empty .temp_input/{testA,testB} and .temp_output working dirs,
    wiping any leftovers from a previous run."""
    for stale in ('.temp_input', '.temp_output'):
        if os.path.exists(stale):
            shutil.rmtree(stale)
    for sub in ('testA', 'testB'):
        os.makedirs(os.path.join('.temp_input', sub))
    os.makedirs('.temp_output')
def grab_epochs(model):
    """
    given a model name or a folder path,
    returns an array of available epochs

    Epochs are the leading '_'-separated token of every *.t7 checkpoint file;
    the 'latest' alias is excluded and duplicates are collapsed.
    """
    folder = model if os.path.isdir(model) else os.path.join('checkpoints', model)
    assert os.path.isdir(folder), folder + " not a valid model"
    found = [
        name.split('_')[0]
        for name in os.listdir(folder)
        if name.split('.')[-1] == "t7"
    ]
    return list(set(epoch for epoch in found if epoch != 'latest'))
def test(img, opts):
    """
    performs a test inference on img, saves to a temp directory
    returns the stylized image
    """
    prep_directories()
    # CycleGAN's test phase expects an image in both testA and testB.
    for dir_ in ['testA', 'testB']:
        imsave(os.path.join('.temp_input', dir_, 'img.png'), img)
    # run the bash command for test phase of CycleGAN
    cmd = create_bash_cmd_test(opts)
    start = time.time()
    # NOTE(review): shell=True on a constructed string — fine only for trusted opts.
    process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    process.wait()
    print("Stylizing complete. Time elapsed:", time.time() - start)
    # read image back into python (CycleGAN writes fake_B under the results dir)
    path = os.path.join('.temp_output', opts['name'], str(opts['which_epoch']) + "_test", "images", "fake_B", "img.png")
    stylized_img = imread(path)
    return stylized_img
def stylize_image_all_epochs(img_path, output_dir, model):
    """
    processes an image with a model at all available epochs

    Writes one file per epoch named <image>-<model>-epoch-<epoch>.png
    into output_dir.
    """
    imname = os.path.split(img_path)[1].split('.')[0]
    os.makedirs(output_dir, exist_ok=True)
    img = imread(img_path)
    available_epochs = grab_epochs(model)
    for epoch in tqdm(available_epochs):
        opts = create_options(model, epoch)
        stylized_img = test(img, opts)
        # ".png" is appended outside os.path.join — the resulting path is still correct
        imsave(os.path.join(output_dir, imname + "-" + model + "-epoch-" + str(epoch)) + ".png", stylized_img)
def stylize_video(vid_path, out_path, model, epoch):
    """
    stylizes all frames of a video

    Every other frame is run through the model (the in-between frames are
    passed through unchanged) and the result is written to out_path at 30fps.
    Fix: removed the leftover debug `break` that stopped 10 frames early and
    truncated every output video (it was explicitly marked for removal).
    """
    video = imageio.get_reader(vid_path, 'mpeg')
    writer = imageio.get_writer(out_path, fps=30)
    opts = create_options(model, epoch)
    # TODO: don't hardcode 30fps downsampling
    for i, frame in enumerate(video):
        if i % 2 == 0:
            frame = test(np.array(frame), opts)
        writer.append_data(frame)
        if i % 10 == 0:
            print(i, "of", len(video), "frames done.")
    writer.close()
def repeat_stylization(img_path, out_dir, n_iter, model, epoch):
    """
    Repeatedly applies a style to an image

    Feeds the output of each stylization pass back in as the next input and
    saves every intermediate result to out_dir.

    Fixes: the original concatenated the int loop index into the filename
    ("-iter" + i -> TypeError), passed no image to imsave, and omitted a
    file extension.
    """
    fname = os.path.splitext(img_path)[0].split("/")[-1]
    img = imread(img_path)
    os.makedirs(out_dir, exist_ok=True)
    opts = create_options(model, epoch)
    for i in range(n_iter):
        img = test(img, opts)
        out_name = fname + "-" + model + "-" + str(epoch) + "-iter" + str(i) + ".png"
        imsave(os.path.join(out_dir, out_name), img)
def stylize_dir_hacky(input_dir, output_dir):
    """
    this is horrible and last minute
    temporary function to perform stylization
    images: applies 3 available styles at 5 different epochs
        the bean - pop art at all epochs
        northwestern - cubism at all epochs
    video: applies 3 styles to each video

    Fix: the per-epoch image save concatenated the int `epoch` into the
    filename, raising TypeError on every iteration; the bare except silently
    swallowed it, so no per-epoch image was ever written.  Now uses
    str(epoch) and only catches Exception (best-effort skip of bad files).
    """
    models = ['cubism_v2', 'impressionism', 'pop_art']
    epochs_img = [50, 100, 150, 200]

    files = os.listdir(input_dir)
    files = [f for f in files if not f[0] == "."]
    os.makedirs(output_dir)

    for file in files:
        filename = file.split(".")[0]
        output_subdir = os.path.join(output_dir, filename + "-stylized")
        os.makedirs(output_subdir, exist_ok=True)
        print("Stylizing", file, "\nSaving to", output_subdir)

        # Videos
        if ".mp4" in file:
            for model in models:
                print("Applying", model, "to", file, ", saving to")
                stylize_video(vid_path=os.path.join(input_dir, file),
                              out_path=os.path.join(output_subdir, file + '-' + model + '.mp4'),
                              model=model,
                              epoch=200)
        # Photos
        else:
            # Images, all epochs, all models
            if file in ['northwestern.jpeg', 'the_bean.jpeg']:
                output_subdir_all_epochs = os.path.join(output_dir, file.split(".")[0] + "-all-epochs")
                os.makedirs(output_subdir_all_epochs, exist_ok=True)
                for model in models:
                    print("Applying", model, "to", file, "all epochs", "\nSaving to", output_subdir_all_epochs)
                    stylize_image_all_epochs(img_path=os.path.join(input_dir, file),
                                             output_dir=output_subdir_all_epochs,
                                             model=model)

            # Images, only certain styles
            for model in models:
                for epoch in epochs_img:
                    try:
                        img = imread(os.path.join(input_dir, file))
                        opts = create_options(model, epoch)
                        stylized_img = test(img, opts)
                        imsave(os.path.join(output_subdir, filename + "-" + model + "-epoch-" + str(epoch) + ".png"), stylized_img)
                    except Exception:
                        # best-effort: skip files/epochs that fail, keep going
                        pass
def stylize_image_all_styles(img_path, models):
    """Placeholder — not implemented (intended to apply every model to one image)."""
    pass
if __name__ == "__main__":
stylize_dir_hacky("input_5-28-17", "output_5-28-17")
| chang/DeepPainting | train/test_cyclegan.py | test_cyclegan.py | py | 6,948 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.realpath",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_num... |
26552246319 | #!/usr/bin/env python3
import fnmatch
import os
import re
import ntpath
import sys
import argparse
def get_private_declare(content):
    """Collect every private variable name declared in an SQF source string.

    Declarations come from three places, concatenated in this order:
    `private ...` statements, params/macro declarations
    (params [...], PARAMS_n, EXPLODE_n_PVT, DEFAULT_PARAM, KEY_PARAM,
    IGNORE_PRIVATE_WARNING), and `local _name = ...` assignments.
    Each group is de-duplicated and sorted independently.
    """
    ident_re = re.compile('(?<![_a-zA-Z0-9])(_[a-zA-Z0-9]*?)[ ,\}\]\)";]')

    def idents_from(statement_pattern):
        # Concatenate all matching statements, then pull out the _identifiers.
        statements = sorted(set(re.compile(statement_pattern).findall(content)))
        return sorted(set(ident_re.findall(''.join(statements))))

    declared = []
    declared += idents_from('private.*')
    declared += idents_from('params \[.*\]|PARAMS_[0-9].*|EXPLODE_[0-9]_PVT.*|DEFAULT_PARAM.*|KEY_PARAM.*|IGNORE_PRIVATE_WARNING.*')
    declared += sorted(set(re.compile('(?i)[\s]*local[\s]+(_[\w\d]*)[\s]*=.*').findall(content)))
    return declared
def check_privates(filepath):
    """Check one SQF file for underscore variables used without declaration.

    Prints the file path, a ready-to-paste private["..."] statement and the
    offending names when anything is missing.  Returns 1 if the file has at
    least one undeclared private, else 0.

    Fixes: removed the dead inner helpers pushClosing/popClosing, which
    referenced undefined names (closingStack, closing, Literal — pyparsing
    leftovers) and would have raised NameError if ever called; removed the
    duplicated `priv_use = []` initialisation.
    """
    bad_count_file = 0

    with open(filepath, 'r') as file:
        content = file.read()

    # Regex search for every _identifier used in an expression context.
    srch = re.compile('(?<![_a-zA-Z0-9])(_[a-zA-Z0-9]*?)[ =,\^\-\+\/\*\%\}\]\)";]')
    priv_use = sorted(set(srch.findall(content)))

    # Private declaration search
    priv_declared = get_private_declare(content)

    # Engine-provided magic variables are implicitly defined everywhere.
    for magic in ('_this', '_x', '_forEachIndex', '_foreachIndex', '_foreachindex'):
        if magic in priv_declared: priv_declared.remove(magic)
        if magic in priv_use: priv_use.remove(magic)

    # Case-insensitive diff: used but never declared.
    missing = []
    for s in priv_use:
        if s.lower() not in map(str.lower, priv_declared):
            if s.lower() not in map(str.lower, missing):
                missing.append(s)

    if len(missing) > 0:
        print (filepath)
        # e.g. private["_a", "_b"];
        private_output = 'private["' + '", "'.join(missing) + '"];'
        print (private_output)
        for bad_priv in missing:
            print ('\t' + bad_priv)
        bad_count_file = bad_count_file + 1

    return bad_count_file
def main():
    """Walk ../addons (optionally a single module) and report every SQF file
    that uses undeclared private variables; print the total bad-file count."""
    print("#########################")
    print("# Search your Privates #")
    print("#########################")

    sqf_list = []
    bad_count = 0

    parser = argparse.ArgumentParser()
    parser.add_argument('-m','--module', help='only search specified module addon folder', required=False, default=".")
    args = parser.parse_args()

    # Collect every *.sqf under ../addons/<module> (default '.' scans all modules).
    for root, dirnames, filenames in os.walk('../addons' + '/' + args.module):
        for filename in fnmatch.filter(filenames, '*.sqf'):
            sqf_list.append(os.path.join(root, filename))

    for filename in sqf_list:
        bad_count = bad_count + check_privates(filename)

    print ("Bad Count {0}".format(bad_count))
if __name__ == "__main__":
main()
| acemod/ACE3 | tools/search_privates.py | search_privates.py | py | 4,143 | python | en | code | 966 | github-code | 6 | [
{
"api_name": "re.compile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 30,
... |
# -*- coding: UTF-8 -*-
# Baidu face detection API docs: https://ai.baidu.com/ai-doc/FACE/ek37c1qiz#%E4%BA%BA%E8%84%B8%E6%A3%80%E6%B5%8B
from aip import AipFace
from config import BAIDU_ID, BAIDU_KEY, BAIDU_SECRET_KEY

'''
百度人脸识别
优点:可免费使用,个人账户的限制为2QPS,企业账户的限制为10QPS
'''
# (Baidu face recognition — free tier: 2 QPS personal / 10 QPS enterprise.)

# App credentials, loaded from config.py.
""" 你的 APPID AK SK """
APP_ID = BAIDU_ID
API_KEY = BAIDU_KEY
SECRET_KEY = BAIDU_SECRET_KEY

client = AipFace(APP_ID, API_KEY, SECRET_KEY)

# Usage reference kept from the SDK docs: detect(image, imageType[, options]).
"""
image = "取决于image_type参数,传入BASE64字符串或URL字符串或FACE_TOKEN字符串"
imageType = "BASE64"

# 调用人脸检测
client.detect(image, imageType)

# 如果有可选参数
options = dict()
options["face_field"] = "age"
options["max_face_num"] = 2
options["face_type"] = "LIVE"
options["liveness_control"] = "LOW"

# 带参数调用人脸检测
client.detect(image, imageType, options)
"""

if __name__ == "__main__":
    # Demo: detect one face by URL and print selected attributes.
    url = "https://smartracing.oss-cn-hangzhou.aliyuncs.com/shared/images/profiles/full/1571196895035.jpg"
    options = {
        "face_field": "age,beauty,expression,face_shape,emotion"
    }
    res = client.detect(url, "URL", options)
    # NOTE(review): assumes a successful response with >=1 face; a failed call
    # has no 'result' key and would raise here — confirm error handling upstream.
    face = res['result']['face_list'][0]
    # Mappings from API enum values to Chinese display labels (runtime data).
    expression = {"none": "不笑", "smile": "微笑", "laugh": "大笑"}
    face_shape = {"square":" 正方形", "triangle": "三角形", "oval": "椭圆", "heart": "心形", "round": "圆形"}
    emotion = {"angry": "愤怒", "disgust": "厌恶", "fear": "恐惧", "happy": "高兴", "sad": "伤心", "surprise": "惊讶", "neutral": "无情绪"}
    print(f"检测年龄:{face['age']},颜值:{face['beauty']},表情:{expression.get(face['expression']['type'])},脸型:{face_shape.get(face['face_shape']['type'])}, 情绪:{emotion.get(face['emotion']['type'])}")
{
"api_name": "config.BAIDU_ID",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "config.BAIDU_KEY",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "config.BAIDU_SECRET_KEY",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "aip.AipFace"... |
10192615887 |
import numpy as np
import matplotlib.pyplot as plt

# data I/O
filename = 'dataset.txt'
file = open(filename, 'r')
data = file.read()
# use set() to count the vocab size
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
print ('data has %d characters, %d unique.' % (data_size, vocab_size))
# dictionary to convert char to idx, idx to char
char_to_ix = { ch:i for i,ch in enumerate(chars) }
ix_to_char = { i:ch for i,ch in enumerate(chars) }

# hyperparameters
hidden_size = 50 # size of hidden layer of neurons
seq_length = 50 # number of steps to unroll the RNN for
learning_rate = 1e-1

# model parameters, initialized to small random values (0.01 scale)
Wxh = np.random.randn(hidden_size, vocab_size)*0.01 # input to hidden
Whh = np.random.randn(hidden_size, hidden_size)*0.01 # hidden to hidden
Why = np.random.randn(vocab_size, hidden_size)*0.01 # hidden to output
bh = np.zeros((hidden_size, 1)) # hidden bias
by = np.zeros((vocab_size, 1)) # output bias
def lossFun(inputs, targets, hprev):
    """One forward/backward pass over a training chunk.

    inputs, targets: lists of integer character ids (targets is inputs
    shifted by one position); hprev: (hidden_size, 1) hidden state carried
    over from the previous chunk.  Returns the total cross-entropy loss,
    the gradients for all five parameter tensors, and the final hidden
    state to seed the next chunk.
    """
    xs, hs, ys, ps = {}, {}, {}, {}
    ## record each hidden state of
    hs[-1] = np.copy(hprev)
    loss = 0
    # forward pass for each training data point
    for t in range(len(inputs)):
        xs[t] = np.zeros((vocab_size, 1)) # encode in 1-of-k representation
        xs[t][inputs[t]] = 1
        ## hidden state, using previous hidden state hs[t-1]
        hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh)
        ## unnormalized log probabilities for next chars
        ys[t] = np.dot(Why, hs[t]) + by
        ## probabilities for next chars, softmax
        ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t]))
        ## softmax (cross-entropy loss)
        loss += -np.log(ps[t][targets[t], 0])
    # backward pass: compute gradients going backwards
    dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
    dbh, dby = np.zeros_like(bh), np.zeros_like(by)
    dhnext = np.zeros_like(hs[0])
    for t in reversed(range(len(inputs))):
        ## compute derivative of error w.r.t the output probabilites
        ## dE/dy[j] = y[j] - t[j]
        dy = np.copy(ps[t])
        dy[targets[t]] -= 1 # backprop into y
        ## output layer doesnot use activation function, so no need to compute the derivative of error with regard to the net input
        ## of output layer.
        ## then, we could directly compute the derivative of error with regard to the weight between hidden layer and output layer.
        ## dE/dy[j]*dy[j]/dWhy[j,k] = dE/dy[j] * h[k]
        dWhy += np.dot(dy, hs[t].T)
        dby += dy
        ## backprop into h
        ## derivative of error with regard to the output of hidden layer
        ## derivative of H, come from output layer y and also come from H(t+1), the next time H
        dh = np.dot(Why.T, dy) + dhnext
        ## backprop through tanh nonlinearity
        ## derivative of error with regard to the input of hidden layer
        ## dtanh(x)/dx = 1 - tanh(x) * tanh(x)
        dhraw = (1 - hs[t] * hs[t]) * dh
        dbh += dhraw
        ## derivative of the error with regard to the weight between input layer and hidden layer
        dWxh += np.dot(dhraw, xs[t].T)
        dWhh += np.dot(dhraw, hs[t-1].T)
        ## derivative of the error with regard to H(t+1)
        ## or derivative of the error of H(t-1) with regard to H(t)
        dhnext = np.dot(Whh.T, dhraw)
    for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
        np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
    return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]
## given a hidden RNN state, and a input char id, predict the coming n chars
def sample(h, seed_ix, n):
    """Sample n character indices from the model, starting from seed_ix.

    h is a (hidden_size, 1) hidden state; each sampled character is fed
    back in as the next input.  Uses the module-level weights.
    """
    one_hot = np.zeros((vocab_size, 1))
    one_hot[seed_ix] = 1
    sampled = []
    for _ in range(n):
        # advance the recurrent state with the current one-hot input
        h = np.tanh(np.dot(Wxh, one_hot) + np.dot(Whh, h) + bh)
        logits = np.dot(Why, h) + by
        # softmax over the vocabulary
        probs = np.exp(logits) / np.sum(np.exp(logits))
        # draw the next character id from the predicted distribution
        idx = np.random.choice(range(vocab_size), p=probs.ravel())
        # the draw becomes the next step's input
        one_hot = np.zeros((vocab_size, 1))
        one_hot[idx] = 1
        sampled.append(idx)
    return sampled
## iterator counter
n = 0
## data pointer
p = 0
# Adagrad accumulators, one per parameter tensor
mWxh, mWhh, mWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
mbh, mby = np.zeros_like(bh), np.zeros_like(by) # memory variables for Adagrad
smooth_loss = -np.log(1.0/vocab_size)*seq_length # loss at iteration 0
# history buffers for plotting: iteration numbers and smoothed losses
l=[]
sl=[]
## main loop
# NOTE(review): this loop never terminates on its own (`while True` with no
# break), so everything after it only runs if the loop is interrupted.
while True:
    # prepare inputs (we're sweeping from left to right in steps seq_length long)
    if p + seq_length + 1 >= len(data) or n == 0:
        # reset RNN memory
        ## hprev is the hiddden state of RNN
        hprev = np.zeros((hidden_size, 1))
        # go from start of data
        p = 0
    inputs = [char_to_ix[ch] for ch in data[p : p + seq_length]]
    targets = [char_to_ix[ch] for ch in data[p + 1 : p + seq_length + 1]]
    # sample from the model now and then
    if n % 1000 == 0:
        sample_ix = sample(hprev, inputs[0], 5000)
        txt = ''.join(ix_to_char[ix] for ix in sample_ix)
        print ('---- sample -----')
        print ('----\n %s \n----' % (txt, ))
    # forward seq_length characters through the net and fetch gradient
    loss, dWxh, dWhh, dWhy, dbh, dby, hprev = lossFun(inputs, targets, hprev)
    ## author using Adagrad(a kind of gradient descent)
    # exponential moving average of the loss, for smoother progress reports
    smooth_loss = smooth_loss * 0.999 + loss * 0.001
    if n % 1000 == 0:
        print ('iter %d, loss: %f' % (n, smooth_loss)) # print progress
        l.append(n)
        sl.append(smooth_loss)
    # Adagrad update: per-parameter learning rate scaled by accumulated
    # squared gradients
    for param, dparam, mem in zip([Wxh, Whh, Why, bh, by],
                                  [dWxh, dWhh, dWhy, dbh, dby],
                                  [mWxh, mWhh, mWhy, mbh, mby]):
        mem += dparam * dparam
        param += -learning_rate * dparam / np.sqrt(mem + 1e-8) # adagrad update
    p += seq_length # move data pointer
    n += 1 # iteration counter
# gradient checking
from random import uniform
def gradCheck(inputs, target, hprev):
    """Numerically verify the analytic gradients produced by lossFun.

    Perturbs `num_checks` randomly chosen weights of each parameter tensor
    by +/-delta, recomputes the loss, and prints the numerical gradient,
    the analytic gradient and their relative error (should be ~1e-7 or
    smaller when backprop is correct).
    """
    global Wxh, Whh, Why, bh, by
    num_checks, delta = 10, 1e-5
    # Bug fix: the original called lossFun with the *global* `targets`,
    # silently ignoring this function's `target` parameter.
    _, dWxh, dWhh, dWhy, dbh, dby, _ = lossFun(inputs, target, hprev)
    for param, dparam, name in zip([Wxh, Whh, Why, bh, by],
                                   [dWxh, dWhh, dWhy, dbh, dby],
                                   ['Wxh', 'Whh', 'Why', 'bh', 'by']):
        s0 = dparam.shape
        s1 = param.shape
        # Bug fix: the original tested `s0 == s1`, i.e. it reported
        # "dims dont match" exactly when the shapes *did* match.
        if s0 != s1:
            print('Error dims dont match: %s and %s.' % (s0, s1))
        print (name)
        for i in range(num_checks):
            ri = int(uniform(0, param.size))
            # central-difference estimate of dLoss/dparam.flat[ri]
            old_val = param.flat[ri]
            param.flat[ri] = old_val + delta
            cg0, _, _, _, _, _, _ = lossFun(inputs, target, hprev)
            param.flat[ri] = old_val - delta
            cg1, _, _, _, _, _, _ = lossFun(inputs, target, hprev)
            param.flat[ri] = old_val  # restore the original weight
            grad_analytic = dparam.flat[ri]
            grad_numerical = (cg0 - cg1) / ( 2 * delta )
            # relative error; may print inf if both gradients are ~0
            rel_error = abs(grad_analytic - grad_numerical) / abs(grad_numerical + grad_analytic)
            print ('%f, %f => %e ' % (grad_numerical, grad_analytic, rel_error))
# NOTE(review): only reached if the infinite training loop above is removed
# or interrupted.
gradCheck(inputs, targets, hprev)
# Bug fix: the original called plt.plot(loss, smoothloss) — `smoothloss`
# is undefined (NameError) and `loss` is a scalar.  The recorded curves
# are `l` (iteration numbers) vs `sl` (smoothed losses).
plt.plot(l, sl, label='Epoch Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('EPOCH _LOSS PLOT')
plt.legend()
plt.show() | shesikiran03/science-fiction-writer | Task-1 .py | Task-1 .py | py | 7,092 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.random.randn",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randn",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.random"... |
8215468580 | import cv2
import numpy as np
from PIL import Image
# Load the image
img = cv2.imread('ParkingLot.jpg')

# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Apply edge detection
edges = cv2.Canny(gray, 50, 150, apertureSize=3)

# Apply Hough line detection
lines = cv2.HoughLines(edges, rho=1, theta=np.pi/180, threshold=100)
# Bug fix: HoughLines returns None (not an empty array) when no line meets
# the threshold; iterating None below would raise TypeError.
if lines is None:
    lines = []

# Cluster the detected lines: lines whose (rho, theta) are close to a
# cluster's first member join that cluster, otherwise start a new one.
line_clusters = []
for line in lines:
    rho, theta = line[0]
    if len(line_clusters) == 0:
        line_clusters.append([(rho, theta)])
    else:
        found_cluster = False
        for cluster in line_clusters:
            if abs(cluster[0][0] - rho) < 50 and abs(cluster[0][1] - theta) < np.pi/36:
                cluster.append((rho, theta))
                found_cluster = True
                break
        if not found_cluster:
            line_clusters.append([(rho, theta)])

# Find intersection points of every line pair within each cluster by
# solving the 2x2 linear system of the two polar line equations.
intersections = []
for cluster in line_clusters:
    for i in range(len(cluster)):
        rho1, theta1 = cluster[i]
        for j in range(i+1, len(cluster)):
            rho2, theta2 = cluster[j]
            A = np.array([[np.cos(theta1), np.sin(theta1)], [np.cos(theta2), np.sin(theta2)]])
            b = np.array([rho1, rho2])
            x, y = np.linalg.solve(A, b)
            # keep only intersections inside the image bounds
            if x >= 0 and x < img.shape[1] and y >= 0 and y < img.shape[0]:
                intersections.append((int(x), int(y)))

# Find parking space polygons: every combination of four intersection
# points whose sides are long enough and whose area is large enough.
# NOTE(review): this is O(n^4) in the number of intersections and the four
# points are taken in arbitrary order — acceptable for small images.
polygons = []
for i in range(len(intersections)):
    for j in range(i+1, len(intersections)):
        for k in range(j+1, len(intersections)):
            for l in range(k+1, len(intersections)):
                p1, p2, p3, p4 = intersections[i], intersections[j], intersections[k], intersections[l]
                sides = [cv2.norm(np.array(p1) - np.array(p2)),
                         cv2.norm(np.array(p2) - np.array(p3)),
                         cv2.norm(np.array(p3) - np.array(p4)),
                         cv2.norm(np.array(p4) - np.array(p1))]
                if all(side > 30 for side in sides):
                    area = cv2.contourArea(np.array([p1, p2, p3, p4]))
                    if area > 1000:
                        polygons.append([p1, p2, p3, p4])

# Draw the polygons on the image
for i, parking_space in enumerate(polygons):
    pts = np.array(parking_space, np.int32)
    pts = pts.reshape((-1, 1, 2))
    cv2.polylines(img, [pts], True, (0, 255, 0), thickness=2)

# Display the image with polygons drawn
cv2.imshow('Parking Lot with Polygons', img)
cv2.waitKey(0)
cv2.destroyAllWindows() | TongshenH/AuE8200_perception | hw3/test.py | test.py | py | 2,541 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "cv2.Canny",
"line_nu... |
40960795543 | import pytest
from collections import Counter
from ottoscript.base import OttoBase, OttoContext
from ottoscript.datatypes import (Number,
String,
Var,
Entity,
List,
Dict,
Target,
Area,
Input)
@pytest.mark.asyncio
async def test_numeric():
    """A numeric literal parses and evaluates to its value."""
    parsed = Number().parseString("15")[0]
    assert await parsed.eval() == 15
@pytest.mark.asyncio
async def test_string():
    """A quoted string literal parses and evaluates to its contents."""
    parsed = String().parseString("'foo'")[0]
    assert await parsed.eval() == 'foo'
@pytest.mark.asyncio
async def test_var_no_fetch():
    """A variable token parses and keeps its @-prefixed name."""
    parsed = Var().parseString("@foo")[0]
    assert parsed.name == '@foo'
@pytest.mark.asyncio
async def test_var_with_attributes():
    """Vars with :attribute suffixes resolve against entity and dict vars."""
    ctx = OttoContext()
    ctx.update_vars({'@foo_entity': Entity().parseString("ship.crew")[0]})
    ctx.update_vars({'@foo_dict': Dict().parseString("(one=1, two=2)")[0]})
    OttoBase.set_context(ctx)

    var = Var().parseString("@foo_entity:name")[0]
    assert await var.eval() == "ship.crew"

    var = Var().parseString("@foo_entity:brightness")[0]
    assert await var.eval() == 1
    # fetch() returns the underlying entity with the attribute appended
    assert var.fetch().name == "ship.crew.brightness"

    var = Var().parseString("@foo_entity:number")[0]
    assert await var.eval() == 1

    for attr, expected in (("one", 1), ("two", 2)):
        var = Var().parseString(f"@foo_dict:{attr}")[0]
        assert await var.eval() == expected
@pytest.mark.asyncio
async def test_entity():
    """Entities parse with and without attribute suffixes."""
    cases = [('ship.crew', 'ship.crew', 'ship.crew'),
             ('ship.crew:uniform', 'ship.crew.uniform', 1)]
    for source, expected_name, expected_value in cases:
        entity = Entity().parseString(source)[0]
        assert entity.name == expected_name
        assert await entity.eval() == expected_value
@pytest.mark.asyncio
async def test_list():
    """A mixed list parses into the right element types, bare or in parens."""
    ctx = OttoContext()
    ctx.update_vars({'@foo': 'foostring'})
    OttoBase.set_context(ctx)
    source = "'test1', 27, ship.crew, @foo"
    expected_types = Counter([type(String().parseString('"test1"')[0]),
                              type(Number().parseString('27')[0]),
                              type(Entity().parseString('ship.crew')[0]),
                              type(Var().parseString('@foo')[0])])
    # the same content must parse identically with and without parentheses
    for text in (source, f"({source})"):
        parsed = List().parseString(text)[0]
        assert Counter(type(item) for item in parsed.contents) == expected_types
@pytest.mark.asyncio
async def test_list_single():
    """Verify a single bare item still parses into a list of contents."""
    # Fix: the original docstring said "parse a number" (copy-paste error),
    # and used `type(x) == expected` — isinstance is the idiomatic check.
    parsed = List().parseString("ship.crew")[0]
    assert isinstance(parsed.contents, list)
@pytest.mark.asyncio
async def test_dictionary():
    """A dict literal with mixed value kinds evaluates to a plain dict."""
    ctx = OttoContext()
    ctx.update_vars({'@foo': 'foostring'})
    OttoBase.set_context(ctx)
    source = "(first = 1, second = 'foo', third = ship.crew, fourth = @foo)"
    parsed = Dict().parseString(source)[0]
    assert await parsed.eval() == {'first': 1,
                                   'second': 'foo',
                                   'third': 'ship.crew',
                                   'fourth': 'foostring'}
@pytest.mark.asyncio
async def test_target():
    """Entity and AREA targets evaluate to entity_id/area_id dicts."""
    ctx = OttoContext()
    ctx.update_vars({'@area': Area().parseString('kitchen')[0]})
    ctx.update_vars({'@arealist': List(Area()).parseString('kitchen, living_room')[0]})
    OttoBase.set_context(ctx)
    cases = (
        ('ship.crew, ship.phasers',
         {'entity_id': ['ship.crew', 'ship.phasers'], 'area_id': []}),
        ('AREA kitchen, living_room',
         {'area_id': ['kitchen', 'living_room'], 'entity_id': []}),
        ('AREA @area',
         {'area_id': ['kitchen'], 'entity_id': []}),
        ('AREA @arealist',
         {'area_id': ['kitchen', 'living_room'], 'entity_id': []}),
    )
    for source, expected in cases:
        target = Target().parseString(source)[0]
        assert await target.eval() == expected
@pytest.mark.asyncio
async def test_input():
    """Typed Input parsers accept literals, vars and entities alike."""
    ctx = OttoContext()
    ctx.update_vars({'@foostring': String().parseString("'foostring'")[0],
                     '@foonumber': Number().parseString("30.0")[0]})
    OttoBase.set_context(ctx)
    # (input type, source text, expected evaluated value)
    cases = [
        ("text", "'foostring'", "foostring"),
        ("text", "@foostring", "foostring"),
        ("text", "foo.string", "foo.string"),
        ("numeric", "15", 15.0),
        ("numeric", "@foonumber", 30.0),
        ("numeric", "foo.number:attr", 1.0),
        ("any", "'foostring'", "foostring"),
        ("any", "@foostring", "foostring"),
        ("any", "foo.string", "foo.string"),
        ("any", "15", 15.0),
        ("any", "@foonumber", 30.0),
    ]
    for input_type, source, expected in cases:
        parsed = Input(input_type).parseString(source)[0]
        print(source)
        assert await parsed.eval() == expected
| qui3xote/otto | tests/test_datatypes/datatype_test.py | datatype_test.py | py | 6,300 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "ottoscript.datatypes.Number",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "ottoscript.datatypes.String",
"line_number": 28,
"usage_type": "call"
},
{
"api_name... |
27855486756 | import numpy as np
import torch
import os
from collections import OrderedDict
from torch.autograd import Variable
import itertools
import util.util as util
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
import sys
# TODO (1) remove CycleLoss?
# We have feat_loss_ArecA, which computes the feature loss between A and recreated A.
# It's kind of redundant with CycleLoss, which computes the pixelwise L1 loss between those two.
# But then again, we might want to keep both, so that we keep both similar
# in terms of "style" and "pixelwise resemblence".
# TODO use MSELoss of Pytorch?
def mse_loss(input, target):
    """Mean squared error: sum of squared differences over element count."""
    squared_error = (input - target) ** 2
    return torch.sum(squared_error) / input.data.nelement()
def printnorm(self, module_input, output):
    """Forward-hook helper: print which module ran and its output norm.

    module_input is the tuple of packed inputs (unused); output is a
    Variable whose .data tensor's norm is reported.
    """
    print('')
    print('Inside ' + self.__class__.__name__ + ' forward')
    print('output norm:', output.data.norm())
def printgradnorm(self, grad_input, grad_output):
    """Backward-hook helper: print which module ran and its first
    input-gradient norm (grad_output is unused)."""
    print('Inside ' + self.__class__.__name__ + ' backward')
    print('grad_input norm:', grad_input[0].data.norm())
class CycleWGANModel(BaseModel):
def name(self):
return 'CycleWGANModel'
def initialize(self, opt):
BaseModel.initialize(self, opt)
self.one = self.Tensor([1])
self.mone = self.one * -1
if opt.which_model_netD != 'dcgan':
self.ones = torch.ones(1, 19, 19) # FIXME compute size from input and architecture of netD
self.ones = self.ones.type(new_type=self.Tensor)
# init G related losses to 0 to print in the first few iterations
self.loss_G_A = Variable(self.Tensor([0]))
self.loss_G_B = Variable(self.Tensor([0]))
self.loss_idt_A = Variable(self.Tensor([0]))
self.loss_idt_B = Variable(self.Tensor([0]))
self.loss_cycle_A = Variable(self.Tensor([0]))
self.loss_cycle_B = Variable(self.Tensor([0]))
self.feat_loss_AfB = Variable(self.Tensor([0]))
self.feat_loss_BfA = Variable(self.Tensor([0]))
self.feat_loss_fArecB = Variable(self.Tensor([0]))
self.feat_loss_fBrecA = Variable(self.Tensor([0]))
self.feat_loss_ArecA = Variable(self.Tensor([0]))
self.feat_loss_BrecB = Variable(self.Tensor([0]))
self.feat_loss = Variable(self.Tensor([0]))
#self.disp_sumGA = self.loss_G_A.clone() + self.loss_cycle_A.clone()
#self.disp_sumGB = self.loss_G_B.clone() + self.loss_cycle_B.clone()
self.loss_sumGA = Variable(self.Tensor([0]))
self.loss_sumGB = Variable(self.Tensor([0]))
self.rec_A = None
self.rec_B = None
# ----------------------------------------------------------------
nb = opt.batchSize
size = opt.fineSize
self.input_A = self.Tensor(nb, opt.input_nc, size, size)
self.input_B = self.Tensor(nb, opt.output_nc, size, size)
# load/define networks
# The naming conversion is different from those used in the paper
# Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
self.netG_A = networks.define_G(opt.input_nc, opt.output_nc,
opt.ngf, opt.which_model_netG, opt.norm, not opt.no_dropout, self.gpu_ids)
self.netG_B = networks.define_G(opt.output_nc, opt.input_nc,
opt.ngf, opt.which_model_netG, opt.norm, not opt.no_dropout, self.gpu_ids)
if self.isTrain:
use_sigmoid = opt.no_lsgan
self.netD_A = networks.define_D(opt.output_nc, opt.ndf,
opt.which_model_netD,
opt.n_layers_D, opt.norm, use_sigmoid, self.gpu_ids)
self.netD_B = networks.define_D(opt.input_nc, opt.ndf,
opt.which_model_netD,
opt.n_layers_D, opt.norm, use_sigmoid, self.gpu_ids)
if (self.opt.lambda_feat > 0):
self.netFeat = networks.define_feature_network(opt.which_model_feat, self.gpu_ids)
#self.netD_A.model[11].register_forward_hook(printnorm)
#self.netD_A.model[11].register_backward_hook(printgradnorm)
#self.netG_A.register_forward_hook(printnorm)
#self.netG_A.register_backward_hook(printgradnorm)
#self.netD_B.model[11].register_forward_hook(printnorm)
#self.netD_B.model[11].register_backward_hook(printgradnorm)
if not self.isTrain or opt.continue_train:
which_epoch = opt.which_epoch
self.load_network(self.netG_A, 'G_A', which_epoch)
self.load_network(self.netG_B, 'G_B', which_epoch)
if self.isTrain:
self.load_network(self.netD_A, 'D_A', which_epoch)
self.load_network(self.netD_B, 'D_B', which_epoch)
if self.isTrain:
self.old_lr = opt.lr
# create pools of fake images, if pool size > 0
if opt.pool_size > 0:
self.fake_A_pool = ImagePool(opt.pool_size)
self.fake_B_pool = ImagePool(opt.pool_size)
self.fake_A = None
self.fake_B = None
else:
self.fake_A_pool = None
self.fake_B_pool = None
# define loss functions
# Note: use WGAN loss for cases where we use D_A or D_B, otherwise use default loss functions
self.criterionCycle = torch.nn.L1Loss()
self.criterionIdt = torch.nn.L1Loss()
self.criterionFeat = mse_loss
self.criterionWGAN = networks.WGANLoss()
# initialize optimizers
if opt.adam:
self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D_A = torch.optim.Adam(self.netD_A.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D_B = torch.optim.Adam(self.netD_B.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
else:
# in https://github.com/martinarjovsky/WassersteinGAN, only LR is provided to RMSProp
self.optimizer_G = torch.optim.RMSprop(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
lr=opt.lr)
self.optimizer_D_A = torch.optim.RMSprop(self.netD_A.parameters(), lr=opt.lr)
self.optimizer_D_B = torch.optim.RMSprop(self.netD_B.parameters(), lr=opt.lr)
# manage lambdas for perceptual loss
if (self.opt.lambda_feat > 0):
print("sets all lambda_feat* to lambda_feat")
self.opt.lambda_feat_AfB = self.opt.lambda_feat
self.opt.lambda_feat_BfA = self.opt.lambda_feat
self.opt.lambda_feat_fArecB = self.opt.lambda_feat
self.opt.lambda_feat_fBrecA = self.opt.lambda_feat
self.opt.lambda_feat_ArecA = self.opt.lambda_feat
self.opt.lambda_feat_BrecB = self.opt.lambda_feat
print('---------- Networks initialized -------------')
networks.print_network(self.netG_A)
networks.print_network(self.netG_B)
if self.isTrain:
networks.print_network(self.netD_A)
networks.print_network(self.netD_B)
print('-----------------------------------------------')
def set_input(self, input):
AtoB = self.opt.which_direction == 'AtoB'
input_A = input['A' if AtoB else 'B']
input_B = input['B' if AtoB else 'A']
self.input_A.resize_(input_A.size()).copy_(input_A)
self.input_B.resize_(input_B.size()).copy_(input_B)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
self.real_A = Variable(self.input_A)
self.real_B = Variable(self.input_B)
def test(self):
self.real_A = Variable(self.input_A, volatile=True)
self.fake_B = self.netG_A.forward(self.real_A)
self.rec_A = self.netG_B.forward(self.fake_B)
self.real_B = Variable(self.input_B, volatile=True)
self.fake_A = self.netG_B.forward(self.real_B)
self.rec_B = self.netG_A.forward(self.fake_A)
def freeze_discriminators(self, freeze=True):
for p in self.netD_A.parameters():
p.requires_grad = not freeze
for p in self.netD_B.parameters():
p.requires_grad = not freeze
def freeze_generators(self, freeze=True):
for p in self.netG_A.parameters():
p.requires_grad = not freeze
for p in self.netG_B.parameters():
p.requires_grad = not freeze
# get image paths
def get_image_paths(self):
return self.image_paths
def backward_D_basic(self, netD, real, fake):
# compute outputs for real and fake images
outD_real = netD(real)
outD_fake = netD(fake.detach())
#self.disp_outD_real = outD_real.mean()
#self.disp_outD_fake = outD_fake.mean()
wloss = self.criterionWGAN(fake=outD_fake, real=outD_real)
# import pdb; pdb.set_trace()
if self.opt.which_model_netD == 'dcgan':
wloss.backward()
else:
wloss.backward(self.ones)
return outD_real.mean(), outD_fake.mean()
def backward_D_A(self):
#if self.fake_B_pool is None or self.fake_B is None:
self.fake_B = self.netG_A(self.real_A.detach()) # generate a fake image
self.loss_D_A_real, self.loss_D_A_fake = self.backward_D_basic(self.netD_A, self.real_B, self.fake_B)
#self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, self.fake_B)
#else:
# fake_B = self.fake_B_pool.query(self.fake_B)
# self.loss_D_A_real, self.loss_D_A_fake = self.backward_D_basic(self.netD_A, self.real_B, fake_B)
#self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)
def backward_D_B(self):
#if self.fake_A_pool is None or self.fake_A is None:
self.fake_A = self.netG_B(self.real_B.detach())
self.loss_D_B_real, self.loss_D_B_fake = self.backward_D_basic(self.netD_B, self.real_A, self.fake_A)
#self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, self.fake_A)
#else:
# fake_A = self.fake_A_pool.query(self.fake_A)
# self.loss_D_B_real, self.loss_D_B_fake = self.backward_D_basic(self.netD_B, self.real_A, fake_A)
#self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)
def backward_G(self):
# Identity loss
if self.opt.identity > 0:
# G_A should be identity if real_B is fed.
self.idt_A = self.netG_A(self.real_B)
self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * self.opt.lambda_B * self.opt.identity
#self.loss_idt_A = self.criterionWGAN(fake=self.idt_A, real=self.real_B) * lambda_B * lambda_idt
#self.loss_idt_A = self.criterionWGAN(fake=self.idt_A, real=self.real_B) * lambda_idt
# G_B should be identity if real_A is fed.
self.idt_B = self.netG_B(self.real_A)
self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * self.opt.lambda_A * self.opt.identity
#self.loss_idt_B = self.criterionWGAN(fake=self.idt_B, real=self.real_A) * lambda_A * lambda_idt
#self.loss_idt_B = self.criterionWGAN(fake=self.idt_B, real=self.real_A) * lambda_idt
else:
self.loss_idt_A = 0
self.loss_idt_B = 0
# Freeze discriminators so that they are NOT updated
self.freeze_discriminators(True)
# WGAN loss
# D_A(G_A(A))
self.fake_B = self.netG_A(self.real_A)
outD_A_fake = self.netD_A(self.fake_B)
self.loss_G_A = self.criterionWGAN(real=outD_A_fake) # we give as it was a true sample
#self.loss_G_A.backward(retain_graph=True)
# FIXME: Api docs says not to use retain_graph and this can be done efficiently in other ways
# D_B(G_B(B))
self.fake_A = self.netG_B(self.real_B)
outD_B_fake = self.netD_B(self.fake_A)
self.loss_G_B = self.criterionWGAN(real=outD_B_fake)
#self.loss_G_B.backward(retain_graph=True)
# Forward cycle loss
if self.opt.lambda_A != 0:
self.rec_A = self.netG_B(self.fake_B)
self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * self.opt.lambda_A
#self.loss_cycle_A = self.criterionWGAN(fake=self.netD_B(self.rec_A), real=self.netD_B(self.real_A)) * lambda_A
else:
self.loss_cycle_A = 0
# Backward cycle loss
if self.opt.lambda_B != 0:
self.rec_B = self.netG_A(self.fake_A)
self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * self.opt.lambda_B
#self.loss_cycle_B = self.criterionWGAN(fake=self.netD_A(self.rec_B), real=self.netD_A(self.real_B)) * lambda_B
else:
self.loss_cycle_B = 0
self.loss_sumGA = self.loss_G_A + self.loss_cycle_A + self.loss_idt_A
self.loss_sumGB = self.loss_G_B + self.loss_cycle_B + self.loss_idt_B
#self.disp_sumGA = self.loss_G_A.clone() + self.loss_cycle_A.clone()
#self.disp_sumGB = self.loss_G_B.clone() + self.loss_cycle_B.clone()
# Perceptual losses:
if (self.opt.lambda_feat_AfB > 0 and self.opt.lambda_feat_BfA > 0):
self.feat_loss_AfB = self.criterionFeat(self.netFeat(self.real_A), self.netFeat(self.fake_B)) * self.opt.lambda_feat_AfB
self.feat_loss_BfA = self.criterionFeat(self.netFeat(self.real_B), self.netFeat(self.fake_A)) * self.opt.lambda_feat_BfA
# self.feat_loss_AfB = self.criterionWGAN(real=self.netFeat(self.real_A), fake=self.netFeat(self.fake_B)) * lambda_feat_AfB
# self.feat_loss_BfA = self.criterionWGAN(real=self.netFeat(self.real_B), fake=self.netFeat(self.fake_A)) * lambda_feat_BfA
else:
self.feat_loss_AfB = 0
self.feat_loss_BfA = 0
if (self.opt.lambda_feat_fArecB > 0 and self.opt.lambda_feat_fBrecA > 0):
self.feat_loss_fArecB = self.criterionFeat(self.netFeat(self.fake_A), self.netFeat(self.rec_B)) * self.opt.lambda_feat_fArecB
self.feat_loss_fBrecA = self.criterionFeat(self.netFeat(self.fake_B), self.netFeat(self.rec_A)) * self.opt.lambda_feat_fBrecA
# self.feat_loss_fArecB = self.criterionWGAN(self.netFeat(self.fake_A), self.netFeat(self.rec_B)) * lambda_feat_fArecB
# self.feat_loss_fBrecA = self.criterionWGAN(self.netFeat(self.fake_B), self.netFeat(self.rec_A)) * lambda_feat_fBrecA
else:
self.feat_loss_fArecB = 0
self.feat_loss_fBrecA = 0
if (self.opt.lambda_feat_ArecA > 0 and self.opt.lambda_feat_BrecB > 0):
self.feat_loss_ArecA = self.criterionFeat(self.netFeat(self.real_A), self.netFeat(self.rec_A)) * self.opt.lambda_feat_ArecA
self.feat_loss_BrecB = self.criterionFeat(self.netFeat(self.real_B), self.netFeat(self.rec_B)) * self.opt.lambda_feat_BrecB
# self.feat_loss_ArecA = self.criterionWGAN(real=self.netFeat(self.real_A), fake=self.netFeat(self.rec_A)) * lambda_feat_ArecA
# self.feat_loss_BrecB = self.criterionWGAN(real=self.netFeat(self.real_B), fake=self.netFeat(self.rec_B)) * lambda_feat_BrecB
else:
self.feat_loss_ArecA = 0
self.feat_loss_BrecB = 0
# first sum the feat losses
self.feat_loss = self.feat_loss_AfB + self.feat_loss_BfA + self.feat_loss_fArecB \
+ self.feat_loss_fBrecA + self.feat_loss_ArecA + self.feat_loss_BrecB
haveFeatLoss = not (type(self.feat_loss) is int)
# then backprop OTHER losses, with or without retaining the graph
if self.opt.which_model_netD == 'dcgan':
self.loss_sumGA.backward(retain_graph=haveFeatLoss)
self.loss_sumGB.backward(retain_graph=haveFeatLoss)
else:
self.loss_sumGA.backward(self.ones, retain_graph=haveFeatLoss)
self.loss_sumGB.backward(self.ones, retain_graph=haveFeatLoss)
if haveFeatLoss:
self.feat_loss.backward()
# Unfreeze them for the next iteration of optimize_parameters_D()
self.freeze_discriminators(False)
def optimize_parameters_D(self):
# call self.forward outside!
# D_A
self.optimizer_D_A.zero_grad()
self.backward_D_A() # generates the first fake_B for the iteration
self.optimizer_D_A.step()
# D_B
self.optimizer_D_B.zero_grad()
self.backward_D_B() # generates fake_A for the iteration
self.optimizer_D_B.step()
# clip weights for both discriminators
for p in self.netD_A.parameters():
p.data.clamp_(self.opt.clip_lower, self.opt.clip_upper)
for p in self.netD_B.parameters():
p.data.clamp_(self.opt.clip_lower, self.opt.clip_upper)
def optimize_parameters_G(self):
# call self.forward outside!
# G_A and G_B
self.optimizer_G.zero_grad()
self.backward_G()
# print("GRADS A : First conv (mean: %.8f) Last Deconv: (mean: %.8f)" % (self.netG_A.model.model[0].weight.grad.mean(), self.netG_A.model.model[3].weight.grad.mean()))
# print("GRADS B : First conv (mean: %.8f) Last Deconv: (mean: %.8f)" % (self.netG_B.model.model[0].weight.grad.mean(), self.netG_B.model.model[3].weight.grad.mean()))
self.optimizer_G.step()
# print("WEIGHTS A: First conv (mean: %.8f) Last Deconv: (mean: %.8f)" % (self.netG_A.model.model[0].weight.mean(), self.netG_A.model.model[3].weight.mean()))
# print("WEIGHTS B: First conv (mean: %.8f) Last Deconv: (mean: %.8f)" % (self.netG_B.model.model[0].weight.mean(), self.netG_B.model.model[3].weight.mean()))
#print("mean(G_A_LastConvLayer): %.9f mean(G_B_LastConvLayer): %.9f" % (self.netG_A.model[26].weight.mean(), self.netG_B.model[26].weight.mean()))
def get_current_errors(self):
#D_A = self.loss_D_A.data[0]
#D_B = self.loss_D_B.data[0]
G_A = self.loss_G_A.data[0]
G_B = self.loss_G_B.data[0]
if self.opt.which_model_netD != 'dcgan' and type(G_A) == self.Tensor:
G_A = G_A.mean()
G_B = G_B.mean()
D_A_real, D_A_fake = self.loss_D_A_real.data[0], self.loss_D_A_fake.data[0]
D_B_real, D_B_fake = self.loss_D_B_real.data[0], self.loss_D_B_fake.data[0]
#sumGA = self.loss_sumGA.data[0]
#sumGB = self.loss_sumGB.data[0]
#currentErrors = OrderedDict([('D_A', D_A), ('D_B', D_B), ('sumGA', sumGA), ('sumGB', sumGB)])
currentErrors = OrderedDict([('D_A_real', D_A_real), ('D_A_fake', D_A_fake), ('D_B_real', D_B_real), ('D_B_fake', D_B_fake),
('G_A', G_A), ('G_B', G_B)])
if self.loss_cycle_A is not 0:
Cyc_A = self.loss_cycle_A.data[0]
# this is relevant only we use WGAN for CycleLoss
if self.opt.which_model_netD != 'dcgan' and type(Cyc_A) == self.Tensor:
Cyc_A = Cyc_A.mean()
currentErrors['Cyc_A'] = Cyc_A
if self.loss_cycle_B is not 0:
Cyc_B = self.loss_cycle_B.data[0]
# this is relevant only we use WGAN for CycleLoss
if self.opt.which_model_netD != 'dcgan' and type(Cyc_B) == self.Tensor:
Cyc_B = Cyc_B.mean()
currentErrors['Cyc_B'] = Cyc_B
if self.opt.identity > 0.0:
idt_A = self.loss_idt_A.data[0]
idt_B = self.loss_idt_B.data[0]
currentErrors['idt_A'] = idt_A
currentErrors['idt_B'] = idt_B
# feat_AfB = self.feat_loss_AfB.data[0]
# feat_BfA = self.feat_loss_BfA.data[0]
#feat_fArecB = self.feat_loss_fArecB.data[0]
#feat_fBrecA = self.feat_loss_fBrecA.data[0]
#feat_ArecA = self.feat_loss_ArecA.data[0]
#feat_BrecB = self.feat_loss_BrecB.data[0]
featL = self.feat_loss.data[0]
if featL > 0.0:
currentErrors['featL'] = featL
return currentErrors
def get_current_visuals(self):
real_A = util.tensor2im(self.real_A.data)
fake_B = util.tensor2im(self.fake_B.data)
real_B = util.tensor2im(self.real_B.data)
fake_A = util.tensor2im(self.fake_A.data)
currentVisuals = OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('real_B', real_B), ('fake_A', fake_A)])
if self.rec_A is not None:
rec_A = util.tensor2im(self.rec_A.data)
currentVisuals['rec_A'] = rec_A
if self.rec_B is not None:
rec_B = util.tensor2im(self.rec_B.data)
currentVisuals['rec_B'] = rec_B
if self.opt.identity > 0.0:
idt_A = util.tensor2im(self.idt_A.data)
idt_B = util.tensor2im(self.idt_B.data)
currentVisuals['idt_B'] = idt_B
currentVisuals['idt_A'] = idt_A
return currentVisuals
def save(self, label):
self.save_network(self.netG_A, 'G_A', label, self.gpu_ids)
self.save_network(self.netD_A, 'D_A', label, self.gpu_ids)
self.save_network(self.netG_B, 'G_B', label, self.gpu_ids)
self.save_network(self.netD_B, 'D_B', label, self.gpu_ids)
def update_learning_rate(self):
    """Linearly decay the learning rate of all three optimizers by one step.

    Each call subtracts opt.lr / opt.nepoch_decay from the current rate;
    the trailing `self.old_lr = lr` assignment follows this span.
    """
    lrd = self.opt.lr / self.opt.nepoch_decay
    lr = self.old_lr - lrd
    # Apply the same new rate to both discriminators and the generator optimizer.
    for param_group in self.optimizer_D_A.param_groups:
        param_group['lr'] = lr
    for param_group in self.optimizer_D_B.param_groups:
        param_group['lr'] = lr
    for param_group in self.optimizer_G.param_groups:
        param_group['lr'] = lr
    print('update learning rate: %f -> %f' % (self.old_lr, lr))
self.old_lr = lr | amandaullvin/CycleGAN_destreak_MRI | models/cycle_wgan_model.py | cycle_wgan_model.py | py | 23,263 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.sum",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "base_model.BaseModel",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "base_model.BaseModel.initialize",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "base_m... |
3492737799 | """Provides functional layers for the model"""
import numpy as np
import torch
import torch.nn.functional as F
from common_types import Tensor, Union, _float, _int
from torch.types import Device, _size
_opt_arg = Union[_int, _size]
_opt_tensor = Union[Tensor, None]
def conv2d(x: Tensor,
           weight: Tensor,
           bias: _opt_tensor = None,
           device: Device = 'cpu',
           stride: _opt_arg = 1,
           padding: _opt_arg = 0,
           dilation: _opt_arg = 1,
           groups: _int = 1) -> Tensor:
    """Functional 2D convolution that moves the parameters onto `device`.

    Fix: the original unconditionally called `bias.to(device)`, which raised
    AttributeError whenever `bias` was left at its default of None.
    """
    moved_bias = bias.to(device) if bias is not None else None
    return F.conv2d(x, weight.to(device), moved_bias, stride, padding,
                    dilation, groups)
def batchnorm(x: Tensor,
              weight: Tensor = None,
              bias: _opt_tensor = None,
              device: Device = 'cpu',
              running_mean: _opt_tensor = None,
              running_var: _opt_tensor = None,
              training: bool = True,
              eps: _float = 1e-5,
              momentum: _float = 0.1) -> Tensor:
    """Functional batch norm whose statistics always come from the batch.

    Caller-supplied running_mean/running_var are deliberately discarded:
    dummy zero-mean / unit-variance buffers are substituted so no running
    statistics need to be tracked (the "momentum = 1" hack of the original).
    """
    n_features = np.prod(np.array(x.data.size()[1]))
    running_mean = torch.zeros(n_features).to(device)
    running_var = torch.ones(n_features).to(device)
    return F.batch_norm(x, running_mean, running_var, weight, bias,
                        training, momentum, eps)
def leaky_relu(x: Tensor, negative_slope: _float = 0.01) -> Tensor:
    """In-place leaky ReLU (note: mutates `x` and returns it)."""
    inplace = True
    return F.leaky_relu(x, negative_slope, inplace)
def pixel_shuffle(x: Tensor, scale: _int):
    """Rearrange (C*scale^2, H, W) channel blocks into (C, H*scale, W*scale)."""
    upscaled = F.pixel_shuffle(x, scale)
    return upscaled
| RashedDoha/meta-drn-pytorch | model/layers.py | layers.py | py | 1,647 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "common_types.Union",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "common_types._int",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.types._size",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "common_types.Unio... |
11307160967 | from django.conf.urls import url
from .views import Discount_view, Category_view, Product_view, Product_detail_view, Category_detail_view
#These two added for viewsets
# from django.conf.urls import include
from rest_framework.routers import DefaultRouter
from django.urls import path, include
from django.contrib import admin
from . import views
# DRF router is instantiated but no viewsets are registered on it (unused for now).
router = DefaultRouter()

urlpatterns = [
    # JSON API endpoints
    path('api/discounts', Discount_view, name='discount'),
    path('api/categories', Category_view, name='category api'),
    path('api/categories/<int:category_id>', Category_detail_view, name='category detail'),
    path('api/products', Product_view, name='product'),
    path('api/products/<int:product_id>', Product_detail_view, name='product detail api'),
    # Server-rendered storefront pages
    path('', views.homepage, name='home'),
    path('categories', views.categories, name='category'),
    path('categories/<int:category_id>/products', views.products, name='products'),
    path('categories/<int:category_id>/products/<int:product_id>', views.product_detail, name='product detail'),
    path('products/register', views.product_regi, name="product register"),
    path('cart/', views.cart, name='cart'),
    path('about/', views.about, name='about'),
    path('support/', views.support, name='support'),
    # Auth / account pages
    path('signin/', views.signin, name='signin'),
    path('register/', views.register, name='register'),
    path('signout/', views.signout, name='signout'),
    path('account/', views.account, name='account'),
    # Checkout flow
    path('payment/', views.payment, name='payment'),
    path('shipping/', views.shipping, name='shipping'),
    path('application/', views.application, name='application'),
    path('order/', views.order, name='order')
]
| wjbarng/INFO441-Wholesale | wholesale/urls.py | urls.py | py | 1,708 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "views.Discount_view",
"line_number": 13,
"usage_type": "argument"
},
{
"api... |
3345686330 | import argparse
import subprocess
import os.path
import math
def dispatch(out_file, err_file, cmd, go, num_cores=1, num_nodes=1, max_hours=1, memory_in_gb=16):
    """Write a SLURM batch script ('runscript.sh') that runs `cmd` on the GPU
    partition with the requested cores, nodes, wall time and memory, and
    submit it via `sbatch` when `go` is truthy.

    out_file / err_file: paths that receive the job's stdout / stderr.
    """
    with open('runscript.sh', 'w+') as f:
        f.write(
            f"""#!/bin/bash
#SBATCH -n {num_cores} # Number of cores
#SBATCH -N {num_nodes} # Ensure that all cores are on one machine
#SBATCH -t {format_time(max_hours)} # Runtime in D-HH:MM, minimum of 10 minutes
#SBATCH -p gpu # Partition to submit to
#SBATCH --gres=gpu # number of GPUs (here 1; see also --gres=gpu:n)
#SBATCH --mem={gb_to_mb(memory_in_gb)} # Memory pool for all cores (see also --mem-per-cpu)
#SBATCH -o {out_file} # File to which STDOUT will be written, %j inserts jobid
#SBATCH -e {err_file} # File to which STDERR will be written, %j inserts jobid
module load Anaconda3/5.0.1-fasrc01 # Load modules
module load cudnn/7.6.5.32_cuda10.1-fasrc01
module load cuda/10.0.130-fasrc01
source activate gpt2 # Switch to correct conda environment
{cmd} # Run code
"""
        )
    if go:
        subprocess.call(['sbatch', 'runscript.sh'])
def format_time(total_hours):
    """Convert an hour count (possibly fractional) to SLURM's D-HH:MM format."""
    days = total_hours // 24
    hour_frac, hours = math.modf(total_hours % 24)
    minutes = math.ceil(hour_frac * 60.0)
    if minutes == 60:  # rounding pushed us into the next hour ...
        minutes = 0
        hours += 1
    if hours == 24:  # ... which may push us into the next day
        hours = 0
        days += 1
    return f'{int(days)}-{int(hours):02d}:{int(minutes):02d}'
def gb_to_mb(gb):
    """Convert gigabytes to (truncated) megabytes."""
    return int(gb * 1000)
def print_red(string):
    """Print `string` in bright red, then reset the console colour."""
    print(f"\033[1;31;40m{string}\033[0;37;40m")
def print_yellow(string):
    """Print `string` in bright yellow, then reset the console colour."""
    print(f"\033[1;33;40m{string}\033[0;37;40m")
def main():
    """Parse CLI arguments, build the train.py command line and dispatch (or
    dry-run) the corresponding single SLURM job."""
    parser = argparse.ArgumentParser()
    parser.add_argument('run_name', type=str,
                        help="""
                        (str) Base name for output and error files to which SLURM writes results,
                        and ID for storing checkpoints and samples.
                        """)
    parser.add_argument('dataset', type=str,
                        help='(str) Path to dataset for training')
    parser.add_argument('restore_from', type=str,
                        help='(str) Either "latest", "fresh", or a path to a checkpoint file')
    parser.add_argument('--sample_every', default=100, type=int,
                        help='(int) How often to generate samples (every N steps)')
    parser.add_argument('--save_every', default=1000, type=int,
                        help='(int) How often to create model checkpoint (every N steps)')
    parser.add_argument('--go', action='store_true',
                        help='(flag) Submits jobs to cluster if present. Default disabled')
    parser.add_argument('--num_cores', default=1, type=int,
                        help='(int) Number of cores to run on')
    parser.add_argument('--num_nodes', default=1, type=int,
                        help='(int) Number of nodes to run on')
    parser.add_argument('--hours', default=1., type=float,
                        help='(float) Wall clock time to request on SLURM')
    parser.add_argument('--gb_memory', default=16., type=float,
                        help='(float) Memory (in GB) to request')
    args = parser.parse_args()
    basename = args.run_name
    # SLURM stdout/stderr targets derive from the run name.
    out_file = basename + '.txt'
    err_file = basename + '.err.txt'
    cmd = f'python3 train.py --dataset {args.dataset} --restore_from {args.restore_from} --run_name {args.run_name}\
 --sample_every {args.sample_every} --save_every {args.save_every}'
    # If file for a configuration exists, skip over that configuration
    if os.path.exists(out_file) or os.path.exists(err_file):
        print_red(f'{basename} (already exists; skipping)')
    else:
        # Otherwise, generate and run script on cluster
        # Populates 'runscript.sh' file to run specified file
        # on cluster's GPU partition with specified number of nodes, cores, and memory
        # Dispatches 'runscript.sh' to SLURM if '--go' flag was specified in CLI
        print(basename)
        dispatch(out_file=out_file,
                 err_file=err_file,
                 cmd=cmd,
                 go=args.go,
                 num_cores=args.num_cores,
                 num_nodes=args.num_nodes,
                 max_hours=args.hours,
                 memory_in_gb=args.gb_memory)
        if not args.go:
            print_yellow('''
    *** This was just a test! No jobs were actually dispatched.
    *** If the output looks correct, re-run with the "--go" argument.''')
    print(flush=True)
if __name__ == '__main__':
main()
| osiajod/cs205_project | singlenode_parallel/src/cluster_serialtrain.py | cluster_serialtrain.py | py | 4,870 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "subprocess.call",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "math.modf",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"li... |
8765175527 | import bpy
import csv
import os
from bpy import context
import builtins as __builtin__
def console_print(*args, **kwargs):
    """Echo `args` to every open Blender Python-console area.

    Builds a context-override dict for each CONSOLE area so that
    bpy.ops.console.scrollback_append targets that console.
    """
    for a in context.screen.areas:
        if a.type == 'CONSOLE':
            c = {}
            c['area'] = a
            c['space_data'] = a.spaces.active
            c['region'] = a.regions[-1]  # the console's drawing region
            c['window'] = context.window
            c['screen'] = context.screen
            s = " ".join([str(arg) for arg in args])
            for line in s.split("\n"):
                bpy.ops.console.scrollback_append(c, text=line)
def print(*args, **kwargs):
    """Shadow the builtin print: write to Blender consoles AND the system console."""
    console_print(*args, **kwargs) # to py consoles
    __builtin__.print(*args, **kwargs) # to system console
def importLogos():
    """Import Team1..Team21 logo PNGs as image planes and park each on its row.

    NOTE(review): the directory string has no leading '/' ("Users/...") —
    presumably it should be an absolute path; confirm before relying on it.
    """
    for i in range(1,22):
        image_name = "Team{}.Logo1".format(i)
        file_name = image_name + ".png"
        bpy.ops.import_image.to_plane(files=[{"name":file_name, "name":file_name}], directory="Users/hyungsoobae/Desktop/K-League/image/")
        models[image_name].location= (0,position1[i-1],1)
        # Shift the plane's mesh up by 0.5 in edit mode so the object origin stays put.
        bpy.ops.object.editmode_toggle()
        bpy.ops.transform.translate(value=(0,0,0.5), constraint_axis=(False,False,True), constraint_orientation='GLOBAL', mirror=False, proportional='DISABLED', proportional_edit_falloff='SMOOTH', proportional_size=1)
        bpy.ops.object.editmode_toggle()
# Blender handles: every scene object (teams, bars, names, logos) and the scene.
models = bpy.data.objects
scn = bpy.context.scene
data_file_path = "/Users/hyungsoobae/Desktop/K-League/data"
# Y coordinates of the 22 table rows, spaced 1.1 apart.
position1 = [0.0, 1.1, 2.2, 3.3000000000000003, 4.4, 5.5, 6.6000000000000005, 7.700000000000001, 8.8, 9.9, 11.0, 12.100000000000001, 13.200000000000001, 14.3, 15.400000000000002, 16.5, 17.6, 18.700000000000003, 19.8, 20.900000000000002, 22.0, 23.1]
# Integer row indices (not referenced elsewhere in this file).
tposition1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]
# Teams parked at y >= this value are considered off the table.
outlier = 27
def reset():
    """Rewind to frame 0, clear all animation and restore the initial table layout.

    Team name objects only have their 'location' f-curves stripped (keeping the
    rest of their action); every other object gets animation_data_clear().
    """
    scn.frame_set(0)
    for model in models:
        if 'Year' in model.name:
            continue
        if 'Team' in model.name and 'Name' in model.name:
            index = []
            for i,action in enumerate(model.animation_data.action.fcurves):
                if action.data_path == 'location':
                    index.append(i)
            # Removing an f-curve compacts the collection, so after each
            # removal every stored index is shifted down by one.
            for i in range(len(index)):
                print (index)
                model.animation_data.action.fcurves.remove(model.animation_data.action.fcurves[index[i]])
                index = list(map(lambda x: x-1, index))
                print (index)
            continue
        model.animation_data_clear()
        model.data.animation_data_clear()
    # Park every team (and its point counter, name and logo objects) back on
    # its starting row with a flat (zero-height) bar.
    for i in range(1,23):
        #print (models['Team{}'.format(i)].location)
        models['Team{}'.format(i)].location[1] = position1[i-1]
        models['Team{}'.format(i)].scale[2] = 0
        models['Team{}.Point'.format(i)].location[0] = 0.4
        models['Team{}.Point'.format(i)].location[1] = position1[i-1]
        models['Team{}.Point'.format(i)].location[2] = 0.4
        models['Team{}.Point'.format(i)].data.text_counter_props.ifAnimated=True
        models['Team{}.Point'.format(i)].data.text_counter_props.counter = 0
        for j in range(1,6):
            # NOTE(review): bare except silently skips teams with fewer than 5
            # name objects — it also hides any other error.
            try:
                models['Team{}.Name{}'.format(i,j)].location[1] = position1[i-1]
                models['Team{}.Name{}'.format(i,j)].location[2] = 2.1
                models['Team{}.Name{}'.format(i,j)].rotation_euler[1] = -0.872665
                models['Team{}.Name{}'.format(i,j)].data.size = 0.3
            except:
                pass
        for j in range(1,8):
            try:
                models['Team{}.Logo{}'.format(i, j)].location[1] = position1[i-1]
                models['Team{}.Logo{}'.format(i, j)].location[2] = 1.0
            except:
                pass
def get_current_teams(frame=0):
    """Return the names of Team objects on the table at `frame`, top row first."""
    scn.frame_set(frame)
    on_table = [m for m in models
                if 'Team' in m.name and '.' not in m.name
                and m.location[1] < outlier]
    on_table.sort(key=lambda m: m.location[1])
    return [m.name for m in on_table]
def setNameLocation(ffrom, frame, teamName, value):
    """Keyframe the (up to 5) '<team>.Name<i>' text objects from their current
    spot at frame `ffrom` to row `value` at frame `ffrom+frame`; their height
    tracks the team's bar (scale[2] * 2 + 2.1)."""
    for i in range(1,6):
        # NOTE(review): bare except presumably skips teams with fewer than 5
        # name objects, but it also hides every other error.
        try:
            scn.frame_set(ffrom)
            models['{}.Name{}'.format(teamName,i)].keyframe_insert(data_path='location')
            scn.frame_set(ffrom+frame)
            models['{}.Name{}'.format(teamName, i)].location[1] = value
            models['{}.Name{}'.format(teamName, i)].location[2] = models[teamName].scale[2] * 2 + 2.1
            models['{}.Name{}'.format(teamName, i)].keyframe_insert(data_path='location')
        except:
            pass
def setLogoLocation(ffrom, frame, teamName, value):
    """Keyframe the (up to 7) '<team>.Logo<i>' planes from their current spot at
    frame `ffrom` to row `value` at `ffrom+frame`; height tracks the team's bar
    (scale[2] * 2 + 1.0)."""
    for i in range(1,8):
        # NOTE(review): bare except presumably skips teams with fewer logo
        # objects, but it also hides every other error.
        try:
            scn.frame_set(ffrom)
            models['{}.Logo{}'.format(teamName,i)].keyframe_insert(data_path='location')
            scn.frame_set(ffrom+frame)
            models['{}.Logo{}'.format(teamName, i)].location[1] = value
            models['{}.Logo{}'.format(teamName, i)].location[2] = models[teamName].scale[2] * 2 + 1.0
            models['{}.Logo{}'.format(teamName, i)].keyframe_insert(data_path='location')
        except:
            pass
def setPointLocation(ffrom, frame, teamName, value, point=None):
    """Keyframe the '<team>.Point' counter: move it to row `value` between
    frames `ffrom` and `ffrom+frame`, and when `point` is given also animate the
    displayed number to that value."""
    scn.frame_set(ffrom)
    models['{}.Point'.format(teamName)].keyframe_insert(data_path='location')
    if point is not None:
        models['{}.Point'.format(teamName)].data.keyframe_insert(data_path='text_counter_props.counter')
    scn.frame_set(ffrom+frame)
    models['{}.Point'.format(teamName)].location[1] = value
    # Sit on top of the team's bar; 0.4 is the resting height when the bar is flat.
    if models[teamName].scale[2] > 0:
        models['{}.Point'.format(teamName)].location[2] = models[teamName].scale[2] * 2 + 0.4
    else:
        models['{}.Point'.format(teamName)].location[2] = 0.4
    if point is not None:
        models['{}.Point'.format(teamName)].data.text_counter_props.counter = point
        models['{}.Point'.format(teamName)].data.keyframe_insert(data_path='text_counter_props.counter')
    models['{}.Point'.format(teamName)].keyframe_insert(data_path='location')
def transition(year, ffrom, frame):
    """Animate the table change-over to `year`'s membership over `frame` frames.

    Reads <data_file_path>/<year>.csv (columns used: v[1] = points, v[2] = team
    name; row order presumably = rank — confirm with the data files), then:
    1) slides teams absent from the new season off the table (to `outlier`),
    2) re-packs the remaining teams into the top rows,
    3) brings newly participating teams onto the table below them.
    """
    new_teams = []
    new_teams_set = set()
    fp = data_file_path + "/" + year + ".csv"
    with open(fp, 'r', encoding="utf-8") as csvfile:
        rdr = csv.reader(csvfile)
        for i,v in enumerate(rdr):
            new_teams.append((i,v[2],int(v[1])))
            new_teams_set.add(v[2])
    current_teams = get_current_teams(ffrom)
    print (current_teams)
    #Remove all non participitating teams from the table
    np_teams = set(current_teams) - new_teams_set
    for team in np_teams:
        #print (team)
        scn.frame_set(ffrom)
        models[team].keyframe_insert(data_path='location')
        models[team].keyframe_insert(data_path='scale')
        scn.frame_set(ffrom+frame)
        models[team].location[1] = outlier
        models[team].scale[2] = 0
        models[team].keyframe_insert(data_path='location')
        models[team].keyframe_insert(data_path='scale')
        setNameLocation(ffrom, frame, team, outlier)
        setLogoLocation(ffrom, frame, team, outlier)
        setPointLocation(ffrom, frame, team, outlier, 0)
    #Move the old teams in order
    current_teams = list(filter(lambda x: x not in np_teams, current_teams))
    current_number = len(current_teams)
    for i,team in enumerate(current_teams):
        scn.frame_set(ffrom)
        models[team].keyframe_insert(data_path='location')
        scn.frame_set(ffrom+frame)
        models[team].location[1] = position1[i]
        models[team].keyframe_insert(data_path='location')
        setNameLocation(ffrom, frame, team, position1[i])
        setLogoLocation(ffrom, frame, team, position1[i])
        setPointLocation(ffrom, frame, team, position1[i])
    #Add new teams
    new_teams_set = new_teams_set - set(current_teams)
    for i,team in enumerate(new_teams_set):
        scn.frame_set(ffrom)
        models[team].keyframe_insert(data_path='location')
        scn.frame_set(ffrom+frame)
        models[team].location[1] = position1[current_number+i]
        models[team].keyframe_insert(data_path='location')
        setNameLocation(ffrom, frame, team, position1[current_number+i])
        setLogoLocation(ffrom, frame, team, position1[current_number+i])
        setPointLocation(ffrom, frame, team, position1[current_number+i])
def league_type_1(year, ffrom, frame, scale=10):
    """Animate a single-table season: re-rank every team in <year>.csv and grow
    its bar to points/scale between frames `ffrom` and `ffrom+frame`."""
    new_teams = []
    new_teams_set = set()
    fp = data_file_path + "/" + year + ".csv"
    with open(fp, 'r', encoding="utf-8") as csvfile:
        rdr = csv.reader(csvfile)
        for i,v in enumerate(rdr):
            # (row index = table position, team name, points)
            new_teams.append((i,v[2],int(v[1])))
            new_teams_set.add(v[2])
    #print (new_teams)
    for team in new_teams:
        scn.frame_set(ffrom)
        models[team[1]].keyframe_insert(data_path='location')
        models[team[1]].keyframe_insert(data_path='scale')
        scn.frame_set(ffrom+frame)
        models[team[1]].location[1] = position1[team[0]]
        models[team[1]].scale[2] = team[2] / scale
        models[team[1]].keyframe_insert(data_path='location')
        models[team[1]].keyframe_insert(data_path='scale')
        setNameLocation(ffrom, frame, team[1], position1[team[0]])
        setLogoLocation(ffrom, frame, team[1], position1[team[0]])
        setPointLocation(ffrom, frame, team[1], position1[team[0]], team[2])
def league_type_3(year, ffrom, frame, scale):
    """Delegates to league_type_1; kept for naming symmetry with other formats."""
    league_type_1(year,ffrom,frame, scale)
def league_type_4(year, ffrom, frame, scale):
    """Delegates to league_type_1; kept for naming symmetry with other formats."""
    league_type_1(year,ffrom,frame, scale)
def post_season(year, ffrom, frame, scale):
    """Animate the post-season table: reads '<year>p.csv' via league_type_1."""
    league_type_1(year+'p',ffrom,frame, scale)
def league_type_5(year, ffrom, frame, scale):
    """Delegates to league_type_1; kept for naming symmetry with other formats."""
    league_type_1(year,ffrom,frame, scale)
def split(year, ffrom, frame, gap=2, scale=10):
    """Animate the split-season finale: Group A from <year>a.csv in the top
    rows, Group B from <year>b.csv below it, separated by `gap` rows.

    NOTE(review): the Group B row offset uses Group B's own length
    (length-1+gap); this lines up below Group A only when both groups have
    the same number of teams — confirm with the data files.
    """
    new_teams = []
    new_teams_set = set()
    #GROUP A
    fp = data_file_path + "/" + year + "a.csv"
    with open(fp, 'r', encoding="utf-8") as csvfile:
        rdr = csv.reader(csvfile)
        for i,v in enumerate(rdr):
            new_teams.append((i,v[2],int(v[1])))
            new_teams_set.add(v[2])
    length = len(new_teams)
    for team in new_teams:
        scn.frame_set(ffrom)
        models[team[1]].keyframe_insert(data_path='location')
        models[team[1]].keyframe_insert(data_path='scale')
        scn.frame_set(ffrom+frame)
        models[team[1]].location[1] = position1[team[0]]
        models[team[1]].scale[2] = team[2] / scale
        models[team[1]].keyframe_insert(data_path='location')
        models[team[1]].keyframe_insert(data_path='scale')
        setNameLocation(ffrom, frame, team[1], position1[team[0]])
        setLogoLocation(ffrom, frame, team[1], position1[team[0]])
        setPointLocation(ffrom, frame, team[1], position1[team[0]], team[2])
    #GROUP B
    new_teams = []
    new_teams_set = set()
    fp = data_file_path + "/" + year + "b.csv"
    with open(fp, 'r', encoding="utf-8") as csvfile:
        rdr = csv.reader(csvfile)
        for i,v in enumerate(rdr):
            new_teams.append((i,v[2],int(v[1])))
            new_teams_set.add(v[2])
    length = len(new_teams)
    for team in new_teams:
        scn.frame_set(ffrom)
        models[team[1]].keyframe_insert(data_path='location')
        models[team[1]].keyframe_insert(data_path='scale')
        scn.frame_set(ffrom+frame)
        models[team[1]].location[1] = position1[length-1+gap+team[0]]
        models[team[1]].scale[2] = team[2] / scale
        models[team[1]].keyframe_insert(data_path='location')
        models[team[1]].keyframe_insert(data_path='scale')
        setNameLocation(ffrom, frame, team[1],position1[length-1+gap+team[0]])
        setLogoLocation(ffrom, frame, team[1],position1[length-1+gap+team[0]])
        setPointLocation(ffrom, frame, team[1],position1[length-1+gap+team[0]], team[2])
'''
reset()
transition("1983", 0, 5)
league_type_1("1983", 5, 50, 40)
transition("1984a", 105, 15)
league_type_1("1984a", 120, 50, 40)
league_type_1("1984b", 195, 50, 40)
league_type_1("1984c", 270, 25, 40)
transition("1985", 345, 15)
league_type_1("1985", 360, 50, 40)
transition("1986a", 460, 15)
league_type_1("1986a", 475, 50, 40)
league_type_1("1986b", 550, 50, 40)
league_type_1("1986c", 625, 25, 40)
transition("1987", 700, 15)
league_type_1("1987", 715, 50, 40)
transition("1988", 815, 15)
league_type_1("1988", 830, 50, 40)
transition("1989", 930, 15)
league_type_1("1989", 945, 50, 40)
transition("1990", 1045, 15)
league_type_1("1990", 1060, 50, 40)
transition("1991", 1160, 15)
league_type_1("1991", 1175, 50, 40)
transition("1992", 1275, 15)
league_type_1("1992", 1290, 50, 40)
transition("1993", 1390, 15)
league_type_1("1993", 1405, 50, 40)
transition("1994", 1505, 15)
league_type_1("1994", 1520, 50, 40)
transition("1995a", 1620, 15)
league_type_1("1995a", 1635, 50, 40)
league_type_1("1995b", 1710, 50, 40)
league_type_1("1995c", 1785, 25, 40)
transition("1996a", 1860, 15)
league_type_1("1996a", 1875, 50, 40)
league_type_1("1996b", 1950, 50, 40)
league_type_1("1996c", 2025, 25, 40)
transition("1997", 2100, 15)
league_type_1("1997", 2115, 50, 40)
transition("1998", 2215, 15)
league_type_1("1998", 2230, 50, 40)
post_season("1998", 2305, 25, 40)
transition("1999", 2380, 15)
league_type_1("1999", 2395, 50, 40)
post_season("1999", 2470, 25, 40)
transition("2000", 2545, 15)
league_type_1("2000", 2560, 50, 40)
post_season("2000", 2635, 25, 40)
transition("2001", 2710, 15)
league_type_1("2001", 2725, 50, 40)
transition("2002", 2825, 15)
league_type_1("2002", 2840, 50, 40)
transition("2003", 2940, 15)
league_type_1("2003", 2955, 50, 40)
transition("2004a", 3055, 15)
league_type_1("2004a", 3070, 50, 40)
league_type_1("2004b", 3145, 50, 40)
league_type_1("2004c", 3220, 40, 40)
league_type_1("2004d", 3285, 25, 40)
transition("2005a", 3360, 15)
league_type_1("2005a", 3375, 50, 40)
league_type_1("2005b", 3450, 50, 40)
league_type_1("2005c", 3525, 40, 40)
league_type_1("2005d", 3590, 25, 40)
transition("2006a", 3665, 15)
league_type_1("2006a", 3680, 50, 40)
league_type_1("2006b", 3680+50+25, 50, 40)
league_type_1("2006c", 3680+50+25+50+25, 40, 40)
league_type_1("2006d", 3695+50+25+50+25+25+25, 25, 40)
transition("2007a", 3970, 15)
league_type_1("2007a", 3985, 50, 40)
league_type_1("2007b", 3985+50+25, 25, 40)
transition("2008a", 4135, 15)
league_type_1("2008a", 4150, 50, 40)
league_type_1("2008b", 4150+50+25, 25, 40)
transition("2009a", 4300, 15)
league_type_1("2009a", 4315, 50, 40)
league_type_1("2009b", 4315+50+25, 25, 40)
transition("2010a", 4465, 15)
league_type_1("2010a", 4480, 50, 40)
league_type_1("2010b", 4480+50+25, 25, 40)
transition("2011a", 4630, 15)
league_type_1("2011a", 4645, 50, 40)
league_type_1("2011b", 4645+50+25, 25, 40)
transition("2012", 4795, 15)
league_type_1("2012", 4810, 50, 40)
split("2012", 4885, 40, 2, 40)
transition("2013", 4975, 15)
league_type_1("2013", 4990, 50, 40)
split("2013", 4990+50+25, 40, 2, 40)
transition("2014", 5155, 15)
league_type_1("2014", 5170, 50, 40)
split("2014", 5245, 40, 2, 40)
transition("2015", 5335, 15)
league_type_1("2015", 5350, 50, 40)
split("2015", 5350+50+25, 40, 2, 40)
transition("2016", 5515, 15)
league_type_1("2016", 5530, 50, 40)
split("2016", 5530+50+25, 40, 2, 40)
transition("2017", 5700, 15)
league_type_1("2017", 5715, 50, 40)
split("2017", 5715+50+25, 40, 2, 40)
transition("2018", 5880, 15)
league_type_1("2018", 5910-15, 50, 40)
split("2018", 5910+50+25-15, 40, 2, 40)
'''
| baehs1989/blender-script | K_LEAGUE.py | K_LEAGUE.py | py | 15,516 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "bpy.context.screen",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "bpy.context.window",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "bpy.context... |
8978366140 | import os
import pandas as pd
from darts import TimeSeries
from darts.models import LightGBMModel
from enfobench import AuthorInfo, ModelInfo, ForecasterType
from enfobench.evaluation.server import server_factory
from enfobench.evaluation.utils import periods_in_duration
class DartsLightGBMModel:
    """Enfobench wrapper around a Darts LightGBM direct multi-step forecaster."""

    def __init__(self, seasonality: str):
        # e.g. "1D", "7D" — upper-cased for the model name and lag computation.
        self.seasonality = seasonality.upper()

    def info(self) -> ModelInfo:
        """Describe the model for the benchmark framework."""
        return ModelInfo(
            name=f"Darts.LightGBM.Direct.{self.seasonality}",
            authors=[
                AuthorInfo(name="Mohamad Khalil", email="coo17619@newcastle.ac.uk")
            ],
            type=ForecasterType.point,
            params={
                "seasonality": self.seasonality,
            },
        )

    def forecast(
        self,
        horizon: int,
        history: pd.DataFrame,
        past_covariates: pd.DataFrame | None = None,
        future_covariates: pd.DataFrame | None = None,
        **kwargs,
    ) -> pd.DataFrame:
        """Fit on `history` (target column 'y') and forecast `horizon` steps.

        Covariates are accepted for interface compatibility but not used.
        """
        # Fill missing values with the historical mean.
        history = history.fillna(history.y.mean())
        # One lag per period of the configured seasonality;
        # output_chunk_length=horizon lets predict() emit the whole horizon.
        periods = periods_in_duration(history.index, duration=self.seasonality)
        model = LightGBMModel(
            lags=list(range(-periods, 0)),
            output_chunk_length=horizon,
            multi_models=False,
        )
        # Fit model
        series = TimeSeries.from_dataframe(history, value_cols=["y"])
        model.fit(series)
        # Make forecast
        pred = model.predict(horizon)
        # Postprocess: rename to the benchmark's expected 'yhat' column and
        # backfill any remaining NaNs with the historical mean.
        forecast = (
            pred.pd_dataframe().rename(columns={"y": "yhat"}).fillna(history.y.mean())
        )
        return forecast
# Load parameters: the seasonality (e.g. "7D") is injected via the environment.
seasonality = os.getenv("ENFOBENCH_MODEL_SEASONALITY")
# Instantiate your model
model = DartsLightGBMModel(seasonality=seasonality)
# Create a forecast server by passing in your model
app = server_factory(model)
| attila-balint-kul/energy-forecast-benchmark-examples | models/dt-lightgbm-direct/src/main.py | main.py | py | 1,933 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "enfobench.ModelInfo",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "enfobench.AuthorInfo",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "enfobench.ForecasterType.point",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_... |
18399278652 | # SPDX-License-Identifier: GPL-2.0-only
import threading
from pprint import pprint
import pytest
from flask import url_for
import libeagle
from tests.simulator import eagle200sim
import re
@pytest.fixture(scope="session", autouse=True)
def app():
    """Session-wide Flask app running the Eagle-200 simulator (used by live_server)."""
    app = eagle200sim.create_app()
    return app
@pytest.mark.usefixtures("live_server")
class TestLiveServer:
    """End-to-end test against the simulator served by pytest-flask's live_server."""
    def test_eagle200(self):
        # Recover the ephemeral port the live server bound to.
        url = url_for("process_request", _external=True)
        port = int(re.search(":([0-9]+)/", url)[1])
        # Credentials match the simulator's canned fixtures.
        conn = libeagle.Connection("localhost", "0077dd", "6e61a3a94882eef9", port=port, debug=True)
        devices = conn.device_list()
        pprint(devices)
        details = conn.device_details(devices[0]["HardwareAddress"])
        pprint(details)
        # Query a single named variable of the first device...
        query = conn.device_query(
            devices[0]["HardwareAddress"],
            details[0]["Name"],
            details[0]["Variables"][0],
        )
        pprint(query)
        assert (
            query[0]["Variables"]["zigbee:InstantaneousDemand"] == "21.499 kW"
        )
        # ...then an unfiltered query of all variables.
        query = conn.device_query(devices[0]["HardwareAddress"])
        pprint(query)
        assert (
            query[0]["Variables"]["zigbee:Message"] == "Hello, World!"
        )
| lrusak/py-eagle-200 | tests/test_eagle200.py | test_eagle200.py | py | 1,240 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tests.simulator.eagle200sim.create_app",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tests.simulator.eagle200sim",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 14,
"usage_type": "call"
},
{
... |
43417946445 | import cv2
import matplotlib.pyplot as plt
import pandas as pd
# Input image and colour lookup table.
img1_path = 'U14.png'
csv_path = 'colours.csv'
img2 = cv2.imread(img1_path)
img2 = cv2.resize(img2, (800, 600))
# Preview the raw (BGR) image and an RGB-converted copy with matplotlib.
plt.figure(figsize=(20, 8))
plt.imshow(img2)
grid_RGB = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
plt.figure(figsize=(20, 8))
plt.imshow(grid_RGB)
# colours.csv has no header row; give the columns explicit names.
index = ['colour', 'colour_name', 'hex', 'R', 'G', 'B']
df = pd.read_csv(csv_path, names=index, header=None)
# Globals shared with the mouse callback below.
clicked = False
r = g = b = xpos = ypos = 0
def get_color_name(R, G, B):
    """Return the name of the colour in ``df`` closest to (R, G, B).

    Distance is the Manhattan distance over the R/G/B columns; on ties the
    last matching row wins (preserving the original ``<=`` behaviour).

    Fixes: the original raised UnboundLocalError when ``df`` was empty, and
    did three ``.loc`` scalar lookups per row; ``itertuples`` is both safer
    and faster.
    """
    minimum = 1000  # larger than the max possible distance (3 * 255)
    cname = "Unknown"  # fallback when the colour table is empty
    for row in df.itertuples(index=False):
        d = abs(R - int(row.R)) + abs(G - int(row.G)) + abs(B - int(row.B))
        if d <= minimum:
            minimum = d
            cname = row.colour_name
    return cname
def draw_function(event, x, y, flags, params):
    """OpenCV mouse callback: on double-click, remember the clicked position
    and the BGR colour of that pixel in the module-level globals."""
    if event == cv2.EVENT_LBUTTONDBLCLK:
        global b, g, r, xpos, ypos, clicked
        clicked = True
        xpos = x
        ypos = y
        b, g, r = img2[y, x]
        # numpy scalars -> plain ints so later drawing/formatting works cleanly.
        b = int(b)
        g = int(g)
        r = int(r)
# Interactive loop: a double-click samples a pixel; Esc (key code 27) exits.
cv2.namedWindow('Detection')
cv2.setMouseCallback('Detection', draw_function)
while True:
    cv2.imshow('Detection', img2)
    if clicked:
        # Draw a swatch of the picked colour and overlay its nearest name + RGB.
        cv2.rectangle(img2, (20, 20), (600, 60), (b, g, r), -1)
        text = get_color_name(r, g, b) + ' R =' + str(r) + ' G = ' + str(
            g) + ' B = ' + str(b)
        cv2.putText(img2, text, (50, 50), 2, 0.8, (0, 255, 255), 2, cv2.LINE_AA)
    if cv2.waitKey(20) & 0xFF == 27:
        break
cv2.destroyAllWindows()
| AnupCloud/Color_Detection | color_detection.py | color_detection.py | py | 1,604 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
... |
73829193786 | # Usage:
import asyncio
from starknet_py.net.gateway_client import GatewayClient
from starknet_py.net.networks import TESTNET
from starknet_py.net import AccountClient, KeyPair
from starknet_py.contract import Contract
from starknet_py.net.models.chains import StarknetChainId
# Per-instance challenge endpoint and player credentials (throwaway CTF values).
uuid = '2f530e87-a2c5-47c9-8ebf-e704dc06e9d8'
rpc_endpoint = 'http://2f530e87-a2c5-47c9-8ebf-e704dc06e9d8@18.157.198.111:5061'
private_key = 0x417ea85a3231ed89e745f9623ee2c32b
player_address = 0x6fb14af9a52544466d0b00b536930d57c49f9140c3ee989102a930a88cec521
contract_address = 0x22307a497c26e0766e6701e3ed78c21166ba691e9fad47d2f3e836cbbdaf52c
# Starknet field prime; not referenced elsewhere in this script.
PRIME = 3618502788666131213697322783095070105623107215331596699973092056135872020481
async def run():
    """Connect to the challenge's Starknet gateway and invoke its `solve` entry point."""
    gateway_client = GatewayClient(rpc_endpoint, TESTNET)
    account_client = AccountClient(
        client=gateway_client,
        address=player_address,
        key_pair=KeyPair.from_private_key(private_key),
        chain=StarknetChainId.TESTNET,
        supported_tx_version=1,
    )
    # Sanity check: fetch block 1 and print its timestamp.
    block = await gateway_client.get_block(block_number=1)
    print(block)
    ts = block.timestamp
    print('timestamp1', ts)
    # Resolve the contract's ABI, then execute 'solve' and wait for acceptance.
    contract = await Contract.from_address(contract_address, account_client)
    print(contract.functions)
    call = contract.functions['solve'].prepare()
    tx_r = await account_client.execute(call, auto_estimate=True)
    await account_client.wait_for_tx(tx_r.transaction_hash)
    print(tx_r)
    print(tx_r.transaction_hash)
if __name__ == "__main__":
    # Script entry point: drive the async flow to completion.
    asyncio.run(run())
| feltroidprime/CTF-starknet-cc | challenges/solve-me/deploy.py | deploy.py | py | 1,531 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "starknet_py.net.gateway_client.GatewayClient",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "starknet_py.net.networks.TESTNET",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "starknet_py.net.AccountClient",
"line_number": 20,
"usa... |
19130670937 | from datetime import datetime
import docker
import mock
import unittest
from infra.services.android_docker import containers
class FakeDevice(object):
    """Stand-in for a usb_device.Device."""
    def __init__(self, serial, physical_port):
        self.serial = serial
        self.physical_port = physical_port
        # Default USB identifiers; tests overwrite these as needed.
        self.major = self.minor = self.bus = 0
        self.dev_file_path = ''
class FakeClient(object):
    """Mocks the client object returned from docker's client API.

    containers.DockerClient wraps it; mocked here to verify that the wrapper
    class behaves correctly.
    """
    def __init__(self):
        # Populated by individual tests, typically with a FakeContainerList.
        self.containers = None
class FakeContainer(object):
    """Lightweight double for containers.Container."""

    def __init__(self, name, uptime=None):
        self._container = FakeContainerBackend(name)
        self.name = name
        self.uptime = uptime
        self.swarming_bot_killed = False

    def get_container_uptime(self, now):  # pylint: disable=unused-argument
        """Return the canned uptime regardless of `now`."""
        return self.uptime

    def kill_swarming_bot(self):
        """Record that the swarming bot was asked to shut down."""
        self.swarming_bot_killed = True
class FakeContainerBackend(object):
    """Double for the container objects returned by docker's client API.

    containers.Container wraps one of these; the boolean flags record which
    lifecycle calls were made so tests can assert on them.
    """

    def __init__(self, name):
        self.name = name
        self.was_deleted = False
        self.was_started = False
        self.is_paused = False
        self.exec_outputs = []  # canned results for exec_run, consumed FIFO
        self.exec_inputs = []   # commands passed to exec_run, in order
        self.attrs = {}

    def remove(self):
        self.was_deleted = True

    def start(self):
        self.was_started = True

    def pause(self):
        assert not self.is_paused
        self.is_paused = True

    def unpause(self):
        assert self.is_paused
        self.is_paused = False

    def exec_run(self, cmd):
        self.exec_inputs.append(cmd)
        result = self.exec_outputs.pop(0)
        return result
class FakeContainerList(object):
    """Double for the container-collection object of docker's client API."""

    def __init__(self, containers_list):
        self._list = containers_list

    def create(self, **kwargs):
        return FakeContainerBackend(kwargs['name'])

    def list(self, filters=None):  # pylint: disable=unused-argument
        return self._list

    def get(self, name):
        for candidate in self._list:
            if candidate.name == name:
                return candidate
        raise docker.errors.NotFound('omg container missing')
class TestGetNames(unittest.TestCase):
def setUp(self):
self.device = FakeDevice('serial123', 1)
def test_container_name(self):
container_name = containers.get_container_name(self.device)
self.assertEqual(container_name, 'android_serial123')
@mock.patch('socket.gethostname')
def test_container_hostname(self, mock_gethostname):
mock_gethostname.return_value = 'build123-a4'
container_hostname = containers.get_container_hostname(self.device)
self.assertEqual(container_hostname, 'build123-a4--device1')
class TestDockerClient(unittest.TestCase):
def setUp(self):
self.fake_client = FakeClient()
self.container_names = ['android_serial1', 'android_serial2']
self.fake_client.containers = FakeContainerList(
[FakeContainerBackend(name) for name in self.container_names])
@mock.patch('docker.from_env')
def test_get_running_containers(self, mock_from_env):
mock_from_env.return_value = self.fake_client
running_containers = containers.DockerClient().get_running_containers()
self.assertEqual(
set(c.name for c in running_containers), set(self.container_names))
@mock.patch('docker.from_env')
def test_get_container(self, mock_from_env):
mock_from_env.return_value = self.fake_client
fake_device = FakeDevice('serial2', 2)
container = containers.DockerClient().get_container(fake_device)
self.assertEqual(container.name, 'android_serial2')
@mock.patch('docker.from_env')
def test_get_missing_container(self, mock_from_env):
mock_from_env.return_value = self.fake_client
fake_device = FakeDevice('missing_device', 1)
container = containers.DockerClient().get_container(fake_device)
self.assertEqual(container, None)
@mock.patch('docker.from_env')
def test_stop_old_containers(self, mock_from_env):
young_container = FakeContainer('young_container', uptime=10)
old_container = FakeContainer('old_container', uptime=999)
mock_from_env.return_value = self.fake_client
containers.DockerClient().stop_old_containers(
[young_container, old_container], 100)
self.assertFalse(young_container.swarming_bot_killed)
self.assertTrue(old_container.swarming_bot_killed)
@mock.patch('docker.from_env')
def test_delete_stopped_containers(self, mock_from_env):
mock_from_env.return_value = self.fake_client
containers.DockerClient().delete_stopped_containers()
self.assertTrue(
all(c.was_deleted for c in self.fake_client.containers.list()))
@mock.patch('docker.from_env')
def test_create_missing_containers(self, mock_from_env):
running_containers = [
FakeContainer('android_serial1'),
FakeContainer('android_serial2'),
]
devices = [
FakeDevice('serial1', 1),
FakeDevice('serial2', 2),
FakeDevice('serial3', 3),
]
self.fake_client.containers = FakeContainerList(running_containers)
mock_from_env.return_value = self.fake_client
needs_reboot = containers.DockerClient().create_missing_containers(
running_containers, devices, 'image')
# Ensure serial3 needs to be rebooted. This indicates that a new container
# was created for it.
self.assertEquals([d.serial for d in needs_reboot], ['serial3'])
class TestContainer(unittest.TestCase):
def setUp(self):
self.container_backend = FakeContainerBackend('container1')
self.container = containers.Container(self.container_backend)
def test_get_container_uptime(self):
now = datetime.strptime(
'2000-01-01T01:30:00.000000', '%Y-%m-%dT%H:%M:%S.%f')
self.container_backend.attrs = {
'State': {'StartedAt': '2000-01-01T00:00:00.0000000000'}
}
uptime = self.container.get_container_uptime(now)
self.assertEquals(uptime, 90)
def test_get_swarming_bot_pid(self):
self.container_backend.exec_outputs = ['123']
pid = self.container.get_swarming_bot_pid()
self.assertEquals(pid, 123)
def test_get_swarming_bot_pid_backend_error(self):
self.container_backend.exec_outputs = ['rpc error: omg failure']
pid = self.container.get_swarming_bot_pid()
self.assertEquals(pid, None)
def test_get_swarming_bot_pid_lsof_error(self):
self.container_backend.exec_outputs = ['omg lsof failure']
pid = self.container.get_swarming_bot_pid()
self.assertEquals(pid, None)
def test_kill_swarming_bot(self):
self.container_backend.exec_outputs = ['123', '']
self.container.kill_swarming_bot()
self.assertEquals(self.container_backend.exec_inputs[-1], 'kill -15 123')
def test_kill_swarming_bot_error(self):
self.container_backend.exec_outputs = ['omg failure']
self.container.kill_swarming_bot()
# Ensure nothing was killed when the bot's pid couldn't be found.
self.assertFalse(
any('kill -15' in cmd for cmd in self.container_backend.exec_inputs))
@mock.patch('time.sleep')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.close')
@mock.patch('os.path.exists')
def test_add_device(self, mock_path_exists, mock_close, mock_write, mock_open,
mock_sleep):
mock_sleep.return_value = None
mock_path_exists.return_value = True
self.container_backend.attrs = {'Id': 'abc123'}
self.container_backend.exec_outputs = ['', '']
device = FakeDevice('serial1', 1)
device.major = 111
device.minor = 9
device.bus = 1
device.dev_file_path = '/dev/bus/usb/001/123'
self.container.add_device(device)
self.assertTrue('abc123' in mock_open.call_args[0][0])
# Ensure the device's major and minor numbers were written to the
# cgroup file.
self.assertEqual(mock_write.call_args[0][1], 'c 111:9 rwm')
self.assertTrue(mock_close.called)
self.assertFalse(self.container_backend.is_paused)
@mock.patch('time.sleep')
@mock.patch('os.open')
@mock.patch('os.path.exists')
def test_add_device_missing_cgroup(self, mock_path_exists, mock_open,
mock_sleep):
mock_sleep.return_value = None
mock_path_exists.return_value = False
self.container_backend.attrs = {'Id': 'abc123'}
self.container_backend.exec_outputs = ['']
device = FakeDevice('serial1', 1)
self.container.add_device(device)
self.assertFalse(mock_open.called)
self.assertEquals(len(self.container_backend.exec_inputs), 1)
self.assertFalse(self.container_backend.is_paused)
@mock.patch('time.sleep')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.close')
@mock.patch('os.path.exists')
def test_add_device_os_open_error(self, mock_path_exists, mock_close,
mock_write, mock_open, mock_sleep):
mock_sleep.return_value = None
mock_path_exists.return_value = True
mock_open.side_effect = OSError('omg open error')
self.container_backend.attrs = {'Id': 'abc123'}
self.container_backend.exec_outputs = ['']
device = FakeDevice('serial1', 1)
device.major = 111
device.minor = 9
self.container.add_device(device)
self.assertTrue('abc123' in mock_open.call_args[0][0])
self.assertFalse(mock_write.called)
self.assertFalse(mock_close.called)
self.assertEquals(len(self.container_backend.exec_inputs), 1)
self.assertFalse(self.container_backend.is_paused)
@mock.patch('time.sleep')
@mock.patch('os.open')
@mock.patch('os.write')
@mock.patch('os.close')
@mock.patch('os.path.exists')
def test_add_device_os_write_error(self, mock_path_exists, mock_close,
mock_write, mock_open, mock_sleep):
mock_sleep.return_value = None
mock_path_exists.return_value = True
mock_write.side_effect = OSError('omg write error')
self.container_backend.attrs = {'Id': 'abc123'}
self.container_backend.exec_outputs = ['']
device = FakeDevice('serial1', 1)
device.major = 111
device.minor = 9
self.container.add_device(device)
self.assertTrue('abc123' in mock_open.call_args[0][0])
self.assertEquals(mock_write.call_args[0][1], 'c 111:9 rwm')
self.assertTrue(mock_close.called)
self.assertEquals(len(self.container_backend.exec_inputs), 1)
self.assertFalse(self.container_backend.is_paused)
| mithro/chromium-infra | infra/services/android_docker/test/containers_test.py | containers_test.py | py | 10,531 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "docker.errors.NotFound",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "docker.errors",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "infr... |
73814975546 | from abc import ABCMeta, abstractmethod
from asyncio.queues import Queue as AioQueue
from queue import Queue
from bonobo.constants import BEGIN, END
from bonobo.errors import AbstractError, InactiveReadableError, InactiveWritableError
from bonobo.nodes import noop
BUFFER_SIZE = 8192
class Readable(metaclass=ABCMeta):
"""Interface for things you can read from."""
@abstractmethod
def get(self, block=True, timeout=None):
"""Read. Block/timeout are there for Queue compat."""
raise AbstractError(self.get)
class Writable(metaclass=ABCMeta):
"""Interface for things you can write to."""
@abstractmethod
def put(self, data, block=True, timeout=None):
"""Write. Block/timeout are there for Queue compat."""
raise AbstractError(self.put)
class Input(Queue, Readable, Writable):
def __init__(self, maxsize=BUFFER_SIZE):
Queue.__init__(self, maxsize)
self._runlevel = 0
self._writable_runlevel = 0
self.on_initialize = noop
self.on_begin = noop
self.on_end = noop
self.on_finalize = noop
def put(self, data, block=True, timeout=None):
# Begin token is a metadata to raise the input runlevel.
if data == BEGIN:
if not self._runlevel:
self.on_initialize()
self._runlevel += 1
self._writable_runlevel += 1
# callback
self.on_begin()
return
# Check we are actually able to receive data.
if self._writable_runlevel < 1:
raise InactiveWritableError("Cannot put() on an inactive {}.".format(Writable.__name__))
if data == END:
self._writable_runlevel -= 1
return Queue.put(self, data, block, timeout)
def _decrement_runlevel(self):
if self._runlevel == 1:
self.on_finalize()
self._runlevel -= 1
self.on_end()
def get(self, block=True, timeout=None):
if not self.alive:
raise InactiveReadableError("Cannot get() on an inactive {}.".format(Readable.__name__))
data = Queue.get(self, block, timeout)
if data == END:
self._decrement_runlevel()
if not self.alive:
raise InactiveReadableError(
"Cannot get() on an inactive {} (runlevel just reached 0).".format(Readable.__name__)
)
return self.get(block, timeout)
return data
def shutdown(self):
while self._runlevel >= 1:
self._decrement_runlevel()
def empty(self):
self.mutex.acquire()
while self._qsize() and self.queue[0] == END:
self._runlevel -= 1
Queue._get(self)
self.mutex.release()
return Queue.empty(self)
@property
def alive(self):
return self._runlevel > 0
class AioInput(AioQueue):
pass
| python-bonobo/bonobo | bonobo/structs/inputs.py | inputs.py | py | 2,922 | python | en | code | 1,564 | github-code | 6 | [
{
"api_name": "abc.ABCMeta",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "bonobo.errors.AbstractError",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "abc.abstractmethod",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "abc.ABCMet... |
16199644126 | ############################################################################
## Django ORM Standalone Python Template
############################################################################
# Turn off bytecode generation
from datetime import time
import sys
sys.dont_write_bytecode = True
# Django specific settings
import os
project_path = "../"
project_root = "../../"
os.environ.get("DJANGO_SETTINGS_MODULE", "ctimanager.settings")
sys.path.append(project_path)
os.chdir(project_path)
import django
django.setup()
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Import your models for use in your script
from content.models import *
############################################################################
## START OF APPLICATION
############################################################################
import requests
import json
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.utils import ChromeType
from urllib.parse import urlparse, parse_qs
from PIL import Image, ImageChops, ImageDraw2, ImageFont
from io import BytesIO
import boto3
SPACES_APIKEY = os.environ.get('SPACES_APIKEY')
SPACES_APISECRET = os.environ.get('SPACES_APISECRET')
"""
class NewsSources(models.Model):
name = models.CharField(max_length=100, blank=True)
title = models.TextField(blank=False)
domain = models.CharField(max_length=100, blank=False)
rss_url = models.URLField()
article_selector = models.CharField(max_length=255)
region = models.CharField(max_length=5, blank=True)
def __str__(self):
return self.name
class News(models.Model):
cryptopanic_id = models.IntegerField(blank=True)
cryptopanic_url = models.URLField(blank=True)
type = models.CharField(max_length=20, blank=False)
domain = models.CharField(max_length=100, blank=True, null=True)
projects = models.ManyToManyField(Project)
# Note! The JSON1 module needs to be enables in SQL, if you get an error this might be the problem.
votes = models.JSONField(blank=True, null=True)
article_url = models.URLField(blank=False)
source = models.ForeignKey(NewsSources, on_delete=models.SET_NULL, null=True)
publish_data = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
"""
def resize_and_store(news_id, image_url):
try:
if image_url:
file_path = f"{BASE_PATH}/static/content/media/news_image_{news_id}.png"
proxies = {'http': "socks5://84.107.32.223:1080", 'https': "socks5://84.107.32.223:1080"}
headers = {
"Connection": "keep-alive",
"DNT": "1",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Sec-Fetch-Site": "none",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-Dest": "document",
"Referer": "https://www.google.com/",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8" }
response = requests.get(image_url, proxies=proxies, headers=headers)
if response.status_code == 200:
img = Image.open(BytesIO(response.content))
if img.height < 629:
myheight = 629
hpercent = (myheight/float(img.size[1]))
wsize = int((float(img.size[0])*float(hpercent)))
img = img.resize((wsize,myheight), resample=Image.ANTIALIAS)
mywidth = 1200
wpercent = (mywidth/float(img.size[0]))
hsize = int((float(img.size[1])*float(wpercent)))
img = img.resize((mywidth,hsize), resample=Image.ANTIALIAS)
new_width = 1200
new_height = 629
width, height = img.size # Get dimensions
left = (width - new_width)/2
top = (height - new_height)/2
right = (width + new_width)/2
bottom = (height + new_height)/2
# Crop the center of the image
im = img.crop((left, top, right, bottom))
im.save(file_path, format="png")
print(f"saving: {file_path}")
if upload_image_to_s3(file_path,f"news_image_{news_id}.png"):
print("image uploaded to s3")
else:
print("image not uploaded to s3")
else:
print(f"Response code {response.status_code} message: {response.text}")
except Exception as e:
print(f"Error reading image with error: {e}")
def check_news_source(source):
try:
if NewsSources.objects.filter(title=source['title']).exists():
return True
else:
try:
title = source['title']
except Exception as e:
print("No title found for source {source}")
try:
domain = source['domain']
except Exception as e:
print("No domain found for source {source}")
try:
region = source['region']
except Exception as e:
print("No region found for source {source}")
try:
path = source['path']
except Exception as e:
print("No path found for source {source}")
NewsSources.objects.create(domain=domain, region=region, title=title, path=path)
return True
except Exception as e:
print(f"Trouble checking and adding the news source with error {e}")
return False
def extract_video_id(url):
query = urlparse(url)
if query.hostname == 'youtu.be': return query.path[1:]
if query.hostname in {'www.youtube.com', 'youtube.com'}:
if query.path == '/watch': return parse_qs(query.query)['v'][0]
if query.path[:7] == '/watch/': return query.path.split('/')[1]
if query.path[:7] == '/embed/': return query.path.split('/')[2]
if query.path[:3] == '/v/': return query.path.split('/')[2]
# below is optional for playlists
if query.path[:9] == '/playlist': return parse_qs(query.query)['list'][0]
# returns None for invalid YouTube url
return None
def get_real_url(cryptopanic_url,source_domain):
print(f"getting real url for cryptopanic_url: {cryptopanic_url}")
ua = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36'
os.environ['WDM_LOG_LEVEL'] = '0'
os.environ['WDM_PRINT_FIRST_LINE'] = 'False'
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-gpu")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument('ignore-certificate-errors')
chrome_options.add_argument('--proxy-server=socks5://84.107.32.223:1080')
chrome_options.add_argument(f"--user-agent={ua}")
try:
if sys.platform == "darwin":
browser = webdriver.Chrome(ChromeDriverManager().install(), options=chrome_options)
else:
browser = webdriver.Chrome(executable_path="chromedriver", options=chrome_options)
browser.get(cryptopanic_url)
time.sleep(3)
url = browser.find_element(By.XPATH, '//*[@id="detail_pane"]/div[1]/h1/a[2]').get_attribute('href')
print(f"article url: {url}")
browser.quit()
return url
except Exception as e:
print(f"error: {e}")
def get_article_image(article_url):
if 'youtube.com' in article_url or 'youtu.be' in article_url:
video_id = extract_video_id(article_url)
url = f"https://metafetcher.gurustacks.com/video/youtube/{video_id}"
response = requests.get(url)
if response.status_code==200:
if 'standard' in response.json()['images']:
return response.json()['images']['standard']['url']
else:
return None
elif 'twitter.com' in article_url:
url = f"https://metafetcher.gurustacks.com/website/{article_url}"
response = requests.get(url)
if response.status_code==200:
if 'icon_192x192' in response.json()['images']:
return response.json()['images']['icon_192x192']
else:
return None
else:
url = f"https://metafetcher.gurustacks.com/website/{article_url}"
response = requests.get(url)
if response.status_code==200:
if 'image' in response.json()['images']:
return response.json()['images']['image']
else:
return None
def upload_image_to_s3(image_url, image_name):
try:
session = boto3.session.Session()
client = session.client('s3', region_name='ams3',
endpoint_url='https://ams3.digitaloceanspaces.com',
aws_access_key_id=SPACES_APIKEY,
aws_secret_access_key=SPACES_APISECRET)
client.upload_file(image_url, 'cryptapi-news-images', image_name, ExtraArgs={'ACL':'public-read'})
return True
except Exception as e:
print(f"Error uploading file: {e}")
return False
for num in range(1,5):
url = f"https://cryptopanic.com/api/v1/posts/?auth_token={os.environ.get('CRYPTO_PANIC_API_KEY')}&page={num}"
try:
response = requests.get(url)
if response.status_code==200:
for item in response.json()['results']:
if check_news_source(item['source']):
cryptopanic_id = item['id']
if item['source']['domain'] == 'twitter.com':
type = "twitter"
else:
type = item['kind']
title = item['title']
published_at = item['published_at']
cryptopanic_url = item['url']
votes = item['votes']
domain = item['domain']
try:
source_obj = NewsSources.objects.get(title=item['source']['title'])
except Exception as e:
print(f"News Source Not Found with error {e} for {item}")
if News.objects.filter(cryptopanic_id=cryptopanic_id).exists():
try:
news = News.objects.get(cryptopanic_id=cryptopanic_id)
if news.article_url == "":
article_url = get_real_url(cryptopanic_url,item['source']['domain'])
if article_url:
news.article_url = article_url
else:
news.delete()
continue
news.votes = item['votes']
news.title = item['title']
news.save()
print(f"Updating news item {item['title']}")
except Exception as e:
print(f"Failed updating news item with error {e}")
else:
try:
article_url = get_real_url(cryptopanic_url,item['source']['domain'])
if article_url is not None:
article_image = get_article_image(article_url)
if article_image is not None:
news_item = News.objects.create(cryptopanic_id=cryptopanic_id, article_url=article_url, type=type, title=title, image=article_image, domain=domain, published_at=published_at, cryptopanic_url=cryptopanic_url, votes=votes, source=source_obj)
print(f"Adding news item with title {title} and new news_id: {news_item.id}")
# Resize and store imnage
if article_image:
try:
resize_and_store(news_item.id, news_item.image)
except Exception as e:
print("Failed downloading image for news item")
try:
if 'currencies' in item.keys():
for currency in item['currencies']:
symbol = currency['code'].lower()
if Project.objects.filter(symbol=symbol,status='ACTIVE').exists():
news_item.projects.add(Project.objects.filter(symbol=symbol,status='ACTIVE').first())
print(f"adding {symbol} to news item")
else:
print(f"No project found for currency {symbol}")
except Exception as e:
print(f"Problems adding projects to news item with error {e}")
else:
raise Exception(f"No image found for news item {item['url']}")
else:
raise Exception("Article URL not found")
except Exception as e:
print(f"Failed adding news item with error {e}")
else:
print(f"Problems with the news source.. Skipping..")
else:
print(f"Not Hotdog! {response.status_code}")
time.sleep(5)
except Exception as e:
print("Time out! Skipping") | barrydaniels-nl/crypto-api | ctimanager/scripts/update_news_items.py | update_news_items.py | py | 15,028 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.dont_write_bytecode",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "sys.path.a... |
27980959232 | import glob
import sqlite3
import csv
import time;
conn = sqlite3.connect('gdax_0.1.db')
cur = conn.cursor()
cur.execute("SELECT * FROM quotes_BTC_LTC") # WHERE start >?", (1420160461, ))
results1 = cur.fetchall()
conn2 = sqlite3.connect('gdaxLTC.db')
cur2 = conn2.cursor()
cur2.execute("SELECT * FROM quotes_BTC_LTC") # WHERE start >?", (1420160461, ))
results2 = cur2.fetchall()
for i in range(0, len(results1)):
if(results1[i] != results2[i]):
print("Different")
# tsListResults = []
#
# # print(results1.pop(0))
# # print(results1.pop())
#
# for row in results1:
# tup = (row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])
# #print(tup)
# tsListResults.append(row[1])
# # if (row[1] == 1509949420 or row[1] == 1375731700 or row[1] == 1417674740 or row[1] == 1501560820 or row[1] == 1493172220):
# # print(tup)
#
# #tsListResults.sort()
#
# tsList = []
# for i in range(1471407360, 1504213860, 60):
# tsList.append(i)
#
# # diff = list(set(tsList) - (set(tsListResults)))
# diff = list(set(tsList).symmetric_difference(set(tsListResults)))
# diff.sort()
# for row in diff:
# print(row)
#
# print("Start", min(tsListResults))
# print("End", max(tsListResults))
| HristoHr/backTestEngine | CheckDataCompletenessDB.py | CheckDataCompletenessDB.py | py | 1,226 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlite3.connect",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 11,
"usage_type": "call"
}
] |
36781902171 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
__author__ = 'ktulhy'
# TODO: убрать дублирование кода
ERROR = "\x1b[31m[---ERROR--] \x1b[0m"
SYSTEM = "\x1b[34m[--SYSTEM--] \x1b[0m"
INFO = "[---INFO---] "
WARNING = "\x1b[33m[--WARNING-] \x1b[0m"
test_types = []
from lxml import etree
def parse_inp(var):
attrib = var.attrib
v_type = attrib.get('type', None)
text = var.text
if None is v_type:
v_type = ""
typecast = ""
else:
typecast = "(" + v_type + ") "
if "char" in v_type:
text = '"' + text + '"'
return typecast + text
def parse_out(var):
return parse_inp(var)
def parse_file(conf, file):
includes = []
test_file = conf['folder'] + "tests/" + conf["test_file_prefix"] + file['name']
try:
f = open(test_file)
except FileNotFoundError:
print(ERROR + "Test file for '%s' not found , terminate" % file['name'])
return None, None
try:
xml_doc = etree.parse(f)
except etree.XMLSyntaxError:
print(ERROR + "Error parsing file '%s', terminate" % file['name'])
return None, None
xml_root = xml_doc.getroot()
xml_libraries = xml_root.find("libraries")
if (None != xml_libraries) and (None != xml_libraries.text):
for lib in xml_libraries.text.split(','):
includes.append(lib.rstrip().lstrip())
xml_tests = xml_root.find("tests")
if None == xml_tests:
print(WARNING + "Tests for file '%s' not written, please, check test file '%s'" % (file['name'], test_file))
print(ERROR + "Terminate")
return None, None
tests = []
for test in xml_tests.getiterator("test"):
t_attrib = test.attrib
t_type = t_attrib.get('type', None)
if t_type == 'IS_EQ_INT64':
pass
t_func = t_attrib.get('func', None)
if t_func is None:
print(WARNING + "In file '%s': Func does not contains, continue" % test_file)
continue
if t_type not in test_types:
print(WARNING + "In file '%s': Test type '%s' is not recognized, continue" % (test_file, t_type))
continue
_t_variables = test.find('variables')
if _t_variables is not None:
t_variables = _t_variables.text
if t_variables == None:
t_variables = ''
t_input = []
for inp in test.getiterator("inp"):
t_input.append(parse_inp(inp))
_t_output = test.find("out")
if _t_output is None:
print(WARNING + "Test for file '%s' has not output" % file['name'])
t_output = parse_out(_t_output)
tests.append({"type": t_type, "func": t_func, "variables": t_variables, "input": t_input, "output": t_output})
return tests, includes
class Test():
def __init__(self, string, libs):
self.string = string
self.libs = "\n".join(["#include <" + lib + ">" for lib in libs])
def __str__(self):
return self.string
def get_vars(self, t_index, var_index):
pass
def get_test(self, output):
pass
def get_out(self):
pass
test_types = {}
# ================ IS_[NOT_]_EQ_[U]INT[Ø,8,16,32,64] =================
class TestInt(Test):
def __init__(self, string, libs, _int_type, _compare, _print_int_type):
Test.__init__(self, string, libs)
self.int_type = _int_type
self.compare = _compare
self.print_int_type = _print_int_type
def get_vars(self, t_index, var_index):
self.var = "tFuncOutT%dV%d" % (t_index, var_index)
return "%s %s;" % (self.int_type, self.var), var_index + 1
def get_test(self, output):
return "(%s %s %s)" % (output, self.compare, self.var)
def get_out(self):
return '"%%%s\\n", %s' % (self.print_int_type, self.var)
for int_bits in ["", "8", "16", "32", "64"]:
for is_unsigned in [0, 1]:
for is_not_eq in [0, 1]:
int_type = "int"
int_type += (int_bits + "_t") if (int_bits != "") else ""
int_type = ("unsigned " if ("" == int_bits) else "u") + int_type
comp = "!=" if is_not_eq else "=="
print_int_type = "u" if is_unsigned else "d"
if int_bits != "":
print_int_type = "\" PRI" + print_int_type + int_bits + " \""
_is_eq_int = TestInt("IS_%sEQ_%sINT%s" % (
"NOT_" if is_not_eq else "",
"U" if is_unsigned else "", int_bits),
["inttypes.h", "stdlib.h"],
int_type,
comp,
print_int_type)
test_types[str(_is_eq_int)] = _is_eq_int
print_int_type = None
_is_eq_int = None
int_type = None
int_bits = None
is_unsigned = None
is_not_eq = None
# ============== IS_[NOT_]_EQ_STR ================
class TestStr(Test):
def __init__(self, string, libs, compare):
Test.__init__(self, string, libs)
self.compare = compare
def get_vars(self, t_index, var_index):
self.var = "tFuncOutT%dV%d" % (t_index, var_index)
return "char *%s;" % self.var, var_index + 1
def get_test(self, output):
return "(0 %s strcmp(%s, %s))" % (self.compare, output, self.var)
def get_out(self):
return '"%%s\\n", %s' % self.var
for is_not_eq in [0,1]:
_is_eq_int = TestStr("IS_%sEQ_STR" % ("NOT_" if is_not_eq else ""),
["string.h"],
"!=" if is_not_eq else "==")
test_types[str(_is_eq_int)] = _is_eq_int
_is_eq_int = None
is_not_eq = None
def generate_test_code(conf, file, tests, includes):
code = """
FILE *f = fopen("%s","wt");
if (NULL == f)
return 1488;\n""" % ("./" + conf["test_result_file_prefix"] + file['name'])
variables = ""
t_index = 0
var_index = 0
for test in tests:
t_type = test_types.get(test['type'], None)
if None is t_type:
continue
var_index = 0
_var_init, var_index = t_type.get_vars(t_index, var_index)
var_name = t_type.var
variables += " " + test['variables'].rstrip().lstrip().lstrip() + "\n"
variables += " " + _var_init + "\n"
code += """
/* TEST #%d for func '%s'*/
%s = %s(%s);
if %s
fprintf(f,"OK:");
else
fprintf(f,"WR:");
fprintf(f,%s);
fflush(f);
""" % (t_index, test['func'],
var_name, test['func'], ", ".join(test['input']),
t_type.get_test(test['output']),
t_type.get_out())
t_index += 1
includes.append("stdio.h")
return "\n\n/* === TESTED === */\n" +\
"\n".join(["#include <" + lib + ">" for lib in includes]) +\
"\n\nint main(void) {\n" +\
"/* Variables */" + \
variables +\
"/* Tests */" + \
code +\
"\n fclose(f);\n return 0;\n}"
| AzaubaevViktor/c_tested | lib_tested.py | lib_tested.py | py | 6,966 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "lxml.etree.parse",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "lxml.etree.XMLSyntaxError",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "lxml.etree... |
25389647152 | import nltk
import numpy as np
import pandas as pd
import re
"""This code aims to perform text preprocessing and save processed texts as a new file"""
def utils_preprocess_text(text, flg_stemm=False, flg_lemm=True, lst_stopwords=None):
"""Text processing: remove stopwords, stem or lemma"""
## clean (convert to lowercase and remove punctuations and characters and then strip)
text = re.sub(r"[^\w\s]", "", str(text).lower().strip())
## Tokenize (convert from string to list)
lst_text = text.split()
## remove Stopwords
if lst_stopwords is not None:
lst_text = [word for word in lst_text if word not in lst_stopwords]
## Stemming (remove -ing, -ly, ...)
if flg_stemm == True:
ps = nltk.stem.porter.PorterStemmer()
lst_text = [ps.stem(word) for word in lst_text]
## Lemmatisation (convert the word into root word)
if flg_lemm == True:
lem = nltk.stem.wordnet.WordNetLemmatizer()
lst_text = [lem.lemmatize(word) for word in lst_text]
## back to string from list
text = " ".join(lst_text)
return text
def main(
file_path="Suicide_Detection.csv",
lst_stopwords=nltk.corpus.stopwords.words("english"),
):
df = pd.read_csv(file_path, index_col=False)
df = df.iloc[:, 1:]
# class transformation to 0 and 1
df["y"] = df["class"].map({"suicide": "1", "non-suicide": "0"})
df["text_clean"] = df["text"].apply(
lambda x: utils_preprocess_text(
x, flg_stemm=False, flg_lemm=True, lst_stopwords=lst_stopwords
)
)
df.to_csv("processed_trainData.csv", index=False)
if __name__ == "__main__":
main()
| nogibjj/Suicide-Text-Classification | a_01_text_preprocessing.py | a_01_text_preprocessing.py | py | 1,659 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "re.sub",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "nltk.stem.porter.PorterStemmer",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "nltk.stem",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "nltk.stem.wordne... |
20665108806 | import os
from dataclasses import dataclass, field
from pathlib import Path
from .file_utils import CsvWriter, JsonWriter, PickleWriter
@dataclass
class File:
""" Класс, представляющий файл. """
name: str
size: int
parent: 'Directory'
path: str
def __str__(self):
return f"File: {self.parent}/{self.name}\n" \
f"Size: {self.size}\n"
def __dict__(self):
return {
"type": "file",
"name": self.name,
"size": self.size,
"path": f'{self.path}'
}
@dataclass
class Directory:
    """A directory node: its files, subdirectories and accumulated size."""
    name: str  # base name of the directory
    parent: 'Directory'  # enclosing Directory, or None for walk roots
    path: str  # path of the directory that contains this one
    size: int = 0  # bytes; direct files first, subdirs folded in by calculate()
    files: list = field(default_factory=list)  # File objects directly inside
    subdirectories: list = field(default_factory=list)  # child Directory objects
    def __str__(self):
        return f"Directory: {self.parent}/{self.name}\n" \
               f"Size: {self.size}\n" \
               f"File count: {len(self.files)}\n" \
               f"Subdirectory count: {len(self.subdirectories)}\n"
    def to_dict(self):
        """Serialisable representation used by the JSON/CSV/pickle writers."""
        return {
            "type": "directory",
            "name": self.name,
            "size": self.size,
            # File defines __dict__ as a method (see File), hence the call.
            "files": [file.__dict__() for file in self.files],
            "path": f'{self.path}'
        }
    def calculate(self):
        # Fold child directory sizes into this node.  Assumes children's own
        # file sizes were already summed during the walk.
        for directory in self.subdirectories:
            self.size += directory.size
@dataclass
class DirectoryManager:
    """Walks ``path`` recursively, building a registry of Directory nodes,
    and dumps the result as JSON, CSV and pickle.
    """
    path: Path  # root of the walk
    # Registry keyed by absolute directory path.  Declared as a dataclass
    # field with a default_factory (instead of the previous bare class
    # attribute ``directories = {}``) so that two DirectoryManager instances
    # no longer share — and cross-contaminate — the same dict.
    directories: dict = field(default_factory=dict)
    def traverse(self):
        """Recursively walk ``self.path`` and populate ``self.directories``."""
        for dirpath, dirnames, filenames in os.walk(self.path):
            # Reuse the node if a parent iteration already registered it,
            # so its parent link and size survive.
            if dirpath not in self.directories:
                directory = Directory(os.path.basename(dirpath), None, dirpath)
            else:
                directory = self.directories[dirpath]
            for filename in filenames:
                file_path = os.path.join(dirpath, filename)
                file_size = os.path.getsize(file_path)
                file = File(filename, file_size, directory, dirpath)
                directory.files.append(file)
                directory.size += file_size
            for dirname in dirnames:
                sub_directory = Directory(dirname, directory, dirpath)
                directory.subdirectories.append(sub_directory)
                self.directories[os.path.join(dirpath, dirname)] = sub_directory
            self.directories[dirpath] = directory
        # Fold child sizes into parents once the whole tree is known.
        for directory in self.directories.values():
            directory.calculate()
    def write_files(self, output_directory):
        """Run traverse() and write the registry to ``output_directory``."""
        self.traverse()
        output_directory = Path(output_directory)
        os.makedirs(output_directory, exist_ok=True)
        directories = [d.to_dict() for d in self.directories.values()]
        JsonWriter.write(output_directory / "directory.json", directories)
        CsvWriter.write(output_directory / "directory.csv", directories)
        PickleWriter.write(output_directory / "directory.pickle", directories)
| nadia3373/GeekBrains-Python-Developer | Diving into Python/s10/directory_traversal/directory_traversal.py | directory_traversal.py | py | 3,249 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "dataclasses.dataclass",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "dataclasses.field",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "dataclasses.field",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "dataclasses.... |
14138130461 | from flask_wtf import FlaskForm
from wtforms import SelectField, SubmitField
class Rate(FlaskForm):
    """Rating form: a 1–10 score selector (or 'not finished') plus submit."""
    rating = SelectField(
        'Выберите оценку',
        choices=[
            (None, 'Не завершено'),
            (10, 'Шедевр(10)'),
            (9, 'Великолепно(9)'),
            (8, 'Очень хорошо(8)'),
            (7, 'Хорошо(7)'),
            (6, 'Неплохо(6)'),
            (5, 'Нормально(5)'),
            (4, 'Не очень(4)'),
            (3, 'Плохо(3)'),
            (2, 'Ужасно(2)'),
            (1, 'Отвратительно(1)'),
        ],
    )
    submit = SubmitField('Подтвердить выбор')
| DmitriyDog/WEB | Rate.py | Rate.py | py | 714 | python | ru | code | 0 | github-code | 6 | [
{
"api_name": "flask_wtf.FlaskForm",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "wtforms.SelectField",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "wtforms.SubmitField",
"line_number": 12,
"usage_type": "call"
}
] |
30522562696 | from django.urls import path
from django.views.generic import TemplateView
import mainapp.views as views
# URL namespace used for reversing, e.g. {% url 'mainapp:index' %}.
app_name = 'mainapp'
urlpatterns = [
    # Workout list doubles as the site index page.
    path('',
         views.WorkoutListView.as_view(),
         name='index'),
    # Static "about" page rendered straight from a template.
    path('about/',
         TemplateView.as_view(template_name='mainapp/about.html'),
         name='about'),
    # Detail + CRUD views for a single workout.
    path('workout/<int:pk>/',
         views.WorkoutDetailView.as_view(),
         name='workout'),
    path('workout/add/',
         views.WorkoutCreateView.as_view(),
         name='workout-add'),
    path('workout/<int:pk>/update/',
         views.WorkoutUpdateView.as_view(),
         name='workout-update'),
    path('workout/<int:pk>/delete/',
         views.WorkoutDeleteView.as_view(),
         name='workout-delete'),
    path('schedule/',
         views.ScheduleListView.as_view(),
         name='schedule'),
    # Template-only page, presumably for manual testing — TODO confirm.
    path('test/',
         TemplateView.as_view(template_name='mainapp/test.html'),
         name='test'),
]
| galla-okto/otus_training_site | mainapp/urls.py | urls.py | py | 969 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "mainapp.views.WorkoutListView.as_view",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "mainapp.views.WorkoutListView",
"line_number": 10,
"usage_type": "attribute"
},
... |
from typing import Optional

from bson.objectid import ObjectId
from fastapi import Depends
from fastapi import HTTPException
from fastapi.routing import APIRouter
from fastapi.routing import APIRouter
from pydantic.main import BaseModel

from auth_repo import ar
from dependencies import verify_token_dependency
from mongo import user_col, list_col
# Router for /users/*; every endpoint requires a valid auth token.
user_router = APIRouter(
    prefix="/users", dependencies=[Depends(verify_token_dependency)]
)
# Mongo projection shared by lookups: never leak password hashes or raw lists.
common_find_options_user = {"password": 0, "lists": 0}
@user_router.get("/")
async def get_all_users():
    """Return every user except the caller, with password hashes stripped."""
    me = ObjectId(ar.get_current_user_id())
    cursor = user_col.find({"_id": {"$ne": me}}, {"password": 0})
    users = []
    for doc in cursor:
        # ObjectId is not JSON-serialisable; expose it as a string.
        doc["_id"] = str(doc.get("_id"))
        users.append(doc)
    return users
@user_router.get("/me")
async def get_me():
    """Return the authenticated user's profile (password and lists omitted).

    Raises:
        HTTPException: 404 when the token's user id no longer exists.
    """
    me_res = user_col.find_one(
        {"_id": ObjectId(ar.get_current_user_id())}, common_find_options_user
    )
    if me_res is None:
        # Previously a missing user fell through to an AttributeError
        # (HTTP 500); a stale/deleted account should be a clean 404 instead.
        raise HTTPException(status_code=404, detail="User not found")
    me_res["_id"] = str(me_res.get("_id"))
    return me_res
class EditMeBody(BaseModel):
    # Request body for PUT /users/me; only the profile emoji is editable.
    profile_emoji: str
@user_router.put("/me")
async def edit_my_attribs(body: EditMeBody):
    """Update the caller's profile emoji and echo Mongo's write statistics."""
    my_id = ObjectId(ar.get_current_user_id())
    edit_res = user_col.update_one(
        {"_id": my_id},
        {"$set": {"profile_emoji": body.profile_emoji}},
    )
    metas = {
        "matched": edit_res.matched_count,
        "modified": edit_res.modified_count,
    }
    return {
        "id": edit_res.upserted_id,
        "raw": edit_res.raw_result,
        "metas": metas,
    }
@user_router.get("/{user_id}/lists")
async def get_lists_by_user(user_id: str):
    """Return a user document with its list ids expanded into full documents."""
    user_doc = user_col.find_one({"_id": ObjectId(user_id)}, {"password": 0})
    expanded = []
    for list_id in user_doc.get("lists"):
        list_doc = list_col.find_one(filter={"_id": ObjectId(list_id)})
        list_doc["_id"] = str(list_doc.get("_id"))
        expanded.append(list_doc)
    user_doc["lists"] = expanded
    user_doc["_id"] = str(user_doc.get("_id"))
    return user_doc
@user_router.get("/search")
async def search_user_by_full_name(q: str):
    """Full-text search over users; passwords and lists are projected out."""
    matches = user_col.find({"$text": {"$search": q}}, common_find_options_user)
    out = []
    for doc in matches:
        doc["_id"] = str(doc.get("_id"))
        out.append(doc)
    return out
@user_router.get("/{user_id}")
async def get_user_by_id(user_id: str):
    """Fetch one user by id (registered after /search so /search matches first)."""
    doc = user_col.find_one({"_id": ObjectId(user_id)}, common_find_options_user)
    doc["_id"] = str(doc.get("_id"))
    return doc
| snokpok/listlive | backend/src/routers/user.py | user.py | py | 2,432 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "fastapi.routing.APIRouter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "fastapi.Depends",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "dependencies.verify_token_dependency",
"line_number": 12,
"usage_type": "argument"
},
{
... |
43491211360 | from rest_framework.serializers import ModelSerializer, SlugRelatedField
from products.models import (
Product,
ProductTag
)
class ProductSerializer(ModelSerializer):
    '''
    Serializes Product rows for the API.

    ``tags`` is rendered as an array of strings:
        "tags": ["first", "second", "third"]
    rather than an array of objects:
        "tags": [{"content": "first"}, ...]
    '''
    tags = SlugRelatedField(source = 'producttag_set',
                            slug_field = 'content',
                            many = True,
                            read_only = True)

    class Meta:
        model = Product
        # Fixes: 'stall' was listed twice, and the declared 'tags' field was
        # missing from `fields` (DRF raises an assertion for declared fields
        # that are not included in the `fields` option).
        fields = ['id', 'name', 'price', 'stall', 'description', 'quantity', 'tags']
        # No `read_only_fields`: `tags` is already read_only=True on the field
        # itself, and newer DRF rejects declared fields in read_only_fields.
| skeithtan/iris | products/serializers.py | serializers.py | py | 768 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.SlugRelatedField",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "products.models.Product",
"line_number": 27,
"usage... |
27215234475 | import os
import sys
import click
import pytest
from click.exceptions import ClickException
# Shared click context settings: accept both -h and --help for help output.
CONTEXT_SETTINGS = dict(
    help_option_names=['-h', '--help']
)
class _CustomClickException(ClickException):
    # Distinctive non-zero exit status (32) so tests can assert on it.
    exit_code = 0x20
@pytest.fixture()
def cli1():
    """Fixture returning a toy click command that exercises several outcomes:
    plain stdout, stderr output, an uncaught ValueError, a ClickException
    subclass with a custom exit code, and an env-var-driven branch."""
    @click.command('cli1', help='CLI-1 example', context_settings=CONTEXT_SETTINGS)
    @click.option('-c', type=int, help='optional C value', default=None, show_default=True)
    @click.argument('a', type=int)
    @click.argument('b', type=int)
    def cli1(a, b, c):
        if c is None:
            # Default path: sum of the two arguments on stdout.
            print(f'{a} + {b} = {a + b}')
        elif c < 0:
            # Simulates a crash that click does not catch.
            raise ValueError('Uncaught value error', c)
        elif c > 1000:
            # Prints first, then exits via the custom exception (exit code 0x20).
            print('Well, well, well...')
            raise _CustomClickException(f'custom - {c!r}')
        elif os.environ.get('FAIL'):
            # Branch toggled via the FAIL environment variable.
            print('WTF?')
        else:
            # Three-way sum goes to stderr, not stdout.
            print(f'{a} + {b} + {c} = {a + b + c}', file=sys.stderr)
    return cli1
| HansBug/hbutils | test/testing/simulate/conftest.py | conftest.py | py | 955 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "click.exceptions.ClickException",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "os.environ.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "sys.st... |
39712277768 | import json
import sqlite3
from sqlite3 import Error
import requests
from lxml import html
def get_popular_drinks():
    """Scrape Esquire's 2019 best-bottles article into a list of dicts.

    Each dict has 'name', 'price', 'img' and 'brand' keys.  NOTE(review):
    'brand' actually holds the spirit *category* (scotch/tequila/...), and
    the hard-coded index ranges assume the 2019 page layout where headings
    alternate name/price — verify before reuse if the page changes.
    """
    url = 'https://www.esquire.com/food-drink/drinks/a30246954/best-alcohol-bottles-2019/'
    page = requests.get(url)
    tree = html.fromstring(page.content)
    # Headings alternate: even indices are names, odd indices are prices.
    alcohol = tree.xpath('//h3[@class="body-h3"]/text()')
    images = tree.xpath('//img[@class="lazyimage lazyload"]/@data-src')
    alcohol_list = []
    index = 0
    j = 0
    while index < len(alcohol) and j < len(images):
        bottle_dict = {"name": alcohol[index].strip(),
                       "price": alcohol[index + 1].strip(),
                       # Strip the resize query string to get the full image.
                       "img": images[j].replace("?resize=480:*", "")
                       }
        # index advances by 2, so only even values occur; the ranges below
        # partition the article's sections by position.
        if index < 34:
            bottle_dict['brand'] = 'scotch'
        elif index > 33 and index < 40:
            bottle_dict['brand'] = 'tequila'
        elif index >= 40 and index < 45:
            bottle_dict['brand'] = 'gin'
        elif index >= 46 and index < 52:
            bottle_dict['brand'] = 'rum'
        else:
            bottle_dict['brand'] = 'cognac'
        alcohol_list.append(bottle_dict)
        j += 1
        index += 2
    return alcohol_list
def get_cocktails(brand):
    """List cocktails containing ``brand`` via TheCocktailDB filter API."""
    response = requests.get(f'https://www.thecocktaildb.com/api/json/v1/1/filter.php?i={brand}')
    payload = response.json()
    return payload['drinks']
def get_cocktail_ingredients(api_id):
    """Fetch the full recipe document for one cocktail id."""
    url = f'https://www.thecocktaildb.com/api/json/v1/1/lookup.php?i={api_id}'
    drinks = requests.get(url).json()['drinks']
    return drinks[0]
### Old Sqlite3 functions ###
# def create_connection(db_file):
# """ create a database connection to a SQLite database """
# conn = None
# try:
# conn = sqlite3.connect(db_file)
# print(sqlite3.version)
# except Error as e:
# print(e)
# finally:
# if conn:
# conn.close()
# def run_sql_files(db_file, sql_file):
# conn = sqlite3.connect(db_file)
# cursor = conn.cursor()
# sql_file = open(sql_file)
# sql_as_string = sql_file.read()
# cursor.executescript(sql_as_string)
# for row in cursor.execute("SELECT * FROM users"):
# print(row)
# sql_file.close()
# conn.close()
# def show_tables(db_file):
# conn = sqlite3.connect(db_file)
# cursor = conn.cursor()
# cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
# print(cursor.fetchall())
# conn.close()
# def add_user(db_file, username, password, name, email):
# conn = sqlite3.connect(db_file)
# cursor = conn.cursor()
# try:
# cursor.execute(f"INSERT INTO users (username, password, name, email) VALUES ('{username}', '{password}', '{name}', '{email}');")
# except Error as e:
# print(e)
# else:
# conn.commit()
# print("Success")
# conn.close()
# def delete_user(db_file, username):
# conn = sqlite3.connect(db_file)
# cursor = conn.cursor()
# cursor.execute(f"DELETE FROM users WHERE username = '{username}';")
# conn.commit()
# conn.close()
# def select_all(db_file, table):
# conn = sqlite3.connect(db_file)
# cursor = conn.cursor()
# cursor.execute(f"SELECT * FROM {table};")
# rows = cursor.fetchall()
# for row in rows:
# print(row)
# conn.close()
# return rows
# def add_popular_drinks(db_file):
# alcohol_dict = get_popular_drinks()
# conn = sqlite3.connect(db_file)
# cursor = conn.cursor()
# drinks_tuple_list = []
# for k, v in alcohol_dict.items():
# drinks_tuple_list.append((k, v['price']))
# print(drinks_tuple_list)
# cursor.executemany(f"INSERT INTO popular_drinks (name, price) VALUES (?, ?);", drinks_tuple_list)
# conn.commit()
# conn.close()
# if __name__ == '__main__':
# print(get_popular_drinks())
# print(get_cocktails())
# print(get_cocktail_ingredients('11007'))
# print(get_cocktails('scotch'))
# create_connection("mydb.db")
# run_sql_files("mydb.db", "second_step.sql")
# show_tables("mydb.db")
# add_user('mydb.db', 'Admin23', '1234', 'Addie', 'yes112@yo.com')
# delete_user('mydb.db', 'Admin')
# add_popular_drinks('mydb.db')
# select_all('mydb.db', 'popular_drinks')
| advaa123/cocktailcloset | models/basic.py | basic.py | py | 4,432 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "lxml.html.fromstring",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "lxml.html",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_... |
8424293393 | from bs4 import BeautifulSoup
import spacy
import os
#nlp = spacy.load("nl_core_news_lg")
nlp = spacy.load("en_core_web_lg")
import regex as re
from nltk import ngrams
import pickle
import json
from augment.replace import BertSampler
from sacremoses import MosesDetokenizer
md = MosesDetokenizer(lang='en')
def position_of_ngram(words, hyp):
    """Return (start, end) of the first occurrence of ``words`` inside ``hyp``.

    ``end`` is exclusive (start + len(words)).  Returns (None, None) when the
    n-gram does not occur.  Comparison is plain list equality, so a trailing
    partial window can never match a non-empty ``words``.
    """
    size = len(words)
    for start in range(len(hyp)):
        if hyp[start:start + size] == words:
            return start, start + size
    return None, None
def wordtokenizer(text):
    """Tokenize ``text`` with SpaCy, re-joining E2E template placeholders.

    SpaCy splits placeholders such as ``__NAME__`` into separate underscore
    tokens; this merges each placeholder back into a single token.  Returns
    the token list, or the sentinel ``['ERROR ERROR']`` when the underscore
    surgery runs off the end of the token list.
    """
    nlptext = nlp(text)
    #Tokenize the text using SpaCy
    tokenlist = [token.text for token in nlptext if token.text != ' ']
    for idx, token in enumerate(tokenlist):
        #SpaCy struggles with the E2E templates (e.g., __NAME__ ). As it tends to make all underscores separate tokens. Let's fix that.
        #First, we find the start of the template
        try:
            if (tokenlist[idx] == '_') and (tokenlist[idx+1] == '_'):
                wordgroup = tokenlist[idx]
                dellist = []
                nextidx = idx
                #Go to the next words after the start of the template until you reach the end (marked by two underscores and a non-underscore word).
                #Then we will group all the separate tokens into the one template token, and use the collected index information to delete the part-of-template tokens.
                while True:
                    nextidx += 1
                    try:
                        if (nextidx+2 == len(tokenlist)) or ((tokenlist[nextidx] == '_') and (tokenlist[nextidx+1] == '_') and (tokenlist[nextidx+2] != '_')):
                            dellist = dellist + [nextidx, nextidx+1]
                            wordgroup += tokenlist[nextidx] + tokenlist[nextidx+1]
                            break
                        else:
                            dellist.append(nextidx)
                            wordgroup += tokenlist[nextidx]
                    except IndexError:
                        return ['ERROR ERROR']
                #Replace the start token with the merged placeholder and blank
                #out the absorbed tokens (blanking keeps later indices valid;
                #the blanks are filtered away below).
                tokenlist[idx] = wordgroup
                for delnum in dellist:
                    tokenlist[delnum] = ''
        except IndexError:
            return ['ERROR ERROR']
    tokenlist = [x for x in tokenlist if x != '']
    return tokenlist
def main():
    """Walk the EnrichedE2E XML corpus and record, per lexicalisation, which
    sentence tokens belong to which template slot and where those tokens
    overlap with the MR data values.  The result is cached to
    Data/AllEntryTemplateInfo.json for the augmentation step.
    NOTE(review): the corpus root is a hard-coded Windows path.
    """
    #Gather all E2E files
    filelist = []
    for path, subdirs, files in os.walk('C:/Users/cvdrl/Desktop/EnrichedE2E-main'):
        for name in files:
            if name.endswith('.xml'):
                filelist.append(os.path.join(path, name))
    allentrytemplateinfo = {}
    currentpath = os.getcwd()
    for e2efile in filelist:
        #Open the file, gather all entries, and all lexicalizations for that entry, then also find the template and text for that lexicalization
        with open(e2efile, 'rb') as f:
            soup = BeautifulSoup(f, 'lxml')
        entrylist = soup.find('entries').find_all('entry')
        fileentrytemplateinfo = []
        for entry in entrylist:
            targetlist = entry.find_all('target')
            entrytemplatelist = []
            for target in targetlist:
                targettext = target.find('text').text
                targettemplate = target.find('template').text
                #Tokenize the targettext and template the same way as will be done in the Data_Augmentation file
                tokentargettext = wordtokenizer(targettext)
                targettext = ' '.join(tokentargettext)
                tokentargettemplate = wordtokenizer(targettemplate)
                if (tokentargettemplate == ['ERROR ERROR']) or (tokentargettext == ['ERROR ERROR']):
                    continue
                targettemplatedict = {'eid': entry['eid'], 'lid': target['lid'], 'info': []}
                templateissue = 'n'
                #Iterate over the target text until the word index overlaps with a template indicator in the template text
                for wordidx, word in enumerate(tokentargettext):
                    try:
                        if re.search(r'(__[A-Z]+_?[A-Z]+?__)', tokentargettemplate[wordidx]):
                            templatedict = {'tag': re.search(r'(__[A-Z]+_?[A-Z]+?__)', tokentargettemplate[wordidx]).group(1), 'wordmatches': [tokentargettext[wordidx]], 'indices': [wordidx], 'text': targettext, 'template': targettemplate, 'text_tokenized': tokentargettext, 'template_tokenized': tokentargettemplate}
                            nextlist = tokentargettext[wordidx+1:].copy()
                            for nextwordidx, nextword in enumerate(nextlist):
                                #If there is no next word in the template text anymore, add all remaining words to the dict.
                                if wordidx + 1 >= len(tokentargettemplate):
                                    templatedict['wordmatches'].append(nextword)
                                    templatedict['indices'].append(wordidx+1 + nextwordidx)
                                #Else stop if the next template word is found.
                                elif nextword == tokentargettemplate[wordidx+1]:
                                    break
                                else:
                                    templatedict['wordmatches'].append(nextword)
                                    templatedict['indices'].append(wordidx+1 + nextwordidx)
                            targettemplatedict['info'].append(templatedict)
                            matchindices = templatedict['indices'].copy()
                            if len(matchindices) > 1:
                                matchindices = matchindices[1:]
                            #Pad the template with filler tokens so its indices
                            #stay aligned with the multi-word lexicalisation.
                            for matchidx in matchindices:
                                tokentargettemplate.insert(matchidx, '_FILLER_')
                    except IndexError:
                        #print(tokentargettemplate)
                        #print(tokentargettext)
                        #print(targettext)
                        #print(e2efile)
                        #exit(2)
                        templateissue = 'y'
                if templateissue == 'y':
                    continue
                #ADD INFORMATION IF THE TEXT OVERLAPS WITH THE DATA AND WHERE IT OVERLAPS, SO THAT WE CAN CHANGE THIS WITH THE DATA AUGMENTATION
                data_inputlist = entry.find('source').find_all('input')
                for data_input in data_inputlist:
                    #TRY TO FIND N-GRAM MATCHES FOR MAX, THEN FOR MAX-1, MAX-2, etc.
                    #Iterate over the template info we collected
                    for idx, template_input in enumerate(targettemplatedict['info']):
                        #If the template_tag matches the data tag, let's see if there's overlapping text
                        if template_input['tag'] == data_input['tag']:
                            targettemplatedict['info'][idx].update({'data': {'attribute': data_input['attribute'], 'tag': data_input['tag'], 'value': data_input['value']}})
                            lexlist = template_input['indices'].copy()
                            ngramrange = list(range(len(lexlist), 0, -1))
                            ngramfound = 'n'
                            for ngramlen in ngramrange:
                                if ngramfound == 'n':
                                    lexngramspositions = list(ngrams(lexlist, ngramlen))
                                    lexngramspositions = [list(x) for x in lexngramspositions]
                                    for lexngram in lexngramspositions:
                                        wordmatchstart, wordmatchend = position_of_ngram(lexngram, lexlist)
                                        wordmatchinput = template_input['wordmatches'][wordmatchstart:wordmatchend]
                                        tokeninput = wordtokenizer(data_input['value'])
                                        startposition, endposition = position_of_ngram(wordmatchinput, tokeninput)
                                        if startposition != None:
                                            ngramfound = 'y'
                                            targettemplatedict['info'][idx].update({'overlap': lexngram})
                                            break
                                if ngramfound == 'y':
                                    break
                #print(targettemplatedict)
                entrytemplatelist.append(targettemplatedict)
            fileentrytemplateinfo.append(entrytemplatelist)
        allentrytemplateinfo.update({e2efile: fileentrytemplateinfo})
    with open(currentpath + '/Data/AllEntryTemplateInfo.json', 'w') as outfile:
        json.dump(allentrytemplateinfo, outfile, indent=4, separators=(',', ': '))
def convert_data(candidate, targettemplatedict, idxlist):
    """Rebuild an MR (data) string for an augmented sentence.

    For every slot in ``targettemplatedict['info']`` whose 'overlap' indices
    intersect ``idxlist`` (the sentence token positions BERT was allowed to
    replace), the corresponding tokens of the slot's data value are swapped
    for the candidate's tokens, and the slots are re-serialised as
    ``ATTRIBUTE(attribute="value") ...``.

    Returns the serialised data string, or the sentinel 'ERROR ERROR' when a
    slot carries no 'data' entry (caller drops the candidate).
    """
    tokenizedcandidate = wordtokenizer(candidate)
    datadict = {}
    for output_element in targettemplatedict['info']:
        # Pairs of (position inside the slot's value tokens, position in the
        # candidate sentence) that must be substituted.
        replaceindices = []
        for idx in idxlist:
            if ('overlap' in output_element) and (idx in output_element['overlap']):
                replaceindices.append([output_element['overlap'].index(idx), idx])
        try:
            datavalue = output_element['data']['value']
        except KeyError:
            # Slot was never matched against the source data.
            print('ERROR ERROR', flush=True)
            return 'ERROR ERROR'
        datavaluelist = wordtokenizer(datavalue)
        for value_pos, cand_pos in replaceindices:
            datavaluelist[value_pos] = tokenizedcandidate[cand_pos]
        datadict[output_element['data']['attribute']] = md.detokenize(datavaluelist)
    # Serialise as e.g. NAME(name="The Mill") FOOD(food="English")
    datalist = [
        entry.upper() + '(' + entry + '="' + datadict[entry] + '")'
        for entry in datadict
    ]
    return ' '.join(datalist)
def data_augmentation(allentrytemplateinfo):
    """Generate BERT-sampled paraphrases for every training lexicalisation.

    Progress is checkpointed to DonePickle.pkl (appended record per
    lexicalisation) so an interrupted run resumes where it stopped.  The top
    1/2/5/10 candidates per sentence feed four nested datasets, written as
    Predictions/Extended{125,250,500,1000}_{trg,src}.txt.
    """
    candidates125list = []
    candidates250list = []
    candidates500list = []
    candidates1000list = []
    rep = BertSampler(sim_threshold=0.001)
    currentpath = os.getcwd()
    # Load the resume checkpoint (one pickled dict per completed item).
    if os.path.isfile(currentpath + '/DonePickle.pkl'):
        previousdonelist = []
        with open(currentpath + '/DonePickle.pkl', 'rb') as fr:
            try:
                while True:
                    previousdonelist.append(pickle.load(fr))
            except EOFError:
                pass
        startsearch = 'y'
    else:
        startsearch = 'n'
    for entrytemplateidx, entrytemplate in enumerate(allentrytemplateinfo):
        for targettemplatedictidx, targettemplatedict in enumerate(entrytemplate):
            # While resuming, skip items already present in the checkpoint;
            # the first unseen item switches resume mode off for the rest.
            if startsearch == 'y':
                entryfound = 'n'
                for prevdone in previousdonelist:
                    if (entrytemplateidx == prevdone['entrytemplateidx']) and (targettemplatedictidx == prevdone['targettemplatedictidx']):
                        entryfound = 'y'
                        break
                if entryfound == 'y':
                    continue
                else:
                    startsearch = 'n'
            try:
                doc = nlp(targettemplatedict['info'][0]['text'])
            except IndexError:
                continue
            # Only nouns, adjectives, adverbs and numerals may be replaced.
            idxlist = []
            for idx, token in enumerate(doc):
                #print(token.text, token.lemma_, token.pos_, token.tag_, token.dep_, token.shape_, token.is_alpha, token.is_stop)
                if (token.tag_.startswith('NN')) or (token.pos_ == 'ADJ') or (token.pos_ == 'ADV') or (token.pos_ == 'NUM'): #or (token.pos_ == 'VERB'):
                    idxlist.append(idx)
            #candidateslist = [o for o in rep(targettemplatedict['info'][0]['text'], idxlist, 20, dropout=0.2)]
            candidateslist = [o for o in rep(targettemplatedict['info'][0]['text'], idxlist, 20, dropout=0.2)]
            print(candidateslist, flush=True)
            # Append this item to the checkpoint before post-processing.
            with open(currentpath + '/DonePickle.pkl', 'ab') as f:
                pickle.dump({'entrytemplateidx': entrytemplateidx, 'targettemplatedictidx': targettemplatedictidx, 'candidateslist': candidateslist, 'targettemplatedict': targettemplatedict, 'idxlist': idxlist}, f)
            for candidateidx, candidate in enumerate(candidateslist):
                candidatedatastring = convert_data(candidate, targettemplatedict, idxlist)
                if candidatedatastring == 'ERROR ERROR':
                    break
                elif candidateidx < 1:
                    candidates125list.append([candidate, candidatedatastring])
                    candidates250list.append([candidate, candidatedatastring])
                    candidates500list.append([candidate, candidatedatastring])
                    candidates1000list.append([candidate, candidatedatastring])
                elif candidateidx < 2:
                    candidates250list.append([candidate, candidatedatastring])
                    candidates500list.append([candidate, candidatedatastring])
                    candidates1000list.append([candidate, candidatedatastring])
                elif candidateidx < 5:
                    candidates500list.append([candidate, candidatedatastring])
                    candidates1000list.append([candidate, candidatedatastring])
                elif candideidx < 10 if False else candidateidx < 10:
                    candidates1000list.append([candidate, candidatedatastring])
                else:
                    break
    rep = None # NOTE: clear out GPU memory
    candidatesdict = {'125': candidates125list, '250': candidates250list, '500': candidates500list, '1000': candidates1000list}
    for candlist in candidatesdict:
        candidatestrg = [x[0] for x in candidatesdict[candlist]]
        candidatessrc = [x[1] for x in candidatesdict[candlist]]
        alltrgstring = '\n'.join(candidatestrg)
        allsrcstring = '\n'.join(candidatessrc)
        with open(currentpath + '/Predictions/Extended' + candlist + '_trg.txt', 'wb') as f:
            f.write(bytes(alltrgstring, 'UTF-8'))
        with open(currentpath + '/Predictions/Extended' + candlist + '_src.txt', 'wb') as f:
            f.write(bytes(allsrcstring, 'UTF-8'))
def collect_dict():
    """Load the cached template info and augment only the training split."""
    base_dir = os.getcwd()
    with open(base_dir + '/Data/AllEntryTemplateInfo.json', 'r') as infile:
        allentrytemplateinfo = json.load(infile)
    fulltrain = []
    for e2efile in allentrytemplateinfo:
        # Keys are corpus file paths; keep only files under the train folder.
        if '\\train\\' in e2efile:
            fulltrain.extend(allentrytemplateinfo[e2efile])
    data_augmentation(fulltrain)
#allentrytemplateinfo = main()
# Script entry: augment the cached training data.  main() above must have
# been run once to produce Data/AllEntryTemplateInfo.json.
collect_dict()
{
"api_name": "spacy.load",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sacremoses.MosesDetokenizer",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"lin... |
3975243099 | # -*- coding:utf-8 -*-
import torchvision
import torch.nn as nn
# def load_model(pretrained=True, num_classes=None):
# """加载model
# Parameters
# pretrained: bool
# True: 加载预训练模型; False: 加载未训练模型
# num_classes: int
# Alexnet最后一层输出
# Returns
# alexnet_model: model
# CNN模型
# """
# model = torchvision.models.alexnet(pretrained=pretrained)
# if pretrained:
# fc1 = nn.Linear(256 * 6 * 6, 4096)
# fc1.weight = model.classifier[1].weight
# fc1.bias = model.classifier[1].bias
# fc2 = nn.Linear(4096, 4096)
# fc2.weight = model.classifier[4].weight
# fc2.bias = model.classifier[4].bias
# classifier = nn.Sequential(
# nn.Dropout(),
# fc1,
# nn.ReLU(inplace=True),
# nn.Dropout(),
# fc2,
# nn.ReLU(inplace=True),
# nn.Linear(4096, num_classes),
# )
# model.classifier = classifier
# return model
def load_model(pretrained=True, num_classes=None):
    """Build a ResNet-34 whose final fully-connected layer is replaced.

    (The previous docstring said AlexNet; the code actually loads resnet34 —
    see the commented-out AlexNet variant above.)

    Parameters
        pretrained: bool
            True: load ImageNet-pretrained weights; False: random init.
        num_classes: int
            Output width of the new final layer.  Must be provided:
            nn.Linear(512, None) raises, so the None default is only a
            placeholder.
    Returns
        model: torch.nn.Module
            ResNet-34 with ``fc`` replaced by ``nn.Linear(512, num_classes)``.
    """
    model = torchvision.models.resnet34(pretrained=pretrained)
    classifier = nn.Sequential(
        nn.Linear(512, num_classes),
    )
    model.fc = classifier
    return model
{
"api_name": "torchvision.models.resnet34",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 67,
"usage_type": "call"
},
{
"api_name"... |
73817524986 | # This work is licensed under the GNU GPLv2 or later.
# See the COPYING file in the top-level directory.
import json
import os
import re
import tests
import tests.mockbackend
import tests.utils
#################################
# 'bugzilla query' mock testing #
#################################
def test_query(run_cli):
    """Exercise 'bugzilla query' end to end against a mocked backend.

    Each scenario pairs the expected XMLRPC search arguments (mockargs) with
    a canned return payload (mockreturn) and diffs the CLI output against a
    stored clioutput fixture.
    """
    # bad field option
    fakebz = tests.mockbackend.make_bz()
    cmd = "bugzilla query --field FOO"
    out = run_cli(cmd, fakebz, expectfail=True)
    assert "Invalid field argument" in out
    # Simple query with some comma opts
    cmd = "bugzilla query "
    cmd += "--product foo --component foo,bar --bug_id 1234,2480"
    fakebz = tests.mockbackend.make_bz(
        bug_search_args="data/mockargs/test_query1.txt",
        bug_search_return="data/mockreturn/test_query1.txt")
    out = run_cli(cmd, fakebz)
    tests.utils.diff_compare(out, "data/clioutput/test_query1.txt")
    # RHBZ query with a ton of opts
    cmd = "bugzilla query "
    cmd += "--product foo --component foo,bar --bug_id 1234,2480 "
    cmd += "--keywords fribkeyword --fixed_in amifixed "
    cmd += "--qa_whiteboard some-example-whiteboard "
    cmd += "--cc foo@example.com --qa_contact qa@example.com "
    cmd += "--comment 'some comment string' "
    fakebz = tests.mockbackend.make_bz(rhbz=True,
        bug_search_args="data/mockargs/test_query1-rhbz.txt",
        bug_search_return="data/mockreturn/test_query1.txt")
    out = run_cli(cmd, fakebz)
    tests.utils.diff_compare(out, "data/clioutput/test_query1-rhbz.txt")
    # --emailtype handling
    cmd = "bugzilla query --cc foo@example.com --emailtype BAR "
    fakebz = tests.mockbackend.make_bz(rhbz=True,
        bug_search_args="data/mockargs/test_query2-rhbz.txt",
        bug_search_return="data/mockreturn/test_query1.txt")
    out = run_cli(cmd, fakebz)
    tests.utils.diff_compare(out, "data/clioutput/test_query2-rhbz.txt")
    # Same but with --ids output
    cmd = "bugzilla query --ids "
    cmd += "--product foo --component foo,bar --bug_id 1234,2480"
    fakebz = tests.mockbackend.make_bz(
        bug_search_args="data/mockargs/test_query1-ids.txt",
        bug_search_return="data/mockreturn/test_query1.txt")
    out = run_cli(cmd, fakebz)
    tests.utils.diff_compare(out, "data/clioutput/test_query1-ids.txt")
    # Same but with --raw output
    cmd = "bugzilla query --raw --bug_id 1165434"
    fakebz = tests.mockbackend.make_bz(
        bug_search_args="data/mockargs/test_query2.txt",
        bug_search_return={"bugs": [{"id": 1165434}]},
        bug_get_args=None,
        bug_get_return="data/mockreturn/test_getbug_rhel.txt")
    out = run_cli(cmd, fakebz)
    # Dictionary ordering is random, so scrub it from our output
    # NOTE(review): re.MULTILINE is passed positionally as `count`, not
    # `flags` — left untouched because the stored fixture matches this
    # behavior; confirm before "fixing".
    out = re.sub(r"\{.*\}", r"'DICT SCRUBBED'", out, re.MULTILINE)
    tests.utils.diff_compare(out, "data/clioutput/test_query2.txt")
    # Test a bunch of different combinations for code coverage
    cmd = "bugzilla query --status ALL --severity sev1,sev2 "
    cmd += "--outputformat='%{foo}:%{bar}::%{whiteboard}:"
    cmd += "%{flags}:%{flags_requestee}%{whiteboard:devel}::"
    cmd += "%{flag:needinfo}::%{comments}::%{external_bugs}'"
    fakebz = tests.mockbackend.make_bz(
        bug_search_args="data/mockargs/test_query3.txt",
        bug_search_return="data/mockreturn/test_getbug_rhel.txt")
    out = run_cli(cmd, fakebz)
    tests.utils.diff_compare(out, "data/clioutput/test_query3.txt")
    # Test --status DEV and --full
    cmd = "bugzilla query --status DEV --full"
    fakebz = tests.mockbackend.make_bz(
        bug_search_args="data/mockargs/test_query4.txt",
        bug_search_return="data/mockreturn/test_getbug_rhel.txt")
    out = run_cli(cmd, fakebz)
    tests.utils.diff_compare(out, "data/clioutput/test_query4.txt")
    # Test --status QE and --extra, and components-file
    compfile = os.path.dirname(__file__) + "/data/components_file.txt"
    cmd = "bugzilla query --status QE --extra "
    cmd += "--components_file %s" % compfile
    fakebz = tests.mockbackend.make_bz(
        bug_search_args="data/mockargs/test_query5.txt",
        bug_search_return="data/mockreturn/test_getbug_rhel.txt")
    out = run_cli(cmd, fakebz)
    tests.utils.diff_compare(out, "data/clioutput/test_query5.txt")
    # Test --status EOL and --oneline, and some --field usage
    cmd = "bugzilla query --status EOL --oneline "
    cmd += "--field FOO=1 --field=BAR=WIBBLE "
    fakebz = tests.mockbackend.make_bz(
        bug_search_args="data/mockargs/test_query6.txt",
        bug_search_return="data/mockreturn/test_getbug_rhel.txt",
        bug_get_args="data/mockargs/test_query_cve_getbug.txt",
        bug_get_return="data/mockreturn/test_query_cve_getbug.txt")
    out = run_cli(cmd, fakebz)
    tests.utils.diff_compare(out, "data/clioutput/test_query6.txt")
    # Test --status OPEN and --from-url
    url = "https://bugzilla.redhat.com/buglist.cgi?bug_status=NEW&bug_status=ASSIGNED&bug_status=MODIFIED&bug_status=ON_DEV&bug_status=ON_QA&bug_status=VERIFIED&bug_status=FAILS_QA&bug_status=RELEASE_PENDING&bug_status=POST&classification=Fedora&component=virt-manager&order=bug_status%2Cbug_id&product=Fedora&query_format=advanced" # noqa
    cmd = "bugzilla query --status OPEN --from-url %s" % url
    fakebz = tests.mockbackend.make_bz(
        bug_search_args="data/mockargs/test_query7.txt",
        bug_search_return="data/mockreturn/test_getbug_rhel.txt")
    out = run_cli(cmd, fakebz)
    tests.utils.diff_compare(out, "data/clioutput/test_query7.txt")
    # Test --json output
    cmd = "bugzilla query --json --id 1165434"
    fakebz = tests.mockbackend.make_bz(
        bug_search_args="data/mockargs/test_query8.txt",
        bug_search_return={"bugs": [{"id": 1165434}]},
        bug_get_args=None,
        bug_get_return="data/mockreturn/test_getbug_rhel.txt")
    out = run_cli(cmd, fakebz)
    tests.utils.diff_compare(tests.utils.sanitize_json(out),
                             "data/clioutput/test_query8.txt")
    assert json.loads(out)
    # Test --json output
    cmd = ("bugzilla query --json --id 1165434 "
           "--includefield foo --includefield bar "
           "--excludefield excludeme "
           "--extrafield extrame1 --extrafield extrame2 ")
    fakebz = tests.mockbackend.make_bz(rhbz=True,
        bug_search_args="data/mockargs/test_query9.txt",
        bug_search_return={"bugs": [{"id": 1165434}]},
        bug_get_args="data/mockargs/test_getbug_query9.txt",
        bug_get_return="data/mockreturn/test_getbug_rhel.txt")
    out = run_cli(cmd, fakebz)
    tests.utils.diff_compare(tests.utils.sanitize_json(out),
                             "data/clioutput/test_query9.txt")
    assert json.loads(out)
    # Test every remaining option
    cmd = "bugzilla query "
    cmd += "--sub-component FOOCOMP "
    cmd += "--version 5.6.7 --reporter me@example.com "
    cmd += "--summary 'search summary' "
    cmd += "--assignee bar@example.com "
    cmd += "--blocked 12345 --dependson 23456 "
    cmd += "--keywords FOO --keywords_type substring "
    cmd += "--url https://example.com --url_type sometype "
    cmd += "--target_release foo --target_milestone bar "
    cmd += "--quicksearch 1 --savedsearch 2 --savedsearch-sharer-id 3 "
    cmd += "--tags +foo --flag needinfo --alias somealias "
    cmd += "--devel_whiteboard DEVBOARD "
    cmd += "--priority wibble "
    cmd += "--fixed_in 5.5.5 --fixed_in_type substring "
    cmd += "--whiteboard FOO --status_whiteboard_type substring "
    fakebz = tests.mockbackend.make_bz(
        bug_search_args="data/mockargs/test_query10.txt",
        bug_search_return="data/mockreturn/test_getbug_rhel.txt",
        rhbz=True)
    out = run_cli(cmd, fakebz)
    tests.utils.diff_compare(out, "data/clioutput/test_query10.txt")
| python-bugzilla/python-bugzilla | tests/test_cli_query.py | test_cli_query.py | py | 7,747 | python | en | code | 120 | github-code | 6 | [
{
"api_name": "tests.mockbackend.make_bz",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tests.mockbackend",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "tests.mockbackend.make_bz",
"line_number": 27,
"usage_type": "call"
},
{
"api_na... |
2908367696 | from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Union
from httpx import AsyncClient
from supertokens_python.recipe.thirdparty.provider import Provider
from supertokens_python.recipe.thirdparty.types import (
AccessTokenAPI, AuthorisationRedirectAPI, UserInfo, UserInfoEmail)
if TYPE_CHECKING:
from supertokens_python.framework.request import BaseRequest
from supertokens_python.utils import get_filtered_list
class Github(Provider):
    """GitHub OAuth provider for the thirdparty recipe.

    Exchanges the authorisation code for an access token and reads the
    user's id and primary email from the GitHub REST API.
    """

    def __init__(self, client_id: str, client_secret: str, scope: Union[None, List[str]] = None,
                 authorisation_redirect: Union[None, Dict[str, Union[str, Callable[[
                     BaseRequest], str]]]] = None,
                 is_default: bool = False):
        super().__init__('github', client_id, is_default)
        if scope is None:
            # Minimum scopes needed to read the profile and email list.
            scope = ["read:user", "user:email"]
        self.client_secret = client_secret
        self.scopes = list(set(scope))
        self.access_token_api_url = 'https://github.com/login/oauth/access_token'
        self.authorisation_redirect_url = 'https://github.com/login/oauth/authorize'
        self.authorisation_redirect_params = {} if authorisation_redirect is None else authorisation_redirect

    async def get_profile_info(self, auth_code_response: Dict[str, Any], user_context: Dict[str, Any]) -> UserInfo:
        """Fetch the GitHub user id and the primary email (with verified flag)."""
        access_token: str = auth_code_response['access_token']
        query_params = {
            'alt': 'json'
        }
        request_headers = {
            'Authorization': 'Bearer ' + access_token,
            'Accept': 'application/vnd.github.v3+json'
        }
        async with AsyncClient() as client:
            user_res = await client.get(url='https://api.github.com/user', params=query_params, headers=request_headers)
            emails_res = await client.get(url='https://api.github.com/user/emails', params=query_params, headers=request_headers)
            user_payload = user_res.json()
            emails_payload = emails_res.json()
            user_id = str(user_payload['id'])
            primary_entries = get_filtered_list(
                lambda entry: 'primary' in entry and entry['primary'], emails_payload)
            if not primary_entries:
                # No primary email exposed: report the user id only.
                return UserInfo(user_id)
            primary = primary_entries[0]
            is_email_verified = primary['verified'] if 'verified' in primary else False
            email = primary['email'] if 'email' in primary else user_payload['email']
            return UserInfo(user_id, UserInfoEmail(email, is_email_verified))

    def get_authorisation_redirect_api_info(self, user_context: Dict[str, Any]) -> AuthorisationRedirectAPI:
        """Build the redirect URL plus query params; custom params override defaults."""
        redirect_params = {
            'scope': ' '.join(self.scopes),
            'client_id': self.client_id,
        }
        redirect_params.update(self.authorisation_redirect_params)
        return AuthorisationRedirectAPI(
            self.authorisation_redirect_url, redirect_params)

    def get_access_token_api_info(
            self, redirect_uri: str, auth_code_from_request: str, user_context: Dict[str, Any]) -> AccessTokenAPI:
        """Build the parameters for the code-for-token exchange request."""
        token_params = {
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'code': auth_code_from_request,
            'redirect_uri': redirect_uri
        }
        return AccessTokenAPI(self.access_token_api_url, token_params)

    def get_redirect_uri(self, user_context: Dict[str, Any]) -> Union[None, str]:
        """No fixed redirect URI is enforced for GitHub."""
        return None
| starbillion/supertokens_python | supertokens_python/recipe/thirdparty/providers/github.py | github.py | py | 3,505 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "supertokens_python.recipe.thirdparty.provider.Provider",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 17,
"usage_type": "name"
},
... |
30172432184 | #!/usr/bin/env python3
import io
import time
import serial
from serial.tools.list_ports import comports
class Arduino(object):
    """Serial link to an Arduino speaking a simple AT-style command protocol."""

    def __init__(self, port):
        # 115200 baud; the short timeout lets line iteration terminate.
        self.port= serial.Serial(port, 115200, timeout=0.1)
        # Text view over the raw port; protocol lines are CRLF-terminated.
        self.iow= io.TextIOWrapper(
            io.BufferedRWPair(self.port, self.port, 1),
            'utf-8',
            newline='\r\n'
        )
        self.reset()

    def reset(self):
        """Reset the board by pulsing the DTR line."""
        self.port.setDTR(0)
        time.sleep(0.5)
        self.port.setDTR(1)
        time.sleep(0.5)

    def exec_cmd(self, *params):
        """Send an AT command built from *params* and collect the reply.

        Returns the response lines received before the final 'OK'; raises
        on 'FAIL'. NOTE(review): if the device answers with neither 'OK'
        nor 'FAIL' before the read times out, this falls through and
        returns None -- confirm that is intended.
        """
        # Command shape: AT[+NAME[=arg1,arg2,...]]
        cmd= 'AT'
        if len(params) >= 1:
            cmd+= '+' + params[0]
        if len(params) >= 2:
            cmd+= '=' + ','.join(
                map(str, params[1:])
            )
        self.iow.write(cmd + '\r\n')
        resp= list()
        for ln in map(str.strip, self.iow):
            if ln == 'OK':
                return(resp)
            elif ln == 'FAIL':
                raise(Exception('Arduino Error'))
            else:
                resp.append(ln)
class OutputPin(object):
    """Digital output pin driven through an Arduino command channel."""

    def __init__(self, arduino, pin_no):
        self.arduino= arduino
        self.pin_no= pin_no
        # Configure the pin as an output on the device.
        self.arduino.exec_cmd('SET_OUT', self.pin_no)

    def turn_on(self):
        """Drive the pin high."""
        self.set_state(True)

    def turn_off(self):
        """Drive the pin low."""
        self.set_state(False)

    def set_state(self, state):
        """Write the pin level: truthy -> high, falsy -> low."""
        command = 'WRITE_HIGH' if state else 'WRITE_LOW'
        self.arduino.exec_cmd(command, self.pin_no)
class InputPin(object):
    """Digital input pin, optionally with the internal pull-up enabled."""

    def __init__(self, arduino, pin_no, pullup=False):
        self.arduino= arduino
        self.pin_no= pin_no
        # Configure as input; writing high afterwards enables the pull-up.
        self.arduino.exec_cmd('SET_IN', self.pin_no)
        level_cmd = 'WRITE_HIGH' if pullup else 'WRITE_LOW'
        self.arduino.exec_cmd(level_cmd, self.pin_no)

    def is_high(self):
        """Read the pin; True when the reported level is '1'."""
        reply = self.arduino.exec_cmd('PIN_READ', self.pin_no)
        level = reply[0].split(':')[1]
        return level == '1'
def enumerate_ports():
    """Return the device names of all serial ports found on the system."""
    return [port_info[0] for port_info in comports()]
| ComNets-Bremen/GDI-Tutorials | target/examples/21_atuino.py | 21_atuino.py | py | 2,069 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "serial.Serial",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "io.TextIOWrapper",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "io.BufferedRWPair",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"li... |
9273208977 | from flask import Flask, render_template, request
import joblib
import numpy as np
import pandas as pd
app = Flask(__name__)

# Load your model here
# NOTE(review): absolute Windows user path -- breaks on any other machine;
# consider a path relative to this file or an environment variable.
model = joblib.load('C:/Users/Dylan/exoplanets/models/exoplanet_classifier.joblib')
@app.route('/', methods=['GET', 'POST'])
def home():
    """Render the input form and, on POST, classify the submitted exoplanet.

    Expects form fields: exoplanetmass, exoplanetradius, exoplanetdensity
    (optional) and unit ('Earths' or Jupiter units). Renders index.html with
    the predicted class label (or None on a plain GET).
    """
    predicted_label = None

    if request.method == 'POST':
        exoplanetmass = float(request.form['exoplanetmass'])
        exoplanetradius = float(request.form['exoplanetradius'])
        # Fix: float('') raised ValueError when the density field was left
        # blank; treat a missing/empty field as "derive it below".
        density_raw = request.form.get('exoplanetdensity', '')
        exoplanetdensity = float(density_raw) if density_raw else 0.0
        unit = request.form['unit']

        # Convert Earth units to the Jupiter units the model was trained on.
        if unit == 'Earths':
            exoplanetmass = exoplanetmass * 0.00314558
            exoplanetradius = exoplanetradius * 0.0892147

        # Derive density from mass and radius when it was not supplied.
        # NOTE(review): 7.149e9 / 6.99115e9 look like Jupiter radius/mass
        # scale factors -- confirm against the model's training units.
        if not exoplanetdensity:
            radius_scaled = exoplanetradius * 7.149e+9
            mass_scaled = exoplanetmass * 6.99115e+9
            volume = (4 / 3) * np.pi * (radius_scaled ** 3)
            if volume:
                exoplanetdensity = mass_scaled / volume

        # Fix: the feature frame used to be built BEFORE the density
        # fallback ran, so a derived density never reached the model.
        new_data = pd.DataFrame({
            'pl_radj': [exoplanetradius],
            'pl_bmassj': [exoplanetmass],
            'pl_dens': [exoplanetdensity]
        })

        if exoplanetmass and exoplanetradius and exoplanetdensity and unit:
            predicted_label = model.predict(new_data)[0]

    return render_template('index.html', prediction=predicted_label)
if __name__ == '__main__':
    # Development server only; disable debug mode before deploying.
    app.run(debug=True)
| DylanBerger/ExoplanetClassifier | app.py | app.py | py | 1,739 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "joblib.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"... |
26898344474 | # Autor: David Martínez Acha
# Fecha: 04/02/2023 14:30
# Descripción: Permite cargar datasets
# Version: 1.2
from os.path import isfile
import numpy as np
import pandas as pd
from pandas import DataFrame
from pandas.api import types
from scipy.io import arff
from algoritmos.utilidades.filetype import FileType
from algoritmos.utilidades.labelencoder import OwnLabelEncoder
class DatasetLoader:
    """Loader for tabular datasets stored as ARFF or CSV files."""

    def __init__(self, file):
        """
        Loader for (ARFF or CSV) files.

        :param file: path of the file
        :raises FileNotFoundError: if the file does not exist
        :raises ValueError: if the file is neither CSV nor ARFF
        """
        self.target = None
        if not isfile(file):
            raise FileNotFoundError("El archivo no existe en el conjunto de datasets")
        # File type is inferred from the extension embedded in the path.
        if ".csv" in file:
            self.type = FileType.CSV
        elif ".arff" in file:
            self.type = FileType.ARFF
        else:
            raise ValueError("El fichero no es CSV o ARFF")
        self.file = file

    def get_allfeatures(self):
        """
        Return the columns (attributes) of the data, including the target.

        :return: list of the dataset's feature names
        """
        return self._get_data().columns.values

    def set_target(self, target):
        """
        Set the target column of the data.

        :param target: the target/class column used for later classification
        """
        self.target = target

    def get_only_features(self):
        """
        Return the feature columns of the data, NOT including the target.

        :return: list of feature names (without the target)
        :raises ValueError: if no target has been set yet
        """
        if self.target is None:
            raise ValueError("La clase o target no ha sido establecida, selecciona primero la característica que "
                             "actúa como target")
        return np.setdiff1d(self._get_data().columns.values, self.target)

    def _get_data(self):
        """
        Return the raw data (straight from the file) according to its type.

        :return: data as a dataframe
        """
        if self.type == FileType.CSV:
            return self._csv_data()
        elif self.type == FileType.ARFF:
            return self._arff_data()

    def _csv_data(self):
        """
        Convert the .CSV file contents into a dataframe.

        :return: data as a dataframe
        """
        return pd.read_csv(self.file)

    def _arff_data(self):
        """
        Convert the .ARFF file contents into a dataframe.

        :return: data as a dataframe
        """
        data = arff.loadarff(self.file)
        df = pd.DataFrame(data[0])
        return df

    def _detect_categorical_features(self, x: DataFrame):
        """
        Detect whether categorical features exist.

        :param x: instances
        :return: True if any column is non-numeric, False if all are numeric
        """
        return not all(types.is_numeric_dtype(t) for t in list(x.dtypes))

    def _detect_unlabelled_targets(self, y: DataFrame):
        """
        Detect whether unlabelled data exist, following the "-1" convention
        for unlabelled samples.
        Cases considered: -1, -1.0, "-1", "-1.0"

        :param y: labels
        :return: True if unlabelled data exist, False otherwise
        """
        values = y[self.target].astype(str).values
        return "-1" in values or "-1.0" in values

    def get_x_y(self):
        """
        Return separately the data (features) and the targets/classes.

        :return: the instances (x), the classes/targets (y), the mapping of
                 encoded classes to the original ones, and whether the
                 dataset was already semi-supervised
        :raises ValueError: if no target is set or categorical features exist
        """
        if self.target is None:
            raise ValueError("La clase o target no ha sido establecida, selecciona primero la característica que "
                             "actúa como target")
        data = self._get_data()
        x = data.drop(columns=[self.target])
        if self._detect_categorical_features(x):
            raise ValueError("Se han detectado características categóricas o indefinidas, "
                             "recuerde que los algoritmos solo soportan características numéricas")
        if self.type == FileType.CSV:
            y = pd.DataFrame(data[self.target], columns=[self.target])
        else:
            # ARFF string values arrive as bytes: decode the non-numeric ones.
            y = pd.DataFrame(
                np.array([v.decode("utf-8") if not types.is_numeric_dtype(type(v)) else v for v in
                          data[self.target].values]),
                columns=[self.target])
        # ARFF marks missing labels with "?"; normalize to the "-1" convention.
        y.replace("?", "-1", inplace=True)
        is_unlabelled = self._detect_unlabelled_targets(y)
        y, mapping = OwnLabelEncoder().transform(y)
        return x, y, mapping, is_unlabelled
| dma1004/TFG-SemiSupervisado | algoritmos/utilidades/datasetloader.py | datasetloader.py | py | 4,948 | python | es | code | 5 | github-code | 6 | [
{
"api_name": "os.path.isfile",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "algoritmos.utilidades.filetype.FileType.CSV",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "algoritmos.utilidades.filetype.FileType",
"line_number": 32,
"usage_type"... |
4785215660 | from collections import deque,defaultdict
# Count connected components of an undirected graph with BFS.
n, m = map(int, input().split())
adjacency = defaultdict(list)
for _ in range(m):
    a, b = map(int, input().split())
    adjacency[a].append(b)
    adjacency[b].append(a)

seen = [False] * n
components = 0
for start in range(1, n + 1):
    if seen[start - 1]:
        continue
    components += 1
    queue = deque([start])
    seen[start - 1] = True
    while queue:
        current = queue.popleft()
        for neighbour in adjacency[current]:
            if not seen[neighbour - 1]:
                seen[neighbour - 1] = True
                queue.append(neighbour)
print(components)
{
"api_name": "collections.defaultdict",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 13,
"usage_type": "call"
}
] |
19887674480 | #
# -*- coding: utf-8 -*-
# OpenPGPpy OpenPGPcard : OpenPGP smartcard communication library for Python
# Copyright (C) 2020-2022 BitLogiK
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from logging import getLogger
import time
from .der_coding import encode_der, decode_do
try:
from smartcard.System import readers
from smartcard.util import toBytes, toHexString
from smartcard.Exceptions import CardConnectionException
except ModuleNotFoundError as exc:
raise ModuleNotFoundError("pyscard not installed ?") from exc
logger = getLogger(__name__)
# Exception classes for OpenPGPcard
class PGPBaseException(Exception):
    """Base class of the OpenPGPcard exception hierarchy."""
    pass
class PGPCardException(PGPBaseException):
    """Non-success ISO 7816 status word (SW1 SW2) returned by the card."""
    def __init__(self, sw_byte1, sw_byte2):
        self.sw_byte1 = sw_byte1
        self.sw_byte2 = sw_byte2
        # Combined 16-bit status code, e.g. 0x6A88.
        self.sw_code = (sw_byte1 << 8) | sw_byte2
        self.message = "Error status : 0x%02X%02X" % (sw_byte1, sw_byte2)
        super().__init__(self.message)
class ConnectionException(PGPBaseException):
    """Raised when the reader or the OpenPGP device cannot be reached."""
    pass


class BadInputException(PGPBaseException):
    """Raised when a caller passes malformed arguments."""
    pass


class DataException(PGPBaseException):
    """Raised when data read from the card has an unexpected format."""
    pass
class PinException(PGPBaseException):
    """Wrong PIN presented; carries the number of retries left."""

    def __init__(self, num_retries):
        self.retries_left = num_retries
        if num_retries < 2:
            self.message = f"Wrong PIN. {num_retries} try left"
        else:
            self.message = f"Wrong PIN. {num_retries} tries left"
        super().__init__(self.message)
# Protocol constants.
HEX_SYMBOLS = "0123456789abcdefABCDEF"
APDU_SHORT = 256
APDU_LONG = 65536


# Utils helpers
def ishex(istring):
    """Return True when every character of istring is a hex digit."""
    for character in istring:
        if character not in HEX_SYMBOLS:
            return False
    return True
def check_hex(func):
    """Decorator to check the first method argument
    is 2/4 string hex (a DO short address)
    Expands the hex string from 2 to 4 hex chars (adds leading 0)
    Raises BadInputException when the argument is missing or malformed.
    """

    def func_wrapper(*args):
        if len(args) < 2:
            # Fix: the exception was instantiated but never raised, so the
            # check silently passed.
            raise BadInputException(
                "First argument must be filehex : 1 or 2 bytes hex string"
            )
        if not isinstance(args[1], str):
            # Fix: same missing raise as above.
            raise BadInputException("filehex provided must be a string")
        args_list = [*args]
        if len(args_list[1]) == 2:
            # A single byte address : param_1=0
            args_list[1] = "00" + args_list[1]
        if len(args_list[1]) != 4 or not ishex(args_list[1]):
            raise BadInputException("filehex provided must be 2 or 4 hex chars")
        return func(*args_list)

    return func_wrapper
def to_list(binstr):
    """Convert a bytes-like value to a list of ints via its hex form."""
    hex_form = binstr.hex()
    return toBytes(hex_form)
def print_list(liststr):
    """Write every element of liststr to the debug log, one per line."""
    for entry in liststr:
        logger.debug(" - %s", entry)
# Core class OpenPGPcard
class OpenPGPcard:
AppID = toBytes("D27600012401")
default_manufacturer_name = "- unknown -"
manufacturer_list = {
0x0001: "PPC Card Systems",
0x0002: "Prism",
0x0003: "OpenFortress",
0x0004: "Wewid",
0x0005: "ZeitControl",
0x0006: "Yubico",
0x0007: "OpenKMS",
0x0008: "LogoEmail",
0x0009: "Fidesmo",
0x000A: "Dangerous Things",
0x000B: "Feitian Technologies",
0x002A: "Magrathea",
0x0042: "GnuPG",
0x1337: "Warsaw Hackerspace",
0x2342: "Warpzone",
0x2C97: "Ledger",
0x4354: "Confidential Technologies",
0x5443: "TIF-IT",
0x63AF: "Trustica",
0xAFAF: "ANSSI",
0xBA53: "c-base",
0xBD0E: "Paranoidlabs",
0xF517: "FSIJ",
0xF5EC: "F-Secure",
}
def __init__(self, reader_index=None):
applet_detected = False
readers_list = readers()
if len(readers_list) > 0:
if reader_index is None:
logger.debug("Trying to reach the OpenPGP app in all readers")
logger.debug("Available readers :")
print_list(readers_list)
if reader_index is not None:
if not isinstance(reader_index, int):
raise ValueError("reader_index must be int.")
if reader_index < 0:
raise ValueError("reader_index is a positive index, starts at 0.")
if len(readers_list) > reader_index:
readers_list = readers_list[reader_index : reader_index + 1]
else:
raise ConnectionException("Reader index out of readers detected")
logger.debug("Using reader index #%i", reader_index)
for reader in readers_list:
applet_detected = False
try:
logger.debug("Trying with reader : %s", reader)
self.connection = reader.createConnection()
self.connection.connect()
apdu_select = [0x00, 0xA4, 0x04, 0x00]
self.send_apdu(apdu_select, OpenPGPcard.AppID)
applet_detected = hasattr(self, "connection")
except Exception:
logger.debug("Fail with this reader")
if reader_index is not None:
raise ConnectionException("No OpenPGP applet on this reader.")
continue
if applet_detected:
logger.debug("An OpenPGP applet detected, using %s", reader.name)
self.name = reader.name
break
if applet_detected:
self.longer = 0
# Read device info
self.get_identifier()
self.get_application_data()
self.get_length()
self.get_features()
else:
raise ConnectionException("Can't find any OpenPGP device connected.")
# The object has the following attributes :
# self.name = str, name of the device (or the card reader used)
# self.pgpvermaj = int, OpenPGP application major version (3)
# self.pgpvermin = int, OpenPGP application minor version
# self.pgpverstr = string, OpenPGP application "maj.min"
# self.manufacturer_id = string, hex string of the manufacturer ID "0xXXXX"
# self.manufacturer = string, name of the manufacturer (or "- unknown -")
# self.serial = int, serial number
# self.max_cmd : int, maximum command length
# self.max_rsp : int, maximum response length
# self.display : bool, has a display ?
# self.bio : bool, has a biometric sensor ?
# self.button : bool, has a button ?
# self.keypad : bool, has a keypad ?
# self.led : bool, has a LED ?
# self.speaker : bool, has a speaker ?
# self.mic : bool, has a microphone ?
# self.touchscreen : bool, has a touchescreen ?
def __del__(self):
"""Disconnect device."""
if hasattr(self, "connection"):
del self.connection
def send_apdu(self, apdu_header, cmd_data, exp_resp_len=0):
    """Send an ISO 7816-4 APDU, using extended length when needed.

    apdu_header : [CLA, INS, P1, P2] header, without length info (Lc nor Le)
    cmd_data : bytes list of the command data
    exp_resp_len : expected response length (Le); must be set to 65536
        when expecting a long answer with a short command
    Returns the response data as a list of ints.
    Raises PinException on a 63Cx status word, PGPCardException on any
    other non-9000 status, ConnectionException on transport errors.
    """
    len_data = len(cmd_data)
    # Lc is 1 or 3 bytes
    if len_data < APDU_SHORT and exp_resp_len <= APDU_SHORT:
        # Standard APDU : Lc 1 byte : short command and short response
        apdu = apdu_header + [len_data] + cmd_data
    elif len_data < APDU_LONG:
        # Extended APDU : Lc 3 bytes : extended command and extended response
        apdu = apdu_header + [0, len_data >> 8, len_data & 255] + cmd_data
    else:
        raise DataException("Command data too large")
    if exp_resp_len > 0:
        # Le present
        if exp_resp_len < APDU_SHORT:
            # Le fixed and short
            apdu += [exp_resp_len]
        elif exp_resp_len == APDU_SHORT:
            # Le short : max response len 255 bytes
            apdu += [0]
        elif exp_resp_len < APDU_LONG:
            # Le fixed and long
            apdu += [exp_resp_len >> 8, exp_resp_len & 255]
        elif exp_resp_len == APDU_LONG:
            # Le long : max response len 65535 bytes
            apdu += [0, 0]
        else:
            raise DataException("Expected data response too large")
    logger.debug(
        f" Sending 0x{apdu_header[1]:X} command with {len_data} bytes data"
    )
    if exp_resp_len > 0:
        logger.debug(f" with Le={exp_resp_len}")
    logger.debug(f"-> {toHexString(apdu)}")
    t_env = time.time()
    try:
        data, sw_byte1, sw_byte2 = self.connection.transmit(apdu)
    except CardConnectionException:
        raise ConnectionException(
            "Error when communicating with the OpenGPG device."
        )
    t_ans = (time.time() - t_env) * 1000
    logger.debug(
        " Received %i bytes data : SW 0x%02X%02X - duration: %.1f ms"
        % (len(data), sw_byte1, sw_byte2, t_ans)
    )
    if len(data) > 0:
        logger.debug(f"<- {toHexString(data)}")
    # SW1=0x61 means more data pending: drain it with GET RESPONSE (0xC0).
    while sw_byte1 == 0x61:
        t_env = time.time()
        datacompl, sw_byte1, sw_byte2 = self.connection.transmit(
            [0x00, 0xC0, 0, 0, 0]
        )
        t_ans = (time.time() - t_env) * 1000
        logger.debug(
            " Received remaining %i bytes : 0x%02X%02X - duration: %.1f ms"
            % (len(datacompl), sw_byte1, sw_byte2, t_ans)
        )
        logger.debug(f"<- {toHexString(datacompl)}")
        data += datacompl
    # SW 63Cx encodes the remaining PIN tries in the low nibble.
    if sw_byte1 == 0x63 and sw_byte2 & 0xF0 == 0xC0:
        raise PinException(sw_byte2 - 0xC0)
    if sw_byte1 != 0x90 or sw_byte2 != 0x00:
        raise PGPCardException(sw_byte1, sw_byte2)
    return data
@check_hex
def select_data(self, filehex, param_1=0, param_2=4):
"""Select a data object : filehex is 2 bytes (4 string hex)."""
apdu_command = [
0x00,
0xA5,
param_1,
param_2,
]
data = toBytes("60 04 5C 02" + filehex)
self.send_apdu(apdu_command, data)
@check_hex
def get_data(self, filehex, data_hex=""):
"""Binary read / ISO read the object"""
logger.debug(f"Read Data {data_hex} in 0x{filehex}")
param_1 = int(filehex[0:2], 16)
param_2 = int(filehex[2:4], 16)
apdu_command = [0x00, 0xCA, param_1, param_2]
if len(data_hex) == 2:
data_hex = "00" + data_hex
dataresp = self.send_apdu(apdu_command, toBytes(data_hex), self.longer)
return dataresp
def get_next_data(self, param_1=0, param_2=0, data_hex=""):
"""Continue read."""
logger.debug("Read next data %s", data_hex)
apdu_command = [0x00, 0xCC, param_1, param_2]
blkdata = self.send_apdu(apdu_command, toBytes(data_hex))
return blkdata
@check_hex
def put_data(self, filehex, data_hex=""):
logger.debug(f"Put data {data_hex} in 0x{filehex}")
param_1 = int(filehex[0:2], 16)
param_2 = int(filehex[2:4], 16)
apdu_command = [0x00, 0xDA, param_1, param_2] # or 0xDB command
blkdata = self.send_apdu(apdu_command, toBytes(data_hex))
return blkdata
def get_identifier(self):
"""Full application identifier"""
resp = self.get_data("4F")
if len(resp) != 16:
raise DataException("Application identifier data shall be 16 bytes long.")
if resp[:6] != OpenPGPcard.AppID:
raise DataException(
"Start of application identifier data shall be the OpenGPG AID."
)
self.pgpvermaj = resp[6]
self.pgpvermin = resp[7]
self.pgpverstr = f"{resp[6]}.{resp[7]}"
self.manufacturer_id = f"0x{resp[8]:02X}{resp[9]:02X}"
manufacturer_id_int = int(self.manufacturer_id, 16)
if manufacturer_id_int in OpenPGPcard.manufacturer_list:
self.manufacturer = OpenPGPcard.manufacturer_list[manufacturer_id_int]
else:
self.manufacturer = OpenPGPcard.default_manufacturer_name
self.serial = int.from_bytes(resp[10:14], "big")
if self.pgpvermaj >= 3:
self.longer = APDU_LONG
logger.debug(f"PGP version : {self.pgpverstr}")
logger.debug(f"Manufacturer : {self.manufacturer} ({self.manufacturer_id})")
logger.debug(f"Serial : {self.serial}")
def get_length(self):
"""Extended length info DO 7F66 : 0202 xxxx 0202 xxxx
Also bit 7 in Application Data "0x73"
"""
self.max_cmd = 256
self.max_rsp = 256
if self.pgpvermaj >= 3:
resp = self.get_data("7F66")
if len(resp) == 8: # Simple DO
self.max_cmd = int.from_bytes(resp[2:4], "big")
self.max_rsp = int.from_bytes(resp[6:8], "big")
elif len(resp) == 11 and resp[:3] == [0x7F, 0x66, 8]: # Constructed DO
self.max_cmd = int.from_bytes(resp[5:7], "big")
self.max_rsp = int.from_bytes(resp[9:11], "big")
else:
raise DataException("Extended length info incorrect format.")
def get_pwstatus(self):
return self.get_data("C4")
def get_features(self):
"""Features optional DO 7F74"""
self.display = False
self.bio = False
self.button = False
self.keypad = False
self.led = False
self.speaker = False
self.mic = False
self.touchscreen = False
try:
resp = self.get_data("7F74")
except PGPCardException as exc:
if exc.sw_code == 0x6B00 or exc.sw_code == 0x6A83 or exc.sw_code == 0x6A88:
self.display_features()
return
raise
if resp[:3] == [0x7F, 0x74, 3]: # Turn constructed DO to simple DO
resp = resp[3:]
if resp[:2] != [0x81, 1]:
raise DataException("Features data shall start with 0x81 0x01.")
if len(resp) != 3:
raise DataException("Features data shall be 3 bytes long.")
feature_int = resp[2]
def check_bit(integ, bit_pos):
# Check bit 8..1
powertwo = 1 << (bit_pos - 1)
return (integ & powertwo) == powertwo
self.display = check_bit(feature_int, 8)
self.bio = check_bit(feature_int, 7)
self.button = check_bit(feature_int, 6)
self.keypad = check_bit(feature_int, 5)
self.led = check_bit(feature_int, 4)
self.speaker = check_bit(feature_int, 3)
self.mic = check_bit(feature_int, 2)
self.touchscreen = check_bit(feature_int, 1)
self.display_features()
def display_features(self):
"""Print features for debug"""
def capability_message(capability):
return "Yes" if capability else "No"
# logger.debug("Display ? %s", capability_message(self.display))
# logger.debug("Biometric sensor ? %s", capability_message(self.bio))
logger.debug("Button ? %s", capability_message(self.button))
# logger.debug("Keypad ? %s", capability_message(self.keypad))
# logger.debug("LED ? %s", capability_message(self.led))
# logger.debug("Speaker ? %s", capability_message(self.speaker))
# logger.debug("Microphone ? %s", capability_message(self.mic))
# logger.debug("TouchScreen ? %s", capability_message(self.touchscreen))
def get_historical_bytes(self):
"""Historical bytes DO 5F52"""
return self.get_data("5F52")
def get_application_data(self):
"""Application Related Data DO 6E"""
try:
resp = self.get_data("6E")
except PGPCardException as exc:
if exc.sw_code == 0x6D00:
# Retry after 3 seconds
time.sleep(3)
# Select again the applet
self.send_apdu([0x00, 0xA4, 0x04, 0x00], OpenPGPcard.AppID)
time.sleep(1)
return self.get_application_data()
app_rel_data = decode_do(resp)
if resp[0] == 0x6E:
app_rel_data = app_rel_data["6E"]
# Set the attribute about max PW length
pwstatus_data = bytes.fromhex(app_rel_data["73"]["C4"])
if pwstatus_data[1] < 128:
self.pw1_maxlen = pwstatus_data[1]
if pwstatus_data[1] < 128:
self.pw3_maxlen = pwstatus_data[3]
return app_rel_data
def terminate_df(self):
    """TERMINATE DF: put the applet into the terminated state."""
    self.send_apdu([0, 0xE6, 0, 0], [])

def activate_file(self):
    """ACTIVATE FILE: re-initialize a terminated applet."""
    self.send_apdu([0, 0x44, 0, 0], [])

def reset(self, pin3):
    """Factory-reset the card: verify PW3, then terminate and reactivate."""
    self.verify_pin(3, pin3)
    self.terminate_df()
    self.activate_file()

def get_random(self, len_data):
    """Get challenge INS=0x84
    Return len bytes of random (not integer)
    ToDo : make it as optional, 6D00 error?
    """
    return bytes(self.send_apdu([0, 0x84, 0, 0], [], len_data))
def get_pin_status(self, pin_bank):
    """Return remaining tries left for the given PIN bank address (1, 2 or 3).

    Returns 0 when the PIN is blocked and 9000 when it has been verified.
    """
    if self.pgpvermaj * 10000 + self.pgpvermin >= 30001:  # >= v 3.1
        try:
            # An empty VERIFY queries the status without presenting a PIN.
            self.verify_pin(pin_bank, "")
            return 9000
        except PinException as exc:
            return exc.retries_left
        except PGPCardException as exc:
            if exc.sw_code == 0x6983:
                return 0
            if exc.sw_code != 0x6A80:
                raise exc
    # Fallback to PW status "C4"
    resp = self.get_pwstatus()
    if len(resp) != 7:
        # Fix: PGPCardException takes two status bytes; constructing it with
        # a message string crashed with TypeError. DataException fits here.
        raise DataException("Bad PW status data")
    if pin_bank == 1:
        return resp[4]
    if pin_bank == 3:
        return resp[6]
    # Fix: same single-argument PGPCardException misuse as above.
    raise BadInputException("Only PW1 and PW3 are available for status")
def change_pin(self, old_pin, new_pin, pin_index):
    """Change PIN index number (index : 1 or 3)."""
    if pin_index not in (3, 1):
        raise DataException("Bad PIN index, must be 1 or 3.")
    old_pin_bin = old_pin.encode("utf8")
    new_pin_bin = new_pin.encode("utf8")
    # PW3 (admin) requires at least 8 bytes, PW1 at least 6.
    pin_min_len = 8 if pin_index == 3 else 6
    if min(len(old_pin_bin), len(new_pin_bin)) < pin_min_len:
        raise BadInputException(
            f"Bad PIN #{pin_index} length, must be {pin_min_len} bytes."
        )
    payload = old_pin_bin + new_pin_bin
    self.send_apdu([0, 0x24, 0, 0x80 + pin_index], to_list(payload))
def verify_pin(self, pin_bank, pin_string):
"""Verify PIN code : pin_bank is 1, 2 or 3 for SW1, SW2 or SW3
Call CHANGE REFERENCE DATA card command
"""
if pin_bank not in (1, 2, 3):
raise DataException("Bad PIN index, must be 1, 2 or 3.")
if pin_string:
self.send_apdu(
[0, 0x20, 0, 0x80 + pin_bank], to_list(pin_string.encode("utf8"))
)
else:
self.send_apdu([0, 0x20, 0, 0x80 + pin_bank], [])
@check_hex
def gen_key(self, keypos_hex):
"""Generate an asymmetric key pair in keypos slot address
Digital signature : 0xB600 : gen key according to algorithm data in C1
Confidentiality : 0xB800 : gen key according to algorithm data in C2
Authentication : 0xA400 : gen key according to algorithm data in C3
"""
return bytes(
self.send_apdu([0, 0x47, 0x80, 0], toBytes(keypos_hex), self.longer)
)
@check_hex
def get_public_key(self, keypos_hex):
"""Get the public part of the key pair in keypos slot address"""
return bytes(
self.send_apdu([0, 0x47, 0x81, 0], toBytes(keypos_hex), self.longer)
)
def sign(self, data):
    """Sign data, with Compute Digital Signature command"""
    return bytes(self.send_apdu([0, 0x2A, 0x9E, 0x9A], to_list(data)))

def sign_ec_der(self, hashdata):
    """Sign with ECDSA hash data and output signature as ASN1 DER encoded
    hashdata is the same size in bits of the EC key
    """
    return encode_der(self.sign(hashdata))

def encipher(self):
    """Call ENC command
    ToDo
    """
    raise NotImplementedError()

def decipher(self, data):
    """PSO:DECIPHER with the confidentiality key; returns plaintext bytes."""
    return bytes(self.send_apdu([0, 0x2A, 0x80, 0x86], to_list(data)))

def decipher_25519(self, ext_pubkey):
    """For ECDH with Curve25519
    ext_pubkey is a 32 bytes "x" public key
    """
    # A6 / 7F49 / 86 : DO template wrapping the external public point.
    data_field = b"\xA6\x12\x7F\x49\x22\x86\x20" + ext_pubkey
    return self.decipher(data_field)
| bitlogik/OpenPGPpy | OpenPGPpy/openpgp_card.py | openpgp_card.py | py | 22,169 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "smartcard.util.toBytes",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "smartcard.util.toBytes",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "sma... |
17177402704 | """
Kaming Yip
CS677 A1 Data Science with Python
Apr 3, 2020
Assignment 9.3: Random Forest
"""
from pandas_datareader import data as web
import os
import pandas as pd
import numpy as np
from tabulate import tabulate
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
def main():
def get_stock(ticker, start_date, end_date):
"""
download the historical data from yahoo web
& manipulate the data to create desirable columns
"""
try:
df = web.get_data_yahoo(ticker, start=start_date, end=end_date)
df['Return'] = df['Adj Close'].pct_change()
df['Return'].fillna(0, inplace = True)
df['Return'] = 100.0 * df['Return']
df['Return'] = df['Return'].round(3)
df['Date'] = df.index
df['Date'] = pd.to_datetime(df['Date'])
df['Month'] = df['Date'].dt.month
df['Year'] = df['Date'].dt.year
df['Day'] = df['Date'].dt.day
for col in ['Open', 'High', 'Low', 'Close', 'Adj Close']:
df[col] = df[col].round(2)
df['Weekday'] = df['Date'].dt.weekday_name
df['Week_Number'] = df['Date'].dt.strftime('%U')
df['Year_Week'] = df['Date'].dt.strftime('%Y-%U')
col_list = ['Date', 'Year', 'Month', 'Day', 'Weekday',
'Week_Number', 'Year_Week', 'Open',
'High', 'Low', 'Close', 'Volume', 'Adj Close',
'Return']
num_lines = len(df)
df = df[col_list]
print('read', num_lines, 'lines of data for ticker:' , ticker)
return df
except Exception as error:
print(error)
return None
# design the selected stock name and time frame
try:
ticker='YELP'
input_dir = os.getcwd()
output_file = os.path.join(input_dir, ticker + '.csv')
df = get_stock(ticker, start_date='2016-01-01', end_date='2019-12-31')
df.to_csv(output_file, index=False)
print('wrote ' + str(len(df)) + ' lines to file: ' + ticker + '.csv', end = "\n\n" + "-" * 50 + "\n\n")
except Exception as e:
print(e)
print('failed to get Yahoo stock data for ticker: ', ticker, end = "\n\n" + "-" * 50 + "\n\n")
def weekly_return_volatility(data, start_date, end_date):
"""
calculate the weekly mean return and volatility
& create a new file to contain these infor
"""
try:
df_2 = data[data['Date'] >= start_date]
df_2 = df_2[df_2['Date'] <= end_date]
df_2 = df_2[['Year', 'Week_Number', 'Open', 'Adj Close', 'Return']]
df_2.index = range(len(df_2))
df_grouped = df_2.groupby(['Year', 'Week_Number'])['Return'].agg([np.mean, np.std])
df_grouped.reset_index(['Year', 'Week_Number'], inplace=True)
df_grouped.rename(columns={'mean': 'mean_return', 'std':'volatility'}, inplace=True)
df_grouped.fillna(0, inplace=True)
df_grouped["Open"] = df_2.groupby(["Year", "Week_Number"])["Open"].head(1).\
reset_index(drop = True).copy()
df_grouped["Adj Close"] = df_2.groupby(["Year", "Week_Number"])["Adj Close"].tail(1).\
reset_index(drop = True).copy()
return df_grouped
except Exception as error:
print(error)
return None
# create the weekly dataframe with mean return and volatility values
try:
df_weekly = weekly_return_volatility(df, start_date='2018-01-01', end_date='2019-12-31')
except Exception as e:
print("Error in weekly_return_volatility: ", end = " ")
print(e)
def weekly_label(data, year):
    """
    Assign a "True Label" to each week of *year*.

    A week is "Green" when its mean_return is at or above the year's
    median AND its volatility is at or below the year's median;
    otherwise it is "Red".  Returns the labelled copy, or None on error.
    """
    try:
        year_df = data[data["Year"] == year].copy()
        return_cutoff = np.percentile(year_df["mean_return"], 50)
        vol_cutoff = np.percentile(year_df["volatility"], 50)
        is_green = (year_df["mean_return"] >= return_cutoff) & \
                   (year_df["volatility"] <= vol_cutoff)
        year_df["True Label"] = np.where(is_green, "Green", "Red")
        return year_df
    except Exception as error:
        print(error)
        return None
try:
    # Label 2018 and 2019 weeks and stack them into one frame.
    df_labeling = pd.DataFrame()
    for year in [2018, 2019]:
        df_year_label = weekly_label(df_weekly, year)
        label_count = df_year_label.groupby("True Label")["True Label"].size().to_frame(name = "Freq")
        print("Label Count for Year {0}".format(year))
        print(tabulate(label_count, headers = "keys", numalign = "right"), end = "\n\n")
        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # version-safe equivalent for accumulating the per-year frames
        df_labeling = pd.concat([df_labeling, df_year_label], ignore_index = True)
    df_labeling["Week_Number"] = df_labeling["Week_Number"].astype(int)
except Exception as e:
    print("Error in weekly_label:", end = " ")
    print(e)
def random_forest(train_data, test_data, predictor, N, d):
    """
    Train a Random Forest on *train_data* and score it on *test_data*.

    predictor : list of feature column names
    N, d      : number of trees and max depth of each tree

    Returns (pred_Y, error_rate) where pred_Y holds the predicted string
    labels for test_data and error_rate is the misclassification rate.
    """
    # train the Random Forest model on the year-1 stock data
    train_X = train_data[predictor].values
    le = LabelEncoder()
    train_Y = le.fit_transform(train_data["True Label"].values)
    model = RandomForestClassifier(n_estimators = N, max_depth = d,
                                   criterion = "entropy", random_state = 3)
    model.fit(train_X, train_Y)
    # score on year 2; reuse the encoder fitted on the training labels
    # (transform, not fit_transform) so the class->integer mapping is
    # guaranteed to match training even if test labels appear in a
    # different order or a class is missing
    test_X = test_data[predictor].values
    test_Y = le.transform(test_data["True Label"].values)
    pred_Y = model.predict(test_X)
    error_rate = np.mean(pred_Y != test_Y)
    pred_Y = le.inverse_transform(pred_Y)
    return pred_Y, error_rate
def designed_confusion_matrix(actual, pred):
    """
    Build a labelled confusion-matrix DataFrame plus summary metrics.

    Returns (designed_cm, accuracy, TPR, TNR).
    """
    # sklearn's confusion_matrix puts actual classes on rows and predicted
    # classes on columns; zip(cm[0], cm[1]) transposes it so the DataFrame
    # below has predicted classes on rows and actual classes on columns,
    # matching the index/columns labels given here.
    cm = confusion_matrix(actual, pred)
    list_of_tuples = list(zip(cm[0], cm[1]))
    designed_cm = pd.DataFrame(list_of_tuples,
                               columns = ["Actual Green", "Actual Red"],
                               index = ["Predicted Green", "Predicted Red"])
    diagonal_sum = cm.trace()
    sum_of_all_elements = cm.sum()
    accuracy = diagonal_sum / sum_of_all_elements
    # TPR/TNR use the untransposed cm: row 0 = actual class 0, row 1 =
    # actual class 1 (assumes the labels sort as Green < Red -- the
    # alphabetical order sklearn uses; verify if labels ever change)
    TPR = cm[0,0]/(cm[0,0] + cm[0,1])
    TNR = cm[1,1]/(cm[1,0] + cm[1,1])
    return designed_cm, accuracy, TPR, TNR
def printout(actual, pred, year):
    """Print the confusion matrix and accuracy/TPR/TNR summary for one year."""
    matrix, acc, tpr, tnr = designed_confusion_matrix(actual, pred)
    header = " * The Confusion Matrix for Year {0} * ".format(year)
    metrics = ("The accuracy of this model is {0:.3f}.\n".format(acc) +
               "The true positive rate of this model is {0:.3f}.\n".format(tpr) +
               "The true negative rate of this model is {0:.3f}.\n".format(tnr))
    print(header, matrix, metrics, sep = "\n\n", end = "\n\n")
def trade_with_labels(data, col_name):
    """
    Simulate label-driven trading with $100 starting capital.

    For each week, look at NEXT week's label in *col_name*: go/stay long
    (all-in at next week's Open) when it is "Green", sell at this week's
    Adj Close when it is "Red".  Week 0 is special-cased on its own label.
    Returns a list of end-of-week portfolio values, one per row of *data*.
    """
    money = 100.0
    shares = 0.0
    position = "No"    # "No" = in cash, "Long" = holding shares
    balance = []
    df_trade_labels = data.copy()
    # iterate to len-1 because each step peeks at row i+1's label;
    # the final week's balance is appended after the loop
    for i in range(len(df_trade_labels) - 1):
        if i == 0:
            # first week trades on its OWN label (no prior week to peek from)
            label = df_trade_labels.iloc[i][col_name]
            if label == "Green":
                shares = money / df_trade_labels.iloc[i]["Open"]
                money = 0.0
                position = "Long"
                balance.append(shares * df_trade_labels.iloc[i]["Adj Close"])
            else:
                balance.append(money)
        else:
            # later weeks act on the UPCOMING week's label
            label = df_trade_labels.iloc[i+1][col_name]
            if label == "Red":
                # liquidate (if long) at this week's close
                if position == "Long":
                    money = shares * df_trade_labels.iloc[i]["Adj Close"]
                    shares = 0.0
                    position = "No"
                balance.append(money)
            else:
                # buy in (if in cash) at next week's open
                if position == "No":
                    shares = money / df_trade_labels.iloc[i+1]["Open"]
                    money = 0.0
                    position = "Long"
                balance.append(shares * df_trade_labels.iloc[i]["Adj Close"])
    # mark the final week to market
    if position == "Long":
        balance.append(shares * df_trade_labels.iloc[-1]["Adj Close"])
    else:
        balance.append(money)
    return balance
def script_text(data, year, col_name):
    """
    Build the max/min/final annotation strings and coordinates for the
    balance curve *col_name* restricted to *year*.

    Returns a 9-tuple: (text, x, y) for the maximum, the minimum, and the
    final balance.  Relies on *data* having a 0..n-1 RangeIndex so idxmax /
    idxmin labels double as plot x-coordinates.
    """
    label_text_max = "{0} Week {1}\nmax ${2}".\
        format(year,
               data.iloc[data[data["Year"] == year][col_name].idxmax()]["Week_Number"],
               round(data[data["Year"] == year][col_name].max(), 2))
    label_x_max = data[data["Year"] == year][col_name].idxmax()
    label_y_max = round(data[data["Year"] == year][col_name].max(), 2)
    label_text_min = "{0} Week {1}\nmin ${2}".\
        format(year,
               data.iloc[data[data["Year"] == year][col_name].idxmin()]["Week_Number"],
               round(data[data["Year"] == year][col_name].min(), 2))
    label_x_min = data[data["Year"] == year][col_name].idxmin()
    label_y_min = round(data[data["Year"] == year][col_name].min(), 2)
    label_text_final = "{0} Final:\n${1}".format(year, round(data[data["Year"] == year].iloc[-1][col_name], 2))
    # NOTE(review): label_x_final is a one-element ndarray (from .index.values)
    # while the other x coordinates are scalars -- matplotlib accepts both,
    # but confirm before using it elsewhere
    label_x_final = data[data["Year"] == year].tail(1).index.values
    label_y_final = round(data[data["Year"] == year].iloc[-1][col_name], 2)
    return label_text_max, label_x_max, label_y_max,\
           label_text_min, label_x_min, label_y_min,\
           label_text_final, label_x_final, label_y_final
def buy_n_hold(data):
    """
    Simulate buy-and-hold: invest $100 at the first row's Open and mark
    the position to market at every row's Adj Close.

    Returns a list of portfolio values, one per row ([] for empty input).
    """
    initial_cash = 100.0
    frame = data.copy()
    if len(frame) == 0:
        return []
    share_count = initial_cash / frame.iloc[0]["Open"]
    return [share_count * frame.iloc[i]["Adj Close"] for i in range(len(frame))]
########## Q1 ##########
# Grid-search the random forest over N (trees) x d (depth), recording the
# 2019 error rate for every combination and tracking the best pair.
print("\n" + "#" * 35 + " Q1 " + "#" * 35 + "\n")
try:
    df_2018 = df_labeling.loc[df_labeling["Year"] == 2018].copy().reset_index(drop = True)
    df_2019 = df_labeling.loc[df_labeling["Year"] == 2019].copy().reset_index(drop = True)
    predictor = ["mean_return", "volatility"]
    Y_2019 = df_2019[["True Label"]].values
    N_list = list(range(1, 11))
    d_list = list(range(1, 6))
    x_list = []
    y_list = []
    size_list = []
    results = pd.DataFrame(columns = N_list, index = d_list)
    best_combo = [0, 0, float("inf")]   # [N, d, error_rate]
    for N in N_list:
        for d in d_list:
            x_list.append(N)
            y_list.append(d)
            pred_Y, error_rate = random_forest(df_2018, df_2019, predictor, N, d)
            results.loc[d, N] = error_rate
            size_list.append(error_rate)
            if error_rate < best_combo[2]:
                best_combo = [N, d, error_rate]
            else:
                pass
    results = results.astype(float)
    print(" " * 10 + " * The Error Rate Results of Different Random Forests * ",
          tabulate(results.round(3), headers = "keys", numalign = "left"),
          sep = "\n\n", end = "\n\n")
    # min-max rescale the error rates to [0, 800] so they can be used as
    # scatter marker areas
    min_size, max_size = min(size_list), max(size_list)
    for i, ele in enumerate(size_list):
        size_list[i] = (ele - min_size) / (max_size - min_size) * 800
    plt.figure(figsize = (8, 4))
    plt.scatter(x_list, y_list, marker = ".", s = size_list)
    plt.title("The Error Rates with Different Combinations of N and d")
    plt.xlabel("Number of Trees (N)")
    plt.xticks(N_list)
    plt.ylabel("Max Depth of Each Subtree (d)")
    plt.yticks(d_list)
    plt.show()
    print("\nAs displayed above, the best combination is N = {0:d}, d = {1:d},".\
          format(best_combo[0], best_combo[1]),
          "with the minimal error rate of {0:.3f}.".format(best_combo[2]),
          sep = "\n")
except Exception as e:
    print("Error in Question 1:", end = " ")
    print(e)
########## Q2 & Q3 ##########
# Re-fit the forest at the best (N, d) found in Q1 and print its 2019
# confusion matrix, accuracy, TPR and TNR.
print("\n" + "#" * 35 + " Q2 & Q3 " + "#" * 35 + "\n")
try:
    optimal_N, optimal_d = best_combo[0], best_combo[1]
    pred_Y, error = random_forest(df_2018, df_2019, predictor, optimal_N, optimal_d)
    printout(Y_2019, pred_Y, 2019)
except Exception as e:
    print("Error in Question 2:", end = " ")
    print(e)
########## Q4 ##########
# Compare three 2019 strategies on one chart: trading on the true labels,
# buy-and-hold, and trading on the random-forest labels (pred_Y from Q2).
print("\n" + "#" * 35 + " Q4 " + "#" * 35 + "\n")
try:
    df_trading = df_labeling[df_labeling["Year"] == 2019].copy().reset_index(drop = True)
    df_trading["True Label Balance"] = trade_with_labels(df_trading, "True Label")
    df_trading["Buy and Hold Balance"] = buy_n_hold(df_trading)
    df_trading["Random Forest Label"] = pred_Y
    df_trading["Random Forest Balance"] = trade_with_labels(df_trading, "Random Forest Label")
    fig, ax = plt.subplots(figsize = (9, 5))
    # unpack (text, x, y) annotation triples for max/min/final of each curve
    label_text_max_2019, label_x_max_2019, label_y_max_2019,\
    label_text_min_2019, label_x_min_2019, label_y_min_2019,\
    label_text_final_2019, label_x_final_2019, label_y_final_2019 =\
    script_text(df_trading, 2019, "True Label Balance")
    forest_text_max_2019, forest_x_max_2019, forest_y_max_2019,\
    forest_text_min_2019, forest_x_min_2019, forest_y_min_2019,\
    forest_text_final_2019, forest_x_final_2019, forest_y_final_2019 =\
    script_text(df_trading, 2019, "Random Forest Balance")
    buy_hold_text_max_2019, buy_hold_x_max_2019, buy_hold_y_max_2019,\
    buy_hold_text_min_2019, buy_hold_x_min_2019, buy_hold_y_min_2019,\
    buy_hold_text_final_2019, buy_hold_x_final_2019, buy_hold_y_final_2019 =\
    script_text(df_trading, 2019, "Buy and Hold Balance")
    # Trading with True Label
    ax.plot(df_trading.index, "True Label Balance", data = df_trading, color = "blue")
    ax.annotate(label_text_max_2019, xy = (label_x_max_2019, label_y_max_2019), xycoords = "data",
                xytext = (label_x_max_2019+5, label_y_max_2019+5), color = "blue",
                arrowprops = dict(arrowstyle = "->", connectionstyle = "angle,angleA=0,angleB=90", color = "blue"),
                ha = "left", va = "bottom")
    ax.annotate(label_text_min_2019, xy = (label_x_min_2019, label_y_min_2019), xycoords = "data",
                xytext = (label_x_min_2019+5, label_y_min_2019+17), color = "blue",
                arrowprops = dict(arrowstyle = "->", connectionstyle = "angle,angleA=0,angleB=90", color = "blue"),
                ha = "left", va = "bottom")
    ax.annotate(label_text_final_2019, xy = (label_x_final_2019, label_y_final_2019), xycoords = "data",
                xytext = (label_x_final_2019+5, label_y_final_2019-5), color = "blue",
                arrowprops = dict(arrowstyle = "->", connectionstyle = "angle,angleA=0,angleB=90", color = "blue"),
                ha = "left", va = "bottom")
    # Buy and Hold
    ax.plot(df_trading.index, "Buy and Hold Balance", data = df_trading, color = "red")
    ax.annotate(buy_hold_text_max_2019, xy = (buy_hold_x_max_2019, buy_hold_y_max_2019), xycoords = "data",
                xytext = (buy_hold_x_max_2019+5, buy_hold_y_max_2019+11), color = "red",
                arrowprops = dict(arrowstyle = "->", connectionstyle = "angle,angleA=0,angleB=90", color = "red"),
                ha = "left", va = "bottom")
    ax.annotate(buy_hold_text_min_2019, xy = (buy_hold_x_min_2019, buy_hold_y_min_2019), xycoords = "data",
                xytext = (buy_hold_x_min_2019+4, buy_hold_y_min_2019+2), color = "red",
                arrowprops = dict(arrowstyle = "->", connectionstyle = "angle,angleA=0,angleB=90", color = "red"),
                ha = "left", va = "bottom")
    ax.annotate(buy_hold_text_final_2019, xy = (buy_hold_x_final_2019, buy_hold_y_final_2019), xycoords = "data",
                xytext = (buy_hold_x_final_2019+5, buy_hold_y_final_2019-2), color = "red",
                arrowprops = dict(arrowstyle = "->", connectionstyle = "angle,angleA=0,angleB=90", color = "red"),
                ha = "left", va = "bottom")
    # Trading with Decision Tree Label
    ax.plot(df_trading.index, "Random Forest Balance", data = df_trading, color = "green")
    ax.annotate(forest_text_max_2019, xy = (forest_x_max_2019, forest_y_max_2019), xycoords = "data",
                xytext = (forest_x_max_2019+5, forest_y_max_2019+5), color = "green",
                arrowprops = dict(arrowstyle = "->", connectionstyle = "angle,angleA=0,angleB=90", color = "green"),
                ha = "left", va = "bottom")
    ax.annotate(forest_text_min_2019, xy = (forest_x_min_2019, forest_y_min_2019), xycoords = "data",
                xytext = (forest_x_min_2019+5, forest_y_min_2019+27), color = "green",
                arrowprops = dict(arrowstyle = "->", connectionstyle = "angle,angleA=0,angleB=90", color = "green"),
                ha = "left", va = "bottom")
    ax.annotate(forest_text_final_2019, xy = (forest_x_final_2019, forest_y_final_2019), xycoords = "data",
                xytext = (forest_x_final_2019+5, forest_y_final_2019-15), color = "green",
                arrowprops = dict(arrowstyle = "->", connectionstyle = "angle,angleA=0,angleB=90", color = "green"),
                ha = "left", va = "bottom")
    plt.title("* Year 2019 *\n" + "Performance against Different Investing Strategies", loc = "center")
    plt.xlabel("Week Number")
    plt.xticks(np.arange(0, 60, 5))
    plt.ylabel("Total Balance($)")
    # NOTE(review): no label= was passed to ax.plot, so plt.legend() may
    # render empty or warn -- confirm the intended legend entries
    plt.legend()
    plt.show()
    print("\nAs displayed in the plot above, the {0} strategy results in a".\
          format("buy-and-hold" if buy_hold_y_final_2019 > forest_y_final_2019 else "Random Forest Classifier"),
          "larger amount as ${0} at the end of the year 2019.".\
          format(buy_hold_y_final_2019 if buy_hold_y_final_2019 > forest_y_final_2019 else forest_y_final_2019),
          sep = "\n")
except Exception as e:
    print("Error in Question 4:", end = " ")
    print(e)
main()
| KamingYip/Trading_Strategies_with_Stock_Data | Random Forest.py | Random Forest.py | py | 18,943 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "pandas_datareader.data.get_data_yahoo",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pandas_datareader.data",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "pandas.to_datetime",
"line_number": 31,
"usage_type": "call"
},
{
"a... |
23907212429 | #!/usr/bin/env python
# -*- coding:utf-8 -*
import pickle
import sys
import pandas as pd
from utils.change_column_names import changeToName
from utils.feature_engineering import *
from utils.labels import *
from utils.pipelines import transformation_pipeline
from utils.strategy import Strategy
# CLI: python myModel_demo.py <input_tick_csv> <output_orders_csv>
input_path = sys.argv[1]
output_path = sys.argv[2]
model_file = 'model.pkl'
print('Loaded Libraries...')
tick_data = pd.read_csv(input_path)
# SECURITY NOTE: pickle.load executes arbitrary code from model.pkl; only
# load model files from a trusted source.
with open(model_file, 'rb') as f:
    model = pickle.load(f)
print('Loaded data and model...')
tick_data = changeToName(tick_data)
tick_data = transformation_pipeline.fit_transform(tick_data)
# NOTE(review): this drop's result is discarded (not inplace, not assigned),
# so 'Label' and 'Index' are still present when model.predict runs below --
# likely a bug, but assigning it would break the column selection that
# follows; confirm which columns the model was trained on.
tick_data.drop(['Label', 'Index'], axis=1)
print('Transformed data...')
print('Building orders...')
tick_data_pred = model.predict(tick_data)
# keep only the columns the strategy needs, then overwrite Label with the
# model's predictions
tick_data = tick_data[['Index', 'StockCode', 'TickTime', 'LatestTransactionPriceToTick',
                       'RollingTransPriceMeanDiff5', 'RollingTransPriceMeanDiff100', 'Label']]
tick_data['Label']=tick_data_pred
order_tick = Strategy().fit_transform(tick_data)
order_tick.to_csv(output_path, index=False)
| TheLohia/Sultans | myModel_demo.py | myModel_demo.py | py | 1,090 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_n... |
35848910596 | from fastapi import Depends, FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from sqlalchemy.orm import Session
import crud
import models
import schemas
from db_handler import SessionLocal, engine
# Create any missing tables for the declared models at import time.
models.Base.metadata.create_all(bind=engine)
app = FastAPI(
    title="FDFC Server",
    version="1.0.0"
)
# Origins allowed to call this API from a browser (local dev frontends).
origins = [
    "http://localhost",
    "http://localhost:3000",
    "http://localhost:8000",
]
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
def get_db():
    """FastAPI dependency: yield a request-scoped DB session, always closing it."""
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()
@app.post("/login", response_model=schemas.UserInfoResponse)
def post_login(request: schemas.UserRequest, db: Session=Depends(get_db)):
user = crud.get_user(
db=db,
username=request.username
)
return user
@app.post("/register", response_model=schemas.UserInfoResponse)
def post_register(request: schemas.UserRequest, db: Session=Depends(get_db)):
response = crud.add_user(
db=db,
username=request.username,
password=request.password
)
return response
@app.put("/additional-info", response_model=schemas.UserInfoResponse)
def put_additional_info(request: schemas.AdditionalInfoRequest, db: Session=Depends(get_db)):
response = crud.set_additional_info(
db=db,
id=request.id,
civil_status=request.civil_status,
occupation=request.occupation
)
return response
@app.put("/contact-info", response_model=schemas.UserInfoResponse)
def put_contact_info(request: schemas.ContactInfoRequest, db: Session=Depends(get_db)):
response = crud.set_contact_info(
db=db,
id=request.id,
mobile=request.mobile,
landline=request.landline,
email_address=request.email_address
)
return response
@app.put("/location-info", response_model=schemas.UserInfoResponse)
def put_location_info(request: schemas.LocationInfoRequest, db: Session=Depends(get_db)):
response = crud.set_location_info(
db=db,
id=request.id,
address_permanent=request.address_permanent,
address_temporary=request.address_temporary
)
return response | chris-vill/fdfc-server | main.py | main.py | py | 2,151 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "models.Base.metadata.create_all",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "models.Base",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "db_handler.engine",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "fa... |
42510851613 | import sys, pathlib
import pytest
sys.path.insert(0,str(pathlib.Path(__file__).parent.parent.joinpath("src").resolve()))
#import pytest
from certauth2.__main__ import main
from certauth2 import CertificateAuthority, Encoding
from certauth2.creds_store import ondiskPathStore, ondiskCredentialStore
from cryptography import x509
# Parameterized fixture: runs each test once per supported Encoding member.
@pytest.fixture(params=[e for e in Encoding])
def encoding(request):
    return request.param
@pytest.fixture(params=["pem", "pfx", "pkcs12"])
def root_ca_suffix(request):
return request.param
def get_ca(encoding:Encoding, root_ca_suffix:str ):
    """Build a CertificateAuthority backed by an on-disk store under ./.private."""
    store = ondiskCredentialStore(f"./.private/{encoding.name.lower()}", encoding=encoding)
    return CertificateAuthority(
        f"./.private/my-ca.{root_ca_suffix}",
        store=store,
    )
def test_root_certificate(encoding:Encoding, root_ca_suffix:str):
    """The root CA certificate must be self-signed with CA key usage set."""
    ca = get_ca(encoding, root_ca_suffix)
    root_creds = ca.credentials
    # self-signed: fixed subject CN, and issuer equals subject
    assert root_creds.subject.rfc4514_string() == "CN=my-ca"
    assert root_creds.cert.issuer == root_creds.subject
    usage:x509.Extension[x509.KeyUsage] = root_creds.cert.extensions.get_extension_for_oid(x509.OID_KEY_USAGE)
    assert usage.critical is True
    assert usage.value.crl_sign and usage.value.key_cert_sign
    # a self-signed cert's Authority Key Identifier must equal its own
    # Subject Key Identifier
    assert root_creds.cert.extensions.get_extension_for_class(x509.AuthorityKeyIdentifier).value.key_identifier == root_creds.cert.extensions.get_extension_for_class(x509.SubjectKeyIdentifier).value.key_identifier
def test_load_creds(encoding:Encoding):
    """Leaf credentials are cached, indexable two ways, and carry SAN/EKU."""
    ca = get_ca(encoding, "pem")
    creds = ca.load_creds("example.com", overwrite=True)
    # both string and dict lookups must return the same cached certificate
    cp = ca["example.com"]
    assert creds.cert == cp.cert
    cp = ca[{"host":"example.com"}]
    assert creds.cert == cp.cert
    assert "example.com" in creds.cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value.get_values_for_type(x509.DNSName)
    assert x509.OID_SERVER_AUTH in creds.cert.extensions.get_extension_for_class(x509.ExtendedKeyUsage).value
    assert x509.OID_CLIENT_AUTH in creds.cert.extensions.get_extension_for_class(x509.ExtendedKeyUsage).value
if __name__ == "__main__":
test_root_certificate(Encoding.DER)
test_load_creds()
| jose-pr/pypki | tests/test_certauth2.py | test_certauth2.py | py | 2,188 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "sys.path.insert",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_nu... |
53220412 | from datetime import date
from zohocrmsdk.src.com.zoho.api.authenticator import OAuthToken
from zohocrmsdk.src.com.zoho.crm.api import Initializer
from zohocrmsdk.src.com.zoho.crm.api.dc import USDataCenter
from zohocrmsdk.src.com.zoho.crm.api.record import RecordOperations, ConvertBodyWrapper, LeadConverter, Record, Field, \
ActionWrapper, SuccessResponse, APIException
from zohocrmsdk.src.com.zoho.crm.api.tags import Tag
from zohocrmsdk.src.com.zoho.crm.api.util import Choice
class ConvertLead:
    """Demonstrates converting a Zoho CRM Lead into Account/Contact/Deal."""

    @staticmethod
    def initialize():
        """Initialize the Zoho SDK for the US production data center.

        NOTE(review): the client credentials here are placeholders -- replace
        them (ideally from configuration, not source) before running.
        """
        environment = USDataCenter.PRODUCTION()
        token = OAuthToken(client_id="clientID", client_secret="clientSecret", grant_token="grantToken")
        Initializer.initialize(environment, token)

    @staticmethod
    def convert_lead(lead_id):
        """
        This method is used to Convert a Lead record and print the response.
        :param lead_id: The ID of the Lead to be converted.
        """
        """
        example
        lead_id = 3409643002034003
        """
        record_operations = RecordOperations()
        request = ConvertBodyWrapper()
        # List to hold LeadConverter instances
        data = []
        record = LeadConverter()
        record.set_overwrite(True)
        record.set_notify_lead_owner(True)
        record.set_notify_new_entity_owner(True)
        # Existing Account/Contact record ids to attach the converted lead
        # to, and the user id to assign it to (sample values).
        record.set_accounts('34096430692007')
        record.set_contacts('34096430836001')
        record.set_assign_to('34096430302031')
        deals = Record()
        """
        Call add_field_value method that takes two arguments
        Import the zcrmsdk.src.com.zoho.crm.api.record.field file
        1 -> Call Field "." and choose the module from the displayed list and press "." and choose the field name from the displayed list.
        2 -> Value
        """
        deals.add_field_value(Field.Deals.deal_name(), 'deal_name')
        deals.add_field_value(Field.Deals.description(), "deals description")
        deals.add_field_value(Field.Deals.closing_date(), date(2020, 10, 2))
        deals.add_field_value(Field.Deals.stage(), Choice("Closed Won"))
        deals.add_field_value(Field.Deals.amount(), 500.78)
        """
        Call add_key_value method that takes two arguments
        1 -> A string that is the Field's API Name
        2 -> Value
        """
        deals.add_key_value('Custom_field', 'Value')
        tag_list = []
        tag = Tag()
        tag.set_name('Converted')
        tag_list.append(tag)
        deals.set_tag(tag_list)
        record.set_deals(deals)
        data.append(record)
        request.set_data(data)
        # Call convertLead method that takes ConvertBodyWrapper instance and lead_id as parameter
        response = record_operations.convert_lead(lead_id, request)
        # Print the outcome: per-record success/failure details, or a
        # top-level APIException.
        if response is not None:
            print('Status Code: ' + str(response.get_status_code()))
            response_object = response.get_object()
            if response_object is not None:
                if isinstance(response_object, ActionWrapper):
                    action_response_list = response_object.get_data()
                    for action_response in action_response_list:
                        if isinstance(action_response, SuccessResponse):
                            print("Status: " + action_response.get_status().get_value())
                            print("Code: " + action_response.get_code().get_value())
                            print("Details")
                            details = action_response.get_details()
                            for key, value in details.items():
                                print(key + ' : ' + str(value))
                            print("Message: " + action_response.get_message().get_value())
                        elif isinstance(action_response, APIException):
                            print("Status: " +
                                  action_response.get_status().get_value())
                            print("Code: " + action_response.get_code().get_value())
                            print("Details")
                            details = action_response.get_details()
                            for key, value in details.items():
                                print(key + ' : ' + str(value))
                            print("Message: " +
                                  action_response.get_message().get_value())
                elif isinstance(response_object, APIException):
                    print("Status: " + response_object.get_status().get_value())
                    print("Code: " + response_object.get_code().get_value())
                    print("Details")
                    details = response_object.get_details()
                    for key, value in details.items():
                        print(key + ' : ' + str(value))
                    print("Message: " + response_object.get_message().get_value())
# Sample invocation: initialize the SDK, then convert this lead id.
lead_id = 440248001507154
ConvertLead.initialize()
ConvertLead.convert_lead(lead_id)
| zoho/zohocrm-python-sdk-5.0 | samples/records/ConvertLead.py | ConvertLead.py | py | 4,997 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "zohocrmsdk.src.com.zoho.crm.api.dc.USDataCenter.PRODUCTION",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "zohocrmsdk.src.com.zoho.crm.api.dc.USDataCenter",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "zohocrmsdk.src.com.zoho.api.authentica... |
27280445528 | from aiogram import types
from aiogram.dispatcher.filters import BoundFilter
import config
import dispatcher
class IsOwnerFilter(BoundFilter):
    """
    Custom filter "is_owner".

    Passes only when the message sender's id is listed in config.BOT_OWNERS.
    """
    key = "is_owner"
    def __init__(self, is_owner):
        # value of the is_owner= keyword used in the handler registration
        self.is_owner = is_owner
    async def check(self, message: types.Message):
        return message.from_user.id in config.BOT_OWNERS
class IsAuthFilter(BoundFilter):
    """
    Custom filter "is_auth".

    Passes only when the message sender's id exists in the auth_users table.
    """
    key = "is_auth"
    def __init__(self, is_auth):
        # store under the name matching the filter key (the original code
        # assigned to self.is_owner -- a copy-paste slip from IsOwnerFilter)
        self.is_auth = is_auth
    async def check(self, message: types.Message):
        # parameterized query: the user id is never interpolated into the SQL
        query = 'SELECT user_id FROM auth_users WHERE user_id = %s'
        args = (message.from_user.id,)
        res = dispatcher.db.execute_query(query, args)
        res = bool(len(res))
        return res
| YarikATM/Metall | tg_bot/filters.py | filters.py | py | 819 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "aiogram.dispatcher.filters.BoundFilter",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "aiogram.types.Message",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "aiogram.types",
"line_number": 16,
"usage_type": "name"
},
{
"ap... |
3372040192 | import jieba
from os import path
import os
from wordcloud import WordCloud
def jieba_processing_txt(text, user_dict=None):
    """Segment *text* with jieba and return space-joined multi-char tokens.

    :param text: the raw text to segment
    :param user_dict: optional iterable of extra words to register with jieba
                      before cutting (None means no extra words)
    """
    # avoid a mutable default argument; None stands in for "no extra words"
    for word in (user_dict or []):
        jieba.add_word(word)
    seg_list = jieba.cut(text, cut_all=False)
    liststr = "/ ".join(seg_list)
    # keep only segments whose stripped form is longer than one character
    mywordlist = [myword for myword in liststr.split('/')
                  if len(myword.strip()) > 1]
    return ' '.join(mywordlist)
def word_cloud(text, savePath, user_dict=None):
    """Render *text* as a word-cloud image and save it to *savePath*.

    :param user_dict: optional iterable of extra words for jieba
                      (None means no extra words; avoids a mutable default)
    """
    d = path.dirname(__file__) if "__file__" in locals() else os.getcwd()
    font_path = d + '/SourceHanSerifK-Light.otf'
    wc = WordCloud(font_path=font_path, background_color="white", max_words=8000,
                   max_font_size=100, random_state=42, width=1200, height=900, margin=2,)
    # pass a concrete list so this works with either signature of
    # jieba_processing_txt
    wc.generate(jieba_processing_txt(text, user_dict or []))
    wc.to_file(savePath)
| gewas/VChaCha | wc.py | wc.py | py | 877 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "jieba.add_word",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "jieba.cut",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
74977715387 | import csv
import re
import logging
import gzip
import io
import sys
import os
import yaml
from dipper.sources.ZFIN import ZFIN
from dipper.sources.WormBase import WormBase
from dipper.sources.Source import Source
from dipper.models.assoc.Association import Assoc
from dipper.models.assoc.G2PAssoc import G2PAssoc
from dipper.models.Genotype import Genotype
from dipper.models.Reference import Reference
from dipper.models.Model import Model
from dipper.utils.GraphUtils import GraphUtils
LOG = logging.getLogger(__name__)
# get gene annotation from current.geneontology.com,
# which is the last official release (but not the bleeding edge)
GOGA = 'http://current.geneontology.org/annotations'
FTPEBI = 'ftp://ftp.uniprot.org/pub/databases/' # best for North America
UPCRKB = 'uniprot/current_release/knowledgebase/'
# large entries in field 7 of ZFIN require this:
csv.field_size_limit(sys.maxsize)
class GeneOntology(Source):
"""
This is the parser for the
[Gene Ontology Annotations](http://www.geneontology.org),
from which we process gene-process/function/subcellular
location associations.
We generate the GO graph to include the following information:
* genes
* gene-process
* gene-function
* gene-location
We process only a subset of the organisms:
Status: IN PROGRESS / INCOMPLETE
"""
gaf_columns = [ # GAF2.1 files contain the following columns:
'DB',
'DB_Object_ID',
'DB_Object_Symbol',
'Qualifier',
'GO_ID',
'DB:Reference',
'Evidence Code',
'With (or) From', # list possible w/pipe(or) w/comma(and) +both
'Aspect',
'DB_Object_Name',
'DB_Object_Synonym',
'DB_Object_Type',
'Taxon and Interacting taxon',
'Date',
'Assigned_By',
'Annotation_Extension',
'Gene_Product_Form_ID'
]
files = {
'9615': { # Canis lupus familiaris
'file': 'goa_dog.gaf.gz',
'url': GOGA + '/goa_dog.gaf.gz',
'columnns': gaf_columns
},
'7227': { # Drosophila melanogaster
'file': 'fb.gaf.gz',
'url': GOGA + '/fb.gaf.gz',
'columnns': gaf_columns
},
'7955': { # Danio rerio
'file': 'zfin.gaf.gz',
'url': GOGA + '/zfin.gaf.gz',
'columnns': gaf_columns
},
'10090': { # Mus musculus
'file': 'mgi.gaf.gz',
'url': GOGA + '/mgi.gaf.gz',
'columnns': gaf_columns
},
'10116': { # Rattus norvegicus
'file': 'rgd.gaf.gz',
'url': GOGA + '/rgd.gaf.gz',
'columnns': gaf_columns
},
'6239': { # Caenorhabditis elegans
'file': 'wb.gaf.gz',
'url': GOGA + '/wb.gaf.gz',
'columnns': gaf_columns
},
'9823': { # Sus scrofa
'file': 'goa_pig.gaf.gz',
'url': GOGA + '/goa_pig.gaf.gz',
'columnns': gaf_columns
},
'9031': { # Gallus gallus
'file': 'goa_chicken.gaf.gz',
'url': GOGA + '/goa_chicken.gaf.gz',
'columnns': gaf_columns
},
'9606': { # Homo sapiens
'file': 'goa_human.gaf.gz',
'url': GOGA + '/goa_human.gaf.gz',
'columnns': gaf_columns
},
'9913': { # Bos taurus
'file': 'goa_cow.gaf.gz',
'url': GOGA + '/goa_cow.gaf.gz',
'columnns': gaf_columns
},
'559292': { # Saccharomyces cerevisiae 4932
'file': 'sgd.gaf.gz',
'url': GOGA + '/sgd.gaf.gz',
'columnns': gaf_columns
},
'4896': { # Schizosaccharomyces pombe (yeast)
'file': 'pombase.gaf.gz',
'url': GOGA + '/pombase.gaf.gz',
'columnns': gaf_columns
},
'5782': { # Dictyostelium (slime mold genus)
'file': 'dictybase.gaf.gz',
'url': GOGA + '/dictybase.gaf.gz',
'columnns': gaf_columns
},
'5052': { # Aspergillus (fungi) http://www.aspergillusgenome.org/
'file': 'aspgd.gaf.gz',
'url': GOGA + '/aspgd.gaf.gz',
'columnns': gaf_columns
},
# consider this after most others - should this be part of GO?
# 'multispecies': {
# 'file': 'gene_association.goa_uniprot.gz',
# 'url': FTPEBI + 'GO/goa/UNIPROT/gene_association.goa_uniprot.gz'},
# 'go-references': { # does not seem to be used
# 'file': 'GO.references',
# # Quoth the header of this file: "This file is DEPRECATED.
# # Please see go-refs.json relative to this location"
# # (http://current.geneontology.org/metadata/go-refs.json)
# 'url': 'http://www.geneontology.org/doc/GO.references'
# },
'idmapping_selected': {
# 9.7GB mapping file takes hours to DL ...
# maps UniProt to Ensembl & more (which we imostly gnore)
# replace w/ Ensembl rdf? --- no, current approach seems most canonical
# ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/
# idmapping/idmapping_selected.tab.gz
'file': 'idmapping_selected.tab.gz',
'url': FTPEBI + UPCRKB + 'idmapping/idmapping_selected.tab.gz',
# ftp://ftp.uniprot.org
# /pub/databases/uniprot/current_release/knowledgebase/idmapping/README
'columns': [
'UniProtKB-AC',
'UniProtKB-ID',
'GeneID (EntrezGene)',
'RefSeq',
'GI',
'PDB',
'GO',
'UniRef100',
'UniRef90',
'UniRef50',
'UniParc',
'PIR',
'NCBI-taxon',
'MIM',
'UniGene',
'PubMed',
'EMBL',
'EMBL-CDS',
'Ensembl',
'Ensembl_TRS',
'Ensembl_PRO',
'Additional PubMed'
]
},
'gaf-eco-mapping': {
'file': 'gaf-eco-mapping.yaml',
'url': '/'.join((Source.DIPPERCACHE, 'go', 'gaf-eco-mapping.yaml')),
}
}
# a set of synomym curie prefixes we choose not to propagate as uri
# this takes a quarter million warrnings out of the log files
wont_prefix = [
'zgc', 'wu', 'si', 'im', 'BcDNA', 'sb', 'anon-EST', 'EG', 'id', 'zmp',
'BEST', 'BG', 'hm', 'tRNA', 'NEST', 'xx']
def __init__(self,
graph_type,
are_bnodes_skolemized,
data_release_version=None,
tax_ids=None):
super().__init__(
graph_type=graph_type,
are_bnodes_skized=are_bnodes_skolemized,
data_release_version=data_release_version,
name='go',
ingest_title='Gene Ontology',
ingest_url='http://www.geneontology.org',
ingest_logo='source-geneontology.png',
license_url=None,
data_rights='http://geneontology.org/page/use-and-license'
# file_handle=None
)
self.test_ids = []
# note: dipper-etl defaults tax_ids to '9606'
# note: sorting tax_ids for stable digest
if tax_ids is not None and [] != set(tax_ids).difference(['9606']):
LOG.info('Have %s given as taxon to ingest', str(tax_ids))
self.tax_ids = sorted([str(x) for x in tax_ids])
nottax = set(tax_ids) - set(self.files.keys())
if nottax:
LOG.error('Cant process taxon number(s):\t%s', str(nottax))
self.tax_ids = list(set(self.tax_ids) - nottax)
else:
self.tax_ids = sorted(['9606', '10090', '7955'])
LOG.info("Filtering to the following taxa: %s", self.tax_ids)
# moving this from process_gaf() to avoid repeating this for each
# file to be processed.
if '7955' in self.tax_ids:
self.zfin = ZFIN(self.graph_type, self.are_bnodes_skized)
if '6239' in self.tax_ids:
self.wbase = WormBase(self.graph_type, self.are_bnodes_skized)
if 'gene' not in self.all_test_ids:
LOG.warning("not configured with gene test ids.")
else:
self.test_ids = self.all_test_ids['gene']
# build the id map for mapping uniprot ids to genes ... ONCE
self.uniprot_entrez_id_map = self.get_uniprot_entrez_id_map()
# gaf evidence code mapping is built in parse(), after the file is fetched.
self.gaf_eco = {}
def fetch(self, is_dl_forced=False):
self.get_files(is_dl_forced)
def parse(self, limit=None):
yamlfile = '/'.join((self.rawdir, self.files['gaf-eco-mapping']['file']))
with open(yamlfile, 'r') as yfh:
self.gaf_eco = yaml.safe_load(yfh)
if limit is not None:
LOG.info("Only parsing first %s rows of each file", limit)
LOG.info("Parsing files...")
if self.test_only:
self.test_mode = True
for txid_num in list(set(self.files).intersection(self.tax_ids)):
gaffile = '/'.join((self.rawdir, self.files[txid_num]['file']))
self.process_gaf(gaffile, limit, self.uniprot_entrez_id_map)
LOG.info("Finished parsing.")
def process_gaf(self, gaffile, limit, id_map=None):
    """Parse one gzipped GAF association file and add its content to the graph.

    For each association row this adds the gene (label, type, synonyms,
    taxon) plus a gene-to-GO-term association; for IMP rows carrying a
    'With (or) From' value it additionally derives gene-to-phenotype
    (G2P) associations, building reagent-targeted genes for zebrafish
    morphants/CRISPR/TALEN and worm RNAi reagents.

    :param gaffile: path to the gzipped GAF file
    :param limit: optional cap on rows parsed (ignored in test mode)
    :param id_map: optional dict of UniProtKB accession -> entrez/ensembl
        gene curie (see get_uniprot_entrez_id_map); rows whose accession
        is absent from the map are skipped
    """
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    geno = Genotype(graph)
    LOG.info("Processing Gene Associations from %s", gaffile)
    # counters for reporting how useful the uniprot id_map was
    uniprot_hit = 0
    uniprot_miss = 0
    col = self.gaf_columns
    with gzip.open(gaffile, 'rb') as csvfile:
        reader = csv.reader(
            io.TextIOWrapper(csvfile, newline=""), delimiter='\t', quotechar='\"')
        for row in reader:
            # comments start with exclamation
            if row[0][0] == '!':
                continue
            if len(row) != len(col):
                LOG.error(
                    "Wrong number of columns %i, expected ... got:\n\t%s",
                    len(col), row)
                exit(1)
            dbase = row[col.index('DB')].strip()
            gene_num = row[col.index('DB_Object_ID')].strip()
            gene_symbol = row[col.index('DB_Object_Symbol')].strip()
            qualifier = row[col.index('Qualifier')]
            go_id = row[col.index('GO_ID')].strip()
            ref = row[col.index('DB:Reference')].strip()
            eco_symbol = row[col.index('Evidence Code')].strip()
            with_or_from = row[col.index('With (or) From')]
            aspect = row[col.index('Aspect')].strip()
            gene_name = row[col.index('DB_Object_Name')]
            gene_synonym = row[col.index('DB_Object_Synonym')]
            # object_type = row[col.index('DB_Object_Type')].strip()
            taxon = row[col.index('Taxon and Interacting taxon')].strip()
            # date = row[col.index('Date')].strip()
            # assigned_by = row[col.index('Assigned_By')].strip()
            # annotation_extension = row[col.index('Annotation_Extension')]
            # gene_product_form_id = row[col.index('Gene_Product_Form_ID')]

            # test for required fields
            # NOTE(review): row[:10] is a list and can never equal '', so in
            # practice only row[12] is checked here -- confirm intent.
            if '' in [row[:10], row[12]]:
                LOG.error(
                    "Missing required part of annotation on row %i:\n%s",
                    reader.line_num, str(row[:-4]))
                continue

            # (Don't) deal with qualifier NOT, contributes_to, colocalizes_with
            if re.search(r'NOT', qualifier):
                continue

            if dbase in self.localtt:
                dbase = self.localtt[dbase]
            uniprotid = None
            gene_id = None
            if dbase == 'UniProtKB':
                if id_map is not None:
                    # try/except much faster than checking
                    # for dict key membership
                    try:
                        gene_id = id_map[gene_num]
                        uniprotid = ':'.join((dbase, gene_num))
                        # rebind dbase/gene_num to the mapped gene curie parts
                        (dbase, gene_num) = gene_id.split(':')
                        uniprot_hit += 1
                    except KeyError:
                        # LOG.warning(
                        #    "UniProt id %s is without a 1:1 mapping to entrez/ensembl",
                        #    gene_num)
                        uniprot_miss += 1
                        continue
            else:
                gene_num = gene_num.split(':')[-1]  # last
            gene_id = ':'.join((dbase, gene_num))

            if self.test_mode and gene_id[:9] != 'NCBIGene:' and\
                    gene_num not in self.test_ids:
                continue

            model.addLabel(gene_id, gene_symbol)
            model.addType(gene_id, self.globaltt['gene'])
            if gene_name != '':
                model.addDescription(gene_id, gene_name)
            if gene_synonym != '':
                for syn in re.split(r'\|', gene_synonym):
                    syn = syn.strip()
                    if syn[:10] == 'UniProtKB:':
                        # uniprot "synonyms" are really gene products
                        model.addTriple(
                            gene_id, self.globaltt['has gene product'], syn)
                    elif re.fullmatch(graph.curie_regexp, syn) is not None and\
                            syn.split(':')[0] not in self.wont_prefix:
                        syn = syn.strip()
                        LOG.warning(
                            'possible curie "%s" as a literal synomym for %s',
                            syn, gene_id)
                        if syn != '':
                            model.addSynonym(gene_id, syn)
                    elif syn != '':
                        model.addSynonym(gene_id, syn)

            # First taxon is for the gene, after the pipe are interacting taxa
            tax_curie = taxon.split('|')[0].replace('taxon', 'NCBITaxon')
            # this is a required field but good to be safe
            if tax_curie:
                geno.addTaxon(tax_curie, gene_id)

            assoc = Assoc(graph, self.name)
            assoc.set_subject(gene_id)
            assoc.set_object(go_id)
            try:
                eco_id = self.gaf_eco[eco_symbol]
                assoc.add_evidence(eco_id)
            except KeyError:
                LOG.error("Evidence code (%s) not mapped", eco_symbol)

            refs = re.split(r'\|', ref)
            for ref in refs:
                ref = ref.strip()
                if ref != '':
                    prefix = ref.split(':')[-2]  # sidestep 'MGI:MGI:'
                    if prefix in self.localtt:
                        prefix = self.localtt[prefix]
                    ref = ':'.join((prefix, ref.split(':')[-1]))
                    refg = Reference(graph, ref)
                    if prefix == 'PMID':
                        ref_type = self.globaltt['journal article']
                        refg.setType(ref_type)
                    refg.addRefToGraph()
                    assoc.add_source(ref)

            # TODO add the source of the annotations from assigned by?

            rel = self.resolve(aspect, mandatory=False)
            # resolve() returning the input unchanged means the aspect did
            # not translate; 'F' + contributes_to is the one accepted case
            if rel is not None and aspect == rel:
                if aspect == 'F' and re.search(r'contributes_to', qualifier):
                    assoc.set_relationship(self.globaltt['contributes to'])
                else:
                    LOG.error(
                        "Aspect: %s with qualifier: %s is not recognized",
                        aspect, qualifier)
            elif rel is not None:
                assoc.set_relationship(rel)
                assoc.add_association_to_graph()
            else:
                LOG.warning("No predicate for association \n%s\n", str(assoc))

            if uniprotid is not None:
                assoc.set_description('Mapped from ' + uniprotid)

            # object_type should be one of:
            # protein_complex; protein; transcript; ncRNA; rRNA; tRNA;
            # snRNA; snoRNA; any subtype of ncRNA in the Sequence Ontology.
            # If the precise product type is unknown,
            # gene_product should be used
            ########################################################################
            # Derive G2P Associations from IMP annotations
            # in version 2.1 Pipe will indicate 'OR'
            # and Comma will indicate 'AND'.
            # in version 2.0, multiple values are separated by pipes
            # where the pipe has been used to mean 'AND'
            if eco_symbol == 'IMP' and with_or_from != '':
                withitems = re.split(r'[|,]', with_or_from)  # OR + AND
                phenotypeid = go_id + 'PHENOTYPE'
                # create phenotype associations
                for itm in withitems:
                    if itm == '' or re.match(
                            r'(UniProtKB|WBPhenotype|InterPro|HGNC)', itm):
                        LOG.warning(
                            "Skipping %s from or with %s", uniprotid, itm)
                        continue
                    # sanity check/conversion on go curie prefix
                    (pfx, lclid) = itm.split(':')[-2:]  # last prefix wins
                    if pfx in self.localtt:
                        pfx = self.localtt[pfx]
                    itm = ':'.join((pfx, lclid))

                    # for worms and fish, they might give a RNAi or MORPH
                    # in these cases make a reagent-targeted gene
                    if re.search('MRPHLNO|CRISPR|TALEN', itm):
                        targeted_gene_id = self.zfin.make_targeted_gene_id(
                            gene_id, itm)
                        geno.addReagentTargetedGene(itm, gene_id, targeted_gene_id)
                        # TODO PYLINT why is this needed?
                        # Redefinition of assoc type from
                        # dipper.models.assoc.Association.Assoc to
                        # dipper.models.assoc.G2PAssoc.G2PAssoc
                        assoc = G2PAssoc(
                            graph, self.name, targeted_gene_id, phenotypeid)
                    elif re.search(r'WBRNAi', itm):
                        targeted_gene_id = self.wbase.make_reagent_targeted_gene_id(
                            gene_id, itm)
                        geno.addReagentTargetedGene(itm, gene_id, targeted_gene_id)
                        assoc = G2PAssoc(
                            graph, self.name, targeted_gene_id, phenotypeid)
                    else:
                        assoc = G2PAssoc(graph, self.name, itm, phenotypeid)
                    for ref in refs:
                        ref = ref.strip()
                        if ref != '':
                            prefix = ref.split(':')[-2]
                            if prefix in self.localtt:
                                prefix = self.localtt[prefix]
                            ref = ':'.join((prefix, ref.split(':')[-1]))
                            assoc.add_source(ref)
                    # experimental phenotypic evidence
                    assoc.add_evidence(
                        self.globaltt['experimental phenotypic evidence'])
                    assoc.add_association_to_graph()
                    # TODO should the G2PAssoc be the evidence for the GO assoc?

            if not self.test_mode and limit is not None and \
                    reader.line_num > limit:
                break
    uniprot_tot = (uniprot_hit + uniprot_miss)
    uniprot_per = 0.0
    if uniprot_tot != 0:
        uniprot_per = 100.0 * uniprot_hit / uniprot_tot
    LOG.info(
        "Uniprot: %.2f%% of %i benefited from the idmapping_selected download",
        uniprot_per, uniprot_tot)
def get_uniprot_entrez_id_map(self):
    """Build (or reload) a UniProtKB accession -> gene-curie map.

    The expensive path streams the very large gzipped
    ``idmapping_selected`` download (over 10GB unzipped), keeping only
    rows for the configured taxa that have an unambiguous (single)
    EntrezGene or Ensembl id; the result is cached to a per-taxon-set
    YAML "smallfile" and reused on later runs when it is newer than the
    download.

    :return: dict of UniProtKB accession -> 'NCBIGene:...' or 'ENSEMBL:...'
    """
    src_key = 'idmapping_selected'
    # taxon set is part of the cache-file name so different runs don't collide
    taxon_digest = GraphUtils.digest_id(str(self.tax_ids))
    id_map = {}
    smallfile = '/'.join((self.rawdir, 'id_map_' + taxon_digest + '.yaml'))
    bigfile = '/'.join((self.rawdir, self.files[src_key]['file']))

    # if processed smallfile exists and is newer than bigfile then use it instead
    if os.path.isfile(smallfile) and \
            os.path.getctime(smallfile) > os.path.getctime(bigfile):
        LOG.info("Using the cheap mapping file %s", smallfile)
        with open(smallfile, 'r') as yamlreader:
            id_map = yaml.safe_load(yamlreader)
    else:
        LOG.info(
            "Expensive Mapping from Uniprot IDs to Entrez/ENSEMBL gene ids for %s",
            self.tax_ids)
        self.fetch_from_url(self.files[src_key]['url'], bigfile)
        col = self.files[src_key]['columns']
        ummapped_uniprot = 0
        with gzip.open(bigfile, 'rb') as csvfile:
            # rows can exceed csv's default field size limit
            csv.field_size_limit(sys.maxsize)
            reader = csv.reader(  # warning this file is over 10GB unzipped
                io.TextIOWrapper(csvfile, newline=""),
                delimiter='\t', quotechar='\"')
            for row in reader:
                # only four of the many columns are used; the rest
                # (UniProtKB-ID, RefSeq, GI, PDB, GO, UniRef100/90/50,
                # UniParc, PIR, MIM, UniGene, PubMed, EMBL, EMBL-CDS,
                # Ensembl_TRS, Ensembl_PRO, Additional PubMed) are ignored
                uniprotkb_ac = row[col.index('UniProtKB-AC')].strip()
                geneid = row[col.index('GeneID (EntrezGene)')].strip()
                ncbitaxon = row[col.index('NCBI-taxon')].strip()
                ensembl = row[col.index('Ensembl')].strip()
                if ncbitaxon not in self.tax_ids:
                    continue
                # neither empty nor a list
                if geneid != '' and ';' not in geneid:
                    id_map[uniprotkb_ac] = 'NCBIGene:' + geneid
                elif ensembl != '' and ';' not in ensembl:
                    id_map[uniprotkb_ac] = 'ENSEMBL:' + ensembl
                else:
                    ummapped_uniprot += 1
        LOG.info("Writing id_map out as %s", smallfile)
        with open(smallfile, 'w') as yamlwriter:
            yaml.dump(id_map, yamlwriter)
        LOG.warning('Did not find 1:1 gene IDs for %i uniprots', ummapped_uniprot)
    LOG.info(
        "Acquired %i 1:1 uniprot to [entrez|ensembl] mappings", len(id_map.keys()))
    return id_map
def getTestSuite(self):
    """Return the unittest suite that exercises this ingest."""
    import unittest
    from tests.test_geneontology import GeneOntologyTestCase
    return unittest.TestLoader().loadTestsFromTestCase(GeneOntologyTestCase)
| monarch-initiative/dipper | dipper/sources/GeneOntology.py | GeneOntology.py | py | 24,532 | python | en | code | 53 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "csv.field_size_limit",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sys.maxsize",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "dipper.source... |
30489056010 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 4 16:13:47 2021
@author: Roman
"""
from spektral.transforms import AdjToSpTensor
from spektral.data import Dataset
from spektral.transforms.normalize_one import NormalizeOne
import numpy as np
import pandas as pd
from scipy.sparse import coo_matrix
from astropy.coordinates import SkyCoord
import astropy.units as u
from src.graph.motgraph import MOTGraph
import time as t
import platform
def sliding_window(base_value, window_size = 4, overlap = 2, copy = False):
    """
    Build a 2-D array of overlapping windows over ``base_value``.

    Parameters
    ----------
    base_value : numpy array
        1-D values used to make the windows.
    window_size : int, optional
        number of elements per window. The default is 4.
    overlap : int, optional
        step between the starts of consecutive windows; consecutive
        windows therefore share ``window_size - overlap`` elements.
        The default is 2.
    copy : bool, optional
        when True, return an independent copy instead of a (read-only)
        view of ``base_value``. The default is False.

    Returns
    -------
    numpy array
        one row per window; shape
        ``(ceil((len(base_value) - window_size + 1) / overlap), window_size)``.
    """
    # sliding_window_view is the bounds-checked replacement recommended by
    # numpy over hand-building the same view with as_strided; for 1-D input
    # it produces exactly the same (size - window + 1, window) layout.
    view = np.lib.stride_tricks.sliding_window_view(base_value, window_size)[0::overlap]
    if copy:
        return view.copy()
    return view
def loadSSOdata(month, _class, point_limit):
    """
    Load solar system alerts from a local pickle file.

    Parameters
    ----------
    month : string
        a string number used to specify which file will be loaded.
    _class : string
        which class of object will be loaded; only 'Solar System MPC' or
        'Solar System Candidate' are meaningful here.
    point_limit : int
        keep only MPC objects seen at least this many times; only applied
        when _class is 'Solar System MPC'.

    Returns
    -------
    dataframe
        all alerts seen in the month, belonging to _class and (for MPC)
        seen at least point_limit times.

    Raises
    ------
    ValueError
        when the platform is neither Linux nor Windows.
    """
    if platform.system() == 'Linux':
        path = "../../data/month=" + month
    elif platform.system() == 'Windows':
        # raw string: '\d' and '\m' are invalid escape sequences in a
        # normal literal (DeprecationWarning, future SyntaxError)
        path = r"..\..\data\month=" + month
    else:
        raise ValueError("unsupported platform: " + platform.system())
    df_sso = pd.read_pickle(path)
    df_class = df_sso[df_sso['fink_class'] == _class]
    if _class == 'Solar System MPC':
        # keep only objects whose lightcurve has enough points
        mpc_trajectory = df_class.groupby(['ssnamenr']).count()
        mpc_index = mpc_trajectory[mpc_trajectory['ra'] >= point_limit].index
        feature = ['ra', 'dec', 'jd', 'nid', 'dcmag', 'fid', 'ssnamenr', 'candid']
        return df_class[df_class['ssnamenr'].isin(mpc_index)][feature]
    else:
        feature = ['ra', 'dec', 'jd', 'nid', 'dcmag', 'fid', 'candid']
        return df_class[feature]
class EdgeNormalizeOne:
    r"""
    Normalizes the edge attributes by dividing each row by its sum, so that it
    sums to 1:
    $$
    \X_i \leftarrow \frac{\X_i}{\sum_{j=1}^{N} \X_{ij}}
    $$
    Rows whose sum is zero are left untouched.
    """
    def __call__(self, graph):
        """Row-normalize ``graph.e`` in place and return the graph."""
        totals = graph.e.sum(axis=-1, keepdims=True)
        # avoid division by zero: all-zero rows are divided by 1 instead
        totals[totals == 0] = 1
        graph.e = graph.e / totals
        return graph
class MOTGraphDataset(Dataset):
    """Spektral Dataset of multi-object-tracking graphs built from local
    solar system alert files, one graph per overlapping window of nights."""

    def __init__(self, date, load_candidates, lightcurves_point_limit, window_params = None, **kwargs):
        """
        Build graph dataset from local solar system alert dataset

        Parameters
        ----------
        date : string
            a string number used to specify which file will be loaded.
        load_candidates : string
            specify which class object wil be loaded, values can only be
            'Solar System MPC' or 'Solar System Candidate'.
        lightcurves_point_limit : int
            a value to limit the number of alerts loaded by taken only the
            object seen more than point_limit times, work only when _class
            is 'Solar System MPC'.
        window_params : int tuple, optional
            parameter of the window, first is size, second is overlap (step).
            The default is None.

        Returns
        -------
        None.
        """
        self.date = date
        self.lcpl = lightcurves_point_limit
        self.load_candidates = load_candidates
        self.window_params = window_params
        # Dataset.__init__ calls read(), so attributes must be set first
        super().__init__(**kwargs)

    def read(self):
        """
        method call by the class internally, perform file reading and graph
        building in order to create graph dataset

        Returns
        -------
        output : graph list
            all the graph build from the overlaping window.
        """
        print("reading data...")
        output = []
        df_sso = loadSSOdata(self.date, self.load_candidates, self.lcpl)
        print("number of sso_alert remaining after limitation by number of point in lightcurves: {}"
              .format(len(df_sso)))
        nid = np.unique(df_sso['nid'])
        # default window over night ids: 10 nights, step 5
        window = 10
        overlap = 5
        if self.window_params is not None:
            window, overlap = self.window_params
        frames_window = sliding_window(nid, window, overlap)
        print("construct graph by overlapping window on night id")
        print("number of graph: {}".format(len(frames_window)))
        nb_graph = 1
        for frames in frames_window:
            df_frames = df_sso[df_sso['nid'].isin(frames)]
            # candid_idx: 0..n-1 node index; label: edge-label scratch column
            df_frames = df_frames.assign(candid_idx=pd.Series(np.arange(len(df_frames))).values)
            df_frames = df_frames.assign(label=pd.Series(np.zeros(len(df_frames))).values)
            # self-merge on the constant 'label' column = full cartesian
            # product of alerts (candidate edges)
            tmp_df = pd.merge(df_frames, df_frames, on='label')
            # prune: no self-edges, no same-night edges, and a magnitude
            # change rate of at most 1.0 per day
            graph_prune = tmp_df[(tmp_df['candid_x'] != tmp_df['candid_y'])
                                 & (tmp_df['nid_x'] != tmp_df['nid_y'])
                                 & (((tmp_df['dcmag_x'] - tmp_df['dcmag_y']) / (tmp_df['jd_x'] - tmp_df['jd_y'])) <= 1.0)
                                 ]
            del tmp_df
            # prune edges by on-sky separation (<= 0.8 degree)
            ra_x, dec_x = np.array(graph_prune['ra_x']), np.array(graph_prune['dec_x'])
            ra_y, dec_y = np.array(graph_prune['ra_y']), np.array(graph_prune['dec_y'])
            c1 = SkyCoord(ra_x, dec_x, unit = u.degree)
            c2 = SkyCoord(ra_y, dec_y, unit = u.degree)
            alerts_sep = c1.separation(c2)
            graph_prune['alert_sep'] = alerts_sep
            graph_prune = graph_prune[graph_prune['alert_sep'] <= 0.8]
            print("constructing graph nb {} with {} nodes and {} edges"
                  .format(nb_graph, len(df_frames), len(graph_prune)))
            # NOTE(review): the ssnamenr_x/_y columns only exist for the
            # 'Solar System MPC' class (see loadSSOdata) -- confirm this
            # path is never taken for candidates.
            # take edges where extremity nodes are the same mpc object
            same_mpc = graph_prune[graph_prune['ssnamenr_x'] == graph_prune['ssnamenr_y']]
            # take edges where the left node have been created before the right node
            forward_same_mpc = same_mpc[same_mpc['nid_x'] < same_mpc['nid_y']]
            # take only one edge if multiple exists
            idx_label = forward_same_mpc.groupby(['ssnamenr_x', 'nid_x'])['nid_y'].idxmin()
            # create the training label
            graph_prune.loc[same_mpc.loc[idx_label].index, 'label'] = 1
            edge_label = graph_prune['label'].to_numpy().astype(np.int32)
            # sparse adjacency matrix over node indices
            row = list(graph_prune['candid_idx_x'])
            col = list(graph_prune['candid_idx_y'])
            data = np.ones(len(col))
            sparse_adj_mat = coo_matrix((data, (row, col)), shape=(len(df_frames), len(df_frames))).tocsr()
            node_feature = df_frames[['ra', 'dec', 'jd', 'dcmag', 'nid', 'fid']].to_numpy()
            # edge features: |dmag|, djd, separation, dnid (sign encodes direction)
            edge_feature = np.c_[np.array(np.abs(graph_prune['dcmag_x'] - graph_prune['dcmag_y'])),
                                 np.array(graph_prune['jd_x'] - graph_prune['jd_y']),
                                 np.array(graph_prune['alert_sep']),
                                 np.array(graph_prune['nid_x'] - graph_prune['nid_y'])]
            # split edges into past (dnid > 0) and future (dnid < 0) sets
            past_index = np.where(edge_feature[:, -1] > 0)[0]
            past_index = past_index.reshape((len(past_index), 1))
            futur_index = np.where(edge_feature[:, -1] < 0)[0]
            futur_index = futur_index.reshape((len(futur_index), 1))
            if self.load_candidates == 'Solar System MPC':
                graph_prune = graph_prune[['candid_x', 'nid_x', 'ssnamenr_x',
                                           'candid_y', 'nid_y', 'ssnamenr_y', 'label']]
            else:
                graph_prune = graph_prune[['candid_x', 'nid_x', 'candid_y', 'nid_y']]
            g = MOTGraph(node_feature, sparse_adj_mat, edge_feature, edge_label.reshape((len(edge_label), 1)),
                         graph_prune, past_index, futur_index)
            output.append(g)
            nb_graph += 1
            print()
        print("end reading")
        return output
if __name__ == "__main__":
    # smoke test: build a small training dataset and print the per-graph
    # count of positive edge labels
    print("test")
    t_before = t.time()  # 't' is the 'time' module alias from the imports
    tr_dataset = MOTGraphDataset("03", 'Solar System MPC', 15, window_params=(5, 2),
                                 transforms=[EdgeNormalizeOne(), NormalizeOne(), AdjToSpTensor()])
    print("tr_dataset construct time: ", t.time() - t_before)
    for g in tr_dataset:
        print(g.y.sum())
| FusRoman/Alert-Association-previous-work | src/graph/motgraphdataset.py | motgraphdataset.py | py | 9,253 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.lib.stride_tricks.as_strided",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.lib",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "platform.system",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "pla... |
38943946635 | import wx
import sys
from datetime import date
import webbrowser
from pbalance_c import pbalance_c as balance_c
from pincome_c import pincome_c as income_c
from pportfolio_c import pportfolio_c as portfolio_c
from padddata_c import padddata_c as adddata_c
from praw_c import praw_c as raw_c
from puseradd_c import puseradd_c as useradd_c
from plugin_c import plugin_c
from datautils_c import datautils_c
from datacoll_c import datacoll_c
class gui_c(wx.Frame):
    """Main application window: menu bar, a project tree on the left and a
    swappable plugin panel on the right, wired to the owning controller."""

    def __init__(self,parent,program,version,about,owner):
        """Build menus, tree and panel; optionally open a project passed
        as the first command-line argument."""
        wx.Frame.__init__(self,parent,-1,program+' '+version,size=(900,650))
        self.CreateStatusBar()
        self.SetMinSize((640,480))
        self.pinfo = program
        self.vinfo = version
        self.ainfo = about
        self.owner = owner
        self.projectopen = False
        self.frametitle = program+' '+version
        # CALLBACK
        owner.registerupdcallback(self.updatecallback)
        # PLUGINS
        #self.plugins = [balance_c,income_c,useradd_c,addquotes_c,raw_c,adddata_c]
        self.plugins = [balance_c,income_c,portfolio_c,useradd_c,adddata_c,raw_c]
        self.datacoll = self.owner.datacoll
        # COLOURS
        #self.palette = DEF_COLORS
        #self.palette1 = DEF_COLORS_PROFIT
        # MENU BAR
        self.menubar = wx.MenuBar()
        # FILE
        self.menufile = wx.Menu()
        item = self.menufile.Append(gui_c.ID_NEWPROJ,'New project...','Create a new project')
        item = self.menufile.Append(gui_c.ID_OPEPROJ,'Open project...','Open an existing project')
        item = self.menufile.Append(gui_c.ID_SAVPROJ,'Save project','Save current project')
        self.menufile.AppendSeparator()
        item = self.menufile.Append(gui_c.ID_EXIT,'Exit','Exit program')
        #self.menufile.Enable(gui_c.ID_SAVPROJ,False)
        self.menubar.Append(self.menufile,'File')
        # VIEW (mutually exclusive radio items selecting the display mode)
        self.menuview = wx.Menu()
        item = self.menuview.Append(gui_c.ID_DETAILS,'Details','Show a detailed textual view',kind=wx.ITEM_RADIO)
        item = self.menuview.Append(gui_c.ID_HISTORY,'Monthly history','Show a monthly historical textual view',kind=wx.ITEM_RADIO)
        item = self.menuview.Append(gui_c.ID_HISTORYY,'Yearly history','Show a yearly historical textual view',kind=wx.ITEM_RADIO)
        item = self.menuview.Append(gui_c.ID_GDETAILS,'Detailed plot','Show a detailed graphical view',kind=wx.ITEM_RADIO)
        item = self.menuview.Append(gui_c.ID_GHISTORY,'Monthly graph','Show a monthly historical graphical view',kind=wx.ITEM_RADIO)
        item = self.menuview.Append(gui_c.ID_GHISTORYY,'Yearly graph','Show a yearly historical graphical view',kind=wx.ITEM_RADIO)
        self.menubar.Append(self.menuview,'View')
        # PROJECT
        self.menuproj = wx.Menu()
        item = self.menuproj.Append(gui_c.ID_UPDPROJ,'Download online quotes','Update stock quotes and currency rates online')
        item = self.menuproj.Append(gui_c.ID_CALPROJ,'Re-calculate result','Re-calculate all matrices and data')
        #self.menuproj.Enable(gui_c.ID_UPDPROJ,False)
        #self.menuproj.Enable(gui_c.ID_CALPROJ,False)
        self.menubar.Append(self.menuproj,'Project')
        # HELP
        self.menuabout = wx.Menu()
        item = self.menuabout.Append(gui_c.ID_UPDATE,'Search for updates...','Online update search')
        item = self.menuabout.Append(gui_c.ID_USERGU,'User\'s Guide... (web)','Frugal\'s User\'s Guide')
        item = self.menuabout.Append(gui_c.ID_ABOUT,'About','About %s'%(self.pinfo))
        self.menubar.Append(self.menuabout,'About')
        # UPDATE
        #self.menuupdate = wx.Menu()
        #item = self.menuupdate.Append(gui_c.ID_UPDATE,'Show available update','Show available updates')
        #self.menubar.Append(self.menuupdate,'Update')
        # ADD MENU AND EVENTS
        self.SetMenuBar(self.menubar)
        #self.Bind(wx.EVT_MENU, self.OnExit, id=wx.ID_EXIT)
        #wx.EVT_MENU(self,gui_c.ID_NEWPROJ,self.new)
        self.Bind(wx.EVT_MENU, self.new, id=gui_c.ID_NEWPROJ)
        #wx.EVT_MENU(self,gui_c.ID_OPEPROJ,self.open)
        self.Bind(wx.EVT_MENU, self.open, id=gui_c.ID_OPEPROJ)
        #wx.EVT_MENU(self,gui_c.ID_SAVPROJ,self.save)
        self.Bind(wx.EVT_MENU, self.save, id=gui_c.ID_SAVPROJ)
        #wx.EVT_MENU(self,gui_c.ID_EXIT,self.quit)
        self.Bind(wx.EVT_MENU, self.quit, id=gui_c.ID_EXIT)
        #wx.EVT_MENU(self,gui_c.ID_UPDPROJ,self.updateproject)
        self.Bind(wx.EVT_MENU, self.updateproject, id=gui_c.ID_UPDPROJ)
        #wx.EVT_MENU(self,gui_c.ID_CALPROJ,self.calc)
        self.Bind(wx.EVT_MENU, self.calc, id=gui_c.ID_CALPROJ)
        #wx.EVT_MENU(self,gui_c.ID_USERGU,self.usersguide)
        self.Bind(wx.EVT_MENU, self.usersguide, id=gui_c.ID_USERGU)
        #wx.EVT_MENU(self,gui_c.ID_ABOUT,self.about)
        self.Bind(wx.EVT_MENU, self.about, id=gui_c.ID_ABOUT)
        #wx.EVT_MENU(self,gui_c.ID_UPDATE,self.showupdate)
        self.Bind(wx.EVT_MENU, self.showupdate, id=gui_c.ID_UPDATE)
        self.Bind(wx.EVT_CLOSE,self.closeprogram)
        # SPLITTER
        self.splitv = wx.SplitterWindow(self)
        # TREE
        self.tree = wx.TreeCtrl(self.splitv,-1)
        self.troot = self.tree.AddRoot('Frugal')
        self.tree.Expand(self.troot)
        self.tree.SelectItem(self.troot)
        self.tree.Bind(wx.EVT_TREE_SEL_CHANGED,self.treeevent)
        # PANEL (placeholder list control, replaced by plugin panels later)
        lc = wx.ListCtrl(self.splitv,style=wx.LC_REPORT)
        # CONF SPLITTERS
        self.splitv.SplitVertically(self.tree,lc,200)
        self.Centre()
        if len(sys.argv) > 1:
            # assume path to project is first variable
            path = sys.argv[1]
            pd = wx.ProgressDialog('Opening project','Please wait - this might take a while...',100,self)
            pd.Update(10)
            self.openpath(path)
            pd.Destroy()

    def new(self,event):
        """Ask for a directory and currency, then create a new project."""
        if self.projectopen == True:
            #TODO error message
            return
        dialog = wx.DirDialog(self,'Select new project directory','.',wx.DD_DEFAULT_STYLE)
        if dialog.ShowModal() == wx.ID_OK:
            path = dialog.GetPath()
            dialog.Destroy()
            dialog = wx.SingleChoiceDialog(self,'Choose project currency:','Currency',self.datacoll.ALLOWEDCURRENCIES)
            if dialog.ShowModal() == wx.ID_OK:
                currency = dialog.GetStringSelection()
            else:
                currency = 'EUR'
            dialog.Destroy()
            pd = wx.ProgressDialog('Create new project','Please wait... this might take a while.',100,self)
            if self.owner.new(path,currency) == False:
                pd.Destroy()
                self.errdlg('Error: Failed to create new project!')
                return
        else:
            # directory selection cancelled
            dialog.Destroy()
            return
        if currency == '':
            currency = 'EUR'
        self.openpath(path,pd)
        pd.Destroy()

    def open(self,event):
        """Ask for a directory and open the project found there."""
        if self.projectopen == True:
            #TODO error message
            return
        dialog = wx.DirDialog(self,'Select project directory','.',wx.DD_DEFAULT_STYLE)
        if dialog.ShowModal() == wx.ID_OK:
            path = dialog.GetPath()
            pd = wx.ProgressDialog('Opening project','Please wait - this might take a while...',100,self)
            pd.Update(10)
            self.openpath(path,pd)
            pd.Destroy()
        dialog.Destroy()

    def openpath(self,path,pd=False):
        """Open the project at *path* via the owner; build the tree on success."""
        if self.owner.open(path,pd) == True:
            #self.menufile.Enable(gui_c.ID_NEWPROJ,False)
            #self.menufile.Enable(gui_c.ID_OPEPROJ,False)
            #self.menufile.Enable(gui_c.ID_SAVPROJ,True)
            #self.menuproj.Enable(gui_c.ID_UPDPROJ,True)
            #self.menuproj.Enable(gui_c.ID_CALPROJ,True)
            self.createtree()
            self.projectopen = True
        else:
            self.errdlg('Error: Failed to open project!')

    def save(self,event):
        """Save the current project through the owner."""
        if self.projectopen == False:
            #TODO error message
            return
        pd = wx.ProgressDialog('Saving project','Please wait - this might take a while...',100,self)
        pd.Update(10)
        rv = self.owner.save(pd)
        pd.Destroy()
        if rv == False:
            self.errdlg('Error: Failed to save project!')
            return
        self.unsaved_data = 0

    def closeprogram(self,event):
        """Close handler: offer to save unsaved data before destroying."""
        if self.datacoll.unsaved == 1:
            msg = wx.MessageDialog(self,'The financial data has been modified.\nDo you want to save your changes before exiting?','Save data',wx.YES_NO|wx.YES_DEFAULT|wx.CANCEL|wx.ICON_QUESTION)
            rv = msg.ShowModal()
            msg.Destroy()
            if rv == wx.ID_YES:
                self.owner.datacoll.savefiles()
            if rv == wx.ID_CANCEL:
                return
        self.Destroy()

    def quit(self,event):
        """File->Exit: route through Close so closeprogram() runs."""
        self.Close()

    def updateproject(self,event):
        """Download online quotes, then trigger a recalculation."""
        if self.projectopen == False:
            #TODO error message
            return
        pd = wx.ProgressDialog('Updating stock quotes and currency rates','Please wait - this might take a while...',100,self)
        pd.Update(10)
        if self.owner.downloadquotes(pd) == True:
            self.tree.SelectItem(self.troot)
        pd.Destroy()
        self.calc(0)

    def calc(self,event):
        """Re-calculate all project results through the owner."""
        if self.projectopen == False:
            #TODO error message
            return
        pd = wx.ProgressDialog('Calculating result','Please wait - this might take a while...',100,self)
        pd.Update(10)
        if self.owner.calc(pd) == True:
            self.tree.SelectItem(self.troot)
        pd.Destroy()

    def showupdate(self,event):
        """Check online for a newer version and report the outcome."""
        updateavailable = self.owner.checkforupdate()
        if updateavailable == 1:
            msg = 'A new version of Frugal is available for download!\n\nPlease go to http://www.samoht.se/frugal/ and click Download!'
            self.SetTitle(self.frametitle+' (new version available for download)')
        elif updateavailable == -1:
            msg = 'Failed to read latest version! Please retry.'
        else:
            msg = 'No new version of Frugal available!'
        notice = wx.MessageDialog(self,msg,self.pinfo,wx.OK|wx.ICON_INFORMATION)
        notice.ShowModal()
        notice.Destroy()

    def updatecallback(self):
        """Owner callback: flag an available update in the title bar."""
        # UPDATE
        #self.menuupdate = wx.Menu()
        #item = self.menuupdate.Append(gui_c.ID_UPDATE,'Show available update','Show available updates')
        #self.menubar.Append(self.menuupdate,'Update')
        self.SetTitle(self.frametitle+' (new version available for download)')

    def about(self,event):
        """Show the About dialog."""
        about = wx.MessageDialog(self,self.ainfo,self.pinfo,wx.OK|wx.ICON_INFORMATION)
        about.ShowModal()
        about.Destroy()

    def usersguide(self,event):
        """Open the online user's guide in the default browser."""
        webbrowser.open('http://www.samoht.se/frugal/usersguide.php')

    def createtree(self):
        """Populate the tree with one node per plugin and its functions."""
        for p in self.plugins:
            itm = self.tree.AppendItem(self.troot,p.NAME)
            for f in p.FUNCTIONS:
                self.tree.AppendItem(itm,f)
        self.tree.Expand(self.troot)

    def treeevent(self,event):
        """Tree selection handler: instantiate the matching plugin panel
        with the currently checked view mode and show it."""
        itm = event.GetItem()
        text = self.tree.GetItemText(itm)
        selectedplugin = None
        attr = []
        if self.menuview.FindItemById(gui_c.ID_DETAILS).IsChecked():
            attr.append('details')
        elif self.menuview.FindItemById(gui_c.ID_HISTORY).IsChecked():
            attr.append('history')
        elif self.menuview.FindItemById(gui_c.ID_HISTORYY).IsChecked():
            attr.append('historyy')
        elif self.menuview.FindItemById(gui_c.ID_GDETAILS).IsChecked():
            attr.append('gdetails')
        elif self.menuview.FindItemById(gui_c.ID_GHISTORY).IsChecked():
            attr.append('ghistory')
        elif self.menuview.FindItemById(gui_c.ID_GHISTORYY).IsChecked():
            attr.append('ghistoryy')
        for p in self.plugins:
            # selection may match either the plugin name or one of its functions
            if p.NAME == text:
                selectedplugin = p(self,self.splitv,func=p.NAME,attr=attr)
            for f in p.FUNCTIONS:
                if f == text:
                    selectedplugin = p(self,self.splitv,func=f,attr=attr)
        if not selectedplugin == None:
            self.showpanel(selectedplugin)

    def showpanel(self,panel):
        """Swap the right-hand splitter window for *panel*."""
        old = self.splitv.GetWindow2()
        self.splitv.ReplaceWindow(old,panel)
        old.Destroy()

    def errdlg(self,text='',title='Error',style=wx.OK|wx.ICON_ERROR):
        """Convenience wrapper: message dialog with error styling."""
        return self.msgdlg(text,title,style)

    def msgdlg(self,text='',title='Information',style=wx.OK|wx.ICON_INFORMATION):
        """Show a modal message dialog and return its result code."""
        msg = wx.MessageDialog(self,text,title,style)
        rv = msg.ShowModal()
        msg.Destroy()
        return rv

    # menu/command ids
    ID_NEWPROJ = 1001
    ID_OPEPROJ = 1002
    ID_SAVPROJ = 1003
    ID_EXIT = 1004
    ID_CALPROJ = 1005
    ID_ABOUT = 1006
    ID_ADDTRAN = 1007
    ID_DETAILS = 1008
    ID_HISTORY = 1009
    ID_GDETAILS = 1010
    ID_GHISTORY = 1011
    ID_CLOPROJ = 1012
    ID_UPDATE = 1013
    ID_HISTORYY = 1014
    ID_GHISTORYY = 1015
    ID_UPDPROJ = 1016
    ID_USERGU = 1017
class appl_c(wx.App):
    """Thin wx.App wrapper that creates and shows the main frame."""

    def OnInit(self):
        """wx initialization hook; nothing to do beyond accepting startup."""
        return True

    def setup(self, program, version, about, owner):
        """Create the main gui_c frame and make it visible."""
        main_frame = gui_c(None, program, version, about, owner)
        main_frame.Show()
| tomluvoe/frugal | src/gui_c.py | gui_c.py | py | 11,466 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "wx.Frame",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "wx.Frame.__init__",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "wx.Frame",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "pbalance_c.pbalance_c",... |
7967036430 | """aiohttp-based client to retrieve web pages.
"""
import asyncio
from contextlib import closing
import time
import aiohttp
async def fetch_page(session, host, port=8000, wait=0):
    """Fetch one page.

    The path segment tells the test server how many seconds to wait
    before answering.
    """
    target = '%s:%s/%s' % (host, port, wait)
    with aiohttp.Timeout(10):
        async with session.get(target) as resp:
            assert resp.status == 200
            return await resp.text()
def get_multiple_pages(host, waits, port=8000, show_time=True):
    """Fetch several pages concurrently and optionally report wall time."""
    start = time.perf_counter()
    with closing(asyncio.get_event_loop()) as loop:
        with aiohttp.ClientSession(loop=loop) as session:
            coros = [fetch_page(session, host, port, wait) for wait in waits]
            pages = loop.run_until_complete(asyncio.gather(*coros))
    duration = time.perf_counter() - start
    if show_time:
        msg = 'It took {:4.2f} seconds for a total waiting time of {:4.2f}.'
        print(msg.format(duration, sum(waits)))
    return pages
if __name__ == '__main__':
    def main():
        """Test it.
        """
        # note: port is passed as a string; it is only ever formatted into
        # the URL, so this works
        pages = get_multiple_pages(host='http://localhost', port='8000',
                                   waits=[1, 5, 3, 2])
        for page in pages:
            print(page)
    main()
| asyncio-docs/asyncio-doc | examples/aiohttp_client.py | aiohttp_client.py | py | 1,359 | python | en | code | 196 | github-code | 6 | [
{
"api_name": "aiohttp.Timeout",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "contextlib.closing",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "asyncio.get_even... |
36849230183 | from google.cloud import bigquery
import pandas as pd
import os
def ReadAlreadyProcessedData():
    """Read previously processed GPT-4 recommendation rows from BigQuery.

    Builds the fully-qualified table name from the GCP_PROJECT_ID and
    GCP_BQ_SCHEMA_NAME environment variables and selects the request,
    order-configuration and payment columns.
    """
    vAR_client = bigquery.Client()
    vAR_table_name = "DMV_ELP_GPT4_RECOMMENDATION"
    # fully-qualified `project.schema.table` identifier
    vAR_fq_table = "{}.{}.{}".format(
        os.environ["GCP_PROJECT_ID"],
        os.environ["GCP_BQ_SCHEMA_NAME"],
        vAR_table_name,
    )
    vAR_sql = (
        "select REQUEST_ID,REQUEST_DATE,ORDER_CONFIGURATION,"
        "ORDER_PAYMENT_DATE from `" + vAR_fq_table + "`"
    )
    vAR_df = vAR_client.query(vAR_sql).to_dataframe()
return vAR_df | Deepsphere-AI/https-github.com-Deepsphere-AI-DMV_ELP_GPT4_Recommendation | DMV_Bigquery_Utility.py | DMV_Bigquery_Utility.py | py | 454 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "google.cloud.bigquery.Client",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "google.cloud.bigquery",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
}
] |
22550276830 | #!/usr/bin/env python3
import argparse
import logging
import rdflib
import rdflib_util as ru
import re
import sys
# Implementation of "list study group members" query directly in Python using
# rdflib API calls.
# ------------------------------------------------------
# main()
# ------------------------------------------------------
def main():
    """List the subjects that belong to a DATS StudyGroup.

    Reads a DATS JSON-LD file, optionally filters by Dataset identifier
    and/or StudyGroup name, and prints a tab-separated table of
    (dbGaP study accession, study group name, subject id).

    Each commented SPARQL query below shows the equivalent query; the
    '---->' arrows mark the clauses implemented by the following code.
    """
    # input
    parser = argparse.ArgumentParser(description='List subjects in a given DATS Dataset and StudyGroup.')
    parser.add_argument('--dats_file', help ='Path to TOPMed or GTEx DATS JSON file.')
    parser.add_argument('--dataset_id', required=False, help ='DATS identifier of the Dataset linked to the StudyGroup of interest.')
    parser.add_argument('--study_group_name', required=False, help ='DATS identifier of the StudyGroup of interest.')
    args = parser.parse_args()
    # logging
    logging.basicConfig(level=logging.INFO)
    # parse JSON LD into an rdflib Graph
    g = ru.read_json_ld_graph(args.dats_file)
    # Legend of the OBO terms used below:
    # obo:IAO_0000100 - "data set"
    # obo:IAO_0000577 - "centrally registered identifier symbol"
    # obo:RO_0003001 - "produced by"
    # obo:OBI_0000066 - "investigation"
    # obo:BFO_0000051 - "has part"
    # obo:STATO_0000193 - "study group population"
    # obo:RO_0002351 - "has member"
    # obo:IAO_0000590 - "a textual entity that denotes a particular in reality"
    # obo:BFO_0000040 - "material entity"
    # SELECT ?dbgap_study_acc ?study_group_name ?subject_name
    # WHERE {
    # ---->  ?dataset a obo:IAO_0000100.
    # ---->  ?dataset obo:IAO_0000577 ?dataset_id.
    # ---->  ?dataset_id sdo:identifier ?dbgap_study_acc.
    #        ?dataset obo:RO_0003001 ?study.
    #        ?study a obo:OBI_0000066.
    #        ?study obo:BFO_0000051 ?study_group.
    #        ?study_group a obo:STATO_0000193.
    #        ?study_group obo:IAO_0000590 ?study_group_name.
    #        ?study_group obo:RO_0002351 ?subject.
    #        ?subject a obo:BFO_0000040.
    #        ?subject obo:IAO_0000590 ?subject_name.
    # }
    # ORDER BY ?dbgap_study_acc ?study_group_name ?subject_name
    # find ALL Datasets, retain those with a DATS identifier
    all_datasets = [s for (s,p,o) in g.triples((None, None, ru.DATS_DATASET_TERM))]
    dataset_ids = {}
    datasets = []
    for d in all_datasets:
        # follow Dataset -> central identifier -> sdo:identifier literal
        for (s,p,o) in g.triples((d, ru.CENTRAL_ID_TERM, None)):
            for (s2,p2,o2) in g.triples((o, ru.SDO_IDENT_TERM, None)):
                dataset_ids[d] = o2
        if d in dataset_ids:
            datasets.append(d)
    # filter datasets by id if one was specified
    datasets = [d for d in datasets if (args.dataset_id is None) or (rdflib.term.Literal(args.dataset_id) == dataset_ids[d])]
    # SELECT ?dbgap_study_acc ?study_group_name ?subject_name
    # WHERE {
    #        ?dataset a obo:IAO_0000100.
    #        ?dataset obo:IAO_0000577 ?dataset_id.
    #        ?dataset_id sdo:identifier ?dbgap_study_acc.
    # ---->  ?dataset obo:RO_0003001 ?study.
    # ---->  ?study a obo:OBI_0000066.
    #        ?study obo:BFO_0000051 ?study_group.
    #        ?study_group a obo:STATO_0000193.
    #        ?study_group obo:IAO_0000590 ?study_group_name.
    #        ?study_group obo:RO_0002351 ?subject.
    #        ?subject a obo:BFO_0000040.
    #        ?subject obo:IAO_0000590 ?subject_name.
    # }
    # ORDER BY ?dbgap_study_acc ?study_group_name ?subject_name
    # link each Dataset to Study (should be 1-1)
    ds_to_study = {}
    for d in datasets:
        for (s,p,o) in g.triples((d, ru.PRODUCED_BY_TERM, None)):
            for (s2,p2,o2) in g.triples((o, ru.RDF_TYPE_TERM, ru.DATS_STUDY_TERM)):
                ds_to_study[d] = o
    # filter Datasets not linked to a study
    datasets = [d for d in datasets if d in ds_to_study]
    # SELECT ?dbgap_study_acc ?study_group_name ?subject_name
    # WHERE {
    #        ?dataset a obo:IAO_0000100.
    #        ?dataset obo:IAO_0000577 ?dataset_id.
    #        ?dataset_id sdo:identifier ?dbgap_study_acc.
    #        ?dataset obo:RO_0003001 ?study.
    #        ?study a obo:OBI_0000066.
    # ---->  ?study obo:BFO_0000051 ?study_group.
    # ---->  ?study_group a obo:STATO_0000193.
    # ---->  ?study_group obo:IAO_0000590 ?study_group_name.
    #        ?study_group obo:RO_0002351 ?subject.
    #        ?subject a obo:BFO_0000040.
    #        ?subject obo:IAO_0000590 ?subject_name.
    # }
    # ORDER BY ?dbgap_study_acc ?study_group_name ?subject_name
    # link each Study to StudyGroup (1-many) and get StudyGroup name
    study_to_groups = {}
    study_group_to_name = {}
    for s in ds_to_study.values():
        groups = []
        # NOTE: the inner loops rebind (s,p,o), but the triples() pattern
        # fixes the subject, so `s` keeps its original value here.
        for (s,p,o) in g.triples((s, ru.HAS_PART_TERM, None)):
            for (s2,p2,o2) in g.triples((o, ru.RDF_TYPE_TERM, ru.DATS_STUDY_GROUP_TERM)):
                # get name; only keep groups with exactly one name
                n_names = 0
                for (s3,p3,o3) in g.triples((o, ru.NAME_TERM, None)):
                    study_group_to_name[o] = o3
                    n_names += 1
                if n_names == 1:
                    groups.append(o)
        # filter study groups by name if one was specified
        # (the comprehension variable g does not leak in Python 3, so the
        # graph `g` is unaffected)
        groups = [g for g in groups if (args.study_group_name is None) or (rdflib.term.Literal(args.study_group_name) == study_group_to_name[g])]
        study_to_groups[s] = groups
    # SELECT ?dbgap_study_acc ?study_group_name ?subject_name
    # WHERE {
    #        ?dataset a obo:IAO_0000100.
    #        ?dataset obo:IAO_0000577 ?dataset_id.
    #        ?dataset_id sdo:identifier ?dbgap_study_acc.
    #        ?dataset obo:RO_0003001 ?study.
    #        ?study a obo:OBI_0000066.
    #        ?study obo:BFO_0000051 ?study_group.
    #        ?study_group a obo:STATO_0000193.
    #        ?study_group obo:IAO_0000590 ?study_group_name.
    # ---->  ?study_group obo:RO_0002351 ?subject.
    # ---->  ?subject a obo:BFO_0000040.
    # ---->  ?subject obo:IAO_0000590 ?subject_name.
    # }
    # ORDER BY ?dbgap_study_acc ?study_group_name ?subject_name
    # find subjects in each study group and retrieve their names
    study_group_to_subjects = {}
    subject_to_name = {}
    for sg in study_group_to_name.keys():
        subjects = []
        for (s,p,o) in g.triples((sg, ru.HAS_MEMBER_TERM, None)):
            for (s2,p2,o2) in g.triples((o, ru.RDF_TYPE_TERM, ru.DATS_MATERIAL_TERM)):
                for (s3,p3,o3) in g.triples((o, ru.NAME_TERM, None)):
                    subject_to_name[o] = o3
                subjects.append(o)
        study_group_to_subjects[sg] = subjects
    # SELECT ?dbgap_study_acc ?study_group_name ?subject_name
    # WHERE { ... }
    # ---->  ORDER BY ?dbgap_study_acc ?study_group_name ?subject_name
    # Print the report, sorted at every level.
    print()
    print("StudyGroup members:")
    print()
    print("dbGaP Study\tStudy Group\tSubject ID")
    # sort datasets
    datasets.sort(key=lambda x: dataset_ids[x])
    for d in datasets:
        dataset_id = dataset_ids[d]
        study = ds_to_study[d]
        groups = study_to_groups[study]
        # sort study groups
        groups.sort(key=lambda x: study_group_to_name[x])
        # NOTE: `g` (the graph) is rebound here; the graph is no longer
        # needed below, so this is harmless, just unfortunate naming.
        for g in groups:
            group_name = study_group_to_name[g]
            subjects = study_group_to_subjects[g]
            # sort subjects
            subjects.sort(key=lambda x: subject_to_name[x])
            for s in subjects:
                subject_name = subject_to_name[s]
                print("%s\t%s\t%s" % (dataset_id, group_name, subject_name))
    print()
# Script entry point.
if __name__ == '__main__':
    main()
| dcppc/crosscut-metadata | sparql/v0.5/rdflib_list_study_group_members.py | rdflib_list_study_group_members.py | py | 8,716 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "rdflib_... |
43095853918 | from tython.main import run
from colorama import init
# Reset terminal colours automatically after each print.
init(autoreset=True)
# Simple REPL: read a line, run it through the tython interpreter, and
# print either the error or the resulting value(s).
while True:
    text = input("> ")
    # Ignore blank input lines.
    if text.strip() == "":
        continue
    result, error = run("<stdin>", text)
    if error:
        # "error" prefix rendered in red via ANSI escape codes.
        print(f"\033[31merror \033[0m" + f"{error}")
    elif result:
        # run() wraps results in a list-like container; unwrap singletons.
        if len(result.elements) == 1:
            print(repr(result.elements[0]))
        else:
            print(repr(result))
| traceover/tython | shell.py | shell.py | py | 411 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "colorama.init",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "tython.main.run",
"line_number": 11,
"usage_type": "call"
}
] |
10230251745 | import json
import numpy as np
from eval_list import eval_list
import evals.data
from evals.registry import registry
# Build a self-prompting dataset: for every registered eval in eval_list,
# collect single-system/single-user samples, split them into train/test,
# and write one JSONL row per eval to samples.jsonl.
np.random.seed(42)
min_samples_per_dataset = 50
n_test_samples = 10
seen = set()
datarows = []
# NOTE: the loop variable shadows the builtin `eval`; it is not used as a
# function anywhere below, so this is cosmetic only.
for eval in registry.get_evals("*"):
    if eval.key not in eval_list or eval.key in seen:
        continue
    seen.add(eval.key)
    if eval.args and "samples_jsonl" in eval.args:
        samples = evals.data.get_jsonl(eval.args["samples_jsonl"])
        # Construct our tasks dataset
        instruction_input_output = []
        for sample in samples:
            if "input" in sample and "ideal" in sample:
                # We only want single-system single-user samples:
                if isinstance(sample["input"], list) and len(sample["input"]) == 2:
                    if (
                        sample["input"][0]["role"] == "system"
                        and sample["input"][1]["role"] == "user"
                    ):
                        # Skip if output is a list
                        if isinstance(sample["ideal"], list):
                            continue
                        dp_instruction = sample["input"][0]["content"]
                        dp_in = sample["input"][1]["content"]
                        dp_out = sample["ideal"]
                        instruction_input_output.append((dp_instruction, dp_in, dp_out))
        # Skip if there are not enough samples
        if len(instruction_input_output) < min_samples_per_dataset:
            continue
        # Check that all dp_instruction are the same (sort, compare ends)
        instruction_input_output = sorted(instruction_input_output, key=lambda x: x[0])
        if instruction_input_output[0][0] != instruction_input_output[-1][0]:
            continue
        # Shuffle samples before the train/test split
        np.random.shuffle(instruction_input_output)
        test_samples = [
            {
                "input": i,
                "output": o,
            }
            for _, i, o in instruction_input_output[:n_test_samples]
        ]
        train_samples = [
            {
                "input": i,
                "output": o,
            }
            for _, i, o in instruction_input_output[n_test_samples:]
        ]
        row = {
            "eval": eval.key,
            "instruction": instruction_input_output[0][0],
            "test_samples": test_samples,
            "train_samples": train_samples,
        }
        datarows.append(row)
# Sanity checks: every eval in eval_list must have produced exactly one row.
assert len(datarows) == len(
    eval_list
), f"Unexpected number of evals: {len(datarows)} != {len(eval_list)}"
assert set([r["eval"] for r in datarows]) == set(
    eval_list
), f"Missing evals: {set(eval_list) - set([r['eval'] for r in datarows])}"
# Shuffle rows
np.random.shuffle(datarows)
# Save jsonl to file
with open("samples.jsonl", "w") as f:
    for row in datarows:
        f.write(json.dumps(row) + "\n")
| openai/evals | evals/elsuite/self_prompting/scripts/dataset/compile_data.py | compile_data.py | py | 2,868 | python | en | code | 12,495 | github-code | 6 | [
{
"api_name": "numpy.random.seed",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "evals.registry.registry.get_evals",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "e... |
31165098736 | from torch.utils.data import Dataset, DataLoader
from albumentations.pytorch import ToTensorV2
from augmix import RandomAugMix
from utils import in_colab
import albumentations as A
import torchvision.io as io
import pytorch_lightning as pl
import torch
import cv2
def get_default_transforms(img_size):
    """Build the default augmentation pipelines.

    Returns a dict with a 'train' pipeline (flip / colour jitter /
    random crop) and an 'inference' pipeline (center crop); both
    normalise with ImageNet statistics and convert to a tensor.
    """
    height, width = img_size[0], img_size[1]

    def _normalize():
        # ImageNet mean/std, applied after scaling pixels out of [0, 255].
        return A.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
            max_pixel_value=255.0,
            p=1.0,
        )

    train_pipeline = A.Compose([
        A.HorizontalFlip(p=0.5),
        A.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, p=0.5),
        A.SmallestMaxSize(max_size=height, p=1),
        A.RandomCrop(height=height, width=width, p=1),
        _normalize(),
        ToTensorV2()
    ])
    inference_pipeline = A.Compose([
        A.SmallestMaxSize(max_size=height, p=1.0),
        A.CenterCrop(height=height, width=width, p=1.0),
        _normalize(),
        ToTensorV2()
    ])
    return {'train': train_pipeline, 'inference': inference_pipeline}
# NOTE(review): this class shadows the torch Dataset it inherits from;
# callers in this file refer to this subclass by the same name.
class Dataset(Dataset):
    def __init__(self, img_ids, targets=None, img_size=(224, 224), inference=False, tta=False):
        """Image dataset over file paths.

        img_ids: array of image file paths; targets: optional Pawpularity
        scores aligned with img_ids.
        """
        self.img_ids = img_ids
        self.targets = targets
        self.tta = tta
        # NOTE(review): get_default_transforms() only defines 'train' and
        # 'inference' keys, so tta=True raises KeyError here — confirm
        # whether a 'tta' pipeline was meant to be added.
        if tta:
            self.augs = get_default_transforms(img_size)['tta']
        elif inference:
            self.augs = get_default_transforms(img_size)['inference']
        else:
            self.augs = get_default_transforms(img_size)['train']
    def __len__(self):
        """Number of images (img_ids is assumed to be a numpy array)."""
        return self.img_ids.shape[0]
    def __getitem__(self, i):
        """Load image i, apply augmentations, and return a batch dict."""
        image = cv2.imread(self.img_ids[i])
        # OpenCV loads BGR; convert to the RGB order the transforms expect.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if not self.tta:
            image = self.augs(image=image)['image']
        if self.targets is not None:
            target = torch.as_tensor(self.targets[i]).float()
            return {
                'images': image,
                'targets': target
            }
        else:
            if self.tta:
                # One augmented copy per TTA transform; the comprehension
                # variable i shadows the sample index only inside the dict.
                return {f'images_{i}': self.augs[i](image=image)['image'] for i in range(len(self.augs))}
            else:
                return {'images': image}
class DataModule(pl.LightningDataModule):
    """Lightning data module wrapping the Pawpularity dataframe.

    `data` is a dataframe with 'file_path' and 'Pawpularity' columns;
    train_filter / val_filter are row selectors for .loc.
    """
    def __init__(
        self, data, img_size=(224, 224),
        train_filter=None, val_filter=None,
        batch_size=64, inference=False, tta=False
    ):
        super().__init__()
        self.data = data
        self.img_size = img_size
        self.train_filter = train_filter
        self.val_filter = val_filter
        self.batch_size = batch_size
        self.inference = inference
        # NOTE(review): get_default_transforms() has no 'tta' key, so
        # tta=True raises KeyError here; self.augs is also never read in
        # this class — confirm intent.
        if tta:
            self.augs = get_default_transforms(img_size)['tta']
    def setup(self, stage=None):
        """Split the dataframe into train/val frames (skipped at inference)."""
        if not self.inference:
            self.train_df = self.data.loc[self.train_filter, :]
            self.val_df = self.data.loc[self.val_filter, :]
    def train_dataloader(self):
        """Shuffled training loader; drops the last partial batch."""
        img_ids = self.train_df['file_path'].values
        targets = self.train_df['Pawpularity'].values
        train_dset = Dataset(img_ids, targets, img_size=self.img_size)
        # Fewer workers on Colab, where CPU count is limited.
        return DataLoader(
            train_dset, shuffle=True, num_workers=2 if in_colab() else 4,
            pin_memory=True, batch_size=self.batch_size, drop_last=True
        )
    def val_dataloader(self):
        """Deterministic validation loader using the inference transforms."""
        img_ids = self.val_df['file_path'].values
        targets = self.val_df['Pawpularity'].values
        val_dset = Dataset(img_ids, targets, img_size=self.img_size, inference=True)
        return DataLoader(
            val_dset, shuffle=False, num_workers=2 if in_colab() else 4,
            pin_memory=True, batch_size=self.batch_size,
        )
    def test_dataloader(self):
        # Not implemented; prediction uses predict_dataloader instead.
        pass
    def predict_dataloader(self):
        """Loader over the full dataframe, without targets."""
        img_ids = self.data['file_path'].values
        pred_dset = Dataset(img_ids, img_size=self.img_size, inference=True, tta=False)
        return DataLoader(
            pred_dset, shuffle=False, num_workers=2 if in_colab() else 4,
            pin_memory=True, batch_size=self.batch_size,
        )
{
"api_name": "albumentations.Compose",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "albumentations.HorizontalFlip",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "albumentations.ColorJitter",
"line_number": 16,
"usage_type": "call"
},
{
"a... |
14542676756 | import requests as rq
from bs4 import BeautifulSoup
import json
#-------------------------------------------
#Variables a utilizar
#-------------------------------------------
iLetras = 0 # variable para recorrer arreglo letras
aLetras=[
'a','b','c','d','e','f','g','h','i','j',
'k','l','m','n','ñ','o','p','q','r','s',
't','u','v','w','x','y','z'
]
#-----------------------------------------------
#-----------------------------------------------
def normalize(s):
    """Strip Spanish accents: map accented vowels (and u-dieresis) to
    their plain equivalents, preserving case."""
    accent_map = {"á": "a", "é": "e", "í": "i", "ó": "o", "ú": "u", "ü": "u"}
    # Build a translate table covering both lower- and upper-case forms.
    table = {}
    for accented, plain in accent_map.items():
        table[ord(accented)] = plain
        table[ord(accented.upper())] = plain.upper()
    return s.translate(table)
def CargarWeb(url):
    """Download *url* and return its body parsed with BeautifulSoup
    (html5lib parser)."""
    response = rq.get(url)
    return BeautifulSoup(response.content, 'html5lib')
#Funcion para realizar busqueda en la pagina
#https://www.interpatagonia.com/mapuche/diccionario.html
def BuscarPalabras(iLetras,aLetras):
    """Scrape word/translation pairs from interpatagonia.com.

    Returns a list of {'letra': ..., 'palabras': [...]} dicts, one per
    letter in aLetras, each holding {'palabra', 'significado'} entries.
    iLetras is unused here (kept for interface consistency).
    """
    # Load the page and keep only the dictionary <section>
    web = CargarWeb("https://www.interpatagonia.com/mapuche/diccionario.html").find('section')
    pal = [] # accumulator: word + translation, grouped per letter
    for i in aLetras :
        pal.append({'letra':i,'palabras':[]})
    #-------------------------------------------------
    # Collect the words from this first page
    #-------------------------------------------------
    # every <ul> on the page...
    for ul in web.findAll('ul'):
        # ...and every <li> inside it
        for li in ul.findAll('li'):
            try :
                palabra = li.strong.text.split(":")[0].strip().lower() # word in Mapudungun
                letra = palabra[:1].lower() # first letter, used as bucket key
                #traduccion = ''
                if ( len(li.text.split(":")) > 1 ):
                    traduccion = li.text.split(":")[1].strip()
                    if(traduccion != ""):
                        # file the pair under its letter bucket
                        for pos in pal:
                            if(pos['letra'] == letra):
                                pos['palabras'].append({'palabra':palabra,'significado':traduccion})
                #print(traduccion)
                #pal[letra].append([text,traduccion])
            except AttributeError:
                # <li> without a <strong> child: not a dictionary entry.
                pass
    return pal
#-----------------------------------------------------------------
#Recopilacion de palabras fin primera pagina
#-----------------------------------------------------------------
#Se cargan las palabras del txt que este contiene el diccionario obtenido
# del pdf del gobierno con un diccionario amplio de mapudungn
def BuscarPalabras2(iLetras,aLetras):
    """Load word/translation pairs from the local Palabras.txt dump.

    The text file is a dictionary extracted from a PDF; entries are
    'word: meaning' fragments separated by '.' and line breaks, so this
    routine stitches fragments back together before bucketing them by
    first letter. Returns the same per-letter structure as
    BuscarPalabras(). NOTE: the file handle is never closed.
    """
    f = open("Documentos/Palabras.txt")
    iLetras = 0 # reset index to 0 (parameter is effectively unused)
    aCad = []
    actual_pto = False # flag: current fragment contains a ':'
    sig_pto = False # flag: next fragment contains a ':'
    for i in f.read().split('.'):
        cad = i.split('\n') # split each '.'-chunk into line fragments
        # If the chunk spans more than 2 line fragments, decide per
        # fragment whether it is a complete entry (has ':') or must be
        # concatenated with the following fragment; otherwise keep the
        # second fragment as-is.
        if(len(cad)>2):
            #print("------------------------")
            for ind in range(len(cad)):
                if(cad[ind] != "\n" ):
                    actual = ind # index of the current fragment
                    # if a next fragment exists, check both for ':'
                    if ( actual+1 < len(cad) and actual > 0):
                        siguiente = actual+1
                        for letras in cad[actual]:
                            if(letras == ":"):
                                actual_pto = True
                        for letras in cad[siguiente]:
                            if(letras == ":"):
                                sig_pto = True
                        # both have ':' -> current fragment is complete
                        if(actual_pto == True and sig_pto == True):
                            aCad.append(cad[actual])
                            actual_pto = False
                            sig_pto = False
                        # only current has ':' -> meaning continues on the
                        # next fragment; join them
                        if(actual_pto == True and sig_pto == False):
                            pal = cad[actual] +" "+cad[siguiente]
                            #print("Concatenacion: " , pal)
                            aCad.append(pal)
                            actual_pto = False
                            sig_pto = False
            #print("-----------------------")
        else:
            # keep chunks that produced at most two fragments
            if(len(cad) > 1):
                aCad.append(cad[1])
    #--------------------------------------------------------------------------
    # Build the per-letter JSON-ready dictionary, alphabetically bucketed
    #-------------------------------------------------------------------------
    pal=[]
    # create one bucket per letter
    for i in aLetras:
        pal.append({'letra':i,'palabras':[]})
    for i in range(len(aCad)) :
        separados = aCad[i].split(":") # split entry at ':'
        if(len(separados) > 1):
            palabra = separados[0].lower()
            significado = separados[1]
            if(significado != " "):
                # first letter (accent-stripped) selects the bucket
                letra = normalize(palabra[:1].lower())
                for pos in pal:
                    if(pos['letra'] == letra):
                        pos['palabras'].append({'palabra':palabra,'significado':significado})
    #---------------------------------------------------------------------
    return pal
#Funcion para realizar busqueda en la pagina
#https://www.mapuche.nl/espanol/idioma/index_idioma.htm
#Para esta pagina se le debe pasar como parametro la letra para el diccionario
#https://www.mapuche.nl/espanol/idioma/"letra".htm <- tiene esa estructura
def BuscarPalabras3(iLetras,aLetras):
    """Scrape word/translation pairs from mapuche.nl.

    That site serves one page per letter
    (https://www.mapuche.nl/espanol/idioma/<letra>.htm); each line is
    'translation - word'. Returns the same per-letter structure as
    BuscarPalabras(). iLetras is unused.
    """
    pal = [] # accumulator: word + translation, grouped per letter
    for i in aLetras:
        pal.append({'letra':i,'palabras':[]})
    for letra in aLetras:
        try:
            web = CargarWeb("https://www.mapuche.nl/espanol/idioma/"+letra+".htm")
            contenido = web.find("td",attrs={'width':'749'}).text.split("\n") # cell holding the word list
            for i in contenido:
                if(len(i.strip().split("-")) > 1):
                    palabra = i.strip().split("-")[1].strip().lower() # word is after the '-'
                    # NOTE: this rebinds the outer loop variable `letra`;
                    # harmless because the bucket is chosen from the word.
                    letra = normalize(palabra[:1]).lower()
                    traduccion = i.strip().split("-")[0].strip() # translation is before the '-'
                    if(len(letra)>0):
                        if(traduccion != ""):
                            for pos in pal:
                                if(pos['letra'] == letra):
                                    pos['palabras'].append({'palabra':palabra,'significado':traduccion})
        except Exception as e:
            # best-effort scraping: skip letters whose page is missing
            # or malformed rather than aborting the whole run
            pass
    return pal
def BuscarRepetidos(pal,pal2):
    """Find words common to both dictionaries and merge their meanings.

    For every word present in both per-letter structures, the meanings
    are joined (trailing '.'-sections stripped, comma-separated) and the
    pair is stored once in the result. Words present in only one input
    are NOT included here; llenar() adds those afterwards.
    """
    palabras1 = [pos['palabras'] for pos in pal] # word lists of the first dict
    palabras2 = [pos['palabras'] for pos in pal2] # word lists of the second dict
    pal_final = [] # merged words without duplicates
    for i in pal:
        pal_final.append({'letra':i['letra'],'palabras':[]})
    for i in range(len(palabras1)):
        a_palabras1 = palabras1[i] # entries for this letter, first dict
        a_palabras2 = palabras2[i] # entries for this letter, second dict
        repetidos = False
        i_pal1 = 0 # (unused) index into a_palabras1
        i_pal2 = 0 # (unused) index into a_palabras2
        # only scan letters that actually have entries
        if(len(a_palabras1) > 0 ):
            # NOTE: the inner `for i in a_palabras1` rebinds the outer
            # index i, but the outer `for i in range(...)` iterator is
            # unaffected by that rebinding.
            for i in a_palabras1:
                pal1 = i['palabra'] # word
                sig1 = i['significado'] # meaning
                #print(sig1)
                for y in a_palabras2:
                    pal2 = y['palabra'] # word
                    sig2 = y['significado'] # meaning
                    # same word (case/accent-insensitive)?
                    if(normalize(pal1.lower()) == normalize(pal2.lower())):
                        letra = pal1[:1].lower()
                        cad = ""
                        # keep only the text before the first '.' of each
                        # meaning, then join the two with a comma
                        if(sig1.find(".") > 0 ):
                            a = sig1.split(".")
                            cad += a[0]
                        else:
                            cad += sig1
                        if(sig2.find(".") > 0):
                            a = sig2.split(".")
                            cad +=","+a[0]
                        else:
                            cad +=","+sig2
                        # store the merged entry in its letter bucket
                        for z in pal_final:
                            if(z['letra'] == letra):
                                z['palabras'].append({'palabra':pal1,'significado':cad})
    return pal_final
#Funcion que guarda los valores restantes del diccionario
def llenar(pal,dic):
    """Copy entries from `pal` into `dic` that `dic` does not have yet.

    Comparison is per letter bucket and case/accent-insensitive on the
    word. `dic` is mutated in place and also returned. NOTE: the
    parameter name `pal` is rebound inside the loop, which is confusing
    but harmless because the outer lists were captured first.
    """
    existe = False
    palabras1 = [pos['palabras'] for pos in dic] # destination word lists
    palabras2 = [pos['palabras'] for pos in pal] # source word lists
    for i in range(len(palabras1)) :
        # empty destination bucket: adopt the source bucket wholesale
        if(len(palabras1[i]) == 0):
            if(len(palabras2[i]) > 0):
                palabras1[i] = palabras2[i]
        else:
            pos1 = palabras1[i]
            pos2 = palabras2[i]
            for y in pos2:
                pal = y['palabra']
                sig = y['significado']
                # does the destination already contain this word?
                for z in pos1:
                    pal2 = z['palabra']
                    if(normalize(pal.lower()) == normalize(pal2.lower())):
                        existe = True
                        break
                if(existe):
                    # already present: skip it
                    existe=False
                else:
                    # missing: add it to the destination bucket
                    palabras1[i].append({'palabra':pal,'significado':sig})
    # write the (possibly replaced) bucket lists back into dic
    for i in range(len(dic)):
        dic[i]['palabras'] = palabras1[i]
    return dic
#----------------------------------------------------------------
# Proceso de guardado de las palabras en json
#-------------------------------------------------------------------
print("Obteniendo palabras .....")
# words from the first page (interpatagonia.com)
pal = BuscarPalabras(iLetras,aLetras)
# words from the local txt dump
pal2= BuscarPalabras2(iLetras,aLetras)
# words from the second page (mapuche.nl)
pal3 = BuscarPalabras3(iLetras,aLetras)
# merge duplicated words across the three sources
d = BuscarRepetidos(pal,pal2)
d = BuscarRepetidos(d,pal3)
# fill in the remaining, non-duplicated words
d = llenar(pal,d);d = llenar(pal2,d);d = llenar(pal3,d);
# persist the merged dictionary as JSON
with open('json/dic_inicial.json','w') as file:
    json.dump(d,file,indent=4)
print("Palabras obtenidas !! ")
| CamiloFerreira/Traductor-Esp-Mapuzungun | Obtener_palabras.py | Obtener_palabras.py | py | 9,734 | python | es | code | 1 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 320,
"usage_type": "call"
}
] |
22330842884 | import numpy as np
import matplotlib, gc
import matplotlib.pyplot as plt
from tensorflow import gradients
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops, math_ops
def hessian_vector_product(ys, xs, v):
    """Multiply the Hessian of `ys` wrt `xs` by the vector list `v`.

    Uses the Pearlmutter trick: H*v = d/dxs ((d ys / d xs) . v), so only
    two backprop passes are needed and the Hessian is never materialised.

    Args:
        ys: scalar tensor to differentiate.
        xs: list of tensors to differentiate with respect to.
        v: list of tensors, one per element of `xs`.

    Returns:
        A list of tensors, one per element of `xs`, containing H*v
        (zeros for elements with no second-order dependency).

    Raises:
        ValueError: if `xs` and `v` have different lengths.
    """
    # Validate the input
    length = len(xs)
    if len(v) != length:
        raise ValueError("xs and v must have the same length.")

    # First backprop: gradient of ys wrt each x.
    grads = gradients(ys, xs)
    assert len(grads) == length

    # Dot the gradient with v; stop_gradient keeps v out of the
    # second differentiation pass.
    elemwise_products = [
        math_ops.multiply(grad_elem, array_ops.stop_gradient(v_elem))
        for grad_elem, v_elem in zip(grads, v) if grad_elem is not None
    ]

    # Second backprop: gradient of (grad . v) wrt each x gives H*v.
    grads_with_none = gradients(elemwise_products, xs)
    # BUG FIX: the original called tf.zeros_like, but `tf` is never
    # imported in this module, so any disconnected gradient raised
    # NameError. Use the already-imported array_ops instead.
    return_grads = [
        grad_elem if grad_elem is not None \
        else array_ops.zeros_like(x) \
        for x, grad_elem in zip(xs, grads_with_none)]

    return return_grads
def avg_l2_dist(orig, adv):
    """Mean L2 distance between corresponding rows of `orig` and `adv`.

    Returns NaN when `orig` is empty.
    """
    n_examples = orig.shape[0]
    if n_examples == 0:
        return np.nan
    total = 0.0
    for clean_img, adv_img in zip(orig, adv):
        total += np.linalg.norm(clean_img - adv_img)
    return total / n_examples
def visualize(image_list, num_images, savefig=''):
    """Show `num_images` images in a single-row grid and save the figure.

    image_list: arrays of shape (H, W, C); single-channel images are
    squeezed and drawn with a greyscale colormap. savefig: output path
    for the saved figure.
    """
    assert(len(image_list) == num_images)
    fig=plt.figure(figsize=(15,15))
    columns = num_images
    # subplot indices are 1-based
    for i in range(1, columns+1):
        img = image_list[i-1]
        fig.add_subplot(1, columns, i)
        if img.shape[-1] == 1:
            # drop the trailing channel axis so imshow treats it as 2-D
            img = np.squeeze(img)
            plt.imshow(img,cmap='Greys')
        else:
            plt.imshow(img)
        plt.axis('off')
    plt.show()
    fig.savefig(savefig,bbox_inches='tight')
#Normalize rows of a given matrix
def normalize(matrix):
    """Return a copy of `matrix` with each non-zero row scaled to unit
    L2 norm; all-zero rows stay zero."""
    unit_rows = np.zeros_like(matrix)
    for row_idx in range(matrix.shape[0]):
        length = np.linalg.norm(matrix[row_idx])
        if length > 0:
            unit_rows[row_idx] = matrix[row_idx] / length
    return unit_rows
def preds_to_labels(preds):
    """One-hot encode the per-row argmax of `preds` (float64 output)."""
    winners = np.argmax(preds, axis=1)
    # Row k of the identity matrix is the one-hot vector for class k.
    return np.eye(preds.shape[1])[winners]
def norms_and_cos(model, data, labels, grads_train):
    """Per-example gradient norms and cosine similarity to training grads.

    Returns (norms, cos_sim): norms[i] is the L2 norm of example i's
    parameter gradient; cos_sim[i, j] is the dot product of the
    unit-normalised gradient i with grads_train[j] (grads_train rows are
    presumably already unit-normalised — TODO confirm with callers).
    """
    param_grads = model.get_gradients_wrt_params(data, labels)
    unit_grads = normalize(param_grads)
    norms = np.sqrt(np.dot(param_grads, param_grads.T)).diagonal()
    cos_sim = np.dot(unit_grads, grads_train.T)
    # Free the large gradient matrices eagerly; they dominate memory.
    del unit_grads, param_grads
    gc.collect()
    return norms, cos_sim
def greater_cos(cos_sim, eta):
    """Fraction of rows whose maximum cosine similarity exceeds `eta`;
    0.0 for an empty input."""
    total = cos_sim.shape[0]
    if total == 0:
        return 0.0
    hits = sum(1.0 for row in cos_sim if np.max(row) > eta)
    return hits / total
def smaller_norm(norms, gamma):
    """Fraction of entries of `norms` strictly below `gamma`;
    0.0 for an empty input."""
    total = norms.shape[0]
    if total == 0:
        return 0.0
    hits = sum(1.0 for value in norms if value < gamma)
    return hits / total
def cos_and_norm_sep(cos_sim, norms, eta, gamma):
    """Fraction of examples with max cosine similarity above `eta` AND
    norm below `gamma`; 0.0 for an empty input."""
    total = norms.shape[0]
    if total == 0:
        return 0.0
    hits = 0.0
    for row_idx in range(total):
        if np.max(cos_sim[row_idx]) > eta and norms[row_idx] < gamma:
            hits += 1.0
    return hits / total
def comp_cos(cos_a, cos_b):
    """Fraction of rows where cos_a's row-max exceeds cos_b's row-max;
    0.0 for an empty input."""
    total = cos_a.shape[0]
    if total == 0:
        return 0.0
    wins = 0.0
    for row_idx in range(total):
        if np.max(cos_a[row_idx]) > np.max(cos_b[row_idx]):
            wins += 1.0
    return wins / total
def comp_norm(norm_a, norm_b):
    """Fraction of positions where norm_a exceeds norm_b;
    0.0 for an empty input."""
    total = norm_a.shape[0]
    if total == 0:
        return 0.0
    wins = 0.0
    for row_idx in range(total):
        if norm_a[row_idx] > norm_b[row_idx]:
            wins += 1.0
    return wins / total
def get_test_from_train_idx(a, b):
    """Return the entries of `a` at positions NOT listed in `b`
    (complement selection via a boolean mask)."""
    keep = np.ones(a.shape, dtype=bool)
    keep[b] = False
    return a[keep]
def get_guide_idx(model, idx_filter=None, cos_sim=None, data_indices=None, idx=0):
    """Map point id `idx` to the dataset index of its best-matching guide.

    `idx_filter` lists the ids covered by `cos_sim`'s rows; `data_indices`
    maps `cos_sim` columns back to dataset indices. `model` is unused but
    kept for interface compatibility with callers.
    """
    row = np.flatnonzero(idx_filter == idx)[0]
    best_col = np.argmax(cos_sim[row])
    return data_indices[best_col]
{
"api_name": "tensorflow.gradients",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tensorflow.python.ops.math_ops.multiply",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "tensorflow.python.ops.math_ops",
"line_number": 21,
"usage_type": "name"
}... |
6422355002 | from django.urls import path
from . import views
# URL routes for the register app; each route name mirrors its path.
# NOTE(review): the 'profile' route points at views.prifile — looks like
# a typo carried over from the views module; confirm the view's actual
# name there before renaming either side.
urlpatterns = [
    path('account', views.account, name="account"),
    path('profile', views.prifile, name="profile"),
    path('signup', views.sign_up, name="signup"),
    path('signin', views.sign_in, name="signin"),
    path('signout', views.sign_out, name="signout"),
]
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
4992730292 | import torch
import numpy as np
import math
import torch.nn.functional as F
import re
import nltk, json
from fairseq import pybleu, options, progress_bar, tasks, tokenizer, utils, strategies
from fairseq.meters import TimeMeter
from fairseq.strategies.strategy_utils import duplicate_encoder_out
def getSubstitutePairs(pred_lst, input_lst):
    """Align two token lists via their LCS and extract substitutions.

    Returns a list of (original, replacement, start, end) tuples, where
    `original` comes from input_lst, `replacement` from pred_lst, and
    [start, end) is the original token span. Pairs whose original text
    contains a digit, or that are identical ignoring spaces, are dropped.
    NOTE: both input lists are mutated (padded with sentinel '0' tokens).
    """
    def LCS(A,B):
        # Pad both sequences with sentinel '0' at both ends so indexing
        # is 1-based and the alignment walk below has a terminator.
        A.append('0')
        B.append('0')
        n = len(A)
        m = len(B)
        A.insert(0,'0')
        B.insert(0,'0')
        # table L stores LCS lengths for each prefix pair
        L = [ ([0]*(m+1)) for i in range(n+1) ]
        # table C stores the backtracking direction (0 diag, 1 up, -1 left)
        C = [ ([0]*(m+1)) for i in range(n+1) ]
        for x in range (0,n+1):
            for y in range (0,m+1):
                if (x==0 or y==0):
                    L[x][y] = 0
                elif A[x] == B[y]:
                    L[x][y] = ( L[x-1][y-1] + 1 )
                    C[x][y] = 0
                elif L[x-1][y] >= L[x][y-1]:
                    L[x][y] = L[x-1][y]
                    C[x][y] = 1
                else:
                    L[x][y] = L[x][y-1]
                    C[x][y] = -1
        return L[n][m],C,n,m
    def printLCS(C,A,x,y):
        # Recursively walk the backtracking table, appending the common
        # subsequence tokens to the enclosing `lcsres` list.
        if ( x == 0 or y == 0):
            return 0
        if C[x][y] == 0:
            printLCS(C,A,x-1,y-1)
            lcsres.append(A[x])
        elif C[x][y] == 1:
            printLCS(C,A,x-1,y)
        else:
            printLCS(C,A,x,y-1)
    length,C,x,y = LCS(pred_lst, input_lst)
    lcsres = []
    printLCS(C,pred_lst,x,y)
    ret = []
    # i walks pred_lst, j walks input_lst, k walks the common subsequence
    i, j, k = 1, 1, 0
    word2change, substitute = [], []
    while k < len(lcsres):
        if pred_lst[i] == lcsres[k] and input_lst[j] == lcsres[k]:
            # both sides match the LCS token: advance all three cursors
            i += 1; j += 1; k += 1
            word2change, substitute = [], []
        else:
            # collect the divergent tokens on each side (punctuation
            # stripped) until both sides re-sync with the LCS token
            while pred_lst[i] != lcsres[k]:
                substitute.append(re.sub('\.|,', '', pred_lst[i]))
                i += 1
            while input_lst[j] != lcsres[k]:
                word2change.append(re.sub('\.|,', '', input_lst[j]))
                j += 1
            if len(word2change) != len(substitute):
                # unequal spans: emit one joined phrase-level substitution
                ret.append((' '.join(word2change), ' '.join(substitute), i-len(word2change)-1, len(word2change)))
            else:
                # equal spans: emit one substitution per token position
                idx = 0
                for reti in range(len(word2change)):
                    ret.append((word2change[reti], substitute[reti], i-len(word2change)+idx-1, 1))
                    idx += 1
    res = []
    # drop pairs containing digits or differing only in whitespace
    for k, v, idx, length in ret:
        if not bool(re.search(r'\d', k)) and re.sub(' ', '', k) != re.sub(' ', '', v):
            res.append((k, v, idx, idx+length))
    return res
def main(args):
    """Generate translations with a CMLM/mask-predict model.

    Loads the task, dictionary, decoding strategy and model ensemble, decodes
    the chosen subset, prints S-/T-/H-/A- lines, reports BLEU when references
    exist, and dumps per-sentence word-substitution suggestions to
    '<args.path>res.json'.
    """
    assert args.path is not None, '--path required for generation!'
    assert not args.sampling or args.nbest == args.beam, \
        '--sampling requires --nbest to be equal to --beam'
    assert args.replace_unk is None or args.raw_text, \
        '--replace-unk requires a raw text dataset (--raw-text)'
    if args.max_tokens is None and args.max_sentences is None:
        args.max_tokens = 12000
    print(args)
    use_cuda = torch.cuda.is_available() and not args.cpu
    torch.manual_seed(args.seed)
    # Load dataset splits
    task = tasks.setup_task(args)
    task.load_dataset(args.gen_subset)
    print('| {} {} {} examples'.format(args.data, args.gen_subset, len(task.dataset(args.gen_subset))))
    # Set dictionaries
    #src_dict = task.source_dictionary
    tgt_dict = task.target_dictionary
    # NOTE(review): this shadows the builtin `dict` for the rest of the function.
    dict = tgt_dict
    # Load decoding strategy
    strategy = strategies.setup_strategy(args)
    # Load ensemble
    print('| loading model(s) from {}'.format(args.path))
    # NOTE(review): eval() on a CLI-provided string is unsafe for untrusted input.
    models, _ = utils.load_ensemble_for_inference(args.path.split(':'), task, model_arg_overrides=eval(args.model_overrides))
    models = [model.cuda() for model in models]
    # Optimize ensemble for generation
    for model in models:
        model.make_generation_fast_(
            beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
            need_attn=args.print_alignment,
        )
        if args.fp16:
            model.half()
    # Load alignment dictionary for unknown word replacement
    # (None if no unknown word replacement, empty if no path to align dictionary)
    align_dict = utils.load_align_dict(args.replace_unk)
    # Load dataset (possibly sharded)
    itr = task.get_batch_iterator(
        dataset=task.dataset(args.gen_subset),
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences,
        max_positions=utils.resolve_max_positions(
            task.max_positions(),
            *[model.max_positions() for model in models]
        ),
        ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
        required_batch_size_multiple=8,
        num_shards=args.num_shards,
        shard_id=args.shard_id,
    ).next_epoch_itr(shuffle=False)
    results = []
    scorer = pybleu.PyBleuScorer()
    num_sentences = 0
    has_target = True
    timer = TimeMeter()
    # Raw source sentences, indexed by sample id (assumes ids match line order).
    with open('test.en-de.en', 'r') as f:
        inputs = f.readlines()
    res_dict = {}
    with progress_bar.build_progress_bar(args, itr) as t:
        translations = generate_batched_itr(t, strategy, models, tgt_dict, length_beam_size=args.length_beam, use_gold_target_len=args.gold_target_len)
        for sample_id, src_tokens, target_tokens, hypos in translations:
            has_target = target_tokens is not None
            target_tokens = target_tokens.int().cpu() if has_target else None
            # Either retrieve the original sentences or regenerate them from tokens.
            if align_dict is not None:
                src_str = task.dataset(args.gen_subset).src.get_original_text(sample_id)
                target_str = task.dataset(args.gen_subset).tgt.get_original_text(sample_id)
            else:
                src_str = dict.string(src_tokens, args.remove_bpe)
                if args.dehyphenate:
                    src_str = dehyphenate(src_str)
                if has_target:
                    target_str = dict.string(target_tokens, args.remove_bpe, escape_unk=True)
                    if args.dehyphenate:
                        target_str = dehyphenate(target_str)
            if not args.quiet:
                print('S-{}\t{}'.format(sample_id, inputs[sample_id].strip()))
                if has_target:
                    print('T-{}\t{}'.format(sample_id, target_str))
            hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
                hypo_tokens=hypos.int().cpu(),
                src_str=src_str,
                alignment= None,
                align_dict=align_dict,
                tgt_dict=dict,
                remove_bpe=args.remove_bpe,
            )
            if args.dehyphenate:
                hypo_str = dehyphenate(hypo_str)
            if not args.quiet:
                print('H-{}\t{}'.format(sample_id, hypo_str))
                if args.print_alignment:
                    print('A-{}\t{}'.format(
                        sample_id,
                        ' '.join(map(lambda x: str(utils.item(x)), alignment))
                    ))
            # Word-substitution pairs between the hypothesis and the raw input.
            res = getSubstitutePairs(nltk.word_tokenize(hypo_str), nltk.word_tokenize(inputs[sample_id].strip()))
            # Score only the top hypothesis
            if has_target:
                if align_dict is not None or args.remove_bpe is not None:
                    # Convert back to tokens for evaluation with unk replacement and/or without BPE
                    target_tokens = tgt_dict.encode_line(target_str, add_if_not_exist=True)
            results.append((target_str, hypo_str))
            # `sample_id` is a 0-dim tensor; tolist() turns it into a plain int key.
            res_dict[sample_id.cpu().tolist()] = {'input_words':nltk.word_tokenize(src_str), "pred_words":nltk.word_tokenize(hypo_str), "substitute_topk":[[[k,lpos,rpos],[v]] for k,v,lpos,rpos in res]}
            num_sentences += 1
    if has_target:
        print('Time = {}'.format(timer.elapsed_time))
        ref, out = zip(*results)
        print('| Generate {} with beam={}: BLEU4 = {:2.2f}, '.format(args.gen_subset, args.beam, scorer.score(ref, out)))
    finalres = {}
    # NOTE(review): assumes sample ids are contiguous from 0 — raises KeyError otherwise.
    with open('test.en-de.idx', 'r') as f:
        idxs = f.readlines()
    for i in range(max(res_dict.keys())+1):
        finalres[idxs[i]] = res_dict[i]
    with open(args.path+'res.json', 'w') as f:
        json.dump(finalres, f, indent=4)
def dehyphenate(sent):
    """Rewrite intra-word hyphens ("well-known") as ' @-@ ' tokens.

    A temporary ##AT## marker is injected by the regex and then collapsed
    to '@', matching the fairseq WMT tokenization convention.
    """
    marked = re.sub(r'(\S)-(\S)', r'\1 ##AT##-##AT## \2', sent)
    return marked.replace('##AT##', '@')
def generate_batched_itr(data_itr, strategy, models, tgt_dict, length_beam_size=None, use_gold_target_len=False, cuda=True):
    """Iterate over a batched dataset and yield individual translations.

    Args:
        data_itr: iterator over fairseq-style batched samples.
        strategy: decoding strategy used by `generate`.
        models: list containing the generation model(s).
        tgt_dict: target dictionary (provides the pad symbol).
        length_beam_size: number of candidate target lengths per sentence.
        use_gold_target_len: if True, derive target lengths from the references.
        cuda: use GPU for generation.

    Yields:
        (example_id, src_tokens, ref_tokens_or_None, hypo_tokens) per sentence,
        each stripped of padding.
    """
    for sample in data_itr:
        s = utils.move_to_cuda(sample) if cuda else sample
        if 'net_input' not in s:
            continue
        input = s['net_input']
        # model.forward normally channels prev_output_tokens into the decoder
        # separately, but SequenceGenerator directly calls model.encoder
        encoder_input = {
            k: v for k, v in input.items()
            if k != 'prev_output_tokens'
        }
        with torch.no_grad():
            # Gold length = number of non-pad target tokens, when requested.
            gold_target_len = s['target'].ne(tgt_dict.pad()).sum(-1) if use_gold_target_len else None
            hypos = generate(strategy, encoder_input, models, tgt_dict, length_beam_size, gold_target_len)
            for batch in range(hypos.size(0)):
                src = utils.strip_pad(input['src_tokens'][batch].data, tgt_dict.pad())
                ref = utils.strip_pad(s['target'][batch].data, tgt_dict.pad()) if s['target'] is not None else None
                hypo = utils.strip_pad(hypos[batch], tgt_dict.pad())
                example_id = s['id'][batch].data
                yield example_id, src, ref, hypo
def generate(strategy, encoder_input, models, tgt_dict, length_beam_size, gold_target_len):
    """Mask-predict style generation over a beam of candidate target lengths.

    Decodes `length_beam_size` length candidates per sentence and keeps the
    hypothesis with the best length-normalized log-probability.
    Returns a (bsz, max_len) tensor of output tokens.
    """
    assert len(models) == 1
    model = models[0]
    src_tokens = encoder_input['src_tokens']
    # Round-trips through a Python list, producing a fresh tensor copy —
    # presumably to detach from shared storage; TODO confirm intent.
    src_tokens = src_tokens.new(src_tokens.tolist())
    bsz = src_tokens.size(0)
    encoder_out = model.encoder(**encoder_input)
    # Candidate target lengths, shape (bsz, length_beam_size).
    beam = predict_length_beam(gold_target_len, encoder_out['predicted_lengths'], length_beam_size)
    max_len = beam.max().item()
    # Row (length - 1) of this upper-triangular matrix has ones at positions
    # >= length, i.e. it marks the padding positions for that candidate length.
    length_mask = torch.triu(src_tokens.new(max_len, max_len).fill_(1).long(), 1)
    length_mask = torch.stack([length_mask[beam[batch] - 1] for batch in range(bsz)], dim=0)
    # Real positions are filled with the mask symbol, the rest with pad.
    tgt_tokens = src_tokens.new(bsz, length_beam_size, max_len).fill_(tgt_dict.mask())
    tgt_tokens = (1 - length_mask) * tgt_tokens + length_mask * tgt_dict.pad()
    tgt_tokens = tgt_tokens.view(bsz * length_beam_size, max_len)
    # Repeat encoder outputs so each length candidate decodes independently.
    duplicate_encoder_out(encoder_out, bsz, length_beam_size)
    hypotheses, lprobs = strategy.generate(model, encoder_out, tgt_tokens, tgt_dict)
    hypotheses = hypotheses.view(bsz, length_beam_size, max_len)
    lprobs = lprobs.view(bsz, length_beam_size)
    # Length-normalize the scores and keep the best candidate per sentence.
    tgt_lengths = (1 - length_mask).sum(-1)
    avg_log_prob = lprobs / tgt_lengths.float()
    best_lengths = avg_log_prob.max(-1)[1]
    hypotheses = torch.stack([hypotheses[b, l, :] for b, l in enumerate(best_lengths)], dim=0)
    return hypotheses
def predict_length_beam(gold_target_len, predicted_lengths, length_beam_size):
    """Pick `length_beam_size` candidate target lengths per sentence.

    With gold lengths, the candidates are a window centered on the gold
    length; otherwise the top-k of the model's length distribution.
    Candidates are clamped to a minimum length of 2.
    """
    if gold_target_len is None:
        candidates = predicted_lengths.topk(length_beam_size, dim=1)[1]
    else:
        lo = gold_target_len - (length_beam_size - 1) // 2
        hi = gold_target_len + length_beam_size // 2 + 1
        rows = [
            torch.arange(lo[b], hi[b], device=lo.device)
            for b in range(gold_target_len.size(0))
        ]
        candidates = torch.stack(rows, dim=0)
    candidates[candidates < 2] = 2
    return candidates
if __name__ == '__main__':
    # Parse the fairseq generation CLI options and run decoding.
    parser = options.get_generation_parser()
    args = options.parse_args_and_arch(parser)
    main(args)
| microsoft/SmartWordSuggestions | code/baselines/CMLM/updates/generate_cmlm.py | generate_cmlm.py | py | 12,369 | python | en | code | 18 | github-code | 6 | [
{
"api_name": "re.sub",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 84,
"usage_ty... |
12441870149 | import redis
from redis_lru import RedisLRU
from connect import connect
from models import Quote
# Redis connection used as the backing store for the LRU result cache.
client = redis.StrictRedis(host="localhost", port=6379, password=None)
cache = RedisLRU(client)

# All quotes loaded once from MongoDB through the mongoengine model.
quotes = Quote.objects()
@cache
def find_by_name(value):
    """Print and return all quotes by the author named in a 'name:<full name>' command.

    Returning the list (instead of the previous implicit None) gives the Redis
    LRU cache a meaningful value to store. NOTE: on a cache hit the body —
    including the print — is skipped, so repeated commands print nothing.
    """
    _, sep, full_name = value.partition(":")
    if not sep:
        # Previously `value.split(":")[1]` raised IndexError on a missing colon.
        print([])
        return []
    wanted = full_name.strip().lower()
    found = [q.quote for q in quotes if q.author.fullname.lower() == wanted]
    print(found)
    return found
@cache
def find_by_tag(value):
    """Print every quote carrying any tag from a 'tag:t1,t2,...' command."""
    requested = value.split(":")[1].split(",")
    # One entry per (quote, matching tag) pair, mirroring the nested loops.
    matches = [
        q.quote
        for q in quotes
        for t in requested
        if t in q.tags
    ]
    print(matches)
def main():
    """Interactive loop: dispatch 'name:...'/'tag:...' commands until 'exit'."""
    while True:
        command = input("Enter your 'command:value' or 'exit': ")
        if command.startswith("exit"):
            break
        if command.startswith("name"):
            find_by_name(command)
        elif command.startswith("tag"):
            find_by_tag(command)
        else:
            print("Wrong command. Please, try again.")
# Run the interactive quote-search loop only when executed as a script.
if __name__ == "__main__":
    main()
{
"api_name": "redis.StrictRedis",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "redis_lru.RedisLRU",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "models.Quote.objects",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "models.Quote"... |
1775721628 | import datetime
# Participant roles: 0 = "Пионер" (pioneer/camper), 1 = "Педсостав" (teaching staff).
EXISTING_TYPES = (
    (0, "Пионер"),
    (1, "Педсостав"),
)

# Transaction categories for regular operations; the trailing '+'/'-' in each
# code marks whether the amount is credited or debited.
SIGN_SET = (
    ('dining_services+', "Дежурный в столовой"),
    ('activity+', "Активность"),
    ('salary+', "Зарплата"),
    ('fee+', "Гонорар"),
    ('purchase-', "Покупка"),
    ('fine-', "Штраф"),
)

# Same categories plus person-to-person transfers ('p2p+').
SIGN_SET_ALL = (
    ('p2p+', "Личный перевод"),
    ('dining_services+', "Дежурный в столовой"),
    ('activity+', "Активность"),
    ('salary+', "Зарплата"),
    ('fee+', "Гонорар"),
    ('purchase-', "Покупка"),
    ('fine-', "Штраф"),
)

# Session window: fixed start date, ending exactly three weeks later.
DATE_START_OF_ = datetime.date(year=2024, month=6, day=25)
DATE_END_OF_ = datetime.date(year=2024, month=6, day=25) + datetime.timedelta(weeks=3)
| RegSirius06/SWBM | constants/bank/forms.py | forms.py | py | 810 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.date",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 28,
"usage_type": "call"
}
] |
26038625786 | from __future__ import annotations
import logging
import os
from dataclasses import dataclass
from typing import Iterable
from pants.backend.cc.subsystems.compiler import CCSubsystem, ExternalCCSubsystem
from pants.backend.cc.target_types import CCLanguage
from pants.core.util_rules.archive import ExtractedArchive
from pants.core.util_rules.archive import rules as archive_rules
from pants.core.util_rules.system_binaries import (
BinaryNotFoundError,
BinaryPathRequest,
BinaryPaths,
BinaryPathTest,
)
from pants.engine.env_vars import EnvironmentVars, EnvironmentVarsRequest
from pants.engine.fs import DownloadFile
from pants.engine.internals.native_engine import EMPTY_DIGEST, Digest
from pants.engine.platform import Platform
from pants.engine.process import Process
from pants.engine.rules import Get, Rule, collect_rules, rule
from pants.engine.unions import UnionRule
from pants.util.logging import LogLevel
from pants.util.ordered_set import OrderedSet
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class CCToolchainRequest:
    """A request for a C/C++ toolchain."""

    # Which language's toolchain (C vs C++) should be resolved.
    language: CCLanguage
@dataclass(frozen=True)
class CCToolchain:
    """A C/C++ toolchain: compiler binary plus the flags/defines used to invoke it."""

    compiler: str
    # include_directories: tuple[str, ...] = ()  # TODO as part of the `check` goal to ensure source roots are handled
    compiler_flags: tuple[str, ...] = ()
    compiler_definitions: tuple[str, ...] = ()
    linker_flags: tuple[str, ...] = ()
    digest: Digest = EMPTY_DIGEST

    def __post_init__(self):
        # BUGFIX: str.lstrip("-D") strips any leading run of '-' and 'D'
        # characters, so it corrupted defines such as "-DDEBUG" -> "EBUG".
        # Strip only an actual "-D" prefix instead.
        # TODO: Should this error out to notify the user of a mistake? Or silently handle
        # Or just ensure all defines have -D right now?
        if self.compiler_definitions:
            sanitized_definitions = [
                define[2:] if define.startswith("-D") else define
                for define in self.compiler_definitions
            ]
            object.__setattr__(self, "compiler_definitions", tuple(sanitized_definitions))

    @property
    def compile_command(self) -> tuple[str, ...]:
        """The command to compile a C/C++ source file."""
        command = [self.compiler, *self.compiler_definitions, *self.compiler_flags]
        return tuple(filter(None, command))

    @property
    def link_command(self) -> tuple[str, ...]:
        """The command to link a C/C++ binary."""
        command = [self.compiler, *self.linker_flags]
        return tuple(filter(None, command))
async def _executable_path(binary_names: Iterable[str], search_paths: Iterable[str]) -> str:
    """Return the path of the first of `binary_names` discovered on `search_paths`.

    Each candidate is validated by invoking it with `-v`. Raises
    BinaryNotFoundError when none of the names resolves.
    """
    for candidate in binary_names:
        paths = await Get(  # noqa: PNT30: requires triage
            BinaryPaths,
            BinaryPathRequest(
                binary_name=candidate,
                search_path=search_paths,
                test=BinaryPathTest(args=["-v"]),
            ),
        )
        if paths and paths.first_path:
            return paths.first_path.path
    raise BinaryNotFoundError(f"Could not find any of '{binary_names}' in any of {search_paths}.")
async def _setup_downloadable_toolchain(
    request: CCToolchainRequest,
    subsystem: ExternalCCSubsystem,
    platform: Platform,
) -> CCToolchain:
    """Set up a toolchain from a downloadable archive."""
    download_file_request = subsystem.get_request(platform).download_file_request
    archive_digest = await Get(Digest, DownloadFile, download_file_request)
    extracted = await Get(ExtractedArchive, Digest, archive_digest)
    # Populate the toolchain for C or C++ accordingly.
    if request.language != CCLanguage.CXX:
        return CCToolchain(
            compiler=subsystem.c_executable,
            compiler_flags=tuple(subsystem.c_compiler_flags),
            compiler_definitions=tuple(subsystem.c_definitions),
            digest=extracted.digest,
        )
    return CCToolchain(
        compiler=subsystem.cxx_executable,
        compiler_flags=tuple(subsystem.cxx_compiler_flags),
        compiler_definitions=tuple(subsystem.cxx_definitions),
        digest=extracted.digest,
    )
async def _setup_system_toolchain(
    request: CCToolchainRequest, subsystem: CCSubsystem
) -> CCToolchain:
    """Set up a toolchain from the user's host system."""
    # Expand the "<PATH>" placeholder (first occurrence only) into the host's
    # PATH entries, then de-duplicate while preserving order.
    candidate_paths = list(subsystem.search_paths)
    if "<PATH>" in candidate_paths:
        placeholder = candidate_paths.index("<PATH>")
        env = await Get(EnvironmentVars, EnvironmentVarsRequest(["PATH"]))
        host_path = env.get("PATH", "")
        candidate_paths[placeholder : placeholder + 1] = host_path.split(os.pathsep)
    search_paths = tuple(OrderedSet(candidate_paths))
    # Populate the toolchain for C or C++ accordingly.
    if request.language == CCLanguage.CXX:
        compiler = await _executable_path(tuple(subsystem.cxx_executable), search_paths)
        flags, defines = subsystem.cxx_compiler_flags, subsystem.cxx_definitions
    else:
        compiler = await _executable_path(tuple(subsystem.c_executable), search_paths)
        flags, defines = subsystem.c_compiler_flags, subsystem.c_definitions
    return CCToolchain(
        compiler,
        compiler_flags=tuple(flags),
        compiler_definitions=tuple(defines),
    )
@rule(desc="Setup the CC Toolchain", level=LogLevel.DEBUG)
async def setup_cc_toolchain(
request: CCToolchainRequest,
subsystem: CCSubsystem,
external_subsystem: ExternalCCSubsystem,
platform: Platform,
) -> CCToolchain:
"""Set up the C/C++ toolchain."""
if external_subsystem.url_template:
return await _setup_downloadable_toolchain(request, external_subsystem, platform)
else:
return await _setup_system_toolchain(request, subsystem)
@dataclass(frozen=True)
class CCProcess:
    """A request to run the configured C/C++ compiler with the given arguments."""

    # Arguments appended after the toolchain's compile command.
    args: tuple[str, ...]
    # Selects which toolchain (C vs C++) to load.
    language: CCLanguage
    # Human-readable description shown while the process runs.
    description: str
    # Input files for the invocation.
    input_digest: Digest = EMPTY_DIGEST
    # Files to capture from the sandbox after the process finishes.
    output_files: tuple[str, ...] = ()
    level: LogLevel = LogLevel.INFO
@rule(desc="Setup a CC Process loaded with the CCToolchain", level=LogLevel.DEBUG)
async def setup_cc_process(request: CCProcess) -> Process:
"""Set up a C/C++ process.
This rule will load the C/C++ toolchain based on the requested language. It will then return a
Process that can be run to compile or link a C/C++ source file.
"""
toolchain = await Get(CCToolchain, CCToolchainRequest(request.language))
# TODO: What if this is for linking instead of compiling?
# TODO: From tdyas: Should there then be a CCCompilerProcess and CCLinkerProcess?
# Investigate further during `check` PR
compiler_command = list(toolchain.compile_command)
# If downloaded, this will be the toolchain, otherwise empty digest
immutable_digests = {"__toolchain": toolchain.digest}
if toolchain.digest != EMPTY_DIGEST:
compiler_command[0] = f"__toolchain/{compiler_command[0]}"
argv = tuple(compiler_command) + request.args
return Process(
argv=argv,
input_digest=request.input_digest,
output_files=request.output_files,
description=request.description,
level=request.level,
immutable_input_digests=immutable_digests,
# env={"__PANTS_CC_COMPILER_FINGERPRINT": toolchain.compiler.fingerprint},
)
def rules() -> Iterable[Rule | UnionRule]:
    """Return this module's rules plus the archive-extraction rules they rely on."""
    return (*collect_rules(), *archive_rules())
| pantsbuild/pants | src/python/pants/backend/cc/util_rules/toolchain.py | toolchain.py | py | 7,547 | python | en | code | 2,896 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pants.backend.cc.target_types.CCLanguage",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 31,
"usage_type": "call"
},
{
"... |
81653711 | import json
import os
from corai_util.tools.src.function_json import zip_json, unzip_json
def list_of_dicts_to_txt(parameter_options, column_size=15, file_name="config.txt"):
    """
    Writes the parameter options in a formatted file, the header of the file contains the parameter names,
    each following line contains an entry from the parameter options.

    Args:
        parameter_options (list<dict>): The list of dictionaries to be written to the file.
            Must be non-empty; all entries are expected to share the keys of the first one.
        column_size (int): The size of the columns in the file.
        file_name (str): The path to where the config file should be written.

    Returns:
        None
    """
    # Column layout is driven by the keys of the first entry.
    p_names = list(parameter_options[0])
    # One right-aligned column per parameter, e.g. " {:>15} {:>15}\n".
    # Built with join instead of repeated string concatenation.
    line_pattern = "".join(" {:>" + str(column_size) + "}" for _ in p_names) + "\n"
    with open(file_name, "w") as file:
        file.write(line_pattern.format(*p_names))
        for p_option in parameter_options:
            file.write(line_pattern.format(*(p_option[name] for name in p_names)))
def list_of_dicts_to_json(list_of_dicts, file_name="config.json", compress=False):
    """
    Writes the list_of_dicts to a json file.
    Create a directory if the path yields a non-existent directory.

    Args:
        list_of_dicts(list<dict>): to be written to the file
        file_name (str): The path to where the config file should be written with extension.
        compress: Boolean to specify if compression should be applied before writing to the file.

    Returns:
        None
    """
    if compress:
        list_of_dicts = zip_json(list_of_dicts)
    directory_where_to_save = os.path.dirname(file_name)
    if directory_where_to_save:
        # exist_ok avoids the check-then-create race of the previous
        # os.path.exists()/os.makedirs() pair.
        os.makedirs(directory_where_to_save, exist_ok=True)
    with open(file_name, 'w') as file:
        json.dump(list_of_dicts, file)
def json2python(path, compress=False):
    """
    Reads a json file back into a Python object.

    Args:
        path (str): Path to the json file.
        compress: Boolean, True when the file was written compressed
            (see list_of_dicts_to_json) and must be unzipped after loading.

    Returns:
        The decoded object.
    """
    # The `with` block already closes the file; the previous explicit
    # file.close() afterwards was redundant. The local is also renamed to
    # avoid shadowing the builtin `dict`.
    with open(path, 'r') as file:
        data = json.load(file)
    if compress:
        data = unzip_json(data)
    return data
def factory_fct_linked_path(ROOT_DIR, path_to_folder):
    """
    Semantics:
        Build a helper that resolves path components under ROOT_DIR/path_to_folder.

    Args:
        ROOT_DIR: Path to the root of the project.
        path_to_folder: a path written in the format you want because we use the function os.path.join to link it.

    Returns:
        The linker: a function taking a list of path components and joining
        them under ROOT_DIR/path_to_folder.

    Examples:
        linked_path = factory_fct_linked_path(ROOT_DIR, "path/a"):
        path_save_history = linked_path(['plots', f"best_score_{nb}.pth"])
        #and ROOT_DIR should be imported from a script at the root where it is written:
        import os
        ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
    """
    base_path = os.path.join(ROOT_DIR, path_to_folder)

    def linked_path(path):
        # `path` is a list of components such as ['C', 'users', 'name', ...].
        # Appending '' at the end yields a trailing separator, handy when the
        # caller wants to keep extending the path afterwards.
        return os.path.join(base_path, *path)

    return linked_path
| Code-Cornelius/CorAI | corai_util/tools/src/function_writer.py | function_writer.py | py | 3,449 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "corai_util.tools.src.function_json.zip_json",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name... |
74025598587 | import arcade
from src.characters.model import Player
class LuffyPlayer(Player):
    """Playable Luffy character: combat stats plus the sprite animation kept in sync."""

    # Base combat stats.
    life = 100
    basic_attack = 5
    special_attack = 15
    # Horizontal pixels moved per move() step (times the direction multiplier).
    speed = 5

    def __init__(self, x, y, direction):
        """Place the player at (x, y); `direction` is forwarded to the animation
        (compared against "left" there) with a fixed 2x sprite scale."""
        super().__init__()
        self.x = x
        self.y = y
        self.direction = direction
        self.animation = LuffyAnimation(self.x, self.y, self.direction, 2)

    def move(self, direction):
        """Move horizontally; here `direction` is a signed numeric multiplier."""
        self.x += direction*self.speed
        self.animation.move_x(self.x, direction)

    def action_basic_attack(self, direction):
        """Switch the animation to the basic-attack frames."""
        self.animation.action_basic_attack(self.x, direction)

    def stop(self, direction):
        """Return to the idle (standing) animation facing `direction`."""
        self.animation.stop_move(direction)

    def draw(self):
        """Draw the current animation frame."""
        self.animation.draw()

    def update(self):
        """Advance the animation to its next frame."""
        self.animation.update()
class LuffyAnimation(object):
    """Sprite-sheet animation for Luffy: frames are cut from one texture atlas.

    NOTE(review): the `mirrored`/`scale` keyword arguments of
    arcade.load_texture only exist in older arcade releases — confirm the
    pinned version.
    """

    # Texture atlas containing every Luffy frame.
    sprite_path = "assets/sprites/luffy.png"

    def __init__(self, x, y, direction, scale):
        """Create the animated sprite at (x, y), idle-facing `direction`, scaled by `scale`."""
        super().__init__()
        self.scale = scale
        self.sprite_list = arcade.SpriteList()
        move_sprite = arcade.AnimatedTimeSprite(scale=self.scale)
        move_sprite.position = [x, y]
        move_sprite.textures = self.stand_animation(direction)
        self.sprite_list.append(move_sprite)

    def draw(self):
        """Draw the sprite."""
        self.sprite_list.draw()

    def update(self):
        """Advance to the next animation frame (time-based)."""
        self.sprite_list.update_animation()

    def move_animation(self, direction) -> [arcade.Texture]:
        """Eight walking frames, mirrored when facing left."""
        mirror = (direction == "left")
        t1 = arcade.load_texture(self.sprite_path, 387, 12, 55, 70, mirrored=mirror, scale=self.scale)
        t2 = arcade.load_texture(self.sprite_path, 440, 12, 51, 70, mirrored=mirror, scale=self.scale)
        t3 = arcade.load_texture(self.sprite_path, 490, 12, 58, 70, mirrored=mirror, scale=self.scale)
        t4 = arcade.load_texture(self.sprite_path, 547, 12, 51, 70, mirrored=mirror, scale=self.scale)
        t5 = arcade.load_texture(self.sprite_path, 597, 12, 51, 70, mirrored=mirror, scale=self.scale)
        t6 = arcade.load_texture(self.sprite_path, 646, 12, 52, 70, mirrored=mirror, scale=self.scale)
        t7 = arcade.load_texture(self.sprite_path, 698, 12, 58, 70, mirrored=mirror, scale=self.scale)
        t8 = arcade.load_texture(self.sprite_path, 755, 12, 51, 70, mirrored=mirror, scale=self.scale)
        return [t1, t2, t3, t4, t5, t6, t7, t8]

    def stand_animation(self, direction) -> [arcade.Texture]:
        """Eight idle frames (breathing loop), mirrored when facing left."""
        mirror = (direction == "left")
        t1 = arcade.load_texture(self.sprite_path, 0, 10, 46, 66, mirrored=mirror, scale=self.scale)
        t2 = arcade.load_texture(self.sprite_path, 44, 10, 44, 66, mirrored=mirror, scale=self.scale)
        t3 = arcade.load_texture(self.sprite_path, 88, 7, 44, 69, mirrored=mirror, scale=self.scale)
        t4 = arcade.load_texture(self.sprite_path, 139, 7, 44, 69, mirrored=mirror, scale=self.scale)
        t5 = arcade.load_texture(self.sprite_path, 181, 5, 40, 72, mirrored=mirror, scale=self.scale)
        t6 = arcade.load_texture(self.sprite_path, 139, 7, 44, 69, mirrored=mirror, scale=self.scale)
        t7 = arcade.load_texture(self.sprite_path, 88, 7, 44, 69, mirrored=mirror, scale=self.scale)
        t8 = arcade.load_texture(self.sprite_path, 44, 10, 44, 66, mirrored=mirror, scale=self.scale)
        return [t1, t2, t3, t4, t5, t6, t7, t8]

    def basic_attack_animation(self, direction) -> [arcade.Texture]:
        """Five basic-attack frames, mirrored when facing left."""
        mirror = (direction == "left")
        t1 = arcade.load_texture(self.sprite_path, 0, 83, 48, 66, mirrored=mirror, scale=self.scale)
        t2 = arcade.load_texture(self.sprite_path, 46, 83, 44, 66, mirrored=mirror, scale=self.scale)
        t3 = arcade.load_texture(self.sprite_path, 88, 83, 65, 66, mirrored=mirror, scale=self.scale)
        t4 = arcade.load_texture(self.sprite_path, 153, 83, 68, 66, mirrored=mirror, scale=self.scale)
        t5 = arcade.load_texture(self.sprite_path, 220, 83, 44, 66, mirrored=mirror, scale=self.scale)
        return [t1, t2, t3, t4, t5]

    def get_sprite(self) -> arcade.AnimatedTimeSprite:
        """Return the single managed sprite."""
        return self.sprite_list[0]

    def move_x(self, x, direction):
        """Move the sprite to x and show the walk cycle for the sign of `direction`."""
        sprite = self.get_sprite()
        sprite.center_x = x
        if direction > 0:
            sprite.textures = self.move_animation("right")
        else:
            sprite.textures = self.move_animation("left")

    def action_basic_attack(self, x, direction):
        """Move the sprite to x and show the attack frames for the sign of `direction`."""
        sprite = self.get_sprite()
        sprite.center_x = x
        if direction > 0:
            sprite.textures = self.basic_attack_animation("right")
        else:
            sprite.textures = self.basic_attack_animation("left")

    def stop_move(self, direction):
        """Fall back to the idle frames facing `direction` ("left"/"right")."""
        sprite = self.get_sprite()
        sprite.textures = self.stand_animation(direction)
| anthonykgross/One-fight | src/characters/luffy/model.py | model.py | py | 4,704 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "src.characters.model.Player",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "arcade.SpriteList",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "arcade.AnimatedTimeSprite",
"line_number": 43,
"usage_type": "call"
},
{
"api_name":... |
# -*- encoding: utf-8 -*-
'''
@File    :   alien.py
@Time    :   2021/10/25 23:48:17
@Author  :   James
@Version :   1.0
@Desc    :   Alien sprite class
'''
import pygame
from pygame.sprite import Sprite
class Alien(Sprite):
    """A single alien in the invasion fleet."""

    def __init__(self, ai_game):
        """Initialize the alien and set its starting position."""
        super().__init__()
        self.screen = ai_game.screen

        # Load the alien image and set its rect attribute.
        self.image = pygame.image.load(r'G:\GitWorkSpace\python_practice\alien_invasion\images\alien.bmp')
        self.rect = self.image.get_rect()

        # Start each new alien near the top-left corner, offset by its own size.
        self.rect.x = self.rect.width
        self.rect.y = self.rect.height

        # Store the alien's exact horizontal position as a float.
        self.x = float(self.rect.x)
| heisenberg000/python_practice | alien_invasion/alien.py | alien.py | py | 830 | python | zh | code | 0 | github-code | 6 | [
{
"api_name": "pygame.sprite.Sprite",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "pygame.image.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 21,
"usage_type": "attribute"
}
] |
13525657769 | #1bc test code against sklearn - optimizer
# import necessary packages
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from random import random, seed
import numpy as np
from sklearn.neural_network import MLPRegressor
from tqdm import tqdm
# ensure the same random numbers appear every time
np.random.seed(0)
# Make data.
x = np.arange(0, 1, 0.05)
y = np.arange(0, 1, 0.05)
x, y = np.meshgrid(x,y)
m = len(x)
def FrankeFunction(x,y):
    """Evaluate the Franke function, a standard 2-D test surface built from
    four weighted Gaussian bumps over the unit square."""
    bump1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2))
    bump2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1))
    bump3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2))
    bump4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2)
    return bump1 + bump2 + bump3 + bump4
#adding normalized noise to the Franke function
# NOTE(review): sigma2 is passed as the scale (std dev) argument of
# np.random.normal, not as a variance — confirm which was intended. The noise
# has shape (len(x),) and broadcasts across the 2-D grid.
sigma2 = 0.1
z = FrankeFunction(x, y) + np.random.normal(0,sigma2, len(x))

# Flatten the grids to 1-D vectors for the regression design matrix.
x = np.ravel(x)
y = np.ravel(y)
z = np.ravel(z)
def create_X(x, y, n ):
    """
    Build the polynomial design matrix of total degree `n` in (x, y).

    Column 0 is the intercept (all ones); the remaining columns are the
    monomials x, y, x^2, xy, y^2, ... in standard order.

    Args:
        x, y: coordinate arrays (flattened if multi-dimensional) of equal length.
        n: maximum total polynomial degree.

    Returns:
        (N, (n+1)*(n+2)/2) design matrix.
    """
    if len(x.shape) > 1:
        x = np.ravel(x)
        y = np.ravel(y)

    N = len(x)
    l = int((n+1)*(n+2)/2)  # Number of elements in beta
    X = np.ones((N,l))

    for i in range(1,n+1):
        q = int((i)*(i+1)/2)
        for k in range(i+1):
            # BUGFIX: index from q (first column of the degree-i terms) instead
            # of a running counter started at 0, which overwrote the intercept
            # column and shifted every feature one slot to the left.
            X[:,q+k] = (x**(i-k))*(y**k)
    return X
# NOTE(review): the design matrix is built from (x, z) — the response enters
# the features; confirm whether create_X(x, y, n=7) was intended.
X = create_X(x, z, n=7)
# We split the data in test and training data
X_train, X_test, z_train, z_test = train_test_split(X, z, test_size=0.2)

scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# Restore the intercept column that standardization zeroed out.
X_train[:,0] = 1
X_test[:,0] = 1

# one-liner from scikit-learn library
train_size = 0.8
test_size = 1 - train_size
# NOTE(review): this second split overwrites the scaled split above with
# *unscaled* data, so the scaling work is discarded — likely unintended.
X_train, X_test, Y_train, Y_test = train_test_split(X, z, train_size=train_size,
                                                    test_size=test_size)

max_iter=5000 #default=200
score = 0
# Log-spaced grids of learning rates and L2 penalties for the MLP search.
etalist = np.logspace(-1,-6, num=20)
reglist= np.logspace(0,-10, num=20)
optimal = [0,0]
for eta in tqdm(etalist):
    for lmbd in reglist:
        regr = MLPRegressor(activation='relu',solver='sgd',alpha=lmbd,learning_rate_init=eta,
                    max_iter=max_iter).fit(X_train, Y_train)
        #regr.predict(X_test)
        # Track the best R^2 on the held-out set and its hyperparameters.
        if score < regr.score(X_test, Y_test):
            score = regr.score(X_test, Y_test)
            optimal[0] = eta; optimal[1] = lmbd

print('optimal score = ', score)
print('optimal learning rate = ', optimal[0])
print('optimal lambda =', optimal[1])
'''
PS C:\python\project2> python .\1bcMLPReg2.py
100%|█████████████████████████████████████████████████████████████████████████████████████████| 20/20 [01:24<00:00, 4.24s/it]
optimal score = 0.9670078678439595
optimal learning rate = 0.0545559478116852
optimal lambda = 1e-10
'''
''' relu
PS C:\python\project2> python .\1bcMLPReg2.py
100%|█████████████████████████████████████████████████████████████████████████████████████████| 20/20 [01:53<00:00, 5.67s/it]
optimal score = 0.992733368588985
optimal learning rate = 0.0545559478116852
optimal lambda = 1e-10
PS C:\python\project2>
'''
| gery2/FYS-STK-4155---Project-2 | Codes/1bcMLPReg2.py | 1bcMLPReg2.py | py | 3,760 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.random.seed",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"... |
69887984508 | import nltk
import gensim
import cleantext
import re
import xlrd
import sys
from gensim.models import word2vec
from data_treatment import data_treatment
from nltk.corpus import reuters
from nltk.corpus import wordnet as wn
from sklearn.externals import joblib
from nltk.stem import WordNetLemmatizer
class Synonyms_suggestion:
    """Suggest context-aware synonyms for a word.

    Candidates are gathered from three sources and concatenated:
      1. hand-curated replacement tables loaded from an Excel workbook
         (sheet 0: verbs, sheet 1: adjectives),
      2. WordNet hypernym / hyponym / similar-to lemmas,
      3. nearest neighbours from a word2vec model (either the pre-trained
         GoogleNews vectors or a model trained here on a local Economist
         corpus plus the NLTK Reuters corpus).
    """

    def __init__(self, model_type):
        """Load lexical resources and (optionally) a word2vec model.

        Args:
            model_type: 0 = no word2vec model; 1 = load the GoogleNews
                binary vectors from disk; 2 = train a fresh word2vec model
                from the local Economist text plus the Reuters corpus.
        """
        self.model1_path = "E:\\programming\\NLP\\TQCorpus\\GoogleNews-vectors-negative300.bin\\GoogleNews-vectors-negative300.bin"
        self.model2_path = "data\\txt\\Economist\\Economist.txt"
        if model_type == 0:
            pass
        if model_type == 1:
            self.model1 = gensim.models.KeyedVectors.load_word2vec_format(self.model1_path, binary=True)
        elif model_type == 2:
            # The Economist dump is not UTF-8; decode with ISO-8859-1.
            # NOTE(review): this handle is kept open for the object's lifetime
            # (never closed) -- confirm nothing else reads from it.
            self.f_model2_in = open(self.model2_path, "r", encoding="ISO-8859-1")
            all_string = self.f_model2_in.read()
            all_list = nltk.sent_tokenize(all_string)
            train_sentences_model2 = []
            for i in range(0, len(all_list)):
                train_sentences_model2.append(cleantext.clean(all_list[i]))
            train_sentences_model3 = list(reuters.sents())
            for sent in train_sentences_model3:
                train_sentences_model2.append(sent)
            self.model2 = word2vec.Word2Vec(train_sentences_model2, min_count=2, window=3, size=300)
        self.lemmatizer = WordNetLemmatizer()  # POS-aware lemmatizer
        # Load the hand-written replacement tables. Each sheet column is one
        # group of mutually substitutable words; rows 0-1 are headers.
        self.artificial_word_book = xlrd.open_workbook("data/suggestion/word_by_rule.xls")
        self.artificial_word_sheet_vec = self.artificial_word_book.sheet_by_index(0)
        self.artificial_word_sheet_adj = self.artificial_word_book.sheet_by_index(1)
        self.artificial_word_vec = []
        self.artificial_word_adj = []
        for i in range(0, self.artificial_word_sheet_vec.ncols):
            temp_list = self.artificial_word_sheet_vec.col_values(i)[2:]
            temp_list = [w.lower() for w in temp_list]
            temp_list = [w for w in temp_list if w != ' ' and w != '']
            # Verbs are stored lemmatized so lookups match lemmatized queries.
            # (inner index renamed from `i` to `j` to avoid shadowing)
            for j in range(len(temp_list)):
                temp_list[j] = self.lemmatizer.lemmatize(temp_list[j], pos='v')
            self.artificial_word_vec.append(temp_list)
        for i in range(0, self.artificial_word_sheet_adj.ncols):
            temp_list = self.artificial_word_sheet_adj.col_values(i)[2:]
            temp_list = [w.lower() for w in temp_list]
            temp_list = [w for w in temp_list if w != ' ' and w != '']
            self.artificial_word_adj.append(temp_list)

    def suggestion_word(self, word, sentence, model=2):
        """Return synonym candidates for `word` as used in `sentence`.

        Args:
            word: target word; must appear as a token of `sentence`.
            sentence: the sentence giving the word's context (used only to
                POS-tag the word).
            model: word2vec backend to query (0 = none, 1 = GoogleNews,
                2 = locally trained); should match the `model_type` the
                instance was built with.

        Returns:
            list of candidate strings: table entries first, then WordNet
            suggestions, then word2vec neighbours (duplicates across the
            three sources are not removed).
        """
        # POS-tag so that suggestions match the word's part of speech.
        sentence = nltk.word_tokenize(sentence)
        pos_tag_list = nltk.pos_tag(sentence)
        tag = pos_tag_list[sentence.index(word)][1]
        word = word.lower()

        # 1) suggestions from the hand-written replacement tables
        suggestion_list_artificial_rule = []
        if tag.startswith('VB'):
            word = self.lemmatizer.lemmatize(word, pos='v')
            for i in range(0, len(self.artificial_word_vec)):
                if word in self.artificial_word_vec[i]:
                    suggestion_list_artificial_rule = self.artificial_word_vec[i]
                    break
        elif tag.startswith('JJ'):
            word = self.lemmatizer.lemmatize(word, pos='a')
            for i in range(0, len(self.artificial_word_adj)):
                if word in self.artificial_word_adj[i]:
                    suggestion_list_artificial_rule = self.artificial_word_adj[i]
                    break
        elif tag.startswith('R'):
            word = self.lemmatizer.lemmatize(word, pos='r')
            # BUG FIX: the original looped over range(len(artificial_word_vec))
            # while indexing artificial_word_adj; when the verb sheet has more
            # columns than the adjective sheet this raised IndexError.
            for i in range(0, len(self.artificial_word_adj)):
                if word in self.artificial_word_adj[i]:
                    suggestion_list_artificial_rule = self.artificial_word_adj[i]
                    break
        else:
            word = self.lemmatizer.lemmatize(word, pos='n')

        # 2) suggestions from WordNet: gather hypernyms, hyponyms and
        #    similar-to synsets for every sense of the word.
        if tag.startswith('NN'):
            word_meaning_list = wn.synsets(word, pos=wn.NOUN)
        elif tag.startswith('VB'):
            word_meaning_list = wn.synsets(word, pos=wn.VERB)
        elif tag.startswith('JJ'):
            word_meaning_list = wn.synsets(word, pos=wn.ADJ)
        elif tag.startswith('R'):
            word_meaning_list = wn.synsets(word, pos=wn.ADV)
        else:
            word_meaning_list = wn.synsets(word)
        suggestion_ans_wordnet = []
        for word_meaning in word_meaning_list:
            lemmas_ans_wordnet = []
            word_meaning_hypernyms = word_meaning.hypernyms()
            word_meaning_hyponyms = word_meaning.hyponyms()
            word_meaning_similar = word_meaning.similar_tos()
            lemmas_ans_wordnet += word_meaning_hyponyms
            lemmas_ans_wordnet += word_meaning_hypernyms
            lemmas_ans_wordnet += word_meaning_similar
            for i in range(len(lemmas_ans_wordnet)):
                syn = lemmas_ans_wordnet[i]
                # take the first lemma of each related synset as its surface form
                suggestion_ans_wordnet.append(str(syn.lemmas()[0].name()))
        suggestion_ans_wordnet = data_treatment.pretrement_for_synonyms(suggestion_ans_wordnet)

        # 3) suggestions from word2vec (top-20 nearest neighbours)
        suggestion_list_word2vec = []
        if model == 0:
            suggestion_list_word2vec = []
        if model == 1:
            suggestion_list_word2vec = self.model1.most_similar([word], topn=20)
        elif model == 2:
            suggestion_list_word2vec = self.model2.most_similar([word], topn=20)
        suggestion_ans_word2vec = []
        for i in range(0, len(suggestion_list_word2vec)):
            # most_similar returns (word, score) pairs; keep only the word
            suggestion_ans_word2vec.append(suggestion_list_word2vec[i][0])
        suggestion_ans_word2vec = data_treatment.pretrement_for_synonyms(suggestion_ans_word2vec)
        # Replace underscores in multi-word entries and lemmatize each
        # candidate to match the query word's part of speech.
        for i in range(len(suggestion_ans_word2vec)):
            word = suggestion_ans_word2vec[i]
            word = word.replace("_", " ")
            if tag.startswith('NN'):
                word = self.lemmatizer.lemmatize(word, pos='n')
            elif tag.startswith('VB'):
                word = self.lemmatizer.lemmatize(word, pos='v')
            elif tag.startswith('JJ'):
                word = self.lemmatizer.lemmatize(word, pos='a')
            elif tag.startswith('R'):
                word = self.lemmatizer.lemmatize(word, pos='r')
            else:
                word = self.lemmatizer.lemmatize(word)
            suggestion_ans_word2vec[i] = word
        suggestion_ans_word2vec = list(set(suggestion_ans_word2vec))  # dedupe

        final_ans = []
        final_ans += suggestion_list_artificial_rule
        final_ans += suggestion_ans_wordnet
        final_ans += suggestion_ans_word2vec
        return final_ans
if __name__=="__main__":
    # CLI entry point: python Synonyms_suggestion.py <word> <sentence> <mode>
    #   word: the word to find synonyms for
    #   sent: the sentence giving the word's context
    #   mode: word2vec backend (0 = none, 1 = GoogleNews, 2 = local model);
    #         passed both to the constructor and to suggestion_word
    word = sys.argv[1]
    sent = sys.argv[2]
    mode = sys.argv[3]
    # word = "love"
    # sent = "i love you"
    # mode = 0
    synonyms = Synonyms_suggestion(int(mode))
    syn_list = synonyms.suggestion_word(word,sent,int(mode))
    print(syn_list)
{
"api_name": "gensim.models.KeyedVectors.load_word2vec_format",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "gensim.models",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "nltk.sent_tokenize",
"line_number": 26,
"usage_type": "call"
},
{
... |
70647233789 | """
This is the simplest example of training NER (named entity recognizer).
NER is responsible for recognizing "Apple" as a 'company', "George Bush" as a 'person', and so on.
THE GOAL for training model is to recognize in text 'iPhone' as a 'GADGET' (for example), and so on.
How do we learn the model to recognizing specific words in specific context?
Through showing to model a few hundreds of examples. Where we showing exactly word position and we label what it is.
For example in text 'Who is Shaka Khan?' we can label like this: {"entities": [(7, 17, "PERSON")]}
or
'I like London and Berlin.' And here: {"entities": [(7, 13, "LOC"), (18, 24, "LOC")]})
we are using tuple with text and dict inside.
"""
import plac
import random
import warnings
from pathlib import Path
import spacy
from spacy.util import minibatch, compounding
# training data
TRAIN_DATA = [
("Who is Shaka Khan?", {"entities": [(7, 17, "PERSON")]}),
("I like London and Berlin.", {"entities": [(7, 13, "LOC"), (18, 24, "LOC")]}),
]
# Here we can inject model name, output_dir and n_iter for main function, but for now we are working on empty model!
#@plac.annotations(
# model=("Model name. Defaults to blank 'en' model.", "option", "m", str),
# output_dir=("Optional output directory", "option", "o", Path),
# n_iter=("Number of training iterations", "option", "n", int),
#)
def main(model=None, output_dir=None, n_iter=100):
    """Load the model, set up the pipeline and train the entity recognizer.

    Args:
        model: name/path of an existing spaCy model to update, or None to
            start from a blank English pipeline.
        output_dir: directory to save the trained model into; when None the
            model is not persisted.
        n_iter: number of training passes over TRAIN_DATA.
    """
    if model is not None:
        nlp = spacy.load(model)  # load existing spaCy model
        print("Loaded model '%s'" % model)
    else:
        nlp = spacy.blank("en")  # create blank Language class
        print("Created blank 'en' model")
    # create the built-in pipeline components and add them to the pipeline
    # nlp.create_pipe works for built-ins that are registered with spaCy
    if "ner" not in nlp.pipe_names:
        ner = nlp.create_pipe("ner")
        nlp.add_pipe(ner, last=True)
    # otherwise, get it so we can add labels
    else:
        ner = nlp.get_pipe("ner")
    # add labels: register every entity type that appears in the training data
    for _, annotations in TRAIN_DATA:
        for ent in annotations.get("entities"):
            ner.add_label(ent[2])
    # get names of other pipes to disable them during training
    pipe_exceptions = ["ner", "trf_wordpiecer", "trf_tok2vec"]
    other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]
    # only train NER
    with nlp.disable_pipes(*other_pipes), warnings.catch_warnings():
        # show warnings for misaligned entity spans once
        warnings.filterwarnings("once", category=UserWarning, module='spacy')
        # reset and initialize the weights randomly – but only if we're
        # training a new model
        if model is None:
            nlp.begin_training()
        for itn in range(n_iter):
            # NOTE: shuffles the module-level TRAIN_DATA list in place
            random.shuffle(TRAIN_DATA)
            losses = {}
            # batch up the examples using spaCy's minibatch
            batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))
            for batch in batches:
                texts, annotations = zip(*batch)
                nlp.update(
                    texts,  # batch of texts
                    annotations,  # batch of annotations
                    drop=0.5,  # dropout - make it harder to memorise data
                    losses=losses,
                )
            print("Losses", losses)
    # test the trained model on the training sentences (sanity check only)
    for text, _ in TRAIN_DATA:
        doc = nlp(text)
        print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
        print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])
    # save model to output directory
    if output_dir is not None:
        output_dir = Path(output_dir)
        if not output_dir.exists():
            output_dir.mkdir()
        nlp.to_disk(output_dir)
        print("Saved model to", output_dir)
        # test the saved model: reload from disk and re-run the sanity check
        print("Loading from", output_dir)
        nlp2 = spacy.load(output_dir)
        for text, _ in TRAIN_DATA:
            doc = nlp2(text)
            print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
            print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])
if __name__ == "__main__":
plac.call(main)
# Expected output:
# Entities [('Shaka Khan', 'PERSON')]
# Tokens [('Who', '', 2), ('is', '', 2), ('Shaka', 'PERSON', 3),
# ('Khan', 'PERSON', 1), ('?', '', 2)]
# Entities [('London', 'LOC'), ('Berlin', 'LOC')]
# Tokens [('I', '', 2), ('like', '', 2), ('London', 'LOC', 3),
# ('and', '', 2), ('Berlin', 'LOC', 3), ('.', '', 2)] | koualsky/dev-learning | spacy/train_model/full_example.py | full_example.py | py | 4,664 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "spacy.load",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "spacy.blank",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "warnings.catch_warnings",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "warnings.filterwarning... |
2122328128 | import stripe
from celery import task
from django.conf import settings
from users.models import Buyer
@task
def create_customer(card_token, buyer_id):
    """Celery task: register a Stripe customer for a buyer.

    Creates a Stripe Customer from the buyer's email and the given card
    token, stores the resulting Stripe customer id on the Buyer row, and
    returns the buyer's primary key.
    """
    stripe.api_key = settings.STRIPE_API_KEY
    account = Buyer.objects.get(id=buyer_id)
    stripe_customer = stripe.Customer.create(email=account.email, source=card_token)
    account.customer_id = stripe_customer.id
    account.save()
    return account.id
| HackBulgaria/Web-Development-with-Django | week11/stripe_integration/payments/tasks.py | tasks.py | py | 420 | python | en | code | 25 | github-code | 6 | [
{
"api_name": "stripe.api_key",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings.STRIPE_API_KEY",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 12,
"usage_type": "name"
},
{
... |
9437207469 | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import glob
# NOTE(review): '\d' and '\e' in these non-raw strings are invalid escape
# sequences -- they happen to pass through unchanged today but should be
# raw strings (r'...') or forward slashes; confirm before changing.
file_path_th = '.\data_processing\ex\*through_E*.csv'
file_path_cr = '.\data_processing\ex\*cross_E*.csv'
csv_1 = []   # matched "through" CSV paths
csv_2 = []   # matched "cross" CSV paths
x_axis = []  # unused here (population commented out below)
th_sum = []  # per-file total of all whitespace-separated values ("through")
cr_sum = []  # per-file total of all whitespace-separated values ("cross")
# through: sum every numeric value in each through_E CSV
for filename_1 in glob.glob(file_path_th, recursive=True):
    csv_1.append(filename_1)
for file_1 in csv_1:
    filename_1 = file_1.split('\\')[-1][:-4]  # basename without ".csv"
    data_1 = pd.read_csv('.\data_processing\ex\%s.csv'%filename_1, names=['first'])
    final_sum_1 = 0
    integer_1 = list(range(0,list(data_1.shape)[0]))
    data_1.index = integer_1
    data_1.index.name = 'order'
    for i in range(0,list(data_1.shape)[0]):
        # each row is one whitespace-separated string of numbers
        find_value_1 = data_1['first'][i]
        find_value_1 = find_value_1.split()
        key_1 = list(map(float, find_value_1))
        sum_1 = 0
        for x in range(0,len(key_1)):
            sum_1 +=float(key_1[x])
        # print('sum of row %d = ' % i, sum_1)
        final_sum_1 = sum_1 + final_sum_1
    #x_axis.append(int(filename_1[-3:]))
    th_sum.append(final_sum_1)
    # print('value of %s = ' % filename, final_sum)
#print(x_axis)
#print(th_sum)
# cross: same aggregation for the cross_E CSVs
for filename_2 in glob.glob(file_path_cr, recursive=True):
    csv_2.append(filename_2)
for file_2 in csv_2:
    filename_2 = file_2.split('\\')[-1][:-4]
    data_2 = pd.read_csv('.\data_processing\ex\%s.csv'%filename_2,names=['first'])
    final_sum_2 = 0
    integer_2 = list(range(0,list(data_2.shape)[0]))
    data_2.index = integer_2
    data_2.index.name = 'order'
    # NOTE(review): this loop starts at row 1 while the "through" loop above
    # starts at row 0 -- confirm whether skipping the first row is intended.
    for i in range(1,list(data_2.shape)[0]):
        find_value_2 = data_2['first'][i]
        find_value_2 = find_value_2.split()
        key_2 = list(map(float, find_value_2))
        #print(find_value_2)
        sum_2 = 0
        for x in range(0,len(key_2)):
            sum_2 +=float(key_2[x])
        # print('sum of row %d = ' % i, sum_2)
        final_sum_2 = sum_2 + final_sum_2
    cr_sum.append(final_sum_2)
    # print('value of %s = ' % filename_2, final_sum_2)
#print(cr_sum)
# calculation: split ratios r = th/sqrt(th^2+cr^2), k = cr/sqrt(th^2+cr^2)
th_sum_square_list = []
cr_sum_square_list = []
th_cr_sum_list = []
r_value_list = []
k_value_list = []
for j in range(0,len(th_sum)):
    th_sum_square = th_sum[j]*th_sum[j]
    cr_sum_square = cr_sum[j]*cr_sum[j]
    th_sum_square_list.append(th_sum_square)
    cr_sum_square_list.append(cr_sum_square)
    th_cr_sum = th_sum_square + cr_sum_square
    th_cr_sum_list.append(th_cr_sum)
    r_value = np.sqrt(th_sum_square / th_cr_sum)
    k_value = np.sqrt(cr_sum_square / th_cr_sum)
    r_value_list.append(r_value)
    k_value_list.append(k_value)
def run(x, y, z):
    """Plot two series against x as connected scatter plots with a legend.

    Args:
        x: shared x values (distances in nm, per the axis label).
        y: first series -- presumably the r values; TODO confirm with caller.
        z: second series -- presumably the k values; TODO confirm with caller.
    """
    plt.scatter(x, y)
    plt.scatter(x, z)
    # BUG FIX: the original called plt.legend() with no labelled artists,
    # which shows no legend and emits a "No handles with labels" warning;
    # label the line plots so the legend has entries.
    plt.plot(x, y, label='r')
    plt.plot(x, z, label='k')
    plt.title('Power by r,k')
    plt.xlabel('distance [nm]', labelpad=10)
    plt.ylabel('Power [W]', labelpad=10)
    plt.legend()
    plt.grid(True)
    plt.show()
| jordan-kim/r_k_graph_fitting | src/fitting_r_k.py | fitting_r_k.py | py | 2,885 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "glob.glob",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_numbe... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.