seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
17968159051 | from pymysql import cursors
import math
from api.common.db import get_db
class UserAccess:
    """Data-access helper for the user_people table."""

    def __init__(self):
        # Shared application DB connection (pymysql-compatible API).
        self.db = get_db()

    def user_all(self, page, size):
        """Return one page of user_people rows plus pagination metadata.

        page is 1-based; size is the page length. Both may arrive as strings
        and are coerced with int().
        """
        cursor = self.db.cursor(cursor=cursors.DictCursor)
        query_sql = ' where 1=1 '
        # Parameterized LIMIT offset,count — offset is (page-1)*size.
        # (The original wrapped query_sql in a no-op format() call.)
        query_param_limit = [(int(page) - 1) * int(size), int(size)]
        cursor.execute("select * from user_people"
                       + query_sql +
                       " limit %s,%s", query_param_limit)
        info = cursor.fetchall()
        # NOTE(review): len(info) is the size of the returned page, not the
        # full table count, so totalCount/totalPage are only meaningful when
        # all rows fit on one page; a separate COUNT(*) query would fix this.
        # totalPage previously divided by a hard-coded 10 regardless of the
        # requested page size; it now uses int(size) consistently.
        data = {'data': info, 'totalCount': len(info),
                'totalPage': math.ceil(len(info) / int(size)),
                'pageNo': page, 'pageSize': size}
        return data
| Kepler-XX/flask_handle | api/dataaccess/user/userdataaccess.py | userdataaccess.py | py | 765 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "api.common.db.get_db",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pymysql.cursors.DictCursor",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pymysql.cursors",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "m... |
32803396247 | #!/usr/bin/env python
import os
from flask import Flask, render_template
import tmdb
import settings
# Application instance plus a single shared TMDB client (API key from settings).
app = Flask(__name__)
themoviedb = tmdb.TMDB(settings.API_KEY)
@app.route('/')
def index():
    """Homepage: render whatever the TMDB client reports as now playing."""
    movies = themoviedb.now_playing()
    return render_template('index.html', movies=movies)
@app.route('/movie/')
@app.route('/movie/<id>')
def movie(id=None):
    """Movie detail page; renders with empty details when no id is given.

    Fix: `cast` was previously only assigned inside the `if` branch, so
    hitting /movie/ without an id raised NameError at render time.
    """
    movie = None
    cast = None
    if id is not None:
        movie = themoviedb.movie_info(id)
        cast = themoviedb.movie_casts(id)
    return render_template('movie.html', movie=movie, cast=cast)
@app.route('/person/<id>')
def person(id):
    """Person detail page for the given id (looked up via the TMDB client)."""
    person = themoviedb.person_info(id)
    return render_template('person.html', person=person)
@app.context_processor
def inject_globals():
    """Expose the TMDB image URL table to every template as 'urls'."""
    return dict(urls=themoviedb.image_urls)
if __name__ == "__main__":
    app.debug = True  # NOTE(review): debug enabled unconditionally — disable in production
    # app.run()
    # Bind to PORT if defined, otherwise default to 5000.
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
| ddominguez/TMDb-api-demo | app.py | app.py | py | 986 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "tmdb.TMDB",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "settings.API_KEY",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "flask.render_template",
... |
25071838592 | from datetime import datetime
from sqlalchemy import func, select
from sqlalchemy.orm import SessionTransaction
from app.infra.constants import InventoryOperation
from app.infra import models
async def increase_inventory(
    product_id: int,
    *,
    quantity: int,
    transaction: SessionTransaction,
) -> models.InventoryDB:
    """Record a stock increase for a product and stage it on the session."""
    movement = models.InventoryDB(
        product_id=product_id,
        quantity=quantity,
        operation=InventoryOperation.INCREASE,
    )
    # Staged on the session only; the caller controls flush/commit.
    transaction.session.add(movement)
    return movement
async def total_inventory(
    product_id: int,
    *,
    transaction: SessionTransaction,
) -> int:
    """Return the summed inventory movements for product_id.

    Fix: SQL SUM yields NULL when the product has no inventory rows, which
    previously leaked out as None despite the -> int annotation; this now
    returns 0 in that case.
    """
    products_query = select(func.sum(models.InventoryDB.quantity)).where(
        models.InventoryDB.product_id == product_id,
    )
    products = await transaction.session.execute(products_query)
    row = products.fetchone()
    total = row[0] if row is not None else None
    return 0 if total is None else total
async def decrease_inventory(
    product_id: int,
    *,
    quantity: int,
    order_id: int,
    transaction: SessionTransaction,
) -> models.InventoryDB:
    """Stage a stock decrease (stored as a negative quantity) for an order."""
    negated_quantity = -quantity
    movement = models.InventoryDB(
        product_id=product_id,
        quantity=negated_quantity,
        operation=InventoryOperation.DECREASE.value,
        order_id=order_id,
        created_at=datetime.now(),
    )
    transaction.session.add(movement)
    # Flush immediately so the row (and any DB-side defaults) exist now.
    await transaction.session.flush()
    return movement
| jonatasoli/fast-ecommerce-back | app/inventory/repository.py | repository.py | py | 1,480 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "sqlalchemy.orm.SessionTransaction",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "app.infra.models.InventoryDB",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "app.infra.models",
"line_number": 15,
"usage_type": "name"
},
{
"a... |
18379371372 | import os, shutil, tkinter.filedialog, tqdm, hashlib
#Can be used to search for bitwise identical files and separate them from the main set
# Ask the user for the folder to scan; byte-identical duplicates get moved
# into a 'Duplicates' subfolder created inside it.
src_dir = tkinter.filedialog.askdirectory()
temp_dir = os.path.join(src_dir, 'Duplicates')
os.makedirs(temp_dir, exist_ok=True)
# Set of SHA-256 digests seen so far. NOTE(review): shadows the builtin map().
map = set()
def hash(file):
    """Return the SHA-256 hex digest of a file's contents.

    Reads the file in fixed-size chunks so arbitrarily large files never
    need to fit in memory (the previous version read everything at once).
    NOTE(review): the name shadows the builtin hash(); kept for the callers
    below.
    """
    digest = hashlib.sha256()
    with open(file, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()
# Walk the top level of the chosen folder; the first file with a given
# digest stays in place, later byte-identical copies are moved away.
for file in tqdm.tqdm(os.listdir(src_dir), smoothing=0, desc='Проверка'):
    # NOTE(review): substring test, so 'song.mp3.bak' also matches;
    # file.endswith('.mp3') would be stricter.
    if '.mp3' in file:
        src_file = os.path.join(src_dir, file)
        # NOTE(review): 'sum' shadows the builtin of the same name.
        sum = hash(src_file)
        if sum in map:
            dst_file = os.path.join(temp_dir, file)
            shutil.move(src_file, dst_file)
        else:
            map.add(sum)
| Genos-Noctua/Scripts | Duplicates.py | Duplicates.py | py | 793 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tkinter.filedialog.filedialog.askdirectory",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog.filedialog",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "tkinter.filedialog",
"line_number": 3,
"usage_type": "name"
... |
14376982322 | import warnings
from argparse import ArgumentParser
from os.path import join
import joblib
import numpy
from optuna import create_study
from sklearn.impute import SimpleImputer
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import tensorflow as tf
# Local packages
try:
import RARinterpret
except ModuleNotFoundError:
import sys
sys.path.append("../")
import RARinterpret
# Argument parsing
# Command-line interface: feature set, regression target, Optuna trial
# budget, cross-validation layout and neural-network training controls.
parser = ArgumentParser()
parser.add_argument("--features", type=str)
parser.add_argument("--target", type=str, choices=["Vobs", "gobs"])
parser.add_argument("--n_trials", type=int)
parser.add_argument("--n_splits", type=int)
parser.add_argument("--test_size", type=float)
parser.add_argument("--epochs", type=int)
parser.add_argument("--patience", type=int)
parser.add_argument("--seed", type=int, default=42)
args = parser.parse_args()
features = RARinterpret.parse_features(args.features)

# Set up paths
dumpdir = "/mnt/extraspace/rstiskalek/rar/nn"  # NOTE(review): not used below — confirm
fout = join("../results/hyper", "{}_{}_{}.p".format("NN", args.target,
                                                    args.features))

# Load the RAR data, build reproducible train/test split masks, and build the
# design matrix with per-point variances appended as extra columns.
frame = RARinterpret.RARFrame()
test_masks = RARinterpret.make_test_masks(frame["index"], args.n_splits,
                                          test_size=args.test_size,
                                          random_state=args.seed)
X_, y, features = frame.make_Xy(target=args.target, features=features,
                                append_variances=True, dtype=numpy.float32)
def reg_from_trial(trial, X):
    """Build a PLModel whose width and dropout rate come from the Optuna trial.

    Uses a cosine-decay-with-restarts learning-rate schedule and AMSGrad Adam.
    X is only used by from_hyperparams to size the network input.
    """
    width = trial.suggest_int("width", 4, 128)
    dropout_rate = trial.suggest_float("dropout_rate", 0.001, 0.1)
    schedule = tf.keras.optimizers.schedules.CosineDecayRestarts(
        initial_learning_rate=0.005, first_decay_steps=500, alpha=1e-3)
    opt = tf.keras.optimizers.Adam(learning_rate=schedule, amsgrad=True)
    return RARinterpret.PLModel.from_hyperparams(X, layers=[width], opt=opt,
                                                 dropout_rate=dropout_rate,)
def objective(trial):
    """Optuna objective: summed test loss over all CV splits (minimized).

    For each split, imputes missing values (fitting the imputer on the
    training rows only), trains a fresh regressor with the trial's
    hyperparameters, and accumulates its test-set score.
    """
    loss = 0.
    for n in range(args.n_splits):
        train, test = RARinterpret.train_test_from_mask(test_masks[n, :])
        # Run the imputer. Train only on train and apply everywhere.
        X = numpy.copy(X_)
        imputer = SimpleImputer()
        imputer.fit(X[train])  # Fit only on train
        X = imputer.transform(X)
        # Create the regressor and score it
        reg = reg_from_trial(trial, X[train])
        reg.train(X[train], y[train], epochs=args.epochs,
                  patience=args.patience, batch_fraction=1/3,
                  verbose=False)
        loss += reg.score(X[test], y[test])
    return loss
study = create_study(direction="minimize")
study.optimize(objective, n_trials=args.n_trials)

# Evaluate the average scaled loss: rescale each trial's summed loss by the
# ratio of the full sample size to the total number of test points.
loss = numpy.asanyarray([tr.value for tr in study.trials])
loss *= len(frame) / test_masks.sum()

# Persist the full trial history, best hyperparameters and scaled losses.
out = {"trials": study.trials,
       "best_params": study.best_params,
       "loss": loss,
       }
print("Saving to `{}`...".format(fout), flush=True)
joblib.dump(out, fout)
print("All finished!", flush=True)
| Richard-Sti/RARinterpret | scripts/run_nnparam.py | run_nnparam.py | py | 3,156 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "warnings.catch_warnings",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "warnings.simplefilter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.pat... |
16564352960 | import json
import mimetypes
import os
from django.shortcuts import render
from django.core import serializers
from .models import Log
from django.http import HttpResponse
from django.http import JsonResponse
from django.core.serializers.json import DjangoJSONEncoder
from datetime import date, datetime, timedelta
from django.db.models import Count, Q
# This view saves all logs to a file the serves the file
def jsonData(request):
    """Serialize every Log row to 'logs.json' and return it as a download.

    Fix: the file was previously reopened and handed to HttpResponse without
    ever being closed (a file-handle leak). The serialized string we already
    hold is now used as the response body directly — same bytes, no leak.
    """
    # Writing logs to the file 'logs.json'
    logs = Log.objects.all()
    logs_json = serializers.serialize("json", logs)
    file_path = 'logs.json'
    with open(file_path, "w") as out:
        out.write(logs_json)
    # sending file content to user
    mime_type, _ = mimetypes.guess_type(file_path)
    response = HttpResponse(logs_json, content_type=mime_type)
    response['Content-Disposition'] = "attachment; filename=%s" % file_path
    response['Content-Length'] = os.path.getsize(file_path)
    return response
# This view serves the homepage
def index(request):
    """Serve the homepage, counting per-session visits.

    The count shown is the number of visits *before* this one, matching the
    value stored pre-increment.
    """
    visits = request.session.get('num_visits', 0)
    request.session['num_visits'] = visits + 1
    return render(request, 'homepage.html', {'num_visits': visits})
def dashboard(request):
    """Render aggregate request statistics for the dashboard page."""
    total_requests = Log.objects.all().count()
    # Rows with an empty visited_by are anonymous visitors.
    total_anonymous_requests = Log.objects.filter(visited_by='').count()
    total_signed_in_requests = total_requests - total_anonymous_requests
    total_signed_in_users = Log.objects.values('visited_by').distinct().count() - 1 # -1 to discard anonymous users
    # if there are no authenticated users, total_signed_in_users will be -1
    if total_signed_in_users == -1:
        total_signed_in_users = 0
    # NOTE(review): timestamp__contains matches by substring; if timestamp is
    # a DateTimeField, timestamp__date=date.today() is the canonical spelling.
    total_requests_today = Log.objects.filter(timestamp__contains=date.today()).count()
    todays_date = datetime.today()
    week_before_date = datetime.today() - timedelta(days=7)
    # get number of requests of last 7 days
    total_requests_in_previous_week = Log.objects.filter(Q(timestamp__gte=week_before_date)&Q(timestamp__lte=todays_date)).count()
    # get diffents countries stored in database
    countries = Log.objects.values('location_country').distinct()
    context = {
        'total_requests': total_requests,
        'total_signed_in_requests': total_signed_in_requests,
        'total_anonymous_requests': total_anonymous_requests,
        'todays_date': todays_date.strftime("%Y-%m-%d"),
        'total_signed_in_users': total_signed_in_users,
        'total_requests_today': total_requests_today,
        'week_before_date': week_before_date.strftime("%Y-%m-%d"),
        'total_requests_in_previous_week': total_requests_in_previous_week,
        'countries': countries,
    }
    return render(request, 'dashboard.html', context)
# this view serves the data required to plot graph on dashboard
def graphData(request):
    """Return per-day request counts as JSON for the dashboard chart."""
    # Group rows by calendar date and count them per day.
    # NOTE(review): QuerySet.extra() is a legacy escape hatch; consider
    # annotate(day=TruncDate('timestamp')) instead.
    obj = Log.objects.extra({'timestamp' : "date(timestamp)"}).values('timestamp').annotate(total=Count('id'))
    # NOTE(review): this JSON-encodes a string that JsonResponse encodes
    # again (double encoding); the consumer must parse twice.
    data = json.dumps(list(obj), cls=DjangoJSONEncoder) # converting data to json
    return JsonResponse(data, safe=False) # sending data
# this view returns the number of requests of particular date
def requestsOnDate(request):
    """Count Log rows whose timestamp contains the 'date' query parameter."""
    # Getting data
    requests_on_date = Log.objects.filter(timestamp__contains=request.GET['date']).count()
    return JsonResponse({'requests_on_date': requests_on_date }, safe=False) # sending data
# this view returns the number of requests between two dates
def requestsBetweenDates(request):
    """Count Log rows with timestamps between the 'from_date' and 'to_date'
    query parameters (both bounds inclusive)."""
    from_date = request.GET['from_date']
    to_date = request.GET['to_date']
    # Getting data
    requests_between_dates = Log.objects.filter(Q(timestamp__gte=from_date)&Q(timestamp__lte=to_date)).count()
    return JsonResponse({'requests_between_dates': requests_between_dates }, safe=False) # sending data
# this view reuturns the number of requests came from different countries
def requestsFromCountry(request):
country = request.GET['country']
# Getting data
requests_from_countries = Log.objects.filter(location_country=country).count()
return JsonResponse({'requests_from_countries': requests_from_countries}, safe=False) # sending data | arishrehmankhan/visitor_logger | logger/views.py | views.py | py | 4,303 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "models.Log.objects.all",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "models.Log.objects",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "models.Log",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.core... |
42643440373 | import flask
from flask import request, jsonify
from config import config
import psycopg2
app = flask.Flask(__name__)
app.config["DEBUG"] = True  # NOTE(review): debug enabled unconditionally — disable for production
# Adds data for our catalogue in the form of a list of dictionaries
# Adds data for our catalogue in the form of a list of dictionaries
@app.route("/add_details", methods=["GET", "POST"])
def add_details_page():
    """Accept restaurant details from a form submission and persist them.

    Fix: flash and render_template were called as bare names but only the
    flask module itself is imported, so every request raised NameError;
    they are now reached through the imported flask module.
    """
    if request.method == "POST":
        place_details = request.form["name"]
        place_details_cuisine = request.form["cuisine"]
        place_details_address = request.form["address"]
        place_details_price_range = request.form["price_range"]
        place_details_webpage = request.form["webpage"]
        place_details_opening_times = request.form["opening_times"]
        # NOTE(review): insert_into_db is not defined anywhere in this file —
        # confirm it exists elsewhere or implement it before shipping.
        insert_into_db(place_details, place_details_cuisine, place_details_address, place_details_price_range, place_details_webpage, place_details_opening_times)
        flask.flash("Added details " + str(place_details) + " to our DB, thanks for your input!")
        return flask.render_template("add_details.html")
    return flask.render_template("add_details.html")
places = [
{'id': 0,
'name': 'Al Bab Mansour/Cafe Atlas',
'cuisine': 'Morrocan',
'address': 'St Nicholas Market, Bristol, BS1 1JQ',
'price range': '£-££',
'webpage': '',
'opening times': 'Mon–Sat: 12:00 – 16:00 (Closed Sunday)'},
{'id': 1,
'name': 'Asado',
'cuisine': 'Burgers',
'address': '90 Colston Street, Bristol, BS1 5BB',
'price range': '££-£££',
'webpage': 'http://www.asadobristol.com/',
'opening times': 'Sun: 09:00 – 23:00, Tue–Sat: 12:00 – 23:00 (Closed Monday)'},
{'id': 2,
'name': 'Beerd',
'cuisine': 'Pizza',
'address': '157-159 St Michaels Hill, Cotham, Bristol, BS2 8DB',
'price range': '££-£££',
'webpage': 'https://beerdbristol.com/',
'opening times': 'Sun: 15:00 – 22:00, Mon–Thur: 16:00 – 22:00, Fri & Sat: 12:00 – 22:00'},
{'id': 3,
'name': 'Beirut Mezze',
'cuisine': 'Lebanese / Halal',
'address': '13a Small Street, Bristol, BS1 1DE',
'price range': '££-£££',
'webpage': 'http://www.beirutmezze.com/',
'opening times': 'Sun: 17:30 – 22:45, Mon–Thur: 17:30 – 23:00, Fri & Sat: 17:00 – 23:00'},
{'id': 4,
'name': "Bertha's Pizza",
'cuisine': 'Pizza',
'address': 'The Old Gaol Stables, Cumberland Road, Bristol, BS1 6WW',
'price range': '££-£££',
'webpage': 'http://berthas.co.uk/bookings/?LMCL=i8_eeS',
'opening times': 'Wed & Thurs: 17:00 – 21:00, Fri & Sat: 11:30 – 14:00 and 17:00 – 22:00, Sun: 11:30 – 16:00 (Closed Monday & Tuesday)'},
{'id': 5,
'name': 'Bomboloni',
'cuisine': 'Italian',
'address': '225 Gloucester Road, Bishopston, Bristol, BS7 8NR',
'price range': '££-£££',
'webpage': 'https://bomboloni.net/',
'opening times': 'Tue–Sat: 10:00 – 22:00 (Closed Sunday and Monday)'},
{'id': 6,
'name': 'The Burger Joint',
'cuisine': 'Burgers',
'address': '83 Whiteladies Road, Clifton, Bristol, BS8 2NT & 240 North Street, Bedminster, Bristol, BS3 1JD',
'price range': '£££-££££',
'webpage': 'https://www.theburgerjoint.co.uk/',
'opening times': 'Sun–Tue: 12:00 – 22:00, Wed & Thurs: 12:00 – 22:30, Fri & Sat: 12:00 – 23:00'},
{'id': 7,
'name': 'Carribean Wrap',
'cuisine': 'Carribean',
'address': 'St Nicholas Market, Bristol, BS1 1JQ',
'price range': '£-££',
'webpage': 'https://www.facebook.com/Caribbean-Wrap-Bristol-577537682267740/',
'opening times': 'Mon–Sat: 12:00 – 17:00 (Closed Sunday)'},
{'id': 8,
'name': 'Chilli Daddy',
'cuisine': 'Street Food',
'address': '45-47 Baldwin Street, Bristol, BS1 1RA',
'price range': '£-££',
'webpage': 'https://www.chillidaddy.com/',
'opening times': 'Sun-Thur: 11:00 – 21:00, Fri & Sat: 11:00 – 22:00'},
{'id': 9,
'name': 'Eat A Pitta',
'cuisine': 'Mediterranean / Vegetarian',
'address': '1-3 Glass Arcade Street, Bristol, BS1 1LJ',
'price range': '£-££',
'webpage': 'https://www.eatapitta.co.uk/',
'opening times': 'Sun: 11:00 – 17:30, Mon–Sat: 11:00 – 20:00'},
{'id': 10,
'name': "Edna's Kitchen",
'cuisine': 'Cafe',
'address': 'Castle Park, Wine Street, Bristol, BS1 2DN',
'price range': '£-££',
'webpage': 'www.ednas-kitchen.com',
'opening times': 'Mon-Sun: 11:00 – 17:00'},
{'id': 11,
'name': 'Falafel King',
'cuisine': 'Mediterranean / Vegetarian',
'address': '6 Cotham Hill, Redland, Bristol, BS6 6LF',
'price range': '£-££',
'webpage': 'https://www.falafelkingbristol.com/',
'opening times': 'Sun: 11:00 – 19:30, Mon–Sat: 10:30 – 22:30'},
{'id': 12,
'name': 'Fishminster',
'cuisine': 'Fish & Chips',
'address': '267 North Street, Bedminster, Bristol, BS3 1JN',
'price range': '£-££',
'webpage': 'https://fishminster.co.uk/',
'opening times': 'Sun: 17:00 – 22:00, Mon–Wed: 11:30 – 14:00 and 17:00 – 22:30, Thu–Sat: 11:30 – 22:30'},
{'id': 13,
'name': 'Harbour and Browns',
'cuisine': 'International',
'address': 'Unit 13, Cargo 2, Museum Street Opposite the M Shed, Bristol, BS1 6ZA',
'price range': '££-£££',
'webpage': 'https://harbourandbrowns.com/',
'opening times': 'Sun: 12:00 – 16:00, Tue & Wed: 18:00 – 23:00, Thur & Fri: 12:00 – 23:00, Sat: 10:00 – 23:00 (Closed Monday)'},
{'id': 14,
'name': 'Matina',
'cuisine': 'Middle Eastern',
'address': 'St Nicholas Market, Bristol, BS1 1JQ',
'price range': '£-££',
'webpage': 'https://www.facebook.com/Matina-Middle-Eastern-1610754745830638/',
'opening times': 'Mon–Sat: 11:00 – 17:00 (Closed Sunday)'},
{'id': 15,
'name': 'Pickle Bristol',
'cuisine': 'Cafe',
'address': 'Underfall Yard, Hotwells, Bristol, BS1 6XG',
'price range': '£-££',
'webpage': 'https://en-gb.facebook.com/picklebristol/',
'opening times': 'Tue–Fri: 09:00 – 17:00, Sat & Sun: 09:00 – 18:00 (Closed Monday)'},
{'id': 16,
'name': 'Pie Minister',
'cuisine': 'British',
'address': '7 Broad Quay, Bristol, BS1 4DA',
'price range': '££-£££',
'webpage': 'https://pieminister.co.uk/restaurants/broadquay/',
'opening times': 'Sun: 12:00 – 21:00, Mon–Sat: 12:00 – 22:00'},
{'id': 17,
'name': 'Rice & Things',
'cuisine': 'Carribean',
'address': '120 Cheltenham Road, Bristol, BS6 5RW',
'price range': '££-£££',
'webpage': 'https://riceandthings.co.uk/',
'opening times': 'Sun: 11:00 – 20:00, Mon-Fri: 12:00 – 22:00, Sat: 12:00 – 23:00'},
{'id': 18,
'name': 'Rollin Vietnamese',
'cuisine': 'Vietnamese',
'address': '23-25 The Arcade, Broadmead, Bristol, BS1 3JD',
'price range': '£-££',
'webpage': 'https://www.facebook.com/rollin.vietnamese/',
'opening times': 'Mon–Sun: 10:00 – 19:00'},
{'id': 19,
'name': 'The Pickled Brisket',
'cuisine': 'Street Food',
'address': 'Cargo 2, Wapping Wharf, Bristol, BS1 6WE',
'price range': '££-£££',
'webpage': 'https://thepickledbrisket.co.uk/',
'opening times': 'Tue & Wed: 12:00 – 15:00, Thurs: 12:00 – 16:00, Fri & Sat: 12:00 – 18:00, Sun: 12:00 – 16:00 (Closed Monday)'},
{'id': 20,
'name': 'The Real Greek',
'cuisine': 'Greek',
'address': '84a Glass House, Cabot Circus, Bristol, BS1 3BX',
'price range': '££-£££',
'webpage': 'https://www.therealgreek.com/restaurants/bristol/',
'opening times': 'Sun: 12:00 – 20:00, Mon-Sat: 12:00 – 21:00'},
{'id': 21,
'name': 'The Rose of Denmark',
'cuisine': 'British',
'address': '6 Dowry Place, Hotwells, Bristol, BS8 4QL',
'price range': '££-£££',
'webpage': 'https://www.facebook.com/roseofdenmarkbristol/',
'opening times': 'Mon-Sun: 12:00 - 23:00'},
{'id': 22,
'name': 'The Woolly Cactus',
'cuisine': 'Mexican',
'address': 'The Keg Store, 1 Bath Street, Redcliffe, Bristol, BS1 6HL',
'price range': '£-££',
'webpage': 'www.thewoollycactus.co.uk',
'opening times': 'Mon–Fri: 11:00 – 15:00 (Closed Saturday & Sunday)'},
{'id': 23,
'name': 'Tuk Tuck',
'cuisine': 'Japanese / Asian / Korean',
'address': '5 St Stephens Street, Bristol, BS1 1EE',
'price range': '££-£££',
'webpage': 'https://www.facebook.com/TukTuck-737841626257740/',
'opening times': 'Sun: 15:00 – 22:00, Mon-Thur: 16:00 – 22:00, Fri & Sat: 12:00 – 22:00'}
]
def connect():
    """ Connect to the PostgreSQL database server """
    conn = None
    try:
        # read connection parameters
        params = config()
        # connect to the PostgreSQL server
        print('Connecting to the PostgreSQL database...')
        conn = psycopg2.connect(**params)
        # create a cursor
        cur = conn.cursor()
        # execute a statement
        print('PostgreSQL database version:')
        cur.execute('SELECT version()')
        # display the PostgreSQL database server version
        db_version = cur.fetchone()
        print(db_version)
        # close the communication with the PostgreSQL
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        # NOTE(review): catching Exception swallows every failure after
        # printing it; narrow to psycopg2.Error if callers must react.
        print(error)
    finally:
        # Always release the connection, whether or not the query succeeded.
        if conn is not None:
            conn.close()
            print('Database connection closed.')


# Smoke-test the database connection when run as a script.
if __name__ == '__main__':
    connect()
# def dict_factory(cursor, row):
# d = {}
# for idx, col in enumerate(cursor.description):
# d[col[0]] = row[idx]
# return d
# @app.route('/', methods=['GET'])
# def home():
# return "<h1>Bristol Fodder</h1><p>This site is a prototype API for places to eat in Bristol</p>"
# @app.route('/index', methods=['GET'])
# def index():
# return "<h1>Index Page</h1><p>Reserved Index Page</p>"
# # A route to return all of the available entries in our catalog.
# @app.route('/api/v1/entries/places/all', methods=['GET'])
# def api_all():
# conn = sqlite3.connect('places.db')
# conn.row_factory = dict_factory
# cur = conn.cursor()
# all_places = cur.execute('SELECT * FROM places;').fetchall()
# return jsonify(all_places)
# @app.errorhandler(404)
# def page_not_found(e):
# return "<h1>404</h1><p>The entry could not be found.</p>", 404
# @app.route('/api/v1/entries/places', methods=['GET'])
# def api_filter():
# query_parameters = request.args
# id = query_parameters.get('id')
# name = query_parameters.get('name')
# cuisine = query_parameters.get('cuisine')
# address = query_parameters.get('address')
# price_range = query_parameters.get('price_range')
# webpage = query_parameters.get('webpage')
# opening_times = query_parameters.get('opening_times')
# query = "SELECT * FROM places WHERE"
# to_filter = []
# if id:
# query += ' id=? AND'
# to_filter.append(id)
# if name:
# query += ' name=? AND'
# to_filter.append(name)
# if cuisine:
# query += ' cuisine=? AND'
# to.filter.append(cuisine)
# if address:
# query += ' address=? AND'
# to_filter.append(address)
# if price_range:
# query += ' price_range=? AND'
# to_filter.append(price_range)
# if webpage:
# query += ' webpage=? AND'
# to_filter.append(webpage)
# if opening_times:
# query += ' opening_times=? AND'
# to_filter.append(opening_times)V
# if not (id or name or cuisine or address or price_range or webpage or opening_times):
# return page_not_found(404)
# query = query[:-4] + ';'
# conn = sqlite3.connect('places.db')
# conn.row_factory = dict_factory
# cur = conn.cursor()
# results = cur.execute(query, to_filter).fetchall()
# return jsonify(results)
# app.run()
# NOTE(review): removed a leftover snippet that referenced the undefined
# names dsn, sql, value1 and value2 and therefore raised NameError as soon
# as this module was imported:
#     conn = psycopg2.connect(dsn)
#     cur = conn.cursor()
#     cur.execute(sql, (value1, value2))
| aerodigi/bristolunch | app/api.py | api.py | py | 12,119 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "flask.request.form... |
41612500645 | import logging
from typing import Optional
from pedantic import pedantic_class
from src.models.running_token import RunningToken
from src.models.text import Text
from src.models.token_state_condition import \
TokenStateCondition
from src.models.token_state_modification import \
TokenStateModification
@pedantic_class
class TokenStateRule:
    """Couples an optional condition with an optional token modification.

    check_and_modify applies the modification to a token only when the
    condition holds (or when no condition was configured at all).
    """
    def __init__(self, condition: Optional[TokenStateCondition] = None,
                 modification: Optional[TokenStateModification] = None,
                 text: Optional[Text] = None) -> None:
        # text is a human-readable label used only for display in __str__.
        self.text = text
        self.condition = condition
        self.modification = modification

    def _apply_modifications(self, token: RunningToken) -> RunningToken:
        """Apply the configured modification in place; no-op when unset."""
        if self.modification is not None:
            self.modification.change_token(token=token)
        return token

    def check_and_modify(self, token: RunningToken) -> RunningToken:
        """Return the token, modified iff this rule's condition passes."""
        logging.debug(f'Checking TSRule: {self}')
        # A missing condition counts as always-true.
        if self.condition is None or self.condition.check_condition(token=token):
            token = self._apply_modifications(token=token)
        else:
            logging.debug(f'Rule not meet! Token: {token}')
        return token

    def __str__(self) -> str:
        return f'TokenStateRule:[Text:{self.text}' \
               f' Conditions: {self.condition}' \
               f' Modifications: {self.modification}]'

    def __repr__(self) -> str:
        return self.__str__()
| rathaustreppe/bpmn-analyser | src/models/token_state_rule.py | token_state_rule.py | py | 1,452 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "typing.Optional",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "src.models.token_state_condition.TokenStateCondition",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 17,
"usage_type": "name"
},
{
... |
19942705134 | from rest_framework import generics, status
from rest_framework.response import Response
from .serializers import (
PostSerializer,
)
from rest_framework import (
status,
viewsets,
filters,
mixins,
generics,
)
from .models import (
Post,
)
from rest_framework.permissions import (
AllowAny,
IsAdminUser,
IsAuthenticated,
)
from .permissions import (
IsActive,
IsEmailVerified,
IsSelf
)
from rest_framework_simplejwt.authentication import JWTAuthentication
class PostViewSet(
    viewsets.GenericViewSet,
    mixins.CreateModelMixin,
    mixins.DestroyModelMixin,
    mixins.UpdateModelMixin,
    mixins.RetrieveModelMixin,
    mixins.ListModelMixin
):
    """Full CRUD endpoints for Post with per-action permission selection
    and manual ?offset=&limit= slicing on list()."""
    permission_classes = []  # replaced per-request by get_permissions()
    authentication_classes = [
        JWTAuthentication,
    ]
    serializer_class = PostSerializer
    queryset = Post.objects.all()
    filter_backends = [
        filters.OrderingFilter,
        filters.SearchFilter,
    ]
    """
    '^' Starts-with search.
    '=' Exact matches.
    '@' Full-text search. (Currently only supported Django's PostgreSQL backend.)
    '$' Regex search.
    """
    # '$' prefix => regex search over these fields.
    search_fields = [
        '$poststatus',
        '$postmessage',
    ]
    def get_permissions(self):
        """Select permission classes based on the current DRF action."""
        print(self.action)  # NOTE(review): leftover debug print
        if self.action in ['list']:
            self.permission_classes = [(IsAuthenticated) | (IsAuthenticated & IsAdminUser)]
        elif self.action in ['update', 'partial_update']:
            # Only the owner (IsSelf, active, email-verified) or an admin.
            self.permission_classes = [(IsAuthenticated & IsActive & IsEmailVerified & IsSelf) |
                                       (IsAuthenticated & IsAdminUser)]
        elif self.action in ['retrieve']:
            self.permission_classes = [(IsAuthenticated) | (IsAuthenticated & IsAdminUser)]
        elif self.action in ['create']:
            self.permission_classes = [IsAuthenticated & IsActive & IsEmailVerified]
        elif self.action in ['destroy']:
            self.permission_classes = [(IsAuthenticated & IsActive & IsEmailVerified & IsSelf) |
                                       (IsAuthenticated & IsAdminUser)]
        else:
            # Any other action is denied outright.
            self.permission_classes = [~AllowAny]
        return [permission() for permission in self.permission_classes]
    def get_queryset(self):
        queryset = super().get_queryset()
        return queryset
    def options(self, request, *args, **kwargs):
        options_result = super().options(request, *args, **kwargs)
        print(options_result)  # NOTE(review): leftover debug print
        return options_result
    def list(self, request, *args, **kwargs):
        """List posts, sliced by ?offset= (default 0) and ?limit= (default 10).

        NOTE(review): the entire queryset is serialized before slicing, which
        is O(table size) per request; slicing the queryset itself would scale.
        """
        queryset = self.filter_queryset(self.get_queryset())
        serializer = self.get_serializer(queryset, many=True)
        data = serializer.data
        offset = int(request.query_params.get("offset", 0))
        limit = int(request.query_params.get("limit", 10))
        return Response({
            "result": data[offset:offset + limit],
            "offset": offset,
            "limit": limit,
            "count": len(data)
        })
    def retrieve(self, request, *args, **kwargs):
        retrieve_result = super().retrieve(request, *args, **kwargs)
        return retrieve_result
    def create(self, request, *args, **kwargs):
        """Create a post, stamping the authenticated user onto the payload."""
        # Request data may be an immutable QueryDict; unlock it before edits.
        mutable = request.POST._mutable
        if not mutable:
            request.POST._mutable = True
        # Default is_allowed to True unless the client supplied it.
        if 'is_allowed' not in list(dict(request.data).keys()):
            request.data['is_allowed'] = True
        request.data['postuserid'] = request.user.id
        request.data['postusername'] = request.user.username
        create_result = super().create(request, *args, **kwargs)
        return create_result
    def update(self, request, *args, **kwargs):
        update_result = super().update(request, *args, **kwargs)
        return update_result
    def destroy(self, request, *args, **kwargs):
        destroy_result = super().destroy(request, *args, **kwargs)
        return destroy_result
class PostsByUserNameViewSet(
    viewsets.GenericViewSet,
    mixins.ListModelMixin,
    mixins.RetrieveModelMixin
):
    """Read-only post listing: list() returns the caller's own posts;
    retrieve() treats the URL pk as a *username* and returns that user's
    posts (not a single post by id)."""
    permission_classes = []  # replaced per-request by get_permissions()
    authentication_classes = [
        JWTAuthentication,
    ]
    serializer_class = PostSerializer
    queryset = Post.objects.all()
    filter_backends = [
        filters.OrderingFilter,
        filters.SearchFilter,
    ]
    """
    '^' Starts-with search.
    '=' Exact matches.
    '@' Full-text search. (Currently only supported Django's PostgreSQL backend.)
    '$' Regex search.
    """
    # '$' prefix => regex search over these fields.
    search_fields = [
        '$poststatus',
        '$postmessage',
    ]
    def get_permissions(self):
        """Authenticated users (or admins) may list and retrieve; all other
        actions are denied."""
        print(self.action)  # NOTE(review): leftover debug print
        if self.action in ['list']:
            self.permission_classes = [(IsAuthenticated) | (IsAuthenticated & IsAdminUser)]
        elif self.action in ['retrieve']:
            self.permission_classes = [(IsAuthenticated) | (IsAuthenticated & IsAdminUser)]
        else:
            self.permission_classes = [~AllowAny]
        return [permission() for permission in self.permission_classes]
    def get_queryset(self):
        queryset = super().get_queryset()
        return queryset
    def list(self, request, *args, **kwargs):
        """List the requesting user's own posts with ?offset=&limit= slicing."""
        queryset = self.filter_queryset(self.get_queryset().filter(postuserid=request.user))
        serializer = self.get_serializer(queryset, many=True)
        data = serializer.data
        offset = int(request.query_params.get("offset", 0))
        limit = int(request.query_params.get("limit", 10))
        return Response({
            "result": data[offset:offset + limit],
            "offset": offset,
            "limit": limit,
            "count": len(data)
        })
    def retrieve(self, request, *args, **kwargs):
        """Return posts for the username passed as the URL pk, sliced by
        ?offset=&limit= like list()."""
        queryset = self.filter_queryset(self.get_queryset().filter(postusername=kwargs['pk']))
        serializer = self.get_serializer(queryset, many=True)
        data = serializer.data
        offset = int(request.query_params.get("offset", 0))
        limit = int(request.query_params.get("limit", 10))
        return Response({
            "result": data[offset:offset + limit],
            "offset": offset,
            "limit": limit,
            "count": len(data)
        })
| sparkai-ca/realestate | post/views.py | views.py | py | 6,181 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.viewsets.GenericViewSet",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.viewsets",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "rest_framework.mixins.CreateModelMixin",
"line_number": 31,
"usage_ty... |
31268143302 | from selenium import webdriver
from time import sleep
class InstaBot:
#login in to instagram
    def __init__(self,username,pw):
        """Open Chrome, log in to Instagram, and dismiss the 'Not Now' prompt.

        NOTE(review): find_element_by_xpath was removed in Selenium 4 —
        this code requires Selenium 3.x.
        """
        self.username = username
        self.pw = pw
        self.friends = []
        self.driver = webdriver.Chrome()
        self.driver.get("https://instagram.com")
        sleep(2)
        # Some sessions land on a splash page with a 'Log in' link; fall back
        # to the direct login form when that link is absent.
        try:
            self.driver.find_element_by_xpath("//a[contains(text(), 'Log in')]").click()
            sleep(2)
            login_field = self.driver.find_element_by_xpath("//input[@name =\"username\"]").send_keys(username)
            pw_field = self.driver.find_element_by_xpath("//input[@name =\"password\"]").send_keys(pw)
        except:
            # NOTE(review): bare except hides real failures; catch
            # NoSuchElementException instead.
            login_field = self.driver.find_element_by_xpath("//input[@name =\"username\"]").send_keys(username)
            pw_field = self.driver.find_element_by_xpath("//input[@name =\"password\"]").send_keys(pw)
        self.driver.find_element_by_xpath('//button[@type="submit"]').click()
        sleep(4)
        self.driver.find_element_by_xpath("//button[contains(text(), 'Not Now')]").click()
#returns the usrs followers
    def getFollowers(self, usr):
        """Return the list of usernames following usr.

        Opens the followers modal, scrolls until its height stops growing
        (so every entry is loaded), then harvests the anchor texts.
        """
        try:
            self.driver.find_element_by_xpath("//a[@href= \"/" + usr + "/followers/\"" + "]").click()
        except:
            # Not on the profile page yet — navigate there first.
            self.driver.get("https://instagram.com/" + usr)
            self.driver.find_element_by_xpath("//a[@href= \"/" + usr + "/followers/\"" + "]").click()
        sleep(1)
        #scrolls all the way down to have all users loaded
        scrollBox = self.driver.find_element_by_xpath('/html/body/div[4]/div/div[2]')
        lastHeight, sBheight = 0, 1
        while(lastHeight != sBheight):
            lastHeight = sBheight
            sleep(1)
            sBheight = self.driver.execute_script("arguments[0].scrollTo(0,arguments[0].scrollHeight);return arguments[0].scrollHeight", scrollBox)
        links = scrollBox.find_elements_by_tag_name('a')
        followersNames = [name.text for name in links if name.text != '']
        # Close the modal before returning.
        self.driver.find_element_by_xpath("/html/body/div[4]/div/div[1]/div/div[2]/button").click()
        return followersNames
#returns all the people the usrs follow
def getFollowing(self, usr):
try:
self.driver.find_element_by_xpath("//a[@href= \"/" + usr + "/following/\"" + "]").click()
except:
self.driver.get("https://instagram.com/" + usr)
try:
self.driver.find_element_by_xpath("//a[@href= \"/" + usr + "/following/\"" + "]").click()
except:
return []
sleep(1)
#scrolls all the way down to have all users loaded
scrollBox = self.driver.find_element_by_xpath('/html/body/div[4]/div/div[2]')
lastHeight, sBheight = 0, 1
while(lastHeight != sBheight):
lastHeight = sBheight
sleep(1)
sBheight = self.driver.execute_script("arguments[0].scrollTo(0,arguments[0].scrollHeight);return arguments[0].scrollHeight", scrollBox)
links = scrollBox.find_elements_by_tag_name('a')
followingNames = [name.text for name in links if name.text != '']
self.driver.find_element_by_xpath("/html/body/div[4]/div/div[1]/div/div[2]/button").click()
return followingNames
#find the people that don't follow the usr but the usr follows
def getUnfollowers(self, usr):
self.driver.get("https://instagram.com/" + usr)
sleep(2)
followers = self.getFollowers(usr)
following = self.getFollowing(usr)
unfollowingList = [name for name in following if name not in followers and name != ""]
print(len(unfollowingList))
return print(unfollowingList)
#friends are the people who follow and are followed by the user
def getFriends(self, usr):
self.driver.get("https://instagram.com/" + usr)
sleep(2)
followers = self.getFollowers(usr)
following = self.getFollowing(usr)
friendsList = [name for name in following if name in followers and name != ""]
return friendsList
#find the people that follow the usr and the usr doens't follow - not really tested
def getFansList(self, usr):
self.driver.get("https://instagram.com/" + usr)
sleep(2)
followers = self.getFollowers(usr)
following = self.getFollowing(usr)
fanslist = [name for name in followers if name not in following and name != ""]
print(len(fanslist))
print(fanslist)
return fanslist
#find the people all usrs are followed by
def commonFollowers(self, usrs):
commonList = self.getFollowers(usrs[0])
i=1
for i in range(len(usrs)):
followersList = self.getFollowers(usrs[i])
commonList = [name for name in followersList if name in commonList and name != ""]
print(len(commonList))
print(commonList)
#find the people all usrs follow
def commonFollowing(self, usrs):
commonList = self.getFollowing(usrs[0])
i=1
for i in range(len(usrs)):
followingList = self.getFollowing(usrs[i])
commonList = [name for name in followingList if name in commonList and name != ""]
print(len(commonList))
print(commonList)
#find the people who the usrs follow and are followed by them in common between the users
def commonFriends(self, usrs):
if usrs[0] != self.username:
commonList = self.getFriends(usrs[0])
elif self.friends != []:
commonList = self.friends
else:
self.friends = self.getFriends(self.username)
commonList = self.getFriends(self.username)
i=1
for i in range(i, len(usrs)):
friendsList = self.getFriends(usrs[i])
commonList = [name for name in friendsList if name in commonList and name != ""]
return [len(commonList), commonList]
#make a json with the friends
def makejsonfile(self, usr):
self.friends = self.getFriends(usr)
f = open("instagramFriendsNetwork.json", "w+")
separator = "\", \""
f.write("[\n\t{\n\t\"" + usr + "\" : {\n\t" + "\"friends\": [\"" + separator.join(self.friends) + "\"]\n\t}\n},")
i = 0
for i in range(len(self.friends)):
commonFriendsLen, commonFriends = self.commonFriends([self.username, self.friends[i]])
f.write("{\n\t\"" + self.friends[i] + "\" : {\n\t" + "\"followers\": [\"" + separator.join(commonFriends) + "\"]\n\t, \"commonNumber\": " + str(commonFriendsLen) + "}\n},")
f.write("\n\t}\n]")
f.close()
def makejsonforuser(self, usr):
commonFriendsLen, commonFriends = self.checkusers(usr)
f = open("instagramFriendsNetwork.json", "a")
separator = "\", \""
f.write("{\n\t\"" + usr + "\" : {\n\t" + "\"followers\": [\"" + separator.join(commonFriends) + "\"]\n\t, \"commonNumber\": " + str(commonFriendsLen) + "}\n},")
f.close()
#not interesting - improved version of commonfriends
def checkusers(self, usr):
self.driver.get("https://instagram.com/" + usr)
sleep(2)
#Uses the "in common button"
try:
a = self.driver.find_element_by_xpath("//span[contains(text(), 'Followed by')]")
text = mybot.driver.execute_script('return arguments[0].innerText;', a)
number = int(text[text.index("+") + 2 : text.index("more")-1])
self.driver.find_element_by_xpath("//a[@href = \"/" + usr + "/followers/mutualOnly\"" + "]").click()
sleep(1)
self.driver.find_element_by_xpath("//a[@href = \"/" + usr + "/followers/mutualFirst\"" + "]").click()
followB = self.driver.find_element_by_xpath("//button[contains(text(), 'Follow')]")
self.driver.execute_script('arguments[0].scrollIntoView()', followB)
scrollBox = self.driver.find_element_by_xpath('/html/body/div[4]/div/div[2]')
links = scrollBox.find_elements_by_tag_name('a')
followersNames = [name.text for name in links if name.text != '']
followersNames = followersNames[:number+3]
self.driver.find_element_by_xpath("/html/body/div[4]/div/div[1]/div/div[2]/button").click()
except:
#When the user has less then 4 common following, if for each case 1, 2 or 3
try:
a = self.driver.find_element_by_xpath("//span[contains(text(), 'Followed by')]")
text = mybot.driver.execute_script('return arguments[0].innerText;', a)
except:
text = ''
followersNames = []
if ',' in text:
followersNames.append(text[text.index('by ') + 3:text.index(',')])
print(text[text.index('by ') + 3:text.index(',')])
text = text[text.index(',') + 2:]
elif 'and' in text:
followersNames.append(text[text.index('by ') + 3:text.index(' and')])
text = text[text.index(' and') + 5:]
else:
followersNames.append(text[text.index('by ') + 3:])
if ',' in text:
followersNames.append(text[:text.index(',')])
text = text[text.index(',') + 6:]
followersNames.append(text)
elif text != '':
followersNames.append(text)
print(followersNames)
followers = self.getFollowing(usr)
if self.friends == []:
self.friends = self.getFriends(self.username)
mutualFollowers = [name for name in followersNames if name in followers]
mutualFriends = [name for name in mutualFollowers if name in self.friends]
return [len(mutualFriends), mutualFriends]
#spam likes in the use
def likePhotos(self, usr, sleepTime):
sleep(sleepTime)
self.driver.get("https://instagram.com/" + usr)
sleep(2)
photos = self.driver.find_elements_by_class_name("_9AhH0")
photosPassed = []
i = 0
while i < len(photos) -1 :
photos[i].click()
photosPassed.append(photos[i])
lastPhoto = photos[i]
sleep(1)
likeButton = mybot.driver.find_element_by_xpath('/html/body/div[4]/div[2]/div/article/div[2]/section[1]/span[1]/button')
svg = likeButton.find_element_by_tag_name("svg")
#Like means the user doens't like the photo
if mybot.driver.execute_script("return arguments[0].getAttribute('aria-label')", svg) == "Like" :
likeButton = mybot.driver.find_element_by_xpath('/html/body/div[4]/div[2]/div/article/div[2]/section[1]/span[1]/button').click()
#scrolls the photo into view to load new photos
mybot.driver.execute_script('arguments[0].scrollIntoView()', photos[i])
#class for the photos
photos_aux = self.driver.find_elements_by_class_name("_9AhH0")
try:
#when you go down on the user's page the last photos will "unload"
i = photos_aux.index(lastPhoto) + 1
except:
print('error :' + str(i))
photos = photos_aux
self.driver.find_element_by_xpath('/html/body/div[4]/button[1]').click()
return photosPassed
# Module-level entry point: constructing the bot logs in immediately.
# NOTE(review): replace the placeholder credentials before running.
mybot = InstaBot('username', 'password')
| hlferreira/Selenium-Instagram-Bot | InstaScript.py | InstaScript.py | py | 11,626 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "time.sleep",
... |
29871462854 | import requests
from bs4 import BeautifulSoup
import pandas as pd
import json
import datetime
from .utils import *
def loadata(name, start=None,end=None,decode="utf-8"):
    """
    Load historical data for a single stock or index.

    Inputs:
        name   : string ticker; must match the notation from BVCscrap.notation().
        start  : string "YYYY-MM-DD" start date (optional).
        end    : string "YYYY-MM-DD" end date (optional).
        decode : decoder name, default "utf-8" (use "utf-8-sig" if needed).
    Output:
        pandas.DataFrame with columns Value, Min, Max, Variation, Volume.
    """
    code = get_code(name)
    if name != "MASI" and name != "MSI20":
        # Individual stock: the API takes an explicit date range, so fall
        # back to the full available history when none is given.
        if not (start and end):
            start = '2011-09-18'
            end = str(datetime.datetime.today().date())
        link = ("https://medias24.com/content/api?method=getPriceHistory&ISIN="
                + code + "&format=json&from=" + start + "&to=" + end)
        request_data = requests.get(link, headers={'User-Agent': 'Mozilla/5.0'})
        soup = BeautifulSoup(request_data.text, features="lxml")
        data = get_data(soup, decode)
    else:
        # Index: the API only serves the full 10-year history, so fetch it
        # all and slice afterwards when a range was requested.
        if name == "MASI":
            link = "https://medias24.com/content/api?method=getMasiHistory&periode=10y&format=json"
        else:
            link = "https://medias24.com/content/api?method=getIndexHistory&ISIN=msi20&periode=10y&format=json"
        request_data = requests.get(link, headers={'User-Agent': 'Mozilla/5.0'})
        soup = BeautifulSoup(request_data.text, features="lxml")
        full_history = get_index(soup, decode)
        data = produce_data(full_history, start, end) if (start and end) else full_history
    return data
def loadmany(*args,start=None,end=None,feature="Value",decode="utf-8"):
    """
    Load one feature of many equities into a single DataFrame.

    Inputs:
        *args  : ticker strings (or a single list of tickers); must match the
                 notation from BVCscrap.notation().
        start  : string "YYYY-MM-DD" start date (optional).
        end    : string "YYYY-MM-DD" end date (optional).
        feature: column to keep: Value, Min, Max, Variation or Volume.
        decode : decoder name, default "utf-8" (use "utf-8-sig" if needed).
    Output:
        pandas.DataFrame with one column per requested equity.
    """
    # Accept a single list of tickers instead of varargs; `args and`
    # guards against an IndexError when called with no arguments, and
    # isinstance (not type ==) also accepts list subclasses.
    if args and isinstance(args[0], list):
        args = args[0]
    data = pd.DataFrame(columns=args)
    for stock in args:
        value = loadata(stock, start, end, decode)
        data[stock] = value[feature]
    return data
def getIntraday(name,decode="utf-8"):
    """
    Load intraday data for a stock or index.

    Inputs:
        name   : stock ticker, "MASI" or "MSI20".
        decode : decoder name, default "utf-8" (use "utf-8-sig" if needed).
    """
    # Each of the three cases hits a dedicated API endpoint.
    if name == "MASI":
        link = "https://medias24.com/content/api?method=getMarketIntraday&format=json"
    elif name == "MSI20":
        link = "https://medias24.com/content/api?method=getIndexIntraday&ISIN=msi20&format=json"
    else:
        code = get_code(name)
        link = "https://medias24.com/content/api?method=getStockIntraday&ISIN=" + code + "&format=json"
    request_data = requests.get(link, headers={'User-Agent': 'Mozilla/5.0'})
    soup = BeautifulSoup(request_data.text, features="lxml")
    return intradata(soup, decode)
| AmineAndam04/BVCscrap | BVCscrap/load.py | load.py | py | 4,098 | python | en | code | 22 | github-code | 1 | [
{
"api_name": "datetime.datetime.today",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "bs4.Beaut... |
import json

# Load the sample data; `with` guarantees the file is closed even if
# json.load raises (the original left the handle open on error paths).
with open("./public/json-sample.json") as json_file:
    data = json.load(json_file)

# Print every customer record.
for customer in data['customers']:
    print(customer)
| jainvikram444/python-basic-examples-3.10.7 | 02-read-json.py | 02-read-json.py | py | 267 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 7,
"usage_type": "call"
}
] |
import os
import requests

# Read the candidate domains, one per line.
with open('domains.txt', 'r') as f:
    domains = [line.strip() for line in f]

with open('results.txt', 'w') as f:
    for domain in domains:
        url = f"http://{domain}"
        try:
            response = requests.get(url, timeout=10)
        except requests.RequestException as exc:
            print(f"{domain} request failed: {exc}")
            continue
        if response.status_code != 200:
            print(f"{domain} returned {response.status_code} status code.")
            continue
        # Probe the REMOTE server for an exposed .svn directory.  The
        # original used os.path.exists("<domain>/.svn"), which checks the
        # local filesystem and therefore could never find anything on the
        # target host.
        try:
            svn_response = requests.get(f"{url}/.svn/entries", timeout=10)
        except requests.RequestException as exc:
            print(f"{domain}/.svn/ probe failed: {exc}")
            continue
        if svn_response.status_code == 200:
            f.write(f"{domain}/.svn/\n")
            print(f"{domain}/.svn/ directory found.")
        else:
            print(f"{domain}/.svn/ directory not found.")
| agentjacker/svn-finder | sv.py | sv.py | py | 646 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_num... |
15971173081 | from aiida.orm import CalculationFactory, DataFactory
from base import ordered_unique_list
import os
class VaspMaker(object):
'''
simplifies creating a Scf, Nscf or AmnCalculation from scratch interactively or
as a copy or continuation of a previous calculation
further simplifies creating certain often used types of calculations
Most of the required information can be given as keyword arguments to
the constructor or set via properties later on.
The input information is stored in the instance and the calculation
is only built in the :py:meth:`new` method. This also makes it possible
to create a set of similar calculations in an interactive setting very
quickly.
:param structure: A StructureData node or a
(relative) path to either a .cif file or a POSCAR file. Defaults to
a new empty structure node recieved from calc_cls.
:type structure: str or StructureData
:keyword calc_cls: the class that VaspMaker will use when creating
Calculation nodes.
defaults to 'vasp.vasp5'.
if a string is given, it will be passed to aiida's CalculationFactory
:type calc_cls: str or vasp.BasicCalculation subclass
:keyword continue_from: A vasp calculation node with charge_density and
wavefunction output links. VaspMaker will create calculations that
start with those as inputs.
:type continue_from: vasp calculation node
:keyword copy_from: A vasp calculation. It's inputs will be used as
defaults for the created calculations.
:type copy_from: vasp calculation node
:keyword charge_density: chargedensity node from a previously run
calculation
:type charge_density: ChargedensityData
:keyword wavefunctions: wavefunctions node from a previously run
calculation
:type wavefunctions: WavefunData
:keyword array.KpointsData kpoints: kpoints node to use for input
:keyword str paw_family: The name of a PAW family stored in the db
:keyword str paw_map: A dictionary mapping element symbols -> PAW
symbols
:keyword str label: value for the calculation label
:keyword str computer: computer name, defaults to code's if code is
given
:keyword str code: code name, if any Calculations are given, defaults
to their code
:keyword str resources: defaults to copy_from.get_resources() or None
:keyword str queue: defaults to queue from given calculation, if any,
or None
.. py:method:: new()
:returns: an instance of :py:attr:`calc_cls`, initialized with the data
held by the VaspMaker
.. py:method:: add_settings(**kwargs)
Adds keys to the settings (INCAR keywords), if settings is already
stored, makes a copy.
Does not overwrite previously set keywords.
.. py:method:: rewrite_settings(**kwargs)
Same as :py:meth:`add_settings`, but also overwrites keywords.
.. py:attribute:: structure
Used to initialize the created calculations as well as other nodes
(like kpoints).
When changed, can trigger changes in other data nodes.
.. py:attribute:: calc_cls
Vasp calculation class to be used in :py:meth:`new`
.. py:attribute:: computer
.. py:attribute:: code
.. py:attribute:: queue
.. py:attribute:: settings
A readonly shortcut to the contents of the settings node
.. py:attribute:: kpoints
The kpoints node to be used, may be copied to have py:func:set_cell
called.
.. py:attribute:: wavefunction
.. py:attribute:: charge_density
.. py:attribute:: elements
Chemical symbols of the elements contained in py:attr:structure
'''
def __init__(self, *args, **kwargs):
self._init_defaults(*args, **kwargs)
self._calcname = kwargs.get('calc_cls')
if 'continue_from' in kwargs:
self._init_from(kwargs['continue_from'])
if 'copy_from' in kwargs:
self._copy_from(kwargs['copy_from'])
def _init_defaults(self, *args, **kwargs):
calcname = kwargs.get('calc_cls', 'vasp.vasp5')
if isinstance(calcname, (str, unicode)):
self.calc_cls = CalculationFactory(calcname)
else:
self.calc_cls = calcname
self.label = kwargs.get('label', 'unlabeled')
self._computer = kwargs.get('computer')
self._code = kwargs.get('code')
self._settings = kwargs.get('settings', self.calc_cls.new_settings())
self._set_default_structure(kwargs.get('structure'))
self._paw_fam = kwargs.get('paw_family', 'PBE')
self._paw_def = kwargs.get('paw_map')
self._paws = {}
self._set_default_paws()
self._kpoints = kwargs.get('kpoints', self.calc_cls.new_kpoints())
self.kpoints = self._kpoints
self._charge_density = kwargs.get('charge_density', None)
self._wavefunctions = kwargs.get('wavefunctions', None)
self._wannier_settings = kwargs.get('wannier_settings', None)
self._wannier_data = kwargs.get('wannier_data', None)
self._recipe = None
self._queue = kwargs.get('queue')
self._resources = kwargs.get('resources', {})
def _copy_from(self, calc):
ins = calc.get_inputs_dict()
if not self._calcname:
self.calc_cls = calc.__class__
self.label = calc.label + '_copy'
self._computer = calc.get_computer()
self._code = calc.get_code()
self._settings = ins.get('settings')
self._structure = ins.get('structure')
self._paws = {}
for paw in filter(lambda i: 'paw' in i[0], ins.iteritems()):
self._paws[paw[0].replace('paw_', '')] = paw[1]
self._kpoints = ins.get('kpoints')
self._charge_density = ins.get('charge_density')
self._wavefunctions = ins.get('wavefunctions')
self._wannier_settings = ins.get('wannier_settings')
self._wannier_data = ins.get('wannier_data')
self._queue = calc.get_queue_name()
self._resources = calc.get_resources()
def _set_default_structure(self, structure):
if not structure:
self._structure = self.calc_cls.new_structure()
elif isinstance(structure, (str, unicode)):
structure = os.path.abspath(structure)
if os.path.splitext(structure)[1] == '.cif':
self._structure = DataFactory(
'cif').get_or_create(structure)[0]
elif os.path.basename(structure) == 'POSCAR':
from ase.io.vasp import read_vasp
pwd = os.path.abspath(os.curdir)
os.chdir(os.path.dirname(structure))
atoms = read_vasp('POSCAR')
os.chdir(pwd)
self._structure = self.calc_cls.new_structure()
self._structure.set_ase(atoms)
else:
self._structure = structure
def _init_from(self, prev):
out = prev.get_outputs_dict()
self._copy_from(prev)
if 'structure' in out:
self.structure = prev.out.structure
self.rewrite_settings(istart=1, icharg=11)
self.wavefunctions = prev.out.wavefunctions
self.charge_density = prev.out.charge_density
self._wannier_settings = out.get('wannier_settings',
self._wannier_settings)
self._wannier_data = out.get('wannier_data', self.wannier_data)
def new(self):
calc = self.calc_cls()
calc.use_code(self._code)
calc.use_structure(self._structure)
for k in self.elements:
calc.use_paw(self._paws[k], kind=k)
calc.use_settings(self._settings)
calc.use_kpoints(self._kpoints)
calc.set_computer(self._computer)
calc.set_queue_name(self._queue)
if self._charge_density:
calc.use_charge_density(self._charge_density)
if self._wavefunctions:
calc.use_wavefunctions(self._wavefunctions)
if self._wannier_settings:
calc.use_wannier_settings(self._wannier_settings)
if self._wannier_data:
calc.use_wannier_data(self._wannier_data)
calc.label = self.label
calc.set_resources(self._resources)
return calc
# ~ def new_or_stored(self):
# ~ # start building the query
# ~ query_set = self.calc_cls.query()
# ~ # filter for calcs that use the same code
# ~ query_set = query_set.filter(inputs=self._code.pk)
# ~ # settings must be the same
# ~ for calc in query_set:
# ~ if calc.inp.settings.get_dict() != self._settings.get_dict():
# ~ # TODO: check structure.get_ase() / cif
# ~ # TODO: check paws
# ~ # TODO: check kpoints
# ~ # TODO: check WAVECAR / CHGCAR if applicable
# ~ # TODO: check wannier_settings if applicable
@property
def structure(self):
return self._structure
@structure.setter
def structure(self, val):
self._set_default_structure(val)
self._set_default_paws()
if self._kpoints.pk:
self._kpoints = self._kpoints.copy()
self._kpoints.set_cell(self._structure.get_ase().get_cell())
@property
def settings(self):
return self._settings.get_dict()
@property
def kpoints(self):
return self._kpoints
@kpoints.setter
def kpoints(self, kp):
self._kpoints = kp
self._kpoints.set_cell(self._structure.get_ase().get_cell())
def set_kpoints_path(self, value=None, weights=None, **kwargs):
'''
Calls kpoints' set_kpoints_path method with value, automatically adds
weights.
Copies the kpoints node if it's already stored.
'''
if self._kpoints.is_stored:
self.kpoints = self.calc_cls.new_kpoints()
self._kpoints.set_kpoints_path(value=value, **kwargs)
if 'weights' not in kwargs:
kpl = self._kpoints.get_kpoints()
wl = [1. for i in kpl]
self._kpoints.set_kpoints(kpl, weights=wl)
def set_kpoints_mesh(self, *args, **kwargs):
'''
Passes arguments on to kpoints.set_kpoints_mesh, copies if it was
already stored.
'''
if self._kpoints.pk:
self.kpoints = self.calc_cls.new_kpoints()
self._kpoints.set_kpoints_mesh(*args, **kwargs)
def set_kpoints_list(self, kpoints, weights=None, **kwargs):
'''
Passes arguments on to kpoints.set_kpoints, copies if it was already
stored.
'''
import numpy as np
if self._kpoints.pk:
self.kpoints = self.calc_cls.new_kpoints()
if not weights:
weights = np.ones(len(kpoints), dtype=float)
self._kpoints.set_kpoints(kpoints, weights=weights, **kwargs)
@property
def wavefunctions(self):
return self._wavefunctions
@wavefunctions.setter
def wavefunctions(self, val):
self._wavefunctions = val
self.add_settings(istart=1)
@property
def charge_density(self):
return self._charge_density
@charge_density.setter
def charge_density(self, val):
self._charge_density = val
self.add_settings(icharg=11)
@property
def wannier_settings(self):
return self._wannier_settings
@wannier_settings.setter
def wannier_settings(self, val):
self._wannier_settings = val
if 'lwannier90' not in self.settings:
self.add_settings(lwannier90=True)
@property
def wannier_data(self):
return self._wannier_data
@wannier_data.setter
def wannier_data(self, val):
self._wannier_data = val
@property
def code(self):
return self._code
@code.setter
def code(self, val):
self._code = val
self._computer = val.get_computer()
@property
def computer(self):
return self._computer
@computer.setter
def computer(self, val):
self._computer = val
@property
def queue(self):
return self._queue
@queue.setter
def queue(self, val):
self._queue = val
@property
def resources(self):
return self._resources
@resources.setter
def resources(self, val):
if isinstance(val, dict):
self._resources.update(val)
else:
self._resources['num_machines'] = val[0]
self._resources['num_mpiprocs_per_machine'] = val[1]
def add_settings(self, **kwargs):
if self._settings.pk:
self._settings = self._settings.copy()
for k, v in kwargs.iteritems():
if k not in self.settings:
self._settings.update_dict({k: v})
def rewrite_settings(self, **kwargs):
if self._settings_conflict(kwargs):
if self._settings.pk:
self._settings = self._settings.copy()
self._settings.update_dict(kwargs)
def _settings_conflict(self, settings):
conflict = False
for k, v in settings.iteritems():
conflict |= (self.settings.get(k) != v)
return conflict
def _set_default_paws(self):
for k in self.elements:
if k not in self._paws:
if self._paw_def is None:
raise ValueError("The 'paw_map' keyword is required. Pre-defined potential mappings are defined in 'aiida.tools.codespecific.vasp.default_paws'.".format(k))
try:
paw = self.calc_cls.Paw.load_paw(
family=self._paw_fam, symbol=self._paw_def[k])[0]
except KeyError:
raise ValueError("The given 'paw_map' does not contain a mapping for element '{}'".format(k))
self._paws[k] = paw
@property
def elements(self):
return ordered_unique_list(
self._structure.get_ase().get_chemical_symbols())
def pkcmp(self, nodeA, nodeB):
if nodeA.pk < nodeB.pk:
return -1
elif nodeA.pk > nodeB.pk:
return 1
else:
return 0
def verify_settings(self):
if not self._structure:
raise ValueError('need structure,')
magmom = self.settings.get('magmom', [])
lsorb = self.settings.get('lsorbit', False)
lnonc = self.settings.get('lnoncollinear', False)
ok = True
msg = 'Everything ok'
nmag = len(magmom)
nsit = self.n_ions
if lsorb:
if lnonc:
if magmom and not nmag == 3*nsit:
ok = False
msg = 'magmom has wrong dimension'
else:
if magmom and not nmag == nsit:
ok = False
msg = 'magmom has wrong dimension'
else:
if magmom and not nmag == nsit:
ok = False
msg = 'magmom has wrong dimension'
return ok, msg
def check_magmom(self):
magmom = self.settings.get('magmom', [])
st_magmom = self._structure.get_ase().get_initial_magnetic_moments()
lsf = self.noncol and 3 or 1
nio = self.n_ions
s_mm = nio * lsf
mm = len(magmom)
if magmom and st_magmom:
return s_mm == mm
else:
return True
def set_magmom_1(self, val):
magmom = [val]
magmom *= self.n_ions
magmom *= self.noncol and 3 or 1
self.rewrite_settings(magmom=magmom)
@property
def nbands(self):
return self.n_ions * 3 * (self.noncol and 3 or 1)
@property
def n_ions(self):
return self.structure.get_ase().get_number_of_atoms()
@property
def n_elec(self):
res = 0
for k in self._structure.get_ase().get_chemical_symbols():
res += self._paws[k].valence
return res
@property
def noncol(self):
lsorb = self.settings.get('lsorbit', False)
lnonc = self.settings.get('lnoncollinear', False)
return lsorb or lnonc
@property
def icharg(self):
return self.settings.get('icharg', 'default')
@icharg.setter
def icharg(self, value):
if value not in [0, 1, 2, 4, 10, 11, 12]:
raise ValueError('invalid ICHARG value for vasp 5.3.5')
else:
self.settings['icharg'] = value
@property
def recipe(self):
return self._recipe
@recipe.setter
def recipe(self, val):
if self._recipe and self._recipe != val:
raise ValueError('recipe is already set to something else')
self._init_recipe(val)
self._recipe = val
def _init_recipe(self, recipe):
if recipe == 'test_sc':
self._init_recipe_test_sc()
else:
raise ValueError('recipe not recognized')
def _init_recipe_test_sc(self):
self.add_settings(
gga='PE',
gga_compat=False,
ismear=0,
lorbit=11,
lsorbit=True,
sigma=0.05,
)
| greschd/aiida-vasp | aiida/orm.calc.job.vasp/maker.py | maker.py | py | 17,170 | python | en | code | null | github-code | 1 | [
{
"api_name": "aiida.orm.CalculationFactory",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 164,
"usage_type": "attribute"
},
{
"api_name": "os.path.... |
15712467618 | from cs207project.storagemanager.storagemanagerinterface import StorageManagerInterface
from cs207project.timeseries.arraytimeseries import ArrayTimeSeries
import numpy as np
import json
class FileStorageManager(StorageManagerInterface):
"""
This class inherits from the StorageManagerInterface ABC and implements it by putting 2-d numpy
arrays with 64-bit floats for both times and values onto disk.
NOTES
-----
PRE: It supports access to the time series in memory both on get and store calls by managing
a class variable self._id_dict
Examples:
---------
>>> fsm = FileStorageManager()
>>> ts = ArrayTimeSeries(times=[2,6,11,17,25], values=[10,12,22,34,40])
>>> unique_id = fsm.get_unique_id()
>>> fsm.store(unique_id, ts)
array(...
>>> stored_ts = fsm.get(unique_id)
>>> assert stored_ts[2] == 22.0
"""
def __init__(self,dir_path = ''):
"""
The manager maintains a persistent structure in memory and on disk which maps ids to the
appropriate files and keeps track of lengths. It creates an on disk json file to store
an id/length map or, if one already exists, updates the map.
"""
# set the file name for the time series id/length map
file_path = dir_path +'id_length_map.json'
self._dir_path = dir_path # Store optional dir path to store light curve within
# if the map file already exists, open it
try:
id_length_map = open(file_path, 'r')
self._id_dict = json.load(id_length_map)
except IOError:
# if the file does not exist, create a new dict to be saved to disk in the store() method
self._id_dict = dict()
def get_unique_id(self):
"""
Description
-----------
Method used to create a new and unique id.
Parameters
----------
self: Instance of subclass of StorageManagerInterface.
Returns
-------
int : the newly created unique id
"""
# start the ids at 1
i = 1
# loop through the id/length map to determine the next unique id
while True:
# this string represents the name of the file stored on disk for this time series
new_id = 'ts_datafile_' + str(i)
# this is a unique id, return it
if new_id not in self._id_dict:
return new_id
# the id was not unique, increment and continue the loop
i += 1
def store(self, id, t):
"""
Description
-----------
Method used to store a time series using the storage manager.
Parameters
----------
self: Instance of subclass of StorageManagerInterface.
id : int
Used as an identification of a particular time series being stored.
t : SizedContainerTimeSeriesInterface
A time series associated with SizedContainerTimeSeriesInterface
that allows for time series data persistence.
Returns
-------
SizedContainerTimeSeriesInterface
"""
# verify that the provided id is an int and convert it to a string
if isinstance(id, int):
id = str(id)
# convert the time series to 2-d numpy array with 64-bit floats for both times and values
ts = np.vstack((t.times(), t.values())).astype(np.float64)
# save the time series to disk as a binary file in .npy format
np.save(self._dir_path + str(id), ts)
# update the id/length map in memory for this store
self._id_dict[id] = len(t.times())
# update the id/length map on disk for this store
# store the map as a json file
with open(self._dir_path + "id_length_map.json", "w") as outfile:
json.dump(self._id_dict, outfile)
# return this instance of SizedContainerTimeSeriesInterface
return ts
def size(self, id):
"""
Description
-----------
Method used to return the size of a particular time series stored based on the
provided id.
Parameters
----------
self: Instance of subclass of StorageManagerInterface.
id : int
The id of the time series of interest.
Returns
-------
int : the size of the time series in question.
Notes
-----
POST: returns -1 if no time series is found using the provided id
"""
# the id should be a string
if not isinstance(id, str):
id = str(id)
# if there is a time series file for the provided id, return the size
if id in self._id_dict:
return self._id_dict[id]
# no time series file was found, return -1
else:
return -1
def get(self, id):
    """Fetch the time series stored under *id*.

    Parameters
    ----------
    self : instance of a StorageManagerInterface subclass.
    id : int or str
        Identifier of the time series of interest.

    Returns
    -------
    ArrayTimeSeries or None
        The stored series, or None when no series with this id exists.
    """
    key = str(id)
    # guard clause: unknown ids yield None rather than raising
    if key not in self._id_dict:
        return None
    # the saved .npy holds a 2-row array: row 0 is times, row 1 is values
    times, values = np.load(self._dir_path + key + ".npy")
    return ArrayTimeSeries(times, values)
def get_ids(self):
    """Return the ids of previously stored time series.

    Note: this is a ``dict_keys`` view over the in-memory id/length map
    (not a list, despite the historical wording); it reflects later
    stores made through this manager.
    """
    return self._id_dict.keys()
"""
Create a single instance of the FileStorageManager class. This is used in
SMTimeSeries for delegation in methods that are implemented to satisfy interface
requirements for SizedContainerTimeSeriesInterface.
"""
FileStorageManagerSingleton = FileStorageManager()
| gitrdone4/cs207project | cs207project/storagemanager/filestoragemanager.py | filestoragemanager.py | py | 5,354 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cs207project.storagemanager.storagemanagerinterface.StorageManagerInterface",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 104,
"usage_type": "ca... |
72325948193 | #!/usr/bin/env python
"""
Created on Thu Aug 25 21:43:45 2016
@author: John Swoboda
"""
import numpy as np
import scipy as sp
import scipy.constants as spconst
import matplotlib.pylab as plt
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("notebook")
from ISRSpectrum import Specinit
def main():
    """Plot incoherent-scatter spectra of an O+/e- plasma for several
    Te/Ti ratios and mark the ion-acoustic frequency of each, saving the
    figure to ``DifferentTemps.png``."""
    Ne = 1e11
    Ti = 1.1e3
    Te = 3e3
    # rows: [ion density, ion temperature], [electron density, electron temperature]
    databloc = np.array([[Ne, Ti], [Ne, Te]])
    species = ['O+', 'e-']
    # Te/Ti ratios to sweep: 1, 2 and 50
    mult = np.arange(1, 4)
    mult[-1] = 50
    # radar system description (AMISR-like)
    dict1 = {'name': 'AMISR', 'Fo': 449e6, 'Fs': 50e3, 'alpha': 70.}
    ISS1 = Specinit(centerFrequency = dict1['Fo'], bMag = 0.4e-4, nspec=256, sampfreq=dict1['Fs'],dFlag=True)
    (figmplf, axmat) = plt.subplots(1, 1,figsize=(8, 6), facecolor='w')
    lines = []
    # Bragg wavenumber is loop-invariant: hoisted out of the loop.
    # (The original also precomputed an ion-acoustic speed here that was
    # overwritten before first use; that dead assignment and the unused
    # `labels` list were removed.)
    k = 2*dict1['Fo']/spconst.c
    for ima, imult in enumerate(mult):
        databloc[1, 1] = Ti*imult
        # ion-acoustic speed for O+ (mass 16*m_p) at this Te/Ti ratio
        Cia = np.sqrt(spconst.k*(imult*Ti+Ti)/(16.*spconst.m_p))
        xloc = np.array([-k*Cia, k*Cia])
        (omeg, spec) = ISS1.getspecsep(databloc, species, vel = 0.0, alphadeg=dict1['alpha'], rcsflag=False)
        if ima == 0:
            axmat.set_xlabel('f in kHz')
            axmat.set_ylabel('Amp')
            axmat.set_title('Spectra')
        # stem marks the +/- ion-acoustic frequency; plot draws the spectrum
        lines.append( axmat.stem(xloc*1e-3, np.ones(2)*np.amax(spec), linefmt='g--', markerfmt='go', basefmt=' ')[0])
        lines.append( axmat.plot(omeg*1e-3, spec, label='Output', linewidth=5)[0])
    plt.savefig('DifferentTemps.png')


if __name__ == '__main__':
    main()
| jswoboda/ISRSpectrum | Examples/ionetemp.py | ionetemp.py | py | 1,589 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "seaborn.set_style",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "seaborn.set_context",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
... |
1671208309 | import datetime
from io import BytesIO
import discord
from discord.ext import commands
from dotenv import load_dotenv
from PIL import Image, ImageChops, ImageDraw, ImageFont
load_dotenv()
TOKEN = "OTA3MzAzNjAwMTM5Njc3Nzgw.YYlOUw.n6YYL1TRL3UNWao_fe9Ekakb8IA"
client = commands.Bot(command_prefix="!", help_command=None, intents=discord.Intents().all())
def circle(pfp, size=(215, 215)):
    """Return *pfp* resized to *size* and alpha-masked to a circle.

    The mask is drawn at 3x resolution and downscaled so the circular edge
    is antialiased; any transparency already present in the avatar is kept.
    """
    # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the same
    # filter and has existed as an alias for a long time.
    pfp = pfp.resize(size, Image.LANCZOS).convert("RGBA")
    bigsize = (pfp.size[0] * 3, pfp.size[1] * 3)
    mask = Image.new('L', bigsize, 0)
    draw = ImageDraw.Draw(mask)
    draw.ellipse((0, 0) + bigsize, fill=255)
    mask = mask.resize(pfp.size, Image.LANCZOS)
    # combine with the avatar's own alpha channel so existing transparency wins
    mask = ImageChops.darker(mask, pfp.split()[-1])
    pfp.putalpha(mask)
    return pfp
@client.event
async def on_ready():
    """Log a startup message once the bot has connected to Discord."""
    print("Bot Activo")
@client.command()
async def contract(ctx):
    """Reply with an embed listing the official token and NFT contract links."""
    embed_contract = discord.Embed(title="🧾**CONTRATOS**",
                                   description=":pushpin: TOKEN: https://bscscan.com/token/0xa1e34c4d25de38f0491f8b7b279c254f45e7d8e3\n\n"
                                               ":pushpin: NFTs: https://bscscan.com/token/0x9e3a158a357a6403aad454f501d69e86b04a2174",
                                   timestamp=datetime.datetime.utcnow())
    await ctx.send(embed=embed_contract)
@client.command()
async def redes(ctx):
    """Reply with an embed listing the project's official social channels."""
    embed_redes = discord.Embed(title="**REDES SOCIALES**",
                                description="Twitter : https://twitter.com/predatorprogame?s=21\n"
                                            "YouTube : https://youtube.com/channel/UCtanZ2hYPOBl1OOmNZkrPXQ\n"
                                            "Telegram Anuncios en español: https://t.me/PredatorNoticias\n"
                                            "Telegram Comunidad en español: https://t.me/predator_pro_es\n"
                                            "Telegram Comunidad en Ingles: https://t.me/predator_pro",
                                timestamp=datetime.datetime.utcnow())
    await ctx.send(embed=embed_redes)
@client.command()
async def download(ctx):
    """Reply with the official game download page link."""
    message = ("**PAGINA DE DESCARGA**\n\n"
               "https://predator-game.com/dashboard/download/")
    await ctx.send(message)
@client.command()
async def roi(ctx):
    """Reply with the ROI explainer video link."""
    message = ("**ROI DE PREDATORS**\n\n"
               "Video Explicativo: https://youtu.be/YOfqa7m4-IQ")
    await ctx.send(message)
@client.command()
async def web(ctx):
    """Reply with the official website link."""
    message = ("**Pagina Web**\n"
               "https://predator-game.com/")
    await ctx.send(message)
@client.command()
async def bug(ctx):
    """Reply with an embed pointing to the in-game bug report form."""
    embed_bug = discord.Embed(title="🧾**¿HAS REPORTADO UN BUG Y SE PIERDE ENTRE MENSAJES?**",
                              description="— Hemos creado un formulario de reporte de bugs SOBRE EL JUEGO.\n\n"
                                          "Si has encontrado uno, por favor, utiliza este enlace para hacerlo saber y solucionarlo.\n\n"
                                          ":pushpin: https://forms.gle/z3bFXWyTPBYBuutX6",
                              timestamp=datetime.datetime.utcnow())
    await ctx.send(embed=embed_bug)
@client.command()
async def b(ctx):
    """Reply with a short reminder that the bot's !help command exists."""
    embed_b = discord.Embed(title=":rocket: BOT PRO :rocket:",
                            description=":warning: Recuerden que pueden utilizar el comando **!help** para usar nuestro BOT de consultas básicas.:fire:",
                            timestamp=datetime.datetime.utcnow())
    await ctx.send(embed=embed_b)
@client.command(name="help")
async def help(ctx):
embed_help = discord.Embed(title="🧾**COMANDOS**",
description="!web >>> Muestra la página oficial de Predator\n"
"!roi >>> Muestra un video explicativo del ROI\n"
"!download >>> Muestra la página oficial de descarga de Predator\n"
"!redes >>> Muestra las redes oficiales de Predator\n"
"!contract >>> Muestra los contratos de Predator\n"
"!bug>>> Muestra el formulario para reportar los bugs!\n"
"!faq>>> Muestra las preguntas más frecuentes!\n"
"!pro >>> Muestra el precio del token\n"
"!whitepaper >>> Muestra los Whitepapers en Español e Ingles",
timestamp=datetime.datetime.utcnow())
await ctx.send(embed=embed_help)
@client.command()
async def whitepaper(ctx):
    """Reply with an embed linking the English and Spanish whitepapers."""
    embed_whitepaper = discord.Embed(title=":map: WHITEPAPER Y ROADMAP ACTUALIZADO :map:",
                                     description="— Luego de semanas preparando todo y algunos últimos ajustes, ya fue hecho público el nuevo whitepaper y el roadmap actualizado del proyecto.\n\n"
                                                 ":warning: Los enlaces al whitepaper en inglés y español son los siguientes:\n\n"
                                                 ":flag_us: WHITEPAPER EN INGLÉS : https://docs.predator-game.com/welcome-to-predator/introduction\n"
                                                 ":flag_es: WHITEPAPER EN ESPAÑOL: https://spdocs.predator-game.com/bienvenido-a-predator/master\n\n"
                                                 "El roadmap nuevo lo pueden ver en la web o en el whitepaper.\n\n"
                                                 ":rotating_light:Se debe mencionar que el documento tendrá cambios a futuro: más información que no haya sido añadida aún o futuras implementaciones no mencionadas.\n"
                                                 "Buen juego para todos :heart:",
                                     timestamp=datetime.datetime.utcnow())
    await ctx.send(embed=embed_whitepaper)
@client.event
async def on_member_join(member):
    """Render a welcome card for a new member and post it to the welcome
    channel, followed by a bilingual language-selection prompt.

    Note: the card is written to the fixed path "profile.png" before being
    uploaded, so concurrent joins can race on that file.
    """
    card = Image.open("card.png")
    # fetch the member's avatar bytes and crop them to a circle
    asset = member.avatar_url_as(size=128)
    data = BytesIO(await asset.read())
    pfp = Image.open(data)
    pfp = circle(pfp, (215, 215))
    card.paste(pfp, (425, 80))
    draw = ImageDraw.Draw(card)
    name = str(f"Bienvenido, {member.display_name}!")
    relleno = "Te uniste a la comunidad de predator!"
    font = ImageFont.truetype("Montserrat-MediumItalic.ttf", 30)
    draw.text((375, 330), name, font=font, fill="white")
    draw.text((255, 380), relleno, font=font, fill="white")
    card.save("profile.png")
    # look the welcome channel up once instead of twice
    channel = client.get_channel(888133258490040381)
    await channel.send(file=discord.File("profile.png"))
    await channel.send("Ve al canal **#pick-your-lenguage** para seleccionar tu idioma!\n\n"
                       "Go to **#pick-your-lenguage** to select a language!")
@client.command()
async def faq(ctx):
    """Reply with a single embed answering the most frequently asked questions."""
    embed_faq = discord.Embed(title=" PREDATOR GAME ($PRO)\n :warning:FAQ — Preguntas Frecuentes ",
                              description=":arrow_forward:¿QUE NECESITO PARA COMENZAR A JUGAR?\n "
                                          "R: Necesitas un NFT y al menos 50 $PRO para hacer las misiones diarias.\n \n "
                                          ":arrow_forward:¿DONDE PUEDO REGISTRARME?\n"
                                          "R: https://predator-game.com/market/#/auth/register\n\n"
                                          ":arrow_forward:¿CUÁNTO VALEN LOS NFTs?\n"
                                          "R: Están disponibles desde los 0.14 BNB a 1.1 BNB, también eventualmente hay cápsulas por minteo, con un valor de US$100 en $PRO (Cantidad definida por el Oráculo).\n\n"
                                          ":arrow_forward:¿COMO ACCEDO AL MARKET?\n"
                                          "R: https://predator-game.com/market/#/\n\n"
                                          ":arrow_forward:¿DE CUÁNTO TIEMPO ES EL ROI?\n"
                                          "R: Se estiman unos 30 a 45 días dependiendo de tu inversión inicial.\n\n"
                                          ":arrow_forward:¿CUÁNTO SE GANARÁ EN EL JUEGO?\n"
                                          "R: Las ganancias están entre 4 a 15 dólares diarios, variando según cuantos $PRO deposites dentro del juego y de tu desempeño y nivel a la hora de completar misiones.Puedes ganar más, pero tendrás que apostar y arriesgar a perder más dinero. También puedes ganar dinero con el farming, cada 7, 14 o 30 días según los stats de tu NFT.\n\n"
                                          ":arrow_forward:¿HAY FARMING?\n"
                                          "R: Sí, el apartado de farming se encuentra en nuestra web.\n\n"
                                          ":arrow_forward:¿HABRÁ STAKING?\n"
                                          "R: Sí, se está desarrollando un sistema de staking que beneficia a los top holders.\n\n"
                                          ":arrow_forward:¿PODRÉ COMPRAR NFTs CON PRO?\n"
                                          "R: Actualmente puedes comprar NFT en PRO usando el sistema de cápsulas.\n\n"
                                          ":arrow_forward:¿PARA QUÉ SE USARÁ EL PRO?\n"
                                          "R: Para compra de NFT, powerups (por partida), armas, objetos de un solo uso, apuestas, torneos, staking, farming.\n\n"
                                          ":arrow_forward:¿PARA QUE SE UTILIZARA EL TOR?\n"
                                          "R: Para compra de NFT, skins únicos, power ups (de tiempo), apuestas. Se recibirán recompensas en TOR.\n"
                                          ":arrow_forward:¿HAY UN SISTEMA DE ORÁCULO?\n"
                                          "R: Si hay un sistema económico basado en oráculo.\n\n"
                                          ":arrow_forward:¿COMO TRANSFORMO MIS PRO A PS (Pro silver)?\n"
                                          "R: Debes apretar en el botón 'swap' dentro del juego e intercambiarlo. Recuerda que 1 $PRO=1000 PS.\n\n "
                                          ":arrow_forward:¿QUE ES $TOR?\n"
                                          "R: $TOR sera nuestro token secundario, que se usará principalmente para pagar las recompensas dentro del juego.\n\n"
                                          ":arrow_forward:¿VAMOS A RETIRAR EN $TOR O EN $PRO?\n"
                                          "R: Puedes retirar los dos simultáneamente sin ningún problema.\n\n"
                                          ":arrow_forward:¿QUE PASARA CON EL TOKEN $PRO?\n"
                                          "R: $PRO será nuestro token de gobernanza, es decir, de inversión y gasto.\n\n"
                                          ":arrow_forward:¿EL RETIRO DE MIS TOKENS SERA SOLO UNA VEZ POR MES?\n"
                                          "R: No. Puedes retirar cuando lo desees eligiendo tu tiempo y comision de retiro.\n\n"
                                          ":arrow_forward:¿HAY SISTEMA DE BECAS?\n"
                                          "R: Si. El sistema de becas se encuentra en etapa de desarrollo.\n\n"
                                          ":arrow_forward:¿EL MODO ESPECTADOR YA ESTA HABILITADO?\n"
                                          "R: El modo espectador se encuentra en testeos internos.\n\n"
                                          ":arrow_forward:¿CUANDO ES LA PREVENTA DEL TOKEN $TOR?\n"
                                          "R: La fecha de la preventa sera anunciada en los próximos dias.\n\n"
                                          ":arrow_forward:¿DONDE PUEDO ENCONTRAR EL WHITEPAPER?\n"
                                          "R: Puedes encontrarlo en nuestra web o siguiendo el siguiente link: https://docs.predator-game.com/welcome-to-predator/introduction",
                              timestamp=datetime.datetime.utcnow())
    await ctx.send(embed=embed_faq)
client.run(TOKEN)
| Federico-Tahan/probotot | main.py | main.py | py | 12,220 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "disco... |
5102741779 | from __future__ import division
import os, sys, time, random, argparse
from pathlib import Path
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True # please use Pillow 4.0.0 or it may fail for some images
from os import path as osp
import numbers, numpy as np
import init_path
import torch
import dlib
import cv2
from datetime import datetime
import imutils
import models
import datasets
from visualization import draw_image_by_points
from san_vision import transforms
from utils import time_string, time_for_file, get_model_infos
os.environ["CUDA_VISIBLE_DEVICES"]='0'
# define the face detector
DETECTOR = dlib.get_frontal_face_detector()
# define lip region
(LIPFROM, LIPTO) = (48, 68)
# define threshold for lip motion
HIGH_THRESHOLD = 0.65
LOW_THRESHOLD = 0.4
# calculate lip aspect ratio
def lip_aspect_ratio(lip):
    """Compute the lip aspect ratio (LAR) from the lip landmark points.

    The ratio compares the mean of two vertical mouth openings to the
    horizontal mouth width, analogous to the eye-aspect-ratio used for
    blink detection.
    """
    # vertical openings: landmark pairs (51, 59) and (53, 57)
    vertical_left = np.linalg.norm(lip[2] - lip[9])
    vertical_right = np.linalg.norm(lip[4] - lip[7])
    # horizontal width: mouth corners (49, 55)
    width = np.linalg.norm(lip[0] - lip[6])
    return (vertical_left + vertical_right) / (2.0 * width)
class SAN_Args():
    """Driver that runs SAN landmark detection plus lip-motion flagging on a
    single image or a whole video, writing annotated output to save_path."""

    def __init__(self, input_type, input, save_path=None):
        # input_type: 'IMAGE' (case-insensitive) selects the single-image
        # path; anything else is treated as a video file
        self.input_type = input_type
        self.input = input
        self.save_path = save_path

    def execute(self):
        """Process the input; for video, also write a *_LARs.txt file with
        one lip-aspect-ratio value per frame."""
        (_, tempfilename) = os.path.split(self.input)
        (filename, _) = os.path.splitext(tempfilename)
        # image input
        if self.input_type.upper() == 'IMAGE':
            temp_img = cv2.imread(self.input)
            temp_img = imutils.resize(temp_img, width=640)
            # evaluate() reads its input from disk, so round-trip through a temp file
            cv2.imwrite('temp.jpg', temp_img)
            args = Args(image='temp.jpg')
            _, img = evaluate(args)
            # timestamp the output filename to avoid clobbering earlier runs
            now = datetime.now()
            filename = filename + now.strftime("_%Y%m%d_%H%M%S_") + 'SAN'
            cv2.imwrite(self.save_path + filename + '.jpg', img)
            cv2.imshow("Image", img)
            cv2.waitKey(1000)
            cv2.destroyAllWindows()
            os.remove('temp.jpg')
        # video input
        else:
            # read original video
            VC = cv2.VideoCapture(self.input)
            FRAME_RATE = VC.get(cv2.CAP_PROP_FPS)
            # define output video with the source dimensions and frame rate
            FRAME_WIDTH = int(VC.get(cv2.CAP_PROP_FRAME_WIDTH))
            FRAME_HEIGHT = int(VC.get(cv2.CAP_PROP_FRAME_HEIGHT))
            now = datetime.now()
            filename = filename + now.strftime("_%Y%m%d_%H%M%S_") + 'SAN'
            out = cv2.VideoWriter(self.save_path + filename + '.mp4', cv2.VideoWriter_fourcc(*'mp4v'), FRAME_RATE, (FRAME_WIDTH, FRAME_HEIGHT))
            # per-frame lip-aspect-ratio log
            f = open(self.save_path + filename + "_LARs.txt","w")
            # process video frame by frame until EOF or the user quits
            while (VC.isOpened()):
                # read frames
                rval, frame = VC.read()
                if rval:
                    frame = imutils.resize(frame, width=640)
                    # evaluate() reads from disk, so each frame is spilled to a temp file
                    cv2.imwrite('frame.jpg', frame)
                    args = Args(image='frame.jpg')
                    lar, frame = evaluate(args)
                    # record lar
                    f.write(str(lar)+'\n')
                    # resize the annotated frame back to the writer's dimensions
                    frame = cv2.resize(frame, (FRAME_WIDTH, FRAME_HEIGHT), interpolation = cv2.INTER_AREA)
                    out.write(frame)
                    # show the frame
                    cv2.imshow("Frame", frame)
                    # control imshow lasting time
                    key = cv2.waitKey(1) & 0xFF
                    # 'q' aborts the video early
                    if key == ord("q"):
                        break
                else:
                    break
            # cleanup
            cv2.destroyAllWindows()
            os.remove('frame.jpg')
            VC.release()
            out.release()
            f.close()
class Args():
    """Argument bundle for a single-image SAN evaluation: image path, fixed
    model checkpoint path, face bounding box (auto-detected when absent)
    and the CPU flag."""

    def __init__(self, image, face=None, cpu=False):
        self.image = image
        self.model = 'SAN/snapshots/SAN_300W_GTB_itn_cpm_3_50_sigma4_128x128x8/checkpoint_49.pth.tar'
        self.face = face
        self.locate_face()
        self.cpu = cpu

    def locate_face(self):
        """Detect a face box [left, top, right, bottom] with dlib when none
        was supplied; keeps the first detection."""
        # identity check (`is None`) is the correct idiom here instead of
        # the original `== None` equality comparison
        if self.face is None:
            img = cv2.imread(self.image)
            img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            rects = DETECTOR(img_rgb, 0)
            if len(rects) == 0:
                # NOTE(review): self.face stays None here, so evaluate() will
                # later fail on len(args.face) -- confirm whether raising an
                # exception would be preferable to this print-and-continue.
                print('Fail to find a face!')
            else:
                rect = rects[0]
                left = rect.tl_corner().x
                top = rect.tl_corner().y
                right = rect.br_corner().x
                bottom = rect.br_corner().y
                self.face = [left, top, right, bottom]
def evaluate(args):
    """Run the SAN landmark detector on args.image and measure lip motion.

    Parameters
    ----------
    args : object with attributes image (path), model (checkpoint path),
        face ([x1, y1, x2, y2]) and cpu (bool).

    Returns
    -------
    (float, numpy.ndarray)
        The lip aspect ratio and the annotated BGR image (lip contour drawn;
        a warning caption added when the LAR crosses either threshold).
    """
    if not args.cpu:
        assert torch.cuda.is_available(), 'CUDA is not available.'
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.benchmark = True
    print ('The image is {:}'.format(args.image))
    print ('The model is {:}'.format(args.model))
    snapshot = Path(args.model)
    assert snapshot.exists(), 'The model path {:} does not exist'
    print ('The face bounding box is {:}'.format(args.face))
    assert len(args.face) == 4, 'Invalid face input : {:}'.format(args.face)
    if args.cpu: snapshot = torch.load(snapshot, map_location='cpu')
    else       : snapshot = torch.load(snapshot)
    # mean_fill is computed but never used below -- kept for fidelity
    mean_fill = tuple( [int(x*255) for x in [0.5, 0.5, 0.5] ] )
    normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                     std=[0.5, 0.5, 0.5])
    # training-time hyper-parameters travel inside the checkpoint
    param = snapshot['args']
    eval_transform = transforms.Compose([transforms.PreCrop(param.pre_crop_expand), transforms.TrainScale2WH((param.crop_width, param.crop_height)), transforms.ToTensor(), normalize])
    net = models.__dict__[param.arch](param.modelconfig, None)
    if not args.cpu: net = net.cuda()
    # strip any DataParallel 'module.' prefixes before loading weights
    weights = models.remove_module_dict(snapshot['state_dict'])
    net.load_state_dict(weights)
    dataset = datasets.GeneralDataset(eval_transform, param.sigma, param.downsample, param.heatmap_type, param.dataset_name)
    dataset.reset(param.num_pts)
    print ('[{:}] prepare the input data'.format(time_string()))
    [image, _, _, _, _, _, cropped_size], meta = dataset.prepare_input(args.image, args.face)
    print ('[{:}] prepare the input data done'.format(time_string()))
    print ('Net : \n{:}'.format(net))
    # network forward
    with torch.no_grad():
        if args.cpu: inputs = image.unsqueeze(0)
        else       : inputs = image.unsqueeze(0).cuda()
        batch_heatmaps, batch_locs, batch_scos, _ = net(inputs)
        #print ('input-shape : {:}'.format(inputs.shape))
        flops, params = get_model_infos(net, inputs.shape, None)
        print ('\nIN-shape : {:}, FLOPs : {:} MB, Params : {:}.'.format(list(inputs.shape), flops, params))
        flops, params = get_model_infos(net, None, inputs)
        print ('\nIN-shape : {:}, FLOPs : {:} MB, Params : {:}.'.format(list(inputs.shape), flops, params))
        print ('[{:}] the network forward done'.format(time_string()))
    # obtain the locations on the image in the orignial size
    cpu = torch.device('cpu')
    np_batch_locs, np_batch_scos, cropped_size = batch_locs.to(cpu).numpy(), batch_scos.to(cpu).numpy(), cropped_size.numpy()
    # drop the last (background) point; keep per-point confidence scores
    locations, scores = np_batch_locs[0,:-1,:], np.expand_dims(np_batch_scos[0,:-1], -1)
    # map network-resolution coordinates back to the original crop
    scale_h, scale_w = cropped_size[0] * 1. / inputs.size(-2) , cropped_size[1] * 1. / inputs.size(-1)
    locations[:, 0], locations[:, 1] = locations[:, 0] * scale_w + cropped_size[2], locations[:, 1] * scale_h + cropped_size[3]
    prediction = np.concatenate((locations, scores), axis=1).transpose(1,0)
    shape = []
    for i in range(param.num_pts):
        point = prediction[:, i]
        # round to integer pixel coordinates for contour drawing
        shape.append([round(point[0]), round(point[1])])
        # shape.append([point[0], point[1]])
        print ('The coordinate of {:02d}/{:02d}-th points : ({:.1f}, {:.1f}), score = {:.3f}'.format(i, param.num_pts, float(point[0]), float(point[1]), float(point[2])))
    shape = np.array(shape)
    # locate lip region
    lip = shape[LIPFROM:LIPTO]
    # get lip aspect ratio
    lar = lip_aspect_ratio(lip)
    # image = draw_image_by_points(args.image, prediction, 1, (255,0,0), False, False)
    # img = cv2.cvtColor(np.asarray(image),cv2.COLOR_RGB2BGR)
    img = cv2.imread(args.image)
    # outline the lip region on the original image
    lip_shape = cv2.convexHull(lip)
    cv2.drawContours(img, [lip_shape], -1, (0, 255, 0), 1)
    # flag motion when the ratio leaves the [LOW, HIGH] band
    if lar > HIGH_THRESHOLD or lar < LOW_THRESHOLD:
        cv2.putText(img, "Lip Motion Detected!", (30, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 1)
    print(lar)
    print('finish san evaluation on a single image : {:}'.format(args.image))
    return lar, img
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Evaluate a single image by the trained model', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--image', type=str, help='The evaluation image path.')
parser.add_argument('--model', type=str, help='The snapshot to the saved detector.')
parser.add_argument('--face', nargs='+', type=float, help='The coordinate [x1,y1,x2,y2] of a face')
parser.add_argument('--cpu', action='store_true', help='Use CPU or not.')
args = parser.parse_args()
evaluate(args)
| visionshao/LipMotionDetection | SAN/san_eval.py | san_eval.py | py | 9,313 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "PIL.ImageFile.LOAD_TRUNCATED_IMAGES",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "PIL.ImageFile",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "... |
3243298205 | import math
import numpy as np
import torch
def input_matrix_wpn_2d(inH, inW, scale, add_scale=True):
    """Build the position matrix and validity mask for meta-upscaling.

    For an (inH, inW) feature map upscaled by *scale*, returns a
    [1, 2(+1), inH*ceil(scale), inW*ceil(scale)] tensor of fractional
    row/column offsets (plus an optional 1/scale channel) and a boolean
    mask marking which cells of the ceil(scale)-padded grid are real
    output pixels.
    """
    scale_int = int(math.ceil(scale))

    def axis_offsets(in_len, out_len):
        # Scatter the fractional offset of every output coordinate into a
        # length in_len*scale_int buffer; `valid` marks the slots used.
        offsets = torch.ones(in_len * scale_int)
        valid = torch.zeros(in_len * scale_int)
        proj = torch.arange(0, out_len, 1).mul(1.0 / scale)
        base = torch.floor(proj)
        frac = proj - base
        base = base.int()
        slot, prev = 0, -1
        for idx in range(len(frac)):
            if base[idx] != prev:
                # first output pixel of a new source pixel: jump to its group
                slot = base[idx] * scale_int
                prev = base[idx]
            else:
                slot += 1
            valid[slot] = 1
            offsets[slot] = frac[idx]
        return offsets, valid

    outH, outW = int(scale * inH), int(scale * inW)
    h_offset, mask_h = axis_offsets(inH, outH)
    w_offset, mask_w = axis_offsets(inW, outW)

    rows, cols = inH * scale_int, inW * scale_int
    # broadcast the per-axis vectors to full 2-d grids
    h_grid = torch.cat([h_offset.unsqueeze(-1)] * cols, 1)   # rows identical along W
    w_grid = torch.cat([w_offset.unsqueeze(0)] * rows, 0)    # columns identical along H
    mask_h = torch.cat([mask_h.unsqueeze(-1)] * cols, 1).view(-1, cols, 1)
    mask_w = torch.cat([mask_w.unsqueeze(0)] * rows, 0).view(-1, cols, 1)

    # a cell is a real output pixel only when both its row and column slots are used
    mask_mat = torch.sum(torch.cat((mask_h, mask_w), 2), 2).view(rows, cols)
    mask_mat = mask_mat.eq(2)

    ref_matrix = torch.cat([h_grid.unsqueeze(0), w_grid.unsqueeze(0)], 0)
    if add_scale:
        ref_matrix = torch.cat([ref_matrix, torch.ones(1, rows, cols) / scale])
    return ref_matrix.unsqueeze(0), mask_mat
def input_matrix_wpn_1d(inH, inW, scale, add_scale=True):
    '''
    By given the scale and the size of input image, we caculate the
    input matrix for the weight prediction network
    Args:
    inH, inW: the size of the feature maps
    scale: is the upsampling times

    Returns a flattened [1, K, 2(+1)] position matrix (trimmed to the
    repeating tile of offsets) and a boolean mask over the padded
    ceil(scale)*inH x ceil(scale)*inW grid marking real output pixels.
    '''
    outH, outW = int(scale * inH), int(scale * inW)
    # mask records which pixel is invalid, 1 valid or 0 invalid
    # h_offset and w_offset caculate the offset to generate the input matrix
    scale_int = int(math.ceil(scale))
    # print(f"inH:{inH}, outH:{outH}, scale_int:{scale_int}, ")
    # [inH, r, 1]
    h_offset = torch.ones(inH, scale_int, 1)
    mask_h = torch.zeros(inH, scale_int, 1)
    w_offset = torch.ones(1, inW, scale_int)
    mask_w = torch.zeros(1, inW, scale_int)
    # projection coordinate and caculate the offset
    # [outH,]
    h_project_coord = torch.arange(0, outH, 1).mul(1.0 / scale)
    int_h_project_coord = torch.floor(h_project_coord)
    # [outH,]
    offset_h_coord = h_project_coord - int_h_project_coord  # v_h
    int_h_project_coord = int_h_project_coord.int()  # floor_h
    # [outW,]
    w_project_coord = torch.arange(0, outW, 1).mul(1.0 / scale)
    int_w_project_coord = torch.floor(w_project_coord)
    # [outW,]
    offset_w_coord = w_project_coord - int_w_project_coord  # v_w
    int_w_project_coord = int_w_project_coord.int()  # floor_w
    # flag for number for current coordinate LR image
    # `flag` counts output pixels mapped onto the current source row;
    # `number` tracks which source row is current
    flag = 0
    number = 0
    for i in range(outH):
        if int_h_project_coord[i] == number:
            h_offset[int_h_project_coord[i], flag, 0] = offset_h_coord[i]
            mask_h[int_h_project_coord[i], flag, 0] = 1
            flag += 1
        else:
            h_offset[int_h_project_coord[i], 0, 0] = offset_h_coord[i]
            mask_h[int_h_project_coord[i], 0, 0] = 1
            number += 1
            flag = 1
    # print(f"==> h offset shape:{h_offset.shape}")
    flag = 0
    number = 0
    """ Shape:[inW, |r|]
    [[1, 1, 1, 0]
    [1, 1, 1, 1]
    [1, 1, 0, 0]
    [1, 1, 1, 1]
    [1, 1, 1, 0]...]
    """
    for i in range(outW):
        if int_w_project_coord[i] == number:
            # First line case and the [1:] case for the other lines
            w_offset[0, int_w_project_coord[i], flag] = offset_w_coord[i]
            mask_w[0, int_w_project_coord[i], flag] = 1
            flag += 1
        else:
            # The first element for every line except the first line
            # Second 0 in the next line is actually the flag=0 case
            w_offset[0, int_w_project_coord[i], 0] = offset_w_coord[i]
            mask_w[0, int_w_project_coord[i], 0] = 1
            number += 1
            flag = 1
    # [inH, |r|, |r|*inW] -> [|r|*inH, |r|*inW, 1]: Every Line is the same
    h_offset_coord = torch.cat(
        [h_offset] * (scale_int * inW), 2).view(-1, scale_int * inW, 1)
    # print(f"h_offset_coord shape:{h_offset_coord.shape}")
    # [|r|* inH, inW, |r|] -> [|r|*inH, |r|*inW, 1]: Every Column is the same
    w_offset_coord = torch.cat(
        [w_offset] * (scale_int * inH), 0).view(-1, scale_int * inW, 1)
    ####
    mask_h = torch.cat([mask_h] * (scale_int * inW),
                       2).view(-1, scale_int * inW, 1)
    mask_w = torch.cat([mask_w] * (scale_int * inH),
                       0).view(-1, scale_int * inW, 1)
    # [|r|* inH, |r|*inW, 2]
    pos_mat = torch.cat((h_offset_coord, w_offset_coord), 2)
    # print(f"pos_mat shape:{pos_mat.shape}")
    # a cell is valid only when both its row and column slots are in use
    mask_mat = torch.sum(torch.cat((mask_h, mask_w), 2), 2).view(
        scale_int * inH, scale_int * inW)
    mask_mat = mask_mat.eq(2)
    # trim pos_mat down to its repeating tile: scan down the first column
    # and across the first row until the offsets wrap back to ~0
    i = 1
    h, w, _ = pos_mat.size()
    while(pos_mat[i][0][0] >= 1e-6 and i < h):
        i = i+1
    j = 1
    # pdb.set_trace()
    h, w, _ = pos_mat.size()
    while(pos_mat[0][j][1] >= 1e-6 and j < w):
        j = j+1
    pos_mat_small = pos_mat[0:i, 0:j, :]
    # print(f"pos_mat_small shape: {pos_mat_small.shape}")
    pos_mat_small = pos_mat_small.contiguous().view(1, -1, 2)
    if add_scale:
        # prepend a constant 1/scale channel to every position entry
        scale_mat = torch.zeros(1, 1)
        scale_mat[0, 0] = 1.0 / scale
        # (inH*inW*scale_int**2, 4)
        scale_mat = torch.cat([scale_mat] * (pos_mat_small.size(1)), 0)
        pos_mat_small = torch.cat(
            (scale_mat.view(1, -1, 1), pos_mat_small), 2)
    # outH*outW*2 outH=scale_int*inH , outW = scale_int *inW
    # print(f"pos_mat_small shape: {pos_mat_small.shape}")
    return pos_mat_small, mask_mat
# speed up the model by removing the computation | miracleyoo/Meta-SSSR-Pytorch-Publish | matrix_input.py | matrix_input.py | py | 7,064 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "math.ceil",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 11,
... |
29786504578 | import cv2
import numpy as np
import matplotlib.pyplot as plt
import os
import csv
import pandas as pd
from PIL import Image
#root = '/home/miplab/data/Kaggle_Eyepacs/train/train_full'
#save_path = '/home/miplab/data/Kaggle_Eyepacs/train/train_full_CLAHE'
#annotations_path = '/home/miplab/data/Kaggle_Eyepacs/train/trainLabels.csv'
root = '/home/miplab/data/Kaggle_Eyepacs/EyeQ/EyeQ_dr/original/good_only'
save_path = '/home/miplab/data/Kaggle_Eyepacs/EyeQ/EyeQ_dr/CLAHE/good_only'
#annotations_path = '/home/miplab/data/Kaggle_Eyepacs/test/retinopathy_solution.csv'
#anno_df = pd.read_csv(annotations_path)
#print(anno_df)
#length = len(anno_df.index)
#for x,y,indx in zip(anno_df['image'], anno_df['level'], range(length)):
# x = x+".jpeg"
# Mirror the directory tree under `root` into `save_path`, writing a
# 600x600 CLAHE-enhanced grayscale copy of every image.
# CLAHE configuration is identical for every image, so create it once.
clahe = cv2.createCLAHE(clipLimit = 10)
for path, dirs, files in os.walk(root):
    # skip the root itself and sibling trees that must not be processed
    if path.endswith('original'):
        continue
    elif path.endswith("filtered"):
        continue
    elif path.endswith("full"):
        continue
    elif path.endswith("good_only"):
        continue
    # rebuild the same sub-path under save_path; len(root) replaces the
    # magic `path[64:]` slice which silently broke whenever `root` changed
    new_path = save_path + path[len(root):]
    # BUG FIX: the original only processed files when the output directory
    # already existed -- on a fresh run it created the directory and skipped
    # every file in it. Create the directory if needed, then always process.
    os.makedirs(new_path, exist_ok=True)
    length = len(files)
    for indx, x in enumerate(files, start=1):
        file_path = os.path.join(path, x)
        if os.path.isfile(file_path):
            image = cv2.imread(file_path)
            image = cv2.resize(image, (600, 600))
            image_bw = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            final_img = clahe.apply(image_bw)
            cv2.imwrite(os.path.join(new_path, x), final_img)
        print("{} out of {}".format(indx, length))
| JustinZorig/fundus_anomoly_detection | clahe_preprocessing.py | clahe_preprocessing.py | py | 1,778 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.walk",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number":... |
35349989578 | """Import a discussion from the old research database."""
from django.core.management.base import BaseCommand, CommandError
from _comment_database import Story
from group_discussion.models import Topic, Comment
from pony.orm import db_session
from django.contrib.auth.models import User
class Command(BaseCommand):
    """Import a discussion from the old research database."""

    help = 'Import a discussion from the old research database'

    def add_arguments(self, parser):
        """Register the positional `url` argument (one or more story URLs)."""
        parser.add_argument('url', nargs='+', type=str)

    def handle(self, *args, **options):
        """Import the story identified by the first supplied URL."""
        # BUG FIX: once add_arguments() is defined, Django delivers parsed
        # arguments through `options` and leaves *args empty, so the old
        # `url = args[0]` raised IndexError. nargs='+' makes options['url']
        # a list; keep the original single-story behaviour by taking the
        # first entry.
        url = options['url'][0]
        with db_session():
            story = Story[url]
            # NOTE(review): Pony's Entity[pk] normally raises ObjectNotFound
            # instead of returning a falsy value, so this guard may be
            # unreachable -- confirm against the Story entity's behaviour.
            if not story:
                raise CommandError('Story "%s" does not exist' % url)

            # First create (or reuse) the topic, locked against new posts
            title = "%s (%s)" % (story.title, story.site)
            topic = Topic.objects.get_or_create(title=title)[0]
            topic.locked = True
            topic.save()

            def get_user(topic, name):
                # Map a commenter name to a Django user (creating one with a
                # placeholder e-mail/password when missing) and return the
                # per-topic user object.
                try:
                    user = User.objects.get(username=name)
                except User.DoesNotExist:
                    user = User.objects.create_user(
                        username=name, email='%s@example.com' % name,
                        password='password')
                return user.topic_user(topic)

            # Wipe any previous import of this topic: posts, users, groups
            for comment in topic.comments.all():
                comment.delete()
            for user in topic.users.all():
                user.delete()
            for group in topic.groups.all():
                group.delete()

            # Load in each post
            comments = {}
            for comment in story.disqus_thread.comments:
                author = get_user(topic, comment.author.name)
                new_comment = Comment(
                    text=comment.message,
                    topic=topic,
                    author=author,
                    created_at=comment.created_at
                )
                comments[comment.id] = new_comment
                new_comment.save()
                # Load the likes and dislikes
                for liker in comment.liked_by:
                    new_comment.liked_by.add(get_user(topic, liker.name))
                for disliker in comment.disliked_by:
                    new_comment.disliked_by.add(get_user(topic, disliker.name))

            # Second pass: link posts to their parents, now that every
            # parent is guaranteed to exist in `comments`
            for comment in story.disqus_thread.comments:
                if comment.parent is not None:
                    comments[comment.id].parent = comments[comment.parent.id]
                    comments[comment.id].save()

        self.stdout.write('Successfully imported "%s"' % url)
| jscott1989/newscircle | group_discussion/management/commands/import_topic.py | import_topic.py | py | 3,061 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "pony.orm.db_session",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "_comment_database.Story",
"line_number": 23,
"usage_type": "name"
},
{
... |
16557961615 | # https://leetcode.com/problems/flood-fill/
# Solved Date: 20.05.12.
import collections
class Solution:
    """LeetCode 733 -- Flood Fill, solved with breadth-first search."""

    def flood_fill(self, image, sr, sc, newColor):
        """Recolor the 4-connected region containing (sr, sc) to newColor.

        The grid is modified in place and also returned.
        """
        rows, cols = len(image), len(image[0])
        target = image[sr][sc]
        seen = [[False] * cols for _ in range(rows)]
        frontier = collections.deque([(sr, sc)])
        seen[sr][sc] = True
        image[sr][sc] = newColor
        while frontier:
            r, c = frontier.popleft()
            # expand to the four axis-aligned neighbours
            for nr, nc in ((r - 1, c), (r, c - 1), (r + 1, c), (r, c + 1)):
                if 0 <= nr < rows and 0 <= nc < cols:
                    if not seen[nr][nc] and image[nr][nc] == target:
                        seen[nr][nc] = True
                        image[nr][nc] = newColor
                        frontier.append((nr, nc))
        return image
def main():
    """Demo entry point: flood-fill the sample grid and print the result."""
    grid = [[1, 1, 1], [1, 1, 0], [1, 0, 1]]
    print(Solution().flood_fill(grid, 1, 1, 2))
if __name__ == '__main__':
main()
| imn00133/algorithm | LeetCode/May20Challenge/Week2/day11_flood_fill.py | day11_flood_fill.py | py | 1,072 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.deque",
"line_number": 10,
"usage_type": "call"
}
] |
28599657749 |
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Profile
class UserRegisterForm(UserCreationForm):
    """Sign-up form: Django's UserCreationForm plus a required email field."""
    email = forms.EmailField()
    class Meta:
        # Creates django.contrib.auth User rows with the standard
        # username/email and the two password-confirmation fields.
        model = User
        fields = ['username', 'email', 'password1', 'password2']
class UserUpdateForm(forms.ModelForm):
    """Profile-settings form for editing an existing User's name and email."""
    email = forms.EmailField()
    class Meta:
        model = User
        fields = ['username', 'email']
class ProfileUpdateForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(ProfileUpdateForm, self).__init__(*args, **kwargs)
self.fields['job'].widget.attrs['rows'] = '4'
self.fields['job'].widget.attrs['placeholder'] = 'Tell us about your Job'
self.fields['job'].widget.attrs['id'] = '1'
self.fields['education'].widget.attrs['rows'] = '4'
self.fields['education'].widget.attrs['placeholder'] = 'Tell us about your Education'
self.fields['education'].widget.attrs['id'] = '2'
self.fields['projects'].widget.attrs['rows'] = '4'
self.fields['projects'].widget.attrs['placeholder'] = 'Tell us about your Projects'
self.fields['projects'].widget.attrs['id'] = '3'
self.fields['skills'].widget.attrs['rows'] = '4'
self.fields['skills'].widget.attrs['placeholder'] = 'Tell us about your Skills'
self.fields['skills'].widget.attrs['id'] = '4'
self.fields['internships'].widget.attrs['rows'] = '4'
self.fields['internships'].widget.attrs['placeholder'] = 'Tell us about your Internships'
self.fields['internships'].widget.attrs['id'] = '5'
self.fields['links'].widget.attrs['rows'] = '4'
self.fields['links'].widget.attrs['placeholder'] = 'Drop any Links which contains your work'
self.fields['links'].widget.attrs['id'] = '6'
class Meta:
model = Profile
fields = ['image','job','education','projects','skills','internships','links'] | GovardhanNM/blog-website | users/forms.py | forms.py | py | 1,997 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.contrib.auth.forms.UserCreationForm",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.forms.EmailField",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 9,
"usage_type": "name"
},
{
"api_... |
11645330285 | # -*- coding: utf-8 -*-
import scrapy
import sqlite3
from ..items import HvgarticleItem
class HvgarticlesSpider(scrapy.Spider):
name = 'hvgarticles'
allowed_domains = ['hvg.com']
conn = sqlite3.connect(r'C:\Users\Athan\OneDrive\Documents\Dissertation\Python\webscraperorigo\url.db')
curr = conn.cursor()
urls=[]
curr.execute("""SELECT url FROM 'hvgUrl_tb' WHERE
url LIKE "%201801%" OR
url LIKE "%201802%" OR
url LIKE "%201803%" OR
url LIKE "%201804%" OR
url LIKE "%201805%" OR
url LIKE "%2017%" OR
url LIKE "%2016%" OR
url LIKE "%2015%" OR
url LIKE "%201405%" or
url LIKE "%201406%" or
url LIKE "%201407%" or
url LIKE "%201408%" or
url LIKE "%201409%" or
url LIKE "%201410%" or
url LIKE "%201411%" or
url LIKE "%201412%" ORDER BY url;
""")
for row in curr.fetchall():
urlrow = str(row)
urlrow = urlrow.replace('(',"")
urlrow = urlrow.replace(')',"")
urlrow = urlrow.replace("'","")
urlrow = urlrow.replace(',',"")
urls.append(urlrow)
start_urls = urls
    def parse(self, response):
        """Extract article text, outgoing links and tags from one hvg.hu page.

        Each item field is a single-element list whose only element is the
        space-joined concatenation of all matches.
        """
        items = HvgarticleItem()
        text = ['']
        connections = ['']
        tags = ['']
        start_url = ['']
        # body paragraphs from both the summary and the main content block
        p = response.css(".entry-summary p::text,.entry-summary p a::text, .entry-summary p a em::text,.entry-content p::text,.entry-content p a::text, .entry-content p a em::text").extract()
        # href targets of every link embedded in those paragraphs
        connection = response.css(".entry-summary p a, .entry-summary p a em,.entry-content p,.entry-content p a, .entry-content p a em").xpath("@href").extract()
        tag = response.css(".article-tags .tag::text").extract()
        start_url[0] = response.request.url
        for paragaph in p:
            text[0] += " " + paragaph
        for c in connection:
            connections[0] += " " + c
        for t in tag:
            tags[0] += " " + t
        # NOTE(review): 'paragaph' is misspelled but must match the field
        # name declared on HvgarticleItem — confirm before renaming.
        items['paragaph'] = text
        items['tags'] = tags
        items['connections'] = connections
        items['start_url'] = start_url
yield items | AJszabo/dissertation | hvgarticle/hvgarticle/spiders/hvgarticles.py | hvgarticles.py | py | 2,330 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "scrapy.Spider",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "items.HvgarticleItem",
"line_number": 40,
"usage_type": "call"
}
] |
43312588716 | import sys
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
def get_all_ids_ujz(category="most-popular", page_number="1"):
    """Return the video ids listed on one category page of youjizz.com."""
    page_url = "https://www.youjizz.com/" + category + "/" + page_number + ".html"
    # the site expects this session cookie on listing requests
    request_headers = {
        'Cookie': 'commentPhrase=cllTWHVFd1ZzckYvVTRKd1ZZc1BWQ296clh5RCs0YjAyNTFjZGtQc3pLdz06OkgmihcPsRpzrB6ef53DIQU=; RNLBSERVERID=ded6584 '
    }
    page = requests.request("GET", page_url, headers=request_headers)
    parsed = BeautifulSoup(page.text, 'html.parser')
    video_ids = []
    # every listed video is an <a class="frame video" data-video-id="...">
    for anchor in parsed.find_all('a', class_="frame video"):
        video_ids.append(anchor['data-video-id'])
    return video_ids
def get_single_movie_ujz(movie_id):
    """Scrape metadata for a single youjizz.com video.

    Renders the video page with headless Chrome (the encodings table is
    injected by JavaScript), then parses title, tags, description,
    view/rating counters, thumbnail URL and the raw ``dataEncodings``
    JSON used for download links.

    :param movie_id: numeric id of the video on youjizz.com
    :return: dict with name/description/views/rating/tags/image/... keys
    """
    # fix: the original URL literal ended in ".html " (trailing space),
    # which targeted a non-existent resource
    url = "https://www.youjizz.com/videos/-" + str(movie_id) + ".html"
    chrome_options = Options()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--disable-dev-shm-usage')
    driver = webdriver.Chrome('/usr/bin/chromedriver', options=chrome_options)
    try:
        driver.get(url)
        # grab the rendered page source before releasing the browser
        page_source = driver.page_source
    finally:
        # always release the browser, even when the page load fails
        driver.close()
    # parse the HTML
    soup = BeautifulSoup(page_source, "html.parser")
    tags = soup.find('meta', {'name': 'keywords'})['content'].split(' , ')
    description = soup.find('meta', {'name': 'description'})['content']
    try:
        video_favorite = soup.find("input", {'id': "checkVideoFavorite"})
        views = video_favorite['data-views']
        rating = video_favorite['data-rating']
    except (TypeError, KeyError):
        # element missing (find() returned None) or attributes absent
        views = 0
        rating = 0
    scripts = soup.find_all("script")
    image_url = "http:" + soup.find('meta', {'property': 'og:image'})['content']
    # default to an empty JSON array so the return below cannot raise
    # NameError when no script defines dataEncodings
    data_encodings = '[]'
    for script in scripts:
        if 'var dataEncodings' in str(script):
            script = str(script)
            data_encodings = script.split('}];')[0]
            data_encodings = data_encodings.replace('<script>', '').replace(
                'var dataEncodings = ', '').replace('\n', '').replace(' ', '') + "}]"
    return {'name': soup.find('title').text,
            'farsi_name': '+18 کوس کوص کون کیر سکس سکسی پورن سوپر جنده لزبین ',
            'description': description,
            'views': views,
            'url': url,
            'rating': rating,
            'movie_id': movie_id,
            'tags': tags,  # type of this field is Array
            'image': image_url,
            'download_links': data_encodings
            }
# if __name__ == '__main__':
# for page_number in range(1, 100):
# all_ids = get_all_ids_ujz(page_number=str(page_number))
# print(all_ids)
# for movie_id in all_ids:
# print(get_single_movie_ujz(movie_id))
| naderjlyr/yt-downloader-back | downloads/view/adult/youjizz.py | youjizz.py | py | 3,353 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.request",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 25,
"usage_type": "call"
},
{
"api_... |
1908173277 | import tkinter as tk
import tkmacosx as tkmac
from tkinter import ttk
from tkinter import simpledialog
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import cube_moves as cm
import time
from db import add_session, display_sessions
def cube_timer(master=None, root=None):
    """Build the Rubik's-cube timer screen.

    Lays out a scramble display and a space-bar-controlled stopwatch
    (frame1), tabbed per-session solve statistics with a matplotlib plot
    (frame2), and an info box read from ./utils/info.txt (frame3).
    ``root`` receives the <space> key binding; ``master`` hosts the frames.
    """
    frame1 = tk.Frame(master=master, height=400, width=700, bg='#D3D3D3')
    frame1.grid(row=0, column=0, padx=10)
    frame2 = tk.Frame(master=master, height=500, width=200, bg='#D3D3D3')
    frame2.grid(row=0, column=1, padx=5, pady=5, rowspan=2)
    frame3 = tk.Frame(master=master, height=10, width=700, bg='#D3D3D3')
    frame3.grid(row=1, column=0, padx=10)
    # timer, scramble
    tk.Label(master=frame1, bg='#D3D3D3', width=85, height=5).grid(row=0, columnspan=2)
    scramble_label = tk.Label(master=frame1, text=' ' * 30, font=('DIN Alternate', 30), bg='#D3D3D3')
    scramble_label.grid(row=1, columnspan=2)
    clock = tk.Label(master=frame1, text='00:00.00', font=('DIN Alternate', 100), bg='#D3D3D3')
    clock.grid(row=2, columnspan=2)
    show_scramble = tk.IntVar()
    tk.Checkbutton(master=frame1, text='Show Scramble', command=lambda: check(), bg='#D3D3D3').grid(row=3, columnspan=2)
    tk.Label(master=frame1, bg='#D3D3D3', height=4).grid(row=4, columnspan=2)
    def check():
        # Manual toggle: the Checkbutton has no variable= binding, so this
        # callback flips show_scramble itself and swaps the label text
        # between a fresh scramble and blank filler.
        if show_scramble.get() == 1:
            show_scramble.set(0)
            scramble_label.config(text='\t' * 6)
        elif show_scramble.get() == 0:
            show_scramble.set(1)
            scramble_label.config(text=cm.scramble_cube()[0])
    # creating info box
    file_handle = open("./utils/info.txt", 'r')
    info1 = tk.Label(master=frame3, justify='left', pady=5, wraplength=400, bg='#D3D3D3')
    info2 = tk.Label(master=frame3, justify='left', pady=5, wraplength=400, bg='#D3D3D3')
    info1.grid(row=0, column=0)
    info2.grid(row=0, column=1)
    text = ''
    # skip to the '--- CUBE TIMER ---' marker, then collect lines until the
    # next '---' section header
    while not file_handle.readline().strip() == '--- CUBE TIMER ---':
        pass
    else:
        while True:
            line = file_handle.readline()
            if line.strip().startswith('---'):
                break
            else:
                text = text + line
        info1.config(text=text)
    # rewind and repeat for the '--- DETAILS ---' section
    file_handle.seek(0, 0)
    text = ''
    while not file_handle.readline().strip() == '--- DETAILS ---':
        pass
    else:
        while True:
            line = file_handle.readline()
            if line.strip().startswith('---'):
                break
            else:
                text = text + line
        info2.config(text=text)
    file_handle.close()
    # details
    tabs = ttk.Notebook(master=frame2, height=450, width=300)
    tabs.grid(row=0, column=0)
    # sessions[i] is the tab frame, session_times[i] its list of solve times
    sessions = [tk.Frame(master=tabs)]
    session_times = [[]]
    def define_session(session):
        # (Re)build one session tab: numbered time list, fastest/slowest/
        # average summary, a plot of the times, and save/show buttons.
        # Called again after every solve to refresh the display.
        session_times.append([])
        text_box1 = tk.Text(master=session, height=5, width=10, font=('', 15))
        text_box1.grid(row=0, column=0, padx=5)
        text_box2 = tk.Text(master=session, height=3, width=20, font=('', 15))
        text_box2.grid(row=0, column=1)
        times = ''
        count = 0
        for i in session_times[sessions.index(session)]:
            times = times + str(count + 1) + '. ' + str(i) + ' sec' + '\n'
            count += 1
        text_box1.insert(tk.END, times.rstrip('\n'))
        text_box1.see(tk.END)
        text_box1.config(spacing1=3, spacing2=3, spacing3=3, state='disabled')
        fastest = slowest = average = 0.00
        if count != 0:
            fastest = min(session_times[sessions.index(session)])
            slowest = max(session_times[sessions.index(session)])
            average = "{:.2f}".format(sum(session_times[sessions.index(session)]) / count)
        details = 'Fastest solve: ' + str(fastest) + ' sec\n' + 'Slowest solve: ' + str(
            slowest) + ' sec\n' + 'Average time: ' + str(average)[:] + ' sec'
        text_box2.insert(tk.END, details)
        text_box2.config(spacing1=3, spacing2=3, spacing3=3, state='disabled')
        # line plot of solve times for this session
        fig = Figure(figsize=(3, 3), dpi=100)
        # fig.subplots_adjust(bottom=1, top=2, left=1, right=2)
        plot = fig.add_subplot()
        plot.plot(session_times[sessions.index(session)])
        canvas = FigureCanvasTkAgg(fig, master=session)
        canvas.draw()
        canvas.get_tk_widget().grid(row=1, column=0, columnspan=2)
        def save():
            # Persist this session's times under a user-supplied name.
            session_name = tk.simpledialog.askstring("Save Session", 'Enter Session Name:')
            add_session(session_name, session_times[sessions.index(session)])
            root.grab_set()
        def show():
            # Swap the timer UI out for the saved-sessions browser.
            frame1.grid_remove()
            frame2.grid_remove()
            frame3.grid_remove()
            display_sessions(master=master, root=root)
        save_button = tkmac.Button(master=session, text='Save Session', command=lambda: save())
        save_button.grid(row=2, column=0)
        show_button = tkmac.Button(master=session, text='Show Saved Sessions', command=lambda: show())
        show_button.grid(row=2, column=1)
    tabs.add(sessions[0], text='Session 1')
    define_session(sessions[0])
    # trailing '+' tab used to create new sessions
    session_add = tk.Frame(master=tabs, height=450, width=200)
    tabs.add(session_add, text='+')
    tk.Label(master=session_add, text='Session name :').grid(row=0, column=0)
    name = tk.Entry(master=session_add)
    name.grid(row=0, column=1, pady=20)
    # NOTE(review): the Button variable 'add' is shadowed by the function
    # def below; the lambda resolves 'add' at click time, so it calls the
    # function — confirm this shadowing is intentional.
    add = tk.Button(master=session_add, text='Add Session', command=lambda: add(name.get()))
    add.grid(row=1, column=0, columnspan=2, pady=10)
    def add(name):
        # Insert a new session tab before the '+' tab and initialise it.
        # tabs.hide(len(sessions))
        tabs.forget(session_add)
        sessions.append(tk.Frame(master=tabs))
        if name == '':
            tabs.add(sessions[-1], text='Session ' + str(len(sessions)))
        else:
            tabs.add(sessions[-1], text=name)
        tabs.add(session_add, text='+')
        define_session(sessions[-1])
    # 0 = idle/stopped, 1 = armed (reset shown), 2 = running
    timer_control = 0
    root.bind('<space>', lambda event: keybind(event))
    def next_solve():
        # Record the time shown on the clock into the current session and
        # refresh that session's tab (plus a new scramble if enabled).
        if show_scramble.get() == 1:
            scramble_label.config(text=cm.scramble_cube()[0])
        time_str = clock.cget('text')
        if time_str.startswith('00:'):
            time_taken = float(time_str[3:])
        else:
            time_taken = float(time_str[:2]) * 60 + float(time_str[3:])
        session_times[tabs.index('current')] += [time_taken]
        define_session(sessions[tabs.index('current')])
    def keybind(event):
        # Space bar drives the three-state stopwatch cycle.
        nonlocal timer_control
        if timer_control == 0:  # reset timer
            clock.config(text='00:00.00')
            clock.config(fg='red')
            timer_control = 1
        elif timer_control == 1:  # start timer
            # scramble_label.config(text='')
            clock.config(fg='green')
            timer_control = 2
            timer()
        elif timer_control == 2:  # stop timer
            clock.config(fg='black')
            timer_control = 0
            next_solve()
    def timer():
        # Poll loop: refresh the clock label roughly every 10 ms until the
        # space-bar handler sets timer_control away from 2.
        nonlocal timer_control
        now = time.time()
        while timer_control == 2:
            time.sleep(0.01)
            value = time.time() - now
            min = int(value // 60)
            sec = round(value - min * 60, 2)
            if min < 10:
                min = '0' + str(min)
            else:
                min = str(min)
            if sec < 10:
                sec = '0' + str(sec)
            else:
                sec = str(sec)
            # pad the hundredths digit so the display is always MM:SS.hh
            if int(sec[3:]) < 10:
                sec = sec[:3] + '0' + sec[-1]
            clock.config(text=min + ':' + sec)
            master.update()
if __name__ == '__main__':
    # Standalone entry point: run the timer screen in its own Tk root.
    master = tk.Tk()
    master.configure(bg='#D3D3D3')
    # cube_timer(master=master)
    cube_timer(master=master, root=master)
    master.mainloop()
| gaurav-behera/virtual-rubiks-cube | rubiks_cube/cube_timer.py | cube_timer.py | py | 7,752 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tkinter.Frame",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tkinter.Frame",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tkinter.Frame",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_n... |
5961849769 | import json
from ast import literal_eval
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils.decorators import method_decorator
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.views.generic import TemplateView, View
from django.views.decorators.csrf import csrf_exempt
from django.db.models import Q
from django.core.exceptions import FieldDoesNotExist
from django_extensions.management.commands import show_urls
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from jsonapi import serializers, models
DEF_PAGE_SIZE = 1000
HTTP_BAD_REQUEST_CODE = 400
JSON_POST_ARGS = 'jsonparams'
class JSONResponseMixin(object):
    """Shared helpers for building BRAPI-style JSON response envelopes.

    Every payload carries a ``metadata`` section (pagination, status,
    datafiles) next to ``result``. Subclasses supply ``model`` and
    ``serializer`` (either may be None for non-ORM results).
    """
    # query keys reserved for pagination on every endpoint
    pagination_params = ['page', 'pageSize']
    def buildErrorResponse(self, message, code):
        """Return the standard envelope with an empty result and one error status."""
        err = {"metadata": {
            "pagination": {
                "pageSize": 0,
                "currentPage": 0,
                "totalCount": 0,
                "totalPages": 0
            },
            "status": [{
                "message": message,
                "code": code
            }],
            "datafiles": []
        },
            "result": {}
        }
        return err
    # NOTE(review): mutable default arguments (pagination=..., status=[],
    # datafiles=[]) are shared across calls — safe only while callers
    # never mutate them; confirm or switch to None defaults.
    def buildResponse(self, results, pagination={"pageSize": 0, "currentPage": 0, "totalCount": 0, "totalPages": 0}, status=[], datafiles=[]):
        """Wrap *results* in the success envelope with the given metadata."""
        output = {}
        output['result'] = results
        output['metadata'] = {}
        output['metadata']['pagination'] = pagination
        output['metadata']['status'] = status
        output['metadata']['datafiles'] = datafiles
        return output
    def prepareResponse(self, objects, requestDict):
        """Paginate *objects* and wrap one page in the response envelope.

        ``page`` is zero-based on the wire (BRAPI convention) but Django's
        Paginator is one-based, hence the +1/-1 conversions. *objects* may
        be a queryset (serialized per-object) or a plain list.
        """
        try:
            pagesize = int(requestDict.get('pageSize', DEF_PAGE_SIZE))
            page = int(requestDict.get('page', 0)) + 1  # BRAPI wants zero page indexing...
        except:
            return self.buildErrorResponse('Invalid page or pageSize parameter', HTTP_BAD_REQUEST_CODE)
        # order is mandatory because of pagination
        if self.model and not objects.ordered:
            objects = objects.order_by('pk')
        paginator = Paginator(objects, pagesize)
        try:
            pageObjects = paginator.page(page)
        except EmptyPage:
            # If page is out of range, deliver last page of results.
            return self.buildErrorResponse('Empty page was requested: {}'.format(page-1), HTTP_BAD_REQUEST_CODE)
            # pageObjects = paginator.page(paginator.num_pages)
        pagination = {'pageSize': pagesize,
                      'currentPage': page-1,
                      'totalCount': len(objects),
                      'totalPages': paginator.num_pages
                      }
        # return serialized data
        data = []
        for obj in pageObjects:
            if self.serializer:
                data.append(self.serializer(obj).data)
            else:
                data.append(obj)
        return self.buildResponse(results={'data': data}, pagination=pagination)
    def sortOrder(self, requestDict, objects):
        """Apply the 'sortOrder' parameter (asc/desc) to an ordered queryset."""
        val = requestDict.get('sortOrder')
        if val is not None:
            val = val.lower()
            if val == 'desc':
                objects = objects.reverse()
            elif val == 'asc':
                pass # by default
            else:
                raise ValueError('Invalid value for "sortOrder" parameter: {}'.format(val))
        return objects
# CSRF protection is disabled so external API clients can POST JSON
# without first fetching a CSRF token.
@method_decorator(csrf_exempt, name='dispatch')
class UnsafeTemplateView(View):
    """Base view with CSRF checks turned off for API-style POSTs."""
    pass
# we have to handle different types of requests:
# GET with parameters in URL path e.g., ... brapi/v1/germplasm/id
# GET with parameters encoded as "application/x-www-form-urlencoded"
# POST with parameters encoded as "application/json"
#
# in addition there is a shortcut class GET_detail_response for listing single objects by PK
class GET_response(JSONResponseMixin):
    """Mixin for endpoints queried via GET with URL-encoded parameters.

    Subclasses declare ``get_parameters`` and implement
    ``get_objects_GET(requestDict)`` returning the objects to paginate.
    """
    def checkGETparameters(self, requestDict):
        """Return the set of supplied query parameters that are not recognised."""
        return set(requestDict.keys()) - set(self.get_parameters) - set(self.pagination_params)
    def get(self, request, *args, **kwargs):
        requestDict = self.request.GET
        # sanity: fail if there are unwanted parameters
        unknownParams = self.checkGETparameters(requestDict)
        if unknownParams:
            return JsonResponse(self.buildErrorResponse('Invalid query parameter(s) {}'.format(unknownParams), HTTP_BAD_REQUEST_CODE))
        # execute query and make pagination; report data errors as a JSON
        # 400 like the POST/URL-parameter mixins do instead of a bare 500
        try:
            objects = self.get_objects_GET(requestDict)
        except Exception as e:
            return JsonResponse(self.buildErrorResponse('Data error: {}'.format(str(e)), HTTP_BAD_REQUEST_CODE))
        response = self.prepareResponse(objects, requestDict)
        return JsonResponse(response)
class POST_JSON_response(JSONResponseMixin):
    """Mixin for endpoints queried via a JSON-encoded POST body.

    Subclasses declare ``post_json_parameters`` and implement
    ``get_objects_POST(requestDict)`` returning the objects to paginate.
    """
    def checkPOSTparameters(self, requestDict):
        """Return the set of supplied body keys that are not recognised."""
        return set(requestDict.keys()) - set(self.post_json_parameters) - set(self.pagination_params)
    def post(self, request, *args, **kwargs):
        try:
            requestDict = json.loads(request.body.decode("utf-8"))
        except Exception:
            return JsonResponse(self.buildErrorResponse('Invalid JSON POST parameters', HTTP_BAD_REQUEST_CODE))
        unknownParams = self.checkPOSTparameters(requestDict)
        if unknownParams:
            # fixed message typo: was "pararameter"
            return JsonResponse(self.buildErrorResponse('Invalid query parameter(s) {}'.format(unknownParams), HTTP_BAD_REQUEST_CODE))
        # execute query and make pagination
        try:
            objects = self.get_objects_POST(requestDict)
        except Exception as e:
            return JsonResponse(self.buildErrorResponse('Data error: {}'.format(str(e)), HTTP_BAD_REQUEST_CODE))
        response = self.prepareResponse(objects, requestDict)
        return JsonResponse(response)
class GET_URLPARAMS_response(JSONResponseMixin):
    """Mixin for GET endpoints whose extra arguments come from the URL path.

    Like GET_response, but ``get_objects_GET`` also receives the resolved
    URL keyword arguments (e.g. a parent object's primary key).
    """
    def checkGETparameters(self, requestDict):
        """Return the set of supplied query parameters that are not recognised."""
        return set(requestDict.keys()) - set(self.get_parameters) - set(self.pagination_params)
    def get(self, request, *args, **kwargs):
        requestDict = self.request.GET
        # sanity: fail if there are unwanted parameters
        unknownParams = self.checkGETparameters(requestDict)
        if unknownParams:
            # fixed message typo: was "pararameter"
            return JsonResponse(self.buildErrorResponse('Invalid query parameter(s) {}'.format(unknownParams), HTTP_BAD_REQUEST_CODE))
        # execute query (URL kwargs supply the object keys) and paginate
        try:
            objects = self.get_objects_GET(requestDict, **kwargs)
        except Exception as e:
            return JsonResponse(self.buildErrorResponse('Data error: {}'.format(str(e)), HTTP_BAD_REQUEST_CODE))
        response = self.prepareResponse(objects, requestDict)
        return JsonResponse(response)
class GET_detail_response(JSONResponseMixin):
    """Mixin serving a single object looked up by its URL primary key.

    Subclasses set ``model``, ``serializer`` and ``pk`` (the URL kwarg
    holding the primary-key value).
    """
    def get(self, request, *args, **kwargs):
        primary_key = kwargs.get(self.pk)
        try:
            instance = self.model.objects.get(pk=primary_key)
        except self.model.DoesNotExist:
            # unknown primary key -> JSON 404 envelope
            return JsonResponse(self.buildErrorResponse('Invalid object ID', 404))
        return JsonResponse(self.buildResponse(results=self.serializer(instance).data))
class Index(TemplateView):
    """Site root; POST is treated exactly like GET."""
    template_name = 'root.html'
    def get(self, request):
        return render(request, self.template_name)
    def post(self, request):
        # POST simply re-renders the same static page
        return self.get(request)
class CallSearch(GET_response, UnsafeTemplateView):
    """GET /calls — describe every /brapi/v1/ endpoint of this project.

    Introspects the URL conf via django-extensions' show_urls command and
    reports each call's path, payload datatypes and supported HTTP methods.
    Returns a plain list, so model/serializer are unset.
    """
    model = None
    serializer = None
    get_parameters = ['datatype']
    def get_objects_GET(self, requestDict):
        # optional filter on the advertised payload datatype (only 'json')
        datatype = requestDict.get('datatype')
        # show_urls emits "path<TAB>view<TAB>name" lines; split them apart
        tmp = [x.split('\t') for x in show_urls.Command().handle(format_style='dense', urlconf='ROOT_URLCONF', no_color=True).split('\n')]
        urls = []
        for entry in tmp:
            if len(entry) < 2:
                continue
            url = entry[0]
            viewclass = entry[1].split('.')[-1]
            if url.startswith('/brapi/v1/'):
                # normalise to BRAPI call notation: strip prefix, <x> -> {x}
                url = url.replace('/brapi/v1/', '').replace('<', '{').replace('>', '}')
                if url:
                    urls.append((url, globals()[viewclass]))
        result = []
        for url, klas in urls:
            data = {}
            data['call'] = url
            data['datatypes'] = ['json']
            data['methods'] = []
            # supported methods are derived from the view's mixin ancestry
            if issubclass(klas, GET_response) or issubclass(klas, GET_URLPARAMS_response) or issubclass(klas, GET_detail_response):
                data['methods'].append('GET')
            if issubclass(klas, POST_JSON_response):
                data['methods'].append('POST')
            if datatype is None or datatype in data['datatypes']:
                result.append(data)
        return result
class GermplasmDetails(GET_detail_response, UnsafeTemplateView):
    """GET /germplasm/{germplasmDbId} — single germplasm record."""
    model = models.Germplasm
    serializer = serializers.GermplasmDetailsSerializer
    pk = 'germplasmDbId'
class GermplasmSearch(GET_response, POST_JSON_response, UnsafeTemplateView):
    """Germplasm search: GET by single values, POST by lists of values."""
    model = models.Germplasm
    serializer = serializers.GermplasmDetailsSerializer
    get_parameters = ['germplasmName', 'germplasmDbId', 'germplasmPUI']
    post_json_parameters = ['germplasmPUIs', 'germplasmDbIds', 'germplasmSpecies', 'germplasmGenus', 'germplasmNames', 'accessionNumbers']
    def get_objects_GET(self, requestDict):
        # NOTE(review): parameter names are used directly as ORM field
        # lookups (e.g. Q(germplasmName=...)). The POST handler below maps
        # germplasmNames -> 'name', which suggests these GET names may not
        # match the model's field names — confirm against models.Germplasm.
        qdict = {}
        for param in self.get_parameters:
            if param in requestDict:
                qdict[param] = requestDict[param]
        return self.model.objects.filter(Q(**qdict))
    def get_objects_POST(self, requestDict):
        """Batch search: each body key is a list matched with an __in lookup."""
        qdict = {}
        # body key -> model field name
        param2attr = {'germplasmPUIs': 'germplasmPUI',
                      'germplasmDbIds': 'germplasmDbId',
                      'germplasmSpecies': 'species',
                      'germplasmGenus': 'genus',
                      'germplasmNames': 'name',
                      'accessionNumbers': 'accessionNumber'}
        for p in param2attr:
            if p in requestDict:
                qdict['{}__in'.format(param2attr[p])] = requestDict[p]
        return self.model.objects.filter(Q(**qdict))
class ProgramSearch(POST_JSON_response, UnsafeTemplateView):
    """POST program search — exact-match filter on any Program attribute."""
    model = models.Program
    serializer = serializers.ProgramSerializer
    post_json_parameters = ['programDbId', 'name', 'abbreviation', 'objective', 'leadPerson']
    def get_objects_POST(self, requestDict):
        """AND together an exact-match Q for every supplied parameter."""
        query = Q()
        for pn in self.post_json_parameters:
            val = requestDict.get(pn)
            if val is not None:
                # parameter names double as model field names here
                query &= Q(**{pn: val})
        # no join-based filters are involved, so distinct() is unnecessary
        # (the original carried a 'distinct' flag that was never set)
        objects = self.model.objects.filter(query)
        return self.sortOrder(requestDict, objects)
class ProgramList(GET_response, TemplateView):
    """GET /programs — list programs, optionally filtered by name/abbreviation."""
    model = models.Program
    serializer = serializers.ProgramSerializer
    get_parameters = ['programName', 'abbreviation']
    def get_objects_GET(self, requestDict):
        # collect the supplied filters ('programName' maps to the model's
        # 'name' field), then AND them in a single Q
        filters = {}
        if 'programName' in requestDict:
            filters['name'] = requestDict['programName']
        if 'abbreviation' in requestDict:
            filters['abbreviation'] = requestDict['abbreviation']
        return self.model.objects.filter(Q(**filters))
class TrialDetails(GET_detail_response, TemplateView):
    """GET /trials/{trialDbId} — single trial with full details."""
    model = models.Trial
    serializer = serializers.TrialDetailsSerializer
    pk = 'trialDbId'
class TrialList(GET_response, TemplateView):
    """GET /trials — list trials filtered by program/active, with sorting."""
    model = models.Trial
    serializer = serializers.TrialSummarySerializer
    # NOTE(review): 'locationDbId' is accepted but never filtered on below —
    # confirm whether that filter was intended.
    get_parameters = ['programDbId', 'locationDbId', 'active', 'sortBy', 'sortOrder']
    def get_objects_GET(self, requestDict):
        query = Q()
        val = requestDict.get('programDbId')
        if val is not None:
            query &= Q(programDbId=val)
        val = requestDict.get('active')
        if val is not None:
            val = val.lower()
            if val not in ['true', 'false']:
                raise ValueError('Invalid value for "active" parameter: {}'.format(val))
            val = True if val == 'true' else False
            query &= Q(active=val)
        objects = self.model.objects.filter(query)
        # we have to handle most cases manually because the fields are renamed
        val = requestDict.get('sortBy')
        if val is not None:
            if val == 'trialName':
                objects = objects.order_by('name')
            elif val == 'programName':
                objects = objects.order_by('programDbId__name')
            elif val == 'studyName':
                objects = objects.order_by('study__name')
            elif val == 'locationName':
                objects = objects.order_by('study__locationDbId__name')
            else:
                # was a bare except: only an unknown field name should map
                # to the ValueError; other errors must propagate
                try:
                    self.model._meta.get_field(val)
                except FieldDoesNotExist:
                    raise ValueError('Invalid value for "sortBy" parameter: {}'.format(val))
                objects = objects.order_by(val)
        return self.sortOrder(requestDict, objects)
class StudyDetails(GET_detail_response, TemplateView):
    """GET /studies/{studyDbId} — single study with full details."""
    model = models.Study
    serializer = serializers.StudyDetailsSerializer
    pk = 'studyDbId'
class StudyList(GET_response, TemplateView):
    """GET /studies — plain list of every study (no filters supported)."""
    model = models.Study
    serializer = serializers.StudySummarySerializer
    get_parameters = []
    def get_objects_GET(self, requestDict):
        return self.model.objects.all()
class StudySearch(GET_response, POST_JSON_response, UnsafeTemplateView):
    """Study search: GET with URL-encoded filters, POST with JSON lists.

    Join-based filters (seasons, germplasm, observation variables) can
    duplicate rows, so those set a ``distinct`` flag; sorting is validated
    against the Study model's fields.
    """
    model = models.Study
    serializer = serializers.StudySummarySerializer
    get_parameters = ['trialDbId', 'studyType', 'programDbId', 'locationDbId', 'seasonDbId', 'germplasmDbIds',
                      'observationVariableDbIds', 'active', 'sortBy', 'sortOrder']
    post_json_parameters = ['studyType', 'studyNames', 'studyLocations', 'programNames',
                            'germplasmDbIds', 'observationVariableDbIds', 'active', 'sortBy', 'sortOrder']
    def _parse_active(self, val):
        """Map 'true'/'false' (any case) to bool; raise ValueError otherwise."""
        val = val.lower()
        if val not in ['true', 'false']:
            raise ValueError('Invalid value for "active" parameter: {}'.format(val))
        return val == 'true'
    def _validated_sort_field(self, val):
        """Return *val* if it names a Study model field, else raise ValueError."""
        try:
            self.model._meta.get_field(val)
        except FieldDoesNotExist:
            # was a bare except: only an unknown field should become a 400
            raise ValueError('Invalid value for "sortBy" parameter: {}'.format(val))
        return val
    # this is actual ListStudySummaries
    def get_objects_GET(self, requestDict):
        """Build the filtered queryset from URL query parameters."""
        query = Q()
        distinct = False
        val = requestDict.get('trialDbId')
        if val is not None:
            query &= Q(trialDbId__pk=val)
        val = requestDict.get('studyType')
        if val is not None:
            query &= Q(studyType__name=val)
        val = requestDict.get('programDbId')
        if val is not None:
            query &= Q(trialDbId__programDbId__pk=val)
        val = requestDict.get('locationDbId')
        if val is not None:
            query &= Q(locationDbId__pk=val)
        val = requestDict.get('seasonDbId')
        if val is not None:
            # the API spec allows seasonDbId to be either a PK or a year
            query &= (Q(studyseason__seasonDbId__pk=val) | Q(studyseason__seasonDbId__year=val))
            distinct = True
        val = requestDict.get('germplasmDbIds')
        if val is not None:
            # safely parse the list-of-strings literal; literal_eval raises
            # ValueError or SyntaxError on malformed input
            try:
                val = literal_eval(val)
            except (ValueError, SyntaxError):
                raise ValueError('Invalid value for "germplasmDbIds" parameter: {}'.format(val))
            query &= Q(cropDbId__germplasm__pk__in=val)
            distinct = True
        val = requestDict.get('observationVariableDbIds')
        if val is not None:
            try:
                val = literal_eval(val)
            except (ValueError, SyntaxError):
                raise ValueError('Invalid value for "observationVariableDbIds" parameter: {}'.format(val))
            query &= Q(cropDbId__observationvariable__pk__in=val)
            distinct = True
        val = requestDict.get('active')
        if val is not None:
            query &= Q(active=self._parse_active(val))
        orderAttr = None
        val = requestDict.get('sortBy')
        if val is not None:
            orderAttr = self._validated_sort_field(val)
        objects = self.model.objects.filter(query)
        if distinct:
            # join-based filters can duplicate rows; collapse them
            objects = objects.distinct()
        if orderAttr:
            objects = objects.order_by(orderAttr)
        return self.sortOrder(requestDict, objects)
    def get_objects_POST(self, requestDict):
        """Build the filtered queryset from the JSON POST body (list filters)."""
        query = Q()
        distinct = False
        val = requestDict.get('studyType')
        if val is not None:
            # NOTE(review): GET filters on studyType__name but POST filters
            # on studyType directly — confirm which is intended.
            query &= Q(studyType=val)
        val = requestDict.get('studyNames')
        if val is not None:
            query &= Q(name__in=val)
        val = requestDict.get('studyLocations')
        if val is not None:
            query &= Q(locationDbId__name__in=val)
        val = requestDict.get('programNames')
        if val is not None:
            query &= Q(trialDbId__programDbId__name__in=val)
        val = requestDict.get('germplasmDbIds')
        if val is not None:
            query &= Q(cropDbId__germplasm__pk__in=val)
            distinct = True
        val = requestDict.get('observationVariableDbIds')
        if val is not None:
            query &= Q(cropDbId__observationvariable__pk__in=val)
            distinct = True
        val = requestDict.get('active')
        if val is not None:
            query &= Q(active=self._parse_active(val))
        orderAttr = None
        val = requestDict.get('sortBy')
        if val is not None:
            orderAttr = self._validated_sort_field(val)
        objects = self.model.objects.filter(query)
        if distinct:
            objects = objects.distinct()
        if orderAttr:
            objects = objects.order_by(orderAttr)
        return self.sortOrder(requestDict, objects)
class StudyObservationVariable(GET_URLPARAMS_response, UnsafeTemplateView):
    """GET /studies/{studyDbId}/observationVariables."""
    model = models.ObservationVariable
    serializer = serializers.ObservationVariableSerializer
    get_parameters = []
    def get_objects_GET(self, requestDict, **kwargs):
        # variables are attached to the study's crop, not the study itself
        study = models.Study.objects.get(studyDbId=kwargs.get('studyDbId'))
        variables = study.cropDbId.observationvariable_set.all()
        return variables
class StudyGermplasm(GET_URLPARAMS_response, UnsafeTemplateView):
    """GET /studies/{studyDbId}/germplasm."""
    model = models.Germplasm
    serializer = serializers.Study_GermplasmSerializer
    get_parameters = []
    def get_objects_GET(self, requestDict, **kwargs):
        # germplasm records are attached to the study's crop
        study = models.Study.objects.get(studyDbId=kwargs.get('studyDbId'))
        return study.cropDbId.germplasm_set.all()
class LocationList(GET_response, UnsafeTemplateView):
    """GET /locations — list locations, optionally filtered by type."""
    model = models.Location
    serializer = serializers.LocationSerializer
    get_parameters = ['locationType']
    def get_objects_GET(self, requestDict):
        location_type = requestDict.get('locationType')
        if location_type is None:
            # no filter supplied: return every location
            return self.model.objects.all()
        return self.model.objects.filter(Q(type=location_type))
class LocationDetails(GET_detail_response, UnsafeTemplateView):
    """GET /locations/{locationDbId} — single location record."""
    model = models.Location
    serializer = serializers.LocationSerializer
    pk = 'locationDbId'
| vpodpecan/brapi-python | jsonapi/views.py | views.py | py | 19,838 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.core.paginator.Paginator",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "django.core.paginator.EmptyPage",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "django.views.generic.View",
"line_number": 99,
"usage_type": "name"
},
... |
32044848439 |
import requests, os
import json
import openpyxl
import glob
states = ['Alabama',
'Alaska',
'Arizona',
'Arkansas',
'California',
'Colorado',
'Connecticut',
'Delaware',
'Florida',
'Georgia',
'Hawaii',
'Idaho',
'Illinois',
'Indiana',
'Iowa',
'Kansas',
'Kentucky',
'Louisiana',
'Maine',
'Maryland',
'Massachusetts',
'Michigan',
'Minnesota',
'Mississippi',
'Missouri',
'Montana',
'Nebraska',
'Nevada',
'New Hampshire',
'New Jersey',
'New Mexico',
'New York',
'North Carolina',
'North Dakota',
'Ohio',
'Oklahoma',
'Oregon',
'Pennsylvania',
'Rhode Island',
'South Carolina',
'South Dakota',
'Tennessee',
'Texas',
'Utah',
'Vermont',
'Virginia',
'Washington',
'West Virginia',
'Wisconsin',
'Wyoming',
'District of Columbia',
'Puerto Rico',
'Guam',
'American Samoa',
'U.S. Virgin Islands',
'Northern Mariana Islands']
# Batch-geocode every spreadsheet in the working directory: latitudes in
# column B and longitudes in column C are reverse-geocoded via ArcGIS and
# the resolved region name written into column D.
files = glob.glob('./*.xlsx')
print("You need to have this in a directory with the only .xlsx file is the one you want the addresses of")
for file in files:
    xl = openpyxl.load_workbook(file)
    sheet_name = xl.sheetnames[0]
    sheet = xl[sheet_name]
    print('Make sure rowid is in column A, latitude is in column B, and longitude in C')
    print('The location data will be in column D')
    try:
        i = 1
        # walk rows until the first one with a missing coordinate
        while True:
            latitude = sheet['B' + str(i)].value
            longitude = sheet['C' + str(i)].value
            if latitude is None or longitude is None:
                break
            try:
                # ArcGIS reverse-geocode; note the API takes longitude first
                url = f'http://geocode.arcgis.com/arcgis/rest/services/World/GeocodeServer/reverseGeocode?f=pjson&featureTypes=StreetInt&location={longitude},{latitude}'
                res = requests.get(url)
                x = json.loads(res.content)
                location = x['address']['Region']
                if location in states:
                    sheet['D' + str(i)] = location
                    print('OK')
                else:
                    # 'ZZZ ' prefix keeps non-US rows sortable to the bottom
                    sheet['D' + str(i)] = 'ZZZ ' + location
                    print('Outside US')
            except Exception:
                # network/JSON/shape failure: mark the row for manual review
                sheet['D' + str(i)] = 'Check This'
                print(f'{i} it failed')
            i = i + 1
    except Exception:
        print('This run had an error of some kind')
cwd = os.getcwd()
path = cwd + '/results'
if not os.path.exists(path):
    os.mkdir(path)
# NOTE(review): only the workbook from the final loop iteration is saved,
# and this line raises NameError when no .xlsx files are present — confirm
# the one-file assumption stated in the banner message above.
xl.save('results/youdidit.xlsx')
final = input("Program Complete. Hit ENTER to end.")
| nanites2000/lat_long_finder | lat_long_xls.py | lat_long_xls.py | py | 2,455 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "glob.glob",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "openpyxl.load_workbook",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_... |
15644547901 | # from datetime import datetime
import httpx
from django.http import JsonResponse
# Base URL of the Go backend that these Django views proxy.
API_BASE = "http://localhost:8080/api/v1"
from django.views.decorators.http import require_GET
@require_GET
def pokemons_golang(request):
    """Proxy the Go backend's pokemon-list endpoint and return its JSON."""
    list_url = API_BASE + "/pokemons/"
    client = httpx.Client()
    try:
        payload = client.get(list_url).json()
    finally:
        client.close()
    return JsonResponse(payload, safe=False)
@require_GET
def pokemon_golang(request, pokemon_name):
    """Proxy the Go backend's single-pokemon endpoint and return its JSON."""
    detail_url = "{}/pokemons/{}".format(API_BASE, pokemon_name)
    with httpx.Client() as http:
        payload = http.get(detail_url).json()
    return JsonResponse(payload, safe=False)
| pliniomikael/django-go-performance | backend/pokemon/views/golang_api.py | golang_api.py | py | 567 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "httpx.Client",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.views.decorators.http.require_GET",
"line_number": 11,
"usage_type": "name"
},
{
"ap... |
from django.contrib.messages.views import SuccessMessageMixin
from django.shortcuts import get_object_or_404, render
from django.urls import reverse_lazy
from django.utils.translation import gettext as _
from django.views import View
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.list import ListView

from task_manager.mixins import LoginRequiredMixin
from task_manager.tasks import forms
from task_manager.tasks.models import Task
class TasksListView(ListView):
    """List all tasks and expose a filter form in the template context."""
    model = Task
    template_name = 'task/tasks_list.html'
    context_object_name = 'tasks'

    def get_context_data(self, **kwargs):
        # Bind the filter to the current GET parameters; the request is
        # forwarded so the filter can use it (presumably for per-user
        # filtering -- confirm in forms.TaskFilter).
        context = super().get_context_data(**kwargs)
        context['filter'] = \
            forms.TaskFilter(self.request.GET, queryset=self.get_queryset(),
                             request=self.request)
        return context
class TaskDetailsView(View):
    """Show a single task together with the names of its labels."""
    # NOTE: these attributes are unused by a plain View.get (they mirror the
    # list view); kept so the class interface is unchanged.
    model = Task
    template_name = 'task/tasks_list.html'
    context_object_name = 'tasks'

    def get(self, request, *args, **kwargs):
        # get_object_or_404 turns a missing/invalid pk into an HTTP 404
        # instead of an unhandled Task.DoesNotExist (HTTP 500).
        task = get_object_or_404(Task, id=kwargs.get('pk'))
        task_labels = task.labels.values_list('name', flat=True)
        return render(request, 'task/task_details.html', context={
            'task': task, 'task_labels': task_labels
        })
class CreateTask(SuccessMessageMixin, CreateView):
    """Create a task; the logged-in user is recorded as its author."""
    form_class = forms.TaskCreateForm
    template_name = 'task/create_task.html'
    success_url = reverse_lazy('tasks_list')
    success_message = _('Task created')

    def form_valid(self, form):
        # Stamp the current user as the author before the form is saved.
        form.instance.author = self.request.user
        return super().form_valid(form)
class UpdateTask(SuccessMessageMixin, LoginRequiredMixin, UpdateView):
    """Edit an existing task (login required)."""
    model = Task
    form_class = forms.TaskUpdateForm
    template_name = 'task/update_task.html'
    success_url = reverse_lazy('tasks_list')
    success_message = _('Task changed')
class DeleteTask(LoginRequiredMixin, SuccessMessageMixin, DeleteView):
    """Delete a task after confirmation (login required)."""
    model = Task
    template_name = 'task/delete_task.html'
    success_url = reverse_lazy('tasks_list')
    success_message = _('Task deleted')
| Labidahrom/task-manager | task_manager/tasks/views.py | views.py | py | 2,134 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.views.generic.list.ListView",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "task_manager.tasks.models.Task",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "task_manager.tasks.forms.TaskFilter",
"line_number": 21,
"usage_type": ... |
71716837795 | # Modified from: https://github.com/pliang279/LG-FedAvg/blob/master/utils/train_utils.py
from torchvision import datasets, transforms
from models.Nets import MLP, CNNCifar100Multi, CNNCifarMulti, MLPMulti, CNN_FEMNISTMulti
from utils.sampling import noniid, noniid_global
import os
import json
from log_utils.logger import info_logger
# MNIST: tensor conversion + single-channel normalisation (dataset mean/std).
trans_mnist = transforms.Compose([transforms.ToTensor(),
                                  transforms.Normalize((0.1307,), (0.3081,))])
# CIFAR-10 train: crop/flip augmentation, then normalisation.
# NOTE(review): 0.485/0.456/0.406 etc. are the ImageNet statistics rather
# than CIFAR-10's own -- confirm this is intentional.
trans_cifar10_train = transforms.Compose([transforms.RandomCrop(32, padding=4),
                                          transforms.RandomHorizontalFlip(),
                                          transforms.ToTensor(),
                                          transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                               std=[0.229, 0.224, 0.225])])
# CIFAR-10 eval: normalisation only (no augmentation).
trans_cifar10_val = transforms.Compose([transforms.ToTensor(),
                                        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                             std=[0.229, 0.224, 0.225])])
# CIFAR-100 train: augmentation + CIFAR-100 mean/std.
trans_cifar100_train = transforms.Compose([transforms.RandomCrop(32, padding=4),
                                           transforms.RandomHorizontalFlip(),
                                           transforms.ToTensor(),
                                           transforms.Normalize(mean=[0.507, 0.487, 0.441],
                                                                std=[0.267, 0.256, 0.276])])
# CIFAR-100 eval: normalisation only.
trans_cifar100_val = transforms.Compose([transforms.ToTensor(),
                                         transforms.Normalize(mean=[0.507, 0.487, 0.441],
                                                              std=[0.267, 0.256, 0.276])])
def get_data(args):
    """Build the train/test datasets and the per-client non-IID splits.

    Returns (dataset_train, dataset_test, dict_users_train, dict_users_test,
    temp), where the dict_users_* map client index -> sample indices and
    `temp` maps client index -> the class shards assigned to that client.
    """
    if args.dataset == 'mnist':
        dataset_train = datasets.MNIST('./data/mnist/', train=True, download=True, transform=trans_mnist)
        dataset_test = datasets.MNIST('./data/mnist/', train=False, download=True, transform=trans_mnist)
        dict_users_train, rand_set_all = noniid(dataset_train, args.num_users, args.shard_per_user,
                                                args.num_classes)
        # rand_set_all is reused so each client's test shards match its train shards.
        dict_users_test, rand_set_all = noniid(dataset_test, args.num_users, args.shard_per_user, args.num_classes, rand_set_all=rand_set_all, testb=True)
    elif args.dataset == 'cifar10':
        dataset_train = datasets.CIFAR10('./data/cifar10', train=True, download=True, transform=trans_cifar10_train)
        dataset_test = datasets.CIFAR10('./data/cifar10', train=False, download=True, transform=trans_cifar10_val)
        dict_users_train, rand_set_all, global_train = noniid_global(dataset_train, args.num_users, args.shard_per_user, args.num_classes)
        dict_users_test, rand_set_all = noniid(dataset_test, args.num_users, args.shard_per_user, args.num_classes, rand_set_all=rand_set_all, testb=True)
    elif args.dataset == 'cifar100':
        dataset_train = datasets.CIFAR100('./data/cifar100', train=True, download=True, transform=trans_cifar100_train)
        dataset_test = datasets.CIFAR100('./data/cifar100', train=False, download=True, transform=trans_cifar100_val)
        dict_users_train, rand_set_all, global_train = noniid_global(dataset_train, args.num_users, args.shard_per_user,
                                                                     args.num_classes)
        dict_users_test, rand_set_all = noniid(dataset_test, args.num_users, args.shard_per_user, args.num_classes, rand_set_all=rand_set_all, testb=True)
    else:
        exit('Error: unrecognized dataset')
    # JSON-serialisable view of the shard assignment, logged for reproducibility.
    temp = {i: list(tmp) for i, tmp in enumerate(rand_set_all)}
    info_logger.info("rand_set_all: \n{}".format(str(temp)))
    return dataset_train, dataset_test, dict_users_train, dict_users_test, temp
    # NOTE(review): `global_train` (produced by noniid_global and referenced
    # by the commented-out return below) is unused, and would be undefined on
    # the mnist branch -- confirm before re-enabling that return.
    # return dataset_train, dataset_test, dict_users_train, dict_users_test, global_train
def read_data(train_data_dir, test_data_dir):
    """Load LEAF-style federated data from directories of JSON shards.

    Each .json file holds {'users': [...], 'user_data': {...}} and optionally
    'hierarchies'. Returns (clients, groups, train_data, test_data), where
    `clients` is the list of user ids present in the training data.
    """
    groups = []
    train_data = {}
    test_data = {}

    train_files = [f for f in os.listdir(train_data_dir) if f.endswith('.json')]
    for f in train_files:
        with open(os.path.join(train_data_dir, f), 'r') as inf:
            cdata = json.load(inf)
        if 'hierarchies' in cdata:
            groups.extend(cdata['hierarchies'])
        train_data.update(cdata['user_data'])

    test_files = [f for f in os.listdir(test_data_dir) if f.endswith('.json')]
    for f in test_files:
        with open(os.path.join(test_data_dir, f), 'r') as inf:
            cdata = json.load(inf)
        test_data.update(cdata['user_data'])

    # The original accumulated cdata['users'] into `clients` and then
    # overwrote it with the train_data keys anyway; the keys are the
    # authoritative list, so the dead accumulation is dropped.
    clients = list(train_data.keys())
    return clients, groups, train_data, test_data
def get_model(args):
    """Instantiate the network matching args.model / args.dataset and move
    it to args.device. Exits with an error for unknown combinations."""
    kind, data = args.model, args.dataset
    # 'cifar100' must be tested before 'cifar10' (substring match).
    if kind == 'cnn' and 'cifar100' in data:
        net = CNNCifar100Multi(args=args)
    elif kind == 'cnn' and 'cifar10' in data:
        net = CNNCifarMulti(args=args)
    elif kind == 'mlp' and 'mnist' in data:
        net = MLPMulti(dim_in=784, dim_hidden=256, dim_out=args.num_classes)
    elif kind == 'cnn' and 'femnist' in data:
        net = CNN_FEMNISTMulti(args=args)
    elif kind == 'mlp' and 'cifar' in data:
        net = MLP(dim_in=3072, dim_hidden=512, dim_out=args.num_classes)
    else:
        exit('Error: unrecognized model')
    return net.to(args.device)
| skyarg/FedEC | utils/train_utils.py | train_utils.py | py | 5,744 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torchvision.transforms.Compose",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 10,
"usage_type": "call"
},
{
... |
42027772083 | #!venv/bin/python
##This code is incomplete. Use at own risk.
#TODO: re-architect so there's client-side timekeeping if the server becomes unavailable
import requests
import json
import uuid
import ConfigParser
import sys
import argparse
import psutil
import os
import subprocess
# NOTE(review): defuser/defpass appear unused in this file -- the live
# credentials come from dman["user"]/dman["pass"]. Confirm before removing.
defuser="foo"
defpass="bar"
Config = ConfigParser.RawConfigParser()
Configfile = "./dman.cfg"
# NOTE(review): `global` at module scope is a no-op; dman is a module-level
# dict either way.
global dman
dman = {}
#DMAN defaults. To be written to Configfile if one doesn't exist.
dman["user"] = "foo"
dman["pass"] = "bar"
#If you change this value, you MUST modify "PATH=/opt/dman:" in autodecrypt.sh
dman["root"] = "/opt/dman/"
#domain+scriptname to query
dman["url"] = "http://localhost:5000/dman"
#Default deadman set timeout.
dman["deftimeout"] = "86400" #24 hours
##Name of encrypted LUKS device (i"m using LVM)
dman["luksopen"] = "/dev/system/encrypted_luks"
#name of cryptsetup luksopen device to be created (/dev/mapper/decryptedname)
dman["luksdecrypt"] = "/dev/mapper/decrypted_luks"
#Location to mount decrypted device
dman["mountdir"] = "/mnt/decryptmount/"
def Config_write():
    """Populate Config with the default `dman` settings plus a fresh UUID and
    write them to Configfile. Returns True on success; prints and re-raises
    on failure."""
    Config.add_section("main")
    dman["uuid"] = uuid.uuid4()
    for x in dman:
        Config.set("main", x, dman[x])
    Config.add_section("dirs")
    Config.set("dirs", "dir1", "/foo/bar")
    try:
        # NOTE(review): binary mode matches the original Python 2 code; under
        # Python 3, ConfigParser.write needs a text-mode handle -- confirm.
        with open(Configfile, "wb") as file:
            Config.write(file)
        # BUG FIX: the original wrote `print("...%s") % Configfile`, which
        # applies % to print()'s None return -- a TypeError on Python 3.
        # Formatting inside the call is valid on both 2 and 3.
        print("OK: wrote default config: %s" % Configfile)
        return True
    except:
        print("ERROR: failed to write config %s" % Configfile)
        raise
def Config_read():
    """Load Configfile and cache its 'main' and 'dirs' sections in the
    module-level ConfigMain / ConfigDirs globals. Returns True on success;
    prints and re-raises if a section is missing."""
    global ConfigMain
    global ConfigDirs
    Config.read(Configfile)
    try:
        ConfigMain = ConfigSectionMap("main")
        ConfigDirs = ConfigSectionMap("dirs")
    except:
        print("ERROR: Configfile incomplete")
        raise
    return True
def ConfigSectionMap(section):
    """Return all options of `section` as a plain dict of option -> value."""
    dict1 = {}
    options = Config.options(section)
    for option in options:
        try:
            dict1[option] = Config.get(section, option)
            if dict1[option] == -1:
                # NOTE(review): Config.get returns strings, so this branch
                # looks unreachable -- and DebugPrint is not defined anywhere
                # in this file (would raise NameError if hit). Confirm.
                DebugPrint("skip: %s" % option)
        except:
            # A failed lookup is recorded as None rather than aborting.
            print("exception on %s!" % option)
            dict1[option] = None
    return dict1
def killthings():
    """Kill every process holding files open in (or whose cwd is inside) the
    protected dirs, unmount those dirs, then close the LUKS mapping."""
    killpids = set()
    try:
        for proc in psutil.process_iter():
            lsof = proc.open_files()
            for l in lsof:
                for d in ConfigDirs:
                    if l[0].startswith(ConfigDirs[d]):
                        print(proc.pid, ConfigDirs[d])
                        killpids.add(proc.pid)
            for d in ConfigDirs:
                if proc.cwd().startswith(ConfigDirs[d]):
                    print(proc.pid, proc.cwd())
                    killpids.add(proc.pid)
    except:
        # NOTE: one unreadable process aborts the whole scan here.
        print("error, could not read process list. Run as root?")
    for p in killpids:
        try:
            if psutil.Process(p).is_running():
                psutil.Process(p).kill()
                # BUG FIX: the original did `print("killed: %d") % p`, i.e.
                # applied % to print()'s None return -- a TypeError on
                # Python 3. Format inside the call (valid on 2 and 3).
                print("killed: %d" % p)
        except:
            print("ERROR: failed to kill %d" % p)
    # Unmount Directories
    try:
        for d in ConfigDirs:
            subprocess.check_call(["umount", ConfigDirs[d]])
            print("OK: unmounted %s" % ConfigDirs[d])
    except:
        print("ERROR: failed to unmount %s" % ConfigDirs[d])
    # Stop LUKS volume
    try:
        subprocess.check_call(["cryptsetup", "close", ConfigMain["luksdecrypt"]])
    except:
        print("ERROR: failed to stop LUKS device %s" % ConfigMain["luksdecrypt"])
def main():
    """Parse CLI args and perform one deadman-switch operation against the
    server (post/get/put/getall/delete), or kill protected processes."""
    #Read Config, otherwise attempt to write a re-read a default one.
    try:
        Config_read()
        print("OK: Read existing config.")
    except:
        print("ERROR: Could not read existing config. Creating new...")
        try:
            Config_write()
            try:
                Config_read()
                print("OK: Read new config.")
            except:
                print("ERROR: Could not read new config. What??.")
        except:
            print("ERROR: Could not write new config.")
    #try:
    #    Config_read()
    #except:
    #    print("ERROR: Could not read new config.")
    parser = argparse.ArgumentParser(description="dman client")
    parser.add_argument("-po", "--post",
                        dest="postvar",
                        nargs="?",
                        const=ConfigMain["uuid"],
                        type=str,
                        help="Create new record")
    parser.add_argument("-t", "--time",
                        dest="timevar",
                        nargs="?",
                        const=ConfigMain["deftimeout"],
                        type=int,
                        help="Specify time for post/put")
    parser.add_argument("-g", "--get",
                        action="store_true", dest="getvar",
                        default=False,
                        help="get single record")
    parser.add_argument("-pu", "--put",
                        dest="putvar",
                        nargs="?",
                        const=ConfigMain["deftimeout"],
                        type=int,
                        help="Update existing record")
    parser.add_argument("-a", "--getall",
                        action="store_true", dest="getall",
                        default=False,
                        help="List all records")
    parser.add_argument("-d", "--delete",
                        dest="delete",
                        nargs="?",
                        const=ConfigMain["uuid"],
                        type=str,
                        help="delete current or specified record")
    parser.add_argument("-k", "--kill",
                        action="store_true", dest="kill",
                        default=False)
    args, leftovers = parser.parse_known_args()
    if args.kill:
        killthings()
        sys.exit(0)
    #TODO: report successful kill to server
    # NOTE(review): the local name `time` shadows the stdlib module name;
    # here it holds the timeout delta (seconds) for post/put requests.
    if args.timevar is not None:
        time = args.timevar
    else:
        time = ConfigMain["deftimeout"]
    try:
        if args.postvar:
            r = requests.post(ConfigMain["url"], data = {"uuid":"%s" % args.postvar, "delta":"%d" % int(time)}, auth=(ConfigMain["user"], ConfigMain["pass"]))
        if args.getvar:
            r = requests.get(ConfigMain["url"] + "/" + ConfigMain["uuid"], auth=(ConfigMain["user"], ConfigMain["pass"]))
            if r.status_code == 404: #bad response, node doesn"t exist yet
                print("STATUS: Creating new node")
                r = requests.post(ConfigMain["url"], data = {"uuid":"%s" % ConfigMain["uuid"], "delta":"%d" % int(time)}, auth=(ConfigMain["user"], ConfigMain["pass"]))
            else: #good response, node exists
                try:
                    j = json.loads(r.text)
                    if j["state"] == "alive":
                        print("ALIVE!!")
                    elif j["state"] == "dead":
                        # Server says the switch fired: kill local processes.
                        print("STATUS: node dead... killing")
                        killthings()
                    else:
                        print("ERROR: unknown exception")
                except ValueError:
                    print("ERROR: No JSON returned")
        elif args.putvar is not None:
            r = requests.put(ConfigMain["url"] + "/" + ConfigMain["uuid"], data = { "delta":"%d" % int(time) }, auth=(ConfigMain["user"], ConfigMain["pass"]))
        elif args.getall:
            r = requests.get(ConfigMain["url"], auth=(ConfigMain["user"], ConfigMain["pass"]))
        elif args.delete:
            r = requests.delete(ConfigMain["url"] + "/" + args.delete, auth=(ConfigMain["user"], ConfigMain["pass"]))
        try:
            # Bare `r` deliberately raises NameError (caught below) when no
            # request branch above ran, i.e. there is no response to print.
            r
            print("---JSON---")
            j = json.loads(r.text)
            print(json.dumps(j, indent=4))
        except:
            print("ERROR: No response or not JSON")
    except Exception as err:
        print("ERROR: request failure: {0}".format(err))
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
{
"api_name": "ConfigParser.RawConfigParser",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "psutil.process_iter",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "psutil.Pr... |
32185653886 | import urllib3
import ssl
from pyVmomi import vim
from pyVim import connect
from copy import copy
import datetime
from plugins.VCenter import PluginVCenterScanBase
from utils import output
from utils.consts import AllPluginTypes
# Suppress InsecureRequestWarning: this plugin connects to vCenter with TLS
# certificate verification disabled (see _create_unverified_context below).
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class PluginVCenterCertExpired(PluginVCenterScanBase):
    """Scan plugin: report ESXi hosts whose host certificate has expired."""
    display = "vCenter 证书过期日期"
    alias = "vc_cert_exp"
    p_type = AllPluginTypes.Scan

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def run_script(self, args) -> dict:
        """Connect to vCenter and compare each host certificate's notAfter
        date (at day precision) against today; expired hosts set status=1."""
        sslContext = None
        if hasattr(ssl, '_create_unverified_context'):
            # Skip certificate verification for the vCenter connection itself.
            sslContext = ssl._create_unverified_context()
        vc_cont = connect.SmartConnect(host=self.dc_ip, user=self.ldap_conf['user'], pwd=self.ldap_conf['password'],
                                       sslContext=sslContext)
        # Shallow copy of the base-class result template.
        result = copy(self.result)
        content = vc_cont.RetrieveContent()
        object_view = content.viewManager.CreateContainerView(content.rootFolder, [vim.HostSystem], True)
        instance_list = []
        for host_system in object_view.view:
            instance = {}
            try:
                # Probe access; some hosts raise when certificate info is
                # unavailable, which aborts the whole scan with the template.
                host_system.configManager.certificateManager.certificateInfo
            except Exception as e:
                output.debug(e)
                return result
            # Keep only the date part so the comparison is day-granular.
            time1 = str(host_system.configManager.certificateManager.certificateInfo.notAfter).split(' ')[0]
            time2 = str(datetime.datetime.now()).split(' ')[0]
            timecert = datetime.datetime.strptime(time1, "%Y-%m-%d")
            timenow = datetime.datetime.strptime(time2, "%Y-%m-%d")
            if timecert < timenow:
                result['status'] = 1
                instance['host'] = host_system.name
                instance['证书过期时间'] = timecert
            # NOTE(review): `instance` is appended even when the certificate
            # is still valid, so non-expired hosts contribute empty dicts --
            # confirm whether this append belongs inside the `if` above.
            instance_list.append(instance)
        result['data'] = {"instance_list": instance_list}
        return result
| Amulab/CAudit | plugins/VCenter/Plugin_VCenter_Scan_2011.py | Plugin_VCenter_Scan_2011.py | py | 1,995 | python | en | code | 250 | github-code | 1 | [
{
"api_name": "urllib3.disable_warnings",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "urllib3.exceptions",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "plugins.VCenter.PluginVCenterScanBase",
"line_number": 15,
"usage_type": "name"
},
{... |
22743221263 | from flask import Flask, Blueprint, render_template, request, send_file, redirect, url_for,session,json
import os
from jsot_to_csv import json_csv_conv
app = Flask(__name__)
# Blueprint holding the JSON-to-CSV conversion routes.
json_csv = Blueprint('json-csv', __name__)
# Uploads are saved to the user's Desktop (Windows-only: relies on USERPROFILE).
app.config['UPLOAD_FOLDER'] = os.path.join(os.environ["USERPROFILE"], 'Desktop')
@json_csv.route('/csv-json', methods=['POST', 'GET'])
def login():
    """Render the JSON-to-CSV upload page."""
    return render_template("json_to_csv.html")
@json_csv.route('/new-page_11', methods=['POST', 'GET'])
def new_fn():
    """Accept an uploaded .json file, convert it to CSV, stash the converted
    path in the session, and redirect to the download view."""
    if request.method == 'POST':
        file = request.files['file']
        # basename() strips directory components from the client-supplied
        # filename (path-traversal hardening; werkzeug's secure_filename
        # would be stricter still).
        filename = os.path.basename(file.filename)
        if filename.split('.')[-1] != 'json':
            # Not a .json upload: bounce back to the upload form.
            return redirect(url_for("json-csv.login"))
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        file.save(file_path)
        # (dead code removed: an unused json.dumps result, and the unused
        # `visibility`/`msg`/initial `name` locals)
        name = json_csv_conv(file_path)
        session['name'] = name
        return redirect(url_for("json-csv.downloadFile123", name=name))
@json_csv.route('/download12')
def downloadFile123():
    """Send the most recently converted file (stored in the session) as an
    attachment."""
    converted_path = session['name']
    return send_file(converted_path, as_attachment=True)
| hsamvel/Flask_App | website/json_to_csv.py | json_to_csv.py | py | 1,219 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.Blueprint",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,... |
19298224268 | from imutils import resize
import numpy as np
import time
import cv2
import csv
# Write columns in the x_values.csv
# Format [R:Int, G:Int, B:Int, Area:Float]
def write_col_x(rowcita):
    """Append one feature row [R, G, B, Area] to the features CSV."""
    with open('./dataset/x_values_test.csv', 'a', newline='') as handle:
        writer = csv.writer(handle, delimiter=',', quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerow(rowcita)
# Write columns in the y_values_csv
# Format [y:Int]
# The values of y correspond to a label
# 0 = chocorramo, 1 = jet_azul, 2 = jumbo_flow_blanca, 3 = jumbo_naranja, 4 = jumbo_roja
# 5 = fruna_verde, 6 = fruna_naranja, 7 = fruna_roja, 8 = fruna_amarilla
# Number of tests: 0 = 27; 1 = 31; 2 = 30; 3 = 38; 4 = 33; 5 = 30; 6 = 30; 7 = 30; 8 = 30; 9 = 30
def write_col_y(label):
    """Append one label row to the labels CSV (space-delimited)."""
    with open('./dataset/y_values_test.csv', 'a', newline='') as handle:
        csv.writer(handle, delimiter=' ', quotechar='|',
                   quoting=csv.QUOTE_MINIMAL).writerow(label)
def getRGB(image):
    """Return the average [R, G, B] of the bright foreground pixels of
    `image`, where "foreground" means pixels that differ from the reference
    belt image (dataset/banda.jpeg) by more than a fixed threshold and are
    near-white (> 150 on every BGR channel)."""
    kernelOP = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    kernelCL = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
    img1 = cv2.imread("dataset/banda.jpeg")
    img1 = cv2.morphologyEx(img1, cv2.MORPH_OPEN, kernelOP, iterations=2)
    img2 = image
    img2 = cv2.morphologyEx(img2, cv2.MORPH_CLOSE, kernelCL, iterations=2)
    diff = cv2.absdiff(img2, img1)
    mask = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    th = 35
    imask = mask > th
    # Black canvas with only the changed (foreground) pixels copied in.
    canvas = np.zeros_like(img2, np.uint8)
    canvas[imask] = img2[imask]
    rprom = 0
    gprom = 0
    bprom = 0
    cont = 0
    a, b, c = canvas.shape
    # NOTE(review): range(a-1)/range(b-1) skips the last row/column; kept
    # as-is to preserve the original sampling -- confirm intended.
    for i in range(a - 1):
        for j in range(b - 1):
            arr = canvas[i][j]
            if (arr > 150).all():
                bprom += arr[0]
                gprom += arr[1]
                rprom += arr[2]
                cont += 1
    if cont == 0:
        # BUG FIX: the original divided by cont unconditionally, raising
        # ZeroDivisionError when no pixel passed the brightness test.
        return [0, 0, 0]
    return [int(rprom / cont), int(gprom / cont), int(bprom / cont)]
# Morphology kernels for cleaning up the background-subtraction mask.
kernelOP = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
kernelCL = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
# Camera 0 at 640x480, fixed focus/exposure so lighting stays comparable
# between samples.
cap = cv2.VideoCapture(0)
cap.set(3,640)
cap.set(4,480)
cap.set(cv2.CAP_PROP_AUTOFOCUS, 0)
cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0.25)
cap.set(cv2.CAP_PROP_EXPOSURE , 0.4)
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
while(True):
    ret, frame = cap.read()
    # Crop to the conveyor-belt region of the frame.
    frame = frame[::, 95:525]
    image = frame
    image = resize(image, width=500)
    image = image[50:3500, 75:480]
    image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    thresh = fgbg.apply(image)
    cv2.imshow('No Background', thresh)
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernelOP, iterations=2)
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernelCL, iterations=2)
    # NOTE(review): 3-value unpacking is the OpenCV 3.x findContours
    # signature; OpenCV 2/4 return 2 values -- confirm the pinned version.
    im, contours, hierarchy= cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Only consider blobs within a plausible candy-sized area range.
    if len(contours) > 0 and cv2.contourArea(contours[0]) > 10000 and cv2.contourArea(contours[0]) < 80000:
        rect = cv2.minAreaRect(contours[0])
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(image,[box],0,(0,0,255),2)
        # Sample only when the object's centre crosses the capture band.
        if rect[0][1] > 250 and rect[0][1] < 350:
            area = rect[1][0] * rect[1][1]
            rgb = getRGB(frame)
            print('Area: ', area)
            print('Color: ', rgb)
            data = rgb + [area]
            write_col_x(data)
            # Hard-coded class label for this recording session (see the
            # label legend in the comments near write_col_y).
            write_col_y('9')
    # area = cv2.contourArea(contours[0])
    # cv2.drawContours(image, contours, -1, (0,255,0), 2)
    cv2.imshow("objects Found", image)
    cv2.imshow('Thresh', thresh)
    time.sleep(0.01)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| dameos/computer_vision_classification | write_dataset_using_video.py | write_dataset_using_video.py | py | 3,757 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "csv.writer",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "csv.QUOTE_MINIMAL",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "csv.writer",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "csv.QUOTE_MINIMAL",
... |
19401162497 | """
Write a Python program to calculate number of days between two dates. Note: try to import datetime module
Sample dates : (2014, 7, 2), (2014, 7, 11)
Expected output : 9 days
"""
from datetime import date
d1 = date(2014, 7, 2)
d2 = date(2014, 7, 11)
nums = d2 - d1
print(nums.days) | kallykj/learnpython | FromW3resource/src/basic14.py | basic14.py | py | 286 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.date",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 10,
"usage_type": "call"
}
] |
28106726008 | ##################
# Author : Sooraj Bharadwaj
# Date: 04/13/2022
#################
# IMPORTS
import wikipedia as wk
import json
import tkinter as tk
def randomPageGenerator():
    """
    This function generates a random page from the wikipedia.
    @param: None
    @return: dict with 'page' (the wikipedia page object) and 'meta'
             (its title and category list)
    """
    random_title = wk.random(pages=1)
    wiki_page = wk.page(random_title)
    return {
        'page': wiki_page,
        'meta': {
            'title': random_title,
            'categories': wiki_page.categories,
        },
    }
def getCenterPoints(root, window_dim):
    """Return the (x, y) top-left offset that centers a window of
    window_dim = (width, height) on root's screen."""
    screen_w = root.winfo_screenwidth()
    screen_h = root.winfo_screenheight()
    offset_x = int(screen_w / 2 - window_dim[0] / 2)
    offset_y = int(screen_h / 2 - window_dim[1] / 2)
    return (offset_x, offset_y)
def displayPage(root, page_obj):
    # TODO: not yet implemented -- intended to render page_obj's article
    # content into the given Tk root (gui() wires this to its button).
    pass
def gui(page_obj):
    """Build and run the Tk window showing the random page's title with a
    'Show this article' button."""
    root = tk.Tk()
    root.title("Random Wikipedia Page")
    # Display the page title
    title_label = tk.Label(root, text=page_obj['meta']['title'])
    title_label.pack()
    # Determine window placement (center)
    centr = getCenterPoints(root, (800, 800))
    # BUG FIX: the original passed `command=displayPage(root, page_obj)`,
    # which *calls* displayPage immediately and binds its return value (None)
    # to the button. Wrap it in a lambda so it runs on click instead.
    read_more_btn = tk.Button(
        root,
        text="Show this article",
        command=lambda: displayPage(root, page_obj)
    )
    # Display Read More button
    read_more_btn.pack(
        side=tk.BOTTOM,
        fill=tk.X,
        expand=True
    )
    root.geometry(f"800x800+{centr[0]}+{centr[1]}")
    root.mainloop()
| surajbharadwaj17/random-wiki | util.py | util.py | py | 1,860 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "wikipedia.random",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "wikipedia.page",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_... |
3178436922 | import asyncio
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Set, List, Tuple, Dict
import aiosqlite
from blspy import G1Element
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.ints import uint32, uint64
from chia.util.lru_cache import LRUCache
from chia.util.streamable import streamable, Streamable
@dataclass(frozen=True)
@streamable
class FarmerRecord(Streamable):
    """Immutable, serializable record of one farmer as stored by the pool
    (one row of the `farmer` table in PoolStore)."""
    singleton_genesis: bytes32  # primary key in the farmer table
    owner_public_key: G1Element
    pool_puzzle_hash: bytes32
    relative_lock_height: uint32
    p2_singleton_puzzle_hash: bytes32  # indexed for reorg lookups (see PoolStore.create)
    blockchain_height: uint32  # Height of the singleton (might not be the last one)
    singleton_coin_id: bytes32  # Coin id of the singleton (might not be the last one)
    points: uint64
    difficulty: uint64
    rewards_target: bytes32
    is_pool_member: bool  # If the farmer leaves the pool, this gets set to False
class PoolStore:
    """SQLite-backed persistence for FarmerRecords (via aiosqlite)."""
    connection: aiosqlite.Connection
    lock: asyncio.Lock

    @classmethod
    async def create(cls):
        """Open (or create) pooldb.sqlite and ensure the farmer table and
        its p2_singleton_puzzle_hash index exist."""
        self = cls()
        self.db_path = Path("pooldb.sqlite")
        self.connection = await aiosqlite.connect(self.db_path)
        self.lock = asyncio.Lock()
        await self.connection.execute("pragma journal_mode=wal")
        await self.connection.execute("pragma synchronous=2")
        await self.connection.execute(
            (
                "CREATE TABLE IF NOT EXISTS farmer("
                "singleton_genesis text PRIMARY KEY,"
                " owner_public_key text,"
                " pool_puzzle_hash text,"
                " relative_lock_height bigint,"
                " p2_singleton_puzzle_hash text,"
                " blockchain_height bigint,"
                " singleton_coin_id text,"
                " points bigint,"
                " difficulty bigint,"
                " rewards_target text,"
                " is_pool_member tinyint)"
            )
        )
        # Useful for reorg lookups
        await self.connection.execute("CREATE INDEX IF NOT EXISTS scan_ph on farmer(p2_singleton_puzzle_hash)")
        await self.connection.commit()
        self.coin_record_cache = LRUCache(1000)
        return self

    @staticmethod
    def _row_to_record(row) -> FarmerRecord:
        """Deserialize one `farmer` table row into a FarmerRecord."""
        return FarmerRecord(
            bytes.fromhex(row[0]),
            G1Element.from_bytes(bytes.fromhex(row[1])),
            bytes.fromhex(row[2]),
            row[3],
            bytes.fromhex(row[4]),
            row[5],
            bytes.fromhex(row[6]),
            row[7],
            row[8],
            bytes.fromhex(row[9]),
            True if row[10] == 1 else False,
        )

    async def add_farmer_record(self, farmer_record: FarmerRecord):
        """Insert (or replace, keyed by singleton_genesis) a farmer record."""
        cursor = await self.connection.execute(
            "INSERT OR REPLACE INTO farmer VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
            (
                farmer_record.singleton_genesis.hex(),
                bytes(farmer_record.owner_public_key).hex(),
                farmer_record.pool_puzzle_hash.hex(),
                farmer_record.relative_lock_height,
                farmer_record.p2_singleton_puzzle_hash.hex(),
                farmer_record.blockchain_height,
                farmer_record.singleton_coin_id.hex(),
                farmer_record.points,
                farmer_record.difficulty,
                farmer_record.rewards_target.hex(),
                int(farmer_record.is_pool_member),
            ),
        )
        await cursor.close()
        await self.connection.commit()

    async def get_farmer_record(self, singleton_genesis: bytes32) -> Optional[FarmerRecord]:
        """Look up a farmer by singleton_genesis; None if not present."""
        # TODO: use cache
        cursor = await self.connection.execute(
            "SELECT * from farmer where singleton_genesis=?", (singleton_genesis.hex(),)
        )
        row = await cursor.fetchone()
        if row is None:
            return None
        return self._row_to_record(row)

    async def get_pay_to_singleton_phs(self) -> Set[bytes32]:
        """Return the set of all p2_singleton puzzle hashes in the table."""
        cursor = await self.connection.execute("SELECT p2_singleton_puzzle_hash from farmer")
        rows = await cursor.fetchall()
        all_phs: Set[bytes32] = set()
        for row in rows:
            all_phs.add(bytes32(bytes.fromhex(row[0])))
        return all_phs

    async def get_farmer_records_for_p2_singleton_phs(self, puzzle_hashes: Set[bytes32]) -> List[FarmerRecord]:
        """Fetch all farmer records whose p2_singleton puzzle hash is in
        `puzzle_hashes`."""
        puzzle_hashes_db = tuple([ph.hex() for ph in list(puzzle_hashes)])
        # BUG FIX: the original built the "?" placeholders but never passed
        # the values, so sqlite raised "Incorrect number of bindings".
        cursor = await self.connection.execute(
            f'SELECT * from farmer WHERE p2_singleton_puzzle_hash in ({"?," * (len(puzzle_hashes_db) - 1)}?) ',
            puzzle_hashes_db,
        )
        rows = await cursor.fetchall()
        return [self._row_to_record(row) for row in rows]

    async def get_farmer_points_and_ph(self) -> List[Tuple[uint64, bytes32]]:
        """Return (total_points, rewards_target) pairs, points summed per
        rewards puzzle hash."""
        cursor = await self.connection.execute("SELECT points, rewards_target from farmer")
        rows = await cursor.fetchall()
        accumulated: Dict[bytes32, uint64] = {}
        for row in rows:
            points: uint64 = uint64(row[0])
            ph: bytes32 = bytes32(bytes.fromhex(row[1]))
            # BUG FIX: the original subscripted the hash with the dict
            # (`ph[accumulated] += points`), a TypeError at runtime; the
            # accumulation is per puzzle hash.
            if ph in accumulated:
                accumulated[ph] = uint64(accumulated[ph] + points)
            else:
                accumulated[ph] = points
        ret: List[Tuple[uint64, bytes32]] = []
        for ph, total_points in accumulated.items():
            ret.append((total_points, ph))
        return ret

    async def clear_farmer_points(self) -> None:
        """Reset every farmer's point total to zero."""
        cursor = await self.connection.execute("UPDATE farmer set points=0")
        await cursor.fetchall()
        # BUG FIX: persist the reset -- the original never committed, unlike
        # the other mutating methods in this class.
        await self.connection.commit()
| amuDev/Chia-Pooling | store.py | store.py | py | 6,020 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "chia.util.streamable.Streamable",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "chia.types.blockchain_format.sized_bytes.bytes32",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "blspy.G1Element",
"line_number": 19,
"usage_type": "name... |
28964666541 | import os
import random
import time
from copy import deepcopy, copy
from twisted.conch import recvline
from twisted.conch.insults import insults
from honeySSH import core
from honeySSH.core.config import config
from honeySSH.core import honeyFilesystem
class HoneyBaseProtocol(insults.TerminalProtocol):
    """Base terminal protocol for the SSH honeypot: holds the fake
    filesystem, the fake command table, and the command-handler stack."""

    def __init__(self, user, env):
        self.cfg = config()
        self.user = user
        self.env = env
        self.hostname = self.cfg.get('ssh', 'hostname')
        # Each session gets its own deep copy of the fake filesystem so one
        # attacker's changes don't leak into other sessions.
        self.fs = honeyFilesystem.HoneyFilesystem(deepcopy(self.env.fs))
        if self.fs.exists(user.home):
            self.cwd = user.home
        else:
            self.cwd = '/'
        # commands is also a copy so we can add stuff on the fly
        self.commands = copy(self.env.commands)
        self.password_input = False
        # Stack of active command handlers; the top one receives input lines.
        self.cmdstack = []

    def logDispatch(self, msg):
        """Forward a dispatch message to the transport log (currently disabled)."""
        transport = self.terminal.transport.session.conn.transport
        msg = ':dispatch: ' + msg
        # transport.factory.logDispatch(transport.transport.sessionno, msg)

    def logCommand(self, command):
        """Record an attacker-entered command via the transport's logger."""
        transport = self.terminal.transport.session.conn.transport
        transport.logger.add_command(command)

    def connectionMade(self):
        """Show the MOTD and capture client connection details."""
        self.displayMOTD()
        transport = self.terminal.transport.session.conn.transport
        self.realClientIP = transport.transport.getPeer().host
        self.clientVersion = transport.otherVersionString
        self.logintime = transport.logintime
        # self.ttylog_file = transport.ttylog_file
        # source IP of client in user visible reports (can be fake or real)
        cfg = config()
        if cfg.has_option('ssh', 'fake_addr'):
            self.clientIP = cfg.get('ssh', 'fake_addr')
        else:
            self.clientIP = self.realClientIP

    def displayMOTD(self):
        """Print /etc/motd from the fake filesystem; silently skip if absent."""
        try:
            self.writeln(self.fs.file_contents('/etc/motd'))
        except:
            pass

    def connectionLost(self, reason):
        pass

    def getCommand(self, cmd, paths):
        """Resolve `cmd` to a command handler: by name, by explicit path, or
        by searching `paths`. Returns None when unresolvable."""
        if not len(cmd.strip()):
            return None
        path = None
        # If the command is in the command table, return it directly.
        if cmd in self.commands:
            return self.commands[cmd]
        # Starts with '.' or '/': resolve it as a filesystem path.
        if cmd[0] in ('.', '/'):
            path = self.fs.parse_path(cmd, self.cwd)
            if not self.fs.exists(path):
                return None
        else:
            # Otherwise search each PATH-style entry for the command.
            for i in ['%s/%s' % (self.fs.parse_path(x, self.cwd), cmd) \
                    for x in paths]:
                if self.fs.exists(i):
                    path = i
                    break
            pass
        if path in self.commands:
            return self.commands[path]
        return None

    def lineReceived(self, line):
        # Route input to whichever command handler is on top of the stack.
        if len(self.cmdstack):
            self.cmdstack[-1].lineReceived(line)

    def writeln(self, data):
        """Write `data` followed by a newline to the terminal."""
        self.terminal.write(data)
        self.terminal.nextLine()

    def call_command(self, cmd, *args):
        """Instantiate a command handler, push it, and start it."""
        obj = cmd(self, *args)
        self.cmdstack.append(obj)
        obj.start()

    def addInteractor(self, interactor):
        transport = self.terminal.transport.session.conn.transport
        transport.interactors.append(interactor)

    def delInteractor(self, interactor):
        transport = self.terminal.transport.session.conn.transport
        transport.interactors.remove(interactor)

    def uptime(self, reset = None):
        """Return seconds since the factory started; optionally reset the
        start time to `reset`."""
        transport = self.terminal.transport.session.conn.transport
        r = time.time() - transport.factory.starttime
        if reset:
            transport.factory.starttime = reset
        return r
class HoneyPotInteractiveProtocol(HoneyBaseProtocol, recvline.HistoricRecvLine):
    """Interactive honeypot shell with line editing, history and control keys."""
    def __init__(self, user, env):
        recvline.HistoricRecvLine.__init__(self)
        HoneyBaseProtocol.__init__(self, user, env)
    def connectionMade(self):
        """Set up the shell command and register control-key handlers."""
        HoneyBaseProtocol.connectionMade(self)
        recvline.HistoricRecvLine.connectionMade(self)
        self.cmdstack = [core.honeyCMD.HoneyShell(self)]
        transport = self.terminal.transport.session.conn.transport
        # todo
        # transport.factory.sessions[transport.transport.sessionno] = self
        # Keys arrive as single bytes: ^D, ^U, ^C and TAB respectively.
        self.keyHandlers.update({
            b'\x04': self.handle_CTRL_D,
            b'\x15': self.handle_CTRL_U,
            b'\x03': self.handle_CTRL_C,
            b'\x09': self.handle_TAB,
        })
    # this doesn't seem to be called upon disconnect, so please use
    # HoneyPotTransport.connectionLost instead
    def connectionLost(self, reason):
        HoneyBaseProtocol.connectionLost(self, reason)
        recvline.HistoricRecvLine.connectionLost(self, reason)
    # Overriding to prevent terminal.reset()
    def initializeScreen(self):
        self.setInsertMode()
    def call_command(self, cmd, *args):
        """Switch to typeover mode before delegating to the base class."""
        self.setTypeoverMode()
        HoneyBaseProtocol.call_command(self, cmd, *args)
    def keystrokeReceived(self, keyID, modifier):
        # transport = self.terminal.transport.session.conn.transport
        # if type(keyID) == type(''):
        #     ttylog.ttylog_write(transport.ttylog_file, len(keyID),
        #                         ttylog.TYPE_INPUT, time.time(), keyID)
        recvline.HistoricRecvLine.keystrokeReceived(self, keyID, modifier)
    # Easier way to implement password input?
    def characterReceived(self, ch, moreCharactersComing):
        """Buffer a typed character; echo it unless password input is active."""
        if self.mode == 'insert':
            self.lineBuffer.insert(self.lineBufferIndex, ch)
        else:
            self.lineBuffer[self.lineBufferIndex:self.lineBufferIndex+1] = [ch]
        self.lineBufferIndex += 1
        if not self.password_input:
            self.terminal.write(ch)
    def handle_RETURN(self):
        """Commit the line to history (shell level only) and dispatch it."""
        if len(self.cmdstack) == 1:
            if self.lineBuffer:
                self.historyLines.append(''.join([c.decode() if isinstance(c, bytes) else c for c in self.lineBuffer]) )
            self.historyPosition = len(self.historyLines)
        # Normalize the buffer to bytes before the base class consumes it.
        self.lineBuffer = [c.encode('utf8') if isinstance(c, str) else c for c in self.lineBuffer]
        return recvline.RecvLine.handle_RETURN(self)
    def handle_CTRL_C(self):
        self.cmdstack[-1].ctrl_c()
    def handle_CTRL_U(self):
        """Erase everything to the left of the cursor (kill-line-backward)."""
        for i in range(self.lineBufferIndex):
            self.terminal.cursorBackward()
            self.terminal.deleteCharacter()
        self.lineBuffer = self.lineBuffer[self.lineBufferIndex:]
        self.lineBufferIndex = 0
    def handle_CTRL_D(self):
        self.call_command(self.commands['exit'])
    def handle_TAB(self):
        self.cmdstack[-1].handle_TAB()
class LoggingServerProtocol(insults.ServerProtocol):
def connectionMade(self):
transport = self.transport.session.conn.transport
# transport.ttylog_file = '%s/tty/%s-%s.log' % \
# (config().get('honeypot', 'log_path'),
# time.strftime('%Y%m%d-%H%M%S'),
# int(random.random() * 10000))
# print('Opening TTY log: %s' % transport.ttylog_file)
# # ttylog.ttylog_open(transport.ttylog_file, time.time())
# transport.ttylog_open = True
insults.ServerProtocol.connectionMade(self)
def write(self, bytes, noLog = False):
# transport = self.transport.session.conn.transport
# for i in transport.interactors:
# i.sessionWrite(bytes)
# if transport.ttylog_open and not noLog:
# ttylog.ttylog_write(transport.ttylog_file, len(bytes),
# ttylog.TYPE_OUTPUT, time.time(), bytes)
insults.ServerProtocol.write(self, bytes)
# this doesn't seem to be called upon disconnect, so please use
# HoneyPotTransport.connectionLost instead
def connectionLost(self, reason):
insults.ServerProtocol.connectionLost(self, reason) | Jerry-zhuang/HoneySSH | honeySSH/core/honeyProtocol.py | honeyProtocol.py | py | 7,808 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "twisted.conch.insults.insults.TerminalProtocol",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "twisted.conch.insults.insults",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "honeySSH.core.config.config",
"line_number": 16,
"usage... |
12171467469 | import torch
import torch.nn as nn
import numpy as np
import math
# Embeds each token in vocab into vector space. Simple lookup table.
class TokenEmbedding(nn.Module):
    """Token lookup embedding scaled by sqrt(dim), per the Transformer convention."""

    def __init__(self, vocab_size: int = 256, dim: int = 64) -> None:
        super().__init__()
        self.dim = dim
        self.embedding = nn.Embedding(vocab_size, dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Scale so token embeddings and positional encodings have
        # comparable magnitude.
        return self.embedding(x) * math.sqrt(self.dim)
# Captures information about word order when passing tokens through self-attention
class PositionalEncoding(nn.Module):
    """Adds fixed sinusoidal position information (sequence-first layout).

    The table is registered as a (maxlen, 1, emb_size) buffer so it follows
    the module across devices without being a trainable parameter.
    """

    def __init__(self, emb_size: int, dropout: float, maxlen: int = 5000):
        super().__init__()
        # Frequency term exp(-2i * ln(10000) / emb_size) for even indices.
        inv_freq = torch.exp(-torch.arange(0, emb_size, 2) * math.log(10000) / emb_size)
        positions = torch.arange(0, maxlen).reshape(maxlen, 1)
        angles = positions * inv_freq
        table = torch.zeros((maxlen, emb_size))
        table[:, 0::2] = torch.sin(angles)
        table[:, 1::2] = torch.cos(angles)
        self.dropout = nn.Dropout(dropout)
        # Broadcast dimension for the batch axis: (maxlen, 1, emb_size).
        self.register_buffer("pos_embedding", table.unsqueeze(-2))

    def forward(self, token_embedding: torch.Tensor):
        seq_len = token_embedding.size(0)
        return self.dropout(token_embedding + self.pos_embedding[:seq_len, :])
| VashishtMadhavan/transformers-scratch | embeddings.py | embeddings.py | py | 1,363 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
16503325102 | #!/usr/bin/env python
import calendar
import cStringIO
import datetime
import os
import re
import stat
import sys
import traceback

import bXML
try:
import xattr
kXattrAvailable= True
except:
kXattrAvailable= False
def pathToList(path):
    """Split *path* into its individual name components.

    Uses os.path.split() repeatedly so it honors the platform separator.
    Unlike the previous version, the empty component produced when the
    path is exhausted (or an absolute path reaches its root) is dropped,
    so pathToList("a/b/c") == ["a", "b", "c"] and pathToList("") == [].
    """
    elements = []
    while True:
        (path, name) = os.path.split(path)
        if name:
            elements.insert(0, name)
        # Stop once nothing remains to split (empty name means the walk
        # hit "" or a root like "/"; empty path means we consumed it all).
        if not name or not path:
            break
    return elements
""" TODO
Have a validate/fix method
have a way to validate/fix from a sax stream
"""
kCharactersToEscapePattern= re.compile(r"([^a-zA-Z0-9_])")

def escape(string):
    """Replace every character outside [a-zA-Z0-9_] with $XX (hex of its ordinal)."""
    def _encode(match):
        return "$%02x" % ord(match.group(1))
    return kCharactersToEscapePattern.sub(_encode, string)
kEscapePattern= re.compile(r"\$([0-9A-Fa-f][0-9A-Fa-f])")

def unescape(string):
    """Inverse of escape(): turn every $XX hex escape back into its character.

    Fixed: the previous code called the nonexistent builtin ``char`` and
    raised NameError on any string that actually contained an escape.
    """
    return kEscapePattern.sub(lambda m: chr(int(m.group(1), 16)), string)
#kBetterDateFormat= "%Y/%m/%d@%H:%M:%S.%f" # not supported in 2.5.1 (Mac OS X 10.5/ppc)
kReliableDateFormat= "%Y/%m/%d@%H:%M:%S"

def formatDate(timestamp):
    """Format a POSIX timestamp as a UTC date string (kReliableDateFormat)."""
    dt= datetime.datetime.utcfromtimestamp(timestamp)
    return dt.strftime(kReliableDateFormat)

def parseDate(timestampString):
    """Inverse of formatDate(): parse a UTC date string to a POSIX timestamp.

    Fixed: ``calendar`` was never imported, so this raised NameError on
    first use (import added to the module's import block).
    """
    dt= datetime.datetime.strptime(timestampString, kReliableDateFormat)
    return calendar.timegm(dt.timetuple())
# Assumption: path is a sub-path of base
# return relative path from base to path
# os.path.join(base, result) == path
def getSubPathRelative(base, path):
    """Return the path of *path* relative to *base*.

    Assumes *path* lives underneath *base*; the result satisfies
    os.path.join(base, result) == path (up to samefile equivalence).
    Both paths must exist on disk, because os.path.samefile() stats them.
    """
    # NOTE(review): this is a substring test, not a prefix test -- "/a"
    # also matches "/b/a"; presumably callers always pass a true ancestor.
    # SyntaxError (sic) matches this module's existing error convention.
    if path.find(base) < 0:
        raise SyntaxError(path+" is not in "+base)
    relative= ""
    # Walk upward from *path*, prepending trailing components until we
    # reach a directory that is the same file as *base*.
    while not os.path.samefile(base, path):
        (path, name)= os.path.split(path)
        if len(relative) == 0:
            relative= name
        else:
            relative= os.path.join(name, relative)
    return relative
class Manifest:
    """XML manifest of a directory tree: files, directories, symlinks, xattrs.

    Construct either from a directory path (the tree is scanned into a new
    XML document) or from an XML file path / XML text (parsed via bXML).
    Python 2 code (uses basestring / cStringIO).
    """
    def __init__(self, dirOrPathOrText, skipPaths= None, skipExtensions= None, skipNames= None, doHash= True, skipAttributes= None):
        # NOTE(review): doHash and skipAttributes are accepted and threaded
        # through, but __addElement never uses them -- confirm intent.
        self.__path= None
        self.__contents= None
        if os.path.isdir(dirOrPathOrText):
            self.__path= dirOrPathOrText
            self.__parseDirectory(skipPaths, skipExtensions, skipNames, doHash, skipAttributes)
        else:
            self.__contents= bXML.link(dirOrPathOrText)
    def unlink(self):
        """Release the underlying XML document."""
        self.__contents.unlink()
    def save(self, pathOrFile= None):
        """Write the manifest XML to a path or file-like object.

        Defaults to the directory path given at construction; raises
        SyntaxError when no destination is known.
        """
        if not pathOrFile:
            pathOrFile= self.__path
        if not pathOrFile:
            raise SyntaxError("No Path Specified")
        if isinstance(pathOrFile, basestring):
            file= open(pathOrFile, 'w')
            self.__contents.writexml(file)
            file.close()
        else:
            self.__contents.writexml(pathOrFile)
    def __skipped(self, relativePath, skipPaths, skipExtensions, skipNames):
        """Return True when *relativePath* matches any skip filter."""
        if skipPaths:
            for item in skipPaths:
                if relativePath.startswith(item):
                    return True
        if skipNames or skipExtensions:
            pathNames= pathToList(relativePath)
            for name in pathNames:
                if skipNames and name in skipNames:
                    return True
                if skipExtensions:
                    for ext in skipExtensions:
                        if name.endswith(ext):
                            return True
        return False
    def __addElement(self, relativePath, doHash, skipAttributes):
        """Append one <file>/<directory>/<link> element for *relativePath*.

        Unknown file types are skipped; any error while inspecting the
        path is printed to stderr and the entry is silently dropped.
        """
        try:
            fullPath= os.path.join(self.__path, relativePath)
            stats= os.lstat(fullPath)
            properties= {'path': relativePath}
            if stat.S_ISLNK(stats.st_mode):
                kind= "link"
                properties['target']= os.readlink(fullPath)
            elif stat.S_ISDIR(stats.st_mode):
                kind= "directory"
            elif stat.S_ISREG(stats.st_mode):
                kind= "file"
                properties['size']= str(stats.st_size)
            else:
                return None # unknown file type, skip it
            properties['modified']= formatDate(stats.st_mtime)
            mods= stat.S_IMODE(stats.st_mode)
            # readonly: no write bit anywhere; executable: any execute bit.
            if mods & (stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH) == 0:
                properties['readonly']= "true"
            if mods & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) != 0:
                properties['executable']= "true"
            bXML.appendText(self.__contents.documentElement, "\n\t")
            element= bXML.appendElement(self.__contents.documentElement, kind, properties)
            if kXattrAvailable:
                # Record each extended attribute as an escaped <xattr> child.
                try:
                    attrs= xattr.listxattr(fullPath)
                    for attr in attrs:
                        try:
                            value= xattr.getxattr(fullPath, attr, True)
                            bXML.appendText(element, "\n\t\t")
                            tag= bXML.appendElement(element, "xattr", {'name': attr})
                            bXML.appendText(tag, escape(value))
                        except: # can't read this attribute
                            exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
                            traceback.print_exception(exceptionType, exceptionValue, exceptionTraceback, limit=5, file=sys.stderr)
                            pass
                except: # something went wrong
                    exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
                    traceback.print_exception(exceptionType, exceptionValue, exceptionTraceback, limit=5, file=sys.stderr)
                    pass
            if element.firstChild:
                bXML.appendText(element, "\n\t")
        except: # skip files we can't look at
            exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
            traceback.print_exception(exceptionType, exceptionValue, exceptionTraceback, limit=5, file=sys.stderr)
            pass
    def __parseDirectory(self, skipPaths, skipExtensions, skipNames, doHash, skipAttributes):
        """Scan self.__path recursively and build the manifest document.

        The skip filters themselves are recorded as <filter> elements so
        a reader knows what was deliberately excluded.
        """
        if self.__contents != None:
            raise SyntaxError("Already Created")
        self.__contents= bXML.create("manifest")
        if skipExtensions:
            for item in skipExtensions:
                bXML.appendText(self.__contents.documentElement, "\n\t")
                bXML.appendElement(self.__contents.documentElement, "filter", {'extension': item})
        if skipPaths:
            for item in skipPaths:
                bXML.appendText(self.__contents.documentElement, "\n\t")
                bXML.appendElement(self.__contents.documentElement, "filter", {'path': item})
        if skipNames:
            for item in skipNames:
                bXML.appendText(self.__contents.documentElement, "\n\t")
                bXML.appendElement(self.__contents.documentElement, "filter", {'name': item})
        for path, dirs, files in os.walk(self.__path):
            relativePath= getSubPathRelative(self.__path, path)
            if self.__skipped(relativePath, skipPaths, skipExtensions, skipNames):
                continue
            # Include directories in the same pass as files.
            files.extend(dirs)
            for item in files:
                if self.__skipped(item, None, skipExtensions, skipNames):
                    continue
                # NOTE(review): itemFullPath is computed but never used.
                itemFullPath= os.path.join(path, item)
                itemRelativePath= os.path.join(relativePath, item)
                if self.__skipped(itemRelativePath, skipPaths, None, None):
                    continue
                self.__addElement(itemRelativePath, doHash, skipAttributes)
        if self.__contents.documentElement.firstChild:
            bXML.appendText(self.__contents.documentElement, "\n")
if __name__ == "__main__":
    # Smoke test: for each directory given on the command line, build a
    # manifest, round-trip it through an in-memory XML string and a file
    # under /tmp, then dump the reparsed manifest to stdout.
    for arg in sys.argv[1:]:
        manifest= Manifest(arg)
        buffer= cStringIO.StringIO()
        manifest.save(buffer)
        manifest2= Manifest(buffer.getvalue())
        manifestPath= os.path.join("/tmp", os.path.split(arg)[1]+".xml")
        manifest2.save(manifestPath)
        manifest3= Manifest(manifestPath)
        manifest3.save(sys.stdout)
| marcpage/build | old/old/bManifest.py | bManifest.py | py | 6,553 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.split",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number":... |
71405995554 | #!/usr/bin/env python
"""
Table Docstring
The Table class represents the control of the
Turing Machine as the entire functional (edge)
relation between some defined present state and
the next target state.
"""
import math
import copy
from lib.State import State
from typing import Set, List, Tuple
from lib.controls.Write import Write
from lib.Controller import Controller
from lib.controllers.Input import Input
from lib.controllers.Output import Output
from lib.controllers.table.Edge import Edge
from lib.controllers.table.Word import Word
from lib.controllers.binary_table.Bit import Bit
from lib.controllers.binary_table.BinaryTable import BinaryTable
from lib.controllers.binary_table.StateSequence import StateSequence
from lib.controllers.binary_table.ControlSequence import ControlSequence
__author__ = "Dylan Pozorski"
__project__ = "TuringMachine"
__class__ = "Table"
class Table(Controller):
    """
    Table

    Represents the control of the Turing Machine as an explicit
    transition table: a set of Edge records mapping a (source state,
    read word) pair to an (action, target state) pair.

    Attributes:
        entries (:obj:`Set[Edge]`): The set of mappings
            composing the finite state machine's graph.
    """

    def __init__(self, entries: Set[Edge]):
        """
        Table Constructor.

        :param entries: Set[Edge], The set of mappings
            composing the finite state machine's graph.
        """
        Controller.__init__(self)
        entries = set() if entries is None else entries
        self.__entries = set()
        for entry in entries:
            # Route through add() so root-state ambiguity is validated.
            self.add(edge=entry)

    def __len__(self) -> int:
        """
        Return the number of entries that are in the table.

        :return: int
        """
        return 0 if self.entries is None else len(self.entries)

    def __str__(self) -> str:
        """
        Return the canonical string representation of the table object.

        :return: str
        """
        lines = ["Control Table"]
        lines.extend(str(entry) for entry in self.entries)
        return "\n".join(lines) + "\n"

    def __repr__(self) -> str:
        """
        Return the canonical string representation of the table object.

        :return: str
        """
        return self.__str__()

    def is_empty(self) -> bool:
        """
        Return whether the table is empty of transition records.

        :return: bool
        """
        return self.__len__() == 0

    def add(self, edge: Edge) -> None:
        """
        Add the provided edge to the table.

        :param edge: Edge, The edge to add.
        :return: None
        :raises: ValueError If an edge is added
            that leads to an ambiguous initial state.
        """
        if edge in self.entries:
            return
        if not edge.source.root:
            self.__entries.add(edge)
            return
        # The edge claims a root source: only accept it when no other
        # root state with a different label is already registered.
        s = self.initial_state()
        if s is None or not s.root or s.label == edge.source.label:
            self.__entries.add(edge)
        else:
            raise ValueError("Ambiguous Initial State.")

    def remove(self, edge: Edge) -> None:
        """
        Remove the provided edge from the table.

        :param edge: Edge, The edge to remove.
        :return: None
        """
        if edge is not None:
            for entry in self.entries:
                if entry == edge:
                    self.entries.remove(edge)
                    break

    def next(self, state: State, input: Input) -> Output:
        """
        From the specified input, compute the transition action and the
        next graph state.

        :param state: State, The state the tape head currently occupies.
        :param input: Input, The word currently being read by the head.
        :return: Output
        """
        action, match = None, None
        if state is None:
            # No current state: restart from the initial state.
            match = self.initial_state()
        else:
            for e in self.entries:
                if input.word == e.condition and e.source == state:
                    match = e.target
                    action = e.action
                    break
        return Output(
            action=action,
            state=match,
            timestep=input.timestep
        )

    def initial_state(self) -> State:
        """
        Return the initial state of the table's transition entries. If no
        root is flagged, the state with the lowest label is selected.

        :return: State
        """
        lowest = None
        for entry in self.entries:
            if entry.source.root:
                return entry.source
            if lowest is None or lowest.label > entry.source.label:
                lowest = entry.source
        return lowest

    def indefinite_states(self) -> List[Tuple[State, Word]]:
        """
        Return the accessible (state, word) pairs that have no transition
        defined in the table.

        :return: List[Tuple[State, Word]]
        """
        labels = [s.label for s in self.states if not s.terminal]
        words = [w.name for w in self.vocab]
        indefinites = list()
        for label in labels:
            for name in words:
                tmp_s = State(label=label)
                tmp_w = Word(name=name)
                probe = Edge(source=tmp_s, condition=tmp_w, target=tmp_s)
                if probe not in self.entries:
                    indefinites.append((tmp_s, tmp_w))
        return indefinites

    def indefinite_edges(self) -> List[Edge]:
        """
        Return one edge per indefinite (state, word) pair, each with a
        dummy write action (re-writing the read word) transitioning to a
        fresh terminal node.

        :return: List[Edge]
        """
        edges, indefinites = list(), self.indefinite_states()
        labeler = max([s.label for s in self.states])
        for state, word in indefinites:
            labeler += 1
            terminal = State(
                label=labeler,
                terminal=True,
                op_status=1
            )
            edges.append(
                Edge(
                    source=state,
                    condition=word,
                    action=Write(word=word),
                    target=terminal
                )
            )
        return edges

    def close_domain(self) -> None:
        """
        Compute the domain's closure: add every indefinite transition to
        the edge set, terminated with failure nodes.

        :return: None
        """
        closed = list(self.entries) + list(self.indefinite_edges())
        self.__entries = set(closed)

    def rebase(self) -> None:
        """
        Reassign the state labels into a contiguous listing of integer
        labels (0..n-1, in sorted state order).

        :return: None
        """
        states = set()
        for e in self.entries:
            states.add(e.source)
            states.add(e.target)
        ordering = sorted(states)
        rebased = set()
        for edge in self.entries:
            n = copy.deepcopy(edge)
            # Resolve BOTH indices before mutating any label: on a
            # self-loop the copied source and target can be the same
            # object, and relabeling first corrupts the second lookup.
            src_index = ordering.index(n.source)
            tgt_index = ordering.index(n.target)
            n.source.label = src_index
            n.target.label = tgt_index
            rebased.add(n)
        # Store a set (not a list) to preserve the entries invariant; a
        # list here made any subsequent add() fail on .add().
        self.__entries = rebased

    def is_binary(self) -> bool:
        """
        Return whether the table is binary, i.e. its transition
        conditions and write operations only use the vocabulary {0, 1}.

        :return: bool
        """
        names = [w.name for w in list(self.vocab)]
        return len(names) == 2 and Bit.BINARY_LABEL_1 in names \
            and Bit.BINARY_LABEL_0 in names

    def to_binary(self) -> BinaryTable:
        """
        Convert the table into a binary table controller.

        :return: BinaryTable
        """
        # Enough bits to encode every state label.
        bits = math.ceil(math.log(len(self.states), 2))
        targets, controls = list(), list()
        for entry in self.entries:
            targets.append(
                StateSequence(
                    identity=entry.target.to_binary(label_size=bits),
                    operation=entry.action.to_binary()
                )
            )
        for entry in self.entries:
            found = False
            source = StateSequence(
                identity=entry.source.to_binary(label_size=bits),
                operation=entry.action.to_binary()  # placeholder, patched below
            )
            condition = entry.condition.to_binary()
            target = StateSequence(
                identity=entry.target.to_binary(label_size=bits),
                operation=entry.action.to_binary()
            )
            for node in targets:
                if node.identity == source.identity:
                    found = True
                    source.operation = node.operation
                    controls.append(
                        ControlSequence(
                            source=source,
                            condition=condition,
                            target=target
                        )
                    )
            if not found and source.root:
                # The root never appears as a target; synthesize a no-op
                # write of the conditioned word as its operation.
                w = Word(name=condition.values[1].value)
                source.operation = Write(word=w).to_binary()
                controls.append(
                    ControlSequence(
                        source=source,
                        condition=condition,
                        target=target
                    )
                )
        return BinaryTable(entries=set(controls))

    @property
    def entries(self) -> Set[Edge]:
        """
        :obj:`Set[Edge]` The set of mappings composing the finite state
        machine's graph.
        """
        return self.__entries

    @property
    def states(self) -> Set[State]:
        """
        :obj:`Set[State]` The set of states contained within the domain
        and range of the table.
        """
        states = set()
        for entry in self.entries:
            states.add(entry.source)
            states.add(entry.target)
        return states

    @property
    def vocab(self) -> Set[Word]:
        """
        :obj:`Set[Word]` The vocabulary of words conditioned upon for
        transitions (plus every word written by a Write action). This may
        be a subset of the Tape's vocabulary, but only trivially so.
        """
        vocab = set()
        for entry in self.entries:
            vocab.add(entry.condition)
            if isinstance(entry.action, Write):
                vocab.add(getattr(entry.action, "word"))
        return vocab
| dpozorski/TuringMachine | lib/controllers/table/Table.py | Table.py | py | 8,791 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "lib.Controller.Controller",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "typing.Set",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "lib.controllers.table.Edge.Edge",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "... |
13005074253 | # This file is part of PAINTicle.
#
# PAINTicle is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PAINTicle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PAINTicle. If not, see <http://www.gnu.org/licenses/>.
# The particle painter, that's using the gpu directly
# <pep8 compliant>
from . import simulationstep
from .. import numpyutils
from bpy.props import FloatProperty
import numpy as np
class FrictionStep(simulationstep.SimulationStep):
    """Simulation step that attenuates the in-surface force by friction.

    The incoming force is split into a component along the surface normal
    and a component in the surface plane; the planar component is scaled
    down the harder the particle is pressed onto the surface.
    """
    friction_coefficient: FloatProperty(name="Friction",
                                        description="The friction coefficient (how sticky is the surface).",
                                        min=0.0, soft_max=0.1, default=0.01, options=set())

    def simulate(self, sim_data: simulationstep.SimulationData, particles: simulationstep.ParticleData,
                 forces: simulationstep.Forces, new_particles: simulationstep.ParticleData):
        """Return the friction-attenuated in-plane forces for all particles."""
        # Friction calculation
        # Project forces onto normal vector
        # We don't need to divide by the square length of normal, since this is a normalized vector.
        unormal = numpyutils.unstructured(particles.normal)
        factor = numpyutils.vec_dot(unormal, forces)
        ortho_force = unormal * factor[:, np.newaxis]
        # The force on the plane is then just simple vector subtraction
        plane_force = forces - ortho_force
        # factor is the inverse of what we need here, since the normal is pointing to the outside of the surface,
        # but friction only applies if force is applied towards the surface. Hence we use (1+x) instead of (1-x)
        friction = np.clip(1+self.friction_coefficient*factor/numpyutils.vec_length(plane_force), 0, 1)
        return plane_force * friction[:, np.newaxis]
| FrankFirsching/PAINTicle | painticle/sim/frictionstep.py | frictionstep.py | py | 2,201 | python | en | code | 36 | github-code | 1 | [
{
"api_name": "bpy.props.FloatProperty",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "numpy.clip",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",... |
16165070884 | import numpy as np
from enum import Enum
from udacidrone import Drone
import time
visdom_available= True
try:
import visdom
except:
visdom_available = False
class PlaneMode(Enum):
    """
    Constant which isn't defined in Mavlink but useful when dealing with
    the airplane simulation
    """
    SUB_MODE_MANUAL = 1         # selected by cmd_controls/cmd_hybrid/cmd_moment
    SUB_MODE_LONGITUDE = 2      # selected by cmd_longitude_mode
    SUB_MODE_LATERAL = 3        # selected by cmd_lateral_mode
    SUB_MODE_STABILIZED = 4     # selected by cmd_stabilized
    SUB_MODE_VTOL_ATTITUDE = 9  # selected by cmd_vtol_attitude
    SUB_MODE_VTOL_POSITION = 10 # selected by cmd_vtol_position
SUB_MODE_VTOL_POSITION = 10
class Udaciplane(Drone):
    """
    Udaciplane class for use with the Unity Fixed Wing/Flying Car simulation

    Each command method first selects the matching PlaneMode sub-mode on
    the connection, then sends the command values; the connection
    interprets the values according to the active sub-mode.
    """
    def __init__(self, connection, tlog_name="TLog.txt"):
        """Wrap *connection* as a Udaciplane; telemetry is logged to *tlog_name*."""
        super().__init__(connection, tlog_name)

    def cmd_stabilized(self, roll, altitude, sideslip, airspeed):
        """Command the stabilized mode of the drone

        Args:
            roll: in radians
            altitude: in meters (positive up)
            sideslip: in radians (positive nose left)
            airspeed: in meters/sec
        """
        self.connection.set_sub_mode(PlaneMode.SUB_MODE_STABILIZED.value)
        self.connection.cmd_moment(roll, altitude, sideslip, airspeed)

    def cmd_longitude_mode(self, elevator, throttle, roll = 0, sideslip = 0,
                           t=0):
        """Command the longitude mode while lateral is stabilized

        Args:
            elevator: in percentage of maximum elevator (-1:1)
            throttle: in percentage of maximum throttle RPM (0:1)
            roll: in radians
            sideslip: in radians (positive nose left)
        """
        self.connection.set_sub_mode(PlaneMode.SUB_MODE_LONGITUDE.value)
        self.connection.cmd_moment(roll, elevator, sideslip, throttle, t)

    def cmd_lateral_mode(self, aileron, rudder, altitude, airspeed):
        """Command the lateral mode while longitudinal mode is stabilized

        Args:
            aileron: in percentage of maximum aileron (-1:1)
            rudder: in percentage of maximum rudder (-1:1)
            altitude: in meters (positive up)
            airspeed: in meters/sec
        """
        self.connection.set_sub_mode(PlaneMode.SUB_MODE_LATERAL.value)
        self.connection.cmd_moment(aileron, altitude, rudder, airspeed)

    def cmd_controls(self, aileron, elevator, rudder, throttle):
        """Command the manual aircraft controls

        Args:
            aileron: in percentage of maximum aileron (-1:1)
            rudder: in percentage of maximum rudder (-1:1)
            elevator: in percentage of maximum elevator (-1:1)
            throttle: in percentage of maximum throttle RPM (0:1)
        """
        self.connection.set_sub_mode(PlaneMode.SUB_MODE_MANUAL.value)
        controls = [aileron, elevator, rudder, throttle]
        self.connection.cmd_controls(controls)

    def cmd_hybrid(self, aileron, elevator, rudder, throttle, roll_moment, pitch_moment, yaw_moment, thrust):
        """Command the manual aircraft controls, the VTOL moments and total thrust force

        Args:
            aileron: in percentage of maximum aileron (-1:1)
            rudder: in percentage of maximum rudder (-1:1)
            elevator: in percentage of maximum elevator (-1:1)
            throttle: in percentage of maximum throttle RPM (0:1)
            roll_moment: in percentage of maximum roll moment (-1:1)
            pitch_moment: in percentage of maximum pitch moment (-1:1)
            yaw_moment: in percentage of maximum yaw_moment (-1:1)
            thrust: in percentage of maximum thrust (0:1)
        """
        self.connection.set_sub_mode(PlaneMode.SUB_MODE_MANUAL.value)
        controls = [aileron, elevator, rudder, throttle, roll_moment, pitch_moment, yaw_moment , thrust]
        self.connection.cmd_controls(controls)

    def cmd_moment(self, roll_moment, pitch_moment, yaw_moment, thrust):
        """Command the VTOL moments and total thrust force

        Args:
            roll_moment: in percentage of maximum roll moment (-1:1)
            pitch_moment: in percentage of maximum pitch moment (-1:1)
            yaw_moment: in percentage of maximum yaw_moment (-1:1)
            thrust: in percentage of maximum thrust (0:1)
        """
        self.connection.set_sub_mode(PlaneMode.SUB_MODE_MANUAL.value)
        # First four slots (the fixed-wing surfaces) are zeroed on purpose.
        controls = [0.0, 0.0, 0.0, 0.0, roll_moment, pitch_moment, yaw_moment, thrust]
        self.connection.cmd_controls(controls)

    def cmd_vtol_position(self, north, east, altitude, heading):
        """Command the local position and drone heading.

        Args:
            north: local north in meters
            east: local east in meters
            altitude: altitude above ground in meters
            heading: drone yaw in radians
        """
        self.connection.set_sub_mode(PlaneMode.SUB_MODE_VTOL_POSITION.value)
        self.cmd_position(north, east, altitude, heading)

    def cmd_vtol_attitude(self,roll, pitch, yaw_rate, vert_vel):
        """Command the drone through attitude command

        Args:
            roll: in radians
            pitch: in randians
            yaw_rate: in radians/second
            vert_vel: upward velocity in meters/second
        """
        self.connection.set_sub_mode(PlaneMode.SUB_MODE_VTOL_ATTITUDE.value)
        self.cmd_attitude(roll, pitch, yaw_rate, vert_vel)
| telmo-correa/FCND-FixedWing | plane_drone.py | plane_drone.py | py | 5,420 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "enum.Enum",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "udacidrone.Drone",
"line_number": 24,
"usage_type": "name"
}
] |
43618923048 | import asyncio
import sys
from room import ChatRoom
def main(argv):
    """Start a chat room server and run its event loop forever.

    argv[1] (optional): room name, default "AChat".
    argv[2] (optional): TCP port, default 9999.
    """
    name = argv[1] if len(argv) >= 2 else "AChat"
    port = int(argv[2]) if len(argv) >= 3 else 9999
    loop = asyncio.get_event_loop()
    chat_room = ChatRoom(name, port, loop)
    # run() registers the server on the loop; its return value was
    # previously bound to an unused local.
    chat_room.run()
    loop.run_forever()
if __name__ == '__main__':
main(sys.argv)
| Nef1k/AsyncChat | main.py | main.py | py | 354 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "asyncio.get_event_loop",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "room.ChatRoom",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 19,
"usage_type": "attribute"
}
] |
12533484804 | #!/usr/bin/env python3
"""
Base64 encode an image and output the element based on the specified format.
Usage:
Base64_encode.py [options] [image]
"""
import argparse
import base64
import json
import sys
import tempfile
from urllib.parse import urlparse

import requests
# NOTE: argv is parsed at import time, so importing this module as a
# library would consume sys.argv -- fine for a script, flagged for review.
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("image", nargs="?", help="URL, or path to image file")
parser.add_argument("--html", action="store_true", help="output HTML")
parser.add_argument("--markdown", action="store_true", help="output Markdown")
parser.add_argument("--json", action="store_true", help="output JSON")
args = parser.parse_args()
# Supported file extensions mapped to their MIME types.
image_formats = {
    "png": "image/png",
    "jpg": "image/jpeg",
    "jpeg": "image/jpeg",
    "gif": "image/gif",
    "svg": "image/svg+xml",
}
# if the image is a URL, validate and download it to a temporary file
def download_image(image):
    """Resolve *image* to a local file path plus its format.

    http(s) URLs are downloaded to a temporary file; local paths are
    returned unchanged. Returns (path, format) on success, or
    (None, None) after printing an error for unsupported formats.
    NOTE(review): the temporary file is created with delete=False and is
    never removed -- presumably the caller is responsible; confirm.
    """
    # Parse the URL and remove trailing slashes
    url = urlparse(image)
    url = url._replace(path=url.path.rstrip("/"))
    # Recreate the URL string from the parsed URL object
    image = url.geturl()
    if url.scheme in ["http", "https"]:
        response = requests.get(image, stream=True, timeout=5)
        response.raise_for_status()
        # Extract image format from the URL
        image_format = get_image_format(image)
        if image_format not in image_formats:
            print(f"Error: The format '{image_format}' is not supported.")
            return None, None
        # Create a temporary file and write the image to the temporary file
        with tempfile.NamedTemporaryFile(delete=False) as temp_file:
            for chunk in response.iter_content(chunk_size=8192):
                if chunk:  # Filter out keep-alive new chunks
                    temp_file.write(chunk)
            temp_file_path = temp_file.name  # Get the path of the temporary file
        return temp_file_path, image_format
    # If not an image, return the original path and the format
    image_format = get_image_format(image)
    if image_format not in image_formats:
        print(f"Error: The format '{image_format}' is not supported.")
        return None, None
    return image, image_format
def get_image_format(path):
    """Return the lower-cased file extension of *path* (URL or file path)."""
    url_path = urlparse(path).path      # strip scheme/host/query if a URL
    _, _, filename = url_path.rpartition("/")
    _, _, extension = filename.rpartition(".")
    # rpartition returns the whole name when there is no dot, matching the
    # original split()[-1] behavior.
    return (extension or filename).lower()
def get_image_data_url(image_data, mime_type):
    """Encode raw image bytes as an RFC 2397 ``data:`` URL."""
    payload = base64.b64encode(image_data).decode("utf-8")
    return "data:" + mime_type + ";base64," + payload
def output_html(img_element):
    """Render the data URL as an HTML <img> tag."""
    return '<img src="%s">' % img_element
def output_markdown(img_element):
    """Render the data URL as Markdown image syntax."""
    return "".format(img_element)
def output_json(img_element):
    """Render the data URL as a JSON object string.

    Uses json.dumps so quotes/backslashes in the value are escaped; the
    previous f-string emitted invalid JSON for such inputs. For clean
    data-URL input the output is byte-identical to before.
    """
    return json.dumps({"image": img_element})
def main():
    """Read from a file or stdin and output the encoded string."""
    if args.image:
        try:
            image_path, image_format = download_image(args.image)
            # NOTE(review): download_image may return (None, None) for an
            # unsupported format, in which case open(None) raises
            # TypeError here -- confirm intended behaviour.
            with open(image_path, "rb") as image:
                image_data = image.read()
            mime_type = image_formats[image_format]
        except FileNotFoundError:
            print(f"Error: The file '{args.image}' was not found.")
            return
        except PermissionError:
            print(
                f"Error: Permission denied when trying to open the file '{args.image}'."
            )
            return
    else:
        # No --image argument: read raw bytes from stdin and assume PNG.
        image_data = sys.stdin.buffer.read()
        mime_type = "image/png"
    data_url = get_image_data_url(image_data, mime_type)
    # Exactly one output-format flag is honoured, in this priority order.
    if args.html:
        print(output_html(data_url))
    elif args.markdown:
        print(output_markdown(data_url))
    elif args.json:
        print(output_json(data_url))
    else:
        print("No output format specified. Use --html, --markdown, or --json.")
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()
| bblinder/home-brews | base64_encode.py | base64_encode.py | py | 4,167 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "urllib.parse.urlparse",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "tempfile.N... |
9152596133 | import pytest
from django.test import RequestFactory
from mixer.backend.django import mixer
from apps.core.views import ResubscribeView
pytestmark = pytest.mark.django_db
class TestResubscribe:
    """Integration test for ResubscribeView (Django view + mixer fixtures)."""

    def test_auth_resubscribe_with_payments(self):
        """An authenticated profile with a payment sees the PayPal button."""
        # Build a profile and an attached payment via mixer's model factory.
        profile = mixer.blend('core.Profile')
        mixer.blend('core.Payment', profile=profile)
        # Fake an authenticated GET request for the view.
        request = RequestFactory().get('/payment')
        request.user = profile.user
        response = ResubscribeView.as_view()(request)
        assert response.status_code == 200
        # Render the template response so its content can be inspected.
        response.render()
        assert 'button_paypal' in response.content.decode('utf-8')
| oadiazp/erpsomosmas | apps/core/tests/test_views/test_resubscribe.py | test_resubscribe.py | py | 628 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pytest.mark",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "mixer.backend.django.mixer.blend",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "mixer.backend.django.mixer",
"line_number": 12,
"usage_type": "name"
},
{
"api_n... |
4365606556 | import logging
import os
import re
import signal
import sys
from typing import Callable, List, TYPE_CHECKING, Union
import dill as pickle
from oletools.olevba import VBA_Parser
from oletools.thirdparty.oledump.plugin_biff import cBIFF
from symbexcel.excel_wrapper import ExcelWrapper, parse_excel_doc
from .boundsheet import Cell
from .state import State
if TYPE_CHECKING:
pass
log = logging.getLogger(__name__)
# define useful type shortcuts
Stash = List[State]
class SimulationManager:
    """Symbolic-execution driver for XLM macros.

    Owns the state stashes ('active', 'deadended', 'found', 'pruned')
    and steps the active states until a find-condition, timeout, or the
    instruction cap is reached.
    """

    def __init__(self, excel_doc=None, filename=None, com=None, nocache=None, keep_predecessors=0,
                 enable_delegations=False, default_handlers=False, check_symbolic_args=True):
        """
        :param excel_doc: The excel document that you want to analyse
        """
        if not excel_doc and ExcelWrapper.get_file_type(filename) is None:
            raise RuntimeError('The sample has an invalid filetype, aborting')
        self.excel_doc = excel_doc or parse_excel_doc(filename, com, nocache)

        # Hard cap on simulation steps before states are pruned.
        self.MAX_INSNS = 1000000

        self.sha1 = self.excel_doc.sha1
        self.vba_code = ''
        # DCONN (external data connection) strings parsed from the
        # workbook stream, plus cells that reference them.
        self.dconn = dict()
        self.dconn_cells = list()

        self.enable_delegations = enable_delegations
        self.default_handlers = default_handlers
        self.check_symbolic_args = check_symbolic_args
        self.keep_predecessors = keep_predecessors

        self.insns_count = 0
        self.symbolic = False
        self._halt = False
        self.error = list()

        # create an XLM parser
        self.xlm_parser = ExcelWrapper.get_parser()

        # initialize empty stashes
        self._stashes = {
            'active': [],
            'deadended': [],
            'found': [],
            'pruned': []
        }

        sheets = self.excel_doc.get_sheets()
        entrypoints = self.excel_doc.get_entrypoints()
        defined_names = self.excel_doc.get_defined_names()
        log.debug(f'Defined names: {defined_names}')

        # Recover extra entry points from VBA stubs of the form
        # Application.Run Sheets("...").Range("...") and
        # Application.Run ("name").
        _vba_run_cell_regex_str = r'Application\.Run Sheets\(\"(?P<sheet>.*?)\"\)\.Range\(\"(?P<cell>.*?)\"\)'
        _vba_run_cell_regex = re.compile(_vba_run_cell_regex_str)
        _vba_run_name_regex_str = r'Application\.Run \(\"(?P<name>.*?)\"\)'
        _vba_run_name_regex = re.compile(_vba_run_name_regex_str)
        try:
            vbaparser = VBA_Parser(filename)

            # try to parse entrypoints from VBA code
            vba_code = vbaparser.get_vba_code_all_modules()
            for i, (sheet, cell_str) in enumerate(_vba_run_cell_regex.findall(vba_code)):
                entrypoints += [(f'vba_run_cell_{i}', sheets[sheet][cell_str])]
            for i, name in enumerate(_vba_run_name_regex.findall(vba_code)):
                entrypoints += [(f'vba_run_name_{i}', defined_names[name.lower()])]

            # parse DCONN
            for excel_stream in ('Workbook', 'Book'):
                if vbaparser.ole_file.exists(excel_stream):
                    data = vbaparser.ole_file.openstream(excel_stream).read()
                    biff_plugin = cBIFF(name=[excel_stream], stream=data, options='-o DCONN -s')
                    conn = biff_plugin.Analyze()
                    if conn:
                        self.dconn[conn[-1].strip().lower()] = conn[-2]
        except:
            # NOTE(review): bare except silences *all* failures from the
            # optional VBA/DCONN parsing (including KeyboardInterrupt).
            self.set_error('OleVBA parsing failed')

        if len(entrypoints) == 0:
            self.set_error('Entrypoint(s) not found!')
            return

        if self.dconn:
            print(f'DCONN entries: {self.dconn}')

        # Create initial states.
        for name, cell in entrypoints:
            if not isinstance(cell, Cell):
                log.warning('Skipping invalid entry point: %s %s' % (name, cell))
                continue
            log.info(f'Entry point {name}: "{cell.a1}"')
            state = State(simgr=self, curr_cell=cell, memory=sheets)
            self.active.append(state)

    def set_error(self, s):
        """Log *s* as an error and append it to the accumulated error list."""
        log.error(f'[ERROR] {s}')
        self.error += [s]

    def __getstate__(self):
        # Drop the (unpicklable) XLM parser before serialization.
        state = dict(self.__dict__)
        # del state['excel_doc']
        del state['xlm_parser']
        return state

    def __setstate__(self, state):
        # Recreate the parser dropped by __getstate__.
        self.__dict__ = state
        self.xlm_parser = ExcelWrapper.get_parser()

    @property
    def states(self) -> Union[State, None]:
        """
        :return: All the states
        """
        return sum(self._stashes.values(), [])

    @property
    def active(self) -> Stash:
        """
        :return: Active stash
        """
        return self._stashes['active']

    @property
    def deadended(self) -> Stash:
        """
        :return: Deadended stash
        """
        return self._stashes['deadended']

    @property
    def found(self) -> Stash:
        """
        :return: Found stash
        """
        return self._stashes['found']

    @property
    def one_active(self) -> Union[State, None]:
        """
        :return: First element of the active stash, or None if the stash is empty
        """
        if len(self._stashes['active']) > 0:
            return self._stashes['active'][0]
        else:
            return None

    @property
    def one_deadended(self) -> Union[State, None]:
        """
        :return: First element of the deadended stash, or None if the stash is empty
        """
        if len(self._stashes['deadended']) > 0:
            return self._stashes['deadended'][0]
        else:
            return None

    @property
    def one_found(self) -> Union[State, None]:
        """
        :return: First element of the found stash, or None if the stash is empty
        """
        if len(self._stashes['found']) > 0:
            return self._stashes['found'][0]
        else:
            return None

    def halt(self, signum, frame):
        """SIGALRM handler: flag every state (and the manager) as timed out."""
        log.error(f'[TIMEOUT] Simulation manager for {self.sha1} timed out')
        for state in self.states:
            state.halt = True
            state.error = 'TimeoutError'
        self.set_error("TIMEOUT")
        self._halt = True

    def move(self, from_stash: str, to_stash: str, filter_func: Callable[[State], bool] = lambda s: True) -> None:
        """
        Move all the states that meet the filter_func condition from from_stash to to_stash
        :param from_stash: Source stash
        :param to_stash: Destination Stash
        :param filter_func: A function that discriminates what states should be moved
        :return: None
        """
        # Iterate over a copy so removal during iteration is safe.
        for s in list(self._stashes[from_stash]):
            if filter_func(s):
                self._stashes[from_stash].remove(s)
                self._stashes[to_stash].append(s)

    def step(self, n: int = 1) -> None:
        """
        Perform n steps (default is 1), after each step move all the halted states to the deadended stash
        :param n: Number of steps
        :return: None
        """
        for _ in range(n):
            for state in list(self.active):
                try:
                    state.step()
                except Exception as e:
                    # Record where the failure happened and mark the state
                    # errored so it is moved to 'deadended' below.
                    exc_type, exc_obj, exc_tb = sys.exc_info()
                    fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                    log.exception('Something went wrong during the deobfuscation.')
                    self.set_error(f'{exc_type.__name__} at {fname}:{exc_tb.tb_lineno}')
                    state.error = e.__class__
            self.move(from_stash='active', to_stash='deadended', filter_func=lambda s: s.halt or s.error)

    def run(self, find: Callable[[State], bool] = lambda s: False, checkpoint=None, timeout=0) -> None:
        """
        Run the simulation manager, until the `find` condition is met. The analysis will stop when there are no more
        active states or some states met the `find` condition (these will be moved to the found stash)
        example: simgr.run(find=lambda s: '=ALERT' in s.formula)
        :param find: Function that will be called after each step. The matching states will be moved to the found stash
        :param timeout: Max running time, in seconds
        :return: None
        """
        # handle timeout (SIGALRM fires self.halt after `timeout` seconds;
        # alarm(0) disables it)
        signal.signal(signal.SIGALRM, self.halt)
        signal.alarm(timeout)

        try:
            while len(self.active) > 0 and len(self.found) == 0 and not self._halt:
                # Optionally pickle the whole manager when the step counter
                # reaches the requested checkpoint.
                if checkpoint and self.insns_count == checkpoint:
                    with open(f'/tmp/symbexcel.{self.sha1}.checkpoint.{checkpoint}', 'wb') as f:
                        pickle.dump(self, f)

                self.move(from_stash='active', to_stash='found', filter_func=find)
                self.step()
                self.insns_count += 1
                if self.insns_count >= self.MAX_INSNS:
                    log.error(f"Exceeded MAX_INSNS ({self.MAX_INSNS})")
                    self.set_error(f"Exceeded MAX_INSNS ({self.MAX_INSNS})")
                    self.move(from_stash='active', to_stash='pruned')
                    self._halt = True
        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            log.exception(f'Exception while stepping the Simulation Manager')
            self.set_error(f'{exc_type.__name__} at {fname}:{exc_tb.tb_lineno}')
        finally:
            signal.alarm(0)

        # checkpoint == -1 means "dump a checkpoint at the very end".
        if checkpoint == -1:
            with open(f'/tmp/symbexcel.{self.sha1}.checkpoint.{checkpoint}', 'wb') as f:
                pickle.dump(self, f)

    def __str__(self) -> str:
        # Summarize non-empty stashes plus a count of errored states.
        stashes_str = [f'{len(stash)} {stash_name}' # {[s for s in stash]}'
                       for stash_name, stash in self._stashes.items() if len(stash)]
        errored_count = len([s for stash_name, stash in self._stashes.items() if len(stash) for s in stash if s.error])
        stashes_str += [f'({errored_count} errored)']
        return f'<SimulationManager[{self.insns_count}] with {", ".join(stashes_str)}>'

    def __repr__(self) -> str:
        return self.__str__()
| ucsb-seclab/symbexcel | symbexcel/simulation_manager.py | simulation_manager.py | py | 10,078 | python | en | code | 13 | github-code | 1 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "state.State",
... |
32212352055 | import purchase
import utils.queries as queries
#Function that will take user input on how to order our clothing articles (for print-out)
#After it calls orderBy, this function will call purchase if input is valid. Input will be the clothing ID of the article of clothing you want to buy
def viewAndPlace(connection, custID):
    """Interactively let customer custID browse the catalogue and buy items.

    Repeatedly asks for a sort column, prints the clothing table in that
    order (via queries.printClothesInOrder), then lets the user purchase
    an article by its ID number.  Returns when the user types 'back' /
    an unknown menu choice, or when a query helper fails.
    """
    # Map menu digits to the database column used for ordering.
    column_by_choice = {
        "0": "ClothingID",
        "1": "Name",
        "2": "Type",
        "3": "Season",
        "4": "Price",
        "5": "Material"
    }
    while True:
        user_input = raw_input("Order by: ID No (0), Name (1), Type (2), Season (3), Price (4), Material (5)\n")
        # Any unknown choice falls back to "back" and exits, as before.
        column = column_by_choice.get(user_input, "back")
        if column == "back":
            return
        if not queries.printClothesInOrder(connection, column):
            return
        # Keep prompting until a numeric clothing ID (or 'back') is given.
        while True:
            user_input = raw_input("\nTo purchase an item, input its ID No. Type 'back' to go back to your action screen\n")
            if user_input == "back":
                return
            try:
                clothing_id = int(user_input)
                break
            except ValueError:  # was a bare except: only int() can fail here
                print("That's an invalid id")
        article = queries.selectArticle(connection, clothing_id)
        if article is None:
            print("Could not select clothing item to purchase")
        else:
            purchase.purchase(connection, custID, article[0])
| jcprice12/PythonDB | prompts/browse.py | browse.py | py | 1,287 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "utils.queries.printClothesInOrder",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "utils.queries",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "utils.queries.selectArticle",
"line_number": 37,
"usage_type": "call"
},
{
"api_n... |
36364918583 | #!/usr/bin/env python
__author__ = 'danielcarlin'
import pandas
import scipy.stats
import numpy.random
from optparse import OptionParser
from theano_maskedRBM import makeMatricesAgree
import operator
from math import fabs
def corr_matrices(data_in,rbm_out,spearman=True):
    """Take two matrices and return the correlation of their corresponding columns. Defaults to Spearman."""
    # First column of each frame holds sample names; the rest are entities.
    entities_x=list(data_in.columns.values)[1:]
    sample_names=list(data_in[data_in.columns[0]])
    entities_out=list(rbm_out.columns.values)[1:]
    out_names=list(rbm_out[rbm_out.columns[0]])
    # Align the two matrices on their shared entities (project helper).
    [m1,m2,entities_agree]=makeMatricesAgree(rbm_out,entities_out,data_in,entities_x)
    spr={}
    null_model={}
    # NOTE(review): Python 2 only -- xrange is undefined on Python 3.
    for i in xrange(m1.shape[1]):
        if spearman:
            # Each entry is the (correlation, p-value) pair from scipy.
            spr[i]=scipy.stats.spearmanr(m1[:,i],m2[:,i])
            # Null model: correlate against a randomly chosen column of m2.
            null_model[i]=scipy.stats.spearmanr(m1[:,i],m2[:,numpy.random.randint(low=0,high=m2.shape[1])])
        else:
            spr[i]=scipy.stats.pearsonr(m1[:,i],m2[:,i])
            null_model[i]=scipy.stats.pearsonr(m1[:,i],m2[:,numpy.random.randint(low=0,high=m2.shape[1])])
    return spr,entities_agree, null_model
def write_regulators_table(rbm_w,table_file='targets_table.txt',giveN=5):
    """Outputs a table of top N targets for each TF.

    For every TF column (all columns after the first, which holds the
    target names) the targets with non-zero weight are sorted by absolute
    weight and the top ``giveN`` are written as one tab-separated line:
    ``tf<TAB>target,target,...<TAB>weight,weight,...``.
    """
    # `with` guarantees the handle is closed (the original leaked it).
    with open(table_file, 'w') as fh:
        target_col = rbm_w.columns[0]
        for tf in list(rbm_w.columns.values)[1:]:
            # Single boolean mask via .loc instead of chained indexing.
            nonzero = rbm_w[tf] != 0
            targets = list(rbm_w.loc[nonzero, target_col])
            weights = list(rbm_w.loc[nonzero, tf])
            # Rank by absolute weight, strongest first (stable sort).
            ranked = sorted(zip(targets, weights), key=lambda target: fabs(target[1]), reverse=True)
            top_targets = [t for t, _ in ranked[0:giveN]]
            top_weights = [str(w) for _, w in ranked[0:giveN]]
            fh.write(tf + '\t' + ','.join(top_targets) + '\t' + ','.join(top_weights) + '\n')
if __name__ == '__main__':
    # Command-line driver: correlate RBM hidden-unit activations with the
    # original expression data and optionally dump a top-targets table.
    parser = OptionParser()
    parser.add_option("-d", "--data", dest="train_data_file", action="store", type="string", default='/Users/danielcarlin/projects/regulator_RBM/test_data/all_data.tab',
                      help="File containining a samples (rows) by genes (columns), tab delimited data")
    parser.add_option('-r', "--rbm-output", dest="rbm_output_file", action="store", type="string", default='output.txt',help ="output file of hidden layer probabilities")
    parser.add_option('-w',"--rbm-weights",dest="rbm_weights_file",action="store",type="string",default=None,help="weights composing the hidden layer ofr the RBM")
    parser.add_option('-c', "--correlation-file", dest="corr_file", action='store', type='string', default=None, help="file for correlation between expression and regulon")
    parser.add_option('-n', "--null-model", dest="null_file", action='store', type='string', default=None, help="file for null model output")
    parser.add_option('-p', "--pearson", dest="pearson", action='store_true', default=False, help="Pearson rather than Spearman correlation")
    parser.add_option('-t','--target-table', dest="target_table",default=None, help="table for learned targets")
    (opts, args) = parser.parse_args()
    # Both inputs are tab-separated matrices with a leading name column.
    data_in = pandas.read_table(opts.train_data_file)
    rbm_out = pandas.read_table(opts.rbm_output_file)
    if opts.pearson:
        spr,ent,nm=corr_matrices(data_in,rbm_out,spearman=False)
    else:
        spr,ent,nm=corr_matrices(data_in,rbm_out)
    # One "entity<TAB>correlation<TAB>p-value" line per aligned column.
    fh=open(opts.corr_file,'w')
    for k in spr.keys():
        fh.write(ent[k]+'\t'+str(spr[k][0])+'\t'+str(spr[k][1])+'\n')
    fh.close()
    # Same format for the random-column null model.
    fh2=open(opts.null_file,'w')
    for k in spr.keys():
        fh2.write(ent[k]+'\t'+str(nm[k][0])+'\t'+str(nm[k][1])+'\n')
    fh2.close()
    if opts.target_table is not None:
        rbm_w = pandas.read_table(opts.rbm_weights_file)
        write_regulators_table(rbm_w,table_file=opts.target_table,giveN=5)
| decarlin/RIGGLE | scripts/post_rbm_analysis.py | post_rbm_analysis.py | py | 3,800 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "theano_maskedRBM.makeMatricesAgree",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "scipy.stats.stats.spearmanr",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "scipy.stats.stats",
"line_number": 27,
"usage_type": "attribute"
},
{
... |
72165751073 | import os
import numpy as np
import cv2
import classifier
from sklearn.model_selection import train_test_split
from modelevaluation import load_rep_images
import matplotlib.pyplot as plt
# set constants
args = {
"images_per_category": 10000,
"num_categories": 43,
"testing_data_directory": "gtsrb-testing",
"training_data_directory": "gtsrb-training",
"epochs": 10
}
# set filenames for training and testing datasets
sm_filenames = {
"x_train": "smx_train",
"y_train": "smy_train",
"x_test": "smx_test",
"y_test": "smy_test"
}
lg_filenames = {
"x_train": "x_train",
"y_train": "y_train",
"x_test": "x_test",
"y_test": "y_test"
}
train_test_filenames = lg_filenames
# Set constants for CLAHE parameter ranges
grid_sizes = [2, 4, 8]
clip_limits = [5, 10, 20, 30, 40]
ids = ['gray', 'gray_eq', 'lab', 'lab_eq', 'orig']
TEST_SIZE = 0.20 # Allows user to modify proportion of data set
############################################################################
def load_split_save_images(
        training_dir,
        num_categories,
        images_per_category,
        tt_filenames):
    """
    Takes as input the training directory, the number of categories, number of
    images per category, and a dictionary tt_filenames specifying how to label
    the saved training/testing sets.
    This method will load the specified images from the training directory,
    then split them into training and testing sets, then save the training
    and testing sets to files in a directory called 'presplitimages' with
    filenames specified by the last input parameter.
    This method should be run ONE TIME for a specified set of models. All models
    can then be trained and tested on the same data.
    """
    # load training data
    # NOTE(review): `load_training_data` is not defined by this file's
    # visible imports -- presumably it lives in `classifier`; confirm.
    images, labels = load_training_data(
        training_dir,
        num_categories,
        images_per_category
    )
    # Split data into training and testing sets
    # NOTE(review): `tf` (tensorflow) is not imported at file level, so
    # this line raises NameError as the file stands -- verify.
    labels = tf.keras.utils.to_categorical(labels)
    x_train, x_test, y_train, y_test = train_test_split(
        np.array(images), np.array(labels), test_size=TEST_SIZE
    )
    # Save training and testing sets to files
    np.save(f"{os.path.join('presplitimages',tt_filenames['x_train'])}.npy", x_train)
    np.save(f"{os.path.join('presplitimages',tt_filenames['y_train'])}.npy", y_train)
    np.save(f"{os.path.join('presplitimages',tt_filenames['x_test'])}.npy", x_test)
    np.save(f"{os.path.join('presplitimages',tt_filenames['y_test'])}.npy", y_test)
    return x_train, x_test, y_train, y_test
def load_presplit_images(tt_filenames):
    """Load the four presplit arrays saved under 'presplitimages/'.

    `tt_filenames` maps the split names ('x_train', 'x_test', 'y_train',
    'y_test') to the base filenames of the saved .npy arrays, so the same
    train/test split can be reused across many models.
    """
    def _load(split):
        # Resolve the saved .npy file for one split and load it.
        return np.load(os.path.join('presplitimages', tt_filenames[split]) + ".npy")

    return (_load('x_train'), _load('x_test'),
            _load('y_train'), _load('y_test'))
def get_mod_images(x_train_orig, x_test_orig, im_type, cl = None):
    """
    Returns two lists of modified images, modified according to
    im_type (which can be 'orig', 'gray', 'gray_eq', 'lab', or 'lab_eq')
    and cl which can specify the parameters for a CLAHE filter.
    cl, if specified should have the form (grid_size, clip_limit).
    """
    x_train, x_test = [], []
    # If im_type is orig, do not modify
    if im_type == 'orig':
        return x_train_orig, x_test_orig
    # Set the filter to be applied according to im_type
    if im_type[:4] == 'gray':
        filt = cv2.COLOR_BGR2GRAY
    elif im_type[:3] == 'lab':
        filt = cv2.COLOR_BGR2LAB
    # Apply the filter
    for img in x_train_orig:
        x_train.append(cv2.cvtColor(img, filt))
    for img in x_test_orig:
        x_test.append(cv2.cvtColor(img, filt))
    # If images should be equalized, apply histogram
    # appropriately
    if im_type == "gray_eq":
        x_train = [cv2.equalizeHist(img) for img in x_train]
        x_test = [cv2.equalizeHist(img) for img in x_test]
    if im_type == "lab_eq":
        # Equalize only the L (lightness) plane of each LAB image.
        # NOTE(review): newer OpenCV versions return a tuple from
        # cv2.split, which would make the item assignment below fail --
        # confirm the pinned cv2 version returns a list.
        for i in range(len(x_train)):
            lab_planes = cv2.split(x_train[i])
            lab_planes[0] = cv2.equalizeHist(lab_planes[0])
            x_train[i] = cv2.merge(lab_planes)
        for i in range(len(x_test)):
            lab_planes = cv2.split(x_test[i])
            lab_planes[0] = cv2.equalizeHist(lab_planes[0])
            x_test[i] = cv2.merge(lab_planes)
    # Convert to numpy ndarrays, then reshape if necessary
    x_train, x_test = np.array(x_train), np.array(x_test)
    # grayscale images will now have the incorrect shape
    # as ndarrays, so reshape them
    dim = len(x_train[0].shape)
    if dim == 2:
        width, height = x_train.shape[1], x_train.shape[2]
        x_train = x_train.reshape(x_train.shape[0],width, height, 1)
        x_test = x_test.reshape(x_test.shape[0],width, height, 1)
    # Return modified images, or apply CLAHE filter as appropriate
    if not cl:
        return x_train, x_test
    else:
        grid_size, clip_limit = cl[0], cl[1]
        cl_obj = cv2.createCLAHE(
            clipLimit = clip_limit,
            tileGridSize = (grid_size, grid_size)
        )
        # Apply the CLAHE operator in place, writing back into the arrays.
        if im_type[:4] == "gray":
            for coll in [x_train, x_test]:
                for i in range(len(coll)):
                    img = coll[i]
                    # assumes cl_obj.apply accepts the trailing singleton
                    # channel dimension -- TODO confirm
                    mod_img = cl_obj.apply(img)
                    mod_img = mod_img.reshape(mod_img.shape[0], mod_img.shape[1], 1)
                    coll[i] = mod_img
        elif im_type[:3] == "lab":
            for coll in [x_train, x_test]:
                for i in range(len(coll)):
                    img = coll[i]
                    lab_planes = cv2.split(img)
                    lab_planes[0] = cl_obj.apply(lab_planes[0])
                    mod_img = cv2.merge(lab_planes)
                    coll[i] = mod_img
    return x_train, x_test
def make_train_evaluate_model(
        x_train_orig,
        x_test_orig,
        y_train,
        y_test,
        im_type,
        cl = None):
    """
    Makes a model, modifies the training/testing images according to im_type, then
    trains and evaluates the model on the specified training/testing sets.
    im_type can be any of 'orig', 'gray', 'lab', 'gray_eq', 'lab_eq'
    Returns a dictionary whose keys are {'training_loss', 'training_acc',
    'testing_loss', 'testing_acc'}
    """
    # Grayscale variants use a single channel; colour variants use 3.
    ch = 1 if im_type[:4] == 'gray' else 3
    x_train, x_test = get_mod_images(x_train_orig, x_test_orig, im_type, cl)
    # NOTE(review): `get_model` is not defined by this file's visible
    # imports -- presumably supplied by the `classifier` module; confirm.
    model = get_model(None, num_categories = args["num_categories"], channels = ch)
    model.fit(x_train, y_train, epochs = args['epochs'], verbose = 2)
    print(f"done training {im_type} model, with {cl if cl else 'no'} CLAHE filter")
    model_res = dict()
    # evaluate() results are stored as [loss, accuracy] pairs.
    res = model.evaluate(x_train, y_train, verbose = 0)
    model_res['training_loss'] = res[0]
    model_res['training_acc'] = res[1]
    res = model.evaluate(x_test, y_test, verbose = 0)
    model_res['testing_loss'] = res[0]
    model_res['testing_acc'] = res[1]
    return model_res
def show_hist_results(results, ids):
    """
    Shows a histogram of the results from the initial comparison of models
    trained on the same set of images, each with a different filter applied.
    The filters are {'orig', 'gray', 'gray_eq', 'lab', 'lab_eq'}
    """
    labels = ids
    tr_acc = [results[name]['training_acc'] for name in labels]
    te_acc = [results[name]['testing_acc'] for name in labels]
    x = np.arange(len(labels))
    # Paired bars: training accuracy left, testing accuracy right.
    width = 0.35
    fig, ax = plt.subplots()
    rects1 = ax.bar(x-width/2, tr_acc, width, label = "tr acc.")
    rects2 = ax.bar(x+width/2, te_acc, width, label = "te acc.")
    # Add some text for labels, title and custom x-axis tick labels, etc.
    ax.set_ylabel('Accuracy')
    ax.set_title('Accuracy by image type on training/testing sets')
    ax.set_xticks(x)
    # NOTE(review): label[1:] drops the first character of each id
    # ('gray' -> 'ray') -- confirm this truncation is intended.
    ax.set_xticklabels([label[1:] for label in labels])
    ax.legend()
    ax.bar_label(rects1, padding=3)
    ax.bar_label(rects2, padding=3)
    fig.tight_layout()
    plt.ylim(.8, 1)
    plt.show()
def show_CLAHE_3d_bar(results, im_type, cutoff = .8):
    """
    Makes a 3d bar plot for the results of the models trained on image
    sets with CLAHE filters applied to `im_type` images. `im_type` can
    be {gray, gray_eq, lab, lab_eq}.
    Cutoff specifies a lower cutoff for the z-axis.
    """
    fig = plt.figure(figsize=(8, 3))
    ax1 = fig.add_subplot(121, projection='3d')
    ax2 = fig.add_subplot(122, projection='3d')
    # Lay the (clip_limit x grid_size) grid out as flat x/y coordinates.
    _x = np.arange(5)
    _y = np.arange(3)
    _xx, _yy = np.meshgrid(_x, _y)
    x, y = _xx.ravel(), _yy.ravel()
    # Map the integer axis positions back to the CLAHE parameter values.
    xtoCL = {a: clip for a, clip in zip(_x, clip_limits)}
    ytoGS = {b: size for b, size in zip(_y, grid_sizes)}
    # Bar heights are accuracies re-based at `cutoff` so the z-axis can
    # start there instead of at 0.
    top1 = [results[(im_type, ytoGS[b], xtoCL[a])]['training_acc'] - cutoff
            for a, b in zip(x, y)]
    top2 = [results[(im_type, ytoGS[b], xtoCL[a])]['testing_acc'] - cutoff
            for a, b in zip(x, y)]
    bottom = [cutoff for a in x]
    width = depth = 1
    ax1.bar3d(x, y, bottom, width, depth, top1, shade=True)
    ax1.set_title('Training Accuracy')
    ax1.set_xlabel('Clip Limit')
    ax1.set_xticks(_x)
    ax1.set_xticklabels(clip_limits)
    ax1.set_yticks(_y)
    ax1.set_yticklabels(grid_sizes)
    ax1.set_ylabel('Grid Size')
    ax1.set_zlim(cutoff, 1)
    ax2.bar3d(x, y, bottom, width, depth, top2, shade=True)
    ax2.set_title('Testing Accuracy')
    ax2.set_xlabel('Clip Limit')
    # BUG FIX: these four tick calls previously targeted ax1 again,
    # leaving the testing-accuracy subplot without tick labels.
    ax2.set_xticks(_x)
    ax2.set_xticklabels(clip_limits)
    ax2.set_yticks(_y)
    ax2.set_yticklabels(grid_sizes)
    ax2.set_ylabel('Grid Size')
    ax2.set_zlim(cutoff, 1)
    plt.show()
def compare_model_avgs(results, dim, vals):
    """Average model accuracies along one axis of the result keys.

    `dim` selects which component of the (im_type, grid_size, clip_limit)
    result keys to group by ('im_type', 'grid_size', or 'clip_limit'),
    and `vals` lists the group values of interest.  Returns two dicts
    keyed by those values: mean testing accuracy and mean training
    accuracy over all models matching each value.
    """
    axis = {'im_type': 0, 'grid_size': 1, 'clip_limit': 2}[dim]
    te_samples = {v: [] for v in vals}
    tr_samples = {v: [] for v in vals}
    # Collect the per-model accuracies for every requested group value.
    for key, stats in results.items():
        group = key[axis]
        if group in te_samples:
            te_samples[group].append(stats['testing_acc'])
            tr_samples[group].append(stats['training_acc'])
    te_avg = {v: sum(xs) / len(xs) for v, xs in te_samples.items()}
    tr_avg = {v: sum(xs) / len(xs) for v, xs in tr_samples.items()}
    return te_avg, tr_avg
############################################################################
# Load images, train_test_split them
# x_train, x_test, y_train, y_test = load_split_save_images(
# args["training_data_directory"],
# args["num_categories"],
# args["images_per_category"],
# train_test_filenames
# )
# load saved training/testing sets
# x_train_orig, x_test_orig, y_train_orig, y_test_orig = load_presplit_images(
# train_test_filenames)
# y_train, y_test = y_train_orig, y_test_orig
# We need a bunch of models, one for each triple (clip, grid, im_type)
# For each triple, we will modify the training and testing sets
# to conform to the image type, then make and train a model, then evaluate
# the model. Finally, we save the evaluation data to a dictionary.
# The dictionary is saved to a file.
# Make, train, and evaluate models without CLAHE filters applied
# results = dict()
# for im_type in ids:
# print(f"model {im_type}")
# results[im_type] = make_train_evaluate_model(
# x_train_orig,
# x_test_orig,
# y_train,
# y_test,
# im_type
# )
# for im_type in ids[:-1]:
# for grid_size in grid_sizes:
# for clip_limit in clip_limits:
# print(f"Model {im_type} with CLAHE filter: clip = {clip_limit}, grid = {grid_size}")
# results[(im_type, grid_size, clip_limit)] = make_train_evaluate_model(
# x_train_orig,
# x_test_orig,
# y_train,
# y_test,
# im_type,
# (grid_size, clip_limit)
# )
# Show histogram of results for model accuracy on models without
# CLAHE filters
# NOTE(review): `results` is only defined by the commented-out training
# code above, so these calls raise NameError as the script stands.
show_hist_results(results, ids)
# Show a 3d bar plot comparing performance of all CLAHE operators applied within a
# specified image type
show_CLAHE_3d_bar(results, 'lab_eq')
show_CLAHE_3d_bar(results, 'gray')
# Based on this analysis, the model that performs optimally does so on equalized lab images and uses a CLAHE filter with grid_size = 8 and clip_limit = 10.
| drwiggle/GTSRB-CNN | imgenhmodeltesting.py | imgenhmodeltesting.py | py | 13,101 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.pa... |
74872598754 | """SQLAlchemy one-to-many relationship with multiple foreign keys.
https://avacariu.me/writing/2019/composite-foreign-keys-and-many-to-many-relationships-in-sqlalchemy
"""
from pathlib import Path
from typing import List
from sqlalchemy import (
Column,
ForeignKey,
ForeignKeyConstraint,
Integer,
String,
Table,
create_engine,
select,
)
from sqlalchemy.orm import Session, declarative_base, relationship
Base = declarative_base()
class Author(Base):
    """An author is identified by their userid and username."""

    __tablename__ = "author"

    # Composite primary key: (userid, username).
    userid = Column(Integer, primary_key=True)
    username = Column(String(30), primary_key=True)

    # One-to-many: an author owns many recipes; reverse side is
    # Recipe.author.  (The commented-out args show the explicit
    # foreign_keys/primaryjoin alternative to the composite
    # ForeignKeyConstraint declared on Recipe.)
    recipes = relationship(
        "Recipe",
        back_populates="author",
        # foreign_keys=[userid, username],
        # primaryjoin="and_(Recipe.author_userid==Author.userid, Recipe.author_username==Author.username)",
    )
class Recipe(Base):
    """A recipe is identified by its id and title."""

    __tablename__ = "recipe"

    id = Column(Integer, primary_key=True)
    title = Column(String(30))

    # Composite foreign key to Author: both halves of the parent's
    # (userid, username) primary key are stored on the child row; the
    # actual constraint is declared once in __table_args__ below.
    # author_userid = Column(Integer, ForeignKey("author.userid"), nullable=False)
    # author_username = Column(String(30), ForeignKey("author.username"), nullable=False)
    author_userid = Column(Integer, nullable=False)
    author_username = Column(String(30), nullable=False)

    author = relationship(
        "Author",
        back_populates="recipes",
        # primaryjoin="and_(Recipe.author_userid==Author.userid, Recipe.author_username==Author.username)",
        # foreign_keys=[author_userid, author_username],
    )

    # this produces
    # FOREIGN KEY(author_userid, author_username) REFERENCES author (userid, username)
    # which I kinda like more than the double references
    # FOREIGN KEY(author_userid) REFERENCES author (userid),
    # FOREIGN KEY(author_username) REFERENCES author (username)
    __table_args__ = (
        ForeignKeyConstraint(
            ["author_userid", "author_username"],
            ["author.userid", "author.username"],
        ),
    )
# Create the database and engine
sqlite_file_name = "local_o2m_sa_dup.db"
sqlite_fp = Path(sqlite_file_name)
# Start from a fresh database file on every run.
if sqlite_fp.exists():
    sqlite_fp.unlink()
sqlite_url = f"sqlite:///{sqlite_file_name}"
# echo=True logs every emitted SQL statement to stdout.
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables() -> None:
    """Set up the database and tables."""
    # Emits CREATE TABLE for every model registered on Base.metadata.
    Base.metadata.create_all(engine)
def create_and_select() -> None:
    """Insert one Author with one Recipe and print the persisted title."""
    with Session(engine) as session:
        author = Author(userid=1, username="spongebob")
        # Assigning `author` via the relationship cascades the Author
        # insert along with the Recipe.
        recipe = Recipe(id=1, title="Krabby Patty", author=author)
        session.add(recipe)
        session.commit()
        print(f"\nDONE\n")
        # Accessing recipe.title after commit refreshes the expired
        # instance while the session is still open.
        print(f"\nADDED RECIPE: {recipe.title}")
def main() -> None:
    """Main function."""
    create_db_and_tables()
    create_and_select()


# Script entry point: only run when executed directly.
if __name__ == "__main__":
    main()
| Pitrified/recipinator | backend/be/notebooks/relation/one_to_many_sa_dup.py | one_to_many_sa_dup.py | py | 2,922 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlalchemy.orm.declarative_base",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 29,
"usage_type": "argument"
},
{
"api_name... |
20681551463 | """ Perform quick baseline benchmarck based on bag of words
for sentiment analysis
Author: Pham Quang Nhat Minh (FTRI)
"""
import os
import sys
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn import metrics
from sklearn import cross_validation
from sklearn.pipeline import Pipeline
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.neighbors import KNeighborsClassifier
def get_w(w, tag):
    """Return the surface word: strip a '/POS' tag suffix when tag is truthy."""
    return w.partition('/')[0] if tag else w
if __name__ == '__main__':
    # NOTE(review): Python 2 script -- bare `print` statements below and
    # `sklearn.cross_validation` (removed in modern sklearn, now
    # `model_selection`); `min_samples_split=1` is also rejected by
    # current sklearn. Confirm the pinned environment before running.
    os.system('clear')
    # tag=True would strip '/POS' suffixes from tokens; data here is untagged.
    tag = False
    # use raw data
    # datadir = './data/SA2016-training_data'
    # use data with word segmentation
    datadir = './data/SA2016-training-data-ws'
    # filenames = ['SA-training_positive.txt',
    #              'SA-training_negative.txt',
    #              'SA-training_neutral.txt',
    #             ]
    filenames = [
        'train_positive_tokenized.txt',
        'train_negative_tokenized.txt',
        'train_neutral_tokenized.txt',
    ]
    #label_codes = ['pos', 'neg']
    label_codes = ['pos', 'neg', 'neutral']
    print("******** Use binary features ********")
    # Read each per-class file; every non-empty line is one sentence with
    # the class label taken from the file's position in `filenames`.
    sentences = []
    labels = []
    for i, filename in enumerate(filenames):
        path = os.path.join(datadir, filename)
        label = label_codes[i]
        f = open(path, 'r')
        for line in f:
            line = line.rstrip()
            if line == '':
                continue
            words = [ get_w(w, tag) for w in line.split()]
            sentences.append( ' '.join( words ) )
            labels.append(label)
    y = np.array(labels)
    # count_vect = CountVectorizer( ngram_range = (1,3), binary=True )
    count_vect = CountVectorizer( binary=True )
    X_binary = count_vect.fit_transform( sentences )
    models = [
        LinearSVC(),
        RandomForestClassifier(n_estimators=100, max_depth=None,
                               min_samples_split=1, random_state=0),
    ]
    model_names = [
        'Linear SVM',
        'Random Forest',
    ]
    # 10-fold cross-validated predictions with binary bag-of-words features.
    for clf, mdname in zip(models, model_names):
        print('== Use %s method ==' % mdname)
        X = X_binary
        # Gradient boosting needs a dense matrix (model currently unused).
        if mdname == 'Gradient Boosting Trees':
            X = X_binary.toarray()
        predicted = cross_validation.cross_val_predict(clf, X, y, cv=10)
        print(metrics.classification_report(y, predicted))
        print
        print
    print("******** Use TF-IDF weighting **********")
    # count_vect = CountVectorizer(ngram_range = (1,3))
    count_vect = CountVectorizer()
    X_count = count_vect.fit_transform( sentences )
    tfidf_transformer = TfidfTransformer()
    X_tfidf = tfidf_transformer.fit_transform( X_count )
    # Same models, now on TF-IDF-weighted features.
    for clf, mdname in zip(models, model_names):
        print('== Use %s method ==' % mdname)
        X = X_tfidf
        predicted = cross_validation.cross_val_predict(clf, X, y, cv=10)
        print(metrics.classification_report(y, predicted))
        print
| minhpqn/sentiment_analysis_vlsp_2016 | bow_baseline.py | bow_baseline.py | py | 3,409 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.system",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": ... |
1876221377 | from datetime import datetime
import django
import os
import sys
# Required for models to load
project_root = os.path.abspath(os.path.join(os.path.dirname(
os.path.abspath(__file__)), '..', 'network'))
sys.path.insert(0, project_root)
os.environ['DJANGO_SETTINGS_MODULE'] = 'project4.settings'
django.setup()
from network.models import User, Post, Following
from network.models import EST
admin = User.objects.create_superuser('admin', 'admin@example.com', '123')
user1 = User.objects.create_user(
first_name='Carlos',
last_name='Sainz',
username="SmoothOperator",
email="user1@example.com",
password="123")
user1.save()
user2 = User.objects.create_user(
first_name='Conor',
last_name='Mcgregor',
username="TheNotoriousMMA",
email="cm@example.com",
password="123")
user2.save()
user3 = User.objects.create_user(
first_name='Alex',
last_name='Pereira',
username="PoatanMMA",
email="ap@example.com",
password="123")
user3.save()
user4 = User.objects.create_user(
first_name='Novak',
last_name='Djokovic',
username="NoleTennis",
email="nd@example.com",
password="123")
user4.save()
p1 = Post(
user=user1,
content="""
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi feugiat ultricies
metus at dignissim. Aliquam tincidunt, elit at scelerisque lacinia, mi libero
ornare libero, sit amet dictum libero nunc suscipit tellus. Phasellus hendrerit,
elit et blandit ullamcorper, tellus est tincidunt sem, quis laoreet erat felis
nec turpis.
""",
created_at=datetime(2023, 9, 9, 1, 14, 15, tzinfo=EST),
)
p1.save()
p2 = Post(
user=user1,
content="""Ut sed tellus sed lorem congue mollis.""",
created_at=datetime(2023, 9, 12, 0, 28, 40, tzinfo=EST),
)
p2.save()
p3 = Post(
user=user1,
content="""
Interdum et malesuada fames ac ante ipsum primis in faucibus. Etiam mollis
tortor elit, tincidunt venenatis neque viverra scelerisque
""",
created_at=datetime(2023, 9, 19, 2, 19, 26, tzinfo=EST),
)
p3.save()
p4 = Post(
user=user2,
content="""
Etiam tempor sem eget tortor fermentum blandit. Nullam neque risus, convallis
a quam sit amet, hendrerit sollicitudin magna.
""",
created_at=datetime(2023, 9, 1, 14, 2, 50, tzinfo=EST),
)
p4.save()
p5 = Post(
user=user2,
content="""
Mauris venenatis ipsum quis libero hendrerit, nec molestie enim consectetur.
Suspendisse et risus eget dui pretium tincidunt.
""",
created_at=datetime(2023, 9, 7, 3, 13, 42, tzinfo=EST),
)
p5.save()
p6 = Post(
user=user2,
content="""
Etiam condimentum nunc neque, id finibus magna dapibus sed.
""",
created_at=datetime(2023, 9, 20, 21, 10, 3, tzinfo=EST),
)
p6.save()
p7 = Post(
user=user2,
content="""
Sed nec maximus urna
""",
created_at=datetime(2023, 9, 3, 16, 25, 50, tzinfo=EST),
)
p7.save()
p8 = Post(
user=user3,
content="""
Aenean sit amet tellus nec ex consectetur maximus.
""",
created_at=datetime(2023, 9, 9, 7, 20, 20, tzinfo=EST),
)
p8.save()
p9 = Post(
user=user3,
content="""
Dianzi laggiu lascia storia eccolo riposi pel all. Consunta oh piramide no
dovresti lucidita proseguo tremante.
""",
created_at=datetime(2023, 9, 6, 19, 26, 14, tzinfo=EST),
)
p9.save()
p10 = Post(
user=user3,
content="""
I love espressos!!!
""",
created_at=datetime(2023, 9, 17, 18, 0, 30, tzinfo=EST),
)
p10.save()
# Create a follower relationship
follower = Following(user=user1, follows=user2)
follower.save()
follower = Following(user=user1, follows=user3)
follower.save()
follower = Following(user=user1, follows=user4)
follower.save()
follower = Following(user=user2, follows=user4)
follower.save() | LorenzoPeve/CS50_Web | project_4/etl/init_etl.py | init_etl.py | py | 3,704 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.abspath",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_nu... |
33352868921 | # Services
import logging
from typing import List
# Own
from arq_server.base.ArqErrors import ArqError
from arq_server.services.CoreService import Configuration,Base
from arq_server.services.support.SecurityTools import Security
class NormalizeSelector:
# Services TIPS
__logger: logging.Logger
__config: Configuration
__security: Security
def __init__(self, core, cross):
self.__init_services(
core.logger_service(),
core.config_service(),
cross.security_tools()
)
self.__logger.info("NormalizeSelector - Servicios core inicializados correctamente")
self.__avaliableServicesWithInput={
'configuration': core.config_service(),
'security': cross.security_tools()
}
self.__logger.info("NormalizeSelector - lista de servicios que admiten instrucciones:\n"+str(self.__avaliableServicesWithInput.keys()))
def addAvaliableService(self,singletonService:object):
try:
self.__avaliableServicesWithInput[singletonService.__class__.__name__]=singletonService
self.__logger.info("Nuevo servicio incluído:" \
+ singletonService.__class__.__name__ \
+ "Ahora estan expuestos los siguientes servicios:\n" \
+ str(self.__avaliableServicesWithInput.keys())
)
except Exception as e:
raise ArqError("¡Error añadiendo un servicio nuevo a los ya expuestos! -> "+str(e))
def processInput(self,input:dict, headers:dict)->dict:
"""
metadata:
{
'protocol' : '',
'timeInit' : '',
'user' : ''
}
"""
output = {}
try:
input = self.__validateInput(input) # Si la entrada no es correcta, salta excepción
# Parametros de entrada
context = input.pop('context')
service = input.pop('service')
metadata = input.pop('metadata')
filtered_headers=self.__filterHeaders(headers,service) # Si faltan cabeceras, salta excepción
if service not in self.__config.getProperty('logical','publicServices').split(','):
self.__security.validate_token(filtered_headers['token']) # Valida token para los servicios protegidos
self.__logger.info("Contexto: %s",context)
if context == 'arq':
output['response']=self.__arq_instructions(service,input,**filtered_headers)
else:
raise ArqError("contexto no válido")
output['metadata']=metadata
except ArqError as arqErr:
output['error']=arqErr.normalize_exception()
return output
def __arq_instructions(self,service, input_instructions:dict,**kwargs):
if service not in self.__avaliableServicesWithInput:
raise ArqError("Servicio de arquitectura no existe o no admite instrucciones")
return self.__avaliableServicesWithInput[service].read_input_instruccions(input_instructions,**kwargs)
def __validateInput(self,raw_input:dict)->dict:
"""
Comprueba que el input fuente contiene las claves configuradas. Descarta excesos
---
Devuelve el diccionario de entrada filtrado
"""
avaliableKeys= self.__config.getProperty('logical','avaliableInputKeys').split(',')
try:
filtered_input = { av_key: raw_input[av_key] for av_key in avaliableKeys }
if (not isinstance(filtered_input['args'],List)) or (not isinstance(filtered_input['kwargs'],List)):
raise ArqError("Los argumentos no traen el formato correcto")
self.__logger.info("La entrada es válida")
return filtered_input
except ArqError as arqe:
raise arqe
except Exception as e:
raise ArqError("La entrada no cumple los requisitos, revisar:"+str(e))
def __filterHeaders(self,raw_headers,service):
"""
Comprueba que las cabeceras contiene las claves configuradas. Descarta excesos
---
Devuelve el diccionario de cabeceras filtrado
"""
service_headers= self.__config.getProperty('logical',service+'.avaliableHeaders')
if service_headers is None:
service_headers = self.__config.getProperty('logical','__default.avaliableHeaders')
avaliableHeaders = service_headers.split(',')
try:
filtered_headers = { av_key: raw_headers[av_key] for av_key in avaliableHeaders }
return filtered_headers
except ArqError as arqe:
raise arqe
except Exception as e:
raise ArqError("Faltan cabeceras, revisar:"+str(e))
def __init_services(self, logger, config, security):
# Servicio de logging
self.__logger = logger.arqLogger()
self.__config = config
self.__security = security
| RafaelGB/pythonScripts | Arquitectura/arq_server/services/protocols/logical/NormalizeSelector.py | NormalizeSelector.py | py | 4,989 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.Logger",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "arq_server.services.CoreService.Configuration",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "arq_server.services.support.SecurityTools.Security",
"line_number": 13,
... |
22290561342 | from typing import Any, Dict, Optional
import httpx
from ...client import Client
from ...models.mediation_grant import MediationGrant
from ...types import Response
def _get_kwargs(
mediation_id: str,
*,
client: Client,
) -> Dict[str, Any]:
url = "{}/mediation/requests/{mediation_id}/grant".format(client.base_url, mediation_id=mediation_id)
headers: Dict[str, str] = client.get_headers()
cookies: Dict[str, Any] = client.get_cookies()
return {
"method": "post",
"url": url,
"headers": headers,
"cookies": cookies,
"timeout": client.get_timeout(),
}
def _parse_response(*, response: httpx.Response) -> Optional[MediationGrant]:
if response.status_code == 201:
response_201 = MediationGrant.from_dict(response.json())
return response_201
return None
def _build_response(*, response: httpx.Response) -> Response[MediationGrant]:
return Response(
status_code=response.status_code,
content=response.content,
headers=response.headers,
parsed=_parse_response(response=response),
)
def sync_detailed(
mediation_id: str,
*,
client: Client,
) -> Response[MediationGrant]:
"""Grant received mediation
Args:
mediation_id (str):
Returns:
Response[MediationGrant]
"""
kwargs = _get_kwargs(
mediation_id=mediation_id,
client=client,
)
response = httpx.request(
verify=client.verify_ssl,
**kwargs,
)
return _build_response(response=response)
def sync(
mediation_id: str,
*,
client: Client,
) -> Optional[MediationGrant]:
"""Grant received mediation
Args:
mediation_id (str):
Returns:
Response[MediationGrant]
"""
return sync_detailed(
mediation_id=mediation_id,
client=client,
).parsed
async def asyncio_detailed(
mediation_id: str,
*,
client: Client,
) -> Response[MediationGrant]:
"""Grant received mediation
Args:
mediation_id (str):
Returns:
Response[MediationGrant]
"""
kwargs = _get_kwargs(
mediation_id=mediation_id,
client=client,
)
async with httpx.AsyncClient(verify=client.verify_ssl) as _client:
response = await _client.request(**kwargs)
return _build_response(response=response)
async def asyncio(
mediation_id: str,
*,
client: Client,
) -> Optional[MediationGrant]:
"""Grant received mediation
Args:
mediation_id (str):
Returns:
Response[MediationGrant]
"""
return (
await asyncio_detailed(
mediation_id=mediation_id,
client=client,
)
).parsed
| Indicio-tech/acapy-client | acapy_client/api/mediation/post_mediation_requests_mediation_id_grant.py | post_mediation_requests_mediation_id_grant.py | py | 2,755 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "client.Client",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "client.base_url",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "typing.Dict",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "client.get_headers",
... |
21542111875 | import os
import time
import torch
from argparse import ArgumentParser
from MemSE.nas import DataloadersHolder, ResNetArchEncoder
from MemSE.training import RunManager, RunConfig
from ofa.model_zoo import ofa_net
from MemSE.nn import OFAxMemSE, FORWARD_MODE, MemSE
from MemSE import ROOT
parser = ArgumentParser()
parser.add_argument("--datapath", default=os.environ.get("DATASET_STORE", None))
args, _ = parser.parse_known_args()
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
ofa = ofa_net('ofa_resnet50', pretrained=True)
ofa.set_max_net()
default_gmax = MemSE(ofa.get_active_subnet()).quanter.Wmax
ofaxmemse = OFAxMemSE(ofa)
encoder = ResNetArchEncoder(default_gmax)
run_config = RunConfig(dataset_root=args.datapath, dataset='ImageNetHF')
run_manager = RunManager(run_config, mode=FORWARD_MODE.MONTECARLO)
datahld = DataloadersHolder(run_manager)
train_ld, eval_ld = datahld.get_image_size(128)
for i in range(10):
print('Warming up ', i)
a = encoder.random_sample_arch(ofaxmemse)
a['image_size'] = 128
ofaxmemse.set_active_subnet(a, train_ld)
torch.cuda.synchronize()
@torch.no_grad()
def eval(nb_batch, power=1):
a = encoder.random_sample_arch(ofaxmemse)
a['image_size'] = 128
ofaxmemse.set_active_subnet(a, train_ld)
ofaxmemse.quant(scaled=False)
metrics = None
if nb_batch > 0:
_, metrics = run_manager.validate(
net=ofaxmemse,
data_loader=eval_ld,
no_logs=True,
nb_batchs=nb_batch,
nb_batchs_power=power
)
metrics.display_summary()
ofaxmemse.unquant()
return metrics
res = {0: {}, 1: {}}
for n in range(150):
for p in [0, 1]:
print(n, p)
times = []
for _ in range(5):
torch.cuda.synchronize()
start_t = time.time()
eval(n, power=p)
torch.cuda.synchronize()
elapsed = time.time() - start_t
times.append(elapsed)
res[p][n] = sum(times) / 5
print(res[p][n])
torch.save(res, ROOT/ "experiments/conference_2/results/overhead.pth") | sebastienwood/MemSE | experiments/conference_2/ofa_early_tests/ofa_overhead.py | ofa_overhead.py | py | 2,230 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.is_... |
18242721965 | import argparse
import generator.generator as generator
import shared.db as db
from shared import validators
parser = argparse.ArgumentParser()
parser.add_argument("--type", type=validators.check_data_type, default="u",
help="Specify the type of data: u - uncorrelated, w - weakly correlated, s - strongly correlated")
parser.add_argument("--elem_amount", type=validators.check_positive_integer, default=100,
help="Specify number of elements which will be generated")
parser.add_argument("--max_weight", dest="v", type=validators.check_positive_integer, default=10,
help="Specify maximum weight")
parser.add_argument("--max_prof_deviation", dest="r", type=validators.check_positive_integer, default=5,
help="Specify maximum deviation between weight and profit")
args = parser.parse_args()
if __name__ == '__main__':
print("type " + str(args.type))
print("elem_amount " + str(args.elem_amount))
print("v (max_weight) " + str(args.v))
print("r (max_prof_deviation) " + str(args.r))
data = generator.generate_data(args.type, args.elem_amount, args.v, args.r)
file_name = db.generate_file_name(args.type, args.elem_amount, args.v, args.r)
db.save_database(data, file_name)
new_data = db.load_database(file_name)
for elem in new_data:
print(elem.weight, elem.profit)
| wwolny/evolutionary-knapsack-problem | generate.py | generate.py | py | 1,392 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "shared.validators.check_data_type",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "shared.validators",
"line_number": 8,
"usage_type": "name"
},
{
"api... |
11983713355 | from __future__ import print_function
import collections
import string
class Program:
def __init__(self, program, program_id, queues):
self.registers = collections.defaultdict(int)
self.registers['p'] = program_id
self.queues = queues
self.program = program
self.idx = 0
self.program_id = program_id
self.send_count = 0
self.first_sent = None
self.first_printed = False
def get_id(self):
return self.program_id
def step(self):
def val(s):
if s in string.letters:
return self.registers[s]
return int(s)
while self.idx < len(self.program):
i = self.program[self.idx]
cmd = i[0]
if cmd == 'set':
self.registers[i[1]] = val(i[2])
elif cmd == 'add':
self.registers[i[1]] += val(i[2])
elif cmd == 'mul':
self.registers[i[1]] *= val(i[2])
elif cmd == 'mod':
self.registers[i[1]] %= val(i[2])
elif cmd == 'jgz':
if val(i[1]) > 0:
self.idx += val(i[2])
continue
elif cmd == 'snd':
to_send = val(i[1])
self.queues[1 - self.program_id].append(to_send)
self.send_count += 1
if self.program_id == 0:
self.first_sent = to_send
elif cmd == 'rcv':
reg_ref = i[1]
if self.program_id == 0 and self.registers[reg_ref] and not self.first_printed:
print('easy', self.first_sent)
self.first_printed = True
if not self.queues[self.program_id]:
return
self.registers[reg_ref] = self.queues[self.program_id].pop(0)
self.idx += 1
def main(inp):
instrs = [line.rstrip().split() for line in inp]
queues = [[], []]
progs = [Program(instrs, 0, queues), Program(instrs, 1, queues)]
idx = 0
while True:
progs[idx].step()
idx = (idx + 1) % len(progs)
if not queues[0] and not queues[1]:
print('hard', progs[1].send_count)
break
if __name__ == '__main__':
import sys
main(sys.stdin) | dfyz/adventofcode | 2017/18/sln.py | sln.py | py | 2,330 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "collections.defaultdict",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "string.letters",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin",
"line_number": 75,
"usage_type": "attribute"
}
] |
24495933626 | import matplotlib.pyplot as plt
def visualize_data(title,ylabel,xlabel):
plt.scatter(x_train, y_train, marker='x', c='r')
# Set the title
plt.title(title)
# Set the y-axis label
plt.ylabel(ylabel)
# Set the x-axis label
plt.xlabel(xlabel)
plt.show()
| JeremiahTheFirst/MachineLearningClasses | Python_Coursera/SecondWeek/visualize_data.py | visualize_data.py | py | 287 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matp... |
34707942970 | """Adds versioning `User.authorized`
Revision ID: ee3c6c0702a6
Revises: 0fbbcf5eb614
Create Date: 2021-10-04 03:41:16.571947
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ee3c6c0702a6'
down_revision = '0fbbcf5eb614'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('authorized', sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'authorized')
# ### end Alembic commands ###
| jshwi/jss | migrations/versions/ee3c6c0702a6_adds_versioning_user_authorized.py | ee3c6c0702a6_adds_versioning_user_authorized.py | py | 676 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "alembic.op.add_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Boolean... |
16845073288 | import threading
import time
import logging
import serial
import pynmea2
log = logging.getLogger('gnss')
class GnssThread(threading.Thread):
def __init__(self, q, NMEAPort):
threading.Thread.__init__(self)
self.q = q
self.NMEAPort = NMEAPort
self.live = True
self.nmea = None
def run(self):
log.debug(f"Listening for NMEA on {self.NMEAPort}...")
self.nmea = serial.Serial(self.NMEAPort)
while self.live:
line = self.nmea.readline().decode('ASCII')
if line.startswith("$GPGGA"):
# Little silly to use PyNMEA2 for just this one thing, but the NMEA sentence format
# is oddly complicated and this saves doing our own lat/lon format conversions.
sentence = pynmea2.parse(line)
# Only send on valid fixes, before gps_qual changes the results are either null or
# have very high error.
if sentence.gps_qual != 0:
self.q.put(['LocationFix', {'lat': sentence.latitude, 'lon': sentence.longitude, 'alt': sentence.altitude}])
def stop(self):
self.live = False | jcrawfordor/cellscan | cellscan/gnss.py | gnss.py | py | 1,186 | python | en | code | 25 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread.__init__",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "threa... |
37240069738 | import pathlib
import sys
module_dir = pathlib.Path(__file__).parent.resolve()
root_dir = module_dir.parent
model_dir = root_dir.joinpath("models")
asvlite_wrapper_dir = root_dir.joinpath("dependency", "ASVLite", "wrapper", "cython")
sys.path.insert(0, str(asvlite_wrapper_dir))
import os.path
import math
import multiprocessing as mp
import pyproj
import epsg
import random
import numpy as np
import pandas as pd
from tqdm import tqdm # to show a progress bar
from sea_surface import py_Sea_surface
from asv import py_Asv, py_Asv_specification
from geometry import py_Coordinates_3D
class Rudder_PID_controller:
def __init__(self, asv_spec, K=None):
self.max_rudder_angle = math.pi/6 # 30 deg
self.asv_spec = asv_spec
self.error = 0.0 # error in each time step
self.previous_error = 0.0 # error in the previous time step
self.cumulative_error = 0.0 # integral of errors
self.delta_error = 0.0 # differential of error
self.out_dir = root_dir.joinpath("results", "rudder_controller", "tuning")
self.out_dir.mkdir(parents=True, exist_ok=True) # Create the out_dir if it does not exist.
# P,I,D gain terms
if K is not None:
self.K = np.array(K)
else:
# K is None
# Check if PID file exist in models
rudder_PID_file = model_dir.joinpath("rudder_PID")
if os.path.isfile(str(rudder_PID_file)):
# PID values exist is file
df_pid = pd.read_csv(rudder_PID_file, delim_whitespace=True)
self.K = np.array(df_pid.iloc[0])
print("Loading rudder PID from file - {}".format(rudder_PID_file))
print("P,I,D = ", self.K)
else:
# PID values not provided and does not exist in file.
initial_PIDs = []
for P in [1,2]:
for I in [1,2]:
for D in [1,2]:
initial_PIDs.append([P,I,D])
initial_PIDs = tqdm(initial_PIDs, leave=False)
initial_PIDs.set_description("Tuning iterations")
for initial_PID in initial_PIDs:
P,I,D = initial_PID
self.K = np.array([P,I,D])
self._tune_controller()
def __relative_angle(self, asv, waypoint):
theta = None
p1 = asv.py_get_position_origin()
p2 = asv.py_get_position_cog()
p3 = waypoint
# theta = math.atan2((p3.y-p1.y), (p3.x-p1.x)) - math.atan2((p2.y-p1.y), (p2.x-p1.x))
long1, lat1 = epsg.PCS_to_GCS(p1.x, p1.y)
long2, lat2 = epsg.PCS_to_GCS(p2.x, p2.y)
long3, lat3 = epsg.PCS_to_GCS(p3.x, p3.y)
geodesic = pyproj.Geod(ellps='WGS84')
fwd_azimuth_1, back_azimuth_1, distance_1 = geodesic.inv(long1, lat1, long2, lat2)
# fwd_azimuth_1 = fwd_azimuth_1 if fwd_azimuth_1 >= 0.0 else (360 + fwd_azimuth_1)
fwd_azimuth_2, back_azimuth_2, distance_2 = geodesic.inv(long1, lat1, long3, lat3)
# fwd_azimuth_2 = fwd_azimuth_2 if fwd_azimuth_2 >= 0.0 else (360 + fwd_azimuth_2)
theta = (fwd_azimuth_2 - fwd_azimuth_1) * math.pi/180
return theta
def get_rudder_angle(self, asv, waypoint):
# Compute the relative angle between the vehicle heading and the waypoint.
theta = self.__relative_angle(asv, waypoint)
# Set error as the difference of the current heading and the desired heading.
self.previous_error = self.error
self.error = theta
gamma = 0.7 # Rate at which the past errors reduces.
self.cumulative_error = self.error + gamma*self.cumulative_error
self.delta_error = self.error - self.previous_error
# Compute the rudder angle
E = np.array([self.error, self.cumulative_error, self.delta_error]) # P, I, D errors.
phi = np.dot(np.transpose(self.K), E) # radians because error is in radians.
# Limit the rudder angle within the range (-PI/6, PI/6)
if phi > self.max_rudder_angle:
phi = self.max_rudder_angle
elif phi < -self.max_rudder_angle:
phi = -self.max_rudder_angle
return phi # radians
def _simulate_asv_in_sea_state(self, sea_state_and_PID):
significant_wave_ht, asv_heading, P, I, D = sea_state_and_PID
long, lat = (-150.0, 20)
x, y = epsg.GCS_to_PCS(long, lat)
start_point = py_Coordinates_3D(x, y, 0)
long, lat = (-150.0, 20.01)
x, y = epsg.GCS_to_PCS(long, lat)
waypoint = py_Coordinates_3D(x, y, 0)
# Init waves
rand_seed = 1
count_component_waves = 21
wave = py_Sea_surface(significant_wave_ht, 0.0, rand_seed, count_component_waves)
# Init ASV
time_step_size = 40 # milli sec
attitude = py_Coordinates_3D(0.0, 0.0, asv_heading)
asv = py_Asv(self.asv_spec, wave, start_point, attitude)
thrust_tuning_factor = 0.03 # The thrust tuning factor is an assumed and, for controller tuning, thrust tuning factor
# is assumed as a constant for all sea states.
asv.py_wg_set_thrust_tuning_factor(thrust_tuning_factor)
# Init controller
controller = Rudder_PID_controller(self.asv_spec, [P,I,D])
# Simulate
time = 0.0
max_time = 60 # sec
heading_error = 0
while time < max_time:
time += time_step_size/1000.0
# Compute the dynamics
rudder_angle = controller.get_rudder_angle(asv, waypoint)
asv.py_wg_compute_dynamics(rudder_angle, time_step_size)
# Compute the error in heading
error = self.__relative_angle(asv, waypoint)
heading_error += error*error
# Root mean square error
rms_error = math.sqrt(heading_error/(max_time * (1000/time_step_size)))
return rms_error
def _tune_controller(self):
f = open("{}/{}_{}_{}.txt".format(self.out_dir, self.K[0], self.K[1], self.K[2]), "w")
f.write("P I D error_avg error_std\n")
pool = mp.Pool(mp.cpu_count()) # Create a pool of processes to run in parallel.
delta = 0.25
P_current, I_current, D_current = list(self.K)
# Policy Gradient Reinforcement Learning
iterations = tqdm(range(10), leave=False) # This is going to take some time, therefore show a progress bar.
iterations.set_description("Policy iterations")
for n in iterations:
costs = []
PIDs = []
for P in [P_current-delta, P_current, P_current+delta]:
for I in [I_current-delta, I_current, I_current+delta]:
for D in [D_current-delta, D_current, D_current+delta]:
PIDs.append([P,I,D])
PIDs = tqdm(PIDs, leave=False)
PIDs.set_description("Controller variants")
for PID in PIDs:
P,I,D = PID
sea_states_and_PID = []
for significant_wave_ht in np.arange(1.0, 10.0, 2.0):
for asv_heading in np.arange(0.0, 360.0, 45.0):
sea_states_and_PID.append([significant_wave_ht, asv_heading * math.pi/180, P, I, D])
results = []
for result in pool.imap_unordered(self._simulate_asv_in_sea_state, sea_states_and_PID): # Run multiple simulations in parallel
results.append(result) # append the return for each call to self._simulate_asv_in_sea_state to the list.
# Compute cost for each combination of PID:
costs.append([P, I, D, np.average(np.array(results))])
# Compute the next set of PID terms
costs = np.array(costs)
f.write("{} {} {} {} {}\n".format( P_current,
I_current,
D_current,
np.average(costs, axis=0)[-1],
np.std(costs, axis=0)[-1]))
def compute_average_cost(index, K):
mask = []
for item in costs:
mask_value = True if item[index] == K else False
mask.append(mask_value)
average_cost = costs[mask].mean(axis=0)[-1]
return average_cost
# Compute the average performance for all cases with P_current-delta
avg_costs_P_minus = compute_average_cost(0, P_current-delta)
# Compute the average performance for all cases with P_current
avg_costs_P = compute_average_cost(0, P_current)
# Compute the average performance for all cases with P_current+delta
avg_costs_P_plus = compute_average_cost(0, P_current+delta)
# Compute the average performance for all cases with I_current-delta
avg_costs_I_minus = compute_average_cost(1, I_current-delta)
# Compute the average performance for all cases with I_current
avg_costs_I = compute_average_cost(1, I_current)
# Compute the average performance for all cases with I_current+delta
avg_costs_I_plus = compute_average_cost(1, I_current+delta)
# Compute the average performance for all cases with D_current-delta
avg_costs_D_minus = compute_average_cost(2, D_current-delta)
# Compute the average performance for all cases with D_current
avg_costs_D = compute_average_cost(2, D_current)
# Compute the average performance for all cases with D_current+delta
avg_costs_D_plus = compute_average_cost(2, D_current+delta)
# Compute the Adjustment vector.
A = [0,0,0]
# Adjustment for P
min_costs_P = min(avg_costs_P_minus, avg_costs_P, avg_costs_P_plus)
if min_costs_P == avg_costs_P_minus:
A[0] = -1
elif min_costs_P == avg_costs_P:
A[0] = 0
else:
A[0] = 1
# Adjustment for I
min_costs_I = min(avg_costs_I_minus, avg_costs_I, avg_costs_I_plus)
if min_costs_I == avg_costs_I_minus:
A[1] = -1
elif min_costs_I == avg_costs_I:
A[1] = 0
else:
A[1] = 1
# Adjustment for D
min_costs_D = min(avg_costs_D_minus, avg_costs_D, avg_costs_D_plus)
if min_costs_D == avg_costs_D_minus:
A[2] = -1
elif min_costs_D == avg_costs_D:
A[2] = 0
else:
A[2] = 1
# Compute the new gain terms
A = np.array(A)
K_current = np.array([P_current, I_current, D_current])
K_current = K_current + A*delta
P_current, I_current, D_current = list(K_current)
self.K = K_current
f.close()
if __name__ == '__main__':
import cProfile
# Wave glider specs
asv_spec = py_Asv_specification()
asv_spec.L_wl = 2.1 # m
asv_spec.B_wl = 0.6 # m
asv_spec.D = 0.25 # m
asv_spec.T = 0.09 # m
asv_spec.max_speed = 4.0 # m/s
asv_spec.disp = 0.09 # m3
asv_spec.r_roll = 0.2 # m
asv_spec.r_pitch = 0.6 # m
asv_spec.r_yaw = 0.6 # m
asv_spec.cog = py_Coordinates_3D(1.05, 0.0, -3.0) # m
rudder_controller = Rudder_PID_controller(asv_spec) # Will also tune the controller. | resilient-swarms/StormExplorers | source/rudder_controller.py | rudder_controller.py | py | 11,698 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path.insert",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "math.pi",
"line_number": ... |
31343094774 | import math
from django.shortcuts import render
# Create your views here.
from django.views import View
from goods.models import *
from django.core.paginator import Paginator
from django.http.response import HttpResponseBase
# 主页显示
class IndexView(View):
    """Paginated product listing for one category (8 items per page)."""

    def get(self, request, cid=1, num=1):
        # URL path parameters arrive as strings; coerce to int to avoid
        # type errors in the queries and arithmetic below.
        cid = int(cid)
        num = int(num)
        # step1: all categories for the navigation menu
        categorys = Category.objects.all().order_by('id')
        # step2: all goods of the current category (cid defaults to 1)
        goodsList = Goods.objects.filter(category_id=cid).order_by('id')
        # step3: paginate, 8 records per page
        pager = Paginator(goodsList, 8)
        # step4: fetch the requested page
        page_goodList = pager.page(num)
        # step5: build a window of up to 10 page links with the current page
        # roughly centered, clamped to [1, num_pages]
        begin = (num - int(math.ceil(10.0 / 2)))
        # clamp the first page link
        if begin < 1:
            begin = 1
        # clamp the last page link
        end = begin + 9
        if end > pager.num_pages:
            end = pager.num_pages
        if end <= 10:
            begin = 1
        else:
            begin = end - 9
        pagelist = range(begin, end + 1)
        return render(request, 'index.html',
                      {'categorys': categorys, 'goodList': page_goodList, 'currentCid': cid, 'pagelist': pagelist,
                       'currentNum': num})
# Decorator implementing "you may also like": caches the ids of recently
# viewed goods in a cookie (most-recently-viewed first, LRU-style).
def recommend_view(func):
    """Wrap a detail view: inject same-category recommendations and keep a
    browsing-history cookie ('recommend', whitespace-separated goods ids)."""
    def wrapper(detailView, request, goodsid, *args, **kwargs):
        # Read the previously viewed goods ids from the cookie.
        cookie_str = request.COOKIES.get('recommend', '')
        # All stored goods ids (whitespace separated, blanks dropped).
        goodsIdList = [gid for gid in cookie_str.split() if gid.strip()]
        # Recommendations: up to 4 previously viewed goods from the same
        # category, excluding the one currently being viewed.
        goodsObjList = [Goods.objects.get(id=gsid) for gsid in goodsIdList if
                        gsid != goodsid and Goods.objects.get(id=gsid).category_id == Goods.objects.get(
                id=goodsid).category_id][:4]
        # Hand the recommendations to the wrapped get() method.
        response = func(detailView, request, goodsid, goodsObjList, *args, **kwargs)
        # Move (or insert) the current goods id to the front of the history.
        if goodsid in goodsIdList:
            goodsIdList.remove(goodsid)
            goodsIdList.insert(0, goodsid)
        else:
            goodsIdList.insert(0, goodsid)
        # Ensure every id is a string before joining.
        goodsIdList = [str(x) for x in goodsIdList]
        # Persist the updated history back to the cookie.
        response.set_cookie('recommend', str(" ".join(goodsIdList)))
        return response
    return wrapper
class DetailView(View):
    """Product detail page; the decorator supplies same-category
    recommendations and maintains the browsing-history cookie."""

    @recommend_view
    def get(self, request, goodsid, recommendList=[]):
        # NOTE(review): mutable default is only read here and the decorator
        # always passes a fresh list, so it is harmless -- but a tuple ()
        # would be safer if the decorator were ever removed; confirm.
        goodsid = int(goodsid)
        # Look up the goods record for this detail page.
        goods = Goods.objects.get(id=goodsid)
        return render(request, 'detail.html', {'goods': goods, 'recommendList': recommendList})
{
"api_name": "django.views.View",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.core.paginator.Paginator",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.sh... |
17783054773 | # -*- coding: utf-8 -*-
"""
v2.0 - 23-Feb-2017
Changes:
(1) In perspective_transformation(): Corrected persective transformation source and destination points.
(2) In edge_detect(): Corrected color conversion.
(3) In detect_lanes() and opt_detect_lanes(): Corrected calculation of radii of curvature and vehicle offset.
(4) In class Line(): set the lane curve fit averaging to 3.
(5) In process_video_frame(image): added color conversion and improved the logic for calling etect_lanes() and opt_detect_lanes()
Created on Wed Feb 15 18:30:51 2017
Advanced Lane Finding v1.0
"""
##############################################################################
### INCLUDES
##############################################################################
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from moviepy.editor import VideoFileClip
##############################################################################
## Camera Calibration
##############################################################################
def calibrate_camera(list_images, num_corners = (6,9)):
    """Compute camera matrix and distortion coefficients from chessboard photos.

    list_images : iterable of image file paths.
    num_corners : (rows, cols) of inner chessboard corners.
    Returns (mtx, dist) suitable for cv2.undistort().

    NOTE(review): assumes list_images is non-empty and at least one board is
    detected -- otherwise ``gray`` is unbound / calibration fails; confirm.
    """
    row, col = num_corners[0], num_corners[1]
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((row*col,3), np.float32)
    objp[:,:2] = np.mgrid[0:col,0:row].T.reshape(-1,2)
    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3D points in real world space
    imgpoints = [] # 2D points in image plane.
    # Step through the list and search for chessboard corners
    for fname in list_images:
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (col,row),None)
        # If found, add object points, image points
        if ret == True:
            objpoints.append(objp)
            imgpoints.append(corners)
            # Draw the corners onto the image (result is not used further)
            img = cv2.drawChessboardCorners(img, (col,row), corners, ret)
    # Calibrate the camera with the found object and image points
    # (image size taken from the last grayscale image processed)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
    return mtx, dist
# Make a list of calibration images and calibrate once at import time.
list_images = glob.glob('./camera_cal/calibration*.jpg')
mtx, dist = calibrate_camera(list_images,(6,9))
# ``if 1``: save a distorted/undistorted test-image pair to disk.
# NOTE(review): the ``else`` branch is dead code and references ``img``/``dst``
# which are only bound in the ``if`` branch -- it would fail if enabled.
if 1:
    img = cv2.imread('./test_images/test3.jpg')
    cv2.imwrite('./output_images/test_image.jpg', img)
    dst = cv2.undistort(img, mtx, dist, None, mtx)
    cv2.imwrite('./output_images/undistorted_test_image.jpg', dst)
else :
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
    ax1.set_title('distorted image')
    ax1.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    ax2.set_title('undistorted image')
    ax2.imshow(cv2.cvtColor(dst, cv2.COLOR_BGR2RGB))
##############################################################################
# Functions for absolute value of gradient along x orientation, magnitude of
#the gradients and direction of the gradient
##############################################################################
def abs_sobel_thresh(gray, orient='x', sobel_kernel=3, thresh=(0, 255)):
    """Binary mask of pixels whose scaled |Sobel| gradient lies within *thresh*.

    gray         : single-channel image.
    orient       : 'x' or 'y' -- axis of the derivative.
    sobel_kernel : Sobel aperture size.
    thresh       : inclusive (low, high) band on the 0-255 scaled gradient.
    """
    # Derivative along the requested axis.
    dx, dy = (1, 0) if orient == 'x' else (0, 1)
    gradient = cv2.Sobel(gray, cv2.CV_64F, dx, dy, ksize=sobel_kernel)
    # Absolute gradient, rescaled to the full 8-bit range.
    magnitude = np.absolute(gradient)
    scaled = np.uint8(255 * magnitude / np.max(magnitude))
    # Mark every pixel that falls inside the threshold band.
    mask = np.zeros_like(scaled)
    mask[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1
    return mask
def mag_thresh(gray, sobel_kernel=3, mag_thresh=(0, 255)):
    """Binary mask of pixels whose scaled gradient magnitude lies in *mag_thresh*.

    The parameter deliberately shadows the function name so the existing
    keyword-argument API (callers pass ``mag_thresh=``) stays unchanged.
    """
    # Gradients along both axes.
    gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Euclidean gradient magnitude, rescaled to 0-255.
    magnitude = np.sqrt(gx ** 2 + gy ** 2)
    scaled = np.uint8(255 * magnitude / np.max(magnitude))
    # Keep pixels inside the inclusive threshold band.
    lo, hi = mag_thresh
    mask = np.zeros_like(scaled)
    mask[(scaled >= lo) & (scaled <= hi)] = 1
    return mask
def dir_threshold(gray, sobel_kernel=3, thresh=(0, np.pi/2)):
    """Binary mask of pixels whose gradient direction (radians) lies in *thresh*."""
    # Gradients along both axes.
    gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Direction from the absolute x / y gradient components.
    direction = np.arctan2(np.absolute(gy), np.absolute(gx))
    # Keep pixels whose direction falls inside the inclusive band.
    lo, hi = thresh
    mask = np.zeros_like(direction)
    mask[(direction >= lo) & (direction <= hi)] = 1
    return mask
##############################################################################
##Prepare image for lane detection using magnitude threshold and S component
## of HLS converted image.
##############################################################################
def edge_detect(undist_image):
    """Build a binary lane-pixel mask from an undistorted BGR road image.

    Combines a grayscale gradient-magnitude mask with an HLS S-channel
    threshold; a pixel is kept when either detector fires.
    """
    gray = cv2.cvtColor(undist_image, cv2.COLOR_BGR2GRAY)
    mag_binary = mag_thresh(gray, sobel_kernel=9, mag_thresh=(75, 255))
    hls = cv2.cvtColor(undist_image, cv2.COLOR_BGR2HLS)
    S = hls[:,:,2]  # saturation channel
    thresh = (175, 255)
    binary = np.zeros_like(S)
    binary[(S > thresh[0]) & (S <= thresh[1])] = 1
    # Union of the two masks.
    comb_binary = np.zeros_like(binary)
    comb_binary[(mag_binary == 1) | (binary >= 1)] = 1
    return comb_binary
# Run edge detection on the undistorted test image; the ``if 0`` toggle
# switches between saving the mask and plotting it.
comb_binary = edge_detect(dst)
if 0:
    # NOTE(review): the mask holds 0/1 values, so the saved JPEG appears
    # black; scaling by 255 before imwrite would make it visible -- confirm.
    cv2.imwrite('./output_images/combined_binary_test_image.jpg', comb_binary)
else:
    f2, (a2) = plt.subplots(1, 1, figsize=(20,10))
#    a1.set_title('Test image - undistorted')
#    a1.imshow(cv2.cvtColor(dst, cv2.COLOR_BGR2RGB))
    a2.set_title('Lane detected with window search')
    a2.imshow(comb_binary, cmap='gray')
##############################################################################
##Perspective transformation
##############################################################################
def perspective_transformation():
    """Return (M, Minv): road-trapezoid -> bird's-eye transform and its inverse."""
    # Source trapezoid on the undistorted road image.
    road_pts = np.float32([[215, 700], [1080, 700], [735, 480], [550, 480]])
    # Matching destination rectangle in the bird's-eye view.
    birdseye_pts = np.float32([[360, 720], [960, 720], [960, 0], [360, 0]])
    M = cv2.getPerspectiveTransform(road_pts, birdseye_pts)
    Minv = cv2.getPerspectiveTransform(birdseye_pts, road_pts)
    return M, Minv
# Warp the edge mask into the bird's-eye view (M/Minv are reused globally).
M,Minv = perspective_transformation()
binary_warped = cv2.warpPerspective(comb_binary,M,(comb_binary.shape[1], comb_binary.shape[0]), flags=cv2.INTER_LINEAR)
if 0:
    cv2.imwrite('./output_images/binary_warped_test_image.jpg', binary_warped)
#else:
#    f2, (a2) = plt.subplots(1, 1, figsize=(20,10))
##    a1.set_title(' edge detected image')
##    a1.imshow(comb_binary, cmap='gray')
#
#    a2.set_title('Lane detected perspective transformed image')
#    a2.imshow(binary_warped, cmap='gray')
##############################################################################
##Detect lanes - using histogram and window search method
##############################################################################
def detect_lanes(binary_warped, visualize = True):
    """Find lane lines in a bird's-eye binary image with a histogram seed and
    a sliding-window search, then fit x = A*y^2 + B*y + C per lane.

    Parameters
    ----------
    binary_warped : 2-D array of 0/1 pixels (perspective-warped edge mask).
    visualize     : when True, draw the windows/fits and save a debug image.

    Returns
    -------
    (left_fit, right_fit, left_curverad, right_curverad, vehicle_offset),
    or five ``None`` values when too few lane pixels were found, so the
    caller can fall back to the previous frame's fit.
    """
    # Histogram of the bottom image half; its two peaks seed the lane bases.
    # BUG FIX: use integer division -- a float slice index raises TypeError
    # on Python 3.
    histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
    midpoint = histogram.shape[0] // 2
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # Sliding-window parameters (np.int was removed in NumPy 1.24, so plain
    # integer arithmetic is used instead).
    nwindows = 9
    window_height = binary_warped.shape[0] // nwindows
    # Coordinates of every nonzero pixel in the image.
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Window centers, updated as the search climbs the image.
    leftx_current = leftx_base
    rightx_current = rightx_base
    margin = 50   # window half-width in pixels
    minpix = 25   # minimum pixels found to recenter the next window
    left_lane_inds = []
    right_lane_inds = []
    if (visualize == True):
        # RGB canvas for drawing the search windows.
        out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    # Step through the windows one by one, bottom to top.
    for window in range(nwindows):
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        if (visualize == True):
            # Draw the windows on the visualization image.
            cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2)
            cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)
        # Nonzero pixels falling inside each window.
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # Recenter the next window on the mean x of the pixels just found.
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the per-window index arrays.
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Pixel positions of each lane line.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Too few points: signal failure so the caller reuses the previous fit.
    min_inds = 7200
    if lefty.shape[0] < min_inds or righty.shape[0] < min_inds:
        return None, None, None, None, None
    # Second-order polynomial fit per lane (x as a function of y).
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    if (visualize == True):
        # Generate x and y values for plotting the fitted curves.
        ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
        out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
        out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
        # Show and save the annotated search image.
        plt.imshow(out_img)
        plt.plot(left_fitx, ploty, color='yellow')
        plt.plot(right_fitx, ploty, color='yellow')
        plt.xlim(0, 1280)
        plt.ylim(720, 0)
        plt.imsave('./output_images/window_search_lane_detect_test_image.jpg', out_img)
    # Radius of curvature, evaluated at y = 600, in world (meter) units.
    # (The pixel-space radii the original computed first were immediately
    # overwritten, so that dead code has been removed.)
    y_eval = 600
    ym_per_pix = 60/720 # meters per pixel in y dimension
    xm_per_pix = 3.7/600 # meters per pixel in x dimension
    # Refit in world space and evaluate the curvature there.
    left_fit_cr = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2)
    right_fit_cr = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2)
    left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
    right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
    # Vehicle offset from lane center at the image bottom, in meters.
    bottom_y = binary_warped.shape[0] - 1
    bottom_x_left = left_fit[0]*(bottom_y**2) + left_fit[1]*bottom_y + left_fit[2]
    bottom_x_right = right_fit[0]*(bottom_y**2) + right_fit[1]*bottom_y + right_fit[2]
    vehicle_offset = (binary_warped.shape[1]/2 - (bottom_x_left + bottom_x_right)/2) * xm_per_pix
    return left_fit, right_fit, left_curverad, right_curverad, vehicle_offset
# One-off run of the full window search on the warped test image
# (visualize=True also writes the annotated debug image to ./output_images/).
left_fit, right_fit, left_curverad, right_curverad, vehicle_offset = detect_lanes(binary_warped, visualize=True)
##############################################################################
##Optimised Detect lanes - this is used for subsequent frames on videos
##############################################################################
def opt_detect_lanes(binary_warped, left_fit, right_fit, visualize=True):
    """Fast lane re-detection for video: search only within a margin around
    the previous frame's polynomial fits instead of a full window search.

    Parameters
    ----------
    binary_warped       : 2-D array of 0/1 pixels (bird's-eye edge mask).
    left_fit, right_fit : previous (A, B, C) coefficients of x = f(y).
    visualize           : when True, draw the search corridor and save it.

    Returns the same 5-tuple as detect_lanes(), or five ``None`` values when
    too few pixels are found (the caller then runs the full search).
    """
    # All nonzero pixel coordinates of the new frame.
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    margin = 50
    # Keep pixels within +/- margin of each previous polynomial curve.
    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] + margin)))
    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] + margin)))
    # Pixel positions of each lane line.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Too few points: signal failure so the caller runs the full search.
    min_inds = 7200
    if lefty.shape[0] < min_inds or righty.shape[0] < min_inds:
        return None, None, None, None, None
    # Refit a second-order polynomial to each lane.
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    if (visualize == True):
        # Generate x and y values for plotting the fitted curves.
        ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
        # Canvas plus an overlay image for the search corridor.
        out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
        window_img = np.zeros_like(out_img)
        # Color in left and right line pixels.
        out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
        out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
        # Polygons illustrating the search corridor around each fit.
        left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
        left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
        left_line_pts = np.hstack((left_line_window1, left_line_window2))
        right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
        right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
        right_line_pts = np.hstack((right_line_window1, right_line_window2))
        # Draw the corridor onto the blank overlay and blend.
        cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
        cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
        result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
        # Show and save the image to disk.
        plt.imshow(result)
        plt.plot(left_fitx, ploty, color='yellow')
        plt.plot(right_fitx, ploty, color='yellow')
        plt.xlim(0, 1280)
        plt.ylim(720, 0)
        # BUG FIX: plt.imsave requires the image array as its second
        # argument; the original passed only the path, which raises
        # TypeError whenever visualize=True.
        plt.imsave('./output_images/quick_search_lane_detect_test_image.jpg', result)
    # Radius of curvature at y = 600, in meters (world-space refit; the
    # overwritten pixel-space computation was dead code and is removed).
    y_eval = 600
    ym_per_pix = 60/720 # meters per pixel in y dimension
    xm_per_pix = 3.7/600 # meters per pixel in x dimension
    left_fit_cr = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2)
    right_fit_cr = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2)
    left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
    right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
    # Vehicle offset from lane center at the image bottom, in meters.
    bottom_y = binary_warped.shape[0] - 1
    bottom_x_left = left_fit[0]*(bottom_y**2) + left_fit[1]*bottom_y + left_fit[2]
    bottom_x_right = right_fit[0]*(bottom_y**2) + right_fit[1]*bottom_y + right_fit[2]
    vehicle_offset = (binary_warped.shape[1]/2 - (bottom_x_left + bottom_x_right)/2) * xm_per_pix
    return left_fit, right_fit, left_curverad, right_curverad, vehicle_offset
##############################################################################
##Draw lanes on original image for visualization
##############################################################################
def draw_lanes_on_road(binary_warped, left_fit, right_fit, rc, vehicle_offset, image ):
    """Project the fitted lane polygon back onto the undistorted road image
    and annotate it with the radius of curvature and vehicle offset.

    Uses the module-level inverse perspective matrix ``Minv``.
    Returns the annotated BGR image.
    """
    # Create an image to draw the lines on
    warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # Generate x and y values for plotting
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    # Recast the x and y points into usable format for cv2.fillPoly()
    # (the right side is flipped so the polygon outline is traced in order)
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (image.shape[1], image.shape[0]))
    # Combine the result with the original image
    result = cv2.addWeighted(image,1, newwarp, 0.3, 0)
    # Print the radius of curvature and vehicle offset
    label = 'Radius of curvature: %.f m' % rc
    result = cv2.putText(result, label, (20,50), 0, 1, (255,255,255), 2, cv2.LINE_AA)
    label = 'Car offset from lane center: %.1f m' % vehicle_offset
    result = cv2.putText(result, label, (20,80), 0, 1, (255,255,255), 2, cv2.LINE_AA)
    return result
# Annotate the still test image with the detected lane (mean of both radii).
result = draw_lanes_on_road(binary_warped, left_fit, right_fit, (left_curverad + right_curverad)/2.0, vehicle_offset, dst)
if 0:
    cv2.imwrite('./output_images/final_test_image.jpg', result)
##############################################################################
##Line class for videos
##############################################################################
# Define a class to receive the characteristics of each line detection
class Line():
    """Per-lane-line state carried between video frames: the recent
    polynomial fits, their moving average, the radius of curvature and the
    vehicle offset."""

    def __init__(self):
        # was the line detected in the last iteration?
        self.detected = False
        # Histories of the polynomial coefficients: x = A*y^2 + B*y + C
        self.A = []
        self.B = []
        self.C = []
        # Moving average of the coefficients over the stored frames
        self.A_avg = 0.
        self.B_avg = 0.
        self.C_avg = 0.
        # radius of curvature
        self.rc = 0.
        # car offset from lane center
        self.vehicle_offset = 0.

    def get_average_fit(self):
        """Return the current averaged coefficients as an (A, B, C) tuple."""
        return(self.A_avg, self.B_avg, self.C_avg)

    def average_fit(self, fit_coeffs):
        """Push a new fit and return the coefficients averaged over the last
        3 frames.

        BUG FIX: the old guard ``if len(self.A) >= 2: pop`` trimmed the
        history down to the single newest element after every append, so no
        smoothing ever happened.  Keep up to 3 entries so the average really
        spans 3 frames, as the file header documents.
        """
        self.A.append(fit_coeffs[0])
        self.B.append(fit_coeffs[1])
        self.C.append(fit_coeffs[2])
        # Drop the oldest coefficients once more than 3 frames are stored.
        if(len(self.A) > 3):
            _ = self.A.pop(0)
            _ = self.B.pop(0)
            _ = self.C.pop(0)
        self.A_avg = np.mean(self.A)
        self.B_avg = np.mean(self.B)
        self.C_avg = np.mean(self.C)
        return self.A_avg, self.B_avg, self.C_avg

    def set_params(self, rc, vehicle_offset):
        """Store the latest radius of curvature and vehicle offset."""
        self.rc = rc
        self.vehicle_offset = vehicle_offset

    def get_params(self):
        """Return (radius_of_curvature, vehicle_offset)."""
        return self.rc, self.vehicle_offset
##############################################################################
##Pipeline on Video frames
##############################################################################
# Module-level pipeline state shared across video frames.
left_lane = Line()    # smoothing/state for the left lane line
right_lane = Line()   # smoothing/state for the right lane line
lane_detect = False   # True once a full window search has succeeded
new_fit, reuse_fit = 0, 0   # counters: full searches vs. reused/quick fits
def process_video_frame(image):
    """Full lane-finding pipeline for one RGB video frame.

    Undistorts, edge-detects and warps the frame, detects the lanes (full
    window search for the first frame / after failures, quick search
    otherwise), and returns the frame with the lane polygon, radius of
    curvature and vehicle offset drawn on it.
    """
    global mtx, dist, left_lane, right_lane, lane_detect, new_fit, reuse_fit
    # moviepy delivers RGB frames; the pipeline works in OpenCV's BGR order.
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    # Undistort, edge-detect, and warp to the bird's-eye view.
    dst = cv2.undistort(image, mtx, dist, None, mtx)
    comb_binary = edge_detect(dst)
    binary_warped = cv2.warpPerspective(comb_binary,M,(comb_binary.shape[1], comb_binary.shape[0]), flags=cv2.INTER_LINEAR)
    # BUG FIX: the fits are numpy arrays, and ``array == None`` is an
    # elementwise comparison whose truth value is ambiguous inside ``or`` --
    # use ``is None`` for the failure checks throughout.
    if (lane_detect == False):
        # Full histogram + sliding-window search.
        left_fit, right_fit, left_curverad, right_curverad, vehicle_offset = detect_lanes(binary_warped, visualize=False)
        if (right_fit is None) or (left_fit is None):
            # Detection failed: reuse the averaged fit from previous frames.
            left_fit = left_lane.get_average_fit()
            right_fit = right_lane.get_average_fit()
            rc, vehicle_offset = left_lane.get_params()
            reuse_fit+=1
        else:
            left_fit = left_lane.average_fit(left_fit)
            right_fit = right_lane.average_fit(right_fit)
            # radius of curvature and vehicle offset
            rc = (left_curverad + right_curverad)/2.0
            left_lane.set_params(rc,vehicle_offset)
            lane_detect = True
            new_fit+=1
    else:
        # Quick search around the previous averaged fits.
        left_fit = left_lane.get_average_fit()
        right_fit = right_lane.get_average_fit()
        left_fit, right_fit, left_curverad, right_curverad, vehicle_offset = opt_detect_lanes(binary_warped, left_fit, right_fit, visualize=False)
        if (right_fit is None) or (left_fit is None):
            # Quick search failed: fall back to the full window search.
            left_fit, right_fit, left_curverad, right_curverad, vehicle_offset = detect_lanes(binary_warped, visualize=False)
            if (right_fit is None) or (left_fit is None):
                left_fit = left_lane.get_average_fit()
                right_fit = right_lane.get_average_fit()
                rc, vehicle_offset = left_lane.get_params()
                reuse_fit+=1
            else:
                left_fit = left_lane.average_fit(left_fit)
                right_fit = right_lane.average_fit(right_fit)
                # radius of curvature and vehicle offset
                rc = (left_curverad + right_curverad)/2.0
                left_lane.set_params(rc,vehicle_offset)
                lane_detect = True
                new_fit+=1
        else:
            rc = (left_curverad + right_curverad)/2.0
            reuse_fit+=1
    # Draw the detected lane back onto the undistorted frame.
    result = draw_lanes_on_road(binary_warped, left_fit, right_fit, rc, vehicle_offset, dst)
    # Convert back to RGB for moviepy.
    result = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)
    return result
# Process the project video through the lane-finding pipeline and report
# how often each detection path was taken.
output_video = 'p4_project_video.mp4'
clip1 = VideoFileClip("project_video.mp4")
output_clip = clip1.fl_image(process_video_frame)
output_clip.write_videofile(output_video, audio=False)
print('Number of times histogram and window search is used:',new_fit)
print('Number of times quick lanes detect is used :', reuse_fit)
| gollaratti/advanced_lane_finding | advanced_lane_finding.py | advanced_lane_finding.py | py | 26,579 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.zeros",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "numpy.mgrid",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"lin... |
26385352063 | from django.shortcuts import render
from django.contrib import messages
from .models import Contact
def home(request):
    """Render the static landing page."""
    return render(request, 'home/home.html')
def about(request):
    """Render the about page with the author's profile details.

    BUG FIX: the original had a second ``return render(...)`` passing the
    ``knowledge`` dict -- it was unreachable dead code, so the knowledge list
    never reached the template.  It is now merged into the single context.
    """
    name = 'Foyez Ahammad'
    skill = ' Git & Github, Django, MySQL, Basic Front-End (HTML, CSS, JS, Bootstrap), Django REST Framework'
    skill_know = '1. Python 2. Git & Github 3. Django 4. MySQL 5. A little bit (HTML, CSS, Bootstrap). '
    gpa = 'GPA: 5.00'
    # For Template tag condition
    num = 10
    context = {'name': name, 'skill': skill,
               'skill_know': skill_know, 'gpa': gpa, 'num': num,
               'knowledge': ['Python', 'Git & Github', 'Django', 'MySQL']}
    return render(request, 'home/about.html', context=context)
def contact(request):
    """Contact form: on POST, persist the submission and flash a success
    message; always re-render the contact page.

    NOTE(review): there is no redirect after POST, so refreshing the page
    resubmits the form -- consider a redirect; confirm intended behavior.
    """
    if request.method == 'POST':
        name = request.POST.get('name')
        email = request.POST.get('email')
        phone = request.POST.get('phone')
        desc = request.POST.get('desc')
        contact = Contact(name=name, email=email, phone=phone, desc=desc)
        contact.save()
        messages.success(request, 'Successfully, ')
    return render(request, 'home/contact.html')
| foyez-ahammad/django-practices | SHOP/home/views.py | views.py | py | 1,233 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.shortcuts.render",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 23,
"usage_type": "call"
},
{
"api_name":... |
74539482592 | import errno
from tempfile import TemporaryDirectory
from unittest.mock import patch
import escapism
import pytest
import docker
from repo2docker.__main__ import make_r2d
from repo2docker.app import Repo2Docker
from repo2docker.utils import chdir
def test_find_image():
    """find_image() returns True when the output image tag exists locally."""
    images = [{"RepoTags": ["some-org/some-repo:latest"]}]
    with patch("repo2docker.docker.docker.APIClient") as FakeDockerClient:
        instance = FakeDockerClient.return_value
        instance.images.return_value = images
        r2d = Repo2Docker()
        r2d.output_image_spec = "some-org/some-repo"
        assert r2d.find_image()
        instance.images.assert_called_with()
def test_dont_find_image():
    """find_image() returns False when no local image matches the spec."""
    images = [{"RepoTags": ["some-org/some-image-name:latest"]}]
    with patch("repo2docker.docker.docker.APIClient") as FakeDockerClient:
        instance = FakeDockerClient.return_value
        instance.images.return_value = images
        r2d = Repo2Docker()
        r2d.output_image_spec = "some-org/some-other-image-name"
        assert not r2d.find_image()
        instance.images.assert_called_with()
def test_image_name_remains_unchanged():
    """--image-name is used verbatim as the output image spec."""
    # if we specify an image name, it should remain unmodified
    with TemporaryDirectory() as src:
        # (this first Repo2Docker() is immediately replaced by make_r2d below)
        app = Repo2Docker()
        argv = ["--image-name", "a-special-name", "--no-build", src]
        app = make_r2d(argv)
        app.start()
        assert app.output_image_spec == "a-special-name"
def test_image_name_contains_sha1(repo_with_content):
    """The generated image name ends with the repo's short commit SHA1."""
    upstream, sha1 = repo_with_content
    app = Repo2Docker()
    # force selection of the git content provider by prefixing path with
    # file://. This is important as the Local content provider does not
    # store the SHA1 in the repo spec
    argv = ["--no-build", "file://" + upstream]
    app = make_r2d(argv)
    app.start()
    assert app.output_image_spec.endswith(sha1[:7])
def test_local_dir_image_name(repo_with_content):
    """A local directory repo yields an image name derived from its escaped path."""
    upstream, sha1 = repo_with_content
    app = Repo2Docker()
    argv = ["--no-build", upstream]
    app = make_r2d(argv)
    app.start()
    assert app.output_image_spec.startswith(
        "r2d" + escapism.escape(upstream, escape_char="-").lower()
    )
def test_build_kwargs(repo_with_content):
    """extra_build_kwargs are forwarded to the docker build call."""
    upstream, sha1 = repo_with_content
    argv = [upstream]
    app = make_r2d(argv)
    app.extra_build_kwargs = {"somekey": "somevalue"}
    with patch.object(docker.APIClient, "build") as builds:
        builds.return_value = []
        app.build()
    builds.assert_called_once()
    args, kwargs = builds.call_args
    assert "somekey" in kwargs
    assert kwargs["somekey"] == "somevalue"
def test_run_kwargs(repo_with_content):
    """extra_run_kwargs are forwarded to the container run call."""
    upstream, sha1 = repo_with_content
    argv = [upstream]
    app = make_r2d(argv)
    app.extra_run_kwargs = {"somekey": "somevalue"}
    with patch.object(docker.DockerClient, "containers") as containers:
        app.start_container()
    containers.run.assert_called_once()
    args, kwargs = containers.run.call_args
    assert "somekey" in kwargs
    assert kwargs["somekey"] == "somevalue"
def test_root_not_allowed():
    """Running as uid 0 is rejected unless an explicit user id/name is given."""
    with TemporaryDirectory() as src, patch("os.geteuid") as geteuid:
        geteuid.return_value = 0
        argv = [src]
        with pytest.raises(SystemExit) as exc:
            app = make_r2d(argv)
            # NOTE(review): this assert sits after the raising call, so it
            # never executes -- it should be outside the with block; confirm.
            assert exc.code == 1
        with pytest.raises(ValueError):
            app = Repo2Docker(repo=src, run=False)
            app.build()
        # Supplying an explicit non-root user makes the build proceed.
        app = Repo2Docker(repo=src, user_id=1000, user_name="jovyan", run=False)
        app.initialize()
        with patch.object(docker.APIClient, "build") as builds:
            builds.return_value = []
            app.build()
        builds.assert_called_once()
def test_dryrun_works_without_docker(tmpdir, capsys):
    """A dry-run build succeeds (no Docker error logged) when Docker is absent."""
    with chdir(tmpdir):
        with patch.object(docker, "APIClient") as client:
            client.side_effect = docker.errors.DockerException("Error: no Docker")
            app = Repo2Docker(dry_run=True)
            app.build()
            captured = capsys.readouterr()
            assert "Error: no Docker" not in captured.err
def test_error_log_without_docker(tmpdir, capsys):
    """A real build exits and logs the Docker error when Docker is absent."""
    with chdir(tmpdir):
        with patch.object(docker, "APIClient") as client:
            client.side_effect = docker.errors.DockerException("Error: no Docker")
            app = Repo2Docker()
            with pytest.raises(SystemExit):
                app.build()
            captured = capsys.readouterr()
            assert "Error: no Docker" in captured.err
| jupyterhub/repo2docker | tests/unit/test_app.py | test_app.py | py | 4,565 | python | en | code | 1,542 | github-code | 1 | [
{
"api_name": "unittest.mock.patch",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "repo2docker.app.Repo2Docker",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "unittest.mock.patch",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "r... |
42292475980 | import torch
import torch.nn as nn
# Import the skrl components to build the RL system
from skrl.models.torch import Model, GaussianMixin, DeterministicMixin
from skrl.memories.torch import RandomMemory
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.resources.schedulers.torch import KLAdaptiveRL
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.envs.torch import wrap_env
from skrl.envs.torch import load_omniverse_isaacgym_env
from skrl.utils import set_seed
from learning.model import StochasticActorHeightmap, DeterministicHeightmap, ObserverationInfo, NetworkInfo
import hydra
from omegaconf import DictConfig
from hydra import compose, initialize
import wandb
import datetime
from omniisaacgymenvs.tasks.utils.camera.heightmap_distribution import Heightmap
from omniisaacgymenvs.tasks.utils.terrains.extract_terrain import extract_terrain
import os
#cfg_ppo = PPO_DEFAULT_CONFIG.copy()
# set the seed for reproducibility
set_seed(42)
class TrainerSKRL():
def __init__(self):
self._load_cfg()
time_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
#self.wandb_group =f"Improved_{time_str}"
self.wandb_group ="New"
#self.wandb_name = f"run_{time_str}"
self.wandb_name = "Run1"
# self.start_simulation()
#self.start_training()
def _load_cfg(self):
initialize(config_path="cfg", job_name="test_app")
cfg = compose(config_name="config")
self.cfg_ppo = PPO_DEFAULT_CONFIG.copy()
self.cfg_network = cfg.trainSKRL.network
self.cfg_experiment = cfg.trainSKRL.experiment
self.cfg_config = cfg.trainSKRL.config
self.sim_params = cfg.task
# Set all parameters according to cfg file
for param, value in (cfg.trainSKRL.config).items():
self.cfg_ppo[param] = value
print(self.cfg_ppo)
hydra.core.global_hydra.GlobalHydra.instance().clear()
def start_simulation(self):
env = load_omniverse_isaacgym_env(task_name="Rover")
self.env = wrap_env(env)
def log_parameters(self):
config = { "mlp layers": self.cfg_network.mlp.layers,
"mlp_activation": self.cfg_network.mlp.activation,
"encoder_layers": self.cfg_network.encoder.layers,
"encoder_activation": self.cfg_network.encoder.activation,
"hyperparameters": self.cfg_config,
"rewards": self.sim_params.rewards,
"sim parameters": {"environment": self.sim_params.env,
"simulation": self.sim_params.sim,},
}
# for key,value in (self.cfg_config).items():
# config[key] = value
return config
def train(self,sweep=False):
env = self.env
device = env.device
if not sweep:
self.log_parameters()
# Instantiate a RandomMemory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=60, num_envs=self.env.num_envs, device=device)
# Get values from cfg
mlp_layers = self.cfg_network.mlp.layers
encoder_layers = self.cfg_network.encoder.layers
activation_function = self.cfg_network.mlp.activation
#print(env.num_exteroceptive)
#TODO fix
heightmap_distribution = Heightmap()
num_sparse = heightmap_distribution.get_num_sparse_vector()
num_dense = heightmap_distribution.get_num_dense_vector()
num_beneath = heightmap_distribution.get_num_beneath_vector()
networkInfo = NetworkInfo([256,160,128],[80,60],[80,60],[80,60],"leakyrelu")
observerationInfo = ObserverationInfo(4,num_sparse,num_dense,num_beneath)
# Instantiate the agent's models (function approximators).
models_ppo = { "policy": StochasticActorHeightmap(env.observation_space, env.action_space, networkInfo, observerationInfo ),
"value": DeterministicHeightmap(env.observation_space, env.action_space, networkInfo,observerationInfo)}
# print()
# Instantiate parameters of the model
# for model in models_ppo.values():
# model.init_parameters(method_name="normal_", mean=0.0, std=0.05)
self.cfg_ppo["experiment"]["write_interval"] = 100
# Define agent
agent = PPO(models=models_ppo,
memory=memory,
cfg=self.cfg_ppo,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
#agent.migrate("best.pt")
#agent.load("agent_219000.pt")
#agent.load("agent_13000.pt")
#agent.load("agent_939000.pt")
# Configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 1000000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
#trainer.eval()
trainer.train()
def start_training_sweep(self,n_sweeps):
self.start_simulation()
# Define sweep config
sweep_configuration = {
'method': 'bayes',
'name': 'sweep',
'metric': {'goal': 'maximize', 'name': 'Reward / Total reward (mean)'},
'parameters':
{
'mini_batches': {'values': [4, 8]},
'lr': {'max': 0.003, 'min': 0.00003}
}
}
# Initialize sweep by passing in config. (Optional) Provide a name of the project.
sweep_id = wandb.sweep(sweep=sweep_configuration, project='isaac-rover-2.0')
wandb.agent(sweep_id, function=self.sweep, count=n_sweeps)
# Start sweep job.
def sweep(self):
time_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
#self.wandb_name = f"test-Anton_{time_str}"
self.wandb_name = "no_dense_encoder"
run = wandb.init(project='isaac-rover-2.0', sync_tensorboard=True,name=self.wandb_name,group=self.wandb_group, entity="aalborg-university")
self.cfg_ppo["learning_rate"] = wandb.config.lr
self.cfg_ppo["mini_batches"] = wandb.config.mini_batches
self.train()
# wandb.finish()
def start_training(self):
self.start_simulation()
config = self.log_parameters()
log=True
if log:
wandb.init(project='isaac-rover-2.0', config=config, sync_tensorboard=True,name=self.wandb_name,group=self.wandb_group, entity="aalborg-university")
self.train()
if log:
wandb.finish()
def start_training_sequential(self):
for i in range(3):
print(i)
self.train()
pass
if __name__ == '__main__':
# Get hyperparameter config
terrainExist = os.path.exists("tasks/utils/terrain/")
if not terrainExist:
extract_terrain()
trainer = TrainerSKRL()
# trainer.start_training_sequential()
#trainer.start_training_sweep(4)
trainer.start_training()
#parse_hydra_configs()
| abmoRobotics/isaac_rover_2.0 | omniisaacgymenvs/train.py | train.py | py | 7,235 | python | en | code | 13 | github-code | 1 | [
{
"api_name": "skrl.utils.set_seed",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "hydr... |
13858043669 | import logging
import os
from datetime import datetime
from bson.objectid import ObjectId
from celery import shared_task as task
from celery.utils.log import get_task_logger
from products.models import Product
from utils.mongodb import mongo_db, mongo_update
logger = get_task_logger(__name__)
@task(name='jms_products_api')
def jms_import_products_api(mon_insert_id):
import requests
time_now = datetime.now()
db = mongo_db()
id_code = None
if not mon_insert_id:
id_code = db.product_logs.insert_one(
{"user": None, "status": "Processing",
"created_at": time_now, })
response_next = None
page = 1
status = "done"
while response_next or page == 1:
response_next = None
token = os.environ.get('3JMSTOKEN', '')
headers = {'Authorization': "Token " + token}
response = requests.get(
"https://staging3jms.com/api/v1/inventory/?page=" + str(page),
headers=headers
)
try:
response_next = response.json()['next']
except Exception as e:
logging.exception(e)
response_next = None
try:
response_result = response.json()['results']
except Exception as e:
logging.exception(e)
response_result = []
if page == 1:
status = "error"
page += 1
for i in response_result:
result = i
try:
Product.objects.update_or_create(
sku=result["vws_product_sku"],
defaults={
'name': result["name"],
'brand': result["brand"],
'weight': result["weight"],
'year': result["year"],
'price': result["price"],
'currency': result["currency"],
'bottle_size': result["bottle_size"],
'image_url': result["image_url"],
'category': result["category"],
'subcategory': result["subcategory"],
'upc': result["upc"]})
except Exception as e:
logging.exception(e)
pass
finish_time = datetime.now()
mongo_update("product_logs",
{"_id": ObjectId(
mon_insert_id) if mon_insert_id else id_code.inserted_id},
{
"$set": {
"finished_at": finish_time,
"duration": (finish_time - time_now).total_seconds(),
"status": status}
}
)
| HASSINE-BENABDELAZIZ/ecommerce | products/tasks.py | tasks.py | py | 2,738 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "celery.utils.log.get_task_logger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 18,
"usage_type": "name"
},
{
"api_name... |
43565136672 | from django.conf import settings
from django.utils.translation import ugettext_lazy as _
def robots(request):
return {'ROBOTS_NOINDEX': getattr(settings, 'ROBOTS_NOINDEX', False)}
def google_analytics(request):
key = 'GOOGLE_ANALYTICS_TRACKING_ID'
return {key: getattr(settings, key, False)}
def ribbon(request):
enabled = getattr(settings, 'RIBBON_ENABLED', True)
if not enabled:
return {'ribbon': False}
url = 'http://github.com/colab/colab'
text = _('Fork me!')
return {
'ribbon': {
'text': getattr(settings, 'RIBBON_TEXT', text),
'url': getattr(settings, 'RIBBON_URL', url),
}
}
| colab/colab | colab/home/context_processors.py | context_processors.py | py | 675 | python | en | code | 23 | github-code | 1 | [
{
"api_name": "django.conf.settings",
"line_number": 6,
"usage_type": "argument"
},
{
"api_name": "django.conf.settings",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "django.conf.settings",
"line_number": 15,
"usage_type": "argument"
},
{
"api_nam... |
1579619363 | import matplotlib.pyplot as plt
import torch.nn.functional as F
import argparse
import torch
import os
def calc_accuracy(model, data_loader, device):
correct_pred = 0
instance_count = 0
with torch.no_grad():
model.eval()
for x, y in data_loader:
x, y = x.to(device), y.to(device)
pred = model(x)
_, pred_labels = torch.max(F.softmax(pred, dim=1), 1)
instance_count += y.size(0)
correct_pred += (pred_labels == y).sum().item()
acc = correct_pred / instance_count
return acc
def parse_train_args():
parser = argparse.ArgumentParser(description="Training argument parser")
parser.add_argument("-ne", "--num_epoch", type=int, required=False, default=10, help="number of epochs")
parser.add_argument("-bs", "--batch_size", type=int, required=False, default=50, help="batch size")
parser.add_argument("-ms", "--manual_seed", type=int, required=False, default=0, help="random seed")
parser.add_argument("-d", "--device", type=str, required=False, default="cuda", help="device: cuda/cpu")
parser.add_argument("-lr", "--learning_rate", type=float, required=False, default=0.001, help="learning rate")
parser.add_argument("-nc", "--num_classes", type=int, required=False, default=10, help="number of classes")
parser.add_argument("-tr", "--training_set_ratio", type=float, required=False, default=0.8,
help="the ratio between training and validation set")
parser.add_argument("-mp", "--model_dir", type=str, required=False, default="./checkpoints",
help="model path to save")
parser.add_argument("-pf", "--plot_flag", type=bool, required=False, default=True, help="plot all test results")
parser.add_argument("-sc", "--save_checkpoint", type=int, required=False, default=10,
help="saving checkpoint every save_checkpoint epochs")
parser.add_argument("-lm", "--load_model", type=bool, required=False, default=False,
help="load pretrained model, on restart case")
parser.add_argument("-lmp", "--load_model_path", type=str, required=False,
help="path of the pretrained model to load, on restart case")
args = parser.parse_args()
return args
def parse_test_args():
parser = argparse.ArgumentParser(description="Testing argument parser")
parser.add_argument("-d", "--device", type=str, required=False, default="cpu", help="device: cuda/cpu")
parser.add_argument("-nc", "--num_classes", type=int, required=False, default=10, help="number of classes")
parser.add_argument("-ms", "--manual_seed", type=int, required=False, default=0, help="random seed")
parser.add_argument("-bs", "--batch_size", type=int, required=False, default=1, help="batch size")
parser.add_argument("-mp", "--model_path", type=str, required=True, help="model path to load")
parser.add_argument("-pf", "--plot_flag", type=bool, required=False, default=False, help="plot all test results")
args = parser.parse_args()
return args
def plot_loss_accuracy(train_loss_lst: list, valid_loss_lst: list, train_acc_lst: list, valid_acc_lst: list):
fig = plt.figure()
plt.plot(train_loss_lst, '-bo')
plt.plot(valid_loss_lst, '-go')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['Training', 'Validation'])
fig.suptitle('Loss')
fig = plt.figure()
plt.plot(train_acc_lst, '-bo')
plt.plot(valid_acc_lst, '-go')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend(['Training', 'Validation'])
fig.suptitle('Accuracy')
plt.show()
def save_checkpoint(dir_path, epoch, model, optim, loss):
if not os.path.isdir(dir_path):
os.mkdir(dir_path)
path = f"{dir_path}/checkpoint_{epoch}.pt"
torch.save({'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optim.state_dict(),
'loss': loss
},
path)
def load_checkpoint(path, model, optim):
checkpoint = torch.load(path)
model.load_state_dict(checkpoint['model_state_dict'])
optim.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
loss = checkpoint['loss']
return epoch, loss
| shaynaor/AlexNet_PyTorch | utils.py | utils.py | py | 4,336 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "torch.no_grad",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn.function... |
12640380193 | import xml.etree.ElementTree as ET
from capas_proyecto.acceso_a_datos.comprobar_long_dict import contar_canciones_xml
def crear_dicc_nombre_ruta(RUTA_XML):
try:
arbol = ET.parse(RUTA_XML)
except FileNotFoundError:
exit("El nombre del archivo XML no es correcto.")
except ET.ParseError:
exit("El archivo XML está mal formado.")
else:
raiz = arbol.getroot()
diccionario = {}
for canciones in raiz:
for cancion in canciones:
nombre_y_ruta = {}
nombre_y_ruta[cancion.find('name').text] = cancion.find("path").text
diccionario.update(nombre_y_ruta)
num_canciones = contar_canciones_xml(raiz)
assert len(diccionario) == num_canciones
return diccionario, num_canciones
| DanielFernandezR/vlc-random-playlist | capas_proyecto/acceso_a_datos/api.py | api.py | py | 813 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "xml.etree.ElementTree.ParseError",
"line_number": 10,
"usage_type": "attribute"
},
{
... |
72243807394 | from apple import Apple
from board import Board
from snake import Snake
from algorthim import DFS
import pygame
from constants import GAME,SNAKE,APPLE,COLOR,SCALE
import time
import random
from a import *
#GAMELOOP
class Game:
def __init__(self, display):
self.display = display #instance
self.score = 0
def loop(self):
#TIME
clock = pygame.time.Clock()
#INSTANCE
board = Board(self.display)
apple = Apple(self.display)
snake = Snake(self.display)
#ACTUAL GAMELOOP
while True:
path = astar(board.returnBoard(), snake.snake_pos(),apple.apple_Position())
print(path)
for i in range(1,len(path)):
#if next move is not orthogonal
if snake.changeDirectionTo(snake.CoordinateToDirection(path[i])):
path = astar(board.returnBoard(), snake.snake_pos(),apple.apple_Position())
for event in pygame.event.get():
if event.type == pygame.QUIT:
print("exit pressed!")
return 0
# DRAW BACKGROUND
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP:
snake.changeDirectionTo('UP')
elif event.key == pygame.K_DOWN:
snake.changeDirectionTo('DOWN')
elif event.key == pygame.K_LEFT:
snake.changeDirectionTo('LEFT')
elif event.key == pygame.K_RIGHT:
snake.changeDirectionTo('RIGHT')
# MOVE SNAKE and record position of SNAKE
snake.move()
# Collision Detection
if snake.collision():
return 0
#eating
if snake.ate(apple.apple_Position()):
self.score += 1
pygame.display.set_caption(GAME['CAPTION']+ str(self.score))
apple.randomize()
#DRAW
board.drawBoard(self.display,snake.snake_body(),apple.apple_Position())
apple.draw()
snake.draw_body()
#DRAW UPDATE
pygame.display.update()
clock.tick(GAME['FPS'])
def main():
display = pygame.display.set_mode((GAME['WIDTH'],GAME['HEIGHT']))
pygame.display.set_caption(GAME['CAPTION'])
game = Game(display)
value = game.loop()
#LINUX EXIT COMMANDS
if value != 0:
print("Game ended wrong: ", value)
exit(1)
if __name__ == '__main__':
main()
| adambenaceur/AutonomousSnake | run.py | run.py | py | 2,793 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.time.Clock",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "board.Board",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "apple.Apple",
"lin... |
597656300 | import os
import time
import cv2
import itertools
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from tensorflow.keras.optimizers import Adam
import tensorflow.keras.applications.inception_v3 as inception_v3
import tensorflow.keras.applications.inception_resnet_v2 as inception_resnet_v2
import tensorflow.keras.applications.densenet as densenet
import tensorflow.keras.applications.mobilenet_v2 as mobilenet_v2
import tensorflow.keras.applications.mobilenet as mobilenet
import tensorflow.keras.applications.resnet50 as resnet50
import tensorflow.keras.applications.vgg16 as vgg16
import tensorflow.keras.applications.vgg19 as vgg19
import tensorflow.keras.applications.xception as xception
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Input
from tensorflow.keras.layers import Lambda
from tensorflow.keras.constraints import NonNeg
from tensorflow.keras.layers import Activation
from tensorflow.keras import backend as K
from tensorflow.keras.preprocessing import image
def loadimgs(path, shape_img=(595, 842)):
'''
path => Path of train directory or test directory
'''
X = []
y = []
for cartorio in os.listdir(path):
print("Carregando Cartorio: " + cartorio)
cartorio_path = os.path.join(path, cartorio)
for filename in os.listdir(cartorio_path):
image_path = os.path.join(cartorio_path, filename)
img = image.load_img(image_path, target_size=shape_img)
img = image.img_to_array(img)
img = img[:int(img.shape[0] * 1/3.), :, :]
try:
X.append(img)
y.append(cartorio)
except Exception as e:
print(e)
y = np.vstack(y)
X = np.stack(X)
return X, y
def gaussian(x):
return K.exp(-K.pow(x, 2))
def get_siamese_model(name=None, input_shape=(224, 224, 3),
embedding_vec_size=512, not_freeze_last=2):
"""
Model architecture
"""
if name == "InceptionV3":
base_model = inception_v3.InceptionV3(
weights='imagenet', include_top=False)
model_preprocess_input = inception_v3.preprocess_input
if name == "InceptionResNetV2":
base_model = inception_resnet_v2.InceptionResNetV2(
weights='imagenet', include_top=False)
model_preprocess_input = inception_resnet_v2.preprocess_input
if name == "DenseNet121":
base_model = densenet.DenseNet121(
weights='imagenet', include_top=False)
model_preprocess_input = densenet.preprocess_input
if name == "DenseNet169":
base_model = densenet.DenseNet169(
weights='imagenet', include_top=False)
model_preprocess_input = densenet.preprocess_input
if name == "DenseNet201":
base_model = densenet.DenseNet201(
weights='imagenet', include_top=False)
model_preprocess_input = densenet.preprocess_input
if name == "MobileNetV2":
base_model = mobilenet_v2.MobileNetV2(
weights='imagenet', include_top=False)
model_preprocess_input = mobilenet_v2.preprocess_input
if name == "MobileNet":
base_model = mobilenet.MobileNet(
weights='imagenet', include_top=False)
model_preprocess_input = mobilenet.preprocess_input
if name == "ResNet50":
base_model = resnet50.ResNet50(
weights='imagenet', include_top=False)
model_preprocess_input = resnet50.preprocess_input
if name == "VGG16":
base_model = vgg16.VGG16(
weights='imagenet', include_top=False)
model_preprocess_input = vgg16.preprocess_input
if name == "VGG19":
base_model = vgg19.VGG19(
weights='imagenet', include_top=False)
model_preprocess_input = vgg19.preprocess_input
if name == "Xception":
base_model = xception.Xception(
weights='imagenet', include_top=False)
model_preprocess_input = xception.preprocess_input
# Verifica se existe base_model
if 'base_model' not in locals():
return ["InceptionV3", "InceptionResNetV2",
"DenseNet121", "DenseNet169", "DenseNet201",
"MobileNetV2", "MobileNet",
"ResNet50",
"VGG16", "VGG19",
"Xception"
]
# desativando treinamento
for layer in base_model.layers[:-not_freeze_last]:
layer.trainable = False
x = base_model.layers[-1].output
x = GlobalAveragePooling2D()(x)
x = Dense(
embedding_vec_size,
activation='linear', # sigmoid? relu?
name='embedding',
use_bias=False
)(x)
model = Model(
inputs=base_model.input,
outputs=x
)
left_input = Input(input_shape)
right_input = Input(input_shape)
# Generate the encodings (feature vectors) for the two images
encoded_l = model(left_input)
encoded_r = model(right_input)
# Add a customized layer to compute the absolute difference between the encodings
L1_layer = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1]))
L1_distance = L1_layer([encoded_l, encoded_r])
# Add a dense layer with a sigmoid unit to generate the similarity score
prediction = Dense(
1,
activation=Activation(gaussian),
use_bias=False,
kernel_constraint=NonNeg()
)(L1_distance)
# Connect the inputs with the outputs
siamese_net = Model(
inputs=[left_input, right_input],
outputs=prediction
)
return {
"model": siamese_net,
"preprocess_input": model_preprocess_input
}
# In[5]:
train_folder = "dataset/train/"
val_folder = 'dataset/test/'
save_path = 'model_data/'
# In[6]:
X, y = loadimgs(train_folder)
# In[7]:
Xval, yval = loadimgs(val_folder)
# In[8]:
def show_img(img):
plt.figure(figsize=(20, 7))
plt.imshow(img/255, aspect='auto', interpolation='nearest')
# In[9]:
show_img(X[10])
# In[10]:
model_name = "MobileNetV2"
# In[11]:
model_dict = get_siamese_model(model_name, X[0].shape)
model = model_dict["model"]
preprocess_input = model_dict["preprocess_input"]
# In[12]:
model.summary()
# In[13]:
X = preprocess_input(X)
# In[14]:
X.shape
# In[15]:
def get_index(list1, list2):
comb = list(
itertools.product(
enumerate(list1),
enumerate(list2)
)
)
y = np.array([int(c[0][1][0] == c[1][1][0]) for c in comb])
idx_left = np.array([c[0][0] for c in comb])
idx_right = np.array([c[1][0] for c in comb])
return y, idx_left, idx_right
# In[16]:
def get_batch(X, y, batch_size, proportion=0.5):
n_examples, width, height, depth = X.shape
y_, idx_left, idx_right = get_index(y, y)
idx_one = np.random.choice(
np.where(y_ == 1)[0],
int(batch_size * proportion)
).tolist()
idx_zero = np.random.choice(
np.where(y_ == 0)[0],
int(batch_size * (1-proportion))
).tolist()
sel_idx = idx_one + idx_zero
np.random.shuffle(sel_idx)
y_batch = y_[sel_idx]
X_batch_l = X[idx_left[sel_idx]]
X_batch_r = X[idx_right[sel_idx]]
return [X_batch_l, X_batch_r], y_batch
# In[17]:
optimizer = Adam(lr=0.001)
model.compile(loss="binary_crossentropy", optimizer=optimizer)
# In[18]:
batch_size = 64
n_epochs = 20
proportion = 0.3
# In[19]:
# train model on each dataset
for epoch in tqdm(range(n_epochs)):
X_train, y_train = get_batch(X, y, batch_size, proportion)
model.fit(X_train, y_train, batch_size=batch_size, epochs=5)
# In[ ]:
# In[20]:
model.save(model_name + "_siamese.h5")
# In[26]:
model_embedding = model.layers[2]
# In[21]:
model.predict([Xval[0:4], Xval[15:19]])
# In[22]:
show_img(Xval[3])
# In[23]:
show_img(Xval[18])
# In[24]:
model.predict([Xval[0:4], Xval[0:4]])
# In[32]:
model.summary()
# In[36]:
model2 = Model(inputs=model.input, outputs=model.layers[-1].output)
# In[39]:
model2.predict([Xval[0:4], Xval[0:4]]) + 0.5
| Otazz/KaggleOSIC | network.py | network.py | py | 8,241 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "os.listdir",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": ... |
14467288238 | import numpy as np
from sklearn import metrics
import matplotlib.pyplot as plt
from kmeans import *
from minibatchkmeans import *
from gmm import *
from Dense_AutoEncoder import *
from CNN_AutoEncoder_TSNE import *
# load the data
data1 = np.load(r'kmnist-train-imgs.npz')
data2 = np.load(r'kmnist-train-labels.npz')
train_imgs = data1['arr_0']
train_labels = data2['arr_0']
NMI = []
ARI = []
n_clusters=10
#### Clustering with unprocessed data ####
train_imgs1=train_imgs.reshape(60000,28*28)
# kmeans
pred1 = kmeans(n_clusters,train_imgs1)
NMI1 = metrics.normalized_mutual_info_score(pred1, train_labels)
ARI1 = metrics.adjusted_rand_score(pred1, train_labels)
NMI.append(NMI1)
ARI.append(ARI1)
# gmm
pred2 = gmm(train_imgs1,n_clusters,1)
NMI2 = metrics.normalized_mutual_info_score(pred2, train_labels)
ARI2 = metrics.adjusted_rand_score(pred2, train_labels)
NMI.append(NMI2)
ARI.append(ARI2)
# minibatchkmeans
pred3 = minibatchkmeans(n_clusters,train_imgs1)
NMI3 = metrics.normalized_mutual_info_score(pred3, train_labels)
ARI3 = metrics.adjusted_rand_score(pred3, train_labels)
NMI.append(NMI3)
ARI.append(ARI3)
#### Clustering with data after dimensionality reduction using Dense Autoencoder ####
train_imgs2 = dense_AE(train_imgs,2)
# kmeans
pred4 = kmeans(n_clusters,train_imgs2)
NMI4 = metrics.normalized_mutual_info_score(pred4, train_labels)
ARI4 = metrics.adjusted_rand_score(pred4, train_labels)
NMI.append(NMI4)
ARI.append(ARI4)
# gmm
pred5 = gmm(train_imgs2,n_clusters,30)
NMI5 = metrics.normalized_mutual_info_score(pred5, train_labels)
ARI5 = metrics.adjusted_rand_score(pred5, train_labels)
NMI.append(NMI5)
ARI.append(ARI5)
# minibatchkmeans
pred6 = minibatchkmeans(n_clusters,train_imgs2)
NMI6 = metrics.normalized_mutual_info_score(pred6, train_labels)
ARI6 = metrics.adjusted_rand_score(pred6, train_labels)
NMI.append(NMI6)
ARI.append(ARI6)
#### Clustering with data after dimensionality reduction using Convolutional Autoencoder and T-sne ####
train_imgs3 = cnn_AE_Tsne(train_imgs)
# kmeans
pred7 = kmeans(n_clusters,train_imgs3)
NMI7 = metrics.normalized_mutual_info_score(pred7, train_labels)
ARI7 = metrics.adjusted_rand_score(pred7, train_labels)
NMI.append(NMI7)
ARI.append(ARI7)
# gmm
pred8 = gmm(train_imgs3,n_clusters,2)
NMI8 = metrics.normalized_mutual_info_score(pred8, train_labels)
ARI8 = metrics.adjusted_rand_score(pred8, train_labels)
NMI.append(NMI8)
ARI.append(ARI8)
# minibatchkmeans
pred9 = minibatchkmeans(n_clusters,train_imgs3)
NMI9 = metrics.normalized_mutual_info_score(pred9, train_labels)
ARI9 = metrics.adjusted_rand_score(pred9, train_labels)
NMI.append(NMI9)
ARI.append(ARI9)
#### Results Visualization ####
NMI_1=[round(x1,3) for x1 in NMI]
ARI_1=[round(x2,3) for x2 in ARI]
size = 9
x = np.arange(size)
total_width, n = 0.8, 3
width = total_width / n
x = x - (total_width - width) / 2
plt.figure(dpi=300,figsize=(13,6))
p1=plt.bar(x, NMI_1, width=width, label="NMI")
plt.bar_label(p1, label_type='edge',fontsize=8)
p2=plt.bar(x + width, ARI_1, width=width, label="ARI")
plt.bar_label(p2, label_type='edge',fontsize=8)
x_labels = ['kmeans', 'GMM', 'Mkmeans', 'pre1_kmeans', 'pre1_GMM','pre1_Mkmeans', 'pre2_kmeans', 'pre2_GMM','pre2_Mkmeans']
plt.xticks(x, x_labels)
plt.title('NMI and ARI in different algorithms')
plt.legend()
plt.show()
| Mateguo1/KMNIST | cluster/main.py | main.py | py | 3,316 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.normalized_mutual_info_score",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sk... |
71344922594 | from django.core.paginator import Paginator, EmptyPage
from django.core.serializers import serialize
from django.shortcuts import render, redirect, reverse, get_object_or_404
from django.views import View
from django.contrib.auth.mixins import LoginRequiredMixin
from .forms import CreateOfferForm
from companies.models import Companies, Countries
from .models import OffersImages, Offers
from django.views.generic import DeleteView
from users.models import User
from django.http import Http404, JsonResponse, HttpResponse, HttpResponseRedirect
from funcy import omit
from django.dispatch import receiver
import os
from django.db.models.signals import post_delete, pre_save
from users.views import get_profile_ph
from users.models import Profile
import json
from users.mixins import AccessForCompletesOnlyMixin, AccessForMembersOnlyMixin
# from users.views import is_allowed
class CreateOfferView(AccessForMembersOnlyMixin, View):
    """Create a new offer for one of the logged-in user's companies.

    URL kwarg ``pk`` identifies the company the offer is attached to;
    only that company's owner may create offers for it.
    """

    template_name = 'offers/create_offer.html'

    def get(self, request, **kwargs):
        """Render an empty offer creation form."""
        context = {
            'form': CreateOfferForm(),
            'profile_ph': get_profile_ph(request),
        }
        return render(request, self.template_name, context)

    def post(self, request, **kwargs):
        """Validate the form, save the offer and any uploaded images.

        Raises Http404 when the target company does not exist or is not
        owned by the requesting user (404 rather than 403, so company
        ownership is not leaked).
        """
        form = CreateOfferForm(request.POST, request.FILES)
        company_id = kwargs['pk']
        images = request.FILES.getlist('image')
        company = Companies.objects.filter(id=company_id, owner=request.user).first()
        if not company:
            raise Http404
        if form.is_valid():
            obj = form.save(commit=False)
            obj.company = company
            obj.save()
            for uploaded in images:
                OffersImages(offer=obj, image=uploaded).save()
            nexty = request.POST.get('next')
            if nexty:
                # NOTE(review): 'next' is client-supplied and unvalidated —
                # open-redirect risk; consider
                # django.utils.http.url_has_allowed_host_and_scheme().
                try:
                    return HttpResponseRedirect(nexty)
                except Exception:
                    # Was a bare ``except:``; narrowed so system-exiting
                    # exceptions (KeyboardInterrupt, SystemExit) are not
                    # swallowed and converted into 404s.
                    raise Http404
            return redirect('offers:my-offers')
        # Invalid form: re-render with validation errors.
        context = {
            'form': form,
            'profile_ph': get_profile_ph(request),
        }
        return render(request, self.template_name, context)
class DeleteOfferView(AccessForMembersOnlyMixin, View):
    """AJAX endpoint that deletes one of the current user's offers."""

    def post(self, request, **kwargs):
        """Delete the offer whose id is in POST data; 404 unless owned by the user."""
        offer = get_object_or_404(Offers, id=request.POST.get('id'))
        if offer.company.owner != self.request.user:
            raise Http404
        offer.delete()
        return JsonResponse({'deleted': True, 'success': True})
class UpdateOfferView(AccessForMembersOnlyMixin, View):
    """Edit an existing offer.

    URL kwarg ``pk`` identifies the offer; only the owner of the offer's
    company may view or submit the edit form (Http404 otherwise).
    """

    template_name = 'offers/update_offer.html'

    def get(self, request, **kwargs):
        """Render the edit form pre-filled with the offer's current values."""
        obj = get_object_or_404(Offers, id=kwargs['pk'])
        if obj.company.owner != request.user:
            raise Http404
        context = {
            'form': CreateOfferForm(initial={
                'type': obj.type,
                'coupon_price': obj.coupon_price,
                'currency': obj.currency,
                'retail_price': obj.retail_price,
                'title': obj.title,
                'amount_min': obj.amount_min,
                'amount_max': obj.amount_max
            }),
            'images': OffersImages.objects.filter(offer=obj).all(),
            'profile_ph': get_profile_ph(request),
        }
        return render(request, self.template_name, context)

    def post(self, request, **kwargs):
        """Validate and persist the changes, saving any newly uploaded images."""
        instance = get_object_or_404(Offers, id=kwargs['pk'])
        if instance.company.owner != request.user:
            raise Http404
        form = CreateOfferForm(request.POST, request.FILES, instance=instance)
        images = request.FILES.getlist('image')
        if form.is_valid():
            obj = form.save()
            for uploaded in images:
                OffersImages(offer=obj, image=uploaded).save()
            nexty = request.POST.get('next')
            if nexty:
                # NOTE(review): 'next' is client-supplied and unvalidated —
                # open-redirect risk; consider
                # django.utils.http.url_has_allowed_host_and_scheme().
                try:
                    return HttpResponseRedirect(nexty)
                except Exception:
                    # Was a bare ``except:``; narrowed so system-exiting
                    # exceptions are not converted into 404s.
                    raise Http404
            return redirect('offers:my-offers')
        # Invalid form: re-render with validation errors.
        context = {
            'form': form,
            'profile_ph': get_profile_ph(request),
        }
        return render(request, self.template_name, context)
class OfferImageDeleteView(AccessForMembersOnlyMixin, View):
    """AJAX endpoint that deletes a single image attached to an offer."""

    def get(self, request, **kwargs):
        """Delete the image whose id is in the query string.

        NOTE(review): performing a delete on GET is unsafe for caching and
        CSRF reasons; a POST/DELETE handler would be more appropriate.
        """
        image = get_object_or_404(OffersImages, id=request.GET.get('id'))
        if image.offer.company.owner != request.user:
            # Bug fix: the original *returned* the Http404 class instead of
            # raising it, so Django would try to use the exception class as
            # a response object. Raise it so a real 404 is produced.
            raise Http404
        image.delete()
        return JsonResponse({'success': True})
@receiver(post_delete, sender=OffersImages)
def post_save_image(sender, instance, *args, **kwargs):
    """Remove the underlying image file after an OffersImages row is deleted.

    NOTE: despite the name, this handles the ``post_delete`` signal, not
    ``post_save`` — the name is kept because it is part of the module's
    public surface.
    """
    try:
        instance.image.delete(save=False)
    except Exception:
        # Deliberate best-effort cleanup: a missing file or storage error
        # must not abort the model deletion. Narrowed from a bare
        # ``except:`` so KeyboardInterrupt/SystemExit still propagate.
        pass
class MyOffersPageView(AccessForMembersOnlyMixin, View):
template_name = 'offers/my_offers.html'
def get(self, request, **kwargs):
# try:
# is_allowed(request)
# except PermissionError:
# return redirect('set-profile-info')
objects = Companies.objects.filter(owner=request.user).prefetch_related('offers_set',
'owner__profile_set').all()
context = {
'objects': objects,
'profile_ph': get_profile_ph(request),
}
return render(request, self.template_name, context)
class SendImagesView(AccessForMembersOnlyMixin, View):
def get(self, request, **kwargs):
images = OffersImages.objects.filter(offer=request.GET.get('id')).all()
res = []
for image in images:
res.append(image.image.url)
return JsonResponse({'images': res})
class CatalogPageView(View):
catalog_template = 'offers/catalog.html'
catalog_template_hidden = 'offers/catalog-hidden.html'
def get_template(self):
if self.request.user.is_authenticated:
if self.request.user.role >= User.INVITED:
return self.catalog_template
return self.catalog_template_hidden
def get(self, request, **kwargs):
offers = Offers.objects.prefetch_related('offersimages_set', 'company__owner__profile_set').order_by(
'-id').all()
currencies = Offers.objects.values('currency').distinct()
niches = Companies.objects.values('niche').distinct()
# niche = request.GET.get('niche')
# if niche:
# initial_offers = initial_offers.filter(company__niche=niche)
#
# amount_min = request.GET.get('amount_min')
# amount_max = request.GET.get('amount_max')
# if amount_min and amount_max:
# initial_offers = initial_offers.filter(amount_min__lte=amount_max, amount_max__gte=amount_min)
# elif amount_min:
# initial_offers = initial_offers.filter(amount_max__gte=amount_min)
# elif amount_max:
# initial_offers = initial_offers.filter(amount_min__lte=amount_max)
#
# currency = request.GET.get('currency')
# if currency:
# initial_offers = initial_offers.filter(currency=currency)
#
# retail_price_min = request.GET.get('retail_price_min')
# retail_price_max = request.GET.get('retail_price_max')
# if retail_price_min and retail_price_max:
# initial_offers = initial_offers.filter(retail_price__gte=retail_price_min,
# retail_price__lte=retail_price_max)
# elif retail_price_min:
# initial_offers = initial_offers.filter(retail_price__gte=retail_price_min)
# elif retail_price_max:
# initial_offers = initial_offers.filter(retail_price__lte=retail_price_max)
#
# coupon_price_min = request.GET.get('coupon_price_min')
# coupon_price_max = request.GET.get('retail_price_max')
# if coupon_price_min and coupon_price_max:
# initial_offers = initial_offers.filter(retail_price__gte=coupon_price_min,
# retail_price__lte=coupon_price_max)
# elif coupon_price_min:
# initial_offers = initial_offers.filter(retail_price__gte=coupon_price_min)
# elif coupon_price_max:
# initial_offers = initial_offers.filter(retail_price__lte=coupon_price_max)
#
# country = request.GET.get('country')
# if country:
# initial_offers = initial_offers.filter(company__country_of_res=country)
#
# phone_num_show = request.GET.get('phone_num_show')
# #phone_num_show = 1
# if phone_num_show:
# suitable_users = Profile.objects.filter(phone_num_show=True).values_list('user_id', flat=True)
# initial_offers = initial_offers.filter(company__owner__id__in=suitable_users)
#
# print(initial_offers)
#
# p = Paginator(initial_offers, 2)
#
# page = request.GET.get('page')
#
# try:
# res = p.page(page)
# except EmptyPage:
# raise Http404
#
# serialized_data = serialize("json", res, use_natural_foreign_keys=True, use_natural_primary_keys=True)
# print(serialized_data)
#
# if request.headers.get('X-Requested-With') == 'XMLHttpRequest':
# return JsonResponse(serialized_data, safe=False)
try:
profile_ph = get_profile_ph(request)
except TypeError:
profile_ph = None
context = {
'offers': offers,
'profile_ph': profile_ph,
'currencies': currencies,
'niches': niches,
}
return render(request, self.get_template(), context)
@receiver(post_delete, sender=OffersImages)
def post_save_image(sender, instance, *args, **kwargs):
""" Clean Old Image file """
try:
img = OffersImages.objects.filter(offer=instance.offer).first()
obj = Offers.objects.get(id=instance.offer.id)
if not img:
obj.has_photo = False
obj.save()
except:
pass
@receiver(pre_save, sender=OffersImages)
def pre_save_image(sender, instance, *args, **kwargs):
""" instance old image file will delete from os """
try:
obj = Offers.objects.get(id=instance.offer.id)
if not obj.has_photo:
obj.has_photo = True
obj.save()
except:
pass
| cyber-tatarin/crossm | crossm/offers/views.py | views.py | py | 11,407 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "users.mixins.AccessForMembersOnlyMixin",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.views.View",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "forms.CreateOfferForm",
"line_number": 34,
"usage_type": "call"
},
{
"ap... |
5410461819 | from datetime import datetime, timezone
import json
import logging
from math import ceil
from slugify import slugify
from flask import Response, request
from flask_camp import current_api, allow
from sqlalchemy.sql.functions import func
from werkzeug.exceptions import BadRequest
from c2corg_api.search import DocumentSearch, DocumentLocaleSearch
from c2corg_api.models import USERPROFILE_TYPE, ROUTE_TYPE, models as document_types
log = logging.getLogger(__name__)
# Search engines accept not more than 50000 urls per sitemap,
# and the sitemap files may not exceed 10 MB. With 50000 urls the sitemaps
# are not bigger than 9MB, but to be safe we are using 45000 urls per sitemap.
# see http://www.sitemaps.org/protocol.html
PAGES_PER_SITEMAP = 45000
class _Sitemaps:
@staticmethod
def get_locales_per_type():
return (
current_api.database.session.query(DocumentSearch.document_type, func.count().label("count"))
.join(DocumentLocaleSearch, DocumentSearch.id == DocumentLocaleSearch.id)
.filter(DocumentSearch.document_type != USERPROFILE_TYPE)
.group_by(DocumentSearch.document_type)
.all()
)
class _Sitemap:
@staticmethod
def get_locales(document_type, page):
fields = [
DocumentSearch.id,
DocumentLocaleSearch.lang,
DocumentLocaleSearch.title_prefix,
DocumentLocaleSearch.title,
DocumentSearch.timestamp,
]
query = (
current_api.database.session.query(*fields)
.select_from(DocumentLocaleSearch)
.join(DocumentSearch, DocumentSearch.id == DocumentLocaleSearch.id)
.filter(DocumentSearch.document_type == document_type)
.order_by(DocumentLocaleSearch.id, DocumentLocaleSearch.lang)
.limit(PAGES_PER_SITEMAP)
.offset(PAGES_PER_SITEMAP * page)
)
return query.all()
class SitemapsRest(_Sitemaps):
rule = "/sitemaps"
@allow("anonymous")
def get(self):
"""Returns the information needed to generate a sitemap index file.
See: http://www.sitemaps.org/protocol.html
The response consists of a list of URLs to request the information
needed to generate the sitemap linked from the sitemap index.
E.g.
{
"sitemaps": [
"/sitemaps/w/0",
"/sitemaps/a/0",
"/sitemaps/i/0",
"/sitemaps/i/1",
"/sitemaps/i/2",
"/sitemaps/i/3",
"/sitemaps/i/4",
"/sitemaps/i/5",
...
]
}
"""
document_locales_per_type = self.get_locales_per_type()
sitemaps = []
for doc_type, doc_count in document_locales_per_type:
num_sitemaps = ceil(doc_count / PAGES_PER_SITEMAP)
sitemaps += [
{"url": f"/sitemaps/{doc_type}/{i}", "doc_type": doc_type, "i": i} for i in range(0, num_sitemaps)
]
result = Response(response=json.dumps({"sitemaps": sitemaps}), content_type="application/json")
result.add_etag() # TODO : compute it only one time per day
result.make_conditional(request)
return result
class SitemapRest(_Sitemap):
rule = "/sitemaps/<string:document_type>/<int:page>"
@allow("anonymous")
def get(self, document_type, page):
"""Returns the information needed to generate a sitemap for a given type and sitemap page number."""
if document_type not in document_types:
raise BadRequest("Invalid document type")
document_locales = self.get_locales(document_type, page)
# include `title_prefix` for routes
is_route = document_type == ROUTE_TYPE
data = {"pages": [self.format_page(is_route, *locale) for locale in document_locales]}
result = Response(response=json.dumps(data), content_type="application/json")
result.add_etag() # TODO : compute it only one time per day
result.make_conditional(request)
return result
@staticmethod
def format_page(is_route, doc_id, lang, title, title_prefix, last_updated):
page = {"document_id": doc_id, "lang": lang, "title": title, "lastmod": last_updated.isoformat()}
if is_route:
page["title_prefix"] = title_prefix
return page
class SitemapsXml(_Sitemaps):
rule = "/sitemaps.xml"
@allow("anonymous")
def get(self):
"""Returns a sitemap index file.
See: http://www.sitemaps.org/protocol.html
The response consists of a list of URLs of sitemaps.
<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap>
<loc>https://api.camptocamp.org/sitemaps.xml/w/0.xml</loc>
<lastmod>2019-02-11T18:01:49.193770+00:00</lastmod>
</sitemap>
<sitemap>
<loc>https://api.camptocamp.org/sitemaps.xml/a/0.xml</loc>
<lastmod>2019-02-11T18:01:49.193770+00:00</lastmod>
</sitemap>
<sitemap>
<loc>https://api.camptocamp.org/sitemaps.xml/i/0.xml</loc>
<lastmod>2019-02-11T18:01:49.193770+00:00</lastmod>
</sitemap>
<sitemap>
<loc>https://api.camptocamp.org/sitemaps.xml/i/1.xml</loc>
<lastmod>2019-02-11T18:01:49.193770+00:00</lastmod>
</sitemap>
</sitemap>
"""
document_locales_per_type = self.get_locales_per_type()
sitemaps = []
now = datetime.utcnow().replace(tzinfo=timezone.utc)
lastmod = now.isoformat()
template = """<sitemap>
<loc>https://api.camptocamp.org/sitemaps.xml/{doc_type}/{i}.xml</loc>
<lastmod>{lastmod}</lastmod>
</sitemap>"""
for doc_type, count in document_locales_per_type:
num_sitemaps = ceil(count / PAGES_PER_SITEMAP)
sitemaps_for_type = [
template.format(doc_type=doc_type, i=i, lastmod=lastmod) for i in range(0, num_sitemaps)
]
sitemaps.extend(sitemaps_for_type)
body = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
{}
</sitemapindex>""".format(
"\n".join(sitemaps)
)
result = Response(response=body, content_type="text/xml")
result.add_etag() # TODO : compute it only one time per day
result.make_conditional(request)
return result
class SitemapXml(_Sitemap):
rule = "/sitemaps.xml/<string:document_type>/<int:page>.xml"
@allow("anonymous")
def get(self, document_type, page):
"""Returns a sitemap file for a given type and sitemap page number."""
if document_type not in document_types:
raise BadRequest("Invalid document type")
document_locales = self.get_locales(document_type, page)
body = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
{}
</urlset>""".format(
"\n".join([self.format_page(document_type, *locale) for locale in document_locales])
)
result = Response(response=body, content_type="text/xml")
result.add_etag() # TODO : compute it only one time per day
result.make_conditional(request)
return result
@staticmethod
def format_page(document_type, doc_id, lang, title_prefix, title, last_updated):
page = {
"document_id": doc_id,
"lang": lang,
"lastmod": last_updated.isoformat(),
"document_type": document_type,
}
if title_prefix:
page["title"] = slugify(f"{title_prefix} {title}")
else:
page["title"] = slugify(title)
return """<url>
<loc>https://www.camptocamp.org/{document_type}/{document_id}/{lang}/{title}</loc>
<lastmod>{lastmod}</lastmod>
<changefreq>weekly</changefreq>
</url>""".format(
**page
)
| c2corg/c2c_api-poc | c2corg_api/views/sitemap.py | sitemap.py | py | 8,364 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "c2corg_api.search.DocumentLocaleSearch",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "flask_camp.current_api.database.session.query",
"line_number": 29,
"usage_ty... |
35196334080 | from direct.gui.OnscreenImage import OnscreenImage
from pandac.PandaModules import TransparencyAttrib
from direct.gui.OnscreenText import OnscreenText
from direct.showbase.DirectObject import DirectObject
from pandac.PandaModules import TextNode
from gui.GUIOrder import GUIOrder
from event.InventoryEvent import AmmoChangeEvent, SelectedItemChangeEvent
import Globals
class HUDBottomRight(DirectObject):
def __init__(self):
self.node = base.a2dBottomRight.attachNewNode('hudBottomRight')#GUIOrder.ORDER[GUIOrder.HUD])
self.node.setBin('fixed', GUIOrder.ORDER[GUIOrder.HUD])
self.ammoIcon = OnscreenImage(image = 'Assets/Images/HUD/HUDBottomRight.png', scale = 512.0 / 1024, pos = (-0.5, 0, 0.5))
self.ammoIcon.setTransparency(TransparencyAttrib.MAlpha)
self.ammoIcon.reparentTo(self.node)
self.ammoTextClip = OnscreenText(text = '30', pos = (-0.35, 0.09), scale = 0.12, fg = (1, 1, 1, 1), shadow = (0, 0, 0, 1), mayChange = True, align=TextNode.ARight, font = Globals.FONT_SAF)
self.ammoTextClip.reparentTo(self.node)
self.ammoTextClip.setBin('fixed', GUIOrder.ORDER[GUIOrder.HUD])
self.ammoTextLeft = OnscreenText(text = '90', pos = (-0.23, 0.05), scale = 0.07, fg = (1, 1, 1, 1), shadow = (0, 0, 0, 1), mayChange = True, align=TextNode.ARight, font = Globals.FONT_SAF)
self.ammoTextLeft.reparentTo(self.node)
self.ammoTextLeft.setBin('fixed', GUIOrder.ORDER[GUIOrder.HUD])
self.accept(AmmoChangeEvent.EventName, self.OnAmmoChangeEvent)
self.accept(SelectedItemChangeEvent.EventName, self.OnSelectedItemChangeEvent)
def OnAmmoChangeEvent(self, event):
item = event.GetItem()
if(item and item.GetCurrentClipAmmo() == ''):
self.ChangeAmmoText('', '')
else:
self.ChangeAmmoText(str(item.GetCurrentClipAmmo()), str(item.GetTotalRemainingAmmo()))
def ChangeAmmoText(self, clip, total):
self.ammoTextClip.setText(clip)
self.ammoTextLeft.setText(total)
def OnSelectedItemChangeEvent(self, event):
if(event.GetItemStack() and event.GetItemStack().GetItem()):
self.OnAmmoChangeEvent(AmmoChangeEvent(None, event.GetItemStack().GetItem()))
else:
self.ChangeAmmoText('', '')
def Destroy(self):
self.ignoreAll()
self.ammoIcon.removeNode()
self.ammoTextClip.removeNode()
self.ammoTextLeft.removeNode()
self.node.removeNode() | czorn/Modifire | net/modifire/hud/HUDBottomRight.py | HUDBottomRight.py | py | 2,563 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "direct.showbase.DirectObject.DirectObject",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "gui.GUIOrder.GUIOrder.ORDER",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "gui.GUIOrder.GUIOrder",
"line_number": 15,
"usage_type": "name... |
39497210901 | from collections import deque
from sys import stdin
def bfs(x,n,visited,graph):
queue = deque([x])
visited[x] = 1
while queue:
x = queue.popleft()
for v in graph[x]:
if visited[v] == 0:
visited[v] = 1
queue.append(v)
return True
n,m = map(int,stdin.readline().split())
graph = [[] for _ in range(n+1)]
for _ in range(m):
u,v = map(int,stdin.readline().split())
graph[u].append(v)
graph[v].append(u)
visited = [0]*(n+1)
count = 0
for i in range(1,n+1):
if visited[i] == 0:
count += bfs(i,n,visited,graph)
print(count) | yundaehyuck/Python_Algorithm_Note | theory_source_code/graph/connected_component.py | connected_component.py | py | 697 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.deque",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.stdin.readline",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "sys.stdin.readline",
... |
32278157428 | import sqlite3 as sql
import numpy as np
import pandas as pd
import pickle
import os
import joblib
import onnx
import onnxruntime as rt
import torch
filename = "./svm_iris.onnx"
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
rf_model_loaded = onnx.load(os.path.join(PROJECT_ROOT, "static/rf_model_init.onnx"))
sess = rt.InferenceSession(os.path.join(PROJECT_ROOT, "static/rf_model_init.onnx"))
input_name = sess.get_inputs()[0].name
label_name = sess.get_outputs()[0].name
# scaler_loaded = onnx.load(os.path.join(PROJECT_ROOT, "static/scaler_init.onnx"))
# onnx.checker.check_model(scaler_loaded)
fixed_acidity = 7.5
volatile_acidity = 0.75
citric_acid = 3.00
residual_sugar = 3.9
chlorides = 0.176
free_sulfur_dioxide = 12.0
total_sulfur_dioxide = 35.0
density = 0.91
ph = 3.9
sulphates = 0.56
alcohol = 9.4
X = np.array(
[
fixed_acidity,
volatile_acidity,
citric_acid,
residual_sugar,
chlorides,
free_sulfur_dioxide,
total_sulfur_dioxide,
density,
ph,
sulphates,
alcohol,
]
)
pred_onx = sess.run(None, {input_name: X.astype(np.float32)})[0]
print(pred_onx)
X_scaled = scaler_loaded.transform(X)
quality = rf_model_loaded.predict(X_scaled)[0]
res = "хорошее" if quality == 1 else "плохое"
msg = f"Вино {res}"
| AlexeyKlimov-git/Innopolis-ML-course | test_predict_model.py | test_predict_model.py | py | 1,401 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "onnx.load",
"line_n... |
13210877907 | import numpy as np
import matplotlib.pyplot as plt
class differential:
"""Solver of differential equations
using RK4. diffEq should be a function with the
differential equation that returns acceleration.
All variables inside diffEq must be global"""
def __init__(self, diffEq, plot_str, dt=0.01, T=20, x0=0, v0=0):
if callable(diffEq):
self.diffEq = diffEq
else:
raise TypeError('diffEq must be a callable function')
self.dt = float(dt)
self.T = T
self.n = int(self.T/self.dt) + 1
self.x = np.zeros(self.n)
self.v = np.zeros(self.n)
self.t = np.linspace(0,self.T,self.n)
self.x[0] = x0
self.v[0] = v0
self.plot_str = plot_str
def solve(self):
""" method that uses RK4 and diffEq
to solve the entire motion"""
for i in range(self.n - 1):
self.x[i + 1],self.v[i + 1] = self.RK4(self.x[i],self.v[i],self.t[i])
return self.x, self.v, self.t
def RK4(self,xStart,vStart,tStart):
""" Runge-Kutta method of 4th order"""
a1 = self.diffEq(xStart,vStart,tStart)
v1 = vStart
xHalf1 = xStart + v1 * self.dt/2.0
vHalf1 = vStart + a1 * self.dt/2.0
a2 = self.diffEq(xHalf1,vHalf1,tStart+self.dt/2.0)
v2 = vHalf1
xHalf2 = xStart + v2 * self.dt/2.0
vHalf2 = vStart + a2 * self.dt/2.0
a3 = self.diffEq(xHalf2,vHalf2,tStart+self.dt/2.0)
v3 = vHalf2
xEnd = xStart + v3 * self.dt
vEnd = vStart + a3 * self.dt
a4 = self.diffEq(xEnd,vEnd,tStart + self.dt)
v4 = vEnd
aMiddle = 1.0/6.0 * (a1 + 2*a2 + 2*a3 + a4)
vMiddle = 1.0/6.0 * (v1 + 2*v2 + 2*v3 + v4)
xEnd = xStart + vMiddle * self.dt
vEnd = vStart + aMiddle * self.dt
return xEnd, vEnd
def plot(self):
""" plots phase space of motion,
after solve() method is used"""
plt.title(self.plot_str)
plt.plot(self.x,self.v)
plt.xlabel('position [m]')
plt.ylabel('velocity [m/s]')
plt.grid()
plt.show()
if __name__ == '__main__':
def diffEq(xNow,vNow,tNow):
return - k*xNow/m
oppgave1 = differential(diffEq,r'$m\ddot{x}(t) + kx(t) = 0$',x0=1.0)
m = 0.500
k = 1.0
oppgave1.solve()
oppgave1.plot()
"""
[Command: python -u /home/simen/github/university/4semester/fys2130/project.py]
[Finished in 2.405s]
"""
| simehaa/University | fys2130/project.py | project.py | py | 2,477 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.zeros",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.title",
... |
17203669123 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('spirit_user', '0004_auto_20150731_2351'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='given_likes_count',
field=models.PositiveIntegerField(default=0, verbose_name='given likes count'),
),
migrations.AddField(
model_name='userprofile',
name='received_likes_count',
field=models.PositiveIntegerField(default=0, verbose_name='received likes count'),
),
]
| nacoss-biu/nacoss-biu | spirit/user/migrations/0005_auto_20151206_1214.py | 0005_auto_20151206_1214.py | py | 675 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AddField",
"line_number": 14,
"usage_type": "call"
},
{
... |
22098178009 | import yfinance as yf
import datetime
import pandas as pd
def get_dados(siglas, num_dias = 588, intervalo = '1wk', inicio = '', fim = ''):
"""
siglas -> []
Retorna uma lista de DataFrames
com os valores de fechamento das siglas passadas
"""
if inicio == '':
inicio = (datetime.date.today() - datetime.timedelta(num_dias))
if fim == '':
fim = datetime.date.today()
dados = []
for sigla in siglas:
df = yf.download(sigla, start = inicio, end = fim, interval = intervalo)
df.drop(['Open', 'High', 'Low', 'Adj Close', 'Volume'], axis = 1, inplace = True)
df = df.transpose()
df.dropna(axis = 1, inplace = True)
df.index = [sigla]
dados.append(df)
dados = pd.concat(dados)
return dados
| Nadyan/stock-analysis | dados/get_data.py | get_data.py | py | 828 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "datetime.date.today",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.da... |
14538126773 | #!/usr/bin/env python
# encoding: utf-8
from rdflib.serializer import Serializer
import configparser
import corpus
import csv
import glob
import json
import rdflib
import sys
CONFIG = configparser.ConfigParser()
CONFIG.read("rc.cfg")
PREAMBLE = """
@base <https://github.com/Coleridge-Initiative/adrf-onto/wiki/Vocabulary> .
@prefix cito: <http://purl.org/spar/cito/> .
@prefix dct: <http://purl.org/dc/terms/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
"""
TEMPLATE_DATASET = """
:{}
rdf:type :Dataset ;
foaf:page "{}"^^xsd:anyURI ;
dct:publisher "{}" ;
dct:title "{}" ;
"""
TEMPLATE_PUBLICATION = """
:{}
rdf:type :ResearchPublication ;
foaf:page "{}"^^xsd:anyURI ;
dct:publisher "{}" ;
dct:title "{}" ;
dct:identifier "{}" ;
:openAccess "{}"^^xsd:anyURI ;
"""
if __name__ == "__main__":
out_buf = [ PREAMBLE.lstrip() ]
## load the datasets
dataset_path = CONFIG["DEFAULT"]["dataset_path"]
known_datasets = {}
with open(dataset_path, "r") as f:
for elem in json.load(f):
dat_id = elem["id"]
id_list = [elem["provider"], elem["title"]]
known_datasets[dat_id] = corpus.get_hash(id_list, prefix="dataset-")
if "url" in elem:
url = elem["url"]
else:
url = "http://example.com"
out_buf.append(
TEMPLATE_DATASET.format(
known_datasets[dat_id],
url,
elem["provider"],
elem["title"]
).strip()
)
if "alt_title" in elem:
for alt_title in elem["alt_title"]:
out_buf.append(" dct:alternative \"{}\" ;".format(alt_title))
out_buf.append(".\n")
## load the publications
for filename in glob.glob("corpus/pub/*.json"):
with open(filename) as f:
for elem in json.load(f):
link_map = elem["datasets"]
if len(link_map) > 0:
id_list = [elem["publisher"], elem["title"]]
pub_id = corpus.get_hash(id_list, prefix="publication-")
out_buf.append(
TEMPLATE_PUBLICATION.format(
pub_id,
elem["url"],
elem["publisher"],
elem["title"],
elem["doi"],
elem["pdf"]
).strip()
)
dat_list = [ ":{}".format(known_datasets[dat_id]) for dat_id in link_map ]
out_buf.append(" cito:citesAsDataSource {} ;".format(", ".join(dat_list)))
out_buf.append(".\n")
## write the TTL output
filename = "tmp.ttl"
with open(filename, "w") as f:
for text in out_buf:
f.write(text)
f.write("\n")
## load the TTL output as a graph
graph = rdflib.Graph()
graph.parse(filename, format="n3")
## transform graph into JSON-LD
with open("corpus/vocab.json", "r") as f:
context = json.load(f)
with open("tmp.jsonld", "wb") as f:
f.write(graph.serialize(format="json-ld", context=context, indent=2))
## read back
graph = rdflib.Graph()
graph.parse("tmp.jsonld", format="json-ld")
| Coleridge-Initiative/RCHuman | rcc1/bin/gen_ttl.py | gen_ttl.py | py | 3,528 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "corpus.get_hash",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"... |
26219494731 | import tvm
import tvm.relay as relay
import tvm.relay.testing as testing
from graphviz import Digraph
import os
from collage.utils import get_backend_from_backend_pattern_annotation
def _traverse_expr(node, node_dict):
if node in node_dict:
return
if isinstance(node, tvm.ir.op.Op):
return
node_dict[node] = len(node_dict)
def get_node_color(node):
backend_name = get_backend_from_backend_pattern_annotation(node.backend)
# If this is default (no backend op assignment)
color = "greenyellow"
if backend_name == "tensorrt":
color = "orange"
elif backend_name[:3] == "tvm":
color = "greenyellow"
elif backend_name[:5] == "cudnn":
color = "yellow"
elif backend_name[:6] == "cublas":
color = "grey60"
return color
def visualize_backend_placement(expr, file_name, expr2node=None):
dot = Digraph(format='pdf')
dot.attr(rankdir='BT')
node_dict = {}
relay.analysis.post_order_visit(expr, lambda node: _traverse_expr(node, node_dict))
for node, node_idx in node_dict.items():
if not isinstance(node, relay.Let):
node_idx_backend_str = f"[{node_idx}, {node.backend}]"
else:
node_idx_backend_str = f"[{node_idx}, NO_BACKEND]"
# Debug for DP: print node_dfs_order
if expr2node is not None and hash(node) in expr2node:
node_dfs_order = expr2node[hash(node)]._topological_order
node_idx_backend_str = f"[{node_dfs_order}, {node_idx}, {node.backend}]"
node_color = get_node_color(node)
if isinstance(node, relay.Function):
dot.node(str(node_idx), f'Function ({node_idx})', shape='doubleoctagon')
dot.edge(str(node_dict[node.body]), str(node_idx))
elif isinstance(node, relay.expr.Var):
if isinstance(node.type_annotation, tvm.ir.type.TupleType):
type_info = node.type_annotation.fields
tensor_info = f'Tensor[TupleType{tuple(type_info)}]'
elif not hasattr(node.type_annotation, 'shape'):
tensor_info = f'NoType'
else:
type_info = node.type_annotation.shape
tensor_info = f'Tensor[{tuple(type_info)}, {node.type_annotation.dtype}]'
dot.node(str(node_idx), \
f'{node.name_hint} {node_idx_backend_str}:\n{tensor_info}', \
shape='rectangle'
)
elif isinstance(node, relay.expr.GlobalVar):
dot.node(str(node_idx), \
f'{node.name_hint} {node_idx_backend_str}', \
shape='rectangle'
)
elif isinstance(node, relay.Constant):
dot.node(str(node_idx), \
f'Constant {node_idx_backend_str}:\nTensor[{tuple(node.data.shape)}, {node.data.dtype}]', \
shape='rectangle'
)
elif isinstance(node, relay.expr.Call):
args = [node_dict[arg] for arg in node.args]
if isinstance(node.op, tvm.relay.Function):
dot.node(str(node_idx), f'Call {node_idx_backend_str}(Function({node_dict[node.op.body]}))', shape='ellipse',
style='filled', color=node_color)
else:
if isinstance(node.op, relay.expr.GlobalVar):
dot.node(str(node_idx), f'Call{node_idx_backend_str}(GlobalVar={node.op.name_hint})', shape='ellipse', style='filled', color=node_color)
elif isinstance(node.op, relay.Var):
dot.node(str(node_idx), f'Call {node_idx_backend_str}(Var={node.op.name_hint})', shape='ellipse', style='filled', color=node_color)
else:
dot.node(str(node_idx), f'Call {node_idx_backend_str}(op={node.op.name})', shape='ellipse', style='filled', color=node_color)
for arg in args:
dot.edge(str(arg), str(node_idx))
elif isinstance(node, relay.expr.TupleGetItem):
dot.node(str(node_idx), f'TupleGetItem {node_idx_backend_str}(idx={node.index})', shape='ellipse', style='filled', color=node_color)
dot.edge(str(node_dict[node.tuple_value]), str(node_idx))
elif isinstance(node, relay.expr.Tuple):
args = [node_dict[field] for field in node.fields]
dot.node(str(node_idx), f'Tuple {node_idx_backend_str}(fileds=none)', shape='ellipse', style='filled', color=node_color)
for arg in args:
dot.edge(str(arg), str(node_idx))
else:
raise RuntimeError(f'Unknown node type. node_idx: {node_idx}, node: {type(node)}')
dot.render(f'{file_name}.gv')
os.remove(f'{file_name}.gv')
| mikepapadim/collage-non-tvm-fork | python/collage/analysis/visualize.py | visualize.py | py | 4,763 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "tvm.ir",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "collage.utils.get_backend_from_backend_pattern_annotation",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "graphviz.Digraph",
"line_number": 36,
"usage_type": "call"
},
{... |
36975188047 | # -*- coding: cp1252 -*-
import io
import os
import sys
import time
import misctools
import stringtools
class Apho:
def __init__( self ):
self.thous = [] # list of pair (sentence, author)
self.aCountSaid = [] # for each sentence, number of said time
self.aLastSaid = [] # time of last said
self.embed = None # when using it
def load( self ):
"""
Charge les pensees: un fichier avec des pensées, puis sur la derniere ligne l'auteur. séparé par des lignes vides.
eg:
Je ne pense pas à toute la misère, je pense à la beauté qui reste.
Anne Frank
Fais de ta vie un rêve et d’un rêve une réalité.
Antoine de Saint-Exupery
"""
"""
TODO a l'occasion: prendre un gros roman puis chercher des phrases assez courte sans prénom et les définir comme des pensee avec nom de l'auteur, livre et année.
cf D:\books avec des pdfs
"""
f = io.open(misctools.getThisFilePath()+"datas/pensee.txt","r",encoding='cp1252')
blob = [] # un bloc de ligne de texte séparé par une ligne vide
bContinue = 1
while bContinue:
line = f.readline()
if len(line)<1:
bContinue = 0
if bContinue and line[-1] == '\n': line = line[:-1]
if len(line)<1:
if len(blob)>1:
# end of blob
citation = " ".join(blob[:-1])
auth = blob[-1]
self.thous.append( (citation,auth) )
self.aCountSaid.append(0)
self.aLastSaid.append(0)
blob = []
else:
blob.append(line)
#~ print("DBG: load: blob: %s" % str(blob))
#~ print(self.thous)
print("INF: Apho.load: %d loaded apho(s))" % len(self.thous))
def getThoughts( self, sentence ):
"""
find a thoughts not said a lot, relative to sentence.
return a pair, (thought,author) or None if none
"""
bVerbose = 1
#~ bVerbose = 0
bMatchShort = 0
bMatchShort = 1
bUseWordMatching = 1
#~ bUseWordMatching = 0 # use camembert
if bUseWordMatching:
if 0:
sentence = sentence.replace('.', ' ').replace(',', ' ')
words = sentence.split()
words = stringtools.lowerList(words)
else:
import usual_words
words = usual_words.filterSentences(sentence,bVerbose=0)
words = stringtools.lowerList(words)
# add also words without '
i = 0
while i < len(words):
if "'" in words[i]:
words.extend(words[i].split("'"))
i += 1
if bVerbose: print("DBG: getThoughts: words: %s" % words)
# find radical style
i = 0
while i < len(words):
if len(words[i])<3:
del words[i]
continue
# on le fera plus tard pour essayer de matcher sur le mot reel
#~ if len(words[i])>5:
#~ words[i] = words[i][:-3] # travailler => travail
i += 1
# remove usual words
if 0:
import usual_words
i = 0
while i < len(words):
if usual_words.isUsualWord(words[i]):
del words[i]
continue
i += 1
print("match word: %s" % words)
match = []
for t in self.thous:
cit = t[0]
cit = cit.lower()
n = 0
for w in words:
#~ print("compare with cit: %s" % cit)
if w in cit:
if bVerbose or 0: print( "match: '%s' in '%s'" % (w,cit) )
#~ n += 1
n += len(w) # count more point if word is long!
if bMatchShort and len(w)>5:
# lemmatisation du pauvre
if "er" == w[-2:]:
ws = w[:-3]
else:
ws = w[:-2]
if ws in cit:
# pb: ecoute => eco can match with recommencer
if bVerbose: print( "match short: '%s' in '%s'" % (ws,cit) )
n += len(ws)
match.append(n*30/len(cit))
else:
# camembert
import numpy as np
sys.path.append("../camembert")
import sentence_embedding
# generating for all apho
if self.embed == None:
timeBegin = time.time()
if 0:
# 6s
listEmb = []
for t in self.thous:
cit = t[0]
v = sentence_embedding.camEmbedList([cit])[0]
listEmb.append(v)
else:
# once precomputed: 0.12s
list_s = [x[0] for x in self.thous]
listEmb = sentence_embedding.precomputeList(list_s,"apho_embed.txt")
# le 7ieme chiffre après la virgule est différent quand on sauve dans un fichier
print("listEmb takes %.2fs" % (time.time()-timeBegin))
#~ print(listEmb[0])
#~ print(listEmb[1])
#~ print(listEmb[2])
self.embed = listEmb
e = sentence_embedding.camEmbedList(sentence)
match = []
for v in self.embed:
simi = np.dot(e,v)/0.4 # 0.4 is a good threshold for simi
#reglage pour ne pas prendre ceux qui sont trop loin (on a ensuite un filtre >=1)
#~ simi *= 2.35 # un peu trop sympa, 19 hits: laisse passer 'c'est bien fait peur. tu es un bon petit gars. eh mon petit gnocchi est-ce qu'il a." => Gaïa, c'est l'heure d'aller te coucher.
simi *= 2.2 # plus limité: 14 hits
match.append(simi)
# at this point we have a list of match for each thou (the greater the better)
print("match: %s" % match)
#~ [x for _, x in sorted(zip(Y, X))]
# both are working, but second seems faster, todolater: measures
#~ index_order = [x for _, x in sorted(zip(match, range(len(match))),reverse=True)]
index_order = sorted(range(len(match)), key=lambda k: match[k],reverse=True)
#~ print("index_order: %s" % index_order)
# etais ce vraiment la peine de les trier, alors qu'on va les parcourir ensuite ?
less_said_idx = index_order[0]
for idx in index_order[1:]:
if match[idx]<1:
break
if time.time()-self.aLastSaid[idx]<5*60:
continue
if self.aCountSaid[less_said_idx] > self.aCountSaid[idx]:
less_said_idx = idx
print("less_said_idx: %d" % less_said_idx )
if match[less_said_idx] < 1:
return None
if self.aCountSaid[less_said_idx] > 0 and 0:
# decide to say already said or not ?
return None
# first sentence of the list can be selected by default
if time.time()-self.aLastSaid[less_said_idx]<5*60:
return None
self.aCountSaid[less_said_idx] += 1
self.aLastSaid[less_said_idx] = time.time()
print("match: %.2f" % match[less_said_idx] )
return self.thous[less_said_idx]
# class Apho - end
apho = Apho()
apho.load()
global_tts = None
def say(txt):
global global_tts
if global_tts == None:
import pyttsx3
global_tts = pyttsx3.init()
if 1:
txt = txt.replace("Gaia", "Gaïa")
print("INF: say: '%s'" % txt)
global_tts.say(txt)
global_tts.runAndWait()
def sayGoogle(txt):
sys.path.append("../scripts")
import tts_say
tts_say.say(txt)
def wordsCallback(words,confidence):
if confidence<0.6:
return
if len(words)<4:
return
print("INF: heard: '%s'" % words)
#~ say(phrase)
ret = apho.getThoughts(words)
if ret != None:
saymethod = say
if 1:
saymethod = sayGoogle
saymethod(ret[0])
saymethod(ret[1])
def test_loop_asr():
if 0:
from pocketsphinx import LiveSpeech, get_model_path
import os
model_path = get_model_path()
print("model_path: %s" % model_path )
# good model path:
model_path = "C:\\Python39\\Lib\\site-packages\\speech_recognition\\pocketsphinx-data\\"
strAnsiLang = "fr-FR"
for phrase in LiveSpeech(
hmm=(os.path.join(model_path, strAnsiLang)+"\\acoustic-model\\"),
lm=os.path.join(model_path, strAnsiLang+'\\language-model.lm.bin'),
dic=os.path.join(model_path, strAnsiLang+'\\pronounciation-dictionary.dict')
):
phrase = str(phrase)
wordsCallback( phrase, 0.5)
else:
# my own one
import microphone
microphone.loopProcess(wordsCallback)
"""
# probleme actuel, les mots banals hits trop:
+ mettre malus sur longueur de la phrase!
INF: heard: 'c'est cool'
less_said_idx: 3
INF: say: 'Le plus difficile, ce n'est pas de sortir de Polytechnique, c'est de sortir de l'ordinaire.'
INF: heard: 'je suis bien d'accord'
less_said_idx: 115
INF: say: 'Je ne comprends pas qu'on achète du vin sans l'avoir goûté au préalable. Il ne viendrait à personne l'idée d'acheter un pantalon sans l'essayer avant. Alors, Dieu me tire-bouchonne, ne refusez pas à votre bouche ce que vous accordez à vos fesses.'
INF: heard: 'un bon petit vin rouge'
less_said_idx: 8
INF: say: 'Le bonheur est une petite chose que l'on grignote, assis par terre, au soleil.'
"""
def strToPrint(s):
if sys.version_info[0] >= 3:
return s
o = ""
for c in s:
#~ print( ord(c) )
if ord(c) <= 127:
o += c
return o
global_testApho_nbr_hit = 0
def testApho(s):
global global_testApho_nbr_hit
ret = apho.getThoughts(s)
print("\n%s" % strToPrint(s))
print("=>")
if ret != None:
s = ret[0]
#~ print(str(s)) # UnicodeEncodeError: 'ascii' codec can't encode character u'\xe0'
print(strToPrint(s)) # cette ligne bloque en python 2.7, LookupError: unknown encoding: cp65001 # corriger en faisant dans le shell: set PYTHONIOENCODING=UTF-8
global_testApho_nbr_hit += 1
print("")
def autoTest():
testApho("j'aime pas travailler")
testApho("j'aime pas travailler")
testApho("j'aime pas travailler")
testApho("j'aime pas travailler")
print("")
#~ testApho("j'ai la volonté de t'aider"))
#~ testApho("j'ai la volonté de t'aider"))
#~ testApho("j'ai la volonté de t'aider"))
testApho("j'ai la volonté de t'aider")
testApho("j'ai la volonté de t'aider")
testApho("j'ai la volonté de t'aider")
print("")
testApho("Il me faudrait du courage")
testApho("Il me faudrait du courage")
testApho("J'aime le ChamPagne.")
testApho("J'aime le ChamPagne.")
testApho("J'aime le vin.")
testApho("J'aime le vin.")
testApho("d'attendre la pluie")
testApho("d'attendre la pluie")
testApho("attendre la pluie")
print("")
testApho("Dis moi une phrase")
testApho("Ecoute moi")
testApho("Dis moi un truc intelligent!")
testApho("Dis moi un truc intelligent!")
print("")
testApho("Dis moi, tu connais des gens célèbres?")
testApho("Dis moi, tu connais des gens célèbres?")
testApho("travailler moins c'est cool ou pas ?")
testApho("travailler moins c'est cool ou pas ?")
testApho("Consommer moins c'est cool ou pas ?")
testApho("c'est bien fait peur. tu es un bon petit gars. eh mon petit gnocchi est-ce qu'il a.")
# => "Gourmandise : source inépuisable de bonheur. a cause de bonheur" bon match bonheur, c'est moche.
# => maintenant donne, Quand vous êtes à Rome, faites comme les Romains. fait => faites
# => Le seul fait d’exister est un véritable bonheur
testApho("ambulance") # => Ben par exemple, c'est un mec qui va a une soirée genre bien habillé
print("")
print("global_testApho_nbr_hit: %s" % global_testApho_nbr_hit )
assert(global_testApho_nbr_hit >= 20)
if 0:
# test sur python 2.7
print(stringtools.accentToHtml("un élève"))
for i,a in enumerate(apho.thous):
print(i)
#~ print(stringtools.accentToHtml(a[0]))
s1 = stringtools.cp1252ToHtml(a[0])
s2 = stringtools.cp1252ToHtml(a[1])
print(s1)
print(s2)
print(stringtools.transformAccentToUtf8(a[0]))
print(stringtools.transformAccentToUtf8(a[1]))
#~ if i>80:
#~ break
if __name__ == "__main__":
#~ autoTest()
test_loop_asr() | alexandre-mazel/electronoos | alex_pytools/apho.py | apho.py | py | 13,380 | python | fr | code | 2 | github-code | 1 | [
{
"api_name": "io.open",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "misctools.getThisFilePath",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "stringtools.lowerList",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "usual_words.f... |
71989716193 | """SimPhoNy-wrapper for celery-workflows"""
import logging
from typing import TYPE_CHECKING
from osp.core.namespaces import emmo
from osp.core.session import SimWrapperSession
from .celery_workflow_engine import CeleryWorkflowEngine
if TYPE_CHECKING:
from typing import UUID, Any, Dict, List, Optional
from pydantic import BaseSettings
from osp.core.cuds import Cuds
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class CeleryWorkflowSession(SimWrapperSession):
"""SimPhoNy-wrapper session for Celery-Chains and Worklows"""
def __init__(
self,
input_uuid: "UUID",
engine: CeleryWorkflowEngine = None,
logging_id: str = None,
) -> None:
"""Initalite the session."""
if not engine:
engine = CeleryWorkflowEngine(input_uuid, logging_id=logging_id)
super().__init__(engine=engine)
# OVERRIDE
def __str__(self):
return "CeleryWorkflowSession"
# OVERRIDE
def _run(self, root_cuds_object) -> str:
"""Run the wrapper session."""
return self._engine.run()
# OVERRIDE
def _apply_added(self, root_obj, buffer) -> None:
"""Apply scans of added cuds in buffer."""
for obj in buffer.values():
if obj.is_a(emmo.Workflow):
message = """Found %s. Will search for workflow steps
and related workers on the platform."""
logger.info(message, obj)
self._scan_for_neighbours(obj)
if not self._engine.tasks:
message = """Did not find any workflow steps with
complementary workers. Will scan for single
object of type %s."""
logger.info(message, emmo.Calculation)
self._scan_for_single_calc(buffer)
else:
message = """Scan for workflow steps complete.
Identified the following chain of workers: %s"""
logger.info(message, self._engine.tasks)
def _scan_for_neighbours(self, obj: "Cuds", step: str = "first") -> None:
"""Scan the input cuds for neighbour-tasks"""
neighbour = obj.get(rel=emmo[f"hasSpatial{step.title()}"])
if not neighbour:
message = f"""Did not find {step} task in the chain of calculations in {obj}.
Workflow chain has ended here."""
logger.info(message)
else:
neighbour = neighbour.pop()
self._get_worker_mapping(neighbour)
self._scan_for_neighbours(neighbour, step="next")
def _scan_for_single_calc(self, buffer: "Dict[Any, Cuds]") -> None:
"""Find single calculations to be run in the buffer."""
for obj in buffer.values():
if obj.is_a(emmo.Calculation):
self._get_worker_mapping(obj)
if not self._engine.tasks:
message = """Did not find any calculations with
complementary workers."""
raise TypeError(message)
message = """Found additional workers %s in the buffer,
but will ignored because not part of a workflow chain."""
logger.info(message, self._engine.tasks)
def _get_worker_mapping(self, calculation: "Cuds") -> None:
mapping = self._scan_worker_mapping(calculation)
if not mapping:
message = f"""Task in the chain of calculations is not properly
mapped to an existing worker {calculation}."""
logger.info(message)
else:
self._engine.add_task((calculation, mapping))
def _scan_worker_mapping(self, calculation: "Cuds") -> "Optional[str]":
response = []
for superclass, mapping in self.worker_mapping.items():
if calculation.is_a(superclass):
response.append(mapping)
if len(response) > 1:
raise ValueError(
f"More than 1 {calculation.oclass} found in worker mapping!"
)
if response:
response = response.pop()
return response
# OVERRIDE
def _apply_deleted(self, root_obj, buffer) -> None:
"""Apply functions for updated-buffer."""
def _apply_updated(self, root_obj, buffer) -> None:
"""Apply functions for deleted-buffer."""
# OVERRIDE
def _load_from_backend(self, uids, expired=None) -> "Cuds":
"""Load a cuds from backend."""
for uid in uids:
if uid in self._registry:
yield self._registry.get(uid)
else:
yield None
@property
def settings(cls) -> "BaseSettings":
"""Return the settings from the engine."""
return cls._engine.settings
@property
def result(cls) -> "Dict[str, Any]":
"""Return the final result of the workflow"""
return cls._engine.result
@property
def worker_mapping(cls) -> "Dict[str, Any]":
"""Return the mappings of the ontology class and worker name"""
return cls._engine.worker_mapping
@property
def workers(cls) -> "List[str]":
"""Return the list of workers to pass the knowledge graph in a chain."""
return cls._engine.workers
| simphony/reaxpro-workflow-service | osp/wrappers/celery_workflow_wrapper/celery_workflow_wrapper.py | celery_workflow_wrapper.py | py | 5,175 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "osp.core.ses... |
22409149985 | # coding:utf8
# author:winton
import logging
import os
import datetime
import argparse
from conf import Config
from util import Util
from ConsumerManager import ConsumerManager
class Lams:
'''
控制数据收集的主要流程
'''
def init(self):
'''
读取配置并完成初始化
'''
loggerConfig = Config.logger
# 检查日志文件夹是否存在
logDir = os.path.split(loggerConfig['filename'])[0]
if not os.path.exists(logDir):
os.makedirs(logDir)
# 日志配置
logging.basicConfig(
level=loggerConfig['level'],
format=loggerConfig['format'],
filename=loggerConfig['filename'],
encoding=loggerConfig['encoding']
)
# 载入consumer
self.cm = ConsumerManager(Config)
logging.info('%d Consumers loaded' % len(self.cm.consumers))
self.allFile = 0
self.successFile = 0
logging.info('Lams starting...')
def startForNew(self, move_after_dispatch=True):
'''
开始处理指定目录下的数据
'''
dataDir = Config.datapool_new
dataList = os.listdir(dataDir)
if len(dataList) == 0:
logging.info('New data not found, exiting...')
os.system('exit 0')
logging.info('New data found, dispatching...')
for filename in dataList:
self.dispatch(filename, dataDir, move_after_dispatch)
logging.info('Dispatching finish, %d success, %d fail' % (self.successFile, self.allFile - self.successFile))
os.system('exit 0')
def startForAll(self, classInfo=None):
'''
重新处理所有数据
'''
if classInfo is not None:
logging.info('dispatch for class [%s]' % classInfo)
logging.info('dispatch all data...')
for parent, dirnames, filenames in os.walk(Config.datapool):
for filename in filenames:
self.dispatch(filename, parent, False, classInfo=classInfo)
logging.info('Dispatching finish, %d success, %d fail' % (self.successFile, self.allFile - self.successFile))
os.system('exit 0')
def dispatch(self, filename, dataDir, move_after_dispatch=True, classInfo=None):
'''
分发对应的文件列表
'''
filePath = os.path.join(dataDir, filename)
try:
event = Util.loadJsonFile(filePath)
consumers = self.cm.getMapConsumer(event, classInfo)
for csm in consumers:
logging.info('event "%s" is sending to consumer "%s"' % (filePath, csm))
self.cm.emitEvent(event, consumers)
except Exception as e:
logging.exception('Error when dispatching "%s" [%s]' % (filePath, str(e)))
else:
self.successFile += 1
# 将已处理的文件移动到指定文件夹
if move_after_dispatch:
t = datetime.datetime.now()
today = t.strftime('%Y-%m-%d')
newDir = '%s/%s' % (Config.datapool, today)
if not os.path.exists(newDir):
os.makedirs(newDir)
newFilePath = '%s/%s' % (newDir, filename)
logging.debug('moving [src=%s] [dst=%s]' % (filePath, newFilePath))
os.rename(filePath, newFilePath)
finally:
self.allFile += 1
if __name__ == '__main__':
ap = argparse.ArgumentParser(description='do dispatching jobs')
ap.add_argument('-A', '--all', action='store_true', help='dispatch all history data and new data')
ap.add_argument('-c', help='dispatch all but just dispatch to one class, use it in this form [moduleName:className]')
args = ap.parse_args()
test = Lams()
test.init()
if args.all:
test.startForAll()
elif args.c is not None:
test.startForAll(args.c.split(':'))
else:
test.startForNew()
| WintonLuo/Lams | lams.py | lams.py | py | 3,982 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "conf.Config.logger",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "conf.Config",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "os.path.split",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line... |
20048142461 | from django.urls import path
from .views import MovieList,MovieDetail,MovieCategory,MovieLanguage,MovieSearch,MovieYear,MostWatch
from django.conf.urls import url
app_name='movie'
urlpatterns = [
path('', MovieList.as_view(),name='Movie_List'),
path('category/<str:category>', MovieCategory.as_view(),name='MovieCategory'),
path('language/<str:lang>', MovieLanguage.as_view(),name='MovieLanguage'),
path('search/', MovieSearch.as_view(),name='MovieSearch'),
path('<slug:slug>', MovieDetail.as_view(),name='Movie_Detail'),
path('year/<int:year>', MovieYear.as_view(),name='MovieYear'),
path('mostwatch/',MostWatch.as_view(),name='MostWatch')
] | sureshsaravananbabu/IMDB-clone | imdb/movie/urls.py | urls.py | py | 673 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "views.MovieList.as_view",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "views.MovieList",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.urls.pat... |
9758744512 | import sys
import traceback
import zipfile
from array import *
import re as regEx
import random
import socket
import struct
import ipaddress
import subprocess
import os
from datetime import datetime
import time
import logging
# Regex strings for all it should search for in the files
ipv4Pattern = regEx.compile(r'(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]'
r'[0-9]|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9][0-9]|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|'
r'1[0-9]{2}|[1-9][0-9]|[0-9])')
ipv6Pattern = regEx.compile(r'(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:)'
r'{1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]'
r'{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|'
r'([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4})'
r'{1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::'
r'(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|'
r'(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9])'
r'{0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))')
imsiPattern = regEx.compile(r'[0-9]{13,17}')
imsiHexPattern = regEx.compile(r'(IMSI : { |IMSI : )(\d{2}\ |\d[A-Z]\ |[A-Z]\d\ )(\d{2}\ |\d[A-Z]\ |[A-Z]\d\ )'
r'(\d{2}\ |\d[A-Z]\ |[A-Z]\d\ )(\d{2}\ |\d[A-Z]\ |[A-Z]\d\ )(\d{2}\ |\d[A-Z]\ |'
r'[A-Z]\d\ )(\d{2}\ |\d[A-Z]\ |[A-Z]\d\ )(\d{2}\ |\d[A-Z]\ |[A-Z]\d\ )(\d{2}\ |'
r'\d[A-Z]\ |[A-Z]\d\ )(\d{2}|\d[A-Z]|[A-Z]\d)')
macPattern = regEx.compile(r'(?:[0-9A-Fa-f]{2}[:-]){5}(?:[0-9A-Fa-f]{2})')
hostnamePattern = regEx.compile(r'(?i)epcgw')
usernamePattern = regEx.compile(r'(?i)serviceuser')
urlPattern = regEx.compile(r'(?i)splunk')
hostnameMatches = {}
usernameMatches = {}
urlMatches = {}
# Dictionaries and lists for all matches found during the washing
ipv4AddressMatches = {}
ipv6AddressMatches = {}
imsiMatches = {}
imsiHexMatches = {}
macMatches = {}
urlFound = []
hostnameFound = []
usernameFound = []
# Different variables needed throughout the script
inboxLocation = '/nfs/data/inbox/'
folderLocation = '/nfs/data/'
tmpLocation = '/nfs/data/tmp/'
folder_dt = datetime.now()
dt_string = folder_dt.strftime('%y-%m-%d-%H.%M')
outboxLocation = folderLocation + '/outbox/'
outboxDirname = 'washed-' + dt_string
outboxDir = folderLocation + '/outbox/' + outboxDirname
tmpDir = tmpLocation + outboxDirname
script_log = '/nfs/data/cron-script.log'
washing_log = '/local/scramble/washing-script/log/washing-script.log'
zipEnd = '.zip'
zipLogEnd = '.log.zip'
gzEnd = '.gz'
gzLogEnd = '.log.gz'
tarEnd = '.tar'
tarLogEnd = '.log.tar'
targzEnd = '.tar.gz'
targzLogEnd = '.log.tar.gz'
def replaceCharsInTuple(tuple):
try:
# Clean up IP-addresses in tuple and return as string
tupleToReturn = tuple.replace(',', '.').replace('(', '').replace(')', '').replace(' ', '').replace('\'', '')
return ''.join(tupleToReturn)
except Exception:
logger.error(current_time() + ' - An error occurred:\n' + traceback.format_exc())
def replaceIpv6Tuple(tuple):
try:
ipv6TupleToReturn = tuple[0]
return ipv6TupleToReturn
except Exception:
logger.error(current_time() + ' - An error occurred:\n' + traceback.format_exc())
def InsertSpaceInTupleImsiHex(tuple):
try:
# Clean up IMSI hex in tuple and return as string
tupleToReturn = tuple.replace(',', '').replace('(', '').replace(')', '').replace('\'', '') \
.replace('IMSI : ', '').replace('IMSI : { ', '').replace(':', '').replace('{', '').replace(' ', '') \
.replace(' ', ' ')
return ''.join(tupleToReturn)
except Exception:
logger.error(current_time() + ' - An error occurred:\n' + traceback.format_exc())
def checkIfIpv4ExistsAndReplace(match):
try:
# If match not found in dictionary, generate a new IPv4 address with the first two octates as x
if ipv4AddressMatches.get(match) == None:
ipv4List = ['x', 'x']
ipv4List.append(str(random.randint(1, 255)))
ipv4List.append(str(random.randint(1, 255)))
ipv4AddressMatches[match] = '.'.join(ipv4List)
return ipv4AddressMatches.get(match)
except Exception:
logger.error(current_time() + ' - An error occurred:\n' + traceback.format_exc())
def checkIfIpv6ExistsAndReplace(match):
try:
# If match not found in dictionary, generate a new IPv6 address
false_ipv6 = regEx.compile(r'[a-fA-F]{1,3}::|::[a-fA-F]{1,3}| ::|:: | :: ')
false_match = regEx.findall(false_ipv6, match)
socket_match = socket.inet_pton(socket.AF_INET6, match)
if True:
if match != '::':
if len(false_match) == 0:
if ipv6AddressMatches.get(match) == None:
ipv6AddressMatches[match] = ipaddress.IPv6Address(
random.randint(0, 2 ** 128 - 1)) # Add random IPv6
return ipv6AddressMatches.get(match)
if len(false_match) <= 1:
ipv6AddressMatches[match] = false_match[0]
return ipv6AddressMatches.get(match)
if match == '::':
ipv6AddressMatches[match] = '::'
return ipv6AddressMatches.get(match)
if False:
ipv6AddressMatches[match] = false_match[0]
return ipv6AddressMatches.get(match)
except socket.error:
ipv6AddressMatches[match] = false_match[0]
return ipv6AddressMatches.get(match)
def checkIfMacExistsAndReplace(match):
try:
# If match not found in dictionary, generate a new mac address
if macMatches.get(match) == None:
macMatches[match] = '%02x:%02x:%02x:%02x:%02x:%02x' % (
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255))
return macMatches.get(match)
except Exception:
logger.error(current_time() + ' - An error occurred:\n' + traceback.format_exc())
def checkIfImsiExistsAndReplace(match):
try:
# If match not found in dictionary, generate a new random number with xxxxxx in front
if imsiMatches.get(match) == None:
imsiList = ['xxxxx']
imsiList.append(str(random.randint(1000000000, 9999999999)))
imsiMatches[match] = ''.join(imsiList)
return imsiMatches.get(match)
except Exception:
logger.error(current_time() + ' - An error occurred:\n' + traceback.format_exc())
def checkIfImsiHexExistsAndReplace(match):
try:
# If match not found in dictionary, generate a new random number with xx on part of the hex
if imsiHexMatches.get(match) == None:
imsiHexList = ['xx ', 'xx ', 'xx ', 'xx ']
imsiHexList.append(str(random.randint(10, 99)) + ' ' + str(random.randint(10, 99)) + ' ' +
str(random.randint(10, 99)) + ' ' + str(random.randint(10, 99)) + ' ' +
str(random.randint(0, 9)) + 'F')
imsiHexMatches[match] = str(''.join(imsiHexList))
return imsiHexMatches.get(match)
except Exception:
logger.error(current_time() + ' - An error occurred:\n' + traceback.format_exc())
def checkIfHostnameExistsAndReplace(match):
try:
# If match not found in dictionary, replace it. If found replace item listed in the dictionary
if hostnameMatches.get(match) == None:
logger.warning(current_time() + ' - !!!!!!!!!!' + match + ' not found in hostname list!!!!!!!!!!\n'
'Match removed from file')
hostnameMatches[match] = str('xxxxxxx')
return hostnameMatches.get(match)
except Exception:
logger.error(current_time() + ' - An error occurred:\n' + traceback.format_exc())
def checkIfUsernameExistsAndReplace(match):
try:
# If match not found in dictionary, replace it. If found replace item listed in the dictionary
if usernameMatches.get(match) == None:
usernameMatches[match] = str('xxxxxxxxx')
return usernameMatches.get(match)
except Exception:
logger.error(current_time() + ' - An error occurred:\n' + traceback.format_exc())
def checkIfUrlExistsAndReplace(match):
try:
# If match not found in dictionary, replace it. If found replace item listed in the dictionary
if urlMatches.get(match) == None:
logger.warning(current_time() + ' - !!!!!!!!!!' + match + ' not found in URL list!!!!!!!!!!\n'
'Match removed from file')
urlMatches[match] = str('xxxxx')
return urlMatches.get(match)
except Exception:
logger.error(current_time() + ' - An error occurred:\n' + traceback.format_exc())
def current_time():
return time.strftime('%d-%m-%y %H:%M:%S', time.localtime())
logging.basicConfig(filename=washing_log,
format='%(message)s')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
def wash_filename(item):
try:
global washed_filename
current_filename = str(item)
filename_ipv4Match = regEx.findall(ipv4Pattern, item)
filename_ipv6Match = regEx.findall(ipv6Pattern, item)
filename_imsiMatch = regEx.findall(imsiPattern, item)
filename_imsiHexMatch = regEx.findall(imsiHexPattern, item)
filename_hostnameMatch = regEx.findall(hostnamePattern, item)
filename_macMatch = regEx.findall(macPattern, item)
filename_usernameMatch = regEx.findall(usernamePattern, item)
filename_urlMatch = regEx.findall(urlPattern, item)
filename_ipv4Array = []
filename_ipv6Array = []
filename_imsiArray = []
filename_imsiHexArray = []
filename_macArray = []
filename_hostnameArray = []
filename_usernameArray = []
filename_urlArray = []
for i in range(len(filename_ipv4Match)):
filename_ipv4Array.append(replaceCharsInTuple(str(filename_ipv4Match[i])))
for i in range(len(filename_ipv6Match)):
filename_ipv6Array.append(replaceIpv6Tuple(filename_ipv6Match[i]))
for i in range(len(filename_macMatch)):
filename_macArray.append(filename_macMatch[i])
for i in range(len(filename_imsiMatch)):
filename_imsiArray.append(filename_imsiMatch[i])
for i in range(len(filename_imsiHexMatch)):
filename_imsiHexArray.append(InsertSpaceInTupleImsiHex(str(filename_imsiHexMatch[i])))
for i in range(len(filename_hostnameMatch)):
filename_hostnameArray.append(filename_hostnameMatch[i])
for i in range(len(filename_usernameMatch)):
filename_usernameArray.append(filename_usernameMatch[i])
for i in range(len(filename_urlMatch)):
filename_urlArray.append(filename_urlMatch[i])
for ipv4 in filename_ipv4Array:
replacedIpv4Address = checkIfIpv4ExistsAndReplace(ipv4)
new_filename = item.replace(ipv4, replacedIpv4Address)
washed_filename = new_filename # Overwrite current filename with new filename
for ipv6 in filename_ipv6Array:
replacedIpv6Address = checkIfIpv6ExistsAndReplace(ipv6)
new_filename = item.replace(ipv6, str(replacedIpv6Address))
washed_filename = new_filename # Overwrite current filename with new filename
for mac in filename_macArray:
replacedMacAddress = checkIfMacExistsAndReplace(mac.lower())
new_filename = item.replace(mac, replacedMacAddress)
washed_filename = new_filename # Overwrite current filename with new filename
for imsi in filename_imsiArray:
replacedImsiAddress = checkIfImsiExistsAndReplace(imsi)
new_filename = item.replace(imsi, replacedImsiAddress)
washed_filename = new_filename # Overwrite current filename with new filename
for imsiHex in filename_imsiHexArray:
replacedImsiHexAddress = checkIfImsiHexExistsAndReplace(imsiHex)
new_filename = item.replace(imsiHex, replacedImsiHexAddress)
washed_filename = new_filename # Overwrite current filename with new filename
for hostname in filename_hostnameArray:
if hostname.lower() not in hostnameFound:
hostnameFound.append(hostname.lower())
replacedHostnameAddress = checkIfHostnameExistsAndReplace(hostname.lower())
new_filename = item.replace(hostname, replacedHostnameAddress)
washed_filename = new_filename # Overwrite current filename with new filename
for username in filename_usernameArray:
if username.lower() not in usernameFound:
usernameFound.append(username.lower())
replacedUsernameAddress = checkIfUsernameExistsAndReplace(username.lower())
new_filename = item.replace(username, str(replacedUsernameAddress))
washed_filename = new_filename # Overwrite current filename with new filename
for url in filename_urlArray:
if url.lower() not in urlFound:
urlFound.append(url.lower())
replacedurlAddress = checkIfUrlExistsAndReplace(url.lower())
new_filename = item.replace(url, replacedurlAddress)
washed_filename = new_filename # Overwrite current filename with new filename
if current_filename == str(washed_filename):
return washed_filename
except Exception:
logger.error(current_time() + ' - An error occurred:\n' + traceback.format_exc())
def unzipFiles():
try:
os.chdir(tmpLocation)
os.getcwd()
# Walk down the tmp folder and find/unzip all zipped files
# If there is a zipped file, it will start over to walk into the unzipped files aswell
for root, dirs, files in os.walk(tmpLocation):
for item in files:
itemLocation = root + '/' + item
# Unzip files ending with .gz, .zip or .tar, then delete the zipped version
if os.path.exists(itemLocation):
if item.endswith(zipEnd):
logger.info(current_time() + ' - Unzipping: "' + root + '"/"' + item + '"')
newZipName = item.replace('.zip', '')
os.system('mkdir "' + str(root) + '"/"' + str(newZipName) + '"')
os.system('unzip -o -qq "' + str(root) + '"/"' + str(item) + '" -d "' + str(root) + '"/"' +
str(newZipName) + '"')
os.system('rm -rf "' + str(root) + '"/"' + str(item) + '"')
unzipFiles()
if item.endswith(zipLogEnd):
logger.info(current_time() + ' - Unzipping: "' + root + '"/"' + item + '"')
newZipName = item.replace('.zip', '')
os.system('mkdir "' + str(root) + '"/"' + str(newZipName) + '"')
os.system('unzip -o -qq "' + str(root) + '"/"' + str(item) + '" -d "' + str(root) + '"/"' +
str(newZipName) + '"')
os.system('rm -rf "' + str(root) + '"/"' + str(item) + '"')
if item.endswith(targzEnd) or item.endswith(tarEnd):
logger.info(current_time() + ' - Unzipping tar file: "' + root + '"/"' + str(item) + '"')
newTarName1 = item.replace('.gz', '')
newTarName2 = newTarName1.replace('.tar', '')
os.system('mkdir "' + str(root) + '"/"' + newTarName2 + '"')
os.system('tar -xf "' + str(root) + '"/"' + str(item) + '" -C "' + str(root) + '"/"' +
str(newTarName2) + '"')
os.system('rm -rf "' + str(root) + '"/"' + str(item) + '"')
unzipFiles()
if item.endswith(targzLogEnd) or item.endswith(tarLogEnd):
logger.info(current_time() + ' - Unzipping tar file: "' + root + '"/"' + str(item) + '"')
newTarName1 = item.replace('.gz', '')
newTarName2 = newTarName1.replace('.tar', '')
os.system('mkdir "' + str(root) + '"/"' + newTarName2 + '"')
os.system('tar -xf "' + str(root) + '"/"' + str(item) + '" -C "' + str(root) + '"/"' +
str(newTarName2) + '"')
os.system('rm -rf "' + str(root) + '"/"' + str(item) + '"')
if item.endswith(gzEnd):
logger.info(current_time() + ' - Unzipping gz file: "' + root + '"/"' + item + '"')
os.system('gzip -fd "' + str(root) + '"/"' + str(item) + '"')
unzipFiles()
if item.endswith(gzLogEnd):
logger.info(current_time() + ' - Unzipping gz file: "' + root + '"/"' + item + '"')
os.system('gzip -fd "' + str(root) + '"/"' + str(item) + '"')
walking_file()
except Exception as err:
with open(script_log, 'a') as f:
f.write(current_time() + ' - An error occurred\n' + traceback.format(err))
logger.error(current_time() + ' - An error occurred\n' + traceback.format(err))
quit()
def walking_file():
try:
global washed_filename
tmp_dirs = os.listdir(tmpLocation)
for folder in tmp_dirs:
for root, dirs, files in os.walk(folder):
for i in dirs:
washed_filename = ''
wash_filename(str(i))
if str(i) != washed_filename and washed_filename != '':
os.system('mv "' + root + '"/"' + str(i) + '" "' + root + '"/"' + washed_filename + '"')
walking_file()
for root, dirs, files in os.walk(folder):
for file in files:
washed_filename = ''
wash_filename(str(file))
if str(file) != washed_filename and washed_filename != '':
os.system('mv "' + root + '"/"' + file + '" "' + root + '"/"' + washed_filename + '"')
washFiles()
except Exception:
logger.error(current_time() + ' - An error occurred:\n' + traceback.format_exc())
def washFiles():
    """Scrub sensitive values out of every staged file.

    For each directory under ``tmpLocation`` every file is rewritten in
    place, line by line: IPv4/IPv6 addresses, MAC addresses, IMSIs
    (decimal and hex), hostnames, usernames and URLs matched by the
    module-level regex patterns are replaced via the corresponding
    check-and-replace helpers.  A ``washingreport.txt`` summarising the
    substitution dictionaries is dropped into each directory before it is
    moved to ``outboxLocation``.
    """
    try:
        with open(script_log, 'a') as f:
            f.write(current_time() + ' - Done unzipping all folders\n')
        logger.info(current_time() + ' - Done unzipping all folders')
        tmp_dirs = os.listdir(tmpLocation)
        for directory in tmp_dirs:
            # Walking down the file structure in the inbox file and washing each file.
            # NOTE(review): os.walk(directory) is relative to the current
            # working directory -- presumably cwd == tmpLocation; confirm.
            for root, dirs, files in os.walk(directory):
                for file in files:
                    logger.info(current_time() + ' - Now washing ' + file)
                    newFileContent = ''
                    # 'r+' so the same handle can be truncated and rewritten
                    # once all lines are processed.
                    with open(root + '/' + file, 'r+') as logFile:
                        for line in logFile:
                            # Going through each line in the file and finding regex matches
                            currentLine = line.strip()
                            ipv4MatchesInLine = regEx.findall(ipv4Pattern, line)
                            ipv6MatchesInLine = regEx.findall(ipv6Pattern, line)
                            imsiMatchesInLine = regEx.findall(imsiPattern, line)
                            imsiHexMatchesInLine = regEx.findall(imsiHexPattern, line)
                            macMatchesInLine = regEx.findall(macPattern, line)
                            hostnameMatchesInLine = regEx.findall(hostnamePattern, line)
                            usernameMatchesInLine = regEx.findall(usernamePattern, line)
                            urlMatchesInLine = regEx.findall(urlPattern, line)
                            ipv4Array = []
                            ipv6Array = []
                            imsiArray = []
                            imsiHexArray = []
                            macArray = []
                            hostnameArray = []
                            usernameArray = []
                            urlArray = []
                            # Add all matches to their respective array
                            for i in range(len(ipv4MatchesInLine)):
                                ipv4Array.append(replaceCharsInTuple(str(ipv4MatchesInLine[i])))
                            for i in range(len(ipv6MatchesInLine)):
                                ipv6Array.append(replaceIpv6Tuple(ipv6MatchesInLine[i]))
                            for i in range(len(macMatchesInLine)):
                                macArray.append(macMatchesInLine[i])
                            for i in range(len(imsiMatchesInLine)):
                                imsiArray.append(imsiMatchesInLine[i])
                            for i in range(len(imsiHexMatchesInLine)):
                                imsiHexArray.append(InsertSpaceInTupleImsiHex(str(imsiHexMatchesInLine[i])))
                            for i in range(len(hostnameMatchesInLine)):
                                hostnameArray.append(hostnameMatchesInLine[i])
                            for i in range(len(usernameMatchesInLine)):
                                usernameArray.append(usernameMatchesInLine[i])
                            for i in range(len(urlMatchesInLine)):
                                urlArray.append(urlMatchesInLine[i])
                            # Replace all matches found with the other entry in the dictionary
                            for ipv4 in ipv4Array:
                                replacedIpv4Address = checkIfIpv4ExistsAndReplace(ipv4)
                                newLine = currentLine.replace(ipv4, replacedIpv4Address)
                                currentLine = newLine  # Overwrite current line with newLine to reflect changes made
                            for ipv6 in ipv6Array:
                                replacedIpv6Address = checkIfIpv6ExistsAndReplace(ipv6)
                                newLine = currentLine.replace(ipv6, str(replacedIpv6Address))
                                currentLine = newLine  # Overwrite current line with newLine to reflect changes made
                            for mac in macArray:
                                replacedMacAddress = checkIfMacExistsAndReplace(mac.lower())
                                newLine = currentLine.replace(mac, replacedMacAddress)
                                currentLine = newLine  # Overwrite current line with newLine to reflect changes made
                            for imsi in imsiArray:
                                replacedImsiAddress = checkIfImsiExistsAndReplace(imsi)
                                newLine = currentLine.replace(imsi, replacedImsiAddress)
                                currentLine = newLine  # Overwrite current line with newLine to reflect changes made
                            for imsiHex in imsiHexArray:
                                replacedImsiHexAddress = checkIfImsiHexExistsAndReplace(imsiHex)
                                newLine = currentLine.replace(str(imsiHex), str(replacedImsiHexAddress))
                                currentLine = newLine  # Overwrite current line with newLine to reflect changes made
                            for hostname in hostnameArray:
                                if hostname.lower() not in hostnameFound:
                                    hostnameFound.append(hostname.lower())
                                replacedHostnameAddress = checkIfHostnameExistsAndReplace(hostname.lower())
                                newLine = currentLine.replace(hostname, replacedHostnameAddress)
                                currentLine = newLine  # Overwrite current line with newLine to reflect changes made
                            for username in usernameArray:
                                if username.lower() not in usernameFound:
                                    usernameFound.append(username.lower())
                                replacedUsernameAddress = checkIfUsernameExistsAndReplace(username.lower())
                                newLine = currentLine.replace(username, str(replacedUsernameAddress))
                                currentLine = newLine  # Overwrite current line with newLine to reflect changes made
                            for url in urlArray:
                                if url.lower() not in urlFound:
                                    urlFound.append(url.lower())
                                replacedurlAddress = checkIfUrlExistsAndReplace(url.lower())
                                newLine = currentLine.replace(url, replacedurlAddress)
                                currentLine = newLine  # Overwrite current line with newLine to reflect changes made
                            newFileContent += currentLine + '\n'
                        logFile.truncate(0)  # Remove old content of file
                        logFile.seek(0)  # Start writing from index 0
                        logFile.write(newFileContent)
                    logger.info(current_time() + ' - ###Done washing ' + file + '###')
            os.system('touch ' + directory + '/washingreport.txt')
            # Making a file which contains all matches found and what they are changed to
            with open(directory + '/washingreport.txt', 'r+') as reportfile:
                stringToWrite = ('########################\nResult from washing\n########################\n\n' +
                                 'Ipv4 dictionary:\n' + str(ipv4AddressMatches) +
                                 '\n\nIpv6 dictionary:\n' + str(ipv6AddressMatches) +
                                 '\n\nImsi dictionary:\n' + str(imsiMatches) +
                                 '\n\nImsi hex dictionary:\n' + str(imsiHexMatches) +
                                 '\n\nMac address dictionary:\n' + str(macMatches) +
                                 '\n\nUsernames found:\n' + str(usernameFound) +
                                 '\n\nHostnames found:\n' + str(hostnameFound) +
                                 '\n\nUrl found:\n' + str(urlFound) + '\n')
                reportfile.write(stringToWrite)
            # Move all files washed in tmp folder to the respective outbox folder
            os.system('chmod 777 ' + directory)
            os.system('mv ' + directory + ' ' + outboxLocation)
            logger.info('\n\n' + current_time() + '\n########################\nMoved files to: ' + outboxLocation + directory +
                        '\n########################\n')
        with open(script_log, 'a') as f:
            f.write(str(current_time()) + ' - Washing is complete\n')
        logger.info(current_time() + ' - Washing is complete\n')
    except Exception:
        logger.error(current_time() + ' - An error occurred:\n' + traceback.format_exc())
def move_files():
    """Stage inbox content into a timestamped tmp directory and unzip it.

    Exits immediately when both inbox and tmp are empty.  If a previous
    run left content in ``tmpLocation`` (and the inbox is empty),
    processing resumes straight at check_folder().
    """
    try:
        content_inbox = os.listdir(inboxLocation)
        content_tmp = os.listdir(tmpLocation)
        logger.info(current_time() + ' - Script starting')
        if len(content_inbox) == 0 and len(content_tmp) == 0:
            quit()
        if len(content_inbox) != 0 and len(content_tmp) == 0:
            # Poll the inbox with `du` until two consecutive samples match,
            # i.e. no files are still being transferred.
            # NOTE(review): '/nfs/data/inbox/' is hard-coded here while the
            # rest of the function uses inboxLocation -- confirm they refer
            # to the same directory.
            while True:
                sftp_files = subprocess.check_output('du -H -d1 /nfs/data/inbox/', shell=True).decode()
                time.sleep(2)
                sftp_files_2 = subprocess.check_output('du -H -d1 /nfs/data/inbox/', shell=True).decode()
                if str(sftp_files) == str(sftp_files_2):
                    break
                if str(sftp_files) != str(sftp_files_2):
                    with open(script_log, 'a') as f:
                        f.write(str(current_time()) + ' - Files are still being transferred, will wait to start script\n')
                    logger.warning(current_time() + ' - Files are still being transferred, will wait to start script')
                    time.sleep(2)
            # Make a new directory in /tmp with date and time
            os.system('mkdir ' + tmpDir)
            os.system('cp -r ' + inboxLocation + '* ' + tmpDir)
            os.system('rm -rf ' + inboxLocation + '*')
            os.system('chmod -R 777 ' + tmpDir)
            tmp_folder_content = os.listdir(tmpDir)
            # First unzip pass over the top-level artefacts only; nested
            # archives are handled later by unzipFiles() via check_folder().
            for folder in tmp_folder_content:
                if folder.endswith(zipEnd):
                    logger.info(current_time() + ' - Unzipping: ' + tmpDir + '/"' + folder + '"')
                    newZipName = folder.replace('.zip', '')
                    os.system('mkdir ' + str(tmpDir) + '/"' + str(newZipName) + '"')
                    os.system('unzip -o -qq ' + str(tmpDir) + '/"' + str(folder) + '" -d ' + str(tmpDir) + '/"' +
                              newZipName + '"')
                    os.system('rm -rf ' + str(tmpDir) + '/"' + str(folder) + '"')
                if folder.endswith(gzEnd):
                    logger.info(current_time() + ' - Unzipping gz file: ' + tmpDir + '/"' + folder + '"')
                    os.system('gzip -fd ' + str(tmpDir) + '/"' + str(folder) + '"')
                if folder.endswith(targzEnd) or folder.endswith(tarEnd):
                    logger.info(current_time() + ' - Unzipping tar file: ' + tmpDir + '/"' + folder + '"')
                    newTarName1 = folder.replace('.gz', '')
                    newTarName2 = newTarName1.replace('.tar', '')
                    os.system('mkdir ' + str(tmpDir) + '/"' + newTarName2 + '"')
                    os.system('tar -xf ' + str(tmpDir) + '/"' + str(folder) + '" -C ' + str(tmpDir) + '/"' +
                              newTarName2 + '"')
                    os.system('rm -rf ' + str(tmpDir) + '/"' + folder + '"')
            with open(script_log, 'a') as f:
                f.write(str(current_time()) + ' - Done unzipping root folder\n')
            logger.info(current_time() + ' - Done unzipping root folder')
            check_folder()
        if len(content_tmp) != 0 and len(content_inbox) == 0:
            check_folder()
    except Exception:
        logger.error(current_time() + ' - An error occurred:\n' + traceback.format_exc())
def check_folder():
    """Count compressed artefacts under ``tmpLocation`` and start unzipping.

    unzipFiles() recurses once per archive, so when the archive count
    approaches Python's default recursion limit the limit is raised first
    (to three times the archive count) before unzipping begins.
    """
    try:
        # Each `find` output ends with a trailing newline, so every split()
        # list carries one empty trailing element; the ``- 3`` below
        # corrects for the three of them.
        num_gz = subprocess.check_output('find ' + tmpLocation + ' -name "*gz"', shell=True).decode()
        num_gz = num_gz.split('\n')
        num_zipped = subprocess.check_output('find ' + tmpLocation + ' -name "*zip"', shell=True).decode()
        num_zipped = num_zipped.split('\n')
        num_tar = subprocess.check_output('find ' + tmpLocation + ' -name "*tar"', shell=True).decode()
        num_tar = num_tar.split('\n')
        total_zipped = len(num_gz) + len(num_zipped) + len(num_tar) - 3
        if total_zipped >= 500:
            new_limit = total_zipped * 3
            sys.setrecursionlimit(new_limit)
            limit = str(new_limit)
            with open(script_log, 'a') as f:
                f.write(str(current_time()) + ' - Amount of zipped files will exceed max amount of calls, will change '
                                              'recursion limit to: ' + limit + '\n')
            logger.warning(current_time() + ' - Amount of zipped files will exceed max amount of calls, will change '
                                            'recursion limit to: ' + limit)
        # BUG FIX: the original used two independent tests (``>= 500`` and
        # ``<= 500``), so a count of exactly 500 invoked unzipFiles()
        # twice.  Unzip exactly once in every case.
        unzipFiles()
    except Exception:
        logger.error(current_time() + ' - An error occurred:\n' + traceback.format_exc())
if __name__ == '__main__':
    # Script entry point: stage inbox files into tmp and start the run.
    move_files()
{
"api_name": "re.compile",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 33,
... |
# Explicit public API for ``from <module> import *``.
__all__ = [
    "JobItem",
]
import datetime
import top
class JobItem(top.Table):
"""job_item table ORM.
"""
_job = top.Job()
_agent_stocktake = top.AgentStocktake()
    def __init__(self):
        """Toll Outlet Portal job_item table initialiser.
        """
        # Bind this ORM object to the physical ``job_item`` table.
        super(JobItem, self).__init__('job_item')
    @property
    def schema(self):
        """Column DDL definitions for the ``job_item`` table."""
        return ["id INTEGER PRIMARY KEY",
                "job_id INTEGER",
                "connote_nbr CHAR(30)",
                "item_nbr CHAR(32)",
                "consumer_name CHAR(30)",
                "email_addr CHAR(60)",
                "phone_nbr CHAR(20)",
                "pieces INTEGER",
                "status INTEGER",
                "created_ts TIMESTAMP",
                "pickup_ts TIMESTAMP",
                "pod_name CHAR(40)",
                "identity_type_id INTEGER",
                "identity_type_data CHAR(30)",
                "extract_ts TIMESTAMP",
                "reminder_ts TIMESTAMP",
                "notify_ts TIMESTAMP"]
    def collected_sql(self, business_unit, ignore_pe=False):
        """SQL wrapper to extract the collected items from the "jobitems"
        table.

        **Args:**
            *business_unit*: the id relating to the job.bu_id value.

        **Kwargs:**
            *ignore_pe*: ``boolean`` flag to ignore job items whose
            parent job is Primary Elect (default ``False``)

        **Returns:**
            the SQL string

        """
        # Collected-but-not-yet-extracted rows: pickup_ts is set while
        # extract_ts is still NULL.  NOTE(review): values are interpolated
        # directly into the SQL -- callers must supply trusted input.
        sql = """SELECT ji.connote_nbr as 'REF1',
       ji.id as 'JOB_KEY',
       ji.pickup_ts as 'PICKUP_TIME',
       ji.pod_name as 'PICKUP_POD',
       it.description as 'IDENTITY_TYPE',
       ji.identity_type_data as 'IDENTITY_DATA',
       ji.item_nbr as 'ITEM_NBR',
       ag.code as 'AGENT_ID',
       ag.state as 'AGENT_STATE'
FROM job_item as ji, identity_type as it, job as j, agent as ag
WHERE pickup_ts IS NOT null
AND extract_ts IS null
AND ji.identity_type_id = it.id
AND j.agent_id = ag.id
AND (ji.job_id = j.id AND j.bu_id = %d)""" % business_unit
        if ignore_pe:
            # Primary Elect jobs carry service_code 3; exclude them.
            sql += """
AND (j.service_code != 3 OR j.service_code IS NULL)"""
        return sql
    def upd_collected_sql(self, id, time):
        """SQL wrapper to update the collected items from the "jobitems"
        table.

        **Args:**
            *id*: the id relating to the ``jobitem.id`` value.

            *time*: timestamp string written into ``jobitem.extract_ts``.

        **Returns:**
            the SQL string

        """
        sql = """UPDATE job_item
SET extract_ts = '%s'
WHERE id = %d""" % (time, id)
        return sql
    def upd_file_based_collected_sql(self, connote, item_nbr, time=None):
        """SQL wrapper to update the collected items from the "jobitems"
        table.

        This variant of the :meth:`upd_collected_sql` method is used
        to close of file-based extractions.  Both ``extract_ts`` and
        ``pickup_ts`` are stamped with the same value.

        **Args:**
            *connote*: connote value relating to the ``jobitem.connote_nbr``
            value

            *item_nbr*: connote value relating to the ``jobitem.item_nbr``
            value

        **Kwargs:**
            *time*: override the time to set from the current time

        **Returns:**
            the SQL string

        """
        if time is None:
            # Default: current local time at second precision.
            time = datetime.datetime.now().isoformat(' ').split('.', 1)[0]
        sql = """UPDATE %(name)s
SET extract_ts = '%(time)s', pickup_ts = '%(time)s'
WHERE connote_nbr = '%(connote)s'
AND item_nbr = '%(item_nbr)s'""" % {'name': self.name,
                                    'time': time,
                                    'connote': connote,
                                    'item_nbr': item_nbr}
        return sql
def connote_sql(self, connote):
"""SQL wrapper to extract records where job_item.connote_nbr
is equal to *connote*.
**Args:**
connote: Connote value relating to the job_item.connote_nbr
column.
**Returns:**
the SQL string
"""
sql = """SELECT id
FROM %s
WHERE connote_nbr = '%s'""" % (self.name, connote)
return sql
    def connote_item_nbr_sql(self, connote, item_nbr):
        """SQL wrapper to extract records where job_item.connote_nbr
        is equal to *connote* and job_item.item_nbr equals *item_nbr*.

        **Args:**
            connote: Connote value relating to the job_item.connote_nbr
            column.

            item_nbr: Item Number value relating to the job_item.item_nbr
            column.

        **Returns:**
            the SQL string

        """
        # Newest records first so callers can take the most recent match.
        sql = """SELECT id
FROM %s
WHERE connote_nbr = '%s'
AND item_nbr = '%s'
ORDER BY created_ts DESC""" % (self.name, connote, item_nbr)
        return sql
def item_number_sql(self, item_nbr):
"""SQL wrapper to extract records where job_item.item_nbr
is equal to *item_nbr*.
**Args:**
item_nbr: Item Number value relating to the job_item.item_nbr
column.
**Returns:**
the SQL string
"""
sql = """SELECT id
FROM %s
WHERE item_nbr = '%s'""" % (self.name, item_nbr)
return sql
    def uncollected_sql(self, start_date, uncollected_period):
        """SQL wrapper to extract the job_item records which remain
        uncollected after *uncollected_period* has elapsed.

        **Args:**
            start_date: lower bound applied to ``job_item.created_ts``

            uncollected_period: job_item.notify_ts value that defines
            an uncollected parcel

        **Returns:**
            the SQL string

        """
        # Candidates must have a contact channel (email or phone) and no
        # reminder sent yet.
        sql = """SELECT id
FROM job_item
WHERE (created_ts > '%s' AND notify_ts < '%s')
AND pickup_ts IS NULL
AND (email_addr != '' OR phone_nbr != '')
AND reminder_ts IS NULL""" % (start_date, uncollected_period)
        return sql
    def job_item_agent_details_sql(self, job_item_id):
        """SQL wrapper to extract the agent details against a *job_item_id*.

        SQL also returns additional information relating to the
        *job_item_id* such as:

        * ``jobitem.connote_nbr``
        * ``jobitem.item_nbr``
        * ``jobitem.notify_ts``
        * ``jobitem.created_ts``
        * ``jobitem.email_addr``
        * ``jobitem.phone_nbr``
        * ``jobitem.pickup_ts``

        and the *job.bu_id*.

        **Args:**
            job_item_id: the jobitem.id value to search against

        **Returns:**
            the SQL string

        """
        # Single-row lookup joining job_item -> job -> agent.
        sql = """SELECT ag.name,
       ag.address,
       ag.suburb,
       ag.postcode,
       ji.connote_nbr,
       ji.item_nbr,
       ji.notify_ts,
       ji.created_ts,
       ji.email_addr,
       ji.phone_nbr,
       ji.pickup_ts,
       j.bu_id
FROM job_item as ji, job as j, agent as ag
WHERE ji.job_id = j.id
AND j.agent_id = ag.id
AND ji.id = %d""" % job_item_id
        return sql
    def update_reminder_ts_sql(self, id, ts=None):
        """Convenience wrapper: update ``job_item.reminder_ts`` for *id*."""
        return self.update_timestamp_sql(id, column='reminder_ts', ts=ts)
    def update_notify_ts_sql(self, id, ts=None):
        """Convenience wrapper: update ``job_item.notify_ts`` for *id*."""
        return self.update_timestamp_sql(id, column='notify_ts', ts=ts)
    def update_timestamp_sql(self, id, column, ts=None):
        """SQL wrapper to update the ``job_item.reminder_ts`` to *ts*
        timestamp.

        **Args:**
            *id*: integer value relating to the ``job_item.id``

            *column*: the timestamp column to update

        **Kwargs:**
            *ts*: override the current time

        **Returns:**
            the SQL string

        """
        if ts is None:
            # Default: current local time at second precision (ISO format
            # with a space separator).
            ts = datetime.datetime.now().isoformat(' ').split('.', 1)[0]
        sql = """UPDATE %s
SET %s = '%s'
WHERE id = %d
""" % (self.name, column, ts, id)
        return sql
    def connote_base_primary_elect_job(self, connote):
        """SQL wrapper to verify if a *connote* is associated with a primary
        elect job.

        Primary elect jobs are identified by a integer value 3 in the
        ``job.service_code`` column.

        **Args:**
            connote: the jobitem.connote value to search against.

        **Returns:**
            the SQL string

        """
        # Only un-notified items with at least one contact channel qualify.
        sql = """SELECT ji.id
FROM job as j, %s as ji
WHERE ji.job_id = j.id
AND ji.connote_nbr = '%s'
AND ji.notify_ts IS NULL
AND (ji.email_addr != '' OR ji.phone_nbr != '')
AND j.service_code = 3""" % (self.name, connote)
        return sql
    def uncollected_jobitems_sql(self,
                                 service_code=3,
                                 bu_ids=None,
                                 delivery_partners=None,
                                 day_range=14):
        """SQL wrapper to extract uncollected Service Code-based jobs.

        Service Code jobs are identified by a integer value in the
        ``job.service_code`` column.

        Query will ignore records that have either white space or
        a spurious ``.`` in the email or phone number columns.

        The *bu_ids* relate to the ``job.bu_id`` column.

        **Kwargs:**
            *service_code*: value relating to the ``job.service_code``
            column (default ``3`` for Primary Elect)

            *bu_ids*: integer based tuple of Business Unit ID's to search
            against (default ``None`` ignores all Business Units)

            *delivery_partners*: string based tuple of Delivery Partner
            names to limit the result set against (values as per the
            ``delivery_partner.name`` table set)

            *day_range*: number of days from current time to include
            in search (default 14.0 days)

        **Returns:**
            the SQL string

        """
        if bu_ids is None:
            bu_ids = tuple()
        # Single-element tuples render as "(1,)" which is not valid SQL --
        # format them by hand.
        if len(bu_ids) == 1:
            bu_ids = '(%d)' % bu_ids[0]
        if delivery_partners is None:
            delivery_partners = tuple()
        if len(delivery_partners) == 1:
            delivery_partners = "('%s')" % delivery_partners[0]
        now = datetime.datetime.now()
        start_ts = now - datetime.timedelta(days=day_range)
        start_date = start_ts.strftime('%Y-%m-%d %H:%M:%S')
        sql = """SELECT ji.id, ji.connote_nbr, ji.item_nbr
FROM job AS j, %(name)s AS ji, agent AS ag, delivery_partner AS dp
WHERE ji.job_id = j.id
AND j.agent_id = ag.id
AND ag.dp_id = dp.id
AND dp.name IN %(dps)s
AND ji.pickup_ts IS NULL
AND ji.notify_ts IS NULL
AND (ji.email_addr NOT IN ('', '.') OR ji.phone_nbr NOT IN ('', '.'))
AND j.bu_id IN %(bu_ids)s
AND j.service_code = %(sc)d
AND ji.created_ts > '%(start_date)s'""" % {'name': self.name,
                                           'dps': str(delivery_partners),
                                           'bu_ids': str(bu_ids),
                                           'sc': service_code,
                                           'start_date': start_date}
        return sql
    def reference_sql(self,
                      bu_ids,
                      reference_nbr=None,
                      picked_up=False,
                      delivery_partners=None,
                      columns=None,
                      alias='ji'):
        """Extract connote_nbr/item_nbr against *reference_nbr*.

        Query is an ``OR`` against both ``connote_nbr`` and ``item_nbr``.

        **Args:**
            *bu_ids*: integer based tuple of Business Unit ID's to search
            against (default ``None`` ignores all Business Units)

        **Kwargs:**
            *reference_nbr*: parcel ID number as scanned by the agent.  If
            ``None``, then the values from the ``agent_stocktake`` table
            will be used.

            *picked_up*: boolean flag that will extract ``job_items``
            that have been picked up if ``True``.  Otherwise, will extract
            ``job_items`` that have not been picked up if ``False``.

            *delivery_partners*: string based list of Delivery Partner
            names to limit result set against.  For example,
            ``['Nparcel', 'Toll']``.  The values supported are as per
            the ``delivery_partner.name`` table set

            *columns*: string prepresentation of the columns to query
            against

            *alias*: table alias (default ``ji``)

        **Returns:**
            the SQL string

        """
        if columns is None:
            columns = self._select_columns(alias)
        if not picked_up:
            pickup_sql = 'IS NULL'
        else:
            pickup_sql = 'IS NOT NULL'
        # Single-element tuples render as "(1,)" -- not valid SQL.
        if len(bu_ids) == 1:
            bu_ids = '(%d)' % bu_ids[0]
        ref = reference_nbr
        if reference_nbr is None:
            # No explicit reference: fall back to the scanned references
            # held in the agent_stocktake table.
            ref = self._agent_stocktake.reference_sql()
        dp_sql = str()
        if delivery_partners is not None:
            dps = ', '.join(["'%s'" % x for x in delivery_partners])
            dps = '(%s)' % dps
            if len(delivery_partners) == 1:
                dps = "('%s')" % delivery_partners[0]
            dp_sql = """AND dp.name IN %s AND ag.dp_id = dp.id""" % dps
        # Direct connote/item matches are UNIONed with matches made via
        # job.card_ref_nbr.
        union_sql = self.job_based_reference_sql(bu_ids,
                                                 ref,
                                                 picked_up,
                                                 delivery_partners,
                                                 columns)
        sql = """SELECT DISTINCT %(columns)s
FROM %(name)s as %(alias)s,
     job AS j,
     agent AS ag,
     agent_stocktake AS st,
     delivery_partner AS dp
WHERE %(alias)s.job_id = j.id
AND ag.id = st.agent_id
AND j.bu_id IN %(bu_ids)s
AND j.agent_id = ag.id
AND (%(alias)s.connote_nbr IN (%(ref)s)
     OR %(alias)s.item_nbr IN (%(ref)s))
AND %(alias)s.pickup_ts %(pickup_sql)s
%(dp_sql)s
UNION
%(union)s""" % {'columns': columns,
                'bu_ids': str(bu_ids),
                'name': self.name,
                'ref': ref,
                'alias': alias,
                'union': union_sql,
                'pickup_sql': pickup_sql,
                'dp_sql': dp_sql}
        return sql
def job_based_reference_sql(self,
bu_ids,
reference_nbr,
picked_up=False,
delivery_partners=None,
columns=None,
alias='ji'):
"""Extract connote_nbr/item_nbr against *reference_nbr* matched
to the ``job.card_ref_nbr``.
Query is an ``OR`` against both ``connote_nbr`` and ``item_nbr``.
**Args:**
*bu_ids*: integer based tuple of Business Unit ID's to search
against (default ``None`` ignores all Business Units)
*reference_nbr*: parcel ID number as scanned by the agent
**Kwargs:**
*picked_up*: boolean flag that will extract ``job_items``
that have been picked up if ``True``. Otherwise, will extract
``job_items`` that have not been picked up if ``False``.
*delivery_partners*: string based list of Delivery Partner
names to limit result set against. For example,
``['Nparcel', 'Toll']``. The values supported are as per
the ``delivery_partner.name`` table set
*columns*: string prepresentation of the columns to query
against
*alias*: table alias
**Returns:**
the SQL string
"""
if columns is None:
columns = self._select_columns(alias)
pickup_sql = 'AND %s.pickup_ts ' % alias
if not picked_up:
pickup_sql += 'IS NULL'
else:
pickup_sql += 'IS NOT NULL'
if len(bu_ids) == 1:
bu_ids = '(%d)' % bu_ids[0]
dp_sql = str()
if delivery_partners is not None:
dps = ', '.join(["'%s'" % x for x in delivery_partners])
dps = '(%s)' % dps
if len(delivery_partners):
dps = "('%s')" % delivery_partners[0]
dp_sql = """AND dp.name IN %s AND ag.dp_id = dp.id""" % dps
sql = """SELECT DISTINCT %(columns)s
FROM %(name)s AS %(alias)s,
job AS j,
agent AS ag,
delivery_partner AS dp,
agent_stocktake AS st
WHERE %(alias)s.job_id = j.id
AND j.bu_id IN %(bu_ids)s
AND j.agent_id = ag.id
%(dp_sql)s
AND %(alias)s.job_id IN
(
%(sql)s
)
%(pickup_sql)s""" % {'columns': columns,
'bu_ids': bu_ids,
'name': self.name,
'sql': self._job.reference_sql(reference_nbr),
'alias': alias,
'pickup_sql': pickup_sql,
'dp_sql': dp_sql}
return sql
    def _select_columns(self, alias='ji'):
        """Helper method that captures required columns in the
        uncollected aged report query.

        **Kwargs:**
            *alias*: table alias

        **Returns:**
            the SQL string

        """
        # The trailing correlated subqueries resolve the agent_stocktake
        # agent (ST_* columns) whose scanned reference matches this row's
        # connote, card reference or item number.
        columns = """%(alias)s.id as JOB_ITEM_ID,
j.bu_id as JOB_BU_ID,
%(alias)s.connote_nbr as CONNOTE_NBR,
j.card_ref_nbr as BARCODE,
%(alias)s.item_nbr as ITEM_NBR,
j.job_ts as JOB_TS,
%(alias)s.created_ts as CREATED_TS,
%(alias)s.notify_ts as NOTIFY_TS,
%(alias)s.pickup_ts as PICKUP_TS,
%(alias)s.pieces as PIECES,
%(alias)s.consumer_name as CONSUMER_NAME,
ag.dp_code as DP_CODE,
ag.code as AGENT_CODE,
ag.name as AGENT_NAME,
ag.address as AGENT_ADDRESS,
ag.suburb as AGENT_SUBURB,
ag.state as AGENT_STATE,
ag.postcode as AGENT_POSTCODE,
ag.phone_nbr as AGENT_PHONE_NBR,
(SELECT DISTINCT ag.dp_code
 FROM agent_stocktake AS st, agent AS aag
 WHERE (%(alias)s.connote_nbr = st.reference_nbr
        OR j.card_ref_nbr = st.reference_nbr
        OR %(alias)s.item_nbr = st.reference_nbr)
 AND st.agent_id = aag.id) AS ST_DP_CODE,
(SELECT DISTINCT ag.code
 FROM agent_stocktake AS st, agent AS aag
 WHERE (%(alias)s.connote_nbr = st.reference_nbr
        OR j.card_ref_nbr = st.reference_nbr
        OR %(alias)s.item_nbr = st.reference_nbr)
 AND st.agent_id = aag.id) AS ST_AGENT_CODE,
(SELECT DISTINCT ag.name
 FROM agent_stocktake AS st, agent AS aag
 WHERE (%(alias)s.connote_nbr = st.reference_nbr
        OR j.card_ref_nbr = st.reference_nbr
        OR %(alias)s.item_nbr = st.reference_nbr)
 AND st.agent_id = aag.id) AS ST_AGENT_NAME""" % {'alias': alias}
        return columns
    def non_compliance_sql(self,
                           bu_ids,
                           picked_up=False,
                           delivery_partners=None,
                           alias='ji'):
        """Extract ``job_item`` detail of all items in the ``job_item``
        table that do not exist in the ``agent_stocktake`` table.

        Senarios are based on the *picked_up* flag.  For example, all
        parcels that *have* been picked up or *have not* been picked up.

        **Args:**
            *bu_ids*: integer based tuple of Business Unit ID's to search
            against (default ``None`` ignores all Business Units)

        **Kwargs:**
            *picked_up*: boolean flag that will extract ``job_items``
            that have been picked up if ``True``.  Otherwise, will extract
            ``job_items`` that have not been picked up if ``False``.

            *delivery_partners*: string based list of Delivery Partner
            names to limit result set against.  For example,
            ``['Nparcel', 'Toll']``.  The values supported are as per
            the ``delivery_partner.name`` table set

            *alias*: table alias (default ``ji``)

        **Returns:**
            the SQL string

        """
        if bu_ids is None:
            bu_ids = tuple()
        if len(bu_ids) == 1:
            bu_ids = '(%d)' % bu_ids[0]
        columns = self._select_columns(alias)
        # The inner reference query only needs the job_item primary key.
        col = 'ji.id'
        if not picked_up:
            pickup_sql = 'IS NULL'
        else:
            pickup_sql = 'IS NOT NULL'
        dp_sql = str()
        if delivery_partners is not None:
            dps = ', '.join(["'%s'" % x for x in delivery_partners])
            dps = '(%s)' % dps
            if len(delivery_partners) == 1:
                dps = "('%s')" % delivery_partners[0]
            dp_sql = """AND dp.name IN %s AND ag.dp_id = dp.id""" % dps
        sql = """SELECT DISTINCT %(columns)s
FROM %(name)s AS %(alias)s, job AS j, agent AS ag, delivery_partner AS dp
WHERE %(alias)s.job_id = j.id
AND j.agent_id = ag.id
AND %(alias)s.pickup_ts %(pickup_sql)s
%(dp_sql)s
AND ji.id NOT IN
(%(sql)s)""" % {'columns': columns,
                'alias': alias,
                'name': self.name,
                'pickup_sql': pickup_sql,
                'dp_sql': dp_sql,
                'sql': self.reference_sql(bu_ids=bu_ids,
                                          picked_up=picked_up,
                                          delivery_partners=delivery_partners,
                                          columns=col)}
        return sql
    def total_agent_stocktake_parcel_count_sql(self,
                                               bu_ids,
                                               picked_up=False,
                                               delivery_partners=None,
                                               day_range=7,
                                               alias='ji'):
        """Sum ``agent_stocktake`` based parcel counts per ADP based on
        *picked_up*.

        Query is an ``OR`` against both ``connote_nbr`` and ``item_nbr``.

        **Args:**
            *bu_ids*: integer based tuple of Business Unit ID's to search
            against (default ``None`` ignores all Business Units)

        **Kwargs:**
            *picked_up*: boolean flag that will extract ``job_items``
            that have been picked up if ``True``.  Otherwise, will extract
            ``job_items`` that have not been picked up if ``False``.

            *delivery_partners*: string based tuple of Delivery Partner
            names to limit result set against.  For example,
            ``['top', 'toll']``.  The values supported are as per
            the ``delivery_partner.name`` table set

            *day_range*: number of days from current time to include
            in the agent_stocktake table search (default 7 days)

            *alias*: table alias (default ``ji``)

        **Returns:**
            the SQL string

        """
        now = datetime.datetime.now()
        start_ts = now - datetime.timedelta(days=day_range)
        start_date = start_ts.strftime('%Y-%m-%d %H:%M:%S')
        if not picked_up:
            pickup_sql = 'IS NULL'
        else:
            pickup_sql = 'IS NOT NULL'
        if len(bu_ids) == 1:
            bu_ids = '(%d)' % bu_ids[0]
        col = 'st.reference_nbr'
        sql_ref = self.reference_sql(bu_ids=bu_ids,
                                     picked_up=picked_up,
                                     delivery_partners=delivery_partners,
                                     columns=col)
        # NOTE(review): the AGENT_PIECES subquery joins ``agent AS ag``
        # without constraining it -- COUNT(DISTINCT ...) appears to mask
        # the resulting cross join; confirm it cannot inflate the count.
        sql = """SELECT DISTINCT agent.dp_code AS DP_CODE,
       agent.code AS AGENT_CODE,
       agent.name AS AGENT_NAME,
       (SELECT MAX(st.created_ts)
        FROM agent_stocktake AS st, agent AS ag
        WHERE ag.code = agent.code
        AND ag.id = st.agent_id) AS STOCKTAKE_CREATED_TS,
       (SELECT COUNT(DISTINCT ags.reference_nbr)
        FROM agent_stocktake AS ags, agent AS ag
        WHERE ags.agent_id = agent.id
        AND ags.created_ts > '%(start_date)s') AS AGENT_PIECES,
       (SELECT COUNT(ji.id)
        FROM job_item AS ji, job AS j, agent AS ag
        WHERE ag.id = agent.id
        AND j.agent_id = ag.id
        AND j.id = ji.job_id
        AND ji.pickup_ts %(pickup_sql)s) AS TPP_PIECES
FROM agent AS agent, agent_stocktake AS agent_stocktake
WHERE agent_stocktake.agent_id = agent.id
AND reference_nbr != ''
AND agent_stocktake.created_ts > '%(start_date)s'
AND agent_stocktake.reference_nbr IN (%(sql_ref)s)
GROUP BY agent.id,
         agent.dp_code,
         agent.code,
         agent.name,
         agent_stocktake.created_ts,
         agent_stocktake.reference_nbr""" % {'sql_ref': sql_ref,
                                             'start_date': start_date,
                                             'pickup_sql': pickup_sql}
        return sql
    def total_parcel_count_sql(self,
                               picked_up=False,
                               delivery_partners=None,
                               alias='ji'):
        """Sum parcel counts per ADP based on *picked_up*.

        **Kwargs:**
            *picked_up*: boolean flag that will extract ``job_items``
            that have been picked up if ``True``.  Otherwise, will extract
            ``job_items`` that have not been picked up if ``False``.

            *delivery_partners*: string based tuple of Delivery Partner
            names to limit result set against.  For example,
            ``['Nparcel', 'Toll']``.  The values supported are as per
            the ``delivery_partner.name`` table set

            *alias*: table alias (default ``ji``)

        **Returns:**
            the SQL string

        """
        if not picked_up:
            pickup_sql = 'IS NULL'
        else:
            pickup_sql = 'IS NOT NULL'
        dp_sql = str()
        if delivery_partners is not None:
            dps = ', '.join(["'%s'" % x for x in delivery_partners])
            dps = '(%s)' % dps
            if len(delivery_partners) == 1:
                dps = "('%s')" % delivery_partners[0]
            dp_sql = """AND dp.name IN %s AND ag.dp_id = dp.id""" % dps
        # Aggregates the ``pieces`` column (parcel counts), not row counts.
        sql = """SELECT SUM(%(alias)s.pieces)
FROM %(name)s AS %(alias)s, job AS j, agent AS ag, delivery_partner AS dp
WHERE %(alias)s.job_id = j.id
AND j.agent_id = ag.id
%(dp_sql)s
AND %(alias)s.pickup_ts %(pickup_sql)s""" % {'name': self.name,
                                             'dp_sql': dp_sql,
                                             'pickup_sql': pickup_sql,
                                             'alias': alias}
        return sql
    def agent_id_of_aged_parcels(self,
                                 period=7,
                                 delivery_partners=None,
                                 alias='ji'):
        """SQL to provide a distinct list of agents that have an
        aged parcel.

        **Kwargs:**
            *period*: time (in days) from now that is the cut off for
            agent compliance (default 7 days)

            *delivery_partners*: string based tuple of Delivery Partner
            names to limit result set against.  For example,
            ``['top', 'toll']``.  The values supported are as per
            the ``delivery_partner.name`` table set

            *alias*: table alias (default ``ji``)

        **Returns:**
            the SQL string

        """
        now = datetime.datetime.now()
        ts = now - datetime.timedelta(days=period)
        date = ts.strftime('%Y-%m-%d %H:%M:%S')
        dps = delivery_partners
        kwargs = {'period': period,
                  'delivery_partners': dps}
        # Start from the stocktake compliance query and narrow it to
        # agents that actually hold an aged (uncollected) parcel.
        compliance_sql = self._agent_stocktake.compliance_sql(**kwargs)
        sql = """%(compliance_sql)s
AND ag.id IN
(SELECT DISTINCT(j.agent_id)
 FROM job as j, %(name)s AS %(alias)s, agent AS ag
 WHERE %(alias)s.job_id = j.id
 AND %(alias)s.created_ts < '%(date)s'
 AND %(alias)s.pickup_ts IS NULL)""" % {'compliance_sql': compliance_sql,
                                        'name': self.name,
                                        'alias': alias,
                                        'date': date}
        return sql
| loum/top | top/table/jobitem.py | jobitem.py | py | 27,503 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "top.Table",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "top.Job",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "top.AgentStocktake",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
... |
36412894737 | import numpy as np
import cv2
import glob
import os
import argparse
# Command-line interface: an input directory of .jpg images and an output
# directory that receives copied images plus all-zero ("background only")
# label masks.
parser = argparse.ArgumentParser()
# BUG FIX: every option previously carried the copy-pasted help text
# "raw image path", which was wrong for --output_path.
parser.add_argument("--rgb_path", type=str, help="directory containing the input .jpg images", default='./data_augmentation/raw_data/bg_img/select/total/')
# parser.add_argument("--mask_path", type=str, help="raw image path", default='./data_augmentation/raw_data/bg_img/select/select_rgb/')
# parser.add_argument("--obj_mask_path", type=str, help="raw image path", default='./data_augmentation/raw_data/obj_mask/')
# parser.add_argument("--bg_path", type=str, help="raw image path", default='./data_augmentation/raw_data/bg_img/select/select_rgb/')
parser.add_argument("--output_path", type=str, help="directory that receives the rgb/ images and gt/ masks", default='./data_augmentation/raw_data/bg_img/save_bg/')
args = parser.parse_args()
if __name__ == '__main__':
    # Create the output tree up front; cv2.imwrite silently returns False
    # (writing nothing) when the target directory does not exist, and the
    # original only created the top-level output_path -- not the rgb/ and
    # gt/ leaf folders the writes below target.
    os.makedirs(os.path.join(args.output_path, 'rgb'), exist_ok=True)
    os.makedirs(os.path.join(args.output_path, 'gt'), exist_ok=True)

    rgb_list = glob.glob(os.path.join(args.rgb_path, '*.jpg'))

    idx = 0
    for rgb_idx in rgb_list:
        print(rgb_idx)
        idx += 1
        # (The original derived file_name via rgb_idx.split('/')[5]; it was
        # unused and assumed a fixed directory depth, so it was dropped.)
        rgb_img = cv2.imread(rgb_idx)
        # All-background label: a zero mask with the image's spatial size,
        # expanded to a trailing single channel.
        zero_mask = np.zeros(rgb_img.shape[:2], dtype=np.uint8)
        zero_mask = np.expand_dims(zero_mask, axis=-1)
        cv2.imwrite(args.output_path + 'rgb/' + 'bg2_image_idx_{0}_'.format(idx) + '_rgb.jpg', rgb_img)
        # BUG FIX: the mask file name previously began with a stray space
        # (' bg2_image_idx_...'), breaking the rgb/mask naming pairing.
        cv2.imwrite(args.output_path + 'gt/' + 'bg2_image_idx_{0}_'.format(idx) + '_mask.png', zero_mask)
# cv2.imwrite(args.obj_mask_path + file_name + '.png', zero_mask) | chansoopark98/Tensorflow-Keras-Semantic-Segmentation | data_augmentation/make_blank_label.py | make_blank_label.py | py | 1,674 | python | en | code | 12 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_n... |
70004662115 | #!flask/bin/python
from flask import Flask, jsonify, request, json
import requests
import time
from core import test_class
from BeautifulSoup import BeautifulSoup
app = Flask(__name__)  # Flask application object used by all route handlers below
@app.route('/ytc/search', methods=['GET'])
def get_tasks():
    """Scrape the Google results page for the `q` query parameter and
    return the cited result URLs as JSON: {"links": [...]}.
    NOTE(review): `BeautifulSoup` here is the legacy BS3 package, and
    Google's markup changes often -- the 'res'/'cite' selectors are
    fragile and `div` may come back None (AttributeError); confirm
    against current markup.
    """
    start_time = time.time()  # only used for the timing printout below
    q1 = request.args.get('q')
    url = 'https://www.google.co.in/search?q='+q1
    response = requests.get(url)
    html = response.content
    soup = BeautifulSoup(html)
    # Organic results live in the div with id="res"; each result URL is
    # rendered inside a <cite> element.
    div = soup.find('div', attrs={'id': 'res'})
    jsonArr = []
    for row in div.findAll('cite'):
        jsonArr.append(row.text)
    print("--- %s seconds ---" % (time.time() - start_time))
    return jsonify({'links': jsonArr})
@app.route('/ytc/redosearch', methods=['GET'])
def get_tasks_redo():
    """Search Google twice for the `q` parameter (suffixed with 'wiki'
    and then 'youtuber'), collect the cited result URLs, and return only
    the Wikipedia links as JSON: {"links": [...]}.

    Returns:
        flask.Response: JSON payload with a "links" list (possibly empty).
    """
    start_time = time.time()
    q1 = request.args.get('q')

    def scrape_cites(query):
        """Return the <cite> texts from Google's results div for `query`."""
        response = requests.get('https://www.google.co.in/search?q=' + query)
        soup = BeautifulSoup(response.content)
        div = soup.find('div', attrs={'id': 'res'})
        # Google occasionally serves markup without the 'res' div
        # (e.g. a captcha page); treat that as "no results" instead of
        # crashing with an AttributeError.
        if div is None:
            return []
        return [row.text for row in div.findAll('cite')]

    # The two suffixed searches were previously duplicated inline; both
    # now go through the same helper, in the same order as before.
    jsonArr = []
    for suffix in ('wiki', 'youtuber'):
        jsonArr.extend(scrape_cites(q1 + suffix))
    jsonArr = [k for k in jsonArr if 'wikipedia' in k]
    print("--- %s seconds ---" % (time.time() - start_time))
    return jsonify({'links': jsonArr})
# Canned moderation-category percentages served by /pace/predict.
# NOTE(review): these look like static placeholder values, not live
# model output -- confirm before relying on them.
tasks = [
    {
        'Approve': 72,
        'Spam': 18.5,
        'Hate Speech': 2.3,
        'Far Right': 0,
        'Harassment' : 7.2
    }
]
@app.route('/pace/predict', methods=['GET'])
def get_pace_data():
    """Return the static `tasks` prediction payload as JSON."""
    return jsonify(tasks)
@app.route('/pace/message', methods = ['POST'])
def api_message():
    """Echo a POSTed message back, keyed off the Content-Type header.

    text/plain       -> "Text Message: <raw body>"
    application/json -> "JSON Message: <body re-serialized>"
    anything else    -> a 415 notice (body text kept for compatibility).
    """
    # .get() avoids a KeyError (HTTP 500) when no Content-Type is sent.
    content_type = request.headers.get('Content-Type', '')
    if content_type == 'text/plain':
        # request.data is bytes under Python 3; decode before
        # concatenating (str + bytes raises TypeError).
        return "Text Message: " + request.data.decode('utf-8')
    elif content_type == 'application/json':
        return "JSON Message: " + json.dumps(request.json)
    else:
        return "415 Unsupported Media Type ;)"
@app.route('/test', methods=['GET'])
def get_test():
    """Smoke-test endpoint: queries the Fridge test fixture for 'apples'
    and returns the result as JSON."""
    f = test_class.Fridge()
    return jsonify(f.in_fridge('apples'))
if __name__ == '__main__':
app.run(port=5001, debug=True) | gauravsingh1983/pythonscripts | web/__init__.py | __init__.py | py | 2,385 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
... |
4254375058 | import ssl
import re
import traceback
from urllib.request import Request, urlopen
import requests
import json
from careerjet_api import CareerjetAPIClient
from bs4 import BeautifulSoup, SoupStrainer
from concurrent.futures import ThreadPoolExecutor
# ---------------------------------------------------------------------------- #
def getFullJobDesc(jobsDict):
    """Fetch every job URL in `jobsDict['jobs']` concurrently and return
    the cleaned plain-text descriptions.

    Each job page embeds an application/ld+json <script> whose
    'description' field is HTML; the markup tags are stripped before the
    text is returned.

    Args:
        jobsDict: CareerJet API response dict with a 'jobs' list of
            entries that each carry a 'url'.

    Returns:
        list[str] of cleaned descriptions on success, or the string " "
        on any failure (legacy error contract preserved -- the caller
        only prints the result).
    """
    descriptions = []
    try:
        urls = [str(job['url']) for job in jobsDict['jobs']]
        print("\nUrls:", urls[0:4])

        def get_url(url):
            return requests.get(url).text

        with ThreadPoolExecutor(max_workers=50) as pool:
            resList = list(pool.map(get_url, urls))
        print("Threads done")
        for page in resList:
            soup = BeautifulSoup(page, 'html.parser')
            # The structured-data block is the page's second <script>
            # tag; strip its wrapper and parse the JSON payload.
            # (The old soup.prettify() debug dump of the entire page is
            # removed -- it flooded stdout.)
            raw = str(soup.find_all("script")[1])
            raw = raw.replace('<script type="application/ld+json">', "").replace("</script>", "")
            res = json.loads(raw)['description']
            print("\nInitial parsing finished")
            # Drop empty/whitespace fragments, then rejoin. The old code
            # called res.remove() while iterating res, which skips
            # elements; a filtered rebuild does what was intended.
            fragments = [part for part in res.split('<br>') if part not in (' ', '')]
            description = "".join(fragments)
            for tag in ("<strong>", "</strong>", "<li>", "</li>",
                        "<ul>", "</ul>", "Posting Notes:"):
                description = description.replace(tag, "")
            print("\nDescription is cleaned")
            descriptions.append(description)
        return descriptions
    except Exception as err:
        traceback.print_exc()
        return " "
# ---------------------------------------------------------------------------- #
if __name__ == "__main__":
    # Query parameters are bound *before* the try block so the error
    # handler can safely reference them (previously they were assigned
    # inside try, so an early failure caused a NameError in the handler).
    location = 'san francisco'
    keywords = 'data scientist'
    pageNum = '0'
    try:
        cj = CareerjetAPIClient("en_US")
        if keywords is not None or location is not None:
            result_json = cj.search({
                'location': location,
                'keywords': keywords,
                'affid': '5f41b19494be1f6eaab8a755b612e343',
                'user_ip': '11.22.33.44',
                'url': 'http://www.example.com/jobsearch?q=python&l=london',
                'user_agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:31.0) Gecko/20100101 Firefox/31.0',
                'pagesize': '99',
                'page': pageNum,
            })
    except Exception as e:
        print(f'API query failed\nFailed to retrieve jobs from CareerJet API with info: loc={location}, keywords={keywords}, pageNum={pageNum}')
        print(f'Error:\n{e}')
        # Without this, execution fell through and hit a NameError on
        # the undefined result_json below.
        raise SystemExit(1)
    if len(result_json['jobs']) != 99 or type(result_json) is not dict:
        raise Exception('CareerJet API response did not pass testing. Response:', result_json)
    print('Size of jobs:', len(result_json['jobs']))
    descriptions = getFullJobDesc(result_json)
    print(descriptions)
| Victor-JB/LinkHS-Jobs-API | get_full_description.py | get_full_description.py | py | 3,265 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.ThreadPoolExecutor",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "... |
70861202914 | import threading
import mock
import pytest
from blazingmq import BasicHealthMonitor
from blazingmq import Error
from blazingmq import QueueOptions
from blazingmq import Session
from blazingmq import session_events
from blazingmq.testing import HostHealth
def test_receiving_host_health_events():
    """Toggling the monitor unhealthy/healthy twice must surface a
    HostUnhealthy/HostHealthRestored pair for each toggle, in order,
    bracketed by Connected/Disconnected."""
    # GIVEN
    spy = mock.MagicMock()
    host_health = BasicHealthMonitor()
    toggled_health_twice = threading.Event()
    host_healthy_events = []
    def callback(*args):
        spy(*args)
        # Count HostHealthRestored events; after the second one the
        # session has observed both full unhealthy->healthy cycles.
        if isinstance(args[0], session_events.HostHealthRestored):
            host_healthy_events.append(args[0])
            if len(host_healthy_events) == 2:
                toggled_health_twice.set()
    # WHEN
    session = Session(callback, host_health_monitor=host_health)
    host_health.set_unhealthy()
    host_health.set_healthy()
    host_health.set_unhealthy()
    host_health.set_healthy()
    # Events are delivered asynchronously; wait until both cycles have
    # been seen before stopping the session.
    toggled_health_twice.wait()
    session.stop()
    # THEN
    assert spy.call_args_list == [
        mock.call(session_events.Connected(None)),
        mock.call(session_events.HostUnhealthy(None)),
        mock.call(session_events.HostHealthRestored(None)),
        mock.call(session_events.HostUnhealthy(None)),
        mock.call(session_events.HostHealthRestored(None)),
        mock.call(session_events.Disconnected(None)),
    ]
def test_enabling_real_host_health_monitoring():
    """With the default (real) host-health monitor, a start/stop cycle
    emits only Connected and Disconnected events."""
    # GIVEN
    spy = mock.MagicMock()
    def callback(*args):
        spy(*args)
    # WHEN
    session = Session(callback)
    session.stop()
    # THEN
    assert spy.call_args_list == [
        mock.call(session_events.Connected(None)),
        mock.call(session_events.Disconnected(None)),
    ]
def test_disabling_host_health_monitoring():
    """Passing host_health_monitor=None disables monitoring: only the
    Connected/Disconnected lifecycle events are delivered."""
    # GIVEN
    recorder = mock.MagicMock()
    # WHEN
    session = Session(lambda *args: recorder(*args), host_health_monitor=None)
    session.stop()
    # THEN
    expected = [
        mock.call(session_events.Connected(None)),
        mock.call(session_events.Disconnected(None)),
    ]
    assert recorder.call_args_list == expected
def test_queue_suspension(unique_queue):
    """A queue opened with suspends_on_bad_host_health=True must reject
    posts with a QUEUE_SUSPENDED error once the host turns unhealthy."""
    # GIVEN
    host_health = HostHealth()
    queue_suspended_event_received = threading.Event()
    def on_session_event(event):
        print(event)  # NOTE(review): leftover debug print; consider removing
        if isinstance(event, session_events.QueueSuspended):
            queue_suspended_event_received.set()
    session = Session(on_session_event, host_health_monitor=host_health)
    session.open_queue(
        unique_queue,
        read=False,
        write=True,
        options=QueueOptions(suspends_on_bad_host_health=True),
    )
    # WHEN
    session.post(unique_queue, b"blah")
    host_health.set_unhealthy()
    # Suspension is asynchronous; wait for the QueueSuspended event
    # before asserting that posting now fails.
    queue_suspended_event_received.wait()
    with pytest.raises(Exception) as exc:
        session.post(unique_queue, b"blah")
    session.stop()
    # THEN
    assert exc.type is Error
    assert exc.match("QUEUE_SUSPENDED")
| bloomberg/blazingmq-sdk-python | tests/integration/test_health_monitoring.py | test_health_monitoring.py | py | 2,921 | python | en | code | 15 | github-code | 1 | [
{
"api_name": "mock.MagicMock",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "blazingmq.BasicHealthMonitor",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "threading.Event",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "blazingmq... |
34860632841 | """
Code for running experiments for
"Asymmetric Tri-training for Debiasing Missing-Not-At-Random Explicit Feedback".
"""
import argparse
import yaml
import warnings
import tensorflow as tf
from trainer import Trainer, Tuner
# All model variants runnable from the command line; the "-at" suffix
# marks the asymmetric tri-training version of each estimator.
possible_model_names = ['uniform', 'uniform-at', 'user', 'user-at', 'item', 'item-at',
                        'both', 'both-at', 'nb', 'nb-at', 'nb_true', 'nb_true-at']
parser = argparse.ArgumentParser()
parser.add_argument('--data', '-d', type=str, required=True)
parser.add_argument('--model_name', '-m', type=str,
                    choices=possible_model_names, required=True)
parser.add_argument('--tuning', '-t', action='store_true')
if __name__ == "__main__":
    warnings.filterwarnings("ignore")
    tf.get_logger().setLevel("ERROR")
    args = parser.parse_args()
    # hyper-parameters
    # NOTE(review): the file handle is never closed; consider
    # `with open('../config.yaml') as f: config = yaml.safe_load(f)`.
    config = yaml.safe_load(open('../config.yaml', 'r'))
    eta = config['eta']
    batch_size = config['batch_size']
    max_iters = config['max_iters']
    pre_iters = config['pre_iters']
    post_steps = config['post_steps']
    post_iters = config['post_iters']
    num_sims = config['num_sims']
    n_trials = config['n_trials']
    model_name = args.model_name
    tuning = args.tuning
    data = args.data
    # Optional hyper-parameter search before the final simulations.
    if tuning:
        tuner = Tuner(data=data, model_name=model_name)
        tuner.tune(n_trials=n_trials)
        print('\n', '=' * 25, '\n')
        print(f'Finished Tuning of {model_name}!')
        print('\n', '=' * 25, '\n')
    trainer = Trainer(data=data, batch_size=batch_size, max_iters=max_iters,
                      pre_iters=pre_iters, post_steps=post_steps,
                      post_iters=post_iters, eta=eta, model_name=model_name)
    trainer.run_simulations(num_sims=num_sims)
    print('\n', '=' * 25, '\n')
    print(f'Finished Running {model_name}!')
    print('\n', '=' * 25, '\n')
| usaito/asymmetric-tri-rec-real | src/main.py | main.py | py | 1,857 | python | en | code | 23 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "warnings.filterwarnings",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tensorflow.get_logger",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": ... |
23550339540 | import logging as logger
import os
from pyspark.sql import DataFrame
from ddataflow.exceptions import BiggerThanMaxSize
from ddataflow.sampling.default import filter_function
from ddataflow.utils import get_or_create_spark
class DataSource:
    """
    Utility functions at data source level
    """
    def __init__(
        self,
        *,
        name: str,
        config: dict,
        local_data_folder: str,
        snapshot_path: str,
        size_limit,
    ):
        # name: table name or storage path identifying the source.
        # config keys used here: "source", "file-type", "filter",
        # "default_sampling".
        self._name = name
        self._local_data_folder = local_data_folder
        self._snapshot_path = snapshot_path
        self._size_limit = size_limit
        self._config = config
        self._filter = None
        self._source = None
        # An explicit "source" callable in the config wins; otherwise
        # build a default reader from the name (parquet path or table).
        if "source" in self._config:
            self._source = config["source"]
        else:
            if self._config.get("file-type") == "parquet":
                self._source = lambda spark: spark.read.parquet(self._name)
            else:
                self._source = lambda spark: spark.table(self._name)
        # Same precedence for the sampling filter: explicit "filter"
        # first, then the default sampling function if enabled.
        if "filter" in self._config:
            self._filter = self._config["filter"]
        else:
            if self._config.get("default_sampling"):
                self._filter = lambda df: filter_function(df)
    def query(self):
        """
        query with filter unless none is present
        """
        df = self.query_without_filter()
        if self._filter is not None:
            print(f"Filter set for {self._name}, applying it")
            df = self._filter(df)
        else:
            print(f"No filter set for {self._name}")
        return df
    def has_filter(self) -> bool:
        # True when a sampling filter was configured in __init__.
        return self._filter is not None
    def query_without_filter(self):
        """
        Go to the raw data source without any filtering
        """
        spark = get_or_create_spark()
        logger.debug(f"Querying without filter source: '{self._name}'")
        return self._source(spark)
    def query_locally(self):
        """Read the downloaded snapshot of this source from local disk."""
        logger.info(f"Querying locally {self._name}")
        path = self.get_local_path()
        if not os.path.exists(path):
            raise Exception(
                f"""Data source '{self.get_name()}' does not have data in {path}.
                Consider downloading using the following command:
                ddataflow current_project download_data_sources"""
            )
        spark = get_or_create_spark()
        df = spark.read.parquet(path)
        return df
    def get_dbfs_sample_path(self) -> str:
        # Remote (DBFS) location of the sampled snapshot.
        return os.path.join(self._snapshot_path, self._get_name_as_path())
    def get_local_path(self) -> str:
        # Local filesystem location of the downloaded snapshot.
        return os.path.join(self._local_data_folder, self._get_name_as_path())
    def _get_name_as_path(self):
        """
        converts the name when it has "/mnt/envents" in the name to a single file in a (flat structure) _mnt_events
        """
        return self.get_name().replace("/", "_")
    def get_name(self) -> str:
        return self._name
    def get_parquet_filename(self) -> str:
        return self._name + ".parquet"
    def estimate_size_and_fail_if_too_big(self):
        """
        Estimate the size of the data source use the _name used in the _config
        It will throw an exception if the estimated size is bigger than the maximum allowed in the configuration
        """
        print("Estimating size of data source: ", self.get_name())
        df = self.query()
        size_estimation = self._estimate_size(df)
        print("Estimated size of the Dataset in GB: ", size_estimation)
        if size_estimation > self._size_limit:
            raise BiggerThanMaxSize(self._name, size_estimation, self._size_limit)
        return df
    def _estimate_size(self, df: DataFrame) -> float:
        """
        Estimates the size of a dataframe in Gigabytes
        Formula:
            number of gigabytes = (N*V*W) / 1024^3
        """
        # NOTE(review): df.count() triggers a Spark action and is called
        # twice here (print + formula); consider caching the count.
        print(f"Amount of rows in dataframe to estimate size: {df.count()}")
        average_variable_size_bytes = 50
        return (df.count() * len(df.columns) * average_variable_size_bytes) / (
            1024**3
        )
| getyourguide/DDataFlow | ddataflow/data_source.py | data_source.py | py | 4,121 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "ddataflow.sampling.default.filter_function",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "ddataflow.utils.get_or_create_spark",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 69,
"usage_type": "call"
... |
29461253994 | import dearpygui.dearpygui as dpg
from data import extract_data, get_path2
dpg.create_context()
# Load the synthetic spectrum: x = wavelengths, y = fluxes; the third
# returned column is unused here.
path2data = get_path2("Test_spectrum.syn")
x, y, _ = extract_data(path2data)
# Screen resolution for window scaling. dpi for correct font size.
res_x, res_y = 1920, 1080
dpi = 150
with dpg.font_registry():
    # first argument ids the path to the .ttf or .otf file
    # NOTE(review): absolute, machine-specific font path; this fails on
    # systems where the font is not installed at this location.
    default_font = dpg.add_font("/usr/share/fonts/OTF/CodeNewRomanNerdFont-Bold.otf", 20)
with dpg.window(label="Spectra", width=int(res_x*0.8), height=int(res_y * 0.8)):
    # themes part
    dpg.bind_font(default_font)
    with dpg.theme(tag="spectrum_theme_1"):
        with dpg.theme_component(1):
            dpg.add_theme_color(dpg.mvPlotCol_Line, (231, 0, 230),
                                category=dpg.mvThemeCat_Plots)
            dpg.add_theme_color(dpg.mvPlotCol_Fill, (230, 0, 230, 170),
                                category=dpg.mvThemeCat_Plots)
    # spectrum plot part
    with dpg.plot(label="Spectrum plot", height=int(res_y*0.8), width=-1):
        dpg.add_plot_legend()
        dpg.add_plot_axis(dpg.mvXAxis, label="x")
        dpg.add_plot_axis(dpg.mvYAxis, label="y", tag="yaxis")
        dpg.add_shade_series(list(x), list(y), label="Synthetic spectra",
                             parent="yaxis", tag="syn_spectrum")
        # apply theme
        dpg.bind_item_theme(dpg.last_item(), "spectrum_theme_1")
dpg.create_viewport(title='Extractor', width=int(res_x*0.9),
                    height=int(res_y*0.9))
dpg.setup_dearpygui()
dpg.show_viewport()
dpg.start_dearpygui()
dpg.destroy_context()
| zhukgleb/synth_spectrum | gui.py | gui.py | py | 1,598 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dearpygui.dearpygui.create_context",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "dearpygui.dearpygui",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "data.get_path2",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "da... |
24649723716 | import os
import sys
import warnings
import platform
import datetime as dt
import numpy as np
import matplotlib.pyplot as mp
import matplotlib.dates as md
def dmy2ymd(dmy):
    """Convert a bytes date 'DD-MM-YYYY' to an ISO 'YYYY-MM-DD' string."""
    text = str(dmy, encoding='utf-8')
    parsed = dt.datetime.strptime(text, '%d-%m-%Y')
    return parsed.date().strftime('%Y-%m-%d')
def read_data(filename):
    """Load (dates, closing_prices) from an OHLC-style CSV.
    Column 1 holds the date as DD-MM-YYYY (converted to numpy 'M8[D]'
    via dmy2ymd); column 6 holds the closing price as float.
    """
    dates, closing_prices = np.loadtxt(
        filename, delimiter=',', usecols=(1, 6),
        unpack=True, dtype=np.dtype('M8[D], f8'),
        converters={1: dmy2ymd})
    return dates, closing_prices
def calc_returns(N, closing_prices):
    """Daily returns of `closing_prices` and their N-tap Hanning-smoothed
    moving average ('valid' convolution, so the result is shorter)."""
    previous = closing_prices[:-1]
    returns = np.diff(closing_prices) / previous
    smr = np.convolve(np.hanning(N), returns, 'valid')
    return returns, smr
def fit_polys(fit_x, fit_y, fit_d, poly_x):
    """Fit a degree-`fit_d` polynomial to (fit_x, fit_y) and evaluate it
    at the points `poly_x`."""
    coeffs = np.polyfit(fit_x, fit_y, fit_d)
    return np.polyval(coeffs, poly_x)
def find_inters(fit_x, fit_y1, fit_y2,
                fit_d, min_x, max_x):
    """Intersections of two degree-`fit_d` polynomial fits.

    Fits one polynomial per y-series over `fit_x`, finds the real roots
    of their difference that lie strictly inside (min_x, max_x), and
    returns them as a sorted (n, 2) array of [x, y] pairs.
    """
    poly1 = np.polyfit(fit_x, fit_y1, fit_d)
    poly2 = np.polyfit(fit_x, fit_y2, fit_d)
    candidates = np.roots(np.polysub(poly1, poly2))
    reals = candidates[np.isreal(candidates)].real
    inters = sorted(
        [real, np.polyval(poly1, real)]
        for real in reals
        if min_x < real < max_x
    )
    return np.array(inters)
def init_chart(N, first_day, last_day):
    """Set up figure title, axis labels, date tick locators and grid."""
    mp.gcf().set_facecolor(np.ones(3) * 240 / 255)
    mp.title('Smoothing Returns (%d Days)' % N, fontsize=20)
    mp.xlabel('Trading Days From %s To %s' % (
        first_day.astype(md.datetime.datetime).strftime(
            '%d %b %Y'),
        last_day.astype(md.datetime.datetime).strftime(
            '%d %b %Y')), fontsize=14)
    mp.ylabel('Returns Of Stock Price', fontsize=14)
    ax = mp.gca()
    # Major ticks on Mondays, minor ticks daily.
    ax.xaxis.set_major_locator(
        md.WeekdayLocator(byweekday=md.MO))
    ax.xaxis.set_minor_locator(md.DayLocator())
    ax.xaxis.set_major_formatter(
        md.DateFormatter('%d %b %Y'))
    mp.tick_params(which='both', top=True, right=True,
                   labelright=True, labelsize=10)
    mp.grid(linestyle=':')
def draw_returns(dates, bhp_returns, vale_returns):
    """Plot the raw daily returns of both stocks as faint lines."""
    dates = dates.astype(md.datetime.datetime)
    mp.plot(dates, bhp_returns, '-', c='orangered',
            alpha=0.25, label='BHP Returns')
    mp.plot(dates, vale_returns, '-', c='dodgerblue',
            alpha=0.25, label='VALE Returns')
def draw_smrs(N, dates, bhp_smrs, vale_smrs):
    """Plot the N-day smoothed moving returns of both stocks."""
    dates = dates.astype(md.datetime.datetime)
    mp.plot(dates, bhp_smrs, '-', c='orangered',
            alpha=0.75, label='BHP SMR-%d' % N)
    # NOTE(review): label below has a stray ')' -- runtime string, left
    # unchanged here.
    mp.plot(dates, vale_smrs, '-', c='dodgerblue',
            alpha=0.75, label='VALE SMR-%d)' % N)
def draw_polys(N, dates, bhp_polys, vale_polys, degree):
    """Plot the polynomial fits of the smoothed returns."""
    dates = dates.astype(md.datetime.datetime)
    mp.plot(dates, bhp_polys, '-', c='orangered',
            label='BHP SMR-%d Polynomial (%d)' % (
                N, degree))
    # NOTE(review): 'Polynomal' typo -- runtime string, left unchanged.
    mp.plot(dates, vale_polys, '-', c='dodgerblue',
            label='VALE SMR-%d Polynomal (%d)' % (
                N, degree))
def draw_inters(inters):
    """Mark the fitted-curve intersection points; finalize date layout
    and legend."""
    # inters is an (n, 2) array of [day-as-int, value]; split columns.
    dates, inters = np.hsplit(inters, 2)
    dates = dates.astype(int).astype(
        'M8[D]').astype(md.datetime.datetime)
    mp.scatter(dates, inters, marker='X', s=120,
               c='firebrick',
               label='Intersection of SMRs', zorder=3)
    mp.gcf().autofmt_xdate()
    mp.legend()
def show_chart():
    """Maximize the window (backend/platform dependent) and show it."""
    mng = mp.get_current_fig_manager()
    if 'Windows' in platform.system():
        mng.window.state('zoomed')
    else:
        mng.resize(*mng.window.maxsize())
    mp.show()
def main(argc, argv, envp):
    """Load both price series, smooth the returns, fit polynomials,
    locate their intersections and render the chart. Returns 0."""
    # High-degree polyfit emits RankWarning; silence only that category.
    warnings.filterwarnings('ignore',
                            category=np.RankWarning)
    dates, bhp_closing_prices = read_data('bhp.csv')
    dates, vale_closing_prices = read_data('vale.csv')
    N = 8
    bhp_returns, bhp_smrs = calc_returns(
        N, bhp_closing_prices)
    vale_returns, vale_smrs = calc_returns(
        N, vale_closing_prices)
    # The 'valid' convolution shortens the series; align dates with it.
    days = dates[N - 1:-1].astype(int)
    degree = 5
    bhp_ploys = fit_polys(days, bhp_smrs, degree, days)
    vale_ploys = fit_polys(days, vale_smrs, degree, days)
    inters = find_inters(days, bhp_smrs, vale_smrs,
                         degree, days[0], days[-1])
    init_chart(N, dates[0], dates[-2])
    draw_returns(dates[:-1], bhp_returns, vale_returns)
    draw_smrs(N, dates[N - 1:-1], bhp_smrs, vale_smrs)
    draw_polys(N, dates[N - 1:-1], bhp_ploys, vale_ploys,
               degree)
    draw_inters(inters)
    show_chart()
    return 0
if __name__ == '__main__':
    sys.exit(main(len(sys.argv), sys.argv, os.environ))
| smakerm/list | note/step4/爬虫笔记/18. DataScience/day05/smr.py | smr.py | py | 4,832 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.strptime",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "numpy.loadtxt",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy... |
28567646925 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"View module showing all messages concerning discarded beads"
from typing import cast
from bokeh import layouts
from model.plots import PlotState
from taskview.plots import PlotView, CACHE_TYPE, TaskPlotCreator
from utils import initdefaults
from view.base import stretchout
from ._widgets import QualityControlWidgets
from ._plots import QualityControlPlots
from ._model import QualityControlModelAccess
class _StateDescriptor:
    """Descriptor proxying the plot state to the controller's "qc.state"
    display model (stored on the owner's `_state`/`_ctrl` attributes)."""
    def __get__(self, inst, owner):
        # Class-level access (inst is None) returns the descriptor itself.
        return getattr(inst, '_state').state if inst else self
    @staticmethod
    def setdefault(inst, value):
        "sets the default value"
        getattr(inst, '_ctrl').display.updatedefaults("qc.state", state = PlotState(value))
    def __set__(self, inst, value):
        getattr(inst, '_ctrl').display.update("qc.state", state = PlotState(value))
class QCPlotState:
    "qc plot state"
    # Display-model defaults; `name` is the key under which this state
    # object is registered with the controller's display.
    state = PlotState.active
    name = "qc.state"
    @initdefaults(frozenset(locals()))
    def __init__(self, **_):
        pass
class QualityControlPlotCreator(TaskPlotCreator[QualityControlModelAccess, None]):
    "Creates plots for discard list"
    _plotmodel: None
    _model: QualityControlModelAccess
    _RESET: frozenset = frozenset()
    # The plot state lives on the controller's display model; see
    # _StateDescriptor above.
    state = cast(PlotState, _StateDescriptor())
    def __init__(self, ctrl):
        super().__init__(ctrl)
        # Widgets (right column) and plots (left grid) share one model.
        self._widgets = QualityControlWidgets(ctrl, self._model)
        self._plots = QualityControlPlots (ctrl, self._model)
        self._state = QCPlotState()
        ctrl.display.add(self._state)
        self.addto(ctrl)
    def observe(self, ctrl):
        "observes the model"
        super().observe(ctrl)
        self._plots .observe(ctrl)
        self._widgets.observe(self, ctrl)
    def _addtodoc(self, ctrl, doc, *_):
        "returns the figure"
        mode = self.defaultsizingmode()
        widgets = self._widgets.addtodoc(self, ctrl, mode)
        grid = self._plots.addtodoc(self._ctrl, doc, mode)
        out = layouts.row(grid, widgets, **mode)
        self.__resize(ctrl, out)
        return stretchout(out)
    def _reset(self, cache:CACHE_TYPE):
        # Push model-driven updates into the bokeh document via `cache`.
        self._widgets.reset(cache)
        self._plots.reset(cache)
    def __resize(self, ctrl, sizer):
        """Manually size the row layout: a fixed-width widget column on
        the right, stacked figures sharing the remaining space on the
        left."""
        figtb = ctrl.theme.get("theme", "figtbheight")
        borders = ctrl.theme.get("theme", "borders")
        sizer.update(**self.defaulttabsize(ctrl))
        widg = sizer.children[1]
        width = max(i.width for i in widg.children)
        for i in widg.children:
            i.width = width
        # The last widget absorbs the leftover vertical space.
        widg.children[-1].height = (
            sizer.height - sum(i.height for i in widg.children[:-1])-figtb
        )
        widg.update(width = width, height = sizer.height)
        sizer.children[0].update(width = sizer.width-width, height = sizer.height)
        sizer.children[0].children[1].update(
            width = sizer.width-width-borders,
            height = sizer.height
        )
        # Split the available height evenly between the stacked figures.
        plots = sizer.children[0].children[1].children[1].children
        for i in plots:
            i[0].update(
                plot_width = sizer.children[0].children[1].width,
                plot_height = (sizer.height-figtb)//len(plots)
            )
class QualityControlView(PlotView[QualityControlPlotCreator]):
    "a widget with all discards messages"
    # Tasks set up by default when this view is the main one.
    TASKS = 'datacleaning', 'extremumalignment'
    PANEL_NAME = 'Quality Control'
    def ismain(self, ctrl):
        "Cleaning and alignment, ... are set-up by default"
        self._ismain(ctrl, tasks = self.TASKS)
| depixusgenome/trackanalysis | src/qualitycontrol/view/_view.py | _view.py | py | 3,713 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "model.plots.PlotState",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "model.plots.PlotState",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "model.plots.PlotState.active",
"line_number": 28,
"usage_type": "attribute"
},
{
"api... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.