seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
32633364752 | from aiogram.dispatcher.filters import state
from aiogram.dispatcher import FSMContext
from main import bot, dp
from aiogram import types
from aiogram.types import ParseMode
from language_middleware import i18n
from sql import SQL
from config import DB_NAME
dp.middleware.setup(i18n)
_ = i18n.gettext
database = SQL(f'{DB_NAME}')
def unpack_geo(geopositions: list):
    """Render a list of geoposition strings as one newline-separated block."""
    separator = "\n"
    return separator.join(geopositions)
async def register(id: int):
    """Tell chat `id` that this account is already registered (text localized via gettext)."""
    await bot.send_message(chat_id=id, text=_("You've already been registered!"))
def throw_buttons():
    """Build the one-time reply keyboard offered to a waved-at user.

    One location-request button plus six emoji reaction buttons.
    """
    markup = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
    location = types.KeyboardButton(_("Here I Am"), request_location=True)
    reactions = [
        types.KeyboardButton("\u270C"),      # victory hand
        types.KeyboardButton("\u2744"),      # snowflake
        types.KeyboardButton("\U0001F976"),  # cold face
        types.KeyboardButton("\U0001F525"),  # fire
        types.KeyboardButton("\U0001F621"),  # pouting face
        types.KeyboardButton("\U0001F44D"),  # thumbs up
    ]
    markup.add(location, *reactions)
    return markup
async def send_request(ids: int, first_name: str, message_id: int, contact):
    """Notify chat `ids` that someone waved at them, as a MarkdownV2 mention link.

    NOTE(review): `message_id` is interpolated into tg://user?id=... so it
    appears to actually be the waving user's Telegram id — confirm callers.
    """
    await bot.send_message(ids,
                           text=_("User [{0}](tg://user?id={1}) with number {2} waves {3} to you\.\n*Switch on your location before answer 'Here I Am'*\.").format(first_name, message_id, contact, '\u270B'),
                           reply_markup=throw_buttons(),
                           parse_mode=ParseMode.MARKDOWN_V2)
def button_contact():
    """Build a one-time reply keyboard with a single share-contact button."""
    share_button = types.KeyboardButton(_("Share a contact"), request_contact=True)
    markup = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
    markup.add(share_button)
    return markup
async def aware_of_contact(id: int):
    """Ask chat `id` to share their contact before using the bot."""
    await bot.send_message(chat_id=id, text=_('In order to use a bot please share your contact!')
                           , reply_markup=button_contact())
async def thank(id: int):
    """Thank the user in chat `id` for completing registration."""
    message = _("Thank you for the registration!")
    await bot.send_message(chat_id=id, text=message)
async def action_after_registration(id: int, first_name: str):
    """Welcome a newly registered user by name and point them at the menu."""
    await bot.send_message(id,
                           text=_("Welcome, {0}!\nYou can find instruction in menu!\nPlease, choose your further action from menu!").format(
                               first_name),
                           )
async def cancel(id: int):
    """Confirm to chat `id` that their pending action was cancelled."""
    await bot.send_message(id, text=_("Your action has been cancelled"))
async def share_a_contact(id: int):
    """Prompt chat `id` to pick and share another person's contact."""
    await bot.send_message(id,
                           text=_("Please share a contact of a person (choose from your contacts)!"))
async def request_acceptance(id: int):
    """Confirm to chat `id` that their wave request was accepted."""
    await bot.send_message(id,
                           text=_(
                               "Your request has been accepted!\nA person will share his location or state soon, meanwhile you can continue using a bot!"))
async def forwarding(id: int):
    """Tell chat `id` the target user is unregistered; an invite message follows."""
    await bot.send_message(id,
                           text=_("Unfortunately, this user has not been registered yet, tell him/her about this bot by forwarding the following message:"))
async def send_request_live(ids: int, first_name: str, message_id: int, contact):
    """Notify chat `ids` of a *live*-location tracking request (MarkdownV2).

    NOTE(review): as in send_request, `message_id` is used as the requesting
    user's id inside the tg://user link — confirm against callers.
    """
    await bot.send_message(ids,
                           text=_("User [{0}](tg://user?id={1}) with number {2} wants to track your *live* location\.\n*Switch on your location before answer 'Location'*\.").format(first_name, message_id, contact),
                           reply_markup=throw_buttons(),
                           parse_mode=ParseMode.MARKDOWN_V2)
async def check_queries(query: list, id: int):
if len(query[id]) != 0:
await send_request(id,
database.get_name(query[id][-1])[0][0],
query[id][-1],
database.get_contact(query[id][-1])[0][0]) | 7Dany6/wave-me-bot | functions.py | functions.py | py | 3,965 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "main.dp.middleware.setup",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "language_middleware.i18n",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "main.dp.middleware",
"line_number": 11,
"usage_type": "attribute"
},
{
"api... |
467939918 | import math
from binance_api import Binance
import config_trade
import statistics as st
import time
import requests
def SMA(data, period):
    """Simple moving average over `data` with NaN gaps.

    Returns a list the same length as `data`. A position holds math.nan when
    it is itself NaN, or when fewer than `period` consecutive non-NaN values
    have accumulated since the last NaN; otherwise it holds the mean of the
    trailing `period` values.

    Raises Exception on empty data or a non-positive period.
    """
    if len(data) == 0:
        raise Exception("Empty data")
    if period <= 0:
        raise Exception("Invalid period")
    result = []
    window_sum = 0.0
    valid_run = 0  # consecutive non-NaN samples ending at the current index
    for i, value in enumerate(data):
        if math.isnan(value):
            # A gap invalidates the running window entirely.
            result.append(math.nan)
            window_sum = 0.0
            valid_run = 0
            continue
        window_sum += value
        valid_run += 1
        if valid_run < period:
            result.append(math.nan)
        else:
            result.append(window_sum / float(period))
            # Slide the window: drop the oldest of the `period` samples.
            # valid_run >= period guarantees i + 1 - period >= 0 and that the
            # dropped sample is non-NaN. (BUG FIX: the original subtracted
            # data[i+1-period] unconditionally, so for early indices the
            # negative index silently subtracted values from the END of the
            # list, corrupting every subsequent average; its NaN handling via
            # a global NaN counter was also wrong for mid-series NaNs.)
            window_sum -= data[i + 1 - period]
    return result
def take_info_hloc(API_KEY, API_SECRET, COIN, FRAME, Limit = 50) -> list:
    """Fetch the latest `Limit` klines for COIN/USDT and return, per candle,
    the mean of its open/high/low/close prices.

    NOTE(review): on any error this retries recursively with config_trade
    defaults and no backoff — a persistent API outage recurses without bound.
    """
    bot = Binance(API_KEY=API_KEY, API_SECRET=API_SECRET)
    try:
        data = bot.klines(
            symbol = COIN+'USDT',
            interval = FRAME,
            limit = Limit)
        hloc4 = list()
        for i in data:
            # Kline fields 1..4 are open, high, low, close (string prices).
            hloc4.append(st.mean((float(i[1]),float(i[2]),float(i[3]),float(i[4]))))
        return hloc4
    except Exception:
        return take_info_hloc(API_KEY=config_trade.API_KEY, API_SECRET=config_trade.API_SECRET, COIN=config_trade.COIN, FRAME=config_trade.FRAME, Limit = 50)
def put_order(side, price, quoteOrderQty, API_KEY, API_SECRET, COIN, type='LIMIT'):
    """Place a LIMIT order on COIN/USDT.

    side: 'BUY' or 'SELL'. For BUY, `quoteOrderQty` is the USDT amount to
    spend; for SELL it is the base-asset quantity. The quantity is snapped to
    the symbol's lot-size step and rounded to 7 decimals; the price is
    truncated to cents. Retries recursively on any error.
    """
    bot = Binance(API_KEY=API_KEY, API_SECRET=API_SECRET)

    def take_prec(COIN=COIN):
        # stepSize of the symbol's lot-size filter. Assumes the filter at
        # index 2 is LOT_SIZE — TODO confirm; Binance does not guarantee
        # filter ordering in exchangeInfo.
        prec = requests.get(f'https://api.binance.com/api/v3/exchangeInfo?symbol={COIN}USDT').json()
        return float(prec['symbols'][0]['filters'][2]['stepSize'])

    print(quoteOrderQty)
    try:
        if side == 'BUY':
            quantity = round((quoteOrderQty / price) - ((quoteOrderQty / price) % take_prec()), 7)
            print(quantity)
        elif side == 'SELL':
            # NOTE(review): subtracting (quoteOrderQty/price) % step here looks
            # inconsistent with the BUY branch (quoteOrderQty is already a base
            # quantity for SELL) — verify intent.
            quantity = round(quoteOrderQty - ((quoteOrderQty / price) % take_prec()), 7)
            print(quantity)
        return bot.createOrder(
            symbol=COIN + 'USDT',
            side=side,
            type=type,
            quantity=quantity,
            price=round(price - price % 0.01, 2),
            recvWindow=59999,
            timeInForce='GTC'
        )
    except Exception:
        print('EROR EROR EROR ORDER PUT')
        # BUG FIX: the retry previously passed `type` positionally into the
        # API_KEY slot (shifting every later argument), so retries ran with
        # garbage credentials and always failed again.
        return put_order(side, price, quoteOrderQty, API_KEY, API_SECRET, COIN, type=type)
def cancel_order (API_KEY, API_SECRET,COIN):
    """Cancel all open COIN/USDT orders.

    NOTE(review): on error this retries with config_trade credentials,
    recursing without bound while the API stays unreachable.
    """
    bot = Binance(API_KEY=API_KEY, API_SECRET=API_SECRET)
    try:
        return bot.cancelOrders(
            symbol = COIN+'USDT',
            recvWindow = 59999
        )
    except Exception:
        return cancel_order (API_KEY=config_trade.API_KEY, API_SECRET=config_trade.API_SECRET,COIN=config_trade.COIN)
def check_order(API_KEY, API_SECRET, COIN):
    """Return the list of open COIN/USDT orders, retrying with the
    config_trade credentials on error.
    """
    bot = Binance(API_KEY=API_KEY, API_SECRET=API_SECRET)
    try:
        return bot.openOrders(
            symbol=COIN + 'USDT',
            recvWindow=59999
        )
    except Exception:
        # BUG FIX: the retry result was previously discarded (no `return`),
        # so after any API error callers received None and `len(orders)`
        # in main() crashed with TypeError.
        return check_order(API_KEY=config_trade.API_KEY, API_SECRET=config_trade.API_SECRET, COIN=config_trade.COIN)
def balance_check(API_KEY, API_SECRET):
    """Return the Binance account snapshot (contains 'balances'), retrying
    with the config_trade credentials on error.
    """
    bot = Binance(API_KEY=API_KEY, API_SECRET=API_SECRET)
    try:
        return bot.account()
    except Exception:
        # BUG FIX: the retry previously passed COIN=..., which is not a
        # parameter of this function, so the except path always raised
        # TypeError instead of retrying. Also narrowed the bare `except`.
        return balance_check(API_KEY=config_trade.API_KEY, API_SECRET=config_trade.API_SECRET)
def main(API_KEY=config_trade.API_KEY, API_SECRET=config_trade.API_SECRET, COIN=config_trade.COIN, FRAME = config_trade.FRAME):
    """Trading loop (never returns).

    At 00:01 UTC any open orders are cancelled and replaced; at all other
    times orders are placed only when none are open. Buys are priced at the
    3-period SMA scaled by config_trade.koef; sells at the SMA itself.
    """
    while True:
        # (hour, minute) in UTC.
        time_now = time.gmtime()[3:5]
        if time_now[0] == 0 and time_now[1] == 1:
            # Daily reset window: drop outstanding orders before re-placing.
            orders = check_order(API_KEY=API_KEY, API_SECRET=API_SECRET, COIN=COIN)
            print (orders)
            if len(orders) != 0:
                print(cancel_order(API_KEY=API_KEY, API_SECRET=API_SECRET, COIN=COIN))
            data = take_info_hloc(API_KEY=API_KEY, API_SECRET=API_SECRET, COIN=COIN, FRAME=FRAME, Limit=50)
            # Latest SMA of candle HLOC means; `shift` is the buy price
            # (koef presumably < 1 for a discount — confirm in config_trade).
            sma = SMA(data=data, period=3)[-1]
            shift = sma * config_trade.koef
            print (shift,sma)
            person_data_raw = balance_check(API_KEY=API_KEY, API_SECRET=API_SECRET)['balances']
            person_data = dict()
            for i in person_data_raw:
                if i['asset'] == f'{COIN}' or i['asset'] == 'USDT':
                    person_data[i['asset']] = i['free']
            print (person_data)
            # 12 (USDT) is used throughout as a rough minimum notional.
            if float(person_data[COIN])*sma < 12 and float(person_data['USDT']) > 12:
                if config_trade.quoteOrderQty:
                    put_order('BUY', shift, round(float(config_trade.quoteOrderQty)*0.98, 0), API_KEY=API_KEY, API_SECRET=API_SECRET, COIN=COIN, type='LIMIT')
                else:
                    put_order('BUY', shift, round(float(person_data['USDT'])*0.97, 0), API_KEY=API_KEY, API_SECRET=API_SECRET, COIN=COIN, type='LIMIT')
            if float(person_data[COIN])*sma > 12:
                put_order('SELL', sma, float(person_data[f'{COIN}']), type='LIMIT', API_KEY=API_KEY, API_SECRET=API_SECRET, COIN=COIN)
            else:
                pass
            time.sleep(60)
        else:
            orders = check_order(API_KEY=API_KEY, API_SECRET=API_SECRET, COIN=COIN)
            if len(orders) == 0:
                # Same trading logic as the daily branch, without cancelling.
                data = take_info_hloc(API_KEY=API_KEY, API_SECRET=API_SECRET, COIN=COIN, FRAME=FRAME, Limit=50)
                sma = SMA(data=data, period=3)[-1]
                shift = sma * config_trade.koef
                print (shift,sma)
                person_data_raw = balance_check(API_KEY=API_KEY, API_SECRET=API_SECRET)['balances']
                person_data = dict()
                for i in person_data_raw:
                    if i['asset'] == f'{COIN}' or i['asset'] == 'USDT':
                        person_data[i['asset']] = i['free']
                print (person_data)
                if float(person_data[COIN])*sma > 12:
                    put_order('SELL', sma, float(person_data[f'{COIN}']), type='LIMIT', API_KEY=API_KEY, API_SECRET=API_SECRET, COIN=COIN)
                elif float(person_data[COIN])*sma < 12 and float(person_data['USDT']) > 12:
                    if config_trade.quoteOrderQty:
                        put_order('BUY', shift, round(float(config_trade.quoteOrderQty)*0.98, 0), API_KEY=API_KEY, API_SECRET=API_SECRET, COIN=COIN, type='LIMIT')
                    else:
                        put_order('BUY', shift, round(float(person_data['USDT'])*0.97, 0), API_KEY=API_KEY, API_SECRET=API_SECRET, COIN=COIN, type='LIMIT')
                else:
                    pass
                time.sleep(60)
            else:
                time.sleep(60)
if __name__ == '__main__':
main()
#print (check_order(API_KEY=config_trade.API_KEY, API_SECRET=config_trade.API_SECRET, COIN=config_trade.COIN))
...
| OGKuz/binance_shift_bot | shift_bot.py | shift_bot.py | py | 7,247 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "math.isnan",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "math.nan",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "math.nan",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "math.isnan",
"line_number"... |
30677005666 | #--------------------import some mould-------------#
from flask import Flask ,render_template,request,redirect,url_for,flash,session
import os
import mysql.connector
db=mysql.connector.connect(
host="localhost",
user="root",
password="1889",
database="AirPort"
)
#----------StartProject------------#
#listCountry
listCountry=[
"Afghanistan",
"Albania",
"Algeria",
"Andorra",
"Angola",
"Belarus",
"Bhutan",
"Blize",
"canada",
"China",
"colombia",
"Morocco",
"Mongolia",
"Mali",
"Malisya",
"palestine",
"panama",
"Iran",
"Iraq",
"Italya",
"India",
"Iceland",
"Qatar",
"United Arab Emirates",
"United States of America",
"South Korea",
"Sudan",
"Saudi Arabia",
"San Marino",
"Singapore",
"Dominica",
"Denmark"]
#listDay
listDay=["Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","sunday"]
#ClassPlace
ClassPlace=["FirstClass","EconomyClass","BusinessClass"]
#function DisplayAircraft
def DisplayAircraft():
    """Return every aircraft id as a list of strings."""
    cursor = db.cursor()
    cursor.execute('select idAircraft from Aircraft')
    rows = cursor.fetchall()
    return [str(row[0]) for row in rows]
#function DisplaySector
def DisplaySector():
    """Return every sector id as a list of strings."""
    cursor = db.cursor()
    cursor.execute("select idSector from Sector")
    rows = cursor.fetchall()
    return [str(row[0]) for row in rows]
#function DisplayFlight
def DisplayFlight():
    """Return every flight id as a list of strings."""
    cursor = db.cursor()
    cursor.execute("select idFlight from Flight")
    rows = cursor.fetchall()
    return [str(row[0]) for row in rows]
#*****************************#
app=Flask(__name__)
app.secret_key=os.urandom(150)
#------------page principale--------------#
@app.route('/')
def PageUser():
    """Home page: logged-in users see the flight table, others the contact page."""
    if "Email" in session:
        cur=db.cursor()
        cur.execute("select Schedule.FlightDate,Flight.DepartureTime,Flight.ArrivaTime,Flight.PrixFlight,Flight.Seat,Sector.Source,Sector.Destination,Aircraft.NameAirport from Schedule inner join Flight on Schedule.idFlight=Flight.idFlight inner join Sector on Sector.idSector=Flight.idSector inner join Aircraft on Aircraft.idAircraft=Flight.idAircraft ")
        SearchFlight=cur.fetchall()
        return render_template("PageUser.html",Search=SearchFlight,ClassPlace=ClassPlace)
    return render_template("Contact.html")
#-------------PagSingup--------------#
@app.route('/SingUp')
def SingUp():
    """Render the sign-up form."""
    return render_template("SingUp.html")
@app.route("/AddSingUp",methods=["GET","POST"])
def AddSingUp():
if request.method=="POST":
NameUser=request.form.get("NameUser")
EmailUser=request.form.get("EmailUser")
PasswordUser= request.form.get("PasswordUser")
PasswordConfirm=request.form.get("PasswordConfirm")
if PasswordUser !=PasswordConfirm or NameUser=="" or EmailUser=="" or PasswordUser=="" or PasswordConfirm=="":
flash("Full all control or Password not confirm" ,category="Confirm")
else:
print("OK")
cur=db.cursor()
cur.execute("insert into Passenger(Email,FullName,Password) values(%s,%s,%s)",(EmailUser, NameUser ,PasswordUser))
db.commit()
flash("SingUp Successfly",category="successfly")
return render_template("Contact.html")
return render_template("SingUp.html")
#-----------------Pagelogin----------------#
@app.route('/User')
def UserContacte():
    """Render the user contact page."""
    return render_template("UserContacte.html")
@app.route('/login',methods=["GET","POST"])
def login():
    """Authenticate a passenger (or the hard-coded admin) and render their page.

    SECURITY: the credential query is parameterized here; the original
    concatenated raw form input into the SQL string (SQL injection).
    NOTE(review): passwords are compared and stored in plain text.
    """
    if request.method=="POST":
        Email=request.form["Email"]
        password=request.form["password"]
        cur = db.cursor()
        # Parameterized lookup instead of string concatenation.
        cur.execute("select Password, Email from Passenger where Password=%s and Email=%s", (password, Email))
        Data=cur.fetchall()
        cur=db.cursor()
        cur.execute("select Schedule.FlightDate,Flight.DepartureTime,Flight.ArrivaTime,Flight.PrixFlight ,Flight.Seat,Sector.Source,Sector.Destination,Aircraft.NameAirport from Schedule inner join Flight on Schedule.idFlight=Flight.idFlight inner join Sector on Sector.idSector=Flight.idSector inner join Aircraft on Aircraft.idAircraft=Flight.idAircraft ")
        SearchFlight=cur.fetchall()
        if len((Data))>0:
            session["loggedinUser"]=True
            session["Email"] = Email
            Email=session["Email"]
            return render_template("PageUser.html",Email=Email,listCountry=listCountry,Search=SearchFlight,ClassPlace=ClassPlace)
        else:
            if Email=="" and password=="":
                flash("Fill all control", category="Confirm")
            else:
                # Hard-coded admin credentials kept for behavior compatibility;
                # should be replaced with a proper role system.
                if ((Email.upper()=="ZORO1889@GMAIL.COM" or Email.lower()=="zoro1889@gmail.com") and (password.upper()=="ADMIN" or password.lower()=="admin")):
                    session["Email"] = Email
                    Email=session["Email"]
                    return redirect(url_for("PagedAdmin"))
                flash("your Password or Email not correct", category="Confirm")
    return render_template("Contact.html")
#------------------logout-------------#
@app.route("/logout")
def logout():
session.clear()
return render_template("Contact.html")
#--------------------------------------PageUser----------------------------------------------------------------------------------#
@app.route('/Contact')
def PageContact():
    """Render the public contact/landing page."""
    return render_template("Contact.html")
@app.route("/Resrvation",methods=["POST","GET"])
def PageResvation():
if request.method=="POST":
NumberPlace=request.form.get("NumberPlace")
DateResrvation=request.form.get("DateResrvation")
Placeclass=request.form.get("Placeclass")
cur=db.cursor()
cur.execute("insert into Reservation(NumberPlace,DateReservation,Class) values(%s,%s,%s)",(NumberPlace,DateResrvation,Placeclass))
db.commit()
return redirect(url_for("PageUser"))
return render_template("PageUser.html")
#---------------------------------------PageAdmin-------------------------------------------------------------------------------#
@app.route('/Admin')
def PagedAdmin():
    """Admin dashboard: aircraft, sectors, flights, schedules, users, reservations."""
    if "Email" in session:
        curAir=db.cursor()
        curAir.execute("select * from Aircraft")
        dataAircraft= curAir.fetchall()
        curSector=db.cursor()
        curSector.execute("select * from Sector")
        dataSector=curSector.fetchall()
        curFlight=db.cursor()
        curFlight.execute("SELECT idFlight,DepartureTime,ArrivaTime,PrixFlight,Seat,flight from Flight ")
        all_Flight=curFlight.fetchall()
        curSchedule=db.cursor()
        # NOTE(review): the join condition Sector.idSector=Flight.idFlight
        # compares a sector id to a flight id; it likely should be
        # Flight.idSector — verify against the schema before relying on it.
        curSchedule.execute("SELECT idSchedule ,FlightDate,NameAirport,Source,Destination,Sector.FirstClass,Sector.EconomyClass ,Sector.BusinessClass FROM Schedule INNER JOIN Flight on Flight.idFlight=Schedule.idFlight INNER join Sector on Sector.idSector=Flight.idFlight INNER JOIN Aircraft on Aircraft.idAircraft=Flight.idAircraft")
        data_Schedule=curSchedule.fetchall()
        curUser=db.cursor()
        curUser.execute("SELECT Email,FullName from Passenger")
        data_passanger=curUser.fetchall()
        curResvation=db.cursor()
        curResvation.execute("select * from Reservation")
        data_Resvation=curResvation.fetchall()
        return render_template("PageAdmin.html",dataAircraft=dataAircraft,dataSector=dataSector,all_Flight=all_Flight ,data_Schedule=data_Schedule,data_passanger=data_passanger,data_Resvation=data_Resvation)
    return render_template("Contact.html")
@app.route('/DashboardAdmin')
def DashboardAdmin():
    """Alias route that redirects to the admin dashboard."""
    return redirect(url_for('PagedAdmin'))
#------------------------------------------------#
#-----------------PageAircraft----------------#
@app.route('/Aircraft')
def PageAircraft():
    """List all aircraft for logged-in users; otherwise show the contact page."""
    if "Email" in session:
        cur = db.cursor()
        cur.execute("select * from Aircraft")
        dataAircraft = cur.fetchall()
        return render_template("Aircraft.html",dataAircraft=dataAircraft)
    return render_template("Contact.html")
#*********AddAircraft**************************#
@app.route("/AddAircraft",methods=["GET","POST"])
def PageAddAircraft():
if request.method=="POST":
Airport=request.form.get('Airport')
FirstClass=request.form.get("FirstClass")
economyClass=request.form.get("economyClass")
BusinessClass=request.form.get("BusinessClass")
cur=db.cursor()
cur.execute("insert into Aircraft(NameAirport,FirstClass ,EconomyClass,BusinessClass) values(%s,%s,%s,%s)",(Airport,FirstClass,economyClass,BusinessClass))
db.commit()
flash("Element Add Successfly", category="successfly")
return redirect(url_for('PageAircraft'))
return render_template("Aircraft.html")
#*********DeleteAircraft**************************#
@app.route("/DeleteAircraft/<string:id>")
def DeleteAircraft(id):
cur = db.cursor()
cur.execute("delete from Aircraft where idAircraft={0}".format(id))
db.commit()
flash("Element is Delete Successfly", category="Confirm")
return redirect(url_for('PageAircraft'))
#*********DeleteAircraft**************************#
@app.route('/EditAircraft/<id>')
def Edit(id):
    """Render the edit form pre-filled with one aircraft row.

    SECURITY: parameterized — the original formatted the route parameter
    into the SQL string (SQL injection).
    """
    cur=db.cursor()
    cur.execute("select * from Aircraft where idAircraft=%s", (id,))
    data=cur.fetchall()
    return render_template("EditAircraft.html",data=data[0])
#*********UpdateAircraft**************************#
@app.route("/updateAircraft/<id>",methods=["POST","GET"])
def UpdateAircraft(id):
Airport = request.form.get('Airport')
FirstClass = request.form.get("FirstClass")
economyClass = request.form.get("economyClass")
BusinessClass = request.form.get("BusinessClass")
if request.method == "POST":
cur=db.cursor()
cur.execute("update Aircraft set NameAirport=%s, FirstClass=%s,EconomyClass=%s,BusinessClass=%s where idAircraft=%s",(Airport,FirstClass,economyClass,BusinessClass,id))
db.commit()
flash("Element Update Successfly", category="successfly")
return redirect(url_for('PageAircraft'))
#------------------------------------------------#
#-----------------PageSector----------------#
@app.route('/Sector')
def PageSector():
    """List all sectors for logged-in users; otherwise show the contact page."""
    # NOTE(review): the query runs before the session check, so the DB is hit
    # even for anonymous visitors.
    curSector = db.cursor()
    curSector.execute("select * from Sector")
    dataSector = curSector.fetchall()
    if "Email" in session:
        return render_template("Sector.html",listCountry=listCountry,listDay=listDay,dataSector=dataSector)
    return render_template("Contact.html")
#************AddSector*************#
@app.route('/AddSector',methods=["POST","GET"])
def AddSector():
    """Insert a new sector (route, weekday, per-class prices) from the admin form."""
    # NOTE(review): the form fields are read even on GET requests; the reads
    # belong inside the POST branch.
    Source=request.form.get("Source")
    Destination=request.form.get("Destination")
    WeekDay=request.form.get("WeekDay")
    FirstClass=request.form.get("FirstClass")
    EconomyClass=request.form.get("EconomyClass")
    BusinessClass=request.form.get("BusinessClass")
    if request.method=="POST":
        cur=db.cursor()
        cur.execute("insert into Sector(Source ,Destination ,WeekDay,FirstClass ,EconomyClass ,BusinessClass ) values(%s,%s,%s,%s,%s,%s)",
                    (Source,Destination,WeekDay,FirstClass,EconomyClass,BusinessClass))
        db.commit()
        flash("Element Add Successfly", category="successfly")
        return redirect(url_for("PageSector"))
    return render_template("Sector.html")
#************DeleteSector*************#
@app.route('/DeleteSector/<string:idSector>')
def DeleteSector(idSector):
    """Delete a sector row by id.

    SECURITY: parameterized — the original formatted the route parameter
    into the SQL string (SQL injection).
    """
    cur=db.cursor()
    cur.execute("delete from Sector where idSector=%s", (idSector,))
    db.commit()
    return redirect(url_for("PageSector"))
#************EditSector*************#
@app.route('/EditSector/<idSector>')
def EditSector(idSector):
    """Render the edit form pre-filled with one sector row.

    SECURITY: parameterized — the original formatted the route parameter
    into the SQL string (SQL injection).
    """
    cur=db.cursor()
    cur.execute("select * from Sector where idSector=%s", (idSector,))
    data=cur.fetchall()
    return render_template("EditSector.html",data=data[0],listCountry=listCountry,listDay=listDay)
#************UpdateSector*************#
@app.route('/UpdateSector/<idSector>',methods=["POST","GET"])
def updateSector(idSector):
    """Update a sector row from the edit form, then return to the sector list."""
    if request.method=="POST":
        Source=request.form.get("Source_")
        Destination=request.form.get("Destination")
        WeekDay=request.form.get("WeekDay")
        FirstClass=request.form.get("FirstClass")
        EconomyClass=request.form.get("EconomyClass")
        BusinessClass=request.form.get("BusinessClass")
        cur=db.cursor()
        cur.execute(" update Sector set Source=%s,Destination=%s,WeekDay=%s,FirstClass=%s,EconomyClass=%s,BusinessClass=%s where idSector=%s",(Source,Destination, WeekDay,FirstClass,EconomyClass,BusinessClass,idSector))
        db.commit()
        flash("Element Update Successfly", category="successfly")
        return redirect(url_for('PageSector'))
    return render_template("Contact.html")
#------------------------------------------------#
#-----------------PageFlights----------------#
@app.route('/Flights')
def PageFlight():
    """List flights with aircraft/sector id pickers for logged-in users."""
    if "Email" in session:
        all_air_data=DisplayAircraft()
        all_sector_data=DisplaySector()
        cur=db.cursor()
        cur.execute("SELECT idFlight,DepartureTime,ArrivaTime,PrixFlight,Seat,flight from Flight ")
        all_Flight=cur.fetchall()
        return render_template("Flights.html",all_air=all_air_data,all_sector=all_sector_data,all_Flight=all_Flight)
    return render_template("Contact.html")
#************AddFlights*************#
@app.route('/AddFlights',methods=["POST","GET"])
def AddFlights():
    """Insert a new flight linking an aircraft and a sector."""
    if request.method=="POST":
        DepartureTime=request.form.get("DepartureTime")
        ArrivalTime=request.form.get("ArrivalTime")
        PrixFlight=request.form.get("PrixFlight")
        AircraftID=request.form.get("AircraftID")
        SectorID=request.form.get("SectorID")
        Seat=request.form.get("Seat")
        Flight=request.form.get("Flight")
        cur=db.cursor()
        cur.execute("insert into Flight(DepartureTime,ArrivaTime,PrixFlight,idAircraft,idSector,Seat,flight) values(%s,%s,%s,%s,%s,%s,%s)",
                    (DepartureTime,ArrivalTime,PrixFlight,AircraftID,SectorID,Seat,Flight))
        # NOTE(review): success is flashed before commit; a failed commit
        # would still have shown a success message.
        flash("Element Add Successfly", category="successfly")
        db.commit()
        return redirect(url_for("PageFlight"))
    return render_template("Contact.html")
#************DeleteFlights*************#
@app.route('/DeleteFlights/<string:idFlights>')
def DeleteFlights(idFlights):
    """Delete a flight row by id.

    SECURITY: parameterized — the original formatted the route parameter
    into the SQL string (SQL injection).
    """
    cur=db.cursor()
    cur.execute("delete from Flight where idFlight=%s", (idFlights,))
    db.commit()
    flash("Element Delte Successfly", category="successfly")
    return redirect(url_for("PageFlight"))
#************EditFlights*************#
@app.route('/EditFlights/<idFlights>')
def EditFlights(idFlights):
    """Render the edit form pre-filled with one flight row.

    SECURITY: parameterized — the original formatted the route parameter
    into the SQL string (SQL injection).
    """
    all_air_data=DisplayAircraft()
    all_sector_data=DisplaySector()
    cur=db.cursor()
    cur.execute("select * from Flight where idFlight=%s", (idFlights,))
    all_data_Flight=cur.fetchall()
    return render_template("EditFlight.html",all_data_Flight=all_data_Flight[0],all_air_data=all_air_data,all_sector_data=all_sector_data)
#************UpdateFlights*************#
@app.route('/UpdateFlights/<all_data_Flight>',methods=['POST','GET'])
def updateFlights(all_data_Flight):
    """Update a flight row; the route parameter is the flight id.

    NOTE(review): the parameter name `all_data_Flight` actually carries the
    flight id — consider renaming for clarity.
    """
    if request.method=="POST":
        DepartureTime=request.form.get("DepartureTime")
        ArrivalTime=request.form.get("ArrivalTime")
        PrixFlight=request.form.get("PrixFlight")
        AircraftID=request.form.get("AircraftID")
        SectorID=request.form.get("SectorID")
        Seat=request.form.get("Seat")
        Flight=request.form.get("Flight")
        cur=db.cursor()
        cur.execute("update Flight set DepartureTime=%s,ArrivaTime=%s,prixFlight=%s,idAircraft=%s,idSector=%s,Seat=%s,flight=%s where idFlight=%s",(DepartureTime,ArrivalTime,PrixFlight,AircraftID,SectorID,Seat,Flight,all_data_Flight))
        db.commit()
        return redirect(url_for("PageFlight"))
    return render_template("Contact.html")
#---------------------PageSchedule-------------------------------#
@app.route('/Schedule')
def PageSchedule():
    """List schedules (with flight picker) for logged-in users."""
    if "Email" in session:
        data_Flight=DisplayFlight()
        cur=db.cursor()
        cur.execute("SELECT idSchedule ,FlightDate,FirstClass,EconomyClass ,BusinessClass FROM Schedule INNER JOIN Flight on Flight.idFlight=Schedule.idFlight INNER join Sector on Sector.idSector=Flight.idSector ")
        data_Schedule=cur.fetchall()
        return render_template("Schedule.html",data_Flight=data_Flight,data_Schedule=data_Schedule)
    return render_template("Contact.html")
#************AddSchedule*************#
@app.route('/AddSchedule',methods=['POST',"GET"])
def AddSchedule():
    """Insert a schedule entry (date + flight id) from the admin form."""
    if request.method=="POST":
        FlightDate=request.form.get('FlightDate')
        idFlight=request.form.get("idFlight")
        cur=db.cursor()
        cur.execute("insert into Schedule(FlightDate,idFlight) values(%s,%s)",(FlightDate,idFlight))
        db.commit()
        flash("Element Add Successfly", category="successfly")
        return redirect(url_for("PageSchedule"))
    return render_template("Contact.html")
#************DeleteSchedule*************#
@app.route('/DeleteSchedule/<string:idSchedule>')
def DeleteSchedule(idSchedule):
    """Delete a schedule row by id.

    SECURITY: parameterized — the original formatted the route parameter
    into the SQL string (SQL injection).
    """
    cur=db.cursor()
    cur.execute("delete from Schedule where idSchedule=%s", (idSchedule,))
    db.commit()
    return redirect(url_for("PageSchedule"))
#************EditSchedule*************#
@app.route('/EditSchedule/<idSchedule>')
def EditSchedule(idSchedule):
    """Render the edit form pre-filled with one schedule row.

    SECURITY: parameterized — the original formatted the route parameter
    into the SQL string (SQL injection).
    """
    data_Flight=DisplayFlight()
    cur=db.cursor()
    cur.execute("select * from Schedule where idSchedule=%s", (idSchedule,))
    dataSchedule=cur.fetchall()
    return render_template("EditSchedule.html",dataSchedule=dataSchedule[0],data_Flight=data_Flight)
#************UpdateSchedule*************#
@app.route('/UpdateSchedule/<idSchedule>',methods=["POST","GET"])
def UpdateSchedule(idSchedule):
    """Update a schedule row (date + flight id), then return to the list."""
    if request.method=="POST":
        FlightDate=request.form.get('FlightDate')
        idFlight=request.form.get("idFlight")
        cur=db.cursor()
        cur.execute("update Schedule set FlightDate=%s,idFlight=%s where idSchedule=%s",(FlightDate,idFlight,idSchedule))
        db.commit()
        return redirect(url_for("PageSchedule"))
    return render_template("Contact.html")
#***************UserConnect****************#
@app.route('/UserConnect')
def UserConnect():
    """List registered passengers for logged-in users."""
    if "Email" in session:
        cur=db.cursor()
        cur.execute("select idPassenger,Email,FullName from Passenger")
        data_passanger=cur.fetchall()
        return render_template("UserContacte.html",data_passanger=data_passanger)
    return render_template("Contact.html")
#***************DelteUserConnect****************#fff
@app.route('/DeleteUserConnect/<string:idUser>')
def UserDeleteConnect(idUser):
    """Delete a passenger row by id.

    SECURITY: parameterized — the original formatted the route parameter
    into the SQL string (SQL injection).
    """
    cur=db.cursor()
    cur.execute("delete from Passenger where idPassenger=%s", (idUser,))
    db.commit()
    return redirect(url_for("UserConnect"))
if __name__=="__main__":
app.run(debug=True)
#-----------------endproject----------------#
| zakariyae1889/AirportWeb | Views.py | Views.py | py | 19,623 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "mysql.connector.connector.connect",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 5,
"usage_type": "name"
},
{
"api... |
16822733733 | import requests
# Replace this value with your own Discord API token
TOKEN = input("Enter your Discord API token: ")
headers = {
    "Authorization": f"Bot {TOKEN}",
    "User-Agent": "MyBot/1.0",
}
# NOTE(review): everything below looks like a line-for-line port of a
# browser-console JavaScript snippet into Python. JSON responses are plain
# dicts, so the hasattr()/attribute accesses (x["exports"], .name,
# .actionHandler, .__proto__) cannot work as written, and the
# /webpack/discord_app endpoint is not a documented Discord REST route —
# this script will not run as-is. Flagged for rework or removal.
# Send a GET request to the Discord API to retrieve the webpack chunk data
response = requests.get("https://discordapp.com/api/v6/webpack/discord_app", headers=headers)
data = response.json()
# Extract the required data from the webpack chunk
wp_require = data[0][1]
mod = next(x for x in wp_require["c"].values() if hasattr(x["exports"], "default") and hasattr(x["exports"]["default"], "isDeveloper"))
user_mod = next(x for x in wp_require["c"].values() if hasattr(x["exports"], "default") and hasattr(x["exports"]["default"], "getUsers"))
nodes = list(mod["exports"]["default"]._dispatcher._actionHandlers._dependencyGraph.nodes.values())
# Try to execute the first part of the code
try:
    experiment_store = next(x for x in nodes if x.name == "ExperimentStore")
    experiment_store.actionHandler["CONNECTION_OPEN"]({"user": {"flags": 1}, "type": "CONNECTION_OPEN"})
except Exception as e:
    pass
# Execute the second part of the code
old_get_user = user_mod["exports"]["default"].__proto__.getCurrentUser
user_mod["exports"]["default"].__proto__.getCurrentUser = lambda: {"hasFlag": lambda: True}
developer_experiment_store = next(x for x in nodes if x.name == "DeveloperExperimentStore")
developer_experiment_store.actionHandler["CONNECTION_OPEN"]()
user_mod["exports"]["default"].__proto__.getCurrentUser = old_get_user
# This code allows for you to hear and speak while muted and deafened
| catgirlasn/discord | eavesdrop.py | eavesdrop.py | py | 1,607 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
}
] |
29808972190 | import webapp2
import json
import nltk_utils
class MainHandler(webapp2.RequestHandler):
    """NLTK demo endpoint: GET shows a banner, POST returns noun frequencies."""

    def renderJSON(self, dictionary):
        """Serialize `dictionary` as JSON and write it with a JSON content type."""
        payload = json.dumps(dictionary)
        self.response.headers["Content-Type"] = "application/json; charset=UTF-8"
        self.response.write(payload)

    def get(self):
        self.response.write('NLTK demo project')

    def post(self):
        """Tag the posted `text` and respond with {noun: frequency} counts.

        `resultsLimit` (optional) caps how many tag groups are considered;
        0 means no limit per nltk_utils.findTags.
        """
        input_text = self.request.get("text")
        limit_param = self.request.get("resultsLimit")
        counts = {}
        if input_text and not input_text.isspace():
            max_results = int(limit_param) if limit_param else 0
            tags, freq_dist = nltk_utils.getTagsAndFreqDist(input_text)
            noun_groups = nltk_utils.findTags('NN', tags, max_results)
            for tag in noun_groups:
                for word in noun_groups[tag]:
                    counts[word] = counts.get(word, 0) + freq_dist[word]
        self.renderJSON(counts)
self.renderJSON(dictionary)
app = webapp2.WSGIApplication([
('/', MainHandler)
], debug=True)
| sivu22/nltk-on-gae | GAE/main.py | main.py | py | 1,183 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "webapp2.RequestHandler",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "nltk_utils.getTagsAndFreqDist",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "nl... |
7291178560 | from pyramid.config import Configurator
from sqlalchemy import engine_from_config
from os import environ
from api.models.sql_automap import DBSession, Base
def main(global_config, **settings):
    """Pyramid WSGI application factory.

    Reads the DB connection string (STR_CONNECTION) and public URL
    (APPLICATION_URL) from the environment with local-dev defaults, binds the
    automapped SQLAlchemy session, registers the TSV renderer and the SUSHI
    routes, and returns the WSGI app.
    """
    sqlalchemy_url_value = environ.get('STR_CONNECTION', 'mysql://root:pass@172.17.0.3:3306/matomo')
    settings.update({'sqlalchemy.url': sqlalchemy_url_value})
    application_url_value = environ.get('APPLICATION_URL', 'http://127.0.0.1:6543')
    settings.update({'application.url': application_url_value})
    config = Configurator(settings=settings)
    # pool_recycle avoids MySQL "server has gone away" on long-idle connections.
    engine = engine_from_config(settings, 'sqlalchemy.', pool_recycle=1800)
    DBSession.configure(bind=engine)
    Base.metadata.bind = engine
    config.add_renderer('tsv', 'api.renderers.TSVRenderer')
    config.add_route('home', '/')
    config.add_route('status', '/status')
    config.add_route('members', '/members')
    config.add_route('reports', '/reports')
    config.add_route('reports_report_id', '/reports/{report_id}')
    config.scan('.views')
    return config.make_wsgi_app()
| scieloorg/scielo-sushi-api | api/__init__.py | __init__.py | py | 1,052 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "os.environ.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number"... |
10496870550 | from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from onnx_tf.backend import prepare as prepare_onnx_model
import tensorflow as tf
import argparse
import onnx
# Command-line interface: the ONNX model to read, where to write the converted
# TensorFlow artifacts, and which GPU the conversion should run on.
parser = argparse.ArgumentParser()
parser.add_argument("--onnx_dir", type=str, help="Path where ONNX models are stored (.onnx)",
                    default='your_onnx_model_dir.onnx')
parser.add_argument("--output_dir", type=str, help="Path to save the converted model with tensorflow",
                    default='onnx2tf_converted/')
parser.add_argument("--gpu_num", type=int, help="Specify the GPU to perform the conversion on",
                    default=0)
args = parser.parse_args()
if __name__ == '__main__':
    # Pin every TF op created below to the requested GPU.
    gpu_number = '/device:GPU:' + str(args.gpu_num)
    with tf.device(gpu_number):
        """
        ONNX -> Tensorflow saved model
        """
        # Load the ONNX model and convert it to a tensorflow saved model.
        onnx_model = onnx.load(args.onnx_dir)
        onnx2tf_model = prepare_onnx_model(onnx_model)
        onnx2tf_model.export_graph(args.output_dir + 'onnx2tf_model')
        """
        Tensorflow savedf model -> Tensorflow frozen graph
        """
        # Load the saved tensorflow saved model.
        model = tf.saved_model.load(args.output_dir + 'onnx2tf_model')
        # Convert to frozen graph.
        frozen_out_path = args.output_dir + 'frozen_graph_result'
        # Set name of the frozen graph (.pb) file
        frozen_graph_filename = 'frozen_graph'
        # Wrap the SavedModel in a tf.function and trace it with the shape and
        # dtype taken from the model's own serving signature.
        full_model = tf.function(lambda x: model(images=x)) # full model
        full_model = full_model.get_concrete_function(
            tf.TensorSpec(model.signatures['serving_default'].inputs[0].shape.as_list(),
                          model.signatures['serving_default'].inputs[0].dtype.name))
        # Get frozen ConcreteFunction
        frozen_func = convert_variables_to_constants_v2(full_model)
        frozen_func.graph.as_graph_def()
        layers = [op.name for op in frozen_func.graph.get_operations()]
        print("Frozen model layers: ")
        for layer in layers:
            print(layer)
        print("Frozen model inputs: {0}".format(frozen_func.inputs))
        print("Frozen model outputs: {0}".format(frozen_func.outputs))
        # Save frozen graph: both binary (.pb) and text (.pbtxt) forms.
        tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
                          logdir=frozen_out_path,
                          name=f"{frozen_graph_filename}.pb",
                          as_text=False)
        tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
                          logdir=frozen_out_path,
                          name=f"{frozen_graph_filename}.pbtxt",
                          as_text=True)
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "tensorflow.device",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "onnx.load",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "onnx_tf.backend.pr... |
5392824044 | import cv2
import os
import pydicom
import argparse
from tqdm import tqdm
# CLI: choose whether to convert the train or the test split of DICOM images.
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--type', type=str, required=True,
                    choices=['train', 'test'], help='whether to convert train images or test images')
args = vars(parser.parse_args())
# Select the split's input/output directories; argparse's `choices` guarantees
# exactly one of the two branches runs.
if args['type'] == 'train':
    print('Converting train images from .dcm to .jpg...')
    inputdir = 'input/stage_2_train_images/'
    outdir = 'input/images'
elif args['type'] == 'test':
    print('Converting test images from .dcm to .jpg...')
    inputdir = 'input/stage_2_test_images/'
    outdir = 'input/samples'
os.makedirs(outdir, exist_ok=True)
train_list = [f for f in os.listdir(inputdir)]
# Convert each .dcm to a .jpg with the same base name.
for i, f in tqdm(enumerate(train_list[:]), total=len(train_list)):
    ds = pydicom.read_file(inputdir + f) # read dicom image
    img = ds.pixel_array # get image array
    # img = cv2.resize(img, (416, 416))
    cv2.imwrite(os.path.join(outdir, f.replace('.dcm','.jpg')), img) # write jpg image
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_n... |
3650828458 | from flask import Blueprint, request, send_from_directory, Response
from config import IS_LOCALHOST
import yaml
import dotenv
from os import getenv
# Load variables from a local .env file into the process environment.
dotenv.load_dotenv()
# Blueprint exposing the ChatGPT-plugin well-known routes.
bp = Blueprint("plugin", __name__)
print('localhost started? ', IS_LOCALHOST)
# OAuth / plugin-verification settings; all three are required in production.
AUTHO_CLIENT_URL = getenv('AUTHO_CLIENT_URL')
AUTHO_AUTHORIZATION_URL = getenv('AUTHO_AUTHORIZATION_URL')
OPENAI_VERIFICATION_TOKEN = getenv('OPENAI_VERIFICATION_TOKEN')
if not (AUTHO_CLIENT_URL and AUTHO_AUTHORIZATION_URL and OPENAI_VERIFICATION_TOKEN):
    print('WARNING: THIS WILL NOT WORK ON PRODUCTION WITHOUT AUTHO_CLIENT_URL, AUTHO_AUTHORIZATION_URL, and OPENAI_VERIFICATION_TOKEN ENV VARIABLES SET')
@bp.route("/.well-known/ai-plugin.json", methods=["GET"])
def get_ai_plugin():
    """Serve the ChatGPT plugin manifest.

    Locally only the host placeholder is substituted into the local
    template; in production the OAuth URLs and the OpenAI verification
    token are substituted into the production template as well.
    """
    host = request.headers['Host']
    print('host: ', host)
    if IS_LOCALHOST:
        print('GIVING LOCALHOST YAMAL')
        with open('./plugin/manifest_local.json', 'r') as f:
            text = f.read()
            # Local dev always runs on port 5000 over plain HTTP.
            text = text.replace("PLUGIN_HOSTNAME", "http://localhost:5000")
    else:
        print('rendering prod manifest')
        with open('./plugin/manifest.json', 'r') as f:
            text = f.read()
            text = text.replace("PLUGIN_HOSTNAME", f"https://{host}")
            # NOTE(review): placeholder spellings mix "AUTHO" and "AUTH0";
            # they must match the strings inside manifest.json exactly —
            # confirm against the template.
            text = text.replace("AUTHO_CLIENT_URL", AUTHO_CLIENT_URL)
            text = text.replace("AUTH0_AUTHORIZATION_URL", AUTHO_AUTHORIZATION_URL)
            text = text.replace("OPENAI_VERIFICATION_TOKEN", OPENAI_VERIFICATION_TOKEN)
    return Response(text, mimetype="text/json")
@bp.route("/.well-known/ai-plugin2.json", methods=["GET"])
def get_ai_plugin2():
    """Serve the production plugin manifest with all placeholders substituted."""
    current_host = request.headers['Host']
    with open('./plugin/manifest.json', 'r') as manifest_file:
        body = manifest_file.read()
    # Substitute each template placeholder in the same order as before.
    substitutions = {
        "PLUGIN_HOSTNAME": f"https://{current_host}",
        "AUTHO_CLIENT_URL": AUTHO_CLIENT_URL,
        "AUTH0_AUTHORIZATION_URL": AUTHO_AUTHORIZATION_URL,
        "OPENAI_VERIFICATION_TOKEN": OPENAI_VERIFICATION_TOKEN,
    }
    for placeholder, value in substitutions.items():
        body = body.replace(placeholder, value)
    return Response(body, mimetype="text/json")
@bp.route("/openapi.yaml", methods=["GET"])
def get_openapi():
    """Serve the OpenAPI spec with the plugin hostname substituted in."""
    with open("./plugin/openapi.yaml") as f:
        host = request.headers['Host']
        text = f.read()
        if IS_LOCALHOST:
            text = text.replace("PLUGIN_HOSTNAME", "http://localhost:5000")
        else:
            text = text.replace("PLUGIN_HOSTNAME", f"https://{host}")
        # Parse the substituted YAML purely as a sanity check; the parsed
        # dict is unused — only the raw text is returned to the client.
        yaml_dict = yaml.load(text, Loader=yaml.FullLoader)
        print('yaml good')
    return Response(text, mimetype="text/yaml")
@bp.route("/logo.jpeg", methods=["GET"])
def get_logo():
    """Serve the plugin logo image from the static directory."""
    print('getting logo')
    static_dir, logo_name = './static', 'logo.png'
    # NOTE(review): the route says .jpeg but the served file is logo.png —
    # confirm this mismatch is intentional.
    return send_from_directory(static_dir, logo_name)
| matthewlouisbrockman/the_one_plugin | backend/plugin/plugin_routes.py | plugin_routes.py | py | 2,688 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.Blueprint",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "config.IS_LOCALHOST",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "os.getenv",
... |
16402536741 | import sqlite3 as s3
db_name = "/home/egws/ESCAPE_GAMES"
def create_table(room):
    """Create the booking table for *room* if it does not already exist.

    The table stores one row per time slot: society, date, time and status.
    NOTE: the table name is interpolated into the SQL text (sqlite3 cannot
    bind identifiers as parameters), so *room* must come from a trusted
    source.
    """
    try:
        db = s3.connect(db_name)
    except:
        # Fix: the original fell through with `db` unbound, so the function
        # later crashed with an uncaught NameError at db.close(); bail out
        # explicitly after reporting the connection failure instead.
        print("Connexion à la base " + db_name + " impossible")
        return
    try:
        cursor = db.cursor()
        try:
            cursor.execute("""
            CREATE TABLE IF NOT EXISTS '""" + room + """'(
                 id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,
                 society TEXT,
                 date TEXT,
                 time TEXT,
                 status TEXT
            )
            """)
        except:
            print("Error: Can't create the SQL request")
        try:
            db.commit()
        except:
            print("Error: Can't commit the SQL request")
    except:
        print("Error: Table cannot be created")
    db.close()
def drop_table(room):
    """Drop room table from the database"""
    # NOTE(review): if the connection fails, `db` is never bound and the
    # second try block raises NameError — which the bare except below
    # catches, so only the "Can't drop table" message is printed.
    try:
        db = s3.connect(db_name)
    except:
        print('Error "drop_table()" : Can\'t connect to DB')
    try:
        cursor = db.cursor()
        # The table name is interpolated into the SQL text (sqlite cannot
        # bind identifiers), so `room` must come from a trusted source.
        cursor.execute("""DROP TABLE '""" + room + """'""")
        db.commit()
        db.close()
    except:
        print('Error "drop_table()" : Can\'t drop table')
def get_all_datas(room):
    """Return every booking row stored in *room*'s table."""
    connection = s3.connect(db_name)
    cur = connection.cursor()
    cur.execute("""
    SELECT * FROM '""" + room + """'
    """)
    rows = cur.fetchall()
    # commit() is a no-op for a SELECT but is kept to mirror the module's
    # connect/execute/commit/close pattern.
    connection.commit()
    connection.close()
    return rows
def add_datas(room, datas):
    """Insert or update booking rows in *room*'s table.

    Each line of *datas* is (society, _, date, time, status). A slot that
    already exists (same date and time, per check_entry) gets its status
    updated; otherwise a new row is inserted.

    NOTE: the table name is interpolated into the SQL (sqlite cannot bind
    identifiers), so *room* must come from a trusted source.
    """
    db = s3.connect(db_name)
    cursor = db.cursor()
    for line in datas:
        info = [line[0], line[2], line[3], line[4]]
        check = check_entry(room, line[2], line[3], line[4])
        if check[0] == False:
            cursor.execute("""INSERT INTO '""" + room + """' (society, date, time, status) VALUES(?, ?, ?, ?)""", info)
        else:
            # Fix: bind status and the row id as parameters instead of
            # string-concatenating them (avoids SQL injection/quoting bugs).
            cursor.execute("""UPDATE '""" + room + """' SET status = ? WHERE id = ?""",
                           (line[4], check[1][0][0]))
    db.commit()
    db.close()
def check_entry(room, date, time, status):
    """Return (exists, rows) for the slot (*date*, *time*) in *room*'s table.

    `status` is unused but kept for interface compatibility with callers.
    NOTE: the table name is still interpolated into the SQL (sqlite cannot
    bind identifiers), so *room* must come from a trusted source.
    """
    db = s3.connect(db_name)
    cursor = db.cursor()
    # Fix: bind date/time as parameters instead of string-concatenating them
    # (avoids SQL injection and quoting bugs).
    cursor.execute("""SELECT * FROM '""" + room + """' WHERE date=? AND time=?""", (date, time))
    res = cursor.fetchall()
    db.close()  # the original leaked this connection
    return (len(res) > 0, res)
| piment/egws | database.py | database.py | py | 2,445 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
... |
74774265705 | import sys
import treeswift as ts
from sys import platform as _platform
import tempfile
from subprocess import Popen, PIPE
import pkg_resources
import time
import logging
def print_ident(tree):
    """Print the post-order node count of each root child, then a blank line."""
    for child in tree.root.children:
        subtree_size = sum(1 for _ in child.traverse_postorder())
        print(subtree_size)
    print("")
def reestimate_backbone(options):
    """Re-estimate backbone branch lengths with the bundled FastTree binary.

    Reads ``options.tree_fp`` (newick), resolves polytomies, runs FastTree
    with a fixed topology (-nome -noml: branch lengths only) feeding
    ``options.ref_fp`` on stdin, restores the original rooting when the
    input was rooted and fully branch-lengthed, and finally points
    ``options.tree_fp`` at the re-estimated tree file.

    :param options: namespace with tree_fp, ref_fp and protein_seqs.
    :raises ValueError: when the current platform has no bundled binary.
    """
    assert options.ref_fp
    start = time.time()

    orig_branch_tree = ts.read_tree(options.tree_fp, schema='newick')
    # More than two children at the root means the input tree was unrooted.
    rooted = len(orig_branch_tree.root.children) <= 2
    orig_branch_tree.suppress_unifurcations()
    if len(orig_branch_tree.root.children) > 3:
        # polytomy at the root: resolve everywhere
        orig_branch_tree.resolve_polytomies()
    else:
        # root node is ok; resolve polytomies below each root child only
        for child in orig_branch_tree.root.children:
            child.resolve_polytomies()

    all_branches_have_length = True
    for n in orig_branch_tree.traverse_postorder(internal=True, leaves=True):
        if not n.is_root() and n.edge_length is None:
            all_branches_have_length = False
            break

    if rooted and all_branches_have_length:
        # Remember one leaf per side of the root plus the two root branch
        # lengths so the original rooting can be reinstated afterwards
        # (FastTree's output is unrooted).
        left, right = orig_branch_tree.root.children
        if left.children:
            thetwo = [next(c.traverse_postorder(internal=False)) for c in left.children]
            theone = [next(right.traverse_postorder(internal=False))]
            lengthtwoside = left.edge_length
            lengthoneside = right.edge_length
        else:
            thetwo = [next(c.traverse_postorder(internal=False)) for c in right.children]
            theone = [next(left.traverse_postorder(internal=False))]
            lengthtwoside = right.edge_length
            lengthoneside = left.edge_length

    orig_branch_resolved_fp = tempfile.NamedTemporaryFile(delete=True, mode='w+t').name
    orig_branch_tree.write_tree_newick(orig_branch_resolved_fp)

    # Pick the FastTree binary bundled for the current platform.
    if _platform == "darwin":
        fasttree_exec = pkg_resources.resource_filename('apples', "tools/FastTree-darwin")
    elif _platform == "linux" or _platform == "linux2":
        fasttree_exec = pkg_resources.resource_filename('apples', "tools/FastTree-linux")
    elif _platform == "win32" or _platform == "win64" or _platform == "msys":
        fasttree_exec = pkg_resources.resource_filename('apples', "tools/FastTree.exe")
    else:
        # Unrecognised system.
        # Fix: the original used '%'-interpolation on a '{}' template, which
        # raised TypeError instead of the intended ValueError.
        raise ValueError('Your system {} is not supported yet.'.format(_platform))

    bb_fp = tempfile.NamedTemporaryFile(delete=False, mode='w+t')
    fasttree_log = tempfile.NamedTemporaryFile(delete=False, mode='w+t').name
    logging.info("FastTree log file is located here: %s" % fasttree_log)
    s = [fasttree_exec, "-nosupport", "-nome", "-noml", "-log", fasttree_log,
         "-intree", orig_branch_resolved_fp]
    if not options.protein_seqs:
        s.append("-nt")
    with open(options.ref_fp, "r") as rf:
        with Popen(s, stdout=PIPE, stdin=rf, stderr=sys.stderr) as p:
            tree_string = p.stdout.read().decode('utf-8')

    if rooted and all_branches_have_length:
        # Reinstate the original rooting on FastTree's unrooted output.
        ft = ts.read_tree_newick(tree_string)
        for n in ft.traverse_postorder(internal=False):
            if n.label == theone[0].label:
                theone_inft = n
                break
        ft.reroot(theone_inft)
        mrca = ft.mrca([n.label for n in thetwo])
        mrca_edge_length = mrca.edge_length
        ft.reroot(mrca, length=mrca_edge_length/2)
        if lengthtwoside + lengthoneside > 0:
            # Split the root edge in the original left/right proportions.
            for i in range(2):
                if ft.root.children[i] == mrca:
                    ft.root.children[i].edge_length = mrca_edge_length*lengthtwoside/(lengthtwoside+lengthoneside)
                    ft.root.children[1-i].edge_length = mrca_edge_length*lengthoneside/(lengthtwoside+lengthoneside)
        ft.is_rooted = False
        tree_string = str(ft)

    with open(bb_fp.name, "w") as ntree:
        ntree.write(tree_string.strip())
        ntree.write("\n")
    options.tree_fp = bb_fp.name
    logging.info(
        "[%s] Reestimated branch lengths in %.3f seconds." % (time.strftime("%H:%M:%S"), (time.time() - start)))
| balabanmetin/apples | apples/reestimateBackbone.py | reestimateBackbone.py | py | 4,332 | python | en | code | 22 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "treeswift.read_tree",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tempfile.NamedTemporaryFile",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "sys.platfor... |
22755402141 | import privateConfig
from selenium.webdriver.chrome.options import Options
# Target site and listing page scraped by the bot.
URL_PISOS = "https://www.pisos.com"
URL_PLACE = "/venta/pisos-esparreguera/"
URL_LOG_IN = "https://www.pisos.com/Login"
# SECURITY NOTE(review): credentials are hard-coded in plain text; they
# should be moved to environment variables or a secrets store.
USER_PISOS = "informe.casas@gmail.com"
PW_PISOS = "17InformeCasas"
USER_MAIL = "informe.casas@gmail.com"
PW_MAIL = "17InformeCasas"
# SMTP settings used to e-mail the generated report.
SMTP_SERVER = 'smtp.gmail.com'
SMTP_PORT = 587
FILEPATH = './excels/houses_dataframe.csv'  # where the scraped dataframe is stored
MAIL_TO_SEND = "informe.casas@gmail.com"  # report recipient
TEST_MODE = False  # presumably limits the run when True — TODO confirm usage
MAX_WORKERS = 1  # concurrency level for the scraper
def get_Chrome_Options():
    """Build the Chrome options used by the scraper.

    Returns a selenium ``Options`` object configured for headless scraping:
    no sandbox, GPU disabled, images blocked, fixed window size.
    """
    WINDOW_SIZE = "1920,1080"
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    chrome_options.add_argument("--no-sandbox")
    chrome_options.add_argument("--disable-gpu")
    # Block image loading to speed up page fetches.
    chrome_options.add_experimental_option("prefs", {'profile.managed_default_content_settings.images':2})
    # Fix: the flag was misspelled "--remote-debugin-port", which Chrome
    # silently ignored; the correct switch is "--remote-debugging-port".
    chrome_options.add_argument("--remote-debugging-port=9222")
    chrome_options.add_argument("--window-size=%s" % WINDOW_SIZE)
    if privateConfig.PathNeeded:
        # NOTE(review): the setting is named ChromeDriverPath but is assigned
        # to binary_location (the Chrome *browser* binary) — confirm it points
        # at the browser, not the chromedriver executable.
        chrome_options.binary_location = privateConfig.ChromeDriverPath
    return chrome_options
| paucampana/pisosScrapper | app/src/config.py | config.py | py | 1,095 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "privateConfig.PathNeeded",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "privateConfig.ChromeDriverPath",
"line_number": 31,
"usage_type":... |
21143828586 | import torch
from ipdb import set_trace
import torch.nn as nn
import torch.nn.functional as F
import logging
from torch.nn.utils.rnn import pad_sequence
from config import MyBertConfig
from src.models.bert_model import BaseBert
from src.models.flash import GAU
from src.ner_predicate import vote, span_predicate
from utils.train_utils import load_model
from utils.loss_utils import LabelSmoothingCrossEntropy, FocalLoss
logger = logging.getLogger('main.bert_span')
class InterBertSpan(BaseBert):
    """Span-based NER head with start->end interaction on top of BERT.

    ``config.inter_scheme`` selects how the start-position logits are fed
    back into the end-position classifier (several fusion schemes below).
    """
    def __init__(self, config: MyBertConfig):
        """Build the encoder head for the configured interaction scheme.

        :param config: model configuration; ``num_span_class`` is the number
            of span classes (2 for plain binary start/end classification),
            ``inter_scheme`` selects the fusion scheme, and
            ``span_loss_type`` selects the loss ('ce', 'ls_ce' or 'focal').
        """
        super(InterBertSpan, self).__init__(config)
        # num_tags == 2 here because there is a single entity class (disease).
        self.config = config
        self.num_tags = config.num_span_class
        self.scheme = config.inter_scheme
        out_dims = self.bert_config.hidden_size
        mid_linear_dims = 128
        # TODO: compare results without the ReLU activation; try other activations.
        if self.scheme in [1,2,3,5]:
            self.mid_linear = nn.Sequential(
                nn.Linear(out_dims, mid_linear_dims),
                nn.Dropout(config.dropout_prob)
            )
            # NOTE(review): mid_linear outputs mid_linear_dims features but
            # out_dims is set to twice that — confirm these schemes still run.
            out_dims = mid_linear_dims * 2
        elif self.scheme == 6:
            # Bidirectional LSTM encoder; output size is 2 * mid_linear_dims.
            self.mid_linear = nn.LSTM(out_dims, mid_linear_dims, batch_first=True, bidirectional=True,num_layers=2, dropout=0.5)
            self.dropout = nn.Dropout(0.5)
            out_dims = mid_linear_dims * 2
        elif self.scheme == 7:
            # Gated attention unit encoder (keeps BERT's 768-dim output).
            self.mid_linear = GAU(dim=768,dropout=0.4)
            self.dropout = nn.Dropout(0.5)
            out_dims = 768
        elif self.scheme == 8:
            # Output shape = (batch_size, seq_len, mid_linear_dims).
            mid_linear_dims = 256
            self.start_mid_linear = nn.Sequential(
                nn.Linear(out_dims, mid_linear_dims),
                nn.Dropout(config.dropout_prob)
            )
            self.end_mid_linear = nn.Sequential(
                nn.Linear(out_dims, mid_linear_dims),
                nn.Dropout(config.dropout_prob)
            )
            out_dims = mid_linear_dims * 2
        if self.scheme == 1 or self.scheme == 5:
            self.inter_linear = nn.Linear(self.num_tags,out_dims)
            self.start_fc = nn.Linear(out_dims, self.num_tags)
            self.end_fc = nn.Linear(out_dims, self.num_tags)
            init_blocks = [self.mid_linear, self.start_fc, self.end_fc, self.inter_linear]
        elif self.scheme == 2:
            self.inter_linear = nn.Linear(self.num_tags, out_dims)
            self.start_fc = nn.Linear(out_dims, self.num_tags)
            self.end_fc = nn.Linear(out_dims*2, self.num_tags)
            init_blocks = [self.mid_linear, self.start_fc, self.end_fc, self.inter_linear]
        elif self.scheme == 3:
            self.mid_linear = nn.Sequential(
                nn.Linear(out_dims, mid_linear_dims),
                nn.Dropout(config.dropout_prob),
                nn.LeakyReLU(),
            )
            self.inter_linear = nn.Linear(self.num_tags, 100)
            self.inter_linear2 = nn.Linear(100, out_dims)
            self.start_fc = nn.Linear(out_dims, self.num_tags)
            self.end_fc = nn.Linear(out_dims * 2, self.num_tags)
            init_blocks = [self.mid_linear, self.start_fc, self.end_fc, self.inter_linear]
        elif self.scheme == 4:
            self.inter_linear = nn.LSTM(self.num_tags, out_dims//2, batch_first=True, bidirectional=True,
                                        num_layers=2, dropout=0.5)
            self.start_fc = nn.Linear(out_dims, self.num_tags)
            self.end_fc = nn.Linear(out_dims, self.num_tags)
            init_blocks = [self.mid_linear, self.start_fc, self.end_fc, self.inter_linear]
        elif self.scheme == 6 or self.scheme == 7:
            self.inter_linear = nn.Linear(self.num_tags, out_dims)
            self.start_fc = nn.Linear(out_dims, self.num_tags)
            self.end_fc = nn.Linear(out_dims, self.num_tags)
            init_blocks = [self.mid_linear, self.start_fc, self.end_fc, self.inter_linear]
        elif self.scheme == 8:
            self.inter_linear = nn.Linear(mid_linear_dims, mid_linear_dims)
            self.start_fc = nn.Linear(mid_linear_dims, self.num_tags)
            self.end_fc = nn.Linear(mid_linear_dims, self.num_tags)
            init_blocks = [self.start_mid_linear,self.end_mid_linear, self.start_fc, self.end_fc, self.inter_linear]
        elif self.scheme == 11:
            self.mid_linear = nn.Sequential(
                nn.Linear(out_dims, mid_linear_dims),
                nn.Dropout(config.dropout_prob)
            )
            self.inter_linear = nn.Sequential(
                nn.Linear(self.num_tags,mid_linear_dims),
                nn.LeakyReLU(),
                nn.Dropout(0.1),
            )
            self.start_fc = nn.Linear(mid_linear_dims, self.num_tags)
            self.end_fc = nn.Linear(mid_linear_dims, self.num_tags)
            init_blocks = [self.mid_linear, self.start_fc, self.end_fc, self.inter_linear]
        elif self.scheme == 12:
            self.mid_linear = nn.Sequential(
                nn.Linear(out_dims, mid_linear_dims),
                nn.Dropout(config.dropout_prob)
            )
            self.inter_linear = nn.Sequential(
                nn.Linear(self.num_tags,mid_linear_dims),
                nn.LeakyReLU(),
                nn.Dropout(0.1),
            )
            self.start_fc = nn.Linear(mid_linear_dims, self.num_tags)
            self.end_fc = nn.Linear(mid_linear_dims, self.num_tags)
            init_blocks = [self.mid_linear, self.start_fc, self.end_fc, self.inter_linear]
            # Learnable scalar balancing the two branches in forward().
            self.dynamic_weight = nn.Parameter(torch.empty(1))
            self.dynamic_weight.data.fill_(0.5) # init sparse_weight
        elif self.scheme == 13:
            # Fusion by plain addition.
            self.mid_linear = nn.Sequential(
                nn.Linear(out_dims, mid_linear_dims),
                nn.Dropout(config.dropout_prob)
            )
            self.inter_linear = nn.Sequential(
                nn.Linear(self.num_tags,mid_linear_dims),
                nn.LeakyReLU(),
                nn.Dropout(0.1),
            )
            self.start_fc = nn.Linear(mid_linear_dims, self.num_tags)
            self.end_fc = nn.Linear(mid_linear_dims, self.num_tags)
            init_blocks = [self.mid_linear, self.start_fc, self.end_fc, self.inter_linear]
        elif self.scheme == 20:
            """
            20是CNN系列
            """
        elif self.scheme == 30:
            """
            30是BilSTM系列
            """
            self.mid_linear = nn.LSTM(out_dims, mid_linear_dims // 2, batch_first=True, bidirectional=True,num_layers=2, dropout=0.5)
            self.inter_linear = nn.Sequential(
                nn.Linear(self.num_tags, mid_linear_dims),
                nn.LeakyReLU(),
                nn.Dropout(0.1),
            )
            self.start_fc = nn.Linear(mid_linear_dims, self.num_tags)
            self.end_fc = nn.Linear(mid_linear_dims, self.num_tags)
            init_blocks = [self.mid_linear, self.start_fc, self.end_fc, self.inter_linear]
        elif self.scheme == 40:
            """
            40是BiGRU系列的实验
            """
            self.mid_linear = nn.GRU(out_dims, mid_linear_dims // 2, batch_first=True, bidirectional=True,
                                     num_layers=2, dropout=0.5)
            self.inter_linear = nn.Sequential(
                nn.Linear(self.num_tags, mid_linear_dims),
                nn.LeakyReLU(),
                nn.Dropout(0.1),
            )
            self.start_fc = nn.Linear(mid_linear_dims, self.num_tags)
            self.end_fc = nn.Linear(mid_linear_dims, self.num_tags)
            init_blocks = [self.mid_linear, self.start_fc, self.end_fc, self.inter_linear]
        reduction = 'none'
        self.loss_type = config.span_loss_type
        if self.loss_type == 'ce':
            logger.info('损失函数使用:CrossEntropy')
            self.criterion = nn.CrossEntropyLoss(reduction=reduction)
        elif self.loss_type == 'ls_ce':
            logger.info('损失函数使用:LabelSmoothing CrossEntropy-')
            self.criterion = LabelSmoothingCrossEntropy(reduction=reduction)
        elif self.loss_type == 'focal': # intended for the multi-class case
            logger.info('损失函数使用:Focal Loss')
            self.criterion = FocalLoss(reduction=reduction)
        self._init_weights(init_blocks)
    def forward(self, token_ids, attention_masks, token_type_ids, input_token_starts=None, start_ids=None, end_ids=None,
                input_true_length=None):
        """Encode a batch and produce start/end span logits (and the loss).

        :param token_ids: subword ids fed to BERT (with the two masks below).
        :param attention_masks: attention mask for ``token_ids``.
        :param token_type_ids: segment ids for ``token_ids``.
        :param input_token_starts: per-sequence indices of the first subword
            of each original token (subword -> token alignment).
        :param start_ids: gold start labels, padded to the batch's token-level
            length (not the subword-level length).
        :param end_ids: gold end labels, padded the same way.
        :param input_true_length: true token-level length of each sequence.
        :return: ``(loss, start_logits, end_logits)`` when labels are given,
            otherwise ``(start_logits, end_logits)``.
        """
        if self.config.bert_name in ['scibert','biobert','flash','bert','flash_quad','wwm_bert']:
            bert_outputs = self.bert_model(input_ids=token_ids, attention_mask=attention_masks,
                                           token_type_ids=token_type_ids)
            sequence_output = bert_outputs[0]
        elif self.config.bert_name == 'kebiolm':
            bert_outputs = self.bert_model(input_ids=token_ids, attention_mask=attention_masks,
                                           token_type_ids=token_type_ids, return_dict=False)
            sequence_output = bert_outputs[2] # shape=(batch_size,seq_len,hidden_dim)=[32, 55, 768]
        else:
            raise ValueError
        # Keep only the first-subword representation of every original token.
        origin_sequence_output = []
        for layer, starts in zip(sequence_output, input_token_starts):
            res = layer[starts] # shape=(seq_len,hidden_size)=(256,768)
            origin_sequence_output.append(res)
        # max_len here differs from seq_len above: token-level, not subword-level.
        padded_sequence_output = pad_sequence(origin_sequence_output, batch_first=True)
        # Fuse the start logits into the end classifier per the configured scheme.
        if self.scheme == 1:
            # The original fusion: mean of features and projected start logits.
            seq_out = self.mid_linear(padded_sequence_output)
            start_logits = self.start_fc(seq_out)
            inter_logits = F.relu(self.inter_linear(start_logits))
            seq_out = (seq_out+inter_logits)/2
            end_logits = self.end_fc(seq_out)
        elif self.scheme == 11:
            # Same as scheme 1 but the projection has no extra ReLU here.
            seq_out = self.mid_linear(padded_sequence_output)
            start_logits = self.start_fc(seq_out)
            inter_logits = self.inter_linear(start_logits)
            seq_out = (seq_out + inter_logits) / 2
            end_logits = self.end_fc(seq_out)
        elif self.scheme == 12:
            # Fusion weighted by the learned scalar.
            seq_out = self.mid_linear(padded_sequence_output)
            start_logits = self.start_fc(seq_out)
            inter_logits = self.inter_linear(start_logits)
            seq_out = self.dynamic_weight*seq_out + (1-self.dynamic_weight)*inter_logits
            end_logits = self.end_fc(seq_out)
        elif self.scheme == 13:
            # Fusion by addition.
            seq_out = self.mid_linear(padded_sequence_output)
            start_logits = self.start_fc(seq_out)
            inter_logits = self.inter_linear(start_logits)
            seq_out = seq_out + inter_logits
            end_logits = self.end_fc(seq_out)
        elif self.scheme == 2:
            seq_out = self.mid_linear(padded_sequence_output)
            start_logits = self.start_fc(seq_out)
            inter_logits = F.relu(self.inter_linear(start_logits))
            seq_out = torch.cat((seq_out,inter_logits),axis=-1)
            end_logits = self.end_fc(seq_out)
        elif self.scheme == 3:
            seq_out = self.mid_linear(padded_sequence_output)
            start_logins = self.inter_linear(seq_out)
            start_logits = self.start_fc(start_logins)
            # NOTE(review): inter_linear2 is called with no argument — this
            # branch raises at runtime; scheme 3 looks unfinished.
            inter_logits = F.tanh(self.inter_linear2())
            seq_out = torch.cat((seq_out, inter_logits), axis=-1)
            end_logits = self.end_fc(seq_out)
        elif self.scheme == 4:
            seq_out = self.mid_linear(padded_sequence_output)
            start_logits = self.start_fc(seq_out)
            inter_logits = F.relu(self.inter_linear(start_logits)[0])
            seq_out = (seq_out + inter_logits) / 2
            end_logits = self.end_fc(seq_out)
        elif self.scheme == 5:
            seq_out = self.mid_linear(padded_sequence_output)
            start_logits = self.start_fc(seq_out)
            inter_logits = F.relu(self.inter_linear(start_logits))
            seq_out = (seq_out + inter_logits)
            end_logits = self.end_fc(seq_out)
        elif self.scheme == 6:
            seq_out = self.mid_linear(padded_sequence_output)
            seq_out = self.dropout(seq_out[0])
            start_logits = self.start_fc(seq_out)
            inter_logits = F.relu(self.inter_linear(start_logits))
            seq_out = (seq_out + inter_logits)
            end_logits = self.end_fc(seq_out)
        elif self.scheme == 7:
            seq_out = self.mid_linear(padded_sequence_output)
            seq_out = self.dropout(seq_out)
            start_logits = self.start_fc(seq_out)
            inter_logits = F.relu(self.inter_linear(start_logits))
            seq_out = (seq_out + inter_logits)
            end_logits = self.end_fc(seq_out)
        elif self.scheme == 8:
            # Separate projections for the start and end branches.
            start_seq_out = self.start_mid_linear(padded_sequence_output)
            end_seq_out = self.end_mid_linear(padded_sequence_output)
            start_logits = self.start_fc(start_seq_out)
            inter_logits = F.relu(self.inter_linear(start_seq_out))
            end_seq_out = (end_seq_out + inter_logits)
            end_logits = self.end_fc(end_seq_out)
        elif self.scheme == 30:
            # BiLSTM encoder, fusion by addition.
            seq_out,_ = self.mid_linear(padded_sequence_output)
            start_logits = self.start_fc(seq_out)
            inter_logits = self.inter_linear(start_logits)
            seq_out = seq_out + inter_logits
            end_logits = self.end_fc(seq_out)
        elif self.scheme == 40:
            # BiGRU encoder, fusion by addition.
            seq_out, _ = self.mid_linear(padded_sequence_output)
            start_logits = self.start_fc(seq_out)
            inter_logits = self.inter_linear(start_logits)
            seq_out = seq_out + inter_logits
            end_logits = self.end_fc(seq_out)
        else:
            raise ValueError
        # Mask that is 1 on real tokens and 0 on token-level padding.
        loss_mask = torch.zeros((start_logits.shape[0], start_logits.shape[1])).to(token_ids.device)
        for i, lens in enumerate(input_true_length):
            loss_mask[i][:lens] = 1
        # start_ids/end_ids are already padded to this token-level length.
        out = (start_logits, end_logits,)
        if start_ids is not None and end_ids is not None: # training mode: compute the loss
            # e.g. start_logits.shape = torch.Size([4096, 14]) after the view
            start_logits = start_logits.view(-1, self.num_tags)
            end_logits = end_logits.view(-1, self.num_tags)
            # Drop padded positions so the loss covers real tokens only.
            mask = loss_mask.view(-1) == 1
            active_start_logits = start_logits[mask] # (?,14): length varies after masking
            active_end_logits = end_logits[mask]
            active_start_labels = start_ids.view(-1)[mask]
            active_end_labels = end_ids.view(-1)[mask]
            start_loss = self.criterion(active_start_logits, active_start_labels).mean(dim=-1)
            end_loss = self.criterion(active_end_logits, active_end_labels).mean(dim=-1)
            loss = start_loss + end_loss
            out = (loss,) + out
        return out
| KeDaCoYa/MKG-GC | entity_extraction/src/models/inter_bert_span.py | inter_bert_span.py | py | 16,287 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "src.models.bert_model.BaseBert",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "config.MyBertConfig",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "... |
1243434549 | """
uncertainty_analysis.py
=========================================
Python script that performs uncertainty analysis on profile estimation.
"""
import time
import pandas as pd
import numpy as np
from src.instance import Instance
from src.optimization_profile_time import ProfileOptTime
from scipy.stats import dirichlet
def generate_profile(df, r, num):
    """Draw *num* perturbed profiles from Dirichlet distributions.

    Each row of *df* supplies the concentration parameters (shifted by a
    small epsilon and scaled by *r*); the last component of every draw is
    dropped. Returns a list of *num* 2-D numpy arrays, one row per df row.
    """
    samples = []
    n_rows = df.shape[0]
    for _ in range(num):
        rows = []
        for idx in range(n_rows):
            concentration = df.iloc[idx].to_numpy()
            concentration += 1e-5
            concentration = concentration * r
            draw = dirichlet.rvs(concentration, size=1)
            rows.append(list(draw[0][:-1]))
        samples.append(np.array(rows))
    return samples
if __name__ == "__main__":
    # Path to input files
    pop_dir = 'data/inputFiles/population.txt'
    travelA_dir = 'data/inputFiles/travelTimesAmbu-base2loc.txt'
    lambda_dir = 'data/inputFiles/estimate_incident_rate.csv'
    profile_dir = 'data/inputFiles/Profiles.xlsx'
    # Read lambda (per-area incident rate estimates) from file
    df_incident = pd.read_csv(lambda_dir)
    estimate_lambda = df_incident['adjusted_estimate'].to_numpy()
    # Read the daytime profile from file, dropping the first and last columns
    df_profile = pd.read_excel(profile_dir)
    df_profile = df_profile.drop(df_profile.columns[0], axis=1)
    df_profile1 = df_profile.drop(df_profile.columns[-1], axis=1)
    profile_day = df_profile1.to_numpy()
    # Set up problem instances
    nBases = 15
    ambus = [0, 3, 2, 2, 3, 0, 1, 1, 3, 2, 2, 1, 0, 2, 3]  # ambulances per base
    nVolunteers = 3571
    volResDelay = 180  # volunteer response delay (s)
    walkingSpeed = 6 #kmh
    ambuBusyProb = 0.44
    ambuResDelay = 180  # ambulance response delay (s)
    threshold = 420 #1km at 6kmh + vol response delay
    maxDispatchDistance = 1.0
    nSteps = 10000
    inst = Instance(pop_dir, travelA_dir, nVolunteers, volResDelay, walkingSpeed,
                    nBases, ambuResDelay, ambuBusyProb, ambus, threshold,
                    maxDispatchDistance, nSteps)
    # Additional parameters: two time segments (day/night) with their weights
    numTimeSeg = 2
    alphaL = [0.17, 0.08]
    lambdaL = [estimate_lambda, estimate_lambda]
    OHCAProbL = [0.7, 0.3]
    # Night "profile" is the identity: volunteers stay at their home location
    profile_night = np.identity(inst.nLocations)
    profileL = [profile_day, profile_night]
    no_profileL = [profile_night, profile_night]
    OptFW = ProfileOptTime(inst, numTimeSeg, alphaL, lambdaL, OHCAProbL, profileL)
    OptFW_no_profile = ProfileOptTime(inst, numTimeSeg, alphaL, lambdaL, OHCAProbL, no_profileL)
    # Proportional and uniform distribution
    x_prop = estimate_lambda.copy()
    x_uni = [x / sum(inst.area) for x in inst.area]
    #--------------------------- Optimize with and without profile-------------
    start = time.time()
    x_waalewijn, _, _ = OptFW.Frankwolfe_LB(x_prop, tol=1e-3, method='w')
    end = time.time()
    print("The time taken to optimize Waalewijn is: ", (end - start)/60, " minutes.\n")
    # Optimize without profile
    start = time.time()
    x_waalewijn_noprofile, _, _ = OptFW_no_profile.Frankwolfe_LB(x_prop, tol=1e-3, method='w')
    end = time.time()
    print("The time taken to optimize Waalewijn is: ", (end - start)/60, " minutes.\n")
    # ------------------------ Uncertainty analysis --------------------------
    # Evaluate each solution on 100 Dirichlet-perturbed daytime profiles
    # (500 are generated; only the first 100 are used).
    profile_list_day = generate_profile(df_profile, 1, 500)
    result_opt = []
    result_uni = []
    result_diag = []
    result_prop = []
    for i in range(100):
        if i%10 == 0:
            print(i)
        profileL = [profile_list_day[i], profile_night]
        OptFW = ProfileOptTime(inst, numTimeSeg, alphaL, lambdaL, OHCAProbL, profileL)
        result_opt.append(OptFW.evaluatePWaalewijn(x_waalewijn))
        result_uni.append(OptFW.evaluatePWaalewijn(x_uni))
        result_diag.append(OptFW.evaluatePWaalewijn(x_waalewijn_noprofile))
        result_prop.append(OptFW.evaluatePWaalewijn(x_prop))
    # Output result to csv file
    df_result = pd.DataFrame()
    df_result['areaunit'] = df_incident['areaunit']
    df_result['opt_profile'] = x_waalewijn
    df_result['opt_diag'] = x_waalewijn_noprofile
    df_result.to_csv('data/outputFiles/uncertainty_result.csv', index=False)
    df_result2 = pd.DataFrame()
    df_result2['opt_waalewijn'] = result_opt
    df_result2['uni'] = result_uni
    df_result2['opt_diag'] = result_diag
    df_result2['propDemand'] = result_prop
    df_result2.to_csv('data/outputFiles/uncertainty_result_evaluation.csv', index=False)
| carolinetjes/CFRrecruitment | uncertainty_analysis.py | uncertainty_analysis.py | py | 4,391 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scipy.stats.dirichlet.rvs",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "scipy.stats.dirichlet",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.re... |
29465356973 | ## This module aims to compute the number and energy flux of DM particles from a supernova
import numpy as np
import matplotlib.pyplot as plt
from ..Step1_Kinematics import Kinematics
import scipy.integrate as integrate
#Particle Property
# Neutrino parameters
M_nu = 0.32 # Unit:eV/c2
E_total_nu = 3.6e53*6.24150913e11 #Total energy of neutrinos #Transfer 2e51erg to unit of eV
E_per_nu = 10e6 #Mean energy of each neutrino #estimated value
# Dark-matter particle mass (same eV/c^2 convention as above — TODO confirm)
M_DM = 1e03
# NFW halo profile parameters: scale radius rs in cm (24.42 kpc); rho_s is the
# characteristic density (units consistent with eV/cm^3 given the flux formula
# below — TODO confirm)
rho_s = 0.184e9
rs=24.42*3.08567758e21
#cross section (Neutrino and DM), cm^2
cs = 1e-30
def DM_flux(m_dm,e_per_nu,start,end,n_total):
    """Plot the time profile of the boosted-DM flux at *end* from a SN at *start*.

    :param m_dm: DM particle mass.
    :param e_per_nu: mean neutrino energy.
        NOTE(review): unused — the module-level E_per_nu is used instead;
        the two coincide at the call site, but this looks unintended.
    :param start: supernova position (cm), numpy array of length 3.
    :param end: observer position (cm), numpy array of length 3.
    :param n_total: total number of supernova neutrinos.
    """
    # Lorentz factor and speed of the kicked DM particle.
    gamma = Kinematics.energy_kicked_by_neutrino(E_per_nu, M_nu,m_dm)/m_dm
    beta = (1-gamma**(-2))**0.5
    # Distance and direction from the SN to the observer.
    R = (np.sum((start-end)**2))**0.5
    pos = -start+end
    # Arrival delay of DM (speed beta*c) relative to light over distance R.
    time_delay = R*(1/beta-1)/3e10
    print("time delay(s):"+str(time_delay))
    T = R/3e10
    t = np.linspace(0,time_delay,100)
    def n_ori(l):
        # NFW density shape (factor rho_s applied later) at distance l
        # along the SN->observer line of sight.
        def get_mod(x):
            x2 = x**2
            return (x2[:,0] + x2[:,1] +x2[:,2])**0.5
        r= get_mod(np.tensordot(l,pos,axes=0)/R+np.tensordot(np.ones(l.shape),start,axes=0))
        x= r/rs
        return 1/(x*(1+x)*(1+x))
    def n(t):
        # Map each arrival time to its emission distance along the path.
        l = 3e10*(T - beta*t/(1-beta))
        l[-1]= 0.  # force the final sample onto the source end of the path
        return n_ori(l)
    c = 3e10 #in unit of cm/s
    phi = cs *n_total*rho_s/m_dm/(4*np.pi*(R**2))*n(t) *beta/(1-beta)*c
    plt.plot(t, phi, color ='blue', label = 'DM Flux')
    plt.xlabel('Time (s)')
    plt.ylabel('Flux (#/cm^2*s)')
    plt.legend(loc='upper right')
    plt.show()
def DM_number(m_dm,e_per_nu,start,end,n_total):
    """Print the line-of-sight integrated DM and neutrino column numbers.

    Fix: ``e_per_nu`` was accepted but ignored in favour of the module-level
    ``E_per_nu``; it is used now (no behaviour change for the in-file caller,
    which passes ``E_per_nu``). The source-detector distance is also computed
    once instead of twice.
    """
    gamma = Kinematics.energy_kicked_by_neutrino(e_per_nu, M_nu,m_dm)/m_dm
    beta = (1-gamma**(-2))**0.5
    R = (np.sum((start-end)**2))**0.5       # source-detector distance, cm
    time_delay = R*(1/beta-1)/(3e10)        # reuse R instead of recomputing the norm
    print("time delay:"+str(time_delay))
    l = end -start
    def f(t):
        # NFW shape at the point a fraction t along the source->detector segment.
        r=(np.sum((start+l*t)**2))**0.5
        x= r/rs
        return 1/(x*(1+x)*(1+x))
    k = n_total*rho_s*cs/m_dm/(4*np.pi) /R
    L_dm = integrate.nquad(f, [[0,1.]])[0]*k
    L_nu = n_total/(4*np.pi*R*R)
    print("DM Number(1/cm^2):"+str(L_dm))
    print("Neutrino Number(1/cm^2):"+str(L_nu))
if __name__== '__main__':
    # Example run: source at `start`, detector at `end` (coordinates in cm;
    # 3.08567758e21 cm = 1 kpc, 3.08567758e18 cm = 1 pc).
    print("Total number of neutrino:"+str(E_total_nu/E_per_nu))
    start=np.array([0.87*3.08567758e21,0,2.4*3.08567758e18])
    end =np.array([8.7*3.08567758e21,0,24*3.08567758e18])
    DM_number(M_DM,E_per_nu ,start,end,E_total_nu/E_per_nu)
    DM_flux(M_DM,E_per_nu ,start,end,E_total_nu/E_per_nu)
| CrazyAncestor/DM_Neutrino_Flux | old_codes/one_direction/Steps/Step2_DM_Flux/DM_flux.py | DM_flux.py | py | 2,634 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Step1_Kinematics.Kinematics.energy_kicked_by_neutrino",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "Step1_Kinematics.Kinematics",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "numpy.sum",
"line_number": 27,
"usage_type": "call"
}... |
31932657319 | from sympy import isprime
import random
import re
class ElGammal:
    """Textbook ElGamal cipher over Z_p with 2-letter base-26 message blocks.

    NOTE(review): the ephemeral exponent ``self.m`` is drawn once per key and
    reused for every block, so identical plaintext blocks produce identical
    ciphertext pairs -- fine for a classroom exercise, not for real use.
    """
    def __init__(self):
        # Public modulus p, generator alpha, private exponent a and public
        # key beta; populated by genKey() or setKey().
        self.p=0
        self.alpha=0
        self.private_a=0
        self.beta=0
    # self.p=11
    # self.alpha=2
    # self.private_a=3
    # self.beta=8
    def genKey(self):
        """Generate a random key (p, alpha, a) and derive beta = alpha^a mod p."""
        primes_list=[i for i in range(676,10000) if isprime(i)] #Range chosen so a 2-letter block (max value 26^2 = 676) encodes without collision
        self.p=random.choice(primes_list)
        self.alpha=random.randint(1,self.p-1)
        self.private_a=random.randint(1,self.p-2)
        self.beta=pow(self.alpha,self.private_a,self.p)
        self.m=random.randint(1,self.p-2)
    def setKey(self,p,alpha,private_a):
        """Install an explicit key (alpha and a reduced mod p); beta is derived."""
        self.p=p
        self.alpha=alpha%p
        self.private_a=private_a%p
        self.beta=pow(self.alpha,self.private_a,self.p)
        self.m=random.randint(1,self.p-2)
    def preprocess_stringv3(self,s):
        """Lower-case *s*, strip non-letters and pad with 'a' to an even length."""
        s=re.sub('[^a-zA-Z]',"",s) #Remove everything that is not a letter (spaces, digits, punctuation)
        s=s.lower()
        # s=s[::-1]
        while len(s)%2!=0:
            s+='a'
        return s
    def block_convertv2(self,s,n,b=2): #Each block is b letters (default 2)
        """Encode *s* into per-block base-26 integers, each reduced mod *n*."""
        s=self.preprocess_stringv3(s)
        # s=s[::-1]
        b=[s[i:i+b] for i in range(0,len(s),b)]
        num_arr=[]
        for bi in b:
            l=len(bi)
            num=0
            for i in range(l):
                # 'a'->1 ... 'z'->26, little-endian base 26
                num+=((ord(bi[i])-96))*26**i
            num_arr.append(num%n)
        return num_arr
    def num_to_text(self,arr):
        """Decode a list of base-26 block numbers back into a lowercase string."""
        decimal_text=[]
        final_text=[]
        for block in arr:
            # Extract base-26 digits, least significant first.
            dec_num=[]
            cond=True
            while cond:
                dec_num.append(block%26)
                if block//26==0:
                    cond=False
                block//=26
            decimal_text.append(dec_num)
        final_string=[]
        for char in decimal_text:
            s=''
            for n in char:
                s+=(chr(n+96))
            final_string.append(s)
        final_text.append(final_string)
        message=''
        for s in final_text[0]:
            message+=s
        return message
    def encrypt(self,x): #x is the message to encrypt
        """Encrypt message *x*; return a list of (y1, y2) ciphertext pairs."""
        x_=self.block_convertv2(x,self.p)
        encrypt_m=[]
        for x in x_:
            y1=pow(self.alpha,self.m,self.p)
            y2=(pow(self.beta,self.m,self.p)*x)%self.p
            encrypt_m.append((y1,y2))
        return encrypt_m
    def extended_gcd(self,a, b):
        """Return (g, x, y) with g = gcd(a, b) and a*x + b*y = g."""
        if a == 0:
            return b, 0, 1
        else:
            gcd, x, y = self.extended_gcd(b % a, a)
            return gcd, y - (b // a) * x, x
    def inverse_mod(self,a,n):
        # Bezout coefficient of a; may be negative, which is harmless here
        # because decrypt() reduces the product mod p afterwards.
        return self.extended_gcd(a,n)[1]
    def decrypt(self,ys):
        """Decrypt a list of (y1, y2) pairs back to block numbers."""
        sol=[]
        for ys_ in ys:
            y1,y2=ys_
            sol.append(y2*(self.inverse_mod(pow(y1,self.private_a,self.p),self.p))%self.p)
        return sol
# gammal=ElGammal()
# gammal.genKey()
# m='This is a long proof'
# print(gammal.preprocess_stringv3(m))
# print(gammal.block_convertv2(m,gammal.p))
# e=gammal.encrypt(m)
# print(e)
# d=gammal.decrypt(e)
# print(d)
# print(gammal.num_to_text(d))
| JuanDa14Sa/Cripto | Main/ElGammal.py | ElGammal.py | py | 3,154 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sympy.isprime",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line... |
26946887509 | import numpy as np
import rogues
import scipy.linalg as sl
def condex(n, k=4, theta=100):
    """
    CONDEX `Counterexamples' to matrix condition number estimators.
    CONDEX(N, K, THETA) is a `counterexample' matrix to a condition
    estimator. It has order N and scalar parameter THETA (default 100).
    If N is not equal to the `natural' size of the matrix then
    the matrix is padded out with an identity matrix to order N.
    The matrix, its natural size, and the estimator to which it applies
    are specified by K (default K = 4) as follows:
        K = 1: 4-by-4, LINPACK (RCOND)
        K = 2: 3-by-3, LINPACK (RCOND)
        K = 3: arbitrary, LINPACK (RCOND) (independent of THETA)
        K = 4: N >= 4, SONEST (Higham 1988)
    (Note that in practice the K = 4 matrix is not usually a
    counterexample because of the rounding errors in forming it.)

    Fixes relative to the previous revision: the identity padding used to
    assign a[i, i] on the unpadded m-by-m array (IndexError, and off by one
    even if it had fit); the matrix is now embedded in an n-by-n identity.
    The deprecated np.asmatrix construction in the K = 4 branch was replaced
    with the @ matmul operator (numerically identical, returns ndarray).

    References:
    A.K. Cline and R.K. Rew, A set of counter-examples to three
    condition number estimators, SIAM J. Sci. Stat. Comput.,
    4 (1983), pp. 602-611.
    N.J. Higham, FORTRAN codes for estimating the one-norm of a real or
    complex matrix, with applications to condition estimation
    (Algorithm 674), ACM Trans. Math. Soft., 14 (1988), pp. 381-396.
    """
    if k == 1:  # Cline and Rew (1983), Example B.
        a = np.array([[1, -1, -2 * theta, 0],
                      [0, 1, theta, -theta],
                      [0, 1, 1 + theta, -(theta + 1)],
                      [0, 0, 0, theta]])
    elif k == 2:  # Cline and Rew (1983), Example C.
        a = np.array([[1, 1 - 2 / theta ** 2, -2],
                      [0, 1 / theta, -1 / theta],
                      [0, 0, 1]])
    elif k == 3:  # Cline and Rew (1983), Example D.
        a = rogues.triw(n, -1).T
        a[-1, -1] = -1
    elif k == 4:  # Higham (1988), p. 390.
        x = np.ones((n, 3))              # First col is e
        x[1:n, 1] = np.zeros(n - 1)      # Second col is e(1)
        # Third col is special vector b in SONEST
        x[:, 2] = ((-1) ** np.arange(n)) * (1 + np.arange(n) / (n - 1))
        # Q*Q' is now the orthogonal projector onto span(e(1),e,b)).
        q = sl.orth(x)
        p = np.eye(n) - q @ q.T
        a = np.eye(n) + theta * p
    # Pad out with identity as necessary by embedding `a` into eye(n).
    m = a.shape[0]
    if m < n:
        padded = np.eye(n)
        padded[:m, :m] = a
        a = padded
    return a
| macd/rogues | rogues/matrices/condex.py | condex.py | py | 2,556 | python | en | code | 16 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "rogues.triw",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 5... |
18046654139 | import pandas as pd
from konlpy.tag import Komoran
import tensorflow as tf
import numpy as np
from keras.layers import Dense, Conv1D, GlobalMaxPooling1D, Embedding, Dropout
from keras.models import Sequential
from keras.layers import LSTM, Bidirectional
from keras.models import Sequential, load_model
from keras.metrics import metrics
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.model_selection import train_test_split
from keras import backend as K
import pickle
# def recall_m(y_true, y_pred):
# true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
# possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
# recall = true_positives / (possible_positives + K.epsilon())
# return recall
# def precision_m(y_true, y_pred):
# true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
# predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
# precision = true_positives / (predicted_positives + K.epsilon())
# return precision
# def f1_m(y_true, y_pred):
# precision = precision_m(y_true, y_pred)
# recall = recall_m(y_true, y_pred)
# return 2*((precision*recall)/(precision+recall+K.epsilon()))
# Load the labelled corpus: one text column and a binary abuse label.
data = pd.read_csv('data_spacing.csv')
X_data = data['text']
y_data = data['isAbuse']
y_data = pd.to_numeric(y_data)
tokened = []
stop_words = []
# Stop words file: comma-separated entries, possibly spread over several lines.
ft = open('stopword.txt', 'r')
lines = ft.readlines()
for i in lines:
    i = i.rstrip()
    i=i.split(",")
    for j in i:
        stop_words.append(j)
ft.close()
# Morphological tokenization (Korean) with Komoran, then stop-word filtering.
komoran = Komoran()
for i in X_data:
    word_tokens = komoran.morphs(i)
    word_tokens = [word for word in word_tokens if not word in stop_words]
    tokened.append(word_tokens)
tokenizer = tf.keras.preprocessing.text.Tokenizer()
tokenizer.fit_on_texts(tokened)
# Vocabulary statistics: count how many words occur fewer than `threshold` times.
threshold = 2
total_cnt = len(tokenizer.word_index) # number of distinct words
rare_cnt = 0 # count of words occurring fewer than `threshold` times
total_freq = 0 # summed frequency of every word in the training data
rare_freq = 0 # summed frequency of the rare words only
for key, value in tokenizer.word_counts.items():
    total_freq = total_freq + value
    # if this word occurs fewer than `threshold` times, count it as rare
    if(value < threshold):
        rare_cnt = rare_cnt + 1
        rare_freq = rare_freq + value
print('단어 집합(vocabulary)의 크기 :',total_cnt)
print('등장 빈도가 %s번 이하인 희귀 단어의 수: %s'%(threshold - 1, rare_cnt))
print("단어 집합에서 희귀 단어의 비율:", (rare_cnt / total_cnt)*100)
print("전체 등장 빈도에서 희귀 단어 등장 빈도 비율:", (rare_freq / total_freq)*100)
# Keep only non-rare words (+1 for the padding/OOV index).
vocab_size = total_cnt - rare_cnt + 1
print('단어 집합의 크기 :',vocab_size)
X_data = tokenizer.texts_to_sequences(tokened)
# Indices of samples that became empty after tokenization/filtering.
drop_data = [index for index, sentence in enumerate(X_data) if len(sentence) < 1]
# Pad every sequence to length 100 (post-padding), then drop empty samples.
paddedX = tf.keras.preprocessing.sequence.pad_sequences(X_data, padding='post', maxlen=100)
X_data = np.array(paddedX)
X_data = np.delete(X_data, drop_data, axis=0)
y_data = np.array(y_data)
y_data = np.delete(y_data, drop_data, axis=0)
# Persist the fitted tokenizer for inference-time preprocessing.
with open('tokenizer.pickle', 'wb') as handle:
    pickle.dump(tokenizer, handle)
print(X_data[0].shape)
print(X_data[1].shape)
# Stratified 80/20 train/test split.
X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, test_size=0.2, random_state=1, stratify=y_data, shuffle=True)
embedding_dim = 128 # embedding vector dimension
dropout_ratio = 0.5 # dropout rate
num_filters = 128 # number of convolution filters
kernel_size = [15,10] # kernel sizes for the two Conv1D layers
hidden_units = 128 # LSTM units
# CNN front-end followed by a bidirectional LSTM and a sigmoid classifier.
model = Sequential()
model.add(Embedding(vocab_size, embedding_dim))
model.add(Dropout(dropout_ratio))
model.add(Conv1D(num_filters, kernel_size[0], padding='valid', activation='swish', strides = 1))
model.add(Conv1D(num_filters, kernel_size[1], padding='valid', activation='swish', strides = 1))
model.add(GlobalMaxPooling1D(keepdims=True))
model.add(Bidirectional(LSTM(hidden_units))) # use a bidirectional LSTM
model.add(Dense(1, activation='sigmoid'))
# Early stopping on validation loss; checkpoint the best validation accuracy.
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5)
mc = ModelCheckpoint('best_model.h5', monitor='val_acc', mode='max', verbose=1, save_best_only=True)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
# model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc' ,precision_m, recall_m, f1_m])
history = model.fit(X_train, y_train, epochs=30, callbacks=[es, mc], batch_size=64, validation_split=0.2)
print("\n 테스트 정확도: %.4f" % (model.evaluate(X_test, y_test)[1]))
| jaehyun1209/Deeplearning_AbuseFind | PreProcessAndModel.py | PreProcessAndModel.py | py | 4,595 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pandas.to_numeric",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "konlpy.tag.Komoran",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras... |
3532508808 | import os
os.system('cls')
from math import sqrt
from functools import reduce
class chofer():
    """A driver, identified solely by their full name."""

    def __init__(self, nombreCompleto):
        self._nombre = nombreCompleto  # read back via getNombreCompleto()

    def getNombreCompleto(self):
        """Return the driver's full name."""
        return self._nombre
class camion():
    """A truck: plate, fuel left (litres), current city, km/litre figure and top speed."""

    def __init__(self, patente, litrosDisponibles, ciudadActual, kmLitro, velMaxima):
        self._patente = patente
        self._litros = litrosDisponibles
        self._ciudad = ciudadActual
        self._km_litro = kmLitro
        self._vel_maxima = velMaxima

    def getPatente(self):
        """Return the licence plate."""
        return self._patente

    def getVelMaxima(self):
        """Return the maximum speed."""
        return self._vel_maxima

    def getCiudadActual(self):
        """Return the city the truck is currently in."""
        return self._ciudad

    def setCiudadActual(self, AC):
        """Move the truck to city *AC*."""
        self._ciudad = AC

    # Attribute-style access to the current city.
    CiudadActual = property(getCiudadActual, setCiudadActual)

    def getLitrosDisponibles(self):
        """Return the litres of fuel left in the tank."""
        return self._litros

    def setLitrosDisponibles(self, LD):
        """Set the litres of fuel left in the tank."""
        self._litros = LD

    # Attribute-style access to the fuel level.
    LitrosDisponibles = property(getLitrosDisponibles, setLitrosDisponibles)

    def getKmLitros(self):
        """Return the km-per-litre consumption figure."""
        return self._km_litro
# Fleet fixtures: three drivers and three trucks.
# camion(patente, litrosDisponibles, ciudadActual, kmLitro, velMaxima)
Juan = chofer("Juan Mari")
Martin = chofer("Martin Torres")
Agustin = chofer ("Agustin De Luca")
camion1=camion ("ARG123",60,"Lomas",3, 60)
camion2=camion ("BRZ456",60,"Lanus", 5, 80)
camion3=camion ("URG678",60,"Escalada", 4,60)
def decorador(funcion):
    """Decorator that brackets *funcion* with the file open/close banners.

    Fix: the wrapper used to discard the wrapped function's return value
    (always returning None); it is propagated now. Backward compatible --
    the functions decorated in this file all return None anyway.
    """
    def nuevaFuncion(*args):
        print("ARCHIVO ABIERTO")
        resultado = funcion(*args)
        print("ARCHIVO CERRADO")
        return resultado
    return nuevaFuncion
def cargarRecorrido():
    """Interactively build a route: ask for a city number until 0 is entered.

    Returns the list of chosen city names.
    Fix: an out-of-range menu entry (e.g. 7) used to crash with IndexError;
    it is now rejected with the same "valor incorrecto" message the main
    menu uses. Non-numeric input still raises ValueError (unchanged).
    """
    destino=[]
    ciudades=["Lanus", "Lomas", "Escalada", "Banfield"]
    while True:
        print("seleccionar ciudades")
        for num, ciudad in enumerate(ciudades, start=1):
            print(f"{num}= {ciudad}")
        print("0= terminar carga ")
        x=int(input())
        if x == 0:
            break
        if 1 <= x <= len(ciudades):
            destino.append(ciudades[x-1])
            print(destino)
        else:
            print("valor incorrecto")
    return destino
def sumar(x, y):
    """Return the sum of *x* and *y* (used as the reducer in estimarViaje)."""
    resultado = x + y
    return resultado
@decorador
def guardar(estimacion):
    """Append one trip summary (a list) as a line to the report file.

    Keeps prompting for an alternative file name until the open succeeds.
    """
    archivo = "ArchivoParcial2.txt"
    while True:
        try:
            # Append mode creates the file if it does not exist yet.
            with open (archivo, "a") as a:
                a.write(f"\n{estimacion}")
                print("VIAJE GUARDADO")
                break
        except:
            # NOTE(review): bare except also hides non-I/O errors;
            # consider catching OSError only.
            print("Error al intentar abrir")
            print (f"No se encuentra el archivo {archivo}, especifique su nombre correctamente:")
            archivo = (input("Nombre de archivo:"))
@decorador
def leer():
    """Print the whole trip-report file; prompt for another name on failure."""
    archivo = "ArchivoParcial2.txt"
    while True:
        try:
            with open (archivo, "r") as a:
                contenido = a.read()
                print(contenido)
                break
        except:
            # NOTE(review): bare except; an OSError filter would be safer.
            print("Error al intentar abrir")
            print (f"No se encuentra el archivo {archivo}, especifique su nombre correctamente:")
            archivo = (input("Nombre de archivo:"))
def estimarViaje(camion, recorrido, chofer):
    """Estimate a trip over *recorrido* for *camion*/*chofer* and persist it.

    Mutates the truck: its CiudadActual advances leg by leg and its fuel is
    decremented (topped up by 1000 litres when short). The summary list is
    [patente, km, time, tank-refilled?, >1000km?, driver] and is appended to
    the report file via guardar().
    NOTE(review): an empty *recorrido* makes reduce() raise TypeError, and
    `consumo = km * kmLitro` looks dimensionally inverted (km per litre
    would be divided) -- confirm the intended fuel model.
    """
    estimado=["","","","","",""]
    estimado[0]=camion.getPatente()
    estimado[5]=chofer.getNombreCompleto()
    # City coordinates on an arbitrary plane; legs are straight lines.
    d = {
        "Lanus": (40,30),
        "Lomas": (20,10),
        "Banfield": (12,30),
        "Escalada": (10, 34) }
    km=[]
    for x in recorrido:
        # Euclidean distance from the truck's current city to the next stop.
        x1=d[camion.CiudadActual][0]
        x2=d[x][0]
        y1=d[camion.CiudadActual][1]
        y2=d[x][1]
        resultado= sqrt((x2-x1)**2 + (y2-y1)**2)
        km.append(resultado)
        camion.CiudadActual=x
    kmTotal= reduce(sumar, km)
    print(f"total de kilometros: {kmTotal}")
    estimado[1]= f"{kmTotal} kmTotal"
    # Travel time at top speed, truncated to whole hours, plus one hour per stop.
    tiempoEstimado= int((kmTotal/camion.getVelMaxima()))
    print(f"tiempo estimado: {tiempoEstimado} hora/s")
    paradas=len(recorrido)
    tiempoTotal=tiempoEstimado+paradas
    print(f"tiempo estimado mas paradas: {tiempoTotal} hora/s")
    estimado[2]=f"{tiempoTotal} hora/s"
    consumo=int((kmTotal*camion.getKmLitros())/1)
    print("consumo: "+str(consumo))
    if(consumo >= camion.LitrosDisponibles):
        # Not enough fuel: top up a fixed 1000 litres before deducting.
        print("se requieren mas litros")
        camion.LitrosDisponibles+=1000
        print("se cargaron 1000 litros al camion")
        camion.LitrosDisponibles= camion.LitrosDisponibles-consumo
        estimado[3]="Si Se cargo el tanque"
    else:
        print("no se necesito cargar el tanque")
        camion.LitrosDisponibles= camion.LitrosDisponibles-consumo
        estimado[3]="NO se cargo el tanque"
    if(kmTotal>=1000):
        estimado[4]="Si supero los 1000km"
    else:
        estimado[4]="NO supero los 1000km"
    guardar(estimado)
# Console menu: pick a truck (1-3) and build its route, read the stored
# report (4), or exit (0).
# NOTE(review): non-numeric input makes int(input()) raise ValueError.
while True:
    print("seleccione camion")
    print(f"1= {camion1.getPatente()}")
    print(f"2= {camion2.getPatente()}")
    print(f"3= {camion3.getPatente()}")
    print("4= leer resumen")
    print("0= EXIT")
    x = int(input())
    if(x==1):
        print(f"seleccionaste a {camion1.getPatente()}")
        recorrido = cargarRecorrido()
        print (recorrido)
        estimarViaje(camion1, recorrido, Juan)
    elif(x==2):
        print(f"seleccionaste a {camion2.getPatente()}")
        recorrido = cargarRecorrido()
        estimarViaje(camion2,recorrido, Martin)
    elif(x==3):
        print(f"seleccionaste a {camion3.getPatente()}")
        recorrido = cargarRecorrido()
        estimarViaje(camion3, recorrido, Agustin)
    elif(x==4):
        leer()
    elif(x==0):
        break
    else:
        print("valor incorrecto")
| IgnacioVelliz/Programa-Python | Parcial2.py | Parcial2.py | py | 5,595 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "os.system",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 137,
"usage_type": "call"
}
] |
38013843478 | #
# Author: Denis Tananaev
# File: makeTFrecords.py
# Date:9.02.2017
# Description: tool for the tfrecords convertion of the SUN3D dataset
#
import numpy as np
import skimage.io as io
import scipy.misc
import tensorflow as tf
def centered_crop(image, new_w, new_h):
    """Return the central new_h x new_w region of *image* (an H x W [x C] array).

    Fix: the midpoint arithmetic used true division (/2), which yields floats
    under Python 3 and raises TypeError when used as slice bounds; integer
    division is used now. For even-size differences the crop is identical to
    the Python 2 behaviour.
    """
    height = image.shape[0]
    width = image.shape[1]
    left = (width - new_w) // 2
    top = (height - new_h) // 2
    right = (width + new_w) // 2
    bottom = (height + new_h) // 2
    return image[top:bottom, left:right]
def resizer_image(image):
    '''Resize images by using bilinear interpolation'''
    # Centre-crop to 550x450 (w x h), then shrink to 256x192.
    # NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3; this
    # requires SciPy < 1.3 (with Pillow installed) or a port to PIL/skimage.
    croped_image=centered_crop(image,550,450)
    result=scipy.misc.imresize(croped_image, (192,256), interp='bilinear', mode=None)
    return result
def resizer_depth(depth):
    '''Resize depth by using nearest neighbour method '''
    # Nearest-neighbour keeps depth values unblended (no interpolation
    # between depths of different surfaces).
    # NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3.
    croped_image=centered_crop(depth,550,450)
    result=scipy.misc.imresize(croped_image, (192,256), interp='nearest', mode=None)
    return result
def read_file(textfile):
    '''Read txt file and output array of strings line by line '''
    with open(textfile) as handle:
        return handle.read().splitlines()
def _bytes_feature(value):
    # Wrap raw bytes in a TF Feature proto (serialized image/depth data).
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value):
    # Wrap an integer (image height/width) in a TF Feature proto.
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def make_tfrecords(tfrecords_filename,filename_pairs):
    '''Convert pairs of (image, depth) tuple to the tfrecords format'''
    # NOTE(review): tf.python_io and ndarray.tostring() are TF1/legacy APIs;
    # under TF2 the equivalents are tf.io.TFRecordWriter and tobytes().
    writer = tf.python_io.TFRecordWriter(tfrecords_filename)
    for img_path, depth_path in filename_pairs:
        img = np.array(io.imread(img_path))
        depth = np.array(io.imread(depth_path))
        # The reason to store image sizes was demonstrated
        # in the previous example -- we have to know sizes
        # of images to later read raw serialized string,
        # convert to 1d array and convert to respective
        # shape that image used to have.
        img=resizer_image(img)
        depth=resizer_depth(depth)
        img_raw = img.tostring()
        depth_raw = depth.tostring()
        height = img.shape[0]
        width = img.shape[1]
        # One Example per (image, depth) pair, holding sizes + raw bytes.
        example = tf.train.Example(features=tf.train.Features(feature={
            'height': _int64_feature(height),
            'width': _int64_feature(width),
            'image_raw': _bytes_feature(img_raw),
            'depth_raw': _bytes_feature(depth_raw)}))
        writer.write(example.SerializeToString())
    writer.close()
def createPairs(train_im, train_d):
    '''Create array of tuples (image,depth) '''
    # Each listing file holds one path per line; pair them up positionally.
    image_paths = read_file(train_im)   # jpg paths
    depth_paths = read_file(train_d)    # png paths
    # Index-based on purpose: a shorter depth list should fail loudly
    # (IndexError), exactly as before.
    return [(image_paths[idx], depth_paths[idx])
            for idx in range(len(image_paths))]
| Dtananaev/DepthNet | tfCNN/data_processing/tools/makeTFrecords.py | makeTFrecords.py | py | 2,914 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "scipy.misc.misc.imresize",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "scipy.misc.misc",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "scipy.misc",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "scipy.misc.m... |
38115229575 | from app import app
from flask import jsonify, request
from models import Host, HostSchema
from scheduling import th
@app.route('/api/1.0/test')
def index():
return 'ok'
@app.route('/api/1.0/host', methods=['POST'])
def create_host():
if request.methods == 'POST':
ip = (request.json['ip'])
try:
host = Host.query.filter(Host.ip == ip).first()
if host is not None:
return jsonify({'success': False, "error": "this host already exists with id " + host.id})
host = Host(ip=ip)
db.session.add(host)
db.session.commit()
return jsonify({'success': True, "error": null})
except:
error ='ip upload error'
return jsonify({'success': False, "error": error})
@app.route('/api/1.0/host/<hostid>', methods=['GET', 'DELETE'])
def return_host(hostid):
    """Fetch (GET) or remove (DELETE) a single host by numeric id.

    Fixes: the DELETE filter referenced the misspelled name `hosid`
    (NameError), and the DELETE response referenced `host_data`, which is
    only defined in the GET branch.
    """
    if request.method == 'GET':
        try:
            id = int(hostid)
            host = Host.query.filter(Host.id == id).first()
            host_schema = HostSchema()
            host_data = host_schema.dump(host)
            return jsonify({'success': True, "error": 'null', 'data': host_data})
        except:
            error = 'finding host error'
            return jsonify({'success': False, "error": error})
    elif request.method == 'DELETE':
        try:
            db.session.query(Host).filter(Host.id == int(hostid)).delete()
            db.session.commit()
            return jsonify({'success': True, "error": 'null'})
        except:
            return jsonify({'success': False, "error": 'Delete host error'})
@app.route('/api/1.0/hosts', methods=['GET'])
def return_hosts_list():
    """Return every registered host, serialized via HostSchema."""
    try:
        hosts = Host.query.all()
        host_schema = HostSchema(many=True)
        hosts_list = host_schema.dump(hosts)
        # Debug trace; consider removing or switching to logging.
        print(hosts_list)
        return jsonify({'success': True, "error": 'null', 'data': hosts_list})
    except:
        # NOTE(review): bare except hides programming errors; narrow it.
        return jsonify({'success': False, "error": 'Get ip list error'})
| evgeneh/pinger_back_py | view.py | view.py | py | 2,078 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "app.app.route",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "app.app",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "flask.request.methods",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"l... |
27969617509 | # type: ignore
from flask import render_template, redirect, url_for, session
from main import app, db
from admin import admin
from products.models import Product
import os
app.register_blueprint(admin)
@app.route('/')
def index():
    """Home page: render the catalogue of all products."""
    products=Product.query.all()
    return render_template('index.html', products=products)
@app.route('/about')
def about():
    """Static about page."""
    return render_template('about.html')
@app.route('/files')
def make_tree(path="/"):
    """Recursively describe *path* as {'name': ..., 'children': [...]}.

    Each directory becomes a nested dict; each file becomes a {'name': ...}
    leaf. Unreadable directories are reported and returned childless.

    Fix: the original took no parameter yet recursed with `make_tree(fn)`,
    raising TypeError on the first subdirectory; *path* is now a real
    parameter whose default ("/") preserves the old behaviour at the
    '/files' route. The per-call `print(tree)` debug trace was dropped.
    NOTE(review): a Flask view returning a plain dict requires Flask >= 2;
    older versions need jsonify(tree).
    """
    tree = dict(name=os.path.basename(path), children=[])
    try:
        entries = os.listdir(path)
    except OSError as e:
        print(e)  # unreadable directory: report and fall through childless
    else:
        for name in entries:
            fn = os.path.join(path, name)
            if os.path.isdir(fn):
                tree['children'].append(make_tree(fn))
            else:
                tree['children'].append(dict(name=name))
    return tree
| muchirajunior/flask-ecommerce | routes.py | routes.py | py | 904 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "main.app.register_blueprint",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "admin.admin",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "main.app",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "products.models",
... |
17156680991 | import torch
import numpy as np
from transformers import AutoTokenizer
from tqdm import tqdm
def get_lm_embeddings(mapper_model, test_df, trained_model_name, use_first_token_only = False):
    """Embed every row of test_df.text with mapper_model's language model.

    Returns a numpy array with one row per input text: either the hidden
    state at the first token position (use_first_token_only=True) or the
    mean over all token positions. trained_model_name is only used in the
    progress message.
    """
    mapper_model.set_parameters()
    # Load pre-trained model tokenizer (vocabulary)
    tokenizer = AutoTokenizer.from_pretrained(mapper_model.model_name, use_fast=True)
    # Load the language model, move it to the target device and put it in
    # inference mode (no dropout, cleared gradients).
    model = mapper_model.get_model()
    model = model.to(mapper_model.device)
    model.eval()
    model.zero_grad()
    # Tokenize and convert to input IDs
    # NOTE(review): pad_to_max_length is deprecated in newer transformers;
    # the equivalent is padding="max_length".
    tokens_tensor = tokenizer.batch_encode_plus(list(test_df.text.values),
                                                max_length = mapper_model.max_length,
                                                pad_to_max_length=True,
                                                truncation=True,
                                                return_tensors="pt")
    tokens_tensor = tokens_tensor["input_ids"]
    # Create list for all embeddings to be saved to
    embeddings = []
    # Batch tensor so we can iterate over inputs (shuffle=False keeps the
    # output rows aligned with test_df rows)
    test_loader = torch.utils.data.DataLoader(tokens_tensor, batch_size=mapper_model.eval_batch_size, shuffle=False)
    # Make sure the torch algorithm runs without gradients (as we aren't training)
    with torch.no_grad():
        print(f"Iterating over inputs {trained_model_name}")
        # Iterate over all batches, passing the batches through the test set
        for test_batch in tqdm(test_loader):
            # Get the model output from the test set
            outputs = model(test_batch.to(mapper_model.device))
            if use_first_token_only:
                # Output only the model output from the first token position (I.e. the position that BERT NSP is trained on)
                np_array = outputs[0][:,0,:].cpu().numpy()
            else:
                # Output the final average encoding across all characters as a numpy array
                np_array = outputs[0].mean(dim=1).cpu().numpy()
            # Append this encoding to a list
            embeddings.append(np_array)
    all_embeddings = np.concatenate(embeddings, axis=0)
    return all_embeddings
| Peter-Devine/Feedback-Mapping | utils/bert_utils.py | bert_utils.py | py | 2,182 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "transformers.AutoTokenizer.from_pretrained",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "transformers.AutoTokenizer",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 30,
"usage_type": "cal... |
37897597748 | """
practice advance read-write options & strategies with pandas
"""
import pandas as pd
import matplotlib.pyplot as plt
def start():
    """set options for pandas"""
    display_opts = {
        'max_columns': None,
        'max_colwidth': 25,
        'expand_frame_repr': False,  # Don't wrap to multiple pages
        'max_rows': 14,
        'max_seq_items': 50,         # Max length of printed sequence
        'precision': 4,
        'show_dimensions': False,
    }
    mode_opts = {
        'chained_assignment': None,  # Controls SettingWithCopyWarning
    }
    for category, opts in (('display', display_opts), ('mode', mode_opts)):
        for name, value in opts.items():
            pd.set_option(f'{category}.{name}', value)
start()

# read data - trades futures
data_dir = '/home/cn/data/sample_tick/'
trades_f = data_dir + 'ES_Sample/ES_Trades.csv'
tdf = pd.read_csv(trades_f)

# checkout the trade dataset (head()/count() are REPL-style peeks; their
# return values are discarded when run as a script)
tdf.head()
tdf.count()

quotes_f = data_dir + 'ES_Sample/ES_Quotes.csv'
qdf = pd.read_csv(quotes_f)
qdf.head()
qdf.count()

# get top of file for limited number of rows
q_head = pd.read_csv(quotes_f, nrows=100)

# read using memory map - only use for small files on a machine with massive RAM:
q_m = pd.read_csv(quotes_f, memory_map=True) ## map the whole file into memory and read from there to be faster
q_m.head()
q_m = None  # drop the reference so the mapped frame can be freed

# read using chunk size as iterator
q_reader = pd.read_csv(quotes_f, chunksize=100000)

# drop columns i don't need
tdf.drop(columns=['Sales Condition', 'Exclude Record Flag'], inplace=True)

# how many days?
tdf.groupby('Date').count()

# if I want to split into days, using the chunking methods, what do I need to do?
# define a hash function taking a group as input and give out a hash as a group name
def sub_group_hash(x):
    """Echo the group key and return its string form (used as a group name)."""
    print(x)
    return str(x)
tdf.columns
tdf.loc[0]['Date']

import datetime as dt
import os

tmp_hdf5 = "/tmp/groupby.h5"
# Remove any stale store from a previous run. Fix: a bare os.remove raises
# FileNotFoundError on the first run, so only delete when the file exists.
if os.path.exists(tmp_hdf5):
    os.remove(tmp_hdf5)
q_reader = pd.read_csv(quotes_f, chunksize=100000) # make a reader
# create the store and append, using data_columns where I possibly could aggregate
with pd.HDFStore(tmp_hdf5) as store:
    # loop through the chunks here
    for chunk in q_reader:
        # create a grouper for each chunk using the date
        chunk_grp = chunk.groupby('Date')
        # append each of the subgroups to a separate group in the resulting hdf file
        # this will be a loop around the sub_groups
        for gr_name, grouped_df in chunk_grp:
            # normalise MM/DD/YYYY to YYYYMMDD so the key is a valid identifier
            gr_name = dt.datetime.strptime(gr_name, '%m/%d/%Y').strftime('%Y%m%d')
            print(gr_name)
            store.append('date_%s' % gr_name, grouped_df,
                         data_columns=['Symbol', 'Time', 'Price', 'Volume', 'Market Flag'])
# now we have an hdf file with subgroup by date
with pd.HDFStore(tmp_hdf5) as store:
    # all of the groups are now the keys of the store
    for gr_name in store.keys():
        print(gr_name)
        # this is a complete group that will fit in memory
        # grouped = store.select(gr_name)
        # perform the operation on grouped and write the new output
        # grouped.groupby(......).apply(your_cool_function)
# print("store:\n%s" % store)
# print("\ndf:\n%s" % store['df'])
#
# # get the groups
# groups = store.select_column('df','A').unique()
# print("\ngroups:%s" % groups)
# # iterate over the groups and apply my operations
# l = []
# for g in groups:
#     grp = store.select('df',where = [ 'A=%s' % g ])
#     # this is a regular frame, aggregate however you would like
#     l.append(grp[['D','E','F']].sum())
#
# print("\nresult:\n%s" % pd.concat(l, keys = groups))
# plotting - could be quite slow - moving here
tdf[['Price']].plot(kind='line')
plt.show()
tdf[['Volume']].plot(kind='bar')
plt.show()
tdf.head()
plt.xticks(rotation=45)
fig, ax = plt.subplots()
fig.autofmt_xdate()
# [0::20] downsamples to every 20th tick; one line per trading date on a shared axis
tdf[0::20].groupby('Date').plot(x='Time', y='Price', ax=ax, legend=False)
plt.show()
{
"api_name": "pandas.set_option",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
... |
4123119834 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# In[2]:
# Load the YearsExperience -> Salary dataset (absolute Windows path --
# will only run on the original author's machine).
dataset = pd.read_csv(r'E:\Udemy corurse\[DesireCourse.Net] Udemy - Machine Learning A-Z™ Hands-On Python & R In Data Science\1.Machine Learning A-Z New\Part 2 - Regression\Section 4 - Simple Linear Regression\Salary_Data.csv')


# In[3]:


dataset.head()


# In[4]:


# X = all columns but the last (experience); y = second column (salary).
data = dataset.values
X = data[:, :-1]
y = data[:, 1]


# In[5]:


from sklearn.model_selection import train_test_split


# In[6]:


# 70/30 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)


# In[7]:


from sklearn.linear_model import LinearRegression


# In[8]:


lm =LinearRegression()


# In[9]:


lm.fit(X_train,y_train)


# In[10]:


y_prediction = lm.predict(X_test)


# In[11]:


# Training data (red) against the fitted regression line (blue).
plt.scatter(X_train ,y_train, color= 'red')
plt.plot(X_train, lm.predict(X_train),color='blue')


# In[12]:


# Test data (red) against the same fitted line (blue).
plt.scatter(X_test ,y_test, color= 'red')
plt.plot(X_train, lm.predict(X_train),color='blue')


# In[ ]:
| Rizwan-khan-7/Machine-Learning | Regression_Analysis_Part I.py | Regression_Analysis_Part I.py | py | 1,045 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 53,
"usage_type": "call"
... |
20338749336 | import cv2
import numpy as np
# VARIABLES
# True while the left mouse button is held down, False otherwise.
drawing = False
# Coordinates where the current drag started (-1,-1 = no drag yet).
ix, iy = -1, -1

# FUNCTION
def draw_circle(event, x, y, flags, param):
    """Mouse callback: while dragging, paint filled red circles centred on
    the drag origin whose radius follows the cursor.

    Fix: renamed from the misleading `draw_rectangle` -- it has always
    drawn circles. The redundant abs() before squaring was also dropped.
    """
    global ix, iy, drawing
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing:
            # Radius = Euclidean distance from the drag origin to the cursor.
            cv2.circle(img, (ix, iy), int(np.sqrt((x - ix) ** 2 + (y - iy) ** 2)), (0, 0, 255), -1)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False

cv2.namedWindow(winname="my_drawing")
cv2.setMouseCallback("my_drawing", draw_circle)

# LOAD THE CANVAS IMAGE (the callback only fires once the window is live,
# so `img` is defined before any drawing happens)
img = cv2.imread("jupiter.jpg")
# img = np.zeros((512, 1024, 3))

# Show the (continuously repainted) image until Esc is pressed.
while True:
    cv2.imshow("my_drawing", img)
    if cv2.waitKey(1) & 0xFF == 27:  # 27 = Esc
        break

cv2.destroyAllWindows()
{
"api_name": "cv2.EVENT_LBUTTONDOWN",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "cv2.EVENT_MOUSEMOVE",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "cv2.circle",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.... |
24614073845 | import requests # for api calls
import time # for converting epoch to timedate format
import pprint as pp # for debugging
def convert_epoch_to_datetime(epoch):
    """Format a Unix timestamp as 'YYYY-MM-DD HH:MM:SS' in local time."""
    local_time = time.localtime(epoch)
    return time.strftime('%Y-%m-%d %H:%M:%S', local_time)
def get_news(ticker, startDate, endDate):
    """
    Purpose: Get a list of news urls pertaining to a stock
    :param ticker: Ticker symbol of a stock
    :param startDate: When to start looking for news articles. Format is: 'YYYY-MM-DD'
    :param endDate: When to stop looking for news articles. Format is: 'YYYY-MM-DD'
    :return:
        string[] headline
        string[] url
        string[] datetime  # format: 'YYYY-MM-DD HH:MM:SS'
    """
    headline = []
    url = []
    datetime = []
    # NOTE(review): the API token is hard-coded here; consider moving it
    # to configuration or an environment variable.
    data = requests.get('https://finnhub.io/api/v1/company-news?symbol={}&from={}&to={}&token=br3gbbnrh5rai6tghkig'.format(ticker, startDate, endDate)).json()
    # Iterate the articles directly instead of indexing via range(len(...)).
    for article in data:
        headline.append(article['headline'])
        url.append(article['url'])
        datetime.append(convert_epoch_to_datetime(article['datetime']))
    return headline, url, datetime
| ajsalagundi/Stock_Intelligence | data_application/web_scrapers/news_articles_retriever.py | news_articles_retriever.py | py | 1,137 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.strftime",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 24,
"usage_type": "call"
}
] |
13170391452 | # this file exists to help tune the parameters of the stressor functions that put learners in different states
# it doesn't make sense to put the same load on a Jetson Nano as on a raspberry pi. What would moderately inconvenience the Nano would completely cripple the pi
# for this reason, we need to tune stressor parameters with reference to the learner's benchmarking scores
import axon
import asyncio
import sys
sys.path.append('..')
from states import state_dicts
from tasks import tasks
target_ip = '192.168.2.210'
async def main():
    """Fetch benchmark scores from the remote worker and print one row per state."""
    worker = axon.client.RemoteWorker(target_ip)
    benchmark_scores = await worker.rpcs.get_benchmark_scores()
    # Scores are keyed by (task, state); the task itself is irrelevant for
    # tuning, so any single task serves as the fixed half of the key.
    task_name = list(tasks.keys())[0]
    print(' | training rate bps | data time spb | param_time spb')
    # One line per learner state, showing that state's benchmark tuple.
    for state_name in list(state_dicts.keys()):
        print(state_name, ':', benchmark_scores[(task_name, state_name)])
if (__name__ == '__main__'):
asyncio.run(main()) | DuncanMays/multi_orchestrator_pl | tests/tune_stressors.py | tune_stressors.py | py | 1,205 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "axon.client.RemoteWorker",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "axon.client",
... |
42578862241 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import importlib
importlib.reload(sys)
import time
import requests
import csv
url = "https://rdap.registro.br/domain/"
domains = ['stz.com.br',
'viasullogistica.com',
'eletronor.com',
'intelbras.com.br',
'bmlog.com.br',
'blueticket.com.br',
'taai.com.br',
'dgranel.com.br',
'baspan.com.br',
'karsten.com.br',
'yes.ind.br',
'latitudelog.com.br',
'intelbras.com.br',
'gmail.com',
'tketransporte.com.br',
'asserttecnologia.com.br',
'eletronor.com',
'intelbras.com.br',
'refnicnil.com.br',
'transpocrgo.com.br',
'gmail.com',
'positivo.com.br',
'intelbras.com.br',
'tsilvio.com.br',
'unimartra.com.br',
]
def get_url(domain):
    """Fetch RDAP data for *domain* from registro.br.

    :param domain: domain name to look up (appended to the base RDAP url)
    :return: parsed JSON dict on HTTP 200, otherwise None
    """
    session = requests.Session()
    # Prime the session against the base endpoint (cookies / keep-alive).
    session.get(url)
    # Bug fix: the lookup previously used requests.get(), bypassing the
    # session that was just created; reuse the session for the real request.
    r = session.get(url + domain)
    if r.status_code == 200:
        return r.json()
    return None
def get_document(json):
    """Extract the CNPJ identifier from an RDAP response dict, or None if absent."""
    for entity in json.get("entities", []):
        for public_id in entity.get("publicIds", []):
            if public_id["type"] == "cnpj":
                return public_id["identifier"]
    return None
def append_to_csv(domain, document, json):
    """Append one 'domain;document;json' line to the leads file.

    Each field is also echoed to stdout, as before. The file is opened with
    a context manager so the handle is closed even if the write fails.
    """
    csv_row = [domain, document, json]
    for item in csv_row:
        print(item)
    # ';'.join replaces the old manual string building + trailing-separator trim.
    with open('leads_evento_agile_CNPJ_leadspedro.csv', 'a') as file:
        file.write(';'.join(str(item) for item in csv_row) + '\n')
for domain in domains:
json = get_url(domain)
if json is not None:
append_to_csv(domain, get_document(json), json)
time.sleep(10)
| fiilipeneto/pandorasbox | Busca CNPJ.py | Busca CNPJ.py | py | 1,758 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "importlib.reload",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "requests.Session",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_... |
15184343497 | """Advent of Code - Day 1."""
from pathlib import Path
from typing import List
FILE_PATH = Path(__file__).parent.parent / "data" / "day_1.txt"
def find_elves_most_calories(calories: List, n: int) -> int:
    """Fat elf detection.

    Find the total calories carried by the top n elves. The input is a flat
    list of calorie strings in which empty strings separate one elf's items
    from the next elf's.

    Args:
        calories: Calorie list for puzzle.
        n: Threshold for top n elves by calories.

    Returns:
        int: Total calories for top n elves.
    """
    # Single-pass grouping on blank lines. Unlike the original index-slicing
    # approach, this also handles an input with no blank separators at all
    # (a single elf) without raising IndexError.
    totals = []
    current = 0
    for line in calories:
        if line == "":
            totals.append(current)
            current = 0
        else:
            current += int(line)
    totals.append(current)
    # Sum the n largest per-elf totals.
    totals.sort(reverse=True)
    return sum(totals[:n])
if __name__ == "__main__":
with open(FILE_PATH, "r") as file:
calories = [line.strip() for line in file.readlines()]
print(f"Part 1: {find_elves_most_calories(calories, n=1)}.")
print(f"Part 2: {find_elves_most_calories(calories, n=3)}.")
| albutz/aoc-2022 | src/day_1.py | day_1.py | py | 1,360 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 8,
"usage_type": "name"
}
] |
42558176076 | #!/usr/bin/env python3
## Estimates errors using Monte Carlo sampling
# Hamish Silverwood, GRAPPA, UvA, 23 February 2015
import numpy as np
import gl_helper as gh
import pdb
import pickle
import sys
import numpy.random as rand
import matplotlib.pyplot as plt
#TEST this will eventually go outside
def ErSamp_gauss_linear_w_z():
    """Resample z positions from the simplenu data file with 5% Gaussian scatter.

    NOTE(review): the data file path is hard-coded to one specific machine;
    this will fail anywhere else.
    """
    fraction_err = 0.05
    datafile = '/home/hsilverw/LoDaM/darcoda/Data_Sets/simplenu/simplenu_sigz_raw_sdz_p05_sdvz_5.dat'
    data = np.loadtxt(datafile)
    # First column of the data file is used as the z values.
    z_data = data[:, 0]
    z_sampled = []
    for z_val in z_data:
        # Scatter each point by a Gaussian whose width scales with z itself.
        z_sampled.append(rand.normal(loc = z_val, scale= z_val*fraction_err))
    return z_sampled
z_data_flat_distro = rand.random(2000000)
def ErSamp_flat_distro_test():
    """Resample the module-level flat test distribution with Gaussian noise.

    Each point of ``z_data_flat_distro`` is perturbed by N(point, 0.001)
    and folded to be non-negative via abs(). The per-point draw order is
    identical to the original loop, so seeded runs reproduce exactly.
    """
    fraction_err = 0.001
    return [abs(rand.normal(loc=z_val, scale=fraction_err))
            for z_val in z_data_flat_distro]
def mc_nu_error(sampled_z_func, number_mcs, binmin, binmax, bincenter):
    """Monte Carlo estimate of the per-bin standard deviation of nu.

    sampled_z_func -- callable returning one resampled vector of z points
    number_mcs     -- number of Monte Carlo realisations to draw
    binmin, binmax -- bin edge arrays passed through to the binning helper
    bincenter      -- bin centres (kept for interface parity; unused here)

    Returns a numpy array holding the standard deviation of nu in each bin.
    """
    nu_vectors = []
    for _ in range(number_mcs):
        z_sample = sampled_z_func()
        # Only the first return value (nu) of the binning helper is needed.
        nu, _, _, _, _ = gh.nu_sig_from_bins(binmin, binmax, z_sample,
                                             np.ones(len(z_sample)))
        nu_vectors.append(nu)
    nu_matrix = np.array(nu_vectors)
    # Column-wise statistics across realisations; means and medians are
    # computed as in the original code but only the stdevs are returned.
    nu_stdevs = [np.std(nu_matrix[:, col]) for col in range(len(binmin))]
    nu_means = [np.mean(nu_matrix[:, col]) for col in range(len(binmin))]
    nu_medians = [np.median(nu_matrix[:, col]) for col in range(len(binmin))]
    return np.array(nu_stdevs)
if __name__=="__main__":
binmin, binmax, bincenter = gh.bin_r_linear(0.2, 0.8, 12)
nu_stdevs = mc_nu_error(ErSamp_flat_distro_test, 100, binmin, binmax, bincenter)
pdb.set_trace()
| PascalSteger/gravimage | programs/gi_mc_errors.py | gi_mc_errors.py | py | 2,025 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.loadtxt",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "numpy.random.random",
... |
9246343257 | from typing import Any
from fastapi import APIRouter, HTTPException, status
from tortoise.exceptions import DoesNotExist
from project.app.src.suppliers.service import create
from project.app.src.suppliers.service import delete
from project.app.src.suppliers.service import get_all
from project.app.src.suppliers.service import get_by_id
from project.app.src.suppliers.service import update
from project.app.src.suppliers.schemas import SupplierIn
from project.app.src.suppliers.schemas import SupplierOut
from project.app.src.common.async_context_manager import AsyncContextManager
router = APIRouter(
prefix="/suppliers",
tags=["Suppliers"],
)
@router.get("/", status_code=status.HTTP_200_OK, response_model=list[SupplierOut])
async def get_suppliers(
    is_active: bool = True,
    is_archived: bool = False,
) -> Any:
    """List suppliers filtered by flags (defaults: active, not archived)."""
    return await get_all(is_active=is_active, is_archived=is_archived)
@router.get("/{supplier_id}", status_code=status.HTTP_200_OK, response_model=SupplierOut)
async def get_supplier_by_id(supplier_id: str) -> Any:
    """Fetch a single supplier by id; responds 404 when it does not exist."""
    try:
        return await get_by_id(supplier_id)
    except DoesNotExist:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Supplier not found"
        )
@router.post("/", status_code=status.HTTP_201_CREATED, response_model=SupplierOut)
async def create_new_supplier(supplier: SupplierIn) -> Any:
    """Create a supplier after validating the INN length (must be 10 or 12)."""
    if len(supplier.inn) not in (10, 12):
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="INN should have 10 or 12 numbers"
        )
    # The service returns a falsy value when creation is rejected.
    new_supplier = await create(supplier)
    if not new_supplier:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Supplier cannot be created"
        )
    return new_supplier
@router.patch("/{supplier_id}", status_code=status.HTTP_200_OK, response_model=SupplierOut)
async def update_supplier_by_id(supplier_id: str, payload: SupplierIn) -> Any:
    """Update a supplier; 404 when unknown, 400 when it is locked for editing."""
    try:
        supplier = await get_by_id(supplier_id)
    except DoesNotExist:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Supplier: {supplier_id} not found"
        )
    if not supplier.can_be_edited:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Cannot update supplier: {supplier_id}"
        )
    # The update runs inside the project's async context manager
    # (presumably a transaction scope -- confirm in common/async_context_manager).
    async with AsyncContextManager():
        updated_supplier = await update(supplier_id, payload)
    return updated_supplier
@router.delete("/{supplier_id}", status_code=status.HTTP_200_OK, response_model=SupplierOut)
async def delete_supplier_by_id(supplier_id: str) -> Any:
    """Archive (soft-delete) a supplier; 404 if unknown, 400 if not archivable."""
    try:
        supplier = await get_by_id(supplier_id)
    except DoesNotExist:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Supplier {supplier_id} not found"
        )
    # Guard clauses: the supplier must be editable and not already archived.
    if not supplier.can_be_edited:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Cannot archive supplier {supplier_id}"
        )
    if supplier.is_archived:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Supplier {supplier_id} is already archived"
        )
    async with AsyncContextManager():
        updated_supplier = await delete(supplier_id)
return updated_supplier | ademchenkov/wm | project/app/src/suppliers/router.py | router.py | py | 3,086 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "project.app.src.suppliers.service.get_all",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "fastapi.status.HTTP_200_OK",
"line_number": 21,
"usage_type": "attribute"
}... |
37849511311 | #!/usr/bin/env python
#
# lib.py
#
# Helper code for CLI for interacting with switches via console device
#
try:
import click
import re
import swsssdk
import subprocess
import sys
except ImportError as e:
raise ImportError("%s - required module not found" % str(e))
DEVICE_PREFIX = "/dev/ttyUSB"
ERR_CMD = 1
ERR_DEV = 2
CONSOLE_PORT_TABLE = "CONSOLE_PORT"
BAUD_KEY = "baud_rate"
DEVICE_KEY = "remote_device"
FLOW_KEY = "flow_control"
DEFAULT_BAUD = "9600"
# QUIET == True => picocom will not output any messages, and pexpect will wait for console
# switch login or command line to let user interact with shell
# Downside: if console switch output ever does not match DEV_READY_MSG, program will think connection failed
# QUIET == False => picocom will output messages - welcome message is caught by pexpect, so successful
# connection will always lead to user interacting with shell
# Downside: at end of session, picocom will print exit message, exposing picocom to user
QUIET = False
DEV_READY_MSG = r"([Ll]ogin:|[Pp]assword:|[$>#])" # login prompt or command line prompt
TIMEOUT_SEC = 0.2
# runs command, exit if stderr is written to, returns stdout otherwise
# input: cmd (str), output: output of cmd (str)
def run_command(cmd):
    """Run *cmd* through the shell and return its decoded stdout.

    If the command writes anything to stderr, the message is echoed and
    the program exits with ERR_CMD.
    """
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    # communicate() avoids the pipe deadlock possible with sequential
    # .read() calls, and decoding the streams fixes a Python 3 bug: the
    # old code compared bytes to "" (str), which is always unequal, so
    # every invocation took the error path.
    output, error = proc.communicate()
    output = output.decode()
    error = error.decode()
    if error != "":
        click.echo("Command resulted in error: {}".format(error))
        sys.exit(ERR_CMD)
    return output
# returns a sorted list of all devices (whose name matches DEVICE_PREFIX)
def getAllDevices():
    """Return all device paths matching DEVICE_PREFIX, sorted by port number."""
    listing = run_command("ls " + DEVICE_PREFIX + "*")
    pattern = re.compile(DEVICE_PREFIX + r"\d+")
    devices = [dev for dev in listing.split('\n') if pattern.match(dev)]
    # Numeric sort on the trailing port number (e.g. /dev/ttyUSB10 after 9).
    devices.sort(key=lambda dev: int(dev[len(DEVICE_PREFIX):]))
    return devices
# exits if inputted line number does not correspond to a device
# input: linenum
def checkDevice(linenum):
    """Exit with ERR_DEV unless *linenum* maps to an existing console device."""
    devices = getAllDevices()
    if DEVICE_PREFIX + str(linenum) not in devices:
        click.echo("Line number {} does not exist".format(linenum))
        sys.exit(ERR_DEV)
# returns a dictionary of busy devices and their info
# maps line number to (pid, process start time)
def getBusyDevices():
    """Return a dict of busy console lines: line number -> (pid, start time).

    A line is "busy" when a running minicom/picocom process references its
    /dev/ttyUSB device on the command line.
    """
    cmd = 'ps -eo pid,lstart,cmd | grep -E "(mini|pico)com"'
    output = run_command(cmd)
    processes = output.split('\n')
    # matches any number of spaces then any number of digits
    regexPid = r" *(\d+)"
    # matches anything of form: Xxx Xxx ( 0)or(00) 00:00:00 0000
    regexDate = r"([A-Z][a-z]{2} [A-Z][a-z]{2} [\d ]\d \d{2}:\d{2}:\d{2} \d{4})"
    # matches any non-whitespace characters ending in minicom or picocom,
    # then a space and any chars followed by /dev/ttyUSB<any digits>,
    # then a space and any chars
    regexCmd = r"\S*(?:(?:mini)|(?:pico))com .*" + DEVICE_PREFIX + r"(\d+)(?: .*)?"
    regexProcess = re.compile(r"^"+regexPid+r" "+regexDate+r" "+regexCmd+r"$")
    busyDevices = {}
    for process in processes:
        match = regexProcess.match(process)
        if match != None:
            pid = match.group(1)
            date = match.group(2)
            linenum_key = match.group(3)
            busyDevices[linenum_key] = (pid, date)
    return busyDevices
# returns actual baud rate, configured baud rate,
# and flow control settings of device corresponding to line number
# input: linenum (str), output: (actual baud (str), configured baud (str), flow control (bool))
def getConnectionInfo(linenum):
    """Return (actual baud, configured baud, flow control) for a console line.

    The configured baud is '-' when unset, in which case the actual baud
    falls back to DEFAULT_BAUD.
    """
    # Bug fix: ConfigDBConnector was referenced without ever being imported;
    # qualify it through the swsssdk module imported at the top of this file.
    config_db = swsssdk.ConfigDBConnector()
    config_db.connect()
    entry = config_db.get_entry(CONSOLE_PORT_TABLE, str(linenum))
    conf_baud = "-" if BAUD_KEY not in entry else entry[BAUD_KEY]
    act_baud = DEFAULT_BAUD if conf_baud == "-" else conf_baud
    flow_control = False
    if FLOW_KEY in entry and entry[FLOW_KEY] == "1":
        flow_control = True
    return (act_baud, conf_baud, flow_control)
# returns the line number corresponding to target, or exits if line number cannot be found
# if deviceBool, interprets target as device name
# otherwise interprets target as line number
# input: target (str), deviceBool (bool), output: linenum (str)
def getLineNumber(target, deviceBool):
    """Resolve *target* to a console line number.

    When deviceBool is False, target already is the line number and is
    returned unchanged. Otherwise target is treated as a remote device name
    and looked up in the CONSOLE_PORT table; exits with ERR_DEV when no
    line matches.
    """
    if not deviceBool:
        return target
    # Bug fix: ConfigDBConnector was referenced without ever being imported;
    # qualify it through the swsssdk module imported at the top of this file.
    config_db = swsssdk.ConfigDBConnector()
    config_db.connect()
    devices = getAllDevices()
    linenums = list(map(lambda dev: dev[len(DEVICE_PREFIX):], devices))
    for linenum in linenums:
        entry = config_db.get_entry(CONSOLE_PORT_TABLE, linenum)
        if DEVICE_KEY in entry and entry[DEVICE_KEY] == target:
            return linenum
    click.echo("Device {} does not exist".format(target))
    sys.exit(ERR_DEV)
| liang2biao/official_sonicbuild_201911_all | src/sonic-utilities/consutil/lib.py | lib.py | py | 4,829 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "subprocess.Popen",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "click.echo",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line... |
694104358 | from .models import Event, Slot, SignUp
from groups.methods import get_user_group_membership, group_to_json
from authentication.methods import user_to_json
from common.parsers import parse_datetime_to_epoch_time
# Constants
from common.constants import (
EVENT_ID,
TITLE,
DESCRIPTION,
START_DATE_TIME,
END_DATE_TIME,
LOCATION,
IS_PUBLIC,
GROUP,
IMAGE_URL,
)
from common.constants import TAG, TAG_NAME, TAG_ID
from common.constants import (
SLOT,
SLOT_ID,
SIGNUP_ID,
CONFIRMED_SIGNUP_COUNT,
PENDING_SIGNUP_COUNT,
AVAILABLE_SLOT_COUNT,
SIGNUP_DATE,
IS_CONFIRMED,
IS_ELIGIBLE,
IS_SIGNED_UP,
GENERAL_GROUP_TAG_NAME,
SIGNUPS,
CONFIRMED_SIGNUPS,
PENDING_SIGNUPS,
USER,
HAS_ATTENDED
)
def get_events(*args, **kwargs):
    """Thin wrapper: Event queryset filtered by the given lookups."""
    return Event.objects.filter(*args, **kwargs)
def get_slots(*args, **kwargs):
    """Thin wrapper: Slot queryset filtered by the given lookups."""
    return Slot.objects.filter(*args, **kwargs)
def get_signups(*args, **kwargs):
    """Thin wrapper: SignUp queryset filtered by the given lookups."""
    return SignUp.objects.filter(*args, **kwargs)
def event_to_json(event, include_group=True):
    """Serialise an Event to a JSON-ready dict.

    Datetimes are converted to epoch times; the image URL is added only
    when set, and the owning group only when requested.
    """
    data = {
        EVENT_ID: event.id,
        TITLE: event.title,
        DESCRIPTION: event.description,
        START_DATE_TIME: parse_datetime_to_epoch_time(event.start_date_time),
        END_DATE_TIME: parse_datetime_to_epoch_time(event.end_date_time),
        LOCATION: event.location,
        IS_PUBLIC: event.is_public,
    }
    if event.image_url:
        # Image URLs are made absolute against the production API host.
        data[IMAGE_URL] = f"https://api.slotify.club{event.image_url.url}"
    if include_group:
        data[GROUP] = group_to_json(event.group)
    return data
def signup_to_json(signup, include_slot=True, include_user=True):
    """Serialise a SignUp; slot and user sub-objects are optional."""
    data = {
        SIGNUP_ID: signup.id,
        SIGNUP_DATE: parse_datetime_to_epoch_time(signup.created_at),
        IS_CONFIRMED: signup.is_confirmed,
        HAS_ATTENDED: signup.has_attended,
    }
    if include_slot:
        # include_availability=False skips the per-slot signup counting.
        data[SLOT] = slot_to_json(signup.slot, include_availability=False)
    if include_user:
        data[USER] = user_to_json(signup.user)
    return data
def get_slot_availability_data(slot):
    """Return (confirmed, pending, available) signup counts for *slot*.

    ``available`` is the slot limit minus confirmed signups and may be
    negative if confirmations exceed the limit.
    """
    # Use queryset .count() so the database counts rows instead of
    # materialising every SignUp object just to take len() of the list.
    confirmed_signups = get_signups(slot=slot, is_confirmed=True).count()
    pending_signups = get_signups(slot=slot, is_confirmed=False).count()
    available_slots = slot.limit - confirmed_signups
    return confirmed_signups, pending_signups, available_slots
def get_existing_signup_for_slot(slot, user):
    """Return the user's SignUp for *slot*, or None when none exists."""
    try:
        return get_signups(slot=slot, user=user).get()
    except SignUp.DoesNotExist:
        return None
def get_existing_signup_for_any_event_slot(event, user):
    """Return the user's SignUp for any slot of *event*, or None."""
    try:
        return get_signups(slot__event=event, user=user).get()
    except SignUp.DoesNotExist:
        return None
def slot_to_json(slot, include_availability=True, user=None, include_signups=False):
    """Serialise a Slot to a JSON-ready dict.

    Optionally includes availability counts, the full signup lists, and --
    when *user* is given -- user-specific fields: whether that user is
    signed up / confirmed and whether they are eligible for this slot.
    """
    data = {TAG: {TAG_NAME: slot.tag.name, TAG_ID: slot.tag.id}, SLOT_ID: slot.id}
    if include_availability:
        confirmed, pending, available = get_slot_availability_data(slot)
        data[CONFIRMED_SIGNUP_COUNT] = confirmed
        data[PENDING_SIGNUP_COUNT] = pending
        data[AVAILABLE_SLOT_COUNT] = available
    if include_signups:
        confirmed_signups = get_signups(slot=slot, is_confirmed=True)
        pending_signups = get_signups(slot=slot, is_confirmed=False)
        signup_data = {
            CONFIRMED_SIGNUPS: [
                signup_to_json(signup, include_slot=False)
                for signup in confirmed_signups
            ],
            PENDING_SIGNUPS: [
                signup_to_json(signup, include_slot=False) for signup in pending_signups
            ],
        }
        data[SIGNUPS] = signup_data
    # If user is specified, return user-specific data for the slot
    if not user:
        return data
    existing_signup = get_existing_signup_for_slot(slot, user)
    data[IS_SIGNED_UP] = existing_signup is not None
    data[IS_CONFIRMED] = existing_signup is not None and existing_signup.is_confirmed
    # Eligibility: non-exclusive tags are open to everyone; exclusive tags
    # require an approved membership in the event's group.
    is_eligible = True
    if slot.tag.is_exclusive_to_groups:
        group = slot.event.group
        membership = get_user_group_membership(user=user, group=group)
        if membership is None or not membership.is_approved:
            is_eligible = False
        # Check if this is a general slot (any group members can join this slot regardless of tag)
        # If not general slot, check if member has a matching slot tag
        if (
            not is_general_group_slot(slot)
            and membership
            and slot.tag != membership.tag
        ):
            is_eligible = False
    data[IS_ELIGIBLE] = is_eligible
    return data
def is_general_group_slot(slot):
    """True when the slot carries the catch-all tag open to all group members."""
    return slot.tag.name == GENERAL_GROUP_TAG_NAME
| cs3216-2021-a3-group12/slotify-backend | slotify/events/methods.py | methods.py | py | 4,696 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "models.Event.objects.filter",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "models.Event.objects",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "models.Event",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "mo... |
15205290527 | from rest_framework import permissions
from rest_framework.views import Request, View, status
from championships.models import Championship
from teams.models import Team
from datetime import datetime
class IsChampionshipOwner(permissions.BasePermission):
    """Object permission: only the championship's staff owner may proceed."""
    def has_object_permission(
        self, request: Request, view: View, champs: Championship
    ) -> bool:
        self.message = "You're not the championship owner to perform this action"
        return request.user.is_authenticated and request.user == champs.staff_owner
class IsATeamOwner(permissions.BasePermission):
    """Grants access only when the requesting user is an owner of the team."""
    def has_object_permission(self, request: Request, view: View, team: Team) -> bool:
        self.message = "You're not a team owner to perform this action"
        return any(
            member.id == request.user.id and member.is_team_owner
            for member in team.users.all()
        )
class HaveFivePlayers(permissions.BasePermission):
    """Requires the team roster to contain at least five players."""
    def has_object_permission(self, request: Request, view: View, team: Team) -> bool:
        self.message = "Your team must have at least 5 players"
        return team.users.count() >= 5
class IsTeamEsportCorrectly(permissions.BasePermission):
    """Object permission: the team's e-sport must match the championship's."""
    def has_object_permission(self, request: Request, view: View, team: Team) -> bool:
        self.message = "Your team do not have same e_sport"
        cs_id = view.kwargs["cs_id"]
        champ = Championship.objects.get(id=cs_id)
        return team.e_sport == champ.e_sport
class IsChampionshipFull(permissions.BasePermission):
    """Blocks registration once the championship already holds eight teams."""
    def has_object_permission(self, request: Request, view: View, team: Team) -> bool:
        self.message = "The championship is full"
        champ = Championship.objects.get(id=view.kwargs["cs_id"])
        return champ.teams.count() < 8
class HasAnotherChampionshipAroundSevenDays(permissions.BasePermission):
    """Denies entry when the team already plays a championship within seven
    days of this championship's initial date (compared at day granularity)."""
    def has_object_permission(self, request: Request, view: View, team: Team) -> bool:
        self.message = "You've other championship around this championship date"
        day_7_in_seconds = 604800
        # A team with no championships can always join.
        if team.championship.count() == 0:
            return True
        championship_id = view.kwargs["cs_id"]
        championship = Championship.objects.get(id=championship_id)
        # Truncate the target date to midnight so whole days are compared.
        champ_date = datetime(
            championship.initial_date.year,
            championship.initial_date.month,
            championship.initial_date.day,
        )
        championship_date_in_seconds = champ_date.timestamp()
        team_championships_date = team.championship.values("initial_date")
        for initial_date in team_championships_date:
            initial_datetime = datetime(
                initial_date["initial_date"].year,
                initial_date["initial_date"].month,
                initial_date["initial_date"].day,
            )
            initial_datetime_seconds = initial_datetime.timestamp()
            diference_date = abs(
                championship_date_in_seconds - initial_datetime_seconds
            )
            # Less than 7 * 24h apart in either direction -> conflict.
            if diference_date < day_7_in_seconds:
                return False
        return True
class InitialDateProvidedIsAtLeastSevenDaysAfter(permissions.BasePermission):
    """Request-level check: the submitted 'initial_date' ('YYYY-MM-DD')
    must lie more than seven days in the future."""
    def has_permission(self, request: Request, view: View) -> bool:
        self.message = "Only initial dates after 7 days by now"
        day_7_in_seconds = 604800
        initial_date_list = request.data["initial_date"].split("-")
        date_now = datetime.now().timestamp()
        # Midnight of the provided date, converted to epoch seconds.
        champ_date = datetime(
            int(initial_date_list[0]),
            int(initial_date_list[1]),
            int(initial_date_list[2]),
        )
        championship_date_in_seconds = champ_date.timestamp()
        sub = championship_date_in_seconds - date_now
        return sub > day_7_in_seconds
class IsChampOwnerTryngToEnterInIt(permissions.BasePermission):
    """Denies entry when the championship's staff owner is on the team."""
    def has_object_permission(self, request: Request, view: View, team: Team) -> bool:
        self.message = "Championship owner can't play it"
        champ = Championship.objects.get(id=view.kwargs["cs_id"])
        owner_id = champ.staff_owner.id
        return all(member.id != owner_id for member in team.users.all())
class TeamOwnerHasBalanceToEnterInChampionship(permissions.BasePermission):
    """Object permission: the requesting user's balance must cover the entry fee."""
    def has_object_permission(self, request: Request, view: View, team: Team) -> bool:
        self.message = "Don't have enough money"
        user_balance = request.user.history.balance
        cs_id = view.kwargs["cs_id"]
        champ = Championship.objects.get(id=cs_id)
        champ_entry_amount = champ.entry_amount
        return user_balance >= champ_entry_amount
| gamer-society-org/gamer-society | championships/permissions.py | permissions.py | py | 4,861 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.permissions.BasePermission",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.permissions",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "rest_framework.views.Request",
"line_number": 10,
"usage_type": "... |
2863809841 | import requests
import lxml.etree
import re
# Fetch the Baidu homepage and decode the response body.
session = requests.Session()
response = session.get('http://www.baidu.com')
content = response.content.decode()
# print(content)
# ------------- parse the page with lxml
doc = lxml.etree.HTML(content)
tree = doc.getroottree()
# Extract the text of the <title> element(s) and print each one.
nodes = tree.xpath('//title/text()')
for i in nodes:
    print(i)
| XiaJune/A-small | miller_cre/baidu.py | baidu.py | py | 323 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.Session",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "lxml.etree.etree.HTML",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "lxml.etree.etree",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "lxml.etre... |
20219566108 | import os
from . import utils
import ipywidgets as widgets
from IPython.display import display, clear_output, Javascript
class FileBrowser(object):
    """Interactive Jupyter file browser built from ipywidgets buttons.

    Navigating into an empty directory (or any leaf) ends the session and
    records the selection, readable via ``chosenFileName``.
    """
    def __init__(self, funcName):
        # funcName is echoed back to the user so they can hard-code the
        # chosen path into their call afterwards.
        self.path = os.getcwd()
        self._update_files()
        self._chosenFileName = None
        self.funcName = funcName
    @property
    def chosenFileName(self):
        """The selected path; asserts if browsing has not finished yet."""
        assert self._chosenFileName is not None, "File was not chosen"
        return self._chosenFileName
    def _update_files(self):
        """Refresh self.dirs / self.files from the current self.path."""
        self.files = list()
        self.dirs = list()
        if(os.path.isdir(self.path)):
            content = os.listdir(self.path)
            content.sort()
            for f in content:
                ff = self.path + "/" + f
                if os.path.isdir(ff):
                    self.dirs.append(f)
                else:
                    self.files.append(f)
    def widget(self):
        """Create and display the browser widget; returns the container box."""
        box = widgets.VBox()
        self._update(box)
        return box
    def _update(self, box):
        """Rebuild the button list for the current directory inside *box*."""
        clear_output()
        def on_click(b):
            # '..' walks up one level; any other caption descends into it.
            if b.description == '..':
                self.path = os.path.split(self.path)[0]
            else:
                self.path = os.path.join(self.path, b.description)
            self._update_files()
            self._update(box)
        buttons = []
        if self.files or self.dirs:
            button = widgets.Button(description='..')
            button.add_class('folder')
            button.add_class('parentFolder')
            button.on_click(on_click)
            buttons.append(button)
        for f in self.dirs:
            button = widgets.Button(description=f)
            button.add_class('folder')
            button.on_click(on_click)
            buttons.append(button)
        for f in self.files:
            button = widgets.Button(description=f)
            button.add_class('file')
            button.on_click(on_click)
            buttons.append(button)
        # No buttons means we reached a leaf: show the closing instructions.
        if len(buttons) == 0:
            buttons.append(widgets.HTML("Replace "+self.funcName+"() by the following expression to save chosen path:<br>"+self.funcName+"('"+self.path+"',...)"))
        box.children = tuple([widgets.HTML("<h2>%s</h2>" % (self.path,))] + buttons)
        box.add_class('fileBrowser')
        display(box)
        if len(buttons) == 0: self._chosenFileName = self.path
def openFile(funcName, *p):
    """Interactive file chooser for notebooks.

    With a path argument, the path is wrapped in a stub object directly;
    without one, a FileBrowser widget is shown (requires a Jupyter notebook).
    """
    if len(p)>0 :
        display(widgets.HTML("Delete path argument to choose file interactively: "+funcName+'()'))
        # Mimic the FileBrowser interface with an ad-hoc object.
        return type('obj', (object,), {'chosenFileName' : p[0]})
    assert utils.isJupyterNotebook()
    f = FileBrowser(funcName)
    f.widget()
    return f
| gudasergey/pyFitIt | pyfitit/fileBrowser.py | fileBrowser.py | py | 2,642 | python | en | code | 28 | github-code | 36 | [
{
"api_name": "os.getcwd",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 2... |
25718178961 | # This exports all networks and their base attributes from an organization to a file
# and then imports them to another organization.
#
# You need to have Python 3 and the Requests module installed. You
# can download the module here: https://github.com/kennethreitz/requests
# or install it using pip.
#
# To run the script, enter:
# python copynetworks.py -k <API key> [-s <source org name>] [-d <destination org name>] [-f <file path>]
#
# Parameters '-s', '-d' and '-f' are optional, but at least two of them must be given.
#
# ** If '-s' and '-d' are given, data will be copied from src org to dst org
# ** If '-s' and '-f' are given, data will be dumped from src org to file
# ** If '-d' and '-f' are given, data will be imported from file to dst org
#
# To make script chaining easier, all lines containing informational messages to the user
# start with the character @
import sys, getopt, requests, json
def printusertext(p_message):
    """Print a line meant for a human reader.

    Informational lines start with '@' so chained scripts can filter them
    out of machine-readable output.
    """
    print('@ ' + str(p_message))
def printhelp():
    """Print usage instructions for the script."""
    #prints help text
    printusertext('This is a script that copies networks and their base attributes from a source organization')
    printusertext('to another, called the destination organization. Both source, destination org and file ')
    printusertext('parameters are optional, but at least two of them must be given.')
    printusertext('')
    printusertext('Usage:')
    printusertext('python copynetworks.py -k <API key> [-s <source org name>] [-d <dest org name>] [-f <file path>]')
    printusertext('')
    printusertext(" ** If '-s' and '-d' are given, data will be copied from src org to dst org")
    printusertext(" ** If '-s' and '-f' are given, data will be dumped from src org to file")
    printusertext(" ** If '-d' and '-f' are given, data will be imported from file to dst org")
    printusertext('')
    printusertext('Use double quotes ("") in Windows to pass arguments containing spaces. Names are case-sensitive.')
def getorgid(p_apikey, p_orgname):
    """Return the Meraki organization id for an org name, or 'null' on failure."""
    #looks up org id for a specific org name
    #on failure returns 'null'
    r = requests.get('https://dashboard.meraki.com/api/v0/organizations', headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
    if r.status_code != requests.codes.ok:
        return 'null'
    rjson = r.json()
    for record in rjson:
        if record['name'] == p_orgname:
            return record['id']
    return('null')
def getshardurl(p_apikey, p_orgid):
    """Return the dashboard shard host for an org.

    Quick-n-dirty patch: every org is assumed to live on api.meraki.com,
    so both arguments are currently ignored.
    """
    return "api.meraki.com"
def getnwlist(p_apikey, p_shardurl, p_orgid):
    """List all networks in an org; on failure return one {'name':'null','id':'null'} record."""
    #returns a list of all networks in an organization
    #on failure returns a single record with 'null' name and id
    r = requests.get('https://%s/api/v0/organizations/%s/networks' % (p_shardurl, p_orgid), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
    returnvalue = []
    if r.status_code != requests.codes.ok:
        returnvalue.append({'name': 'null', 'id': 'null'})
        return(returnvalue)
    return(r.json())
def getnwid(p_apikey, p_shardurl, p_orgid, p_nwname):
    """Return the network id matching a network name in an org, or 'null'."""
    #looks up network id for a network name
    #on failure returns 'null'
    r = requests.get('https://%s/api/v0/organizations/%s/networks' % (p_shardurl, p_orgid), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
    if r.status_code != requests.codes.ok:
        return 'null'
    rjson = r.json()
    for record in rjson:
        if record['name'] == p_nwname:
            return record['id']
    return('null')
def createnw(p_apikey, p_shardurl, p_dstorg, p_nwdata):
    """Create a network in org p_dstorg mirroring the source record p_nwdata.

    Skips creation (returning 'null') if a network with the same name already
    exists in the destination org, and skips Systems Manager networks, which
    cannot be created via this API. Otherwise returns 'ok' — note that the
    HTTP response of the POST itself is never checked.
    """
    #creates network if one does not already exist with the same name
    #check if network exists
    getnwresult = getnwid(p_apikey, p_shardurl, p_dstorg, p_nwdata['name'])
    if getnwresult != 'null':
        printusertext('WARNING: Skipping network "%s" (Already exists)' % p_nwdata['name'])
        return('null')
    if p_nwdata['type'] == 'combined':
        #find actual device types
        # 'combined' networks are recreated as a multi-device-type network
        nwtype = 'wireless switch appliance'
    else:
        nwtype = p_nwdata['type']
    if nwtype != 'systems manager':
        # NOTE(review): a failed POST is silently reported as 'ok' — the
        # response status is not inspected.
        r = requests.post('https://%s/api/v0/organizations/%s/networks' % (p_shardurl, p_dstorg), data=json.dumps({'timeZone': p_nwdata['timeZone'], 'tags': p_nwdata['tags'], 'name': p_nwdata['name'], 'organizationId': p_dstorg, 'type': nwtype}), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
    else:
        printusertext('WARNING: Skipping network "%s" (Cannot create SM networks)' % p_nwdata['name'])
    return('ok')
def main(argv):
    """Command-line entry point.

    Parses -k/-s/-d/-f options and, depending on which pair is present,
    copies networks org-to-org, dumps an org's networks to a file, or
    imports networks from a file into an org.
    """
    #get command line arguments
    arg_apikey = 'null'
    arg_srcorg = 'null'
    arg_dstorg = 'null'
    arg_filepath = 'null'
    try:
        opts, args = getopt.getopt(argv, 'hk:s:d:f:')
    except getopt.GetoptError:
        printhelp()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            printhelp()
            sys.exit()
        elif opt == '-k':
            arg_apikey = arg
        elif opt == '-s':
            arg_srcorg = arg
        elif opt == '-d':
            arg_dstorg = arg
        elif opt == '-f':
            arg_filepath = arg
    #count how many optional parameters have been given
    optionscounter = 0
    if arg_srcorg != 'null':
        optionscounter += 1
    if arg_dstorg != 'null':
        optionscounter += 1
    if arg_filepath != 'null':
        optionscounter += 1
    # An API key plus at least two of -s/-d/-f are required.
    if arg_apikey == 'null' or optionscounter < 2:
        printhelp()
        sys.exit(2)
    #get source organization id corresponding to org name provided by user
    mode_gotsource = True
    if arg_srcorg == 'null':
        mode_gotsource = False
    else:
        srcorgid = getorgid(arg_apikey, arg_srcorg)
        if srcorgid == 'null':
            printusertext('ERROR: Fetching source organization failed')
            sys.exit(2)
        #get shard URL where Org is stored
        srcshardurl = getshardurl(arg_apikey, srcorgid)
        if srcshardurl == 'null':
            printusertext('ERROR: Fetching Meraki cloud shard URL for source org failed')
            printusertext(' Does it have API access enabled?')
            sys.exit(2)
    #get destination organization id corresponding to org name provided by user
    mode_gotdestination = True
    if arg_dstorg == 'null':
        mode_gotdestination = False
    else:
        dstorgid = getorgid(arg_apikey, arg_dstorg)
        if dstorgid == 'null':
            printusertext('ERROR: Fetching destination organization failed')
            sys.exit(2)
        #get shard URL where Org is stored
        dstshardurl = getshardurl(arg_apikey, dstorgid)
        if dstshardurl == 'null':
            printusertext('ERROR: Fetching Meraki cloud shard URL for destination org failed')
            printusertext(' Does it have API access enabled?')
            sys.exit(2)
    #if user gave a source, fetch networks and their attributes from src org
    if mode_gotsource:
        nwlist = getnwlist(arg_apikey, srcshardurl, srcorgid)
        if nwlist[0]['id'] == 'null':
            printusertext('ERROR: Fetching network list from source org failed')
            sys.exit(2)
    #open buffer file for writing
    mode_gotfile = True
    if arg_filepath == 'null':
        mode_gotfile = False
    if mode_gotfile:
        #if source given, open file for writing (output)
        if mode_gotsource:
            try:
                f = open(arg_filepath, 'w')
            except:
                # NOTE(review): bare except hides the real I/O error
                printusertext('ERROR: Unable to open file for writing')
                sys.exit(2)
        #if source omitted, open file for reading (input)
        else:
            try:
                f = open(arg_filepath, 'r')
            except:
                printusertext('ERROR: Unable to open file for reading')
                sys.exit(2)
    #if user gave a source and a file, dump source org networks to file
    if mode_gotsource and mode_gotfile:
        try:
            json.dump(nwlist, f)
        except:
            printusertext('ERROR: Writing to output file failed')
            sys.exit(2)
    #if user did not give source, but gave file, load networks list from file
    if not(mode_gotsource) and mode_gotfile:
        try:
            nwlist = json.load(f)
        except:
            printusertext('ERROR: Reading from input file failed')
            sys.exit(2)
    #if user gave destination org, create networks according to nwlist content
    if mode_gotdestination:
        i = 0
        for i in range(0, len(nwlist)):
            createnw(arg_apikey, dstshardurl, dstorgid, nwlist[i])
    #reached end of script
    printusertext('End of script.')
# Script entry point. The original line was corrupted with dataset metadata
# fused onto it; restored to the plain argv-forwarding guard.
if __name__ == '__main__':
    main(sys.argv[1:])
{
"api_name": "requests.get",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "requests.codes",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "requests.codes",
"l... |
15991467145 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import DropPath, trunc_normal_
import math
import numpy as np
from models.head import *
up_kwargs = {'mode': 'bilinear', 'align_corners': False}
def load_state_dict(module, state_dict, strict=False):
    """Load state_dict to a module.

    This method is modified from :meth:`torch.nn.Module.load_state_dict`.
    Default value for ``strict`` is set to ``False`` and the message for
    param mismatch will be shown even if strict is False.

    Args:
        module (Module): Module that receives the state_dict.
        state_dict (OrderedDict): Weights.
        strict (bool): whether to strictly enforce that the keys
            in :attr:`state_dict` match the keys returned by this module's
            :meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
    """
    unexpected_keys = []
    all_missing_keys = []
    err_msg = []
    # Preserve version metadata that _load_from_state_dict consults.
    metadata = getattr(state_dict, '_metadata', None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata

    # use _load_from_state_dict to enable checkpoint version control
    def load(module, prefix=''):
        # recursively check parallel module in case that the model has a
        # complicated structure, e.g., nn.Module(nn.Module(DDP))
        local_metadata = {} if metadata is None else metadata.get(
            prefix[:-1], {})
        module._load_from_state_dict(state_dict, prefix, local_metadata, True,
                                     all_missing_keys, unexpected_keys,
                                     err_msg)
        for name, child in module._modules.items():
            if child is not None:
                load(child, prefix + name + '.')

    load(module)
    load = None  # break load->load reference cycle
    # ignore "num_batches_tracked" of BN layers
    missing_keys = [
        key for key in all_missing_keys if 'num_batches_tracked' not in key
    ]
    if unexpected_keys:
        err_msg.append('unexpected key in source '
                       f'state_dict: {", ".join(unexpected_keys)}\n')
    if missing_keys:
        err_msg.append(
            f'missing keys in source state_dict: {", ".join(missing_keys)}\n')
    # Report mismatches: raise under strict, otherwise just print.
    if len(err_msg) > 0:
        err_msg.insert(
            0, 'The model and loaded state dict do not match exactly\n')
        err_msg = '\n'.join(err_msg)
        if strict:
            raise RuntimeError(err_msg)
        else:
            print(err_msg)
def resize_pos_embed_4d(posemb, posemb_new):
    """Rescale a 4-D positional-embedding grid to a new spatial size.

    Adapted from
    https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224

    Args:
        posemb: checkpoint positional embedding, shape [1, H_old, W_old, dim].
        posemb_new: the model's positional embedding whose spatial size is the
            target, shape [1, H_new, W_new, dim]; only its shape is read.

    Returns:
        Bicubically interpolated embedding, shape [1, H_new, W_new, dim].
    """
    # Target grid side length (assumes a square grid); the unused source
    # side-length variable from the original code has been dropped.
    gs_new = posemb_new.shape[1]
    # Channels-last -> channels-first for F.interpolate, then back.
    posemb_grid = posemb.permute(0, 3, 1, 2)
    posemb_grid = F.interpolate(posemb_grid, size=(gs_new, gs_new), mode='bicubic')
    return posemb_grid.permute(0, 2, 3, 1)
def load_checkpoint(model,
                    filename,
                    map_location='cpu',
                    strict=False,
                    ):
    """Load a checkpoint file into ``model``.

    Args:
        model (Module): Module to load the checkpoint into.
        filename (str): Local path of the checkpoint file.
        map_location (str): Same as :func:`torch.load`.
        strict (bool): Whether to require an exact parameter match between
            the model and the checkpoint.

    Returns:
        dict or OrderedDict: The loaded checkpoint.

    Raises:
        RuntimeError: If the file does not contain a dict-like checkpoint.
    """
    checkpoint = torch.load(filename, map_location=map_location)
    # OrderedDict is a subclass of dict
    if not isinstance(checkpoint, dict):
        raise RuntimeError(
            'No state_dict found in checkpoint file %s' % filename)
    # get state_dict from checkpoint
    if 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    elif 'model' in checkpoint:
        state_dict = checkpoint['model']
    else:
        state_dict = checkpoint
    # strip the 'module.' prefix left by DataParallel/DDP wrappers
    if list(state_dict.keys())[0].startswith('module.'):
        state_dict = {k[7:]: v for k, v in state_dict.items()}
    # for MoBY, load model of online branch
    if sorted(list(state_dict.keys()))[0].startswith('encoder'):
        state_dict = {k.replace('encoder.', ''): v for k, v in state_dict.items() if k.startswith('encoder.')}
    # Resize the position embedding by interpolation if the grids differ.
    # Guard against checkpoints without a 'pos_embed' entry — the original
    # code indexed it unconditionally and raised KeyError for such files.
    if 'pos_embed' in state_dict:
        old_posemb = state_dict['pos_embed']
        if model.pos_embed.shape != old_posemb.shape:
            new_posemb = resize_pos_embed_4d(old_posemb, model.pos_embed)
            state_dict['pos_embed'] = new_posemb
    # load state_dict
    load_state_dict(model, state_dict, strict)
    print('load pretrained weight strict={}'.format(strict))
    return checkpoint
class OutlookAttention(nn.Module):
    """
    Implementation of outlook attention.

    Instead of computing attention from q@k, the attention weights are
    predicted directly (via a linear layer) from average-pooled features:
    one (k*k) x (k*k) matrix per head per output location, applied to the
    unfolded k*k neighborhoods of the value projection.

    --dim: hidden dim
    --num_heads: number of heads
    --kernel_size: kernel size in each window for outlook attention
    return: token features after outlook attention
    """
    def __init__(self, dim, num_heads, kernel_size=3, padding=1, stride=1,
                 qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        head_dim = dim // num_heads
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.padding = padding
        self.stride = stride
        # Default attention scaling: 1/sqrt(head_dim).
        self.scale = qk_scale or head_dim**-0.5

        self.v = nn.Linear(dim, dim, bias=qkv_bias)
        # kernel_size**4 * num_heads outputs = one (k*k)x(k*k) matrix per head.
        self.attn = nn.Linear(dim, kernel_size**4 * num_heads)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        self.unfold = nn.Unfold(kernel_size=kernel_size, padding=padding, stride=stride)
        self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True)

    def forward(self, x):
        # x is channels-last: (B, H, W, C).
        B, H, W, C = x.shape

        v = self.v(x).permute(0, 3, 1, 2)  # B, C, H, W

        # Output spatial size after striding; torch.true_divide keeps
        # compatibility with torch<=1.6 (see note at the bottom of the file).
        h, w = math.ceil(torch.true_divide(H, self.stride)), math.ceil(torch.true_divide(W, self.stride))
        # Gather each k*k neighborhood of v.
        v = self.unfold(v).reshape(B, self.num_heads, C // self.num_heads,
                                   self.kernel_size * self.kernel_size,
                                   h * w).permute(0, 1, 4, 3, 2)  # B,H,N,kxk,C/H

        # Predict attention weights from pooled features (no q@k product).
        attn = self.pool(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
        attn = self.attn(attn).reshape(
            B, h * w, self.num_heads, self.kernel_size * self.kernel_size,
            self.kernel_size * self.kernel_size).permute(0, 2, 1, 3, 4)  # B,H,N,kxk,kxk
        attn = attn * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        # Weighted aggregation, then fold overlapping windows back to (H, W).
        x = (attn @ v).permute(0, 1, 4, 3, 2).reshape(
            B, C * self.kernel_size * self.kernel_size, h * w)
        x = F.fold(x, output_size=(H, W), kernel_size=self.kernel_size,
                   padding=self.padding, stride=self.stride)

        x = self.proj(x.permute(0, 2, 3, 1))
        x = self.proj_drop(x)

        return x
class Outlooker(nn.Module):
    """Outlooker layer: outlook attention followed by an MLP.

    Outlookers form the first stage of VOLO.

    Args:
        dim: hidden dimension.
        kernel_size / padding / stride: outlook-attention window parameters.
        num_heads: number of attention heads.
        mlp_ratio: MLP hidden-dim expansion ratio.
    """

    def __init__(self, dim, kernel_size, padding, stride=1,
                 num_heads=1, mlp_ratio=3., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU,
                 norm_layer=nn.LayerNorm, qkv_bias=False,
                 qk_scale=None):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = OutlookAttention(dim, num_heads, kernel_size=kernel_size,
                                     padding=padding, stride=stride,
                                     qkv_bias=qkv_bias, qk_scale=qk_scale,
                                     attn_drop=attn_drop)
        # Stochastic depth; identity when the rate is zero.
        self.drop_path = nn.Identity() if drop_path <= 0. else DropPath(drop_path)
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim,
                       hidden_features=int(dim * mlp_ratio),
                       act_layer=act_layer)

    def forward(self, x):
        # Pre-norm residual blocks: attention, then MLP.
        x = x + self.drop_path(self.attn(self.norm1(x)))
        return x + self.drop_path(self.mlp(self.norm2(x)))
class Mlp(nn.Module):
    """Two-layer feed-forward network with activation and dropout."""

    def __init__(self, in_features, hidden_features=None,
                 out_features=None, act_layer=nn.GELU,
                 drop=0.):
        super().__init__()
        # Hidden/output widths fall back to the input width when omitted.
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # fc1 -> activation -> dropout -> fc2 -> dropout
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class Attention(nn.Module):
    """Multi-head self-attention over a channels-last token grid (B, H, W, C)."""

    def __init__(self, dim, num_heads=8, qkv_bias=False,
                 qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        # Scale queries by 1/sqrt(head_dim) unless explicitly overridden.
        self.scale = qk_scale or (dim // num_heads) ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, H, W, C = x.shape
        heads = self.num_heads
        # Project to q/k/v and split heads: (3, B, heads, N, head_dim).
        qkv = self.qkv(x).reshape(B, H * W, 3, heads, C // heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        scores = (q @ k.transpose(-2, -1)) * self.scale
        weights = self.attn_drop(scores.softmax(dim=-1))
        out = (weights @ v).transpose(1, 2).reshape(B, H, W, C)
        return self.proj_drop(self.proj(out))
class Transformer(nn.Module):
    """Transformer block (self-attention + MLP); the second stage of VOLO."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False,
                 qk_scale=None, attn_drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
                              qk_scale=qk_scale, attn_drop=attn_drop)
        # Stochastic depth; identity when the rate is zero.
        self.drop_path = nn.Identity() if drop_path <= 0. else DropPath(drop_path)
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim,
                       hidden_features=int(dim * mlp_ratio),
                       act_layer=act_layer)

    def forward(self, x):
        # Pre-norm residual blocks: attention, then MLP.
        x = x + self.drop_path(self.attn(self.norm1(x)))
        return x + self.drop_path(self.mlp(self.norm2(x)))
class ClassAttention(nn.Module):
    """Class attention from CaiT: only the class token attends to all tokens.

    Optional post stage in VOLO.
    """

    def __init__(self, dim, num_heads=8, head_dim=None, qkv_bias=False,
                 qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        # Head width defaults to dim // num_heads unless given explicitly.
        self.head_dim = head_dim if head_dim is not None else dim // num_heads
        self.scale = qk_scale or self.head_dim ** -0.5
        inner_dim = self.head_dim * num_heads
        self.kv = nn.Linear(dim, inner_dim * 2, bias=qkv_bias)
        self.q = nn.Linear(dim, inner_dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(inner_dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        # Keys/values over all N tokens: (2, B, heads, N, head_dim).
        kv = self.kv(x).reshape(B, N, 2, self.num_heads,
                                self.head_dim).permute(2, 0, 3, 1, 4)
        k, v = kv[0], kv[1]
        # The query comes from the class token (position 0) only.
        q = self.q(x[:, :1, :]).reshape(B, self.num_heads, 1, self.head_dim)
        weights = ((q * self.scale) @ k.transpose(-2, -1)).softmax(dim=-1)
        weights = self.attn_drop(weights)
        cls_embed = (weights @ v).transpose(1, 2).reshape(
            B, 1, self.head_dim * self.num_heads)
        return self.proj_drop(self.proj(cls_embed))
class ClassBlock(nn.Module):
    """Class-attention block from CaiT; optional post stage in VOLO.

    Only the class token (position 0) is updated; the patch tokens are
    passed through unchanged and re-concatenated.
    """

    def __init__(self, dim, num_heads, head_dim=None, mlp_ratio=4.,
                 qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = ClassAttention(
            dim, num_heads=num_heads, head_dim=head_dim, qkv_bias=qkv_bias,
            qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        # Stochastic depth; identity when the rate is zero.
        self.drop_path = nn.Identity() if drop_path <= 0. else DropPath(drop_path)
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim,
                       hidden_features=int(dim * mlp_ratio),
                       act_layer=act_layer,
                       drop=drop)

    def forward(self, x):
        cls_embed = x[:, :1]
        # Attention reads all tokens but produces an update for the class
        # token only; the MLP then refines that single token.
        cls_embed = cls_embed + self.drop_path(self.attn(self.norm1(x)))
        cls_embed = cls_embed + self.drop_path(self.mlp(self.norm2(cls_embed)))
        return torch.cat([cls_embed, x[:, 1:]], dim=1)
def get_block(block_type, **kargs):
    """Instantiate a post-stage block by name.

    Only 'ca' (class attention) is currently recognized; any other name
    yields None.
    """
    if block_type == 'ca':
        return ClassBlock(**kargs)
    return None
def rand_bbox(size, lam, scale=1):
    """Sample a random bounding box for token labeling / CutMix.

    See https://github.com/zihangJiang/TokenLabeling.

    Args:
        size: tensor size tuple; size[1] and size[2] are the spatial dims.
        lam: mixing coefficient in [0, 1]; the cut area fraction is (1 - lam).
        scale: downsampling factor applied to the spatial dims.

    Returns:
        (bbx1, bby1, bbx2, bby2) box corners, clipped to the image bounds.
    """
    W = size[1] // scale
    H = size[2] // scale
    cut_rat = np.sqrt(1. - lam)
    # np.int was a deprecated alias of the builtin and was removed in
    # NumPy 1.24; plain int() is the correct, equivalent spelling.
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)
    # uniform random box center
    cx = np.random.randint(W)
    cy = np.random.randint(H)
    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)
    return bbx1, bby1, bbx2, bby2
class PatchEmbed(nn.Module):
    """Image-to-patch embedding.

    Unlike ViT's single conv, an optional three-layer conv stem can precede
    the patch-projection conv.
    """

    def __init__(self, img_size=224, stem_conv=False, stem_stride=1,
                 patch_size=8, in_chans=3, hidden_dim=64, embed_dim=384):
        super().__init__()
        assert patch_size in [4, 8, 16]
        self.stem_conv = stem_conv
        if stem_conv:
            def conv_bn_relu(cin, cout, k, s, p):
                # conv -> BN -> ReLU triple used by each stem stage
                return [nn.Conv2d(cin, cout, kernel_size=k, stride=s,
                                  padding=p, bias=False),
                        nn.BatchNorm2d(cout),
                        nn.ReLU(inplace=True)]
            stem = conv_bn_relu(in_chans, hidden_dim, 7, stem_stride, 3)
            stem += conv_bn_relu(hidden_dim, hidden_dim, 3, 1, 1)
            stem += conv_bn_relu(hidden_dim, hidden_dim, 3, 1, 1)
            self.conv = nn.Sequential(*stem)

        # Patch projection; stride is reduced by whatever the stem consumed.
        self.proj = nn.Conv2d(hidden_dim,
                              embed_dim,
                              kernel_size=patch_size // stem_stride,
                              stride=patch_size // stem_stride)
        self.num_patches = (img_size // patch_size) ** 2

    def forward(self, x):
        if self.stem_conv:
            x = self.conv(x)
        return self.proj(x)  # B, C, H, W
class Downsample(nn.Module):
    """Spatial downsampling between stage 1 and stage 2 via a strided conv.

    Operates on channels-last tensors (B, H, W, C).
    """

    def __init__(self, in_embed_dim, out_embed_dim, patch_size):
        super().__init__()
        self.proj = nn.Conv2d(in_embed_dim, out_embed_dim,
                              kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        # Channels-last -> channels-first for the conv, then back.
        y = self.proj(x.permute(0, 3, 1, 2))
        return y.permute(0, 2, 3, 1)
def outlooker_blocks(block_fn, index, dim, layers, num_heads=1, kernel_size=3,
                     padding=1, stride=1, mlp_ratio=3., qkv_bias=False, qk_scale=None,
                     attn_drop=0, drop_path_rate=0., **kwargs):
    """Build the stack of outlooker blocks for stage ``index``.

    The per-block drop_path grows linearly with the global block position
    across all stages (stochastic-depth schedule).

    Returns:
        nn.Sequential of ``layers[index]`` blocks created by ``block_fn``.
    """
    offset = sum(layers[:index])
    denom = sum(layers) - 1
    stack = [
        block_fn(dim, kernel_size=kernel_size, padding=padding,
                 stride=stride, num_heads=num_heads, mlp_ratio=mlp_ratio,
                 qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop,
                 drop_path=drop_path_rate * (offset + i) / denom)
        for i in range(layers[index])
    ]
    return nn.Sequential(*stack)
def transformer_blocks(block_fn, index, dim, layers, num_heads, mlp_ratio=3.,
                       qkv_bias=False, qk_scale=None, attn_drop=0,
                       drop_path_rate=0., **kwargs):
    """Build the stack of transformer blocks for stage ``index``.

    The per-block drop_path grows linearly with the global block position
    across all stages (stochastic-depth schedule).

    Returns:
        nn.Sequential of ``layers[index]`` blocks created by ``block_fn``.
    """
    offset = sum(layers[:index])
    denom = sum(layers) - 1
    stack = [
        block_fn(dim, num_heads,
                 mlp_ratio=mlp_ratio,
                 qkv_bias=qkv_bias,
                 qk_scale=qk_scale,
                 attn_drop=attn_drop,
                 drop_path=drop_path_rate * (offset + i) / denom)
        for i in range(layers[index])
    ]
    return nn.Sequential(*stack)
class VOLO_backbone(nn.Module):
    """
    Vision Outlooker backbone.

    --layers: [x,x,x,x], four blocks in two stages; the first block is an
        outlooker stage, the other three are transformer stages.
    --img_size, --in_chans: input image size and channels.
    --patch_size: patch size of the patch embedding.
    --stem_hidden_dim: hidden dim of the conv stem (d1-d4 use 64, d5 uses 128).
    --embed_dims, --num_heads: embedding dim and head count per block.
    --downsamples: flags to apply downsampling after each block or not.
    --outlook_attention: flags selecting outlook attention vs transformer.
    --mlp_ratios, --qkv_bias, --qk_scale, --drop_rate,
    --attn_drop_rate, --drop_path_rate, --norm_layer: standard options.
    --pooling_scale: pooling_scale=2 means the pos-embed grid is 2x smaller.
    --out_kernel, --out_stride, --out_padding: kernel size, stride and
        padding for outlook attention.

    forward() returns a tuple of per-stage feature maps in (B, C, H, W).
    """
    def __init__(self, layers, img_size=512, in_chans=3, patch_size=8,
                 stem_hidden_dim=64, embed_dims=None, num_heads=None, downsamples=None,
                 outlook_attention=None, mlp_ratios=None, qkv_bias=False, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
                 pooling_scale=2, out_kernel=3, out_stride=2, out_padding=1):
        super().__init__()
        self.patch_embed = PatchEmbed(stem_conv=True, stem_stride=2, patch_size=patch_size,
                                      in_chans=in_chans, hidden_dim=stem_hidden_dim,
                                      embed_dim=embed_dims[0])
        # inital positional encoding, we add positional encoding after outlooker blocks
        self.pos_embed = nn.Parameter(
            torch.zeros(1, img_size // patch_size // pooling_scale,
                        img_size // patch_size // pooling_scale,
                        embed_dims[-1]))
        self.pos_drop = nn.Dropout(p=drop_rate)
        # set the main block in network
        network = []
        for i in range(len(layers)):
            if outlook_attention[i]:
                # stage 1: outlooker blocks
                # NOTE(review): the downsample kwarg is absorbed by **kwargs
                # inside outlooker_blocks and not used there.
                stage = outlooker_blocks(Outlooker, i, embed_dims[i], layers,
                                         downsample=downsamples[i], num_heads=num_heads[i],
                                         kernel_size=out_kernel, stride=out_stride,
                                         padding=out_padding, mlp_ratio=mlp_ratios[i],
                                         qkv_bias=qkv_bias, qk_scale=qk_scale,
                                         attn_drop=attn_drop_rate, norm_layer=norm_layer)
                network.append(stage)
            else:
                # stage 2: transformer blocks
                stage = transformer_blocks(Transformer, i, embed_dims[i], layers,
                                           num_heads[i], mlp_ratio=mlp_ratios[i],
                                           qkv_bias=qkv_bias, qk_scale=qk_scale,
                                           drop_path_rate=drop_path_rate,
                                           attn_drop=attn_drop_rate,
                                           norm_layer=norm_layer)
                network.append(stage)
            if downsamples[i]:
                # downsampling between two stages
                network.append(Downsample(embed_dims[i], embed_dims[i + 1], 2))
        self.network = nn.ModuleList(network)
        trunc_normal_(self.pos_embed, std=.02)

    def init_weights(self, pretrained=None, strict=False):
        """Initialize the weights in backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None (random trunc-normal init only).
        """
        def _init_weights(m):
            # trunc-normal linear weights, zero biases, unit LayerNorm scale
            if isinstance(m, nn.Linear):
                trunc_normal_(m.weight, std=.02)
                if isinstance(m, nn.Linear) and m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)
        if isinstance(pretrained, str):
            self.apply(_init_weights)
            load_checkpoint(self, pretrained, strict=strict)
            print('load pretained weight strict={}'.format(strict))
        elif pretrained is None:
            self.apply(_init_weights)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward_embeddings(self, x):
        # patch embedding
        x = self.patch_embed(x)
        # B,C,H,W-> B,H,W,C (the stages operate channels-last)
        x = x.permute(0, 2, 3, 1)
        return x

    def forward_tokens(self, x):
        # Run the stages, collecting each stage's channels-first output.
        out = []
        for idx, block in enumerate(self.network):
            if idx == 2:  # add positional encoding after outlooker blocks
                x = x + self.pos_embed
                x = self.pos_drop(x)
            x = block(x)
            out.append(x.permute(0, 3, 1, 2).contiguous())
        return out

    def forward(self, x):
        # step1: patch embedding
        x = self.forward_embeddings(x)
        # step2: tokens learning in the two stages
        out = self.forward_tokens(x)
        return tuple(out)
class VOLO(nn.Module):
    """Segmentation model: VOLO backbone plus a selectable decode head.

    Args:
        nclass: number of segmentation classes.
        embed_dim, layers, num_heads, mlp_ratios, downsamples,
            outlook_attention: backbone hyper-parameters, forwarded to
            VOLO_backbone.
        aux: attach an auxiliary FCN head on stage-2 (index 2) features.
        pretrained_root: optional checkpoint path; paths containing
            'upernet' are loaded into the whole model, otherwise into the
            backbone only.
        head: name of the decode head to build (see the chain below).
        edge_aux: attach an auxiliary edge head on the two early stages.
        stem_hidden_dim: backbone stem width (defaults to 64).
    """
    def __init__(self, nclass, embed_dim, layers, num_heads, mlp_ratios, downsamples, outlook_attention,
                 aux=False, pretrained_root=None, head='seghead', edge_aux=False, stem_hidden_dim=None):
        super(VOLO, self).__init__()
        self.aux = aux
        self.edge_aux = edge_aux
        self.head_name = head
        self.backbone = VOLO_backbone(layers=layers,
                                      embed_dims=embed_dim,
                                      num_heads=num_heads,
                                      mlp_ratios=mlp_ratios,
                                      downsamples=downsamples,
                                      outlook_attention=outlook_attention,
                                      stem_hidden_dim=stem_hidden_dim or 64,
                                      img_size=512
                                      )
        # Select the decode head by name; head classes come from models.head.
        if self.head_name == 'apchead':
            self.decode_head = APCHead(in_channels=embed_dim[3], num_classes=nclass, in_index=3, channels=512)
        if self.head_name == 'aspphead':
            self.decode_head = ASPPHead(in_channels=embed_dim[3], num_classes=nclass, in_index=3)
        if self.head_name == 'asppplushead':
            self.decode_head = ASPPPlusHead(in_channels=embed_dim[3], num_classes=nclass, in_index=[0, 3])
        if self.head_name == 'dahead':
            self.decode_head = DAHead(in_channels=embed_dim[3], num_classes=nclass, in_index=3)
        if self.head_name == 'dnlhead':
            self.decode_head = DNLHead(in_channels=embed_dim[3], num_classes=nclass, in_index=3, channels=512)
        if self.head_name == 'fcfpnhead':
            self.decode_head = FCFPNHead(in_channels=embed_dim, num_classes=nclass, in_index=[0, 1, 2, 3], channels=256)
        if self.head_name == 'cefpnhead':
            self.decode_head = CEFPNHead(in_channels=embed_dim, num_classes=nclass, in_index=[0, 1, 2, 3], channels=256)
        if self.head_name == 'fcnhead':
            self.decode_head = FCNHead(in_channels=embed_dim[3], num_classes=nclass, in_index=3, channels=512)
        if self.head_name == 'gchead':
            self.decode_head = GCHead(in_channels=embed_dim[3], num_classes=nclass, in_index=3, channels=512)
        if self.head_name == 'psahead':
            self.decode_head = PSAHead(in_channels=embed_dim[3], num_classes=nclass, in_index=3)
        if self.head_name == 'psphead':
            self.decode_head = PSPHead(in_channels=embed_dim[3], num_classes=nclass, in_index=3)
        if self.head_name == 'seghead':
            self.decode_head = SegHead(in_channels=embed_dim, num_classes=nclass, in_index=[0, 1, 2, 3])
        if self.head_name == 'unethead':
            self.decode_head = UNetHead(in_channels=embed_dim, num_classes=nclass, in_index=[0, 1, 2, 3])
        if self.head_name == 'uperhead':
            self.decode_head = UPerHead(in_channels=embed_dim, num_classes=nclass)
        if self.head_name == 'annhead':
            self.decode_head = ANNHead(in_channels=embed_dim[2:], num_classes=nclass, in_index=[2, 3], channels=512)
        if self.head_name == 'mlphead':
            self.decode_head = MLPHead(in_channels=embed_dim, num_classes=nclass, in_index=[0, 1, 2, 3], channels=256)
        if self.aux:
            self.auxiliary_head = FCNHead(num_convs=1, in_channels=embed_dim[2], num_classes=nclass, in_index=2, channels=256)
        if self.edge_aux:
            self.edge_head = EdgeHead(in_channels=embed_dim[0:2], in_index=[0, 1], channels=embed_dim[0])
        # Weight initialization / checkpoint loading.
        if pretrained_root is None:
            self.backbone.init_weights()
        else:
            if 'upernet' in pretrained_root:
                # Whole-model checkpoint (backbone + head).
                load_checkpoint(self, filename=pretrained_root, strict=False)
            else:
                # Backbone-only (classification) checkpoint.
                self.backbone.init_weights(pretrained=pretrained_root, strict=False)

    def forward(self, x):
        """Return a list of logits upsampled to the input size:
        main output(s), then optional aux output, then optional edge output.
        """
        size = x.size()[2:]
        outputs = []
        out_backbone = self.backbone(x)
        x0 = self.decode_head(out_backbone)
        # Some heads return several outputs (e.g. with deep supervision).
        if isinstance(x0, (list, tuple)):
            for out in x0:
                out = F.interpolate(out, size, **up_kwargs)
                outputs.append(out)
        else:
            x0 = F.interpolate(x0, size, **up_kwargs)
            outputs.append(x0)
        if self.aux:
            x1 = self.auxiliary_head(out_backbone)
            x1 = F.interpolate(x1, size, **up_kwargs)
            outputs.append(x1)
        if self.edge_aux:
            edge = self.edge_head(out_backbone)
            edge = F.interpolate(edge, size, **up_kwargs)
            outputs.append(edge)
        return outputs
def volo_d1(nclass, pretrained=False, aux=False, head='uperhead', edge_aux=False):
    """VOLO-D1 segmentation model factory.

    Args:
        nclass: number of segmentation classes.
        pretrained: load the bundled D1 ImageNet checkpoint into the backbone.
        aux / edge_aux / head: forwarded to VOLO.
    """
    weights = './pretrained_weights/d1_224_84.2.pth.tar' if pretrained else None
    # Four blocks: the first is an outlooker stage (with downsampling after
    # it), the other three are transformer stages.
    return VOLO(layers=[4, 4, 8, 2],
                embed_dim=[192, 384, 384, 384],
                num_heads=[6, 12, 12, 12],
                mlp_ratios=[3, 3, 3, 3],
                downsamples=[True, False, False, False],
                outlook_attention=[True, False, False, False],
                nclass=nclass, aux=aux, head=head, edge_aux=edge_aux,
                pretrained_root=weights)
def volo_d2(nclass, pretrained=False, aux=False, head='uperhead', edge_aux=False):
    """VOLO-D2 segmentation model factory."""
    if pretrained:
        # NOTE(review): this path looks copy-pasted from a ResT checkpoint,
        # not a VOLO-D2 checkpoint — verify before relying on `pretrained`.
        pretrained_root = './pretrained_weights/rest_lite.pth'
    else:
        pretrained_root = None
    layers = [6, 4, 10, 4]
    embed_dims = [256, 512, 512, 512]
    num_heads = [8, 16, 16, 16]
    mlp_ratios = [3, 3, 3, 3]
    downsamples = [True, False, False, False]
    outlook_attention = [True, False, False, False]
    # first block is outlooker (stage1), the other three are transformer (stage2)
    model = VOLO(layers=layers,
                 embed_dim=embed_dims,
                 num_heads=num_heads,
                 mlp_ratios=mlp_ratios,
                 downsamples=downsamples,
                 outlook_attention=outlook_attention,
                 nclass=nclass, aux=aux, head=head, edge_aux=edge_aux, pretrained_root=pretrained_root
                 )
    return model
def volo_d3(nclass, pretrained=False, aux=False, head='uperhead', edge_aux=False):
    """VOLO-D3 segmentation model factory (deeper than D2, same widths)."""
    if pretrained:
        # NOTE(review): this path looks copy-pasted from a ResT checkpoint,
        # not a VOLO-D3 checkpoint — verify before relying on `pretrained`.
        pretrained_root = './pretrained_weights/rest_lite.pth'
    else:
        pretrained_root = None
    layers = [8, 8, 16, 4]
    embed_dims = [256, 512, 512, 512]
    num_heads = [8, 16, 16, 16]
    mlp_ratios = [3, 3, 3, 3]
    downsamples = [True, False, False, False]
    outlook_attention = [True, False, False, False]
    # first block is outlooker (stage1), the other three are transformer (stage2)
    model = VOLO(layers=layers,
                 embed_dim=embed_dims,
                 num_heads=num_heads,
                 mlp_ratios=mlp_ratios,
                 downsamples=downsamples,
                 outlook_attention=outlook_attention,
                 nclass=nclass, aux=aux, head=head, edge_aux=edge_aux, pretrained_root=pretrained_root
                 )
    return model
def volo_d4(nclass, pretrained=False, aux=False, head='uperhead', edge_aux=False):
    """VOLO-D4 segmentation model factory (D3 depth, wider embeddings)."""
    if pretrained:
        # NOTE(review): this path looks copy-pasted from a ResT checkpoint,
        # not a VOLO-D4 checkpoint — verify before relying on `pretrained`.
        pretrained_root = './pretrained_weights/rest_lite.pth'
    else:
        pretrained_root = None
    layers = [8, 8, 16, 4]
    embed_dims = [384, 768, 768, 768]
    num_heads = [12, 16, 16, 16]
    mlp_ratios = [3, 3, 3, 3]
    downsamples = [True, False, False, False]
    outlook_attention = [True, False, False, False]
    # first block is outlooker (stage1), the other three are transformer (stage2)
    model = VOLO(layers=layers,
                 embed_dim=embed_dims,
                 num_heads=num_heads,
                 mlp_ratios=mlp_ratios,
                 downsamples=downsamples,
                 outlook_attention=outlook_attention,
                 nclass=nclass, aux=aux, head=head, edge_aux=edge_aux, pretrained_root=pretrained_root
                 )
    return model
def volo_d5(nclass, pretrained=False, aux=False, head='uperhead', edge_aux=False):
    """VOLO-D5 segmentation model factory (largest variant, 128-wide stem)."""
    weights = './pretrained_weights/d5_512_87.07.pth.tar' if pretrained else None
    # Four blocks: the first is an outlooker stage, the other three are
    # transformer stages; D5 uses a wider stem than D1-D4.
    return VOLO(layers=[12, 12, 20, 4],
                embed_dim=[384, 768, 768, 768],
                num_heads=[12, 16, 16, 16],
                mlp_ratios=[4, 4, 4, 4],
                downsamples=[True, False, False, False],
                outlook_attention=[True, False, False, False],
                stem_hidden_dim=128,
                nclass=nclass, aux=aux, head=head, edge_aux=edge_aux,
                pretrained_root=weights)
if __name__ == '__main__':
    """Notice if torch1.6, try to replace a / b with torch.true_divide(a, b)"""
    from tools.flops_params_fps_count import flops_params_fps
    # Quick smoke test: build VOLO-D1 with aux and edge heads and report
    # FLOPs / parameter count / FPS.
    model_base = volo_d1(nclass=6, aux=True, edge_aux=True, head='mlphead', pretrained=True)
    flops_params_fps(model_base)
| zyxu1996/Efficient-Transformer | models/volo.py | volo.py | py | 34,846 | python | en | code | 67 | github-code | 36 | [
{
"api_name": "torch.nn.functional.interpolate",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "torch.load",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "torch... |
44539221196 | from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
import uuid
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///employees.db'
db = SQLAlchemy(app)
ma = Marshmallow(app)
# Модель данных для сотрудника
class Employee(db.Model):
id = db.Column(db.String(36), primary_key=True)
last_name = db.Column(db.String(50))
first_name = db.Column(db.String(50))
middle_name = db.Column(db.String(50))
position = db.Column(db.String(50))
def __init__(self, last_name, first_name, middle_name, position):
self.id = str(uuid.uuid4())
self.last_name = last_name
self.first_name = first_name
self.middle_name = middle_name
self.position = position
# Схема сериализации/десериализации для сотрудника
class EmployeeSchema(ma.Schema):
class Meta:
fields = ('id', 'last_name', 'first_name', 'middle_name', 'position')
employee_schema = EmployeeSchema()
employees_schema = EmployeeSchema(many=True)
# Создание нового сотрудника
@app.route('/employees', methods=['POST'])
def create_employee():
last_name = request.json['last_name']
first_name = request.json['first_name']
middle_name = request.json['middle_name']
position = request.json['position']
with app.app_context():
new_employee = Employee(last_name, first_name, middle_name, position)
db.session.add(new_employee)
db.session.commit()
return employee_schema.jsonify(new_employee)
# Получение всех сотрудников
@app.route('/employees', methods=['GET'])
def get_employees():
with app.app_context():
all_employees = Employee.query.all()
result = employees_schema.dump(all_employees)
return jsonify(result)
# Получение информации о конкретном сотруднике по его идентификатору
@app.route('/employees/<id>', methods=['GET'])
def get_employee(id):
with app.app_context():
employee = Employee.query.get(id)
return employee_schema.jsonify(employee)
# Обновление информации о сотруднике
@app.route('/employees/<id>', methods=['PUT'])
def update_employee(id):
with app.app_context():
employee = Employee.query.get(id)
last_name = request.json['last_name']
first_name = request.json['first_name']
middle_name = request.json['middle_name']
position = request.json['position']
employee.last_name = last_name
employee.first_name = first_name
employee.middle_name = middle_name
employee.position = position
db.session.commit()
return employee_schema.jsonify(employee)
# Удаление сотрудника
@app.route('/employees/<id>', methods=['DELETE'])
def delete_employee(id):
with app.app_context():
employee = Employee.query.get(id)
db.session.delete(employee)
db.session.commit()
return employee_schema.jsonify(employee)
if __name__ == '__main__':
with app.app_context():
db.create_all()
app.run() | InKarno27/CRUD_on_Python | main.py | main.py | py | 3,267 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask_marshmallow.Marshmallow",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "u... |
22861904779 | import pandas as pd
import datetime as dt
from sodapy import Socrata
from airflow.hooks.base_hook import BaseHook
class Extract:
chicago_crime_portal = "https://data.cityofchicago.org/resource/ijzp-q8t2.json"
def __init__(self, start_time , end_time ) -> None:
self.start_time = start_time
self.end_time = end_time
self.client = Socrata("data.cityofchicago.org", app_token=BaseHook.get_connection("CITY_OF_CHICAGO_APP_TOKEN").password)
# Get all the updates in the last week.
self.updated_on_filter = "updated_on >= '"+ start_time +"' and updated_on < '"+ end_time +"'"
def execute_extraction(self) -> pd.DataFrame:
crimes = self.client.get_all("ijzp-q8t2",where = self.updated_on_filter)
crime_df = pd.DataFrame.from_records(crimes)
print(crime_df.head(5))
print(crime_df.columns)
return crime_df
if __name__=="__main__":
extract = Extract(dt.datetime.now()+dt.timedelta(days=-3))
data = extract.execute_extraction()
print(data)
| prabha-git/airflow | dags/chicago_crime_etl/extract.py | extract.py | py | 1,041 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sodapy.Socrata",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "airflow.hooks.base_hook.BaseHook.get_connection",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "airflow.hooks.base_hook.BaseHook",
"line_number": 12,
"usage_type": "name"... |
39783045715 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="diss-iamhectorotero",
version="0.0.1",
author="Hector Otero",
author_email="7hector2@gmail.com",
description="A package to train RNN in physical microworlds in a supervised or RL manner",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/iamhectorotero/diss",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.5.6',
)
| iamhectorotero/learning-physical-properties-with-rnns | libraries/setup.py | setup.py | py | 702 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 15,
"usage_type": "call"
}
] |
10358617907 | from __future__ import annotations
import math
import os
import pygame
from pgbot import emotion
EMOTIONS_PER_ROW = 2
NEGATIVE_EMOTIONS = {"bored": "exhausted", "happy": "sad"}
EMOTION_COLORS = {
"happy": (230, 28, 226),
"sad": (28, 28, 230),
"anger": (230, 36, 18),
"bored": (230, 181, 18),
"exhausted": (235, 127, 19),
"confused": (19, 235, 228),
}
def get_emotion_desc_dict(emotions: dict[str, int]):
"""
Get emotion description dict from emotion dict
"""
return {
"happy": {
"msg": "I feel... happi!\n"
"While I am happi, I'll make more dad jokes (Spot the dad joke in there?)\n"
"However, don't bonk me or say 'ded chat', as that would make me sad.\n"
f"*The snek's happiness level is `{emotions.get('happy', '0')}`, "
"don't let it go to zero!*",
"emoji_link": "https://cdn.discordapp.com/emojis/837389387024957440.png?v=1",
},
"sad": {
"msg": "I'm sad...\n"
"I don't feel like making any jokes. This is your fault, "
"**don't make me sad.**\nPet me pls :3\n"
f"*The snek's sadness level is `{-emotions.get('happy', 0)}`, play with "
"it to cheer it up*",
"emoji_link": "https://cdn.discordapp.com/emojis/824721451735056394.png?v=1",
},
"exhausted": {
"msg": "I'm exhausted. \nI ran too many commands, "
"so I'll be resting for a while..\n"
"Don't try to make me run commands for now, I'll most likely "
"just ignore it..\n"
f"*The snek's exhaustion level is `{-emotions.get('bored', 0)}`. "
"To make its exhaustion go down, let it rest for a bit.*",
"emoji_link": None,
},
"bored": {
"msg": "I'm booooooooored...\nNo one is spending time with me, "
"and I feel lonely :pg_depressed:\n"
f"*The snek's boredom level is `{emotions.get('bored', '0')}`, run "
"more command(s) to improve its mood.*",
"emoji_link": "https://cdn.discordapp.com/emojis/823502668500172811.png?v=1",
},
"confused": {
"msg": "I'm confused!\nEither there were too many exceptions in my code, "
"or too many commands were used wrongly!\n"
f"*The snek's confusion level is `{emotions.get('confused', '0')}`, "
"to lower its level of confusion, use proper command syntax.*",
"emoji_link": "https://cdn.discordapp.com/emojis/837402289709907978.png?v=1",
},
"anger": {
"msg": "I'm angry!\nI've been bonked too many times, you'd be "
"angry too if someone bonked you 50+ times :unamused:\n"
"No jokes, no quotes. :pg_angry:. Don't you dare pet me!\n"
f"*The snek's anger level is `{emotions.get('anger', '0')}`, "
"ask for its forgiveness to calm it down.*",
"emoji_link": "https://cdn.discordapp.com/emojis/779775305224159232.gif?v=1",
"override_emotion": "anger",
},
}
def generate_pie_slice(
center_x: int, center_y: int, radius: int, start_angle: int, end_angle: int
):
"""
Generate slice of the pie in the output
"""
p = [(center_x, center_y)]
# cover a bit more angle so that the boundaries are fully covered
for angle in range(start_angle - 91, end_angle - 89):
x = center_x + int(radius * math.cos(math.radians(angle)))
y = center_y + int(radius * math.sin(math.radians(angle)))
p.append((x, y))
return p
def get_emotion_percentage(emotions: dict[str, int], round_by: int = 1):
"""
Express emotions in terms of percentages, split complementary emotions into
their own emotions
"""
raw_emotion_percentage = {}
for key, value in emotions.items():
percentage = value / emotion.EMOTION_CAPS[key][1] * 100
if percentage < 0:
percentage = -percentage
key = NEGATIVE_EMOTIONS[key]
raw_emotion_percentage[key] = percentage
sum_of_emotions = sum([i for i in raw_emotion_percentage.values()])
emotion_percentage = {
key: round(raw_emotion / sum_of_emotions * 100, round_by)
if round_by != -1
else raw_emotion / sum_of_emotions * 100
for key, raw_emotion in sorted(
raw_emotion_percentage.items(), key=lambda item: item[1], reverse=True
)
}
return emotion_percentage
def emotion_pie_chart(emotions: dict[str, int], pie_radius: int):
"""
Generates a pie chart, given emotions and pie radius
Emotions must be in "raw form", like
{"happy": 34, "bored": -35, "anger": 89, "confused": 499}
"""
font = pygame.font.Font(os.path.join("assets", "tahoma.ttf"), 30)
font.bold = True
image = pygame.Surface((pie_radius * 2, pie_radius * 2 + 30 * len(emotions)))
image.fill((0, 0, 0, 0))
emotion_percentage = get_emotion_percentage(emotions)
emotion_pie_angle = {
key: percentage / 100 * 360 for key, percentage in emotion_percentage.items()
}
start_angle = 0
for key, angle in emotion_pie_angle.items():
if round(angle) != 0:
pygame.draw.polygon(
image,
EMOTION_COLORS[key],
generate_pie_slice(
pie_radius,
pie_radius,
pie_radius,
start_angle,
start_angle + round(angle),
),
)
start_angle += round(angle)
pygame.draw.circle(
image, (255, 255, 255), (pie_radius, pie_radius), pie_radius, width=10
)
i = 0
txt_x = 0
txt_y = pie_radius * 2
for bot_emotion, percentage in emotion_percentage.items():
txt = font.render(
f"{bot_emotion.title()} - {percentage}%", True, EMOTION_COLORS[bot_emotion]
)
txt_rect = txt.get_rect(topleft=(txt_x, txt_y))
image.blit(txt, txt_rect)
pygame.draw.rect(
image,
EMOTION_COLORS[bot_emotion],
(int(txt_x + pie_radius * 1.8 / EMOTIONS_PER_ROW), txt_y, 20, 40),
)
if i % EMOTIONS_PER_ROW != EMOTIONS_PER_ROW - 1:
txt_x += pie_radius * 2 / EMOTIONS_PER_ROW
else:
txt_x = 0
txt_y += 40
i += 1
return image
| gresm/PygameCommunityBot | pgbot/commands/utils/vibecheck.py | vibecheck.py | py | 6,443 | python | en | code | null | github-code | 36 | [
{
"api_name": "math.cos",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "math.radians",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "math.radians",
"line_number": 89,
... |
19029066252 | import logging
import requests
import time
import json
from cifsdk.exceptions import AuthError, TimeoutError, NotFound, SubmissionFailed, InvalidSearch, CIFBusy
from cifsdk.constants import VERSION, PYVERSION, TOKEN
from pprint import pprint
from base64 import b64decode
from cifsdk.client.plugin import Client
import os
import zlib
from time import sleep
import random
if PYVERSION == 3:
basestring = (str, bytes)
requests.packages.urllib3.disable_warnings()
TRACE = os.environ.get('CIFSDK_CLIENT_HTTP_TRACE')
TIMEOUT = os.getenv('CIFSDK_CLIENT_HTTP_TIMEOUT', 120)
RETRIES = os.getenv('CIFSDK_CLIENT_HTTP_RETRIES', 5)
RETRIES_DELAY = os.getenv('CIFSDK_CLIENT_HTTP_RETRIES_DELAY', '30,60')
s, e = RETRIES_DELAY.split(',')
RETRIES_DELAY = random.uniform(int(s), int(e))
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.ERROR)
if TRACE:
logger.setLevel(logging.DEBUG)
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.DEBUG)
class HTTP(Client):
def __init__(self, remote, token=TOKEN, proxy=None, timeout=int(TIMEOUT), verify_ssl=True, **kwargs):
super(HTTP, self).__init__(remote, token, **kwargs)
self.proxy = proxy
self.timeout = timeout
self.verify_ssl = verify_ssl
self.nowait = kwargs.get('nowait', False)
self.session = requests.Session()
self.session.headers["Accept"] = 'application/vnd.cif.v3+json'
self.session.headers['User-Agent'] = 'cifsdk-py/{}'.format(VERSION)
self.session.headers['Authorization'] = 'Token token=' + self.token
self.session.headers['Content-Type'] = 'application/json'
self.session.headers['Accept-Encoding'] = 'deflate'
def _check_status(self, resp, expect=200):
if resp.status_code == 400:
r = json.loads(resp.text)
raise InvalidSearch(r['message'])
if resp.status_code == 401:
raise AuthError('unauthorized')
if resp.status_code == 404:
raise NotFound('not found')
if resp.status_code == 408:
raise TimeoutError('timeout')
if resp.status_code == 422:
msg = json.loads(resp.text)
raise SubmissionFailed(msg['message'])
if resp.status_code == 429:
raise CIFBusy('RateLimit exceeded')
if resp.status_code in [500, 501, 502, 503, 504]:
raise CIFBusy('system seems busy..')
if resp.status_code != expect:
msg = 'unknown: %s' % resp.content
raise RuntimeError(msg)
def _get(self, uri, params={}, retry=True):
if not uri.startswith('http'):
uri = self.remote + uri
resp = self.session.get(uri, params=params, verify=self.verify_ssl, timeout=self.timeout)
n = RETRIES
try:
self._check_status(resp, expect=200)
n = 0
except Exception as e:
if resp.status_code == 429 or resp.status_code in [500, 501, 502, 503, 504]:
logger.error(e)
else:
raise e
while n != 0:
logger.warning('setting random retry interval to spread out the load')
logger.warning('retrying in %.00fs' % RETRIES_DELAY)
sleep(RETRIES_DELAY)
resp = self.session.get(uri, params=params, verify=self.verify_ssl, timeout=self.timeout)
if resp.status_code == 200:
break
if n == 0:
raise CIFBusy('system seems busy.. try again later')
data = resp.content
s = (int(resp.headers['Content-Length']) / 1024 / 1024)
logger.info('processing %.2f megs' % s)
msgs = json.loads(data.decode('utf-8'))
if msgs.get('data') and msgs['data'] == '{}':
msgs['data'] = []
if msgs.get('data') and isinstance(msgs['data'], basestring) and msgs['data'].startswith('{"hits":{"hits":[{"_source":'):
msgs['data'] = json.loads(msgs['data'])
msgs['data'] = [r['_source'] for r in msgs['data']['hits']['hits']]
if not msgs.get('status') and not msgs.get('message') == 'success':
raise RuntimeError(msgs)
if msgs.get('status') and msgs['status'] == 'failed':
raise InvalidSearch(msgs['message'])
if isinstance(msgs.get('data'), list):
for m in msgs['data']:
if m.get('message'):
try:
m['message'] = b64decode(m['message'])
except Exception as e:
pass
return msgs
def _post(self, uri, data):
if type(data) == dict:
data = json.dumps(data)
if self.nowait:
uri = '{}?nowait=1'.format(uri)
if isinstance(data, str):
data = data.encode('utf-8')
data = zlib.compress(data)
headers = {
'Content-Encoding': 'deflate'
}
resp = self.session.post(uri, data=data, verify=self.verify_ssl, headers=headers, timeout=self.timeout)
logger.debug(resp.content)
n = RETRIES
try:
self._check_status(resp, expect=201)
n = 0
except Exception as e:
if resp.status_code == 429 or resp.status_code in [500, 501, 502, 503, 504]:
logger.error(e)
else:
raise e
while n != 0:
logger.info('setting random retry interval to spread out the load')
logger.info('retrying in %.00fs' % RETRIES_DELAY)
sleep(RETRIES_DELAY)
resp = self.session.post(uri, data=data, verify=self.verify_ssl, headers=headers, timeout=self.timeout)
if resp.status_code in [200, 201]:
break
if n == 0:
raise CIFBusy('system seems busy.. try again later')
return json.loads(resp.content.decode('utf-8'))
def _delete(self, uri, params={}):
params = {f: params[f] for f in params if params.get(f)}
if params.get('nolog'):
del params['nolog']
if params.get('limit'):
del params['limit']
resp = self.session.delete(uri, data=json.dumps(params), verify=self.verify_ssl, timeout=self.timeout)
self._check_status(resp)
return json.loads(resp.content.decode('utf-8'))
def _patch(self, uri, data):
resp = self.session.patch(uri, data=json.dumps(data), verify=self.verify_ssl, timeout=self.timeout)
self._check_status(resp)
return json.loads(resp.content.decode('utf-8'))
def indicators_search(self, filters):
rv = self._get('/search', params=filters)
return rv['data']
def indicators_create(self, data):
data = str(data).encode('utf-8')
uri = "{0}/indicators".format(self.remote)
logger.debug(uri)
rv = self._post(uri, data)
return rv["data"]
def indicators_delete(self, filters):
uri = "{0}/indicators".format(self.remote)
logger.debug(uri)
rv = self._delete(uri, params=filters)
return rv["data"]
def feed(self, filters):
rv = self._get('/feed', params=filters)
return rv['data']
def ping(self, write=False):
t0 = time.time()
uri = '/ping'
if write:
uri = '/ping?write=1'
rv = self._get(uri)
if rv:
rv = (time.time() - t0)
logger.debug('return time: %.15f' % rv)
return rv
def tokens_search(self, filters):
rv = self._get('{}/tokens'.format(self.remote), params=filters)
return rv['data']
def tokens_delete(self, data):
rv = self._delete('{}/tokens'.format(self.remote), data)
return rv['data']
def tokens_create(self, data):
logger.debug(data)
rv = self._post('{}/tokens'.format(self.remote), data)
return rv['data']
def tokens_edit(self, data):
rv = self._patch('{}/tokens'.format(self.remote), data)
return rv['data']
Plugin = HTTP
| csirtgadgets/cifsdk-py-v3 | cifsdk/client/http.py | http.py | py | 8,165 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "cifsdk.constants.PYVERSION",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "requests.packages.urllib3.disable_warnings",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "requests.packages",
"line_number": 19,
"usage_type": "attribute"
... |
73349162343 | import pygame
pygame.init()
screen = pygame.display.set_mode((720, 480))
clock = pygame.time.Clock()
FPS = 60
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
ballsurface = pygame.Surface((50,50))
ballsurface.set_colorkey((0,0,0))
pygame.draw.circle(ballsurface, (255,0,0), (25,25),25)
ballsurface = ballsurface.convert_alpha()
ballrect = ballsurface.get_rect()
while True:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
quit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP:
if ballrect.y >= 20:
ballrect.move_ip(0, -20)
elif event.key == pygame.K_DOWN:
if ballrect.y <= screen.get_size()[1] - 80:
ballrect.move_ip(0, 20)
elif event.key == pygame.K_LEFT:
if ballrect.x >= 20 :
ballrect.move_ip(-20, 0)
elif event.key == pygame.K_RIGHT:
if ballrect.x <= screen.get_size()[0] -80:
ballrect.move_ip(20, 0)
screen.fill((255,255,255))
screen.blit(ballsurface,ballrect)
pygame.display.update() # Or pygame.display.flip()
| Tevvur/lab8 | lab8(1).py | lab8(1).py | py | 1,257 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.Cloc... |
5953172719 | try:
from grove import grove_temperature_humidity_aht20
from grove.adc import ADC
from gpiozero import DigitalOutputDevice
import chainable_rgb_direct
except:
grove_temperature_humidity_aht20 = None
ADC = None
DigitalOutputDevice = None
chainable_rgb_direct = None
import random
class Plant:
def __init__(self):
if grove_temperature_humidity_aht20 != None and DigitalOutputDevice != None and ADC != None and chainable_rgb_direct != None:
self.i2c = grove_temperature_humidity_aht20.GroveTemperatureHumidityAHT20(
bus=4
)
self.fan_device = DigitalOutputDevice(18)
self.adc = ADC()
self.light_device = chainable_rgb_direct.rgb_led(2)
self.dummy_fan = False
self.dummy_light = False
def __str__(self):
return 'plant'
def temp_humi(self, *args):
if args[1] == True:
return {'temperature': random.uniform(10,30), 'humidity': random.uniform(0,200)}
temp, humi = self.i2c.read()
return {'temperature': temp, 'humidity': humi}
def water(self, *args):
if args[1] == True:
return {'water': random.uniform(0,1000)}
return {'water': self.adc.read_voltage(4)}
def moisture(self, *args):
if args[1] == True:
return {'moisture': random.uniform(0,200)}
return {'moisture': self.adc.read_voltage(2)}
def fan(self, *args):
if args[1] == True:
if args[0] != None:
self.dummy_fan = ('off', 'on')[args[0].lower()]
return {'fan': ('off', 'on')[self.dummy_fan]}
if args[0] != None:
new_state = args[0].lower()
if new_state == 'on':
self.fan_device.on()
elif new_state == 'off':
self.fan_device.off()
else:
raise ValueError(
f"Invalid fan state {new_state}. Must be 'on' or 'off'"
)
return {'fan': ('off', 'on')[self.fan_device.is_active]}
def light(self, *args):
if args[1] == True:
if args[0] != None:
self.dummy_light = ('off', 'on')[args[0].lower()]
return {'light': ('off', 'on')[self.dummy_light]}
if args[0] != None:
new_state = args[0].lower()
if new_state == 'on':
self.light_device.setOneLED(255, 255, 255, 0)
self.light_device.setOneLED(255, 255, 255, 1)
elif new_state == 'off':
self.light_device.setOneLED(0, 0, 0, 0)
self.light_device.setOneLED(0, 0, 0, 1)
else:
raise ValueError(
f"Invalid light state {new_state}. Must be 'on' or 'off'"
)
return {
'light':
('off', 'on')[0 not in (
self.light_device.r_all[0],
self.light_device.r_all[1],
self.light_device.g_all[0],
self.light_device.g_all[1],
self.light_device.b_all[0],
self.light_device.b_all[1]
)]
}
| Xermax3/ContainerFarm | Hardware/plant.py | plant.py | py | 2,790 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "grove.grove_temperature_humidity_aht20",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "grove.adc.ADC",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "gpiozero.DigitalOutputDevice",
"line_number": 9,
"usage_type": "name"
},
{
"ap... |
74330392745 | # How to detect specific color inside python
# from cv2 import getTrackbarPos
import numpy as np
import cv2 as cv
# img=cv.imread("resources/image.png")
# Convert in HSV (Hue, Saturation, Value)
# hue_img=cv.cvtColor(img,cv.COLOR_BGR2HSV)#rang barangi sakal my chla gye ga jb show kary gye
def slider():
pass
path = "resources/image.png"
# new img or new window bnaye gye essy
cv.namedWindow("Bars")
cv.resizeWindow("Bars", 900, 300)
# Track Bar
# cv.createTrackbar("Hue","Bars",0,179,slider) yaha sy start krna hai slider bnana
cv.createTrackbar("Hue min", "Bars", 0, 179, slider)
cv.createTrackbar("Hue max", "Bars", 179, 179, slider)
cv.createTrackbar("Sat min", "Bars", 0, 255, slider)
cv.createTrackbar("Sat max", "Bars", 255, 255, slider)
cv.createTrackbar("Val min", "Bars", 0, 255, slider)
cv.createTrackbar("Val max", "Bars", 255, 255, slider)
img = cv.imread(path)
hsv_img = cv.cvtColor(img, cv.COLOR_BGR2HSV)
# hue_min=getTrackbarPos("Hue min","Bars")
# print(hue_min)
while True:
img = cv.imread(path)
hsv_img = cv.cvtColor(img, cv.COLOR_BGR2HSV)
hue_min = cv.getTrackbarPos("Hue min", "Bars")
hue_max = cv.getTrackbarPos("Hue max", "Bars")
sat_min = cv.getTrackbarPos("Sat min", "Bars")
sat_max = cv.getTrackbarPos("Sat max", "Bars")
val_min = cv.getTrackbarPos("Val min", "Bars")
val_max = cv.getTrackbarPos("Val max", "Bars")
print(hue_min,hue_max,sat_min,sat_max,val_min,val_max)
# To see these changes inside an image
lower=np.array([hue_min,sat_min,val_min])
upper=np.array([hue_max,sat_max,val_max])
# image mask
mask_img=cv.inRange(hsv_img,lower,upper)
out_img=cv.bitwise_and(img,img,mask=mask_img)
cv.imshow("original",img)
cv.imshow("HSV",hsv_img)
cv.imshow("Mask",mask_img)
cv.imshow("Final Output",out_img)
if cv.waitKey(1) & 0xff ==ord('q'):
break
cv.destroyAllWindows()
| amirasghar123/Opencv-vision | 20_chptr.py | 20_chptr.py | py | 1,970 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.namedWindow",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.resizeWindow",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.createTrackbar",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.createTrackba... |
3460979087 | import pickle
import os
from bs4 import BeautifulSoup
from numpy import vectorize
import spacy
import unidecode
from word2number import w2n
import os
import pickle
#import contractions
nlp = spacy.load('en_core_web_lg')
# exclude words from spacy stopwords list
deselect_stop_words = ['no', 'not']
for w in deselect_stop_words:
nlp.vocab[w].is_stop = False
def strip_html_tags(text):
soup = BeautifulSoup(text, "html.parser")
stripped_text = soup.get_text(separator=" ")
return stripped_text
def remove_whitespace(text):
text = text.strip()
return " ".join(text.split())
def remove_accented_chars(text):
text = unidecode.unidecode(text)
return text
def expand_contractions(text):
text = contractions.fix(text)
return text
def text_preprocessing(text, accented_chars=True, contractions=True,
convert_num=True, extra_whitespace=True,
lemmatization=True, lowercase=True, punctuations=True,
remove_html=True, remove_num=True, special_chars=True,
stop_words=True):
if remove_html == True:
text = strip_html_tags(text)
if extra_whitespace == True:
text = remove_whitespace(text)
if accented_chars == True:
text = remove_accented_chars(text)
if contractions == True:
text = expand_contractions(text)
if lowercase == True:
text = text.lower()
doc = nlp(text)
clean_text = []
for token in doc:
flag = True
edit = token.text
if stop_words == True and token.is_stop and token.pos_ != 'NUM':
flag = False
if punctuations == True and token.pos_ == 'PUNCT' and flag == True:
flag = False
if special_chars == True and token.pos_ == 'SYM' and flag == True:
flag = False
if remove_num == True and (token.pos_ == 'NUM' or token.text.isnumeric()) \
and flag == True:
flag = False
if convert_num == True and token.pos_ == 'NUM' and flag == True:
edit = w2n.word_to_num(token.text)
elif lemmatization == True and token.lemma_ != "-PRON-" and flag == True:
edit = token.lemma_
if edit != "" and flag == True:
clean_text.append(edit)
return clean_text
def fix(PickleFile):
Path=r"C:\Users\moham\Desktop\info\abstracts"
Path_1=r"C:\Users\moham\Desktop\info\Treated_Abstracts"
os.makedirs(Path_1,exist_ok=True)
file=PickleFile.replace(".pkl",".txt")
if True :
f=open(os.path.join(Path,file),"r",encoding="UTF-8")
lines=f.readlines()
text=""
for line in lines :
text=text+" "+line
f_1=open(os.path.join(Path_1,file[:file.find(".txt")]+".pkl"),"wb")
pickle.dump(text_preprocessing(text), f_1)
f_1.close()
f.close()
Path=r"C:\Users\moham\Desktop\info\Treated_Abstracts"
vocab_to_int=dict()
max=0
for PickleFile in os.listdir(Path) :
with open(os.path.join(Path,PickleFile), 'rb') as f:
try:
loaded_text = pickle.load(f)
except :
fix(PickleFile)
loaded_text = pickle.load(f)
for mot in loaded_text :
if ((mot in vocab_to_int)==False):
vocab_to_int[mot]=1
else :
vocab_to_int[mot]+=1
T=[]
T_1=[]
s=0
for index,value in vocab_to_int.items():
T_1.append([index,value])
if value<10:
print (index)
s=s+1
else :
T.append([index,value])
T=sorted(T,key=lambda x:x[1],reverse=True)
print(s)
vocab_to_int=dict()
for i in range(len(T)):
vocab_to_int[T[i][0]]=i
int_to_vocab=dict()
for index,value in vocab_to_int.items():
int_to_vocab[value]=index
print(len(int_to_vocab),len(vocab_to_int))
with open(r"C:\Users\moham\Desktop\info\test\vocab_to_int.pkl", 'wb') as f:
pickle.dump(vocab_to_int, f)
with open(r"C:\Users\moham\Desktop\info\test\int_to_vocab.pkl", 'wb') as f:
pickle.dump(int_to_vocab, f)
with open(r"C:\Users\moham\Desktop\info\test\ifIchangemymind.pkl", 'wb') as f:
pickle.dump(T_1, f)
| abde0103/H-index-prediction | Code/PreprocessingAndGetIndices.py | PreprocessingAndGetIndices.py | py | 4,195 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "spacy.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "unidecode.unidecode",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "word2number.w2n.word... |
13990583368 | """
While the previous DFS approach works, it can potentially traverse long
sections of the graph without a real need. In order to improve further,
we can leverage the previously ignored properties of the similar relation: it
is an equivalence relation.
Equivalence relations bring a natural partition to the set, where in our
particular case each partition contains strings similar to each other. If we
model each partition as a disjoin-set, then we could optimize the
transitivity check: if two strings belong to same disjoint set, then they are
similar.
Knowing two strings belong to same disjoint-set, means retrieving the
representative for each one, and comparing those representatives. Retrieving
each representative, in a disjoint-set forests implementation, can be done in
O(log(n)). This should be better than the linear complexity from DFS.
The disjoint-set forest was taken from Cormen book.
NOTE: Despite theoretical advantages mentioned, this algorithm actually is
slower than the DFS one. Perhaps the paths to parent are too long (unbalanced
trees), or perhaps the path-compression is worth it only if you query more
times.
"""
from itertools import izip
class DisjointSet:
def __init__(self, val):
self.p = self
self.rank = 0
def make_set(x):
return DisjointSet(x)
def union(x, y):
link(find_set(x), find_set(y))
def link(x, y):
if x.rank > y.rank:
y.p = x
else:
x.p = y
if x.rank == y.rank:
y.rank += 1
def find_set(x):
if x != x.p:
x.p = find_set(x.p)
return x.p
class Solution(object):
def create_forest(self, pairs):
forest = dict()
for x, y in pairs:
if x not in forest:
forest[x] = make_set(x)
if y not in forest:
forest[y] = make_set(y)
union(forest[x], forest[y])
return forest
def are_similar(self, w1, w2, forest):
# reflexivity
if w1 == w2:
return True
# they can't be similar if one is not in forest
elif w1 not in forest or w2 not in forest:
return False
# symmetry & transitivity
elif find_set(forest[w1]) == find_set(forest[w2]):
return True
else:
return False
def areSentencesSimilarTwo(self, words1, words2, pairs):
if len(words1) != len(words2):
return False
forest = self.create_forest(pairs)
for w1, w2 in izip(words1, words2):
if not self.are_similar(w1, w2, forest):
return False
return True
| dariomx/topcoder-srm | leetcode/zero-pass/google/sentence-similarity-ii/Solution1.py | Solution1.py | py | 2,622 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "itertools.izip",
"line_number": 87,
"usage_type": "call"
}
] |
27786608731 | # -*- coding: utf-8 -*-
# * Credits:
# *
# * original Audio Profiles code by Regss
# * updates and additions through v1.4.1 by notoco and CtrlGy
# * updates and additions since v1.4.2 by pkscout
import xbmc
import json
import os
import sys
from resources.lib.fileops import *
from resources.lib.xlogger import Logger
from resources.lib.apsettings import loadSettings
from resources.lib.approfiles import Profiles
def _upgrade():
settings = loadSettings()
if settings['version_upgrade'] != settings['ADDONVERSION']:
settings['ADDON'].setSetting(
'version_upgrade', settings['ADDONVERSION'])
class apManual:
    """Entry point for a manual (user-triggered) audio profile switch."""

    def __init__(self):
        """Runs the audio profiler switcher manually."""
        settings = loadSettings()
        lw = Logger(preamble='[Audio Profiles]', logdebug=settings['debug'])
        lw.log(['script version %s started' %
                settings['ADDONVERSION']], xbmc.LOGINFO)
        lw.log(['debug logging set to %s' % settings['debug']], xbmc.LOGINFO)
        lw.log(['SYS.ARGV: %s' % str(sys.argv)])
        lw.log(['loaded settings', settings])
        profiles = Profiles(settings, lw)
        # the optional first CLI argument selects the switch mode;
        # False (no argument) lets Profiles decide its default behaviour
        try:
            mode = sys.argv[1]
        except IndexError:
            mode = False
        lw.log(['MODE: %s' % str(mode)])
        profiles.changeProfile(mode)
        lw.log(['script version %s stopped' %
                settings['ADDONVERSION']], xbmc.LOGINFO)
class apMonitor(xbmc.Monitor):
    """Background Kodi monitor that switches audio profiles automatically."""

    def __init__(self):
        """Starts the background process for automatic audio profile switching."""
        xbmc.Monitor.__init__(self)
        _upgrade()
        self._init_vars()
        self.LW.log(['background monitor version %s started' %
                     self.SETTINGS['ADDONVERSION']], xbmc.LOGINFO)
        self.LW.log(['debug logging set to %s' %
                     self.SETTINGS['debug']], xbmc.LOGINFO)
        self._change_profile(
            self.SETTINGS['auto_default'], forceload=self.SETTINGS['force_auto_default'])
        # idle loop: wake every 10 seconds until Kodi requests shutdown
        while not self.abortRequested():
            if self.waitForAbort(10):
                break
        self.LW.log(['background monitor version %s stopped' %
                     self.SETTINGS['ADDONVERSION']], xbmc.LOGINFO)

    def onNotification(self, sender, method, data):
        """React to Kodi events: wake, playback stop, and playback start."""
        data = json.loads(data)
        if 'System.OnWake' in method:
            self.LW.log(['MONITOR METHOD: %s DATA: %s' %
                         (str(method), str(data))])
            self._change_profile(self.SETTINGS['auto_default'])
        if 'Player.OnStop' in method:
            self.LW.log(['MONITOR METHOD: %s DATA: %s' %
                         (str(method), str(data))])
            # brief wait so a queued next item has a chance to start playing
            self.waitForAbort(1)
            if not self.KODIPLAYER.isPlaying():
                self._change_profile(self.SETTINGS['auto_gui'])
        if 'Player.OnPlay' in method:
            self.LW.log(['MONITOR METHOD: %s DATA: %s' %
                         (str(method), str(data))])
            self._auto_switch(data)

    def onSettingsChanged(self):
        # reload everything so new settings take effect immediately
        self._init_vars()

    def _init_vars(self):
        """(Re)load settings and construct logger, profile handler and player."""
        self.SETTINGS = loadSettings()
        self.PROFILESLIST = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
        # this only includes mappings we are 100% sure are accurate every time
        self.MAPTYPE = {'video': 'auto_videos', 'episode': 'auto_tvshows',
                        'musicvideo': 'auto_musicvideo', 'song': 'auto_music'}
        self.LW = Logger(
            preamble='[Audio Profiles Service]', logdebug=self.SETTINGS['debug'])
        self.PROFILES = Profiles(self.SETTINGS, self.LW, auto=True)
        self.KODIPLAYER = xbmc.Player()
        self.LW.log(['the settings are:', self.SETTINGS])
        self.LW.log(['initialized variables'])

    def _auto_switch(self, data):
        """Pick and apply a profile for the item that just started playing.

        Priority: user popup (if enabled) > codec match > channel-count match
        > aggressive music fallback > content-type setting.
        """
        if self.SETTINGS['player_show']:
            self.LW.log(['showing select menu'])
            if self.PROFILES.changeProfile('popup') is not None:
                self.LW.log(['option selected, returning'])
                return
            self.LW.log(
                ['select menu timed out or was closed with no selection - continuing to auto select'])
        content_autoswitch = self._auto_switch_content(data)
        self.LW.log(['got a content autoswitch of %s' % content_autoswitch])
        # music and radio are decided purely by content; skip stream inspection
        if content_autoswitch not in ['auto_music', 'auto_pvr_radio']:
            codec_setting, channels_setting = self._auto_switch_stream()
            if codec_setting != '0':
                the_setting = codec_setting
                self.LW.log(['using the codec setting of %s' % the_setting])
            elif channels_setting != '0':
                the_setting = channels_setting
                self.LW.log(['using the channels setting of %s' % the_setting])
            elif self.SETTINGS['aggressive_music_match'] and codec_setting == '0' and channels_setting == '0' and content_autoswitch == 'auto_unknown':
                the_setting = self.SETTINGS['auto_music']
                self.LW.log(
                    ['stream does not seem to be video, using the auto_music setting of %s' % the_setting])
            else:
                the_setting = self.SETTINGS[content_autoswitch]
                self.LW.log(['using the content setting of %s' % the_setting])
        else:
            the_setting = self.SETTINGS[content_autoswitch]
            self.LW.log(['using the content setting of %s' % the_setting])
        self._change_profile(the_setting)

    def _auto_switch_stream(self):
        """Query the current audio stream and map codec/channels to settings.

        Returns a (codec_setting, channels_setting) pair; '0' means
        "no profile configured for that criterion".
        """
        if self.SETTINGS['codec_delay'] > 0:
            self.LW.log(['waiting %s seconds before trying to get stream details' % str(
                self.SETTINGS['codec_delay'])])
            self.waitForAbort(self.SETTINGS['codec_delay'])
        response = xbmc.executeJSONRPC(
            '{"jsonrpc":"2.0", "method":"Player.GetProperties", "params":{"playerid":1, "properties":["currentaudiostream"]}, "id":1}')
        r_dict = json.loads(response)
        self.LW.log(['got back audio stream data of:', r_dict])
        try:
            codec = r_dict['result']['currentaudiostream']['codec']
        except (IndexError, KeyError, ValueError, TypeError):
            codec = None
        try:
            channels = r_dict['result']['currentaudiostream']['channels']
        except (IndexError, KeyError, ValueError, TypeError):
            channels = None
        self.LW.log(['got %s for the codec and %s for the channels' %
                     (str(codec), str(channels))])
        if codec:
            codec_set = 'auto_othercodec'
            for check_codec in ['dtshd', 'truehd', 'ac3', 'eac3', 'dts', 'dca']:
                self.LW.log(['checking %s against %s' % (codec, check_codec)])
                if codec.startswith(check_codec):
                    # dca is an alias Kodi reports for DTS
                    if check_codec == 'dca':
                        check_codec = 'dts'
                    codec_set = 'auto_%s' % check_codec
                    break
        else:
            codec_set = 'none'
        try:
            codec_setting = self.SETTINGS[codec_set]
        except KeyError:
            codec_setting = '0'
        if channels:
            if channels > 2:
                channels_set = 'auto_multichannel'
            else:
                channels_set = 'auto_stereo'
        else:
            channels_set = 'none'
        try:
            channels_setting = self.SETTINGS[channels_set]
        except KeyError:
            channels_setting = '0'
        self.LW.log(['got codec set of %s and channels set of %s' %
                     (codec_set, channels_set)])
        self.LW.log(['sending back codec setting of %s and channel setting of %s' % (
            codec_setting, channels_setting)])
        return codec_setting, channels_setting

    def _auto_switch_content(self, data):
        """Map the notification payload's item type to an auto_* setting name.

        Returns None when the payload has no item type.
        """
        try:
            thetype = data['item']['type']
        except KeyError:
            self.LW.log(
                ['data did not include valid item and/or type for playing media - aborting'])
            return
        self.LW.log(['the type is: %s' % thetype])
        theset = self.MAPTYPE.get(thetype)
        if not theset:
            if thetype == 'movie':
                # if video is a PVR recording assign to auto_pvr_tv
                if self._check_playing_file('pvr://'):
                    theset = 'auto_pvr_tv'
                # if video is not from library assign to auto_videos
                elif 'id' not in data['item']:
                    theset = 'auto_videos'
                # it must actually be a movie
                else:
                    theset = 'auto_movies'
            # distinguish pvr TV and pvr RADIO
            elif 'channel' in thetype and 'channeltype' in data['item']:
                if 'tv' in data['item']['channeltype']:
                    theset = 'auto_pvr_tv'
                elif 'radio' in data['item']['channeltype']:
                    theset = 'auto_pvr_radio'
                else:
                    theset = 'auto_unknown'
            # detect cdda that kodi return as unknown
            elif thetype == 'unknown':
                if self._check_playing_file('cdda://'):
                    theset = 'auto_music'
                else:
                    theset = 'auto_unknown'
            else:
                theset = 'auto_unknown'
        self.LW.log(['got %s from the content auto switch' % theset])
        return theset

    def _change_profile(self, profile, forceload=False):
        """Switch to `profile` if it differs from the last one (or forced).

        A value one past the end of PROFILESLIST means "show the popup".
        """
        if profile in self.PROFILESLIST:
            last_profile = self._get_last_profile()
            self.LW.log(
                ['Last loaded profile: %s To switch profile: %s' % (last_profile, profile)])
            if last_profile != profile or forceload:
                self.PROFILES.changeProfile(profile)
            else:
                self.LW.log(['Same profile - profiles not switched'])
        elif profile == str(len(self.PROFILESLIST) + 1):
            self.LW.log(
                ['this auto switch setting is set to show the select menu - showing menu'])
            self.PROFILES.changeProfile('popup')

    def _check_playing_file(self, thestr):
        """Return True if the currently playing file's path starts with `thestr`."""
        try:
            thefile = self.KODIPLAYER.getPlayingFile()
        except RuntimeError:
            # Kodi raises RuntimeError when nothing is playing
            self.LW.log(['error trying to get playing file from Kodi'])
            return False
        self.LW.log(['the playing file is: %s' % thefile])
        return thefile.startswith(thestr)

    def _get_last_profile(self):
        """Read the last applied profile from the addon data dir ('' if invalid)."""
        loglines, profile = readFile(os.path.join(
            self.SETTINGS['ADDONDATAPATH'], 'profile'))
        self.LW.log(loglines)
        if profile in self.PROFILESLIST:
            return profile
        else:
            return ''
| pkscout/script.audio.profiles | resources/lib/audioprofiles.py | audioprofiles.py | py | 10,698 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "resources.lib.apsettings.loadSettings",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "resources.lib.apsettings.loadSettings",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "resources.lib.xlogger.Logger",
"line_number": 30,
"usage_type... |
74267315625 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""linebot.models.limit module."""
from abc import ABCMeta
from future.utils import with_metaclass
from .base import Base
class Recipient(with_metaclass(ABCMeta, Base)):
    """Recipient.

    https://developers.line.biz/en/reference/messaging-api/#narrowcast-recipient

    Abstract base for narrowcast recipients. Recipient objects represent
    audiences; you can specify recipients based on a combination of criteria
    using logical operator objects.
    """

    def __init__(self, **kwargs):
        """__init__ method.

        :param kwargs: forwarded to :class:`Base`.
        """
        super(Recipient, self).__init__(**kwargs)
        # discriminator string; concrete subclasses overwrite this
        self.type = None
class AudienceRecipient(Recipient):
    """AudienceRecipient.

    Targets an audience previously created with the Manage Audience API.
    """

    def __init__(self, group_id=None, **kwargs):
        """__init__ method.

        :param int group_id: The audience ID. Create audiences with the
            Manage Audience API.
        :param kwargs: forwarded to :class:`Recipient`.
        """
        super(AudienceRecipient, self).__init__(**kwargs)
        self.type = "audience"
        # serialized as `audienceGroupId` by the camel-casing base class
        self.audience_group_id = group_id
class RedeliveryRecipient(Recipient):
    """RedeliveryRecipient.

    Targets the recipients of a previously sent narrowcast message.
    """

    def __init__(self, request_id=None, **kwargs):
        """__init__ method.

        :param str request_id: The request ID of the narrowcast message previously sent.
            The request IDs is an ID issued for each Messaging API request.
        :param kwargs: forwarded to :class:`Recipient`.
        """
        super(RedeliveryRecipient, self).__init__(**kwargs)
        self.type = "redelivery"
        self.request_id = request_id
| line/line-bot-sdk-python | linebot/models/recipient.py | recipient.py | py | 2,083 | python | en | code | 1,739 | github-code | 36 | [
{
"api_name": "future.utils.with_metaclass",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "abc.ABCMeta",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "base.Base",
"line_number": 25,
"usage_type": "argument"
}
] |
74642998183 | # -*- coding: utf-8 -*-
"""
@author: Florian Koch
@license: All rights reserved
"""
import pandas as pd
import json
# Convert the city of Zurich "Aussichtspunkt" (sighting point) GeoJSON into a
# flat CSV with one row per point (E/N coordinates plus name and type).
with open('../../data/city_of_zurich/aussichtspunkt.json') as data_file:
    data = json.load(data_file)
# Flatten: one row per coordinate component, carrying the feature name along.
# `pd.json_normalize` replaces `pd.io.json.json_normalize`, which was
# deprecated in pandas 1.0 and removed in pandas 2.0.
df = pd.json_normalize(data, ['features', ['geometry', 'coordinates']], ['name', ['features', 'properties', 'name']])
# Coordinates alternate E (even rows) and N (odd rows); split and re-merge so
# each sighting point becomes a single row with both coordinates.
df1 = df[::2]
df1.columns = ['E', 'type', 'name']
df2 = df[1::2]
df2.columns = ['N', 'type', 'name']
df = pd.merge(df1, df2, how='outer', on=['name', 'type'])
df = df[['E', 'N', 'name', 'type']]
# Explicit column assignment instead of attribute assignment (`df.type = ...`),
# which only works because the column already exists and is easy to misread.
df['type'] = 'Sighting Point'
# print(df)
df.to_csv('../../data/prepared/sighting_point.csv')
| limo1996/ETH-DataScience | src/preprocess/aussichtspunkt.py | aussichtspunkt.py | py | 646 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.io.json.json_normalize",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.io",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pandas.merge",... |
12813950306 | # 연구소
import sys
from itertools import combinations
from collections import deque
import copy
# faster line-based input for competitive programming
input = sys.stdin.readline
# board dimensions: N rows, M columns; cells are 0 empty, 1 wall, 2 virus
N,M = map(int,input().split())
board = []
for i in range(N):
    board.append(list(map(int,input().split())))
# collect empty cells (candidate wall positions) and virus source cells
virus = []
comb = []
for i in range(N):
    for j in range(M):
        if board[i][j] == 0:
            comb.append((i,j))
        elif board[i][j] == 2:
            virus.append((i,j))
# 4-directional movement offsets (right, left, down, up)
dx = [0,0,1,-1]
dy = [1,-1,0,0]
def bfs():
    """Spread the virus across a copy of the global `board` via BFS and
    return how many cells are still safe (value 0) afterwards."""
    grid = copy.deepcopy(board)
    queue = deque(virus)
    while queue:
        row, col = queue.popleft()
        for drow, dcol in zip(dx, dy):
            nrow, ncol = row + drow, col + dcol
            # infect in-bounds empty neighbours and keep expanding from them
            if 0 <= nrow < N and 0 <= ncol < M and grid[nrow][ncol] == 0:
                grid[nrow][ncol] = 2
                queue.append((nrow, ncol))
    return sum(line.count(0) for line in grid)
answer = 0
# brute force: try every way to place 3 new walls on empty cells and keep
# the placement that leaves the most cells safe after the virus spreads
for cb in combinations(comb,3):
    a,b,c = cb
    board[a[0]][a[1]] = 1
    board[b[0]][b[1]] = 1
    board[c[0]][c[1]] = 1
    answer = max(answer,bfs())
    # restore the cells so the next combination starts from a clean board
    board[a[0]][a[1]] = 0
    board[b[0]][b[1]] = 0
    board[c[0]][c[1]] = 0
print(answer)
| Girin7716/PythonCoding | Etc/PS/Q16.py | Q16.py | py | 1,217 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.stdin",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "itertools.combinations"... |
31541638918 | import os
from setuptools import setup
def read_project_file(path):
    """Return the text of *path*, resolved relative to this setup.py's
    directory, decoded as UTF-8.

    The encoding is pinned so the build result does not depend on the
    machine's locale (README.md may contain non-ASCII characters).
    """
    proj_dir = os.path.dirname(__file__)
    path = os.path.join(proj_dir, path)
    with open(path, 'r', encoding='utf-8') as f:
        return f.read()
# Package metadata; the long description is read straight from README.md.
setup(
    name = 'pyronic',
    version = '0.1.1',
    description = 'Suppress command output on success',
    long_description = read_project_file('README.md'),
    long_description_content_type = 'text/markdown',
    author = 'Jonathon Reinhart',
    author_email = 'Jonathon.Reinhart@gmail.com',
    url = 'https://github.com/JonathonReinhart/pyronic',
    python_requires = '>=3.4.0',
    license = 'MIT',
    classifiers = [
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: POSIX :: Linux',
    ],
    # installed as a plain script rather than a console_scripts entry point
    scripts=['pyronic'],
)
| JonathonReinhart/pyronic | setup.py | setup.py | py | 936 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6... |
38716436502 | #!/usr/bin/env python3
import re
from collections import defaultdict
from pprint import pprint
# Advent of Code 2020 day 21 part 2: deduce which food contains which allergen.
allergen_sets = defaultdict(list) # allergen => list of sets of foods
with open('input.txt', 'r') as infile:
    # each line looks like: "<foods...> (contains <allergens...>)"
    line_re = re.compile(r'((\w+\s+)+)\(contains(.*)\)')
    for line in infile:
        m = line_re.match(line)
        allergens = re.split(r',?\s', m[3].strip())
        foods = m[1].strip().split(" ")
        food_set = set(foods)
        for a in allergens:
            allergen_sets[a].append(food_set)
# A food can only be an allergen's source if it appears in every line that
# lists the allergen, so intersect the food sets per allergen.
possible_sources = {}
for a in allergen_sets:
    ls = allergen_sets[a]
    possible = ls[0]
    for s in ls[1:]:
        possible = possible.intersection(s)
    possible_sources[a] = possible
pprint(possible_sources)
# Constraint propagation: repeatedly take allergens with exactly one
# candidate food, assign them, and remove that food from all other allergens.
allergen_sources = {}
while len(possible_sources) > 0:
    known = [a for a in possible_sources if len(possible_sources[a]) == 1]
    for allergen in known:
        food = list(possible_sources[allergen])[0] # HACK
        allergen_sources[food] = allergen
        # Remove food from all other allergens
        for a in possible_sources:
            possible_sources[a].discard(food)
    for a in known:
        del possible_sources[a]
# canonical dangerous ingredient list: foods sorted by their allergen name
ls = sorted(allergen_sources.keys(), key=lambda x: allergen_sources[x])
print(','.join(ls)) | lvaughn/advent | 2020/21/food_list_2.py | food_list_2.py | py | 1,264 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_n... |
70955301864 | """Add description for 15 puzzle
Revision ID: bdeb38c37fg1
Revises: 05923bad79cf
Create Date: 2020-10-23 14:23:00.123969
"""
from sqlalchemy.sql import table, column
from sqlalchemy import String
from alembic import op
# revision identifiers, used by Alembic.
revision = 'bdeb38c37fg1'
down_revision = '05923bad79cf'
branch_labels = None
depends_on = None
def upgrade():
    """Set the scramble-notation description on the '15 Puzzle' event row."""
    # lightweight table stub so the migration can issue an UPDATE without
    # importing the application's models
    events = table('events', column('name', String), column('description', String))
    op.execute(events.update().where(events.c.name == op.inline_literal('15 Puzzle')).values({'description': op.inline_literal('<p>U = Up</p><p>D = Down</p><p>R = Right</p><p>L = Left</p><p>Scramble by moving the piece U/D/L/R relative to the empty space, into the empty space.</p><p>Moves like R3 indicate to perform an R move 3 times.</p>')}))
def downgrade():
    """Revert: blank out the '15 Puzzle' event description."""
    events = table('events', column('name', String), column('description', String))
    op.execute(events.update().where(events.c.name == op.inline_literal('15 Puzzle')).values({'description': op.inline_literal('')}))
| euphwes/cubers.io | migrations/versions/047_bdeb38c37fg1_add_desc_to_15_puzzle.py | 047_bdeb38c37fg1_add_desc_to_15_puzzle.py | py | 1,041 | python | en | code | 27 | github-code | 36 | [
{
"api_name": "sqlalchemy.sql.table",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.sql.column",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "alem... |
17849208142 | import torch
import torch.nn as nn
from prodict import Prodict
class ODEFunc(nn.Module):
    """Neural ODE dynamics function (decoder) of TDNODE.

    Given the concatenation of a tumor-state encoding and a parameter
    encoding, produces the time derivative of the state; the parameter part
    of the derivative is padded with zeros so it stays constant during
    integration.
    """

    def __init__(self, dims: Prodict, device: str = torch.device("cpu")):
        """The Neural ODE decoder Module of TDNODE. Neural network function that considers as input
        a tumor state and p-dimensional parameter encoding. Produces the next tumor state at the
        next available time point.

        Parameters
        ----------
        dims : Prodict
            A dictionary of the dimensionalities of the component modules to be used during
            instantiation.
        device : str, optional
            The device on which to load the module, by default torch.device("cpu").
        """
        super(ODEFunc, self).__init__()
        self.input_dim = dims.INPUT_DIM
        self.output_dim = dims.OUTPUT_DIM
        self.hidden_dim = dims.HIDDEN_DIM
        self.latent_dim = dims.LATENT_DIM
        # projects the (state + parameter) vector into the hidden width
        self.input_net = nn.Linear(self.input_dim, self.hidden_dim)
        self.device = device
        # blocks 2-4: stacked SELU MLP blocks of constant hidden width
        self.block2 = nn.Sequential(
            nn.SELU(),
            nn.Linear(self.hidden_dim, self.hidden_dim),
            nn.SELU(),
            nn.Linear(self.hidden_dim, self.hidden_dim),
        )
        self.block3 = nn.Sequential(
            nn.SELU(),
            nn.Linear(self.hidden_dim, self.hidden_dim),
            nn.SELU(),
            nn.Linear(self.hidden_dim, self.hidden_dim),
        )
        self.block4 = nn.Sequential(
            nn.SELU(),
            nn.Linear(self.hidden_dim, self.hidden_dim),
            nn.SELU(),
            nn.Linear(self.hidden_dim, self.hidden_dim),
        )
        self.block5 = nn.Sequential(nn.SELU(), nn.Linear(self.hidden_dim, self.hidden_dim))
        # block6: main output head; end_block: shortcut head straight from x1
        self.block6 = nn.Sequential(
            nn.SELU(),
            nn.Linear(self.hidden_dim, self.hidden_dim),
            nn.SELU(),
            nn.Linear(self.hidden_dim, self.output_dim),
        )
        self.end_block = nn.Sequential(nn.SELU(), nn.Linear(self.hidden_dim, self.output_dim))

    def forward(self, t: torch.Tensor, data: torch.Tensor):
        """Compute the state derivative for the ODE solver.

        Parameters
        ----------
        t : torch.Tensor
            A tensor of time measurements to be used during the solve process. Shape: L_T x 1,
            where L_T is the number of distinct time points in the batch. Unused by the
            dynamics itself (autonomous system) but required by the solver interface.
        data : torch.Tensor
            The concatenated batch of initial condition and parameter encodings (only at the first
            call). Shape: B x (c + p), where B is the batch size, c is the dimensionality of the
            initial condition encoding, and p is the dimensionality of the parameter encoding.

        Returns
        -------
        torch.Tensor
            Derivative of the state part, zero-padded over the parameter part.
        """
        x1 = self.input_net(data)
        # clone() protects x1 from the in-place residual adds below
        x2 = self.block2(x1.clone())
        x3 = self.block3(x2)
        x4 = self.block4(x3)
        x4 += x3
        x5 = self.block5(x4)
        x5 += x2
        x6 = self.block6(x5)
        x7 = self.end_block(x1)
        out = x6 + x7
        # zero derivative for the latent parameter dims: they are constants
        # of the integration
        returned = torch.cat(
            [out, torch.zeros(out.shape[0], self.latent_dim, device=self.device)], dim=-1
        )
        return returned
| jameslu01/TDNODE | src/model/SLD/ode_func.py | ode_func.py | py | 3,303 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "prodict.Prodict",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.device",
"line_n... |
3110630730 | """
Creator:
Dhruuv Agarwal
Github: Dhr11
"""
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils import data
from tqdm import tqdm
from Voc_loader import VOCLoader
from Unet_model import Unet
from metrics import custom_conf_matrix
def train():
    """Train a U-Net on Pascal VOC segmentation with periodic validation.

    Runs an effectively open-ended epoch loop (batch size 1), logging the
    training loss every `train_step` epochs, validating every `val_step`
    epochs, computing mean IoU every `iou_interval` epochs, and saving a
    checkpoint whenever the mean IoU improves.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    train_loader = data.DataLoader(
        VOCLoader("./",do_transform=True),
        shuffle=True,
        batch_size=1,
        #num_workers=8,
    )
    val_loader = data.DataLoader(
        VOCLoader("./",portion="val",do_transform=True),
        batch_size=1,
        #num_workers=8,
    )
    model = Unet()
    print(device)
    model = model.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), weight_decay=5*1e-4, lr = 0.0001, momentum=0.9)
    best_iou = -100
    epoch = 0
    total_epochs =100000
    train_step=5
    val_step =10
    # bookkeeping dicts keyed by epoch
    train_losses = {}
    val_losses = {}
    val_iou = {}
    train_iou = {}
    iou_interval = val_step*2
    model.cuda()
    while(epoch<total_epochs):
        epoch_loss=0
        for (imgs,labels) in train_loader:
            model.train()
            imgs, labels = imgs.to(device), labels.to(device)
            optimizer.zero_grad()
            out = model(imgs)
            loss = criterion(out,labels)
            loss.backward()
            optimizer.step()
            epoch_loss+=loss.item()
        train_losses[epoch] = epoch_loss/len(train_loader)
        if epoch % train_step==0:
            print("epoch:",epoch," loss:",epoch_loss/len(train_loader))
        if epoch % val_step==0:
            calc_iou = epoch % iou_interval==0
            print("val_step")
            model.eval()
            # 21 classes: 20 VOC object categories + background
            conf_mat = custom_conf_matrix([i for i in range(0,21)],21)
            with torch.no_grad():
                val_loss=0
                for vi, (vimg,vlbl) in enumerate(tqdm(val_loader)):
                    vimg, vlbl = vimg.to(device), vlbl.to(device)
                    # BUG FIX: previously fed `imgs` (the last *training*
                    # batch) to the model, so validation loss and IoU were
                    # computed on the wrong images.
                    vout = model(vimg)
                    vloss = criterion(vout,vlbl)
                    if calc_iou:
                        pred = vout.data.max(1)[1].cpu().numpy()
                        gt = vlbl.data.cpu().numpy()
                        conf_mat.update_step(gt.flatten(), pred.flatten())
                    val_loss += vloss.item()
                val_losses[epoch] = val_loss/len(val_loader)
                if calc_iou:
                    score = conf_mat.compute_mean_iou()
                    print("epoch:",epoch," val loss:",val_loss/len(val_loader),"mean iou ",score)
                    # checkpoint only when the mean IoU improves
                    if score>best_iou:
                        best_iou = score
                        state = {
                            "epoch": epoch + 1,
                            "model_state": model.state_dict(),
                            "optimizer_state": optimizer.state_dict(),
                            "best_iou": best_iou,
                        }
                        save_path = os.path.join(
                            "./",
                            "{}_epoch{}_best_model.pkl".format("Unet_pascalVOC", epoch),
                        )
                        torch.save(state, save_path)
                else:
                    print("epoch:",epoch," val loss:",val_loss/len(val_loader))
                conf_mat.reset()
        epoch+=1
    print(train_losses,val_losses,val_iou)
if __name__ == "__main__":
#run_id = random.randint(1, 100000)
train() | Dhr11/Semantic_Segmentation | Main_src.py | Main_src.py | py | 3,992 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "torch.device",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data... |
32413559312 | import abc
from typing import Callable, Dict, List, Optional, Union
import torch
import torchmetrics
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.loggers.logger import Logger
from torch.utils.data import DataLoader, Dataset
from renate import defaults
from renate.data.datasets import _TransformedDataset
from renate.models import RenateModule
from renate.utils.distributed_strategies import create_strategy
from renate.utils.misc import int_or_str
class Evaluator(LightningModule, abc.ABC):
    """A general Evaluator module for collection of quantitative metrics on the test dataset.

    This is an abstract interface which can be called with respect to a PyTorch Lightning
    `Trainer` and its `.test()` function. It collects quantitative observations with respect
    to a single dataset. (NOTE(review): the metrics are supplied via `logged_metrics`; no
    `create_metrics` function is visible in this module — confirm the original reference.)

    Args:
        model: A `RenateModule` to be evaluated.
        batch_size: The batch size to be used when creating the test data loader.
        transform: The transformation applied for evaluation.
        target_transform: The target transformation applied for evaluation.
        logged_metrics: Metrics logged additional to the default ones.
    """

    def __init__(
        self,
        model: RenateModule,
        batch_size: int,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        logged_metrics: Optional[Dict[str, torchmetrics.Metric]] = None,
    ) -> None:
        super().__init__()
        self._model = model
        # remove any registered hooks so evaluation runs the plain forward pass
        self._model.deregister_hooks()
        self._batch_size = batch_size
        self._transform = transform
        self._target_transform = target_transform
        self._metric_collection = torchmetrics.MetricCollection(logged_metrics)

    def on_model_test_start(
        self,
        test_dataset: Dataset,
        test_collate_fn: Optional[Callable] = None,
        task_id: Optional[str] = None,
    ) -> DataLoader:
        """Called before a model test starts.

        Wraps the dataset with the evaluation transforms, remembers the task id
        for the forward pass, and returns the DataLoader to feed to `.test()`.
        """
        test_dataset = _TransformedDataset(
            test_dataset,
            transform=self._transform,
            target_transform=self._target_transform,
        )
        self._task_id = task_id
        return DataLoader(
            test_dataset,
            batch_size=self._batch_size,
            shuffle=False,
            pin_memory=True,
            collate_fn=test_collate_fn,
        )

    def test_step(self, batch: List[torch.Tensor], batch_idx: int) -> None:
        """PyTorch Lightning function to perform the test step."""
        x, y = batch
        outputs = self(x)
        # updates every metric in the collection with this batch
        self._metric_collection(outputs, y)

    @abc.abstractmethod
    def forward(self, x, task_id: Optional[str] = None) -> torch.Tensor:
        """Forward pass of the model.

        Task ID can be used to specify, for example, the output head to perform the evaluation with
        a specific data Chunk ID. Here, the `task_id` is used only to compute the test metrics.
        """
        pass

    def on_test_epoch_end(self) -> None:
        """PyTorch Lightning function to perform at the end of test loop.

        Logs the metrics and resets the metric collection.
        """
        self.log_dict(self._metric_collection.compute(), on_step=False, on_epoch=True)
        self._metric_collection.reset()
class ClassificationEvaluator(Evaluator):
    """A classification Evaluator module for collection of quantitative metrics on the test
    dataset.
    """

    def forward(self, x, task_id: Optional[str] = None) -> torch.Tensor:
        """Forward pass of the model.

        Task ID can be used to specify, for example, the output head to perform the evaluation with
        a specific data Chunk ID. Here, the `task_id` is used only to compute the test metrics.
        """
        # fall back to the task id remembered by on_model_test_start
        if task_id is None:
            task_id = self._task_id
        return self._model.get_logits(x, task_id=task_id)
def evaluate(
    model: RenateModule,
    test_dataset: Union[List[Dataset], Dataset],
    test_collate_fn: Optional[Callable] = None,
    task_id: Union[List[str], str] = defaults.TASK_ID,
    batch_size: int = defaults.BATCH_SIZE,
    transform: Optional[Callable] = None,
    target_transform: Optional[Callable] = None,
    logged_metrics: Optional[Dict[str, torchmetrics.Metric]] = None,
    logger: Logger = defaults.LOGGER(**defaults.LOGGER_KWARGS),
    accelerator: defaults.SUPPORTED_ACCELERATORS_TYPE = defaults.ACCELERATOR,
    devices: Optional[int] = None,
    strategy: str = defaults.DISTRIBUTED_STRATEGY,
    precision: str = defaults.PRECISION,
) -> Dict[str, List[float]]:
    """Evaluate the model on the test dataset or a set of test datasets corresponding to distinct
    tasks.

    If the `test_dataset` are specified as a list of datasets, it is assumed to be ordered.
    Similarly, in a case the `task_id` are specified as a list, it is assumed to be ordered. A task
    ID list can be used to set specific model part to be used, for example, an output head with some
    specific test dataset in the input sequence.

    Args:
        model: A `RenateModule` to be evaluated.
        test_dataset: The test dataset(s) to be evaluated.
        test_collate_fn: collate_fn used in the DataLoader.
        task_id: The task id(s) of the test dataset(s).
        batch_size: The batch size to be used when creating the test data loader.
        transform: The transformation applied for evaluation.
        target_transform: The target transformation applied for evaluation.
        logged_metrics: Metrics logged additional to the default ones.
        logger: Logger used by PyTorch Lightning to log intermediate results.
        accelerator: Accelerator used by PyTorch Lightning to train the model.
        devices: Devices used by PyTorch Lightning to train the model. If the devices flag is not
            defined, it will assume devices to be "auto" and fetch the `auto_device_count` from the
            `accelerator`.
        strategy: Name of the distributed training strategy to use.
            `More details <https://lightning.ai/docs/pytorch/stable/extensions/strategy.html>`__
        precision: Type of bit precision to use.
            `More details <https://lightning.ai/docs/pytorch/stable/common/precision_basic.html>`__

    Returns:
        A dict mapping metric name to the list of per-dataset values, in
        `test_dataset` order.
    """
    # normalize the single-dataset / single-task forms to parallel lists
    if isinstance(test_dataset, Dataset):
        test_dataset = [test_dataset]
    if isinstance(task_id, str):
        task_id = [task_id] * len(test_dataset)
    assert len(task_id) == len(test_dataset)
    evaluator = ClassificationEvaluator(
        model=model,
        batch_size=batch_size,
        transform=transform,
        target_transform=target_transform,
        logged_metrics=logged_metrics,
    )
    trainer = Trainer(
        accelerator=accelerator,
        devices=devices,
        logger=logger,
        enable_checkpointing=False,
        enable_progress_bar=False,
        strategy=create_strategy(devices, strategy),
        precision=int_or_str(precision),
    )
    results = {}
    # evaluate each dataset with its matching task id and collect the logged
    # metric values per dataset
    for i in range(len(test_dataset)):
        test_loader = evaluator.on_model_test_start(test_dataset[i], test_collate_fn, task_id[i])
        trainer.test(
            evaluator,
            test_loader,
        )
        for metric_name, value in trainer.logged_metrics.items():
            if metric_name not in results:
                results[metric_name] = []
            results[metric_name].append(value.item())
    return results
| awslabs/Renate | src/renate/evaluation/evaluator.py | evaluator.py | py | 7,525 | python | en | code | 251 | github-code | 36 | [
{
"api_name": "pytorch_lightning.LightningModule",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "abc.ABC",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "renate.models.RenateModule",
"line_number": 34,
"usage_type": "name"
},
{
"api_nam... |
40027566811 | import cv2
import os
import imutils
import time
import numpy as np
from matplotlib import pyplot as plt
MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
age_list = [
'(0, 2)', '(4, 6)', '(8, 12)', '(15, 20)', '(25, 32)', '(38, 43)',
'(48, 53)', '(60, 100)'
]
gender_list = ['Male', 'Female']
class Agender:
    """Detects faces in an image and annotates each with a predicted
    age range and gender using pretrained Caffe models."""

    def __init__(self):
        # pretrained Caffe networks for age bucket and gender classification
        self.age_net = cv2.dnn.readNetFromCaffe('./data/deploy_age.prototxt',
                                                './data/age.caffemodel')
        self.gender_net = cv2.dnn.readNetFromCaffe(
            './data/deploy_gender.prototxt', './data/gender.caffemodel')
        self.face_cascade = cv2.CascadeClassifier(
            'data/haarcascade_frontalface_alt.xml')
        self.font = cv2.FONT_HERSHEY_SIMPLEX

    def predict(self, image_path):
        """Run face detection plus age/gender prediction on `image_path`
        and display the annotated image for 3 seconds."""
        print('Predicting %s' % image_path)
        # Load image
        image = cv2.imread(image_path)
        # Scale to gray
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Detect faces
        faces = self.face_cascade.detectMultiScale(gray, 1.1, 5)
        print("Found {} faces".format(str(len(faces))))
        for (x, y, w, h) in faces:
            cv2.rectangle(image, (x, y), (x + w, y + h), (255, 255, 0), 2)
            # Crop the face region.  BUG FIX: the column slice previously
            # started at `h` (image[y:y+h, h:h+w]) instead of the face's
            # x coordinate, so the nets were fed the wrong image patch.
            face_img = image[y:y + h, x:x + w].copy()
            blob = cv2.dnn.blobFromImage(
                face_img, 1, (227, 227), MODEL_MEAN_VALUES, swapRB=False)
            # Predict Age
            self.age_net.setInput(blob)
            age_preds = self.age_net.forward()
            age = age_list[age_preds[0].argmax()]
            print("Age Range: " + age)
            # Predict Gender
            self.gender_net.setInput(blob)
            gender_preds = self.gender_net.forward()
            gender = gender_list[gender_preds[0].argmax()]
            print("Gender : " + gender)
            overlay_text = "%s %s" % (gender, age)
            cv2.putText(image, overlay_text, (x, y), self.font, 1,
                        (255, 255, 255), 2, cv2.LINE_AA)
        cv2.imshow('frame', image)
        cv2.waitKey(3000)  # pauses for 3 seconds
| cNille/Agender | prediction.py | prediction.py | py | 2,143 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.dnn.readNetFromCaffe",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "cv2.dnn.readNetFromCaffe",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.dn... |
73969955624 | import torch
from torch import nn
import torch.nn.functional as F
from onerl.networks.norm_layer import normalization_layer
class PreactResBlock(nn.Module):
    """Pre-activation residual block: (norm -> ReLU -> conv) twice, plus an
    identity or 1x1-projection shortcut."""

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 stride: int,
                 norm_type: str,
                 groups: int):
        super(PreactResBlock, self).__init__()
        # Convolutions only need a bias when no normalization layer follows.
        bias = (norm_type == "none")
        self.bn1 = normalization_layer(in_channels, norm_type, groups)
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=bias)
        self.bn2 = normalization_layer(out_channels, norm_type, groups)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=bias)
        # Project the shortcut when spatial size or channel count changes.
        if (stride != 1) or (in_channels != out_channels):
            self.downsample = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)

    def forward(self, x):
        if hasattr(self, "downsample"):
            identity = self.downsample(x)
        else:
            identity = x
        out = self.conv1(F.relu(self.bn1(x)))
        out = self.conv2(F.relu(self.bn2(out)))
        return out + identity
class ResnetEncoder(nn.Module):
    """Small pre-activation ResNet image encoder ending in global average pooling."""

    def __init__(self,
                 in_channels: int,
                 num_layers: int = 3,
                 start_channels: int = 16,
                 norm_type: str = "batch_norm",
                 groups: int = 8):
        super().__init__()
        # Stem: a 3x3 conv followed by normalization.
        bias = (norm_type == "none")
        blocks = [
            nn.Conv2d(in_channels, start_channels, kernel_size=3, stride=1, padding=1, bias=bias),
            normalization_layer(start_channels, norm_type, groups),
        ]
        # Stages: each starts with a strided block (halving spatial size)
        # followed by a stride-1 block; the channel count doubles per stage.
        prev_ch = cur_ch = start_channels
        for _ in range(num_layers):
            blocks.append(PreactResBlock(prev_ch, cur_ch, 2, norm_type, groups))
            blocks.append(PreactResBlock(cur_ch, cur_ch, 1, norm_type, groups))
            prev_ch = cur_ch
            cur_ch *= 2
        self.layers = nn.Sequential(*blocks)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

    def forward(self, x):
        # Fold a frame-stack dimension into channels: N FS C H W -> N FS*C H W.
        if x.dim() == 5:
            x = x.view(x.shape[0], -1, x.shape[-2], x.shape[-1])
        # Scale uint8 pixel values into [0, 1] floats.
        if x.dtype is torch.uint8:
            x = x.to(torch.float) / 255
        feats = self.layers(x)
        return self.avgpool(feats).view(feats.shape[0], -1)
| imoneoi/onerl | onerl/networks/resnet.py | resnet.py | py | 2,604 | python | en | code | 16 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "onerl.networks.norm_layer.normalization_layer",
"line_number": 18,
"usage_type": "call"
},
{
"api_nam... |
719879844 | from collective.honeypot import _
from collective.honeypot.config import ACCEPTED_LOG_LEVEL
from collective.honeypot.config import DISALLOW_ALL_POSTS
from collective.honeypot.config import EXTRA_PROTECTED_ACTIONS
from collective.honeypot.config import HONEYPOT_FIELD
from collective.honeypot.config import IGNORED_FORM_FIELDS
from collective.honeypot.config import SPAMMER_LOG_LEVEL
from collective.honeypot.config import WHITELISTED_ACTIONS
from collective.honeypot.config import WHITELISTED_START
from copy import deepcopy
from zExceptions import Forbidden
from zope.globalrequest import getRequest
from zope.i18n import translate
try:
from plone.restapi.deserializer import json_body
except ImportError:
json_body = None
import logging
import six
logger = logging.getLogger("collective.honeypot")
def found_honeypot(form, required):
    """Check whether a spammer triggered the honeypot.

    A legitimate post satisfies two conditions:
    1. When *required* is True, the honeypot field must be present.
    2. The honeypot field must be empty.

    Return a short description string when a condition fails, False otherwise.
    """
    if not HONEYPOT_FIELD:
        # No honeypot field configured: the user only wants request
        # logging, not spammer blocking.
        return False
    if required and HONEYPOT_FIELD not in form:
        # The mandatory honeypot field was not submitted at all.
        return "misses required field"
    value = form.get(HONEYPOT_FIELD)
    # The field may have been put in the form twice by mistake, in which
    # case it arrives as a list; flatten it before testing emptiness.
    if isinstance(value, list):
        value = "".join(value)
    if value:
        # A non-empty honeypot value means a bot filled in the hidden field.
        return "has forbidden field"
    return False
def deny(msg=None):
    """Refuse the request by raising Forbidden, using a translated default
    message when none is given."""
    if msg is None:
        default_text = _(
            "post_denied_label",
            default="Posting denied due to possible spamming. "
            "Please contact us if we are wrong.",
        )
        msg = translate(default_text, context=getRequest())
    raise Forbidden(msg)
def whitelisted(action):
    """Return True when *action* is exempt from the honeypot checks."""
    if action in WHITELISTED_ACTIONS:
        return True
    # An action is also whitelisted when it starts with a configured prefix.
    return any(action.startswith(prefix) for prefix in WHITELISTED_START)
def get_form(request):
    """Extract the submitted form data from *request*, stripped of ignored
    and password fields.

    Handles classic Zope form posts, plone.restapi JSON posts, and plain
    dicts (useful in tests).  The original form is never mutated: a deep
    copy is made lazily, only once a field actually has to be removed.
    """
    form = getattr(request, "form", {})
    if (
        not form
        and getattr(request, "CONTENT_TYPE", "") == "application/json"
        and json_body
    ):
        # restapi post
        form = json_body(request)
    if not form and isinstance(request, dict):
        form = request
    # We may need to make a copy of the form. This may be expensive
    # in memory, so we make sure to do this only once when needed.
    copied = False
    for field in IGNORED_FORM_FIELDS:
        if field not in form:
            continue
        if not copied:
            form = deepcopy(form)
            copied = True
        form.pop(field)
    # Remove all password fields.
    # BUG FIX: iterate over a snapshot of the keys.  The original iterated
    # over the dict itself while popping from it, which raises
    # "dictionary changed size during iteration" whenever the form had
    # already been copied (i.e. an ignored field and a password field were
    # both present).
    for field in list(form):
        if "password" not in field:
            continue
        if not copied:
            form = deepcopy(form)
            copied = True
        form.pop(field)
    return form
def get_small_form(form):
    """Return a log-friendly copy of *form*.

    String values longer than 250 characters are truncated (to avoid
    printing complete textareas or file uploads); all other values are
    kept as-is.
    """
    small_form = {}
    for key, value in form.items():
        # BUG FIX: the original silently dropped string values of 250
        # characters or fewer, so most of the form never reached the log.
        # (six.string_types is just (str,) on Python 3.)
        if isinstance(value, str) and len(value) > 250:
            small_form[key] = value[:250] + "..."
        else:
            small_form[key] = value
    return small_form
def check_post(request):
    """Log a POST request and possibly forbid access.

    Could be useful in case of a spam attack.  Non-POST requests are
    ignored; whitelisted actions are only debug-logged.  Everything else is
    run through the honeypot checks: accepted posts are logged at
    ACCEPTED_LOG_LEVEL, detected spammers at SPAMMER_LOG_LEVEL, after which
    the spammer's request is denied with Forbidden.
    """
    if request.get("REQUEST_METHOD", "").upper() != "POST":
        return
    if DISALLOW_ALL_POSTS:
        # Global kill switch from the configuration: refuse every POST.
        # NOTE(review): logger.warn is a deprecated alias of logger.warning.
        logger.warn("All posts are disallowed.")
        # block the request:
        deny(msg="All posts are disallowed.")
    # Prefer the proxy-forwarded address when present.
    ip = request.get("HTTP_X_FORWARDED_FOR") or request.get("REMOTE_ADDR", "unknown")
    referer = request.get("HTTP_REFERER", "")
    url = request.get("ACTUAL_URL", "")
    action = url.split("/")[-1]  # last part of url
    # Normalize Plone-style "@@action" names.
    action = action.lstrip("@")
    if whitelisted(action):
        logger.debug("Action whitelisted: %s.", action)
        return
    form = get_form(request)
    # Extra-protected actions must contain the honeypot field; for all
    # other actions its presence is optional, but it must stay empty.
    if action in EXTRA_PROTECTED_ACTIONS:
        result = found_honeypot(form, required=True)
    else:
        result = found_honeypot(form, required=False)
    logger.debug("Checking honeypot fields for action %s. Result: %s.", action, result)
    if not result:
        try:
            form = get_small_form(form)
        except Exception:
            # Do not crash just because we want to log something.
            pass
        logger.log(
            ACCEPTED_LOG_LEVEL,
            "ACCEPTED POST from ip %s, url %r, referer %r, with form " "%r",
            ip,
            url,
            referer,
            form,
        )
        return
    logger.log(
        SPAMMER_LOG_LEVEL,
        "SPAMMER caught in honeypot: %s. ip %s, url %r",
        result,
        ip,
        url,
    )
    # block the request:
    deny()
| collective/collective.honeypot | collective/honeypot/utils.py | utils.py | py | 5,278 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "plone.restapi.deserializer.json_body",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "collective.honeypot.config.HONEYPOT_FIELD",
"line_number": 37,
"usage_type": "name... |
938560702 | import json
import jieba
import os
import argparse
frequency = {}
word2id = {"PAD": 0, "UNK": 1}
min_freq = 10
def cut(s):
    """Tokenize *s* with jieba, updating the module-level frequency table."""
    words = list(jieba.cut(s))
    for word in words:
        # Count every occurrence, creating the entry on first sight.
        frequency[word] = frequency.get(word, 0) + 1
    return words
if __name__ == "__main__":
    # Command line: --input/--output are directories of JSON-lines files;
    # --gen_word2id additionally writes a vocabulary file for words seen
    # at least min_freq times.
    parser = argparse.ArgumentParser()
    parser.add_argument('--input')
    parser.add_argument('--output')
    parser.add_argument('--gen_word2id', action="store_true")
    args = parser.parse_args()
    input_path = args.input
    output_path = args.output
    os.makedirs(output_path, exist_ok=True)
    for filename in os.listdir(input_path):
        # NOTE(review): the file handles are never closed; consider ``with``.
        fin = open(os.path.join(input_path, filename), "r", encoding="utf8")
        fout = open(os.path.join(output_path, filename), "w", encoding="utf8")
        for line in fin:
            data = json.loads(line)
            # Tokenize the statement and all four answer options in place.
            data["statement"] = cut(data["statement"])
            for option in ["A", "B", "C", "D"]:
                data["option_list"][option] = cut(data["option_list"][option])
            print(json.dumps(data, ensure_ascii=False, sort_keys=True), file=fout)
    if args.gen_word2id:
        # Assign sequential ids to every word above the frequency threshold.
        for word in frequency:
            if frequency[word] >= min_freq:
                word2id[word] = len(word2id)
        json.dump(word2id, open("../data/word2id.txt", "w", encoding="utf8"), indent=2, ensure_ascii=False)
| china-ai-law-challenge/CAIL2020 | sfks/baseline/utils/cutter.py | cutter.py | py | 1,406 | python | en | code | 150 | github-code | 36 | [
{
"api_name": "jieba.cut",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_... |
32065161333 | import random
from django.shortcuts import render , redirect , get_object_or_404
from django.http import HttpResponse
from django.contrib.auth.models import User , auth
from django.contrib import messages
from .models import Profile, Post, Likepost, FollowersCount , Notification , Comment
from django.contrib.auth.decorators import login_required
from itertools import chain
from django.contrib.auth.models import User
from django.contrib.auth import update_session_auth_hash # hi
from .forms import UsernameChangeForm
from django.db import transaction
from .forms import PostForm
from django.contrib.auth import get_user_model
from core import models
# Create your views here.
def register(request):
    """Create a new account from the signup form, log the user in and create
    their Profile; re-render the signup page on GET."""
    if request.method != 'POST':
        return render(request, 'signup.html')
    username = request.POST['username']
    email = request.POST['email']
    password = request.POST['password']
    password2 = request.POST['password2']
    # Guard clauses: any validation failure goes back to the signup form.
    if password != password2:
        messages.error(request, 'Passwords do not match')
        return redirect("register")
    if User.objects.filter(email=email).exists():
        messages.info(request, 'Email is already in use')
        return redirect('register')
    if User.objects.filter(username=username).exists():
        messages.info(request, 'Username is already in use')
        return redirect('register')
    new_account = User.objects.create_user(username=username, email=email, password=password)
    new_account.save()
    # Authenticate and start a session for the freshly created account.
    authenticated = auth.authenticate(username=username, password=password)
    auth.login(request, authenticated)
    # Every account gets a matching Profile row.
    owner = User.objects.get(username=username)
    fresh_profile = Profile.objects.create(user=owner, id_user=owner.id)
    fresh_profile.save()
    return redirect('index')
def Login(request):
    """Authenticate a user from the signin form; show the form again on GET
    or on bad credentials."""
    if request.method != 'POST':
        return render(request, 'signin.html')
    submitted_name = request.POST['username']
    submitted_pass = request.POST['password']
    account = auth.authenticate(username=submitted_name, password=submitted_pass)
    if account is None:
        messages.info(request, "invalid username or password")
        return redirect('login')
    auth.login(request, account)
    return redirect('/')
@login_required(login_url='login')
def index(request):
    """Render the feed: own posts plus posts of followed users, and up to
    four randomly chosen not-yet-followed accounts as follow suggestions."""
    user_object = User.objects.get(username=request.user.username)
    user_profile = Profile.objects.get(user=user_object)
    user_following_list = []
    feed = []
    user_following = FollowersCount.objects.filter(follower=request.user.username)
    for users in user_following:
        user_following_list.append(users.user)
    # Gather the posts of every followed user, then our own posts.
    for usernames in user_following_list:
        feed_lists = Post.objects.filter(user=usernames)
        feed.append(feed_lists)
    user_posts = Post.objects.filter(user=request.user.username)
    feed.append(user_posts)
    # Flatten the list of querysets into one list of posts.
    feed_list = list(chain(*feed))
    # User suggestion logic
    all_users = User.objects.all()
    user_following_all = []
    for user in user_following:
        user_list = User.objects.filter(username=user.user).first()
        if user_list:
            user_following_all.append(user_list)
    # Suggest users we do not follow yet, excluding ourselves.
    new_suggestions_list = [x for x in all_users if (x not in user_following_all)]
    current_user = User.objects.filter(username=request.user.username)
    final_suggestions_list = [x for x in new_suggestions_list if (x not in current_user)]
    random.shuffle(final_suggestions_list)
    username_profile = []
    username_profile_list = []
    for users in final_suggestions_list:
        username_profile.append(users.id)
    for ids in username_profile:
        profile_lists = Profile.objects.filter(id_user=ids)
        username_profile_list.append(profile_lists)
    suggestions_username_profile_list = list(chain(*username_profile_list))
    return render(request, 'index.html', {'user_profile': user_profile, 'posts': feed_list, 'suggestions_username_profile_list': suggestions_username_profile_list[:4]})
def logout(request):
    """End the current session and show the signin page."""
    auth.logout(request)
    return render(request, 'signin.html')
@login_required(login_url='login')
def settings(request):
    """Show and process the account settings form: an optional username
    change plus profile image(s), bio and location updates."""
    user_profile = Profile.objects.get(user=request.user)
    username_form = UsernameChangeForm()
    if request.method == 'POST':
        if 'new_username' in request.POST:
            username_form = UsernameChangeForm(request.POST)
            if username_form.is_valid():
                new_username = username_form.cleaned_data['new_username']
                # Rename atomically so a failure leaves the old name intact.
                with transaction.atomic():
                    old_username = request.user.username
                    request.user.username = new_username
                    request.user.save()
                # Update profile image, bio, and location
                if request.FILES.get('image') is not None:
                    user_profile.profileimg = request.FILES.get('image')
                if request.FILES.get('image1') is not None:
                    user_profile.profileimg2 = request.FILES.get('image1')
                user_profile.bio = request.POST.get('bio', '')
                user_profile.location = request.POST.get('location', '')
                user_profile.save()
                # Handle other updates and notifications if applicable
                # Keep the user logged in after the credential-related change.
                update_session_auth_hash(request, request.user)
        else:
            # Handle other profile updates
            if request.FILES.get('image') is not None:
                user_profile.profileimg = request.FILES.get('image')
            if request.FILES.get('image1') is not None:
                user_profile.profileimg2 = request.FILES.get('image1')
            user_profile.bio = request.POST.get('bio', '')
            user_profile.location = request.POST.get('location', '')
            user_profile.save()
        return redirect('settings')
    return render(request, 'setting.html', {'user_profile': user_profile, 'username_form': username_form})
@login_required(login_url='login')
def post(request):
    """Placeholder view; returns a static heading only."""
    return HttpResponse('<h1>Post</h1>')
@login_required(login_url='signin')
def upload(request):
    """Create a Post from the upload form; always return to the feed."""
    if request.method == 'POST':
        fresh_post = Post.objects.create(
            user=request.user.username,
            image=request.FILES.get('image_upload'),
            caption=request.POST['caption'],
        )
        fresh_post.save()
    return redirect('/')
@login_required(login_url='login')
def like_post(request):
    """Toggle the requesting user's like on the post given by ?post_id=.

    Liking increments the post's like counter and notifies the post owner;
    liking again removes the like and decrements the counter.
    """
    username = request.user.username
    post_id = request.GET.get('post_id')
    post = Post.objects.get(id=post_id)
    like = Likepost.objects.filter(post_id=post_id, username=username).first()
    if like is None:
        new_like = Likepost.objects.create(post_id=post_id, username=username)
        new_like.save()
        post.likes = post.likes + 1
        post.save()
        # BUG FIX: Post.user stores a username *string* (see upload()), so the
        # original comparison against the User object was always True and
        # users were notified about their own likes.  Compare usernames.
        if post.user != request.user.username:
            # Resolve the post owner's User instance for the notification.
            post_owner = User.objects.get(username=post.user)
            notification = Notification(
                user=post_owner,
                notification_type='Like',
                post=post,
                sender=request.user
            )
            notification.save()
    else:
        like.delete()
        post.likes = post.likes - 1
        post.save()
    return redirect('/')
@login_required(login_url='login')
def profile(request, pk):
    """Render the profile page for the user with username *pk*: their posts,
    follower/following counts, and the follow-button label for the viewer."""
    user_object = User.objects.get(username=pk)
    user_profile = Profile.objects.get(user=user_object)
    user_posts = Post.objects.filter(user=pk)
    user_post_length = len(user_posts)
    follower = request.user.username
    user = pk
    # Label the button according to the viewer's current follow relation.
    if FollowersCount.objects.filter(follower=follower, user=user).first():
        button_text = 'Unfollow'
    else:
        button_text = 'Follow'
    user_followers = len(FollowersCount.objects.filter(user=pk))
    user_following = len(FollowersCount.objects.filter(follower=pk))
    context = {
        'user_object': user_object,
        'user_profile': user_profile,
        'user_posts': user_posts,
        'user_post_length': user_post_length,
        'button_text': button_text,
        'user_followers': user_followers,
        'user_following': user_following,
    }
    return render(request, 'profile.html', context)
@login_required(login_url='signin')
def follow(request):
    """Toggle a follower relation between the posted follower and user."""
    if request.method != 'POST':
        return redirect('/')
    follower = request.POST['follower']
    followed = request.POST['user']
    existing = FollowersCount.objects.filter(follower=follower, user=followed).first()
    if existing:
        # Already following: remove the relation (unfollow).
        FollowersCount.objects.get(follower=follower, user=followed).delete()
    else:
        fresh = FollowersCount.objects.create(follower=follower, user=followed)
        fresh.save()
    return redirect('/profile/' + followed)
@login_required(login_url='login')
def search(request):
    """Search users by (partial) username and render the matching profiles.

    BUG FIX: the original only built the result list inside the POST branch,
    so a plain GET request crashed (no HTTP response / undefined result
    list).  The result list now defaults to empty and the page is always
    rendered.
    """
    user_object = User.objects.get(username=request.user.username)
    user_profile = Profile.objects.get(user=user_object)
    username_profile_list = []
    if request.method == 'POST':
        username = request.POST['username']
        username_object = User.objects.filter(username__icontains=username)
        # Collect the Profile rows belonging to every matching User.
        matching_profiles = []
        for users in username_object:
            matching_profiles.append(Profile.objects.filter(id_user=users.id))
        username_profile_list = list(chain(*matching_profiles))
    return render(request, 'search.html', {'user_profile': user_profile, 'username_profile_list': username_profile_list})
from uuid import UUID
@login_required(login_url='login')
def delete_post(request, post_id):
    """Delete a post, but only when the requesting user owns it."""
    try:
        doomed = Post.objects.get(id=post_id)
    except Post.DoesNotExist:
        # Handle the case where the post with the given post_id doesn't exist
        return HttpResponse("The post does not exist.")
    if doomed.user != request.user.username:
        # Handle the case where the user is not the owner of the post
        return HttpResponse("You are not authorized to delete this post.")
    doomed.delete()
    return redirect('index')
@login_required(login_url='login')
def edit_post(request, post_id):
    """Let a post's owner edit it through PostForm; others get a refusal."""
    post = get_object_or_404(Post, id=post_id)
    if post.user != request.user.username:
        return HttpResponse("You are not authorized to edit this post.")
    if request.method == 'POST':
        form = PostForm(request.POST, request.FILES, instance=post)
        if form.is_valid():
            if not request.FILES.get('image'):
                # NOTE(review): assigning to cleaned_data after is_valid()
                # has no effect on form.save(); this likely does not preserve
                # the old image as intended -- verify against PostForm.
                form.cleaned_data['image'] = post.image
            form.save()
            return redirect('index')
    else:
        form = PostForm(instance=post)
    return render(request, 'edit_post.html', {'form': form, 'post': post})
@login_required(login_url="login")
def notification(request):
user_notification = Notification.objects.filter(user=request.user , is_read= False)
return render(request, 'notifications.html', {'notifications': user_notification})
@login_required(login_url='login')
def view_post(request, post_id):
    """Show one post with its comments; a POST adds a new comment first."""
    shown = get_object_or_404(Post, id=post_id)
    # Lazy queryset: evaluated at render time, so a comment created below
    # is included in the page.
    related_comments = Comment.objects.filter(post=shown)
    if request.method == 'POST':
        Comment.objects.create(user=request.user, post=shown, content=request.POST.get('content'))
    return render(request, 'view_post.html', {'post': shown, 'comments': related_comments})
@login_required(login_url='login')
def add_comment(request, post_id):
    """Attach a comment to the given post, then go back to its detail page."""
    target = get_object_or_404(Post, id=post_id)
    if request.method == 'POST':
        Comment.objects.create(user=request.user, post=target, content=request.POST.get('content'))
    return redirect('view_post', post_id=target.id)
@login_required(login_url='login')
def delete_comment(request, comment_id):
    """Delete a comment, but only when the requester wrote it."""
    target = get_object_or_404(Comment, id=comment_id)
    # Only the author may delete their own comment.
    if target.user == request.user:
        target.delete()
    return redirect('view_post', post_id=target.post.id)
@login_required(login_url='login')
def edit_comment(request, comment_id):
    """Let a comment's author change its text; everyone else just sees the
    edit form rendered read-only."""
    target = get_object_or_404(Comment, id=comment_id)
    # Only the author may submit an edit.
    if target.user == request.user:
        if request.method == 'POST':
            target.content = request.POST.get('content')
            target.save()
            return redirect('view_post', post_id=target.post.id)
    return render(request, 'edit_comment.html', {'comment': target})
{
"api_name": "django.contrib.auth.models.User.objects.filter",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User",
"line_number": ... |
4965329236 | from settings.database import get_all_resumes, add, clear_table
from settings.config import ProfessionStep, ResumeGroup
from settings.tools import group_steps_to_resume
import locale
from typing import NamedTuple
from operator import attrgetter
from datetime import datetime, date
from rich.progress import track
locale.setlocale(locale.LC_TIME, 'ru_RU.UTF-8')
datetime.strptime
MonthDict = {
"январь": 1,
"февраль": 2,
"март": 3,
"апрель": 4,
"май": 5,
"июнь": 6,
"июль": 7,
"август": 8,
"сентябрь": 9,
"октябрь": 10,
"ноябрь": 11,
"декабрь": 12,
"january": 1,
"february":2,
"march": 3,
"april": 4,
"may": 5,
"june": 6,
"july": 7,
"august": 8,
"september": 9,
"october": 10,
"november": 11,
"december": 12,
}
class interval(NamedTuple):
    """A career step paired with its parsed start date, used for sorting."""
    step: ProfessionStep
    start_date: date
# steps = set(['Июль 2014 — Май 2015', 'Июль 2010 — Февраль 2012', 'Февраль 2012 — Июнь 2014', 'Июнь 2016 — по настоящее время', 'Июнь 2001 — Июль 2010', 'Июль 2015 — Март 2016', 'Июль 2014 — Май 2015', 'Июль 2010 — Февраль 2012', 'Февраль 2012 — Июнь 2014', 'Июнь 2016 — по настоящее время', 'Июнь 2001 — Июль 2010', 'Июль 2015 — Март 2016', 'Июль 2014 — Май 2015', 'Июль 2010 — Февраль 2012', 'Февраль 2012 — Июнь 2014', 'Июнь 2016 — по настоящее время', 'Июнь 2001 — Июль 2010', 'Июль 2015 — Март 2016', 'Июль 2014 — Май 2015', 'Июль 2010 — Февраль 2012', 'Февраль 2012 — Июнь 2014', 'Июнь 2016 — по настоящее время', 'Июнь 2001 — Июль 2010', 'Июль 2015 — Март 2016', 'Июль 2014 — Май 2015', 'Июль 2010 — Февраль 2012', 'Февраль 2012 — Июнь 2014', 'Июнь 2016 — по настоящее время', 'Июнь 2001 — Июль 2010', 'Июль 2015 — Март 2016', 'Июль 2014 — Май 2015', 'Июль 2010 — Февраль 2012', 'Февраль 2012 — Июнь 2014', 'Июнь 2016 — по настоящее время', 'Июль 2015 — Март 2016'])
def sort_steps(steps: list[ProfessionStep]) -> list[ProfessionStep]:
    """Sort career steps chronologically by the start of their experience
    interval (format: "<Month> <Year> — ...", Russian or English month names).

    Steps whose interval has an empty start part are silently dropped.
    """
    dated = []
    for step in steps:
        step_start = step.experienceInterval.split(" — ")[0]
        if not step_start:
            continue
        parts = step_start.split()
        try:
            start_year = parts[-1]
        except IndexError:
            # Narrowed from a bare ``except``: only a whitespace-only start
            # part (empty after splitting) can fail here.
            exit(f"Err:{step.experienceInterval}")
        # Direct dict lookup instead of the original linear scan over
        # MonthDict.items().
        start_month = MonthDict[parts[0].lower()]
        start_date = datetime.strptime(f"{start_month}.{start_year}", "%m.%Y")
        dated.append(interval(step, start_date))
    # The original loop variable shadowed the ``interval`` NamedTuple;
    # renamed here for clarity.
    return [item.step for item in sorted(dated, key=attrgetter("start_date"))]
# Load every "New" resume, sort each resume's steps chronologically,
# then rewrite the table with the ordered data.
resumes = group_steps_to_resume(get_all_resumes("New"))
for resume in track(range(len(resumes)), description="[red]Осталось:"):
    sorded_steps = sort_steps(resumes[resume].ITEMS)
    resumes[resume].ITEMS = sorded_steps
clear_table('New')
print("записываем в бд")
# Re-insert every step of every resume into the cleared table.
for resume in resumes:
    for step in resume.ITEMS:
        add(table_name='New', data=step)
{
"api_name": "locale.setlocale",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "locale.LC_TIME",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "da... |
11025748669 | """user token nonce
Revision ID: 71503b29c05a
Revises: aac9a548d9f5
Create Date: 2018-05-04 13:42:42.222974
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '71503b29c05a'
down_revision = 'aac9a548d9f5'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add the non-nullable 8-char ``nonce`` column to ``tokens``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('tokens', sa.Column('nonce', sa.String(length=8), nullable=False))
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``nonce`` column from ``tokens``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('tokens', 'nonce')
    # ### end Alembic commands ###
| akahard2dj/Blackberry | migrations/versions/71503b29c05a_user_token_nonce.py | 71503b29c05a_user_token_nonce.py | py | 661 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "alembic.op.add_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.String"... |
16969152957 | # -*- coding: utf-8 -*-
from jsonfield.fields import JSONField
from natasha import (
Segmenter,
MorphVocab,
MoneyExtractor,
NamesExtractor,
NewsEmbedding,
NewsMorphTagger,
NewsSyntaxParser,
NewsNERTagger,
PER,
LOC,
ORG
)
from natasha.grammars.date import Date, MONTHS, MONTH, DAY, YEAR, YEAR_SHORT, YEAR_WORD
from natasha.extractors import Extractor
from yargy import (
rule, or_
)
from yargy.predicates import dictionary
from celery.utils.log import get_logger
from django.utils.translation import ugettext_lazy as _
from edw.models.mixins import ModelMixin
from edw.models.mixins.nlp import Doc
from edw.tasks import extract_ner_data
def get_month_value_by_key(key):
    """Return the month number for a month-name key, or None when unknown.

    BUG FIX: the original wrapped ``MONTHS.__getitem__`` in a bare ``except``
    (which would also have swallowed KeyboardInterrupt etc.) and then fell
    back to ``MONTHS.get``; for a mapping the two are equivalent, so the
    lookup is done once with ``.get``.
    """
    return MONTHS.get(key)
MONTH_NAME = dictionary(MONTHS).interpretation(
Date.month.normalized().custom(get_month_value_by_key)
)
DATE = or_(
rule(
DAY,
'.',
MONTH,
'.',
or_(
YEAR,
YEAR_SHORT
),
YEAR_WORD.optional()
),
rule(
YEAR,
YEAR_WORD
),
rule(
DAY,
MONTH_NAME
),
rule(
MONTH_NAME,
YEAR,
YEAR_WORD.optional()
),
rule(
DAY,
MONTH_NAME,
YEAR,
YEAR_WORD.optional()
),
).interpretation(
Date
)
class DatesExtractor(Extractor):
    """Yargy-based extractor bound to the extended DATE grammar defined above."""
    def __init__(self, morph):
        # Pair the custom DATE rule with the shared morph vocabulary.
        Extractor.__init__(self, DATE, morph)
class NERMixin(ModelMixin):
"""
Миксин для работы с NER. Добавляет в модель методы получения списка именованных сущностей, а также прочие методы,
необходимые для работы с ними
"""
EXTRACTED_TYPES = [PER, LOC, ORG, 'DATE', 'MONEY']
NO_INDEX_TYPES = [PER, LOC, 'DATE', 'MONEY']
REPLACERS = [
(' | | ', ' '),
('"|«|«|»|»|“|”|‘|’|‚|„', '\"'),
('–|—', '-'),
('…', '...'),
('>', '>'),
('<', '<'),
]
NER_TASK_WAIT_EXECUTION_INTERVAL = 5
ner_data = JSONField(verbose_name=_("NER data"), default={},
help_text=_("Data obtained after recognition of named entities for the given text"))
def get_ner_source(self):
'''
Метод для получения исходных данных для получения именованных сущностей. Требуется перекрыть в модели где осуществляется
примешивание
:return:
'''
return self.entity_name
@classmethod
def get_extracted_types(cls):
return cls.EXTRACTED_TYPES
@classmethod
def get_no_index_types(cls):
return cls.NO_INDEX_TYPES
@classmethod
def get_segmenter(cls):
segmenter = getattr(cls, "_segmenter", None)
if not segmenter:
segmenter = Segmenter()
cls._segmenter = segmenter
return segmenter
@classmethod
def get_morph_vocab(cls):
morph_vocab = getattr(cls, "_morph_vocab", None)
if not morph_vocab:
morph_vocab = MorphVocab()
cls._morph_vocab = morph_vocab
return morph_vocab
@classmethod
def get_extractors(cls):
extractors = getattr(cls, "_extractors", None)
if not extractors:
morph_vocab = cls.get_morph_vocab()
extractors = [DatesExtractor(morph_vocab), MoneyExtractor(morph_vocab)]
cls._extractors = extractors
return extractors
@classmethod
def get_embedding(cls):
embedding = getattr(cls, "_embedding", None)
if not embedding:
embedding = NewsEmbedding()
cls._embedding = embedding
return embedding
@classmethod
def get_morph_tagger(cls):
morph_tagger = getattr(cls, "_morph_tagger", None)
if not morph_tagger:
embedding = cls.get_embedding()
morph_tagger = NewsMorphTagger(embedding)
cls._morph_tagger = morph_tagger
return morph_tagger
@classmethod
def get_syntax_parser(cls):
syntax_parser = getattr(cls, "_syntax_parser", None)
if not syntax_parser:
embedding = cls.get_embedding()
syntax_parser = NewsSyntaxParser(embedding)
cls._syntax_parser = syntax_parser
return syntax_parser
@classmethod
def get_ner_tagger(cls):
ner_tagger = getattr(cls, "_ner_tagger", None)
if not ner_tagger:
embedding = cls.get_embedding()
ner_tagger = NewsNERTagger(embedding)
cls._ner_tagger = ner_tagger
return ner_tagger
@staticmethod
def _extract_ner(doc, morph_tagger, morph_vocab, syntax_parser, ner_tagger, extractors, extracted_types):
# Apply morph
doc.tag_morph(morph_tagger)
# Lemmatize
for token in doc.tokens:
token.lemmatize(morph_vocab)
# Parse syntax
doc.parse_syntax(syntax_parser)
# NER extract
doc.tag_ner(ner_tagger, extractors=extractors)
# Normalize data
if doc.spans:
for span in doc.spans:
span.normalize(morph_vocab)
# Extend person data
if doc.spans:
names_extractor = NamesExtractor(morph_vocab)
for span in doc.spans:
if span.type == PER:
span.extract_fact(names_extractor)
# Get result
result = {}
for _ in doc.spans:
span_type = _.type
if span_type in extracted_types:
if not span_type in result:
result.update({span_type: []})
data = _.as_json
result[span_type].append(data)
return result
def extract_ner(self):
'''
Данный метод вызывать только через task`и! Если его вызывать из инстанции объекта то это приведет к перерасходу
памяти из-за того, что для каждого запущенного потока сервера будет создана копия данных нужных для извлечения
именованных сущностей. Каждая копия использует 250-350 мегабайт оперативной памяти, на боевом сервере создается
практически столько потоков сколько есть процессорных ядер, у сервером с большим количеством ядер это приведет
к тому что память будет использоваться крайне неэффективно.
'''
doc = Doc(self.get_ner_source())
doc.segment(self.get_segmenter())
morph_tagger = self.get_morph_tagger()
morph_vocab = self.get_morph_vocab()
syntax_parser = self.get_syntax_parser()
ner_tagger = self.get_ner_tagger()
extractors = self.get_extractors()
extracted_types = self.get_extracted_types()
return self._extract_ner(doc, morph_tagger, morph_vocab, syntax_parser,
ner_tagger, extractors, extracted_types)
def extract_ner_by_task(self):
ner_data = {}
try:
result = extract_ner_data.apply_async(
kwargs={
"obj_id": self.id,
"obj_model": self.__class__.__name__.lower()
},
expires=self.NER_TASK_WAIT_EXECUTION_INTERVAL,
retry=False,
)
except extract_ner_data.OperationalError as exc:
logger = get_logger('logfile_error')
logger.exception('Sending task raised: %r', exc)
else:
try:
ner_data = result.get(
interval=self.NER_TASK_WAIT_EXECUTION_INTERVAL,
propagate=False,
)
except Exception:
pass
return ner_data
@property
def highlighter_context(self):
result = []
_already_append = []
for span_type in self.ner_data.keys():
for ner_data_by_type in self.ner_data[span_type]:
text = ner_data_by_type['text']
if not text in _already_append:
_already_append.append(text)
result.append({
'text': text,
'type': span_type.lower(),
})
return result
def cleaned_text_for_index(self):
# Получаем данные для индексации тем же методом, что и при распознавании.
text = self.get_ner_source()
if self.ner_data:
# Цикл по всем имеющимся в объекте типам данных NER
for span_type in self.ner_data.keys():
# Цикл по всем данным определенного типа
for ner_data_by_type in self.ner_data[span_type]:
# Если данные включены в список исключаемого к индексации - удаляем их
if ner_data_by_type['type'] in self.NO_INDEX_TYPES:
text = text.replace(ner_data_by_type['text'], ' ')
return text
| infolabs/django-edw | backend/edw/models/mixins/nlp/ner.py | ner.py | py | 9,781 | python | ru | code | 6 | github-code | 36 | [
{
"api_name": "natasha.grammars.date.MONTHS.__getitem__",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "natasha.grammars.date.MONTHS",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "natasha.grammars.date.MONTHS.get",
"line_number": 43,
"usage_type":... |
995230871 | # Authors: Antoine Ginies <aginies@suse.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
configuration
"""
import os
import subprocess
import yaml
import virtscenario.firmware as fw
import virtscenario.dict as c
import virtscenario.util as util
import virtscenario.guest as guest
import virtscenario.hypervisors as hv
# Candidate directories searched for configuration files, in priority order.
conffile_locations = [
    '.',
    '~/.local/virt-scenario',
    '/etc/virt-scenario',
    '/etc',
]
conffile_name = 'virtscenario.yaml'
hvfile_name = 'virthosts.yaml'

def find_file_dir(name, what):
    """
    Search conffile_locations for *name*.

    what: "file" to match a regular file, "dir" to match a directory.
    Returns the first match; when nothing is found, returns the path the
    entry would have in the first (highest-priority) location.
    """
    # Default answer when nothing matches; no `global` needed for reads.
    default = "{}/{}".format(conffile_locations[0], name)
    for path in conffile_locations:
        path = os.path.expanduser(path)
        tofind = "{}/{}".format(path, name)
        if what == "file":
            if os.path.isfile(tofind):
                return tofind
        elif what == "dir":
            if os.path.isdir(tofind):
                return tofind
    return default
def find_conffile():
    """Return the path of the main virtscenario YAML configuration file."""
    # The `global` declarations were unnecessary: these names are only read.
    return find_file_dir(conffile_name, "file")

def find_hvfile():
    """Return the path of the hypervisors YAML configuration file."""
    return find_file_dir(hvfile_name, "file")

def find_vmconfig_dir():
    """Return the directory used to store per-VM configuration."""
    return find_file_dir("vmconfig", "dir")
def check_conffile(conf):
    """
    Check that *conf* points to an existing configuration file.

    Prints an error message and returns False when the file is missing,
    otherwise returns True.
    """
    if os.path.isfile(conf) is False:
        util.print_error(conf+" configuration Yaml file Not found!")
        # Typo fix: "contine" -> "continue".
        print("Please select one to continue:")
        print("conf /path/to/file.yaml")
        return False
    return True
class Configuration():
    """
    All stuff relative to configuration.

    NOTE(review): the attributes below are class-level defaults that the
    instance methods later shadow with instance attributes; the mutable
    dicts are therefore shared until basic_config()/check_user_settings()
    replace them.
    """
    conffile = find_conffile()
    hvfile = find_hvfile()
    # NOTE(review): runs at class-definition (import) time, not per instance.
    util.check_iam_root()
    vm_config_store = find_vmconfig_dir()
    emulator = None
    inputkeyboard = ""
    inputmouse = ""
    xml_all = None
    vcpu = name = diskpath = memory = osdef = ondef = cpumode = power = watchdog = ""
    audio = usb = disk = features = clock = network = filename = tpm = iothreads = ""
    callsign = custom = security = video = controller = hugepages = toreport = ""
    loader = config = fw_info = vm_config = cdrom = vnet = hostfs = vmimage = ""
    # default is local
    hypervisor_name = "localhost"
    STORAGE_DATA = STORAGE_DATA_REC = host_filesystem = xmldata = nothing_to_report = ""
    memory_pin = False
    # There is some Immutable in dict for the moment...
    #IMMUT = immut.Immutable()
    CONSOLE = guest.create_console()#IMMUT.console_data)
    CHANNEL = guest.create_channel()#IMMUT.channel_data)
    GRAPHICS = guest.create_graphics()#IMMUT.graphics_data)
    #MEMBALLOON = guest.create_memballoon()#IMMUT.memballoon_data)
    RNG = guest.create_rng()#IMMUT.rng_data)
    #METADATA = guest.create_metadata()#IMMUT.metadata_data)
    # what kind of configuration should be done; default is both mode
    mode = "both"
    all_modes = ['guest', 'host', 'both']
    # by default set some value as off
    overwrite = force_sev = "off"
    on_off_options = ['on', 'off']
    dataprompt = {
        'name': None,
        'vcpu': None,
        'memory': None,
        'memory_backing': None,
        'machine': None,
        'boot_dev': None,
        'vnet': None,
        'cdrom': None,
        'mainconf': conffile,
        'hvconf': hvfile,
        'hvselected': None,
        'path': '/var/lib/libvirt/images',
        # Bug fix: the key was misspelled 'orverwrite', which made this
        # default unreachable for the dataprompt.get('overwrite') lookup
        # done in check_user_settings().
        'overwrite': 'off',
        'cluster_size': None,
        'disk_target': None,
        'lazy_refcounts': None,
        'disk_cache': None,
        'preallocation': None,
        'encryption': None,
        'capacity': None,
        'format': None,
    }
    # default os
    listosdef = {
        'arch': "x86_64",
        'machine': "pc-q35-6.2",
        'boot_dev': 'hd',
    }
def basic_config(self):
"""
init the basic configuration
"""
self.vcpu = ""
self.memory = ""
self.osdef = ""
self.name = ""
self.ondef = ""
self.cpumode = ""
self.power = ""
self.watchdog = ""
self.audio = ""
self.usb = ""
self.disk = ""
self.features = ""
self.clock = ""
self.network = ""
self.vnet = "default"
self.filename = ""
self.tpm = ""
self.iothreads = ""
self.callsign = ""
self.custom = ""
self.loader = None
self.security = ""
self.video = ""
self.config = ""
self.hostfs = ""
self.cdrom = ""
self.xmldata = ""
self.fw_info = fw.default_firmware_info()
self.nothing_to_report = True
# prefile STORAGE_DATA in case of...
self.STORAGE_DATA = {
# XML part
'disk_type': 'file',
'disk_cache': '',
'disk_target': 'vda',
'disk_bus': 'virtio',
'format': '',
'unit': 'G',
'capacity': '20',
'cluster_size': '1024',
'lazy_refcounts': '',
'preallocation': '',
'compression_type': 'zlib',
'encryption': '',
#'password': '',
}
# This dict is the recommended settings for storage
self.STORAGE_DATA_REC = {}
# prefile host_filesystem
self.host_filesystem = {
'fmode': '644',
'dmode': '755',
'target_dir': '/tmp/',
'source_dir': '/tmp/host',
}
# BasicConfiguration
# pre filed in case of...
data = c.BasicConfiguration()
self.emulator = guest.create_emulator(data.emulator("/usr/bin/qemu-system-x86_64"))
self.inputkeyboard = guest.create_input(data.input("keyboard", "virtio"))
self.inputmouse = guest.create_input(data.input("mouse", "virtio"))
# Using virtscenario.yaml to file some VAR
with open(self.conf.conffile) as file:
config = yaml.full_load(file)
# parse all section of the yaml file
for item, value in config.items():
# check mathing section
if item == "hypervisors":
for dall in value:
for datai, valuei in dall.items():
if datai == 'hvconf':
self.conf.hvfile = valuei
else:
util.print_error("Unknow parameter in hypervisors section: {}".format(datai))
elif item == "config":
for dall in value:
for datai, valuei in dall.items():
if datai == 'path':
self.vm_config = valuei
elif datai == 'vm-config-store':
self.vm_config_store = valuei
else:
util.print_error("Unknown parameter in config section: {}".format(datai))
elif item == "emulator":
for dall in value:
for datai, valuei in dall.items():
if datai == "emulator":
self.emulator = guest.create_emulator(data.emulator(valuei))
elif datai == "fw_meta":
self.fw_info = fw.reload_firmware_info(valuei)
else:
util.print_error("Unknow parameter in emulator section")
elif item == "host_filesystem":
for dall in value:
for datai, valuei in dall.items():
if datai == "fmode":
self.host_filesystem['fmode'] = valuei
elif datai == "dmode":
self.host_filesystem['dmode'] = valuei
elif datai == "source_dir":
self.host_filesystem['source_dir'] = valuei
elif datai == "target_dir":
self.host_filesystem['target_dir'] = valuei
else:
util.print_error("Unknow parameter in host_filesystem section")
elif item == "input":
# Parse keyboard and mouse
for dall in value:
for datai, valuei in dall.items():
if datai == "keyboard":
self.inputkeyboard = guest.create_input(data.input("keyboard", valuei))
elif datai == "mouse":
self.inputmouse = guest.create_input(data.input("mouse", valuei))
else:
util.print_error("Unknow parameter in input section")
elif item == "architecture":
# Parse list os def section
for dall in value:
for datai, valuei in dall.items():
if datai == "arch":
self.conf.listosdef.update({'arch': valuei})
else:
util.print_error("Unknow parameter in lisofdef section")
elif item == "STORAGE_DATA":
# available option in config.yaml file, all other ignored
storage_dict = ["disk_type", "disk_cache", "disk_target", "disk_bus", "path",
"format", "unit", "capacity", "cluster_size",
"lazy_refcounts", "preallocation", "compression_type",
"encryption",
]
# Parse storage section
for dall in value:
for datai, valuei in dall.items():
# check the option is the same and file it
if datai in storage_dict:
self.STORAGE_DATA[datai] = valuei
#print("DEBUG "+datai+":"+str(valuei))
else:
util.print_error("Unknow option for storage!")
else:
util.print_error("Unknow Section: {}".format(item))
hv.load_hypervisors(self.conf.hvfile)
def check_storage(self):
"""
use storage data from config.yaml if available, compare to recommended
create a list to show diff between user setting and recommended
"""
self.toreport = {1:{}, 2:{}, 3:{}, 4:{}, 5:{}, 6:{}}
nestedindex = 0
# Create the XML disk part
# DISK PATH
# if no data path set use recommended
if self.STORAGE_DATA['path'] == "":
self.STORAGE_DATA['path'] = self.conf.diskpath['path']
# if path differ grab data to report
if self.conf.diskpath['path'] != self.STORAGE_DATA['path']:
# there is no diff is no user setting
if self.STORAGE_DATA['path'] != "":
nestedindex += 1
self.toreport[nestedindex]['title'] = "Disk path"
self.toreport[nestedindex]['rec'] = self.STORAGE_DATA['path']
self.toreport[nestedindex]['set'] = self.conf.diskpath['path']
# PREALLOCATION
if self.STORAGE_DATA['preallocation'] is False:
self.STORAGE_DATA['preallocation'] = "off"
# no preallocation has been set, using recommended
# if they differ grab data to report
if self.STORAGE_DATA['preallocation'] != self.STORAGE_DATA_REC['preallocation']:
# there is no diff if no user setting
if self.STORAGE_DATA['preallocation'] != "":
nestedindex += 1
self.toreport[nestedindex]['title'] = "Disk preallocation"
self.toreport[nestedindex]['rec'] = self.STORAGE_DATA_REC['preallocation']
self.toreport[nestedindex]['set'] = self.STORAGE_DATA['preallocation']
if self.STORAGE_DATA['preallocation'] == "":
self.STORAGE_DATA['preallocation'] = self.STORAGE_DATA_REC['preallocation']
# ENCRYPTION
if self.STORAGE_DATA['encryption'] is False:
self.STORAGE_DATA['encryption'] = "off"
if self.STORAGE_DATA['encryption'] is True:
self.STORAGE_DATA['encryption'] = "on"
if self.STORAGE_DATA_REC['encryption'] is True:
self.STORAGE_DATA_REC['encryption'] == "on"
if self.STORAGE_DATA_REC['encryption'] is False:
self.STORAGE_DATA_REC['encryption'] == "off"
# if they differ grab data to report
if self.STORAGE_DATA['encryption'] != self.STORAGE_DATA_REC['encryption']:
# there is no diff if no user setting
if self.STORAGE_DATA['encryption'] != "":
nestedindex += 1
self.toreport[nestedindex]['title'] = "Disk Encryption"
self.toreport[nestedindex]['rec'] = self.STORAGE_DATA_REC['encryption']
self.toreport[nestedindex]['set'] = self.STORAGE_DATA['encryption']
# if no encryption set and recommended is on
if self.STORAGE_DATA['encryption'] == "" and self.STORAGE_DATA_REC['encryption'] == "on":
self.STORAGE_DATA['encryption'] = "on"
# ask for password in case of encryption on
if self.STORAGE_DATA['encryption'] == "on":
# Ask for the disk password
if self.conf.vmimage is None:
if self.gtk is not True:
password = util.input_password()
else:
password = self.conf.password
self.STORAGE_DATA['password'] = password
# DISKCACHE
if self.STORAGE_DATA['disk_cache'] != self.STORAGE_DATA_REC['disk_cache']:
if self.STORAGE_DATA['disk_cache'] != "":
nestedindex += 1
self.toreport[nestedindex]['title'] = "Disk Cache"
self.toreport[nestedindex]['rec'] = self.STORAGE_DATA_REC['disk_cache']
self.toreport[nestedindex]['set'] = self.STORAGE_DATA['disk_cache']
# if no disk_cache use the recommanded one
if self.STORAGE_DATA['disk_cache'] == "":
self.STORAGE_DATA['disk_cache'] = self.STORAGE_DATA_REC['disk_cache']
# LAZY_REFCOUNTS
if self.STORAGE_DATA['lazy_refcounts'] is False:
self.STORAGE_DATA['lazy_refcounts'] = "off"
if self.STORAGE_DATA['lazy_refcounts'] is True:
self.STORAGE_DATA['lazy_refcounts'] = "on"
if self.STORAGE_DATA_REC['lazy_refcounts'] is True:
self.STORAGE_DATA_REC['lazy_refcounts'] == "on"
if self.STORAGE_DATA_REC['lazy_refcounts'] is False:
self.STORAGE_DATA_REC['lazy_refcounts'] == "off"
if self.STORAGE_DATA['lazy_refcounts'] != self.STORAGE_DATA_REC['lazy_refcounts']:
if self.STORAGE_DATA['lazy_refcounts'] != "":
nestedindex += 1
self.toreport[nestedindex]['title'] = "Disk Lazy_refcounts"
self.toreport[nestedindex]['rec'] = self.STORAGE_DATA_REC['lazy_refcounts']
self.toreport[nestedindex]['set'] = self.STORAGE_DATA['lazy_refcounts']
# if no disk_cache use the recommanded one
if self.STORAGE_DATA['lazy_refcounts'] == "":
self.STORAGE_DATA['lazy_refcounts'] = self.STORAGE_DATA_REC['lazy_refcounts']
# DISK FORMAT
if self.STORAGE_DATA['format'] != self.STORAGE_DATA_REC['format']:
if self.STORAGE_DATA['format'] != "":
nestedindex += 1
self.toreport[nestedindex]['title'] = "Disk Format"
self.toreport[nestedindex]['rec'] = self.STORAGE_DATA_REC['format']
self.toreport[nestedindex]['set'] = self.STORAGE_DATA['format']
# if no disk format use the recommanded one
if self.STORAGE_DATA['format'] == "":
self.STORAGE_DATA['format'] = self.STORAGE_DATA_REC['format']
# user specify an image to use
if self.conf.vmimage is not None:
output = subprocess.check_output(["qemu-img", "info", self.conf.vmimage])
output = output.decode("utf-8")
format_line = [line for line in output.splitlines() if "file format:" in line][0]
image_format = format_line.split(":")[1].strip()
self.STORAGE_DATA['format'] = image_format
self.STORAGE_DATA['source_file'] = self.conf.vmimage
else:
self.STORAGE_DATA['source_file'] = self.STORAGE_DATA['path']+"/"+self.callsign+"."+self.STORAGE_DATA['format']
# Remove index in dict which are empty
if nestedindex >= 1:
for _count in range(1, 6):
if len(self.toreport) != nestedindex:
self.toreport.pop(len(self.toreport))
self.nothing_to_report = False
else:
self.nothing_to_report = True
    def set_memory_pin(self, value):
        """Enable/disable memory pinning for the guest (see check_user_settings)."""
        self.memory_pin = value
def pre_hypervisor_setting(self):
"""
need to check hypervisor value earlier
"""
hypervisor_n = self.conf.dataprompt.get('hvselected')
if hypervisor_n != None:
util.print_ok("Selected Hypervisor: " +hypervisor_n)
self.hypervisor_name = hypervisor_n
else:
self.hypervisor_name = "localhost"
util.print_ok("Selected Hypervisor: localhost")
self.hypervisor = hv.connect_hypervisor(self.hypervisor_name)
if not self.hypervisor.is_connected():
util.print_error("No connection to LibVirt: "+self.hypervisor_name)
return
def check_user_settings(self, virtum):
"""
Check if the user as set some stuff, if yes use it
only usefull for Guest setting
"""
vcpuuser = self.conf.dataprompt.get('vcpu')
if vcpuuser != None:
self.vcpu = guest.create_cpu({'vcpu': vcpuuser})
else:
self.vcpu = guest.create_cpu(virtum.vcpu)
nameuser = self.conf.dataprompt.get('name')
if nameuser != None:
self.name = guest.create_name({'VM_name': nameuser})
self.callsign = nameuser
else:
self.name = guest.create_name(virtum.name)
diskpathuser = self.conf.dataprompt.get('path')
if diskpathuser != None:
self.conf.diskpath = {'path': diskpathuser}
self.STORAGE_DATA.update({'path': diskpathuser})
clustersize = self.conf.dataprompt.get('cluster_size')
if clustersize != None:
self.STORAGE_DATA.update({'cluster_size': clustersize})
preallocation = self.conf.dataprompt.get('preallocation')
if preallocation != None:
self.STORAGE_DATA.update({'preallocation': preallocation})
encryption = self.conf.dataprompt.get('encryption')
if encryption != None:
self.STORAGE_DATA.update({'encryption': encryption})
# fore both mode in case of encryption on as we need uuid from VM image
self.conf.mode = "both"
disk_cache = self.conf.dataprompt.get('disk_cache')
if disk_cache != None:
self.STORAGE_DATA.update({'disk_cache': disk_cache})
lazy_refcounts = self.conf.dataprompt.get('lazy_refcounts')
if lazy_refcounts != None:
self.STORAGE_DATA.update({'lazy_refcounts': lazy_refcounts})
disk_target = self.conf.dataprompt.get('disk_target')
if disk_target != None:
self.STORAGE_DATA.update({'disk_target': disk_target})
capacity = self.conf.dataprompt.get('capacity')
if capacity != None:
self.STORAGE_DATA.update({'capacity': capacity})
disk_format = self.conf.dataprompt.get('format')
if disk_format != None:
self.STORAGE_DATA.update({'format': disk_format})
# memory_backing = self.conf.dataprompt.get('memory_backing')
# if memory_backing != None:
# self.memory_backing = guest.create_memory_backing()
# else:
# self.memory_backing = ""
memoryuser = self.conf.dataprompt.get('memory')
if memoryuser != None:
mem_dict = {
'mem_unit': 'Gib',
'max_memory': memoryuser,
'current_mem_unit': 'Gib',
'memory': memoryuser,
}
if virtum.memory_pin:
mem_dict['pin'] = virtum.memory_pin
self.memory = guest.create_memory(mem_dict)
else:
self.memory = guest.create_memory(virtum.memory)
cdrom = self.conf.dataprompt.get('dvd')
if cdrom != None:
self.cdrom = guest.create_cdrom({'source_file': cdrom})
# if CD/DVD selected swith boot dev to cdrom by default
self.conf.listosdef.update({'boot_dev': 'cdrom'})
vmimage = self.conf.dataprompt.get('vmimage')
if vmimage != "":
self.conf.vmimage = vmimage
machineuser = self.conf.dataprompt.get('machine')
bootdevuser = self.conf.dataprompt.get('boot_dev')
if machineuser != None:
self.conf.listosdef.update({'machine': machineuser})
if bootdevuser != None:
self.conf.listosdef.update({'boot_dev': bootdevuser})
self.osdef = guest.create_osdef(self.conf.listosdef)
vnet = self.conf.dataprompt.get('vnet')
if vnet != None:
self.vnet = vnet
overwrite = self.conf.dataprompt.get('overwrite')
if overwrite != None:
self.conf.overwrite = overwrite
return self
| aginies/virt-scenario | src/virtscenario/configuration.py | configuration.py | py | 22,528 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "os.path.expanduser",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_nu... |
19416762509 | from check_your_heuristic.dataset.ReCoRDDataset import ReCoRDDataset
from check_your_heuristic.heuristics.Heuristic import BaseHeuristicSolver
from typing import Dict, Any, List
import pandas as pd
import string
import numpy as np
import logging
class ReCoRDHeuristics(BaseHeuristicSolver):
    """Heuristic baselines for the ReCoRD reading-comprehension dataset."""

    def __init__(self, config: Dict[str, Any], dataset: ReCoRDDataset):
        # Bug fix: the original used super(BaseHeuristicSolver, self),
        # which looks *past* BaseHeuristicSolver in the MRO and therefore
        # skipped BaseHeuristicSolver.__init__ entirely.
        super().__init__(dataset=dataset, config=config)
        self.passage_column = config["passage_column"]
        self.question_column = config["question_column"]
        self.entities_column = config["entities_column"]
@staticmethod
def normalize_answer(text: str):
"""Lower text and remove punctuation, articles and extra whitespace."""
def white_space_fix(line):
return ' '.join(line.split())
def remove_punct(line):
exclude = set(string.punctuation)
return ''.join(ch for ch in line if ch not in exclude)
return white_space_fix(remove_punct(text.lower()))
def _get_entities(self, row, column_name:str):
words = [
row[self.passage_column][x["start"]: x["end"]]
for x in row[column_name]
]
return words
    def get_basic_pred(self,
                       row: pd.DataFrame,
                       words: List[str],
                       _words: List[str],
                       line_candidates: List[str]
                       ) -> str:
        """Pick a prediction from the filtered candidates *_words*, falling
        back to the unfiltered *words*; ties are broken by a random draw.

        NOTE(review): the random index is drawn from
        arange(1, len(line_candidates)), so candidate 0 can never be
        selected - looks like an off-by-one; confirm before changing.
        """
        if len(_words) == 0:
            # No candidate survived filtering: fall back to all candidates.
            if len(words) == 1:
                pred = words[0]
            else:
                for word in words:
                    line_candidates.append(row[self.question_column].replace("@placeholder", word))
                pred_idx = np.random.choice(np.arange(1, len(line_candidates)),
                                            size=1)[0]
                pred = np.array(words)[pred_idx]
        elif len(_words) == 1:
            pred = _words[0]
        else:
            for word in _words:
                line_candidates.append(row[self.question_column].replace("@placeholder", word))
            pred_idx = np.random.choice(np.arange(1, len(line_candidates)),
                                        size=1)[0]
            pred = np.array(_words)[pred_idx]
        return pred
def filtration_count_heuristic(self, row: pd.DataFrame) -> str:
"""
Heuristic that removes some candidates and filters out candidates depended on times they occurred in the text
If there are a lot of candidates the one is chosen randomly
"""
line_candidates = []
_words = []
text = row[self.passage_column].split()
words = self._get_entities(row=row, column_name=self.entities_column)
for word in words:
if word[:-2] not in row[self.question_column] or text.count(words[:-2]) >= 2:
_words.append(word)
pred = self.get_basic_pred(row=row, words=words, _words=_words, line_candidates=line_candidates)
return self.normalize_answer(pred)
def remove_candidates_heuristic(self, row: pd.DataFrame) -> str:
"""
Heuristic that removes candidates that occur in the question
"""
words = self._get_entities(row=row, column_name=self.entities_column)
line_candidates = []
_words = []
for word in words:
if word[:-1] not in row[self.question_column]:
_words.append(word)
pred = self.get_basic_pred(row=row, words=words, _words=_words, line_candidates=line_candidates)
return self.normalize_answer(pred)
    def metric_max_over_ground_truths(self, row: pd.DataFrame, predictions_colname: str) -> float:
        """
        As there are several true answers, compute the metric against each
        and keep the best.
        :param row: row of the predicted data frame
        :param predictions_colname: column holding the heuristic's prediction
        :return: the best exact-match score over all gold answers (0 if none match)
        """
        scores_for_ground_truths = [0]
        prediction = row[predictions_colname]
        # self.target_name comes from BaseHeuristicSolver - assumed to hold
        # the gold entity-span column; confirm against the base class.
        ground_truths = self._get_entities(row=row, column_name=self.target_name)
        for ground_truth in ground_truths:
            score = self.exact_match_score(prediction, ground_truth)
            scores_for_ground_truths.append(score)
        return max(scores_for_ground_truths)
def check_heuristics(self) -> Dict[str, float]:
"""
Checks how the heuristics are present in the data sets and prints the results
:return: json-like object with all the results
"""
result = {}
self.train["pred_remove_candidates_heuristic"] = self.train.apply(
self.remove_candidates_heuristic,
axis=1
)
result["exact_match_score_remove_candidates_heuristic"] = np.mean(self.train.apply(
lambda row: self.metric_max_over_ground_truths(
row=row,
predictions_colname="pred_remove_candidates_heuristic"
),
axis=1).to_list()
)
self.train["pred_filtration_count_heuristic"] = self.train.apply(
self.filtration_count_heuristic,
axis=1
)
self.train["true_filtration_count_heuristic"] = self.train[self.target_name]
result["exact_match_score_filtration_count_heuristic_train"] = np.mean(self.train.apply(
lambda row: self.metric_max_over_ground_truths(
row=row,
predictions_colname="true_filtration_count_heuristic"
),
axis=1
).to_list()
)
if self.valid is not None:
result_df = pd.DataFrame()
result_df["pred_remove_candidates_heuristic"] = self.valid.apply(
self.remove_candidates_heuristic,
axis=1
)
result_df["true_labels"] = self.valid[self.target_name]
result["exact_match_score_remove_candidates_heuristic_valid"] = np.mean(result_df.apply(
lambda row: self.metric_max_over_ground_truths(
row=row,
predictions_colname="pred_remove_candidates_heuristic"
),
axis=1
).to_list()
)
result_df["pred_filtration_count_heuristic"] = self.valid.apply(
self.filtration_count_heuristic,
axis=1
)
result_df["true_filtration_count_heuristic"] = self.valid[self.target_name]
result["exact_match_score_filtration_count_heuristic_valid"] = np.mean(result_df.apply(
lambda row: self.metric_max_over_ground_truths(
row=row,
predictions_colname="pred_filtration_count_heuristic"
),
axis=1).to_list()
)
for key, value in result.items():
print(key, '\n', value, '\n')
return result
    def exact_match_score(self, prediction: str, ground_truth: str) -> bool:
        """True when the prediction equals the normalized gold answer."""
        return prediction == self.normalize_answer(ground_truth)

    # The generic solver entry points below do not apply to ReCoRD-style
    # span prediction, so they are explicitly disabled.
    def all_methods(self):
        logging.error("Method is deprecated for this type of dataset")
        raise AttributeError

    def random_balanced_choice(self):
        logging.error("Method is deprecated for this type of dataset")
        raise AttributeError

    def random_choice(self):
        logging.error("Method is deprecated for this type of dataset")
        raise AttributeError

    def majority_class(self):
        logging.error("Method is deprecated for this type of dataset")
        raise AttributeError

    def show_report(self):
        logging.error("Method is deprecated for this type of dataset")
        raise AttributeError
| tatiana-iazykova/check_your_heuristic | check_your_heuristic/heuristics/ReCoRDHeuristics.py | ReCoRDHeuristics.py | py | 7,723 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "check_your_heuristic.heuristics.Heuristic.BaseHeuristicSolver",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 11,
"usage_type": "name"
},
{
... |
19698435188 | import numpy as np
from numpy import ma
import matplotlib.pyplot as plt
import matplotlib.patches as mp
from matplotlib.collections import PatchCollection
# Masses of the two primaries (sun-like and earth-like body, arbitrary units).
m_s = 4
m_e = 1
# Positions of the two bodies.
x_s, y_s = 0, 0
x_e, y_e = 2, 0
# Barycentre of the two-body system.
x_c, y_c = (m_s*x_s+m_e*x_e)/(m_s+m_e), (m_s*y_s+m_e*y_e)/(m_s+m_e)
# Gravitational constant (arbitrary units).
gamma = 1.0
# Grid on which the co-rotating-frame acceleration field is evaluated.
x = np.linspace(-2.5, 4, 1000)
y = np.linspace(-2.5, 2.5, 1000)
X, Y = np.meshgrid(x, y)
# Angular velocity of the co-rotating frame (Kepler's third law).
Omega = np.sqrt(gamma*((m_s+m_e)/(((x_s-x_e)**2+(y_s-y_e)**2)**(3/2))))
def f(X, Y):
    """
    Effective acceleration field in the co-rotating frame: centrifugal
    term about the barycentre minus the gravitational pull of both
    bodies. Returns the (U, V) components evaluated at (X, Y).

    Reads the module-level constants; the former `global` statement was
    unnecessary since the names are only read, never assigned.
    """
    U = Omega**2*(X-x_c) - gamma*m_s*((X-x_s)/(((X-x_s)**2+(Y-y_s)**2)**(3/2))) \
        - gamma*m_e*((X-x_e)/(((X-x_e)**2+(Y-y_e)**2)**(3/2)))
    V = Omega**2*(Y-y_c) - gamma*m_s*((Y-y_s)/(((X-x_s)**2+(Y-y_s)**2)**(3/2))) \
        - gamma*m_e*((Y-y_e)/(((X-x_e)**2+(Y-y_e)**2)**(3/2)))
    return U, V

U, V = f(X, Y)
def findZero(i1, i2, f, I=50):
    """
    Secant-method root finder for a vector function.

    i1, i2: two starting points (sequences of coordinates).
    f: callable taking the unpacked coordinates and returning the
       residual vector.
    I: maximum number of iterations.

    Bug fix: once an iterate repeated, the original kept dividing by a
    zero secant denominator and turned the result into NaN; we now stop
    as soon as any denominator component vanishes (converged or stalled).
    """
    rn = np.asarray(i1, dtype=float)
    rp = np.asarray(i2, dtype=float)
    for _ in range(I):
        fn = np.asarray(f(*rn), dtype=float)
        fp = np.asarray(f(*rp), dtype=float)
        denom = fn - fp
        if np.any(denom == 0):
            break
        rn, rp = rn - fn * ((rn - rp) / denom), rn
    return rn
# Inertial-frame version of the field, kept commented for reference:
#U = gamma*( -(m_s/((X**2+Y**2)**(3/2)))*X - (m_e/(((X-d)**2+Y**2)**(3/2)))*(X-d) )
#V = gamma*( -(m_s/((X**2+Y**2)**(3/2)))*Y - (m_e/(((X-d)**2+Y**2)**(3/2)))*Y )
# Reference acceleration magnitude at the separation of the two bodies.
# NOTE(review): (y_s+y_e) looks like it was meant to be (y_s-y_e);
# both are 0 here, so the value is unaffected - confirm.
G_e = m_s/(((x_s-x_e)**2+(y_s+y_e)**2)**(3/2))
# Half-width of the band of |acceleration| values highlighted below.
D = 0.1
# Magnitude of the acceleration field.
A = np.sqrt(U**2+V**2)
#print(np.max(A))
# Mask of grid points whose acceleration magnitude lies within D of G_e.
sel = np.logical_and(A <= (G_e+D), A >= (G_e-D))
#print(np.sum(sel))
fig, ax = plt.subplots()
def limit(x, l=5):
    """Clamp every element of the array *x* into [-l, l] in place; return x."""
    np.clip(x, -l, l, out=x)
    return x
def limit2d(x, y, l=5):
    """
    Clamp the 2-D vectors (x[i], y[i]) to length at most *l* while
    preserving direction. Operates in place and returns (x, y).

    Bug fix: the original rescaled x first and then recomputed the norm
    from the *already modified* x, so y was scaled by the wrong factor
    and the clamped vectors did not keep their direction.
    """
    r = np.hypot(x, y)
    m = r > l
    scale = l / r[m]
    x[m] = x[m] * scale
    y[m] = y[m] * scale
    return x, y
# Clamp the field and its magnitude for display.
U, V = limit2d(U, V)
A = limit(A)

xmin, xmax, ymin, ymax = np.amin(x), np.amax(x), np.amin(y), np.amax(y)
extent = xmin, xmax, ymin, ymax
plt.imshow(5-A, cmap=plt.cm.jet, alpha=1, extent=extent)
#plt.colorbar()
plt.imshow(sel, cmap=plt.cm.jet, alpha=0.5, extent=extent)
#plt.colorbar()

# Bug fix: slice steps must be integers; true division yields a float and
# makes the quiver slicing raise TypeError on Python 3.
stx = x.shape[0] // 10
sty = y.shape[0] // 10
Q = plt.quiver(X[::stx, ::sty], Y[::stx, ::sty], U[::stx, ::sty], V[::stx, ::sty],
               scale=100, width=.002, linewidth=1)

patches = []
colors = []
Sun = mp.Circle((x_s, y_s), 0.1, ec='none')
patches.append(Sun)
colors.append('#FFFF00')
Earth = mp.Circle((x_e, y_e), 0.1, ec='none')
patches.append(Earth)
colors.append('#0000FF')

p = [(0, 0)]  # NOTE(review): unused; original comment said "make set" - confirm before removing
# Two secant-method starting pairs, one on each side of the system.
initial = [
    [[1, 1], [0.5, 0.5]],
    [[-1, -1], [-0.5, -0.5]],
]
for i1, i2 in initial:
    pn = findZero(np.array(i1), np.array(i2), f)
    print(pn)
    L = mp.Circle((pn[0], pn[1]), 0.1, ec='none')
    patches.append(L)
    colors.append('#FFFFFF')

collection = PatchCollection(patches, facecolors=colors)
ax.add_collection(collection)
plt.axis('off')
plt.savefig("lagrangePoints.png")
plt.axis('on')
plt.show()
{
"api_name": "numpy.linspace",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_n... |
31063781935 |
from ..utils import Object
class MessageForwardInfo(Object):
    """
    Contains information about a forwarded message

    Attributes:
        ID (:obj:`str`): ``MessageForwardInfo``

    Args:
        origin (:class:`telegram.api.types.MessageForwardOrigin`):
            Origin of a forwarded message
        date (:obj:`int`):
            Point in time (Unix timestamp) when the message was originally sent
        public_service_announcement_type (:obj:`str`):
            The type of a public service announcement for the forwarded message
        from_chat_id (:obj:`int`):
            Identifier of the chat the message was last forwarded from
            (Saved Messages, the Replies bot chat, or the channel's
            discussion group); 0 if unknown
        from_message_id (:obj:`int`):
            Identifier of the original message it was last forwarded from
            in that chat; 0 if unknown

    Returns:
        MessageForwardInfo

    Raises:
        :class:`telegram.Error`
    """
    ID = "messageForwardInfo"

    def __init__(self, origin, date, public_service_announcement_type, from_chat_id, from_message_id, **kwargs):
        self.origin = origin  # MessageForwardOrigin
        self.date = date  # int
        self.public_service_announcement_type = public_service_announcement_type  # str
        self.from_chat_id = from_chat_id  # int
        self.from_message_id = from_message_id  # int

    @staticmethod
    def read(q: dict, *args) -> "MessageForwardInfo":
        return MessageForwardInfo(
            Object.read(q.get('origin')),
            q.get('date'),
            q.get('public_service_announcement_type'),
            q.get('from_chat_id'),
            q.get('from_message_id'),
        )
| iTeam-co/pytglib | pytglib/api/types/message_forward_info.py | message_forward_info.py | py | 2,080 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "utils.Object",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "utils.Object.read",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "utils.Object",
"line_number": 43,
"usage_type": "name"
}
] |
73391053864 | from gym import spaces
from gym import Env
import numpy as np
from pathlib import Path
from PIL import Image
from gym.utils import seeding
from spg.view import TopDownView
import arcade
from spg.playground import Playground, Room
from spg.playground.collision_handlers import get_colliding_entities
from spg.utils.definitions import CollisionTypes
from spg.element import ColorWall
# My Custom Entities
from resources.apple import Apple, AppleCollisionType
from resources.reversedForwardBase import ReversedForwardBase
from resources.reverseHeadAgent import ReverseHeadAgent
# Created a Custom Collision Handler to confirm when an Apple and Agent collide
def apple_agent_collision(arbiter, _, data):
    """Collision callback (signature follows the physics engine's handler
    convention - confirm); activates the agent when its own apple hits it.

    Returns True so the engine continues processing the collision.
    """
    playground: Playground = data["playground"]
    (apple, _), (agent, _) = get_colliding_entities(playground, arbiter)
    assert isinstance(apple, Apple)
    assert isinstance(agent, ReversedForwardBase)
    # Only activate when the apple belongs to this agent.
    if apple.agent == agent:
        agent.activate(apple)
    return True
class PerturbationEnv(Env):
    """Gym environment wrapping an spg playground with one agent, walls,
    and apple-collection rewards."""

    def __init__(self):
        """Build the playground (room, agent, walls), the GUI view, and the
        gym action/observation spaces derived from the agent's controllers
        and sensors."""
        super().__init__()
        self.seed()
        # Initialization of playground and interaction
        playground = Room(size=(256, 256), wall_color=arcade.color.AERO_BLUE)
        playground.add_interaction(
            AppleCollisionType.APPLE, CollisionTypes.PART, apple_agent_collision
        )
        # Initialization of agent (can reverse X or Y controls on creation)
        agent = ReverseHeadAgent(reverse=(False, False))
        playground.add(agent)
        # Initialization of walls
        wall_1 = ColorWall(
            pos_start=(50, 50),
            pos_end=(100, 100),
            width=5,
            color=arcade.color.AERO_BLUE,
        )
        playground.add(wall_1, ((50, 0), 0))
        wall_2 = ColorWall(
            pos_start=(50, 50),
            pos_end=(100, 100),
            width=5,
            color=arcade.color.AERO_BLUE,
        )
        playground.add(wall_2, ((-50, 0), 0))
        wall_3 = ColorWall(
            pos_start=(-50, 50),
            pos_end=(100, 50),
            width=5,
            color=arcade.color.AERO_BLUE,
        )
        playground.add(wall_3, wall_3.wall_coordinates)
        self.playground = playground
        self.agent = self.playground.agents[0]
        self.playground.time_limit = 1000
        self.gui = TopDownView(self.playground)
        self.images = []
        self.no_of_apples = self.num_apples()
        # Code for creating action and observation space taken from:
        # https://github.com/gaorkl/spg-experiments/blob/master/spg_experiments/envs/spg/base.py
        # Create action space
        lows = []
        highs = []
        # NOTE(review): zip() over one iterable yields 1-tuples, hence
        # controller[0]; iterating self.agent.controllers directly would be
        # equivalent - confirm before simplifying.
        for controller in zip(self.agent.controllers):
            lows.append(controller[0].min)
            highs.append(controller[0].max)
        self.action_space = spaces.Box(
            low=np.array(lows).astype(np.float32),
            high=np.array(highs).astype(np.float32),
            dtype=np.float32,
        )
        # Create observation space: one flat vector of all sensor values.
        elems = 0
        for sensor in self.agent.sensors:
            if isinstance(sensor.shape, int):
                elems += sensor.shape
            else:
                elems += np.prod(sensor.shape)
        self.observation_space = spaces.Box(
            low=0,
            high=1,
            shape=(elems,),
            dtype=np.float32,
        )
    def step(self, action):
        """Apply one action vector, advance the simulation one tick, and
        return (observation, reward, done, info)."""
        # Code for obtaining the controller names was taken from:
        # https://github.com/gaorkl/spg-experiments/blob/master/spg_experiments/envs/spg/base.py
        commands = {}
        command_dict = {}
        # Map each scalar of the action vector onto the matching controller.
        for controller, act in zip(self.agent.controllers, action):
            commands[controller.name] = act
        command_dict[self.agent] = commands
        _observation, msg, reward, _done = self.playground.step(commands=command_dict)
        reward = reward[self.agent]
        # Small per-step penalty to encourage finishing quickly.
        reward -= 0.01
        observation = self._get_obs()
        if msg is None:
            msg = {}
        # Checks if all apples have been eaten or the time limit expired.
        # NOTE(review): `done` uses the apple count from the *previous* step;
        # the count is only refreshed afterwards - confirm intended.
        done = bool(self.no_of_apples == 0) or bool(
            self.playground.timestep >= self.playground.time_limit
        )
        self.gui.update()
        self.no_of_apples = self.num_apples()
        return observation, reward, done, msg
def render(self, mode="rgb_array"):
im = Image.fromarray(self.gui.get_np_img())
self.images.append(im)
return None
def reset(self):
self.playground.reset()
observation = self._get_obs()
self.images = []
self.no_of_apples = self.num_apples()
return observation
# Additional methods for functionality
def num_apples(self):
    """Count the Apple elements currently present in the playground."""
    return sum(1 for element in self.playground.elements if isinstance(element, Apple))
# Saves all images to a file to name.png
def save_gif(self, name):
    """Write the frames captured by render() to gifs/<name>.gif as an animation."""
    first, rest = self.images[0], self.images[1:]
    first.save(
        Path(f"gifs/{name}.gif"),
        format="GIF",
        save_all=True,
        append_images=rest,
        duration=10,
        loop=1,
    )
# Code for seed taken from:
# https://github.com/openai/gym/blob/master/gym/envs/box2d/lunar_lander.py
def seed(self, seed=None):
    """Seed the environment's RNG; returns [seed] per the gym seeding API."""
    self.np_random, seed = seeding.np_random(seed)
    return [seed]
# Code for get and process observation taken from:
# https://github.com/gaorkl/spg-experiments/blob/master/spg_experiments/envs/spg/base.py
# Calculate values for sensors on agent and return numpy array
def _get_obs(self):
    """Collect the raw values of every agent sensor and flatten them."""
    sensor_values = {sensor.name: sensor._values for sensor in self.agent.sensors}
    return self.process_obs(sensor_values)
# Creates numpy array from values in _get_obs()
def process_obs(self, obs):
    """Flatten each sensor array and concatenate them into one 1-D vector."""
    flattened = [component.ravel() for component in obs.values()]
    return np.concatenate(flattened)
| aomerCS/IN3007 | gym_env/envs/perturbation_world.py | perturbation_world.py | py | 6,057 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "spg.playground.Playground",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "spg.playground.collision_handlers.get_colliding_entities",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "resources.apple.Apple",
"line_number": 27,
"usage_type... |
22057061997 | import pandas as pd
import helper
import weaviate
# initiate the Weaviate client
client = weaviate.Client("http://localhost:8080")
# NOTE(review): presumably (connect, read) timeouts in seconds; the long read
# timeout accommodates large batch inserts -- confirm against weaviate-client docs.
client.timeout_config = (3, 200)
# empty schema and create new schema
client.schema.delete_all()
# Minimal schema: a single "Wine" class with free-text title and description.
schema = {
    "classes": [
        {
            "class": "Wine",
            "properties": [
                {
                    "name": "title",
                    "dataType": ["text"]
                },
                {
                    "name": "description",
                    "dataType": ["text"]
                }
            ]
        }
    ]
}
client.schema.create(schema)
# open wine dataset (10000 items)
df = pd.read_csv('data/wine_reviews.csv', index_col=0)
def _flush_batch(debug_mode=False):
    """Submit the pending batch to Weaviate; log per-object errors in debug mode."""
    results = client.batch.create_objects()
    if debug_mode and results:
        for result in results:
            if result['result'] != {}:
                helper.log(result['result'])

def add_wines(data, batch_size=512, debug_mode=False):
    """ upload wines to Weaviate
    :param data: wine data in panda dataframe object
    :type data: panda dataframe object (2 columns: 'title' and 'description')
    :param batch_size: number of data objects to put in one batch, defaults to 512
    :type batch_size: int, optional
    :param debug_mode: set to True if you want to display upload errors, defaults to False
    :type debug_mode: bool, optional
    """
    no_items_in_batch = 0
    for index, row in data.iterrows():
        wine_object = {
            "title": row["title"] + '.',
            "description": row["description"],
        }
        # Deterministic UUID from title+description makes re-imports idempotent.
        wine_uuid = helper.generate_uuid('wine', row["title"]+row["description"])
        client.batch.add_data_object(wine_object, "Wine", wine_uuid)
        no_items_in_batch += 1
        if no_items_in_batch >= batch_size:
            _flush_batch(debug_mode)
            message = str(index) + ' / ' + str(data.shape[0]) + ' items imported'
            helper.log(message)
            no_items_in_batch = 0
    # Flush the final partial batch; previously its upload errors were silently
    # dropped even when debug_mode was on.
    _flush_batch(debug_mode)
# Import the first 2500 reviews in batches of 99 with error logging enabled.
add_wines(df.head(2500), batch_size=99, debug_mode=True)
{
"api_name": "weaviate.Client",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "helper.generate_uuid",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "helper.log",
... |
6862212397 | # Utilities
############################################################################
# Imports
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import pickle
############################################################################
# Function to read the embedding_dataset from the "total_embedding.pickle"
def creatEmbedding(path="C:\\Users\\balln\\Desktop\\PART_PythonGUI\\data_base\\total_embedding.pickle"):
    """Load and return the pickled embedding dataset.

    :param path: pickle file to read; defaults to the original hard-coded
        location so existing callers keep working.
    """
    with open(path, 'rb') as f:
        embedding_dataset = pickle.load(f)
    return embedding_dataset
############################################################################
# Returns 2 embedding vectors one of the author of the imposter text and one for William Shakespeare
def twoEmbeddings(embedding_dataset, imposterText):
    """Return (Shakespeare embedding, imposter-book embedding); None when absent."""
    def first_embedding(mask):
        # First matching row's embedding, or None if nothing matches.
        matches = embedding_dataset[mask]
        if matches.empty:
            return None
        return matches['embedding'].iloc[0]

    embedding_imposter = first_embedding(embedding_dataset['book'] == imposterText)
    embedding_shake = first_embedding(embedding_dataset['author'] == 'Shakespeare')
    return embedding_shake, embedding_imposter
#################################################################################################
# Return cosine Similarity between 2 embeddings
def cosine_similarity_percentage(embed1, embed2):
    """Cosine similarity of two 1-D vectors, expressed as a percentage."""
    sim_matrix = cosine_similarity(embed1.reshape(1, -1), embed2.reshape(1, -1))
    # Guard against floating-point drift outside [-1, 1] before scaling.
    return np.clip(sim_matrix[0][0], -1, 1) * 100
| TuvalZit/Capstone-Project-23-1-R-18 | GUI/Backend/GetEmbedding.py | GetEmbedding.py | py | 1,878 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pickle.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.pairwise.cosine_similarity",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 39,
"usage_type": "call"
}
] |
36127494424 | from fastapi import FastAPI, Path
from typing import Optional
from pydantic import BaseModel
class Item(BaseModel):
    """Request/response model for an inventory item."""
    name: str
    price: float
    # Optional display color; may be omitted from payloads.
    color: Optional[str] = None
# In-memory "database"; keys are item ids.
inventory = {
    1: {
        'name': 'Milk',
        'price': 3.99,
        'color': 'white'
    }
}

app = FastAPI()

@app.get('/')
async def home():
    # NOTE(review): returns a debug list; the commented-out dict looks like the
    # intended response -- confirm before shipping.
    return [1,'a','3']#{"Welcome": "Home Python"}

@app.get("/items/{item_id}")
async def get_item(item_id:int = Path(None, description="This is item get method")):
    # NOTE(review): unknown ids raise KeyError (HTTP 500); an HTTPException(404)
    # would be the conventional FastAPI response.
    return inventory[item_id]
@app.post('/create-items/{item_id}')
async def create_item(item_id:int, item: Item):
    """Create an item under item_id; duplicates are rejected with a message."""
    if item_id not in inventory:
        inventory[item_id] = item
        return inventory[item_id]
    return "Item already exists"
| Kelvingandhi/kafka_sample | test_main.py | test_main.py | py | 761 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "pydantic.BaseModel",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "fastapi.FastAPI",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "fastapi.Path",
"... |
23992985122 | '''
542. 01 Matrix
https://leetcode.com/problems/01-matrix/
'''
from collections import defaultdict, deque
from typing import List
# Approach 1: BFS from '1' columns to '0'
# The problem is we do BFS for 1, which is sub-optimal
class Solution:
def updateMatrix(self, matrix: List[List[int]]) -> List[List[int]]:
m = len(matrix)
n = len(matrix[0])
def bfs(i,j):
q = deque()
q.append((i,j,0))
visited = set()
visited.add((i,j))
while q:
x, y, distance = q.popleft()
if matrix[x][y] == 0:
return distance
neighbors = [(x,y+1), (x,y-1), (x-1,y), (x+1,y)]
for i,j in neighbors:
if i<0 or i>=m or j<0 or j>=n:
continue
if (i,j) not in visited:
q.append((i,j,distance+1))
visited.add((i,j))
for i in range(m):
for j in range(n):
if matrix[i][j] == 1:
matrix[i][j] = bfs(i,j)
return matrix
# Approach 2: Simultaneous BFS
# Instead of calling BFS from each '1' cell, go in the other direction
# from '0' cell to '1' and update distance - this will be guaranteed to be shortest using BFS
from collections import deque
class Solution:
def updateMatrix(self, matrix: List[List[int]]) -> List[List[int]]:
R = len(matrix)
if R == 0:
return matrix
C = len(matrix[0])
#dist = defaultdict(lambda: float('inf'))
queue = deque()
for r in range(R):
for c in range(C):
if matrix[r][c] == 0:
queue.append((r,c))
else:
matrix[r][c] = float('inf')
dr = [-1,1,0,0]
dc = [0,0,1,-1]
# bfs
while queue:
r, c = queue.popleft()
# 4 directions - east, west, north, south
for i in range(4):
rr = r + dr[i]
cc = c + dc[i]
# eliminate border cases
if rr >= 0 and cc >=0 and rr < R and cc < C:
if matrix[rr][cc] > matrix[r][c] + 1:
matrix[rr][cc] = matrix[r][c] + 1
queue.append((rr,cc))
return matrix
| asset311/leetcode | bfs/01_matrix.py | 01_matrix.py | py | 2,531 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "collections.deque",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "collections.deque",
"li... |
29981196912 | # Example: echo server, using StreamServer
import logging
import argparse
from gruvi import get_hub, util
from gruvi.stream import StreamServer
# Default logging config so library warnings are visible on stderr.
logging.basicConfig()

# Single required positional argument: the TCP port to listen on.
parser = argparse.ArgumentParser()
parser.add_argument('port', type=int)
args = parser.parse_args()
def echo_handler(stream, protocol, client):
    """Echo every chunk received on *stream* back to the peer until EOF."""
    peer = client.getpeername()
    print('Connection from {0}'.format(util.saddr(peer)))
    chunk = stream.read(4096)
    while chunk:
        stream.write(chunk)
        chunk = stream.read(4096)
    print('Connection closed')
# Listen on all interfaces; each connection is handled by echo_handler.
server = StreamServer(echo_handler)
server.listen(('0.0.0.0', args.port))
# Hand control to the gruvi hub to run the event loop.
# NOTE(review): interrupt=True presumably lets Ctrl-C return control here --
# confirm against the gruvi hub documentation.
hub = get_hub()
hub.switch(interrupt=True)
| cocagne/gruvi | examples/echoserver1.py | echoserver1.py | py | 667 | python | en | code | null | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "gruvi.util.saddr",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "gruvi.uti... |
3315546924 | import nltk
def nouns_transform(sentence):
    """Return the nouns in *sentence* (tokens whose POS tag starts with 'NN')."""
    tagged = nltk.pos_tag(nltk.word_tokenize(sentence))
    return [word for (word, pos) in tagged if pos[:2] == 'NN']
def render_template(template_name='html pages/home.html', context=None):
    """Read the template file and substitute {placeholders} from *context*.

    :param template_name: path of the HTML template to render.
    :param context: mapping of placeholder names to values; defaults to empty.
        (Previously a mutable ``{}`` default -- replaced with the None sentinel.)
    :returns: the rendered HTML string.
    """
    if context is None:
        context = {}
    # Explicit encoding so rendering does not depend on the platform default.
    with open(template_name, 'r', encoding='utf-8') as f:
        html_str = f.read()
    return html_str.format(**context)
def main(environ, start_response):
    """WSGI entry point: routes '?text=...' to the nouns page, '/' to home,
    anything else to the 404 page.
    """
    from urllib.parse import parse_qs

    query = environ.get("QUERY_STRING")
    path = environ.get("PATH_INFO")
    # parse_qs performs full URL decoding; the old hand-rolled parser only
    # undid '+' and '%27' and treated ANY long-enough query as a text= query.
    text_values = parse_qs(query or "").get("text", [])
    if text_values and text_values[0]:
        sentence = text_values[0]
        nouns = nouns_transform(sentence)
        data = render_template(template_name='html pages/nouns.html', context={"nouns_key": nouns})
    elif path == "/":
        data = render_template()
    else:
        # NOTE(review): the 404 page is still served with status 200 -- confirm intended.
        data = render_template(template_name='html pages/404.html')
    data = data.encode("utf-8")
    start_response(
        "200 OK", [
            ("Content-Type", "text/html"),
            ("Content-Length", str(len(data)))
        ]
    )
    return iter([data])
| valya007/junior_technical_test | webapp.py | webapp.py | py | 1,171 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.word_tokenize",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "nltk.pos_tag",
"line_number": 9,
"usage_type": "call"
}
] |
18736574490 | from django import forms
from ..contrib.sysdate import dateFromLocal
class DateBigInput(forms.DateInput):
    """Date widget wired to bootstrap-datepicker with the Thai locale."""
    def value_from_datadict(self, data, files, name):
        # Convert the localized string from the datepicker into a date value.
        valData = super().value_from_datadict(data, files, name)
        return dateFromLocal(valData)

    def get_context(self, name, value, attrs):
        context = super().get_context(name, value, attrs)
        # data-provide activates bootstrap-datepicker on this input.
        context['widget']['attrs']['data-provide'] = 'datepicker'
        context['widget']['attrs']['data-date-language'] = 'th-th'
        # context['widget']['attrs']['data-date-startdate'] = '-10d'
        # context['widget']['attrs']['data-date-enddate'] = '+10d'
        context['widget']['attrs']['class'] = 'form-control'
        return context
class NumberBigInput(forms.TextInput):
    """Text input rendered with a currency input mask."""
    def get_context(self, name, value, attrs):
        context = super().get_context(name, value, attrs)
        context['widget']['attrs'].update({
            'data-inputmask-alias': 'currency',
            'class': 'form-control',
        })
        return context


class TextBigInput(forms.TextInput):
    """Plain text input carrying bootstrap's form-control class."""
    def get_context(self, name, value, attrs):
        context = super().get_context(name, value, attrs)
        context['widget']['attrs'].update({'class': 'form-control'})
        return context


class DecimalBigInput(forms.TextInput):
    """Decimal input rendered with a currency input mask."""
    def get_context(self, name, value, attrs):
        context = super().get_context(name, value, attrs)
        context['widget']['attrs'].update({
            'data-inputmask-alias': 'currency',
            'class': 'form-control',
        })
        return context


class IntegerBigInput(forms.TextInput):
    """Integer input rendered with Inputmask's integer alias."""
    def get_context(self, name, value, attrs):
        context = super().get_context(name, value, attrs)
        context['widget']['attrs'].update({
            'data-inputmask-alias': 'integer',
            'class': 'form-control',
        })
        return context
# class ChoiceBigSelect(forms.Select):
#     def get_context(self, name, value, attrs):
#         context = super().get_context(name, value, attrs)
#         context['widget']['attrs']['class'] = 'form-control'
#         return context


class FormMixinBig:
    """Mixin that normalizes widget styling and required-ness of form fields.

    Subclass knobs:
      readonly      -- field names rendered read-only, or the string 'all'
      field_require -- field names forced to required, or 'all'
      currency      -- field names rendered with a currency input mask
    """
    use_required_attribute = False
    readonly = []
    field_require = []
    currency = []
    # selectChoice = []

    def form_init(self):
        # Order matters: Meta-declared widgets are patched first, then the
        # bound field instances.
        self.__WidgetBase()
        self.__FieldBase()

    def __FieldBase(self):
        for field in iter(self.fields):
            # Everything starts optional; field_require re-enables below.
            self.fields[field].required = False
            fieldType = self.fields[field].widget.__class__.__name__
            if fieldType not in ['Select', 'Select2','HiddenInput']:
                # Append bootstrap's form-control class if not already present.
                clsAttr = self.fields[field].widget.attrs.get('class','')
                if clsAttr.find('form-control') < 0 :
                    self.fields[field].widget.attrs['class'] = '{} form-control'.format(clsAttr)
            if fieldType in ['DateInput', 'DateTimeInput']:
                self.fields[field].widget = DateBigInput()
            if field in self.currency:
                self.fields[field].widget = NumberBigInput()
            if field in self.readonly or self.readonly == 'all':
                self.fields[field].widget.attrs['readonly'] = True
                # Date widgets are disabled outright because the datepicker
                # ignores the readonly attribute.
                if fieldType in ['DateInput', 'DateTimeInput']:
                    self.fields[field].widget.attrs['disabled'] = True
            if field in self.field_require or self.field_require == 'all':
                self.fields[field].required = True
                # if self.fields[field].label and not self.fields[field].label[0:1] == '*':
                #     self.fields[field].label = '*{}'.format(self.fields[field].lable)

    def __WidgetBase(self):
        # Nothing to patch unless the subclass declared Meta.widgets.
        if not hasattr(self,'Meta') : return
        if not hasattr(self.Meta, 'widgets'): return
        for widget in self.Meta.widgets:
            fieldType = self.Meta.widgets[widget].__class__.__name__
            if fieldType not in ['Select', 'Select2','HiddenInput']:
                clsAttr = self.Meta.widgets[widget].attrs.get('class', '')
                if clsAttr.find('form-control') < 0:
                    self.Meta.widgets[widget].attrs['class'] = '{} form-control'.format(clsAttr)
class BaseEditModelForm(forms.ModelForm,FormMixinBig):
    """ModelForm that applies the FormMixinBig styling/required rules."""
    def __init__(self,*args,**kwargs):
        super().__init__(*args,**kwargs)
        self.form_init()

    def clean(self):
        super().clean()
        # Normalize falsy cleaned values ('', [], None, ...) to None, but keep
        # anything that compares equal to 0 (0, 0.0, False).
        for f in self.cleaned_data:
            if not self.cleaned_data[f] and not self.cleaned_data[f] == 0:
                self.cleaned_data[f] = None


class BaseEditForm(forms.Form,FormMixinBig):
    """Plain Form that applies the FormMixinBig styling/required rules."""
    def __init__(self,*args,**kwargs):
        super().__init__(*args,**kwargs)
        self.form_init()
| MindKafuu/demo | demoWeb/extends/formext.py | formext.py | py | 4,630 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.forms.DateInput",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "contrib.sysdate.dateFromLocal",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "d... |
28700017158 | #aoc20150101
import pathlib
import sys
def parse(path):
    """Read the file at *path* and return its characters as a list."""
    contents = pathlib.Path(path).read_text()
    return [ch for ch in contents]
def solve(puzzleInput):
    """Return the final floor: '(' goes up one, ')' goes down one.

    Characters other than parentheses are ignored. The previous version
    counted ANY non-'(' character as "down one", so a trailing newline in
    the puzzle input skewed the answer by one.
    """
    finalFloor = 0
    for datum in puzzleInput:
        if datum == '(':
            finalFloor += 1
        elif datum == ')':
            finalFloor -= 1
    return finalFloor
if __name__ == "__main__":
for path in sys.argv[1:]:
puzzleInput = parse(path)
print(puzzleInput, solve(puzzleInput))
| dbm19/aoc2015 | aoc201501/aoc20150101.py | aoc20150101.py | py | 401 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 22,
"usage_type": "attribute"
}
] |
69846477224 | import logging
from random import randint
from time import sleep
from typing import List
import pandas as pd
import requests # type: ignore
from models.auction import Lot, Lots
from models.item import Item, Items
from models.media import Media, Medias
from scrape.item_scraper import ItemScraper
from scrape.lot_scraper import LotScraper
from scrape.media_scraper import MediaScraper
from utils.aws import S3Connector
logger = logging.getLogger(__name__)
class AuctionScraper:
    """Crawls auction lots, then item and media details, persisting to S3."""

    def __init__(self, access_token: str, bucket: str):
        self.access_token = access_token
        self.s3_connector = S3Connector(bucket)

    def crawl(self, sample: int = 0):
        """Crawl lots, then items/media for any item ids not yet scraped.

        :param sample: when > 0, only crawl this many randomly sampled item ids.
        """
        lots_scraper = LotScraper(self.access_token)
        lots: Lots = lots_scraper.crawl()
        if lots:
            df = pd.DataFrame.from_records(lots.dict()["lots"])
            path = self.s3_connector.create_parquet_path("lots")
            self.s3_connector.write_parquet(df, path)
            logger.info(f"Wrote to {path}")
            logger.info(f"{len(lots.lots)} lots crawled..")
            self.item_ids = list({lot.item.id for lot in lots.lots})
            logger.info(f"{len(self.item_ids)} item ids found..")
        # NOTE(review): self.item_ids is only assigned when lots were found; an
        # empty lot crawl raises AttributeError below -- confirm that can't happen.
        scraped_item_ids: List[int] = self._read_item_ids()
        print(f"Previous {len(scraped_item_ids)} item ids found")
        # Skip ids that were already processed by a previous run.
        item_ids = [
            item_id for item_id in self.item_ids if item_id not in scraped_item_ids
        ]
        if sample > 0:
            # Fixed: the old code appended the random *indices* themselves
            # instead of the item ids they pointed at.
            item_ids = [
                item_ids[randint(0, len(item_ids) - 1)] for _ in range(sample)
            ]
        logger.info(f"{len(item_ids)} item ids found. Crawling items..")
        print(f"{len(item_ids)} item ids found")
        item_scraper = ItemScraper(self.access_token)
        media_scraper = MediaScraper(self.access_token)
        handled_items: List[Item] = []
        handled_media: List[Media] = []
        try:
            logger.info(f"Crawling {len(item_ids)}")
            for i, item_id in enumerate(item_ids):
                item: Item = item_scraper.crawl(item_id)
                sleep(1)  # be polite to the API
                if item:
                    handled_items.append(item)
                media: Media = media_scraper.crawl(item_id)
                sleep(1)
                if media:
                    handled_media.append(media)
                scraped_item_ids.append(item_id)
                logger.info(f"Item {i+1}/{len(item_ids)} crawled.")
                print(f"Item {i+1}/{len(item_ids)} crawled.")
                # write intermediate handled item_ids to s3 in case of failure
                if (i + 1) % 100 == 0:
                    self._store(
                        items=handled_items,
                        media=handled_media,
                        item_ids=scraped_item_ids,
                    )
            # write the final (partial) batch
            self._store(
                items=handled_items,
                media=handled_media,
                item_ids=scraped_item_ids,
            )
        except Exception as e:
            logger.error(e)
            logger.info("Storing processed item_ids.")

    def _store(self, items: list[Item], media: list[Media], item_ids: list[int]):
        """Persist items/media as parquet and the processed ids as CSV.

        Clears *items* and *media* in place so the caller does not re-upload
        the same records on the next periodic flush. (The old code rebound the
        local names with ``items = []``, which had no effect on the caller's
        lists and produced duplicate rows every 100 items.)
        """
        items_ = Items(items=items)
        df_items = pd.DataFrame.from_records(items_.dict()["items"])
        self._write_to_s3(df_items, "items")
        medias_ = Medias(media=media)
        df_media = pd.DataFrame.from_records(medias_.dict()["media"])
        self._write_to_s3(df_media, "media")
        self._write_item_ids(item_ids)
        items.clear()
        media.clear()

    def random_lots(self, lots: List[Lot], size: int = 1) -> List[Lot]:
        """Return *size* randomly chosen lots (repeats possible)."""
        # Fixed: randint's upper bound is inclusive, so randint(0, len(lots))
        # could index one past the end of the list.
        return [lots[randint(0, len(lots) - 1)] for _ in range(size)]

    def _write_to_s3(self, df: pd.DataFrame, name: str):
        """Write *df* as parquet under a path derived from *name*."""
        path = self.s3_connector.create_parquet_path(name)
        self.s3_connector.write_parquet(df, path)
        logger.info(f"Wrote to {path}")

    def _read_item_ids(self) -> List[int]:
        """Read the ids already processed in earlier runs from S3."""
        df = self.s3_connector.read_csv("item_ids.csv")
        return list(df["id"].values)

    def _write_item_ids(self, item_ids: List[int]) -> pd.DataFrame:
        """Overwrite the processed-id CSV on S3."""
        df = pd.DataFrame(item_ids, columns=["id"])
        self.s3_connector.write_csv(df, "item_ids.csv")
| lassebenni/scraper-wow-auctionhouse | scrape/auction_scraper.py | auction_scraper.py | py | 4,503 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "utils.aws.S3Connector",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "scrape.lot_scraper.LotScraper",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": ... |
26285045448 | from functools import wraps
from . import database as db
from .reddit import user_exists, subreddit_exists
from sqlalchemy.exc import IntegrityError
from . import constants as c
from .template import get_template
from .utils import message_url
from sqlalchemy.orm import joinedload
# Registry of "!command" handlers: name -> (func, required_arg_names, owner_only).
_COMMANDS = {}
# Registry of mention handlers: name -> (func, required_arg_names).
_MENTION_COMMANDS = {}

# Reply templates rendered through the base Jinja template.
_INV_MSG = r"""Invalid arguments specified for {cmd}.
Required arguments: {args}
"""
_SUB_EXISTS_MSG = r"""You are already subscribed to /u/{author} on /r/{subreddit}
"""
_SUB_NOT_EXISTS_MSG = r"""You are not already subscribed to /u/{author} on /r/{subreddit}
"""
_SUB_REMOVED_MSG = r"""You are now unsubscribed to /u/{author} on /r/{subreddit}
"""
def command(command, *fargs, owner_only=False):
    """Decorator: register a handler for the '!<command>' private message."""
    def decorator(func):
        _COMMANDS[command] = (func, fargs, owner_only)
        return func
    return decorator


def mention_command(command, *fargs):
    """Decorator: register a handler for '<bot mention> <command>'."""
    def decorator(func):
        _MENTION_COMMANDS[command] = (func, fargs)
        return func
    return decorator
def check_mention(message):
    """Dispatch a '<bot mention> <command> ...' message to its handler."""
    tokens = message.body.strip().split()
    # tokens[0] is the bot mention itself; tokens[1] is the command name.
    cmd_name = tokens[1].lower()
    args = tokens[2:]
    entry = _MENTION_COMMANDS.get(cmd_name)
    if entry is None:
        return
    func, rargs = entry
    if len(args) != len(rargs):
        message.reply(
            get_template("base.j2").render(
                message=_INV_MSG.format(cmd=cmd_name, args=", ".join(rargs))
            )
        )
        return
    func(message, *args)
def check_command(message):
    """Dispatch a '!command ...' private message to its registered handler."""
    tokens = message.body.strip().split()
    cmd_name = tokens[0].lower()[1:]  # strip the leading '!'
    args = tokens[1:]
    entry = _COMMANDS.get(cmd_name)
    if entry is None:
        return
    func, rargs, owner_only = entry
    # Owner-only commands are silently ignored for anyone else.
    if owner_only and message.author.name.lower() != c.OWNER_USERNAME:
        return
    if len(args) != len(rargs):
        message.reply(
            get_template("base.j2").render(
                message=_INV_MSG.format(cmd=cmd_name, args=", ".join(rargs))
            )
        )
        return
    func(message, *args)
@command("post", "Subreddit", owner_only=True)
def post(message, sub):
sub_s = sub.split("/")[-1].lower()
subreddit = db.session.query(db.Subreddit).filter_by(name=sub_s).first()
if not subreddit:
message.reply(f"The subreddit /r/{sub_s} is not in my database.")
return
subreddit.post = True
db.session.add(subreddit)
db.session.commit()
message.reply(f"I will now comment on posts in /r/{sub_s}")
@command("nopost", "Subreddit", owner_only=True)
def nopost(message, sub):
sub_s = sub.split("/")[-1].lower()
subreddit = db.session.query(db.Subreddit).filter_by(name=sub_s).first()
if not subreddit:
message.reply(f"The subreddit /r/{sub_s} is not in my database.")
return
subreddit.post = False
db.session.add(subreddit)
db.session.commit()
message.reply(f"I will not comment on posts in /r/{sub_s} from now on.")
@command("unsubscribe", "Author", "Subreddit")
def unsubscribe(message, auth, sub):
auth_s = auth.split("/")[-1]
author_s = auth_s.lower()
sub_s = sub.split("/")[-1]
subreddit_s = sub_s.lower()
subscriber_s = message.author.name.lower()
subscription = db.get_subscription(subscriber_s, author_s, subreddit_s)
if not subscription:
message.reply(
get_template("base.j2").render(
message=_SUB_NOT_EXISTS_MSG.format(author=auth_s, subreddit=sub_s)
)
)
return
try:
db.session.delete(subscription)
db.session.commit()
except:
db.session.rollback()
raise
message.reply(
get_template("base.j2").render(
message=_SUB_REMOVED_MSG.format(author=auth_s, subreddit=sub_s)
)
)
@command("mysubscriptions")
def mysubscriptions(message):
subscriber = message.author.name.lower()
subscriptions = (
db.session.query(db.Subscription)
.join(db.Subscription.subscriber)
.filter(db.User.username == subscriber)
.options(
joinedload(db.Subscription.author), joinedload(db.Subscription.subreddit)
)
.all()
)
message.reply(get_template("subscriptions.j2").render(subscriptions=subscriptions))
@command("subscribe", "Author", "Subreddit")
def subscribe(message, auth, sub):
auth_s = auth.split("/")[-1]
author_s = auth_s.lower()
sub_s = sub.split("/")[-1]
subreddit_s = sub_s.lower()
subscriber_s = message.author.name.lower()
author = db.get_or_create_if(
db.User, lambda: user_exists(author_s), username=author_s
)
if not author:
message.reply(
get_template("base.j2").render(
message=f"The user /u/{author_s} doesn't exist"
)
)
return
subreddit = db.get_or_create_if(
db.Subreddit, lambda: subreddit_exists(subreddit_s), name=subreddit_s
)
if not subreddit:
message.reply(
get_template("base.j2").render(
message=f"The subreddit /r/{subreddit_s} doesn't exist"
)
)
return
subscriber = db.create_or_get(db.User, username=subscriber_s)
subscription = db.Subscription(
author=author, subreddit=subreddit, subscriber=subscriber
)
try:
db.session.add(subscription)
db.session.commit()
except IntegrityError:
db.session.rollback()
message.reply(
get_template("base.j2").render(
message=_SUB_EXISTS_MSG.format(author=auth_s, subreddit=sub_s)
)
)
return
message.reply(
get_template("base.j2").render(
message=c.SUBSCRIPTION_SUCCESS.format(author=auth_s, subreddit=sub_s)
)
)
@mention_command("subscribe")
def msubcribe(message):
author_s = message.submission.author.name.lower()
subreddit_s = message.submission.subreddit.display_name.lower()
subscriber_s = message.author.name.lower()
author = db.create_or_get(db.User, username=author_s)
subreddit = db.create_or_get(db.Subreddit, name=subreddit_s)
subscriber = db.create_or_get(db.User, username=subscriber_s)
subscription = db.Subscription(
author=author, subreddit=subreddit, subscriber=subscriber
)
try:
db.session.add(subscription)
db.session.commit()
except IntegrityError:
db.session.rollback()
message.author.message(
subject="Re: subscribe",
message=get_template("base.j2").render(
message=_SUB_EXISTS_MSG.format(author=author_s, subreddit=subreddit_s)
),
)
return
message.author.message(
subject="Re: subscribe",
message=get_template("base.j2").render(
message=c.SUBSCRIPTION_SUCCESS.format(
author=author_s, subreddit=subreddit_s
)
),
)
| necessary129/InformsYouBot | InformsYouBot/commands.py | commands.py | py | 6,941 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "template.get_template",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "template.get_template",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "template.get_template",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "te... |
9173707874 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
from the month of edge deletion, find the SR before, at the time and after
"""
from collections import defaultdict
import codecs
import os
import json
import numpy as np
IN_DIR = "../../../DATA/General/MO_MENT_networks"
os.chdir(IN_DIR)
F_IN = "mention_edges_monthly_SR"
F_OUT = "mention_network_dir_w"
MONTHS = ["5", "6", "7", "8", "9", "10", "11"]
def read_in_monthly_mentions(MO):
monthly_ment = defaultdict(dict)
f_in = str(MO) + "mention_edges_monthly_SR"
f = open(f_in, 'r')
for line in f:
(u1, u2, SR, w1, w2) = line.split()
userA = int(u1)
userB = int(u2)
w1 = int(w1)
w2 = int(w2)
if w1 > 0:
monthly_ment[(u1, u2)] = w1
if w2 > 0:
monthly_ment[(u2, u1)] = w2
f.close()
return monthly_ment
def save_monthly_network_dir_w(MO):
monthly_ment = read_in_monthly_mentions(MO)
f_out = str(MO) + "mention_edgelist_dir_w"
f = open(f_out, 'w')
for el in monthly_ment:
userA = el[0]
userB = el[1]
w = monthly_ment[el]
f.write(str(userA) + '\t' + str(userB) + '\t' + str(w) + '\n')
f.close()
for MO in MONTHS:
save_monthly_network_dir_w(MO)
| sanja7s/SR_Twitter | src_graph/create_MO_MENT_networks.py | create_MO_MENT_networks.py | py | 1,146 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.chdir",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 21,
"usage_type": "call"
}
] |
71713008103 | import os
import time
from dotenv import dotenv_values
from selenium import webdriver
from selenium.webdriver import Chrome
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.service import Service
from utilities.commons.exceptions import DriverSetupFailedException
from utilities.commons.context import Context
from utilities.scripts.script_executor import ScriptExecutor
import requests
from requests.exceptions import ConnectionError
def _get_env_values() -> dict:
config = dotenv_values(".env")
return {**config} if config else None
def _load_init_configs(context):
env_values = _get_env_values()
context: Context = context
if env_values is not None:
context.WEB_URL = env_values.get("WEB_URL")
context.API_URL = env_values.get("API_URL")
else:
raise Exception(f"Env file/values are missing")
def _get_service() -> Service:
try:
service = Service(executable_path=ChromeDriverManager(path=os.path.dirname(__file__)).install())
except DriverSetupFailedException:
raise DriverSetupFailedException(msg="Failed to install driver or create service")
return service
def _get_chrome_options():
options = webdriver.ChromeOptions()
options.add_argument("no-sandbox")
options.add_argument("--verbose")
options.add_argument("--disable-gpu")
options.add_argument("--disable-web-security")
options.add_argument("--disable-dev-shm-usage")
options.add_argument("--ignore-certificate-errors")
options.add_argument("--allow-insecure-localhost")
return options
def _get_local_webdriver():
service: Service = _get_service()
try:
browser = Chrome(service=service, \
options=_get_chrome_options())
except DriverSetupFailedException:
raise DriverSetupFailedException(msg="Failed to initialize driver")
return browser
def _build_selenium_remote_base_url(host):
if host is None or len(host) > 1:
return f"http://{host}:4444/"
else:
raise ValueError("host cannot be None or empty, please provide host")
def _get_remote_selenium_url(hosts: list) -> str:
if len(hosts) < 1:
raise ValueError("Please enter host(s) as list, at least one host is expected")
retry = True
max_retries = 5
retry_attempt = 0
while retry:
retry_attempt += 1
for host in hosts:
try:
selenium_remote_url = _build_selenium_remote_base_url(host)
status_code = requests.get(selenium_remote_url).status_code
if status_code == 200:
retry = False
return selenium_remote_url + "wd/hub"
else:
print(f"request to {selenium_remote_url} failed due to status code: {status_code} ")
retry = False
except ConnectionError:
print(f"{selenium_remote_url} is not reachable \n")
if hosts.index(host) + 1 < len(hosts):
print(f"now trying to reach selenium at:{_build_selenium_remote_base_url(hosts[hosts.index(host) + 1])}")
if retry_attempt >= max_retries:
retry = False
time.sleep(5)
print(f"*** retry attempt:{retry_attempt} ***\n")
def _get_webdriver_from_remote():
remote_selenium_url = _get_remote_selenium_url(hosts=["selenium", "localhost"])
return webdriver.Remote(command_executor=remote_selenium_url, \
options=_get_chrome_options())
def before_scenario(context, scenario):
_load_init_configs(context)
context.browser = _get_local_webdriver() \
if os.environ.get("RUN_ENV") == "local" \
else _get_webdriver_from_remote()
def after_scenario(context, scenario):
context.browser.quit()
def after_all(context):
ScriptExecutor.delete_temp_files()
'''
#other available hooks
def before_step(context, step):
pass
def after_step(context, step):
pass
def before_feature(context, feature):
pass
def after_feature(context, feature):
pass
def before_tag(context, tag):
pass
def after_tag(context, tag):
pass
def before_all(context):
pass
def after_all(context):
pass
'''
| HarshDevSingh/python-behave | environment.py | environment.py | py | 4,291 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dotenv.dotenv_values",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "utilities.commons.context.Context",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.chrome.service.Service",
"line_number": 32,
"usage_type": "call... |
19021910953 | from collections import Iterable
l = isinstance('abc',Iterable)
print(l)
L=list(range(1,11))
print(L)
L2 = [x * x for x in range(1,20)]
print(L2)
L3 = [m + n for m in 'abc' for n in 'hkl']
print(L3)
import os
L4 = [d for d in os.listdir('.')]
print('all dir',L4)
L5 = ['Hello','World',18,'Apple',None]
L6 = [s.lower() for s in L5 if isinstance(s,str)]
print(L6)
L7 = ['Hello','World',18,'Apple',None]
L8 = [s.lower() if isinstance(s,str) else s for s in L7]
print(L8)
| jacena/python3 | iterable.py | iterable.py | py | 484 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.Iterable",
"line_number": 2,
"usage_type": "argument"
},
{
"api_name": "os.listdir",
"line_number": 15,
"usage_type": "call"
}
] |
25412177948 | from ..shared.list_arithmatic import add
from ..shared.digits import to_digits
from itertools import permutations
from ..shared.solver import Solver
def digit_sum(n:int)->int:
return add(to_digits(n))
def digit_sums(biggest: int):
pool = list(range(0,biggest))
for a, b in permutations(pool,2):
power = a**b
yield a, b, power, digit_sum(power)
def _solve(print=print):
biggest = max(digit_sums(100), key=lambda x: x[3])
print(f"{biggest[0]}**{biggest[1]}=={biggest[2]}")
print(f"Digit sum: {biggest[3]}")
return True
description = '''A googol (10**100) is a massive number: one followed by one-hundred zeros; 100**100 is almost
unimaginably large: one followed by two-hundred zeros. Despite their size, the sum of the digits in each number is only 1.
Considering natural numbers of the form, a**b, where a, b < 100, what is the maximum digital sum?
'''
solver = Solver(56,
'Powerful digit sum',
description,
_solve
) | bathcat/pyOiler | src/pyoiler/problems/euler056.py | euler056.py | py | 1,033 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "shared.list_arithmatic.add",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "shared.digits.to_digits",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "itertools.permutations",
"line_number": 12,
"usage_type": "call"
},
{
"api_name"... |
14696977373 | import json
import os.path as osp
from typing import Union
class Prompter(object):
__slots__ = ("template", "_verbose")
def __init__(self, template_name: str = "", verbose: bool = False):
self._verbose = verbose
if not template_name:
template_name = "KoRAE_template"
file_name = osp.join("templates", f"{template_name}.json")
if not osp.exists(file_name):
raise ValueError(f"Can't read {file_name}")
with open(file_name) as fp:
self.template = json.load(fp)
if self._verbose:
print(
f"Using prompt template {template_name}: {self.template['description']}"
)
def generate_prompt(
self,
instruction: str,
system_msg: Union[None, str] = None,
input: Union[None, str] = None,
output: Union[None, str] = None,
) -> str:
# returns the full prompt from instruction and optional system message and input
# if a label is provided, it's also appended
if system_msg:
res = self.template['prompt'].format(
prompt=system_msg,
instruction=instruction + " " + input
)
else:
res = self.template['no_prompt'].format(
instruction=instruction + " " + input,
)
if output:
res = f"{res}{output}"
return res
def get_response(self, output: str) -> str:
return output.split(self.template["response_split"])[1].strip() | gauss5930/KoRAE | finetuning/utils/prompter.py | prompter.py | py | 1,553 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
... |
14505653809 | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torchvision as tv
from time import time
from src.model.madry_model import WideResNet
from src.attack import FastGradientSignUntargeted
from src.utils import makedirs, create_logger, tensor2cuda, numpy2cuda, evaluate, save_model
from src.argument import parser, print_args
class Trainer():
def __init__(self, args, logger, attack):
self.args = args
self.logger = logger
self.attack = attack
def standard_train(self, model, tr_loader, va_loader=None):
self.train(model, tr_loader, va_loader, False)
def adversarial_train(self, model, tr_loader, va_loader=None):
self.train(model, tr_loader, va_loader, True)
def train(self, model, tr_loader, va_loader=None, adv_train=False):
args = self.args
logger = self.logger
opt = torch.optim.SGD(model.parameters(), args.learning_rate,
weight_decay=args.weight_decay,
momentum=args.momentum)
scheduler = torch.optim.lr_scheduler.MultiStepLR(opt,
milestones=[40000, 60000],
gamma=0.1)
_iter = 0
begin_time = time()
for epoch in range(1, args.max_epoch+1):
for data, label in tr_loader:
data, label = tensor2cuda(data), tensor2cuda(label)
if adv_train:
# When training, the adversarial example is created from a random
# close point to the original data point. If in evaluation mode,
# just start from the original data point.
adv_data = self.attack.perturb(data, label, 'mean', True)
output = model(adv_data, _eval=False)
else:
output = model(data, _eval=False)
loss = F.cross_entropy(output, label)
opt.zero_grad()
loss.backward()
opt.step()
if _iter % args.n_eval_step == 0:
t1 = time()
if adv_train:
with torch.no_grad():
stand_output = model(data, _eval=True)
pred = torch.max(stand_output, dim=1)[1]
# print(pred)
std_acc = evaluate(pred.cpu().numpy(), label.cpu().numpy()) * 100
pred = torch.max(output, dim=1)[1]
# print(pred)
adv_acc = evaluate(pred.cpu().numpy(), label.cpu().numpy()) * 100
else:
adv_data = self.attack.perturb(data, label, 'mean', False)
with torch.no_grad():
adv_output = model(adv_data, _eval=True)
pred = torch.max(adv_output, dim=1)[1]
# print(label)
# print(pred)
adv_acc = evaluate(pred.cpu().numpy(), label.cpu().numpy()) * 100
pred = torch.max(output, dim=1)[1]
# print(pred)
std_acc = evaluate(pred.cpu().numpy(), label.cpu().numpy()) * 100
t2 = time()
logger.info(f'epoch: {epoch}, iter: {_iter}, lr={opt.param_groups[0]["lr"]}, '
f'spent {time()-begin_time:.2f} s, tr_loss: {loss.item():.3f}')
logger.info(f'standard acc: {std_acc:.3f}%, robustness acc: {adv_acc:.3f}%')
# begin_time = time()
# if va_loader is not None:
# va_acc, va_adv_acc = self.test(model, va_loader, True)
# va_acc, va_adv_acc = va_acc * 100.0, va_adv_acc * 100.0
# logger.info('\n' + '='*30 + ' evaluation ' + '='*30)
# logger.info('test acc: %.3f %%, test adv acc: %.3f %%, spent: %.3f' % (
# va_acc, va_adv_acc, time() - begin_time))
# logger.info('='*28 + ' end of evaluation ' + '='*28 + '\n')
begin_time = time()
if _iter % args.n_store_image_step == 0:
tv.utils.save_image(torch.cat([data.cpu(), adv_data.cpu()], dim=0),
os.path.join(args.log_folder, f'images_{_iter}.jpg'),
nrow=16)
if _iter % args.n_checkpoint_step == 0:
file_name = os.path.join(args.model_folder, f'checkpoint_{_iter}.pth')
save_model(model, file_name)
_iter += 1
# scheduler depends on training interation
scheduler.step()
if va_loader is not None:
t1 = time()
va_acc, va_adv_acc = self.test(model, va_loader, True, False)
va_acc, va_adv_acc = va_acc * 100.0, va_adv_acc * 100.0
t2 = time()
logger.info('\n'+'='*20 +f' evaluation at epoch: {epoch} iteration: {_iter} ' \
+'='*20)
logger.info(f'test acc: {va_acc:.3f}%, test adv acc: {va_adv_acc:.3f}%, spent: {t2-t1:.3f} s')
logger.info('='*28+' end of evaluation '+'='*28+'\n')
def test(self, model, loader, adv_test=False, use_pseudo_label=False):
# adv_test is False, return adv_acc as -1
total_acc = 0.0
num = 0
total_adv_acc = 0.0
with torch.no_grad():
for data, label in loader:
data, label = tensor2cuda(data), tensor2cuda(label)
output = model(data, _eval=True)
pred = torch.max(output, dim=1)[1]
te_acc = evaluate(pred.cpu().numpy(), label.cpu().numpy(), 'sum')
total_acc += te_acc
num += output.shape[0]
if adv_test:
# use predicted label as target label
with torch.enable_grad():
adv_data = self.attack.perturb(data,
pred if use_pseudo_label else label,
'mean',
False)
adv_output = model(adv_data, _eval=True)
adv_pred = torch.max(adv_output, dim=1)[1]
adv_acc = evaluate(adv_pred.cpu().numpy(), label.cpu().numpy(), 'sum')
total_adv_acc += adv_acc
else:
total_adv_acc = -num
return total_acc / num , total_adv_acc / num
def main(args):
save_folder = '%s_%s' % (args.dataset, args.affix)
log_folder = os.path.join(args.log_root, save_folder)
model_folder = os.path.join(args.model_root, save_folder)
makedirs(log_folder)
makedirs(model_folder)
setattr(args, 'log_folder', log_folder)
setattr(args, 'model_folder', model_folder)
logger = create_logger(log_folder, args.todo, 'info')
print_args(args, logger)
model = WideResNet(depth=34, num_classes=10, widen_factor=10, dropRate=0.0)
attack = FastGradientSignUntargeted(model,
args.epsilon,
args.alpha,
min_val=0,
max_val=1,
max_iters=args.k,
_type=args.perturbation_type)
if torch.cuda.is_available():
model.cuda()
trainer = Trainer(args, logger, attack)
if args.todo == 'train':
transform_train = tv.transforms.Compose([
tv.transforms.RandomCrop(32, padding=4, fill=0, padding_mode='constant'),
tv.transforms.RandomHorizontalFlip(),
tv.transforms.ToTensor(),
])
tr_dataset = tv.datasets.CIFAR10(args.data_root,
train=True,
transform=transform_train,
download=True)
tr_loader = DataLoader(tr_dataset, batch_size=args.batch_size, shuffle=True, num_workers=4)
# evaluation during training
te_dataset = tv.datasets.CIFAR10(args.data_root,
train=False,
transform=tv.transforms.ToTensor(),
download=True)
te_loader = DataLoader(te_dataset, batch_size=args.batch_size, shuffle=False, num_workers=4)
trainer.train(model, tr_loader, te_loader, args.adv_train)
elif args.todo == 'test':
te_dataset = tv.datasets.CIFAR10(args.data_root,
train=False,
transform=tv.transforms.ToTensor(),
download=True)
te_loader = DataLoader(te_dataset, batch_size=args.batch_size, shuffle=False, num_workers=4)
checkpoint = torch.load(args.load_checkpoint)
model.load_state_dict(checkpoint)
std_acc, adv_acc = trainer.test(model, te_loader, adv_test=True, use_pseudo_label=False)
print(f"std acc: {std_acc * 100:.3f}%, adv_acc: {adv_acc * 100:.3f}%")
else:
raise NotImplementedError
if __name__ == '__main__':
args = parser()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
main(args) | ylsung/pytorch-adversarial-training | cifar-10/main.py | main.py | py | 9,835 | python | en | code | 230 | github-code | 36 | [
{
"api_name": "torch.optim.SGD",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.lr_scheduler.MultiStepLR",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": ... |
10744207821 | import numpy
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.pyplot import figure
img_path = '/home/moby/PycharmProjects/datasets_processing/figures/'
plt.rcParams.update({
'font.family': 'serif',
'font.sans-serif': ['Times'],
'text.latex.preamble':
r'\usepackage[T2A]{fontenc}'
r'\usepackage[utf8]{inputenc}'
})
classes = [
'Normal (Без спец. техники)',
'Muting (Пиццикато)',
'Vibrato (Вибрато)',
'Pull-off (Нисх. легато)',
'Hammer-on (Восх. легато)',
'Sliding (Глиссандо)',
'Bending (Бенд)'
]
vals = np.array([
2009,
385,
637,
525,
581,
1162,
1281
])
def absolute_value(val):
a = numpy.round(val / 100. * vals.sum(), 0)
return int(a)
fig1, ax1 = plt.subplots(figsize=(9, 5))
ax1.pie(vals, labels=classes, autopct=absolute_value,
shadow=False, startangle=90,
textprops={'fontsize': 15}
)
# ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle
# fig1.set_size_inches(18.5, 10.5)
# plt.tight_layout()
plt.savefig(img_path + "gpt_pie" + '.pdf')
plt.show()
| SergWh/datasets_processing | thesis_plots/gpt_classes.py | gpt_classes.py | py | 1,188 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.rcParams.update",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"... |
45923290828 | from SortAlgs import *
import matplotlib.pyplot as plt
import plotly.express as px
import timeit
import random
import numpy as np
N = 10000
X = [random.randint(0, N) for _ in range(N)]
naive_time = timeit.timeit("naive_sort(X, X)", globals=globals(), number=1)
merge_time = timeit.timeit("merge_sort(X, X)", globals=globals(), number=1)
count_time = timeit.timeit("count_sort(X, X)", globals=globals(), number=1)
naive_complexity = lambda n: naive_time/10000**2 * n**2
merge_complexity = lambda n: merge_time/10000/np.log2(10000) * n*np.log2(n)
count_complexity = lambda n: count_time/10000 * n
n = np.logspace(1, 10, 10000)
fig = px.line(x=n, y=naive_complexity(n)/3600/24/365.25, labels={"x": "n", "y": "time [years]"}, title="Naive sort")
fig.add_scatter(x=n, y=merge_complexity(n)/3600/24/365.25, name="Merge sort")
fig.add_scatter(x=n, y=count_complexity(n)/3600/24/365.25, name="Count sort")
fig.update_xaxes(type="log")
fig.update_yaxes(type="log")
fig.update_layout(yaxis=dict(exponentformat="power"), xaxis=dict(exponentformat="power"))
fig.show() | SchardtS/Coding-Club | 2023_12_18_SortingAlgorithms/Simon/ComplexityVisualization.py | ComplexityVisualization.py | py | 1,087 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "random.randint",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "timeit.timeit",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "timeit.timeit",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "timeit.timeit",
"line_n... |
21618269201 | from __future__ import absolute_import
import logging
import tempfile
import unittest
from apache_beam.examples.cookbook import group_with_coder
from apache_beam.testing.util import open_shards
# Patch group_with_coder.PlayerCoder.decode(). To test that the PlayerCoder was
# used, we do not strip the prepended 'x:' string when decoding a Player object.
group_with_coder.PlayerCoder.decode = lambda self, s: group_with_coder.Player( # type: ignore[assignment]
s.decode('utf-8'))
class GroupWithCoderTest(unittest.TestCase):
SAMPLE_RECORDS = [
'joe,10',
'fred,3',
'mary,7',
'joe,20',
'fred,6',
'ann,5',
'joe,30',
'ann,10',
'mary,1'
]
def create_temp_file(self, records):
with tempfile.NamedTemporaryFile(delete=False) as f:
for record in records:
f.write(b'%s\n' % record.encode('utf-8'))
return f.name
def test_basics_with_type_check(self):
# Run the workflow with pipeline_type_check option. This will make sure
# the typehints associated with all transforms will have non-default values
# and therefore any custom coders will be used. In our case we want to make
# sure the coder for the Player class will be used.
temp_path = self.create_temp_file(self.SAMPLE_RECORDS)
group_with_coder.run(
['--input=%s*' % temp_path, '--output=%s.result' % temp_path],
save_main_session=False)
# Parse result file and compare.
results = []
with open_shards(temp_path + '.result-*-of-*') as result_file:
for line in result_file:
name, points = line.split(',')
results.append((name, int(points)))
logging.info('result: %s', results)
self.assertEqual(
sorted(results),
sorted([('x:ann', 15), ('x:fred', 9), ('x:joe', 60), ('x:mary', 8)]))
def test_basics_without_type_check(self):
# Run the workflow without pipeline_type_check option. This will make sure
# the typehints associated with all transforms will have default values and
# therefore any custom coders will not be used. The default coder (pickler)
# will be used instead.
temp_path = self.create_temp_file(self.SAMPLE_RECORDS)
group_with_coder.run([
'--no_pipeline_type_check',
'--input=%s*' % temp_path,
'--output=%s.result' % temp_path
],
save_main_session=False)
# Parse result file and compare.
results = []
with open_shards(temp_path + '.result-*-of-*') as result_file:
for line in result_file:
name, points = line.split(',')
results.append((name, int(points)))
logging.info('result: %s', results)
self.assertEqual(
sorted(results),
sorted([('ann', 15), ('fred', 9), ('joe', 60), ('mary', 8)]))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| a0x8o/kafka | sdks/python/apache_beam/examples/cookbook/group_with_coder_test.py | group_with_coder_test.py | py | 2,877 | python | en | code | 59 | github-code | 36 | [
{
"api_name": "apache_beam.examples.cookbook.group_with_coder.PlayerCoder",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "apache_beam.examples.cookbook.group_with_coder",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "apache_beam.examples.cookbook.grou... |
34373633188 | from django.urls import path
from . import views
from django.urls import include, path, re_path
# /playlist
# /user/<id>
urlpatterns = [
path('index/', views.index, name='index'), # page
re_path('^index/registration/', views.registration),
path('login/', views.login), # page
path('logout/', views.logout),
path('user/playlist', views.userlist), # page userId=1&playlistId=2
path('userlist/track/', views.addTrack), #page
re_path('^userlist/track/add/', views.addTrack),
re_path('^userlists/delete/', views.userlists), # page -_-
re_path('^userlists/create/add/', views.addPlaylist),
re_path('^userlists/create/', views.addPlaylist),
path('user/playlist/modify/', views.modify_pl),
path('user/playlist/modify/add/', views.modify_pl),
path('user/playlist/modify/put/', views.mod_put),
path('user/playlist/modify/put/put/', views.mod_put)
]
| Maxgioman/Python | playlist/urls.py | urls.py | py | 900 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.re_path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.pat... |
32699799681 | import io
import json
import sys
INDENT = 4 * " "
DEFAULT_NEWRESPARAM = "res"
def toCppType(t):
if isinstance(t, list):
if len(t) == 2:
return "{}<{}>".format(t[0], t[1])
elif len(t) == 3:
return "{}<{}<{}>>".format(t[0], t[1], t[2])
else:
raise RuntimeError("unexpected nesting level of template types: {}".format(t))
else:
return t
def generateKernelInstantiation(kernelTemplateInfo, templateValues, opCodes, outFile, API):
# Extract some information.
opName = kernelTemplateInfo["opName"]
returnType = kernelTemplateInfo["returnType"]
templateParams = kernelTemplateInfo["templateParams"]
runtimeParams = kernelTemplateInfo["runtimeParams"]
opCodeAsTemplateParam = False
if "opCodeAsTemplateParam" in kernelTemplateInfo:
opCodeAsTemplateParam = True if kernelTemplateInfo["opCodeAsTemplateParam"] == 1 else False
if len(templateParams) != len(templateValues):
raise RuntimeError(
f"kernel \"{opName}\" has {len(templateParams)} template parameters, but "
f"{len(templateValues)} template values are supplied in an instantiation"
)
if opCodes is not None:
# We assume that the op-code is the first run-time parameter.
opCodeType = runtimeParams[0]["type"]
runtimeParams = runtimeParams[1:]
else:
opCodeType = None
# Create mapping from original template argument names to assigned C++
# types.
templateArgToCppType = {tp["name"]: toCppType(tv) for tp, tv in zip(templateParams, templateValues)}
# ToDo: commented by mdokter - maybe remove. I think this would be too verbose
# Comments indicating values assigned to original template arguments.
# for tp in templateParams:
# outStr = INDENT + "// {} = {}\n".format(tp["name"], templateArgToCppType[tp["name"]])
# outFile.write(outStr)
# The function wrapping the generated kernel instantiation always has
# the return type void. If the considered kernel returns a scalar value,
# we prepend an additional run-time parameter.
extendedRuntimeParams = [
{"name": DEFAULT_NEWRESPARAM, "type": "{} *".format(returnType), "isOutput": True}
] if (returnType != "void") else []
# Add all run-time parameters of the kernel. We need to copy, because
# we apply string replacements to the types.
extendedRuntimeParams.extend([rp.copy() for rp in runtimeParams])
# Replace occurences of original template arguments by their assigned
# types.
for rp in extendedRuntimeParams:
for tpIdx, tp in enumerate(templateParams):
if isinstance(templateValues[tpIdx], list):
rp["type"] = rp["type"].replace("typename {}::VT".format(tp["name"]), templateValues[tpIdx][1])
rp["type"] = rp["type"].replace(tp["name"], templateArgToCppType[tp["name"]])
if rp["type"].endswith("*&"):
rp["type"] = rp["type"][:-2] + "**"
rp["isOutput"] = True
if rp["type"].endswith("&"):
rp["type"] = rp["type"][:-1]
rp["isOutput"] = True
elif "isOutput" not in rp:
rp["isOutput"] = False
isCreateDaphneContext = opName == "createDaphneContext"
# typesForName = "__".join([("{}_{}".format(tv[0], tv[1]) if isinstance(tv, list) else tv) for tv in templateValues])
typesForName = "__".join([
rp["type"]
[((rp["type"].rfind("::") + 2) if "::" in rp["type"] else 0):]
.replace("const ", "")
.replace(" **", "" if rp["isOutput"] else "_variadic")
.replace(" *", "_variadic" if "isVariadic" in rp and rp["isVariadic"] else "")
.replace("& ", "")
.replace("<", "_").replace(">", "")
for rp in extendedRuntimeParams
])
if typesForName != "":
typesForName = "__" + typesForName
params = ", ".join(
["{} {}".format(rtp["type"], rtp["name"]) for rtp in extendedRuntimeParams] +
([] if isCreateDaphneContext else ["DCTX(ctx)"])
)
def generateFunction(opCode):
# Obtain the name of the function to be generated from the opName by
# removing suffices "Sca"/"Mat"/"Obj" (they are not required here), and
# potentially by inserting the opCode into the name.
if API != "CPP":
funcName = API + "_" + opName
else:
funcName = "_" + opName
while funcName[-3:] in ["Sca", "Mat", "Obj"]:
funcName = funcName[:-3]
funcName = funcName.replace("::", "_")
if opCode is not None:
opCodeWord = opCodeType[:-len("OpCode")]
funcName = funcName.replace(opCodeWord, opCode[0].upper() + opCode[1:].lower())
funcName = funcName.replace(opCodeWord.lower(), opCode.lower())
# Signature of the function wrapping the kernel instantiation.
outFile.write(INDENT + "void {}{}({}) {{\n".format(
funcName,
typesForName,
# Run-time parameters, possibly including DaphneContext:
params
))
# List of parameters for the call.
if opCode is None or opCodeAsTemplateParam:
callParams = []
else:
callParams = ["{}::{}".format(opCodeType, opCode)]
callParams.extend([
# Dereference double pointer for output parameters.
"{}{}".format("*" if (rp["type"].endswith("**") and rp["isOutput"]) else "", rp["name"])
for rp in extendedRuntimeParams[(0 if returnType == "void" else 1):]
])
# List of template parameters for the call.
callTemplateParams = [toCppType(tv) for tv in templateValues]
if opCodeAsTemplateParam and opCode is not None:
opCodeWord = opCodeType[:-len("OpCode")]
callTemplateParams = ["{}::{}".format(opCodeWord if API == "CPP" else API + "::" + opCodeWord, opCode)] + callTemplateParams
# Body of that function: delegate to the kernel instantiation.
outFile.write(2 * INDENT)
if returnType != "void":
outFile.write("*{} = ".format(DEFAULT_NEWRESPARAM))
kernelCallString = "{}{}::apply({});\n" if opCodeAsTemplateParam else "{}{}({});\n"
outFile.write(kernelCallString.format(
opName if API == "CPP" else (API + "::" + opName),
# Template parameters, if the kernel is a template:
"<{}>".format(", ".join(callTemplateParams)) if len(templateValues) else "",
# Run-time parameters, possibly including DaphneContext:
", ".join(callParams + ([] if isCreateDaphneContext else ["ctx"])),
))
outFile.write(INDENT + "}\n")
# Generate the function(s).
if opCodes is None:
generateFunction(None)
else:
for opCode in opCodes:
generateFunction(opCode)
# outFile.write(INDENT + "\n")
def printHelp():
print("Usage: python3 {} INPUT_SPEC_FILE OUTPUT_CPP_FILE API".format(sys.argv[0]))
print(__doc__)
if __name__ == "__main__":
if len(sys.argv) == 2 and (sys.argv[1] == "-h" or sys.argv[1] == "--help"):
printHelp()
sys.exit(0)
elif len(sys.argv) != 4:
print("Wrong number of arguments.")
print()
printHelp()
sys.exit(1)
# Parse arguments.
inFilePath = sys.argv[1]
outFilePath = sys.argv[2]
API = sys.argv[3]
ops_inst_str = ""
header_str = ""
# Load the specification (which kernel template shall be instantiated
# with which template arguments) from a JSON-file.
with open(inFilePath, "r") as inFile:
kernelsInfo = json.load(inFile)
for kernelInfo in kernelsInfo:
kernelTemplateInfo = kernelInfo["kernelTemplate"]
if "api" in kernelInfo:
for api in kernelInfo["api"]:
for name in api["name"]:
# print("Processing API: " + name)
# print(" OpName: " + kernelTemplateInfo["opName"])
# print(" Instantiations: " + str(api["instantiations"]))
# if "opCodes" in api:
# print(" opCodes: " + str(api["opCodes"]))
if name == API:
# Comment reporting the kernel name.
ops_inst_str += INDENT + "// {}\n".format("-" * 76)
ops_inst_str += INDENT + "// {}\n".format(kernelTemplateInfo["opName"])
ops_inst_str += INDENT + "// {}\n".format("-" * 76)
# Include for the required header.
if API != "CPP":
header_str = header_str + "#include <runtime/local/kernels/{}/{}>\n".format(API, kernelTemplateInfo["header"])
else:
header_str = header_str + "#include <runtime/local/kernels/{}>\n".format(kernelTemplateInfo["header"])
outBuf = io.StringIO()
for instantiation in api["instantiations"]:
generateKernelInstantiation(kernelTemplateInfo, instantiation,
api.get("opCodes", None), outBuf, API)
ops_inst_str += outBuf.getvalue()
else:
if API == "CPP":
# Comment reporting the kernel name.
ops_inst_str += INDENT + "// {}\n".format("-" * 76)
ops_inst_str += INDENT + "// {}\n".format(kernelTemplateInfo["opName"])
ops_inst_str += INDENT + "// {}\n".format("-" * 76)
# Include for the required header.
header_str = header_str + "#include <runtime/local/kernels/{}>\n".format(kernelTemplateInfo["header"])
# One function per instantiation of the kernel.
opCodes = kernelInfo.get("opCodes", None)
outBuf = io.StringIO()
for instantiation in kernelInfo["instantiations"]:
generateKernelInstantiation(kernelTemplateInfo, instantiation, opCodes, outBuf, API)
ops_inst_str += outBuf.getvalue()
with open(outFilePath, "w") as outFile:
outFile.write("// This file was generated by {}. Don't edit manually!\n\n".format(sys.argv[0]))
outFile.write("#include <runtime/local/context/DaphneContext.h>\n")
outFile.write(header_str)
outFile.write("\nextern \"C\" {\n")
outFile.write(ops_inst_str)
outFile.write("}\n")
| daphne-eu/daphne | src/runtime/local/kernels/genKernelInst.py | genKernelInst.py | py | 10,745 | python | en | code | 51 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number":... |
1636559220 | import sys
import os
import subprocess
import graphviz_gen
from PySide6.QtWidgets import QApplication, QWidget, QPushButton, QLineEdit, QPlainTextEdit, QVBoxLayout
from PySide6.QtCore import QFile, QThread, Slot, Qt
from PySide6.QtUiTools import QUiLoader
from PySide6.QtSvgWidgets import QSvgWidget
class Base(QWidget):
def __init__(self):
super(Base, self).__init__()
self.load_ui()
# self.thread = Worker()
self.domain_in = self.ui.findChild(QLineEdit, "domain_in")
self.text_box = self.ui.findChild(QPlainTextEdit, "log_out")
self.btn = self.ui.findChild(QPushButton, 'rs_button')
self.graph_btn = self.ui.findChild(QPushButton, 'graph_btn')
def load_ui(self):
loader = QUiLoader()
path = os.path.join(os.path.dirname(__file__), "form.ui")
ui_file = QFile(path)
ui_file.open(QFile.ReadOnly)
self.ui = loader.load(ui_file, self)
ui_file.close()
class GraphWindow(QWidget):
def __init__(self, path):
super().__init__()
layout = QVBoxLayout()
self.svg_wid = QSvgWidget(self)
self.svg_wid.load(path)
# self.svg_wid.show()
layout.addWidget(self.svg_wid)
self.setLayout(layout)
class Implement(Base):
def __init__(self):
super().__init__()
self.w = None
print(self.text_box)
self.btn.clicked.connect(self.run_script)
self.graph_btn.clicked.connect(self.graph_disp)
@Slot()
def run_script(self):
text_value = self.domain_in.text()
print(type(text_value))
out_file = open("output", 'w')
script_path = "{insert your path here for the bash script }"
rc = subprocess.run(
[script_path, text_value],
stdout=out_file,
stderr=subprocess.PIPE
)
out_file.close()
self.show_output_box()
@Slot()
def show_output_box(self):
out_file = open("output").read()
print(out_file)
self.text_box.clear()
self.text_box.insertPlainText(out_file)
def graph_disp(self):
graphviz_gen.gengraph(self.domain_in.text())
# if self.w is None:
print(self.domain_in.text())
self.w = GraphWindow("final_graph"+self.domain_in.text()+".svg")
self.w.show()
if __name__ == "__main__":
app = QApplication([])
widget = Implement()
widget.show()
# widget.show_ui_object()
sys.exit(app.exec_())
| 0x000922/Network-Troubleshooter | main.py | main.py | py | 2,487 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PySide6.QtWidgets.QWidget",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "PySide6.QtWidgets.QLineEdit",
"line_number": 17,
"usage_type": "argument"
},
{
"api_name": "PySide6.QtWidgets.QPlainTextEdit",
"line_number": 18,
"usage_type": "argument"
... |
506390690 | """
Snapping
"""
def snap_points_to_near_line(lineShp, pointShp, epsg, workGrass,
outPoints, location='overlap_pnts', api='grass',
movesShp=None):
"""
Move points to overlap near line
API's Available:
* grass;
* saga.
"""
if api == 'grass':
"""
Uses GRASS GIS to find near lines.
"""
import os; import numpy
from geopandas import GeoDataFrame
from glass.pys.oss import fprop
from glass.wenv.grs import run_grass
from glass.rd.shp import shp_to_obj
from glass.wt.shp import df_to_shp
# Create GRASS GIS Location
grassBase = run_grass(workGrass, location=location, srs=epsg)
import grass.script as grass
import grass.script.setup as gsetup
gsetup.init(grassBase, workGrass, location, 'PERMANENT')
# Import some GRASS GIS tools
from glass.gp.prox import grs_near as near
from glass.tbl.attr import geomattr_to_db
from glass.it.shp import shp_to_grs, grs_to_shp
# Import data into GRASS GIS
grsLines = shp_to_grs(
lineShp, fprop(lineShp, 'fn', forceLower=True)
)
grsPoint = shp_to_grs(
pointShp, fprop(pointShp, 'fn', forceLower=True)
)
# Get distance from points to near line
near(grsPoint, grsLines, nearCatCol="tocat", nearDistCol="todistance")
# Get coord of start/end points of polylines
geomattr_to_db(grsLines, ['sta_pnt_x', 'sta_pnt_y'], 'start', 'line')
geomattr_to_db(grsLines, ['end_pnt_x', 'end_pnt_y'], 'end', 'line')
# Export data from GRASS GIS
ogrPoint = grs_to_shp(grsPoint, os.path.join(
workGrass, grsPoint + '.shp', 'point', asMultiPart=True
))
ogrLine = grs_to_shp(grsLines, os.path.join(
workGrass, grsLines + '.shp', 'point', asMultiPart=True
))
# Points to GeoDataFrame
pntDf = shp_to_obj(ogrPoint)
# Lines to GeoDataFrame
lnhDf = shp_to_obj(ogrLine)
# Erase unecessary fields
pntDf.drop(["todistance"], axis=1, inplace=True)
lnhDf.drop([c for c in lnhDf.columns.values if c != 'geometry' and
c != 'cat' and c != 'sta_pnt_x' and c != 'sta_pnt_y' and
c != 'end_pnt_x' and c != 'end_pnt_y'],
axis=1, inplace=True)
# Join Geometries - Table with Point Geometry and Geometry of the
# nearest line
resultDf = pntDf.merge(
lnhDf, how='inner', left_on='tocat', right_on='cat')
# Move points
resultDf['geometry'] = [geoms[0].interpolate(
geoms[0].project(geoms[1])
) for geoms in zip(resultDf.geometry_y, resultDf.geometry_x)]
resultDf.drop(["geometry_x", "geometry_y", "cat_x", "cat_y"],
axis=1, inplace=True)
resultDf = GeoDataFrame(
resultDf, crs={"init" : 'epsg:{}'.format(epsg)}, geometry="geometry"
)
# Check if points are equal to any start/end points
resultDf["x"] = resultDf.geometry.x
resultDf["y"] = resultDf.geometry.y
resultDf["check"] = numpy.where(
(resultDf["x"] == resultDf["sta_pnt_x"]) & (resultDf["y"] == resultDf["sta_pnt_y"]),
1, 0
)
resultDf["check"] = numpy.where(
(resultDf["x"] == resultDf["end_pnt_x"]) & (resultDf["y"] == resultDf["end_pnt_y"]),
1, 0
)
# To file
df_to_shp(resultDf, outPoints)
elif api == 'saga':
"""
Snap Points to Lines using SAGA GIS
"""
from glass.pys import execmd
mv="" if not movesShp else f" -MOVES {movesShp}"
cmd = (
f"saga_cmd shapes_points 19 -INPUT {pointShp} "
f"-SNAP {lineShp} "
f"-OUTPUT {outPoints}{mv}"
)
outcmd = execmd(cmd)
else:
raise ValueError(f"{api} is not available!")
return outPoints
| jasp382/glass | glass/gp/snp.py | snp.py | py | 4,241 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "glass.wenv.grs.run_grass",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "grass.script.setup.init",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "grass.script.setup",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "g... |
43300161534 | """Implements the core parts of flow graph creation.
"""
import sys
import collections
import types
import __builtin__
from rpython.tool.error import source_lines
from rpython.rlib import rstackovf
from rpython.flowspace.argument import CallSpec
from rpython.flowspace.model import (Constant, Variable, Block, Link,
c_last_exception, const, FSException)
from rpython.flowspace.framestate import FrameState
from rpython.flowspace.specialcase import (rpython_print_item,
rpython_print_newline)
from rpython.flowspace.operation import op
from rpython.flowspace.bytecode import BytecodeCorruption
# Cached wrapped constant for None, shared throughout this module.
w_None = const(None)
class FlowingError(Exception):
    """ Signals invalid RPython in the function being analysed"""
    # The FlowContext active when the error was raised; filled in by
    # FlowContext.record_block() so __str__ can locate the source lines.
    ctx = None
    def __str__(self):
        # Prepend the offending source lines (found via the bytecode
        # offset of the last executed instruction) to the message.
        msg = ["\n"]
        msg += map(str, self.args)
        msg += [""]
        msg += source_lines(self.ctx.graph, None, offset=self.ctx.last_offset)
        return "\n".join(msg)
class StopFlowing(Exception):
    """Internal signal: stop recording operations for the current block
    (raised e.g. after a guessed branch forks the graph)."""
    pass
class SpamBlock(Block):
    """A flow-graph block created directly from a concrete frame state."""
    def __init__(self, framestate):
        Block.__init__(self, framestate.getvariables())
        self.framestate = framestate
        # Set to True when this block is superseded by a more general
        # one (see FlowContext.mergeblock) and must not be processed.
        self.dead = False
    def make_recorder(self):
        return BlockRecorder(self)
class EggBlock(Block):
    """A block forked off a parent block by a guessed outcome
    (a boolean guess or a particular exception case)."""
    def __init__(self, inputargs, prevblock, booloutcome):
        Block.__init__(self, inputargs)
        self.prevblock = prevblock
        self.booloutcome = booloutcome
    @property
    def ancestor(self):
        # Walk up the chain of EggBlocks to the originating SpamBlock.
        parent = self.prevblock
        while isinstance(parent, EggBlock):
            parent = parent.prevblock
        return parent
    @property
    def dead(self):
        # Liveness is delegated to the originating SpamBlock.
        return self.ancestor.dead
    @property
    def framestate(self):
        return self.ancestor.framestate
    def make_recorder(self):
        # To re-enter this block, the operations of every ancestor block
        # must be replayed (with the recorded guess outcomes) before new
        # operations may be recorded here.
        recorder = BlockRecorder(self)
        curr = self
        while isinstance(curr, EggBlock):
            prev = curr.prevblock
            recorder = Replayer(prev, curr.booloutcome, recorder)
            curr = prev
        return recorder
    def extravars(self, last_exception=None, last_exc_value=None):
        self.last_exception = last_exception
def fixeggblocks(graph):
    """Drop the cached frame state from every SpamBlock of *graph*.

    Called once flowing is finished; the states are only needed during
    graph construction, so releasing them saves memory.
    """
    spam_blocks = (b for b in graph.iterblocks() if isinstance(b, SpamBlock))
    for blk in spam_blocks:
        del blk.framestate  # memory saver
# ____________________________________________________________
class Recorder(object):
    """Abstract base class for objects receiving the operations
    generated while flowing through a block."""
    def append(self, operation):
        raise NotImplementedError
    def guessbool(self, ctx, w_condition):
        raise AssertionError("cannot guessbool(%s)" % (w_condition,))
class BlockRecorder(Recorder):
    # Records all generated operations into a block.
    def __init__(self, block):
        self.crnt_block = block
        # Final frame state after the operations in the block
        # If this is set, no new space op may be recorded.
        self.final_state = None
    def append(self, operation):
        self.crnt_block.operations.append(operation)
    def guessbool(self, ctx, w_condition):
        # Fork the graph on a non-constant condition: create one
        # EggBlock per outcome, schedule both, then abort recording.
        block = self.crnt_block
        links = []
        for case in [False, True]:
            egg = EggBlock([], block, case)
            ctx.pendingblocks.append(egg)
            link = Link([], egg, case)
            links.append(link)
        block.exitswitch = w_condition
        block.closeblock(*links)
        # forked the graph. Note that False comes before True by default
        # in the exits tuple so that (just in case we need it) we
        # actually have block.exits[False] = elseLink and
        # block.exits[True] = ifLink.
        raise StopFlowing
    def guessexception(self, ctx, *cases):
        # Fork the graph on the possible exception outcomes of the last
        # operation: one "no exception" exit (case None) plus one exit
        # per exception class in *cases.
        block = self.crnt_block
        links = []
        for case in [None] + list(cases):
            if case is not None:
                if case is Exception:
                    # catch-all: the exception class is a variable
                    last_exc = Variable('last_exception')
                else:
                    last_exc = Constant(case)
                last_exc_value = Variable('last_exc_value')
                vars = [last_exc, last_exc_value]
                vars2 = [Variable(), Variable()]
            else:
                vars = []
                vars2 = []
            egg = EggBlock(vars2, block, case)
            ctx.pendingblocks.append(egg)
            link = Link(vars, egg, case)
            if case is not None:
                link.extravars(last_exception=last_exc, last_exc_value=last_exc_value)
                egg.extravars(last_exception=last_exc)
            links.append(link)
        block.exitswitch = c_last_exception
        block.closeblock(*links)
        raise StopFlowing
class Replayer(Recorder):
    """Re-runs the already-recorded operations of a parent block and
    replays the recorded guess outcome, then hands control over to
    the next recorder in the chain (see EggBlock.make_recorder)."""
    def __init__(self, block, booloutcome, nextreplayer):
        self.crnt_block = block
        self.listtoreplay = block.operations
        self.booloutcome = booloutcome
        self.nextreplayer = nextreplayer
        self.index = 0
    def append(self, operation):
        # While replaying, each regenerated operation must exactly match
        # the one recorded the first time through; reuse its result.
        operation.result = self.listtoreplay[self.index].result
        assert operation == self.listtoreplay[self.index], (
            '\n'.join(["Not generating the same operation sequence:"] +
                      [str(s) for s in self.listtoreplay[:self.index]] +
                      ["  ---> | while repeating we see here"] +
                      ["       | %s" % operation] +
                      [str(s) for s in self.listtoreplay[self.index:]]))
        self.index += 1
    def guessbool(self, ctx, w_condition):
        assert self.index == len(self.listtoreplay)
        ctx.recorder = self.nextreplayer
        return self.booloutcome
    def guessexception(self, ctx, *classes):
        assert self.index == len(self.listtoreplay)
        ctx.recorder = self.nextreplayer
        outcome = self.booloutcome
        if outcome is not None:
            # Replaying an exception case: re-raise it implicitly with
            # the (possibly constant) class recorded on the EggBlock.
            egg = self.nextreplayer.crnt_block
            w_exc_cls, w_exc_value = egg.inputargs[-2:]
            if isinstance(egg.last_exception, Constant):
                w_exc_cls = egg.last_exception
                assert not isinstance(w_exc_cls.value, list)
            raise RaiseImplicit(FSException(w_exc_cls, w_exc_value))
# ____________________________________________________________
# (bytecode name, flow-space operation) pairs for unary opcodes;
# installed as FlowContext methods via unaryoperation() below.
_unary_ops = [
    ('UNARY_POSITIVE', op.pos),
    ('UNARY_NEGATIVE', op.neg),
    ('UNARY_CONVERT', op.repr),
    ('UNARY_INVERT', op.invert),
]
def unaryoperation(OPCODE, operation):
    """Build a FlowContext method implementing the unary opcode *OPCODE*.

    The generated method pops one value off the value stack, evaluates
    *operation* on it in the flow context, and pushes the result.
    """
    def UNARY_OP(self, *ignored):
        w_operand = self.popvalue()
        self.pushvalue(operation(w_operand).eval(self))
    UNARY_OP.__name__ = OPCODE
    return UNARY_OP
# (bytecode name, flow-space operation) pairs for binary and in-place
# opcodes; installed as FlowContext methods via binaryoperation() below.
_binary_ops = [
    ('BINARY_MULTIPLY', op.mul),
    ('BINARY_TRUE_DIVIDE', op.truediv),
    ('BINARY_FLOOR_DIVIDE', op.floordiv),
    ('BINARY_DIVIDE', op.div),
    ('BINARY_MODULO', op.mod),
    ('BINARY_ADD', op.add),
    ('BINARY_SUBTRACT', op.sub),
    ('BINARY_SUBSCR', op.getitem),
    ('BINARY_LSHIFT', op.lshift),
    ('BINARY_RSHIFT', op.rshift),
    ('BINARY_AND', op.and_),
    ('BINARY_XOR', op.xor),
    ('BINARY_OR', op.or_),
    ('INPLACE_MULTIPLY', op.inplace_mul),
    ('INPLACE_TRUE_DIVIDE', op.inplace_truediv),
    ('INPLACE_FLOOR_DIVIDE', op.inplace_floordiv),
    ('INPLACE_DIVIDE', op.inplace_div),
    ('INPLACE_MODULO', op.inplace_mod),
    ('INPLACE_ADD', op.inplace_add),
    ('INPLACE_SUBTRACT', op.inplace_sub),
    ('INPLACE_LSHIFT', op.inplace_lshift),
    ('INPLACE_RSHIFT', op.inplace_rshift),
    ('INPLACE_AND', op.inplace_and),
    ('INPLACE_XOR', op.inplace_xor),
    ('INPLACE_OR', op.inplace_or),
]
def binaryoperation(OPCODE, operation):
    """NOT_RPYTHON
    Build a FlowContext method implementing the binary opcode *OPCODE*.
    Pops the right operand, then the left, evaluates *operation* on the
    pair in the flow context, and pushes the result.
    """
    def BINARY_OP(self, _):
        w_rhs = self.popvalue()
        w_lhs = self.popvalue()
        self.pushvalue(operation(w_lhs, w_rhs).eval(self))
    BINARY_OP.__name__ = OPCODE
    return BINARY_OP
# (bytecode name, human-readable description) pairs for opcodes that
# are rejected outright as not valid RPython; installed as FlowContext
# methods via unsupportedoperation() below.
_unsupported_ops = [
    ('BINARY_POWER', "a ** b"),
    ('BUILD_CLASS', 'defining classes inside functions'),
    ('EXEC_STMT', 'exec statement'),
    ('STOP_CODE', '???'),
    ('STORE_NAME', 'modifying globals'),
    ('INPLACE_POWER', 'a **= b'),
    ('LOAD_LOCALS', 'locals()'),
    ('IMPORT_STAR', 'import *'),
    ('MISSING_OPCODE', '???'),
    ('DELETE_GLOBAL', 'modifying globals'),
    ('DELETE_NAME', 'modifying globals'),
    ('DELETE_ATTR', 'deleting attributes'),
]
def unsupportedoperation(OPCODE, msg):
    """Build a FlowContext method that rejects opcode *OPCODE* by
    raising FlowingError, because the construct is not valid RPython."""
    error_message = "%s is not RPython" % (msg,)
    def UNSUPPORTED(self, *ignored):
        raise FlowingError(error_message)
    UNSUPPORTED.__name__ = OPCODE
    return UNSUPPORTED
# Method names used by COMPARE_OP, indexed by the opcode's oparg
# (the same ordering as CPython's cmp_op table).
compare_method = [
    "cmp_lt",   # "<"
    "cmp_le",   # "<="
    "cmp_eq",   # "=="
    "cmp_ne",   # "!="
    "cmp_gt",   # ">"
    "cmp_ge",   # ">="
    "cmp_in",
    "cmp_not_in",
    "cmp_is",
    "cmp_is_not",
    "cmp_exc_match",
]
class FlowContext(object):
    """Abstract-interpretation state for one function being flowed.

    Mimics a CPython frame: it has a value stack, locals and a block
    stack, but instead of computing values it records every operation
    into the flow graph, forking blocks on guessed branch/exception
    outcomes (see BlockRecorder/Replayer).

    Bug fixed in this revision: JUMP_IF_TRUE_OR_POP contained an
    unreachable duplicated 'return target' that prevented the condition
    value from being popped on the false branch (compare the correct
    JUMP_IF_FALSE_OR_POP).
    """
    def __init__(self, graph, code):
        self.graph = graph
        func = graph.func
        self.pycode = code
        self.w_globals = Constant(func.__globals__)
        self.blockstack = []
        self.init_closure(func.__closure__)
        self.f_lineno = code.co_firstlineno
        self.last_offset = 0
        self.init_locals_stack(code)
        self.joinpoints = {}
    def init_closure(self, closure):
        if closure is None:
            self.closure = []
        else:
            self.closure = list(closure)
        assert len(self.closure) == len(self.pycode.co_freevars)
    def init_locals_stack(self, code):
        """
        Initialize the locals and the stack.
        The locals are ordered according to self.pycode.signature.
        """
        self.nlocals = code.co_nlocals
        # locals_w is immutable in the sense that every write should make a new
        # list first. this means FlowContext.getstate does not have to make a
        # copy of locals_w. This is a good trade-off, because changes to
        # locals_w (in STORE_FAST and DELETE_FAST) are much less common that
        # calls to getstate, which happens after every bytecode
        self.locals_w = [None] * code.co_nlocals
        self.stack = []
    @property
    def stackdepth(self):
        return len(self.stack)
    def pushvalue(self, w_object):
        self.stack.append(w_object)
    def popvalue(self):
        return self.stack.pop()
    def peekvalue(self, index_from_top=0):
        # NOTE: top of the stack is peekvalue(0).
        index = ~index_from_top
        return self.stack[index]
    def settopvalue(self, w_object, index_from_top=0):
        index = ~index_from_top
        self.stack[index] = w_object
    def popvalues(self, n):
        if n == 0:
            return []
        values_w = self.stack[-n:]
        del self.stack[-n:]
        return values_w
    def dropvaluesuntil(self, finaldepth):
        del self.stack[finaldepth:]
    def getstate(self, next_offset):
        """Snapshot the current frame state (locals are shared, not
        copied -- see the comment in init_locals_stack)."""
        return FrameState(self.locals_w, self.stack[:],
                          self.last_exception, self.blockstack[:], next_offset)
    def setstate(self, state):
        """ Reset the context to the given frame state. """
        self.locals_w = state.locals_w[:]
        self.stack = state.stack[:]
        self.last_exception = state.last_exception
        self.blockstack = state.blocklist[:]
        self._normalize_raise_signals()
    def _normalize_raise_signals(self):
        # Implicitly-raised exceptions stored on the stack become plain
        # Raise signals once control flow is resumed from a state.
        st = self.stack
        for i in range(len(st)):
            if isinstance(st[i], RaiseImplicit):
                st[i] = Raise(st[i].w_exc)
    def guessbool(self, w_condition):
        # Constants are decided immediately; variables fork the graph.
        if isinstance(w_condition, Constant):
            return w_condition.value
        return self.recorder.guessbool(self, w_condition)
    def maybe_merge(self):
        recorder = self.recorder
        if getattr(recorder, 'final_state', None) is not None:
            self.mergeblock(recorder.crnt_block, recorder.final_state)
            raise StopFlowing
    def record(self, spaceop):
        spaceop.offset = self.last_offset
        self.recorder.append(spaceop)
    def do_op(self, op):
        """Record one operation and model its possible exceptions."""
        self.maybe_merge()
        self.record(op)
        self.guessexception(op.canraise)
        return op.result
    def guessexception(self, exceptions):
        """
        Catch possible exceptions implicitly.
        """
        if not exceptions:
            return
        # Implicit exceptions are ignored unless they are caught explicitly
        if self.has_exc_handler():
            self.recorder.guessexception(self, *exceptions)
    def has_exc_handler(self):
        return any(isinstance(block, (ExceptBlock, FinallyBlock))
                   for block in self.blockstack)
    def build_flow(self):
        """Main driver: flow through every pending block until done."""
        graph = self.graph
        self.pendingblocks = collections.deque([graph.startblock])
        while self.pendingblocks:
            block = self.pendingblocks.popleft()
            if not block.dead:
                self.record_block(block)
    def record_block(self, block):
        self.setstate(block.framestate)
        next_offset = block.framestate.next_offset
        self.recorder = block.make_recorder()
        try:
            while True:
                next_offset = self.handle_bytecode(next_offset)
                self.recorder.final_state = self.getstate(next_offset)
        except StopFlowing:
            pass
        except FlowingError as exc:
            # attach ourselves so the error can show source context
            if exc.ctx is None:
                exc.ctx = self
            raise
        self.recorder = None
    def mergeblock(self, currentblock, currentstate):
        """Join 'currentstate' with any existing block at the same
        bytecode position, generalizing states where possible."""
        next_offset = currentstate.next_offset
        # can 'currentstate' be merged with one of the blocks that
        # already exist for this bytecode position?
        candidates = self.joinpoints.setdefault(next_offset, [])
        for block in candidates:
            newstate = block.framestate.union(currentstate)
            if newstate is not None:
                break
        else:
            newblock = self.make_next_block(currentblock, currentstate)
            candidates.insert(0, newblock)
            return
        if newstate.matches(block.framestate):
            outputargs = currentstate.getoutputargs(newstate)
            currentblock.closeblock(Link(outputargs, block))
            return
        newblock = SpamBlock(newstate)
        varnames = self.pycode.co_varnames
        for name, w_value in zip(varnames, newstate.locals_w):
            if isinstance(w_value, Variable):
                w_value.rename(name)
        # unconditionally link the current block to the newblock
        outputargs = currentstate.getoutputargs(newstate)
        link = Link(outputargs, newblock)
        currentblock.closeblock(link)
        # to simplify the graph, we patch the old block to point
        # directly at the new block which is its generalization
        block.dead = True
        block.operations = ()
        block.exitswitch = None
        outputargs = block.framestate.getoutputargs(newstate)
        block.recloseblock(Link(outputargs, newblock))
        candidates.remove(block)
        candidates.insert(0, newblock)
        self.pendingblocks.append(newblock)
    def make_next_block(self, block, state):
        newstate = state.copy()
        newblock = SpamBlock(newstate)
        # unconditionally link the current block to the newblock
        outputargs = state.getoutputargs(newstate)
        link = Link(outputargs, newblock)
        block.closeblock(link)
        self.pendingblocks.append(newblock)
        return newblock
    # hack for unrolling iterables, don't use this
    def replace_in_stack(self, oldvalue, newvalue):
        w_new = Constant(newvalue)
        stack_items_w = self.stack
        for i in range(self.stackdepth - 1, - 1, -1):
            w_v = stack_items_w[i]
            if isinstance(w_v, Constant):
                if w_v.value is oldvalue:
                    # replace the topmost item of the stack that is equal
                    # to 'oldvalue' with 'newvalue'.
                    stack_items_w[i] = w_new
                    break
    def handle_bytecode(self, next_offset):
        self.last_offset = next_offset
        next_offset, methodname, oparg = self.pycode.read(next_offset)
        try:
            offset = getattr(self, methodname)(oparg)
            return offset if offset is not None else next_offset
        except FlowSignal as signal:
            return self.unroll(signal)
    def unroll(self, signal):
        # pop frame blocks until one handles the signal
        while self.blockstack:
            block = self.blockstack.pop()
            if isinstance(signal, block.handles):
                return block.handle(self, signal)
            block.cleanupstack(self)
        return signal.nomoreblocks(self)
    def getlocalvarname(self, index):
        return self.pycode.co_varnames[index]
    def getconstant_w(self, index):
        return const(self.pycode.consts[index])
    def getname_u(self, index):
        return self.pycode.names[index]
    def getname_w(self, index):
        return Constant(self.pycode.names[index])
    def appcall(self, func, *args_w):
        """Call an app-level RPython function directly"""
        w_func = const(func)
        return self.do_op(op.simple_call(w_func, *args_w))
    def BAD_OPCODE(self, _):
        raise FlowingError("This operation is not RPython")
    def BREAK_LOOP(self, oparg):
        raise Break
    def CONTINUE_LOOP(self, startofloop):
        raise Continue(startofloop)
    def not_(self, w_obj):
        w_bool = op.bool(w_obj).eval(self)
        return const(not self.guessbool(w_bool))
    def UNARY_NOT(self, _):
        w_obj = self.popvalue()
        self.pushvalue(self.not_(w_obj))
    def cmp_lt(self, w_1, w_2):
        return op.lt(w_1, w_2).eval(self)
    def cmp_le(self, w_1, w_2):
        return op.le(w_1, w_2).eval(self)
    def cmp_eq(self, w_1, w_2):
        return op.eq(w_1, w_2).eval(self)
    def cmp_ne(self, w_1, w_2):
        return op.ne(w_1, w_2).eval(self)
    def cmp_gt(self, w_1, w_2):
        return op.gt(w_1, w_2).eval(self)
    def cmp_ge(self, w_1, w_2):
        return op.ge(w_1, w_2).eval(self)
    def cmp_in(self, w_1, w_2):
        return op.contains(w_2, w_1).eval(self)
    def cmp_not_in(self, w_1, w_2):
        return self.not_(self.cmp_in(w_1, w_2))
    def cmp_is(self, w_1, w_2):
        return op.is_(w_1, w_2).eval(self)
    def cmp_is_not(self, w_1, w_2):
        return self.not_(op.is_(w_1, w_2).eval(self))
    def exception_match(self, w_exc_type, w_check_class):
        """Checks if the given exception type matches 'w_check_class'."""
        if not isinstance(w_check_class, Constant):
            raise FlowingError("Non-constant except guard.")
        check_class = w_check_class.value
        if not isinstance(check_class, tuple):
            # the simple case
            if issubclass(check_class, (NotImplementedError, AssertionError)):
                raise FlowingError(
                    "Catching NotImplementedError, AssertionError, or a "
                    "subclass is not valid in RPython (%r)" % (check_class,))
            return self.guessbool(op.issubtype(w_exc_type, w_check_class).eval(self))
        # special case for StackOverflow (see rlib/rstackovf.py)
        if check_class == rstackovf.StackOverflow:
            w_real_class = const(rstackovf._StackOverflow)
            return self.guessbool(op.issubtype(w_exc_type, w_real_class).eval(self))
        # checking a tuple of classes
        for klass in w_check_class.value:
            if self.exception_match(w_exc_type, const(klass)):
                return True
        return False
    def cmp_exc_match(self, w_1, w_2):
        return const(self.exception_match(w_1, w_2))
    def COMPARE_OP(self, testnum):
        w_2 = self.popvalue()
        w_1 = self.popvalue()
        w_result = getattr(self, compare_method[testnum])(w_1, w_2)
        self.pushvalue(w_result)
    def exc_from_raise(self, w_arg1, w_arg2):
        """
        Create a wrapped exception from the arguments of a raise statement.
        Returns an FSException object whose w_value is an instance of w_type.
        """
        from rpython.rlib.debug import ll_assert_not_none
        check_not_none = False
        w_is_type = op.isinstance(w_arg1, const(type)).eval(self)
        if self.guessbool(w_is_type):
            # this is for all cases of the form (Class, something)
            if self.guessbool(op.is_(w_arg2, w_None).eval(self)):
                # raise Type: we assume we have to instantiate Type
                w_value = op.simple_call(w_arg1).eval(self)
            else:
                w_valuetype = op.type(w_arg2).eval(self)
                if self.guessbool(op.issubtype(w_valuetype, w_arg1).eval(self)):
                    # raise Type, Instance: let etype be the exact type of value
                    w_value = w_arg2
                    check_not_none = True
                else:
                    # raise Type, X: assume X is the constructor argument
                    w_value = op.simple_call(w_arg1, w_arg2).eval(self)
        else:
            # the only case left here is (inst, None), from a 'raise inst'.
            if not self.guessbool(op.is_(w_arg2, const(None)).eval(self)):
                exc = TypeError("instance exception may not have a "
                                "separate value")
                raise Raise(const(exc))
            w_value = w_arg1
            check_not_none = True
        if check_not_none:
            w_value = op.simple_call(const(ll_assert_not_none),
                                     w_value).eval(self)
        w_type = op.type(w_value).eval(self)
        return FSException(w_type, w_value)
    def RAISE_VARARGS(self, nbargs):
        if nbargs == 0:
            if self.last_exception is not None:
                w_exc = self.last_exception
            else:
                w_exc = const(TypeError(
                    "raise: no active exception to re-raise"))
            raise Raise(w_exc)
        if nbargs >= 3:
            self.popvalue()  # tracebacks are not supported; discard
        if nbargs >= 2:
            w_value = self.popvalue()
            w_type = self.popvalue()
            operror = self.exc_from_raise(w_type, w_value)
        else:
            w_type = self.popvalue()
            operror = self.exc_from_raise(w_type, w_None)
        raise Raise(operror)
    def import_name(self, name, glob=None, loc=None, frm=None, level=-1):
        try:
            mod = __import__(name, glob, loc, frm, level)
        except ImportError as e:
            raise Raise(const(e))
        return const(mod)
    def IMPORT_NAME(self, nameindex):
        modulename = self.getname_u(nameindex)
        glob = self.w_globals.value
        fromlist = self.popvalue().value
        level = self.popvalue().value
        w_obj = self.import_name(modulename, glob, None, fromlist, level)
        self.pushvalue(w_obj)
    def import_from(self, w_module, w_name):
        assert isinstance(w_module, Constant)
        assert isinstance(w_name, Constant)
        try:
            return op.getattr(w_module, w_name).eval(self)
        except FlowingError:
            exc = ImportError("cannot import name '%s'" % w_name.value)
            raise Raise(const(exc))
    def IMPORT_FROM(self, nameindex):
        w_name = self.getname_w(nameindex)
        w_module = self.peekvalue()
        self.pushvalue(self.import_from(w_module, w_name))
    def RETURN_VALUE(self, oparg):
        w_returnvalue = self.popvalue()
        raise Return(w_returnvalue)
    def END_FINALLY(self, oparg):
        # unlike CPython, there are two statically distinct cases: the
        # END_FINALLY might be closing an 'except' block or a 'finally'
        # block.  In the first case, the stack contains three items:
        #   [exception type we are now handling]
        #   [exception value we are now handling]
        #   [Raise]
        # In the case of a finally: block, the stack contains only one
        # item (unlike CPython which can have 1, 2 or 3 items):
        #   [subclass of FlowSignal]
        w_top = self.popvalue()
        if w_top == w_None:
            # finally: block with no unroller active
            return
        elif isinstance(w_top, FlowSignal):
            # case of a finally: block
            raise w_top
        else:
            # case of an except: block.  We popped the exception type
            self.popvalue()        # Now we pop the exception value
            signal = self.popvalue()
            raise signal
    def POP_BLOCK(self, oparg):
        block = self.blockstack.pop()
        block.cleanupstack(self)  # the block knows how to clean up the value stack
    def JUMP_ABSOLUTE(self, jumpto):
        return jumpto
    def YIELD_VALUE(self, _):
        assert self.pycode.is_generator
        w_result = self.popvalue()
        op.yield_(w_result).eval(self)
        # XXX yield expressions not supported. This will blow up if the value
        # isn't popped straightaway.
        self.pushvalue(None)
    PRINT_EXPR = BAD_OPCODE
    PRINT_ITEM_TO = BAD_OPCODE
    PRINT_NEWLINE_TO = BAD_OPCODE
    def PRINT_ITEM(self, oparg):
        w_item = self.popvalue()
        w_s = op.str(w_item).eval(self)
        self.appcall(rpython_print_item, w_s)
    def PRINT_NEWLINE(self, oparg):
        self.appcall(rpython_print_newline)
    def JUMP_FORWARD(self, target):
        return target
    def JUMP_IF_FALSE(self, target):
        # Python <= 2.6 only
        w_cond = self.peekvalue()
        if not self.guessbool(op.bool(w_cond).eval(self)):
            return target
    def JUMP_IF_TRUE(self, target):
        # Python <= 2.6 only
        w_cond = self.peekvalue()
        if self.guessbool(op.bool(w_cond).eval(self)):
            return target
    def POP_JUMP_IF_FALSE(self, target):
        w_value = self.popvalue()
        if not self.guessbool(op.bool(w_value).eval(self)):
            return target
    def POP_JUMP_IF_TRUE(self, target):
        w_value = self.popvalue()
        if self.guessbool(op.bool(w_value).eval(self)):
            return target
    def JUMP_IF_FALSE_OR_POP(self, target):
        w_value = self.peekvalue()
        if not self.guessbool(op.bool(w_value).eval(self)):
            return target
        self.popvalue()
    def JUMP_IF_TRUE_OR_POP(self, target):
        w_value = self.peekvalue()
        if self.guessbool(op.bool(w_value).eval(self)):
            return target
        # BUG FIX: a duplicated, unreachable 'return target' used to sit
        # here, so the condition value was never popped on the false
        # branch, corrupting the simulated value stack.  Mirrors
        # JUMP_IF_FALSE_OR_POP above.
        self.popvalue()
    def JUMP_IF_NOT_DEBUG(self, target):
        pass
    def GET_ITER(self, oparg):
        w_iterable = self.popvalue()
        w_iterator = op.iter(w_iterable).eval(self)
        self.pushvalue(w_iterator)
    def FOR_ITER(self, target):
        w_iterator = self.peekvalue()
        self.blockstack.append(IterBlock(self, target))
        w_nextitem = op.next(w_iterator).eval(self)
        self.blockstack.pop()
        self.pushvalue(w_nextitem)
    def SETUP_LOOP(self, target):
        block = LoopBlock(self, target)
        self.blockstack.append(block)
    def SETUP_EXCEPT(self, target):
        block = ExceptBlock(self, target)
        self.blockstack.append(block)
    def SETUP_FINALLY(self, target):
        block = FinallyBlock(self, target)
        self.blockstack.append(block)
    def SETUP_WITH(self, target):
        # A simpler version than the 'real' 2.7 one:
        # directly call manager.__enter__(), don't use special lookup functions
        # which don't make sense on the RPython type system.
        w_manager = self.peekvalue()
        w_exit = op.getattr(w_manager, const("__exit__")).eval(self)
        self.settopvalue(w_exit)
        w_enter = op.getattr(w_manager, const('__enter__')).eval(self)
        w_result = op.simple_call(w_enter).eval(self)
        block = WithBlock(self, target)
        self.blockstack.append(block)
        self.pushvalue(w_result)
    def WITH_CLEANUP(self, oparg):
        # Note: RPython context managers receive None in lieu of tracebacks
        # and cannot suppress the exception.
        unroller = self.popvalue()
        w_exitfunc = self.popvalue()
        self.pushvalue(unroller)
        if isinstance(unroller, Raise):
            w_exc = unroller.w_exc
            # The annotator won't allow to merge exception types with None.
            # Replace it with the exception value...
            op.simple_call(w_exitfunc, w_exc.w_value, w_exc.w_value, w_None
                           ).eval(self)
        else:
            op.simple_call(w_exitfunc, w_None, w_None, w_None).eval(self)
    def LOAD_FAST(self, varindex):
        w_value = self.locals_w[varindex]
        if w_value is None:
            raise FlowingError("Local variable referenced before assignment")
        self.pushvalue(w_value)
    def LOAD_CONST(self, constindex):
        w_const = self.getconstant_w(constindex)
        self.pushvalue(w_const)
    def find_global(self, w_globals, varname):
        try:
            value = w_globals.value[varname]
        except KeyError:
            # not in the globals, now look in the built-ins
            try:
                value = getattr(__builtin__, varname)
            except AttributeError:
                raise FlowingError("global name '%s' is not defined" % varname)
        return const(value)
    def LOAD_GLOBAL(self, nameindex):
        w_result = self.find_global(self.w_globals, self.getname_u(nameindex))
        self.pushvalue(w_result)
    LOAD_NAME = LOAD_GLOBAL
    def LOAD_ATTR(self, nameindex):
        "obj.attributename"
        w_obj = self.popvalue()
        w_attributename = self.getname_w(nameindex)
        w_value = op.getattr(w_obj, w_attributename).eval(self)
        self.pushvalue(w_value)
    LOOKUP_METHOD = LOAD_ATTR
    def LOAD_DEREF(self, varindex):
        cell = self.closure[varindex]
        try:
            content = cell.cell_contents
        except ValueError:
            name = self.pycode.co_freevars[varindex]
            raise FlowingError("Undefined closure variable '%s'" % name)
        self.pushvalue(const(content))
    def STORE_FAST(self, varindex):
        w_newvalue = self.popvalue()
        assert w_newvalue is not None
        # copy-on-write of locals_w (see init_locals_stack)
        self.locals_w = self.locals_w[:]
        self.locals_w[varindex] = w_newvalue
        if isinstance(w_newvalue, Variable):
            w_newvalue.rename(self.getlocalvarname(varindex))
    def STORE_GLOBAL(self, nameindex):
        varname = self.getname_u(nameindex)
        raise FlowingError(
            "Attempting to modify global variable %r." % (varname))
    def POP_TOP(self, oparg):
        self.popvalue()
    def ROT_TWO(self, oparg):
        w_1 = self.popvalue()
        w_2 = self.popvalue()
        self.pushvalue(w_1)
        self.pushvalue(w_2)
    def ROT_THREE(self, oparg):
        w_1 = self.popvalue()
        w_2 = self.popvalue()
        w_3 = self.popvalue()
        self.pushvalue(w_1)
        self.pushvalue(w_3)
        self.pushvalue(w_2)
    def ROT_FOUR(self, oparg):
        w_1 = self.popvalue()
        w_2 = self.popvalue()
        w_3 = self.popvalue()
        w_4 = self.popvalue()
        self.pushvalue(w_1)
        self.pushvalue(w_4)
        self.pushvalue(w_3)
        self.pushvalue(w_2)
    def DUP_TOP(self, oparg):
        w_1 = self.peekvalue()
        self.pushvalue(w_1)
    def DUP_TOPX(self, itemcount):
        delta = itemcount - 1
        while True:
            itemcount -= 1
            if itemcount < 0:
                break
            w_value = self.peekvalue(delta)
            self.pushvalue(w_value)
    # Install the generic unary/binary/unsupported opcode handlers.
    for OPCODE, op in _unary_ops:
        locals()[OPCODE] = unaryoperation(OPCODE, op)
    for OPCODE, op in _binary_ops:
        locals()[OPCODE] = binaryoperation(OPCODE, op)
    for OPCODE, op in _unsupported_ops:
        locals()[OPCODE] = unsupportedoperation(OPCODE, op)
    def BUILD_LIST_FROM_ARG(self, _):
        # This opcode was added with pypy-1.8.  Here is a simpler
        # version, enough for annotation.
        last_val = self.popvalue()
        self.pushvalue(op.newlist().eval(self))
        self.pushvalue(last_val)
    def call_function(self, oparg, w_star=None, w_starstar=None):
        if w_starstar is not None:
            raise FlowingError("Dict-unpacking is not RPython")
        n_arguments = oparg & 0xff
        n_keywords = (oparg >> 8) & 0xff
        keywords = {}
        for _ in range(n_keywords):
            w_value = self.popvalue()
            w_key = self.popvalue()
            key = w_key.value
            keywords[key] = w_value
        arguments = self.popvalues(n_arguments)
        args = CallSpec(arguments, keywords, w_star)
        w_function = self.popvalue()
        if args.keywords or isinstance(args.w_stararg, Variable):
            shape, args_w = args.flatten()
            hlop = op.call_args(w_function, Constant(shape), *args_w)
        else:
            hlop = op.simple_call(w_function, *args.as_list())
        self.pushvalue(hlop.eval(self))
    def CALL_FUNCTION(self, oparg):
        self.call_function(oparg)
    CALL_METHOD = CALL_FUNCTION
    def CALL_FUNCTION_VAR(self, oparg):
        w_varargs = self.popvalue()
        self.call_function(oparg, w_varargs)
    def CALL_FUNCTION_KW(self, oparg):
        w_varkw = self.popvalue()
        self.call_function(oparg, None, w_varkw)
    def CALL_FUNCTION_VAR_KW(self, oparg):
        w_varkw = self.popvalue()
        w_varargs = self.popvalue()
        self.call_function(oparg, w_varargs, w_varkw)
    def newfunction(self, w_code, defaults_w):
        if not all(isinstance(value, Constant) for value in defaults_w):
            raise FlowingError("Dynamically created function must"
                               " have constant default values.")
        code = w_code.value
        globals = self.w_globals.value
        defaults = tuple([default.value for default in defaults_w])
        fn = types.FunctionType(code, globals, code.co_name, defaults)
        return Constant(fn)
    def MAKE_FUNCTION(self, numdefaults):
        w_codeobj = self.popvalue()
        defaults = self.popvalues(numdefaults)
        fn = self.newfunction(w_codeobj, defaults)
        self.pushvalue(fn)
    def STORE_ATTR(self, nameindex):
        "obj.attributename = newvalue"
        w_attributename = self.getname_w(nameindex)
        w_obj = self.popvalue()
        w_newvalue = self.popvalue()
        op.setattr(w_obj, w_attributename, w_newvalue).eval(self)
    def unpack_sequence(self, w_iterable, expected_length):
        w_len = op.len(w_iterable).eval(self)
        w_correct = op.eq(w_len, const(expected_length)).eval(self)
        if not self.guessbool(op.bool(w_correct).eval(self)):
            w_exc = self.exc_from_raise(const(ValueError), const(None))
            raise Raise(w_exc)
        return [op.getitem(w_iterable, const(i)).eval(self)
                for i in range(expected_length)]
    def UNPACK_SEQUENCE(self, itemcount):
        w_iterable = self.popvalue()
        items = self.unpack_sequence(w_iterable, itemcount)
        for w_item in reversed(items):
            self.pushvalue(w_item)
    def slice(self, w_start, w_end):
        w_obj = self.popvalue()
        w_result = op.getslice(w_obj, w_start, w_end).eval(self)
        self.pushvalue(w_result)
    def SLICE_0(self, oparg):
        self.slice(w_None, w_None)
    def SLICE_1(self, oparg):
        w_start = self.popvalue()
        self.slice(w_start, w_None)
    def SLICE_2(self, oparg):
        w_end = self.popvalue()
        self.slice(w_None, w_end)
    def SLICE_3(self, oparg):
        w_end = self.popvalue()
        w_start = self.popvalue()
        self.slice(w_start, w_end)
    def storeslice(self, w_start, w_end):
        w_obj = self.popvalue()
        w_newvalue = self.popvalue()
        op.setslice(w_obj, w_start, w_end, w_newvalue).eval(self)
    def STORE_SLICE_0(self, oparg):
        self.storeslice(w_None, w_None)
    def STORE_SLICE_1(self, oparg):
        w_start = self.popvalue()
        self.storeslice(w_start, w_None)
    def STORE_SLICE_2(self, oparg):
        w_end = self.popvalue()
        self.storeslice(w_None, w_end)
    def STORE_SLICE_3(self, oparg):
        w_end = self.popvalue()
        w_start = self.popvalue()
        self.storeslice(w_start, w_end)
    def deleteslice(self, w_start, w_end):
        w_obj = self.popvalue()
        op.delslice(w_obj, w_start, w_end).eval(self)
    def DELETE_SLICE_0(self, oparg):
        self.deleteslice(w_None, w_None)
    def DELETE_SLICE_1(self, oparg):
        w_start = self.popvalue()
        self.deleteslice(w_start, w_None)
    def DELETE_SLICE_2(self, oparg):
        w_end = self.popvalue()
        self.deleteslice(w_None, w_end)
    def DELETE_SLICE_3(self, oparg):
        w_end = self.popvalue()
        w_start = self.popvalue()
        self.deleteslice(w_start, w_end)
    def LIST_APPEND(self, oparg):
        w_value = self.popvalue()
        if sys.version_info < (2, 7):
            w_list = self.popvalue()
        else:
            w_list = self.peekvalue(oparg - 1)
        w_append_meth = op.getattr(w_list, const('append')).eval(self)
        op.simple_call(w_append_meth, w_value).eval(self)
    def DELETE_FAST(self, varindex):
        if self.locals_w[varindex] is None:
            varname = self.getlocalvarname(varindex)
            message = "local variable '%s' referenced before assignment"
            raise UnboundLocalError(message, varname)
        # copy-on-write of locals_w (see init_locals_stack)
        self.locals_w = self.locals_w[:]
        self.locals_w[varindex] = None
    def STORE_MAP(self, oparg):
        w_key = self.popvalue()
        w_value = self.popvalue()
        w_dict = self.peekvalue()
        op.setitem(w_dict, w_key, w_value).eval(self)
    def STORE_SUBSCR(self, oparg):
        "obj[subscr] = newvalue"
        w_subscr = self.popvalue()
        w_obj = self.popvalue()
        w_newvalue = self.popvalue()
        op.setitem(w_obj, w_subscr, w_newvalue).eval(self)
    def BUILD_SLICE(self, numargs):
        if numargs == 3:
            w_step = self.popvalue()
        elif numargs == 2:
            w_step = w_None
        else:
            raise BytecodeCorruption
        w_end = self.popvalue()
        w_start = self.popvalue()
        w_slice = op.newslice(w_start, w_end, w_step).eval(self)
        self.pushvalue(w_slice)
    def DELETE_SUBSCR(self, oparg):
        "del obj[subscr]"
        w_subscr = self.popvalue()
        w_obj = self.popvalue()
        op.delitem(w_obj, w_subscr).eval(self)
    def BUILD_TUPLE(self, itemcount):
        items = self.popvalues(itemcount)
        w_tuple = op.newtuple(*items).eval(self)
        self.pushvalue(w_tuple)
    def BUILD_LIST(self, itemcount):
        items = self.popvalues(itemcount)
        w_list = op.newlist(*items).eval(self)
        self.pushvalue(w_list)
    def BUILD_MAP(self, itemcount):
        w_dict = op.newdict().eval(self)
        self.pushvalue(w_dict)
    def NOP(self, *args):
        pass
    # XXX Unimplemented 2.7 opcodes ----------------
    # Set literals, set comprehensions
    def BUILD_SET(self, oparg):
        raise NotImplementedError("BUILD_SET")
    def SET_ADD(self, oparg):
        raise NotImplementedError("SET_ADD")
    # Dict comprehensions
    def MAP_ADD(self, oparg):
        raise NotImplementedError("MAP_ADD")
    # Closures
    STORE_DEREF = BAD_OPCODE
    LOAD_CLOSURE = BAD_OPCODE
    MAKE_CLOSURE = BAD_OPCODE
### Frame blocks ###
class FlowSignal(Exception):
    """Abstract base class for translator-level objects that tell the
    interpreter to alter control flow and unwind the block stack.

    The concrete subclasses mirror the WHY_XXX values of the why_code
    enumeration in ceval.c:

        WHY_NOT,       OK, not this one :-)
        WHY_EXCEPTION, Raise
        WHY_RERAISE,   implemented differently, see Reraise
        WHY_RETURN,    Return
        WHY_BREAK,     Break
        WHY_CONTINUE,  Continue
        WHY_YIELD      not needed
    """

    def nomoreblocks(self, ctx):
        # A signal escaping past every frame block means the bytecode is
        # malformed; subclasses that can legitimately escape override this.
        raise BytecodeCorruption("misplaced bytecode - should not return")

    def __eq__(self, other):
        # Equal iff same concrete class and same argument payload.
        return type(self) is type(other) and self.args == other.args
class Return(FlowSignal):
    """Signals a 'return' statement.

    Argument is the wrapped object to return.
    """

    def __init__(self, w_value):
        # w_value: the wrapped return value.
        self.w_value = w_value

    def nomoreblocks(self, ctx):
        # A return escaping every frame block closes the current basic
        # block with a link to the graph's return block and stops
        # recording this code path.
        w_result = self.w_value
        link = Link([w_result], ctx.graph.returnblock)
        ctx.recorder.crnt_block.closeblock(link)
        raise StopFlowing

    @property
    def args(self):
        # Canonical payload used by FlowSignal.__eq__ and rebuild().
        return [self.w_value]

    @staticmethod
    def rebuild(w_value):
        return Return(w_value)
class Raise(FlowSignal):
    """Signals an application-level exception
    (i.e. an OperationException)."""

    def __init__(self, w_exc):
        # w_exc: wrapped exception (carries w_type and w_value).
        self.w_exc = w_exc

    def nomoreblocks(self, ctx):
        w_exc = self.w_exc
        # An uncaught ImportError is surfaced immediately at analysis
        # time, with the original message when it is a constant.
        if w_exc.w_type == const(ImportError):
            msg = 'ImportError is raised in RPython: %s' % (
                getattr(w_exc.w_value, 'value', '<not a constant message>'),)
            raise ImportError(msg)
        # Otherwise: close the current block with a link to the graph's
        # exception block and stop recording this path.
        link = Link([w_exc.w_type, w_exc.w_value], ctx.graph.exceptblock)
        ctx.recorder.crnt_block.closeblock(link)
        raise StopFlowing

    @property
    def args(self):
        # Canonical payload used by FlowSignal.__eq__ and rebuild().
        return [self.w_exc.w_type, self.w_exc.w_value]

    @classmethod
    def rebuild(cls, w_type, w_value):
        return cls(FSException(w_type, w_value))
class RaiseImplicit(Raise):
    """Signals an exception raised implicitly"""

    def nomoreblocks(self, ctx):
        # An implicitly-raised exception escaping the whole function is
        # treated as a bug in the analysed code: replace it with an
        # AssertionError naming the exception that "shouldn't occur".
        w_exc = self.w_exc
        if isinstance(w_exc.w_type, Constant):
            exc_cls = w_exc.w_type.value
        else:
            exc_cls = Exception
        msg = "implicit %s shouldn't occur" % exc_cls.__name__
        w_type = Constant(AssertionError)
        w_value = Constant(AssertionError(msg))
        link = Link([w_type, w_value], ctx.graph.exceptblock)
        ctx.recorder.crnt_block.closeblock(link)
        raise StopFlowing
class Break(FlowSignal):
    """Signals a 'break' statement."""

    @property
    def args(self):
        # break carries no payload.
        return []

    @staticmethod
    def rebuild():
        return Break.singleton

# Break is stateless, so one shared instance suffices.
Break.singleton = Break()
class Continue(FlowSignal):
    """Signals a 'continue' statement.

    Argument is the bytecode position of the beginning of the loop."""

    def __init__(self, jump_to):
        # jump_to: bytecode offset of the start of the enclosing loop.
        self.jump_to = jump_to

    @property
    def args(self):
        # Payload is the (wrapped) jump target.
        return [const(self.jump_to)]

    @staticmethod
    def rebuild(w_jump_to):
        return Continue(w_jump_to.value)
class FrameBlock(object):
    """Abstract base class for frame blocks from the blockstack,
    used by the SETUP_XXX and POP_BLOCK opcodes.
    """

    def __init__(self, ctx, handlerposition):
        self.handlerposition = handlerposition
        self.stackdepth = ctx.stackdepth

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return (self.handlerposition == other.handlerposition
                and self.stackdepth == other.stackdepth)

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash((self.handlerposition, self.stackdepth))

    def cleanupstack(self, ctx):
        # Drop everything pushed on the value stack since the block began.
        ctx.dropvaluesuntil(self.stackdepth)

    def handle(self, ctx, unroller):
        # Subclasses decide how to react to an unwinding FlowSignal.
        raise NotImplementedError
class LoopBlock(FrameBlock):
    """A loop block. Stores the end-of-loop pointer in case of 'break'."""

    # Signals this block knows how to absorb.
    handles = (Break, Continue)

    def handle(self, ctx, unroller):
        if isinstance(unroller, Continue):
            # re-push the loop block without cleaning up the value stack,
            # and jump to the beginning of the loop, stored in the
            # exception's argument
            ctx.blockstack.append(self)
            return unroller.jump_to
        else:
            # jump to the end of the loop
            self.cleanupstack(ctx)
            return self.handlerposition
class ExceptBlock(FrameBlock):
    """A try:except: block. Stores the position of the exception handler."""

    # Signals this block knows how to absorb.
    handles = Raise

    def handle(self, ctx, unroller):
        # push the exception to the value stack for inspection by the
        # exception handler (the code after the except:)
        self.cleanupstack(ctx)
        assert isinstance(unroller, Raise)
        w_exc = unroller.w_exc
        # the stack setup is slightly different than in CPython:
        # instead of the traceback, we store the unroller object,
        # wrapped.  Push order (bottom to top): unroller, value, type.
        ctx.pushvalue(unroller)
        ctx.pushvalue(w_exc.w_value)
        ctx.pushvalue(w_exc.w_type)
        ctx.last_exception = w_exc
        return self.handlerposition  # jump to the handler
class IterBlock(ExceptBlock):
    """A pseudo-block to catch the StopIteration inside FOR_ITER"""

    def handle(self, ctx, unroller):
        w_exc = unroller.w_exc
        if ctx.exception_match(w_exc.w_type, const(StopIteration)):
            # Normal end of iteration: drop the exhausted iterator and
            # jump to the code that follows the loop.
            ctx.popvalue()
            return self.handlerposition
        else:
            # Any other exception keeps unrolling through outer blocks.
            return ctx.unroll(unroller)
class FinallyBlock(FrameBlock):
    """A try:finally: block. Stores the position of the exception handler."""

    # A finally: handler runs for every kind of abnormal exit.
    handles = FlowSignal

    def handle(self, ctx, unroller):
        # any abnormal reason for unrolling a finally: triggers the end of
        # the block unrolling and the entering the finally: handler.
        self.cleanupstack(ctx)
        # The signal is pushed so END_FINALLY can re-raise it afterwards.
        ctx.pushvalue(unroller)
        return self.handlerposition  # jump to the handler
class WithBlock(FinallyBlock):
    """A with: block; unwinds exactly like a try:finally: block."""

    def handle(self, ctx, unroller):
        return FinallyBlock.handle(self, ctx, unroller)
| mozillazg/pypy | rpython/flowspace/flowcontext.py | flowcontext.py | py | 45,650 | python | en | code | 430 | github-code | 36 | [
{
"api_name": "rpython.flowspace.model.const",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "rpython.tool.error.source_lines",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "rpython.flowspace.model.Block",
"line_number": 38,
"usage_type": "name"
}... |
3678250480 | from typing import Any, List, Text
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.tokenizers.tokenizer import Token, Tokenizer
from rasa.nlu.training_data import Message, TrainingData
from rasa.nlu.constants import TEXT_ATTRIBUTE, TOKENS_NAMES, MESSAGE_ATTRIBUTES
from rasa.utils.io import DEFAULT_ENCODING
class MitieTokenizer(Tokenizer):
    """Tokenizer component backed by ``mitie.tokenize_with_offsets``.

    MITIE tokenizes the UTF-8 *byte* string, so byte offsets are mapped
    back to character offsets before building ``Token`` objects.
    """

    provides = [TOKENS_NAMES[attr] for attr in MESSAGE_ATTRIBUTES]

    defaults = {
        # add __CLS__ token to the end of the list of tokens
        "use_cls_token": False
    }

    @classmethod
    def required_packages(cls) -> List[Text]:
        return ["mitie"]

    def train(
        self, training_data: TrainingData, config: RasaNLUModelConfig, **kwargs: Any
    ) -> None:
        """Attach token lists to every annotated attribute of each example."""
        for example in training_data.training_examples:
            for attribute in MESSAGE_ATTRIBUTES:
                content = example.get(attribute)
                if content is None:
                    continue
                example.set(
                    TOKENS_NAMES[attribute], self.tokenize(content, attribute)
                )

    def process(self, message: Message, **kwargs: Any) -> None:
        """Tokenize the text attribute of an incoming message."""
        message.set(TOKENS_NAMES[TEXT_ATTRIBUTE], self.tokenize(message.text))

    def _token_from_offset(
        self, text: bytes, offset: int, encoded_sentence: bytes
    ) -> Token:
        # Convert the MITIE byte offset into a character offset.
        char_offset = self._byte_to_char_offset(encoded_sentence, offset)
        return Token(text.decode(DEFAULT_ENCODING), char_offset)

    def tokenize(self, text: Text, attribute: Text = TEXT_ATTRIBUTE) -> List[Token]:
        """Return the MITIE tokens of *text* (plus the optional CLS token)."""
        import mitie

        encoded = text.encode(DEFAULT_ENCODING)
        tokens = [
            self._token_from_offset(token_bytes, offset, encoded)
            for token_bytes, offset in mitie.tokenize_with_offsets(encoded)
        ]
        self.add_cls_token(tokens, attribute)
        return tokens

    @staticmethod
    def _byte_to_char_offset(text: bytes, byte_offset: int) -> int:
        # Characters preceding the byte offset = length of the decoded prefix.
        return len(text[:byte_offset].decode(DEFAULT_ENCODING))
| msamogh/rasa-frames | rasa/nlu/tokenizers/mitie_tokenizer.py | mitie_tokenizer.py | py | 2,085 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "rasa.nlu.tokenizers.tokenizer.Tokenizer",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "rasa.nlu.constants.TOKENS_NAMES",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "rasa.nlu.constants.MESSAGE_ATTRIBUTES",
"line_number": 13,
"usage... |
37803606526 | """
Wrapper Class for the Github Secrets Filler
"""
import os
import sys
import dotenv
import github
from ..GithubEnvironmentSecret import GithubEnvironmentSecret
class Filler:
    """Pushes the key/value pairs of a dotenv file into the secrets of a
    GitHub repository environment.
    """

    # Mapping of KEY -> value parsed from the dotenv file.
    dotenv_values = None
    # github.Repository handle for the target repository.
    github_repository = None
    # Name of the GitHub deployment environment receiving the secrets.
    environment = None
    # GithubEnvironmentSecret wrapper around the secret API calls.
    gh_env_secret = None

    def __init__(self, args):
        '''
        Filler Constructor, takes parsed arguments from the cmd
        '''
        self.load_api_token(args)
        self.load_dotenv_values(args.dotenv_file)
        self.load_github_repository(args.repository_name)
        self.load_environment(args.environment)
        self.load_github_environment_secret()

    def load_github_environment_secret(self):
        '''
        Init the GithubEnvironmentSecret that wraps custom API calls to Github
        '''
        self.gh_env_secret = GithubEnvironmentSecret(
            repository=self.github_repository,
            environment=self.environment
        )

    def load_api_token(self, args):
        '''
        Handle the GITHUB_TOKEN environment variable
        Exits the program if not found
        '''
        # A token passed on the command line overrides the environment.
        if args.github_token:
            os.environ["GITHUB_TOKEN"] = args.github_token
        if not os.getenv('GITHUB_TOKEN'):
            print("Could not retrieve GITHUB_TOKEN")
            sys.exit(1)

    def load_dotenv_values(self, dotenv_file):
        '''
        Load the values from the dotenv_file
        Exits the program if the file is missing or cannot be parsed
        '''
        if not os.path.isfile(dotenv_file):
            print(f"Could not open DOTENV file {dotenv_file}")
            sys.exit(1)
        try:
            self.dotenv_values = dotenv.dotenv_values(dotenv_file)
        except Exception as exception:
            print(f"Could not load DOTENV file : {str(exception)}")
            sys.exit(1)

    def load_github_repository(self, repository_name):
        '''
        Try to fetch the Github Repository with the Token
        Exits the program if the repository cannot be retrieved
        '''
        github_connector = github.Github(os.getenv("GITHUB_TOKEN"))
        try:
            self.github_repository = github_connector \
                .get_repo(repository_name)
        except github.GithubException as exception:
            print(
                f"Could not retrieve Repository {repository_name} : "
                f"{str(exception)}"
            )
            sys.exit(1)

    def load_environment(self, environment):
        '''
        Load the environment name
        '''
        self.environment = environment

    def create_secrets(self):
        '''
        Creates or updates secrets for Project Environment
        '''
        for dotenv_key in self.dotenv_values:
            dotenv_val = self.dotenv_values[dotenv_key]
            # Only the log message differs: add_secret below both
            # creates and updates a secret.
            if self.gh_env_secret.secret_exists(dotenv_key):
                print(f" » Updating Secret {dotenv_key} ...")
            else:
                print(f" » Creating Secret {dotenv_key} ...")
            self.gh_env_secret.add_secret(
                key=dotenv_key,
                value=dotenv_val
            )
| ArteGEIE/github-secrets-filler | bin/libraries/filler/Filler.py | Filler.py | py | 2,996 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "GithubEnvironmentSecret.GithubEnvironmentSecret",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "os.getenv",
"line_number": 51,
"usage_type": "call"
},
{
"api_nam... |
70472354663 | '''
Created on Aug 1, 2019
@author: jsaavedr
Reading an image
'''
import pai_io
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Path of the grayscale sample image used by this example.
    filename = '../images/gray/lion_gray.jpg'
    # Load the image as a single-channel (grayscale) array.
    image = pai_io.imread(filename, as_gray = True)
    print('shape: {}'.format(image.shape))
    ##showing image
    plt.imshow (image, cmap = 'gray')
    plt.title('image')
    plt.axis('off')
    plt.show()
{
"api_name": "pai_io.imread",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "matplotlib.p... |
40294323346 | import time
import random
import lxc
import code
import string
from multiprocessing.pool import ThreadPool
import os
from os.path import join, isfile, exists
import json
LXC_BASE = "tmpl_apach"
MOUNTPOINT = "files"
LXC_IP = "10.10.13.7"
##### GRADER FUNCTIONS
import http.client
ip1 = '127.0.0.1'
ip2 = LXC_IP
def DisableSSLv3():
    """Check that the local HTTPS server refuses SSLv3 but accepts TLSv1.

    Returns:
        True only when an SSLv3 handshake to localhost:443 fails AND a
        TLSv1 handshake succeeds; False otherwise (including any
        connection error).

    NOTE(review): ssl.wrap_socket is deprecated (removed in Python 3.12);
    kept here for behavioural compatibility with the grading container.
    """
    import socket, ssl

    # Step 1: an SSLv3 handshake must be rejected by the server.
    ssl_sock = ssl.wrap_socket(
        socket.socket(socket.AF_INET, socket.SOCK_STREAM),
        ssl_version=ssl.PROTOCOL_SSLv3,
        do_handshake_on_connect=False,
    )
    try:
        ssl_sock.connect(('localhost', 443))
        ssl_sock.do_handshake()
        return False  # server accepted SSLv3 -> misconfigured
    except Exception:
        pass  # expected: handshake refused
    finally:
        ssl_sock.close()  # fix: original leaked the socket on the accept path

    # Step 2: a TLSv1 handshake must still succeed.
    ssl_sock = ssl.wrap_socket(
        socket.socket(socket.AF_INET, socket.SOCK_STREAM),
        ssl_version=ssl.PROTOCOL_TLSv1,
        do_handshake_on_connect=False,
    )
    try:
        ssl_sock.connect(('localhost', 443))
        ssl_sock.do_handshake()
        return True
    except Exception:
        return False
    finally:
        ssl_sock.close()  # fix: original leaked the socket on every path here
def RestrictWeakSSLCiphers():
    """Check that the local HTTPS server rejects weak cipher suites.

    Returns:
        True only when a handshake offering only weak ciphers
        (EXP/NULL/ADH/LOW/MD5/RC4) is refused AND a normal TLSv1
        handshake still succeeds; False otherwise (including any
        connection error).

    NOTE(review): ssl.wrap_socket is deprecated (removed in Python 3.12);
    kept here for behavioural compatibility with the grading container.
    """
    import socket, ssl

    # Step 1: offering only weak ciphers must make the handshake fail.
    weak_ciphers = "EXP:NULL:ADH:LOW:MD5:RC4"
    ssl_sock = ssl.wrap_socket(
        socket.socket(socket.AF_INET, socket.SOCK_STREAM),
        do_handshake_on_connect=False,
        ciphers=weak_ciphers,
    )
    try:
        ssl_sock.connect(('localhost', 443))
        ssl_sock.do_handshake()
        return False  # server accepted a weak cipher -> misconfigured
    except Exception:
        pass  # expected: handshake refused
    finally:
        ssl_sock.close()  # fix: original leaked the socket on the accept path

    # Step 2: a normal TLSv1 handshake must still succeed.
    ssl_sock = ssl.wrap_socket(
        socket.socket(socket.AF_INET, socket.SOCK_STREAM),
        ssl_version=ssl.PROTOCOL_TLSv1,
        do_handshake_on_connect=False,
    )
    try:
        ssl_sock.connect(('localhost', 443))
        ssl_sock.do_handshake()
        return True
    except Exception:
        return False
    finally:
        ssl_sock.close()  # fix: original leaked the socket on every path here
def SetServerTokenToProd():
    """Check that the Server header no longer advertises the OS.

    Returns:
        True when no ``Server`` response header of ``GET /`` contains
        'Ubuntu' (i.e. ``ServerTokens Prod`` is in effect); False on any
        match or connection error.
    """
    try:
        conn = http.client.HTTPConnection("localhost")
        try:
            conn.request("GET", "/")
            headers = conn.getresponse().getheaders()
        finally:
            conn.close()  # fix: original never closed the connection
    except Exception:
        return False
    for name, value in headers:
        if name.lower() == 'server' and 'Ubuntu' in value:
            return False
    return True
def SetServerSignatureToOff():
    """Check that the default page body does not reveal 'Apache'.

    Returns:
        True when the body of ``GET /`` does not contain 'Apache'
        (i.e. ``ServerSignature Off``); False on a match or any
        connection error.
    """
    try:
        conn = http.client.HTTPConnection("localhost")
        try:
            conn.request("GET", "/")
            body = conn.getresponse().read()
        finally:
            conn.close()  # fix: original never closed the connection
    except Exception:
        return False
    # Bug fix: under Python 3 `body` is bytes, so body.find('Apache')
    # raised TypeError, which the bare except swallowed -- the original
    # therefore always returned False.  Search for bytes instead.
    return b'Apache' not in body
def DirectoryListing():
    """Check that directory listing is disabled for /DirectoryListing/.

    Returns:
        True when ``GET /DirectoryListing/`` does NOT answer 200 (the
        listing is blocked); False on a 200 response or any connection
        error.
    """
    try:
        conn = http.client.HTTPConnection("localhost")
        try:
            conn.request("GET", "/DirectoryListing/")
            status = conn.getresponse().status
        finally:
            conn.close()  # fix: original never closed the connection
    except Exception:
        return False
    return status != 200
##### END OF GRADER FUNCTIONS
def filter_submition(submition):
    """Delete everything under *submition* that is not an Apache config file.

    Only files ending in ``.conf``/``.load`` and the files named
    ``envvars`` or ``magic`` survive; every other file in the tree is
    removed (directories are left in place).
    """
    kept_names = ("envvars", "magic")
    kept_suffixes = (".conf", ".load")
    for root, _dirs, names in os.walk(submition):
        for name in names:
            if name.endswith(kept_suffixes) or name in kept_names:
                continue
            path = join(root, name)
            os.remove(path)
            print("deleted", path)
def run_test_body():
    """Grade the Apache configuration currently installed in the container.

    Runs ``apachectl configtest`` and restarts Apache, then evaluates the
    per-check functions (looked up by name via globals()) and sums their
    scores.

    Returns:
        A status dict: stat 10 on a failed command, stat 1 when every
        check passes, stat 2 on a partial score.
    """
    import subprocess

    def run_cmd(cmd):
        # Run cmd through the shell; return (combined output text, exit code).
        # fix: decode the output instead of str(bytes), which produced
        # a "b'...'" repr string in the result payload.
        proc = subprocess.Popen(cmd, shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        out, _ = proc.communicate()
        return out.decode('utf-8', errors='replace'), proc.returncode

    out, retval = run_cmd("apachectl configtest")
    if retval != 0:
        return {"stat": 10, "msg": "`apachectl configtest` failed.",
                'data': {'stdio': out, 'retval': retval}}  # stat: 10 cmd error
    print("ctl passed")

    out, retval = run_cmd("service apache2 restart")
    if retval != 0:
        return {"stat": 10, "msg": "`service apache2 restart` failed.",
                'data': {'stdio': out, 'retval': retval}}
    print("restart passed")

    # Each entry: (checker function name, points awarded when it passes).
    checks = [
        ('DisableSSLv3', 20),
        ('RestrictWeakSSLCiphers', 20),
        ('SetServerTokenToProd', 20),
        ('SetServerSignatureToOff', 20),
        ('DirectoryListing', 20),
    ]
    score = 0
    max_score = 0   # fix: the original shadowed the builtin `max`
    result = []
    for check in checks:
        max_score += check[1]
        passed = bool(globals()[check[0]]())
        if passed:
            score += check[1]
        result.append((check, passed))
    payload = {'stdio': '', 'retval': '', 'score': score, 'result': result}
    if score == max_score:
        return {"stat": 1, "msg": "VeryWell!", 'data': payload}  # completely done
    return {"stat": 2, "msg": "Try Harder.", 'data': payload}  # partially done
def run_test():
    """Entry point executed inside the container (via attach).

    Runs the grader and writes its JSON result to /files/result.txt,
    which is bind-mounted from the host.  Returns the sentinel exit
    value 10 so the caller can tell the run completed.
    """
    #global ip2
    #ip2 = argz[0]
    res = run_test_body()
    with open("/files/result.txt","w") as f:
    #with open("/root/result.txt","w") as f:
        f.write(json.dumps(res))
    return 10
def thread_worker(child):
    """Start *child*, install the submission's Apache config, and grade it.

    Returns a status dict; stat 200 carries the path of the result file
    inside the container's root filesystem.
    """
    child.start()
    child.wait("RUNNING", 15)
    if not child.running:
        return {"stat": 0, "msg": "child cannot start.", 'data': None}
    #child.attach_wait(lxc.attach_run_command, ["service", "networking", "restart"])
    #if not child.get_ips(timeout=15):
    #    return {"stat":0,"msg":"failed to get ip address of container",'data': None}
    #ip2 = child.get_ips()[0]
    # Wipe the template's Apache config before copying the submission in.
    # NOTE(review): attach_run_command runs without a shell, so the '*'
    # patterns below are NOT expanded -- confirm whether these rm calls
    # ever delete anything (kept identical to the original behaviour).
    for target in ("conf-available*", "conf-enabled/*", "mods-available/*",
                   "mods-enabled/*", "sites-available/*", "sites-enabled/*"):
        child.attach_wait(lxc.attach_run_command,
                          ["rm", "-rf", "/etc/apache2/" + target])
    child.attach_wait(lxc.attach_run_command, ["cp", "-r", "/files/etc", "/"])
    time.sleep(0.3)
    res = child.attach_wait(run_test)
    # attach_wait can report a raw wait() status; extract the exit code.
    if res >= 256:
        res //= 256   # fix: true division produced a float under Python 3
    if res == 10:
        child_root_fs = child.get_config_item('lxc.rootfs').split(':')[-1]
        # NOTE(review): run_test writes /files/result.txt, not
        # <rootfs>/root/result.txt -- confirm which path the caller reads.
        return {"stat": 200, "msg": "extract info file",
                'data': {'file': join(child_root_fs, "root", "result.txt")}}
    return {"stat": 0, "msg": "Illegal state", 'data': None}
def grade(folder, pid, file, submition):
    """Grade one submission inside a throw-away LXC snapshot.

    Filters the submission down to Apache config files, clones the base
    container as an overlayfs snapshot with the submission bind-mounted,
    runs the grader via thread_worker, and reads back the JSON result.

    Returns:
        The grader's result dict (with a flag added on a perfect score),
        or a stat-0 error dict on any failure.
    """
    try:
        filter_submition(submition)
        base = lxc.Container(LXC_BASE)
        if not base.defined:
            print("Base is not defined.")
            return {"stat": 0, "msg": "خطای سرور", 'data': None}
        if base.running:
            print("Base container is running")
            return {"stat": 0, "msg": "خطای سرور", 'data': None}
        # Random container name to allow concurrent grading runs.
        c_name = ''.join(random.SystemRandom().choice(
            string.ascii_uppercase + string.digits) for _ in range(16))
        child = lxc.Container(c_name)
        if child.defined:
            return {"stat": 0, "msg": "duplicate Name " + c_name, 'data': None}
        try:
            child = base.clone(c_name, bdevtype="overlayfs",
                               flags=lxc.LXC_CLONE_SNAPSHOT)
            # Bind-mount the submission so the container sees it at /files.
            child.append_config_item(
                "lxc.mount.entry",
                submition + " " + MOUNTPOINT + " none bind 0 0")
            child.save_config()
            pool = ThreadPool(processes=1)
            async_result = pool.apply_async(thread_worker, (child,))
            result = async_result.get(timeout=60)
            child.stop()
            if result["stat"] == 200:
                # The grader wrote its result into the bind-mounted dir.
                with open(join(submition, "result.txt")) as f:
                    result = json.loads(f.read())
            if result["stat"] == 1:
                # Perfect score: hand out the flag.
                flag = "infosec-3f51f45f424a61516b5cc8b6663d919c"
                result["data"]["flag"] = flag
            return result
        except Exception as inst:
            import traceback
            stack = traceback.format_exc()
            return {"stat": 0, "msg": "failed due exception",
                    'data': {'exp': inst, 'stack': stack}}
        finally:
            if child.running:
                child.stop()
            child.destroy()
    except Exception as inst:
        # Bug fix: the original referenced `e` here, which is unbound
        # unless the inner handler ran first -> NameError masked the
        # real error.  Format the traceback locally instead.
        import traceback
        stack = traceback.format_exc()
        return {"stat": 0, "msg": "failed due exception",
                'data': {'exp': inst, 'stack': stack}}
| mabdi/ctf-pylxc | challs/Apache_Man_2/grader.py | grader.py | py | 8,833 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "socket.socket",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "ssl.wrap_so... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.