blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b68506190b7a94f34b31ed508a0169719a494f3a | 6925685704cd8ab3af76ac39aa99504c8653654e | /create_document.py | 36393d56798f037ebd0681136c1d105f247c03ff | [] | no_license | iamkitametam/createCV | 08ecaecd8013841c3acc053c800f6081df2a1ff5 | 36d949b0808f58770e06961572b7576e7c2b8700 | refs/heads/master | 2021-04-09T07:20:45.427150 | 2020-04-24T17:22:46 | 2020-04-24T17:22:46 | 248,849,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,707 | py | from docxtpl import *
import json
# def create_CV_using_my_word_template(FIO, birth_date, location, languages, salary, jobs_return, educations_return, \
# additional_educations_return):
def create_CV_using_my_word_template(person):
    """Render a CV .docx for *person* from ``my_word_template.docx``.

    *person* is a dict with keys: FIO (full name), birth_date, location,
    languages, salary, jobs, educations, additional_educations and
    photo_file_name.  The Word template uses Russian placeholder names,
    numbered per entry (e.g. 'ДАТА_РАБОТЫ_1', 'ДАТА_РАБОТЫ_2', ...).
    The rendered document is saved as docxs/<FIO-in-uppercase>.docx.
    """
    # Unpack the person dict into locals used by the template context.
    FIO = person["FIO"]
    birth_date = person["birth_date"]
    location = person["location"]
    languages = person["languages"]
    salary = person["salary"]
    jobs_return = person["jobs"]
    educations_return = person["educations"]
    additional_educations_return = person["additional_educations"]
    photo_file_name = person["photo_file_name"]
    doc = DocxTemplate("my_word_template.docx")
    # Base (unnumbered) placeholders: name, birth date, city, salary.
    context = { 'ФАМИЛИЯ_ИМЯ_ОТЧЕСТВО' : FIO.upper(),
                'ДАТА_РОЖДЕНИЯ' : birth_date,
                'МЕСТО_ЖИТЕЛЬСТВА' : location,
                'ЗАРПЛАТНЫЕ_ОЖИДАНИЯ' : salary}
    # 'ДАТА_РАБОТЫ': job["date"],
    # 'НАЗВАНИЕ_КОМПАНИИ': job["name"],
    # 'НАЗВАНИЕ_ДОЛЖНОСТИ': job["position"],
    # 'ПУНКТ_ОПИСАНИЯ_ДОЛЖНОСТИ': Listing(
    # job_description), # Кафедра теоретической физики \n• И еще какая-то кафедра \n• И еще какая-то, более длинная кафедра"),
    # # 'ПУНКТ_ОПИСАНИЯ_ДОЛЖНОСТИ' : Listing("• " + job["description"][0] + "\n• " + job["description"][1]), # Кафедра теоретической физики \n• И еще какая-то кафедра \n• И еще какая-то, более длинная кафедра"),
    # 'ДАТА_ОБРАЗОВАНИЯ': education["date"],
    # 'НАЗВАНИЕ_ВУЗА': education["name"],
    # 'НАЗВАНИЕ_ФАКУЛЬТЕТА': education["facultee"],
    # 'ДАТА_ДОП_ОБРАЗОВАНИЯ': additional_education["date"],
    # 'НАЗВАНИЕ_ДОП_ОБРАЗОВАНИЯ': additional_education["name"],
    # 'ОПИСАНИЕ_ДОП_ОБРАЗОВАНИЯ': additional_education["description"]
    # JOBS
    # One numbered placeholder set per job entry; a job may legitimately
    # have no "description" key, in which case an empty string is used.
    for i in range(0,len(jobs_return)):
        j = jobs_return[i]
        try:
            # jj = j["description"]
            # job_bullets = jj.split(". ")
            # job_bullets = str(j['description']).split("\n")
            # job_description = "• " + job_bullets[0] + "\n"
            job_description = j["description"]
            # for i2 in range(1, len(job_bullets)):
            #     job_description = job_description + "• " + job_bullets[i2] + "\n"
        except KeyError:
            job_description = ""
        # "настоящее время" (present time) is abbreviated to "н.в." to fit.
        context.update({'ДАТА_РАБОТЫ_' + str(i+1): j["date"].replace("настоящее время","н.в.")})
        context.update({'НАЗВАНИЕ_КОМПАНИИ_' + str(i+1): j["name"]})
        context.update({'МЕСТО_РАБОТЫ_' + str(i+1): j["location"]})
        context.update({'САЙТ_РАБОТЫ_' + str(i+1): j["url"]})
        context.update({'НАЗВАНИЕ_ДОЛЖНОСТИ_' + str(i+1): j["position"]})
        # Listing() preserves line breaks/bullets inside the docx cell.
        context.update({'ПУНКТ_ОПИСАНИЯ_ДОЛЖНОСТИ_' + str(i+1): Listing(job_description)})
        # 'НАЗВАНИЕ_КОМПАНИИ': job["name"],
        # 'НАЗВАНИЕ_ДОЛЖНОСТИ': job["position"],
        # 'ПУНКТ_ОПИСАНИЯ_ДОЛЖНОСТИ': Listing(
        # job_description), # Кафедра теоретической физики \n• И еще какая-то кафедра \n• И еще какая-то, более длинная кафедра"),
        # 'ПУНКТ_ОПИСАНИЯ_ДОЛЖНОСТИ' : Listing("• " + job["description"][0] + "\n• " + job["description"][1]), # Кафедра теоретической физики \n• И еще какая-то кафедра \n• И еще какая-то, более длинная кафедра"),
    # EDUCATIONS ##################################################################################
    for i in range(0,len(educations_return)):
        e = educations_return[i]
        context.update({'ДАТА_ОБРАЗОВАНИЯ_' + str(i+1): e["date"].replace("настоящее время","н.в.")})
        context.update({'НАЗВАНИЕ_ВУЗА_' + str(i+1): e["name"]})
        # context.update({'НАЗВАНИЕ_ФАКУЛЬТЕТА_' + str(i+1): e["facultee"]})
        # context.update({'ОПИСАНИЕ_ОБРАЗОВАНИЯ_' + str(i+1): Listing(education_description)})
        context.update({'ОПИСАНИЕ_ОБРАЗОВАНИЯ_' + str(i+1): educations_return[i]["description"]})
    # ADDITIONAL EDUCATIONS #######################################################################
    for i in range(0,len(additional_educations_return)):
        additional_e = additional_educations_return[i]
        context.update({'ДАТА_ДОП_ОБРАЗОВАНИЯ_' + str(i+1): additional_e["date"].replace("настоящее время","н.в.")})
        context.update({'НАЗВАНИЕ_ДОП_ОБРАЗОВАНИЯ_' + str(i+1): additional_e["name"]})
        context.update({'ОПИСАНИЕ_ДОП_ОБРАЗОВАНИЯ_' + str(i+1): additional_e["description"]})
    # LANGUAGES
    for i in range(0,len(languages)):
        context.update({'ЯЗЫК_' + str(i+1): languages[i]["language"] + ":"})
        context.update({'УРОВЕНЬ_ЯЗЫКА_' + str(i+1): languages[i]["level"]})
    # PHOTO
    # NOTE(review): bare except silently swallows *any* error from
    # replace_pic (not only a missing photo) — consider narrowing it.
    try:
        doc.replace_pic('default_userpic.jpg',photo_file_name)
    except:
        print("no photo")
    # RENDER & SAVE
    doc.render(context)
    doc.save("docxs/" + FIO.upper() + ".docx")
| [
"belyaevalexande@mail.ru"
] | belyaevalexande@mail.ru |
c02b77245ed38a8f84525c9b47df2dacacb66e37 | bd33047bed5809de3695f13b53ccea0d7f8783f7 | /Software Project-Website with database/Serverimplemented/flask/app/functions.py | cc86d49a468d087690004cd838b6280da6e46738 | [] | no_license | Emharsh/Projects | 3449869ce30f041de2a931b6103dc0d1d5c40563 | 45d4934b9a724df8af7d45dc78f7a49e6feb3254 | refs/heads/master | 2023-01-27T23:18:06.777835 | 2020-04-10T22:52:56 | 2020-04-10T22:52:56 | 92,080,105 | 1 | 0 | null | 2023-01-11T01:27:34 | 2017-05-22T17:23:35 | TSQL | UTF-8 | Python | false | false | 22,624 | py | from flask import render_template, request
from app import app
from app import db, models, mail
from flask_mail import Mail, Message
import time, random, string, datetime, uuid, os
################
# BASIC OPERATIONS
# Implement on pages which require a user (type) being logged in.
def logged_in(session, accessLevel):
    """Return True iff *session* belongs to a user with the given accountType.

    Bug fixed: the original fell through to ``return True`` when the session
    carried no 'user'/'randomid' keys at all, so completely anonymous
    visitors were treated as logged in. Missing keys now return False.
    """
    if not (session.get('user') and session.get('randomid')):
        return False
    p = models.Users.query.filter_by(
        email=session['user'], session=session['randomid'],
        accountType=accessLevel).first()
    return p is not None
def login(request, session):
    """Authenticate the posted credentials and install a fresh session id.

    On success, stores the user's email and a new random session token in
    *session*, persists the token on the Users row, and returns True.
    Returns False for non-POST requests, other actions, or bad credentials.
    NOTE(review): passwords are compared in plain text — confirm hashing
    is handled elsewhere.
    """
    if request.method != "POST" or request.form['action'] != "Log in":
        return False
    user = models.Users.query.filter_by(
        email=request.form['email'], password=request.form['password']).first()
    if user is None:
        return False
    session['user'] = request.form['email']
    session['randomid'] = str(uuid.uuid4())
    user.session = session['randomid']
    db.session.add(user)
    db.session.commit()
    return True
################
# USER OPTIONS
def bookcourse(request, session):
    # Book the logged-in user onto the schedule given by ?book=<scheduleId>.
    # Returns False when no id is supplied, a reason string on rejection or
    # waiting-list placement, and "Booked" on success.
    if request.args.get('book') == None:
        return False
    p = models.Users.query.filter_by(email=session['user'], session=session['randomid']).first();
    userid = p.id
    p = models.course_schedule.query.filter_by(id=request.args.get('book')).first()
    # Prerequisite check: the user must already hold every required course.
    q = models.course_prerequisite.query.filter_by(courseId=p.courseId).all()
    for r in q:
        s = models.user_courses.query.filter_by(userId=userid, courseId=r.requiredCourseId).first()
        if not s:
            return "Not required courses"
    # Expand each of the user's existing bookings into the concrete day
    # numbers it occupies. The weekly pattern is a 7-bit mask walked from
    # startDay, wrapping week by week, until `duration` sessions are found.
    q = models.user_booking.query.filter_by(userId=userid).all()
    occupiedUserDays = []
    for r in q:
        s = models.course_schedule.query.filter_by(id=r.scheduleId).first()
        t = models.Courses.query.filter_by(id=s.courseId).first()
        combinations = '{0:07b}'.format(s.combination)
        daycount = float(s.startDay)
        count = 0
        i = 0
        while count != t.duration:
            if combinations[i] == "1":
                occupiedUserDays.insert(-1,daycount)
                count += 1
            i += 1
            if len(combinations) == i:
                i = 0
                daycount += 1
    # Same expansion for the schedule being booked.
    q = models.Courses.query.filter_by(id=p.courseId).first()
    runningCourseScheduleDays = []
    combinations = '{0:07b}'.format(p.combination)
    daycount = float(p.startDay)
    count = 0
    i = 0
    while count != q.duration:
        if combinations[i] == "1":
            runningCourseScheduleDays.insert(-1,daycount)
            count += 1
        i += 1
        if len(combinations) == i:
            i = 0
            daycount += 1
    # Reject if any day of the new schedule clashes with an existing booking.
    if not set(occupiedUserDays).isdisjoint(runningCourseScheduleDays):
        return "User has other courses clashing with this one"
    # Capacity check: over the classroom capacity or the course's delegate
    # cap the user is queued instead (waiting = position in the queue).
    q = models.user_booking.query.filter_by(scheduleId=p.id).all()
    r = models.Areas.query.filter_by(id=p.areaId).first()
    s = models.Courses.query.filter_by(id=p.courseId).first()
    if len(q) >= r.capacity or len(q) >= s.maxdelegates:
        p = models.user_booking(userId=userid, scheduleId=p.id, waiting=len(q)+1, reminder=0)
        db.session.add(p)
        db.session.commit()
        p = models.Users.query.filter_by(id=userid).first()
        msg = Message("Confirmation of waiting list booking")
        msg.html = p.name + ", you are in the waiting list for the course " + s.title
        msg.recipients = [p.email]
        mail.send(msg)
        return "Bookings exceeded course or classrooms maximum. Waiting list."
    # Normal booking path: persist, then e-mail a confirmation.
    p = models.user_booking(userId=userid, scheduleId=p.id, reminder=0, waiting=0)
    db.session.add(p)
    db.session.commit()
    p = models.Users.query.filter_by(id=userid).first()
    msg = Message("Confirmation of booking")
    msg.html = p.name + ", congratulations, your booking for " + s.title + " has been successful."
    msg.recipients = [p.email]
    mail.send(msg)
    return "Booked"
def deletebooking(request, session):
    """Cancel the logged-in user's booking named by ?withdraw=<scheduleId>.

    Sends a cancellation e-mail, then removes the user_booking row.
    Returns False when no 'withdraw' query parameter was supplied.
    """
    schedule_id = request.args.get('withdraw')
    if schedule_id is None:
        return False
    user = models.Users.query.filter_by(
        email=session['user'], session=session['randomid']).first()
    booking = models.user_booking.query.filter_by(
        userId=user.id, scheduleId=schedule_id).first()
    raw = models.rawcourse_schedule.query.filter_by(
        scheduleId=schedule_id).first()
    msg = Message("You have unbooked the course")
    msg.html = user.name + ", your booking for " + raw.courseTitle + " has been cancelled."
    msg.recipients = [user.email]
    mail.send(msg)
    db.session.delete(booking)
    db.session.commit()
    return True
################
# ADMIN OPTIONS
def return_course_details():
    """Return every Courses row, for the admin course-management views."""
    return models.Courses.query.all()
def return_area_details():
    """Return every Areas (training room) row."""
    return models.Areas.query.all()
def schedule_course(request):
    """Create a course_schedule (plus denormalised rawcourse_schedule) row
    from the admin "Schedule course" form.

    The weekly day pattern is stored as a 7-bit mask whose most significant
    bit is the start day itself; the epoch week used throughout the app
    starts on Thursday (1 Jan 1970 was a Thursday). The schedule is only
    committed when neither the classroom (area) nor the trainer is already
    occupied on any day the new schedule would use.

    Fixes over the original:
    - the seven near-identical per-start-day if/elif chains are replaced
      by a single rotation of the epoch week;
    - an empty/invalid day selection no longer loops forever — it aborts;
    - the last occupied day is taken as days[-1] from an appended list
      (the original's insert(-1)/[-2] trick crashed for 1-day courses).
    """
    if request.method != "POST" or request.form['action'] != "Schedule course":
        return
    course = models.Courses.query.filter_by(id=request.form['courseId']).first()
    if not course:
        return

    def _expand_days(combination, first_day):
        # Walk the 7-bit weekly mask starting at first_day (a fractional
        # epoch-day number), wrapping week by week, until course.duration
        # sessions have been collected; returns the occupied day numbers.
        bits = '{0:07b}'.format(combination)
        if "1" not in bits:
            return []  # guard: would otherwise spin forever
        days = []
        day = first_day
        i = 0
        while len(days) != course.duration:
            if bits[i] == "1":
                days.append(day)
            i += 1
            if i == len(bits):
                i = 0
                day += 1
        return days

    def _occupied_days(schedules):
        # All day numbers taken up by the given existing schedules.
        # NOTE(review): like the original, this expands other schedules
        # using the *new* course's duration, not each schedule's own
        # course duration — confirm that is intended.
        taken = []
        for sched in schedules:
            taken.extend(_expand_days(sched.combination, float(sched.startDay)))
        return taken

    # Start day: days since 1/1/1970 (the +3600 compensates for the
    # local-midnight timestamp), then its position inside the epoch week.
    unixdays = (time.mktime(datetime.datetime.strptime(
        request.form['date'], "%Y-%m-%d").timetuple()) + 3600) / 86400
    startDay = round(((unixdays / 7) - int(unixdays / 7)) * 7, 1)

    epoch_week = ["Thursday", "Friday", "Saturday", "Sunday",
                  "Monday", "Tuesday", "Wednesday"]
    inputcombination = 0
    if startDay == int(startDay):
        # Rotate the week so index 0 is the start day; the start day gets
        # the most significant bit, later days progressively lower bits.
        rotated = epoch_week[int(startDay):] + epoch_week[:int(startDay)]
        for day in request.form.getlist('days'):
            if day in rotated:
                inputcombination += 2 ** (6 - rotated.index(day))
    if inputcombination == 0:
        return  # no valid days selected — nothing to schedule

    occupiedAreaDays = _occupied_days(
        models.course_schedule.query.filter_by(
            areaId=request.form['areaId']).all())
    occupiedTrainerDays = _occupied_days(
        models.course_schedule.query.filter_by(
            trainerId=request.form['trainerId']).all())
    newScheduleDays = _expand_days(inputcombination, unixdays)
    if not newScheduleDays:
        return

    if set(newScheduleDays).isdisjoint(occupiedTrainerDays) and \
            set(newScheduleDays).isdisjoint(occupiedAreaDays):
        endDay = newScheduleDays[-1]
        b = models.course_schedule(
            courseId=request.form['courseId'], areaId=request.form['areaId'],
            trainerId=request.form['trainerId'], startDay=unixdays,
            endDay=endDay, combination=inputcombination)
        db.session.add(b)
        # Re-query to obtain the generated schedule id for the raw row.
        p = models.course_schedule.query.filter_by(
            courseId=request.form['courseId'], areaId=request.form['areaId'],
            trainerId=request.form['trainerId'], startDay=unixdays,
            endDay=endDay, combination=inputcombination).first()
        q = models.Areas.query.filter_by(id=request.form['areaId']).first()
        r = models.Trainers.query.filter_by(id=request.form['trainerId']).first()
        arInfo = q.city + " - " + q.areaType
        rawCombination = ""
        for day_name in request.form.getlist('days'):
            rawCombination = rawCombination + " " + day_name
        endingday = datetime.datetime.fromtimestamp(
            int(endDay) * 86400).strftime('%Y-%m-%d')
        s = models.rawcourse_schedule(
            scheduleId=p.id, courseTitle=course.title, areaInfo=arInfo,
            trainerName=r.name, startDay=request.form['date'],
            combination=rawCombination, endDay=endingday)
        db.session.add(s)
        db.session.commit()
    return
def delete_schedule(request):
    """Remove the schedule named by ?delschedule=<id>, together with every
    booking on it and its denormalised rawcourse_schedule row.

    Returns False when no 'delschedule' query parameter was supplied,
    True after a successful delete-and-commit.
    """
    schedule_id = request.args.get('delschedule')
    if schedule_id is None:
        return False
    schedule = models.course_schedule.query.filter_by(id=schedule_id).first()
    db.session.delete(schedule)
    for booking in models.user_booking.query.filter_by(scheduleId=schedule_id).all():
        db.session.delete(booking)
    raw = models.rawcourse_schedule.query.filter_by(scheduleId=schedule_id).first()
    db.session.delete(raw)
    db.session.commit()
    return True
def return_trainer_details():
    """Return every Trainers row."""
    return models.Trainers.query.all()
def create_course(request):
    """Create a Courses row, plus one course_prerequisite link per selected
    prerequisite, from the admin "Create course" form."""
    if request.method != "POST" or request.form['action'] != "Create course":
        return
    course = models.Courses(
        title=request.form['title'], description=request.form['description'],
        duration=request.form['duration'], maxdelegates=request.form['maxdelegates'])
    db.session.add(course)
    db.session.commit()
    # Re-query to obtain the generated course id for the prerequisite links.
    course = models.Courses.query.filter_by(
        title=request.form['title'], description=request.form['description'],
        duration=request.form['duration'], maxdelegates=request.form['maxdelegates']).first()
    for required_id in request.form.getlist('prerequisites'):
        db.session.add(models.course_prerequisite(
            courseId=course.id, requiredCourseId=required_id))
        db.session.commit()
    return
def edit_course(request):
    """Apply the admin "Edit course" form to an existing Courses row."""
    if request.method != "POST" or request.form['action'] != "Edit course":
        return
    course = models.Courses.query.filter_by(id=request.form['id']).first()
    course.title = request.form['title']
    course.description = request.form['description']
    course.duration = request.form['duration']
    course.maxdelegates = request.form['maxdelegates']
    db.session.add(course)
    db.session.commit()
    return
def delete_course(request):
    """Delete the course named by ?delcourse=<id> and its prerequisite links.

    Returns True after a successful delete; None when no id was supplied.
    """
    course_id = request.args.get('delcourse')
    if course_id is not None:
        course = models.Courses.query.filter_by(id=course_id).first()
        for link in models.course_prerequisite.query.filter_by(courseId=course_id).all():
            db.session.delete(link)
        db.session.delete(course)
        db.session.commit()
        return True
    return
# def schedule_course(request):
# if request.method == "POST":
# if request.form['action'] == "Schedule course":
# p = models.course_schedule(courseId=request.form['course'], areaId=request.form['area'], trainerId=request.form['trainer'], startDay=request.form['startDay'], monday=request.form['monday'], tuesday=request.form['tuesday'], wednesday=request.form['wednesday'], thursday=request.form['thursday'], friday=request.form['friday'], saturday=request.form['saturday'], sunday=request.form['sunday'])
# db.session.add(p);
# db.session.commit();
# return
def create_trainer(request):
    """Create a Trainers row from the admin "Create lecturers" form."""
    if request.method != "POST" or request.form['action'] != "Create lecturers":
        return
    trainer = models.Trainers(
        name=request.form['name'], address=request.form['address'],
        email=request.form['email'], phoneNumber=request.form['phone'])
    db.session.add(trainer)
    db.session.commit()
    return
def edit_trainer(request):
    """Apply the admin "Edit lecturers" form to an existing Trainers row."""
    if request.method != "POST" or request.form['action'] != "Edit lecturers":
        return
    trainer = models.Trainers.query.filter_by(id=request.form['id']).first()
    trainer.name = request.form['name']
    trainer.address = request.form['address']
    trainer.email = request.form['email']
    trainer.phoneNumber = request.form['phone']
    db.session.add(trainer)
    db.session.commit()
    return
def delete_trainer(request):
    """Delete the trainer named by ?deltrainer=<id>.

    Returns True after a successful delete; None when no id was supplied.
    """
    trainer_id = request.args.get('deltrainer')
    if trainer_id is not None:
        trainer = models.Trainers.query.filter_by(id=trainer_id).first()
        db.session.delete(trainer)
        db.session.commit()
        return True
    return
def create_area(request):
    """Create an Areas (training room) row from the admin form.

    An uploaded image is saved under UPLOAD_FOLDER with a timestamp name;
    otherwise a placeholder image URL is stored.
    """
    if request.method != "POST" or request.form['action'] != "Create training area":
        return
    if request.files['image'].filename != '':
        imagename = str(int(time.time())) + ".png"
        request.files['image'].save(os.path.join(app.config['UPLOAD_FOLDER'], imagename))
    else:
        imagename = "https://thebenclark.files.wordpress.com/2014/03/facebook-default-no-profile-pic.jpg"
    area = models.Areas(
        city=request.form['city'], streetAddress=request.form['address'],
        areaType=request.form['type'], accessibility=request.form['accessibility'],
        capacity=request.form['capacity'], imagename=imagename,
        facilities=request.form['facilities'])
    db.session.add(area)
    db.session.commit()
    return
def edit_area(request):
    """Apply the admin "Edit training area" form to an existing Areas row."""
    if request.method != "POST" or request.form['action'] != "Edit training area":
        return
    area = models.Areas.query.filter_by(id=request.form['id']).first()
    area.city = request.form['city']
    area.streetAddress = request.form['address']
    area.areaType = request.form['type']
    area.accessibility = request.form['accessibility']
    area.capacity = request.form['capacity']
    db.session.add(area)
    db.session.commit()
    return
def delete_area(request):
    """Delete the training area named by ?delarea=<id>.

    Returns True after a successful delete; None when no id was supplied.
    """
    area_id = request.args.get('delarea')
    if area_id is not None:
        area = models.Areas.query.filter_by(id=area_id).first()
        db.session.delete(area)
        db.session.commit()
        return True
    return
def create_user(request):
    """Register a new (non-admin, accountType=0) user.

    Generates a random 5-character password and e-mails it to the supplied
    address before committing the Users row.
    """
    if request.method != "POST" or request.form['action'] != "Create user":
        return
    password = ''.join(
        random.choice(string.ascii_uppercase + string.digits) for _ in range(5))
    user = models.Users(
        name=request.form['name'], password=password,
        address=request.form['address'], email=request.form['email'],
        phoneNumber=request.form['phone'], disability=request.form['dis'],
        accountType=0)
    msg = Message("Your FDM password")
    msg.html = "Your password:<br> " + password + "<br><br> Thank you for registering with FDM."
    msg.recipients = [request.form['email']]
    mail.send(msg)
    db.session.add(user)
    db.session.commit()
    return
def edit_user(request):
    """Apply the admin "Edit user" form to an existing Users row.

    Bug fixed: the original constructed a brand-new ``models.Users(id=...)``
    object instead of loading the existing row, so the submitted edits were
    never applied to the stored user (cf. edit_trainer/edit_area, which
    query first). The unused random-password generation is removed.
    """
    if request.method == "POST":
        if request.form['action'] == "Edit user":
            user = models.Users.query.filter_by(id=request.form['id']).first()
            if user is None:
                return
            user.name = request.form['name']
            user.password = request.form['password']
            user.address = request.form['address']
            user.email = request.form['email']
            user.phoneNumber = request.form['phone']
            db.session.add(user)
            db.session.commit()
    return
def delete_user(request):
    """Delete the user named by the admin "Delete user" form.

    Bug fixed: the original built a fresh ``models.Users(id=...)`` object and
    ``db.session.add``-ed it — adding rather than deleting. The row is now
    queried and removed, matching delete_trainer/delete_area.
    """
    if request.method == "POST":
        if request.form['action'] == "Delete user":
            user = models.Users.query.filter_by(id=request.form['id']).first()
            if user is not None:
                db.session.delete(user)
                db.session.commit()
    return
################
# GENERAL OPTIONS
def apply_for_instructor(request):
    """E-mail an instructor application submitted from the website form.

    Bug fixed: the original tested ``request.form == "POST"`` (comparing the
    form *dict* to a string), which is always False, so the notification
    e-mail was never sent. The method attribute is checked instead.
    """
    if request.method == "POST" and 'action' in request.form:
        msg = Message("New instructor request")
        msg.html = request.form['name'] + " has applied for instructor from the website: <br><br>" + request.form['comments'] + "<br><br> Phone number: " + request.form['phone']
        msg.recipients = ["testnetwork49@gmail.com"]
        mail.send(msg)
    return
def contact_process(request):
    """E-mail a message submitted through the site's contact form.

    Bug fixed: the original tested ``request.form == "POST"`` (comparing the
    form *dict* to a string), which is always False, so contact messages
    were silently dropped. The method attribute is checked instead.
    """
    if request.method == "POST":
        msg = Message("Contact from " + request.form['name'] + ": " + request.form['subject'])
        msg.html = "Return email: " + request.form['email'] + "<br><br> Message: " + request.form['message']
        msg.recipients = ["testnetwork49@gmail.com"]
        mail.send(msg)
    return
| [
"harshit.verma777@gmail.com"
] | harshit.verma777@gmail.com |
c6ddac9e303b762b38d565c374ec231de78f1052 | aac63f0f178945e8109f74ebb9bbb59165185172 | /news/urls.py | e0d7f3b27f0854cb4fa0912eb93b73f36dddd8c4 | [] | no_license | okumujustine/hacker-news-clone | 587f7e88f53d576ee58e5dfff78f4d18e046b4db | 7f70d18325c7627237de719e04bdde9ad75a8d5d | refs/heads/main | 2023-01-02T13:41:37.825072 | 2020-11-04T14:52:41 | 2020-11-04T14:52:41 | 310,032,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py |
from django.contrib import admin
from django.urls import path, include
from django.contrib.auth import views
from apps.core.views import signup
from apps.story.views import frontpage, search, submit, newest, vote, story
# URL routing for the hacker-news clone: story pages, user profiles,
# auth views (Django's built-in LoginView/LogoutView) and the admin site.
urlpatterns = [
    path('', frontpage, name='frontpage'),
    # Story detail and voting, keyed by the story's integer id.
    path('s/<int:story_id>/vote/', vote, name='vote'),
    path('s/<int:story_id>/', story, name='story'),
    # User-profile URLs are delegated to the userprofile app.
    path('u/', include('apps.userprofile.urls')),
    path('newest/', newest, name='newest'),
    path('search/', search, name='search'),
    path('submit/', submit, name='submit'),
    path('signup/', signup, name='signup'),
    path('login/', views.LoginView.as_view(template_name='core/login.html'), name='login'),
    path('logout/', views.LogoutView.as_view(), name='logout'),
    path('admin/', admin.site.urls),
]
| [
"okumujustine01@gmail.com"
] | okumujustine01@gmail.com |
6421b6872e99bedd503d2be14db41c6292fdd9ca | e7902010824edf11c386b74d0cf1d4e7b5fe698a | /Downloads/hackmit2018-master/inputgenerator.py | 867160eedf26df6c3faf42a30ee81d7794f22b58 | [] | no_license | z-anderson/hackmit | e3429496ab662762980251f2cf06e09d4db94139 | 078ddc53b91fc6c199c8cf909c4b967613f3760f | refs/heads/master | 2021-07-20T01:29:27.628136 | 2019-01-19T21:33:01 | 2019-01-19T21:33:01 | 103,835,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | import random
import sys
# random ints and floats TEST TEST TEST
def gen_int():
    """Return a uniformly random integer in the inclusive range
    [-sys.maxsize, sys.maxsize]."""
    low, high = -sys.maxsize, sys.maxsize
    return random.randint(low, high)
def gen_float():
    """Return a uniformly random float in [-sys.maxsize, sys.maxsize].

    Bug fixed: the original called ``random.randrange`` with float
    arguments, which raises (randrange only accepts integers in Python 3);
    ``random.uniform`` is the correct API for a continuous range.
    """
    #print(type(sys.maxsize * 1.0))
    return random.uniform(-(sys.maxsize * 1.0), sys.maxsize * 1.0)
# Quick manual smoke test: print one random float when run as a script.
if __name__ == '__main__':
    print("exec")
    print(gen_float())
| [
"zoeand398@gmail.com"
] | zoeand398@gmail.com |
fc6b3d226bbf27414b9873a6166718c97218c228 | 16fcf452e6165a0de5bc540c57b6e6b82d822bb1 | /Learntek_code/4_June_18/while2.py | 7a9891325874d47ce4779e35a821980c21e374a2 | [] | no_license | mohitraj/mohitcs | e794e9ad2eb536e3b8e385fb8d222e8ade95c802 | d6399b2acf69f5667c74f69715a0b55060bf19d1 | refs/heads/master | 2021-09-09T00:21:23.099224 | 2021-09-07T16:39:07 | 2021-09-07T16:39:07 | 87,798,669 | 5 | 6 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | import getpass
print "Hello World "
print "Please enter the password\t"
pass1 = getpass.getpass()
flag1 =0
num =0
while True:
if pass1=="India":
print "Welcome in India"
break
else :
print "Wrong password type again"
num = num+1
print num
if num==3:
break
print "Please enter the password again\t"
pass1 = getpass.getpass() | [
"mohitraj.cs@gmail.com"
] | mohitraj.cs@gmail.com |
525379ed03b39dc09421131f1b21c85a278b744d | ab1f25e6266a71ea23f1d3e04ec8635ae550d1df | /HW6/Task-1/temp_HW6/person.py | 9dc7cb45a9f296a612d9c858867a544884bb3914 | [] | no_license | Pavlenkovv/e-commerce | 5143d897cf779007181a7a7b85a41acf3dfc02c4 | 0d04d7dfe3353716db4d9c2ac55b0c9ba54daf47 | refs/heads/master | 2023-01-25T03:13:41.238258 | 2020-12-06T22:16:53 | 2020-12-06T22:16:53 | 313,103,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | class Person:
"""Any Person"""
def __init__(self, surname=None, name=None, age=None, *args, **kwargs):
self.surname = surname
self.name = name
self.age = age
def __str__(self):
return f'Surname: {self.surname}, name: {self.name}, age: {self.age}'
| [
"pavlenko.vyacheslav@gmail.com"
] | pavlenko.vyacheslav@gmail.com |
8218503f435cbe00db8d250591c65cb172a05f75 | 0f522f38bf86d3b4f2545b148c7e40efd01518bc | /twitterTools/get_user_stream.py | dafdac463901afe35c1e223aca69183cb133834b | [] | no_license | guiem/TwitterAnalytics | 20dccaef403ef09a315cd248307ec12e22054bbd | b4b4774b2b39e7ea6528ac5ecb5fd3ad032f2636 | refs/heads/master | 2021-01-18T13:59:25.295512 | 2015-05-13T15:29:24 | 2015-05-13T15:29:24 | 21,535,991 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | from twitter import *
from settings import *
auth=OAuth(ACCESS_TOKEN, ACCESS_TOKEN_SECRET,CONSUMER_KEY, CONSUMER_SECRET)
twitter_userstream = TwitterStream(auth=auth,follow=['guiemb'],domain='userstream.twitter.com')
for msg in twitter_userstream.user():
if 'direct_message' in msg:
print msg['direct_message']['text']
| [
"g@guiem.info"
] | g@guiem.info |
9110c17c536a75f79e78eb9fd5b7ac57cb8aeddc | 070990498e06678d1e42f5297757a8e861772894 | /Sites/Nigeria/Kara.py | 6570edbf0958db1072a26ea5332cc62863bf817e | [] | no_license | sysall/WebScrapping | 202218bdcf764e484e633c5ff3b41d16541af16d | 26abdb0507e9063b0feb15655d1b2d31f815f20a | refs/heads/master | 2020-06-18T17:24:55.620879 | 2019-07-11T11:40:15 | 2019-07-11T11:40:15 | 196,380,889 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,032 | py | from bs4 import BeautifulSoup
import requests
def categoryKara():
    """Scrape kara.com.ng's top navigation bar and return the list of
    top-level category URLs."""
    site = 'http://www.kara.com.ng/'
    response = requests.get(site, headers={'User-Agent': 'Mozilla/5.0'})
    soup = BeautifulSoup(response.content, "html.parser")
    menu_items = soup.find('ul', {"id": "navigationpro-top"}).findAll("li", {"class": "level0"})
    return [item.find('a').get("href") for item in menu_items]
#print(categoryKara())
def getAllPage():
    # Expand every category URL into the full list of paginated listing
    # URLs (?p=2, ?p=3, ...). The highest page number is read from the
    # second-to-last <li> of the pager; categories with no pager keep
    # their single URL.
    subUrl = categoryKara()
    page = []
    for url in subUrl:
        page_response = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})
        page_content = BeautifulSoup(page_response.content, "html.parser")
        try:
            maxPage = int(page_content.find('div',{"class":"pager"}).findAll('li')[-2].text) + 1
            id = list(range(maxPage))
            # NOTE(review): page 1 (the bare category URL) is dropped here —
            # only ?p=2..maxPage are kept when a pager exists; confirm intended.
            del id[0]
            for el in id:
                link = url + "?p=" + str(el)
                page.append(
                    link
                )
        except:
            # No pager (or unexpected markup): keep the category URL itself.
            link1 = url
            page.append(
                link1
            )
    return page
def scrapKara(origin):
    # Scrape every product listing page of kara.com.ng and return a list of
    # product dicts shaped for the comparison API. *origin* is passed
    # through unchanged into each dict.
    site = 'http://www.kara.com.ng/'
    page = getAllPage()
    produits = []
    for url in page:
        page_response = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})
        page_content = BeautifulSoup(page_response.content, "html.parser")
        # Static logo assets hosted on the aggregator's own server.
        logo = "http://137.74.199.121/img/logo/ng/kara.jpg"
        logoS= "http://137.74.199.121/img/logo/ng/logoS/kara.jpg"
        annonce = page_content.find("div", {"class": "category-products"}).findAll('li', {"class": "item"})
        for item in annonce:
            try:
                url = item.find('h2', {"class": "product-name"}).find('a').get("href")
                lib = item.find('h2', {"class": "product-name"}).find('a').text
                img = item.findAll("img")[0].get("src")
                try:
                    # Strip decimals, thousands separators and the naira
                    # sign; fall back to 0 when the price is missing/odd.
                    prix = int(
                        item.find("span", {"class": "price"}).text.replace(u'.00', '').replace(u',', '').replace(u'₦', ''))
                except:
                    prix=0
                produits.append(
                    {
                        'libProduct': lib,
                        'slug': '',
                        'descProduct': '',
                        'priceProduct': prix,
                        'imgProduct': img,
                        'numSeller': '',
                        'src': site,
                        'urlProduct': url,
                        'logo': logo,
                        'logoS':logoS,
                        'origin': origin,
                        'country':'ng'
                    }
                )
            except:
                # Malformed listing item: skip it entirely.
                continue
    return produits
# Module-level driver: scrape the whole site and push each product to the
# comparison platform's insert API, printing each API response.
produits = scrapKara(origin=0)
url = 'http://api.comparez.co/ads/insert-product/'
for item in produits:
    response = requests.post(url, data=item)
    # api response
    print(response.json())
| [
"50021226+sysall@users.noreply.github.com"
] | 50021226+sysall@users.noreply.github.com |
1dee9eaec67b0c0952431a177322b33833f669d8 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/GCNet/dependency/mmdet/models/detectors/point_rend.py | e9d1d4b639d2027b566b58ab2b44017d39b48e54 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 1,366 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class PointRend(TwoStageDetector):
    """PointRend: Image Segmentation as Rendering.

    Implementation of `PointRend <https://arxiv.org/abs/1912.08193>`_.

    This class adds nothing on top of :class:`TwoStageDetector`; the
    PointRend-specific behaviour lives entirely in the configured
    ``roi_head``. The constructor simply forwards its configuration.
    """

    def __init__(self,
                 backbone,
                 rpn_head,
                 roi_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 pretrained=None):
        # Collect the configuration and hand it to the two-stage base class.
        components = dict(
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
        )
        super(PointRend, self).__init__(**components)
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
30acb4f59b587dcedb03497789085c67b3cd741d | 8c1bf43b527314b35dfda149bb404864642b07bc | /src/apps/accounts/models.py | 57ad302330fcaafb7e4bd0c3793f01c206db5f44 | [
"Apache-2.0"
] | permissive | aminabromand/ecommerce | f36bbe6fb0410fb48e612adbb1937fc336d08abe | d807a49a788820fc7561b9d4696d06cf88560b91 | refs/heads/master | 2022-12-09T20:43:14.208733 | 2018-10-06T09:39:32 | 2018-10-06T09:39:32 | 135,045,866 | 0 | 0 | Apache-2.0 | 2022-12-08T02:21:56 | 2018-05-27T12:50:05 | Python | UTF-8 | Python | false | false | 6,596 | py | from datetime import timedelta
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Q
from django.db.models.signals import pre_save, post_save
from django.contrib.auth.models import (
AbstractBaseUser, BaseUserManager
)
from django.core.mail import send_mail
from django.template.loader import get_template
from django.utils import timezone
from ecommerce.utils import random_string_generator, unique_key_generator
# send_mail(subject, message, from_email, recipient_list, html_message)
DEFAULT_ACTIVATION_DAYS = getattr(settings, 'DEFAULT_ACTIVATION_DAYS', 7)
# Create your models here.
class UserManager(BaseUserManager):
    """Manager for the email-based ``User`` model.

    Provides the three creation entry points Django expects when
    ``USERNAME_FIELD`` is the email address.
    """

    def create_user(self, email, full_name=None, password=None, is_active=True, is_staff=False, is_admin=False):
        """Create and persist a user.

        Raises ``ValueError`` when email or password is missing.
        Returns the saved user instance.
        """
        if not email:
            raise ValueError("Users must have an email address")
        if not password:
            # Fixed message: previously read "password address" (copy-paste typo).
            raise ValueError("Users must have a password")
        user_obj = self.model(
            email=self.normalize_email(email),
            full_name=full_name,
        )
        user_obj.set_password(password)  # hashes the password; never stored in plaintext
        user_obj.staff = is_staff
        user_obj.admin = is_admin
        user_obj.is_active = is_active
        user_obj.save(using=self._db)
        return user_obj

    def create_staffuser(self, email, full_name=None, password=None):
        """Convenience wrapper: create a staff (non-admin) account."""
        return self.create_user(
            email,
            full_name=full_name,
            password=password,
            is_staff=True,
        )

    def create_superuser(self, email, full_name=None, password=None):
        """Convenience wrapper: create a staff + admin account."""
        return self.create_user(
            email,
            full_name=full_name,
            password=password,
            is_staff=True,
            is_admin=True,
        )
class User(AbstractBaseUser):
    """Custom user model that authenticates with email instead of username.

    The boolean fields ``staff``/``admin`` back the ``is_staff``/``is_admin``
    properties Django's admin checks.
    """
    # username = models.CharField()
    email = models.EmailField(max_length=255, unique=True)
    full_name = models.CharField(max_length=255, blank=True, null=True)
    is_active = models.BooleanField(default=True)  # False until email activation (see receivers below)
    staff = models.BooleanField(default=False)  # surfaced via the is_staff property
    admin = models.BooleanField(default=False)  # surfaced via the is_admin property
    timestamp = models.DateTimeField(auto_now_add=True)
    # confirm = models.BooleanField(default=False)
    # confirmed_date = models.DateTimeField()
    USERNAME_FIELD = 'email' # could be username if we wanted to
    REQUIRED_FIELDS = [] # ['full_name'] # USERNAME_FIELD and password are required by default
    objects = UserManager()
    def __str__(self):
        return self.email
    def get_full_name(self):
        # Fall back to the email when no full name was provided.
        if self.full_name:
            return self.full_name
        return self.email
    def get_short_name(self):
        return self.email
    def has_perm(self, perm, obj=None):
        # Permission checks are not object-granular in this project.
        return True
    def has_module_perms(self, app_lable):
        return True
    @property
    def is_staff(self):
        # Admins are implicitly staff.
        if self.is_admin:
            return True
        return self.staff
    @property
    def is_admin(self):
        return self.admin
class EmailActivationQuerySet(models.query.QuerySet):
    """Custom queryset, used as EmailActivation.objects.all().confirmable()."""

    def confirmable(self):
        """Activations that are still pending (not activated, not force-expired)
        and were created within the last DEFAULT_ACTIVATION_DAYS days."""
        now = timezone.now()
        window_start = now - timedelta(days=DEFAULT_ACTIVATION_DAYS)
        pending = self.filter(activated=False, forced_expired=False)
        return pending.filter(timestamp__gt=window_start, timestamp__lte=now)
class EmailActivationManager(models.Manager):
    """Manager exposing the confirmable() window and pending-email lookups."""

    def get_queryset(self):
        return EmailActivationQuerySet(self.model, using=self._db)

    def confirmable(self):
        """Shortcut for the queryset-level confirmable() filter."""
        return self.get_queryset().confirmable()

    def email_exists(self, email):
        """Pending (not yet activated) activations matching `email` either on
        the activation record itself or on the related user."""
        matches_email = Q(email=email) | Q(user__email=email)
        return self.get_queryset().filter(matches_email).filter(activated=False)
class EmailActivation(models.Model):
    """One-shot email-confirmation token tied to a User.

    The `key` is minted by the pre_save receiver below; `activate()` flips
    both the user's `is_active` flag and this record's `activated` flag.
    """
    user = models.ForeignKey(User)
    email = models.EmailField()
    key = models.CharField(max_length=120, blank=True, null=True)
    activated = models.BooleanField(default=False)
    forced_expired = models.BooleanField(default=False)
    expires = models.IntegerField(default=7) # 7 Days
    timestamp = models.DateTimeField(auto_now_add=True)
    update = models.DateTimeField(auto_now=True)
    objects = EmailActivationManager()

    def __str__(self):
        return self.email

    def can_activate(self):
        """True while this record is still inside the confirmable window."""
        # exists() replaces the previous qs.exists()/if/True/False dance.
        return EmailActivation.objects.filter(pk=self.pk).confirmable().exists()

    def activate(self):
        """Activate the linked user and mark this record consumed.

        Returns True on success, False when the window has passed.
        """
        if self.can_activate():
            # pre activation user signal could be fired here
            user = self.user
            user.is_active = True
            user.save()
            # post signal for user just activated could be fired here
            self.activated = True
            self.save()
            return True
        return False

    def regenerate(self):
        """Clear the key so the pre_save receiver mints a fresh one on save().

        Returns True when a new key was assigned.
        """
        self.key = None
        self.save()
        return self.key is not None

    def send_activation(self):
        """Send the confirmation email for this activation.

        Returns send_mail()'s result on success, None when sending raised,
        and False when this record is not in a sendable state (already
        activated, force-expired, or missing its key).
        """
        if not self.activated and not self.forced_expired:
            if self.key:
                base = getattr(settings, 'BASE_URL', 'https://127.0.0.1:8000')
                key_path = reverse('account:email-activate', kwargs={'key': self.key})
                path = '{base}{path}'.format(base=base, path=key_path)
                context = {
                    'path': path,
                    'email': self.email,
                }
                # Removed: an unused `key = random_string_generator(size=45)`
                # local; the real key is generated in the pre_save receiver.
                txt_ = get_template('registration/emails/verify.txt').render(context)
                html_ = get_template('registration/emails/verify.html').render(context)
                subject = '1-Click Email Verification'
                from_email = settings.DEFAULT_FROM_EMAIL
                recipient_list = [self.email]
                sent_mail = None
                print("account.models: sending email...")
                try:
                    sent_mail = send_mail(
                        subject,
                        txt_,
                        from_email,
                        recipient_list,
                        html_message=html_,
                        fail_silently=False
                    )
                except Exception as e:
                    print("account.models: exception!")
                    print(e)
                print("account.models: email sent")
                return sent_mail
        return False
def pre_save_email_activation(sender, instance, *args, **kwargs):
    # Before saving a pending activation (not activated, not force-expired),
    # ensure it carries a unique key. EmailActivation.regenerate() relies on
    # this by clearing the key and re-saving.
    if not instance.activated and not instance.forced_expired:
        if not instance.key:
            instance.key = unique_key_generator(instance)
pre_save.connect(pre_save_email_activation, sender=EmailActivation)
def post_save_user_create_receiver(sender, instance, created, *args, **kwargs):
    # On first save of a User, create its EmailActivation record and send the
    # confirmation email immediately (send happens synchronously in-request).
    if created:
        obj = EmailActivation.objects.create(user=instance, email=instance.email)
        obj.send_activation()
post_save.connect(post_save_user_create_receiver, sender=User)
class Profile(models.Model):
    # One-to-one extension point for per-user data beyond the auth fields.
    user = models.OneToOneField(User)
    # extra fields
class GuestEmail(models.Model):
    # Email address captured without a full account (e.g. guest checkout).
    email = models.EmailField()
    active = models.BooleanField(default=True)
    update = models.DateTimeField(auto_now=True)
    timestamp = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return self.email
| [
"amin.r.abromand@gmail.com"
] | amin.r.abromand@gmail.com |
7d30d7f5aee1e2173cc4c0a715e6cbcbba6682b6 | f8cda1fb9f5461893ae08c08a6683c4a48f1281c | /paramz/optimization/stochastics.py | 5041c57cff24bb88d1a08b561c0933c22acb951a | [
"BSD-3-Clause"
] | permissive | beckdaniel/paramz | 6053b23b93366ab5baa4e23a2889a3f34cb941b5 | 6f518aadeb2778040f192950458b72a89dd1933f | refs/heads/master | 2021-01-18T08:50:21.923754 | 2016-01-22T10:50:23 | 2016-01-22T10:50:23 | 51,257,243 | 0 | 0 | null | 2016-02-07T17:35:42 | 2016-02-07T17:35:42 | null | UTF-8 | Python | false | false | 4,927 | py | #===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of paramax nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
class StochasticStorage(object):
    '''
    Base container for stochastic-descent parameters, such as subset
    indices or step lengths.
    Subclasses populate self.d as a list of lists:
    [dimension indices, nan indices for those dimensions]
    so that minibatches can be used as efficiently as possible.
    '''
    def __init__(self, model):
        """
        Initialize this stochastic container using the given model.
        The base implementation keeps no state; subclasses override.
        """
    def do_stochastics(self):
        """
        Update the internal state to the next batch of the stochastic
        descent algorithm.
        """
        pass
    def reset(self):
        """
        Reset the state of this stochastics generator.
        """
class SparseGPMissing(StochasticStorage):
    """Group output dimensions of ``model.Y_normalized`` by their
    missing-data (NaN) pattern, so dimensions sharing a pattern can be
    processed together efficiently."""

    def __init__(self, model, batchsize=1):
        """Build ``self.d``: one ``[dim_indices, not_nan_mask]`` pair per
        distinct NaN pattern across the columns of ``model.Y_normalized``.

        ``batchsize`` is accepted for interface compatibility but unused:
        this storage always iterates over all dimensions.
        """
        import numpy as np
        self.Y = model.Y_normalized
        bdict = {}
        for d in range(self.Y.shape[1]):
            inan = np.isnan(self.Y[:, d])
            # tobytes() gives a hashable fingerprint of the NaN pattern.
            # This replaces the previous np.array2string hack, which mutated
            # numpy's global print options and relied on a bare `except`
            # (now an explicit membership test).
            pattern = inan.tobytes()
            if pattern in bdict:
                bdict[pattern][0].append(d)
            else:
                bdict[pattern] = [[d], ~inan]
        self.d = bdict.values()
class SparseGPStochastics(StochasticStorage):
    """
    Stochastic state for the sparse GP: tracks which output dimension(s)
    the current minibatch covers, plus their missing-data (NaN) masks.
    """
    def __init__(self, model, batchsize=1, missing_data=True):
        self.batchsize = batchsize
        self.output_dim = model.Y.shape[1]
        self.Y = model.Y_normalized
        self.missing_data = missing_data
        self.reset()
        self.do_stochastics()
    def do_stochastics(self):
        import numpy as np
        if self.batchsize == 1:
            # Deterministic round-robin over output dimensions.
            self.current_dim = (self.current_dim+1)%self.output_dim
            self.d = [[[self.current_dim], np.isnan(self.Y[:, self.current_dim]) if self.missing_data else None]]
        else:
            # Random subset of dimensions, optionally grouped by NaN pattern.
            self.d = np.random.choice(self.output_dim, size=self.batchsize, replace=False)
            bdict = {}
            if self.missing_data:
                # NOTE(review): the NaN pattern is fingerprinted by rendering
                # the boolean mask to a string; this temporarily mutates
                # numpy's global print options and uses a bare except for the
                # first-seen case -- fragile, but left unchanged here.
                opt = np.get_printoptions()
                np.set_printoptions(threshold=np.inf)
                for d in self.d:
                    inan = np.isnan(self.Y[:, d])
                    arr_str = np.array2string(inan,np.inf, 0,True, '',formatter={'bool':lambda x: '1' if x else '0'})
                    try:
                        bdict[arr_str][0].append(d)
                    except:
                        bdict[arr_str] = [[d], ~inan]
                np.set_printoptions(**opt)
                self.d = bdict.values()
            else:
                self.d = [[self.d, None]]
    def reset(self):
        # -1 so the first do_stochastics() call starts at dimension 0.
        self.current_dim = -1
        self.d = None
| [
"ibinbei@gmail.com"
] | ibinbei@gmail.com |
cbdb0d10a26b61e4a27b69ded1432b532a93316a | bc404fa8507698d53bb15c4a77d15cec0e59f766 | /runner.py | ab851aab133a95586485b4b113ab510b3f470969 | [] | no_license | avioj/web_parser | 8cdcd58d0f64936b76cc21e5711a9ea5a3327b19 | 4b4e4456dfff18b0f72c749ee6e5d92704b06c50 | refs/heads/master | 2020-05-27T15:45:05.091910 | 2019-05-26T20:26:11 | 2019-05-26T20:26:11 | 188,685,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.webdriver import WebDriver
from helpers import get_info_by_elements
from pages import StartPage, RECOMMENDED, RECOMMENDED, DATE, NAME, PRICE_MIN, PRICE_MAX, BRAND
def app(min_price, max_price, sorting_type, search_string):
    """Run one scraping session on iledebeaute.ru: search, sort, filter by
    price, then print the scraped product info and close the browser.

    NOTE(review): parameter semantics are assumed from the page helpers
    (prices numeric, sorting_type one of the constants imported from
    `pages`) -- confirm against pages.py.
    """
    chrome_options = Options()
    chrome_options.add_argument("--disable-notifications")
    chrome_options.add_argument("--start-maximized")
    browser = WebDriver(chrome_options=chrome_options)
    browser.get("https://iledebeaute.ru/")
    start = StartPage(browser)
    products_page = start.search_by_name(search_string)
    products_page.pick_sorting(sorting_type)
    products_page.pick_price(min_price, max_price)
    products_list = products_page.get_all_products()
    print(get_info_by_elements(products_list))
    # Release the chromedriver process. NOTE(review): not in a try/finally,
    # so an exception above leaks the browser -- consider wrapping.
    browser.quit()
"Vladimir.Tsyuman@acronis.com"
] | Vladimir.Tsyuman@acronis.com |
1f67fe7255fb1282c3fcc2652a59677474c9bda8 | 784936ad8234b5c3c20311ce499551ee02a08879 | /lab4/patterns/pattern04.py | 3fcf0f3989546c699ae05960faf3d52c1bb8cec2 | [] | no_license | jonlin97/CPE101 | 100ba6e5030364d4045f37e317aa05fd6a06cb08 | 985d64497a9861f59ab7473322b9089bfa57fd10 | refs/heads/master | 2021-06-16T01:31:31.025153 | 2017-02-28T19:29:11 | 2017-02-28T19:29:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | import driver
def letter(row, col):
    """Return 'M' for cells inside the rectangle rows 2-4 x cols 3-6,
    'S' everywhere else."""
    inside = row in (2, 3, 4) and col in (3, 4, 5, 6)
    return 'M' if inside else 'S'

if __name__ == '__main__':
    driver.comparePatterns(letter)
| [
"eitan.simler@gmail.com"
] | eitan.simler@gmail.com |
784e9c85e32828f97de016e7afd5cdc013864d03 | eb00dd00f692368b2287c6dab561bd5829603e34 | /autocomplete/app.py | 6e84c9bab580186e521e32f4eaab55d9ff2a74c6 | [] | no_license | islammohamed/elastic-geo-autocomplete-python | 96ff9e2cd1d8052161cfd06ddf0d5935de0bfceb | 1a0e4274a8b98da35fd51ccd5e87ea296f3f5a93 | refs/heads/master | 2021-08-22T23:27:09.660054 | 2017-12-01T17:05:11 | 2017-12-01T17:05:11 | 112,765,200 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | from wsgiref import simple_server
import falcon
from elasticsearch import Elasticsearch
from resources import CityAutoCompleteResource
# Wire the Falcon WSGI app: a single /autocomplete endpoint backed by a
# local Elasticsearch node (Elasticsearch() defaults to localhost:9200).
# `application` is the conventional name WSGI servers look for.
api = application = falcon.API()
api.add_route('/autocomplete', CityAutoCompleteResource(Elasticsearch()))
if __name__ == '__main__':
    # Development-only server; use a production WSGI server otherwise.
    httpd = simple_server.make_server('127.0.0.1', 8000, api)
    httpd.serve_forever()
"iabdelaziz@me.com"
] | iabdelaziz@me.com |
6a719acd4dffddb0f7e4bfdb24bdd51f2e28e5b9 | bb6e74879df228310c19eb0135f304852cd1762c | /250 LeftAndRightHandedDiv2.py | 15c03489466b7d1808234c1ac467cec24e4a5281 | [] | no_license | lidiamcfreitas/TC.Problems | c27772bf8eb83dcba9fffac02792abe7ac4b3f8e | 883f6123fa9776d2311f6d141cf423cc8112cd23 | refs/heads/master | 2020-06-01T00:43:57.537503 | 2017-09-07T22:37:38 | 2017-09-07T22:37:38 | 18,874,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py |
import string
class LeftAndRightHandedDiv2:
    """Count positions where an 'R' is immediately followed by an 'L'."""

    def count(self, S):
        # Each adjacent (R, L) pair is one occurrence; since 'RL' cannot
        # overlap itself, this equals S.count('RL').
        return sum(1 for left, right in zip(S, S[1:]) if left == 'R' and right == 'L')
#test = LeftAndRightHandedDiv2()
#S = input("insert a row (x to close): ")
#print(test.count(S))
#while S!='x':
#S = input("insert a row (x to close): ")
#print(test.count(S)) | [
"lidiamcfreitas@gmail.com"
] | lidiamcfreitas@gmail.com |
265ae5eb5a7eb8f9efc75d1bef07d8e1d7eedfa0 | caea498739d1939c9023bddddbf5bd03216a5c8b | /017 电话号码的组合.py | 455a955ca4624deb3bebb74ef98d49b89630f991 | [] | no_license | Gavinee/Leetcode | 6ef7ccfd52a1bbeb740d144c8ad5026b4a91cf93 | 28bee990099a4c82451217df3f6aee0dc08908c2 | refs/heads/master | 2020-03-25T13:29:46.433075 | 2018-10-20T15:15:40 | 2018-10-20T15:15:40 | 143,828,738 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,386 | py | """
给定一个仅包含数字 2-9 的字符串,返回所有它能表示的字母组合。
给出数字到字母的映射如下(与电话按键相同)。注意 1 不对应任何字母。
'2':'abc'
'3':'def'
'4':'ghi'
'5':'jkl'
'6':'mno'
'7':'pqrs'
'8':'tuv'
'9':'wxyz'
"""
__author__ = 'Qiufeng'
class Solution:
    # Phone keypad mapping: digit -> candidate letters ('0' and '1' map to
    # nothing, which prunes their branches just like the original ladder).
    _DIGIT_LETTERS = {
        '2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl',
        '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz',
    }

    def letterCombinations(self, digits):
        """
        :type digits: str
        :rtype: List[str]

        Return every letter string that `digits` can spell on a phone
        keypad, in depth-first order (letters of the first digit vary
        slowest). Empty input yields [].
        """
        if digits == "":
            return []
        combos = []
        self.Combination(0, digits, "", combos)
        return combos

    def Combination(self, i, digits, strr, list1):
        """Recursive helper: extend the prefix `strr` with each letter of
        digits[i]; completed strings are appended to `list1`."""
        if i == len(digits):
            list1.append(strr)
            return
        # Dict lookup replaces the original if/elif ladder and drops the
        # unused `temp` local.
        for ch in self._DIGIT_LETTERS.get(digits[i], ''):
            self.Combination(i + 1, digits, strr + ch, list1)
| [
"noreply@github.com"
] | Gavinee.noreply@github.com |
711f6841e89c8344abde885ea1e587c29ed47b91 | ac0beeece749860e243b13d272ebf55f791f6d11 | /cgparsermx.py | 9747b7babd70b5a658b016b35e865add19e5182f | [] | no_license | dev-gektor/xdebugtoolkit | 341c9247a21f01347eda64f8f44cdae32b9b877b | 4766fc7ccf01209f22194345b93dcddbe175d9e9 | refs/heads/master | 2023-03-08T12:29:55.273688 | 2023-02-28T13:00:26 | 2023-02-28T13:00:26 | 88,850,898 | 0 | 0 | null | 2017-04-20T10:02:22 | 2017-04-20T10:02:21 | null | UTF-8 | Python | false | false | 4,385 | py | from mx.TextTools import *
from cgparser import *
class Context:
    """Accumulates parsed callgrind entries.

    All ``set_*`` methods are mx.TextTools tag callbacks with the signature
    ``(taglist, text, l, r, subtags)``; ``text[l:r]`` is the matched slice.
    Kept Python 2 compatible, matching the rest of this module.
    """

    def __init__(self):
        self.entries = []
        self._last_entry = None
        self._last_raw_call = None
        self._fl_cache = {}  # file name string -> FileName instance
        self._fn_cache = {}  # function name string -> FunctionName instance

    def _interned(self, cache, factory, name):
        # Shared memoisation (previously triplicated across set_fl/set_fn/
        # set_subcall_cfn): one wrapper object per distinct string.
        try:
            return cache[name]
        except KeyError:
            value = cache[name] = factory(name)
            return value

    def set_version(self, taglist, text, l, r, subtags):
        self.version = text[l:r]

    def set_fl(self, taglist, text, l, r, subtags):
        # An 'fl=' line opens a new entry.
        self._last_entry = RawEntry()
        self.entries.append(self._last_entry)
        self._last_entry.fl = self._interned(self._fl_cache, FileName, text[l:r])

    def set_fn(self, taglist, text, l, r, subtags):
        self._last_entry.fn = self._interned(self._fn_cache, FunctionName, text[l:r])

    def set_summary(self, taglist, text, l, r, subtags):
        # Summary lines are recognised but intentionally ignored.
        pass

    def set_position(self, taglist, text, l, r, subtags):
        self._last_entry.position = int(text[l:r])

    def set_time(self, taglist, text, l, r, subtags):
        self._last_entry.self_time = int(text[l:r])

    def set_subcall_cfn(self, taglist, text, l, r, subtags):
        # A 'cfn=' line opens a new subcall under the current entry.
        self._last_raw_call = RawCall()
        self._last_entry.add_subcall(self._last_raw_call)
        self._last_raw_call.cfn = self._interned(self._fn_cache, FunctionName, text[l:r])

    def set_subcall_position(self, taglist, text, l, r, subtags):
        self._last_raw_call.position = int(text[l:r])

    def set_subcall_time(self, taglist, text, l, r, subtags):
        self._last_raw_call.inclusive_time = int(text[l:r])
# Shared parser state: the tag-table callbacks below write into contextobj.
contextobj = Context()
# Matches the callgrind file header (version/cmd/part/events lines).
header_table = (
    # version
    (None, Word, 'version: ', MatchFail),
    (contextobj.set_version, AllNotIn+CallTag, newline, MatchFail),
    (None, AllIn, newline, MatchFail),
    # cmd
    (None, Word, 'cmd: ', MatchFail),
    ('cmd', AllNotIn, newline, MatchFail),
    (None, AllIn, newline, MatchFail),
    # part
    (None, Word, 'part: ', MatchFail),
    ('part', AllNotIn, newline, MatchFail),
    (None, AllIn, newline, MatchFail),
    # events
    (None, Word, 'events: ', MatchFail),
    ('events', AllNotIn, newline, MatchFail),
    (None, AllIn, newline, MatchFail),
)
# Matches one subcall record: cfn=, a calls= line, then position and time.
subcall_table = (
    # cfn
    (None, Word, 'cfn=', MatchFail),
    (contextobj.set_subcall_cfn, AllNotIn + CallTag, newline, MatchFail),
    (None, AllIn, newline, MatchFail),
    # calls
    (None, Word, 'calls=1 0 0', MatchFail),
    (None, AllIn, newline, MatchFail),
    # position
    (contextobj.set_subcall_position, AllIn + CallTag, number, MatchFail),
    (None, Word, ' ', MatchFail),
    # time
    (contextobj.set_subcall_time, AllIn + CallTag, number, MatchFail),
    (None, AllIn, newline, MatchFail),
)
# Matches one cost entry: fl=/fn= header, optional summary line, the
# entry's own position and self time, then any number of subcalls.
entry_table = (
    # fl
    (None, Word, 'fl=', MatchFail),
    #('fl', AllNotIn, newline, MatchFail),
    #('fl', AllNotIn, newline, MatchFail),
    (contextobj.set_fl, AllNotIn + CallTag, newline, MatchFail),
    (None, AllIn, newline, MatchFail),
    # fn
    (None, Word, 'fn=', MatchFail),
    #('fn', AllNotIn, newline, MatchFail),
    (contextobj.set_fn, AllNotIn + CallTag, newline, MatchFail),
    (None, AllIn, newline, MatchFail),
    # summary
    (None, Word, 'summary: ', +3),
    (contextobj.set_summary, AllNotIn + CallTag, newline, MatchFail),
    (None, AllIn, newline, MatchFail),
    # position
    (contextobj.set_position, AllIn + CallTag, number, MatchFail),
    (None, AllIn, ' ', MatchFail),
    # time
    (contextobj.set_time, AllIn + CallTag, number, MatchFail),
    (None, AllIn, newline, MatchFail),
    # subcalls
    (None, Word + LookAhead, 'cfn=', MatchOk),
    (None, Table, subcall_table, MatchFail, -1),
)
# Whole-file grammar: header followed by entries until end of input.
cg_table = (
    # header
    (None, Table, header_table, MatchFail),
    # body
    (None, Word + LookAhead, 'fl=', MatchOk),
    (None, Table, entry_table, MatchFail, -1),
)
if __name__ == '__main__':
    # Quick manual benchmark: parse the file given on the command line and
    # print elapsed seconds. NOTE: Python 2 syntax (print statement).
    import sys
    import time
    contents = open(sys.argv[1]).read()
    timer = time.time()
    result, taglist, nextindex = tag(contents, cg_table, 0)
    if result != 1:
        raise Exception('finished with an error')
    print time.time() - timer
    #print_tags(text,taglist)
| [
"alexey.kupershtokh@gmail.com"
] | alexey.kupershtokh@gmail.com |
1e58630a652a291c0879b8f6a45341709683ef2f | ace9426785a56e17157bec654dfce519737301ad | /FP_analysis.py | 927a561600cc55730181e6de324ed75218d2fa91 | [] | no_license | TAGPhD/Fiber-Photometry | e09534fa0535f32c8af0b2221863428759a76876 | 4261bb4118525cd44fed39b95a3f7f4ace272d14 | refs/heads/main | 2023-02-03T16:50:49.795351 | 2020-12-18T02:34:16 | 2020-12-18T02:34:16 | 322,462,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,726 | py | # -*- coding: utf-8 -*-
"""
FP_analysis.py
Converting Matlab analysis program for Fiber Photometry into Python.
Based on the analysis described in Martianova et al. 2019.
First attempt.
"""
# importing necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import csc_matrix, eye, diags
from scipy.sparse.linalg import spsolve
from sklearn import linear_model
###############################################################################
### defining some important functions
def read_signal(path_name, file_name):
    """Load a (de-interleaved) recording from CSV.

    Concatenates `path_name` and `file_name` and returns the file's
    contents as a pandas DataFrame with columns unchanged.
    """
    full_path = path_name + file_name
    return pd.read_csv(full_path)
def roll_mean(signal_col):
    """Smooth a signal column with a centered 10-sample moving average.

    `min_periods=1` keeps the edges of the trace defined: partial windows
    are averaged instead of producing NaN.
    """
    windows = signal_col.rolling(window=10, center=True, min_periods=1)
    return windows.mean()
"""
The following two functions (WhittakerSmooth and airPLS) are from a public
github, see python file airPLS_Python.py (and below) for the legalese and
further notes.
airPLS.py Copyright 2014 Renato Lombardo - renato.lombardo@unipa.it
Baseline correction using adaptive iteratively reweighted penalized least squares
This program is a translation in python of the R source code of airPLS version 2.0
by Yizeng Liang and Zhang Zhimin - https://code.google.com/p/airpls
Reference:
Z.-M. Zhang, S. Chen, and Y.-Z. Liang, Baseline correction using adaptive
iteratively reweighted penalized least squares. Analyst 135 (5), 1138-1146
(2010).
Description from the original documentation:
Baseline drift always blurs or even swamps signals and deteriorates analytical
results, particularly in multivariate analysis. It is necessary to correct
baseline drift to perform further data analysis. Simple or modified polynomial
fitting has been found to be effective in some extent. However, this method
requires user intervention and prone to variability especially in low signal-
to-noise ratio environments. The proposed adaptive iteratively reweighted
Penalized Least Squares (airPLS) algorithm doesn't require any user
intervention and prior information, such as detected peaks. It iteratively
changes weights of sum squares errors (SSE) between the fitted baseline and
original signals, and the weights of SSE are obtained adaptively using between
previously fitted baseline and original signals. This baseline estimator is
general, fast and flexible in fitting baseline.
"""
def WhittakerSmooth(x, w, lambda_, differences=1):
    '''
    Penalized least squares background fitting (Whittaker smoother).

    input
        x: input data (i.e. chromatogram of spectrum)
        w: binary masks (value of the mask is zero if a point belongs to
           peaks and one otherwise)
        lambda_: smoothing parameter; the larger lambda is, the smoother
           the resulting background
        differences: integer order of the difference penalty

    output
        the fitted background vector
    '''
    X = np.matrix(x)
    m = X.size
    # Removed: an unused `i = np.arange(0, m)` local (flagged in review).
    E = eye(m, format='csc')
    # numpy.diff() does not work with sparse matrices; E[1:] - E[:-1] is
    # the equivalent first-difference operator.
    D = E[1:] - E[:-1]
    W = diags(w, 0, shape=(m, m))
    A = csc_matrix(W + (lambda_ * D.T * D))
    B = csc_matrix(W * X.T)
    background = spsolve(A, B)
    return np.array(background)


def airPLS(x, lambda_=100, porder=1, itermax=15):
    '''
    Adaptive iteratively reweighted penalized least squares baseline fit.

    input
        x: input data (i.e. chromatogram of spectrum)
        lambda_: smoothing parameter for the background z; larger is smoother
        porder: order of the difference penalty
        itermax: maximum number of reweighting iterations

    output
        the fitted background vector
    '''
    m = x.shape[0]
    w = np.ones(m)
    for i in range(1, itermax + 1):
        z = WhittakerSmooth(x, w, lambda_, porder)
        d = x - z
        dssn = np.abs(d[d < 0].sum())
        # Converged when the below-baseline residual is negligible.
        if dssn < 0.001 * (np.abs(x)).sum() or i == itermax:
            if i == itermax:
                # Typo fixed: previously printed 'WARING'.
                print('WARNING: max iteration reached!')
            break
        # d >= 0 marks points that belong to peaks: weight 0 to ignore them.
        w[d >= 0] = 0
        w[d < 0] = np.exp(i * np.abs(d[d < 0]) / dssn)
        w[0] = np.exp(i * (d[d < 0]).max() / dssn)
        w[-1] = w[0]
    return z
def standardize_signal(signal_col):
    """Standardize a signal: center on the median and scale by the
    (population) standard deviation."""
    center = np.median(signal_col)
    spread = np.std(signal_col)
    return (signal_col - center) / spread
def linear_reg(z_ref,z_sig):
"""
Performs linear regression on the reference signal (iso) to fit it to
a signal (RCaMP or GCaMP). Returns np array with fitted iso
Partly copied from the Jupyter Notebook by
Martianova and associates (see lab OneNote)
z_ref is raw_signal_iso['Stdz(Number,Color)']
z_sig is raw_signal_(color)cmp['Stdz(Number,Color)']
The Pandas data series need to be reshaped to work with lin.fit
(which perform a linear regression fit with the function Lasso)
"""
lin = linear_model.Lasso(alpha=0.0001,precompute=True,max_iter=1000,
positive=True, random_state=9999, selection='random')
nref = len(z_ref)
nsig = len(z_sig)
n = min(nref,nsig)
ref = np.array(z_ref[0:n].values.reshape(n,1))
sig = np.array(z_sig[0:n].values.reshape(n,1))
lin.fit(ref,sig)
z_ref_fitted = lin.predict(ref.reshape(n,1)).reshape(n,)
return z_ref_fitted
###############################################################################
### READING IN RAW SIGNAL (already de-interleaved)
# Make sure the path name is correct to retrieve the data (can copy/paste
# using windows explorer, just make sure all the slashes are /, and the path
# name ends with /). Verify the names of the files (copy/paste works well) and
# make sure the name ends with .csv.
# Note 410/415 is isosbestic, 470 is GCaMP, 560 is RCaMP
path_name = "C:/Users/HP/Desktop/Python Programs/Matlab conversion/Test Data/"
file_name_iso = "FST_C333_DatCreM2_410Raw_2020_8_13_(10.11.308)_2020-08-13T10_11_25.csv"
file_name_gcmp = "FST_C333_DatCreM2_470Raw_2020_8_13_(10.11.308)_2020-08-13T10_11_25.csv"
file_name_rcmp = "FST_C333_DatCreM2_560Raw_2020_8_13_(10.11.308)_2020-08-13T10_11_25.csv"
file_name_key = "FST_C333_DatCreM2_KeyDown_2020_8_13_(10.11.308)_2020-08-13T10_11_04.csv"
raw_signal_iso = read_signal(path_name,file_name_iso)
raw_signal_gcmp = read_signal(path_name,file_name_gcmp)
raw_signal_rcmp = read_signal(path_name,file_name_rcmp)
key_down = read_signal(path_name,file_name_key)
# Converting to relevant time (in seconds)
key_down["Timestamp"] -= raw_signal_iso["Timestamp"][0]
raw_signal_iso["Timestamp"] -= raw_signal_iso["Timestamp"][0]
raw_signal_gcmp["Timestamp"] -= raw_signal_gcmp["Timestamp"][0]
raw_signal_rcmp["Timestamp"] -= raw_signal_rcmp["Timestamp"][0]
### Step 1 - Moving window mean to smooth the signal
# This is the 'smoothed' signal, if ever refered to below. Also called the
# mean signal
raw_signal_iso['Mean0R'] = roll_mean(raw_signal_iso["Unmarked Fiber0R"])
raw_signal_iso['Mean1R'] = roll_mean(raw_signal_iso["Marked Fiber1R"])
raw_signal_iso['Mean2G'] = roll_mean(raw_signal_iso["Unmarked Fiber2G"])
raw_signal_iso['Mean3G'] = roll_mean(raw_signal_iso["Marked Fiber3G"])
raw_signal_gcmp['Mean2G'] = roll_mean(raw_signal_gcmp["Unmarked Fiber2G"])
raw_signal_gcmp['Mean3G'] = roll_mean(raw_signal_gcmp["Marked Fiber3G"])
raw_signal_rcmp['Mean0R'] = roll_mean(raw_signal_rcmp["Unmarked Fiber0R"])
raw_signal_rcmp['Mean1R'] = roll_mean(raw_signal_rcmp["Marked Fiber1R"])
# Plotting an example mean to see how things are progressing - looks good!
plt.figure()
plt.plot(raw_signal_iso["Timestamp"],raw_signal_iso["Unmarked Fiber2G"],'k',\
raw_signal_iso["Timestamp"],raw_signal_iso["Mean2G"],'b',\
raw_signal_gcmp["Timestamp"],raw_signal_gcmp["Mean2G"],'g')
plt.legend(("Raw Iso","Mean Iso","Mean GCaMP"))
plt.title("Unmarked Fiber, ROI 2G")
plt.savefig("Testing Means for 2G.pdf")
### Step 2 - is baseline correction with airPLS, from Zhang et al. 2010.
# A python version of the functions is available on gibhub, just need to
# understand how it takes in data and what it outputs!
lambda_ = 5e4 # SUPER IMPORTANT, controls flatness fo baseline.
# Current best value known: 1e9 (from MATLAB version trials)
# Martianova's exp program used lambd = 5e4
porder = 1
itermax = 50 # These values recommended by exp prog
raw_signal_iso['BLC 0R'] = airPLS(raw_signal_iso['Mean0R'],lambda_,porder,itermax)
raw_signal_iso['BLC 1R'] = airPLS(raw_signal_iso['Mean1R'],lambda_,porder,itermax)
raw_signal_iso['BLC 2G'] = airPLS(raw_signal_iso['Mean2G'],lambda_,porder,itermax)
raw_signal_iso['BLC 3G'] = airPLS(raw_signal_iso['Mean3G'],lambda_,porder,itermax)
raw_signal_gcmp['BLC 2G'] = airPLS(raw_signal_gcmp['Mean2G'],lambda_,porder,itermax)
raw_signal_gcmp['BLC 3G'] = airPLS(raw_signal_gcmp['Mean3G'],lambda_,porder,itermax)
raw_signal_rcmp['BLC 0R'] = airPLS(raw_signal_rcmp['Mean0R'],lambda_,porder,itermax)
raw_signal_rcmp['BLC 1R'] = airPLS(raw_signal_rcmp['Mean1R'],lambda_,porder,itermax)
# Plotting an example baseline correction to see how things are progressing
plt.figure()
plt.plot(raw_signal_iso["Timestamp"],raw_signal_iso["Mean0R"],'b',\
raw_signal_iso["Timestamp"],raw_signal_iso["BLC 0R"],'purple')#,\
# raw_signal_rcmp["Timestamp"],raw_signal_rcmp["Mean2G"],'r')
plt.legend(("Mean Iso","BLC Iso"))
plt.title("Unmarked Fiber, ROI 0R")
plt.savefig("Testing BLC for 0R.pdf")
# It came out REALLY flat. I think I might need some real data to test this on,
# to be sure things are coming out right. This fake data doesn't have any
# changes to it, since it wasn't connected to a mouse and the fibers were not
# manipulated during recording.
### Step 2.5 - Subtract the BLC signal from the smoothed (mean) signal
# This step was not listed in the paper, but is in the exp program. So I'm
# adding it here. It also was not included in the Matlab version of the
# program (again, because it wasn't in the paper.)
raw_signal_iso['Sig0R'] = raw_signal_iso['Mean0R'] - raw_signal_iso['BLC 0R']
raw_signal_iso['Sig1R'] = raw_signal_iso['Mean1R'] - raw_signal_iso['BLC 1R']
raw_signal_iso['Sig2G'] = raw_signal_iso['Mean2G'] - raw_signal_iso['BLC 2G']
raw_signal_iso['Sig3G'] = raw_signal_iso['Mean3G'] - raw_signal_iso['BLC 3G']
raw_signal_gcmp['Sig2G'] = raw_signal_gcmp['Mean2G'] - raw_signal_gcmp['BLC 2G']
raw_signal_gcmp['Sig3G'] = raw_signal_gcmp['Mean3G'] - raw_signal_gcmp['BLC 3G']
raw_signal_rcmp['Sig0R'] = raw_signal_rcmp['Mean0R'] - raw_signal_rcmp['BLC 0R']
raw_signal_rcmp['Sig1R'] = raw_signal_rcmp['Mean1R'] - raw_signal_rcmp['BLC 1R']
# Plot this to be sure the signal was corrected properly
plt.figure()
plt.plot(raw_signal_gcmp['Timestamp'],raw_signal_gcmp['Sig3G'],'orange')
plt.title(('Corrected Smoothed Signal, Marked 3G'))
### Step 3 - standardize the waveform.
# This appears to be the baseline corrected signal minus the median value,
# then divide by the standard deviation. I thought it was the mean we are
# supposed to subtract, but Martianova says "median(Int)", so median it is.
raw_signal_iso['Stdz0R'] = standardize_signal(raw_signal_iso['Sig0R'])
raw_signal_iso['Stdz1R'] = standardize_signal(raw_signal_iso['Sig1R'])
raw_signal_iso['Stdz2G'] = standardize_signal(raw_signal_iso['Sig2G'])
raw_signal_iso['Stdz3G'] = standardize_signal(raw_signal_iso['Sig3G'])
raw_signal_gcmp['Stdz2G'] = standardize_signal(raw_signal_gcmp['Sig2G'])
raw_signal_gcmp['Stdz3G'] = standardize_signal(raw_signal_gcmp['Sig3G'])
raw_signal_rcmp['Stdz0R'] = standardize_signal(raw_signal_rcmp['Sig0R'])
raw_signal_rcmp['Stdz1R'] = standardize_signal(raw_signal_rcmp['Sig1R'])
# Plotting an example standardized signal to see how things are progressing
plt.figure()
plt.plot(raw_signal_gcmp["Timestamp"],raw_signal_gcmp["Mean3G"],'b',\
raw_signal_gcmp["Timestamp"],raw_signal_gcmp["BLC 3G"],'purple',\
raw_signal_gcmp["Timestamp"],raw_signal_gcmp["Stdz3G"],'g')
plt.legend(("Mean Gcmp","BLC Gcmp","Stdrzd Gcmp"))
plt.title("Marked Fiber, ROI 3G")
plt.savefig("Testing Standardization for 3G.pdf")
# Still need real data, since this data doesn't seem to be producing any
# recognizable results when analyzed, making me think something may be wrong
# with the program. NEED REAL DATA!!!!
### Step 4 - apply non-negative robust linear regression.
# Basically, fit the Isobestic signal to the complimentary GCaMP (or RCaMP)
# signal. Not all signals have same length (off by 1, usually). Need to trim
# back to shortest signal
ni = len(raw_signal_iso["Timestamp"])
nr = len(raw_signal_rcmp["Timestamp"])
ng = len(raw_signal_gcmp["Timestamp"])
n = min(ni,ng,nr)
indx = list(range(n))
final_sig_GCaMP = pd.DataFrame(raw_signal_gcmp['Timestamp'][0:n])
final_sig_RCaMP = pd.DataFrame(raw_signal_rcmp['Timestamp'][0:n])
final_sig_RCaMP['FitIso0R'] = linear_reg(raw_signal_iso['Stdz0R'],raw_signal_rcmp['Stdz0R'])
final_sig_RCaMP['FitIso1R'] = linear_reg(raw_signal_iso['Stdz1R'],raw_signal_rcmp['Stdz1R'])
final_sig_GCaMP['FitIso2G'] = linear_reg(raw_signal_iso['Stdz2G'],raw_signal_gcmp['Stdz2G'])
final_sig_GCaMP['FitIso3G'] = linear_reg(raw_signal_iso['Stdz3G'],raw_signal_gcmp['Stdz3G'])
# plotting to see how the signal is changing
plt.figure()
plt.plot(raw_signal_gcmp["Timestamp"],raw_signal_gcmp["Stdz3G"],'g',\
final_sig_GCaMP["Timestamp"],final_sig_GCaMP["FitIso3G"],'purple')
plt.legend(("Stdrzd Gcmp","LR of 3G iso"))
plt.title("Marked Fiber, ROI 3G")
plt.savefig("Testing Linear Regression for 3G.pdf")
### Step 5 - bringing it all together
# z(dF/F) = Stdz_sig - FitIso_sig
final_sig_RCaMP['zdFF 0R'] = raw_signal_rcmp['Stdz0R'][0:n] - final_sig_RCaMP['FitIso0R']
final_sig_RCaMP['zdFF 1R'] = raw_signal_rcmp['Stdz1R'][0:n] - final_sig_RCaMP['FitIso1R']
final_sig_GCaMP['zdFF 2G'] = raw_signal_gcmp['Stdz2G'][0:n] - final_sig_GCaMP['FitIso2G']
final_sig_GCaMP['zdFF 3G'] = raw_signal_gcmp['Stdz3G'][0:n] - final_sig_GCaMP['FitIso3G']
### plotting final signals and saving figures
x_key = list(key_down["Timestamp"])
plt.figure()
plt.plot(final_sig_RCaMP['Timestamp'], final_sig_RCaMP['zdFF 0R'],'red')
for keyline in x_key:
plt.axvline(x=keyline,ls='--',color='black')
plt.legend(("Signal","Event"))
plt.title("Final Signal, RCaMP Unmarked Fiber")
plt.ylabel("z dF/F")
plt.xlabel("Time (sec)")
plt.savefig("Final Signal RCaMP Unmrk, M3 Cre Test.pdf")
plt.figure()
plt.plot(final_sig_RCaMP['Timestamp'], final_sig_RCaMP['zdFF 1R'],'red')
for keyline in x_key:
plt.axvline(x=keyline,ls='--',color='black')
plt.legend(("Signal","Event"))
plt.title("Final Signal, RCaMP Marked Fiber")
plt.ylabel("z dF/F")
plt.xlabel("Time (sec)")
plt.savefig("Final Signal RCaMP Mrk, M3 Cre Test.pdf")
plt.figure()
plt.plot(final_sig_GCaMP['Timestamp'], final_sig_GCaMP['zdFF 2G'],'green')
for keyline in x_key:
plt.axvline(x=keyline,ls='--',color='black')
plt.legend(("Signal","Event"))
plt.title("Final Signal, GCaMP Unmarked Fiber")
plt.ylabel("z dF/F")
plt.xlabel("Time (sec)")
plt.savefig("Final Signal GCaMP Unmrk, M3 Cre Test.pdf")
plt.figure()
plt.plot(final_sig_GCaMP['Timestamp'], final_sig_GCaMP['zdFF 3G'],'green')
for keyline in x_key:
plt.axvline(x=keyline,ls='--',color='black')
plt.legend(("Signal","Event"))
plt.title("Final Signal, GCaMP Marked Fiber")
plt.ylabel("z dF/F")
plt.xlabel("Time (sec)")
plt.savefig("Final Signal GCaMP Mrk, M3 Cre Test.pdf")
# save the final signals into csv files
final_sig_RCaMP.to_csv('RCaMP_M3 Cre Test.csv',index=False)
final_sig_GCaMP.to_csv('GCaMP_M3 Cre Test.csv',index=False)
key_down.to_csv('keydown_M3 Cre Test.csv',index=False)
# See if can test with older, good data. Will have to insert titles for columns
| [
"noreply@github.com"
] | TAGPhD.noreply@github.com |
1ca12f40b6da6c54896751b8fdc0c2ed2ce7ded5 | d2fb1de19bb55e3b03db94b4fdce396fe56a223e | /caesure/ecdsa_secp256k1.py | a24f78a2d48632ae1799cf42702c8927e03412a2 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | mikegogulski/caesure | 83a2a0a5d9b7c16339d54076bc54d351dbe0c3e4 | ccee420665e3fb4e7a005241efc6832ead4b90d8 | refs/heads/master | 2021-01-22T00:02:40.058902 | 2014-11-04T05:54:25 | 2014-11-04T05:54:25 | 26,273,215 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | # -*- Mode: Python -*-
import caesure.secp256k1
from bitcoin import dhash
class KEY:
def __init__ (self):
self.p = None
def set_pubkey (self, key):
self.p = key
def verify (self, data, sig):
return caesure.secp256k1.verify (self.p, dhash (data), sig)
| [
"gitsam@rushing.nightmare.com"
] | gitsam@rushing.nightmare.com |
18af2cc7a6b22f0c1889986b3e99f59f37426d8a | b51d363dc64e3d959f08ff96f9fb1358d76c5333 | /Code/helper/wikidump/extract_docs_from_nq.py | a99d14cec0f1822a6c4379ebf3d2caa849b56b6a | [] | no_license | bernhard2202/qa-annotation | 40cdfa3560015e696caa3ab6176cc9949a1c99ac | ec92ff1bdef23d3c36059fc24e5d1b2c1dc3d5b4 | refs/heads/master | 2023-01-09T12:51:21.272493 | 2020-11-14T11:46:43 | 2020-11-14T11:46:43 | 297,882,550 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,213 | py | import json
import urllib.parse
all_docs = {}
with open('../data/v1.0-simplified_nq-dev-all.jsonl', 'r') as f:
for line in f:
dat = json.loads(line)
if dat['document_title'] not in all_docs:
tags = ['<H1>', '<H2>', '<H3>', '<Tr>', '<Td>', '<Ul>', '<Th>', '</Th>', '<Li>', '<Table>', '<P>', '<Br>', '</H1>', '</H2>', '</H3>', '</Tr>', '</Td>', '</Ul>', '</Li>', '</Table>', '</P>', '</Br>']
doc_text = ' '.join([t['token'] for t in dat['document_tokens'] if not t['html_token'] or t['token'] in tags])
if doc_text.find('<H2> References </H2>') > 0:
doc_text = doc_text[:doc_text.find('<H2> References </H2>')]
if doc_text.find('About Wikipedia') > 0:
doc_text = doc_text[:doc_text.find('About Wikipedia')]
tokens = doc_text.split(' ')
text = ' '.join([t if t not in tags else '\n' for t in tokens])
all_docs[dat['document_title']] = text
with_short = 0
with open('../data/v1.0-simplified_simplified-nq-train.jsonl', 'r') as f:
for line in f:
dat = json.loads(line)
url_info = urllib.parse.parse_qs(dat['document_url'][dat['document_url'].find('?') + 1:])
dat['document_title'] = url_info['title'][0].replace('_', ' ')
if dat['document_title'] not in all_docs:
doc_text = dat['document_text']
if doc_text.find('<H2> References </H2>') > 0:
doc_text = doc_text[:doc_text.find('<H2> References </H2>')]
if doc_text.find('About Wikipedia') > 0:
doc_text = doc_text[:doc_text.find('About Wikipedia')]
tags = ['<H1>', '<H2>', '<H3>', '<Tr>', '<Td>', '<Ul>', '<Th>', '</Th>', '<Li>', '<Table>', '<P>', '<Br>',
'</H1>', '</H2>', '</H3>', '</Tr>', '</Td>', '</Ul>', '</Li>', '</Table>', '</P>', '</Br>']
tokens = doc_text.split(' ')
text = ' '.join([t if t not in tags else '\n' for t in tokens])
all_docs[dat['document_title']] = text
# print(' '.join([t['token'] for t in dat['document_tokens'] if not t['html_token']]))
with open('../data/all_docs.json', 'w') as f:
json.dump(all_docs, f) | [
"bernhard2202@gmail.com"
] | bernhard2202@gmail.com |
d99ff535dc1910cb9019d6f11a9939d50cc55669 | acb7228022a36218846bc3f431e7a45057bb581d | /mappingpedia/migrations/0003_auto_20180214_1501.py | 9c5d9d620a594b0c3db4110b7ac1bfa980b4358a | [
"Apache-2.0"
] | permissive | oeg-upm/mappingpedia-userinterface | c6ba106f3072a4d37c1c34573e2d72882429dd1b | 1738b32f704bbf66f1ed8b78c99c71d49b208d43 | refs/heads/master | 2021-11-03T14:34:39.044575 | 2019-04-26T07:02:11 | 2019-04-26T07:02:11 | 111,107,643 | 3 | 1 | Apache-2.0 | 2018-02-28T11:55:04 | 2017-11-17T13:40:44 | HTML | UTF-8 | Python | false | false | 692 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-02-14 15:01
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mappingpedia', '0002_auto_20180214_1409'),
]
operations = [
migrations.RenameField(
model_name='executionprogress',
old_name='result_page',
new_name='result_url',
),
migrations.AlterField(
model_name='executionprogress',
name='timestamp',
field=models.DateTimeField(default=datetime.datetime(2018, 2, 14, 15, 0, 54, 799127)),
),
]
| [
"ahmad88me@gmail.com"
] | ahmad88me@gmail.com |
f1da8b2e8cd2b49b4089ef7c8d1561bd7405bb9c | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/348/85739/submittedfiles/testes.py | 6041d85fcddfaab01edb49cb3b652c18ffee68af | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | b = 0
a = 100
for i in range(0,a,1):
if (a%(i+1)) !=0:
b = b + 1
print(b)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
db0bdccce561f8b237475243b79b6ca953659b28 | 372e8da7c485faf4dd4673ea0caf386b479b5243 | /Codewars/Conway's Game of Life - Unlimited Edition.py | fe5215a57ea061eb2bebdbb99da8203cc143a060 | [] | no_license | caimengyuan/daydayup | c5bd1a478dfe9c820b7a1aa03d58543b6b824706 | 7e4ade670b01b4e337c24b7984707988c74be404 | refs/heads/master | 2021-07-06T14:24:14.119627 | 2020-12-29T11:45:17 | 2020-12-29T11:45:17 | 216,771,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,655 | py | '''
The rules of the game are:
Any live cell with fewer than two live neighbours dies, as if caused by underpopulation.
Any live cell with more than three live neighbours dies, as if by overcrowding.
Any live cell with two or three live neighbours lives on to the next generation.
Any dead cell with exactly three live neighbours becomes a live cell.
Each cell's neighborhood is the 8 cells immediately around it (i.e. Moore Neighborhood).
The universe is infinite in both the x and y dimensions and all cells are initially dead - except for those specified in the arguments.
The return value should be a 2d array cropped around all of the living cells. (If there are no living cells, then return [[]].)
'''
def get_neighbours(x, y):
return {(x + i, y + j) for i in range(-1, 2) for j in range(-1, 2)}
def get_generation(cells, generations):
if not cells: return cells
xm, ym, xM, yM = 0, 0, len(cells[0]) - 1, len(cells) - 1
cells = {(x, y) for y, l in enumerate(cells) for x, c in enumerate(l) if c}
for _ in range(generations):
cells = {(x, y) for x in range(xm - 1, xM + 2) for y in range(ym - 1, yM + 2)
if 2 < len(cells & get_neighbours(x, y)) < 4 + ((x, y) in cells)}
xm, ym = min(x for x, y in cells), min(y for x, y in cells)
xM, yM = max(x for x, y in cells), max(y for x, y in cells)
return [[int((x, y) in cells) for x in range(xm, xM + 1)] for y in range(ym, yM + 1)]
| [
"38752797+caimengyuan@users.noreply.github.com"
] | 38752797+caimengyuan@users.noreply.github.com |
914e6d5b204037a256d93b34c1729e48a16ba7b7 | 39a4b3ac18d4ae7fd88561f182fa683bdd76d439 | /test_attention.py | 3c6052e8ff0952162b030380292e1e3cab88b307 | [] | no_license | rnsandeep/Attention | 62a24ef22424a237fa0d31b8e7ae9d5469008922 | 5d4e32e00c0bf426dd25f1443c860c5ee04e57c1 | refs/heads/master | 2020-06-11T04:53:07.237849 | 2019-07-02T09:45:39 | 2019-07-02T09:45:39 | 193,854,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,683 | py | # -*- coding: utf-8 -*-
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
#import time
import torch.nn.functional as F
from networks import AttnVGG, VGG
import os
from PIL import Image
import copy, cv2
import sys, shutil, pickle
from sklearn.metrics import classification_report, confusion_matrix
from time import time
# Data augmentation and normalization for training
# Just normalization for validation
def datatransforms(mean, std, crop_size, resize_size):
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize( mean, std) #[0.00021798351, 0.00016647576, 0.00016200541], [5.786733e-05, 5.2953397e-05, 4.714992e-05] ) #mean, std) #[0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(resize_size),
transforms.CenterCrop(crop_size),
transforms.ToTensor(),
transforms.Normalize(mean, std) #[0.00021798351, 0.00016647576, 0.00016200541], [5.786733e-05, 5.2953397e-05, 4.714992e-05]) #mean, std)#[0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'test': transforms.Compose([
transforms.Resize(resize_size),
transforms.CenterCrop(crop_size),
transforms.ToTensor(),
transforms.Normalize(mean, std) #[0.00021798351, 0.00016647576, 0.00016200541], [5.786733e-05, 5.2953397e-05, 4.714992e-05]) #mean, std)#[0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
return data_transforms
data_dir = sys.argv[1]
mean_file = sys.argv[3]
#mean_std = np.load(mean_file)
mean = (0.7012, 0.5517, 0.4875) #torch.tensor([0.485, 0.456, 0.406]) #[0.4616, 0.4006, 0.3602])
std = (0.0942, 0.1331, 0.1521) #torch.tensor([0.229, 0.224, 0.225]) #[0.2287, 0.2160, 0.2085])
crop_size = int(sys.argv[4])
resize_size = int(sys.argv[5])
data_transforms = datatransforms( mean, std, crop_size, resize_size)
phase = 'test'
BATCH_SIZE=16
class ImageFolderWithPaths(datasets.ImageFolder):
"""Custom dataset that includes image file paths. Extends
torchvision.datasets.ImageFolder
"""
# override the __getitem__ method. this is the method dataloader calls
def __getitem__(self, index):
# this is what ImageFolder normally returns
original_tuple = super(ImageFolderWithPaths, self).__getitem__(index)
# the image file path
path = self.imgs[index][0]
# make a new tuple that includes original and the path
tuple_with_path = (original_tuple + (path,))
return tuple_with_path
image_datasets = {x: ImageFolderWithPaths(os.path.join(data_dir, x),
data_transforms[x])
for x in [phase]}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=BATCH_SIZE,
shuffle=True, num_workers=12)
for x in [phase]}
dataset_sizes = {x: len(image_datasets[x]) for x in [phase]}
class_names = image_datasets[phase].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def load_model(path):
model = torch.load(path)
return model
def load_inputs_outputs(dataloaders):
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
return inputs, labels
def convert_to_numpy(x):
return x.data.cpu().numpy()
def calculatePrecisionRecallAccuracy(labels, outputs):
tp = 0
fp = 0
tn = 0
fn = 0
for label, output in zip(labels, outputs):
if label==output and label ==0:
tn = tn+1
elif label==output and label!=0:
tp = tp+1
elif label!=output and label == 0:
fp = fp+1
else:
fn = fn +1
precision = tp*1.0/(tp+fp)
recall = tp*1.0/(tp+fn)
accuracy = (tp+tn)*1.0/(tp+fp+fn+tn)
return precision, recall, accuracy
def load_tensor_inputs(paths, data_transforms):
loader = data_transforms[phase]
images = [loader(Image.open(path)) for path in paths]
return torch.stack(images)
def eval_model(model, dataloaders):
model.eval() # Set model to evaluate mode
running_corrects = 0
output = []
label = []
total = 0
all_times = []
count =0
start = time()
for inputs, labels, paths in dataloaders[phase]:
total+= len(paths)
inputs = inputs.to(device)
labels = labels.to(device)
outputs, _, _ = model.forward(inputs)
probs, outputs = torch.max(outputs, 1)
outputs_np = convert_to_numpy(outputs)
labels_np = convert_to_numpy(labels)
output += (list(outputs_np))
label += (list(labels_np))
running_corrects += np.sum(outputs_np == labels_np)
count = count +1
all_times.append(time()-start)
start = time()
sys.stdout.write('count: {:d}/{:d}, average time:{:f} \r' \
.format(count*BATCH_SIZE, len(dataloaders[phase])*BATCH_SIZE, np.mean(np.array(all_times))/BATCH_SIZE ))
sys.stdout.flush()
accuracy = running_corrects*1.0/dataset_sizes[phase]
print("\n")
# print(confusion_matrix(label, output))
return accuracy, label, output
def load_attention_model(model_path, num_classes):
model = AttnVGG(num_classes=num_classes, attention=True, normalize_attn=True)
checkpoint = torch.load(model_path)
model.load_state_dict(checkpoint['state_dict'])
return model.to(device)
if __name__=="__main__":
model_path = sys.argv[2]
num_classes = int(sys.argv[6])
output_dir = sys.argv[7]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print(model_path)
model = load_attention_model(model_path, num_classes)
since = time()
accuracy, label, output = eval_model(model, dataloaders)
PR, RC, ACC = calculatePrecisionRecallAccuracy(label, output)
print(confusion_matrix(label, output))
print("Precision:", PR, "Recall:", RC, "accuracy:", ACC)
print(classification_report(label, output))
last = time()
total_time = last-since
print("total time taken to process;", total_time, "per image:", total_time*1.0/len(output))
pickle.dump([accuracy, label, output],open(os.path.join(output_dir, os.path.basename(model_path)[:-8]+'_'+str(crop_size)+'_'+str(resize_size)+'_accuracy.pkl'),'wb'))
| [
"sandeep@heallo.ai"
] | sandeep@heallo.ai |
9fb6d57a05cd22669324eb53ed3ed38b65315fc1 | bbd3b7e9559cd6369a03f63af2fdc2dc050ad063 | /twitter-clone/tweet_api/migrations/0002_auto_20210521_2004.py | fe200389271e43a9c1b314470c13af62b6c20918 | [
"MIT",
"LicenseRef-scancode-free-unknown"
] | permissive | TanmayBhosale/MSCxHacktoberfest | be98e547d2c2e891ac21f808ae9ceeb4d82a4a5d | 81d32f7b6734c50ce71bbf20dc4b9f5d5b442396 | refs/heads/master | 2023-08-17T09:54:17.837581 | 2021-10-06T09:55:58 | 2021-10-06T09:55:58 | 413,794,719 | 0 | 0 | MIT | 2021-10-05T11:45:34 | 2021-10-05T11:45:33 | null | UTF-8 | Python | false | false | 566 | py | # Generated by Django 3.2.3 on 2021-05-21 14:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tweet_api', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='tweet',
name='userHandle',
field=models.CharField(default='adminOP', max_length=50),
),
migrations.AddField(
model_name='tweet',
name='userName',
field=models.CharField(default='Admin', max_length=50),
),
]
| [
"tanmaybhosale24@gmail.com"
] | tanmaybhosale24@gmail.com |
ea35143cdc0111cd7637ce9c09e8694f82c80c7d | 3d91c09bca4e68bf7a527cb40ed70ac208495b93 | /library/migrations/0004_auto_20201128_0844.py | faa172d3cb83cc52c23cfb2b00723338c7b633e8 | [] | no_license | Kaik-a/OCR-Projet13 | 02e9d8c9228d6d7a09013b4ab2570304c01dfc28 | ac339002279397f43316e33a869cce797b5d92b2 | refs/heads/main | 2023-02-17T09:39:11.184120 | 2021-01-11T15:50:58 | 2021-01-11T15:50:58 | 311,875,691 | 0 | 0 | null | 2021-01-11T15:50:59 | 2020-11-11T05:51:34 | CSS | UTF-8 | Python | false | false | 368 | py | # Generated by Django 3.1.3 on 2020-11-28 08:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("library", "0003_auto_20201128_0823"),
]
operations = [
migrations.AlterUniqueTogether(
name="lendedgame",
unique_together={("owned_game", "return_date")},
),
]
| [
"mehdi.bichari@outscale.com"
] | mehdi.bichari@outscale.com |
79b2d60ad47fe671be90b1d175b9e0722a485c58 | 6135fb303687e0faa8e9280eba28804ced4ae58e | /study/study_unittest_pro/test_login.py | d22232f0faf98818cc3c2aff3211be713366fe4a | [] | no_license | a284025258/study_jiafa_base | 21164178d3b0d4f4811ada73e687a2c5e749cc43 | e19b2be16a196de22fc5fbd7047694f42c30e3c7 | refs/heads/master | 2022-11-18T14:44:01.677915 | 2020-07-15T14:52:17 | 2020-07-15T14:52:17 | 279,310,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,356 | py | import unittest
from login import login_check
class LoginTestCase(unittest.TestCase):
def __init__(self, expect, data, method_name):
self.expect = eval(expect)
self.data = eval(data)
super().__init__(method_name)
def setUp(self) -> None:
pass
def tearDown(self) -> None:
pass
@classmethod
def setUpClass(cls) -> None:
pass
@classmethod
def tearDownClass(cls) -> None:
pass
'''登录测试用例'''
def test01_login_case_pass(self):
'''正常登录'''
# 准备测试用例数据 1.入参 2.预期结果
# username = 'admin'
# password = '123456'
# expect = {'code': 0, 'msg': '登录成功'}
# 执行功能函数,获取实际结果
# **self.data字典拆包
result = login_check(**self.data)
# 断言预期和实际结果
try:
self.assertEqual(self.expect, result)
except AssertionError as e:
print('该条用例未通过')
print(f'预期结果:{self.expect}')
print(f'实际结果:{result}')
raise e
else:
print('该条用例通过')
print(f'预期结果:{self.expect}')
print(f'实际结果:{result}')
def test02_login_case_pwd_error(self):
pass
| [
"qi284025258@163.com"
] | qi284025258@163.com |
995a2aa8cf3a57d3c7accd1f89bed75984bff1a3 | 3ffb76e16771957fba3ee1c053358e0401414592 | /zajecia/fleet/views.py | df260379634d7a53de886126e5bf9033cb6c515b | [] | no_license | MKowalski234/SDABackend | 74159bc7b58f7d24a459c38f3336979c84336d42 | 44f0dc3c925ef29c4046ca373b3cb56196e7ea1f | refs/heads/master | 2022-10-09T15:42:50.269602 | 2020-06-07T10:20:10 | 2020-06-07T10:20:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | from django.views import View
from .models import Car, PETROL_CHOISES
from django.shortcuts import render, redirect
from .forms import SimpleCarForm, ModelCarForm
def form_view(request):
form = ModelCarForm()
if request.method == "POST":
form = ModelCarForm(request.POST)
if form.is_valid():
form.save()
cars = Car.objects.all()
return render(request, "fleet/lista.html", {
"elements": cars,
"formularz": form
})
| [
"k.serwata@live.com"
] | k.serwata@live.com |
f123a0f066e453c6a0bc146af57dcaca178274a9 | dd5a3900ec93388677fc8b947e24ad43604b3fa5 | /Sample/class eg/lab questions/2B/2.py | 51f82be4f595d0d8cadb1c9d9c2ec5c18da8f2b5 | [] | no_license | Skipper609/python | 9bc52b4199ca20b2fa2daffb713f617052c0c169 | 54cd5d6f28117a78e1b140ed8d3cbafd31793e56 | refs/heads/master | 2022-02-17T05:23:26.189013 | 2019-08-01T05:44:33 | 2019-08-01T05:44:33 | 198,154,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py |
def find_sum(lst):
return sum(lst)
inp = [int(i) for i in input("Enter the series of numbers seperated by spaces :").split()]
sm = find_sum(inp)
print(f"The sum for the list {inp} is {sm}") | [
"sudhanva000@gmail.com"
] | sudhanva000@gmail.com |
7f271a553860b8386270632196e05e93106e5631 | 5cbf6cf8a9eb958391c371c6181c49155533b6ba | /leetcode_链表_18.排序链表(快排+归并).py | 4825959173e4f80a6369e29f6246967d3a75fdf9 | [] | no_license | cmychina/Leetcode | dec17e6e5eb25fad138a24deba1d2f087db416f7 | 18e6ac79573b3f535ca5e3eaa477eac0e60bf510 | refs/heads/master | 2022-12-20T16:09:46.709808 | 2020-09-28T04:04:54 | 2020-09-28T04:04:54 | 282,446,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,223 | py | """
链表的快排与归并排序
"""
from linklist import *
class Solution:
def sortList(self, head: ListNode) -> ListNode:
"""
归并排序,要找中点,链表中点用快慢指针
:param head:
:return:
"""
if not head or not head.next:
return head
slow,fast=head,head
while fast.next and fast.next.next:
slow=slow.next
fast=fast.next.next
right=self.sortList(slow.next)
slow.next=None#切断
left=self.sortList(head)
return self.mergesort(left,right)
def mergesort(self,head1,head2):
ans=ListNode(-1)
pre=ans
while head1 and head2:
if head1.val<=head2.val:
pre.next=head1
head1=head1.next
pre=pre.next
else:
pre.next=head2
head2=head2.next
pre=pre.next
if head1:
pre.next=head1
if head2:
pre.next=head2
return ans.next
class Solution:
def sortList(self, head: ListNode) -> ListNode:
"""
快排
:param head:
:return:
"""
if not head or not head.next:
return head
ans = ListNode(-1)
ans.next = head
return self.quicksort(ans, None)
def quicksort(self, head, end):
if head == end or head.next == end or head.next.next == end:
return head
tmp = ListNode(-1)
partition = head.next
p = partition
#用来记录排序结果?
t = tmp
while p.next!=end:
if p.next.val < partition.val:
t.next = p.next
t = t.next
p.next = p.next.next
#大于partitio的val,不操作
else:
p = p.next
t.next = head.next#head.next 是未排序前
head.next = tmp.next
self.quicksort(head, partition)
self.quicksort(partition, end)
return head.next
if __name__=="__main__":
a=[4,5,3,6,1,7,8,2]
l1=convert.list2link(a)
s=Solution()
out=s.sortList(l1)
print(convert.link2list(out))
| [
"noreply@github.com"
] | cmychina.noreply@github.com |
b277f0d27a1a1bc16d0c56b6ca8d5a27cbcb6c93 | 974c5a4f101d0e6f4dfa5fc2f7c641c9d2bd8184 | /sdk/cognitivelanguage/azure-ai-language-questionanswering/azure/ai/language/questionanswering/authoring/aio/_operations/_operations.py | 0e46617f5d1c327c085652d1db7dc3c6ae718e0c | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | gaoyp830/azure-sdk-for-python | 4816f04c554dcffb7510a6b7044b0c86a2dd32e1 | 1c66defa502b754abcc9e5afa444ca03c609342f | refs/heads/master | 2022-10-20T21:33:44.281041 | 2022-09-29T17:03:13 | 2022-09-29T17:03:13 | 250,355,505 | 0 | 0 | MIT | 2020-03-26T19:42:13 | 2020-03-26T19:42:12 | null | UTF-8 | Python | false | false | 153,782 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, List, Optional, TypeVar, Union, cast, overload
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.polling.async_base_polling import AsyncLROBasePolling
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from ..._operations._operations import (
build_add_feedback_request,
build_create_project_request,
build_delete_project_request,
build_deploy_project_request,
build_export_request,
build_get_project_details_request,
build_import_assets_request,
build_list_deployments_request,
build_list_projects_request,
build_list_qnas_request,
build_list_sources_request,
build_list_synonyms_request,
build_update_qnas_request,
build_update_sources_request,
build_update_synonyms_request,
)
from .._vendor import MixinABC
if sys.version_info >= (3, 9):
from collections.abc import MutableMapping
else:
from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports
JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class QuestionAnsweringAuthoringClientOperationsMixin(MixinABC): # pylint: disable=too-many-public-methods
@distributed_trace
def list_projects(
    self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any
) -> AsyncIterable[JSON]:
    """Gets all projects for a user.

    See
    https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/list-projects
    for more information.

    :keyword top: The maximum number of resources to return from the collection. Default value is
     None.
    :paramtype top: int
    :keyword skip: An offset into the collection of the first resource to be returned. Default
     value is None.
    :paramtype skip: int
    :return: An iterator like instance of JSON object; each item is one project's metadata
     (projectName, language, description, created/modified/deployed timestamps, settings, ...).
    :rtype: ~azure.core.async_paging.AsyncItemPaged[JSON]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = kwargs.pop("params", {}) or {}

    cls = kwargs.pop("cls", None)  # type: ClsType[JSON]

    # Translate well-known status codes into azure-core exception types;
    # callers may extend/override this via the ``error_map`` keyword.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    def prepare_request(next_link=None):
        if not next_link:
            # First page: build the request from the operation parameters.
            request = build_list_projects_request(
                top=top,
                skip=skip,
                api_version=self._config.api_version,
                headers=_headers,
                params=_params,
            )
        else:
            # Later pages: follow the service-provided nextLink, but
            # re-attach this client's api-version query parameter.
            _parsed_next_link = urlparse(next_link)
            _next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
            _next_request_params["api-version"] = self._config.api_version
            request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
        # Substitute the account endpoint into the URL template (same for
        # first and subsequent pages).
        path_format_arguments = {
            "Endpoint": self._serialize.url(
                "self._config.endpoint", self._config.endpoint, "str", skip_quote=True
            ),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore
        return request

    async def extract_data(pipeline_response):
        page = pipeline_response.http_response.json()
        items = page["value"]
        if cls:
            items = cls(items)
        return page.get("nextLink", None), AsyncList(items)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)
@distributed_trace_async
async def get_project_details(self, project_name: str, **kwargs: Any) -> JSON:
    """Get the requested project metadata.

    See
    https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/get-project-details
    for more information.

    :param project_name: The name of the project to use. Required.
    :type project_name: str
    :return: JSON object with the project's metadata (projectName, language, description,
     created/modified/deployed timestamps, settings, ...).
    :rtype: JSON
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Translate well-known status codes into azure-core exception types;
    # callers may extend/override this via the ``error_map`` keyword.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = kwargs.pop("params", {}) or {}

    cls = kwargs.pop("cls", None)  # type: ClsType[JSON]

    request = build_get_project_details_request(
        project_name=project_name,
        api_version=self._config.api_version,
        headers=_headers,
        params=_params,
    )
    # Substitute the account endpoint into the request URL template.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    deserialized = response.json() if response.content else None

    if cls:
        return cls(pipeline_response, cast(JSON, deserialized), {})

    return cast(JSON, deserialized)
@overload
async def create_project(
    self, project_name: str, options: JSON, *, content_type: str = "application/json", **kwargs: Any
) -> JSON:
    """Create or update a project.

    See
    https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/create-project
    for more information.

    :param project_name: The name of the project to use. Required.
    :type project_name: str
    :param options: Parameters needed to create the project (``language`` is required;
     ``description``, ``multilingualResource`` and ``settings`` are optional). Required.
    :type options: JSON
    :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
     Default value is "application/json".
    :paramtype content_type: str
    :return: JSON object describing the created or updated project.
    :rtype: JSON
    :raises ~azure.core.exceptions.HttpResponseError:
    """
@overload
async def create_project(
    self, project_name: str, options: IO, *, content_type: str = "application/json", **kwargs: Any
) -> JSON:
    """Create or update a project.

    See
    https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/create-project
    for more information.

    :param project_name: The name of the project to use. Required.
    :type project_name: str
    :param options: Parameters needed to create the project, as a binary stream. Required.
    :type options: IO
    :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
     Default value is "application/json".
    :paramtype content_type: str
    :return: JSON object describing the created or updated project.
    :rtype: JSON
    :raises ~azure.core.exceptions.HttpResponseError:
    """
@distributed_trace_async
async def create_project(self, project_name: str, options: Union[JSON, IO], **kwargs: Any) -> JSON:
    """Create or update a project.

    See
    https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/create-project
    for more information.

    :param project_name: The name of the project to use. Required.
    :type project_name: str
    :param options: Parameters needed to create the project. Is either a model type or a IO type.
     Required.
    :type options: JSON or IO
    :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
     Default value is None.
    :paramtype content_type: str
    :return: JSON object describing the created or updated project.
    :rtype: JSON
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Translate well-known status codes into azure-core exception types;
    # callers may extend/override this via the ``error_map`` keyword.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = kwargs.pop("params", {}) or {}

    # Explicit keyword wins over a Content-Type header; default to JSON.
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    cls = kwargs.pop("cls", None)  # type: ClsType[JSON]

    content_type = content_type or "application/json"

    # Streams/bytes are sent as raw content; anything else is JSON-serialized.
    _json = None
    _content = None
    if isinstance(options, (IO, bytes)):
        _content = options
    else:
        _json = options

    request = build_create_project_request(
        project_name=project_name,
        content_type=content_type,
        api_version=self._config.api_version,
        json=_json,
        content=_content,
        headers=_headers,
        params=_params,
    )
    # Substitute the account endpoint into the request URL template.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # 200 (updated) and 201 (created) carry the same payload shape.
    deserialized = response.json() if response.content else None

    if cls:
        return cls(pipeline_response, cast(JSON, deserialized), {})

    return cast(JSON, deserialized)
async def _delete_project_initial(self, project_name: str, **kwargs: Any) -> Optional[JSON]:
    # Initial call of the delete-project long-running operation.  A 200
    # response carries a JSON job-state payload; a 202 means the job was
    # accepted and its status URL is returned in the Operation-Location
    # header (surfaced via ``response_headers`` to the caller's ``cls``).
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = kwargs.pop("params", {}) or {}

    cls = kwargs.pop("cls", None)  # type: ClsType[Optional[JSON]]

    request = build_delete_project_request(
        project_name=project_name,
        api_version=self._config.api_version,
        headers=_headers,
        params=_params,
    )
    # Substitute the account endpoint into the request URL template.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    deserialized = None
    response_headers = {}
    if response.status_code == 200:
        # Synchronous completion: the body is the final job state.
        if response.content:
            deserialized = response.json()
        else:
            deserialized = None

    if response.status_code == 202:
        # Accepted: expose the polling URL for the LRO machinery.
        response_headers["Operation-Location"] = self._deserialize(
            "str", response.headers.get("Operation-Location")
        )

    if cls:
        return cls(pipeline_response, deserialized, response_headers)

    return deserialized
@distributed_trace_async
async def begin_delete_project(self, project_name: str, **kwargs: Any) -> AsyncLROPoller[JSON]:
    """Delete the project.

    See
    https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/delete-project
    for more information.

    :param project_name: The name of the project to use. Required.
    :type project_name: str
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncLROBasePolling. Pass in False
     for this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns a JSON object describing the final job
     state (jobId, status, timestamps, and any errors).
    :rtype: ~azure.core.polling.AsyncLROPoller[JSON]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = kwargs.pop("params", {}) or {}

    cls = kwargs.pop("cls", None)  # type: ClsType[JSON]
    polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: kick off the operation now; the raw pipeline
        # response is handed to the poller untouched.
        raw_result = await self._delete_project_initial(  # type: ignore
            project_name=project_name, cls=lambda x, y, z: x, headers=_headers, params=_params, **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final poll response into plain JSON.
        response = pipeline_response.http_response
        deserialized = response.json() if response.content else None
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }

    # Resolve the polling strategy: True -> LRO base polling, False -> no
    # polling; anything else is treated as a caller-supplied polling method.
    if polling is True:
        polling_method = cast(
            AsyncPollingMethod,
            AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs),
        )  # type: AsyncPollingMethod
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
async def _export_initial(
    self, project_name: str, *, file_format: str = "json", asset_kind: Optional[str] = None, **kwargs: Any
) -> Optional[JSON]:
    # Initial call of the export long-running operation.  A 200 response
    # carries a JSON job-state payload; a 202 means the job was accepted and
    # its status URL is returned in the Operation-Location header (surfaced
    # via ``response_headers`` to the caller's ``cls``).
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = kwargs.pop("params", {}) or {}

    cls = kwargs.pop("cls", None)  # type: ClsType[Optional[JSON]]

    request = build_export_request(
        project_name=project_name,
        file_format=file_format,
        asset_kind=asset_kind,
        api_version=self._config.api_version,
        headers=_headers,
        params=_params,
    )
    # Substitute the account endpoint into the request URL template.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    deserialized = None
    response_headers = {}
    if response.status_code == 200:
        # Synchronous completion: the body is the final job state.
        if response.content:
            deserialized = response.json()
        else:
            deserialized = None

    if response.status_code == 202:
        # Accepted: expose the polling URL for the LRO machinery.
        response_headers["Operation-Location"] = self._deserialize(
            "str", response.headers.get("Operation-Location")
        )

    if cls:
        return cls(pipeline_response, deserialized, response_headers)

    return deserialized
@distributed_trace_async
async def begin_export(
    self, project_name: str, *, file_format: str = "json", asset_kind: Optional[str] = None, **kwargs: Any
) -> AsyncLROPoller[JSON]:
    """Export project metadata and assets.

    See
    https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/export
    for more information.

    :param project_name: The name of the project to use. Required.
    :type project_name: str
    :keyword file_format: Knowledge base Import or Export format. Known values are: "json", "tsv",
     and "excel". Default value is "json".
    :paramtype file_format: str
    :keyword asset_kind: Kind of the asset of the project. Known values are: "qnas" and "synonyms".
     Default value is None.
    :paramtype asset_kind: str
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncLROBasePolling. Pass in False
     for this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns a JSON object describing the final job
     state, including ``resultUrl`` (where to download the export), status, timestamps and errors.
    :rtype: ~azure.core.polling.AsyncLROPoller[JSON]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = kwargs.pop("params", {}) or {}

    cls = kwargs.pop("cls", None)  # type: ClsType[JSON]
    polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: kick off the operation now; the raw pipeline
        # response is handed to the poller untouched.
        raw_result = await self._export_initial(  # type: ignore
            project_name=project_name,
            file_format=file_format,
            asset_kind=asset_kind,
            cls=lambda x, y, z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final poll response into plain JSON.
        response = pipeline_response.http_response
        deserialized = response.json() if response.content else None
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }

    # Resolve the polling strategy: True -> LRO base polling, False -> no
    # polling; anything else is treated as a caller-supplied polling method.
    if polling is True:
        polling_method = cast(
            AsyncPollingMethod,
            AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs),
        )  # type: AsyncPollingMethod
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
async def _import_assets_initial(
    self,
    project_name: str,
    options: Optional[Union[JSON, IO]] = None,
    *,
    file_format: str = "json",
    asset_kind: Optional[str] = None,
    **kwargs: Any
) -> Optional[JSON]:
    # Initial call of the import-assets long-running operation.  ``options``
    # may be a JSON model, a binary stream/bytes, or None (no body).  A 200
    # response carries a JSON job-state payload; a 202 means the job was
    # accepted and its status URL is returned in the Operation-Location
    # header (surfaced via ``response_headers`` to the caller's ``cls``).
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = kwargs.pop("params", {}) or {}

    # Explicit keyword wins over a Content-Type header; default to JSON.
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    cls = kwargs.pop("cls", None)  # type: ClsType[Optional[JSON]]

    content_type = content_type or "application/json"

    # Streams/bytes are sent as raw content; anything else (including None)
    # goes through the JSON body parameter.
    _json = None
    _content = None
    if isinstance(options, (IO, bytes)):
        _content = options
    else:
        if options is not None:
            _json = options
        else:
            _json = None

    request = build_import_assets_request(
        project_name=project_name,
        file_format=file_format,
        asset_kind=asset_kind,
        content_type=content_type,
        api_version=self._config.api_version,
        json=_json,
        content=_content,
        headers=_headers,
        params=_params,
    )
    # Substitute the account endpoint into the request URL template.
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    deserialized = None
    response_headers = {}
    if response.status_code == 200:
        # Synchronous completion: the body is the final job state.
        if response.content:
            deserialized = response.json()
        else:
            deserialized = None

    if response.status_code == 202:
        # Accepted: expose the polling URL for the LRO machinery.
        response_headers["Operation-Location"] = self._deserialize(
            "str", response.headers.get("Operation-Location")
        )

    if cls:
        return cls(pipeline_response, deserialized, response_headers)

    return deserialized
    @overload
    async def begin_import_assets(
        self,
        project_name: str,
        options: Optional[JSON] = None,
        *,
        file_format: str = "json",
        asset_kind: Optional[str] = None,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[JSON]:
        """Import project assets.

        See
        https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/import
        for more information.

        :param project_name: The name of the project to use. Required.
        :type project_name: str
        :param options: Project assets that need to be imported. Default value is None.
        :type options: JSON
        :keyword file_format: Knowledge base Import or Export format. Known values are: "json", "tsv",
         and "excel". Default value is "json".
        :paramtype file_format: str
        :keyword asset_kind: Kind of the asset of the project. Known values are: "qnas" and "synonyms".
         Default value is None.
        :paramtype asset_kind: str
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncLROBasePolling. Pass in False
         for this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns JSON object
        :rtype: ~azure.core.polling.AsyncLROPoller[JSON]
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # JSON input template you can fill out and use as your body input.
                options = {
                    "assets": {
                        "qnas": [
                            {
                                "activeLearningSuggestions": [
                                    {
                                        "clusterHead": "str",  # Optional.
                                          Question chosen as the head of suggested questions cluster by
                                          Active Learning clustering algorithm.
                                        "suggestedQuestions": [
                                            {
                                                "autoSuggestedCount":
                                                  0,  # Optional. The number of times the question was
                                                  suggested automatically by the Active Learning
                                                  algorithm.
                                                "question": "str",  #
                                                  Optional. Question suggested by the Active Learning
                                                  feature.
                                                "userSuggestedCount":
                                                  0  # Optional. The number of times the question was
                                                  suggested explicitly by the user.
                                            }
                                        ]
                                    }
                                ],
                                "answer": "str",  # Optional. Answer text.
                                "dialog": {
                                    "isContextOnly": bool,  # Optional. To mark
                                      if a prompt is relevant only with a previous question or not. If
                                      true, do not include this QnA as answer for queries without
                                      context; otherwise, ignores context and includes this QnA in
                                      answers.
                                    "prompts": [
                                        {
                                            "displayOrder": 0,  #
                                              Optional. Index of the prompt. It is used for ordering of
                                              the prompts.
                                            "displayText": "str",  #
                                              Optional. Text displayed to represent a follow up
                                              question prompt.
                                            "qna": {
                                                "activeLearningSuggestions": [
                                                    {
                                                        "clusterHead": "str",  # Optional. Question
                                                          chosen as the head of suggested questions
                                                          cluster by Active Learning clustering
                                                          algorithm.
                                                        "suggestedQuestions": [
                                                            {
                                                                "autoSuggestedCount": 0,  # Optional.
                                                                  The number of times the question was
                                                                  suggested automatically by the Active
                                                                  Learning algorithm.
                                                                "question": "str",  # Optional.
                                                                  Question suggested by the Active
                                                                  Learning feature.
                                                                "userSuggestedCount": 0  # Optional.
                                                                  The number of times the question was
                                                                  suggested explicitly by the user.
                                                            }
                                                        ]
                                                    }
                                                ],
                                                "answer": "str",  #
                                                  Optional. Answer text.
                                                "dialog": ...,
                                                "id": 0,  # Optional.
                                                  Unique ID for the QnA.
                                                "metadata": {
                                                    "str": "str"
                                                      # Optional. Metadata associated with the answer,
                                                      useful to categorize or filter question answers.
                                                },
                                                "questions": [
                                                    "str"  #
                                                      Optional. List of questions associated with the
                                                      answer.
                                                ],
                                                "source": "str"  #
                                                  Optional. Source from which QnA was indexed e.g.
                                                  https://docs.microsoft.com/en-us/azure/cognitive-services/QnAMaker/FAQs
                                                  .
                                            },
                                            "qnaId": 0  # Optional. ID of
                                              the QnA corresponding to the prompt.
                                        }
                                    ]
                                },
                                "id": 0,  # Optional. Unique ID for the QnA.
                                "lastUpdatedDateTime": "2020-02-20 00:00:00",  #
                                  Optional. Date-time when the QnA was last updated.
                                "metadata": {
                                    "str": "str"  # Optional. Metadata associated
                                      with the answer, useful to categorize or filter question answers.
                                },
                                "questions": [
                                    "str"  # Optional. List of questions
                                      associated with the answer.
                                ],
                                "source": "str",  # Optional. Source from which QnA
                                  was indexed e.g.
                                  https://docs.microsoft.com/en-us/azure/cognitive-services/QnAMaker/FAQs
                                  .
                                "sourceDisplayName": "str"  # Optional. Friendly name
                                  of the Source.
                            }
                        ],
                        "synonyms": [
                            {
                                "alterations": [
                                    "str"  # Collection of word alterations.
                                      Required.
                                ]
                            }
                        ]
                    },
                    "fileUri": "str",  # Optional. Import data File URI.
                    "metadata": {
                        "language": "str",  # Language of the text records. This is BCP-47
                          representation of a language. For example, use "en" for English; "es" for
                          Spanish etc. If not set, use "en" for English as default. Required.
                        "description": "str",  # Optional. Description of the project.
                        "multilingualResource": bool,  # Optional. Set to true to enable
                          creating knowledgebases in different languages for the same resource.
                        "settings": {
                            "defaultAnswer": "str"  # Optional. Default Answer response
                              when no good match is found in the knowledge base.
                        }
                    }
                }

                # response body for status code(s): 200
                response == {
                    "createdDateTime": "2020-02-20 00:00:00",  # Required.
                    "jobId": "str",  # Required.
                    "lastUpdatedDateTime": "2020-02-20 00:00:00",  # Required.
                    "status": "str",  # Job Status. Required. Known values are: "notStarted",
                      "running", "succeeded", "failed", "cancelled", "cancelling", and
                      "partiallyCompleted".
                    "errors": [
                        {
                            "code": "str",  # One of a server-defined set of error codes.
                              Required. Known values are: "InvalidRequest", "InvalidArgument",
                              "Unauthorized", "Forbidden", "NotFound", "ProjectNotFound",
                              "OperationNotFound", "AzureCognitiveSearchNotFound",
                              "AzureCognitiveSearchIndexNotFound", "TooManyRequests",
                              "AzureCognitiveSearchThrottling",
                              "AzureCognitiveSearchIndexLimitReached", "InternalServerError", and
                              "ServiceUnavailable".
                            "message": "str",  # A human-readable representation of the
                              error. Required.
                            "details": [
                                ...
                            ],
                            "innererror": {
                                "code": "str",  # One of a server-defined set of
                                  error codes. Required. Known values are: "InvalidRequest",
                                  "InvalidParameterValue", "KnowledgeBaseNotFound",
                                  "AzureCognitiveSearchNotFound", "AzureCognitiveSearchThrottling", and
                                  "ExtractionFailure".
                                "message": "str",  # Error message. Required.
                                "details": {
                                    "str": "str"  # Optional. Error details.
                                },
                                "innererror": ...,
                                "target": "str"  # Optional. Error target.
                            },
                            "target": "str"  # Optional. The target of the error.
                        }
                    ],
                    "expirationDateTime": "2020-02-20 00:00:00"  # Optional.
                }
        """
    @overload
    async def begin_import_assets(
        self,
        project_name: str,
        options: Optional[IO] = None,
        *,
        file_format: str = "json",
        asset_kind: Optional[str] = None,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[JSON]:
        """Import project assets.

        See
        https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/import
        for more information.

        :param project_name: The name of the project to use. Required.
        :type project_name: str
        :param options: Project assets that need to be imported. Default value is None.
        :type options: IO
        :keyword file_format: Knowledge base Import or Export format. Known values are: "json", "tsv",
         and "excel". Default value is "json".
        :paramtype file_format: str
        :keyword asset_kind: Kind of the asset of the project. Known values are: "qnas" and "synonyms".
         Default value is None.
        :paramtype asset_kind: str
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncLROBasePolling. Pass in False
         for this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns JSON object
        :rtype: ~azure.core.polling.AsyncLROPoller[JSON]
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200
                response == {
                    "createdDateTime": "2020-02-20 00:00:00",  # Required.
                    "jobId": "str",  # Required.
                    "lastUpdatedDateTime": "2020-02-20 00:00:00",  # Required.
                    "status": "str",  # Job Status. Required. Known values are: "notStarted",
                      "running", "succeeded", "failed", "cancelled", "cancelling", and
                      "partiallyCompleted".
                    "errors": [
                        {
                            "code": "str",  # One of a server-defined set of error codes.
                              Required. Known values are: "InvalidRequest", "InvalidArgument",
                              "Unauthorized", "Forbidden", "NotFound", "ProjectNotFound",
                              "OperationNotFound", "AzureCognitiveSearchNotFound",
                              "AzureCognitiveSearchIndexNotFound", "TooManyRequests",
                              "AzureCognitiveSearchThrottling",
                              "AzureCognitiveSearchIndexLimitReached", "InternalServerError", and
                              "ServiceUnavailable".
                            "message": "str",  # A human-readable representation of the
                              error. Required.
                            "details": [
                                ...
                            ],
                            "innererror": {
                                "code": "str",  # One of a server-defined set of
                                  error codes. Required. Known values are: "InvalidRequest",
                                  "InvalidParameterValue", "KnowledgeBaseNotFound",
                                  "AzureCognitiveSearchNotFound", "AzureCognitiveSearchThrottling", and
                                  "ExtractionFailure".
                                "message": "str",  # Error message. Required.
                                "details": {
                                    "str": "str"  # Optional. Error details.
                                },
                                "innererror": ...,
                                "target": "str"  # Optional. Error target.
                            },
                            "target": "str"  # Optional. The target of the error.
                        }
                    ],
                    "expirationDateTime": "2020-02-20 00:00:00"  # Optional.
                }
        """
@distributed_trace_async
async def begin_import_assets(
self,
project_name: str,
options: Optional[Union[JSON, IO]] = None,
*,
file_format: str = "json",
asset_kind: Optional[str] = None,
**kwargs: Any
) -> AsyncLROPoller[JSON]:
"""Import project assets.
See
https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/import
for more information.
:param project_name: The name of the project to use. Required.
:type project_name: str
:param options: Project assets the needs to be imported. Is either a model type or a IO type.
Default value is None.
:type options: JSON or IO
:keyword file_format: Knowledge base Import or Export format. Known values are: "json", "tsv",
and "excel". Default value is "json".
:paramtype file_format: str
:keyword asset_kind: Kind of the asset of the project. Known values are: "qnas" and "synonyms".
Default value is None.
:paramtype asset_kind: str
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncLROBasePolling. Pass in False
for this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns JSON object
:rtype: ~azure.core.polling.AsyncLROPoller[JSON]
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200
response == {
"createdDateTime": "2020-02-20 00:00:00", # Required.
"jobId": "str", # Required.
"lastUpdatedDateTime": "2020-02-20 00:00:00", # Required.
"status": "str", # Job Status. Required. Known values are: "notStarted",
"running", "succeeded", "failed", "cancelled", "cancelling", and
"partiallyCompleted".
"errors": [
{
"code": "str", # One of a server-defined set of error codes.
Required. Known values are: "InvalidRequest", "InvalidArgument",
"Unauthorized", "Forbidden", "NotFound", "ProjectNotFound",
"OperationNotFound", "AzureCognitiveSearchNotFound",
"AzureCognitiveSearchIndexNotFound", "TooManyRequests",
"AzureCognitiveSearchThrottling",
"AzureCognitiveSearchIndexLimitReached", "InternalServerError", and
"ServiceUnavailable".
"message": "str", # A human-readable representation of the
error. Required.
"details": [
...
],
"innererror": {
"code": "str", # One of a server-defined set of
error codes. Required. Known values are: "InvalidRequest",
"InvalidParameterValue", "KnowledgeBaseNotFound",
"AzureCognitiveSearchNotFound", "AzureCognitiveSearchThrottling", and
"ExtractionFailure".
"message": "str", # Error message. Required.
"details": {
"str": "str" # Optional. Error details.
},
"innererror": ...,
"target": "str" # Optional. Error target.
},
"target": "str" # Optional. The target of the error.
}
],
"expirationDateTime": "2020-02-20 00:00:00" # Optional.
}
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[JSON]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._import_assets_initial( # type: ignore
project_name=project_name,
options=options,
file_format=file_format,
asset_kind=asset_kind,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
if polling is True:
polling_method = cast(
AsyncPollingMethod,
AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs),
) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
async def _deploy_project_initial(self, project_name: str, deployment_name: str, **kwargs: Any) -> Optional[JSON]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls = kwargs.pop("cls", None) # type: ClsType[Optional[JSON]]
request = build_deploy_project_request(
project_name=project_name,
deployment_name=deployment_name,
api_version=self._config.api_version,
headers=_headers,
params=_params,
)
path_format_arguments = {
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = None
response_headers = {}
if response.status_code == 200:
if response.content:
deserialized = response.json()
else:
deserialized = None
if response.status_code == 202:
response_headers["Operation-Location"] = self._deserialize(
"str", response.headers.get("Operation-Location")
)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
@distributed_trace_async
async def begin_deploy_project(
self, project_name: str, deployment_name: str, **kwargs: Any
) -> AsyncLROPoller[JSON]:
"""Deploy project to production.
See
https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/deploy-project
for more information.
:param project_name: The name of the project to use. Required.
:type project_name: str
:param deployment_name: The name of the specific deployment of the project to use. Required.
:type deployment_name: str
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncLROBasePolling. Pass in False
for this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns JSON object
:rtype: ~azure.core.polling.AsyncLROPoller[JSON]
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200
response == {
"deploymentName": "str", # Optional. Name of the deployment.
"lastDeployedDateTime": "2020-02-20 00:00:00" # Optional. Represents the
project last deployment date-time.
}
"""
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls = kwargs.pop("cls", None) # type: ClsType[JSON]
polling = kwargs.pop("polling", True) # type: Union[bool, AsyncPollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._deploy_project_initial( # type: ignore
project_name=project_name,
deployment_name=deployment_name,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
if response.content:
deserialized = response.json()
else:
deserialized = None
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
if polling is True:
polling_method = cast(
AsyncPollingMethod,
AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs),
) # type: AsyncPollingMethod
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
@distributed_trace
def list_deployments(
self, project_name: str, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any
) -> AsyncIterable[JSON]:
"""List all deployments of a project.
See
https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/list-deployments
for more information.
:param project_name: The name of the project to use. Required.
:type project_name: str
:keyword top: The maximum number of resources to return from the collection. Default value is
None.
:paramtype top: int
:keyword skip: An offset into the collection of the first resource to be returned. Default
value is None.
:paramtype skip: int
:return: An iterator like instance of JSON object
:rtype: ~azure.core.async_paging.AsyncItemPaged[JSON]
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200
response == {
"deploymentName": "str", # Optional. Name of the deployment.
"lastDeployedDateTime": "2020-02-20 00:00:00" # Optional. Represents the
project last deployment date-time.
}
"""
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls = kwargs.pop("cls", None) # type: ClsType[JSON]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_deployments_request(
project_name=project_name,
top=top,
skip=skip,
api_version=self._config.api_version,
headers=_headers,
params=_params,
)
path_format_arguments = {
"Endpoint": self._serialize.url(
"self._config.endpoint", self._config.endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
path_format_arguments = {
"Endpoint": self._serialize.url(
"self._config.endpoint", self._config.endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
return request
async def extract_data(pipeline_response):
deserialized = pipeline_response.http_response.json()
list_of_elem = deserialized["value"]
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.get("nextLink", None), AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
@distributed_trace
def list_synonyms(
self, project_name: str, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any
) -> AsyncIterable[JSON]:
"""Gets all the synonyms of a project.
See
https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/get-synonyms
for more information.
:param project_name: The name of the project to use. Required.
:type project_name: str
:keyword top: The maximum number of resources to return from the collection. Default value is
None.
:paramtype top: int
:keyword skip: An offset into the collection of the first resource to be returned. Default
value is None.
:paramtype skip: int
:return: An iterator like instance of JSON object
:rtype: ~azure.core.async_paging.AsyncItemPaged[JSON]
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200
response == {
"alterations": [
"str" # Collection of word alterations. Required.
]
}
"""
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls = kwargs.pop("cls", None) # type: ClsType[JSON]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_synonyms_request(
project_name=project_name,
top=top,
skip=skip,
api_version=self._config.api_version,
headers=_headers,
params=_params,
)
path_format_arguments = {
"Endpoint": self._serialize.url(
"self._config.endpoint", self._config.endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
path_format_arguments = {
"Endpoint": self._serialize.url(
"self._config.endpoint", self._config.endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
return request
async def extract_data(pipeline_response):
deserialized = pipeline_response.http_response.json()
list_of_elem = deserialized["value"]
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.get("nextLink", None), AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
    @overload
    async def update_synonyms(  # pylint: disable=inconsistent-return-statements
        self, project_name: str, synonyms: JSON, *, content_type: str = "application/json", **kwargs: Any
    ) -> None:
        """Updates all the synonyms of a project.

        See
        https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/update-synonyms
        for more information.

        :param project_name: The name of the project to use. Required.
        :type project_name: str
        :param synonyms: All the synonyms of a project. Required.
        :type synonyms: JSON
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # JSON input template you can fill out and use as your body input.
                synonyms = {
                    "nextLink": "str",  # Optional.
                    "value": [
                        {
                            "alterations": [
                                "str"  # Collection of word alterations. Required.
                            ]
                        }
                    ]
                }
        """
    @overload
    async def update_synonyms(  # pylint: disable=inconsistent-return-statements
        self, project_name: str, synonyms: IO, *, content_type: str = "application/json", **kwargs: Any
    ) -> None:
        """Updates all the synonyms of a project.

        See
        https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/update-synonyms
        for more information.

        :param project_name: The name of the project to use. Required.
        :type project_name: str
        :param synonyms: All the synonyms of a project, passed as a binary stream (the same JSON
         payload as the model-type overload, already serialized). Required.
        :type synonyms: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :return: None
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
@distributed_trace_async
async def update_synonyms( # pylint: disable=inconsistent-return-statements
self, project_name: str, synonyms: Union[JSON, IO], **kwargs: Any
) -> None:
"""Updates all the synonyms of a project.
See
https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/update-synonyms
for more information.
:param project_name: The name of the project to use. Required.
:type project_name: str
:param synonyms: All the synonyms of a project. Is either a model type or a IO type. Required.
:type synonyms: JSON or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:return: None
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[None]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(synonyms, (IO, bytes)):
_content = synonyms
else:
_json = synonyms
request = build_update_synonyms_request(
project_name=project_name,
content_type=content_type,
api_version=self._config.api_version,
json=_json,
content=_content,
headers=_headers,
params=_params,
)
path_format_arguments = {
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
@distributed_trace
def list_sources(
self, project_name: str, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any
) -> AsyncIterable[JSON]:
"""Gets all the sources of a project.
See
https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/get-sources
for more information.
:param project_name: The name of the project to use. Required.
:type project_name: str
:keyword top: The maximum number of resources to return from the collection. Default value is
None.
:paramtype top: int
:keyword skip: An offset into the collection of the first resource to be returned. Default
value is None.
:paramtype skip: int
:return: An iterator like instance of JSON object
:rtype: ~azure.core.async_paging.AsyncItemPaged[JSON]
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200
response == {
"sourceKind": "str", # Supported source types. Required. Known values are:
"file" and "url".
"sourceUri": "str", # URI location for the file or url. Required.
"contentStructureKind": "str", # Optional. Content structure type for
sources. "unstructured"
"displayName": "str", # Optional. Friendly name of the Source.
"lastUpdatedDateTime": "2020-02-20 00:00:00", # Optional. Date-time when the
QnA was last updated.
"source": "str" # Optional. Unique source identifier. Name of the file if
it's a 'file' source; otherwise, the complete URL if it's a 'url' source.
}
"""
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls = kwargs.pop("cls", None) # type: ClsType[JSON]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_sources_request(
project_name=project_name,
top=top,
skip=skip,
api_version=self._config.api_version,
headers=_headers,
params=_params,
)
path_format_arguments = {
"Endpoint": self._serialize.url(
"self._config.endpoint", self._config.endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
path_format_arguments = {
"Endpoint": self._serialize.url(
"self._config.endpoint", self._config.endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
return request
async def extract_data(pipeline_response):
deserialized = pipeline_response.http_response.json()
list_of_elem = deserialized["value"]
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.get("nextLink", None), AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
    async def _update_sources_initial(
        self, project_name: str, sources: Union[List[JSON], IO], **kwargs: Any
    ) -> Optional[JSON]:
        """Send the initial update-sources request that starts the long-running operation.

        :param project_name: Name of the project whose sources are updated.
        :param sources: Update payload — a list of JSON patch operations or a binary stream.
        :return: The response JSON when the service answers 200; ``None`` when it answers
         202 (the polling URL is then exposed via the ``Operation-Location`` response header).
        :raises ~azure.core.exceptions.HttpResponseError: For any other status code.
        """
        # Built-in status-code -> exception mapping; callers may extend it via kwargs.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}
        # An explicit content_type kwarg wins over any Content-Type header supplied by the caller.
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[Optional[JSON]]
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw bytes/streams are sent as-is; anything else is serialized as JSON.
        if isinstance(sources, (IO, bytes)):
            _content = sources
        else:
            _json = sources
        request = build_update_sources_request(
            project_name=project_name,
            content_type=content_type,
            api_version=self._config.api_version,
            json=_json,
            content=_content,
            headers=_headers,
            params=_params,
        )
        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore
        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)
        deserialized = None
        response_headers = {}
        if response.status_code == 200:
            if response.content:
                deserialized = response.json()
            else:
                deserialized = None
        if response.status_code == 202:
            # Async acceptance: surface the poll URL for the LRO machinery.
            response_headers["Operation-Location"] = self._deserialize(
                "str", response.headers.get("Operation-Location")
            )
        if cls:
            return cls(pipeline_response, deserialized, response_headers)
        return deserialized
@overload
async def begin_update_sources(
self, project_name: str, sources: List[JSON], *, content_type: str = "application/json", **kwargs: Any
) -> AsyncLROPoller[AsyncIterable[JSON]]:
"""Updates the sources of a project.
See
https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/update-sources
for more information.
:param project_name: The name of the project to use. Required.
:type project_name: str
:param sources: Update sources parameters of a project. Required.
:type sources: list[JSON]
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncLROBasePolling. Pass in False
for this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns an iterator like instance of JSON object
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.core.async_paging.AsyncItemPaged[JSON]]
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
sources = [
{
"op": "str", # Update operation type for assets. Required. Known
values are: "add", "delete", and "replace".
"value": {
"sourceKind": "str", # Supported source types. Required.
Known values are: "file" and "url".
"sourceUri": "str", # URI location for the file or url.
Required.
"contentStructureKind": "str", # Optional. Content structure
type for sources. "unstructured"
"displayName": "str", # Optional. Friendly name of the
Source.
"refresh": bool, # Optional. Boolean flag used to refresh
data from the Source.
"source": "str" # Optional. Unique source identifier. Name
of the file if it's a 'file' source; otherwise, the complete URL if it's
a 'url' source.
}
}
]
# response body for status code(s): 200, 202
response == {
"sourceKind": "str", # Supported source types. Required. Known values are:
"file" and "url".
"sourceUri": "str", # URI location for the file or url. Required.
"contentStructureKind": "str", # Optional. Content structure type for
sources. "unstructured"
"displayName": "str", # Optional. Friendly name of the Source.
"lastUpdatedDateTime": "2020-02-20 00:00:00", # Optional. Date-time when the
QnA was last updated.
"source": "str" # Optional. Unique source identifier. Name of the file if
it's a 'file' source; otherwise, the complete URL if it's a 'url' source.
}
"""
@overload
async def begin_update_sources(
self, project_name: str, sources: IO, *, content_type: str = "application/json", **kwargs: Any
) -> AsyncLROPoller[AsyncIterable[JSON]]:
"""Updates the sources of a project.
See
https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/update-sources
for more information.
:param project_name: The name of the project to use. Required.
:type project_name: str
:param sources: Update sources parameters of a project. Required.
:type sources: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncLROBasePolling. Pass in False
for this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns an iterator like instance of JSON object
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.core.async_paging.AsyncItemPaged[JSON]]
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200, 202
response == {
"sourceKind": "str", # Supported source types. Required. Known values are:
"file" and "url".
"sourceUri": "str", # URI location for the file or url. Required.
"contentStructureKind": "str", # Optional. Content structure type for
sources. "unstructured"
"displayName": "str", # Optional. Friendly name of the Source.
"lastUpdatedDateTime": "2020-02-20 00:00:00", # Optional. Date-time when the
QnA was last updated.
"source": "str" # Optional. Unique source identifier. Name of the file if
it's a 'file' source; otherwise, the complete URL if it's a 'url' source.
}
"""
    @distributed_trace_async
    async def begin_update_sources(
        self, project_name: str, sources: Union[List[JSON], IO], **kwargs: Any
    ) -> AsyncLROPoller[AsyncIterable[JSON]]:
        """Updates the sources of a project.

        See
        https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/update-sources
        for more information.

        :param project_name: The name of the project to use. Required.
        :type project_name: str
        :param sources: Update sources parameters of a project. Is either a list type or a IO type.
         Required.
        :type sources: list[JSON] or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncLROBasePolling. Pass in False
         for this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns an iterator like instance of JSON object
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.core.async_paging.AsyncItemPaged[JSON]]
        :raises ~azure.core.exceptions.HttpResponseError:

        Example:
            .. code-block:: python

                # response body for status code(s): 200, 202
                response == {
                    "sourceKind": "str",  # Supported source types. Required. Known values are:
                      "file" and "url".
                    "sourceUri": "str",  # URI location for the file or url. Required.
                    "contentStructureKind": "str",  # Optional. Content structure type for
                      sources. "unstructured"
                    "displayName": "str",  # Optional. Friendly name of the Source.
                    "lastUpdatedDateTime": "2020-02-20 00:00:00",  # Optional. Date-time when the
                      QnA was last updated.
                    "source": "str"  # Optional. Unique source identifier. Name of the file if
                      it's a 'file' source; otherwise, the complete URL if it's a 'url' source.
                }
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = kwargs.pop("params", {}) or {}
        # Explicit content_type kwarg takes precedence over a caller-supplied Content-Type header.
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[JSON]
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw bytes/streams are forwarded untouched; lists are serialized as JSON.
        if isinstance(sources, (IO, bytes)):
            _content = sources
        else:
            _json = sources

        def prepare_request(next_link=None):
            # First page: the update-sources request itself. Later pages: follow nextLink.
            if not next_link:
                request = build_update_sources_request(
                    project_name=project_name,
                    content_type=content_type,
                    api_version=self._config.api_version,
                    json=_json,
                    content=_content,
                    headers=_headers,
                    params=_params,
                )
                path_format_arguments = {
                    "Endpoint": self._serialize.url(
                        "self._config.endpoint", self._config.endpoint, "str", skip_quote=True
                    ),
                }
                request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urlparse(next_link)
                _next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
                path_format_arguments = {
                    "Endpoint": self._serialize.url(
                        "self._config.endpoint", self._config.endpoint, "str", skip_quote=True
                    ),
                }
                request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore
            return request

        async def extract_data(pipeline_response):
            # Split a page body into (continuation token, items) for AsyncItemPaged.
            deserialized = pipeline_response.http_response.json()
            list_of_elem = deserialized["value"]
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.get("nextLink", None), AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return pipeline_response

        polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        if cont_token is None:
            # Kick off the LRO; cls=lambda keeps the raw pipeline response for the poller.
            raw_result = await self._update_sources_initial(  # type: ignore
                project_name=project_name,
                sources=sources,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Final LRO result: an async pageable seeded with the last polled response.
            async def internal_get_next(next_link=None):
                if next_link is None:
                    return pipeline_response
                return await get_next(next_link)

            return AsyncItemPaged(internal_get_next, extract_data)

        path_format_arguments = {
            "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
        }
        if polling is True:
            polling_method = cast(
                AsyncPollingMethod,
                AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs),
            )  # type: AsyncPollingMethod
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            polling_method = polling
        if cont_token:
            # Resume a previously started operation from its saved state.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
@distributed_trace
def list_qnas(
self,
project_name: str,
*,
source: Optional[str] = None,
top: Optional[int] = None,
skip: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable[JSON]:
"""Gets all the QnAs of a project.
See
https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/get-qnas
for more information.
:param project_name: The name of the project to use. Required.
:type project_name: str
:keyword source: Source of the QnA. Default value is None.
:paramtype source: str
:keyword top: The maximum number of resources to return from the collection. Default value is
None.
:paramtype top: int
:keyword skip: An offset into the collection of the first resource to be returned. Default
value is None.
:paramtype skip: int
:return: An iterator like instance of JSON object
:rtype: ~azure.core.async_paging.AsyncItemPaged[JSON]
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200
response == {
"activeLearningSuggestions": [
{
"clusterHead": "str", # Optional. Question chosen as the
head of suggested questions cluster by Active Learning clustering
algorithm.
"suggestedQuestions": [
{
"autoSuggestedCount": 0, # Optional. The
number of times the question was suggested automatically by the
Active Learning algorithm.
"question": "str", # Optional. Question
suggested by the Active Learning feature.
"userSuggestedCount": 0 # Optional. The
number of times the question was suggested explicitly by the
user.
}
]
}
],
"answer": "str", # Optional. Answer text.
"dialog": {
"isContextOnly": bool, # Optional. To mark if a prompt is relevant
only with a previous question or not. If true, do not include this QnA as
answer for queries without context; otherwise, ignores context and includes
this QnA in answers.
"prompts": [
{
"displayOrder": 0, # Optional. Index of the prompt.
It is used for ordering of the prompts.
"displayText": "str", # Optional. Text displayed to
represent a follow up question prompt.
"qna": {
"activeLearningSuggestions": [
{
"clusterHead": "str", #
Optional. Question chosen as the head of suggested
questions cluster by Active Learning clustering
algorithm.
"suggestedQuestions": [
{
"autoSuggestedCount": 0, # Optional. The number
of times the question was suggested automatically
by the Active Learning algorithm.
"question":
"str", # Optional. Question suggested by the
Active Learning feature.
"userSuggestedCount": 0 # Optional. The number
of times the question was suggested explicitly by
the user.
}
]
}
],
"answer": "str", # Optional. Answer text.
"dialog": ...,
"id": 0, # Optional. Unique ID for the QnA.
"metadata": {
"str": "str" # Optional. Metadata
associated with the answer, useful to categorize or filter
question answers.
},
"questions": [
"str" # Optional. List of questions
associated with the answer.
],
"source": "str" # Optional. Source from
which QnA was indexed e.g.
https://docs.microsoft.com/en-us/azure/cognitive-services/QnAMaker/FAQs
.
},
"qnaId": 0 # Optional. ID of the QnA corresponding
to the prompt.
}
]
},
"id": 0, # Optional. Unique ID for the QnA.
"lastUpdatedDateTime": "2020-02-20 00:00:00", # Optional. Date-time when the
QnA was last updated.
"metadata": {
"str": "str" # Optional. Metadata associated with the answer, useful
to categorize or filter question answers.
},
"questions": [
"str" # Optional. List of questions associated with the answer.
],
"source": "str" # Optional. Source from which QnA was indexed e.g.
https://docs.microsoft.com/en-us/azure/cognitive-services/QnAMaker/FAQs .
}
"""
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls = kwargs.pop("cls", None) # type: ClsType[JSON]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_qnas_request(
project_name=project_name,
source=source,
top=top,
skip=skip,
api_version=self._config.api_version,
headers=_headers,
params=_params,
)
path_format_arguments = {
"Endpoint": self._serialize.url(
"self._config.endpoint", self._config.endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
path_format_arguments = {
"Endpoint": self._serialize.url(
"self._config.endpoint", self._config.endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
return request
async def extract_data(pipeline_response):
deserialized = pipeline_response.http_response.json()
list_of_elem = deserialized["value"]
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.get("nextLink", None), AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
async def _update_qnas_initial(
self, project_name: str, qnas: Union[List[JSON], IO], **kwargs: Any
) -> Optional[JSON]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[Optional[JSON]]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(qnas, (IO, bytes)):
_content = qnas
else:
_json = qnas
request = build_update_qnas_request(
project_name=project_name,
content_type=content_type,
api_version=self._config.api_version,
json=_json,
content=_content,
headers=_headers,
params=_params,
)
path_format_arguments = {
"Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = None
response_headers = {}
if response.status_code == 200:
if response.content:
deserialized = response.json()
else:
deserialized = None
if response.status_code == 202:
response_headers["Operation-Location"] = self._deserialize(
"str", response.headers.get("Operation-Location")
)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
@overload
async def begin_update_qnas(
self, project_name: str, qnas: List[JSON], *, content_type: str = "application/json", **kwargs: Any
) -> AsyncLROPoller[AsyncIterable[JSON]]:
"""Updates the QnAs of a project.
See
https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/update-qnas
for more information.
:param project_name: The name of the project to use. Required.
:type project_name: str
:param qnas: Update QnAs parameters of a project. Required.
:type qnas: list[JSON]
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncLROBasePolling. Pass in False
for this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns an iterator like instance of JSON object
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.core.async_paging.AsyncItemPaged[JSON]]
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# JSON input template you can fill out and use as your body input.
qnas = [
{
"op": "str", # Update operation type for assets. Required. Known
values are: "add", "delete", and "replace".
"value": {
"activeLearningSuggestions": [
{
"clusterHead": "str", # Optional. Question
chosen as the head of suggested questions cluster by Active
Learning clustering algorithm.
"suggestedQuestions": [
{
"autoSuggestedCount": 0, #
Optional. The number of times the question was suggested
automatically by the Active Learning algorithm.
"question": "str", #
Optional. Question suggested by the Active Learning
feature.
"userSuggestedCount": 0 #
Optional. The number of times the question was suggested
explicitly by the user.
}
]
}
],
"answer": "str", # Optional. Answer text.
"dialog": {
"isContextOnly": bool, # Optional. To mark if a
prompt is relevant only with a previous question or not. If true, do
not include this QnA as answer for queries without context;
otherwise, ignores context and includes this QnA in answers.
"prompts": [
{
"displayOrder": 0, # Optional. Index
of the prompt. It is used for ordering of the prompts.
"displayText": "str", # Optional.
Text displayed to represent a follow up question prompt.
"qna": ...,
"qnaId": 0 # Optional. ID of the QnA
corresponding to the prompt.
}
]
},
"id": 0, # Optional. Unique ID for the QnA.
"metadata": {
"str": "str" # Optional. Metadata associated with
the answer, useful to categorize or filter question answers.
},
"questions": [
"str" # Optional. List of questions associated with
the answer.
],
"source": "str" # Optional. Source from which QnA was
indexed e.g.
https://docs.microsoft.com/en-us/azure/cognitive-services/QnAMaker/FAQs .
}
}
]
# response body for status code(s): 200, 202
response == {
"activeLearningSuggestions": [
{
"clusterHead": "str", # Optional. Question chosen as the
head of suggested questions cluster by Active Learning clustering
algorithm.
"suggestedQuestions": [
{
"autoSuggestedCount": 0, # Optional. The
number of times the question was suggested automatically by the
Active Learning algorithm.
"question": "str", # Optional. Question
suggested by the Active Learning feature.
"userSuggestedCount": 0 # Optional. The
number of times the question was suggested explicitly by the
user.
}
]
}
],
"answer": "str", # Optional. Answer text.
"dialog": {
"isContextOnly": bool, # Optional. To mark if a prompt is relevant
only with a previous question or not. If true, do not include this QnA as
answer for queries without context; otherwise, ignores context and includes
this QnA in answers.
"prompts": [
{
"displayOrder": 0, # Optional. Index of the prompt.
It is used for ordering of the prompts.
"displayText": "str", # Optional. Text displayed to
represent a follow up question prompt.
"qna": {
"activeLearningSuggestions": [
{
"clusterHead": "str", #
Optional. Question chosen as the head of suggested
questions cluster by Active Learning clustering
algorithm.
"suggestedQuestions": [
{
"autoSuggestedCount": 0, # Optional. The number
of times the question was suggested automatically
by the Active Learning algorithm.
"question":
"str", # Optional. Question suggested by the
Active Learning feature.
"userSuggestedCount": 0 # Optional. The number
of times the question was suggested explicitly by
the user.
}
]
}
],
"answer": "str", # Optional. Answer text.
"dialog": ...,
"id": 0, # Optional. Unique ID for the QnA.
"metadata": {
"str": "str" # Optional. Metadata
associated with the answer, useful to categorize or filter
question answers.
},
"questions": [
"str" # Optional. List of questions
associated with the answer.
],
"source": "str" # Optional. Source from
which QnA was indexed e.g.
https://docs.microsoft.com/en-us/azure/cognitive-services/QnAMaker/FAQs
.
},
"qnaId": 0 # Optional. ID of the QnA corresponding
to the prompt.
}
]
},
"id": 0, # Optional. Unique ID for the QnA.
"lastUpdatedDateTime": "2020-02-20 00:00:00", # Optional. Date-time when the
QnA was last updated.
"metadata": {
"str": "str" # Optional. Metadata associated with the answer, useful
to categorize or filter question answers.
},
"questions": [
"str" # Optional. List of questions associated with the answer.
],
"source": "str" # Optional. Source from which QnA was indexed e.g.
https://docs.microsoft.com/en-us/azure/cognitive-services/QnAMaker/FAQs .
}
"""
@overload
async def begin_update_qnas(
self, project_name: str, qnas: IO, *, content_type: str = "application/json", **kwargs: Any
) -> AsyncLROPoller[AsyncIterable[JSON]]:
"""Updates the QnAs of a project.
See
https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/update-qnas
for more information.
:param project_name: The name of the project to use. Required.
:type project_name: str
:param qnas: Update QnAs parameters of a project. Required.
:type qnas: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncLROBasePolling. Pass in False
for this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns an iterator like instance of JSON object
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.core.async_paging.AsyncItemPaged[JSON]]
:raises ~azure.core.exceptions.HttpResponseError:
Example:
.. code-block:: python
# response body for status code(s): 200, 202
response == {
"activeLearningSuggestions": [
{
"clusterHead": "str", # Optional. Question chosen as the
head of suggested questions cluster by Active Learning clustering
algorithm.
"suggestedQuestions": [
{
"autoSuggestedCount": 0, # Optional. The
number of times the question was suggested automatically by the
Active Learning algorithm.
"question": "str", # Optional. Question
suggested by the Active Learning feature.
"userSuggestedCount": 0 # Optional. The
number of times the question was suggested explicitly by the
user.
}
]
}
],
"answer": "str", # Optional. Answer text.
"dialog": {
"isContextOnly": bool, # Optional. To mark if a prompt is relevant
only with a previous question or not. If true, do not include this QnA as
answer for queries without context; otherwise, ignores context and includes
this QnA in answers.
"prompts": [
{
"displayOrder": 0, # Optional. Index of the prompt.
It is used for ordering of the prompts.
"displayText": "str", # Optional. Text displayed to
represent a follow up question prompt.
"qna": {
"activeLearningSuggestions": [
{
"clusterHead": "str", #
Optional. Question chosen as the head of suggested
questions cluster by Active Learning clustering
algorithm.
"suggestedQuestions": [
{
"autoSuggestedCount": 0, # Optional. The number
of times the question was suggested automatically
by the Active Learning algorithm.
"question":
"str", # Optional. Question suggested by the
Active Learning feature.
"userSuggestedCount": 0 # Optional. The number
of times the question was suggested explicitly by
the user.
}
]
}
],
"answer": "str", # Optional. Answer text.
"dialog": ...,
"id": 0, # Optional. Unique ID for the QnA.
"metadata": {
"str": "str" # Optional. Metadata
associated with the answer, useful to categorize or filter
question answers.
},
"questions": [
"str" # Optional. List of questions
associated with the answer.
],
"source": "str" # Optional. Source from
which QnA was indexed e.g.
https://docs.microsoft.com/en-us/azure/cognitive-services/QnAMaker/FAQs
.
},
"qnaId": 0 # Optional. ID of the QnA corresponding
to the prompt.
}
]
},
"id": 0, # Optional. Unique ID for the QnA.
"lastUpdatedDateTime": "2020-02-20 00:00:00", # Optional. Date-time when the
QnA was last updated.
"metadata": {
"str": "str" # Optional. Metadata associated with the answer, useful
to categorize or filter question answers.
},
"questions": [
"str" # Optional. List of questions associated with the answer.
],
"source": "str" # Optional. Source from which QnA was indexed e.g.
https://docs.microsoft.com/en-us/azure/cognitive-services/QnAMaker/FAQs .
}
"""
@distributed_trace_async
async def begin_update_qnas(
    self, project_name: str, qnas: Union[List[JSON], IO], **kwargs: Any
) -> AsyncLROPoller[AsyncIterable[JSON]]:
    """Updates the QnAs of a project.

    See
    https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/update-qnas
    for more information.

    :param project_name: The name of the project to use. Required.
    :type project_name: str
    :param qnas: Update QnAs parameters of a project. Is either a list type or a IO type. Required.
    :type qnas: list[JSON] or IO
    :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
     Default value is None.
    :paramtype content_type: str
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncLROBasePolling. Pass in False
     for this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns an iterator like instance of JSON object
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.core.async_paging.AsyncItemPaged[JSON]]
    :raises ~azure.core.exceptions.HttpResponseError:

    Example:
        .. code-block:: python

            # response body for status code(s): 200, 202
            response == {
                "activeLearningSuggestions": [
                    {
                        "clusterHead": "str",  # Optional. Question chosen as the
                          head of suggested questions cluster by Active Learning clustering
                          algorithm.
                        "suggestedQuestions": [
                            {
                                "autoSuggestedCount": 0,  # Optional. The
                                  number of times the question was suggested automatically by the
                                  Active Learning algorithm.
                                "question": "str",  # Optional. Question
                                  suggested by the Active Learning feature.
                                "userSuggestedCount": 0  # Optional. The
                                  number of times the question was suggested explicitly by the
                                  user.
                            }
                        ]
                    }
                ],
                "answer": "str",  # Optional. Answer text.
                "dialog": {
                    "isContextOnly": bool,  # Optional. To mark if a prompt is relevant
                      only with a previous question or not. If true, do not include this QnA as
                      answer for queries without context; otherwise, ignores context and includes
                      this QnA in answers.
                    "prompts": [
                        {
                            "displayOrder": 0,  # Optional. Index of the prompt.
                              It is used for ordering of the prompts.
                            "displayText": "str",  # Optional. Text displayed to
                              represent a follow up question prompt.
                            "qna": {
                                "activeLearningSuggestions": [
                                    {
                                        "clusterHead": "str",  # Optional.
                                          Question chosen as the head of suggested questions
                                          cluster by Active Learning clustering algorithm.
                                        "suggestedQuestions": [
                                            {
                                                "autoSuggestedCount": 0,  # Optional. The number
                                                  of times the question was suggested automatically
                                                  by the Active Learning algorithm.
                                                "question": "str",  # Optional. Question
                                                  suggested by the Active Learning feature.
                                                "userSuggestedCount": 0  # Optional. The number
                                                  of times the question was suggested explicitly by
                                                  the user.
                                            }
                                        ]
                                    }
                                ],
                                "answer": "str",  # Optional. Answer text.
                                "dialog": ...,
                                "id": 0,  # Optional. Unique ID for the QnA.
                                "metadata": {
                                    "str": "str"  # Optional. Metadata
                                      associated with the answer, useful to categorize or filter
                                      question answers.
                                },
                                "questions": [
                                    "str"  # Optional. List of questions
                                      associated with the answer.
                                ],
                                "source": "str"  # Optional. Source from
                                  which QnA was indexed e.g.
                                  https://docs.microsoft.com/en-us/azure/cognitive-services/QnAMaker/FAQs
                                  .
                            },
                            "qnaId": 0  # Optional. ID of the QnA corresponding
                              to the prompt.
                        }
                    ]
                },
                "id": 0,  # Optional. Unique ID for the QnA.
                "lastUpdatedDateTime": "2020-02-20 00:00:00",  # Optional. Date-time when the
                  QnA was last updated.
                "metadata": {
                    "str": "str"  # Optional. Metadata associated with the answer, useful
                      to categorize or filter question answers.
                },
                "questions": [
                    "str"  # Optional. List of questions associated with the answer.
                ],
                "source": "str"  # Optional. Source from which QnA was indexed e.g.
                  https://docs.microsoft.com/en-us/azure/cognitive-services/QnAMaker/FAQs .
            }
    """
    # Per-call header/query overrides are popped out of kwargs before they
    # reach the pipeline, so they are not forwarded twice.
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = kwargs.pop("params", {}) or {}

    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    cls = kwargs.pop("cls", None)  # type: ClsType[JSON]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})
    content_type = content_type or "application/json"
    _json = None
    _content = None
    # Streams/raw bytes are sent as-is; any other value is JSON-serialized
    # by the request builder.
    if isinstance(qnas, (IO, bytes)):
        _content = qnas
    else:
        _json = qnas

    def prepare_request(next_link=None):
        # Build either the initial update request, or a plain GET against the
        # service-provided nextLink when paging through the final result.
        if not next_link:
            request = build_update_qnas_request(
                project_name=project_name,
                content_type=content_type,
                api_version=self._config.api_version,
                json=_json,
                content=_content,
                headers=_headers,
                params=_params,
            )
            path_format_arguments = {
                "Endpoint": self._serialize.url(
                    "self._config.endpoint", self._config.endpoint, "str", skip_quote=True
                ),
            }
            request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore
        else:
            # make call to next link with the client's api-version
            _parsed_next_link = urlparse(next_link)
            _next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
            _next_request_params["api-version"] = self._config.api_version
            request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
            path_format_arguments = {
                "Endpoint": self._serialize.url(
                    "self._config.endpoint", self._config.endpoint, "str", skip_quote=True
                ),
            }
            request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore
        return request

    async def extract_data(pipeline_response):
        # Pull one page of QnA records (and the link to the next page, if any)
        # out of the raw HTTP response.
        deserialized = pipeline_response.http_response.json()
        list_of_elem = deserialized["value"]
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.get("nextLink", None), AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch a page (initial or follow-up) and fail fast on non-200 status.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)
        return pipeline_response

    polling = kwargs.pop("polling", True)  # type: Union[bool, AsyncPollingMethod]
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
    if cont_token is None:
        # No saved poller state: kick off the long-running update operation now.
        raw_result = await self._update_qnas_initial(  # type: ignore
            project_name=project_name,
            qnas=qnas,
            content_type=content_type,
            cls=lambda x, y, z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # When the LRO completes, expose its outcome as an async pager whose
        # first "page" is the final pipeline response itself.
        async def internal_get_next(next_link=None):
            if next_link is None:
                return pipeline_response
            return await get_next(next_link)

        return AsyncItemPaged(internal_get_next, extract_data)

    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    if polling is True:
        polling_method = cast(
            AsyncPollingMethod,
            AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs),
        )  # type: AsyncPollingMethod
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        # Resume a poller from a previously saved continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
@overload
async def add_feedback(  # pylint: disable=inconsistent-return-statements
    self, project_name: str, feedback: JSON, *, content_type: str = "application/json", **kwargs: Any
) -> None:
    """Update Active Learning feedback.

    See
    https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/add-feedback
    for more information.

    :param project_name: The name of the project to use. Required.
    :type project_name: str
    :param feedback: Feedback for Active Learning. Required.
    :type feedback: JSON
    :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
     Default value is "application/json".
    :paramtype content_type: str
    :return: None
    :rtype: None
    :raises ~azure.core.exceptions.HttpResponseError:

    Example:
        .. code-block:: python

            # JSON input template you can fill out and use as your body input.
            feedback = {
                "records": [
                    {
                        "qnaId": 0,  # Optional. Unique ID of the QnA.
                        "userId": "str",  # Optional. Unique identifier of the user.
                        "userQuestion": "str"  # Optional. User suggested question
                          for the QnA.
                    }
                ]
            }
    """
    # Typing overload only (JSON body variant): the shared implementation is
    # the undecorated add_feedback defined below.
@overload
async def add_feedback(  # pylint: disable=inconsistent-return-statements
    self, project_name: str, feedback: IO, *, content_type: str = "application/json", **kwargs: Any
) -> None:
    """Update Active Learning feedback.

    See
    https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/add-feedback
    for more information.

    :param project_name: The name of the project to use. Required.
    :type project_name: str
    :param feedback: Feedback for Active Learning. Required.
    :type feedback: IO
    :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
     Default value is "application/json".
    :paramtype content_type: str
    :return: None
    :rtype: None
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Typing overload only (binary/stream body variant): the shared
    # implementation is the undecorated add_feedback defined below.
@distributed_trace_async
async def add_feedback(  # pylint: disable=inconsistent-return-statements
    self, project_name: str, feedback: Union[JSON, IO], **kwargs: Any
) -> None:
    """Update Active Learning feedback.

    See
    https://learn.microsoft.com/rest/api/cognitiveservices/questionanswering/question-answering-projects/add-feedback
    for more information.

    :param project_name: The name of the project to use. Required.
    :type project_name: str
    :param feedback: Feedback for Active Learning. Is either a model type or a IO type. Required.
    :type feedback: JSON or IO
    :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
     Default value is None.
    :paramtype content_type: str
    :return: None
    :rtype: None
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Map the service's failure status codes onto the azure-core exception types.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    # Per-call header/query overrides are removed from kwargs before they
    # reach the pipeline so they are not forwarded twice.
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = kwargs.pop("params", {}) or {}

    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    cls = kwargs.pop("cls", None)  # type: ClsType[None]
    content_type = content_type or "application/json"

    # Streams and raw bytes travel as-is; any other value is JSON-serialized
    # by the request builder.
    if isinstance(feedback, (IO, bytes)):
        _json, _content = None, feedback
    else:
        _json, _content = feedback, None

    request = build_add_feedback_request(
        project_name=project_name,
        content_type=content_type,
        api_version=self._config.api_version,
        json=_json,
        content=_content,
        headers=_headers,
        params=_params,
    )
    path_format_arguments = {
        "Endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)  # type: ignore

    pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    response = pipeline_response.http_response

    # The service acknowledges recorded feedback with 204 No Content.
    if response.status_code != 204:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})
| [
"noreply@github.com"
] | gaoyp830.noreply@github.com |
eb0c32a682abc73083d3f4107b867774b5972b20 | 67e3388ed0e1154dc0ceb189c83a8521eba79fae | /katpoint/test/test_projection.py | 032e6ecdfb607b5818f3fbdd90f050914c002c10 | [
"BSD-3-Clause"
] | permissive | astrojhgu/katpoint | 3dd0f946d76859280ade5f55c85f08538cc24462 | b0fa342c2f6dcebe7474cc405d5fbdb4f3f295bc | refs/heads/master | 2020-12-31T00:40:42.452856 | 2017-01-31T10:30:25 | 2017-01-31T10:30:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,066 | py | ################################################################################
# Copyright (c) 2009-2016, National Research Foundation (Square Kilometre Array)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Tests for the projection module."""
# pylint: disable-msg=C0103,W0212

import unittest

import numpy as np

import katpoint

# The reference AIPS projection routines are an optional extension module;
# when it is absent, the AIPS comparison tests skip themselves at run time
# based on the found_aips flag.
try:
    from .aips_projection import newpos, dircos
    found_aips = True
except ImportError:
    found_aips = False
def skip(reason=''):
    """Skip the currently running test with the given *reason*.

    Uses :class:`nose.SkipTest` when nose is installed (preserving the
    original behaviour), and falls back to the standard library's
    :class:`unittest.SkipTest` otherwise, so the test is still reported as
    skipped under other runners instead of silently appearing to pass.

    :param reason: Human-readable explanation for the skip.
    :raises unittest.SkipTest: Always (possibly the nose subclass/alias).
    """
    try:
        import nose
        raise nose.SkipTest(reason)
    except ImportError:
        # Previously this path swallowed the error and returned, which made
        # guarded tests look like passes; raising SkipTest is more honest.
        raise unittest.SkipTest(reason)
def assert_angles_almost_equal(x, y, decimal):
    """Assert that angles *x* and *y* are almost equal, modulo 2*pi.

    The elementwise difference ``x - y`` is wrapped to its primary interval
    (about -pi..pi) before being compared to zero, so angles that differ by a
    whole number of turns compare as equal. Scalars or numpy arrays accepted.

    :param x: First angle (or array of angles), in radians.
    :param y: Second angle (or array of angles), in radians.
    :param decimal: Desired precision in decimal digits.
    :raises AssertionError: If the wrapped difference is not almost zero.
    """
    # A def (rather than the original assigned lambda, PEP 8 E731) with the
    # identical wrapping formula.
    def primary_angle(angle):
        return angle - np.round(angle / (2.0 * np.pi)) * 2.0 * np.pi
    np.testing.assert_almost_equal(primary_angle(x - y), np.zeros(np.shape(x)), decimal=decimal)
class TestProjectionSIN(unittest.TestCase):
    """Test orthographic (SIN) projection."""

    def setUp(self):
        self.plane_to_sphere = katpoint.plane_to_sphere['SIN']
        self.sphere_to_plane = katpoint.sphere_to_plane['SIN']
        N = 100
        max_theta = np.pi / 2.0
        self.az0 = np.pi * (2.0 * np.random.rand(N) - 1.0)
        # Keep away from poles (leave them as corner cases)
        self.el0 = 0.999 * np.pi * (np.random.rand(N) - 0.5)
        # (x, y) points within unit circle
        theta = max_theta * np.random.rand(N)
        phi = 2 * np.pi * np.random.rand(N)
        self.x = np.sin(theta) * np.cos(phi)
        self.y = np.sin(theta) * np.sin(phi)

    def test_random_closure(self):
        """SIN projection: do random projections and check closure."""
        az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
        xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
        aa, ee = self.plane_to_sphere(self.az0, self.el0, xx, yy)
        np.testing.assert_almost_equal(self.x, xx, decimal=10)
        np.testing.assert_almost_equal(self.y, yy, decimal=10)
        assert_angles_almost_equal(az, aa, decimal=10)
        assert_angles_almost_equal(el, ee, decimal=10)

    def test_aips_compatibility(self):
        """SIN projection: compare with original AIPS routine."""
        if not found_aips:
            skip("AIPS projection module not found")
            return
        az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
        xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
        az_aips, el_aips = np.zeros(az.shape), np.zeros(el.shape)
        x_aips, y_aips = np.zeros(xx.shape), np.zeros(yy.shape)
        # range (not the Python 2-only xrange) keeps this Python 3 compatible.
        for n in range(len(az)):
            az_aips[n], el_aips[n], ierr = \
                newpos(2, self.az0[n], self.el0[n], self.x[n], self.y[n])
            x_aips[n], y_aips[n], ierr = \
                dircos(2, self.az0[n], self.el0[n], az[n], el[n])
            # NOTE(review): only the dircos ierr is checked here (newpos's is
            # overwritten) - presumably intentional, but worth confirming.
            self.assertEqual(ierr, 0)
        assert_angles_almost_equal(az, az_aips, decimal=9)
        assert_angles_almost_equal(el, el_aips, decimal=9)
        np.testing.assert_almost_equal(xx, x_aips, decimal=9)
        np.testing.assert_almost_equal(yy, y_aips, decimal=9)

    def test_corner_cases(self):
        """SIN projection: test special corner cases."""
        # SPHERE TO PLANE
        # Origin
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, 0.0], decimal=12)
        # Points 90 degrees from reference point on sphere
        xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [1.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, -np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [-1.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi / 2.0))
        np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, -np.pi / 2.0))
        np.testing.assert_almost_equal(xy, [0.0, -1.0], decimal=12)
        # Reference point at pole on sphere
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, 0.0, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, -1.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi, 1e-8))
        np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [1.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, -np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [-1.0, 0.0], decimal=12)
        # Points outside allowed domain on sphere
        self.assertRaises(ValueError, self.sphere_to_plane, 0.0, 0.0, np.pi, 0.0)
        self.assertRaises(ValueError, self.sphere_to_plane, 0.0, 0.0, 0.0, np.pi)
        # PLANE TO SPHERE
        # Origin
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 0.0))
        assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
        # Points on unit circle in plane
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 1.0, 0.0))
        assert_angles_almost_equal(ae, [np.pi / 2.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, -1.0, 0.0))
        assert_angles_almost_equal(ae, [-np.pi / 2.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 1.0))
        assert_angles_almost_equal(ae, [0.0, np.pi / 2.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, -1.0))
        assert_angles_almost_equal(ae, [0.0, -np.pi / 2.0], decimal=12)
        # Reference point at pole on sphere
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 1.0, 0.0))
        assert_angles_almost_equal(ae, [np.pi / 2.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, -1.0, 0.0))
        assert_angles_almost_equal(ae, [-np.pi / 2.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, 1.0))
        assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, -1.0))
        assert_angles_almost_equal(ae, [np.pi, 0.0], decimal=12)
        # Points outside allowed domain in plane
        self.assertRaises(ValueError, self.plane_to_sphere, 0.0, 0.0, 2.0, 0.0)
        self.assertRaises(ValueError, self.plane_to_sphere, 0.0, 0.0, 0.0, 2.0)
class TestProjectionTAN(unittest.TestCase):
    """Test gnomonic (TAN) projection."""

    def setUp(self):
        self.plane_to_sphere = katpoint.plane_to_sphere['TAN']
        self.sphere_to_plane = katpoint.sphere_to_plane['TAN']
        N = 100
        # Stay away from edge of hemisphere
        max_theta = np.pi / 2.0 - 0.01
        self.az0 = np.pi * (2.0 * np.random.rand(N) - 1.0)
        # Keep away from poles (leave them as corner cases)
        self.el0 = 0.999 * np.pi * (np.random.rand(N) - 0.5)
        theta = max_theta * np.random.rand(N)
        phi = 2 * np.pi * np.random.rand(N)
        # Perform inverse TAN mapping to spread out points on plane
        self.x = np.tan(theta) * np.cos(phi)
        self.y = np.tan(theta) * np.sin(phi)

    def test_random_closure(self):
        """TAN projection: do random projections and check closure."""
        az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
        xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
        aa, ee = self.plane_to_sphere(self.az0, self.el0, xx, yy)
        np.testing.assert_almost_equal(self.x, xx, decimal=8)
        np.testing.assert_almost_equal(self.y, yy, decimal=8)
        assert_angles_almost_equal(az, aa, decimal=8)
        assert_angles_almost_equal(el, ee, decimal=8)

    def test_aips_compatibility(self):
        """TAN projection: compare with original AIPS routine."""
        if not found_aips:
            skip("AIPS projection module not found")
            return
        # AIPS TAN only deprojects (x, y) coordinates within unit circle
        r = self.x * self.x + self.y * self.y
        az0, el0 = self.az0[r <= 1.0], self.el0[r <= 1.0]
        x, y = self.x[r <= 1.0], self.y[r <= 1.0]
        az, el = self.plane_to_sphere(az0, el0, x, y)
        xx, yy = self.sphere_to_plane(az0, el0, az, el)
        az_aips, el_aips = np.zeros(az.shape), np.zeros(el.shape)
        x_aips, y_aips = np.zeros(xx.shape), np.zeros(yy.shape)
        # range (not the Python 2-only xrange) keeps this Python 3 compatible.
        for n in range(len(az)):
            az_aips[n], el_aips[n], ierr = \
                newpos(3, az0[n], el0[n], x[n], y[n])
            x_aips[n], y_aips[n], ierr = \
                dircos(3, az0[n], el0[n], az[n], el[n])
            self.assertEqual(ierr, 0)
        assert_angles_almost_equal(az, az_aips, decimal=10)
        assert_angles_almost_equal(el, el_aips, decimal=10)
        np.testing.assert_almost_equal(xx, x_aips, decimal=10)
        np.testing.assert_almost_equal(yy, y_aips, decimal=10)

    def test_corner_cases(self):
        """TAN projection: test special corner cases."""
        # SPHERE TO PLANE
        # Origin
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, 0.0], decimal=12)
        # Points 45 degrees from reference point on sphere
        xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi / 4.0, 0.0))
        np.testing.assert_almost_equal(xy, [1.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, -np.pi / 4.0, 0.0))
        np.testing.assert_almost_equal(xy, [-1.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi / 4.0))
        np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, -np.pi / 4.0))
        np.testing.assert_almost_equal(xy, [0.0, -1.0], decimal=12)
        # Reference point at pole on sphere
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, 0.0, np.pi / 4.0))
        np.testing.assert_almost_equal(xy, [0.0, -1.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi, np.pi / 4.0))
        np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi / 2.0, np.pi / 4.0))
        np.testing.assert_almost_equal(xy, [1.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, -np.pi / 2.0, np.pi / 4.0))
        np.testing.assert_almost_equal(xy, [-1.0, 0.0], decimal=12)
        # Points outside allowed domain on sphere
        self.assertRaises(ValueError, self.sphere_to_plane, 0.0, 0.0, np.pi, 0.0)
        self.assertRaises(ValueError, self.sphere_to_plane, 0.0, 0.0, 0.0, np.pi)
        # PLANE TO SPHERE
        # Origin
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 0.0))
        assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
        # Points on unit circle in plane
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 1.0, 0.0))
        assert_angles_almost_equal(ae, [np.pi / 4.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, -1.0, 0.0))
        assert_angles_almost_equal(ae, [-np.pi / 4.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 1.0))
        assert_angles_almost_equal(ae, [0.0, np.pi / 4.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, -1.0))
        assert_angles_almost_equal(ae, [0.0, -np.pi / 4.0], decimal=12)
        # Reference point at pole on sphere
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 1.0, 0.0))
        assert_angles_almost_equal(ae, [np.pi / 2.0, -np.pi / 4.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, -1.0, 0.0))
        assert_angles_almost_equal(ae, [-np.pi / 2.0, -np.pi / 4.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, 1.0))
        assert_angles_almost_equal(ae, [0.0, -np.pi / 4.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, -1.0))
        assert_angles_almost_equal(ae, [np.pi, -np.pi / 4.0], decimal=12)
class TestProjectionARC(unittest.TestCase):
    """Test zenithal equidistant (ARC) projection."""

    def setUp(self):
        self.plane_to_sphere = katpoint.plane_to_sphere['ARC']
        self.sphere_to_plane = katpoint.sphere_to_plane['ARC']
        N = 100
        # Stay away from edge of circle
        max_theta = np.pi - 0.01
        self.az0 = np.pi * (2.0 * np.random.rand(N) - 1.0)
        # Keep away from poles (leave them as corner cases)
        self.el0 = 0.999 * np.pi * (np.random.rand(N) - 0.5)
        # (x, y) points within circle of radius pi
        theta = max_theta * np.random.rand(N)
        phi = 2 * np.pi * np.random.rand(N)
        self.x = theta * np.cos(phi)
        self.y = theta * np.sin(phi)

    def test_random_closure(self):
        """ARC projection: do random projections and check closure."""
        az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
        xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
        aa, ee = self.plane_to_sphere(self.az0, self.el0, xx, yy)
        np.testing.assert_almost_equal(self.x, xx, decimal=8)
        np.testing.assert_almost_equal(self.y, yy, decimal=8)
        assert_angles_almost_equal(az, aa, decimal=8)
        assert_angles_almost_equal(el, ee, decimal=8)

    def test_aips_compatibility(self):
        """ARC projection: compare with original AIPS routine."""
        if not found_aips:
            skip("AIPS projection module not found")
            return
        az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
        xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
        az_aips, el_aips = np.zeros(az.shape), np.zeros(el.shape)
        x_aips, y_aips = np.zeros(xx.shape), np.zeros(yy.shape)
        # range (not the Python 2-only xrange) keeps this Python 3 compatible.
        for n in range(len(az)):
            az_aips[n], el_aips[n], ierr = \
                newpos(4, self.az0[n], self.el0[n], self.x[n], self.y[n])
            x_aips[n], y_aips[n], ierr = \
                dircos(4, self.az0[n], self.el0[n], az[n], el[n])
            self.assertEqual(ierr, 0)
        assert_angles_almost_equal(az, az_aips, decimal=8)
        assert_angles_almost_equal(el, el_aips, decimal=8)
        np.testing.assert_almost_equal(xx, x_aips, decimal=8)
        np.testing.assert_almost_equal(yy, y_aips, decimal=8)

    def test_corner_cases(self):
        """ARC projection: test special corner cases."""
        # SPHERE TO PLANE
        # Origin
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, 0.0], decimal=12)
        # Points 90 degrees from reference point on sphere
        xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [np.pi / 2.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, -np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [-np.pi / 2.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi / 2.0))
        np.testing.assert_almost_equal(xy, [0.0, np.pi / 2.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, -np.pi / 2.0))
        np.testing.assert_almost_equal(xy, [0.0, -np.pi / 2.0], decimal=12)
        # Reference point at pole on sphere
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, 0.0, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, -np.pi / 2.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, np.pi / 2.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [np.pi / 2.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, -np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [-np.pi / 2.0, 0.0], decimal=12)
        # Point diametrically opposite the reference point on sphere
        xy = np.array(self.sphere_to_plane(np.pi, 0.0, 0.0, 0.0))
        np.testing.assert_almost_equal(np.abs(xy), [np.pi, 0.0], decimal=12)
        # Points outside allowed domain on sphere
        self.assertRaises(ValueError, self.sphere_to_plane, 0.0, 0.0, 0.0, np.pi)
        # PLANE TO SPHERE
        # Origin
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 0.0))
        assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
        # Points on unit circle in plane
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 1.0, 0.0))
        assert_angles_almost_equal(ae, [1.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, -1.0, 0.0))
        assert_angles_almost_equal(ae, [-1.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 1.0))
        assert_angles_almost_equal(ae, [0.0, 1.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, -1.0))
        assert_angles_almost_equal(ae, [0.0, -1.0], decimal=12)
        # Points on circle with radius pi in plane
        ae = np.array(self.plane_to_sphere(0.0, 0.0, np.pi, 0.0))
        assert_angles_almost_equal(ae, [np.pi, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, -np.pi, 0.0))
        assert_angles_almost_equal(ae, [-np.pi, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, np.pi))
        assert_angles_almost_equal(ae, [np.pi, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, -np.pi))
        assert_angles_almost_equal(ae, [np.pi, 0.0], decimal=12)
        # Reference point at pole on sphere
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, np.pi / 2.0, 0.0))
        assert_angles_almost_equal(ae, [np.pi / 2.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, -np.pi / 2.0, 0.0))
        assert_angles_almost_equal(ae, [-np.pi / 2.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, np.pi / 2.0))
        assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, -np.pi / 2.0))
        assert_angles_almost_equal(ae, [np.pi, 0.0], decimal=12)
        # Points outside allowed domain in plane
        self.assertRaises(ValueError, self.plane_to_sphere, 0.0, 0.0, 4.0, 0.0)
        self.assertRaises(ValueError, self.plane_to_sphere, 0.0, 0.0, 0.0, 4.0)
class TestProjectionSTG(unittest.TestCase):
"""Test stereographic projection."""
def setUp(self):
    # Look up forward and inverse stereographic (STG) projections in katpoint.
    self.plane_to_sphere = katpoint.plane_to_sphere['STG']
    self.sphere_to_plane = katpoint.sphere_to_plane['STG']
    N = 100
    # Stay well away from point of projection
    max_theta = 0.8 * np.pi
    self.az0 = np.pi * (2.0 * np.random.rand(N) - 1.0)
    # Keep away from poles (leave them as corner cases)
    self.el0 = 0.999 * np.pi * (np.random.rand(N) - 0.5)
    # Perform inverse STG mapping to spread out points on plane
    theta = max_theta * np.random.rand(N)
    r = 2.0 * np.sin(theta) / (1.0 + np.cos(theta))
    phi = 2 * np.pi * np.random.rand(N)
    self.x = r * np.cos(phi)
    self.y = r * np.sin(phi)
def test_random_closure(self):
    """STG projection: do random projections and check closure."""
    # Round trip plane -> sphere -> plane -> sphere: both the plane
    # coordinates and the spherical coordinates should be reproduced.
    az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
    xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
    aa, ee = self.plane_to_sphere(self.az0, self.el0, xx, yy)
    np.testing.assert_almost_equal(self.x, xx, decimal=9)
    np.testing.assert_almost_equal(self.y, yy, decimal=9)
    assert_angles_almost_equal(az, aa, decimal=9)
    assert_angles_almost_equal(el, ee, decimal=9)
def test_aips_compatibility(self):
    """STG projection: compare with original AIPS routine."""
    if not found_aips:
        skip("AIPS projection module not found")
        return
    az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
    xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
    az_aips, el_aips = np.zeros(az.shape), np.zeros(el.shape)
    x_aips, y_aips = np.zeros(xx.shape), np.zeros(yy.shape)
    # range (not the Python 2-only xrange) keeps this Python 3 compatible.
    for n in range(len(az)):
        az_aips[n], el_aips[n], ierr = \
            newpos(6, self.az0[n], self.el0[n], self.x[n], self.y[n])
        x_aips[n], y_aips[n], ierr = \
            dircos(6, self.az0[n], self.el0[n], az[n], el[n])
        self.assertEqual(ierr, 0)
    # AIPS NEWPOS STG has poor accuracy on azimuth angle (large closure errors by itself)
    # assert_angles_almost_equal(az, az_aips, decimal=9)
    assert_angles_almost_equal(el, el_aips, decimal=9)
    np.testing.assert_almost_equal(xx, x_aips, decimal=9)
    np.testing.assert_almost_equal(yy, y_aips, decimal=9)
    def test_corner_cases(self):
        """STG projection: test special corner cases."""
        # SPHERE TO PLANE
        # Origin maps to origin
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, 0.0], decimal=12)
        # Points 90 degrees from reference point on sphere land at radius 2
        xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [2.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, -np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [-2.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi / 2.0))
        np.testing.assert_almost_equal(xy, [0.0, 2.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, -np.pi / 2.0))
        np.testing.assert_almost_equal(xy, [0.0, -2.0], decimal=12)
        # Reference point at pole on sphere
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, 0.0, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, -2.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, 2.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [2.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, -np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [-2.0, 0.0], decimal=12)
        # Points outside allowed domain on sphere should raise
        self.assertRaises(ValueError, self.sphere_to_plane, 0.0, 0.0, np.pi, 0.0)
        self.assertRaises(ValueError, self.sphere_to_plane, 0.0, 0.0, 0.0, np.pi)
        # PLANE TO SPHERE
        # Origin maps to origin
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 0.0))
        assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
        # Points on circle of radius 2.0 in plane map 90 degrees away
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 2.0, 0.0))
        assert_angles_almost_equal(ae, [np.pi / 2.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, -2.0, 0.0))
        assert_angles_almost_equal(ae, [-np.pi / 2.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 2.0))
        assert_angles_almost_equal(ae, [0.0, np.pi / 2.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, -2.0))
        assert_angles_almost_equal(ae, [0.0, -np.pi / 2.0], decimal=12)
        # Reference point at pole on sphere
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 2.0, 0.0))
        assert_angles_almost_equal(ae, [np.pi / 2.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, -2.0, 0.0))
        assert_angles_almost_equal(ae, [-np.pi / 2.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, 2.0))
        assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, -2.0))
        assert_angles_almost_equal(ae, [np.pi, 0.0], decimal=12)
class TestProjectionCAR(unittest.TestCase):
    """Test plate carree projection."""
    def setUp(self):
        """Build unrestricted random sphere and plane points for CAR tests."""
        self.plane_to_sphere = katpoint.plane_to_sphere['CAR']
        self.sphere_to_plane = katpoint.sphere_to_plane['CAR']
        N = 100
        # Unrestricted (az0, el0) points on sphere
        self.az0 = np.pi * (2.0 * np.random.rand(N) - 1.0)
        self.el0 = np.pi * (np.random.rand(N) - 0.5)
        # Unrestricted (x, y) points on corresponding plane
        self.x = np.pi * (2.0 * np.random.rand(N) - 1.0)
        self.y = np.pi * (np.random.rand(N) - 0.5)
    def test_random_closure(self):
        """CAR projection: do random projections and check closure."""
        # CAR closure is demanded to 1e-12, tighter than the other projections
        az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
        xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
        aa, ee = self.plane_to_sphere(self.az0, self.el0, xx, yy)
        np.testing.assert_almost_equal(self.x, xx, decimal=12)
        np.testing.assert_almost_equal(self.y, yy, decimal=12)
        assert_angles_almost_equal(az, aa, decimal=12)
        assert_angles_almost_equal(el, ee, decimal=12)
def sphere_to_plane_mattieu(targetaz, targetel, scanaz, scanel):
    """Map scanning-antenna (az, el) onto direction-cosine coordinates (l, m).

    See _coordinate options.py for the derivation.
    """
    delta_az = targetaz - scanaz
    ll = np.cos(targetel) * np.sin(delta_az)
    mm = (np.cos(targetel) * np.sin(scanel) * np.cos(delta_az)
          - np.cos(scanel) * np.sin(targetel))
    return ll, mm
def plane_to_sphere_mattieu(targetaz, targetel, ll, mm):
    """Invert sphere_to_plane_mattieu: map (l, m) back to scanning (az, el).

    Arguments are clipped into arcsin's domain to guard against round-off.
    """
    scanaz = targetaz - np.arcsin(np.clip(ll / np.cos(targetel), -1.0, 1.0))
    numerator = (np.sqrt(1.0 - ll ** 2 - mm ** 2) * np.sin(targetel)
                 + np.sqrt(np.cos(targetel) ** 2 - ll ** 2) * mm)
    scanel = np.arcsin(np.clip(numerator / (1.0 - ll ** 2), -1.0, 1.0))
    return scanaz, scanel
class TestProjectionSSN(unittest.TestCase):
    """Test swapped orthographic projection."""
    def setUp(self):
        """Build random points inside the clipped-circle SSN domain."""
        self.plane_to_sphere = katpoint.plane_to_sphere['SSN']
        self.sphere_to_plane = katpoint.sphere_to_plane['SSN']
        N = 100
        self.az0 = np.pi * (2.0 * np.random.rand(N) - 1.0)
        # Keep away from poles (leave them as corner cases)
        self.el0 = 0.999 * np.pi * (np.random.rand(N) - 0.5)
        # (x, y) points within complicated SSN domain - clipped unit circle
        cos_el0 = np.cos(self.el0)
        # The x coordinate is bounded by +- cos(el0)
        self.x = (2 * np.random.rand(N) - 1) * cos_el0
        # The y coordinate ranges between two (semi-)circles centred on origin:
        # the unit circle on one side and circle of radius cos(el0) on other side
        y_offset = -np.sqrt(cos_el0 ** 2 - self.x ** 2)
        y_range = -y_offset + np.sqrt(1.0 - self.x ** 2)
        self.y = (y_range * np.random.rand(N) + y_offset) * np.sign(self.el0)
    def test_random_closure(self):
        """SSN projection: do random projections and check closure."""
        az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
        xx, yy = self.sphere_to_plane(self.az0, self.el0, az, el)
        aa, ee = self.plane_to_sphere(self.az0, self.el0, xx, yy)
        np.testing.assert_almost_equal(self.x, xx, decimal=10)
        np.testing.assert_almost_equal(self.y, yy, decimal=10)
        assert_angles_almost_equal(az, aa, decimal=10)
        assert_angles_almost_equal(el, ee, decimal=10)
    def test_vs_mattieu(self):
        """SSN projection: compare against Mattieu's original version."""
        # The module-level *_mattieu helpers implement the same mapping with
        # the sign of m flipped, hence the -mm comparison below.
        az, el = self.plane_to_sphere(self.az0, self.el0, self.x, self.y)
        ll, mm = sphere_to_plane_mattieu(self.az0, self.el0, az, el)
        aa, ee = plane_to_sphere_mattieu(self.az0, self.el0, ll, mm)
        np.testing.assert_almost_equal(self.x, ll, decimal=10)
        np.testing.assert_almost_equal(self.y, -mm, decimal=10)
        assert_angles_almost_equal(az, aa, decimal=10)
        assert_angles_almost_equal(el, ee, decimal=10)
    def test_corner_cases(self):
        """SSN projection: test special corner cases."""
        # SPHERE TO PLANE
        # Origin maps to origin
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, 0.0], decimal=12)
        # Points 90 degrees from reference point on sphere
        xy = np.array(self.sphere_to_plane(0.0, 0.0, np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [-1.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, -np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [1.0, 0.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, np.pi / 2.0))
        np.testing.assert_almost_equal(xy, [0.0, -1.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, 0.0, 0.0, -np.pi / 2.0))
        np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
        # Reference point at pole on sphere
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, 0.0, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi, 1e-8))
        np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
        xy = np.array(self.sphere_to_plane(0.0, np.pi / 2.0, -np.pi / 2.0, 0.0))
        np.testing.assert_almost_equal(xy, [0.0, 1.0], decimal=12)
        # Points outside allowed domain on sphere should raise
        self.assertRaises(ValueError, self.sphere_to_plane, 0.0, 0.0, np.pi, 0.0)
        self.assertRaises(ValueError, self.sphere_to_plane, 0.0, 0.0, 0.0, np.pi)
        # PLANE TO SPHERE
        # Origin maps to origin
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 0.0))
        assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
        # Points on unit circle in plane
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 1.0, 0.0))
        assert_angles_almost_equal(ae, [-np.pi / 2.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, -1.0, 0.0))
        assert_angles_almost_equal(ae, [np.pi / 2.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, 1.0))
        assert_angles_almost_equal(ae, [0.0, -np.pi / 2.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, 0.0, 0.0, -1.0))
        assert_angles_almost_equal(ae, [0.0, np.pi / 2.0], decimal=12)
        # Reference point at pole on sphere
        ae = np.array(self.plane_to_sphere(0.0, np.pi / 2.0, 0.0, 1.0))
        assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -np.pi / 2.0, 0.0, -1.0))
        assert_angles_almost_equal(ae, [0.0, 0.0], decimal=12)
        # Test valid (x, y) domain
        ae = np.array(self.plane_to_sphere(0.0, 1.0, 0.0, -np.cos(1.0)))
        assert_angles_almost_equal(ae, [0.0, np.pi / 2.0], decimal=12)
        ae = np.array(self.plane_to_sphere(0.0, -1.0, 0.0, np.cos(1.0)))
        assert_angles_almost_equal(ae, [0.0, -np.pi / 2.0], decimal=12)
        # Points outside allowed domain in plane should raise
        self.assertRaises(ValueError, self.plane_to_sphere, 0.0, 0.0, 2.0, 0.0)
        self.assertRaises(ValueError, self.plane_to_sphere, 0.0, 0.0, 0.0, 2.0)
| [
"gijs@pythonic.nl"
] | gijs@pythonic.nl |
b051860e551512b9a2903bc66e8cf34c23be2fde | b10ef0636ba7a60f3adc99d7047b0a51474631f3 | /Homework assignment 2/q_2.py | 52732c1d17667f6a210073002bb037ff75233ff4 | [] | no_license | matanep/data-structures-course | 21e2b0cfe411e7be1600989b7cf8c8f47f68fe42 | d129762d751f91b6e875294f8cadc733360958df | refs/heads/master | 2021-01-19T00:20:31.683478 | 2017-06-16T12:13:46 | 2017-06-16T12:13:46 | 87,156,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,629 | py | ### question 2 ###
#Imports:
import pandas as pd
import numpy as np
# Path for data: Excel workbook whose first sheet lists one edge per row.
path = "edges.xlsx"
# Data handling: parse the sheet and turn each record into an edge tuple.
data = pd.ExcelFile(path)
df = data.parse("Sheet1")
# Each row becomes a (u, v) tuple of vertex labels.
edges = [tuple(x) for x in df.to_records(index=False)]
#part a
def creat_adjacency_matrix(edges):
    """Build a symmetric 0/1 adjacency matrix from 1-based edge tuples.

    ``edges`` is a list of (u, v) pairs with vertex labels starting at 1.
    Returns an N x N numpy array where N is the largest label seen.
    """
    # Bug fix: the original used max(max(edges)), which compares the tuples
    # lexicographically and can miss the true largest label (e.g. for
    # [(3, 9), (5, 2)] it returned 5 instead of 9).  Take the max over all
    # endpoints instead.  (The bogus ``edges=list`` default, unusable in
    # practice, was dropped; the debug ``print edge`` was removed.)
    number_of_vertices = max(max(edge) for edge in edges)
    matrix = np.zeros((number_of_vertices, number_of_vertices))
    for edge in edges:
        # vertex labels are 1-based, matrix indices are 0-based
        matrix[edge[0] - 1, edge[1] - 1] = 1
        matrix[edge[1] - 1, edge[0] - 1] = 1
    return matrix
#part b
def creat_adjacency_dict(edges=list):
    """Map each vertex label to the list of its neighbours.

    For every (u, v) edge, v is appended to u's list and u to v's list,
    preserving the order in which the edges appear.
    """
    adjacency_dict = {}
    for u, w in edges:
        adjacency_dict.setdefault(u, []).append(w)
        adjacency_dict.setdefault(w, []).append(u)
    return adjacency_dict
# Smoke-check: neighbours of vertex 1 from the spreadsheet (Python 2 print).
print creat_adjacency_dict(edges)[1]
#part c
class Queue:
    """Bounded FIFO queue; the front of the queue lives at the *end* of the
    internal list, so enqueue inserts at index 0 and dequeue pops the tail."""

    def __init__(self, max_size):
        self.max_size = max_size
        self.items = []

    def front(self):
        """Peek at the next item to be dequeued (raises IndexError if empty)."""
        return self.items[-1]

    def empty(self):
        """Return True when the queue holds no items."""
        return not self.items

    def enqueue(self, item):
        """Add an item, or return the string "Queue is full" when at capacity."""
        if len(self.items) >= self.max_size:
            return "Queue is full"
        self.items.insert(0, item)

    def dequeue(self):
        """Remove and return the oldest item (raises IndexError if empty)."""
        return self.items.pop()
def BFS(edges, v):
    """Breadth-first traversal from vertex ``v``, printing each vertex once
    in visit order.

    ``edges`` is a list of (u, w) tuples with 1-based vertex labels.  Relies
    on the module-level ``Queue`` class and ``creat_adjacency_dict``.
    (The original ``edges=list, v=int`` defaults were unusable -- calling
    BFS() always crashed -- so they were dropped.)
    """
    # Bug fix: max(max(edges)) compares tuples lexicographically and can
    # miss the true largest label; take the max over all endpoints.
    number_of_vertices = max(max(edge) for edge in edges)
    queue = Queue(number_of_vertices)
    visited = [False] * number_of_vertices
    print(v)
    visited[v - 1] = True
    queue.enqueue(v)
    adjacency_dict = creat_adjacency_dict(edges)
    while not queue.empty():
        x = queue.dequeue()
        for neighbor in adjacency_dict[x]:
            if not visited[neighbor - 1]:
                # print only newly discovered vertices (the original also
                # printed every neighbour unconditionally -- a debug leftover)
                print(neighbor)
                visited[neighbor - 1] = True
                queue.enqueue(neighbor)
# print BFS(edges,1)
#part d
def DFS(edges):
    """Depth-first traversal covering every vertex, printing labels preorder.

    Sets the module globals ``color`` and ``adjacency_dict`` consumed by the
    recursive ``VISIT`` helper.  Assumes vertex labels 1..N are contiguous
    (a label missing from the adjacency dict makes VISIT raise KeyError).
    (The unusable ``edges=list`` default was dropped.)
    """
    # Bug fix: max(max(edges)) compares tuples lexicographically and can
    # miss the true largest label; take the max over all endpoints.
    number_of_vertices = max(max(edge) for edge in edges)
    global color
    color = ['white'] * number_of_vertices
    global adjacency_dict
    adjacency_dict = creat_adjacency_dict(edges)
    for vertex in range(number_of_vertices):
        if color[vertex] == 'white':
            VISIT(vertex)
def VISIT(u):
    """Recursively visit vertex ``u`` (0-based index into the global
    ``color`` list); prints the 1-based label preorder."""
    color[u] = 'gray'   # discovered but not finished
    print(u+1)
    # adjacency_dict is keyed by 1-based labels
    neighbors_of_u = adjacency_dict[u+1]
    for neighbor in neighbors_of_u:
        if color[neighbor-1] == 'white':
            VISIT(neighbor-1)
    color[u] = 'Black'  # finished (only 'white' is ever tested, so the case is harmless)
| [
"matanep@gmail.com"
] | matanep@gmail.com |
2f2158010ebb5f904ab5ba70a9d4f451492dfca3 | 9f2b1da26d38436be2d629bf07b3a70eb03fccc3 | /Analysis/mitgcm/ice_leads/Analysis/paper_plot_snapshots.py | 85260c29ccb9a2d071b1e1cbe821cc860c7794be | [] | no_license | milicak/python_tools | d5e39eb8ed3e210fe3fffea28dddc1102c970b74 | 5fc20a637fbfb7d772ce6f76b66e649e8adf6591 | refs/heads/master | 2023-06-12T08:47:31.377116 | 2019-11-06T08:42:30 | 2019-11-06T08:42:30 | 77,075,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,926 | py | import numpy as np
#%matplotlib inline
#np.shape !!!!!
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import scipy.io
import numpy.ma as ma
from disc_cb import discrete_cmap
#import my_nanfilter
#from my_nanfilter import my_nanfilterbox
import nccf
from netCDF4 import Dataset
import sys
from netcdf_functions import nc_read
from netcdf_functions import ncgetdim
# MITGCM packages
sys.path.append('/fimm/home/bjerknes/milicak/models/MITgcm/utils/MITgcmutils/')
from MITgcmutils import rdmds
from needJet2 import shfn
from disc_cb import discrete_cmap
# Model grid size (512^3) -- not used directly below but documents the runs.
nx = 512
ny = 512
nz = 512
cmap_needjet2 = shfn()  # custom colormap for the salinity sections
root_folder = '/export/grunchfs/unibjerknes/milicak/bckup/mitgcm/ice_leads/'
projects = ['Exp01.3','Exp01.4','Exp01.5','Exp01.6','Exp01.7','Exp01.8','Exp01.9','Exp01.10','Exp01.11']
# Labels with underscores so they can be embedded in output file names.
projectslbs = ['Exp01_3','Exp01_4','Exp01_5','Exp01_6','Exp01_7','Exp01_8','Exp01_9','Exp01_10','Exp01_11']
itr = 900*14  # MITgcm iteration number of the snapshot to read
variable_name = ['S']; #T for temp; S for salt
# compute amoc
for i in range(0,9):
    print i,projects[i]
    foldername = root_folder+projects[i]+'/'
    print foldername
    # Load the (shared) grid once, on the first experiment.
    # NOTE(review): indentation of this block was inferred -- confirm that
    # x, y and Z really belong under the i == 0 guard.
    if i==0:
        depth = rdmds(foldername+'Depth');
        xc = rdmds(foldername+'XC');
        yc = rdmds(foldername+'YC');
        drc = rdmds(foldername+'DRC');
        Z = np.cumsum(drc);
        x = np.squeeze(xc[0,:])
        y = np.squeeze(yc[:,0])
    section = 255  # grid index of the slice through the domain centre
    variable = rdmds(foldername+'S',itr);
    # xz section
    fig = plt.figure()
    #im1 = pcolor(x,-Z,np.squeeze(variable[:,section,:]),cmap=cmap_needjet2,vmin=32,vmax=32.02)
    im1 = plt.pcolormesh(x,-Z,np.squeeze(variable[:,section,:]),linewidth=0,rasterized=True,shading='flat',cmap=cmap_needjet2,vmin=32,vmax=32.02)
    #im1.set_edgecolor('face')
    plt.ylim((-128,0))
    plt.xlim((0,128))
    cb = plt.colorbar(im1,pad=0.02) # pad is the distance between colorbar and figure
    cb.set_label('[psu]')
    # cb.set_label('[' r'$^\circ$' 'C]')
    plt.ylabel('depth [m]')
    plt.xlabel('x [m]')
    #plt.show()
    plt.savefig('paperfigs/verticalxz_section_'+projectslbs[i]+'_'+str(itr)+'.eps', bbox_inches='tight',format='eps', dpi=300)
    plt.clf()
    plt.close(fig)
    # yz section
    fig = plt.figure()
    #im1 = pcolor(x,-Z,np.squeeze(variable[:,section,:]),cmap=cmap_needjet2,vmin=32,vmax=32.02)
    im1 = plt.pcolormesh(y,-Z,np.squeeze(variable[:,:,section]),linewidth=0,rasterized=True,shading='flat',cmap=cmap_needjet2,vmin=32,vmax=32.02)
    #im1.set_edgecolor('face')
    plt.ylim((-128,0))
    plt.xlim((0,128))
    cb = plt.colorbar(im1,pad=0.02) # pad is the distance between colorbar and figure
    cb.set_label('[psu]')
    # cb.set_label('[' r'$^\circ$' 'C]')
    plt.ylabel('depth [m]')
    plt.xlabel('y [m]')
    #plt.show()
    plt.savefig('paperfigs/verticalyz_section_'+projectslbs[i]+'_'+str(itr)+'.eps', bbox_inches='tight',format='eps', dpi=300)
    plt.clf()
    plt.close(fig)
"ilicakme@gmail.com"
] | ilicakme@gmail.com |
1f852ee677c4b99dd1b100c890892eb2d8884e22 | 80124dad14cd25dbe51565f907d65810556d3ac5 | /draw_with_mouse.py | 738370c11481e7362ccd882177f18363784249c8 | [] | no_license | paul-lkx/opencvdemo | e4ed8d675cb251950089f1eeffa02acbf4e8da92 | e68c6a5ff71331481e0cbe6ac8753b19803cbce5 | refs/heads/master | 2021-05-05T05:26:53.687230 | 2018-02-08T06:40:32 | 2018-02-08T06:40:32 | 118,704,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,499 | py |
import cv2
import numpy as np
# events = [i for i in dir(cv2) if 'EVENT' in i]
#
# print(events)
def nothing(x):
    """No-op callback required by cv2.createTrackbar; ignores its argument."""
    return None
# Global value turn True when LButtonDown (drag in progress)
drawing = False
# when True draw rectangle, press 'm' draw curve (filled circles)
mode = True
# Drag start coordinates.
# NOTE(review): 1 looks like a typo for -1 ("no position yet") -- confirm.
ix,iy = 1,-1
# set up the callback function
def draw_circle(event,x,y,flags,param):
    """Mouse callback: draw on the global ``img`` canvas using the colour
    currently selected by the R/G/B trackbars of the 'image' window."""
    r = cv2.getTrackbarPos('R', 'image')
    g = cv2.getTrackbarPos('G', 'image')
    b = cv2.getTrackbarPos('B', 'image')
    color = (r,g,b)
    global ix,iy,drawing,mode
    # Record the drag start point when the left button goes down.
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix,iy = x,y
    elif event == cv2.EVENT_MOUSEMOVE and flags == cv2.EVENT_FLAG_LBUTTON:
        if drawing == True:
            if mode == True:
                cv2.rectangle(img,(ix,iy),(x,y),color,1)# draw rectangle
            else:
                # cv2.circle(img,(x,y),3,(0,0,255),-1)# draw circle
                # Radius = distance dragged; note this reuses (shadows) the
                # trackbar value ``r`` above.
                r = int(np.sqrt((x-ix)**2+(y-iy)**2))
                cv2.circle(img,(x,y),r,color,-1)  # filled circle at cursor
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
# Black 512x512 BGR canvas shared with the mouse callback above.
img = np.zeros((512,512,3),np.uint8)
cv2.namedWindow('image')
cv2.createTrackbar('R','image',0,255,nothing)
cv2.createTrackbar('G','image',0,255,nothing)
cv2.createTrackbar('B','image',0,255,nothing)
cv2.setMouseCallback('image', draw_circle)
while(1):
    cv2.imshow('image',img)
    k = cv2.waitKey(1)&0xFF
    if k == ord('m'):
        # 'm' toggles between rectangle and circle drawing modes
        mode = not mode
    elif k == 27:
        # Esc exits the loop
        break
cv2.destroyAllWindows()
"464716642@qq.com"
] | 464716642@qq.com |
e71f2ad4ff01abe55a1af73d50b4b2075d281736 | b2f3b7b3be11a63d5d1ddfea945439402394efe7 | /routers/stock_dividends.py | 867f17a070eb93246f322a74b193cce05c8808cc | [] | no_license | leonardoo/fast_api_stock_bvc | a8a57b9e2e3822c84829a91702ba2ce73c6ff439 | c91b9267360ed0aacd2e98a1da9b1e3b160dc837 | refs/heads/main | 2023-08-13T08:05:41.064300 | 2021-10-08T00:05:14 | 2021-10-08T00:05:14 | 383,130,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,012 | py | from typing import List
from datetime import datetime
from fastapi import APIRouter, Depends
from starlette.responses import JSONResponse
from models.stock import Stock
from models.stock_dividends import StockDividends
from models.users import User
from plugins.fastapi_users import fastapi_users
# All endpoints below are mounted under /dividends.
router = APIRouter(
    prefix="/dividends",
    tags=["dividends"],
)
def get_current_year():
    """Return the current calendar year as an int."""
    return datetime.today().year
@router.post("/", response_model=StockDividends)
async def create_dividend(dividend: StockDividends, user: User = Depends(fastapi_users.current_user(verified=True))):
    """Create or update the dividend record for the stock named by
    ``dividend.nemo``; requires a verified user, 404 if the stock is unknown."""
    stock = await Stock.objects.get_or_none(nemo=dividend.nemo)
    if not stock:
        return JSONResponse(status_code=404, content={"message": "Stock not found"})
    dividend_data = dividend.dict(exclude_unset=True)
    # total/paid_amount are popped so get_or_create matches on the remaining
    # identifying fields only; they are written back after the lookup.
    # NOTE(review): with exclude_unset=True these pops raise KeyError when the
    # client omits the fields -- confirm they are required in the schema.
    total = dividend_data.pop("total")
    paid_amount = dividend_data.pop("paid_amount")
    dividend_data.pop("nemo")
    # Dates are stored as strings (presumably ISO "YYYY-MM-DD" -- confirm).
    dividend_data["ex_dividend_date"] = str(dividend_data["ex_dividend_date"])
    dividend_data["paid_at"] = str(dividend_data["paid_at"])
    dividend_data["stock_id"] = stock.id
    dividend_obj = await StockDividends.objects.get_or_create(**dividend_data)
    dividend_obj.total = total
    dividend_obj.paid_amount = paid_amount
    await dividend_obj.update()
    return dividend_obj
@router.get("/", response_model=List[StockDividends])
async def get_list_dividends():
    """List all dividends paid during the current calendar year, oldest first."""
    year = get_current_year()
    query = StockDividends.objects.filter(
        paid_at__gte=f"{year}-01-01", paid_at__lt=f"{year+1}-01-01"
    )
    query = query.select_related("stock_id").order_by("paid_at")
    return await query.all()
@router.get("/{nemo}", response_model=List[StockDividends])
async def get_stock(nemo: str):
    """Return every dividend recorded for the stock whose ticker is ``nemo``."""
    stock = await Stock.objects.get_or_none(nemo=nemo)
    if not stock:
        return JSONResponse(status_code=404, content={"message": "Stock not found"})
    query = StockDividends.objects.filter(stock_id=stock.id)
    return await query.all()
| [
"leonardoorozcop@gmail.com"
] | leonardoorozcop@gmail.com |
d8f06548a64e4a780a3cd09530a87be81b32d7fa | 834bb79bb8cdc5999d231869bf837c7f781e7324 | /analysis/svm.py | f348fbd343241ebfec8ccaa75e1e13cee429c255 | [] | no_license | KhosrowArian/Dot.Dot.Chess | 7f0469c8579331c6318317e251f6a5795c838312 | fc786ff72e1cd39a2fe328babaf30b5d2c9279fe | refs/heads/master | 2023-07-05T00:27:50.352978 | 2021-08-11T21:17:12 | 2021-08-11T21:17:12 | 394,382,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,495 | py | from ml import *
from sklearn import tree
# import matplotlib - very important
import matplotlib.pyplot as plt
# import confusion matrix
from sklearn.metrics import confusion_matrix
# import seaborn
import seaborn as sns
from cf_matrix import *
from matplotlib.colors import ListedColormap
import numpy as np
import datetime
def plot_multiclass_fig_2D():
    """Scatter-plot chess results in the (elo_diff, time_since_gm_diff) plane,
    one colour per result class, and save to ../graphs/2d-scatter."""
    fig, ax = plt.subplots()
    df = get_chess_df()
    df_new = df[["result", "elo_diff", "time_since_gm_diff", "gm_age_diff", "age_diff"]]
    # colour per result class ('1' white wins, '-1' black wins, '0' draw)
    color_mapping = {
        '-1': "red",
        '0': "blue",
        '1': "green",
    }
    # Bug fix: dropna() returns a new frame -- the original discarded it,
    # so NaN rows were never removed.
    df_new = df_new.dropna()
    # Plot the classes in a fixed order so the legend labels (below) match.
    for cls in ['0', '-1', '1']:
        examples = df_new[df_new['result'] == cls].to_numpy()
        Xs = examples[:, 1]  # elo_diff column
        Ys = examples[:, 2]  # time_since_gm_diff column
        ax.scatter(Xs, Ys, c=color_mapping[cls], alpha=0.3)
    ax.set_title("Scatter Plot")
    ax.set_xlabel("elo_diff")
    ax.set_ylabel("time_since_gm_diff")
    # Bug fix: the labels were a *set*, whose iteration order is arbitrary,
    # so legend entries could pair with the wrong colours.  An ordered list
    # matching the plotting order above is used instead.
    ax.legend(labels=["Draw", "Black wins", "White wins"])
    plt.savefig("../graphs/2d-scatter")
def plot_multiclass_fig_3D():
    """Scatter-plot chess results in 3-D (elo_diff, time_since_gm_diff,
    gm_age_diff), one colour per class, saved to ../graphs/3d-scatter.png."""
    df = get_chess_df()
    df_new = df[["result", "elo_diff", "time_since_gm_diff", "gm_age_diff", "age_diff"]]
    ax = plt.axes(projection='3d')  # Creating a 3D axes instead of 2D like usual
    # colour per result class ('1' white wins, '-1' black wins, '0' draw)
    color_mapping = {
        '-1': "red",
        '0': "blue",
        '1': "green",
    }
    # Bug fix: dropna() returns a new frame -- the original discarded it,
    # so NaN rows were never removed.
    df_new = df_new.dropna()
    # Plot the classes in a fixed order so the legend labels (below) match.
    for cls in ['-1', '0', '1']:
        examples = df_new[df_new['result'] == cls].to_numpy()
        Xs = examples[:, 1]  # elo_diff column
        Ys = examples[:, 2]  # time_since_gm_diff column
        Zs = examples[:, 3]  # gm_age_diff column
        ax.scatter3D(Xs, Ys, Zs, c=color_mapping[cls])
    ax.set_title("Scatter Plot")
    ax.set_xlabel("Elo Difference")
    ax.set_ylabel("Time Since GM Difference")
    ax.set_zlabel("Age they became GM difference")
    # Bug fix: the labels were a *set* with arbitrary iteration order; use an
    # ordered list matching the plotting order above.
    ax.legend(labels=["Black wins", "Draw", "White wins"])
    plt.savefig("../graphs/3d-scatter.png")
def svm(model_name="svm"):
    """Train the named model on the chess feature set and print its
    training and test accuracy.

    Uses the project helpers get_trained_model / get_model_accuracy from ml.
    (A large block of commented-out plotting code was removed; see
    plot_multiclass_fig_2D/3D for the plotting entry points.)
    """
    TARGET_NAME = "result"
    FEATURE_NAMES = ["elo_diff", "time_since_gm_diff"]
    model, ohe, train_df, test_df = get_trained_model("chess", model_name, TARGET_NAME, FEATURE_NAMES)
    test_acc, test_y_pred, test_y_targ = get_model_accuracy(model, test_df, ohe, "chess", TARGET_NAME, FEATURE_NAMES)
    train_acc, train_y_pred, train_y_targ = get_model_accuracy(model, train_df, ohe, "chess", TARGET_NAME, FEATURE_NAMES)
    print("[" + model_name + "] Test accuracy: ", test_acc)
    print("[" + model_name + "] Training accuracy: ", train_acc)
if __name__ == "__main__":
    # Run the SVM experiment when executed as a script.
    print("SVM")
    svm()
    # plot_multiclass_fig_3D()
    # plot_multiclass_fig_2D()
"65761790+KhosrowArian@users.noreply.github.com"
] | 65761790+KhosrowArian@users.noreply.github.com |
e83a949fb6918809da479930ce5638965fe1e451 | f0ad3fed3e93cd96c720eddf624fec5e4b57abcc | /SWEA/List2_4843_특별한정렬.py | f9167bf6b5505de292bfb1080171c9d3069ef449 | [] | no_license | datasci-study/sehwaryu | 2efbddf5ac00e2852c873d3a3e599acb1b56398b | 61d2e4e65603117e4e65785e0df67494ad940daa | refs/heads/master | 2022-12-24T01:03:38.067722 | 2020-09-29T09:32:27 | 2020-09-29T09:32:27 | 291,937,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 936 | py | # [입력]
# 첫 줄에 테스트 케이스 개수 T가 주어진다. 1<=T<=50
# 다음 줄에 정수의 개수 N이 주어지고 다음 줄에 N개의 정수 ai가 주어진다. 10<=N<=100, 1<=ai<=100
# [출력]
# 각 줄마다 "#T" (T는 테스트 케이스 번호)를 출력한 뒤, 특별히 정렬된 숫자를 10개까지 출력한다.
T = int(input())
for t in range(T):
N = int(input())
lst = list(map(int, input().split()))
# 먼저 리스트 정렬하기
lst.sort(reverse = True)
count = N
result = []
# 카운트를 하나씩 줄이고 홀수, 짝수 될 때마다 리스트의 앞, 뒤에서 pop 하고 새 리스트에 append하기
for i in range(N):
count -=1
if count % 2 == 0:
result.append(lst[-1])
lst.pop(-1)
else:
result.append(lst[0])
lst.pop(0)
print("#{} ".format(t+1), end='')
print(*result)
| [
"sehwa_ryu@berkeley.edu"
] | sehwa_ryu@berkeley.edu |
e1afce12eacc30e6c3e3491c12e832645f797735 | 529d4e724358397962fa4278972afec2c1c8111d | /lect1_exercise1_dat_NETWORK.py | c75a4f0f9e54318a98ee31739176064db1f57210 | [
"MIT"
] | permissive | wobniarin/Optimization_Energy_Systems_0121 | 490b8fe2369c981fcd93c355c7d3d52d62e3a8c0 | 693cb6ab59c796f58de28e1a9d33a27d0fba2a57 | refs/heads/main | 2023-02-13T06:07:13.552499 | 2021-01-12T22:18:28 | 2021-01-12T22:18:28 | 327,304,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,223 | py | # Optimization Course - Lecture 1 Exercise 1 using PYOMO
# Author: Íngrid Munné-Collado
# Date: 07/01/2021
# Requirements: Install pyomo, glpk and gurobi. You should apply for an academic license in gurobi
from pyomo.environ import *
import pyomo.environ as pyo
from pyomo.opt import SolverFactory
import pandas as pd
import numpy as np
# Creating the model (abstract: sets/params are populated later from a .dat file)
model = AbstractModel()
# Defining Sets
model.G = Set()  # generators
model.D = Set()  # demands
model.N = Set()  # buses in the network
model.L = Set()  # lines in the network
# Defining Parameters
model.Pgmax = Param(model.G)   # generator capacity
model.Pdmax = Param(model.D)   # maximum demand
model.costs_g = Param(model.G) # generator offer price
model.costs_d = Param(model.D) # demand bid price
model.Fmaxnn = Param(model.N, model.N, mutable=True)  # line capacity between bus pairs
model.Bnn = Param(model.N, model.N)                   # susceptance matrix
model.location_generators = Param(model.G, model.N)   # generator-to-bus incidence
model.location_demands = Param(model.D, model.N)      # demand-to-bus incidence
# Defining Variables
model.pd = Var(model.D, within=NonNegativeReals)  # dispatched demand
model.pg = Var(model.G, within=NonNegativeReals)  # dispatched generation
model.thetan = Var(model.N, within=Reals)         # bus voltage angles
model.flownm = Var(model.L, within=Reals)  # NOTE(review): declared but never used below
# Defining Objective Function
def SW(model):
    # Social welfare = total demand utility minus total generation cost.
    return sum(model.costs_d[d] * model.pd[d] for d in model.D) - sum(model.costs_g[g] * model.pg[g] for g in model.G)
model.social_welfare = Objective(rule=SW, sense=maximize)
# Defining constraints
# C1 demand max constraint: dispatched demand cannot exceed its bid quantity
def pd_MAX_limit(model,d):
    return model.pd[d] <= model.Pdmax[d]
model.pd_max_limit = Constraint(model.D, rule=pd_MAX_limit)
# C2 generators max constraint: dispatch cannot exceed generator capacity
def pg_MAX_limit(model,g):
    return model.pg[g] <= model.Pgmax[g]
model.pgmax_limit = Constraint(model.G, rule=pg_MAX_limit)
# C4 Power flow Upper bound (DC approximation)
def Powerflownm(model, n, m):
    # Flow from n to m; the reverse direction is bounded by the (m, n)
    # instance of this constraint, since the rule runs over all N x N pairs.
    pf_nm = model.Bnn[n,m] * (model.thetan[n] - model.thetan[m])
    return pf_nm <= model.Fmaxnn[n,m]
model.powerflow = Constraint(model.N, model.N, rule=Powerflownm)
# C5 SLACK BUS: pin the voltage angle of the reference bus to zero.
# NOTE(review): assumes bus 0 is a member of model.N -- confirm in the .dat file.
def slack(model):
    return model.thetan[0] == 0
# Bug fix: this was registered as Constraint(model.N, rule=slack), but the
# rule takes no bus index, so Pyomo would call slack(model, n) and fail.
# The rule expresses a single scalar constraint, so register it unindexed.
model.slackbus = Constraint(rule=slack)
# C6 NODAL POWER BALANCE: generation + imports must cover demand at each bus
def nodalPowerBalancen(model, n):
    # incidence-weighted generation located at bus n
    gen_node_n = sum(model.pg[g] * model.location_generators[g,n] for g in model.G)
    # incidence-weighted demand located at bus n
    dem_node_n = sum(model.pd[d] * model.location_demands[d,n] for d in model.D)
    # net DC power flow leaving bus n toward all other buses
    powerflow_n = sum(model.Bnn[n,m] * (model.thetan[n] - model.thetan[m]) for m in model.N)
    return dem_node_n + powerflow_n - gen_node_n == 0
model.nodalPFB = Constraint(model.N, rule=nodalPowerBalancen)
# choose the solver (requires a Gurobi installation/licence)
opt = pyo.SolverFactory('gurobi')
## in order to solve the problem we have to run this command in a terminal prompt
## pyomo solve --solver=glpk Transport_problem_example_pyomo.py datos.dat
# Create a model instance and optimize
instance = model.create_instance('data_L1_E1_NETWORK.dat')
# Create a "dual" suffix component on the instance
# so the solver plugin will know which suffixes to collect
instance.dual = pyo.Suffix(direction=pyo.Suffix.IMPORT)
# Solve the optimization problem
results = opt.solve(instance)
# Display results of the code.
instance.display()
# Display all dual variables (shadow prices gathered via the IMPORT suffix)
print("Duals")
for c in instance.component_objects(pyo.Constraint, active=True):
    # Bug fix: the original printed the literal string " Constraint, c" for
    # every constraint; show the constraint object itself instead.
    print(" Constraint", c)
    for index in c:
        print(" ", index, instance.dual[c[index]])
| [
"ingrid.munne@citcea.upc.edu"
] | ingrid.munne@citcea.upc.edu |
ccb672ecd5d7a8c073a4ab47a31deabb52f42f8c | 7ccec383aff02faf30fa94499a72e4568bc23b4d | /logisticRegression.py | 3b7d0aeb5c41952f7fbe6c5f8bcab0f83bfe5e21 | [] | no_license | adamgregorymartin/xc_ski_world_cup_predictions | 6af0916344d26c43a0e7425f1751556f0ccfb5a2 | f14c4fe075480f35588ff2dbeb794321e44e884f | refs/heads/main | 2023-03-31T07:44:43.724116 | 2021-03-31T21:04:32 | 2021-03-31T21:04:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,355 | py | '''
Functions responsible for training a logistic regression model
'''
import numpy as np
import storage
import trainingData
import machineLearning
def cost(X, y, theta, reg):
    """Regularized logistic-regression cost and gradient.

    X: (m, n) training matrix; y: (m, 1) column of 0/1 labels;
    theta: (n, 1) coefficient column; reg: regularization constant.
    Returns (J, grad) with grad shaped like theta.
    """
    m = X.shape[0]
    h = machineLearning.sigmoid(X.dot(theta))
    # average cross-entropy over the m samples
    J = np.sum(np.log(h) * (-y) - np.log(1-h) * (1-y)) / m
    J = J + np.sum(theta[1:,:] ** 2) * reg / (2*m) # add regularization (bias excluded)
    grad = (np.transpose(X).dot(h-y)) / m
    grad[1:,:] = grad[1:,:] + theta[1:,:] * reg / m # add regularization (bias excluded)
    return J, grad
def predictProb(X, theta):
    """Return the predicted probability (sigmoid of X.theta) for each row."""
    return machineLearning.sigmoid(X.dot(theta))
def predictBool(X, theta):
    """Classify each row as 0 or 1 by rounding its predicted probability."""
    probabilities = predictProb(X, theta)
    return np.round(probabilities, 0)
def accuracy(X, y, theta):
    """Return the fraction of rows whose 0/1 prediction matches label column y."""
    matches = (predictBool(X, theta) == y).astype(int)
    return np.mean(matches)
def trainLogisticRegression(data, order, reg):
    """Train logistic regression on ``data`` (features + final label column).

    data: numpy matrix, last column is the 0/1 label;
    order: maximum degree of the polynomial feature expansion;
    reg: regularization constant.
    Returns theta in un-normalized feature space.
    """
    # Get data
    X = data[:,:-1]
    y = data[:,-1:]
    # Add nonlinear terms
    X = machineLearning.expandFeatures(X, order)
    # Normalize features so that gradient descent works well
    # Don't normalize the constant column, because this has sigma=0
    X[:,1:], mu, sigma = machineLearning.normalize(X[:,1:])
    # Initialize theta and run gradient descent
    theta = np.zeros((X.shape[1], 1))
    theta, costHistory = machineLearning.gradientDescent(X, y, theta, cost, .05, reg, 10000)
    if True:  # NOTE(review): debug toggle left on -- prints cost progression
        print('Progression of cost through gradient descent:')
        print(costHistory[0])
        print(costHistory[int(len(costHistory)/2)])
        print(costHistory[-1])
    # Output
    print('Training Accuracy: ' + str(accuracy(X, y, theta)))
    # Map theta back to the original (un-normalized) feature scale.
    theta = machineLearning.undoNormalizeTheta(theta, mu, sigma)
    return theta
def trainWithoutOutliers(data, order, reg, sds):
    """Train, drop samples whose abs error exceeds mu + sds*sd, retrain.

    data/order/reg: as in trainLogisticRegression.
    sds: number of standard deviations of prediction error above which a
         training sample is considered an outlier and removed.
    Returns the theta from the second (outlier-free) training run.
    """
    # Train twice
    # The first time, train like normal
    # Then remove outliers, and train again
    # Theoretically this could improve performance on a test set
    # Set up
    theta = trainLogisticRegression(data, order, reg)
    X = data[:,:-1]
    X = machineLearning.expandFeatures(X, order)
    y = data[:,-1:]
    # Per-sample absolute prediction error from the first model.
    error = np.absolute(predictProb(X, theta) - y)
    mu = np.mean(error)
    print('Average abs(error) original: ' + str(mu))
    sd = np.std(error)
    # Keep only the rows whose error is within the threshold.
    goodRows = np.where(error < (sds*sd + mu))[0]
    print('Removed ' + str(X.shape[0] - goodRows.shape[0]) + ' training samples.')
    newTheta = trainLogisticRegression(data[goodRows,:], order, reg)
    newMu = np.mean(np.absolute(predictProb(X[goodRows,:], newTheta) - y[goodRows,:]))
    print('Average abs(error) after outlier removal: ' + str(newMu))
    return newTheta
def main():
    """Load (or rebuild) the training data CSV and run a demo training."""
    # Test Module Functionality
    # The if False branch regenerates the CSV from raw data; the else
    # branch loads the cached CSV. Flip the literal to switch.
    if False:
        data = trainingData.collect1()
        storage.store2DListAsCsv(data, './data/trainingData/trainingData1.csv')
        data = machineLearning.matrix(data)
    else:
        data = storage.read2DListFromCsv('./data/trainingData/trainingData1.csv')
        data = machineLearning.matrix(data)
    print(str(data.shape[0]) + ' x ' + str(data.shape[1]))
    # order=2 expansion, no regularization, 2-sigma outlier cutoff.
    trainWithoutOutliers(data, 2, 0, 2)
if __name__ == '__main__': # Call main() if this was run from the command line
    main()
| [
"noreply@github.com"
] | adamgregorymartin.noreply@github.com |
1d66a920a4baf615b514dd181f5f1ee8e73eddd2 | 1c8a0fcf0aece43cb2cf30d1ecf2d84a74a734c3 | /leet_easy_python/leet_035/leet_035.py | eff4faa9d905ea86a91ded1d65f30fb053f244e4 | [] | no_license | zmeeks/leet_code | df936d38aa63115150fb464c74d6a112f1669d89 | ea3d7201c3f5b2db2ca6fc526e0f9bc36e17bfad | refs/heads/master | 2021-01-20T22:20:21.487734 | 2017-12-06T10:36:22 | 2017-12-06T10:36:22 | 101,813,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | class Solution(object):
def searchInsert(self, nums, target):
sz = len(nums)
for i in range(0,sz):
if target <= nums[i]: return i
return sz
"""
:type nums: List[int]
:type target: int
:rtype: int
""" | [
"noreply@github.com"
] | zmeeks.noreply@github.com |
2ba77bd1543fecc3b09cfa30cfb07a9d11f5ca35 | 04f2c7d0f75ab63c097de5282cd3ef73eb6c3eac | /src/cbir/views/home.py | cda71b6d71350c0118b042e4f676ff7ed937a583 | [] | no_license | tiendv/coloursearch | c5d630ab29d86dbff3f2bdd994c9304b8a130394 | 2a02b4954d824248a725d484ad3718cdad725de0 | refs/heads/master | 2022-07-15T22:06:09.484872 | 2020-04-11T11:03:08 | 2020-04-11T11:03:08 | 226,481,613 | 0 | 1 | null | 2022-06-21T23:54:04 | 2019-12-07T08:47:14 | CSS | UTF-8 | Python | false | false | 146 | py | from django.shortcuts import render
def home(request, *args, **kwargs):
print(request.user)
return render(request, 'html/home.html', {}) | [
"nxhao235@gmail.com"
] | nxhao235@gmail.com |
4474e3448a616a9b20f1ebfd59355cdfb86e3848 | cbf36c9bfaa50e5293353e9b87a61d4c86e8d35c | /DatabaseToExcel.py | d8ef0fa989bc08a3e85bf6b2c9030296c4a68abd | [] | no_license | srslakshmi1997/python-data-export-to-excel-from-sqlite3 | 3db1589ddcab4becf1671a0f0c90b09e53ebdf05 | f9128ba6fb9891757518e068af326d1ed2daee4f | refs/heads/master | 2021-02-14T13:59:48.477961 | 2020-03-04T05:01:09 | 2020-03-04T05:01:09 | 244,809,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,052 | py | import xlsxwriter
import sqlite3
# Open the workbook and define the worksheet
book = xlsxwriter.Workbook("Quotedb.xlsx")
sheet = book.add_worksheet("dataimported")
# Establish a Sqlite connection
path = 'Quotedatabase.db'
db = sqlite3.connect(path)
# Get the cursor, which is used to traverse the database, line by line
cursor = db.cursor()
#Read data from the customerdb database file
cursor.execute('''select * from quotedb''')
all_rows = cursor.fetchall()
#initialize rows and columns of the worksheet
row = 0
col = 0
#Insert the columns name in to the excel
column_Values = [ 'Name' ,'Project_name','Filament_length(Hours)', 'Print_time(Hours)','Raw_material_cost_per_meter',
'Raw_material_cost',
'Power_consumption_cost',
'Machine_depreciation_cost',
'Total_mfg_cost',
'Number_of_grids_used',
'Number_of_hours_of_Post_process',
'Wet_sanding_cost',
'Total_post_process_cost',
'Total_design_cost',
'Total_slicing_cost',
'Total_shipping_cost',
'Total_Packaging_cost',
'Total_profit_cost',
'Internet_charges',
'Conversation_charges',
'Laptop_electricity_charges',
'Laptop_depreciation_charges',
'Admin_and_Marketing_costs',
'Rent_cost',
'Total_Misc_costs',
'Total_Project_Cost']
for heading in column_Values:
sheet.write(row,col,heading)
col+=1
# Create a For loop to iterate through each entries in the db file
for entry in all_rows:
row += 1
col = 0
for data_val in entry:
sheet.write(row,col,data_val)
col += 1
#Close the workbook
book.close()
# Close the cursor
cursor.close()
# Commit the transaction
db.commit()
# Close the database connection
db.close()
| [
"noreply@github.com"
] | srslakshmi1997.noreply@github.com |
217e6f4cf75c0656465d8517ea95969b311fa004 | d15fc49a8b41ba1cb4975b2ff0cc2b176e531f9c | /LoveCalculator/main/forms.py | c469c6b63f4ca2bb76b83bbbfe8b3dd6b81e0adb | [] | no_license | ayo6706/Love-Calculator | 431d76dd69d5eaaf6549677a23a950aabb37625c | 553cdcfda7073eb2998887cde77be58875343f4e | refs/heads/master | 2023-02-07T13:02:22.318885 | 2020-12-31T01:36:52 | 2020-12-31T01:36:52 | 325,684,893 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | from django.forms import TextInput
from django import forms
class NameForm(forms.Form):
    """Form collecting two names for the love-calculator match."""
    male = forms.CharField(label='Your male', max_length=100)
    female = forms.CharField(label='Your female', max_length=100)
    # NOTE(review): "widgets" is a ModelForm Meta option; as a plain class
    # attribute on a forms.Form it appears to have no effect, and the 'name'
    # key matches neither field above -- confirm intent.
    widgets = {'name': TextInput(attrs={'class': 'input', 'placeholder': 'City Name'})}
| [
"35877863+ayo6706@users.noreply.github.com"
] | 35877863+ayo6706@users.noreply.github.com |
e962e6adecf53124cca11673516f81f4f41c7a92 | 763139777e428f743a735660f92f343e53bb4f41 | /rate/wsgi.py | 012e16065542a90497c704b7752ef9b104d1bb42 | [
"MIT"
] | permissive | iyerikuzwe/Award | 0dfad8d9750042cbbec55207c6def579194d6990 | a5ac352a7d05d23c92167022e00648caeab62590 | refs/heads/master | 2020-05-04T21:30:34.308139 | 2019-04-05T08:00:23 | 2019-04-05T08:00:23 | 179,478,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | """
WSGI config for rate project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before building the app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rate.settings")
# Module-level WSGI callable picked up by application servers.
application = get_wsgi_application()
| [
"iyerikuzweregine19@gmail.com"
] | iyerikuzweregine19@gmail.com |
6acac5aa9038019a7a170dab4d585a97d014b64c | 81b1a96bffbfbb44ee6f67014a6b35a5c2b1dfcd | /train_point_regression.py | 090eb319726355b4fdc8981393437aba37b70ed4 | [] | no_license | zhangjinsong3/point_regression.pytorch | dfa60cb71b93c1f66dc368f06030151f207e8665 | 81d34dd687a9f5cf5eff782cc733a9fa2a6271b9 | refs/heads/master | 2020-04-25T21:31:09.160697 | 2019-02-28T09:35:08 | 2019-02-28T09:35:08 | 173,082,313 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,950 | py | import torch
import torch.nn as nn
import torchvision
import torchvision.models as models
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # restrict training to GPU 0
# Device configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Hyper parameters
num_epochs = 400
batch_size = 64
batch_size_test = 1  # test loader evaluates one sample at a time
learning_rate = 0.0001
# Data loader
from datasets import BoxPoint, BoxPointFromSeven, BoxPointAsOne
# Training augmentation: resize + color jitter.
# NOTE(review): transforms.Resize(96, 96) passes the second 96 as the
# interpolation argument, not a (h, w) size -- confirm intended.
trans = transforms.Compose(transforms=[transforms.Resize(96, 96),
                                       transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.1, hue=0.1),
                                       transforms.ToTensor()])
# train_dataset = BoxPoint('/media/zjs/A22A53E82A53B7CD/kuaice/data_mingjian/data/data_train.txt',
#                          '/media/zjs/A22A53E82A53B7CD/kuaice/data_mingjian/data/data_train',
#                          ignore=['0', '4', '5', '6'],
#                          transform=trans)
train_dataset = BoxPointFromSeven('../../data_mingjian/data/image_train/train.txt',
                                  '../../data_mingjian/data/image_train',
                                  transform=trans)
# train_dataset = BoxPointAsOne('../../data_mingjian/data/image_train/train_one.txt',
#                               '../../data_mingjian/data/image_train',
#                               transform=trans)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
# Test pipeline: resize only, no color jitter.
trans_test = transforms.Compose(transforms=[transforms.Resize(96, 96),
                                            transforms.ToTensor()])
# test_dataset = BoxPoint('/media/zjs/A22A53E82A53B7CD/kuaice/data_mingjian/data/data_test.txt',
#                         '/media/zjs/A22A53E82A53B7CD/kuaice/data_mingjian/data/data_test',
#                         ignore=['0', '4', '5', '6'],
#                         transform=trans_test)
test_dataset = BoxPointFromSeven('../../data_mingjian/data/image_test/test.txt',
                                 '../../data_mingjian/data/image_test',
                                 transform=trans_test)
# test_dataset = BoxPointAsOne('../../data_mingjian/data/image_test/test.txt',
#                              '../../data_mingjian/data/image_test',
#                              transform=trans_test)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size_test,
                                          shuffle=False)
# NOTE(review): the resnet18 built here is immediately discarded --
# basenet is rebound to MobileNet() below. Kept as-is for reference.
basenet = models.resnet18(pretrained=False, num_classes=2)
# basenet.avgpool = nn.AdaptiveAvgPool2d(1)
basenet.avgpool = nn.AvgPool2d(7, stride=1)
# from networks import miniResNet
# basenet = miniResNet(models.resnet.BasicBlock, [2, 2, 2])
# from networks import PointNet
# basenet = PointNet()
from networks import MobileNet
basenet = MobileNet()  # the model actually trained
# resume training from checkpoint
# checkpoint = torch.load('./checkpoints/pointnet_all_0200.ckpt')
# basenet.load_state_dict(checkpoint)
# Loss and optimizer
criterion = nn.SmoothL1Loss()
# criterion = nn.MSELoss()
# from losses import TripletLoss
# criterion = TripletLoss(margin=1)
optimizer = torch.optim.Adam(basenet.parameters(), lr=learning_rate, weight_decay=1e-5)
# Train the model
writer = SummaryWriter()  # TensorBoard logging
# show the net
# dummy_input = torch.rand(8, 3, 224, 224)
# with SummaryWriter(comment='resnet34') as w:
#     w.add_graph(basenet, dummy_input)
basenet = basenet.to(device)
total_step = len(train_loader)
# Main training loop: SmoothL1 regression of point coordinates.
for epoch in range(num_epochs):
    for i, (image, point) in enumerate(train_loader):
        image = image.to(device)
        point = point.to(device)
        # Forward pass
        output1 = basenet(image)
        loss = criterion(output1, point)
        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # show the loss curve
        writer.add_scalar('scalar/loss_0', loss.item(), epoch * total_step + i)
        # show the filter learned
        # for name, param in basenet.named_parameters():
        #     writer.add_histogram(name, param.clone().cpu().data.numpy(), epoch * total_step + i)
        if (i + 1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))
    # Every 20 epochs: evaluate on both loaders and checkpoint the weights.
    if (epoch + 1) % 20 == 0:
        from eval_point_regression import eval_point_regression
        # Save the model checkpoint
        eval_point_regression(basenet, train_loader, batch_size=batch_size)
        eval_point_regression(basenet, test_loader, batch_size=batch_size_test)
        torch.save(basenet.state_dict(), 'checkpoints/mobilenet_0_%04d.pth' % (epoch+1))
        basenet.train()  # eval helper may leave the model in eval mode
print("box regression trainning finished! ")
writer.close()
| [
"01376022@sf-express.com"
] | 01376022@sf-express.com |
394f102c2bfe249737e0c07da11cd436beb8c177 | 9c93f463b693f29f493ee3e86d6df702d3dcf394 | /tests/test_vis_gradcam.py | f8e49f486f476928ae60a0ffec3e727f3c34c1c7 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | eddyleelin/MONAI | a52fc5264c3b9874b5b85afd6435bb46b25eef17 | d78c669c67e38ddfbe572f6a0438e9df0b8c65d7 | refs/heads/main | 2023-06-02T07:56:31.604062 | 2021-06-01T14:06:58 | 2021-06-01T14:06:58 | 373,514,231 | 0 | 0 | Apache-2.0 | 2021-06-03T13:19:17 | 2021-06-03T13:19:17 | null | UTF-8 | Python | false | false | 3,223 | py | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.networks.nets import DenseNet, DenseNet121, SEResNet50
from monai.visualize import GradCAM
# 2D
# Each case is [config dict, expected CAM output shape]. The config gives
# the model to build, the input tensor shape, the expected feature-map
# shape reported by the CAM, and the layer the CAM hooks.
# 2D
TEST_CASE_0 = [
    {
        "model": "densenet2d",
        "shape": (2, 1, 48, 64),
        "feature_shape": (2, 1, 1, 2),
        "target_layers": "class_layers.relu",
    },
    (2, 1, 48, 64),
]
# 3D
TEST_CASE_1 = [
    {
        "model": "densenet3d",
        "shape": (2, 1, 6, 6, 6),
        "feature_shape": (2, 1, 2, 2, 2),
        "target_layers": "class_layers.relu",
    },
    (2, 1, 6, 6, 6),
]
# 2D
TEST_CASE_2 = [
    {
        "model": "senet2d",
        "shape": (2, 3, 64, 64),
        "feature_shape": (2, 1, 2, 2),
        "target_layers": "layer4",
    },
    (2, 1, 64, 64),
]
# 3D
TEST_CASE_3 = [
    {
        "model": "senet3d",
        "shape": (2, 3, 8, 8, 48),
        "feature_shape": (2, 1, 1, 1, 2),
        "target_layers": "layer4",
    },
    (2, 1, 8, 8, 48),
]
class TestGradientClassActivationMap(unittest.TestCase):
    """Shape and consistency checks for monai.visualize.GradCAM."""

    @parameterized.expand([TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3])
    def test_shape(self, input_data, expected_shape):
        """Build the configured model, run GradCAM and verify shapes."""
        if input_data["model"] == "densenet2d":
            model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=3)
        if input_data["model"] == "densenet3d":
            model = DenseNet(
                spatial_dims=3, in_channels=1, out_channels=3, init_features=2, growth_rate=2, block_config=(6,)
            )
        if input_data["model"] == "senet2d":
            model = SEResNet50(spatial_dims=2, in_channels=3, num_classes=4)
        if input_data["model"] == "senet3d":
            model = SEResNet50(spatial_dims=3, in_channels=3, num_classes=4)
        device = "cuda:0" if torch.cuda.is_available() else "cpu"
        model.to(device)
        model.eval()
        cam = GradCAM(nn_module=model, target_layers=input_data["target_layers"])
        image = torch.rand(input_data["shape"], device=device)
        result = cam(x=image, layer_idx=-1)
        # The CAM should have picked the model's argmax class by default.
        np.testing.assert_array_equal(cam.nn_module.class_idx.cpu(), model(image).max(1)[-1].cpu())
        fea_shape = cam.feature_map_size(input_data["shape"], device=device)
        self.assertTupleEqual(fea_shape, input_data["feature_shape"])
        self.assertTupleEqual(result.shape, expected_shape)
        # check result is same whether class_idx=None is used or not
        result2 = cam(x=image, layer_idx=-1, class_idx=model(image).max(1)[-1].cpu())
        np.testing.assert_array_almost_equal(result, result2)


if __name__ == "__main__":
    unittest.main()
| [
"noreply@github.com"
] | eddyleelin.noreply@github.com |
30afc63d7c5839fede97f2925e6bbb6f93e81b28 | e65453aecb1b64f75a4a6eee7ca1328984773d5d | /Test/test1.py | 662d8a12291d456ee624881943ae9a53dc213b46 | [] | no_license | huyendtt58/raSAT | 1a9a0a1c05b81877416e82c9c102ae92c6d80931 | b4f7c8995eef71bd099046c761ea19ea904fd18d | refs/heads/master | 2021-01-19T14:27:24.036231 | 2017-02-23T12:36:52 | 2017-02-23T12:36:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,520 | py | import fnmatch
import os
import subprocess
import csv
matches = []
def run(directory, initLowerBound, initUpperBound, initSbox, timeout, resultFile):
  """Run the ./raSAT solver on every *.smt2 benchmark under directory.

  Each problem is retried with progressively wider variable bounds (and a
  shrinking sbox after an "unknown" answer) until it is "sat", the
  accumulated time exceeds timeout, or all bounds are exhausted.
  Per-problem statistics are appended as CSV rows to resultFile inside
  directory; a summary row with the solved-problem count is written last.
  NOTE(review): Python 2 syntax (print statement, reader.next(), 'wb' csv
  mode); initLowerBound/initUpperBound are captured but never used.
  """
  lowerBound = initLowerBound
  upperBound = initUpperBound
  #sbox = initSbox
  solvedProblems = 0
  # Write the CSV header row (truncates any previous result file).
  with open(os.path.join(directory, resultFile), 'wb') as csvfile:
    spamwriter = csv.writer(csvfile)
    spamwriter.writerow(['Problem', 'nVars', 'maxVars', 'nAPIs', 'time', 'iaTime', 'testingTime', 'usCoreTime', 'parsingTime', 'decompositionTime', 'miniSATTime', 'miniSATVars', 'miniSATClauses', 'miniSATCalls', 'raSATClauses', 'decomposedLearnedClauses', 'UNSATLearnedClauses', 'unknownLearnedClauses', 'result', 'raSATResult', 'EQ', 'NEQ'])
    csvfile.close()
  for root, dirnames, filenames in os.walk(directory):
    for filename in fnmatch.filter(filenames, '*.smt2'):
      print "Checking ", filename
      # Reset per-problem state; sbox is rescaled so the first "unknown"
      # retry below divides it back to initSbox.
      sbox = initSbox * 10
      nVars = 0
      maxVars = 0
      nAPIs = 0
      iaTime = 0
      testingTime=0
      usTime=0
      parsingTime=0
      decompositionTime=0
      miniSATTime=0
      miniSATVars = 0;
      time=0
      miniSATCalls=0
      miniSATClauses = 0
      raSATClauses=0
      decomposedLearnedClauses=0
      UNSATLearnedClauses=0
      unknownLearnedClauses=0
      result='unknown'
      raSATResult = 'unknown'
      isEquation = '0'
      isNotEquation = '0'
      # Read the expected status annotation from the benchmark itself.
      try:
        f = open(os.path.join(root, filename))
        for line in f:
          if line.startswith('(set-info :status'):
            result = line[18:len(line)-2]
        f.close()
      except IOError:
        result = 'unknown'
      # Bound schedule: unit box, then [-10,10], then unbounded.
      bounds = ['lb=-1 1', 'lb=-10 10', 'lb=-inf inf']
      boundsNum = len(bounds)
      boundIndex = 0
      while (raSATResult != 'sat' and time < timeout and boundIndex < boundsNum):
        if raSATResult == 'unknown':
          sbox = sbox / 10
        subprocess.call(["./raSAT", os.path.join(root, filename), bounds[boundIndex], 'sbox=' + str(sbox), 'tout=' + str(timeout-time)])
        # The solver reports its statistics in a sibling .tmp CSV file;
        # a missing file is treated as a timeout.
        try:
          with open(os.path.join(root, filename) + '.tmp', 'rb') as csvfile:
            reader = csv.reader(csvfile)
            output = reader.next()
            nVars = output[1]
            maxVars = output[2]
            nAPIs = output[3]
            time += float(output[4])
            iaTime += float(output[5])
            testingTime += float(output[6])
            usTime += float(output[7])
            parsingTime += float(output[8])
            decompositionTime += float(output[9])
            miniSATTime += float(output[10])
            miniSATVars += float(output[11])
            miniSATClauses += float(output[12])
            miniSATCalls += float(output[13])
            raSATClauses += float(output[14])
            decomposedLearnedClauses += float(output[15])
            UNSATLearnedClauses += float(output[16])
            unknownLearnedClauses += float(output[17])
            isEquation = output[18]
            isNotEquation = output[19]
            raSATResult = output[20]
            csvfile.close()
        except IOError:
          raSATResult = 'timeout'
        # "unsat" within a finite box is inconclusive: widen the bounds.
        if raSATResult == 'unsat':
          boundIndex += 1
      if raSATResult == 'sat' or raSATResult == 'unsat':
        solvedProblems += 1
      # Append this problem's accumulated statistics.
      with open(os.path.join(directory, resultFile), 'a') as csvfile:
        spamwriter = csv.writer(csvfile)
        spamwriter.writerow([os.path.join(root, filename), nVars, maxVars, nAPIs, time, iaTime, testingTime, usTime, parsingTime, decompositionTime, miniSATTime, miniSATVars, miniSATClauses, miniSATCalls, raSATClauses, decomposedLearnedClauses, UNSATLearnedClauses, unknownLearnedClauses, result, raSATResult, isEquation, isNotEquation])
        csvfile.close()
      # Clean up the solver's scratch files; missing files are fine.
      try:
        os.remove(os.path.join(root, filename) + '.tmp')
      except OSError:
        pass
      try:
        os.remove(os.path.join(root, filename)[:-5] + '.in')
      except OSError:
        pass
      try:
        os.remove(os.path.join(root, filename)[:-5] + '.out')
      except OSError:
        pass
      try:
        os.remove(os.path.join(root, filename)[:-5] + '.rs')
      except OSError:
        pass
  # Trailing summary row: the solved-problem count replaces 'raSATResult'.
  with open(os.path.join(directory, resultFile), 'a') as csvfile:
    spamwriter = csv.writer(csvfile)
    spamwriter.writerow(['Problem', 'nVars', 'maxVars', 'nAPIs', 'time', 'iaTime', 'testingTime', 'usCoreTime', 'parsingTime', 'decompositionTime', 'miniSATTime', 'miniSATVars', 'miniSATClauses', 'miniSATCalls', 'raSATClauses', 'decomposedLearnedClauses', 'UNSATLearnedClauses', 'unknownLearnedClauses', 'result', solvedProblems, 'EQ', 'NEQ'])
    csvfile.close()
# Benchmark invocations: run(directory, lb, ub, sbox, timeout, resultFile).
# Only the meti-tarski line is active; the rest are kept for reference.
#run ('zankl', -10, 10, 0.1, 500, 'with_dependency_sensitivity_restartSmallerBox_boxSelectionUsingSensitivity.xls')
#run ('QF_NRA/meti-tarski', -10, 10, 0.1, 500, 'with_dependency_sensitivity_restartSmallerBox_boxSelectionUsingSensitivity.xls')
#run ('Test/meti-tarski', -1, 1, 0.1, 60, 'result.xls')
#run ('Test/zankl', -10, 10, 0.1, 30, 'result.xls')
#run ('Test/smtlib-20140121/QF_NIA/AProVE', -10, 10, 0.1, 60, 'result.xls')
#run ('Test/smtlib-20140121/QF_NIA/calypto', -10, 10, 0.1, 60, 'result.xls')
#run ('Test/smtlib-20140121/QF_NIA/leipzig', -10, 10, 0.1, 60, 'result.xls')
#run ('Test/smtlib-20140121/QF_NIA/mcm', -10, 10, 0.1, 60, 'result.xls')
#run ('Test/smtlib-20140121/QF_NRA/hycomp', -10, 10, 0.1, 60, '1-5-8.csv')
run ('Test/smtlib-20140121/QF_NRA/meti-tarski', -10, 10, 0.1, 60, '1-5-8-11.csv')
#run ('Test/test', -10, 10, 0.1, 60, 'result.csv')
| [
"toilatung90@gmail.com"
] | toilatung90@gmail.com |
6fbfae0cf1207d6277108f1b8f71145614c4f181 | 62a5b12c4d8d90aa1565d88b845c09686070997f | /run_apis/trainer.py | ee0a73f3d4b43afd1e024734e9dee38c2bb7426e | [
"Apache-2.0"
] | permissive | mileswyn/med_seg_nas | 2d3c1f054e7f414e19087cc141d230671dcbc987 | eb52c0f4a40e3ed3a3fed0b3b5a7fb96365cd920 | refs/heads/master | 2023-07-16T06:26:32.043761 | 2021-09-06T08:04:30 | 2021-09-06T08:04:30 | 403,521,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,650 | py | import logging
import time
import torch.nn as nn
from dataset.prefetch_data import data_prefetcher
from tools import utils
class Trainer(object):
def __init__(self, train_data, val_data, optimizer=None, criterion=None,
scheduler=None, config=None, report_freq=None):
self.train_data = train_data
self.val_data = val_data
self.optimizer = optimizer
self.criterion = criterion
self.scheduler = scheduler
self.config = config
self.report_freq = report_freq
def train(self, model, epoch):
objs = utils.AverageMeter()
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
data_time = utils.AverageMeter()
batch_time = utils.AverageMeter()
model.train()
start = time.time()
prefetcher = data_prefetcher(self.train_data)
input, target = prefetcher.next()
step = 0
while input is not None:
data_t = time.time() - start
self.scheduler.step()
n = input.size(0)
if step==0:
logging.info('epoch %d lr %e', epoch, self.optimizer.param_groups[0]['lr'])
self.optimizer.zero_grad()
logits= model(input)
if self.config.optim.label_smooth:
loss = self.criterion(logits, target, self.config.optim.smooth_alpha)
else:
loss = self.criterion(logits, target)
loss.backward()
if self.config.optim.use_grad_clip:
nn.utils.clip_grad_norm_(model.parameters(), self.config.optim.grad_clip)
self.optimizer.step()
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
batch_t = time.time() - start
start = time.time()
objs.update(loss.item(), n)
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
data_time.update(data_t)
batch_time.update(batch_t)
if step!=0 and step % self.report_freq == 0:
logging.info(
'Train epoch %03d step %03d | loss %.4f top1_acc %.2f top5_acc %.2f | batch_time %.3f data_time %.3f',
epoch, step, objs.avg, top1.avg, top5.avg, batch_time.avg, data_time.avg)
input, target = prefetcher.next()
step += 1
logging.info('EPOCH%d Train_acc top1 %.2f top5 %.2f batch_time %.3f data_time %.3f',
epoch, top1.avg, top5.avg, batch_time.avg, data_time.avg)
return top1.avg, top5.avg, objs.avg, batch_time.avg, data_time.avg
def infer(self, model, epoch=0):
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
data_time = utils.AverageMeter()
batch_time = utils.AverageMeter()
model.eval()
start = time.time()
prefetcher = data_prefetcher(self.val_data)
input, target = prefetcher.next()
step = 0
while input is not None:
step += 1
data_t = time.time() - start
n = input.size(0)
logits = model(input)
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
batch_t = time.time() - start
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
data_time.update(data_t)
batch_time.update(batch_t)
if step % self.report_freq == 0:
logging.info(
'Val epoch %03d step %03d | top1_acc %.2f top5_acc %.2f | batch_time %.3f data_time %.3f',
epoch, step, top1.avg, top5.avg, batch_time.avg, data_time.avg)
start = time.time()
input, target = prefetcher.next()
logging.info('EPOCH%d Valid_acc top1 %.2f top5 %.2f batch_time %.3f data_time %.3f',
epoch, top1.avg, top5.avg, batch_time.avg, data_time.avg)
return top1.avg, top5.avg, batch_time.avg, data_time.avg
class SearchTrainer(object):
def __init__(self, train_data, val_data, search_optim, criterion, scheduler, config, args):
self.train_data = train_data
self.val_data = val_data
self.search_optim = search_optim
self.criterion = criterion
self.scheduler = scheduler
self.sub_obj_type = config.optim.sub_obj.type
self.args = args
def train(self, model, epoch, optim_obj='Weights', search_stage=0):
assert optim_obj in ['Weights', 'Arch']
objs = utils.AverageMeter()
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
sub_obj_avg = utils.AverageMeter()
data_time = utils.AverageMeter()
batch_time = utils.AverageMeter()
model.train()
start = time.time()
if optim_obj == 'Weights':
prefetcher = data_prefetcher(self.train_data)
elif optim_obj == 'Arch':
prefetcher = data_prefetcher(self.val_data)
input, target = prefetcher.next()
step = 0
while input is not None:
input, target = input.cuda(), target.cuda()
data_t = time.time() - start
n = input.size(0)
if optim_obj == 'Weights':
self.scheduler.step()
if step==0:
logging.info('epoch %d weight_lr %e', epoch, self.search_optim.weight_optimizer.param_groups[0]['lr'])
logits, loss, sub_obj = self.search_optim.weight_step(input, target, model, search_stage)
elif optim_obj == 'Arch':
if step==0:
logging.info('epoch %d arch_lr %e', epoch, self.search_optim.arch_optimizer.param_groups[0]['lr'])
logits, loss, sub_obj = self.search_optim.arch_step(input, target, model, search_stage)
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
del logits, input, target
batch_t = time.time() - start
objs.update(loss, n)
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
sub_obj_avg.update(sub_obj)
data_time.update(data_t)
batch_time.update(batch_t)
if step!=0 and step % self.args.report_freq == 0:
logging.info(
'Train%s epoch %03d step %03d | loss %.4f %s %.2f top1_acc %.2f top5_acc %.2f | batch_time %.3f data_time %.3f',
optim_obj ,epoch, step, objs.avg, self.sub_obj_type, sub_obj_avg.avg,
top1.avg, top5.avg, batch_time.avg, data_time.avg)
start = time.time()
step += 1
input, target = prefetcher.next()
return top1.avg, top5.avg, objs.avg, sub_obj_avg.avg, batch_time.avg
def infer(self, model, epoch):
objs = utils.AverageMeter()
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
sub_obj_avg = utils.AverageMeter()
data_time = utils.AverageMeter()
batch_time = utils.AverageMeter()
model.train() # don't use running_mean and running_var during search
start = time.time()
prefetcher = data_prefetcher(self.val_data)
input, target = prefetcher.next()
step = 0
while input is not None:
step += 1
data_t = time.time() - start
n = input.size(0)
logits, loss, sub_obj = self.search_optim.valid_step(input, target, model)
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
batch_t = time.time() - start
objs.update(loss, n)
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
sub_obj_avg.update(sub_obj)
data_time.update(data_t)
batch_time.update(batch_t)
if step % self.args.report_freq == 0:
logging.info(
'Val epoch %03d step %03d | loss %.4f %s %.2f top1_acc %.2f top5_acc %.2f | batch_time %.3f data_time %.3f',
epoch, step, objs.avg, self.sub_obj_type, sub_obj_avg.avg, top1.avg, top5.avg,
batch_time.avg, data_time.avg)
start = time.time()
input, target = prefetcher.next()
return top1.avg, top5.avg, objs.avg, sub_obj_avg.avg, batch_time.avg
| [
"mileswyn@163.com"
] | mileswyn@163.com |
c06f46629735752534a755c9f8214b08c2ac169d | b93b09c5e85af32c56cfd9aaed5c7bdef79cdea5 | /Cookbook/Chapter2/2-5.py | 9f66d6cde99253c2cd9e94591ba9d91a9193f581 | [] | no_license | Biwoco-Playground/Learn-Docker_VNL | ca0a7388b00a0126b7b0cec03454200602d7ed67 | 116bebfcb89d8378271ff7b473d719bc180a24be | refs/heads/master | 2023-06-26T06:48:52.385640 | 2021-07-25T13:30:09 | 2021-07-25T13:30:09 | 377,758,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | import re
text = 'Today is 11/27/2012. PyCon starts 3/13/2013.'
a = re.sub(r'(\d+)/(\d+)/(\d+)$',r'\3-\1-\2',text)
print(re.sub(r'(\d+)/(\d+)/(\d+)',r'\3-\1-\2',text)) | [
"long.speed00@gmail.com"
] | long.speed00@gmail.com |
671d28386b9fc24c44d41ea6a4d3c0b0b5de96e6 | e581e65d9b905ca1419b7d5570dc211d4602e451 | /questions/migrations/0004_auto_20160225_1634.py | 2a2ab4111b49d7a2fd9acb5e6b59b975ad18b256 | [] | no_license | ltoyoda/toyoda-grs | b80ffc27d65bbe32e59adbb45e7b3eb8b3b7d024 | 76f8bd55f0f17412a3506af33f21549700cf492d | refs/heads/master | 2021-01-21T13:44:13.022778 | 2016-05-11T06:43:54 | 2016-05-11T06:43:54 | 51,477,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hw1', '0003_input_file_name'),
]
operations = [
migrations.RenameField(
model_name='input',
old_name='file_name',
new_name='f',
),
]
| [
"ltoyoda@hotmail.com"
] | ltoyoda@hotmail.com |
33a033e17793c4a5a6e99eb22091ab20c8b438ee | 57a12d208b43d6902df05a4cc35a57456ce05def | /pages/taxes_page.py | 1061c6dbe9297032722239e10c1781f4f56a0e40 | [] | no_license | zankrus/bank_ui_tests_selenium | 914f6ba749eca5c0167b4699c8ae3da86a387402 | bfc8bef694267c19ff9509f0442092f569e24b14 | refs/heads/master | 2023-07-08T06:56:49.357975 | 2020-08-29T17:03:06 | 2020-08-29T17:03:06 | 284,901,105 | 0 | 0 | null | 2023-06-30T22:13:07 | 2020-08-04T06:52:54 | Python | UTF-8 | Python | false | false | 1,820 | py | """Файл страницы Проверки налоговых задолженностей"""
import allure
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from common.tax_page_constants import TaxPageConstants as Const
from locators.tax_page_locators import TaxPageLocators
class TaxesPage:
"""
Класс налоговых задолженностей
"""
def __init__(self, app):
self.app = app
self.wait = WebDriverWait(self.app.wd, 10)
def check_taxes_button(self):
return self.app.wd.find_element(*TaxPageLocators.CHECK_TAXES_BUTTON)
@allure.step("Кликаем на кнопку Проверить налоги")
def click_on_check_taxes_button(self):
self.wait.until(
EC.presence_of_element_located(TaxPageLocators.CHECK_TAXES_BUTTON)
)
return self.check_taxes_button().click()
def taxes_check_result_message(self):
self.wait.until(
EC.text_to_be_present_in_element(
TaxPageLocators.TAXES_CHECK_RESULT, Const.TAXES_CHECK_RESULT_TEXT
)
)
return self.app.wd.find_element(*TaxPageLocators.TAXES_CHECK_RESULT)
@allure.step("Проверка - Появились ли результаты из Гос.Инф.Системы")
def taxes_check_result_text_is_displayed(self):
return self.taxes_check_result_message().is_displayed()
def pay_tax_button(self):
self.wait.until(EC.presence_of_element_located(TaxPageLocators.PAY_TAX_BUTTON))
return self.app.wd.find_element(*TaxPageLocators.PAY_TAX_BUTTON)
@allure.step("Нажимаем оплатить")
def click_pay_tax_button(self):
return self.pay_tax_button().click()
| [
"sealthepirate@gmail.com"
] | sealthepirate@gmail.com |
6700eca38728dbb927b47565afc6b22590c2510e | 885a1638ef1384543cca6d4792145b3916775082 | /CreateDemoTenant/scripts/pkg_PrismCentralDemo__install__Task_getCloudAccount.py | badaabb25dfb9ad85f7d44e29db6e7ba9ff19686 | [] | no_license | wolfganghuse/calm-demo-env | 1a465f56052cd12ef7dfaa4a6010bc1847341208 | 56af8dd3d62970f3c88b842f5e5eaa3c9a7d3a2c | refs/heads/main | 2023-06-25T14:22:35.236869 | 2021-07-30T09:14:23 | 2021-07-30T09:14:23 | 364,549,322 | 0 | 0 | null | 2021-07-30T08:51:14 | 2021-05-05T11:09:55 | Python | UTF-8 | Python | false | false | 1,347 | py | account_name = 'NTNX_LOCAL_AZ'
username = "@@{cred_PCDemo.username}@@"
username_secret = "@@{cred_PCDemo.secret}@@"
api_server = "@@{address}@@"
api_server_port = "9440"
api_server_endpoint = "/api/nutanix/v3/accounts/list"
length = 100
url = "https://{}:{}{}".format(
api_server,
api_server_port,
api_server_endpoint
)
payload = {
'filter': 'state!=DELETED;state!=DRAFT;name=={}'.format(account_name)
}
method = "POST"
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
r = urlreq(
url,
verb=method,
auth='BASIC',
user=username,
passwd=username_secret,
params=json.dumps(payload),
headers=headers,
verify=False
)
if r.ok:
resp = json.loads(r.content)
for account in resp['entities']:
if account['metadata']['name'] == account_name:
print("CLOUD_ACCOUNT_UUID={}".format(account['status']['resources']['data']['cluster_account_reference_list'][0]['uuid']))
print("PC_ACCOUNT_UUID={}".format(account['metadata']['uuid']))
# If the call failed
else:
# print the content of the response (which should have the error message)
print("Request failed", json.dumps(
json.loads(r.content),
indent=4
))
print("Headers: {}".format(headers))
print("Payload: {}".format(payload))
exit(1)
# endregion | [
"wolfgang.huse@nutanix.com"
] | wolfgang.huse@nutanix.com |
56ac09696a34ae248023b01558ccd45b9b325c66 | 290b722119abafbef6ba4ae75bd3917ed65be6bf | /LocationTracer/05_APConGeoLoc.py | 0018ab7c188b95b0410a16cf2f0d07702a6f5b68 | [] | no_license | Santhosh-23mj/Simply-Python | 37e7c873d5e073953692ff397925e94f0c9145db | 514ee59f6631d7d53903f0061e7132a1873fea7f | refs/heads/master | 2020-11-30T05:22:58.219752 | 2020-01-03T13:31:33 | 2020-01-03T13:31:33 | 230,315,419 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,083 | py | #!/usr/bin/python3
"""
Geolocating using the MAC of APs that we connected
It requires wigle.net account which is a opensource database for
MAC to Location Lookup
This program prints the Latitude and Longitude of the MAC addresses of the
APs that we connected to from public database called wigle which could fetch
the locations of where we have been :)
"""
import re
import sys
import optparse
import mechanize
import urllib.parse
from _winreg import *
# Convert REG BINARY to MAC
def val2Addr(val):
    """Convert a REG_BINARY MAC value to "aa:bb:cc:dd:ee:ff" form.

    Accepts either a bytes value (Python 3 registry reads, iterating
    yields ints) or a str of raw characters (Python 2).  Only the first
    six octets form the MAC address.
    """
    octets = []
    for ch in val[:6]:
        byte = ch if isinstance(ch, int) else ord(ch)
        octets.append("%02x" % byte)
    # The original code formatted the pairs with no separator and then
    # stripped/replaced spaces that were never emitted, so it returned
    # "aabbccddeeff" with no colons; joining with ":" restores the
    # intended 17-character MAC format.
    return ":".join(octets)
"""
use this if the above doesnt work
ls = []
ls.append(val[0])
val = val[:12]
for i in range(len(val)):
if( i%2 == 0 ):
ls.append(":")
ls.append(val[i])
return ''.join(ls)
"""
# Get the Latitude and Longitude from the MAC Address
def fetchLatLon( username, passwd, netid ):
    """Log in to wigle.net and print the latitude/longitude for a BSSID.

    Scrapes the "confirmquery" page for maplat/maplon URL parameters;
    prints "N/A" for any coordinate that cannot be found.
    NOTE(review): under Python 3, browser.open(...).read() returns bytes,
    so re.findall with a str pattern would raise TypeError -- this code
    appears to have been written for Python 2's mechanize.  Confirm.
    """
    browser = mechanize.Browser()
    browser.open("http://www.wigle.net")
    # Form-encoded credentials for the wigle login endpoint.
    reqData = urllib.parse.urlencode({'credential_0':username,'credential_1':passwd})
    browser.open("http://wigle.net/gps/gps/main/login",reqData)
    params = {}
    params['netid'] = netid
    reqParams = urllib.parse.urlencode(params)
    respUrl = "http://wigle.net/gps/gps/main/confirmquery"
    resp = browser.open(respUrl,reqParams).read()
    mapLat = "N/A"
    mapLon = "N/A"
    # Pull "maplat=<value>&" out of the response and keep the value part.
    rLat = re.findall(r'maplat=.*\&',resp)
    if( rLat ):
        mapLat = rLat[0].split("&")[0].split("=")[1]
    # NOTE(review): 'maplot' looks like a typo for 'maplon' -- as written
    # the longitude branch can never match; verify against wigle markup.
    rLon = re.findall(r'maplot=.*\&',resp)
    if( rLon ):
        mapLon = rLon[0].split("&")[0].split("=")[1]
    print("[+] Latitude : " + mapLat + " Longitude : " + mapLon)
# Print out the AP Connections and their MAC From Registry
def printNets( username, passwd ):
    """Enumerate saved network profiles from the registry and geolocate each.

    Reads the "Unmanaged" network signatures under HKLM, prints each
    profile name with its stored gateway MAC, and asks wigle.net (via
    fetchLatLon) for that MAC's coordinates.
    """
    net = "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\NetworkList\\Signatures\\Unmanaged"
    key = OpenKey(HKEY_LOCAL_MACHINE,net)
    print("[*] Your Networks...")
    # Registry subkey enumeration is 0-based; the original loop started
    # at 1 and silently skipped the first stored profile.
    for i in range(0, 50):
        try:
            guid = EnumKey(key,i)
            netKey = OpenKey(key,str(guid))
            # Value index 5 is the gateway MAC (REG_BINARY), index 4 the
            # profile description for this signature subkey.
            n,addr,t = EnumValue(netKey,5)
            n,name,t = EnumValue(netKey,4)
            macAddr = val2Addr(addr)
            netName = str(name)
            print("[+]",netName,macAddr,sep=" ")
            fetchLatLon( username, passwd, macAddr )
            CloseKey(netKey)
        except Exception:
            # EnumKey raises OSError once the subkeys run out; treat any
            # other per-entry failure the same way (best-effort scan).
            # Narrowed from a bare "except:" so Ctrl-C still interrupts.
            break
    # Release the parent key handle (the original leaked it).
    CloseKey(key)
def main():
    """Parse wigle.net credentials from the command line and run the scan."""
    usage_text = "Usage : python3 %s -u <username> -p <password>" % sys.argv[0]
    cli = optparse.OptionParser(usage=usage_text)
    cli.add_option("-u", dest='username', type=str, help="Specify username for wigle.net")
    cli.add_option("-p", dest='passwd', type=str, help="Specify password for wigle.net")
    opts, _ = cli.parse_args()
    # Both credentials are mandatory; bail out with the usage banner.
    if opts.username is None or opts.passwd is None:
        print(cli.usage)
        exit(0)
    printNets(opts.username, opts.passwd)
if( __name__ == "__main__" ):
main()
| [
"n00bie@localhost.localdomain"
] | n00bie@localhost.localdomain |
015c82df35e97de18b9305763af3e704c034c483 | 342b6e7860db183d214901608271566c493e3317 | /test_gen_1.py | 957b7d91172ba5f8836949ea43def91a4b6daecd | [] | no_license | Mukesh-BR/Multiple-Myleoma-Detection | 3429ef30ab0971ea940bbd6d7951a52339e94ad7 | 5b1af2bc6d6308f11be8cca61556e71b977e2663 | refs/heads/master | 2020-12-09T10:27:56.378133 | 2020-01-11T18:20:32 | 2020-01-11T18:20:32 | 233,276,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,965 | py | import numpy as np
import os
import skimage.io as io
import skimage.transform as trans
import numpy as np
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
import warnings
import os
import keras
import cv2
import matplotlib.pyplot as plt
from loss import dice_coef_loss,dice_coef
from preprocess import preprocess_mask,preprocess_image
import numpy as np
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def unet(input_size):
    """Build a 4-level U-Net for binary segmentation.

    input_size: input tensor shape, e.g. (256, 256, 3) as used below.
    Returns a Keras Model whose output is a single-channel sigmoid map
    at the input resolution.
    """
    inputs = Input(input_size)
    # --- Encoder: two 3x3 conv blocks per level, 2x2 max-pool between.
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
    # The original 1024-filter bottleneck is disabled; up6 upsamples pool4
    # directly, making this a shallower variant of the classic U-Net.
    # conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
    # conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
    # drop5 = Dropout(0.5)(conv5)
    # --- Decoder: upsample, then concatenate the matching encoder feature
    # map (skip connection) along the channel axis before two 3x3 convs.
    up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(pool4))
    merge6 = concatenate([drop4,up6], axis = 3)
    conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
    conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
    up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
    merge7 = concatenate([conv3,up7], axis = 3)
    conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
    conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
    up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
    merge8 = concatenate([conv2,up8], axis = 3)
    conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
    conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
    up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
    merge9 = concatenate([conv1,up9], axis = 3)
    conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)
    conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
    # 1x1 sigmoid conv -> single-channel probability map.
    conv9 = Conv2D(1, 1, activation = 'sigmoid', padding = 'same', kernel_initializer = 'he_normal')(conv9)
    #conv10 = Conv2D(3, 1, activation = 'sigmoid')(conv9)
    # NOTE(review): `input=`/`output=` are the deprecated Keras 1 keyword
    # names (modern Keras uses inputs=/outputs=) -- kept as-is.
    model = Model(input = inputs, output = conv9)
    #model.summary()
    return model
model=unet((256,256,3))
model.summary()
# training_data_x=[]
# training_data_y=[]
# test_data_x=[]
# test_data_y=[]
# og_path="//content//drive//My Drive//abc//pqr"
# test_path="//content//drive//My Drive//abc//pqr"
# CATEGORIES=["og","mask","test_og","test_mask"]
# def create_dataset():
# path=os.path.join(og_path,CATEGORIES[0])
# for img in os.listdir(path):
# img_array=cv2.imread(os.path.join(path,img))
# training_data_x.append(img_array)
# path=os.path.join(og_path,CATEGORIES[1])
# for img in os.listdir(path):
# img_array=cv2.imread(os.path.join(path,img))
# training_data_y.append(img_array)
# path=os.path.join(og_path,CATEGORIES[2])
# for img in os.listdir(path):
# img_array=cv2.imread(os.path.join(path,img))
# test_data_x.append(img_array)
# path=os.path.join(og_path,CATEGORIES[3])
# for img in os.listdir(path):
# img_array=cv2.imread(os.path.join(path,img))
# test_data_y.append(img_array)
# create_dataset()
# training_data_x=np.asarray(training_data_x)
# training_data_y=np.asarray(training_data_y)
# test_data_x=np.asarray(test_data_x)
# test_data_y=np.asarray(test_data_y)
# Augmentation settings for masks and images.  The two dicts differ only
# in preprocessing_function so that image/mask pairs receive identical
# geometric transforms (same seed below) but their own preprocessing.
data_gen_args_mask = dict(featurewise_center=True,
                     featurewise_std_normalization=True,
                     rotation_range=90,
                     width_shift_range=0.1,
                     height_shift_range=0.1,
                     zoom_range=0.2,preprocessing_function=preprocess_mask)
data_gen_args_image = dict(featurewise_center=True,
                     featurewise_std_normalization=True,
                     rotation_range=90,
                     width_shift_range=0.1,
                     height_shift_range=0.1,
                     zoom_range=0.2,
                     preprocessing_function=preprocess_image)
image_datagen = keras.preprocessing.image.ImageDataGenerator(**data_gen_args_image)
mask_datagen = keras.preprocessing.image.ImageDataGenerator(**data_gen_args_mask)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
# class_mode=None: yield raw images only (no labels) -- pairing with the
# mask generator below provides the targets.
image_generator = image_datagen.flow_from_directory(
    '/home/team6/Project/MiMM_SBILab/patches/train/images',
    class_mode=None,
    target_size=(256,256),
    seed=seed)
mask_generator = mask_datagen.flow_from_directory(
    '/home/team6/Project/MiMM_SBILab/patches/train/masks',
    class_mode=None,
    color_mode="grayscale",
    target_size=(256, 256),
    seed=seed)
#combine generators into one which yields image and masks
# Pair image batches with mask batches; identical seeds keep them aligned.
train_generator = zip(image_generator, mask_generator)
print(len(image_generator))
# Dice loss + dice coefficient metric (defined in the local `loss` module).
model.compile(optimizer="adam",loss=dice_coef_loss,metrics=["accuracy",dice_coef])
# callbacks = [
# keras.callbacks.EarlyStopping(monitor='loss', patience=25, verbose=1),
# keras.callbacks.ModelCheckpoint("Resnet_50_{epoch:03d}.hdf5", monitor='loss', verbose=1, mode='auto'),
# keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.5, patience=5, verbose=1, mode='auto', epsilon=0.01, cooldown=0, min_lr=1e-6),
# keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None, embeddings_data=None, update_freq='epoch')
# #NotifyCB
# ]
# Restore the checkpoint from epoch 50 (training below is commented out,
# so this script only runs inference with these weights).
model.load_weights("Resnet_50_050.hdf5")
print("Loaded weights")
# model.fit_generator(
# train_generator,
# steps_per_epoch=1700,
# epochs=100,
# initial_epoch=50,
# callbacks=callbacks)
# Run inference over the generator and persist the raw predictions.
ans = model.predict_generator(image_generator, steps=1700, verbose=1)
print(ans.shape)
np.save('preds', ans)
# Count the non-zero prediction values.  np.count_nonzero is equivalent
# to the original quadruple nested Python loop over every element of the
# 4-D output array, but runs in C instead of interpreted Python.
count = int(np.count_nonzero(ans))
print(count)
# cv2.imwrite("Sample.jpg",ans)
# cv2.imwrite("preprocess.jpg",img_result)
# h1.fit(training_data_x,training_data_y,epochs=10,batch_size=3)
# pred=h1.evaluate(test_data_x,test_data_y)
# print("loss"+str(pred[0]))
# print("acc"+str(pred[1]))
| [
"noreply@github.com"
] | Mukesh-BR.noreply@github.com |
f5321eb8661a42727f6e2a696d81223087657719 | 048c06a0e3f25a720bb1805902a4274c46a7534f | /Merge Sort/merge_sort.py | 4488f5555a2781af48a7b4a3d2e6daf8e4a7f2a5 | [] | no_license | jgdj01/Projeto_Algoritmo | 11d3c993137751926332d9bd9107559ca0125ab2 | 849b0c7458b78cfb8e4d9a3e5cfe48c6a08f0657 | refs/heads/master | 2020-03-29T20:47:04.548653 | 2018-10-08T23:17:18 | 2018-10-08T23:17:18 | 150,330,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,155 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import timeit
# Merges dois subarrays de arr[].
# Primeiro subarray é arr[l..m]
# Segundo subarray é arr[m+1..r]
def merge(arr, l, m, r):
    """Merge the two sorted runs arr[l..m] and arr[m+1..r] in place."""
    # Snapshot both runs; slicing copies them so arr can be overwritten.
    left_run = arr[l:m + 1]
    right_run = arr[m + 1:r + 1]
    li = ri = 0
    write = l
    # Take the smaller head element each step; "<=" keeps the sort stable.
    while li < len(left_run) and ri < len(right_run):
        if left_run[li] <= right_run[ri]:
            arr[write] = left_run[li]
            li += 1
        else:
            arr[write] = right_run[ri]
            ri += 1
        write += 1
    # Exactly one of the runs may still have elements; copy the remainder.
    while li < len(left_run):
        arr[write] = left_run[li]
        li += 1
        write += 1
    while ri < len(right_run):
        arr[write] = right_run[ri]
        ri += 1
        write += 1
# l é para o index esquerdo e r é para o index direito do sub-array que será
# ordenado
def mergeSort(arr, l, r):
    """Recursively merge-sort arr[l..r] (inclusive bounds) in place."""
    # Zero or one element: nothing to do.
    if l >= r:
        return
    # Same midpoint as (l + (r - 1)) // 2; Python ints never overflow.
    mid = (l + r - 1) // 2
    mergeSort(arr, l, mid)
    mergeSort(arr, mid + 1, r)
    merge(arr, l, mid, r)
##Função Main
def main():
    """Read integers from the test file, merge-sort them, and report timing."""
    # Test input file; "with" guarantees the handle is closed (the
    # original opened it and never closed it).
    with open('entrada-aleatorio-10.txt', 'r') as arquivo:
        dados = arquivo.read()
    elementos = [int(i) for i in dados.split()]
    print('\tTamanho: ', len(elementos))
    print('\nSEM ORDENAR -> ', elementos)
    n = len(elementos)
    # Time only the sort itself, not the file I/O or parsing.
    tempinicial = timeit.default_timer()
    mergeSort(elementos, 0, n - 1)
    tempfinal = timeit.default_timer()
    print('\nDEPOIS DE ORDENAR -> ', elementos)
    print('\n\t\tDuracao: %f' % (tempfinal - tempinicial))
print('\n\t\tDuracao: %f' % (tempfinal - tempinicial))
if __name__ == "__main__":
main() | [
"noreply@github.com"
] | jgdj01.noreply@github.com |
f1f2d68c5146969fd7396d0ba82af9d738d4f821 | 263c34e8f9ec88ecc16062588e675684d6939cb5 | /rgb_som_fiumi.py | e9f28684cf2b512d321a8d7a10db09b4394ea640 | [] | no_license | massimiliano-unina/fluvial-s1s2-max | eee96ed114132b618e329b49bbb5c88f92dc31f8 | dfc82346a122f345642e7b9f308f7ab9b4430e82 | refs/heads/master | 2023-03-30T05:20:11.867767 | 2021-04-12T17:04:38 | 2021-04-12T17:04:38 | 357,274,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,176 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 30 14:22:05 2020
@author: massi
"""
import os
# import gdal
# Windows-local input/output folders and the OTB (Orfeo Toolbox) binary
# directory; machine-specific paths, adjust before running elsewhere.
data_path = r"D:\fiumiunsupervised\drive-download-20210125T123716Z-001\out2\\" #"D:\Albufera_2019_processed\subset_albufera\s1\\"
out_path = r"D:\fiumiunsupervised\drive-download-20210125T123716Z-001\out3\\"
# som_out_path = r"C:\Users\massi\Downloads\drive-download-20201201T153241Z-001\som\\"
otb_path = r"C:\Users\massi\Downloads\OTB-6.6.1-Win64\bin\\"
if not os.path.exists(out_path):
    os.makedirs(out_path)
# if not os.path.exists(som_out_path):
#     os.makedirs(som_out_path)
import numpy as np
dir_list = os.listdir(data_path)
dir_list.sort()
print(np.size(dir_list))
from openpyxl import load_workbook
workbook = load_workbook(filename=r"C:\Users\massi\OneDrive\Desktop\Incendi Boschivi\SQI_3_pH.xlsx")
print(workbook.sheetnames)
# Activate the sheet named 'SQI' by scanning the sheet-name list.
# NOTE(review): if 'SQI' is absent the loop falls through and the LAST
# sheet index is activated instead -- confirm this is acceptable.
sheet_to_focus = 'SQI'
for s in range(len(workbook.sheetnames)):
    if workbook.sheetnames[s] == sheet_to_focus:
        break
workbook.active = s
sheet = workbook.active
# Print column D (the 3rd value of each B..E row slice), skipping headers.
for value in sheet.iter_rows(min_row=2, min_col=2,max_col=5,values_only=True):
    print(value[2])
# print(sheet["B2:D4"].values)
for Num in range(np.size(dir_list)):
    print(dir_list[Num])
# for file in dir_list:
# if file.find("VV_Po_S1_pre_") != -1:
# print(file)
# file_inr1_pre = os.path.join(data_path, file )
# name_ = 13
# file_inr1_pre2 = os.path.join(out_path, file )
# # file_inr2 = os.path.join(file_inr1[:len(data_path)], "B8" + file_inr1[len(data_path)+6:])
# file_inr4= os.path.join(file_inr1_pre[:len(data_path)] ,"RF_Po_S1S2_" + file_inr1_pre[len(data_path)+name_:])
# file_inr5= os.path.join(file_inr1_pre[:len(data_path)] ,"VH_Po_S1_" + file_inr1_pre[len(data_path)+name_:])
# file_inr5_pre= os.path.join(file_inr1_pre[:len(data_path)] ,"VH_Po_S1_pre_" + file_inr1_pre[len(data_path)+name_:])
# file_inr5_post= os.path.join(file_inr1_pre[:len(data_path)] ,"VH_Po_S1_post_" + file_inr1_pre[len(data_path)+name_:])
# file_inr1= os.path.join(file_inr1_pre[:len(data_path)] ,"VV_Po_S1_" + file_inr1_pre[len(data_path)+name_:])
# file_inr1_post= os.path.join(file_inr1_pre[:len(data_path)] ,"VV_Po_S1_post_" + file_inr1_pre[len(data_path)+name_:])
# file_inr52= os.path.join(file_inr1_pre2[:len(out_path)] ,"VH_Po_S1_" + file_inr1_pre2[len(out_path)+name_:])
# file_inr5_pre2= os.path.join(file_inr1_pre2[:len(out_path)] ,"VH_Po_S1_pre_" + file_inr1_pre2[len(out_path)+name_:])
# file_inr5_post2= os.path.join(file_inr1_pre2[:len(out_path)] ,"VH_Po_S1_post_" + file_inr1_pre2[len(out_path)+name_:])
# file_inr12= os.path.join(file_inr1_pre2[:len(out_path)] ,"VV_Po_S1_" + file_inr1_pre2[len(out_path)+name_:])
# file_inr1_post2= os.path.join(file_inr1_pre2[:len(out_path)] ,"VV_Po_S1_post_" + file_inr1_pre2[len(out_path)+name_:])
# # dataset = gdal.Open(file_inr4, gdal.GA_ReadOnly)
# # rf = dataset.ReadAsArray()
# # # gvv_0 = -10*np.log10(gvv_0)
# # dataset = None
# # rf3 = rf.astype('float32')
# # file_inr42= os.path.join(file_inr1_pre2[:len(data_path)] ,"RF_Po_S1S2_" + file_inr1_pre2[len(data_path)+name_:])
# # imsave(file_inr42, rf3)
# conc = os.path.join(otb_path, "otbcli_Superimpose")
# command = conc + " -inr " + file_inr4 + " -inm " + file_inr1_pre + " -out " + file_inr1_pre2 + " -interpolator linear"
# os.system(command)
# command = conc + " -inr " + file_inr4 + " -inm " + file_inr5 + " -out " + file_inr52 + " -interpolator linear"
# os.system(command)
# command = conc + " -inr " + file_inr4 + " -inm " + file_inr5_pre + " -out " + file_inr5_pre2 + " -interpolator linear"
# os.system(command)
# command = conc + " -inr " + file_inr4 + " -inm " + file_inr5_post + " -out " + file_inr5_post2 + " -interpolator linear"
# os.system(command)
# command = conc + " -inr " + file_inr4 + " -inm " + file_inr1 + " -out " + file_inr12 + " -interpolator linear"
# os.system(command)
# command = conc + " -inr " + file_inr4 + " -inm " + file_inr1_post + " -out " + file_inr1_post2 + " -interpolator linear"
# os.system(command)
# for file in dir_list:
# if file.find("VV_Osti_S1_pre_") != -1:
# print(file)
# file_inr1_pre = os.path.join(data_path, file )
# name_ = 15
# file_inr1_pre2 = os.path.join(out_path, file )
# # file_inr2 = os.path.join(file_inr1[:len(data_path)], "B8" + file_inr1[len(data_path)+6:])
# file_inr4= os.path.join(file_inr1_pre[:len(data_path)] ,"RF_Osti_S1S2_" + file_inr1_pre[len(data_path)+name_:])
# file_inr5= os.path.join(file_inr1_pre[:len(data_path)] ,"VH_Osti_S1_" + file_inr1_pre[len(data_path)+name_:])
# file_inr5_pre= os.path.join(file_inr1_pre[:len(data_path)] ,"VH_Osti_S1_pre_" + file_inr1_pre[len(data_path)+name_:])
# file_inr5_post= os.path.join(file_inr1_pre[:len(data_path)] ,"VH_Osti_S1_post_" + file_inr1_pre[len(data_path)+name_:])
# file_inr1= os.path.join(file_inr1_pre[:len(data_path)] ,"VV_Osti_S1_" + file_inr1_pre[len(data_path)+name_:])
# file_inr1_post= os.path.join(file_inr1_pre[:len(data_path)] ,"VV_Osti_S1_post_" + file_inr1_pre[len(data_path)+name_:])
# file_inr52= os.path.join(file_inr1_pre2[:len(out_path)] ,"VH_Osti_S1_" + file_inr1_pre2[len(out_path)+name_:])
# file_inr5_pre2= os.path.join(file_inr1_pre2[:len(out_path)] ,"VH_Osti_S1_pre_" + file_inr1_pre2[len(out_path)+name_:])
# file_inr5_post2= os.path.join(file_inr1_pre2[:len(out_path)] ,"VH_Osti_S1_post_" + file_inr1_pre2[len(out_path)+name_:])
# file_inr12= os.path.join(file_inr1_pre2[:len(out_path)] ,"VV_Osti_S1_" + file_inr1_pre2[len(out_path)+name_:])
# file_inr1_post2= os.path.join(file_inr1_pre2[:len(out_path)] ,"VV_Osti_S1_post_" + file_inr1_pre2[len(out_path)+name_:])
# # dataset = gdal.Open(file_inr4, gdal.GA_ReadOnly)
# # rf = dataset.ReadAsArray()
# # # gvv_0 = -10*np.log10(gvv_0)
# # dataset = None
# # rf3 = rf.astype('float32')
# # file_inr42= os.path.join(file_inr1_pre2[:len(data_path)] ,"RF_Osti_S1S2_" + file_inr1_pre2[len(data_path)+name_:])
# # imsave(file_inr42, rf3)
# conc = os.path.join(otb_path, "otbcli_Superimpose")
# command = conc + " -inr " + file_inr4 + " -inm " + file_inr1_pre + " -out " + file_inr1_pre2 + " -interpolator linear"
# os.system(command)
# command = conc + " -inr " + file_inr4 + " -inm " + file_inr5 + " -out " + file_inr52 + " -interpolator linear"
# os.system(command)
# command = conc + " -inr " + file_inr4 + " -inm " + file_inr5_pre + " -out " + file_inr5_pre2 + " -interpolator linear"
# os.system(command)
# command = conc + " -inr " + file_inr4 + " -inm " + file_inr5_post + " -out " + file_inr5_post2 + " -interpolator linear"
# os.system(command)
# command = conc + " -inr " + file_inr4 + " -inm " + file_inr1 + " -out " + file_inr12 + " -interpolator linear"
# os.system(command)
# command = conc + " -inr " + file_inr4 + " -inm " + file_inr1_post + " -out " + file_inr1_post2 + " -interpolator linear"
# os.system(command)
# # data_path = r"D:\fiumiunsupervised\drive-download-20210125T123716Z-001\\"
# # out_path = r"D:\fiumiunsupervised\drive-download-20210125T123716Z-001\out2\\"
# # # som_out_path = r"C:\Users\massi\Downloads\drive-download-20201201T153241Z-001\som\\"
# # otb_path = r"C:\Users\massi\Downloads\OTB-6.6.1-Win64\bin\\"
# # if not os.path.exists(out_path):
# # os.makedirs(out_path)
# # # if not os.path.exists(som_out_path):
# # # os.makedirs(som_out_path)
# # dir_list = os.listdir(data_path)
# # dir_list.sort()
# # # for file in dir_list:
# # # if file.find("VV_Po_S1_pre_") != -1:
# # # print(file)
# # # file_inr1_pre = os.path.join(data_path, file )
# # # name_ = 13
# # # file_inr1_pre2 = os.path.join(out_path, file )
# # # # file_inr2 = os.path.join(file_inr1[:len(data_path)], "B8" + file_inr1[len(data_path)+6:])
# # # file_inr4= os.path.join(file_inr1_pre[:len(data_path)] ,"RF_Po_S1S2_" + file_inr1_pre[len(data_path)+name_:])
# # # file_inr5= os.path.join(file_inr1_pre[:len(data_path)] ,"VH_Po_S1_" + file_inr1_pre[len(data_path)+name_:])
# # # file_inr5_pre= os.path.join(file_inr1_pre[:len(data_path)] ,"VH_Po_S1_pre_" + file_inr1_pre[len(data_path)+name_:])
# # # file_inr5_post= os.path.join(file_inr1_pre[:len(data_path)] ,"VH_Po_S1_post_" + file_inr1_pre[len(data_path)+name_:])
# # # file_inr1= os.path.join(file_inr1_pre[:len(data_path)] ,"VV_Po_S1_" + file_inr1_pre[len(data_path)+name_:])
# # # file_inr1_post= os.path.join(file_inr1_pre[:len(data_path)] ,"VV_Po_S1_post_" + file_inr1_pre[len(data_path)+name_:])
# # # file_inr52= os.path.join(file_inr1_pre2[:len(out_path)] ,"VH_Po_S1_" + file_inr1_pre2[len(out_path)+name_:])
# # # file_inr5_pre2= os.path.join(file_inr1_pre2[:len(out_path)] ,"VH_Po_S1_pre_" + file_inr1_pre2[len(out_path)+name_:])
# # # file_inr5_post2= os.path.join(file_inr1_pre2[:len(out_path)] ,"VH_Po_S1_post_" + file_inr1_pre2[len(out_path)+name_:])
# # # file_inr12= os.path.join(file_inr1_pre2[:len(out_path)] ,"VV_Po_S1_" + file_inr1_pre2[len(out_path)+name_:])
# # # file_inr1_post2= os.path.join(file_inr1_pre2[:len(out_path)] ,"VV_Po_S1_post_" + file_inr1_pre2[len(out_path)+name_:])
# # # # multi_file_inr1 = os.path.join(file_inr1_pre[:len(data_path)] ,"out\Multi_VVVH_Po_S1_" + file_inr1_pre[len(data_path)+name_:])
# # # # dspk_file_inr1 = os.path.join(file_inr1_pre[:len(data_path)] ,"out\Dspk_Multi_VVVH_Po_S1_" + file_inr1_pre[len(data_path)+name_:])
# # # # conc_vv = os.path.join(otb_path, "otbcli_ConcatenateImages")
# # # # cmd_vv_vh = conc_vv + " -il " + file_inr1_pre + " " + file_inr1 + " "+ file_inr1_post + " " + file_inr5_pre + " " + file_inr5 + " "+ file_inr5_post + " -out " + multi_file_inr1
# # # # os.system(cmd_vv_vh)
# # # # dspk_vv = os.path.join(otb_path, "otbcli_Despeckle")
# # # # cmd_spk = dspk_vv +" -in " + multi_file_inr1 + " -filter gammamap -filter.gammamap.rad 3 -out " + dspk_file_inr1
# # # # os.system(cmd_spk)
# # # # single_vv = os.path.join(otb_path, "otbcli_BandMathX")
# # # # vv_pre = single_vv +" -il " + dspk_file_inr1 + " -out " + file_inr1_pre2 + " -exp im1b1"
# # # # vv_ = single_vv +" -il " + dspk_file_inr1 + " -out " + file_inr12 + " -exp im1b2"
# # # # vv_post = single_vv +" -il " + dspk_file_inr1 + " -out " + file_inr1_post2 + " -exp im1b3"
# # # # vh_pre = single_vv +" -il " + dspk_file_inr1 + " -out " + file_inr5_pre2 + " -exp im1b4"
# # # # vh_ = single_vv +" -il " + dspk_file_inr1 + " -out " + file_inr52 + " -exp im1b5"
# # # # vh_post = single_vv +" -il " + dspk_file_inr1 + " -out " + file_inr5_post2 + " -exp im1b6"
# # # # os.system(vv_pre)
# # # # os.system(vv_)
# # # # os.system(vv_post)
# # # # os.system(vh_pre)
# # # # os.system(vh_)
# # # # os.system(vh_post)
# # # dspk_vv = os.path.join(otb_path, "otbcli_Despeckle")
# # # cmd_spk = dspk_vv +" -in " + file_inr1_pre + " -filter gammamap -filter.gammamap.rad 3 -out " + file_inr1_pre2
# # # os.system(cmd_spk)
# # # cmd_spk = dspk_vv +" -in " + file_inr1 + " -filter gammamap -filter.gammamap.rad 3 -out " + file_inr12
# # # os.system(cmd_spk)
# # # cmd_spk = dspk_vv +" -in " + file_inr1_post + " -filter gammamap -filter.gammamap.rad 3 -out " + file_inr1_post2
# # # os.system(cmd_spk)
# # # cmd_spk = dspk_vv +" -in " + file_inr5_pre + " -filter gammamap -filter.gammamap.rad 3 -out " + file_inr5_pre2
# # # os.system(cmd_spk)
# # # cmd_spk = dspk_vv +" -in " + file_inr5 + " -filter gammamap -filter.gammamap.rad 3 -out " + file_inr52
# # # os.system(cmd_spk)
# # # cmd_spk = dspk_vv +" -in " + file_inr1_post + " -filter gammamap -filter.gammamap.rad 3 -out " + file_inr5_post2
# # # os.system(cmd_spk)
# # for file in dir_list:
# # if file.find("VV_Osti_S1_pre_") != -1:
# # print(file)
# # file_inr1_pre = os.path.join(data_path, file )
# # name_ = 15
# # file_inr1_pre2 = os.path.join(out_path, file )
# # # file_inr2 = os.path.join(file_inr1[:len(data_path)], "B8" + file_inr1[len(data_path)+6:])
# # file_inr4= os.path.join(file_inr1_pre[:len(data_path)] ,"RF_Osti_S1S2_" + file_inr1_pre[len(data_path)+name_:])
# # file_inr5= os.path.join(file_inr1_pre[:len(data_path)] ,"VH_Osti_S1_" + file_inr1_pre[len(data_path)+name_:])
# # file_inr5_pre= os.path.join(file_inr1_pre[:len(data_path)] ,"VH_Osti_S1_pre_" + file_inr1_pre[len(data_path)+name_:])
# # file_inr5_post= os.path.join(file_inr1_pre[:len(data_path)] ,"VH_Osti_S1_post_" + file_inr1_pre[len(data_path)+name_:])
# # file_inr1= os.path.join(file_inr1_pre[:len(data_path)] ,"VV_Osti_S1_" + file_inr1_pre[len(data_path)+name_:])
# # file_inr1_post= os.path.join(file_inr1_pre[:len(data_path)] ,"VV_Osti_S1_post_" + file_inr1_pre[len(data_path)+name_:])
# # file_inr52= os.path.join(file_inr1_pre2[:len(out_path)] ,"VH_Osti_S1_" + file_inr1_pre2[len(out_path)+name_:])
# # file_inr5_pre2= os.path.join(file_inr1_pre2[:len(out_path)] ,"VH_Osti_S1_pre_" + file_inr1_pre2[len(out_path)+name_:])
# # file_inr5_post2= os.path.join(file_inr1_pre2[:len(out_path)] ,"VH_Osti_S1_post_" + file_inr1_pre2[len(out_path)+name_:])
# # file_inr12= os.path.join(file_inr1_pre2[:len(out_path)] ,"VV_Osti_S1_" + file_inr1_pre2[len(out_path)+name_:])
# # file_inr1_post2= os.path.join(file_inr1_pre2[:len(out_path)] ,"VV_Osti_S1_post_" + file_inr1_pre2[len(out_path)+name_:])
# # dspk_vv = os.path.join(otb_path, "otbcli_Despeckle")
# # cmd_spk = dspk_vv +" -in " + file_inr1_pre + " -filter gammamap -filter.gammamap.rad 3 -out " + file_inr1_pre2
# # os.system(cmd_spk)
# # cmd_spk = dspk_vv +" -in " + file_inr1 + " -filter gammamap -filter.gammamap.rad 3 -out " + file_inr12
# # os.system(cmd_spk)
# # cmd_spk = dspk_vv +" -in " + file_inr1_post + " -filter gammamap -filter.gammamap.rad 3 -out " + file_inr1_post2
# # os.system(cmd_spk)
# # cmd_spk = dspk_vv +" -in " + file_inr5_pre + " -filter gammamap -filter.gammamap.rad 3 -out " + file_inr5_pre2
# # os.system(cmd_spk)
# # cmd_spk = dspk_vv +" -in " + file_inr5 + " -filter gammamap -filter.gammamap.rad 3 -out " + file_inr52
# # os.system(cmd_spk)
# # cmd_spk = dspk_vv +" -in " + file_inr1_post + " -filter gammamap -filter.gammamap.rad 3 -out " + file_inr5_post2
# # os.system(cmd_spk)
| [
"massimiliano.gargiulo@foodealab.com"
] | massimiliano.gargiulo@foodealab.com |
d0845ef3a1cdc83bad5106cf298a5a112ad40978 | d667b878b59a78747c183706b5fb8d32c397e3ed | /ecs/invoke.py | 0df3fae88d1d2bb7af15ed024ce2b3d31e0a2bb9 | [] | no_license | CrCliff/psa-dataset | f8bd23d97e16b23377d2942da09df99b3fb0f83d | 70bf151d6808559fa3f1b9e8783c72e98355c142 | refs/heads/master | 2023-07-17T14:41:22.234418 | 2021-09-04T22:26:32 | 2021-09-04T22:26:32 | 401,169,678 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,214 | py | from typing import Dict, Tuple
import boto3
import time
# Inclusive start / exclusive stop of the file index range to launch.
START=0
STOP=60
# AWS networking and ECS/S3 configuration for the processing tasks.
SUBNETS = ["subnet-1f26da53", "subnet-47113d21"]
SECURITY_GROUPS = ["sg-0ce615b54d6fb23c1"]
ECS_CLUSTER = "arn:aws:ecs:us-east-1:027517924056:cluster/psa-process"
ECS_TASK_DEFINITION = "psa-process"
S3_BUCKET = "psa-dataset"
S3_PREFIX_IN = "raw"
S3_PREFIX_OUT = "processed"
def s3_urls(i: int) -> Tuple[str, str]:
    """Return the (input, output) S3 URLs for file index *i*.

    Files are grouped into zero-padded folders of 100 (0000, 0100, ...).
    """
    folder = (i // 100) * 100
    key = f"{folder:04}/{i:04}.csv"
    return (
        f"s3://{S3_BUCKET}/{S3_PREFIX_IN}/{key}",
        f"s3://{S3_BUCKET}/{S3_PREFIX_OUT}/{key}",
    )
def get_params(s3_in: str, s3_out: str) -> dict:
    """Build the kwargs for ecs.run_task that launch one Fargate task.

    The task reads *s3_in* and writes *s3_out*; both URLs are passed to
    the container via environment overrides and recorded as task tags.
    """
    network_conf = {
        "awsvpcConfiguration": {
            "subnets": SUBNETS,
            "securityGroups": SECURITY_GROUPS,
            "assignPublicIp": "ENABLED",
        }
    }
    container_env = [
        {"name": "S3_IN", "value": s3_in},
        {"name": "S3_OUT", "value": s3_out},
    ]
    overrides = {
        "containerOverrides": [
            {
                "name": "psa-process",
                "environment": container_env,
            }
        ]
    }
    task_tags = [
        {"key": "S3_IN", "value": s3_in},
        {"key": "S3_OUT", "value": s3_out},
    ]
    return {
        "cluster": ECS_CLUSTER,
        "count": 1,
        "enableECSManagedTags": True,
        "enableExecuteCommand": False,
        "launchType": "FARGATE",
        "networkConfiguration": network_conf,
        "overrides": overrides,
        "tags": task_tags,
        "propagateTags": "TASK_DEFINITION",
        "taskDefinition": ECS_TASK_DEFINITION,
    }
if __name__ == "__main__":
    ecs = boto3.client("ecs", region_name="us-east-1")
    # Launch one Fargate task per file index in [START, STOP).
    for i in range(START, STOP):
        s3_in, s3_out = s3_urls(i)
        params = get_params(s3_in, s3_out)
        resp = ecs.run_task(**params)
        print(i, resp)
        # Crude throttle: pause after roughly every 50 launches.
        # NOTE(review): "i % 49 == 0" fires at i=49, 98, ... so the gap
        # between pauses is 49 launches, not exactly 50 -- confirm intent.
        if i != 0 and i % 49 == 0:
            # We can only run 50 tasks concurrently, wait for these to finish
            print(f'Waiting on task {i}...')
            time.sleep(240)
"crcliff@comcast.net"
] | crcliff@comcast.net |
8c98fa28e49a5214073594a1f5ac17aac6c6149c | 57b239fc73dd860026d4c4dba6473b185d4c8327 | /TDjango/wsgi.py | 4196bfe2d37e8fc79f8e4339dd23a2e8015663ba | [] | no_license | wrench1815/TDjango | 389bb3d30ddc437cd9783c0227ad67df66b94901 | d8b66718ea32940faef8e733073f7b62b998fe3f | refs/heads/main | 2023-07-18T18:25:43.546531 | 2021-09-14T08:41:53 | 2021-09-14T08:41:53 | 396,708,597 | 0 | 0 | null | 2021-09-14T08:41:54 | 2021-08-16T09:01:09 | Python | UTF-8 | Python | false | false | 391 | py | """
WSGI config for TDjango project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TDjango.settings')
# Module-level WSGI callable that application servers import and serve.
application = get_wsgi_application()
| [
"hardeepkumar1815@gmail.com"
] | hardeepkumar1815@gmail.com |
38918003f1a0ac70cadd9f6485dadf79698f1cae | 24235130620413b2744e9527928659d46aa01d30 | /src/modules/commentary.py | 692876fc0026628d3c0464b17552e82abf67548c | [
"MIT",
"CC0-1.0"
] | permissive | Rohan-Great/Python-Hand-Cricket | c38e57bac5103d0d4085df56bbe98462cd3ff0cd | 36fc6fe65faa7cf9e6afe9a1102b1aa38aafd2e1 | refs/heads/main | 2023-07-14T14:44:37.056598 | 2021-08-27T04:57:00 | 2021-08-27T04:57:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,995 | py | import random
# Contains the commentary notes.
# Wickets
bowled_commentary = ["bowled, what a beauty! The middle stump is broken",
"bowled. The batter misses, but the ball hits the target.",
"bowled, crashing into the stumps."]
caught_commentary = ["caught, that was a terrific blinder. \
The fielder deserves a round of applause...",
"caught, that was a simple catch to the wicketkeeper.",
"caught, in the air...\
and straight into the hands of a fielder.",
"caught, simple catch to the fielder. \
That was a soft dismissal."]
lbw_commentary = ["LBW, dead plumb, the batter is a goner. \
Three reds and no inside edge, the batter has to leave",
"LBW, right in front of the wickets."]
stumped_commentary = ["stumped!! \
The batter is outside the crease and the bails are whipped off!",
"stumped!! That was quick wicketkeeping.",
"stumped!! \
That's why you shouldn't overstep the batting crease unnecessarily."]
def outCall():
    """Pick a dismissal mode at random and return a commentary line for it."""
    # Two slots each for bowled/caught, one each for lbw/stumped -- the
    # same 2:2:1:1 weighting as the original if/elif ladder, and the same
    # RNG call order (randint first, then choice).
    pools = (
        bowled_commentary,
        bowled_commentary,
        caught_commentary,
        caught_commentary,
        lbw_commentary,
        stumped_commentary,
    )
    roll = random.randint(0, 5)
    return random.choice(pools[roll])
# Runs
commentary_6runs = [", SIX, What a shot! \
That went too far away from the stadium.",
", SIX, into the stands.",
", SIX, over the fielder and out of the park.",
", SIX, this one went over the roof!",
", SIX, flat six! This one was slammed into the stands"]
commentary_5runs = [", 5 runs to the batting side. \
Just a single, but wait...misfield and four.",
", 5 runs to the batting side. Missed run out becomes \
worse for the fielding side as the ball races to the boundary."]
commentary_4runs = [", FOUR! The ball races to the boundary.",
", FOUR! \
The fielders can't stop the ball as it races towards the boundary.",
", FOUR! Slammed towards the ropes!",
", FOUR! One bounce, and into the stands.",
", FOUR! Misfield and four runs."]
def scoreRun(score, bowler, batter):
    """Print one line of commentary for a delivery.

    `score` is a one-character string: '0'-'6' for runs (boundaries get
    a random commentary line) or 'W' for a wicket (delegates to
    outCall()). Any other value prints nothing, as before.
    """
    if score == 'W':
        print(bowler, "to", batter, ", OUT", outCall())
        return
    random_pools = {
        '6': commentary_6runs,
        '5': commentary_5runs,
        '4': commentary_4runs,
    }
    fixed_lines = {
        '3': ", 3 runs",
        '2': ", 2 runs",
        '1': ", 1 run",
        '0': ", NO RUN",
    }
    if score in random_pools:
        print(bowler, "to", batter, random.choice(random_pools[score]))
    elif score in fixed_lines:
        print(bowler, "to", batter, fixed_lines[score])
| [
"noreply@github.com"
] | Rohan-Great.noreply@github.com |
080b7a8f9c3404f88082ce2d1bc92ccbac697ccd | 180bfde53b69f0512ad93f4af4cc353694f6277a | /19day/01-1-100奇偶数函数.py | a0c2f1cd1d000b4076a082e1b3a110d387972398 | [] | no_license | huguowei123/1807 | 377c68ae1e39daee794518ba47024d1091ceae61 | 74b47b4b7d64d9526c8af17b17eadc2e2b24215c | refs/heads/master | 2020-03-23T22:03:30.512353 | 2018-08-23T09:31:22 | 2018-08-23T09:31:22 | 142,150,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | def introduce():
for i in range(1,101):
if i%2 == 0:
print("%d是偶数"%i)
else:
print("%d是奇数"%i)
introduce()
| [
"1156800122@qq.com"
] | 1156800122@qq.com |
537812e32367a2bd0e450ad4abc43309c4eed96b | aa8a1e46432a49338868624d749fd0fc3a033331 | /b_nonvat_re_test.py | b6e6cbeb8c582e87eba17f87298a167ff5ad505c | [] | no_license | suamafafa/plantdisease | db8d17d8e1dc27e5c58e3572ff4e4c15efd29427 | 1106fbe3ed533b962e9df3698aa7e6f468373cc8 | refs/heads/master | 2020-04-01T13:12:30.198703 | 2018-11-24T13:24:06 | 2018-11-24T13:24:06 | 151,208,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,683 | py | #nonvat=normal for test
#numpy, placeholder
import tensorflow as tf
import numpy as np
import pandas as pd
import datetime
import time
import os
import glob
import math
import argparse
import sys
import random
import cv2
np.set_printoptions(threshold=np.inf)
parser = argparse.ArgumentParser()
parser.add_argument("--load_model", action='store_true', help="test is do --load_model")
parser.add_argument("--load_model_path", default=None, help="path for checkpoint")
parser.add_argument("--augm", action='store_true', help="augmentation is do")
parser.add_argument("--save_dir", help="path for save the model and logs")
parser.add_argument("--batch_size", type=int, default=32, help="batch size")
parser.add_argument("--epoch", type=int, help="epoch")
parser.add_argument("--print_loss_freq", type=int, default=500, help="print loss epoch frequency")
parser.add_argument("--dropout", type=float, default=0.5, help="dropout_rate. test: 0.0, train=0.2")
parser.add_argument("--nclass", type=int)
parser.add_argument("--model", help="inception, resnet")
parser.add_argument("--gpu_config", default=0, help="0:gpu0, 1:gpu1, -1:both")
a = parser.parse_args()
for k, v in a._get_kwargs():
print(k, "=", v)
import tensorflow_hub as hub
if a.model == "inception":
model_size = 299
module = hub.Module("https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1", trainable=False)
elif a.model == "resnet":
model_size = 224
module = hub.Module("https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/1", trainable=False)
#config
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
if a.gpu_config == '0':
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True, visible_device_list='0'))
elif a.gpu_config == '1':
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True, visible_device_list='1'))
start_time = time.time()
print("start time : " + str(start_time))
#params
csv_name = 'tomato_df_train_random.csv'
csv = pd.read_csv(csv_name, header=None)
#test_csv_name = 'tomato_test_only_tomato.csv'
test_csv_name = 'tomato_df_test_random.csv'
test_csv = pd.read_csv(test_csv_name, header=None)
#path col=0
#label col=4
sample_size = csv.shape[0]
n_class = len(np.unique(csv[4]))
seedd = 1141919
#function
def ransu(k):
    """Return a uniform random integer in [0, k)."""
    upper = k
    return np.random.randint(0, upper)
def ransu2(k):
    """Return a uniform random integer in [-k, k)."""
    low, high = -k, k
    return np.random.randint(low, high)
def afine(img, k=50):
    """Apply a random perspective warp to a 256x256 image.

    Each corner of the 256x256 quad is jittered by up to +/-k pixels
    (via ransu2) and the image is warped onto the displaced quad.

    NOTE(review): assumes img is a 256x256 3-channel array -- confirm
    callers resize before warping.
    """
    rows, cols, ch = img.shape
    pts1 = np.float32([[0, 0], [0, 256], [256, 0], [256, 256]])
    lt = [ransu2(k), ransu2(k)]
    rt = [256 - ransu2(k), ransu2(k)]
    lb = [ransu2(k), 256 - ransu2(k)]
    rb = [256 - ransu2(k), 256 - ransu2(k)]
    pts2 = np.float32([lt, lb, rt, rb])
    M = cv2.getPerspectiveTransform(pts1, pts2)
    # Bug fix: the original warped an undefined name `im` (NameError at
    # runtime); warp the `img` argument instead.
    dst = cv2.warpPerspective(img, M, (256, 256))
    return dst
def moment(matrix):
    """Return the (x, y) centroid of non-background pixels of a 256x256 image.

    A pixel is background when its channel sum is < 30; the centroid is
    computed from image moments of the resulting 0/255 binary mask.
    Raises ZeroDivisionError when every pixel is background (m00 == 0),
    same as the original.
    """
    px = np.asarray(matrix)[:256, :256]
    # Vectorized replacement of the original per-pixel Python loop:
    # 0 where the channel sum is below the threshold, 255 elsewhere.
    mask = np.where(px.sum(axis=2) < 30, 0.0, 255.0)
    mu = cv2.moments(mask, False)
    x, y = int(mu["m10"] / mu["m00"]), int(mu["m01"] / mu["m00"])
    return x, y
def rotation(img, center, angle, scale):
    """Rotate `img` by `angle` degrees about a randomly jittered center.

    The given center is shifted by a random offset in [0, 30) pixels on
    each axis (via ransu); the result is rendered on a 256x256 canvas.
    """
    center = tuple(np.array(center)+(ransu(30),ransu(30)))
    rotation_matrix = cv2.getRotationMatrix2D(center, angle, scale)
    img_dst = cv2.warpAffine(img, rotation_matrix, (256,256))
    return img_dst
def makemask(matrix):
    """Return a 256x256x3 float mask of `matrix`.

    A pixel is black ([0,0,0]) when its channel sum is < 30 (treated as
    "no object"), white ([255,255,255]) otherwise -- same rule as the
    original, but vectorized instead of a per-pixel Python loop.
    """
    px = np.asarray(matrix)[:256, :256]
    dark = px.sum(axis=2) < 30
    plane = np.where(dark, 0.0, 255.0)
    # Replicate the single-channel mask into the three channels.
    return np.repeat(plane[:, :, None], 3, axis=2)
def overlay(foreground, background):
    """Alpha-composite `foreground` over `background`.

    The alpha matte comes from makemask(): pixels whose channel sum is
    < 30 are treated as transparent. Both inputs are assumed to be
    same-shaped 256x256x3 images -- TODO confirm callers. Returns uint8.
    """
    # Convert uint8 to float
    foreground = foreground.astype(float)
    background = background.astype(float)
    mask = makemask(foreground)
    # Normalize the alpha mask to keep intensity between 0 and 1
    mask = mask.astype(float)/255
    # Multiply the foreground with the alpha matte
    foreground = cv2.multiply(mask, foreground)
    # Multiply the background with ( 1 - alpha )
    background = cv2.multiply((1-mask), background)
    # Add the masked foreground and background.
    outImage = cv2.add(foreground, background)
    outImage = outImage.astype('uint8')
    return outImage
def np_loader(csv, idxs):
    """Load images and one-hot labels for the given dataframe rows.

    Args:
        csv: an already-read pandas DataFrame (shadows the module-level
            `csv`); column 0 holds image paths, column 4 integer labels.
        idxs: iterable of row indices to load.

    Returns:
        (imgs, labels): RGB float32 images scaled to [0, 1] and resized
        to (model_size, model_size), plus one-hot vectors of length
        n_class (both names are module-level globals).
    """
    imgs = []
    labels = []
    for idx in idxs:
        img = cv2.imread(csv.iloc[idx, 0])
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (model_size, model_size))
        # Bug fix: the original appended to an undefined name `images`.
        imgs.append(img.astype(np.float32) / 255.0)
        tmp = np.zeros(n_class)
        # Bug fix: the original indexed with the undefined name `idex`.
        tmp[int(csv.iloc[idx, 4])] = 1
        labels.append(tmp)
    return imgs, labels
#--------------ImageLoad-----------------#
with tf.name_scope('LoadImage'):
filename_queue = tf.train.string_input_producer([csv_name], shuffle=True)
reader = tf.TextLineReader()
_, val = reader.read(filename_queue)
record_defaults = [["a"], ["a"], [0], ["a"], [0], [0]]
path, _, _, _, label, _ = tf.decode_csv(val, record_defaults=record_defaults)
readfile = tf.read_file(path)
image = tf.image.decode_jpeg(readfile, channels=3)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = tf.cast(image, dtype=np.float32)
image = tf.image.resize_images(image, (model_size, model_size))
label = tf.one_hot(label, depth=n_classes)
label_batch, x_batch = tf.train.batch([label, image],batch_size=a.batch_size, allow_smaller_final_batch=False)
label_batch = tf.cast(label_batch, dtype=np.float32)
test_filename_queue = tf.train.string_input_producer([test_csv_name], shuffle=False)
test_reader = tf.TextLineReader()
_, test_val = test_reader.read(test_filename_queue)
record_defaults = [["a"], ["a"], [0], ["a"], [0], [0]]
test_path, _, _, _, test_label, _ = tf.decode_csv(test_val, record_defaults=record_defaults)
test_readfile = tf.read_file(test_path)
test_image = tf.image.decode_jpeg(test_readfile, channels=3)
test_image = tf.image.convert_image_dtype(test_image, dtype=tf.float32)
test_image = tf.cast(test_image, dtype=np.float32)
test_image = tf.image.resize_images(test_image, (model_size, model_size))
test_label = tf.one_hot(test_label, depth=n_classes)
test_label_batch, test_x_batch = tf.train.batch([test_label, test_image],batch_size=a.batch_size, allow_smaller_final_batch=False)
test_label_batch = tf.cast(test_label_batch, dtype=np.float32)
am_testing = tf.placeholder(dtype=bool,shape=())
data = tf.cond(am_testing, lambda:test_x_batch, lambda:x_batch)
label = tf.cond(am_testing, lambda:test_label_batch, lambda:label_batch)
#--------------Model-----------------#
#QQQ
#with tf.variable_scope('def_model', reuse=tf.AUTO_REUSE)
def model(data):
    """Classification head on top of the TF-Hub feature extractor.

    `module`, `drop` and `n_class` are module-level globals.
    NOTE(review): no `drop` placeholder is defined anywhere in this
    script even though it is fed in every session.run -- confirm.
    """
    logits_ = tf.layers.dense(inputs=module(data), units=1000)
    dropout_ = tf.layers.dropout(inputs=logits_, rate=drop)
    logits = tf.layers.dense(inputs= dropout_, units=n_class)
    out = tf.nn.softmax(logits)
    return out
with tf.name_scope('model'):
with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
y = model(data)
#--------------Loss&Opt-----------------#
with tf.name_scope("cost"):
cost = -tf.reduce_mean(tf.reduce_sum(label*tf.log(y), axis=[1]))
with tf.name_scope("opt"):
#trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "trainable_section")
trainable_vars = [var for var in tf.trainable_variables()]
adam = tf.train.AdamOptimizer(0.0002,0.5)
gradients_vars = adam.compute_gradients(cost, var_list=trainable_vars)
train_op = adam.apply_gradients(gradients_vars)
def Accuracy(y, label):
    """Mean accuracy: fraction of rows where argmax(y) == argmax(label)."""
    correct_pred = tf.equal(tf.argmax(y,1), tf.argmax(label,1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    return accuracy
with tf.name_scope("accuracy"):
accuracy = Accuracy(y, label)
#--------------Summary-----------------#
with tf.name_scope('summary'):
with tf.name_scope('image_summary'):
tf.summary.image('image', tf.image.convert_image_dtype(data, dtype=tf.uint8, saturate=True), collections=['train'])
tf.summary.image('image2', data, collections=['train'])
tf.summary.image('image3', tf.image.convert_image_dtype(data*255.0, dtype=tf.uint8, saturate=True), collections=['train'])
with tf.name_scope("train_summary"):
cost_summary_train = tf.summary.scalar('train_loss', cost, collections=['train'])
acc_summary_train = tf.summary.scalar("train_accuracy", accuracy, collections=['train'])
with tf.name_scope("test_summary"):
acc_summary_test = tf.summary.scalar("test_accuracy", accuracy)
for var in tf.trainable_variables():
var_summary = tf.summary.histogram(var.op.name + '/Variable_histogram', var, collections=['train'])
for grad, var in gradients_vars:
grad_summary = tf.summary.histogram(var.op.name + '/Gradients', grad, collections=['train'])
#---------------Session-----------------#
init = tf.global_variables_initializer()
#saver = tf.train.Saver()
tmp_config = tf.ConfigProto(
gpu_options=tf.GPUOptions(
visible_device_list="1",
allow_growth = True
)
)
saver = tf.train.Saver()
with tf.Session(config=tmp_config) as sess:
if a.load_model is not True:
if not os.path.exists(a.save_dir):
os.mkdir(a.save_dir)
os.mkdir(os.path.join(a.save_dir,'summary'))
os.mkdir(os.path.join(a.save_dir,'model'))
sess.run(init)
print(trainable_vars)
print("Session Start")
print("")
merged = tf.summary.merge_all(key="train")
summary_writer = tf.summary.FileWriter(os.path.join(a.save_dir,'summary'), graph=sess.graph)
graph = tf.get_default_graph()
placeholders = [ op for op in graph.get_operations() if op.type == "Placeholder"]
print("placeholder", placeholders)
step = 0
for epo in range(a.epoch):
for i in range(sample_size//a.batch_size):
sess.run(train_op, feed_dict={am_testing: False, drop:a.dropout})
if step % a.print_loss_freq == 0:
print(step)
train_acc = sess.run(accuracy, feed_dict={am_testing: False, drop:0.0})
print("train accuracy", train_acc)
summary_writer.add_summary(sess.run(merged, feed_dict={data:train_imgs, label:train_labels, drop:0.0}), step)
step_num = -(-test_csv.shape[0]//a.batch_size)
tmp_acc = 0
for i in range(step_num):
tmp_acc += sess.run(accuracy, feed_dict={am_testing: True, drop:0.0})
test_acc = tmp_acc/step_num
print('test_acc', test_acc)
summary_writer.add_summary(tf.Summary(value=[
tf.Summary.Value(tag="test_summary/test_accuracy", simple_value=test_acc)]), step)
if step % 500 == 0:
# SAVE
saver.save(sess, a.save_dir + "/model/model.ckpt")
step += 1
saver.save(sess, a.save_dir + "/model/model.ckpt")
print('saved at '+ a.save_dir)
else:
print("a.load_model True")
end_time = time.time()
print( 'time : ' + str(end_time - start_time))
| [
"suamandfafa@outlook.jp"
] | suamandfafa@outlook.jp |
a9f48ff59e26c73c144f019cb99abf392b22dbf2 | 220c56f115a40b2e5050341f01d1dd3c771526b0 | /rawsocket_local_db.py | e55be98ad6c6c71abd949dc507be45b60faf5c3b | [] | no_license | sqlcyi2008/baibao | 36aea53e5925d02563225ccd006977d7a044ca54 | adf3484b1e6005aa715196ca50655469189f3d0f | refs/heads/master | 2021-01-21T14:27:58.641585 | 2017-11-14T02:48:06 | 2017-11-14T02:48:06 | 95,283,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | # coding:utf-8
import socket
import dpkt
# Raw-socket sniffer (Windows-only: uses SIO_RCVALL) that prints the
# source IP and destination port of every captured IP packet, plus the
# first request line of HTTP-looking payloads sent to port 3306.
# Host IP to listen on (the original assignment was mangled into this
# comment): host = "192.168.1.100"
socket_protocol = socket.IPPROTO_IP
sniffer = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket_protocol)
sniffer.bind(("127.0.0.1", 0))
# Include IP headers in the captured data.
sniffer.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
# receive all packages
sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)
try:
    while True:
        raw_buffer = sniffer.recvfrom(65535)[0]
        ipp = dpkt.ip.IP(raw_buffer)
        # NOTE(review): decoding raw address bytes and mapping ord() is
        # fragile on Python 3 -- socket.inet_ntoa(ipp.src) is the usual
        # form; confirm before relying on this output.
        ip = '%d.%d.%d.%d' % tuple(map(ord, list(ipp.src.decode())))
        print(ip+":"+str(ipp.data.dport))
        # NOTE(review): port 3306 is MySQL, yet the payload is parsed as
        # HTTP (GET/POST) -- presumably a leftover from an HTTP sniffer.
        if ipp.data.__class__.__name__ == 'TCP' and ipp.data.dport == 3306:
            tcp = ipp.data.data.decode()
            if tcp.startswith('GET') or tcp.startswith('POST'):
                print(tcp.splitlines()[0])
except KeyboardInterrupt:
    pass
# disabled promiscuous mode
sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF) | [
"123438115@qq.com"
] | 123438115@qq.com |
7602753b4108477e5072369a11d3453b2d9ccb2e | 23b5576bd5f5d896345c92b6ad35cd1755b12c6d | /dimensionalquantity/dimensional.py | 7bf86bbf75983148caa703dcf73d99fb98461599 | [
"MIT"
] | permissive | stefantkeller/dimensionalquantity | 6c8aeda90076f3ad03775d6df0e96272a273962b | 544c5804dc415629056793ea691ece645adf9695 | refs/heads/master | 2021-01-21T14:40:05.924262 | 2017-10-28T17:06:11 | 2017-10-28T17:06:11 | 95,323,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,881 | py | #! /usr/bin/python3
# -*- coding: utf-8 -*-
"""
This file defines the class Dimensional.
This class is at the heart of the concept behind dimensionalquantity:
each dimension -- as a name -- is represented by a dictionary key,
with the corresponding value being the exponent of how often a certain dimension is referred to.
For example, a volume is given by a qubic length.
Hence, the Dimensional of a volume is represented by {'L':3}.
This representation is simple enough and doesn't need its own class.
The class Dimensional provides the functionality
to add, subtract, multiply, etc. instances of Dimensional.
"""
from functools import wraps
# wrapper for Dimensional operations (such as __add__, __sub__) to
# i. make code more readable by putting reoccuring stuff here,
# ii. make behavior more intuitive (Dimensional looks like a dict,
# so it should at least run silently for dicts),
# iii. report sensible error messages
def compatible_with_operation(operation='<undefined>'):
    """Decorator factory for Dimensional binary operators.

    `operation` is the operator symbol ('+', '-', ...) used only in the
    TypeError message. The returned decorator retries the wrapped
    method with `other` converted to Dimensional when `other` is a
    plain dict (or dict subclass), and raises a conventional TypeError
    otherwise.

    NOTE(review): `**kwargs` accepted by the wrapper is silently
    discarded and never forwarded to `method` -- confirm intended.
    """
    def decorate_specified_operation(method):
        @wraps(method)
        def decorated(self, other, **kwargs):
            try:
                return method(self, other)
            except (KeyError, AttributeError):
                # while technically an error, for the user there is no real difference
                # between a Dimensional and a dict or ordered_dict or default_dict, etc.
                # hence KeyError is easy to fix:
                # input 'other' has to be converted into Dimensional
                # (if compatible with dict)
                # otherwise the 'KeyError' is actually a TypeError
                # note: isinstance(other, dict) is True also derived instances
                # such as defaultdict or OrderedDict
                if isinstance(other, dict):
                    return method(self, Dimensional(other))
                else:
                    raise TypeError(''.join(['unsupported operand type(s) for {}:'.format(operation),
                                             ' \'{}\' and \'{}\''.format(type(self).__name__,
                                                                         type(other).__name__)]))
        return decorated
    return decorate_specified_operation
class Dimensional(dict):
    """Base class for working with dimensions.

    Args:
        Any valid dictionary argument.
        The keys represent the name of the dimension,
        while the values how often a certain dimension is referred to
        (its exponent)."""

    def __getitem__(self, key):
        # Missing dimensions read as exponent 0 instead of raising KeyError.
        return super(Dimensional, self).get(key, 0)

    @compatible_with_operation('+')
    def __add__(self, other):
        # Adding dimensions adds the exponents per key.
        return Dimensional({key: self[key] + other[key] for key in set(self.keys()).union(other.keys())})

    @compatible_with_operation('+')
    def __radd__(self, other):
        return self.__add__(other)

    @compatible_with_operation('-')
    def __sub__(self, other):
        return Dimensional({key: self[key] - other[key] for key in set(self.keys()).union(other.keys())})

    @compatible_with_operation('-')
    def __rsub__(self, other):
        return Dimensional({key: other[key] - self[key] for key in set(self.keys()).union(other.keys())})

    def __mul__(self, other):
        """Scale every exponent by a number (e.g. L**1 * 3 -> L**3)."""
        if isinstance(other, (int, float, complex)):
            return Dimensional({key: other * value for key, value in self.items()})
        else:
            # Bug fix: the message previously reported '/' although this
            # is the '*' operator.
            raise TypeError(''.join(['unsupported operand type(s) for *:',
                                     ' \'{}\' and \'{}\''.format(type(self).__name__,
                                                                 type(other).__name__)]))

    def __rmul__(self, other):
        return self * other

    def __repr__(self):
        """default (because derived from dict): {'a':1, 'b':2, ...}"""
        return 'Dimensional({})'.format(super(Dimensional, self).__repr__())
| [
"stefantkeller@gmail.com"
] | stefantkeller@gmail.com |
95292dbab6b727fc93cbd5ed860178fecee84ca4 | 752116ef4b69a3049fef0cfe9b3d212548cc81b1 | /sources/actions/watch/describe.py | ef16f46eb7e4fe787faa620233f6f13455fd54fb | [] | no_license | VDOMBoxGroup/runtime2.0 | e54af4af7a642f34b0e07b5d4096320494fb9ae8 | cb9932f5f75d5c6d7889f26d58aee079b4127299 | refs/heads/develop | 2023-07-07T11:06:10.817093 | 2023-07-03T06:11:55 | 2023-07-03T06:11:55 | 62,622,255 | 0 | 12 | null | 2023-05-23T02:55:00 | 2016-07-05T09:09:48 | Python | UTF-8 | Python | false | false | 4,418 | py |
from logs import console
from utils.structure import Structure
from utils.parsing import VALUE, Parser, ParsingException
from ..auxiliary import section, show
from .auxiliary import query
# Request template; %s is filled with the <option> elements below.
REQUEST = "<action name=\"describe\">%s</action>"
SOURCE_OBJECTS_OPTION = "<option name=\"source\">objects</option>"
SOURCE_GARBAGE_OPTION = "<option name=\"source\">garbage</option>"
SOURCE_CHANGES_OPTION = "<option name=\"source\">changes</option>"
FILTER_BY_SERVER_OPTION = "<option name=\"filter\">server</option>"
# Sentinel values, compared by identity in run(); fixed the original
# "COUNTYER" typo (the text is never parsed, only used as a marker).
SORT_BY_NAME = "SORT BY NAME"
SORT_BY_COUNTER = "SORT BY COUNTER"
# Accepted spellings for the --sort CLI option.
SORT_VALUES = {
    "n": SORT_BY_NAME,
    "name": SORT_BY_NAME,
    "c": SORT_BY_COUNTER,
    "counter": SORT_BY_COUNTER
}
ORDER_BY_ASCENDING = "ORDER BY ASCENDING"
ORDER_BY_DESCENDING = "ORDER BY DESCENDING"
# Accepted spellings for the --order CLI option.
ORDER_VALUES = {
    "a": ORDER_BY_ASCENDING,
    "asc": ORDER_BY_ASCENDING,
    "ascending": ORDER_BY_ASCENDING,
    "d": ORDER_BY_DESCENDING,
    "desc": ORDER_BY_DESCENDING,
    "descending": ORDER_BY_DESCENDING
}
def sort_by_name(x):
    """Sort key: the entry's name (element 0)."""
    name = x[0]
    return name
def sort_by_counter(x):
    # Sort key: (counter, -secondary counter, name).
    # NOTE(review): this expects 3-tuples (name, counter, counter2),
    # but builder() below collects (name, subgroup) pairs -- confirm
    # the entry shape before relying on sort-by-counter.
    return x[1], -x[2], x[0]
def builder(parser):
    """Build the nested handler tree for parsing a <reply> document.

    Each nested function mirrors one XML element (named in the
    surrounding comments); `parser` drives them as generators/handlers
    and the collected (name, [(object, value), ...]) entries are handed
    back via parser.accept() wrapped in a Structure.
    """
    # <reply>
    def reply():
        result = Structure(entries=None)
        # <descriptions>
        def descriptions():
            result.entries = []
            # <subgroup>
            def subgroup(name):
                # Deliberately shadows the function name: this list is
                # what the <description> handler below appends to.
                subgroup = []
                result.entries.append((name, subgroup))
                # <description>
                def description(object):
                    value = yield VALUE
                    subgroup.append((object, value))
                # </description>
                return description
            # </subgroup>
            return subgroup
        # </descriptions>
        yield descriptions
        parser.accept(result)
    # </reply>
    return reply
def run(address=None, port=None, timeout=None,
        all=False, sort=None, order=None, limit=None,
        objects=False, garbage=False, changes=False):
    """
    describe server object changes
    :param address: specifies server address
    :key int port: specifies server port
    :key float timeout: specifies timeout to wait for reply
    :key switch all: disable objects filtering
    :key sort: sort entries by "name" or by "counter"
    :key order: sort entries "asc"ending or "desc"ending
    :key int limit: limit output
    :key switch objects: use all objects
    :key switch garbage: use objects from garbage
    :key switch changes: use changes
    """
    # NOTE: `all`, `sort` and `object` (below) shadow builtins; kept as
    # part of the established CLI keyword interface.
    try:
        # At most one of the three source selectors may be set.
        if sum((objects, garbage, changes)) > 1:
            raise Exception("Options \"objects\", \"garbage\" and \"changes\" are mutually exclusive")
        # Normalize CLI spellings to the module-level sentinel constants;
        # sorting by counter defaults to descending order.
        sort = SORT_VALUES.get((sort or "").lower(), SORT_BY_NAME)
        if sort is SORT_BY_COUNTER and order is None:
            order = "desc"
        order = ORDER_VALUES.get((order or "").lower(), ORDER_BY_ASCENDING)
        # Assemble the <option> payload for the describe request.
        options = "".join(filter(None, (
            SOURCE_OBJECTS_OPTION if objects else None,
            SOURCE_GARBAGE_OPTION if garbage else None,
            SOURCE_CHANGES_OPTION if changes else None,
            None if all else FILTER_BY_SERVER_OPTION,)))
        request = REQUEST % options
        message = query("describe objects", address, port, request, timeout=timeout)
        parser = Parser(builder=builder, notify=True, supress=True)
        result = parser.parse(message)
        if not result:
            raise Exception("Incorrect response")
    except ParsingException as error:
        console.error("unable to parse, line %s: %s" % (error.lineno, error))
    except Exception as error:
        console.error(error)
    else:
        # Render the sorted (and optionally truncated) entries.
        console.write()
        with section("objects"):
            if result.entries:
                key = sort_by_counter if sort is SORT_BY_COUNTER else sort_by_name
                reverse = order is ORDER_BY_DESCENDING
                entries = sorted(result.entries, key=key, reverse=reverse)
                if limit is not None:
                    entries = entries[:limit]
                for name, subgroup in entries:
                    with section(name):
                        for object, description in subgroup:
                            with section(object, lazy=False):
                                # Descriptions are " < "-separated chains.
                                for part in description.split(" < "):
                                    show(part, longer=True)
            else:
                show("no objects")
| [
"nikolay.grishkov@vdombox.ru"
] | nikolay.grishkov@vdombox.ru |
87cee6b3fc7d259b87a0cb05ee0fee88ed14e10f | 9ae2d337cbfa56768580187cc507f9c3c4ace1a8 | /test/test_meshzoo.py | abc4f1e9a37a6ea0c9ac22e5aea80e860fa44c3f | [
"MIT"
] | permissive | tongluocq/meshzoo | 5a734012e02f70bdf37147a3520b733f5095da02 | 46d3a999b7537fdcea92cd19ae53920b8639b0b3 | refs/heads/master | 2020-09-26T11:21:16.086387 | 2019-10-16T16:36:25 | 2019-10-16T16:36:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,706 | py | import numpy
import pytest
import meshzoo
from helpers import _near_equal
def test_cube():
points, cells = meshzoo.cube()
assert len(points) == 1331
assert len(cells) == 5000
points, cells = meshzoo.cube(nx=3, ny=3, nz=3)
assert len(points) == 27
assert all(numpy.sum(points, axis=0) == [13.5, 13.5, 13.5])
assert len(cells) == 40
def test_hexagon():
points, cells = meshzoo.hexagon(2)
assert len(points) == 61
assert _near_equal(numpy.sum(points, axis=0), [0.0, 0.0, 0.0])
assert len(cells) == 96
@pytest.mark.parametrize(
"num_twists, num_points, num_cells, ref1, ref2",
[
[1, 5890, 11400, [0, 0, 0], [2753575 / 9.0, 2724125 / 9.0, 58900 / 3.0]],
[2, 5890, 11400, [0, 0, 0], [2797750 / 9.0, 2679950 / 9.0, 58900 / 3.0]],
],
)
def test_moebius(num_twists, num_points, num_cells, ref1, ref2):
points, cells = meshzoo.moebius(num_twists, 190, 31, mode="smooth")
assert len(points) == num_points
assert len(cells) == num_cells
assert _near_equal(numpy.sum(points, axis=0), ref1, tol=1.0e-10)
sum_points2 = numpy.sum(points ** 2, axis=0)
assert numpy.allclose(sum_points2, ref2, rtol=1.0e-12, atol=0.0)
@pytest.mark.parametrize(
"num_twists, num_points, num_cells, ref1, ref2",
[
[
1,
5700,
11020,
[0, 0, 0],
[[296107.21982759, 292933.72844828, 19040.94827586]],
],
[
2,
5700,
11020,
[0, 0, 0],
[[300867.45689655, 288173.49137931, 19040.94827586]],
],
],
)
def test_moebius2(num_twists, num_points, num_cells, ref1, ref2):
points, cells = meshzoo.moebius(nl=190, nw=30, num_twists=num_twists, mode="smooth")
assert len(points) == num_points
assert len(cells) == num_cells
assert _near_equal(numpy.sum(points, axis=0), ref1, tol=1.0e-10)
sum_points2 = numpy.sum(points ** 2, axis=0)
assert numpy.allclose(sum_points2, ref2, rtol=1.0e-12, atol=0.0)
@pytest.mark.parametrize(
"num_twists, num_points, num_cells, ref1, ref2",
[
[1, 1000, 1800, [0, 0, 0], [1418750 / 27.0, 1418750 / 27.0, 137500 / 27.0]],
[2, 1000, 1800, [0, 0, 0], [484375 / 9.0, 1384375 / 27.0, 137500 / 27.0]],
],
)
def test_moebius3(num_twists, num_points, num_cells, ref1, ref2):
points, cells = meshzoo.moebius(num_twists, 100, 10, mode="classical")
assert len(points) == num_points
assert len(cells) == num_cells
assert _near_equal(numpy.sum(points, axis=0), ref1, tol=1.0e-10)
sum_points2 = numpy.sum(points ** 2, axis=0)
assert numpy.allclose(sum_points2, ref2, rtol=1.0e-12, atol=0.0)
def test_pseudomoebius():
points, cells = meshzoo.moebius(nl=190, nw=31, mode="pseudo")
assert len(points) == 5890
assert len(cells) == 11400
assert _near_equal(numpy.sum(points, axis=0), [0, 0, 0], tol=1.0e-10)
sum_points2 = numpy.sum(points ** 2, axis=0)
ref2 = [2753575 / 9.0, 2724125 / 9.0, 58900 / 3.0]
assert numpy.allclose(sum_points2, ref2, rtol=1.0e-12, atol=0.0)
def test_rectangle():
points, cells = meshzoo.rectangle(nx=11, ny=11, zigzag=False)
assert len(points) == 121
assert _near_equal(numpy.sum(points, axis=0), [60.5, 60.5, 0.0])
assert len(cells) == 200
points, cells = meshzoo.rectangle(nx=11, ny=11, zigzag=True)
assert len(points) == 121
assert _near_equal(numpy.sum(points, axis=0), [60.5, 60.5, 0.0])
assert len(cells) == 200
points, cells = meshzoo.rectangle(nx=2, ny=2, zigzag=True)
assert len(points) == 4
assert _near_equal(numpy.sum(points, axis=0), [2.0, 2.0, 0.0])
assert len(cells) == 2
points, cells = meshzoo.rectangle(nx=3, ny=2, zigzag=False)
assert len(points) == 6
assert _near_equal(numpy.sum(points, axis=0), [3.0, 3.0, 0.0])
assert len(cells) == 4
assert set(cells[0]) == set([0, 1, 4])
assert set(cells[2]) == set([0, 3, 4])
points, cells = meshzoo.rectangle(nx=3, ny=2, zigzag=True)
assert len(points) == 6
assert _near_equal(numpy.sum(points, axis=0), [3.0, 3.0, 0.0])
assert len(cells) == 4
assert set(cells[0]) == set([0, 1, 4])
assert set(cells[2]) == set([0, 3, 4])
def test_simple_arrow():
points, cells = meshzoo.simple_arrow()
assert len(points) == 5
assert _near_equal(numpy.sum(points, axis=0), [7.0, 0.0, 0.0])
assert len(cells) == 4
def test_simple_shell():
points, cells = meshzoo.simple_shell()
assert len(points) == 5
assert _near_equal(numpy.sum(points, axis=0), [0.0, 0.0, 1.0])
assert len(cells) == 4
def test_triangle():
points, cells = meshzoo.triangle(4)
assert len(points) == 15
assert _near_equal(numpy.sum(points, axis=0), [0.0, 0.0, 0.0])
assert len(cells) == 16
def test_tube():
points, cells = meshzoo.tube(n=10)
assert len(points) == 20
assert _near_equal(numpy.sum(points, axis=0), [0.0, 0.0, 0.0])
assert len(cells) == 20
def test_plot2d():
points, cells = meshzoo.triangle(4)
meshzoo.show2d(points, cells)
# def test_ball():
# points, cells = meshzoo.meshpy.ball.create_ball_mesh(10)
# assert len(points) == 1360
# assert len(cells) == 5005
#
#
# def test_cube():
# points, cells = meshzoo.meshpy.cube.create_mesh(10)
# assert len(points) == 50
# assert len(cells) == 68
#
#
# def test_ellipse():
# points, cells = meshzoo.meshpy.ellipse.create_mesh(0.5, 1, 100)
# assert len(points) == 1444
# assert len(cells) == 2774
#
#
# def test_lshape():
# points, cells = meshzoo.meshpy.lshape.create_mesh()
# assert len(points) == 38
# assert len(cells) == 58
#
#
# def test_lshape3d():
# points, cells = meshzoo.meshpy.lshape3d.create_mesh()
# assert len(points) == 943
# assert len(cells) == 3394
#
#
# def test_pacman():
# points, cells = meshzoo.meshpy.pacman.create_pacman_mesh()
# assert len(points) == 446
# assert len(cells) == 831
#
#
# def test_rectangle():
# points, cells = meshzoo.meshpy.rectangle.create_mesh()
# assert len(points) == 88
# assert len(cells) == 150
#
#
# def test_rectangle_with_hole():
# points, cells = meshzoo.meshpy.rectangle_with_hole.create_mesh()
# assert len(points) == 570
# assert len(cells) == 964
#
#
# def test_tetrahedron():
# points, cells = meshzoo.meshpy.tetrahedron.create_tetrahedron_mesh()
# assert len(points) == 604
# assert len(cells) == 1805
#
#
# def test_torus():
# points, cells = meshzoo.meshpy.torus.create_mesh()
# assert len(points) == 921
# assert len(cells) == 2681
# Disable for now since Gmsh doesn't pass for the version installed on travis
# (trusty).
# def test_screw():
# points, cells = meshzoo.pygmsh.screw.create_screw_mesh()
# assert len(points) == 2412
# assert len(cells) == 7934
# Disable for now since we need mshr in a dev version for mshr.Extrude2D
# def test_toy():
# points, cells = meshzoo.mshr.toy.create_toy_mesh()
# assert len(points) == 2760
# assert len(cells) == 11779
# if __name__ == '__main__':
# test_plot2d()
# # import meshio
# # points_, cells_ = meshzoo.triangle(7)
# # meshio.write('triangle.vtu', points_, {'triangle': cells_})
# # points_, cells_ = meshzoo.cube()
# # meshio.write('cube.vtu', points_, {'tetra': cells_})
def test_edges():
    # 2-subdivision reference triangle: 6 nodes, 4 cells; check the
    # derived edge->node and cell->edge connectivity exactly.
    _, cells = meshzoo.triangle(2)
    edges_nodes, edges_cells = meshzoo.create_edges(cells)
    assert numpy.all(
        edges_nodes
        == [[0, 1], [0, 3], [1, 2], [1, 3], [1, 4], [2, 4], [3, 4], [3, 5], [4, 5]]
    )
    assert numpy.all(edges_cells == [[3, 1, 0], [5, 4, 2], [6, 3, 4], [8, 7, 6]])
| [
"nico.schloemer@gmail.com"
] | nico.schloemer@gmail.com |
ad8d284ca702cbca876636308f2827bf8ad23093 | 735dfed95f1440d6d846f30881e37ad1b6e0ea78 | /JiHyeok/프로그래머스 레벨1/최대공약수와 최소공배수.py | cd0a02a8e9218b003f4a6736cd7ad938ac93ea86 | [] | no_license | SunivAlgo/Algorithm | bb5814bf19aa4059a5b7e506c992b41bc62bd2ec | 71d2e568153fbfd7cb16085366fac3927e1e2c54 | refs/heads/main | 2023-04-13T10:47:31.463661 | 2021-04-23T05:07:20 | 2021-04-23T05:07:20 | 332,732,134 | 1 | 0 | null | 2021-01-25T16:30:40 | 2021-01-25T11:58:09 | Python | UTF-8 | Python | false | false | 460 | py | def solution(n, m):
answer = []
temp = 0
min = 1 # 최대공약수
max = 1 # 최소공배수
if(n > m):
temp = n
n =m
m = temp
a = n
b = m
c = 2
while c <= a :
if (a % c == 0) & (b % c == 0) :
min *= c
a = a / c
b = b / c
else :
c += 1
max = int(min * a * b)
answer.append(min)
answer.append(max)
return answer | [
"wjdgnl97@gmail.com"
] | wjdgnl97@gmail.com |
b2e2cdba0f33620717e3c4cfa422c75d6fb4d68e | d52680372e53b747397bef8d6bf14b9b5762b93b | /manage.py | 7f317b6a261123beaf316ddd0f4aab214a416a4d | [] | no_license | deadly-panda/BioTiful | f6428875f1140d046e920cb674399c3c00d416a7 | 85632f543e3d343479eaafa4c31e3c1d0387d3a9 | refs/heads/master | 2023-05-02T21:47:42.550235 | 2021-06-04T11:22:54 | 2021-06-04T11:22:54 | 366,228,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings unless the caller already
    # set DJANGO_SETTINGS_MODULE in the environment.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bioTiful.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
# Standard Django manage.py entry-point guard.
if __name__ == '__main__':
    main()
| [
"thebesthaddadi@gmail.com"
] | thebesthaddadi@gmail.com |
347c3466199efde856db6a23300ca530780c8fee | 979a506581fa9644db51d93a548f35803a651d1d | /RegistrationForm/migrations/0003_auto_20171028_0518.py | 7f2dea0bb2174d5e67ffde4a5fdbfd049075f92c | [] | no_license | aishna2502/CodeOff | ea46afe32c5675bd9b5f2a602eecf21a43d11e2b | 2bfd07dcc504143c9a5c5582301c1afcfb0afa65 | refs/heads/master | 2021-07-20T23:24:49.617863 | 2017-10-28T07:17:28 | 2017-10-28T07:17:28 | 108,568,404 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-10-28 05:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add `job_time` (Day/Night) to User and restrict `job` choices.

    NOTE(review): `default=1` on a CharField is odd (integer default on
    a text column, used once to backfill existing rows) -- confirm this
    is intended rather than e.g. default=b'DayJob'.
    """
    dependencies = [
        ('RegistrationForm', '0002_auto_20171028_0152'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='job_time',
            field=models.CharField(choices=[(b'DayJob', b'DayJob'), (b'NightJob', b'NightJob')], default=1, max_length=100),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='user',
            name='job',
            field=models.CharField(choices=[(b'DeskJob_comp', b'DeskJob_comp'), (b'DeskJob_normal', b'DeskJob_normal')], max_length=100),
        ),
    ]
| [
"aishna987@yahoo.co.in"
] | aishna987@yahoo.co.in |
586fbbda387bcc0fd0af65aec9272afaf55c6d94 | db3a0578ef5d79cee7f9e96fa3fd291bbaaf9eb4 | /Pygame/Bees/bees.py | 90620d058d1920a15e8892180aa8c230a8c0d55f | [
"MIT"
] | permissive | otisgbangba/python-lessons | 0477a766cda6bc0e2671e4cce2f95bc62c8d3c43 | a29f5383b56b21e6b0bc21aa9acaec40ed4df3cc | refs/heads/master | 2022-11-03T22:10:52.845204 | 2020-06-13T15:42:40 | 2020-06-13T15:42:40 | 261,255,751 | 1 | 0 | MIT | 2020-05-04T17:48:12 | 2020-05-04T17:48:11 | null | UTF-8 | Python | false | false | 1,885 | py | import pygame, random
from pygame.locals import *
from util import loadImage
from bee import Bee
from flower import Flower
from score import Score

# --- One-time setup: window, sprite images, background with title text. ---
pygame.init()
TITLE = 'Bee, Get the Nectar!'
screen = pygame.display.set_mode((1280, 720), 0)
screenRect = screen.get_rect()
Bee.loadImages()
Flower.loadImages()
background = loadImage('clover-large.jpg')
font = pygame.font.Font(None, 48)
text = font.render(TITLE, 1, Color('white'))
textpos = text.get_rect(centerx=screenRect.width/2, centery=25)
background.blit(text, textpos)
screen.blit(background, (0, 0))
pygame.display.flip()
# Sprites: one player bee, a growing group of flowers, and a score display.
bee = Bee(screenRect)
flowers = pygame.sprite.Group()
score = Score()
drawingGroup = pygame.sprite.RenderUpdates()
drawingGroup.add(bee)
drawingGroup.add(score)
pygame.display.set_caption(TITLE)
pygame.mouse.set_visible(0)
clock = pygame.time.Clock()
# Facing angle (degrees) indexed by [ydir+1][xdir+1], where each dir is -1/0/1.
angles = (( 45,   0,  -45),
          ( 90,   0,  -90),
          (135, 180, -135))
# game loop
loop = True
while loop:
    # get input
    for event in pygame.event.get():
        if event.type == QUIT \
           or (event.type == KEYDOWN and event.key == K_ESCAPE):
            loop = False
    keystate = pygame.key.get_pressed()
    xdir = keystate[K_RIGHT] - keystate[K_LEFT]   # -1, 0, or 1
    ydir = keystate[K_DOWN] - keystate[K_UP]
    # Rotate the bee to match its movement direction, then move it 8 px per
    # axis per frame, clamped to the window.
    bee.setAngle(angles[ydir+1][xdir+1])
    bee.rect = bee.rect.move((xdir * 8, ydir * 8)).clamp(screenRect)
    # Detect collisions
    for flower in pygame.sprite.spritecollide(bee, flowers, True):
        score.score += 1
        flower.kill()
    # Roughly one new flower every 51 frames on average.
    if random.randint(0, 50) == 0:
        flower = Flower(screenRect)
        drawingGroup.add(flower)
        flowers.add(flower)
    # Dirty-rect redraw: erase, update, draw, then flip only the changed areas.
    drawingGroup.clear(screen, background)
    drawingGroup.update()
    changedRects = drawingGroup.draw(screen)
    pygame.display.update(changedRects)
    # maintain frame rate
    clock.tick(40)
pygame.quit()
| [
"daveb@davebsoft.com"
] | daveb@davebsoft.com |
fc5c6cf54acdc92357aedf5a77af4161c7885cb0 | 44064ed79f173ddca96174913910c1610992b7cb | /Second_Processing_app/temboo/Library/Box/Users/DeleteUser.py | 19db5a19eb485a39cf7171ed247400616e188e0a | [] | no_license | dattasaurabh82/Final_thesis | 440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5 | 8edaea62f5987db026adfffb6b52b59b119f6375 | refs/heads/master | 2021-01-20T22:25:48.999100 | 2014-10-14T18:58:00 | 2014-10-14T18:58:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,041 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# DeleteUser
# Deletes a specified user.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class DeleteUser(Choreography):
    """Temboo Choreo wrapping Box's "delete user" API call (auto-generated)."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the DeleteUser Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        Choreography.__init__(self, temboo_session, '/Library/Box/Users/DeleteUser')

    def new_input_set(self):
        # Factory for the container that callers use to set this Choreo's inputs.
        return DeleteUserInputSet()

    def _make_result_set(self, result, path):
        # Wrap a raw execution result in this Choreo's typed result set.
        return DeleteUserResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Handle for polling/resuming an in-flight execution.
        return DeleteUserChoreographyExecution(session, exec_id, path)
class DeleteUserInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the DeleteUser
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved during the OAuth2 process.)
        """
        InputSet._set_input(self, 'AccessToken', value)
    def set_Force(self, value):
        """
        Set the value of the Force input for this Choreo. ((optional, boolean) Whether or not the user should be deleted even when they still own files.)
        """
        InputSet._set_input(self, 'Force', value)
    def set_Notify(self, value):
        """
        Set the value of the Notify input for this Choreo. ((optional, boolean) Indicates that the user should receive an email notification of the transfer.)
        """
        InputSet._set_input(self, 'Notify', value)
    def set_UserID(self, value):
        """
        Set the value of the UserID input for this Choreo. ((required, string) The id of the user whose information should be updated.)
        """
        InputSet._set_input(self, 'UserID', value)
class DeleteUserResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the DeleteUser Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # Parse a raw JSON string into Python objects (name shadows the builtin
        # ``str``; kept for compatibility with the generated API).
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Box.)
        """
        return self._output.get('Response', None)
class DeleteUserChoreographyExecution(ChoreographyExecution):
    """Execution handle for DeleteUser; produces this Choreo's typed result set."""
    def _make_result_set(self, response, path):
        return DeleteUserResultSet(response, path)
| [
"dattasaurabh82@gmail.com"
] | dattasaurabh82@gmail.com |
822835e46cda97ecc4506eb170ccb752f5db310b | f8e21925dedb4c8f64682506edb0c29e9d37670e | /main.py | 1f447214b4774bfa9f5c21d8a2b98017ac4811b2 | [] | no_license | whitehatjrdemo1/Prajwal-142 | 320862d55b1a92fc5598734f1cb46540c150215b | 6459815caad4427295d3085c57a4447803c79c82 | refs/heads/main | 2023-08-29T03:51:40.320791 | 2021-10-21T10:41:45 | 2021-10-21T10:41:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,226 | py | from flask import Flask, jsonify, request
import csv
from demographic_filtering import output
from content_filtering import get_recommendations
from storage import all_movies, didnotwatch, not_liked_movies, liked_movies
app = Flask(__name__)
@app.route('/get-movie')
def get_movie():
    """Return the movie currently at the head of the queue as JSON."""
    current = all_movies[0]
    return jsonify({'data': current, 'status': 'success'})
@app.route('/liked-movie', methods=['POST'])
def liked_movie():
    """Move the current head-of-queue movie into the liked list.

    Bug fix: the original did ``all_movies = all_movies[1:]``, which makes
    ``all_movies`` a *local* name for the whole function body, so the first
    read (``all_movies[0]``) raised UnboundLocalError.  Mutating the shared
    module-level list in place avoids rebinding the name.
    """
    movie = all_movies.pop(0)   # remove the movie being rated from the queue
    liked_movies.append(movie)
    return jsonify({'status': 'success'}), 201
@app.route('/un-liked-movie', methods=['POST'])
def unliked_movie():
    """Move the current head-of-queue movie into the not-liked list.

    Bug fix: the original's ``all_movies = all_movies[1:]`` made ``all_movies``
    local and raised UnboundLocalError on first read; pop the shared list
    in place instead.
    """
    movie = all_movies.pop(0)   # remove the movie being rated from the queue
    not_liked_movies.append(movie)
    return jsonify({'status': 'success'}), 201
@app.route('/unwatched', methods=['POST'])
def unwatched():
    """Move the current head-of-queue movie into the did-not-watch list.

    Bug fix: the original's ``all_movies = all_movies[1:]`` made ``all_movies``
    local and raised UnboundLocalError on first read; pop the shared list
    in place instead.
    """
    movie = all_movies.pop(0)   # remove the movie being skipped from the queue
    didnotwatch.append(movie)
    return jsonify({'status': 'success'}), 201
@app.route('/popular-movies')
def popular_movies():
    """Serve the demographically-filtered ("popular") movie list as JSON."""
    print(output)
    movie_data = [
        {'title': m[0], 'poster_link': m[1], 'release_date': m[2],
         'duration': m[3], 'rating': m[4], 'overview': m[5]}
        for m in output
    ]
    return jsonify({'data': movie_data, 'status': 'success'}), 200
@app.route('/recommended-movies')
def recommended_movies():
    """Serve content-based recommendations derived from every liked movie."""
    # Collect recommendations for each liked title (index 19 holds the title key).
    gathered = []
    for liked in liked_movies:
        gathered.extend(get_recommendations(liked[19]))
    # Sort, then drop adjacent duplicates — equivalent to the original
    # sort + itertools.groupby de-duplication.
    gathered.sort()
    unique = []
    for rec in gathered:
        if not unique or unique[-1] != rec:
            unique.append(rec)
    movie_data = [
        {'title': rec[0], 'poster_link': rec[1], 'release_date': rec[2] or 'N/A',
         'duration': rec[3], 'rating': rec[4], 'overview': rec[5]}
        for rec in unique
    ]
    return jsonify({'data': movie_data, 'status': 'success'}), 200
if __name__ == '__main__':
    # Launch the Flask development server when run as a script.
    app.run()
| [
"noreply@github.com"
] | whitehatjrdemo1.noreply@github.com |
8a586e81a999e578ee81906cb78ff41bbe4393c3 | d617cf350d4da3c5ade57b30d4547afd57869ce3 | /Python/Framework/Twisted/项目实战/跨平台物联网消息网关/IotGateway/src/Command/RedirectNotify.py | d415248fe81fda49f17dc30e5801ec68554ddd7f | [] | no_license | fhkankan/markup | d97fade1a7adf057b8ac3414b605c822ae9e60f6 | 40278e988b92d54391e635cda48ce4acb4fc1a6d | refs/heads/master | 2023-03-11T08:00:21.093242 | 2023-03-03T04:24:30 | 2023-03-03T04:24:30 | 119,963,229 | 12 | 10 | null | 2023-08-30T07:53:44 | 2018-02-02T09:44:12 | Python | UTF-8 | Python | false | false | 524 | py | '''
Created on Sep 9, 2014
@author: Changlong
'''
from BaseNotify import CBaseNotify
import BaseCommand
from Utils import Config
class CRedirectNotify(CBaseNotify):
    '''
    Server-push notification telling a client to reconnect to a different
    gateway address (command id 0x00060007).
    '''
    command_id=0x00060007
    def __init__(self,data=None,protocol=None,client_id=0,addr=Config.domain_name):
        '''
        Build the redirect notify; ``addr`` is the address the client should
        reconnect to, stored in the message body under BaseCommand.PN_ADDR.

        NOTE(review): the default ``Config.domain_name`` is evaluated once at
        import time — confirm that is intended if the config can change at
        runtime.
        '''
        CBaseNotify.__init__(self, data, protocol,client_id)
        self.command_id=CRedirectNotify.command_id
        self.body[BaseCommand.PN_ADDR]=addr
| [
"fu.hang.2008@163.com"
] | fu.hang.2008@163.com |
b4fdf0086dda0bb0a9e8e631adbd62959995d35f | be01d0d54723d1e876c9a15618921dffe2b2255a | /Python/BinarySearch/two_sumII.py | 0d534e7163571ad8332aad8f4b807b4999e276c6 | [] | no_license | jxlxt/leetcode | 17e7f25bf94dd334ac0d6254ffcffa003ed04c10 | a6e6e5be3dd5f9501d0aa4caa6744621ab887f51 | refs/heads/master | 2023-05-26T22:10:03.997428 | 2023-05-24T02:36:05 | 2023-05-24T02:36:05 | 118,216,055 | 0 | 0 | null | 2018-01-20T06:31:57 | 2018-01-20T06:30:06 | null | UTF-8 | Python | false | false | 1,122 | py | #! /Users/xiaotongli/anaconda3/bin/python
# -*- coding: utf-8 -*-
# @Time : 9/28/18 10:57 PM
# @Author : Xiaotong Li
# @School : University of California, Santa Cruz
# @FileName: autocomplete_System.py
# @Software: PyCharm
class Solution:
    def twoSum(self, numbers, target):
        """
        :type numbers: List[int]  (sorted in ascending order)
        :type target: int
        :rtype: List[int]  (1-based indices of the unique pair summing to target)

        The original ran a dict-based scan and then a per-element binary
        search over the same data; the second pass was reachable only when the
        first had already failed (so it could never succeed either).  The
        two-pointer walk below exploits the sort order directly: O(n) time,
        O(1) extra space.
        """
        left, right = 0, len(numbers) - 1
        while left < right:
            pair_sum = numbers[left] + numbers[right]
            if pair_sum == target:
                return [left + 1, right + 1]
            if pair_sum < target:
                left += 1       # need a larger sum: advance the low pointer
            else:
                right -= 1      # need a smaller sum: retreat the high pointer
| [
"xli239@ucsc.edu"
] | xli239@ucsc.edu |
bbc97a0c69bc5c0d0fd4008b39d904edef1921b0 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/periodicities/Month/Cycle_Month_25_M_360.py | 82ea2d119aaaddab2bb5d8c525d24df36e2a36a3 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 82 | py | import tests.periodicities.period_test as per
per.buildModel((360 , 'M' , 25));
| [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
d7b9c65e55bf1008ffb01f78a760efb6cb133677 | 80309f0f8959dcba957aacbe454a0aaca4d9335b | /activity4a.py | 66bf7befe4141bdf089e8e57d2c694db51f56f8d | [] | no_license | Gajendra123-source/activity4a.py | 9724179a3b35f998dd9844dced970e803ce686f9 | 36dec391728b910cbce3bb6070349697346d1ba7 | refs/heads/main | 2023-08-23T18:52:59.312197 | 2021-10-01T19:59:16 | 2021-10-01T19:59:16 | 410,719,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | #Jendra Poudel
#ISQA_3900-851, 09/25/2021
#This program will remove dublicates (repeated) data/char from a list and publish
#a new list with no dublicates
def Remove_dublicate(namesList):
    """Return a new list with duplicate names removed, keeping first-seen order.

    Fixes vs. the original:
    - the "Initial List of Names" banner printed a hard-coded sample list that
      did not match the argument actually passed; print the real input instead.
    - the O(n^2) ``item not in list`` scan is replaced by ``dict.fromkeys``,
      which de-duplicates in O(n) while preserving insertion order.
    """
    print("Initial List of Names\n", namesList, "\n")
    print("List of unique names after removing dublicated names")
    return list(dict.fromkeys(namesList))
# Sample input: several names appear more than once to exercise the filter.
names = ['mary','bill','sam','maria','kahn','bill','barry','george','hank','belinda','maria','karthik']
print(Remove_dublicate(names))
"noreply@github.com"
] | Gajendra123-source.noreply@github.com |
5bc08a32ba5bc9e78823dc89fe5070e1deb89e25 | 057d2d1e2a78fc89851154e87b0b229e1e1f003b | /venv/Lib/site-packages/keystoneclient/auth/identity/v2.py | add1da4f5d894be3192f1253735eca8da6d07f56 | [
"Apache-2.0"
] | permissive | prasoon-uta/IBM-Cloud-Secure-File-Storage | 276dcbd143bd50b71121a73bc01c8e04fe3f76b0 | 82a6876316715efbd0b492d0d467dde0ab26a56b | refs/heads/master | 2022-12-13T00:03:31.363281 | 2018-02-22T02:24:11 | 2018-02-22T02:24:11 | 122,420,622 | 0 | 2 | Apache-2.0 | 2022-12-08T05:15:19 | 2018-02-22T02:26:48 | Python | UTF-8 | Python | false | false | 7,824 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import logging
from oslo_config import cfg
import six
from keystoneclient import access
from keystoneclient.auth.identity import base
from keystoneclient import exceptions
from keystoneclient import utils
_logger = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Auth(base.BaseIdentityPlugin):
    """Identity V2 Authentication Plugin.

    :param string auth_url: Identity service endpoint for authorization.
    :param string trust_id: Trust ID for trust scoping.
    :param string tenant_id: Tenant ID for project scoping.
    :param string tenant_name: Tenant name for project scoping.
    :param bool reauthenticate: Allow fetching a new token if the current one
                                is going to expire. (optional) default True
    """

    @classmethod
    def get_options(cls):
        # Extend the base plugin's CLI/config options with v2 scoping options.
        options = super(Auth, cls).get_options()
        options.extend([
            cfg.StrOpt('tenant-id', help='Tenant ID'),
            cfg.StrOpt('tenant-name', help='Tenant Name'),
            cfg.StrOpt('trust-id', help='Trust ID'),
        ])
        return options

    def __init__(self, auth_url,
                 trust_id=None,
                 tenant_id=None,
                 tenant_name=None,
                 reauthenticate=True):
        super(Auth, self).__init__(auth_url=auth_url,
                                   reauthenticate=reauthenticate)
        self._trust_id = trust_id
        self.tenant_id = tenant_id
        self.tenant_name = tenant_name

    @property
    def trust_id(self):
        # Override to remove deprecation.
        return self._trust_id

    @trust_id.setter
    def trust_id(self, value):
        # Override to remove deprecation.
        self._trust_id = value

    def get_auth_ref(self, session, **kwargs):
        """POST to the v2 /tokens endpoint and return the parsed AccessInfo.

        :param session: a keystoneclient Session used to issue the request.
        :raises keystoneclient.exceptions.InvalidResponse: if the reply is not
            JSON or lacks the expected 'access' section.
        """
        headers = {'Accept': 'application/json'}
        url = self.auth_url.rstrip('/') + '/tokens'
        # Subclasses supply the credential section (password, token, ...);
        # they may also add headers (e.g. X-Auth-Token) via the dict we pass.
        params = {'auth': self.get_auth_data(headers)}
        # Scope the request: tenant id wins over tenant name; trust is additive.
        if self.tenant_id:
            params['auth']['tenantId'] = self.tenant_id
        elif self.tenant_name:
            params['auth']['tenantName'] = self.tenant_name
        if self.trust_id:
            params['auth']['trust_id'] = self.trust_id
        _logger.debug('Making authentication request to %s', url)
        # authenticated=False: this *is* the auth request; log=False keeps
        # credentials out of the debug log.
        resp = session.post(url, json=params, headers=headers,
                            authenticated=False, log=False)
        try:
            resp_data = resp.json()['access']
        except (KeyError, ValueError):
            raise exceptions.InvalidResponse(response=resp)
        return access.AccessInfoV2(**resp_data)

    @abc.abstractmethod
    def get_auth_data(self, headers=None):
        """Return the authentication section of an auth plugin.

        :param dict headers: The headers that will be sent with the auth
                             request if a plugin needs to add to them.
        :return: A dict of authentication data for the auth type.
        :rtype: dict
        """
        pass  # pragma: no cover
_NOT_PASSED = object()
class Password(Auth):
    """Authenticate against Identity v2 with a username/user-id and password.

    Exactly one of ``username`` or ``user_id`` must be supplied; an explicit
    ``None`` is accepted and distinguished from "not passed" via a module
    sentinel.

    :param string auth_url: Identity service endpoint for authorization.
    :param string username: Username for authentication.
    :param string password: Password for authentication.
    :param string user_id: User ID for authentication.
    :param string trust_id: Trust ID for trust scoping.
    :param string tenant_id: Tenant ID for tenant scoping.
    :param string tenant_name: Tenant name for tenant scoping.
    :param bool reauthenticate: Allow fetching a new token if the current one
                                is going to expire. (optional) default True

    :raises TypeError: if a user_id or username is not provided.
    """

    def __init__(self, auth_url, username=_NOT_PASSED, password=None,
                 user_id=_NOT_PASSED, **kwargs):
        super(Password, self).__init__(auth_url, **kwargs)

        if username is _NOT_PASSED and user_id is _NOT_PASSED:
            msg = 'You need to specify either a username or user_id'
            raise TypeError(msg)

        # Normalize "not passed" to None so later truthiness checks are simple.
        self._username = None if username is _NOT_PASSED else username
        self.user_id = None if user_id is _NOT_PASSED else user_id
        self._password = password

    @property
    def username(self):
        # Plain attribute access; the property exists only to drop an old
        # deprecation wrapper.
        return self._username

    @username.setter
    def username(self, value):
        self._username = value

    @property
    def password(self):
        return self._password

    @password.setter
    def password(self, value):
        self._password = value

    def get_auth_data(self, headers=None):
        # Username takes precedence over user id when both are set.
        creds = {'password': self._password}
        if self._username:
            creds['username'] = self._username
        elif self.user_id:
            creds['userId'] = self.user_id
        return {'passwordCredentials': creds}

    @classmethod
    def load_from_argparse_arguments(cls, namespace, **kwargs):
        # Fall back to an interactive prompt when no password was supplied
        # programmatically or on the command line.
        if not kwargs.get('password') and not namespace.os_password:
            kwargs['password'] = utils.prompt_user_password()
        return super(Password, cls).load_from_argparse_arguments(namespace,
                                                                 **kwargs)

    @classmethod
    def get_options(cls):
        options = super(Password, cls).get_options()
        options.extend([
            cfg.StrOpt('username',
                       dest='username',
                       deprecated_name='user-name',
                       help='Username to login with'),
            cfg.StrOpt('user-id', help='User ID to login with'),
            cfg.StrOpt('password', secret=True, help='Password to use'),
        ])
        return options
class Token(Auth):
    """Authenticate against Identity v2 with a pre-existing token.

    :param string auth_url: Identity service endpoint for authorization.
    :param string token: Existing token for authentication.
    :param string tenant_id: Tenant ID for tenant scoping.
    :param string tenant_name: Tenant name for tenant scoping.
    :param string trust_id: Trust ID for trust scoping.
    :param bool reauthenticate: Allow fetching a new token if the current one
                                is going to expire. (optional) default True
    """

    def __init__(self, auth_url, token, **kwargs):
        super(Token, self).__init__(auth_url, **kwargs)
        self._token = token

    @property
    def token(self):
        # Plain attribute access; the property exists only to drop an old
        # deprecation wrapper.
        return self._token

    @token.setter
    def token(self, value):
        self._token = value

    def get_auth_data(self, headers=None):
        # The existing token both authenticates this request (header) and is
        # the credential being exchanged (body).
        if headers is not None:
            headers['X-Auth-Token'] = self._token
        return {'token': {'id': self._token}}

    @classmethod
    def get_options(cls):
        options = super(Token, cls).get_options()
        options.append(cfg.StrOpt('token', secret=True, help='Token'))
        return options
| [
"prasoon1812@gmail.com"
] | prasoon1812@gmail.com |
319a8ecd8143da437cd5720b73ed24a1a396c1cc | 2f09e893c3a21f4a17c95b99446d1efbf0b109f7 | /huaytools/tensorflow/layers/__init__.py | 6f45582415c893a8cb74a1d2dd931b0b6805be10 | [
"MIT"
] | permissive | knight134/huaytools | b19f0078e724963415c63d60218ae3cc624f598a | cbecd6771c05f8241e756a7619047589397b16d3 | refs/heads/master | 2020-04-24T18:30:27.732740 | 2018-05-27T13:51:24 | 2018-05-27T13:51:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,595 | py | """
"""
import tensorflow as tf
import tensorlayer as tl
# from .cnn import *
# from .rnn import *
from .attention import *
from .embedding import *
logging = tf.logging
def dense(inputs, n_units,
          activation=tf.nn.relu,
          use_bias=True,
          W_init=tf.truncated_normal_initializer(stddev=0.1),
          W_init_args=None,
          b_init=tf.constant_initializer(value=0.0),
          b_init_args=None,
          name="dense",
          reuse=None):
    """Fully connected (dense) layer.

    input_shape: [batch_size, n_features]
    output_shape: [batch_size, n_units]

    Args:
        inputs: tensor (or convertible) whose last dimension is the feature axis.
        n_units (int): number of output units.
        activation: callable applied to the affine output (default tf.nn.relu).
        use_bias (bool): add a learned bias term when True.
        W_init / b_init: initializers for the weight matrix and bias variables.
        W_init_args / b_init_args: extra kwargs forwarded to tf.get_variable.
        name (str): variable scope name.
        reuse: forwarded to tf.variable_scope so variables can be shared.

    NOTE(review): the ``W_init``/``b_init`` defaults are initializer objects
    built once at import time and shared across calls — normally fine since
    initializers are stateless, but confirm.

    References:
        tf.layers.Dense
        tl.layers.DenseLayer
    """
    # Avoid the shared-mutable-default pitfall: fresh dicts when not supplied.
    W_init_args = {} if W_init_args is None else W_init_args
    b_init_args = {} if b_init_args is None else b_init_args
    logging.info("DenseLayer: %s - n_units: %d activation: %s" % (name, n_units, activation.__name__))
    # n_inputs = int(tf.convert_to_tensor(inputs).get_shape()[-1])
    inputs = tf.convert_to_tensor(inputs)
    # Static size of the last dimension (TF1-style ``.value`` access); this
    # requires the feature dimension to be statically known.
    n_inputs = inputs.get_shape()[-1].value
    with tf.variable_scope(name, reuse=reuse):
        W = tf.get_variable('W', shape=[n_inputs, n_units], initializer=W_init, dtype=tf.float32,
                            **W_init_args)
        if use_bias:
            b = tf.get_variable('b', shape=[n_units], initializer=b_init, dtype=tf.float32,
                                **b_init_args)
            # outputs = act(tf.matmul(inputs, W) + b)
            outputs = activation(tf.nn.xw_plus_b(inputs, W, b))
        else:
            outputs = activation(tf.matmul(inputs, W))
    return outputs
| [
"imhuay@163.com"
] | imhuay@163.com |
c06703cb4f9de9c16e2f826147656e8e816920bb | b8dd96def2a6b1b08e16238b96f8cd7bed9a4a9c | /read_ip_table.py | 9fea2069a55086e4ad8f6ba6df908d4ce8e2df5b | [] | no_license | jiejie1993/ipv6_longest_prefix_match | 1edf6377dbecf4f6e3ee16c7caf6f3141f84ed72 | 0d61c9343c929834753b705e025e027749e90784 | refs/heads/master | 2021-06-22T16:18:48.363409 | 2020-11-29T13:27:57 | 2020-11-29T13:27:57 | 143,245,812 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 753 | py | # -*- coding: utf-8 -*-
"""
/******************************************************************************
* FileName : read_ip_table.py
* Author : Guo Yujie
* CreateDate : 2018.06.20
* Revision : V1.0
* Description : read the next hop table to dict
* Copyright : Copyright (c) 2000-2020 FiberHome
* OtherInfo :
* ModifyLog :
******************************************************************************/
"""
import csv
def read_ip_table(filename='ip.csv'):
    """
    Read (ip_mask, next_hop) pairs from a CSV routing table into a dict.

    :param filename: path of the CSV file; defaults to the original
        hard-coded 'ip.csv' so existing callers are unaffected.
    :return: dict mapping ip_mask (column 0) -> next_hop (column 1)
    """
    with open(filename) as f:
        reader = csv.reader(f, delimiter=',')
        # First column is the prefix/mask, second is the next hop.
        return {row[0]: row[1] for row in reader}
| [
"384300189@qq.com"
] | 384300189@qq.com |
1548d5d2f9a1f0420dc18688bb58d6d32b25877a | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2021_10_01/operations/_mhsm_private_endpoint_connections_operations.py | 071dc770d7741dd509f07515c1cb63522e1d153e | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 26,266 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar, Union, cast
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_resource_request(
    subscription_id: str,
    resource_group_name: str,
    name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists a managed HSM's private endpoint connections."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # Caller-supplied api_version / headers win over the generated defaults.
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-10-01"))  # type: str
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}/privateEndpointConnections")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "name": _SERIALIZER.url("name", name, 'str'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    name: str,
    private_endpoint_connection_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a single named private endpoint connection."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # Caller-supplied api_version / headers win over the generated defaults.
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-10-01"))  # type: str
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}/privateEndpointConnections/{privateEndpointConnectionName}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "name": _SERIALIZER.url("name", name, 'str'),
        "privateEndpointConnectionName": _SERIALIZER.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_put_request(
    subscription_id: str,
    resource_group_name: str,
    name: str,
    private_endpoint_connection_name: str,
    *,
    json: Optional[_models.MHSMPrivateEndpointConnection] = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the PUT request creating/updating a private endpoint connection.

    Exactly one of ``json`` (a model serialized by the pipeline) or ``content``
    (a raw body) is expected to be supplied by the caller.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # Caller-supplied api_version / content type win over the generated defaults.
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-10-01"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None))  # type: Optional[str]
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}/privateEndpointConnections/{privateEndpointConnectionName}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "name": _SERIALIZER.url("name", name, 'str'),
        "privateEndpointConnectionName": _SERIALIZER.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    if content_type is not None:
        _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="PUT",
        url=_url,
        params=_params,
        headers=_headers,
        json=json,
        content=content,
        **kwargs
    )
def build_delete_request_initial(
    subscription_id: str,
    resource_group_name: str,
    name: str,
    private_endpoint_connection_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial DELETE request of the long-running delete operation."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # Caller-supplied api_version / headers win over the generated defaults.
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-10-01"))  # type: str
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}/privateEndpointConnections/{privateEndpointConnectionName}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "name": _SERIALIZER.url("name", name, 'str'),
        "privateEndpointConnectionName": _SERIALIZER.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="DELETE",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
class MHSMPrivateEndpointConnectionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.keyvault.v2021_10_01.KeyVaultManagementClient`'s
:attr:`mhsm_private_endpoint_connections` attribute.
"""
models = _models
    def __init__(self, *args, **kwargs):
        # Wire up the shared machinery handed over by the service client:
        # pipeline client, configuration, and (de)serializers, accepted either
        # positionally (in this order) or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def list_by_resource(
        self,
        resource_group_name: str,
        name: str,
        **kwargs: Any
    ) -> Iterable[_models.MHSMPrivateEndpointConnectionsListResult]:
        """The List operation gets information about the private endpoint connections associated with the
        managed HSM Pool.

        :param resource_group_name: Name of the resource group that contains the managed HSM pool.
        :type resource_group_name: str
        :param name: Name of the managed HSM Pool.
        :type name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either MHSMPrivateEndpointConnectionsListResult or the
         result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.keyvault.v2021_10_01.models.MHSMPrivateEndpointConnectionsListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-10-01"))  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType[_models.MHSMPrivateEndpointConnectionsListResult]
        # Map auth/404/409 status codes onto the matching azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}) or {})
        def prepare_request(next_link=None):
            # First page uses the operation's template URL; subsequent pages
            # follow the service-provided next_link verbatim.
            if not next_link:
                request = build_list_by_resource_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    name=name,
                    api_version=api_version,
                    template_url=self.list_by_resource.metadata['url'],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                request = build_list_by_resource_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    name=name,
                    api_version=api_version,
                    template_url=next_link,
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items) to the pager.
            deserialized = self._deserialize("MHSMPrivateEndpointConnectionsListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page, translating HTTP errors via error_map.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ManagedHsmError, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}/privateEndpointConnections"}  # type: ignore
@distributed_trace
def get(
    self,
    resource_group_name: str,
    name: str,
    private_endpoint_connection_name: str,
    **kwargs: Any
) -> _models.MHSMPrivateEndpointConnection:
    """Gets the specified private endpoint connection associated with the managed HSM Pool.

    :param resource_group_name: Name of the resource group that contains the managed HSM pool.
    :type resource_group_name: str
    :param name: Name of the managed HSM Pool.
    :type name: str
    :param private_endpoint_connection_name: Name of the private endpoint connection associated
     with the managed hsm pool.
    :type private_endpoint_connection_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MHSMPrivateEndpointConnection, or the result of cls(response)
    :rtype: ~azure.mgmt.keyvault.v2021_10_01.models.MHSMPrivateEndpointConnection
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Map common HTTP failure codes onto azure-core exception types;
    # callers may override or extend this via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-10-01"))  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[_models.MHSMPrivateEndpointConnection]


    request = build_get_request(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        name=name,
        private_endpoint_connection_name=private_endpoint_connection_name,
        api_version=api_version,
        template_url=self.get.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request,
        stream=False,
        **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        # Surface the service's typed error body alongside the HTTP failure
        # when it can be deserialized.
        error = self._deserialize.failsafe_deserialize(_models.ManagedHsmError, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('MHSMPrivateEndpointConnection', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}/privateEndpointConnections/{privateEndpointConnectionName}"}  # type: ignore
@distributed_trace
def put(
    self,
    resource_group_name: str,
    name: str,
    private_endpoint_connection_name: str,
    properties: _models.MHSMPrivateEndpointConnection,
    **kwargs: Any
) -> _models.MHSMPrivateEndpointConnection:
    """Updates the specified private endpoint connection associated with the managed hsm pool.

    :param resource_group_name: Name of the resource group that contains the managed HSM pool.
    :type resource_group_name: str
    :param name: Name of the managed HSM Pool.
    :type name: str
    :param private_endpoint_connection_name: Name of the private endpoint connection associated
     with the managed hsm pool.
    :type private_endpoint_connection_name: str
    :param properties: The intended state of private endpoint connection.
    :type properties: ~azure.mgmt.keyvault.v2021_10_01.models.MHSMPrivateEndpointConnection
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MHSMPrivateEndpointConnection, or the result of cls(response)
    :rtype: ~azure.mgmt.keyvault.v2021_10_01.models.MHSMPrivateEndpointConnection
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Map common HTTP failure codes onto azure-core exception types;
    # callers may override or extend this via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}) or {})

    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-10-01"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json"))  # type: Optional[str]
    cls = kwargs.pop('cls', None)  # type: ClsType[_models.MHSMPrivateEndpointConnection]

    # Serialize the desired connection state into the JSON request body.
    _json = self._serialize.body(properties, 'MHSMPrivateEndpointConnection')

    request = build_put_request(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        name=name,
        private_endpoint_connection_name=private_endpoint_connection_name,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        template_url=self.put.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request,
        stream=False,
        **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        # NOTE(review): unlike get/list_by_resource, no ManagedHsmError body is
        # deserialized here — presumably per the service spec for PUT; confirm.
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Propagate the async-operation headers so callers can track the update.
    response_headers = {}
    response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
    response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))

    deserialized = self._deserialize('MHSMPrivateEndpointConnection', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, response_headers)

    return deserialized

put.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}/privateEndpointConnections/{privateEndpointConnectionName}"}  # type: ignore
def _delete_initial(
    self,
    resource_group_name: str,
    name: str,
    private_endpoint_connection_name: str,
    **kwargs: Any
) -> Optional[_models.MHSMPrivateEndpointConnection]:
    # Issues the initial DELETE call of the long-running delete operation.
    # Returns the deserialized connection on 200 (delete already complete),
    # or None on 202/204 while surfacing the polling headers.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-10-01"))  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional[_models.MHSMPrivateEndpointConnection]]


    request = build_delete_request_initial(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        name=name,
        private_endpoint_connection_name=private_endpoint_connection_name,
        api_version=api_version,
        template_url=self._delete_initial.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request,
        stream=False,
        **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = None
    response_headers = {}
    if response.status_code == 200:
        # Operation finished synchronously; body contains the connection.
        deserialized = self._deserialize('MHSMPrivateEndpointConnection', pipeline_response)

    if response.status_code == 202:
        # Operation accepted; headers tell the poller where/when to check.
        response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
        response_headers['Location']=self._deserialize('str', response.headers.get('Location'))


    if cls:
        return cls(pipeline_response, deserialized, response_headers)

    return deserialized

_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}/privateEndpointConnections/{privateEndpointConnectionName}"}  # type: ignore
@distributed_trace
def begin_delete(
    self,
    resource_group_name: str,
    name: str,
    private_endpoint_connection_name: str,
    **kwargs: Any
) -> LROPoller[_models.MHSMPrivateEndpointConnection]:
    """Deletes the specified private endpoint connection associated with the managed hsm pool.

    :param resource_group_name: Name of the resource group that contains the managed HSM pool.
    :type resource_group_name: str
    :param name: Name of the managed HSM Pool.
    :type name: str
    :param private_endpoint_connection_name: Name of the private endpoint connection associated
     with the managed hsm pool.
    :type private_endpoint_connection_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either MHSMPrivateEndpointConnection or the
     result of cls(response)
    :rtype:
     ~azure.core.polling.LROPoller[~azure.mgmt.keyvault.v2021_10_01.models.MHSMPrivateEndpointConnection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-10-01"))  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[_models.MHSMPrivateEndpointConnection]
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial DELETE when not resuming from a saved poller
    # state; cls=lambda keeps the raw pipeline response for the poller.
    if cont_token is None:
        raw_result = self._delete_initial(  # type: ignore
            resource_group_name=resource_group_name,
            name=name,
            private_endpoint_connection_name=private_endpoint_connection_name,
            api_version=api_version,
            cls=lambda x,y,z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Final-state handler: deserialize the completed operation's body.
        deserialized = self._deserialize('MHSMPrivateEndpointConnection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized


    # polling=True -> standard ARM polling; False -> no polling; anything
    # else is treated as a caller-supplied PollingMethod.
    if polling is True:
        polling_method = cast(PollingMethod, ARMPolling(
            lro_delay,

            **kwargs
        ))  # type: PollingMethod
    elif polling is False: polling_method = cast(PollingMethod, NoPolling())
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}/privateEndpointConnections/{privateEndpointConnectionName}"}  # type: ignore
| [
"noreply@github.com"
] | test-repo-billy.noreply@github.com |
08153e3318bfc006e914166f3b541d7429041bad | 46c8b6033f6b02377ec80bb3c7b2250d49c7ba1d | /node_modules/fsevents/build/config.gypi | 548d0ab35b6b2ba0d7cc9bcf44a1be091f5e0049 | [
"MIT"
] | permissive | akharrou/42-Hackathon-BuildTheBay | 810d4d5416785f41312bcb8a766f5c7e5b1bcab7 | d8c67aed81ff4b440b857e88766cfd118cec9d83 | refs/heads/master | 2023-01-14T02:14:47.219447 | 2020-09-15T15:49:52 | 2020-09-15T15:49:52 | 180,026,714 | 2 | 0 | MIT | 2023-01-04T18:59:47 | 2019-04-07T21:35:30 | JavaScript | UTF-8 | Python | false | false | 5,795 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_gyp_path": "tools/icu/icu-system.gyp",
"icu_small": "false",
"icu_ver_major": "63",
"llvm_version": "0",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_experimental_http_parser": "false",
"node_install_npm": "false",
"node_module_version": 67,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local/Cellar/node/11.13.0",
"node_release_urlbase": "",
"node_report": "true",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_large_pages": "false",
"node_use_openssl": "true",
"node_use_pch": "false",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"shlib_suffix": "67.dylib",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": "true",
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "10.0",
"nodedir": "/nfs/2018/a/akharrou/.node-gyp/11.13.0",
"standalone_static_library": 1,
"fallback_to_build": "true",
"module": "/nfs/2018/a/akharrou/Desktop/42-BuildTheBay-Project/node_modules/fsevents/lib/binding/Release/node-v67-darwin-x64/fse.node",
"module_name": "fse",
"module_path": "/nfs/2018/a/akharrou/Desktop/42-BuildTheBay-Project/node_modules/fsevents/lib/binding/Release/node-v67-darwin-x64",
"napi_version": "4",
"node_abi_napi": "napi",
"save_dev": "",
"legacy_bundling": "",
"dry_run": "",
"viewer": "man",
"only": "",
"commit_hooks": "true",
"browser": "",
"also": "",
"sign_git_commit": "",
"rollback": "true",
"usage": "",
"audit": "true",
"globalignorefile": "/nfs/2018/a/akharrou/.brew/etc/npmignore",
"shell": "/bin/zsh",
"maxsockets": "50",
"init_author_url": "",
"shrinkwrap": "true",
"parseable": "",
"metrics_registry": "https://registry.npmjs.org/",
"timing": "",
"init_license": "ISC",
"if_present": "",
"sign_git_tag": "",
"init_author_email": "",
"cache_max": "Infinity",
"preid": "",
"long": "",
"local_address": "",
"git_tag_version": "true",
"cert": "",
"registry": "https://registry.npmjs.org/",
"noproxy": "",
"fetch_retries": "2",
"versions": "",
"message": "%s",
"key": "",
"globalconfig": "/nfs/2018/a/akharrou/.brew/etc/npmrc",
"prefer_online": "",
"logs_max": "10",
"always_auth": "",
"global_style": "",
"cache_lock_retries": "10",
"update_notifier": "true",
"heading": "npm",
"audit_level": "low",
"searchlimit": "20",
"read_only": "",
"offline": "",
"fetch_retry_mintimeout": "10000",
"json": "",
"access": "",
"allow_same_version": "",
"https_proxy": "",
"engine_strict": "",
"description": "true",
"userconfig": "/nfs/2018/a/akharrou/.npmrc",
"init_module": "/nfs/2018/a/akharrou/.npm-init.js",
"cidr": "",
"user": "33479",
"node_version": "11.13.0",
"save": "true",
"ignore_prepublish": "",
"editor": "vi",
"auth_type": "legacy",
"tag": "latest",
"script_shell": "",
"progress": "true",
"global": "",
"searchstaleness": "900",
"optional": "true",
"ham_it_up": "",
"save_prod": "",
"force": "",
"bin_links": "true",
"searchopts": "",
"node_gyp": "/nfs/2018/a/akharrou/.brew/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"depth": "Infinity",
"sso_poll_frequency": "500",
"rebuild_bundle": "true",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"tag_version_prefix": "v",
"strict_ssl": "true",
"sso_type": "oauth",
"scripts_prepend_node_path": "warn-only",
"save_prefix": "^",
"ca": "",
"save_exact": "",
"group": "42188",
"fetch_retry_factor": "10",
"dev": "",
"version": "",
"prefer_offline": "",
"cache_lock_stale": "60000",
"otp": "",
"cache_min": "10",
"searchexclude": "",
"cache": "/nfs/2018/a/akharrou/.npm",
"color": "true",
"package_lock": "true",
"package_lock_only": "",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "npm/6.7.0 node/v11.13.0 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"send_metrics": "",
"save_bundle": "",
"umask": "0022",
"node_options": "",
"init_version": "1.0.0",
"init_author_name": "",
"git": "git",
"scope": "",
"unsafe_perm": "true",
"tmp": "/var/folders/zz/zyxvpxvq6csfxvn_n00042rw0010p7/T",
"onload_script": "",
"link": "",
"prefix": "/nfs/2018/a/akharrou/.brew"
}
}
| [
"idev.aymen@gmail.com"
] | idev.aymen@gmail.com |
41dc2263cb8835ee73b5aef79ac4b05394eb1d5f | 4aecb256f4aeca5db010a90a020fde2bc01b0fa7 | /main_app/admin.py | eb7ef3d03120e0aa8f847f76b13384a911526a0f | [] | no_license | kennyyseo/find-a-pet | ed7b8d23f2819df37073d84a8d9880c43cd34d34 | ab936b99200d1131824f8a314015beacf2aa0e7b | refs/heads/master | 2022-12-31T12:29:47.827765 | 2020-10-22T19:06:20 | 2020-10-22T19:06:20 | 296,955,738 | 0 | 2 | null | 2020-10-22T19:06:21 | 2020-09-19T21:38:33 | HTML | UTF-8 | Python | false | false | 113 | py | from django.contrib import admin
from .models import Pet
# Make the Pet model manageable through the Django admin site
# (uses the default ModelAdmin options).
admin.site.register(Pet)
| [
"kennyyseo@gmail.com"
] | kennyyseo@gmail.com |
afeebd636416a886d7f9ed90d354fd7b7d02c895 | 71cc3524493e30366f122fdbdfd4260ca0ae8934 | /harbor_client/model/retention_policy_scope.py | c7b1ef5000b7ed44731db1b1367749fcd29b7d6f | [] | no_license | moule3053/harbor-python-client-api | f293a42bac0e2eee54d43d89af12fb215146bd06 | 31abc14deaf6bb62badc4d9a7b687c60e6fc99eb | refs/heads/master | 2023-08-24T23:16:45.144820 | 2021-10-11T22:54:36 | 2021-10-11T22:54:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,210 | py | """
Harbor API
These APIs provide services for manipulating Harbor project. # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from harbor_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from harbor_client.exceptions import ApiAttributeError
class RetentionPolicyScope(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum restrictions or value validations for this model's fields.
    allowed_values = {
    }

    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'level': (str,),  # noqa: E501
            'ref': (int,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        return None

    # Python attribute name -> JSON key (identical here).
    attribute_map = {
        'level': 'level',  # noqa: E501
        'ref': 'ref',  # noqa: E501
    }

    read_only_vars = {
    }

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """RetentionPolicyScope - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            level (str): [optional]  # noqa: E501
            ref (int): [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Bypass __init__ (and its read-only-attribute check) so server
        # responses may populate read-only fields.
        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal bookkeeping attributes that are always set on instances.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """RetentionPolicyScope - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            level (str): [optional]  # noqa: E501
            ref (int): [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Unlike _from_openapi_data, user construction rejects read-only fields.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
| [
"vb@container-registry.com"
] | vb@container-registry.com |
e9d440dc659c231422d6c25d47814016b3ae2368 | a65e5dc54092a318fc469543c3b96f6699d0c60b | /Personel/AATIF/Python/OOP-Assignment/prog1.py | 2b4f95a75c432fd02775bc8ab30d5769b65c2216 | [] | no_license | shankar7791/MI-10-DevOps | e15bfda460ffd0afce63274f2f430445d04261fe | f0b9e8c5be7b28298eb6d3fb6badf11cd033881d | refs/heads/main | 2023-07-04T15:25:08.673757 | 2021-08-12T09:12:37 | 2021-08-12T09:12:37 | 339,016,230 | 1 | 0 | null | 2021-08-12T09:12:37 | 2021-02-15T08:50:08 | JavaScript | UTF-8 | Python | false | false | 305 | py | #Program to count the Number Of Instances Of Class In Python
class Inst_class:
    """Tracks how many times this class has been instantiated."""

    # Shared class-level tally; bumped once per construction.
    counter = 0

    def __init__(self):
        Inst_class.counter = Inst_class.counter + 1
# Create four instances; each __init__ call bumps the shared class counter.
I1 = Inst_class()
I2 = Inst_class()
I3 = Inst_class()
I4 = Inst_class()
# Report the total number of instances created so far.
print("Number Of Instance Class: ", Inst_class.counter)
"siddiquiaatif115@gmail.com"
] | siddiquiaatif115@gmail.com |
46280f7f0ae9742a013a39b95a602707cd769acc | 698af1de36b1aa6384223a5e979c27e6d2a9079a | /app/migrations/0001_initial.py | aa3b48b003f443f110a1379a691c33822123feb4 | [] | no_license | shubhamkumar252083/bosonQ-psi-backend | e628dd840835e77f9e40de59489d591bd2765a28 | 69d59912660fb057227a47c5e89fae6eb77d3660 | refs/heads/master | 2023-08-22T17:04:11.395312 | 2021-10-11T16:53:35 | 2021-10-11T16:53:35 | 416,012,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | # Generated by Django 3.2.8 on 2021-10-11 10:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the ``React`` table."""

    # Marks this as the first migration for the app.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='React',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('RegnNo', models.PositiveIntegerField()),
                ('ApplicantName', models.CharField(max_length=30)),
                # Two-letter state codes; defaults to 'KA'.
                ('State', models.CharField(choices=[('KA', 'KA'), ('TN', 'TN'), ('JK', 'JK'), ('AP', 'AP'), ('TS', 'TS'), ('AN', 'AN'), ('NL', 'NL')], default='KA', max_length=2)),
            ],
        ),
    ]
| [
"shubhamkumar252083@gmail.com"
] | shubhamkumar252083@gmail.com |
9eb05f6cc675b8de9b8267f8bb5259c9f774603b | f35e02668d3819efe67addf43f724d1c05a090a2 | /employees/urls.py | 0040cc875f5cba221d00357e10e38c9c67ba5a45 | [] | no_license | Atum19/employees_db | fbcf5945c801a5d93e56573cc35a59ae089fc32a | 35b42974c9b02e24c35757cd1bf8ce45ca5ef669 | refs/heads/master | 2021-01-19T16:36:11.481268 | 2017-04-19T08:33:16 | 2017-04-19T08:33:16 | 88,275,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,216 | py | from django.conf.urls import include, url
import debug_toolbar # for debug mode
from views import employees, departments
# Route table: employee CRUD/search views, department CRUD views, and the
# django-debug-toolbar endpoints.
urlpatterns = [
    # NOTE(review): the debug-toolbar route is registered unconditionally;
    # presumably it should only be active when settings.DEBUG is True — confirm.
    url(r'^__debug__/', include(debug_toolbar.urls)),

    # employees part
    url(r'^$', employees.employees_list, name='home'),
    url(r'^employees/add_form/$', employees.EmployeeAdd.as_view(), name='employees_add'),
    url(r'^employees/(?P<pk>\d+)/view/$', employees.EmployeeDetail.as_view(), name='employees_view'),
    url(r'^employees/(?P<pk>\d+)/edit/$', employees.EmployeeUpdate.as_view(), name='employees_edit'),
    url(r'^employees/(?P<pk>\d+)/delete/$', employees.EmployeeDelete.as_view(), name='employees_delete'),
    url(r'^employees/search_names/$', employees.EmployeeSearchList.as_view(), name='employees_search'),

    # departments part
    url(r'^departments/$', departments.departments_list, name='departments_list'),
    url(r'^departments/add_form/$', departments.DepartmentAdd.as_view(), name='departments_add'),
    url(r'^departments/(?P<pk>\d+)/edit/$', departments.DepartmentUpdate.as_view(), name='departments_edit'),
    url(r'^departments/(?P<pk>\d+)/delete/$', departments.DepartmentDelete.as_view(), name='departments_delete'),
]
"osmstas@gmail.com"
] | osmstas@gmail.com |
da9a073d426253f4f74df5f982a4c0fd2cf697bd | a81c1492783e7cafcaf7da5f0402d2d283b7ce37 | /google/ads/google_ads/v6/proto/resources/ad_group_criterion_pb2.py | 6a447067f79a24f9a96374b62dd4f696aab9a5b9 | [
"Apache-2.0"
] | permissive | VincentFritzsche/google-ads-python | 6650cf426b34392d1f58fb912cb3fc25b848e766 | 969eff5b6c3cec59d21191fa178cffb6270074c3 | refs/heads/master | 2023-03-19T17:23:26.959021 | 2021-03-18T18:18:38 | 2021-03-18T18:18:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 66,824 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v6/resources/ad_group_criterion.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v6.proto.common import criteria_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2
from google.ads.google_ads.v6.proto.common import custom_parameter_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_common_dot_custom__parameter__pb2
from google.ads.google_ads.v6.proto.enums import ad_group_criterion_approval_status_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__criterion__approval__status__pb2
from google.ads.google_ads.v6.proto.enums import ad_group_criterion_status_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__criterion__status__pb2
from google.ads.google_ads.v6.proto.enums import bidding_source_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_bidding__source__pb2
from google.ads.google_ads.v6.proto.enums import criterion_system_serving_status_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_criterion__system__serving__status__pb2
from google.ads.google_ads.v6.proto.enums import criterion_type_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_criterion__type__pb2
from google.ads.google_ads.v6.proto.enums import quality_score_bucket_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_quality__score__bucket__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v6/resources/ad_group_criterion.proto',
package='google.ads.googleads.v6.resources',
syntax='proto3',
serialized_options=b'\n%com.google.ads.googleads.v6.resourcesB\025AdGroupCriterionProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V6.Resources\312\002!Google\\Ads\\GoogleAds\\V6\\Resources\352\002%Google::Ads::GoogleAds::V6::Resources',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n:google/ads/googleads/v6/resources/ad_group_criterion.proto\x12!google.ads.googleads.v6.resources\x1a-google/ads/googleads/v6/common/criteria.proto\x1a\x35google/ads/googleads/v6/common/custom_parameter.proto\x1a\x46google/ads/googleads/v6/enums/ad_group_criterion_approval_status.proto\x1a=google/ads/googleads/v6/enums/ad_group_criterion_status.proto\x1a\x32google/ads/googleads/v6/enums/bidding_source.proto\x1a\x43google/ads/googleads/v6/enums/criterion_system_serving_status.proto\x1a\x32google/ads/googleads/v6/enums/criterion_type.proto\x1a\x38google/ads/googleads/v6/enums/quality_score_bucket.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1cgoogle/api/annotations.proto\"\xe2$\n\x10\x41\x64GroupCriterion\x12H\n\rresource_name\x18\x01 \x01(\tB1\xe0\x41\x05\xfa\x41+\n)googleads.googleapis.com/AdGroupCriterion\x12\x1e\n\x0c\x63riterion_id\x18\x38 \x01(\x03\x42\x03\xe0\x41\x03H\x01\x88\x01\x01\x12`\n\x06status\x18\x03 \x01(\x0e\x32P.google.ads.googleads.v6.enums.AdGroupCriterionStatusEnum.AdGroupCriterionStatus\x12Z\n\x0cquality_info\x18\x04 \x01(\x0b\x32?.google.ads.googleads.v6.resources.AdGroupCriterion.QualityInfoB\x03\xe0\x41\x03\x12?\n\x08\x61\x64_group\x18\x39 \x01(\tB(\xe0\x41\x05\xfa\x41\"\n googleads.googleapis.com/AdGroupH\x02\x88\x01\x01\x12Q\n\x04type\x18\x19 \x01(\x0e\x32>.google.ads.googleads.v6.enums.CriterionTypeEnum.CriterionTypeB\x03\xe0\x41\x03\x12\x1a\n\x08negative\x18: \x01(\x08\x42\x03\xe0\x41\x05H\x03\x88\x01\x01\x12\x80\x01\n\x15system_serving_status\x18\x34 \x01(\x0e\x32\\.google.ads.googleads.v6.enums.CriterionSystemServingStatusEnum.CriterionSystemServingStatusB\x03\xe0\x41\x03\x12~\n\x0f\x61pproval_status\x18\x35 \x01(\x0e\x32`.google.ads.googleads.v6.enums.AdGroupCriterionApprovalStatusEnum.AdGroupCriterionApprovalStatusB\x03\xe0\x41\x03\x12 \n\x13\x64isapproval_reasons\x18; \x03(\tB\x03\xe0\x41\x03\x12\x19\n\x0c\x62id_modifier\x18= 
\x01(\x01H\x04\x88\x01\x01\x12\x1b\n\x0e\x63pc_bid_micros\x18> \x01(\x03H\x05\x88\x01\x01\x12\x1b\n\x0e\x63pm_bid_micros\x18? \x01(\x03H\x06\x88\x01\x01\x12\x1b\n\x0e\x63pv_bid_micros\x18@ \x01(\x03H\x07\x88\x01\x01\x12#\n\x16percent_cpc_bid_micros\x18\x41 \x01(\x03H\x08\x88\x01\x01\x12*\n\x18\x65\x66\x66\x65\x63tive_cpc_bid_micros\x18\x42 \x01(\x03\x42\x03\xe0\x41\x03H\t\x88\x01\x01\x12*\n\x18\x65\x66\x66\x65\x63tive_cpm_bid_micros\x18\x43 \x01(\x03\x42\x03\xe0\x41\x03H\n\x88\x01\x01\x12*\n\x18\x65\x66\x66\x65\x63tive_cpv_bid_micros\x18\x44 \x01(\x03\x42\x03\xe0\x41\x03H\x0b\x88\x01\x01\x12\x32\n effective_percent_cpc_bid_micros\x18\x45 \x01(\x03\x42\x03\xe0\x41\x03H\x0c\x88\x01\x01\x12\x65\n\x18\x65\x66\x66\x65\x63tive_cpc_bid_source\x18\x15 \x01(\x0e\x32>.google.ads.googleads.v6.enums.BiddingSourceEnum.BiddingSourceB\x03\xe0\x41\x03\x12\x65\n\x18\x65\x66\x66\x65\x63tive_cpm_bid_source\x18\x16 \x01(\x0e\x32>.google.ads.googleads.v6.enums.BiddingSourceEnum.BiddingSourceB\x03\xe0\x41\x03\x12\x65\n\x18\x65\x66\x66\x65\x63tive_cpv_bid_source\x18\x17 \x01(\x0e\x32>.google.ads.googleads.v6.enums.BiddingSourceEnum.BiddingSourceB\x03\xe0\x41\x03\x12m\n effective_percent_cpc_bid_source\x18# \x01(\x0e\x32>.google.ads.googleads.v6.enums.BiddingSourceEnum.BiddingSourceB\x03\xe0\x41\x03\x12\x66\n\x12position_estimates\x18\n \x01(\x0b\x32\x45.google.ads.googleads.v6.resources.AdGroupCriterion.PositionEstimatesB\x03\xe0\x41\x03\x12\x12\n\nfinal_urls\x18\x46 \x03(\t\x12\x19\n\x11\x66inal_mobile_urls\x18G \x03(\t\x12\x1d\n\x10\x66inal_url_suffix\x18H \x01(\tH\r\x88\x01\x01\x12\"\n\x15tracking_url_template\x18I \x01(\tH\x0e\x88\x01\x01\x12N\n\x15url_custom_parameters\x18\x0e \x03(\x0b\x32/.google.ads.googleads.v6.common.CustomParameter\x12\x43\n\x07keyword\x18\x1b \x01(\x0b\x32+.google.ads.googleads.v6.common.KeywordInfoB\x03\xe0\x41\x05H\x00\x12G\n\tplacement\x18\x1c 
\x01(\x0b\x32-.google.ads.googleads.v6.common.PlacementInfoB\x03\xe0\x41\x05H\x00\x12Y\n\x13mobile_app_category\x18\x1d \x01(\x0b\x32\x35.google.ads.googleads.v6.common.MobileAppCategoryInfoB\x03\xe0\x41\x05H\x00\x12X\n\x12mobile_application\x18\x1e \x01(\x0b\x32\x35.google.ads.googleads.v6.common.MobileApplicationInfoB\x03\xe0\x41\x05H\x00\x12N\n\rlisting_group\x18 \x01(\x0b\x32\x30.google.ads.googleads.v6.common.ListingGroupInfoB\x03\xe0\x41\x05H\x00\x12\x46\n\tage_range\x18$ \x01(\x0b\x32,.google.ads.googleads.v6.common.AgeRangeInfoB\x03\xe0\x41\x05H\x00\x12\x41\n\x06gender\x18% \x01(\x0b\x32*.google.ads.googleads.v6.common.GenderInfoB\x03\xe0\x41\x05H\x00\x12L\n\x0cincome_range\x18& \x01(\x0b\x32/.google.ads.googleads.v6.common.IncomeRangeInfoB\x03\xe0\x41\x05H\x00\x12R\n\x0fparental_status\x18\' \x01(\x0b\x32\x32.google.ads.googleads.v6.common.ParentalStatusInfoB\x03\xe0\x41\x05H\x00\x12\x46\n\tuser_list\x18* \x01(\x0b\x32,.google.ads.googleads.v6.common.UserListInfoB\x03\xe0\x41\x05H\x00\x12N\n\ryoutube_video\x18( \x01(\x0b\x32\x30.google.ads.googleads.v6.common.YouTubeVideoInfoB\x03\xe0\x41\x05H\x00\x12R\n\x0fyoutube_channel\x18) \x01(\x0b\x32\x32.google.ads.googleads.v6.common.YouTubeChannelInfoB\x03\xe0\x41\x05H\x00\x12?\n\x05topic\x18+ \x01(\x0b\x32).google.ads.googleads.v6.common.TopicInfoB\x03\xe0\x41\x05H\x00\x12N\n\ruser_interest\x18- \x01(\x0b\x32\x30.google.ads.googleads.v6.common.UserInterestInfoB\x03\xe0\x41\x05H\x00\x12\x43\n\x07webpage\x18. 
\x01(\x0b\x32+.google.ads.googleads.v6.common.WebpageInfoB\x03\xe0\x41\x05H\x00\x12U\n\x11\x61pp_payment_model\x18/ \x01(\x0b\x32\x33.google.ads.googleads.v6.common.AppPaymentModelInfoB\x03\xe0\x41\x05H\x00\x12R\n\x0f\x63ustom_affinity\x18\x30 \x01(\x0b\x32\x32.google.ads.googleads.v6.common.CustomAffinityInfoB\x03\xe0\x41\x05H\x00\x12N\n\rcustom_intent\x18\x31 \x01(\x0b\x32\x30.google.ads.googleads.v6.common.CustomIntentInfoB\x03\xe0\x41\x05H\x00\x12R\n\x0f\x63ustom_audience\x18J \x01(\x0b\x32\x32.google.ads.googleads.v6.common.CustomAudienceInfoB\x03\xe0\x41\x05H\x00\x12V\n\x11\x63ombined_audience\x18K \x01(\x0b\x32\x34.google.ads.googleads.v6.common.CombinedAudienceInfoB\x03\xe0\x41\x05H\x00\x1a\x8d\x03\n\x0bQualityInfo\x12\x1f\n\rquality_score\x18\x05 \x01(\x05\x42\x03\xe0\x41\x03H\x00\x88\x01\x01\x12m\n\x16\x63reative_quality_score\x18\x02 \x01(\x0e\x32H.google.ads.googleads.v6.enums.QualityScoreBucketEnum.QualityScoreBucketB\x03\xe0\x41\x03\x12o\n\x18post_click_quality_score\x18\x03 \x01(\x0e\x32H.google.ads.googleads.v6.enums.QualityScoreBucketEnum.QualityScoreBucketB\x03\xe0\x41\x03\x12k\n\x14search_predicted_ctr\x18\x04 \x01(\x0e\x32H.google.ads.googleads.v6.enums.QualityScoreBucketEnum.QualityScoreBucketB\x03\xe0\x41\x03\x42\x10\n\x0e_quality_score\x1a\xbc\x03\n\x11PositionEstimates\x12\'\n\x15\x66irst_page_cpc_micros\x18\x06 \x01(\x03\x42\x03\xe0\x41\x03H\x00\x88\x01\x01\x12+\n\x19\x66irst_position_cpc_micros\x18\x07 \x01(\x03\x42\x03\xe0\x41\x03H\x01\x88\x01\x01\x12(\n\x16top_of_page_cpc_micros\x18\x08 \x01(\x03\x42\x03\xe0\x41\x03H\x02\x88\x01\x01\x12<\n*estimated_add_clicks_at_first_position_cpc\x18\t \x01(\x03\x42\x03\xe0\x41\x03H\x03\x88\x01\x01\x12:\n(estimated_add_cost_at_first_position_cpc\x18\n 
\x01(\x03\x42\x03\xe0\x41\x03H\x04\x88\x01\x01\x42\x18\n\x16_first_page_cpc_microsB\x1c\n\x1a_first_position_cpc_microsB\x19\n\x17_top_of_page_cpc_microsB-\n+_estimated_add_clicks_at_first_position_cpcB+\n)_estimated_add_cost_at_first_position_cpc:t\xea\x41q\n)googleads.googleapis.com/AdGroupCriterion\x12\x44\x63ustomers/{customer_id}/adGroupCriteria/{ad_group_id}~{criterion_id}B\x0b\n\tcriterionB\x0f\n\r_criterion_idB\x0b\n\t_ad_groupB\x0b\n\t_negativeB\x0f\n\r_bid_modifierB\x11\n\x0f_cpc_bid_microsB\x11\n\x0f_cpm_bid_microsB\x11\n\x0f_cpv_bid_microsB\x19\n\x17_percent_cpc_bid_microsB\x1b\n\x19_effective_cpc_bid_microsB\x1b\n\x19_effective_cpm_bid_microsB\x1b\n\x19_effective_cpv_bid_microsB#\n!_effective_percent_cpc_bid_microsB\x13\n\x11_final_url_suffixB\x18\n\x16_tracking_url_templateB\x82\x02\n%com.google.ads.googleads.v6.resourcesB\x15\x41\x64GroupCriterionProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V6.Resources\xca\x02!Google\\Ads\\GoogleAds\\V6\\Resources\xea\x02%Google::Ads::GoogleAds::V6::Resourcesb\x06proto3'
,
dependencies=[google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_common_dot_custom__parameter__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__criterion__approval__status__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__criterion__status__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_bidding__source__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_criterion__system__serving__status__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_criterion__type__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_quality__score__bucket__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_ADGROUPCRITERION_QUALITYINFO = _descriptor.Descriptor(
name='QualityInfo',
full_name='google.ads.googleads.v6.resources.AdGroupCriterion.QualityInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='quality_score', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.QualityInfo.quality_score', index=0,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='creative_quality_score', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.QualityInfo.creative_quality_score', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='post_click_quality_score', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.QualityInfo.post_click_quality_score', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='search_predicted_ctr', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.QualityInfo.search_predicted_ctr', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_quality_score', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.QualityInfo._quality_score',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=4072,
serialized_end=4469,
)
_ADGROUPCRITERION_POSITIONESTIMATES = _descriptor.Descriptor(
name='PositionEstimates',
full_name='google.ads.googleads.v6.resources.AdGroupCriterion.PositionEstimates',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='first_page_cpc_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.PositionEstimates.first_page_cpc_micros', index=0,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='first_position_cpc_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.PositionEstimates.first_position_cpc_micros', index=1,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='top_of_page_cpc_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.PositionEstimates.top_of_page_cpc_micros', index=2,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='estimated_add_clicks_at_first_position_cpc', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.PositionEstimates.estimated_add_clicks_at_first_position_cpc', index=3,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='estimated_add_cost_at_first_position_cpc', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.PositionEstimates.estimated_add_cost_at_first_position_cpc', index=4,
number=10, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_first_page_cpc_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.PositionEstimates._first_page_cpc_micros',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_first_position_cpc_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.PositionEstimates._first_position_cpc_micros',
index=1, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_top_of_page_cpc_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.PositionEstimates._top_of_page_cpc_micros',
index=2, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_estimated_add_clicks_at_first_position_cpc', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.PositionEstimates._estimated_add_clicks_at_first_position_cpc',
index=3, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_estimated_add_cost_at_first_position_cpc', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.PositionEstimates._estimated_add_cost_at_first_position_cpc',
index=4, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=4472,
serialized_end=4916,
)
_ADGROUPCRITERION = _descriptor.Descriptor(
name='AdGroupCriterion',
full_name='google.ads.googleads.v6.resources.AdGroupCriterion',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005\372A+\n)googleads.googleapis.com/AdGroupCriterion', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='criterion_id', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.criterion_id', index=1,
number=56, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='status', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.status', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='quality_info', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.quality_info', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ad_group', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.ad_group', index=4,
number=57, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005\372A\"\n googleads.googleapis.com/AdGroup', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.type', index=5,
number=25, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='negative', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.negative', index=6,
number=58, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='system_serving_status', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.system_serving_status', index=7,
number=52, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='approval_status', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.approval_status', index=8,
number=53, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='disapproval_reasons', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.disapproval_reasons', index=9,
number=59, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bid_modifier', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.bid_modifier', index=10,
number=61, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cpc_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.cpc_bid_micros', index=11,
number=62, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cpm_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.cpm_bid_micros', index=12,
number=63, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cpv_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.cpv_bid_micros', index=13,
number=64, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='percent_cpc_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.percent_cpc_bid_micros', index=14,
number=65, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_cpc_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.effective_cpc_bid_micros', index=15,
number=66, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_cpm_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.effective_cpm_bid_micros', index=16,
number=67, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_cpv_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.effective_cpv_bid_micros', index=17,
number=68, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_percent_cpc_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.effective_percent_cpc_bid_micros', index=18,
number=69, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_cpc_bid_source', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.effective_cpc_bid_source', index=19,
number=21, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_cpm_bid_source', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.effective_cpm_bid_source', index=20,
number=22, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_cpv_bid_source', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.effective_cpv_bid_source', index=21,
number=23, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_percent_cpc_bid_source', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.effective_percent_cpc_bid_source', index=22,
number=35, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='position_estimates', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.position_estimates', index=23,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='final_urls', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.final_urls', index=24,
number=70, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='final_mobile_urls', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.final_mobile_urls', index=25,
number=71, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='final_url_suffix', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.final_url_suffix', index=26,
number=72, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tracking_url_template', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.tracking_url_template', index=27,
number=73, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='url_custom_parameters', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.url_custom_parameters', index=28,
number=14, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='keyword', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.keyword', index=29,
number=27, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='placement', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.placement', index=30,
number=28, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mobile_app_category', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.mobile_app_category', index=31,
number=29, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mobile_application', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.mobile_application', index=32,
number=30, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='listing_group', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.listing_group', index=33,
number=32, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='age_range', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.age_range', index=34,
number=36, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='gender', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.gender', index=35,
number=37, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='income_range', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.income_range', index=36,
number=38, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='parental_status', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.parental_status', index=37,
number=39, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='user_list', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.user_list', index=38,
number=42, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='youtube_video', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.youtube_video', index=39,
number=40, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='youtube_channel', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.youtube_channel', index=40,
number=41, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='topic', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.topic', index=41,
number=43, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='user_interest', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.user_interest', index=42,
number=45, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='webpage', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.webpage', index=43,
number=46, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='app_payment_model', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.app_payment_model', index=44,
number=47, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='custom_affinity', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.custom_affinity', index=45,
number=48, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='custom_intent', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.custom_intent', index=46,
number=49, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='custom_audience', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.custom_audience', index=47,
number=74, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='combined_audience', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.combined_audience', index=48,
number=75, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\005', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_ADGROUPCRITERION_QUALITYINFO, _ADGROUPCRITERION_POSITIONESTIMATES, ],
enum_types=[
],
serialized_options=b'\352Aq\n)googleads.googleapis.com/AdGroupCriterion\022Dcustomers/{customer_id}/adGroupCriteria/{ad_group_id}~{criterion_id}',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='criterion', full_name='google.ads.googleads.v6.resources.AdGroupCriterion.criterion',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_criterion_id', full_name='google.ads.googleads.v6.resources.AdGroupCriterion._criterion_id',
index=1, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_ad_group', full_name='google.ads.googleads.v6.resources.AdGroupCriterion._ad_group',
index=2, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_negative', full_name='google.ads.googleads.v6.resources.AdGroupCriterion._negative',
index=3, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_bid_modifier', full_name='google.ads.googleads.v6.resources.AdGroupCriterion._bid_modifier',
index=4, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_cpc_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion._cpc_bid_micros',
index=5, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_cpm_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion._cpm_bid_micros',
index=6, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_cpv_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion._cpv_bid_micros',
index=7, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_percent_cpc_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion._percent_cpc_bid_micros',
index=8, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_effective_cpc_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion._effective_cpc_bid_micros',
index=9, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_effective_cpm_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion._effective_cpm_bid_micros',
index=10, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_effective_cpv_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion._effective_cpv_bid_micros',
index=11, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_effective_percent_cpc_bid_micros', full_name='google.ads.googleads.v6.resources.AdGroupCriterion._effective_percent_cpc_bid_micros',
index=12, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_final_url_suffix', full_name='google.ads.googleads.v6.resources.AdGroupCriterion._final_url_suffix',
index=13, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_tracking_url_template', full_name='google.ads.googleads.v6.resources.AdGroupCriterion._tracking_url_template',
index=14, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=656,
serialized_end=5362,
)
# protoc-generated wiring (see the @@protoc_insertion_point markers below):
# resolve enum types for the nested QualityInfo message, attach both nested
# messages to their parent, and link each synthetic `_<field>` oneof (emitted
# for proto3 optional fields) to its single member field.
# NOTE(review): generated code -- regenerating from the .proto overwrites edits.
_ADGROUPCRITERION_QUALITYINFO.fields_by_name['creative_quality_score'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_quality__score__bucket__pb2._QUALITYSCOREBUCKETENUM_QUALITYSCOREBUCKET
_ADGROUPCRITERION_QUALITYINFO.fields_by_name['post_click_quality_score'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_quality__score__bucket__pb2._QUALITYSCOREBUCKETENUM_QUALITYSCOREBUCKET
_ADGROUPCRITERION_QUALITYINFO.fields_by_name['search_predicted_ctr'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_quality__score__bucket__pb2._QUALITYSCOREBUCKETENUM_QUALITYSCOREBUCKET
_ADGROUPCRITERION_QUALITYINFO.containing_type = _ADGROUPCRITERION
_ADGROUPCRITERION_QUALITYINFO.oneofs_by_name['_quality_score'].fields.append(
  _ADGROUPCRITERION_QUALITYINFO.fields_by_name['quality_score'])
_ADGROUPCRITERION_QUALITYINFO.fields_by_name['quality_score'].containing_oneof = _ADGROUPCRITERION_QUALITYINFO.oneofs_by_name['_quality_score']
_ADGROUPCRITERION_POSITIONESTIMATES.containing_type = _ADGROUPCRITERION
_ADGROUPCRITERION_POSITIONESTIMATES.oneofs_by_name['_first_page_cpc_micros'].fields.append(
  _ADGROUPCRITERION_POSITIONESTIMATES.fields_by_name['first_page_cpc_micros'])
_ADGROUPCRITERION_POSITIONESTIMATES.fields_by_name['first_page_cpc_micros'].containing_oneof = _ADGROUPCRITERION_POSITIONESTIMATES.oneofs_by_name['_first_page_cpc_micros']
_ADGROUPCRITERION_POSITIONESTIMATES.oneofs_by_name['_first_position_cpc_micros'].fields.append(
  _ADGROUPCRITERION_POSITIONESTIMATES.fields_by_name['first_position_cpc_micros'])
_ADGROUPCRITERION_POSITIONESTIMATES.fields_by_name['first_position_cpc_micros'].containing_oneof = _ADGROUPCRITERION_POSITIONESTIMATES.oneofs_by_name['_first_position_cpc_micros']
_ADGROUPCRITERION_POSITIONESTIMATES.oneofs_by_name['_top_of_page_cpc_micros'].fields.append(
  _ADGROUPCRITERION_POSITIONESTIMATES.fields_by_name['top_of_page_cpc_micros'])
_ADGROUPCRITERION_POSITIONESTIMATES.fields_by_name['top_of_page_cpc_micros'].containing_oneof = _ADGROUPCRITERION_POSITIONESTIMATES.oneofs_by_name['_top_of_page_cpc_micros']
_ADGROUPCRITERION_POSITIONESTIMATES.oneofs_by_name['_estimated_add_clicks_at_first_position_cpc'].fields.append(
  _ADGROUPCRITERION_POSITIONESTIMATES.fields_by_name['estimated_add_clicks_at_first_position_cpc'])
_ADGROUPCRITERION_POSITIONESTIMATES.fields_by_name['estimated_add_clicks_at_first_position_cpc'].containing_oneof = _ADGROUPCRITERION_POSITIONESTIMATES.oneofs_by_name['_estimated_add_clicks_at_first_position_cpc']
_ADGROUPCRITERION_POSITIONESTIMATES.oneofs_by_name['_estimated_add_cost_at_first_position_cpc'].fields.append(
  _ADGROUPCRITERION_POSITIONESTIMATES.fields_by_name['estimated_add_cost_at_first_position_cpc'])
_ADGROUPCRITERION_POSITIONESTIMATES.fields_by_name['estimated_add_cost_at_first_position_cpc'].containing_oneof = _ADGROUPCRITERION_POSITIONESTIMATES.oneofs_by_name['_estimated_add_cost_at_first_position_cpc']
# protoc-generated wiring: resolve each AdGroupCriterion field's enum or
# message type against the descriptors of the imported *_pb2 modules.
# NOTE(review): generated code -- regenerating from the .proto overwrites edits.
_ADGROUPCRITERION.fields_by_name['status'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__criterion__status__pb2._ADGROUPCRITERIONSTATUSENUM_ADGROUPCRITERIONSTATUS
_ADGROUPCRITERION.fields_by_name['quality_info'].message_type = _ADGROUPCRITERION_QUALITYINFO
_ADGROUPCRITERION.fields_by_name['type'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_criterion__type__pb2._CRITERIONTYPEENUM_CRITERIONTYPE
_ADGROUPCRITERION.fields_by_name['system_serving_status'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_criterion__system__serving__status__pb2._CRITERIONSYSTEMSERVINGSTATUSENUM_CRITERIONSYSTEMSERVINGSTATUS
_ADGROUPCRITERION.fields_by_name['approval_status'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_ad__group__criterion__approval__status__pb2._ADGROUPCRITERIONAPPROVALSTATUSENUM_ADGROUPCRITERIONAPPROVALSTATUS
_ADGROUPCRITERION.fields_by_name['effective_cpc_bid_source'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_bidding__source__pb2._BIDDINGSOURCEENUM_BIDDINGSOURCE
_ADGROUPCRITERION.fields_by_name['effective_cpm_bid_source'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_bidding__source__pb2._BIDDINGSOURCEENUM_BIDDINGSOURCE
_ADGROUPCRITERION.fields_by_name['effective_cpv_bid_source'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_bidding__source__pb2._BIDDINGSOURCEENUM_BIDDINGSOURCE
_ADGROUPCRITERION.fields_by_name['effective_percent_cpc_bid_source'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_bidding__source__pb2._BIDDINGSOURCEENUM_BIDDINGSOURCE
_ADGROUPCRITERION.fields_by_name['position_estimates'].message_type = _ADGROUPCRITERION_POSITIONESTIMATES
_ADGROUPCRITERION.fields_by_name['url_custom_parameters'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_custom__parameter__pb2._CUSTOMPARAMETER
# The remaining message fields are the criterion variants defined in
# common/criteria.proto (one per member of the 'criterion' oneof below).
_ADGROUPCRITERION.fields_by_name['keyword'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2._KEYWORDINFO
_ADGROUPCRITERION.fields_by_name['placement'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2._PLACEMENTINFO
_ADGROUPCRITERION.fields_by_name['mobile_app_category'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2._MOBILEAPPCATEGORYINFO
_ADGROUPCRITERION.fields_by_name['mobile_application'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2._MOBILEAPPLICATIONINFO
_ADGROUPCRITERION.fields_by_name['listing_group'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2._LISTINGGROUPINFO
_ADGROUPCRITERION.fields_by_name['age_range'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2._AGERANGEINFO
_ADGROUPCRITERION.fields_by_name['gender'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2._GENDERINFO
_ADGROUPCRITERION.fields_by_name['income_range'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2._INCOMERANGEINFO
_ADGROUPCRITERION.fields_by_name['parental_status'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2._PARENTALSTATUSINFO
_ADGROUPCRITERION.fields_by_name['user_list'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2._USERLISTINFO
_ADGROUPCRITERION.fields_by_name['youtube_video'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2._YOUTUBEVIDEOINFO
_ADGROUPCRITERION.fields_by_name['youtube_channel'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2._YOUTUBECHANNELINFO
_ADGROUPCRITERION.fields_by_name['topic'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2._TOPICINFO
_ADGROUPCRITERION.fields_by_name['user_interest'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2._USERINTERESTINFO
_ADGROUPCRITERION.fields_by_name['webpage'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2._WEBPAGEINFO
_ADGROUPCRITERION.fields_by_name['app_payment_model'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2._APPPAYMENTMODELINFO
_ADGROUPCRITERION.fields_by_name['custom_affinity'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2._CUSTOMAFFINITYINFO
_ADGROUPCRITERION.fields_by_name['custom_intent'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2._CUSTOMINTENTINFO
_ADGROUPCRITERION.fields_by_name['custom_audience'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2._CUSTOMAUDIENCEINFO
_ADGROUPCRITERION.fields_by_name['combined_audience'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_criteria__pb2._COMBINEDAUDIENCEINFO
# protoc-generated wiring: populate oneof membership. First the real
# 'criterion' oneof (one entry per criterion variant), then the synthetic
# `_<field>` oneofs protoc emits for proto3 optional scalar fields.
# NOTE(review): generated code -- regenerating from the .proto overwrites edits.
_ADGROUPCRITERION.oneofs_by_name['criterion'].fields.append(
  _ADGROUPCRITERION.fields_by_name['keyword'])
_ADGROUPCRITERION.fields_by_name['keyword'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['criterion']
_ADGROUPCRITERION.oneofs_by_name['criterion'].fields.append(
  _ADGROUPCRITERION.fields_by_name['placement'])
_ADGROUPCRITERION.fields_by_name['placement'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['criterion']
_ADGROUPCRITERION.oneofs_by_name['criterion'].fields.append(
  _ADGROUPCRITERION.fields_by_name['mobile_app_category'])
_ADGROUPCRITERION.fields_by_name['mobile_app_category'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['criterion']
_ADGROUPCRITERION.oneofs_by_name['criterion'].fields.append(
  _ADGROUPCRITERION.fields_by_name['mobile_application'])
_ADGROUPCRITERION.fields_by_name['mobile_application'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['criterion']
_ADGROUPCRITERION.oneofs_by_name['criterion'].fields.append(
  _ADGROUPCRITERION.fields_by_name['listing_group'])
_ADGROUPCRITERION.fields_by_name['listing_group'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['criterion']
_ADGROUPCRITERION.oneofs_by_name['criterion'].fields.append(
  _ADGROUPCRITERION.fields_by_name['age_range'])
_ADGROUPCRITERION.fields_by_name['age_range'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['criterion']
_ADGROUPCRITERION.oneofs_by_name['criterion'].fields.append(
  _ADGROUPCRITERION.fields_by_name['gender'])
_ADGROUPCRITERION.fields_by_name['gender'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['criterion']
_ADGROUPCRITERION.oneofs_by_name['criterion'].fields.append(
  _ADGROUPCRITERION.fields_by_name['income_range'])
_ADGROUPCRITERION.fields_by_name['income_range'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['criterion']
_ADGROUPCRITERION.oneofs_by_name['criterion'].fields.append(
  _ADGROUPCRITERION.fields_by_name['parental_status'])
_ADGROUPCRITERION.fields_by_name['parental_status'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['criterion']
_ADGROUPCRITERION.oneofs_by_name['criterion'].fields.append(
  _ADGROUPCRITERION.fields_by_name['user_list'])
_ADGROUPCRITERION.fields_by_name['user_list'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['criterion']
_ADGROUPCRITERION.oneofs_by_name['criterion'].fields.append(
  _ADGROUPCRITERION.fields_by_name['youtube_video'])
_ADGROUPCRITERION.fields_by_name['youtube_video'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['criterion']
_ADGROUPCRITERION.oneofs_by_name['criterion'].fields.append(
  _ADGROUPCRITERION.fields_by_name['youtube_channel'])
_ADGROUPCRITERION.fields_by_name['youtube_channel'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['criterion']
_ADGROUPCRITERION.oneofs_by_name['criterion'].fields.append(
  _ADGROUPCRITERION.fields_by_name['topic'])
_ADGROUPCRITERION.fields_by_name['topic'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['criterion']
_ADGROUPCRITERION.oneofs_by_name['criterion'].fields.append(
  _ADGROUPCRITERION.fields_by_name['user_interest'])
_ADGROUPCRITERION.fields_by_name['user_interest'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['criterion']
_ADGROUPCRITERION.oneofs_by_name['criterion'].fields.append(
  _ADGROUPCRITERION.fields_by_name['webpage'])
_ADGROUPCRITERION.fields_by_name['webpage'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['criterion']
_ADGROUPCRITERION.oneofs_by_name['criterion'].fields.append(
  _ADGROUPCRITERION.fields_by_name['app_payment_model'])
_ADGROUPCRITERION.fields_by_name['app_payment_model'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['criterion']
_ADGROUPCRITERION.oneofs_by_name['criterion'].fields.append(
  _ADGROUPCRITERION.fields_by_name['custom_affinity'])
_ADGROUPCRITERION.fields_by_name['custom_affinity'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['criterion']
_ADGROUPCRITERION.oneofs_by_name['criterion'].fields.append(
  _ADGROUPCRITERION.fields_by_name['custom_intent'])
_ADGROUPCRITERION.fields_by_name['custom_intent'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['criterion']
_ADGROUPCRITERION.oneofs_by_name['criterion'].fields.append(
  _ADGROUPCRITERION.fields_by_name['custom_audience'])
_ADGROUPCRITERION.fields_by_name['custom_audience'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['criterion']
_ADGROUPCRITERION.oneofs_by_name['criterion'].fields.append(
  _ADGROUPCRITERION.fields_by_name['combined_audience'])
_ADGROUPCRITERION.fields_by_name['combined_audience'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['criterion']
# Synthetic oneofs for proto3 optional fields follow.
_ADGROUPCRITERION.oneofs_by_name['_criterion_id'].fields.append(
  _ADGROUPCRITERION.fields_by_name['criterion_id'])
_ADGROUPCRITERION.fields_by_name['criterion_id'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['_criterion_id']
_ADGROUPCRITERION.oneofs_by_name['_ad_group'].fields.append(
  _ADGROUPCRITERION.fields_by_name['ad_group'])
_ADGROUPCRITERION.fields_by_name['ad_group'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['_ad_group']
_ADGROUPCRITERION.oneofs_by_name['_negative'].fields.append(
  _ADGROUPCRITERION.fields_by_name['negative'])
_ADGROUPCRITERION.fields_by_name['negative'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['_negative']
_ADGROUPCRITERION.oneofs_by_name['_bid_modifier'].fields.append(
  _ADGROUPCRITERION.fields_by_name['bid_modifier'])
_ADGROUPCRITERION.fields_by_name['bid_modifier'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['_bid_modifier']
_ADGROUPCRITERION.oneofs_by_name['_cpc_bid_micros'].fields.append(
  _ADGROUPCRITERION.fields_by_name['cpc_bid_micros'])
_ADGROUPCRITERION.fields_by_name['cpc_bid_micros'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['_cpc_bid_micros']
_ADGROUPCRITERION.oneofs_by_name['_cpm_bid_micros'].fields.append(
  _ADGROUPCRITERION.fields_by_name['cpm_bid_micros'])
_ADGROUPCRITERION.fields_by_name['cpm_bid_micros'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['_cpm_bid_micros']
_ADGROUPCRITERION.oneofs_by_name['_cpv_bid_micros'].fields.append(
  _ADGROUPCRITERION.fields_by_name['cpv_bid_micros'])
_ADGROUPCRITERION.fields_by_name['cpv_bid_micros'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['_cpv_bid_micros']
_ADGROUPCRITERION.oneofs_by_name['_percent_cpc_bid_micros'].fields.append(
  _ADGROUPCRITERION.fields_by_name['percent_cpc_bid_micros'])
_ADGROUPCRITERION.fields_by_name['percent_cpc_bid_micros'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['_percent_cpc_bid_micros']
_ADGROUPCRITERION.oneofs_by_name['_effective_cpc_bid_micros'].fields.append(
  _ADGROUPCRITERION.fields_by_name['effective_cpc_bid_micros'])
_ADGROUPCRITERION.fields_by_name['effective_cpc_bid_micros'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['_effective_cpc_bid_micros']
_ADGROUPCRITERION.oneofs_by_name['_effective_cpm_bid_micros'].fields.append(
  _ADGROUPCRITERION.fields_by_name['effective_cpm_bid_micros'])
_ADGROUPCRITERION.fields_by_name['effective_cpm_bid_micros'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['_effective_cpm_bid_micros']
_ADGROUPCRITERION.oneofs_by_name['_effective_cpv_bid_micros'].fields.append(
  _ADGROUPCRITERION.fields_by_name['effective_cpv_bid_micros'])
_ADGROUPCRITERION.fields_by_name['effective_cpv_bid_micros'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['_effective_cpv_bid_micros']
_ADGROUPCRITERION.oneofs_by_name['_effective_percent_cpc_bid_micros'].fields.append(
  _ADGROUPCRITERION.fields_by_name['effective_percent_cpc_bid_micros'])
_ADGROUPCRITERION.fields_by_name['effective_percent_cpc_bid_micros'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['_effective_percent_cpc_bid_micros']
_ADGROUPCRITERION.oneofs_by_name['_final_url_suffix'].fields.append(
  _ADGROUPCRITERION.fields_by_name['final_url_suffix'])
_ADGROUPCRITERION.fields_by_name['final_url_suffix'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['_final_url_suffix']
_ADGROUPCRITERION.oneofs_by_name['_tracking_url_template'].fields.append(
  _ADGROUPCRITERION.fields_by_name['tracking_url_template'])
_ADGROUPCRITERION.fields_by_name['tracking_url_template'].containing_oneof = _ADGROUPCRITERION.oneofs_by_name['_tracking_url_template']
# protoc-generated wiring: register the file descriptor, then build the
# concrete AdGroupCriterion class (with nested QualityInfo and
# PositionEstimates) via the reflection metaclass and register each message
# with the default symbol database.
# NOTE(review): generated code -- regenerating from the .proto overwrites edits.
DESCRIPTOR.message_types_by_name['AdGroupCriterion'] = _ADGROUPCRITERION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AdGroupCriterion = _reflection.GeneratedProtocolMessageType('AdGroupCriterion', (_message.Message,), {
  'QualityInfo' : _reflection.GeneratedProtocolMessageType('QualityInfo', (_message.Message,), {
    'DESCRIPTOR' : _ADGROUPCRITERION_QUALITYINFO,
    '__module__' : 'google.ads.googleads.v6.resources.ad_group_criterion_pb2'
    # @@protoc_insertion_point(class_scope:google.ads.googleads.v6.resources.AdGroupCriterion.QualityInfo)
    })
  ,
  'PositionEstimates' : _reflection.GeneratedProtocolMessageType('PositionEstimates', (_message.Message,), {
    'DESCRIPTOR' : _ADGROUPCRITERION_POSITIONESTIMATES,
    '__module__' : 'google.ads.googleads.v6.resources.ad_group_criterion_pb2'
    # @@protoc_insertion_point(class_scope:google.ads.googleads.v6.resources.AdGroupCriterion.PositionEstimates)
    })
  ,
  'DESCRIPTOR' : _ADGROUPCRITERION,
  '__module__' : 'google.ads.googleads.v6.resources.ad_group_criterion_pb2'
  # @@protoc_insertion_point(class_scope:google.ads.googleads.v6.resources.AdGroupCriterion)
  })
_sym_db.RegisterMessage(AdGroupCriterion)
_sym_db.RegisterMessage(AdGroupCriterion.QualityInfo)
_sym_db.RegisterMessage(AdGroupCriterion.PositionEstimates)
# protoc-generated boilerplate: reset the cached `_options` on every
# descriptor that carries serialized_options (they are re-parsed lazily from
# the serialized form by the protobuf runtime).
# NOTE(review): generated code -- regenerating from the .proto overwrites edits.
DESCRIPTOR._options = None
_ADGROUPCRITERION_QUALITYINFO.fields_by_name['quality_score']._options = None
_ADGROUPCRITERION_QUALITYINFO.fields_by_name['creative_quality_score']._options = None
_ADGROUPCRITERION_QUALITYINFO.fields_by_name['post_click_quality_score']._options = None
_ADGROUPCRITERION_QUALITYINFO.fields_by_name['search_predicted_ctr']._options = None
_ADGROUPCRITERION_POSITIONESTIMATES.fields_by_name['first_page_cpc_micros']._options = None
_ADGROUPCRITERION_POSITIONESTIMATES.fields_by_name['first_position_cpc_micros']._options = None
_ADGROUPCRITERION_POSITIONESTIMATES.fields_by_name['top_of_page_cpc_micros']._options = None
_ADGROUPCRITERION_POSITIONESTIMATES.fields_by_name['estimated_add_clicks_at_first_position_cpc']._options = None
_ADGROUPCRITERION_POSITIONESTIMATES.fields_by_name['estimated_add_cost_at_first_position_cpc']._options = None
_ADGROUPCRITERION.fields_by_name['resource_name']._options = None
_ADGROUPCRITERION.fields_by_name['criterion_id']._options = None
_ADGROUPCRITERION.fields_by_name['quality_info']._options = None
_ADGROUPCRITERION.fields_by_name['ad_group']._options = None
_ADGROUPCRITERION.fields_by_name['type']._options = None
_ADGROUPCRITERION.fields_by_name['negative']._options = None
_ADGROUPCRITERION.fields_by_name['system_serving_status']._options = None
_ADGROUPCRITERION.fields_by_name['approval_status']._options = None
_ADGROUPCRITERION.fields_by_name['disapproval_reasons']._options = None
_ADGROUPCRITERION.fields_by_name['effective_cpc_bid_micros']._options = None
_ADGROUPCRITERION.fields_by_name['effective_cpm_bid_micros']._options = None
_ADGROUPCRITERION.fields_by_name['effective_cpv_bid_micros']._options = None
_ADGROUPCRITERION.fields_by_name['effective_percent_cpc_bid_micros']._options = None
_ADGROUPCRITERION.fields_by_name['effective_cpc_bid_source']._options = None
_ADGROUPCRITERION.fields_by_name['effective_cpm_bid_source']._options = None
_ADGROUPCRITERION.fields_by_name['effective_cpv_bid_source']._options = None
_ADGROUPCRITERION.fields_by_name['effective_percent_cpc_bid_source']._options = None
_ADGROUPCRITERION.fields_by_name['position_estimates']._options = None
_ADGROUPCRITERION.fields_by_name['keyword']._options = None
_ADGROUPCRITERION.fields_by_name['placement']._options = None
_ADGROUPCRITERION.fields_by_name['mobile_app_category']._options = None
_ADGROUPCRITERION.fields_by_name['mobile_application']._options = None
_ADGROUPCRITERION.fields_by_name['listing_group']._options = None
_ADGROUPCRITERION.fields_by_name['age_range']._options = None
_ADGROUPCRITERION.fields_by_name['gender']._options = None
_ADGROUPCRITERION.fields_by_name['income_range']._options = None
_ADGROUPCRITERION.fields_by_name['parental_status']._options = None
_ADGROUPCRITERION.fields_by_name['user_list']._options = None
_ADGROUPCRITERION.fields_by_name['youtube_video']._options = None
_ADGROUPCRITERION.fields_by_name['youtube_channel']._options = None
_ADGROUPCRITERION.fields_by_name['topic']._options = None
_ADGROUPCRITERION.fields_by_name['user_interest']._options = None
_ADGROUPCRITERION.fields_by_name['webpage']._options = None
_ADGROUPCRITERION.fields_by_name['app_payment_model']._options = None
_ADGROUPCRITERION.fields_by_name['custom_affinity']._options = None
_ADGROUPCRITERION.fields_by_name['custom_intent']._options = None
_ADGROUPCRITERION.fields_by_name['custom_audience']._options = None
_ADGROUPCRITERION.fields_by_name['combined_audience']._options = None
_ADGROUPCRITERION._options = None
# @@protoc_insertion_point(module_scope)
| [
"noreply@github.com"
] | VincentFritzsche.noreply@github.com |
df84bf9d01fc1b6084257e37167497a0c70e75dd | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /Configuration/Generator/python/SingleElectronFlatPt5To100_pythia8_cfi.py | 37df2ba0d8904688e35cfd867a38350252f6e5ef | [
"Apache-2.0"
] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 813 | py | import FWCore.ParameterSet.Config as cms
# Single-electron particle gun: flat pT spectrum between 5 and 100 GeV over
# |eta| < 2.5, full phi.  AddAntiParticle=True means a positron is generated
# alongside each electron.
generator = cms.EDFilter("Pythia8PtGun",
    PGunParameters = cms.PSet(
        # Fix: MinPt/MaxPt values were swapped (Min=100, Max=5), which leaves
        # an empty pT window.  The fragment name and the 'psethack' string
        # below both specify "pt 5 to 100".
        MinPt = cms.double(5.),
        MaxPt = cms.double(100.),
        ParticleID = cms.vint32(11),       # PDG id 11 = electron
        AddAntiParticle = cms.bool(True),  # also emit the charge conjugate (e+)
        MaxEta = cms.double(2.5),
        MaxPhi = cms.double(3.14159265359),
        MinEta = cms.double(-2.5),
        MinPhi = cms.double(-3.14159265359) ## in radians
    ),
    Verbosity = cms.untracked.int32(0), ## set to 1 (or greater) for printouts
    psethack = cms.string('single electron pt 5 to 100'),
    firstRun = cms.untracked.uint32(1),
    PythiaParameters = cms.PSet(parameterSets = cms.vstring())
)
| [
"you@somedomain.com"
] | you@somedomain.com |
bb5c4a178311c0c518c3f3ebdd8d1c2c6be4c22b | ea2fe13acb40c6139d7a5b5527eee8b9e9c24321 | /tasks/migrations/0003_auto_20160409_1925.py | 08c5bea75403ffffc37ef4e1fc5e659d5b847f3e | [] | no_license | rmad17/todolist-django | 006e1dff753dd9cf3824e882d1483ddf869a7236 | 48c1fb10fd381ee177c149efa3fd426bde3c46ce | refs/heads/master | 2021-01-10T05:13:12.125910 | 2016-04-13T17:24:43 | 2016-04-13T17:24:43 | 55,792,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-09 19:25
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Task.created_at's default (auto-generated by makemigrations)."""

    dependencies = [
        ('tasks', '0002_auto_20160409_1924'),
    ]
    operations = [
        migrations.AlterField(
            model_name='task',
            name='created_at',
            # NOTE(review): the default was captured as a fixed wall-clock
            # timestamp at makemigrations time; the model presumably intended
            # a callable such as timezone.now — confirm against the model.
            # Left unchanged: migration files must stay byte-stable.
            field=models.DateTimeField(default=datetime.datetime(2016, 4, 9, 19, 25, 8, 76061)),
        ),
    ]
| [
"souravbasu17@gmail.com"
] | souravbasu17@gmail.com |
7c258ecc296b93e65bf8e0cbc5b9c3df0c21f607 | 21818228cb62d31b9685de44deb27cfd90430573 | /ccxt/flowbtc.py | 2153a8b8e285212a60a2754aaf3d616c1ebb77d1 | [] | no_license | mico/cryptoArbitrage | d9d5d2f89e3fccc0b84d9c13b771edef0f2b00a1 | ea9ef03e79f302b36948746c77e4acbb3d6f01b7 | refs/heads/master | 2021-03-22T00:17:30.448593 | 2018-05-28T05:08:21 | 2018-05-28T05:08:21 | 108,232,310 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 7,310 | py | # -*- coding: utf-8 -*-
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
class flowbtc (Exchange):
    """ccxt adapter for the flowBTC exchange (Brazil) REST API v1.

    Follows the ccxt Exchange contract: `describe` supplies static metadata,
    `fetch_*` methods call the public endpoints, order management uses the
    private endpoints, and `sign`/`request` implement transport/auth.
    """

    def describe(self):
        """Return static exchange metadata merged over the base description."""
        return self.deep_extend(super(flowbtc, self).describe(), {
            'id': 'flowbtc',
            'name': 'flowBTC',
            'countries': 'BR', # Brazil
            'version': 'v1',
            'rateLimit': 1000,
            'hasCORS': True,
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/28162465-cd815d4c-67cf-11e7-8e57-438bea0523a2.jpg',
                'api': 'https://api.flowbtc.com:8400/ajax',
                'www': 'https://trader.flowbtc.com',
                'doc': 'http://www.flowbtc.com.br/api/',
            },
            # apiKey, secret AND uid are all required for private calls.
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
                'uid': True,
            },
            # All endpoints are POST, including the public ones.
            'api': {
                'public': {
                    'post': [
                        'GetTicker',
                        'GetTrades',
                        'GetTradesByDate',
                        'GetOrderBook',
                        'GetProductPairs',
                        'GetProducts',
                    ],
                },
                'private': {
                    'post': [
                        'CreateAccount',
                        'GetUserInfo',
                        'SetUserInfo',
                        'GetAccountInfo',
                        'GetAccountTrades',
                        'GetDepositAddresses',
                        'Withdraw',
                        'CreateOrder',
                        'ModifyOrder',
                        'CancelOrder',
                        'CancelAllOrders',
                        'GetAccountOpenOrders',
                        'GetOrderFee',
                    ],
                },
            },
        })

    def fetch_markets(self):
        """Fetch the list of tradeable product pairs as ccxt market dicts."""
        response = self.publicPostGetProductPairs()
        markets = response['productPairs']
        result = []
        for p in range(0, len(markets)):
            market = markets[p]
            id = market['name']
            base = market['product1Label']
            quote = market['product2Label']
            symbol = base + '/' + quote
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'info': market,
            })
        return result

    def fetch_balance(self, params={}):
        """Fetch account balances; 'total' is computed as free + used."""
        self.load_markets()
        response = self.privatePostGetAccountInfo()
        balances = response['currencies']
        result = {'info': response}
        for b in range(0, len(balances)):
            balance = balances[b]
            currency = balance['name']
            account = {
                'free': balance['balance'],
                'used': balance['hold'],
                'total': 0.0,
            }
            account['total'] = self.sum(account['free'], account['used'])
            result[currency] = account
        return self.parse_balance(result)

    def fetch_order_book(self, symbol, params={}):
        """Fetch the order book; price/amount live under 'px'/'qty' keys."""
        self.load_markets()
        market = self.market(symbol)
        orderbook = self.publicPostGetOrderBook(self.extend({
            'productPair': market['id'],
        }, params))
        return self.parse_order_book(orderbook, None, 'bids', 'asks', 'px', 'qty')

    def fetch_ticker(self, symbol, params={}):
        """Fetch the 24h ticker for a symbol.

        The API does not return a timestamp, so local time is used.
        """
        self.load_markets()
        market = self.market(symbol)
        ticker = self.publicPostGetTicker(self.extend({
            'productPair': market['id'],
        }, params))
        timestamp = self.milliseconds()
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': float(ticker['high']),
            'low': float(ticker['low']),
            'bid': float(ticker['bid']),
            'ask': float(ticker['ask']),
            'vwap': None,
            'open': None,
            'close': None,
            'first': None,
            'last': float(ticker['last']),
            'change': None,
            'percentage': None,
            'average': None,
            'baseVolume': float(ticker['volume24hr']),
            'quoteVolume': float(ticker['volume24hrProduct2']),
            'info': ticker,
        }

    def parse_trade(self, trade, market):
        """Convert one raw public trade into the unified ccxt trade dict."""
        timestamp = trade['unixtime'] * 1000
        # incomingOrderSide 0 means the incoming (taker) order was a buy.
        side = 'buy' if (trade['incomingOrderSide'] == 0) else 'sell'
        return {
            'info': trade,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': market['symbol'],
            'id': str(trade['tid']),
            'order': None,
            'type': None,
            'side': side,
            'price': trade['px'],
            'amount': trade['qty'],
        }

    def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch recent public trades (startIndex -1 = most recent)."""
        self.load_markets()
        market = self.market(symbol)
        response = self.publicPostGetTrades(self.extend({
            'ins': market['id'],
            'startIndex': -1,
        }, params))
        return self.parse_trades(response['trades'], market)

    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place an order; orderType 1 = market, 0 = limit."""
        self.load_markets()
        orderType = 1 if (type == 'market') else 0
        order = {
            'ins': self.market_id(symbol),
            'side': side,
            'orderType': orderType,
            'qty': amount,
            'px': price,
        }
        response = self.privatePostCreateOrder(self.extend(order, params))
        return {
            'info': response,
            'id': response['serverOrderId'],
        }

    def cancel_order(self, id, symbol=None, params={}):
        """Cancel an order by server id; requires the 'ins' param (market id)."""
        self.load_markets()
        if 'ins' in params:
            return self.privatePostCancelOrder(self.extend({
                'serverOrderId': id,
            }, params))
        raise ExchangeError(self.id + ' requires `ins` symbol parameter for cancelling an order')

    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the request: public calls send JSON params; private calls
        add apiKey/apiNonce and an upper-cased HMAC signature of nonce+uid+key."""
        url = self.urls['api'] + '/' + self.version + '/' + path
        if api == 'public':
            if params:
                body = self.json(params)
        else:
            self.check_required_credentials()
            nonce = self.nonce()
            auth = str(nonce) + self.uid + self.apiKey
            signature = self.hmac(self.encode(auth), self.encode(self.secret))
            body = self.json(self.extend({
                'apiKey': self.apiKey,
                'apiNonce': nonce,
                'apiSig': signature.upper(),
            }, params))
            headers = {
                'Content-Type': 'application/json',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}

    def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Send the request and raise ExchangeError unless isAccepted is true."""
        response = self.fetch2(path, api, method, params, headers, body)
        if 'isAccepted' in response:
            if response['isAccepted']:
                return response
        raise ExchangeError(self.id + ' ' + self.json(response))
| [
"artur.komarov@gmail.com"
] | artur.komarov@gmail.com |
0e4fe9d5ad6159227a06c9b5093fad2b2cf1b987 | 02912a35ae0178be82ad3b138edcb560bee6cb88 | /pyez/pyez_building_blocks/bb2.collecing.show.commands.py | b8f673b674ed8491bd9594869911da443e556dda | [
"Apache-2.0"
] | permissive | Juniper/junosautomation | 72b9c5468d50a95987ee422c6de0e3db1ac5aea1 | d26f1a57067b7c43aed596b31e695491b5ba4603 | refs/heads/master | 2023-09-03T20:01:36.278032 | 2022-09-20T06:09:12 | 2022-09-20T06:09:12 | 56,670,285 | 127 | 86 | Apache-2.0 | 2023-05-18T12:28:09 | 2016-04-20T08:37:02 | Jupyter Notebook | UTF-8 | Python | false | false | 4,901 | py | #!/usr/bin/env python
#
# Copyright 2017 Juniper Networks, Inc. All rights reserved.
# Licensed under the Juniper Networks Script Software License (the "License").
# You may not use this script file except in compliance with the License, which is located at
# http://www.juniper.net/support/legal/scriptlicense/
# Unless required by applicable law or otherwise agreed to in writing by the parties, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied.
#
# Author.........: Diogo Montagner <dmontagner@juniper.net>
# Created on.....: 15/Dec/2017
# Version........: 1.0
# Platform.......: agnostic
# Description....: Simple example of collecting show commands from Juniper routers
#
import logging
import sys
import datetime
import pprint
from jnpr.junos import Device
from lxml import etree
from collections import defaultdict
from netaddr import *
# setting logging capabilities
# Root logger with a single console handler; used by all helpers below.
log = logging.getLogger() # 'root' Logger
console = logging.StreamHandler()
format_str = '%(asctime)s\t%(levelname)s -- %(processName)s %(filename)s:%(lineno)s -- %(message)s'
console.setFormatter(logging.Formatter(format_str))
log.addHandler(console) # prints to console.
# set the log level here
#log.setLevel(logging.WARN)
log.setLevel(logging.ERROR)
#
# This method is used to open a NETCONF session with the router
#
def connectToRouter(userName, userPassword, router):
    """Open a NETCONF session to `router`.

    Returns the open PyEZ Device connection, or None on failure.
    """
    try:
        # Fix: the original format string had four %s placeholders for three
        # arguments (a logging formatting error) and logged the password.
        log.debug("user = %s, router = %s", userName, router)
        dev = Device(host=router, user=userName, password=userPassword, gather_facts=False)
        routerConnection = dev.open()
        log.warn("established NETCONF session with the router %s", router)
        return routerConnection
    except Exception as e:
        log.error("could not connect to the router %s", router)
        # str(e) instead of e.message: .message is a deprecated py2-ism.
        log.error(str(e))
        return None
#
# Collects the output of "show bgp summary" from the router via RPC.
#
def getShowBgpSummary(conn, output_format):
    """Run the get-bgp-summary-information RPC on an open PyEZ connection.

    output_format: "xml" for the structured reply, "txt" for CLI-style text.
    Returns the lxml reply element, or None on error / unknown format.
    """
    # RPC discovered with: show bgp summary | display xml rpc
    #   -> <get-bgp-summary-information/>
    log.debug("entered getShowBgpSummary")
    bgpOutput = None
    if (conn == None):
        log.error("the NETCONF session to the router is not open")
        return None
    try:
        log.debug("collecting the show bgp in format %s", output_format)
        if (output_format == "xml"):
            bgpOutput = conn.rpc.get_bgp_summary_information()
        elif (output_format == "txt"):
            bgpOutput = conn.rpc.get_bgp_summary_information({'format': 'text'})
        return bgpOutput
    except Exception as e:
        # Fix: the message previously said "router configuration" (copied
        # from another script) although this collects the BGP summary.
        log.error("could not collect the BGP summary via RPC")
        # str(e) instead of e.message: .message is a deprecated py2-ism.
        log.error(str(e))
        return None
def main():
    """Demo driver: connect, collect BGP summary in XML and text, print both."""
    # Placeholders: edit these before running the script.
    router = "<your-router-IP-here>"
    rtUser = "<your-username-here>"
    rtPassword = "<your-password-here>"
    print("")
    print("")
    # Let's connect to the router
    conn = connectToRouter(rtUser, rtPassword, router)
    if (conn == None):
        print("ERROR: could not connect to router " + router)
        print("")
        print("exiting ...")
        sys.exit(-1)
    bgpOutputXML = getShowBgpSummary(conn, "xml")
    if (len(bgpOutputXML) > 0):
        print("")
        print("=======----- Printing XML string of the BGP output -----=======")
        print(etree.tostring(bgpOutputXML))
        print("")
        print("=======-------------------------------------------------=======")
        print("")
        print("")
    else:
        print("could not collect the BGP output in XML format from the router " + router)
        print("")
    bgpOutputTXT = None
    bgpOutputTXT = getShowBgpSummary(conn, "txt")
    if ( not((bgpOutputTXT) == None) ):
        print("")
        print("=======----- Printing TXT string of the BGP output -----=======")
        print(etree.tostring(bgpOutputTXT))
        print("")
        print("=======-------------------------------------------------=======")
        print("")
        print("")
        # removing the <output> tag
        # The text reply wraps the CLI output in <output>...</output>.
        bgpOutputTXT_nonXML = bgpOutputTXT.xpath("//output")[0].text
        print("=======----- Printing TXT string of the BGP output non-XML -----=======")
        print(bgpOutputTXT_nonXML)
        print("=======----------------------------------------------------------=======")
    else:
        print("could not collect the BGP output in TXT format from the router " + router)
        print("")
if __name__ == '__main__':
    main()
"dmontagner@juniper.net"
] | dmontagner@juniper.net |
a2258d5ec114649054335c1e6162c92ddbcd094e | 9efc82c94bd00c4e09871b8b6273c3d22a3394b8 | /offline_data_prepare/bin/deal_data_bak.py | 02e928368ba99c1b3730e092ad857acc862e5b48 | [] | no_license | wumengfei/yezhuzhoubao_yezhu | f022c8f7856aa3d867b98ceb10a500b05f95955b | e6a6b45f99d417c1982e46f5b63c527ae1c8cb98 | refs/heads/master | 2021-01-11T14:08:52.480666 | 2017-06-21T08:10:12 | 2017-06-21T08:10:12 | 94,979,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,575 | py | # -*- coding: utf-8 -*-
import sys
sys.path.append('conf')
import conf
import urllib
from datetime import *
from datetime import time
from datetime import timedelta
import json
import traceback
import pdb
import redis_client
from yzd_redias_api_new import *
from Moniter import MyLog
# Module-level setup: redis client, error log sink, logger config from INI.
rc = Redias_client(conf.redis_conf)
err_f = open(conf.error_file,'a')  # shared sink for traceback.print_exc
cf = ConfigParser.ConfigParser()
cf.read(conf.redis_conf)
log_file = cf.get('log_info', 'log_file')
log_name = cf.get('log_info', 'log_name')
log_level = cf.get('log_info', 'log_level')
log_wan_reciever = cf.get('log_info', 'log_wan_reciever')
# Reference "today" shifted back by conf.time_delta days.
today = datetime.now() - timedelta(days = conf.time_delta)
# old->new house-code mapping; populated in __main__ via load_map_data().
map_dict = {}
try:
    my_log = MyLog(log_file, log_name, log_level, log_wan_reciever)
except:  # NOTE(review): bare except hides the real failure reason
    sys.stderr.write("failed to create MyLog instance!")
    exit(1)
# Last week's listing price for houses currently on sale
def list_price_last_week():
    """Load last week's listing total price per (mapped) house code.

    Returns {house_code: total_price_str}; the first record per code wins.
    """
    result = {}
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(conf.house_on_sale_last_week, 'r') as file_obj:
        for line in file_obj:
            tmp = line.rstrip("\n").split("\t")
            house_code = tmp[0]
            if house_code in map_dict:
                house_code = map_dict[house_code]
            total_prices = tmp[1]
            if house_code not in result:
                result[house_code] = total_prices
    return result
# Number of showings over the last week for all houses
def load_showing_data():
    """Aggregate per-house showing counts, caching the merged result.

    If the cache file (conf.showing_add) exists, it is loaded and returned.
    Otherwise the base and incremental files are merged, written to the
    cache, and returned as {house_code: float_count}.
    """
    showing_dict = {}
    cache_path = conf.showing_add
    if os.path.isfile(cache_path):
        with open(cache_path, 'r') as cache_file:
            for line in cache_file:
                tmp = line.rstrip("\n").split("\t")
                house_code = tmp[0]
                if house_code in map_dict:
                    house_code = map_dict[house_code]
                if house_code not in showing_dict:
                    showing_dict[house_code] = float(tmp[1])
        return showing_dict
    with open(conf.showing_base, 'r') as base_file:
        for line in base_file:
            tmp = line.rstrip("\n").split("\t")
            house_code = tmp[0]
            if house_code in map_dict:
                house_code = map_dict[house_code]
            if house_code not in showing_dict:
                showing_dict[house_code] = float(tmp[1])
    with open(conf.showing_file, 'r') as add_file:
        for line in add_file:
            tmp = line.rstrip("\n").split("\t")
            house_code = tmp[0]
            if house_code in map_dict:
                house_code = map_dict[house_code]
            if house_code in showing_dict:
                showing_dict[house_code] += float(tmp[1])
            else:
                showing_dict[house_code] = float(tmp[1])
    # Fix: the original called .write() on the path *string* (AttributeError)
    # and concatenated a float into the line (TypeError). Open the cache for
    # writing and stringify the count.
    with open(cache_path, 'w') as cache_file:
        for house in showing_dict:
            cache_file.write(house + "\t" + str(showing_dict[house]) + "\n")
    return showing_dict
# Showing counts and basic info for all listed houses
def list_house_this_week(showing_dict, sold_dict):
    """Build per-house feature dict for every listed house.

    Per house: area, price, listing-price trend vs last week, deal info
    (if sold), listing/sold intervals in days, averages, showing count.
    Records with NULL/zero area or NULL price are skipped.
    """
    file_obj = open(conf.list_house, 'r')
    list_house_dict = {}
    my_log.debug("start load list price last week")
    list_price_last_week_dict = list_price_last_week()
    my_log.debug("load list price last week")
    for line in file_obj:
        tmp = line.rstrip("\n").split("\t")
        house_code = tmp[0]
        if house_code in map_dict:
            house_code = map_dict[house_code]
        if house_code not in list_house_dict:
            list_house_dict[house_code] = {}
        # assumes columns: 0=code, 1=total price, 2=built area, 4=create time
        build_area = tmp[2]
        total_prices = tmp[1]
        create_time = tmp[4]
        create_time_tmp = datetime.strptime(create_time,'%Y-%m-%d %H:%M:%S')
        if build_area == "NULL" or total_prices == "NULL" or float(build_area) == 0:
            continue
        # Listing-price trend ("qushi") relative to last week's price.
        if house_code in list_price_last_week_dict:
            last_list_price = list_price_last_week_dict[house_code]
            list_house_dict[house_code]["list_price_last_week"] = \
                last_list_price
            if float(total_prices) > float(last_list_price):
                list_house_dict[house_code]["list_price_qushi"] = "rise"
            else:
                list_house_dict[house_code]["list_price_qushi"] = "down"
        else:
            list_house_dict[house_code]["list_price_last_week"] = "NULL"
            list_house_dict[house_code]["list_price_qushi"] = "NULL"
        list_house_dict[house_code]["build_area"] = build_area
        list_house_dict[house_code]["total_prices"] = total_prices
        list_house_dict[house_code]["create_time"] = create_time
        if house_code in sold_dict:
            # Sold house: intervals measured from listing creation to deal.
            list_house_dict[house_code]["realmoney"] = sold_dict[house_code]["realmoney"]
            list_house_dict[house_code]["dealdate"] = sold_dict[house_code]["deal_time"]
            time_tmp = list_house_dict[house_code]["dealdate"]
            deal_date = datetime.strptime(time_tmp, "%Y%m%d")
            list_house_dict[house_code]["sold_interval"] = \
                int((deal_date - create_time_tmp).days)
            list_house_dict[house_code]["sold_avg"] = float(list_house_dict[house_code]["realmoney"]) / float(build_area)
            list_house_dict[house_code]["list_interval"] = list_house_dict[house_code]["sold_interval"]
        else:
            # list house
            # Still on sale: interval measured up to the shifted "today".
            list_house_dict[house_code]["realmoney"] = "NULL"
            list_house_dict[house_code]["dealdate"] = "NULL"
            list_house_dict[house_code]["sold_interval"] = "NULL"
            list_house_dict[house_code]["sold_avg"] = "NULL"
            today = datetime.now() - timedelta(days = conf.time_delta)
            time_delta = today - create_time_tmp
            list_house_dict[house_code]["list_interval"] = int(time_delta.days)
        list_house_dict[house_code]["list_avg"] = float(total_prices) / float(build_area)
        if house_code in showing_dict:
            list_house_dict[house_code]["showing"] = showing_dict[house_code]
        else:
            list_house_dict[house_code]["showing"] = 0
    return list_house_dict
# Summary info for sold (dealt) houses
def deal_house_this_week():
    """Load this week's deals: {house_code: {realmoney, deal_time}}.

    Records with realmoney == "NULL" are skipped; on duplicate codes the
    last record wins (matches the original assignment order).
    """
    sold_house_dict = {}
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(conf.deal_house, 'r') as file_obj:
        for line in file_obj:
            tmp = line.rstrip("\n").split("\t")
            house_code = tmp[0]
            deal_time = tmp[2]
            realmoney = tmp[3]
            if realmoney == "NULL":
                continue
            if house_code in map_dict:
                house_code = map_dict[house_code]
            if house_code not in sold_house_dict:
                sold_house_dict[house_code] = {}
            sold_house_dict[house_code]["realmoney"] = realmoney
            sold_house_dict[house_code]["deal_time"] = deal_time
    return sold_house_dict
def similar_house_this_week(house_code, house_dict, sold_similar_list, list_similar_list):
    """Assemble the weekly-report record for one house.

    Returns {"<house_code>-<date>": {"sold_similar": [...], "list_similar":
    [...]}}; either key is present only when its input list is non-empty.
    """
    result_dict = {}
    #today = time.strftime("%Y%m%d")
    # Date key shifted back by conf.time_delta + 1 days.
    time_delta = conf.time_delta + 1
    today = (datetime.now() - timedelta(days = time_delta)).strftime("%Y%m%d")
    key = house_code + "-" + today
    result_dict[key] = {}
    if len(sold_similar_list) > 0:
        result_dict[key]["sold_similar"] = []
        try:
            for house in sold_similar_list:
                build_size = house_dict[house]["build_area"]
                showing = house_dict[house]["showing"]
                deal_interval = house_dict[house]["sold_interval"]
                result_dict[key]["sold_similar"].append((house, \
                    build_size, showing, deal_interval))
        except Exception, e:
            traceback.print_exc(file = err_f)
    # NOTE(review): rise_tmp/down_tmp are never used below.
    rise_tmp = 0
    down_tmp = 0
    if len(list_similar_list) > 0:
        result_dict[key]["list_similar"] = []
        try:
            for house in list_similar_list:
                build_size = house_dict[house]["build_area"]
                list_interval = house_dict[house]["list_interval"]
                list_price_qushi = house_dict[house]["list_price_qushi"]
                showing = house_dict[house]["showing"]
                list_price = house_dict[house]["total_prices"]
                result_dict[key]["list_similar"].append((house, build_size, \
                    list_interval, list_price_qushi, showing, list_price))
        except Exception, e:
            traceback.print_exc(file = err_f)
    return result_dict
def weekly_report(house_dict, sold_list):
    """Write one JSON line per listed house to conf.output.

    For each house, similar houses (from the redis index) are split into
    sold vs still-listed, then serialized via similar_house_this_week.
    """
    file_obj = open(conf.list_house, 'r')
    output_obj = open(conf.output, 'w')
    my_log.debug("start load redis")
    index_dict = load_redis()
    my_log.debug("load redis")
    try:
        for line in file_obj:
            tmp = line.rstrip("\n").split("\t")
            house_code = tmp[0]
            if house_code in map_dict:
                house_code = map_dict[house_code]
            similar_list = get_similar_house(index_dict, house_code)
            my_log.debug("get_similar_list")
            # Partition similar houses into sold vs still listed.
            sold_similar_list = []
            list_similar_list = []
            for house in similar_list:
                if house in sold_list:
                    sold_similar_list.append(house)
                elif house in house_dict:
                    list_similar_list.append(house)
                else:
                    continue
            #print "list_similar:", list_similar_list
            #print "sold_similar:", sold_similar_list
            my_log.debug("start to get result")
            result_dict = similar_house_this_week(house_code, house_dict, \
                sold_similar_list, list_similar_list)
            my_log.debug("get result")
            #print result_dict
            output_obj.write(json.dumps(result_dict))
            my_log.debug("write file")
            output_obj.write("\n")
    except Exception, e:
        traceback.print_exc(file = err_f)
def load_map_data():
    """Load the old->new house-code mapping from conf.map_data.

    Returns {old_code: new_code}; the first record per old code wins.
    """
    map_dict = {}
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(conf.map_data, 'r') as map_file:
        for line in map_file:
            tmp = line.rstrip("\n").split("\t")
            house_code = tmp[0]
            new_code = tmp[1]
            if house_code not in map_dict:
                map_dict[house_code] = new_code
    return map_dict
if __name__ == "__main__":
try:
my_log.debug("start load showing data")
map_dict = load_map_data()
showing_dict = load_showing_data()
my_log.debug("load showing data")
my_log.debug("start load sold_dict")
sold_house_dict = deal_house_this_week()
my_log.debug("load sold_house_dict")
my_log.debug("load list_house_dict")
house_dict = list_house_this_week(showing_dict, sold_house_dict)
my_log.debug("load house_dict")
weekly_report(house_dict, sold_house_dict)
except Exception, e:
traceback.print_exc(file = err_f)
| [
"andy_wumengfei@hotmail.com"
] | andy_wumengfei@hotmail.com |
6500f0dba8f77f794cf28dbd6bf359a04167be39 | 684b554b9c05c0a5a4efb44f15dd063715d48899 | /socialnet/groups/migrations/0001_initial.py | a7e7a3bfafafd8bc208d7edd510398b3b028480a | [] | no_license | iceljc/Frank-Social-Django-App | 681a8d0eac1a81a293a63e6f77fc3fbbc3102eca | 111f5f472b3fa2dac9a312cc24701e27827ce9c7 | refs/heads/master | 2022-05-03T21:47:54.202506 | 2019-12-27T01:47:39 | 2019-12-27T01:47:39 | 230,328,640 | 0 | 0 | null | 2022-04-22T22:55:14 | 2019-12-26T21:11:25 | Python | UTF-8 | Python | false | false | 1,759 | py | # Generated by Django 2.2.9 on 2019-12-26 08:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the groups app: Group, GroupMember (M2M through)."""

    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Group',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256, unique=True)),
                ('slug', models.SlugField(allow_unicode=True, unique=True)),
                ('description', models.TextField(blank=True, default='')),
                ('description_html', models.TextField(blank=True, default='', editable=False)),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='GroupMember',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='memberships', to='groups.Group')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_groups', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # One membership row per (group, user) pair.
                'unique_together': {('group', 'user')},
            },
        ),
        migrations.AddField(
            model_name='group',
            name='members',
            field=models.ManyToManyField(through='groups.GroupMember', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"franklujc@gmail.com"
] | franklujc@gmail.com |
dcd8e325ace6b51794580b2b65cb39022b4d9256 | 837ebd601d0882e370522719606c975b9c815ad2 | /adm/templates/plugins/mediation/{{cookiecutter.name}}/main/application.py | 9ee876166f3b2151ae78f6033bb9438e0b514679 | [
"BSD-3-Clause"
] | permissive | dearith/mfserv | 8ba97e211d31a177fc6de160cd4b1f8555ebf600 | ad72e51bf77595a75dcb2600d7323f13e2c2fb4b | refs/heads/master | 2021-08-15T21:17:30.528351 | 2019-04-25T10:25:58 | 2019-04-25T10:25:58 | 183,577,154 | 0 | 0 | null | 2019-04-26T07:10:44 | 2019-04-26T07:10:43 | null | UTF-8 | Python | false | false | 2,141 | py | from aiohttp import web, ClientSession
from aiohttp_metwork_middlewares import mflog_middleware
CHUNK_SIZE = 4096 * 1024
STREAMING_MODE = True
async def handle(request):
# Log something with context aware logger
log = request['mflog_logger']
http_method = request.method
url_path_qs = request.path_qs
log.info("got a %s call on %s" % (http_method, url_path_qs))
# For this example, we limit the service to GET/HEAD methods
if http_method not in ["GET", "HEAD"]:
return web.Response(status=405)
# Let's build the backend url
backend_url = "http://mybackend%s" % url_path_qs
async with ClientSession() as session:
log.info("calling %s on %s..." % (http_method, backend_url))
async with session.get(backend_url) as resp:
backend_status = resp.status
log.info("got an HTTP/%i status" % backend_status)
if not STREAMING_MODE:
######################
# NON STREAMING MODE #
######################
body = await resp.read()
response = web.Response(
headers={"Content-Type": resp.headers['Content-Type']},
body=body
)
else:
##################
# STREAMING MODE #
##################
# Let's prepare a streaming response
response = web.StreamResponse(
headers={"Content-Type": resp.headers['Content-Type']}
)
await response.prepare(request)
response.content_type = resp.headers['Content-Type']
# Let's stream the response body to avoid storing it in memory
while True:
chunk = await resp.content.read(CHUNK_SIZE)
if not chunk:
break
await response.write(chunk)
await response.write_eof()
return response
app = web.Application(middlewares=[mflog_middleware])
app.router.add_route('*', '/{tail:.*}', handle)
| [
"fabien.marty@gmail.com"
] | fabien.marty@gmail.com |
531c97fbd99b4b104f9c472c1d3f63257626f420 | 39bc76f22d523838dd95476c2c20f2b4fb5c2dd9 | /재호/2156.py | 4e8d4606cab88bdbcbdb8a4a29c93a323aad0416 | [] | no_license | Yapp-17th-Algorithm/algorithm-python | e1ea27564a8c01e36b29839aced9ae99b0800959 | cff5750421f5054270eace054526e0ecd5d5c58a | refs/heads/master | 2023-02-08T07:17:30.983662 | 2021-01-04T01:38:14 | 2021-01-04T01:38:14 | 288,205,968 | 0 | 4 | null | 2021-01-04T01:38:15 | 2020-08-17T14:43:50 | Python | UTF-8 | Python | false | false | 605 | py | # https://www.acmicpc.net/problem/2156
from sys import stdin
def solution():
    """BOJ 2156 (wine tasting): print the maximum total amount drinkable
    without ever drinking three glasses in a row.

    dp[i] = best total using glasses 0..i, via the standard recurrence:
    skip glass i, or drink i after skipping i-1, or drink i-1 and i after
    skipping i-2.
    """
    count = int(stdin.readline())
    glasses = [int(stdin.readline()) for _ in range(count)]
    best = [0] * count
    best[0] = glasses[0]
    if count > 1:
        best[1] = glasses[0] + glasses[1]
    if count > 2:
        best[2] = max(best[1],
                      best[0] + glasses[2],
                      glasses[1] + glasses[2])
    for i in range(3, count):
        best[i] = max(best[i - 1],
                      best[i - 2] + glasses[i],
                      best[i - 3] + glasses[i - 1] + glasses[i])
    print(best[count - 1])
| [
"pok_gare@naver.com"
] | pok_gare@naver.com |
955052f30aee59a4aff61872076cd78ac1aec4e0 | d9e619aa745d3b0ddc5c8cbeefb419c5953f0e09 | /circuitpython-libs/adafruit-circuitpython-bundle-5.x-mpy-20200321/examples/esp32spi_cheerlights.py | f1ace2e8a79ea88aed827133ee8e81b73c515d8c | [] | no_license | johnkustin/sam32-wifi-projects | 3c53120f46056d65b4c1b96682871c98f94efd87 | 35af32c68e98507c86af26c35e5a255e4fafcbfe | refs/heads/master | 2021-04-02T22:44:55.101762 | 2020-04-01T23:10:42 | 2020-04-01T23:10:42 | 248,332,048 | 0 | 0 | null | 2020-04-01T23:10:43 | 2020-03-18T20:09:08 | Python | UTF-8 | Python | false | false | 2,205 | py | import time
import board
import busio
from digitalio import DigitalInOut
from adafruit_esp32spi import adafruit_esp32spi
from adafruit_esp32spi import adafruit_esp32spi_wifimanager
import neopixel
import adafruit_fancyled.adafruit_fancyled as fancy
# Get wifi details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("WiFi secrets are kept in secrets.py, please add them there!")
raise
print("ESP32 SPI webclient test")
DATA_SOURCE = "https://api.thingspeak.com/channels/1417/feeds.json?results=1"
DATA_LOCATION = ["feeds", 0, "field2"]
esp32_cs = DigitalInOut(board.D9)
esp32_ready = DigitalInOut(board.D10)
esp32_reset = DigitalInOut(board.D5)
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)
"""Use below for Most Boards"""
status_light = neopixel.NeoPixel(
board.NEOPIXEL, 1, brightness=0.2
) # Uncomment for Most Boards
"""Uncomment below for ItsyBitsy M4"""
# status_light = dotstar.DotStar(board.APA102_SCK, board.APA102_MOSI, 1, brightness=0.2)
wifi = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(esp, secrets, status_light)
# neopixels
pixels = neopixel.NeoPixel(board.A1, 16, brightness=0.3)
pixels.fill(0)
# we'll save the value in question
last_value = value = None
while True:
try:
print("Fetching json from", DATA_SOURCE)
response = wifi.get(DATA_SOURCE)
print(response.json())
value = response.json()
for key in DATA_LOCATION:
value = value[key]
print(value)
response.close()
except (ValueError, RuntimeError) as e:
print("Failed to get data, retrying\n", e)
wifi.reset()
continue
if not value:
continue
if last_value != value:
color = int(value[1:], 16)
red = color >> 16 & 0xFF
green = color >> 8 & 0xFF
blue = color & 0xFF
gamma_corrected = fancy.gamma_adjust(fancy.CRGB(red, green, blue)).pack()
pixels.fill(gamma_corrected)
last_value = value
response = None
time.sleep(60)
| [
"johnkustin@gmail.com"
] | johnkustin@gmail.com |
e0dbc4429541420a051e9c1017af7b1ef95f2608 | 47ad9ef5d406f14727ea014a869187d2d9ed709c | /backend/finance_manager/urls.py | 34d67013721954998545eeeaa121d7c2fcf7aba0 | [] | no_license | dyoh1202/personal-finance-manager | 374be1b2ca05947943c7baa8b427b39787a910fd | 77f7c12749e9df7244a7bb640ce8f091784e7d82 | refs/heads/master | 2023-02-16T00:46:37.384886 | 2021-01-15T00:09:08 | 2021-01-15T00:09:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,257 | py | from django.urls import path, include
from django.conf.urls import url
from rest_framework.routers import DefaultRouter
from rest_framework.urlpatterns import format_suffix_patterns
from rest_framework.response import Response
from rest_framework.views import APIView
from .views import (
article_views,
asset_views,
portfolio_views,
info_views,
scheuduler_views,
)
app_name = "fm"
router = DefaultRouter()
# # board
router.register(r"articles", article_views.ArticleViewSet)
router.register(r"comments", article_views.CommentViewSet)
# # assets
router.register(r"portfolio", portfolio_views.PortfolioViewSet)
router.register(r"userstocks", asset_views.UserStockViewSet)
router.register(r"userrealties", asset_views.UserRealtyViewSet)
router.register(r"usercash", asset_views.UserCashViewSet)
router.register(r"stockinfo", info_views.StockInfoViewSet)
router.register(r"stockprice", info_views.StockPriceViewSet)
router.register(r"exchangerate", info_views.ExchangeRateViewSet)
# router.register("get_expect_asset", scheuduler_views.get_expect_asset)
# The API URLs are now determined automatically by the router.
urlpatterns = [
path("", include(router.urls)),
path("expect_asset/", scheuduler_views.get_expect_asset),
]
| [
"mskk0805@gmail.com"
] | mskk0805@gmail.com |
6919b90ea3c9b0b50908c3b3b66b2e5fdae47b49 | e44126f00ec82826bf0d4abab7531644e70ff357 | /__init__.py | 5f124ecc5651387506a29fa1fbb4d488aacf0fba | [
"MIT"
] | permissive | nbalas/advent_of_code | 427ca0821f14eb222efa149af387f852ed3337dc | 7c67a07eccc6e07f56fc448e463c557c937a4aaa | refs/heads/master | 2021-07-11T07:09:54.890344 | 2020-12-20T14:20:23 | 2020-12-20T14:20:23 | 225,243,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21 | py | from readers import * | [
"natebalas@gmail.com"
] | natebalas@gmail.com |
2f0556aa96b46d2a145bbd3d79a3f868cc9adf17 | df88145b39b550dab8c1610c17d4435990ba0752 | /_collections/articles/obsidian_to_anki.py | f0e9cf476b5679cc0f9d1162b857a24c0869db23 | [
"MIT"
] | permissive | SubZeroX/SubZeroX.github.io | 5a233a771ecb99840e5bf1f3edf915febec749d1 | 1df9c43d538af7812e68ac07d7591f258c8c1619 | refs/heads/master | 2023-02-04T13:13:05.792582 | 2020-12-16T00:34:17 | 2020-12-16T00:34:17 | 321,810,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54,329 | py | """Script for adding cards to Anki from Obsidian."""
import re
import json
import urllib.request
import configparser
import os
import collections
import webbrowser
import markdown
import base64
import argparse
import html
import time
import socket
import subprocess
#try:
#    import gooey
#    GOOEY = False
#except ModuleNotFoundError:
#    print("Gooey not installed, switching to cli...")
#    GOOEY = False
# GUI support disabled; the script always runs in CLI mode.
GOOEY = False
# Collected media files to send to Anki, keyed per run.
MEDIA = dict()
# Markers used when scanning notes in Obsidian files.
ID_PREFIX = "ID: "
TAG_PREFIX = "Tags: "
TAG_SEP = " "
Note_and_id = collections.namedtuple('Note_and_id', ['note', 'id'])
# Skeleton of an AnkiConnect addNote payload.
NOTE_DICT_TEMPLATE = {
    "deckName": "",
    "modelName": "",
    "fields": dict(),
    "options": {
        "allowDuplicate": False,
        "duplicateScope": "deck"
    },
    "tags": ["Obsidian_to_Anki"],
    # ^So that you can see what was added automatically.
    "audio": list()
}
# Config INI lives next to this script.
CONFIG_PATH = os.path.expanduser(
    os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        "obsidian_to_anki_config.ini"
    )
)
CONFIG_DATA = dict()
# Shared Markdown -> HTML converter for note fields.
md_parser = markdown.Markdown(
    extensions=[
        'fenced_code',
        'footnotes',
        'md_in_html',
        'tables',
        'nl2br',
        'sane_lists'
    ]
)
# Default AnkiConnect port.
ANKI_PORT = 8765
def write_safe(filename, contents):
    """
    Write contents to filename while keeping a backup.
    If write fails, a backup 'filename.bak' will still exist.
    Returns True if the file now matches contents, False otherwise
    (previously the computed success flag was silently discarded).
    """
    with open(filename + ".tmp", "w", encoding='utf_8') as temp:
        temp.write(contents)
    # os.replace instead of os.rename: atomic where the OS supports it and,
    # unlike os.rename, also succeeds on Windows if the destination exists.
    os.replace(filename, filename + ".bak")
    os.replace(filename + ".tmp", filename)
    with open(filename, encoding='utf_8') as f:
        success = (f.read() == contents)
    if success:
        # Only drop the backup once the new contents are verified on disk.
        os.remove(filename + ".bak")
    return success
def string_insert(string, position_inserts):
    """
    Insert strings in position_inserts into string, at indices.

    position_inserts will look like:
    [(0, "hi"), (3, "hello"), (5, "beep")]

    Positions are indices into the ORIGINAL string; inserts at equal
    positions are applied in sorted order. Returns the new string.
    """
    # Build the result in one pass with join rather than repeatedly
    # re-slicing the growing string (the old approach was O(n^2)).
    position_inserts = sorted(list(position_inserts))
    pieces = []
    prev = 0
    for position, insert_str in position_inserts:
        pieces.append(string[prev:position])
        pieces.append(insert_str)
        prev = position
    pieces.append(string[prev:])
    return "".join(pieces)
def file_encode(filepath):
    """Encode the file as base 64."""
    with open(filepath, 'rb') as file_obj:
        raw_bytes = file_obj.read()
    return base64.b64encode(raw_bytes).decode('utf-8')
def spans(pattern, string):
    """Return a list of span-tuples for matches of pattern in string."""
    result = []
    for found in pattern.finditer(string):
        result.append(found.span())
    return result
def overlap(span, spans):
    """Determine whether span overlaps with anything in spans."""
    lo, hi = span
    for start, end in spans:
        # Overlap: either endpoint of `span` falls inside [start, end).
        if start <= lo < end or start < hi <= end:
            return True
    return False
def findignore(pattern, string, ignore_spans):
    """Yield all matches for pattern in string not in ignore_spans."""
    # Generator function instead of a returned generator expression;
    # callers still receive a lazy iterator of match objects.
    for found in pattern.finditer(string):
        if not overlap(found.span(), ignore_spans):
            yield found
def wait_for_port(port, host='localhost', timeout=5.0):
    """Wait until a port starts accepting TCP connections.
    Args:
        port (int): Port number.
        host (str): Host address on which the port should exist.
        timeout (float): In seconds. How long to wait before raising errors.
    Raises:
        TimeoutError: The port isn't accepting connection after time specified
        in `timeout`.
    """
    start_time = time.perf_counter()
    while True:
        try:
            # Success path: the connection opens, so the port is live.
            with socket.create_connection((host, port), timeout=timeout):
                break
        except OSError as ex:
            # Back off briefly before retrying, until the deadline.
            time.sleep(0.01)
            if time.perf_counter() - start_time >= timeout:
                # Fixed: the implicitly-concatenated literals previously
                # produced "...tostart accepting..." (missing space).
                raise TimeoutError(
                    'Waited too long for the port {} on host {} to '
                    'start accepting connections.'.format(port, host)
                ) from ex
def load_anki():
    """Attempt to load anki in the correct profile.

    Returns True once Anki is open and AnkiConnect answers on ANKI_PORT,
    False on config/connection failure. Falls through returning None
    (falsy) when Path/Profile are not both configured.
    """
    try:
        Config.load_config()
    except Exception as e:
        print("Error when loading config:", e)
        print("Please open Anki before running script again.")
        return False
    if CONFIG_DATA["Path"] and CONFIG_DATA["Profile"]:
        print("Anki Path and Anki Profile provided.")
        print("Attempting to open Anki in selected profile...")
        # Launch Anki detached; AnkiConnect only serves while Anki runs.
        subprocess.Popen(
            [CONFIG_DATA["Path"], "-p", CONFIG_DATA["Profile"]]
        )
        try:
            wait_for_port(ANKI_PORT)
        except TimeoutError:
            print("Opened Anki, but can't connect! Is AnkiConnect working?")
            return False
        else:
            print("Opened and connected to Anki successfully!")
            return True
    else:
        print(
            "Must provide both Anki Path and Anki Profile",
            "in order to open Anki automatically"
        )
        # NOTE(review): no explicit return here — callers receive None.
def main():
    """Main functionality of script.

    Creates the config file on first run, then hands control to App(),
    whose constructor performs the whole run.
    """
    if not os.path.exists(CONFIG_PATH):
        Config.update_config()
    App()
class AnkiConnect:
    """Namespace for AnkiConnect functions.

    All methods are stateless and are always called through the class,
    so they are declared as staticmethods (the original definitions
    omitted the decorator, which would break instance-based calls).
    """

    @staticmethod
    def request(action, **params):
        """Format action and parameters into Ankiconnect style.

        Returns the dict AnkiConnect expects for a version-6 request.
        """
        return {'action': action, 'params': params, 'version': 6}

    @staticmethod
    def invoke(action, **params):
        """Do the action with the specified parameters.

        POSTs the JSON request to the local AnkiConnect server and
        returns the parsed 'result' payload.
        """
        requestJson = json.dumps(
            AnkiConnect.request(action, **params)
        ).encode('utf-8')
        response = json.load(urllib.request.urlopen(
            urllib.request.Request('http://localhost:8765', requestJson)))
        return AnkiConnect.parse(response)

    @staticmethod
    def parse(response):
        """Parse the received response.

        Raises Exception on malformed responses or on an error reported
        by AnkiConnect; otherwise returns response['result'].
        """
        if len(response) != 2:
            raise Exception('response has an unexpected number of fields')
        if 'error' not in response:
            raise Exception('response is missing required error field')
        if 'result' not in response:
            raise Exception('response is missing required result field')
        if response['error'] is not None:
            raise Exception(response['error'])
        return response['result']
class FormatConverter:
    """Converting Obsidian formatting to Anki formatting."""

    # Obsidian inline math: single-$ delimited, non-empty, not $$.
    OBS_INLINE_MATH_REGEXP = re.compile(
        r"(?<!\$)\$(?=[\S])(?=[^$])[\s\S]*?\S\$"
    )
    # Obsidian display math: $$ ... $$ (may span lines).
    OBS_DISPLAY_MATH_REGEXP = re.compile(r"\$\$[\s\S]*?\$\$")
    # Anki's MathJax delimiters.
    ANKI_INLINE_START = r"\("
    ANKI_INLINE_END = r"\)"
    ANKI_DISPLAY_START = r"\["
    ANKI_DISPLAY_END = r"\]"
    ANKI_MATH_REGEXP = re.compile(r"(\\\[[\s\S]*?\\\])|(\\\([\s\S]*?\\\))")
    # Placeholder substituted for math spans while markdown is parsed.
    MATH_REPLACE = "OBSTOANKIMATH"
    # src attribute of <img> tags produced by the markdown converter.
    IMAGE_REGEXP = re.compile(r'<img alt=".*?" src="(.*?)"')
    # Anki's [sound:...] audio syntax.
    SOUND_REGEXP = re.compile(r'\[sound:(.+)\]')
    # Curly-brace clozes: {text}, {1:text}, {c1:text} or {c1|text}.
    CLOZE_REGEXP = re.compile(
        r'(?:(?<!{){(?:c?(\d+)[:|])?(?!{))((?:[^\n][\n]?)+?)(?:(?<!})}(?!}))'
    )
    URL_REGEXP = re.compile(r'https?://')
    PARA_OPEN = "<p>"
    PARA_CLOSE = "</p>"
    # Class-level counter used to auto-number unnumbered clozes.
    CLOZE_UNSET_NUM = 1

    @staticmethod
    def inline_anki_repl(matchobject):
        """Get replacement string for Obsidian-formatted inline math."""
        found_string = matchobject.group(0)
        # Strip Obsidian formatting by removing first and last characters
        found_string = found_string[1:-1]
        # Add Anki formatting
        result = FormatConverter.ANKI_INLINE_START + found_string
        result += FormatConverter.ANKI_INLINE_END
        return result

    @staticmethod
    def display_anki_repl(matchobject):
        """Get replacement string for Obsidian-formatted display math."""
        found_string = matchobject.group(0)
        # Strip Obsidian formatting by removing first two and last two chars
        found_string = found_string[2:-2]
        # Add Anki formatting
        result = FormatConverter.ANKI_DISPLAY_START + found_string
        result += FormatConverter.ANKI_DISPLAY_END
        return result

    @staticmethod
    def obsidian_to_anki_math(note_text):
        """Convert Obsidian-formatted math to Anki-formatted math."""
        # Display math is converted first so $$...$$ is not caught by
        # the inline pattern.
        return FormatConverter.OBS_INLINE_MATH_REGEXP.sub(
            FormatConverter.inline_anki_repl,
            FormatConverter.OBS_DISPLAY_MATH_REGEXP.sub(
                FormatConverter.display_anki_repl, note_text
            )
        )

    @staticmethod
    def cloze_repl(match):
        """Replace one curly-brace cloze with Anki {{cN::...}} syntax.

        Unnumbered clozes are numbered sequentially via the class-level
        CLOZE_UNSET_NUM counter (reset by curly_to_cloze).
        """
        id, content = match.group(1), match.group(2)
        if id is None:
            result = "{{{{c{!s}::{}}}}}".format(
                FormatConverter.CLOZE_UNSET_NUM,
                content
            )
            FormatConverter.CLOZE_UNSET_NUM += 1
            return result
        else:
            return "{{{{c{}::{}}}}}".format(id, content)

    @staticmethod
    def curly_to_cloze(text):
        """Change text in curly brackets to Anki-formatted cloze."""
        text = FormatConverter.CLOZE_REGEXP.sub(
            FormatConverter.cloze_repl,
            text
        )
        # Reset the auto-number counter for the next note.
        FormatConverter.CLOZE_UNSET_NUM = 1
        return text

    @ staticmethod
    def markdown_parse(text):
        """Apply markdown conversions to text."""
        # reset() clears converter state (e.g. footnotes) between notes.
        text = md_parser.reset().convert(text)
        return text

    @ staticmethod
    def is_url(text):
        """Check whether text looks like a url."""
        return bool(
            FormatConverter.URL_REGEXP.match(text)
        )

    @ staticmethod
    def get_images(html_text):
        """Get all the images that need to be added."""
        for match in FormatConverter.IMAGE_REGEXP.finditer(html_text):
            path = match.group(1)
            print(path)
            if FormatConverter.is_url(path):
                continue  # Skips over images web-hosted.
            filename = os.path.basename(path)
            # Only encode files Anki hasn't already been sent.
            if filename not in CONFIG_DATA["Added Media"].keys(
            ) and filename not in MEDIA:
                MEDIA[filename] = file_encode(path)
                # Adds the filename and data to media_names

    @ staticmethod
    def get_audio(html_text):
        """Get all the audio that needs to be added"""
        for match in FormatConverter.SOUND_REGEXP.finditer(html_text):
            path = match.group(1)
            filename = os.path.basename(path)
            if filename not in CONFIG_DATA["Added Media"].keys(
            ) and filename not in MEDIA:
                MEDIA[filename] = file_encode(path)
                # Adds the filename and data to media_names

    @ staticmethod
    def path_to_filename(matchobject):
        """Replace the src in matchobject appropriately."""
        found_string, found_path = matchobject.group(0), matchobject.group(1)
        if FormatConverter.is_url(found_path):
            return found_string  # So urls should not be altered.
        # Anki stores media flat, so reduce the path to its basename.
        found_string = found_string.replace(
            found_path, os.path.basename(found_path)
        )
        return found_string

    @ staticmethod
    def fix_image_src(html_text):
        """Fix the src of the images so that it's relative to Anki."""
        return FormatConverter.IMAGE_REGEXP.sub(
            FormatConverter.path_to_filename,
            html_text
        )

    @ staticmethod
    def fix_audio_src(html_text):
        """Fix the audio filenames so that it's relative to Anki."""
        return FormatConverter.SOUND_REGEXP.sub(
            FormatConverter.path_to_filename,
            html_text
        )

    @ staticmethod
    def format(note_text, cloze=False):
        """Apply all format conversions to note_text."""
        note_text = FormatConverter.obsidian_to_anki_math(note_text)
        # Extract the parts that are anki math
        math_matches = [
            math_match.group(0)
            for math_match in FormatConverter.ANKI_MATH_REGEXP.finditer(
                note_text
            )
        ]
        # Replace them to be later added back, so they don't interfere
        # with markdown parsing
        note_text = FormatConverter.ANKI_MATH_REGEXP.sub(
            FormatConverter.MATH_REPLACE, note_text
        )
        if cloze:
            note_text = FormatConverter.curly_to_cloze(note_text)
        note_text = FormatConverter.markdown_parse(note_text)
        # Add back the parts that are anki math
        for math_match in math_matches:
            note_text = note_text.replace(
                FormatConverter.MATH_REPLACE,
                html.escape(math_match),
                1
            )
        # Collect media references, then rewrite them to bare filenames.
        FormatConverter.get_images(note_text)
        FormatConverter.get_audio(note_text)
        note_text = FormatConverter.fix_image_src(note_text)
        note_text = FormatConverter.fix_audio_src(note_text)
        note_text = note_text.strip()
        # Remove unnecessary paragraph tag
        if note_text.startswith(
            FormatConverter.PARA_OPEN
        ) and note_text.endswith(
            FormatConverter.PARA_CLOSE
        ):
            note_text = note_text[len(FormatConverter.PARA_OPEN):]
            note_text = note_text[:-len(FormatConverter.PARA_CLOSE)]
        return note_text
class Note:
    """Manages parsing notes into a dictionary formatted for AnkiConnect.
    Input must be the note text.
    Does NOT deal with finding the note in the file.
    """

    # Matches an (optionally HTML-comment-wrapped) "ID: <digits>" line.
    ID_REGEXP = re.compile(
        r"(?:<!--)?" + ID_PREFIX + r"(\d+)"
    )

    # NOTE: Note.note_subs and Note.field_subs are class attributes
    # assigned by Config.load_config() before any Note is constructed.

    def __init__(self, note_text):
        """Set up useful variables.

        Strips the trailing ID line (if any) and the trailing Tags line
        (if any); the first remaining line names the note type.
        """
        self.text = note_text
        self.lines = self.text.splitlines()
        self.current_field_num = 0
        self.delete = False
        if Note.ID_REGEXP.match(self.lines[-1]):
            self.identifier = int(
                Note.ID_REGEXP.match(self.lines.pop()).group(1)
            )
            # The above removes the identifier line,
            # for convenience of parsing
        else:
            self.identifier = None
        if not self.lines:
            # This indicates a delete action.
            self.delete = True
            return
        elif self.lines[-1].startswith(TAG_PREFIX):
            self.tags = self.lines.pop()[len(TAG_PREFIX):].split(
                TAG_SEP
            )
        else:
            self.tags = list()
        # First line names the note type (after user substitution).
        self.note_type = Note.note_subs[self.lines[0]]
        self.subs = Note.field_subs[self.note_type]
        self.field_names = list(self.subs)

    @ property
    def current_field(self):
        """Get the field to add text to."""
        return self.field_names[self.current_field_num]

    @ property
    def current_sub(self):
        """Get the prefix substitution of the current field."""
        return self.subs[self.current_field]

    @ property
    def next_field(self):
        """Attempt to get the next field to add text to."""
        try:
            return self.field_names[self.current_field_num + 1]
        except IndexError:
            return ""

    @ property
    def next_sub(self):
        """Attempt to get the substitution of the next field."""
        try:
            return self.subs[self.next_field]
        except KeyError:
            return ""

    @ property
    def fields(self):
        """Get the fields of the note into a dictionary.

        Walks the note lines (after the type line), switching to the
        next field whenever a line starts with that field's prefix,
        then runs every field's text through FormatConverter.format.
        """
        fields = dict.fromkeys(self.field_names, "")
        for line in self.lines[1:]:
            if self.next_sub and line.startswith(self.next_sub):
                # This means we're entering a new field.
                # So, we should format the text in the current field
                self.current_field_num += 1
                # current_sub now refers to the NEW field's prefix.
                line = line[len(self.current_sub):]
            fields[self.current_field] += line + "\n"
        fields = {
            key: FormatConverter.format(
                value.strip(),
                cloze=(
                    self.note_type in CONFIG_DATA["Clozes"]
                    and CONFIG_DATA["CurlyCloze"]
                )
            )
            for key, value in fields.items()
        }
        return {key: value.strip() for key, value in fields.items()}

    def parse(self, deck, url=None):
        """Get a properly formatted dictionary of the note.

        Returns Note_and_id; note is False when this is a delete action.
        """
        # NOTE(review): shallow copy — the nested "options" dict and
        # "audio" list are shared across all notes; verify nothing
        # mutates them in place.
        template = NOTE_DICT_TEMPLATE.copy()
        if not self.delete:
            template["modelName"] = self.note_type
            template["fields"] = self.fields
            if all([
                CONFIG_DATA["Add file link"],
                CONFIG_DATA["Vault"],
                url
            ]):
                for key in template["fields"]:
                    template["fields"][key] += " " + "".join([
                        '<a',
                        ' href="{}">Obsidian</a>'.format(url)
                    ])
                    break  # So only does first field
            template["tags"] = template["tags"] + self.tags
            template["deckName"] = deck
            return Note_and_id(note=template, id=self.identifier)
        else:
            return Note_and_id(note=False, id=self.identifier)
class InlineNote(Note):
    """Parses a single-line note: [Type] Field1: ... Field2: ...

    Inherits parse() and the field-cursor properties from Note; only
    construction and field extraction differ.
    """

    ID_REGEXP = re.compile(r"(?:<!--)?" + ID_PREFIX + r"(\d+)")
    TAG_REGEXP = re.compile(TAG_PREFIX + r"(.*)")
    TYPE_REGEXP = re.compile(r"\[(.*?)\]")  # So e.g. [Basic]

    def __init__(self, note_text):
        """Strip ID, tags and [Type] markers, leaving only field text."""
        self.text = note_text.strip()
        self.current_field_num = 0
        self.delete = False
        ID = InlineNote.ID_REGEXP.search(self.text)
        if ID is not None:
            self.identifier = int(ID.group(1))
            self.text = self.text[:ID.start()]  # Removes identifier
        else:
            self.identifier = None
        if not self.text:
            # This indicates a delete action
            self.delete = True
            return
        TAGS = InlineNote.TAG_REGEXP.search(self.text)
        if TAGS is not None:
            self.tags = TAGS.group(1).split(TAG_SEP)
            self.text = self.text[:TAGS.start()]
        else:
            self.tags = list()
        TYPE = InlineNote.TYPE_REGEXP.search(self.text)
        self.note_type = Note.note_subs[TYPE.group(1)]
        self.text = self.text[TYPE.end():]
        self.subs = Note.field_subs[self.note_type]
        self.field_names = list(self.subs)
        self.text = self.text.strip()

    @ property
    def fields(self):
        """Get the fields of the note into a dictionary.

        Splits self.text on each successive field prefix; whatever
        remains after the last prefix belongs to the last field.
        """
        fields = dict.fromkeys(self.field_names, "")
        while self.next_sub:
            # So, we're expecting a new field
            # NOTE(review): find() returns -1 when the prefix is absent,
            # which would truncate one character — assumes well-formed
            # input containing every field prefix in order.
            end = self.text.find(self.next_sub)
            fields[self.current_field] += self.text[:end]
            self.text = self.text[end + len(self.next_sub):]
            self.current_field_num += 1
        # For last field:
        fields[self.current_field] += self.text
        fields = {
            key: FormatConverter.format(
                value,
                cloze=(
                    self.note_type in CONFIG_DATA["Clozes"]
                    and CONFIG_DATA["CurlyCloze"]
                )
            )
            for key, value in fields.items()
        }
        return {key: value.strip() for key, value in fields.items()}
class RegexNote:
    """Builds an AnkiConnect note dict from a custom-regex match.

    The regex's capture groups map positionally onto the note type's
    fields; optional trailing groups carry tags and the note id.
    """

    # Appended to a custom regex to also capture the trailing ID line.
    ID_REGEXP_STR = r"\n(?:<!--)?(?:" + ID_PREFIX + r"(\d+).*)"
    # Appended to a custom regex to also capture the trailing Tags line.
    TAG_REGEXP_STR = r"(" + TAG_PREFIX + r".*)"

    def __init__(self, matchobject, note_type, tags=False, id=False):
        """Pop optional id/tags groups, leaving only field groups."""
        self.match = matchobject
        self.note_type = note_type
        self.groups = list(self.match.groups())
        self.group_num = len(self.groups)
        if id:
            # This means id is last group
            self.identifier = int(self.groups.pop())
        else:
            self.identifier = None
        if tags:
            # Even if id were present, tags is now last group
            self.tags = self.groups.pop()[len(TAG_PREFIX):].split(
                TAG_SEP
            )
        else:
            self.tags = list()
        self.field_names = list(Note.field_subs[self.note_type])

    @ property
    def fields(self):
        """Map capture groups positionally onto field names and format."""
        fields = dict.fromkeys(self.field_names, "")
        for name, match in zip(self.field_names, self.groups):
            if match:
                fields[name] = match
        fields = {
            key: FormatConverter.format(
                value,
                cloze=(
                    self.note_type in CONFIG_DATA["Clozes"]
                    and CONFIG_DATA["CurlyCloze"]
                )
            )
            for key, value in fields.items()
        }
        return {key: value.strip() for key, value in fields.items()}

    def parse(self, deck, url=None):
        """Get a properly formatted dictionary of the note."""
        # NOTE(review): shallow copy of the template — nested objects
        # are shared across notes (same caveat as Note.parse).
        template = NOTE_DICT_TEMPLATE.copy()
        template["modelName"] = self.note_type
        template["fields"] = self.fields
        if all([
            CONFIG_DATA["Add file link"],
            CONFIG_DATA["Vault"],
            url
        ]):
            for key in template["fields"]:
                template["fields"][key] += " " + "".join([
                    '<a',
                    ' href="{}">Obsidian</a>'.format(url)
                ])
                break  # So only does first field
        template["tags"] = template["tags"] + self.tags
        template["deckName"] = deck
        return Note_and_id(note=template, id=self.identifier)
class Config:
    """Deals with saving and loading the configuration file."""

    # NOTE: these methods take no self/cls and are always invoked as
    # Config.update_config() / Config.load_config().

    def update_config():
        """Update config with new notes.

        Queries Anki (via AnkiConnect) for note types and their fields,
        merges them into the existing INI file without overwriting user
        customisations, and writes the result back to CONFIG_PATH.
        """
        print("Updating configuration file...")
        config = configparser.ConfigParser()
        config.optionxform = str
        if os.path.exists(CONFIG_PATH):
            print("Config file exists, reading...")
            config.read(CONFIG_PATH, encoding='utf-8-sig')
        # Setting up field substitutions
        note_types = AnkiConnect.invoke("modelNames")
        fields_request = [
            AnkiConnect.request(
                "modelFieldNames", modelName=note
            )
            for note in note_types
        ]
        # Default prefix for each field is "FieldName:".
        subs = {
            note: {
                field: field + ":"
                for field in AnkiConnect.parse(fields)
            }
            for note, fields in zip(
                note_types,
                AnkiConnect.invoke(
                    "multi", actions=fields_request
                )
            )
        }
        for note, note_field_subs in subs.items():
            config.setdefault(note, dict())
            for field, sub in note_field_subs.items():
                config[note].setdefault(field, sub)
                # This means that, if there's already a substitution
                # present, the 'default' substitution of field + ":"
                # isn't added.
        # Setting up Note Substitutions
        config.setdefault("Note Substitutions", dict())
        config.setdefault("Cloze Note Types", dict())
        for note in note_types:
            config["Note Substitutions"].setdefault(note, note)
            config["Cloze Note Types"].setdefault(note, "False")
            # Similar to above - if there's already a substitution
            # present, it isn't overwritten
        if "Cloze" in note_types:
            config["Cloze Note Types"]["Cloze"] = "True"
        # Setting up Syntax
        config.setdefault("Syntax", dict())
        config["Syntax"].setdefault(
            "Begin Note", "START"
        )
        config["Syntax"].setdefault(
            "End Note", "END"
        )
        config["Syntax"].setdefault(
            "Begin Inline Note", "STARTI"
        )
        config["Syntax"].setdefault(
            "End Inline Note", "ENDI"
        )
        config["Syntax"].setdefault(
            "Target Deck Line", "TARGET DECK"
        )
        config["Syntax"].setdefault(
            "File Tags Line", "FILE TAGS"
        )
        config["Syntax"].setdefault(
            "Delete Regex Note Line", "DELETE"
        )
        config.setdefault("Obsidian", dict())
        config["Obsidian"].setdefault("Vault name", "")
        config["Obsidian"].setdefault("Add file link", "False")
        config["DEFAULT"] = dict()  # Removes DEFAULT if it's there.
        config.setdefault("Defaults", dict())
        config["Defaults"].setdefault(
            "Tag", "Obsidian_to_Anki"
        )
        config["Defaults"].setdefault(
            "Deck", "Default"
        )
        config["Defaults"].setdefault(
            "CurlyCloze", "False"
        )
        config["Defaults"].setdefault(
            "GUI", "True"
        )
        config["Defaults"].setdefault(
            "Regex", "False"
        )
        config["Defaults"].setdefault(
            "ID Comments", "True"
        )
        config["Defaults"].setdefault(
            "Anki Path", ""
        )
        config["Defaults"].setdefault(
            "Anki Profile", ""
        )
        # Setting up Custom Regexps
        config.setdefault("Custom Regexps", dict())
        for note in note_types:
            config["Custom Regexps"].setdefault(note, "")
        # Setting up media files
        config.setdefault("Added Media", dict())
        with open(CONFIG_PATH, "w", encoding='utf_8') as configfile:
            config.write(configfile)
        print("Configuration file updated!")

    def load_config():
        """Load from an existing config file (assuming it exists).

        Populates the module-level CONFIG_DATA dict, patches class
        attributes on Note and RegexFile, and stashes the parser on
        Config.config for later rewriting.
        """
        print("Loading configuration file...")
        config = configparser.ConfigParser()
        config.optionxform = str  # Allows for case sensitivity
        config.read(CONFIG_PATH, encoding='utf-8-sig')
        note_subs = config["Note Substitutions"]
        # Inverted so lookup is by the in-file name -> Anki model name.
        Note.note_subs = {v: k for k, v in note_subs.items()}
        # NOTE(review): this exclusion list omits "Cloze Note Types" and
        # "Obsidian", so those sections also end up in field_subs —
        # verify whether that is intentional.
        Note.field_subs = {
            note: dict(config[note]) for note in config
            if note not in [
                "Note Substitutions",
                "Defaults",
                "Syntax",
                "Custom Regexps",
                "Added Media",
                "DEFAULT"
            ]
        }
        CONFIG_DATA["Clozes"] = [
            type for type in config["Cloze Note Types"]
            if config.getboolean("Cloze Note Types", type)
        ]
        # Syntax markers are regex-escaped once here for reuse.
        CONFIG_DATA["NOTE_PREFIX"] = re.escape(
            config["Syntax"]["Begin Note"]
        )
        CONFIG_DATA["NOTE_SUFFIX"] = re.escape(
            config["Syntax"]["End Note"]
        )
        CONFIG_DATA["INLINE_PREFIX"] = re.escape(
            config["Syntax"]["Begin Inline Note"]
        )
        CONFIG_DATA["INLINE_SUFFIX"] = re.escape(
            config["Syntax"]["End Inline Note"]
        )
        CONFIG_DATA["DECK_LINE"] = re.escape(
            config["Syntax"]["Target Deck Line"]
        )
        CONFIG_DATA["TAG_LINE"] = re.escape(
            config["Syntax"]["File Tags Line"]
        )
        CONFIG_DATA["Added Media"] = config["Added Media"]
        # Regex used by RegexFile to spot delete-note markers.
        RegexFile.EMPTY_REGEXP = re.compile(
            re.escape(
                config["Syntax"]["Delete Regex Note Line"]
            ) + RegexNote.ID_REGEXP_STR
        )
        NOTE_DICT_TEMPLATE["tags"] = [config["Defaults"]["Tag"]]
        NOTE_DICT_TEMPLATE["deckName"] = config["Defaults"]["Deck"]
        CONFIG_DATA["CurlyCloze"] = config.getboolean(
            "Defaults", "CurlyCloze"
        )
        CONFIG_DATA["GUI"] = config.getboolean(
            "Defaults", "GUI"
        )
        CONFIG_DATA["Regex"] = config.getboolean(
            "Defaults", "Regex"
        )
        CONFIG_DATA["Comment"] = config.getboolean(
            "Defaults", "ID Comments"
        )
        CONFIG_DATA["Path"] = config["Defaults"]["Anki Path"]
        CONFIG_DATA["Profile"] = config["Defaults"]["Anki Profile"]
        CONFIG_DATA["Vault"] = config["Obsidian"]["Vault name"]
        CONFIG_DATA["Add file link"] = config.getboolean(
            "Obsidian", "Add file link"
        )
        Config.config = config  # Can access later if need be
        print("Loaded successfully!")
class App:
    """Master class that manages the application."""

    # File extensions considered note sources when scanning directories.
    SUPPORTED_EXTS = [".md", ".txt"]

    def __init__(self):
        """Execute the main functionality of the script.

        Loads config (repairing it once on failure), parses CLI/GUI
        arguments, then scans the requested file/directory tree and
        performs the two-phase AnkiConnect exchange.
        """
        try:
            Config.load_config()
        except Exception as e:
            print("Error:", e)
            print("Attempting to fix config file...")
            Config.update_config()
            Config.load_config()
        if CONFIG_DATA["GUI"] and GOOEY:
            self.setup_gui_parser()
        else:
            self.setup_cli_parser()
        args = self.parser.parse_args()
        if CONFIG_DATA["GUI"] and GOOEY:
            # GUI supplies file/directory separately; unify onto .path.
            if args.directory:
                args.path = args.directory
            elif args.file:
                args.path = args.file
            else:
                args.path = False
        no_args = True
        if args.update:
            no_args = False
            Config.update_config()
            Config.load_config()
        if args.mediaupdate:
            no_args = False
            # Forget previously-uploaded media so it is re-sent.
            CONFIG_DATA["Added Media"].clear()
        self.gen_regexp()
        if args.config:
            no_args = False
            webbrowser.open(CONFIG_PATH)
            return
        if args.path:
            no_args = False
            current = os.getcwd()
            self.path = args.path
            directories = list()
            if os.path.isdir(self.path):
                os.chdir(self.path)
                if args.recurse:
                    directories = list()
                    for root, dirs, files in os.walk(os.getcwd()):
                        directories.append(
                            Directory(root, regex=args.regex)
                        )
                        # NOTE(review): removing from dirs while
                        # iterating it skips consecutive dot-folders;
                        # dirs[:] = [d for d in dirs if not
                        # d.startswith(".")] would prune reliably.
                        for dir in dirs:
                            if dir.startswith("."):
                                dirs.remove(dir)
                                # So, ignore . folders
                else:
                    directories = [
                        Directory(
                            os.getcwd(), regex=args.regex
                        )
                    ]
                os.chdir(current)
            else:
                directories = [
                    Directory(
                        current, regex=args.regex, onefile=self.path
                    )
                ]
            requests = list()
            print("Getting tag list")
            requests.append(
                AnkiConnect.request(
                    "getTags"
                )
            )
            print("Adding media with these filenames...")
            print(list(MEDIA.keys()))
            requests.append(self.get_add_media())
            print("Adding directory requests...")
            for directory in directories:
                requests.append(directory.requests_1())
            # Phase 1: tags + media + per-directory add/update requests.
            result = AnkiConnect.invoke(
                "multi",
                actions=requests
            )
            # Remember uploaded media so it isn't re-sent next run.
            for filename in MEDIA.keys():
                CONFIG_DATA["Added Media"].setdefault(
                    filename, "True"
                )
            with open(CONFIG_PATH, "w", encoding='utf_8') as configfile:
                Config.config.write(configfile)
            tags = AnkiConnect.parse(result[0])
            # result[0] = tags, result[1] = media; directories follow.
            directory_responses = result[2:]
            for directory, response in zip(directories, directory_responses):
                directory.parse_requests_1(AnkiConnect.parse(response), tags)
            # Phase 2: follow-up requests built from phase-1 responses.
            requests = list()
            for directory in directories:
                requests.append(directory.requests_2())
            AnkiConnect.invoke(
                "multi",
                actions=requests
            )
        if no_args:
            self.parser.print_help()

    def setup_parser_optionals(self):
        """Set up optional arguments for the parser."""
        self.parser.add_argument(
            "-c", "--config",
            action="store_true",
            dest="config",
            help="Open up config file for editing."
        )
        self.parser.add_argument(
            "-u", "--update",
            action="store_true",
            dest="update",
            help="Update config file."
        )
        self.parser.add_argument(
            "-r", "--regex",
            action="store_true",
            dest="regex",
            help="Use custom regex syntax.",
            default=CONFIG_DATA["Regex"]
        )
        self.parser.add_argument(
            "-m", "--mediaupdate",
            action="store_true",
            dest="mediaupdate",
            help="Force addition of media files."
        )
        self.parser.add_argument(
            "-R", "--recurse",
            action="store_true",
            dest="recurse",
            help="Recursively scan subfolders."
        )

    # The GUI parser only exists when the optional gooey dependency
    # imported successfully at module load.
    if GOOEY:
        @ gooey.Gooey(use_cmd_args=True)
        def setup_gui_parser(self):
            """Set up the GUI argument parser."""
            self.parser = gooey.GooeyParser(
                description="Add cards to Anki from a markdown or text file."
            )
            path_group = self.parser.add_mutually_exclusive_group(
                required=False
            )
            path_group.add_argument(
                "-f", "--file",
                help="Choose a file to scan.",
                dest="file",
                widget='FileChooser'
            )
            path_group.add_argument(
                "-d", "--dir",
                help="Choose a directory to scan.",
                dest="directory",
                widget='DirChooser'
            )
            self.setup_parser_optionals()

    def setup_cli_parser(self):
        """Setup the command-line argument parser."""
        self.parser = argparse.ArgumentParser(
            description="Add cards to Anki from a markdown or text file."
        )
        self.parser.add_argument(
            "path",
            default=False,
            nargs="?",
            help="Path to the file or directory you want to scan."
        )
        self.setup_parser_optionals()

    def gen_regexp(self):
        """Generate the regular expressions used by the app.

        Compiled patterns are attached to the App class so File /
        RegexFile instances can reference them as App.<NAME>.
        """
        # Whole note between Begin/End Note markers (group 1 = body).
        setattr(
            App, "NOTE_REGEXP",
            re.compile(
                r"".join(
                    [
                        r"^",
                        CONFIG_DATA["NOTE_PREFIX"],
                        r"\n([\s\S]*?\n)",
                        CONFIG_DATA["NOTE_SUFFIX"],
                        r"\n?"
                    ]
                ), flags=re.MULTILINE
            )
        )
        # Target-deck declaration line (group 1 = deck name).
        setattr(
            App, "DECK_REGEXP",
            re.compile(
                "".join(
                    [
                        r"^",
                        CONFIG_DATA["DECK_LINE"],
                        r"\n(.*)",
                    ]
                ), flags=re.MULTILINE
            )
        )
        # A note body containing only an ID line => deletion marker.
        setattr(
            App, "EMPTY_REGEXP",
            re.compile(
                "".join(
                    [
                        r"^",
                        CONFIG_DATA["NOTE_PREFIX"],
                        r"\n(?:<!--)?",
                        ID_PREFIX,
                        r"[\s\S]*?\n",
                        CONFIG_DATA["NOTE_SUFFIX"]
                    ]
                ), flags=re.MULTILINE
            )
        )
        # File-level tags line (group 1 = tag list).
        setattr(
            App, "TAG_REGEXP",
            re.compile(
                r"^" + CONFIG_DATA["TAG_LINE"] + r"\n(.*)\n",
                flags=re.MULTILINE
            )
        )
        # Inline note between its own begin/end markers.
        setattr(
            App, "INLINE_REGEXP",
            re.compile(
                "".join(
                    [
                        CONFIG_DATA["INLINE_PREFIX"],
                        r"(.*?)",
                        CONFIG_DATA["INLINE_SUFFIX"]
                    ]
                )
            )
        )
        # Inline note containing only an ID => deletion marker.
        setattr(
            App, "INLINE_EMPTY_REGEXP",
            re.compile(
                "".join(
                    [
                        CONFIG_DATA["INLINE_PREFIX"],
                        r"\s+(?:<!--)?" + ID_PREFIX + r".*?",
                        CONFIG_DATA["INLINE_SUFFIX"]
                    ]
                )
            )
        )
        # Extracts the vault-relative part of a file's absolute path.
        setattr(
            App, "VAULT_PATH_REGEXP",
            re.compile(
                CONFIG_DATA["Vault"] + r".*"
            )
        )

    def get_add_media(self):
        """Get the AnkiConnect-formatted add_media request."""
        return AnkiConnect.request(
            "multi",
            actions=[
                AnkiConnect.request(
                    "storeMediaFile",
                    filename=key,
                    data=value
                )
                for key, value in MEDIA.items()
            ]
        )
class File:
    """Class for performing script operations at the file-level."""

    def __init__(self, filepath):
        """Perform initial file reading and attribute setting.

        Reads the file, derives its obsidian:// URL (if a vault is
        configured), and extracts the per-file target deck and tags.
        """
        self.filename = filepath
        self.path = os.path.abspath(filepath)
        if CONFIG_DATA["Vault"]:
            # Build the obsidian:// link used in "Add file link" mode.
            self.url = "obsidian://vault/{}".format(
                App.VAULT_PATH_REGEXP.search(self.path).group()
            ).replace("\\", "/")
        else:
            self.url = ""
        with open(self.filename, encoding='utf_8') as f:
            self.file = f.read()
        # Kept to detect whether anything changed before writing back.
        self.original_file = self.file
        self.file += "\n"  # Adds empty line, useful for ID
        self.target_deck = App.DECK_REGEXP.search(self.file)
        if self.target_deck is not None:
            self.target_deck = self.target_deck.group(1)
        else:
            self.target_deck = NOTE_DICT_TEMPLATE["deckName"]
        print(
            "Identified target deck for", self.filename,
            "as", self.target_deck
        )
        self.global_tags = App.TAG_REGEXP.search(self.file)
        if self.global_tags is not None:
            self.global_tags = self.global_tags.group(1)
        else:
            self.global_tags = ""

    def scan_file(self):
        """Sort notes from file into adding vs editing.

        Populates notes_to_add / notes_to_edit / notes_to_delete and the
        index lists recording where new IDs must be written back.
        """
        print("Scanning file", self.filename, " for notes...")
        self.notes_to_add = list()
        self.id_indexes = list()
        self.notes_to_edit = list()
        self.notes_to_delete = list()
        self.inline_notes_to_add = list()
        self.inline_id_indexes = list()
        for note_match in App.NOTE_REGEXP.finditer(self.file):
            note, position = note_match.group(1), note_match.end(1)
            parsed = Note(note).parse(self.target_deck, url=self.url)
            if parsed.id is None:
                # Need to make sure global_tags get added.
                parsed.note["tags"] += self.global_tags.split(TAG_SEP)
                self.notes_to_add.append(parsed.note)
                self.id_indexes.append(position)
            elif not parsed.note:
                # This indicates a delete action
                self.notes_to_delete.append(parsed.id)
            else:
                self.notes_to_edit.append(parsed)
        for inline_note_match in App.INLINE_REGEXP.finditer(self.file):
            note = inline_note_match.group(1)
            position = inline_note_match.end(1)
            parsed = InlineNote(note).parse(self.target_deck, url=self.url)
            if parsed.id is None:
                # Need to make sure global_tags get added.
                parsed.note["tags"] += self.global_tags.split(TAG_SEP)
                self.inline_notes_to_add.append(parsed.note)
                self.inline_id_indexes.append(position)
            elif not parsed.note:
                # This indicates a delete action
                self.notes_to_delete.append(parsed.id)
            else:
                self.notes_to_edit.append(parsed)

    @ staticmethod
    def id_to_str(id, inline=False, comment=False):
        """Get the string repr of id.

        inline=True appends a space instead of a newline; comment=True
        wraps the marker in an HTML comment.
        """
        result = ID_PREFIX + str(id)
        if comment:
            result = "<!--" + result + "-->"
        if inline:
            result += " "
        else:
            result += "\n"
        return result

    def write_ids(self):
        """Write the identifiers to self.file.

        Requires self.note_ids, which is assigned externally after the
        addNotes response comes back (presumably in parse_requests_1 —
        outside this view).
        """
        print("Writing new note IDs to file,", self.filename, "...")
        # note_ids is ordered: regular notes first, then inline notes.
        self.file = string_insert(
            self.file, list(
                zip(
                    self.id_indexes, [
                        self.id_to_str(id, comment=CONFIG_DATA["Comment"])
                        for id in self.note_ids[:len(self.notes_to_add)]
                        if id is not None
                    ]
                )
            ) + list(
                zip(
                    self.inline_id_indexes, [
                        self.id_to_str(
                            id, inline=True,
                            comment=CONFIG_DATA["Comment"]
                        )
                        for id in self.note_ids[len(self.notes_to_add):]
                        if id is not None
                    ]
                )
            )
        )

    def remove_empties(self):
        """Remove empty notes from self.file."""
        self.file = App.EMPTY_REGEXP.sub(
            "", self.file
        )
        self.file = App.INLINE_EMPTY_REGEXP.sub(
            "", self.file
        )

    def write_file(self):
        """Write to the actual os file"""
        self.file = self.file[:-1]  # Remove newline added
        # Only touch the disk when something actually changed.
        if self.file != self.original_file:
            write_safe(self.filename, self.file)

    def get_add_notes(self):
        """Get the AnkiConnect-formatted request to add notes."""
        return AnkiConnect.request(
            "addNotes",
            notes=self.notes_to_add + self.inline_notes_to_add
        )

    def get_delete_notes(self):
        """Get the AnkiConnect-formatted request to delete a note."""
        return AnkiConnect.request(
            "deleteNotes",
            notes=self.notes_to_delete
        )

    def get_update_fields(self):
        """Get the AnkiConnect-formatted request to update fields."""
        return AnkiConnect.request(
            "multi",
            actions=[
                AnkiConnect.request(
                    "updateNoteFields", note={
                        "id": parsed.id,
                        "fields": parsed.note["fields"],
                        "audio": parsed.note["audio"]
                    }
                )
                for parsed in self.notes_to_edit
            ]
        )

    def get_note_info(self):
        """Get the AnkiConnect-formatted request to get note info."""
        return AnkiConnect.request(
            "notesInfo",
            notes=[
                parsed.id for parsed in self.notes_to_edit
            ]
        )

    def get_cards(self):
        """Get the card IDs for all notes that need to be edited.

        Flattens self.card_ids (the notesInfo response, assigned
        externally — outside this view) into self.cards.
        """
        print("Getting card IDs")
        self.cards = list()
        for info in self.card_ids:
            self.cards += info["cards"]

    def get_change_decks(self):
        """Get the AnkiConnect-formatted request to change decks."""
        return AnkiConnect.request(
            "changeDeck",
            cards=self.cards,
            deck=self.target_deck
        )

    def get_clear_tags(self):
        """Get the AnkiConnect-formatted request to clear tags.

        self.tags (the full Anki tag list) is assigned externally —
        outside this view.
        """
        return AnkiConnect.request(
            "removeTags",
            notes=[parsed.id for parsed in self.notes_to_edit],
            tags=" ".join(self.tags)
        )

    def get_add_tags(self):
        """Get the AnkiConnect-formatted request to add tags."""
        return AnkiConnect.request(
            "multi",
            actions=[
                AnkiConnect.request(
                    "addTags",
                    notes=[parsed.id],
                    tags=" ".join(parsed.note["tags"]) + " " + self.global_tags
                )
                for parsed in self.notes_to_edit
            ]
        )
class RegexFile(File):
    """File subclass that finds notes via user-defined custom regexps.

    RegexFile.EMPTY_REGEXP is assigned by Config.load_config().
    """

    def scan_file(self):
        """Sort notes from file into adding vs editing."""
        print("Scanning file", self.filename, " for notes...")
        self.ignore_spans = list()
        # The above ensures that the script won't match a RegexNote
        # inside a Note or InlineNote
        self.notes_to_add = list()
        self.id_indexes = list()
        self.notes_to_edit = list()
        self.notes_to_delete = list()
        self.inline_notes_to_add = list()  # To avoid overriding get_add_notes
        self.ignore_spans += spans(App.NOTE_REGEXP, self.file)
        self.ignore_spans += spans(App.INLINE_REGEXP, self.file)
        for note_type, regexp in Config.config["Custom Regexps"].items():
            if regexp:
                self.search(note_type, regexp)
        # Finally, scan for deleting notes
        for match in RegexFile.EMPTY_REGEXP.finditer(self.file):
            self.notes_to_delete.append(
                int(match.group(1))
            )

    def search(self, note_type, regexp):
        """
        Search the file for regex matches of this type,
        ignoring matches inside ignore_spans,
        and adding any matches to ignore_spans.

        Variants are tried most-specific first (tags+id, id, tags,
        bare) so each note is claimed by exactly one variant.
        """
        regexp_tags_id = re.compile(
            "".join(
                [
                    regexp,
                    RegexNote.TAG_REGEXP_STR,
                    RegexNote.ID_REGEXP_STR
                ]
            ), flags=re.MULTILINE
        )
        regexp_id = re.compile(
            regexp + RegexNote.ID_REGEXP_STR, flags=re.MULTILINE
        )
        regexp_tags = re.compile(
            regexp + RegexNote.TAG_REGEXP_STR, flags=re.MULTILINE
        )
        regexp = re.compile(
            regexp, flags=re.MULTILINE
        )
        for match in findignore(regexp_tags_id, self.file, self.ignore_spans):
            # This note has id, so we update it
            self.ignore_spans.append(match.span())
            self.notes_to_edit.append(
                RegexNote(match, note_type, tags=True, id=True).parse(
                    self.target_deck, url=self.url
                )
            )
        for match in findignore(regexp_id, self.file, self.ignore_spans):
            # This note has id, so we update it
            self.ignore_spans.append(match.span())
            self.notes_to_edit.append(
                RegexNote(match, note_type, tags=False, id=True).parse(
                    self.target_deck, url=self.url
                )
            )
        for match in findignore(regexp_tags, self.file, self.ignore_spans):
            # This note has no id, so we add it
            self.ignore_spans.append(match.span())
            parsed = RegexNote(match, note_type, tags=True, id=False).parse(
                self.target_deck, url=self.url
            )
            parsed.note["tags"] += self.global_tags.split(TAG_SEP)
            self.notes_to_add.append(
                parsed.note
            )
            self.id_indexes.append(match.end())
        for match in findignore(regexp, self.file, self.ignore_spans):
            # This note has no id, so we add it
            self.ignore_spans.append(match.span())
            parsed = RegexNote(match, note_type, tags=False, id=False).parse(
                self.target_deck, url=self.url
            )
            parsed.note["tags"] += self.global_tags.split(TAG_SEP)
            self.notes_to_add.append(
                parsed.note
            )
            self.id_indexes.append(match.end())

    def fix_newline_ids(self):
        """Removes double newline then ids from self.file."""
        double_regexp = re.compile(
            r"(\r\n|\r|\n){2}(?:<!--)?" + ID_PREFIX + r"\d+"
        )
        # Drop the first of the two newlines, keeping the ID line snug.
        self.file = double_regexp.sub(
            lambda x: x.group()[1:],
            self.file
        )

    def write_ids(self):
        """Write the identifiers to self.file.

        Requires self.note_ids, assigned externally after the addNotes
        response comes back (outside this view).
        """
        print("Writing new note IDs to file,", self.filename, "...")
        self.file = string_insert(
            self.file, zip(
                self.id_indexes, [
                    "\n" + File.id_to_str(id, comment=CONFIG_DATA["Comment"])
                    for id in self.note_ids
                    if id is not None
                ]
            )
        )
        self.fix_newline_ids()

    def remove_empties(self):
        """Remove empty notes from self.file."""
        self.file = RegexFile.EMPTY_REGEXP.sub(
            "", self.file
        )
class Directory:
    """Class for managing a directory of files at a time."""

    def __init__(self, abspath, regex=False, onefile=None):
        """Scan directory for files.

        Args:
            abspath: Absolute path of the directory to process.
            regex: If True, parse files as RegexFile instead of File.
            onefile: Optional single filename to process instead of
                scanning the whole directory.
        """
        self.path = abspath
        self.parent = os.getcwd()
        self.file_class = RegexFile if regex else File
        os.chdir(self.path)
        try:
            if onefile:
                # Hence, just one file to do
                self.files = [self.file_class(onefile)]
            else:
                with os.scandir() as it:
                    # Natural sort: digit runs in filenames compare numerically.
                    self.files = sorted(
                        [
                            self.file_class(entry.path)
                            for entry in it
                            if entry.is_file() and os.path.splitext(
                                entry.path
                            )[1] in App.SUPPORTED_EXTS
                        ], key=lambda file: [
                            int(part) if part.isdigit() else part.lower()
                            for part in re.split(r'(\d+)', file.filename)]
                    )
            for file in self.files:
                file.scan_file()
        finally:
            # Restore the working directory even if scanning raises, so a
            # failure in one directory doesn't corrupt later path handling.
            os.chdir(self.parent)

    @staticmethod
    def _multi(actions):
        """Bundle a list of AnkiConnect actions into one 'multi' request."""
        return AnkiConnect.request("multi", actions=actions)

    def requests_1(self):
        """Get the 1st HTTP request for this directory."""
        print("Forming request 1 for directory", self.path)
        requests = []
        print("Adding notes into Anki...")
        requests.append(
            self._multi([file.get_add_notes() for file in self.files])
        )
        print("Updating fields of existing notes...")
        requests.append(
            self._multi([file.get_update_fields() for file in self.files])
        )
        print("Getting card IDs of notes to be edited...")
        requests.append(
            self._multi([file.get_note_info() for file in self.files])
        )
        print("Removing empty notes...")
        requests.append(
            self._multi([file.get_delete_notes() for file in self.files])
        )
        return self._multi(requests)

    def parse_requests_1(self, requests_1_response, tags):
        """Distribute the requests_1 response onto each file and write
        the new note ids back to disk.

        Args:
            requests_1_response: The parsed 'multi' response returned for
                requests_1 (index 0 = added note ids, index 2 = note info).
            tags: Global tag string applied to every file.
        """
        response = requests_1_response
        notes_ids = AnkiConnect.parse(response[0])
        cards_ids = AnkiConnect.parse(response[2])
        for note_ids, file in zip(notes_ids, self.files):
            file.note_ids = AnkiConnect.parse(note_ids)
        for card_ids, file in zip(cards_ids, self.files):
            file.card_ids = AnkiConnect.parse(card_ids)
        for file in self.files:
            file.tags = tags
        os.chdir(self.path)
        try:
            for file in self.files:
                file.get_cards()
                file.write_ids()
                print("Removing empty notes for file", file.filename)
                file.remove_empties()
                file.write_file()
        finally:
            # Always restore the working directory, even when a file
            # write fails midway through the loop.
            os.chdir(self.parent)

    def requests_2(self):
        """Get 2nd big request (deck moves and tag replacement)."""
        print("Forming request 2 for directory", self.path)
        requests = []
        print("Moving cards to target deck...")
        requests.append(
            self._multi([file.get_change_decks() for file in self.files])
        )
        print("Replacing tags...")
        requests.append(
            self._multi([file.get_clear_tags() for file in self.files])
        )
        requests.append(
            self._multi([file.get_add_tags() for file in self.files])
        )
        return self._multi(requests)
if __name__ == "__main__":
    print("Attempting to connect to Anki...")
    try:
        wait_for_port(ANKI_PORT)
    except TimeoutError:
        print("Couldn't connect to Anki, attempting to open Anki...")
        if load_anki():
            main()
        else:
            # Previously this path exited silently; make the failure visible.
            print("Couldn't open Anki - aborting.")
    else:
        # No timeout: Anki was already listening on the port.
        print("Connected!")
        main()
| [
"gabrielrodriguesemp@gmail.com"
] | gabrielrodriguesemp@gmail.com |
fb22a6ab4f7c655cbe4d9bd2f5a03676fcb78ce5 | 3d6942911768864d0209c33139260ee73778cf9e | /4 Python Snippets/Time Series Forecasting/gradientboosted_snippets.py | 60207b34b3ce62fad53dd78a4d37d1a4b4cf2aaf | [] | no_license | ciancronin/DSND_code_notes | 0a8d659ce856bb40aa7cce267403c16a45c6af3b | eb19f29c87fe8beef4285d98c22b3f7d2ada8215 | refs/heads/master | 2021-06-26T04:56:13.920537 | 2020-10-30T00:12:56 | 2020-10-30T00:12:56 | 145,321,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,314 | py | """Snippets for GradientBoostingRegressor"""
import yfinance as yf
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_absolute_error
import datetime as dt
import math
from itertools import product
import numpy as np
import time
import utils as ut
import sys
def find_best_gbm_model(X_train, y_train, X_valid, y_valid, parameters):
    """Grid-search GradientBoostingRegressor and return the best model.

    Trains one model per combination of the hyper-parameter grid and
    selects the model with the lowest validation MAE.

    Args:
        X_train, y_train: Training features and target.
        X_valid, y_valid: Validation features and target used for selection.
        parameters: Dict mapping hyper-parameter name -> list of candidate
            values (expanded to the full cartesian product).

    Returns:
        The fitted GradientBoostingRegressor with the lowest validation MAE.
    """
    # Bug fix: previously this iterated the module-level global
    # `parameters_flattened`, silently ignoring the `parameters` argument
    # (and raising NameError when imported as a library). Flatten here.
    flattened = [dict(zip(parameters, v)) for v in product(*parameters.values())]

    # Train one model per parameter combination.
    grid_model_list = {}
    for i, params in enumerate(flattened):
        model = GradientBoostingRegressor(**params)
        model.fit(X_train, y_train)
        grid_model_list[i] = model

    # Evaluate each model on train and validation sets; predictions are
    # clipped at zero (negative amounts are not meaningful here).
    model_result = []
    for i, m in grid_model_list.items():
        train_pred = pd.Series(m.predict(X_train)).clip(lower=0)
        valid_pred = pd.Series(m.predict(X_valid)).clip(lower=0)
        model_result.append({
            'id': i,
            'train': mean_absolute_error(train_pred, y_train),
            'valid': mean_absolute_error(valid_pred, y_valid),
        })

    # Select by validation MAE (first model wins on ties, matching a
    # stable sort of the results).
    best = min(model_result, key=lambda r: r['valid'])
    return grid_model_list[best['id']]
if __name__ == '__main__':
    # Download 3 years of daily prices; only the GOOG series is used below.
    data = yf.download("GOOG AAPL", period='3y')
    goog_adj_close = pd.DataFrame(data['Adj Close']['GOOG'].values,
                                  columns=['amount'],
                                  index=data['Adj Close']['GOOG'].index)
    # Chronological train / validation / test split plus feature matrices.
    y_tr, y_val, y_tst, X_tr, X_val, X_tst = ut.preprocess_time_series_and_split(
        goog_adj_close)
    # Hyper-parameter grid; commented lists are the wider values tried earlier.
    parameters = {
        'learning_rate': [0.1, 0.05],
        'max_depth': [4], # [4, 6, 8],
        'n_estimators': [60], # [60, 80, 100, 120],
        'subsample': [0.8],
        'loss': ['ls'], # Least-squares
        'criterion': ['mse']
    }
    # NOTE(review): find_best_gbm_model appears to read this module-level
    # name directly rather than its `parameters` argument, so this exact
    # variable name is load-bearing - confirm before renaming or removing.
    parameters_flattened = [dict(zip(parameters, v)) for v in product(*parameters.values())]
    print('{} parameter combinations to train'.format(len(parameters_flattened)))
    model = find_best_gbm_model(X_tr, y_tr, X_val, y_val, parameters)
    model_preds = model.predict(X_tst)
    # Report sMAPE on the held-out test window.
    print(ut.smape(y_tst['amount'], model_preds))
    # Plot actuals vs. predictions for visual inspection.
    fig, ax = plt.subplots(figsize=(20, 10))
    ax.plot(y_tst.values, label='Actuals')
    ax.plot(model_preds, label='Predictions')
    ax.legend()
    plt.show()
| [
"42519113+ciancronin@users.noreply.github.com"
] | 42519113+ciancronin@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.