id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
340193 | <filename>pizza_store/enums/__init__.py
from pizza_store.enums.permissions import CategoryPermission, ProductPermission
from pizza_store.enums.role import Role
__all__ = ["CategoryPermission", "ProductPermission", "Role"]
| StarcoderdataPython |
6425483 | <filename>course1/week2/quiz2/PredictingHousePrices.py
# coding: utf-8
# NOTE(review): this is a Jupyter-notebook export written for Python 2
# (statement-form `print`) against the discontinued GraphLab Create
# library; it will not run on Python 3 without porting.
# #Fire up graphlab create
# In[35]:
import graphlab
# #Load some house sales data
#
# Dataset is from house sales in King County, the region where the city of Seattle, WA is located.
# In[36]:
# NOTE(review): assumes the 'home_data.gl/' SFrame directory sits next to
# the notebook -- TODO confirm working directory before running.
sales = graphlab.SFrame('home_data.gl/')
# In[37]:
sales
# #Exploring the data for housing sales
# The house price is correlated with the number of square feet of living space.
# In[38]:
graphlab.canvas.set_target('ipynb')
sales.show(view="Scatter Plot", x="sqft_living", y="price")
# #Create a simple regression model of sqft_living to price
# Split data into training and testing.
# We use seed=0 so that everyone running this notebook gets the same results.  In practice, you may set a random seed (or let GraphLab Create pick a random seed for you).
# In[39]:
# fixed seed so the 80/20 split is reproducible across runs
train_data,test_data = sales.random_split(.8,seed=0)
# ##Build the regression model using only sqft_living as a feature
# In[40]:
sqft_model = graphlab.linear_regression.create(train_data, target='price', features=['sqft_living'])
# #Evaluate the simple model
# In[41]:
print test_data['price'].mean()
# In[42]:
print sqft_model.evaluate(test_data)
# RMSE of about \$255,170!
# #Let's show what our predictions look like
# Matplotlib is a Python plotting library that is also useful for plotting.  You can install it with:
#
# 'pip install matplotlib'
# In[43]:
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
# In[44]:
plt.plot(test_data['sqft_living'],test_data['price'],'.',
         test_data['sqft_living'],sqft_model.predict(test_data),'-')
# Above:  blue dots are original data, green line is the prediction from the simple regression.
#
# Below: we can view the learned regression coefficients.
# In[45]:
sqft_model.get('coefficients')
# #Explore other features in the data
#
# To build a more elaborate model, we will explore using more features.
# In[46]:
my_features = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'zipcode']
# In[47]:
sales[my_features].show()
# In[48]:
sales.show(view='BoxWhisker Plot', x='zipcode', y='price')
# Pull the bar at the bottom to view more of the data.
#
# 98039 is the most expensive zip code.
# #Build a regression model with more features
# In[49]:
my_features_model = graphlab.linear_regression.create(train_data,target='price',features=my_features)
# In[50]:
print my_features
# ##Comparing the results of the simple model with adding more features
# In[51]:
print sqft_model.evaluate(test_data)
print my_features_model.evaluate(test_data)
# The RMSE goes down from \$255,170 to \$179,508 with more features.
# #Apply learned models to predict prices of 3 houses
# The first house we will use is considered an "average" house in Seattle.
# In[52]:
house1 = sales[sales['id']=='5309101200']
# In[53]:
house1
# <img src="house-5309101200.jpg">
# In[54]:
print house1['price']
# In[55]:
print sqft_model.predict(house1)
# In[56]:
print my_features_model.predict(house1)
# In this case, the model with more features provides a worse prediction than the simpler model with only 1 feature.  However, on average, the model with more features is better.
# ##Prediction for a second, fancier house
#
# We will now examine the predictions for a fancier house.
# In[57]:
house2 = sales[sales['id']=='1925069082']
# In[58]:
house2
# <img src="house-1925069082.jpg">
# In[59]:
print sqft_model.predict(house2)
# In[60]:
print my_features_model.predict(house2)
# In this case, the model with more features provides a better prediction.  This behavior is expected here, because this house is more differentiated by features that go beyond its square feet of living space, especially the fact that it's a waterfront house.
# ##Last house, super fancy
#
# Our last house is a very large one owned by a famous Seattleite.
# In[61]:
# hand-built single-row record (each value wrapped in a one-element list so
# it can be turned into an SFrame below)
bill_gates = {'bedrooms':[8],
              'bathrooms':[25],
              'sqft_living':[50000],
              'sqft_lot':[225000],
              'floors':[4],
              'zipcode':['98039'],
              'condition':[10],
              'grade':[10],
              'waterfront':[1],
              'view':[4],
              'sqft_above':[37500],
              'sqft_basement':[12500],
              'yr_built':[1994],
              'yr_renovated':[2010],
              'lat':[47.627606],
              'long':[-122.242054],
              'sqft_living15':[5000],
              'sqft_lot15':[40000]}
# <img src="house-bill-gates.jpg">
# In[62]:
print my_features_model.predict(graphlab.SFrame(bill_gates))
# The model predicts a price of over $13M for this house! But we expect the house to cost much more.  (There are very few samples in the dataset of houses that are this fancy, so we  don't expect the model to capture a perfect prediction here.)
# In[63]:
house_zip_code = sales[sales["zipcode"] == "98039"]
# In[64]:
house_zip_code
# In[65]:
house_zip_code['price'].mean()
# In[66]:
# filter to mid-size houses: 2000 < sqft_living <= 4000
house_zip_code_range = house_zip_code[house_zip_code.apply(lambda x: x['sqft_living'] > 2000.0 and x['sqft_living'] <= 4000.0)]
# In[67]:
house_zip_code_range.head()
# In[68]:
house_zip_code_range.num_rows()
# In[69]:
house_zip_code.num_rows()
# In[70]:
advanced_features = [
'bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'zipcode',
'condition', # condition of house
'grade', # measure of quality of construction
'waterfront', # waterfront property
'view', # type of view
'sqft_above', # square feet above ground
'sqft_basement', # square feet in basement
'yr_built', # the year built
'yr_renovated', # the year renovated
'lat', 'long', # the lat-long of the parcel
'sqft_living15', # average sq.ft. of 15 nearest neighbors
'sqft_lot15', # average lot size of 15 nearest neighbors
]
# In[71]:
advanced_features_model = graphlab.linear_regression.create(train_data, target='price', features=advanced_features)
# In[72]:
print advanced_features_model.evaluate(test_data)
# In[73]:
advanced_features_model.evaluate(test_data)['rmse'] - my_features_model.evaluate(test_data)['rmse']
# In[ ]:
# In[ ]:
| StarcoderdataPython |
3342306 | import csv
import logging
import json
import math
import random
import re
import time
import urllib.request
from pathlib import Path
import sys
import pandas as pd
import get_edgar.common.my_csv as mc
logger = logging.getLogger(__name__)
EDGAR_PREFIX = "https://www.sec.gov/Archives/"
SEC_PREFIX = "https://www.sec.gov"
## Download index
# Generate the output csv paths for the sample year
def dl_index(folder, start_year, end_year, form_types, prefix, ciks=None):
    """Download EDGAR index files, one csv per form type per year.

    Arguments:
        folder {Path} -- folder in which to store the index csvs
        start_year {int} -- first year of the sample period (inclusive)
        end_year {int} -- last year of the sample period (inclusive)
        form_types {iterable of str} -- form types to download indexes for
        prefix {str} -- prefix for the output index csv names
        ciks {Path or tuple or set} -- optional CIK filter: a csv file
            containing a 'CIK' column, or an in-memory tuple/set of CIKs

    Returns:
        list of Path -- Paths of all the index csvs for the sample period
    """
    years = range(start_year, end_year + 1)
    # exist_ok avoids the race between a separate exists() check and mkdir();
    # parents=True also creates missing ancestors
    folder.mkdir(parents=True, exist_ok=True)
    cik_need = input_cik(ciks=ciks)
    index_csvs = []
    for form_type in form_types:
        for year in years:
            index_csv = folder / f'index_{prefix}_{form_type}_{year}.csv'
            get_index_master(year, form_type, index_csv, cik_filter=cik_need)
            index_csvs.append(index_csv)
    return index_csvs
def input_cik(ciks=None):
    """Normalize the optional CIK filter into an in-memory collection.

    Arguments:
        ciks {Path or tuple or set or None} -- a tuple/set of CIKs, a csv
            Path with a 'CIK' column, or None for no filtering

    Returns:
        tuple/set of CIKs, or None when no filter was requested
    """
    if ciks is None:
        return None
    # isinstance instead of type(...) in (...): accepts subclasses too
    if isinstance(ciks, (tuple, set)):
        return ciks
    # otherwise assume a csv Path and pull its 'CIK' column
    return mc.extract_obs(ciks, 'CIK')
# Generate index csv for each year
def get_index_master(year, form_type, out_csv, cik_filter=None):
    """Download the four quarterly EDGAR master indexes for one year and
    write the rows matching *form_type* (and optionally *cik_filter*) to csv.

    year -> the year to download
    form_type -> the name of the form type required, case sensitive
    out_csv -> Path of the csv to create; skipped if it already exists
    cik_filter -> optional collection of CIK strings to keep

    Output:
        csv file for required index with columns
        cik, conm, form_type, filing_date, txt_path, html_index
    """
    if out_csv.exists() == False:
        urls = index_url(year)
        with open(out_csv, 'w', newline='') as out:
            writer = csv.writer(out)
            labels = ['cik', 'conm', 'form_type', 'filing_date', 'txt_path', 'html_index']
            writer.writerow(labels)
            for url in urls:
                try:
                    master = urllib.request.urlopen(url).read()
                except urllib.error.HTTPError:
                    # NOTE(review): break (not continue) stops at the first
                    # missing quarter -- presumably later quarters of the
                    # current year do not exist yet; confirm this is intended
                    logger.error(f'{url} does not exist')
                    break
                # master.idx is latin-1 encoded
                lines = str(master, "latin-1").splitlines()
                for line in lines[11:]:  # skip header, first 11 lines for master.idx
                    row = append_html_index(line)
                    if form_type_filter(row, form_type):
                        if cik_filter is not None:
                            # row[0] is the filing's CIK
                            if row[0] in cik_filter:
                                writer.writerow(row)
                        else:
                            writer.writerow(row)
                logger.info(f"{year} {form_type} downloaded and wrote to csv")
            logger.info(f'{out_csv} created')
    else:
        logger.info(f'{out_csv} already exists')
def index_url(year):
    """Build the download urls of the four quarterly master index files.

    year -> the year to download

    Returns:
        list of four url strings, one per quarter (QTR1..QTR4)
    """
    urls = []
    for qtr_number in range(1, 5):
        urls.append(
            f'https://www.sec.gov/Archives/edgar/full-index/{year}/QTR{qtr_number}/master.idx'
        )
    return urls
def append_html_index(line):
    """Split one master-index line and append the filing's html index url.

    line -> a '|'-separated line from an EDGAR master.idx file

    Returns:
        the line's fields as a list, with the final txt path made absolute
        and the matching -index.html url appended as a new last element
    """
    fields = line.split("|")
    # the last field is a repository-relative .txt path; make it absolute
    txt_url = EDGAR_PREFIX + fields[-1]
    fields[-1] = txt_url
    # the human-readable filing index page lives at the same path
    fields.append(txt_url.replace(".txt", "-index.html"))
    return fields
def form_type_filter(chunks, form_type):
    """Check whether an index line matches the required form type.

    chunks -> a separated line of an index file (form type in chunks[2])
    form_type -> the name of the form type required, case sensitive

    Returns:
        True if the line's form type equals the required one after
        normalization (case and non-word characters ignored), else False
    """
    try:
        # normalize both sides -- lower-case and strip non-word characters --
        # so e.g. '10-K ' and '10-k' both compare as '10k'
        norm_type = re.compile(r'[^\w]')
        type_t = re.sub(norm_type, '', chunks[2].strip().lower())
        type_m = re.sub(norm_type, '', form_type.lower())
    except (AttributeError, IndexError, TypeError):
        # narrowed from a bare except: only the errors a malformed line or a
        # non-string form_type can raise are treated as "no match"
        logger.error('form type need to be a string')
        return False
    return type_m == type_t
def evt_filter(csv_index, evt_csv, evtdate, mperiods):
    """Keep filings for the specific period after a event

    Arguments:
        csv_index {Path} -- The Path for the csv file containing all filings
        evt_csv {Path} -- The Path for the csv file containing the event dates
        evtdate {str} -- The variable name of the event date
        mperiods {int} -- The number of months after the event dates

    Returns:
        Path -- The Path for the resulting csv file (written next to
        csv_index, with an '_{mperiods}m' suffix)
    """
    all_index = pd.read_csv(csv_index, parse_dates=['filing_date'])
    # merge keys must share a dtype; the index side is forced to str here
    all_index['cik'] = all_index['cik'].apply(str)
    evt = pd.read_csv(evt_csv, parse_dates=[evtdate])
    # keep filings from 10 days before the event up to mperiods months after
    evt['post_evt'] = evt[evtdate] + pd.DateOffset(months=mperiods)
    evt['pre_evt'] = evt[evtdate] - pd.DateOffset(days=10)
    # retry loop: if the event file's CIK column was read as int, the merge
    # raises ValueError; convert to str and try once more
    while True:
        try:
            combined = pd.merge(all_index, evt, left_on='cik', right_on='CIK', how='left')
            break
        except ValueError:
            evt['CIK'] = evt['CIK'].apply(str)
    filt = (combined['filing_date'] >= combined['pre_evt']) & \
           (combined['filing_date'] <= combined['post_evt'])
    # drop the helper columns before writing the filtered rows back out
    results = combined.loc[filt].drop(['CIK', evtdate, 'post_evt', 'pre_evt'], axis=1)
    results.drop_duplicates(keep='first', inplace=True)
    # csv_index.name[:-4] strips the '.csv' extension
    csv_out = csv_index.resolve().parent / f'{csv_index.name[:-4]}_{mperiods}m.csv'
    results.to_csv(csv_out, index=False)
    return csv_out
| StarcoderdataPython |
4925683 | from src.api_server import APIServer
import threading
import time
#The Scheduler is a control loop that checks for any pods that have been created
#but not yet deployed, found in the etcd pendingPodList.
#It transfers Pod objects from the pendingPodList to the runningPodList and creates an EndPoint object to store in the etcd EndPoint list
#If no WorkerNode is available that can take the pod, it remains in the pendingPodList
class Scheduler(threading.Thread):
    """Control loop that assigns pending pods to available worker nodes.

    Moves Pod objects from etcd's pendingPodList to its runningPodList,
    creating an EndPoint for each placement.  A pod stays pending when no
    worker with status "UP" has enough available cpu for it.
    """

    def __init__(self, APISERVER, LOOPTIME):
        # Bug fix: threading.Thread.__init__ was never called, so calling
        # .start() on a Scheduler raised "RuntimeError: thread.__init__()
        # not called".  Initialising the base class fixes that without
        # changing how the object is used as a callable.
        super().__init__()
        self.apiServer = APISERVER
        self.running = True          # cleared externally to stop the loop
        self.time = LOOPTIME         # seconds to sleep between passes

    def __call__(self):
        print("Scheduler start")
        while self.running:
            newPending = []
            # hold the etcd lock for the whole scheduling pass so the pod
            # and node lists cannot change underneath us
            with self.apiServer.etcdLock:
                for pod in self.apiServer.etcd.pendingPodList:
                    for worker in self.apiServer.etcd.nodeList:
                        if worker.status == "UP":
                            if worker.available_cpu >= pod.assigned_cpu:
                                # first-fit placement on this worker
                                pod.status = "RUNNING"
                                #print("Assigning Pod "+ pod.podName+" to Node "+worker.label)
                                worker.available_cpu -= pod.assigned_cpu
                                self.apiServer.CreateEndPoint(pod, worker)
                                self.apiServer.etcd.runningPodList.append(pod)
                                break
                    if pod.status == "PENDING":
                        # no worker could take it; try again next pass
                        newPending.append(pod)
                self.apiServer.etcd.pendingPodList = newPending
            time.sleep(self.time)
        print("SchedShutdown")
| StarcoderdataPython |
71344 | <reponame>lsteffenel/dask-tutorial
## verbose version
# NOTE(review): notebook/REPL fragment, not a standalone module -- it relies
# on `delayed`, `pd` and `filenames` already being defined in the session,
# and the `%time` lines are IPython magics that plain Python cannot parse.
delayed_read_csv = delayed(pd.read_csv)
a = delayed_read_csv(filenames[0])
b = delayed_read_csv(filenames[1])
c = delayed_read_csv(filenames[2])
delayed_len = delayed(len)
na = delayed_len(a)
nb = delayed_len(b)
nc = delayed_len(c)
delayed_sum = delayed(sum)
total = delayed_sum([na, nb, nc])
# nothing is actually read until compute() triggers the task graph
%time print(total.compute())
## concise version
csvs = [delayed(pd.read_csv)(fn) for fn in filenames]
lens = [delayed(len)(csv) for csv in csvs]
total = delayed(sum)(lens)
%time print(total.compute()) | StarcoderdataPython |
17378 | #!/usr/bin/python
#-*-coding:utf-8-*-
import json
import sys
import time
# TBD: auto discovery
# data_path = "/proc/fs/lustre/llite/nvmefs-ffff883f8a4f2800/stats"
data_path = "/proc/fs/lustre/lmv/shnvme3-clilmv-ffff8859d3e2d000/md_stats"
# use a dic1/dic2 to hold sampling data
def load_data(dic):
    """Read the lustre stats file at ``data_path`` into *dic*.

    dic -- dict filled in place, mapping counter name (str) to the first
    numeric column of its stats line (float)
    """
    # 'with' guarantees the handle is closed even if parsing raises
    with open(data_path, "r") as stats_file:
        for line in stats_file:
            words = line.split()
            if len(words) < 2:
                # Bug fix: the original called the undefined name println()
                # here, which raised NameError on any malformed line.
                print("got error line, to skip")
                continue
            dic[words[0]] = float(words[1])
    # print(dic)
# put "next - prev" into delta
def calc_delta(prev, next, delta):
    """Store the element-wise difference ``next - prev`` into *delta*.

    All three arguments are dicts keyed by counter name; every key present
    in prev is expected to be present in next as well.
    """
    for key, previous_value in prev.items():
        delta[key] = next[key] - previous_value
# print a dictionary in the indented json format
def print_dict(dic):
    """Pretty-print *dic* as indented, key-sorted JSON (non-ASCII kept)."""
    rendered = json.dumps(dic, indent=2, sort_keys=True, ensure_ascii=False)
    print(rendered)
# calculate iops for each category except snapshot_time, all divided by snapshot_time
def calc_iops_from_delta(delta):
    """Convert raw counter deltas into per-second rates, in place.

    Every entry except 'snapshot_time' is divided by the elapsed time and
    truncated to an int.  Nothing is changed when the elapsed time is too
    small to divide by safely.
    """
    elapsed = delta['snapshot_time']
    # guard against a zero or near-zero time gap
    if elapsed < 0.000001:
        print("error: time gap too small")
        return
    for key, raw_value in delta.items():
        if key != 'snapshot_time':
            delta[key] = int(raw_value / elapsed)
if __name__ == '__main__':
    # dic1/dic2 are used to load prev/next kernel data interchangably
    # calc delta by doing: next - prev
    # calc iops by doing: delta/time_consumption
    dic1 = {}
    dic2 = {}
    delta = {}
    # prime the first sample so the first loop pass has a baseline
    load_data(dic1)
    prev = 1
    # load_data(dic2)
    # calc_delta(dic1, dic2, delta)
    # calc_iops_from_delta(delta)
    # print_dict(delta)
    # dic1['name'] = 'anhua'
    # print_dict(dic1)
    # enter loop
    # ping-pong between dic1 and dic2 forever, printing iops every cycle
    while True:
        time.sleep(2) # TBD: configurable
        if prev == 1:
            load_data(dic2)
            prev = 2
            calc_delta(dic1, dic2, delta)
        else:
            load_data(dic1)
            prev = 1
            calc_delta(dic2, dic1, delta)
        calc_iops_from_delta(delta)
        print_dict(delta)
| StarcoderdataPython |
11267071 | # -----------------------------------------------------------------------------
# pytermor [ANSI formatted terminal output toolset]
# (C) 2022 <NAME> <<EMAIL>>
# -----------------------------------------------------------------------------
import unittest
from datetime import timedelta
from pytermor import format_time_delta
from tests import verb_print_info, verb_print_header, verb_print_subtests
class TestTimeDelta(unittest.TestCase):
    """Table-driven tests for pytermor.format_time_delta.

    Each row of expected_format_dataset pairs the expected rendered string
    with the input timedelta; 'OVERFLOW' marks deltas the formatter cannot
    represent at the given maximum length.
    """

    # widest expected string below ('-10 months') is 10 characters
    expected_format_max_len = 10
    expected_format_dataset = [
        ['OVERFLOW', timedelta(days=-700000)],
        ['-2 years', timedelta(days=-1000)],
        ['-10 months', timedelta(days=-300)],
        ['-3 months', timedelta(days=-100)],
        ['-9d 23h', timedelta(days=-9, hours=-23)],
        ['-5d 0h', timedelta(days=-5)],
        ['-13h 30min', timedelta(days=-1, hours=10, minutes=30)],
        ['-45 mins', timedelta(hours=-1, minutes=15)],
        ['-5 mins', timedelta(minutes=-5)],
        ['-2 secs', timedelta(seconds=-2.01)],
        ['-2 secs', timedelta(seconds=-2)],
        ['-1 sec', timedelta(seconds=-2, microseconds=1)],
        ['-1 sec', timedelta(seconds=-1.9)],
        ['-1 sec', timedelta(seconds=-1.1)],
        ['-1 sec', timedelta(seconds=-1.0)],
        ['~0 secs', timedelta(seconds=-0.5)],
        ['~0 secs', timedelta(milliseconds=-50)],
        ['~0 secs', timedelta(microseconds=-100)],
        ['~0 secs', timedelta(microseconds=-1)],
        ['0 secs', timedelta()],
        ['0 secs', timedelta(microseconds=500)],
        ['<1 sec', timedelta(milliseconds=25)],
        ['<1 sec', timedelta(seconds=0.1)],
        ['<1 sec', timedelta(seconds=0.9)],
        ['1 sec', timedelta(seconds=1)],
        ['1 sec', timedelta(seconds=1.0)],
        ['1 sec', timedelta(seconds=1.1)],
        ['1 sec', timedelta(seconds=1.9)],
        ['1 sec', timedelta(seconds=2, microseconds=-1)],
        ['2 secs', timedelta(seconds=2)],
        ['2 secs', timedelta(seconds=2.0)],
        ['2 secs', timedelta(seconds=2.5)],
        ['10 secs', timedelta(seconds=10)],
        ['1 min', timedelta(minutes=1)],
        ['5 mins', timedelta(minutes=5)],
        ['15 mins', timedelta(minutes=15)],
        ['45 mins', timedelta(minutes=45)],
        ['1h 30min', timedelta(hours=1, minutes=30)],
        ['4h 15min', timedelta(hours=4, minutes=15)],
        ['8h 59min', timedelta(hours=8, minutes=59, seconds=59)],
        ['12h 30min', timedelta(hours=12, minutes=30)],
        ['18h 45min', timedelta(hours=18, minutes=45)],
        ['23h 50min', timedelta(hours=23, minutes=50)],
        ['1d 0h', timedelta(days=1)],
        ['3d 4h', timedelta(days=3, hours=4)],
        ['5d 22h', timedelta(days=5, hours=22, minutes=51)],
        ['6d 23h', timedelta(days=7, minutes=-1)],
        ['9d 0h', timedelta(days=9)],
        ['12 days', timedelta(days=12, hours=18)],
        ['16 days', timedelta(days=16, hours=2)],
        ['1 month', timedelta(days=30)],
        ['1 month', timedelta(days=55)],
        ['2 months', timedelta(days=70)],
        ['2 months', timedelta(days=80)],
        ['6 months', timedelta(days=200)],
        ['11 months', timedelta(days=350)],
        ['1 year', timedelta(days=390)],
        ['2 years', timedelta(days=810)],
        ['27 years', timedelta(days=10000)],
        ['277 years', timedelta(days=100000)],
        ['OVERFLOW', timedelta(days=400000)],
    ]

    def test_output_has_expected_format(self):
        """Each dataset row formats to exactly the expected string."""
        verb_print_info()
        for idx, (expected_output, input_arg) in enumerate(self.expected_format_dataset):
            subtest_msg = f'tdelta/match #{idx}: "{input_arg}" -> "{expected_output}"'
            with self.subTest(msg=subtest_msg):
                actual_output = format_time_delta(input_arg.total_seconds(), self.expected_format_max_len)
                verb_print_info(subtest_msg + f' => "{actual_output}"')
                self.assertEqual(expected_output, actual_output)
        verb_print_subtests(len(self.expected_format_dataset))

    """ ----------------------------------------------------------------------------------------------------------- """

    # reuse the dataset's inputs for the length-limit checks below
    req_len_expected_len_list = [3, 4, 6, 10, 9, 1000]
    req_len_input_delta_list = [el[1] for el in expected_format_dataset]

    def test_output_fits_in_required_length(self):
        """Output never exceeds the requested maximum length."""
        verb_print_info()
        for idx, expected_max_len in enumerate(self.req_len_expected_len_list):
            verb_print_header(f'expected_max_len={expected_max_len:d}:')
            for input_idx, input_td in enumerate(self.req_len_input_delta_list):
                subtest_msg = f'tdelta/len #{input_idx}: "{input_td}" -> (len {expected_max_len})'
                with self.subTest(msg=subtest_msg):
                    actual_output = format_time_delta(input_td.total_seconds(), expected_max_len)
                    verb_print_info(subtest_msg + f' => (len {len(actual_output)}) "{actual_output}"')
                    self.assertGreaterEqual(expected_max_len,
                                            len(actual_output),
                                            f'Actual output ("{actual_output}") exceeds maximum')

    """ ----------------------------------------------------------------------------------------------------------- """

    # lengths below the formatter's minimum must be rejected
    invalid_len_list = [-5, 0, 1, 2]

    def test_invalid_max_length_fails(self):
        """A too-small max_len raises ValueError."""
        for invalid_max_len in self.invalid_len_list:
            with self.subTest(msg=f'invalid max length {invalid_max_len}'):
                self.assertRaises(ValueError, lambda: format_time_delta(100, invalid_max_len))
| StarcoderdataPython |
11311046 | import sys
class ImageGen:
    """
    Handles the construction of the ASCII art
    rectangles that represent what software,
    styles and influences should be used
    for the next project.
    """

    def __init__(self, typeface='#'):
        # character used when a coordinate map is rendered onto the base
        self.typeface = typeface

    def software(self, ident=''):
        """Return the (row, col) coordinate map for a software logo.

        ident -- key naming the tracker/DAW.  An unknown key prints an
        error message and terminates the program.
        """
        images = {
            'bambootracker': [
                [(1, 2), (1, 3), (1, 4)],
                [(2, 2), (2, 5)],
                [(3, 2), (3, 3), (3, 4), (3, 5), (3, 6), (3, 8), (3, 9), (3, 10), (3, 11), (3, 12)],
                [(4, 2), (4, 6), (4, 10)],
                [(5, 2), (5, 3), (5, 4), (5, 5), (5, 10)]
            ],
            'famitracker': [
                [(1, 2), (1, 3), (1, 4), (1, 5), (1, 6)],
                [(2, 2)],
                [(3, 2), (3, 3), (3, 4), (3, 5), (3, 6), (3, 8), (3, 9), (3, 10), (3, 11), (3, 12)],
                [(4, 2), (4, 10)],
                [(5, 2), (5, 10)]
            ],
            'lmms': [
                [(1, 2)],
                [(2, 2)],
                [(3, 2), (3, 8), (3, 9), (3, 10), (3, 11), (3, 12)],
                [(4, 2), (4, 8), (4, 10), (4, 12)],
                [(5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (5, 8), (5, 12)]
            ],
            'sunvox': [
                [(1, 3), (1, 4), (1, 5), (1, 6)],
                [(2, 2)],
                [(3, 3), (3, 4), (3, 5), (3, 8), (3, 12)],
                [(4, 6), (4, 9), (4, 11)],
                [(5, 2), (5, 3), (5, 4), (5, 5), (5, 10)]
            ]
        }
        try:
            return images[ident]
        except KeyError:
            # Bug fix: dict subscripting raises KeyError, not IndexError, so
            # the original handler could never fire and an unknown ident
            # crashed with an unhandled KeyError instead of this message.
            sys.stdout.write('Invalid software type passed to image generator.\n')
            sys.exit()

    def extension(self, ident=''):
        """Return the (row, col) coordinate map for a FamiTracker
        expansion-chip logo (empty list where no art exists yet).

        ident -- key naming the expansion chip.  An unknown key prints an
        error message and terminates the program.
        """
        images = {
            '2a03': [
                [(1, 3), (1, 4), (1, 5)],
                [(2, 2), (2, 6)],
                [(3, 5), (3, 8), (3, 10)],
                [(4, 3), (4, 9), (4, 10), (4, 11)],
                [(5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (5, 8), (5, 12)]
            ],
            'fds': [],
            'vrc6': [],
            'vrc7': [],
            'n163': []
        }
        try:
            return images[ident]
        except KeyError:
            # Bug fix: same IndexError -> KeyError correction as software()
            sys.stdout.write('Invalid FamiTracker extension type passed to image generator.\n')
            sys.exit()

    def getBase(self):
        """Return a fresh 7x15 empty ASCII rectangle (border included)."""
        return [
            ['@', '=', '=', '=', '=', '=', '=', '=', '=', '=', '=', '=', '=', '=', '@'],
            ['|', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '|'],
            ['|', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '|'],
            ['|', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '|'],
            ['|', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '|'],
            ['|', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '|'],
            ['@', '=', '=', '=', '=', '=', '=', '=', '=', '=', '=', '=', '=', '=', '@']
        ]

    def getTypeface(self):
        """Return the character configured for rendering."""
        return self.typeface
9785014 | # coding: utf-8
# (C) Copyright IBM Corp. 2019, 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
IBM Watson™ Discovery for IBM Cloud Pak for Data is a cognitive search and content
analytics engine that you can add to applications to identify patterns, trends and
actionable insights to drive better decision-making. Securely unify structured and
unstructured data with pre-enriched content, and use a simplified query language to
eliminate the need for manual filtering of results.
"""
import json
from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator
from .common import get_sdk_headers
from datetime import datetime
from enum import Enum
from ibm_cloud_sdk_core import BaseService
from ibm_cloud_sdk_core import DetailedResponse
from ibm_cloud_sdk_core import datetime_to_string, string_to_datetime
from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment
from os.path import basename
from typing import BinaryIO
from typing import Dict
from typing import List
import sys
##############################################################################
# Service
##############################################################################
class DiscoveryV2(BaseService):
"""The Discovery V2 service."""
DEFAULT_SERVICE_URL = None
DEFAULT_SERVICE_NAME = 'discovery'
    def __init__(
            self,
            version: str,
            authenticator: Authenticator = None,
            service_name: str = DEFAULT_SERVICE_NAME,
    ) -> None:
        """
        Construct a new client for the Discovery service.

        :param str version: The API version date to use with the service, in
               "YYYY-MM-DD" format. Whenever the API is changed in a backwards
               incompatible way, a new minor version of the API is released.
               The service uses the API version for the date you specify, or
               the most recent version before that date. Note that you should
               not programmatically specify the current date at runtime, in
               case the API has been updated since your application's release.
               Instead, specify a version date that is compatible with your
               application, and don't change it until your application is
               ready for a later version.

        :param Authenticator authenticator: The authenticator specifies the authentication mechanism.
               Get up to date information from https://github.com/IBM/python-sdk-core/blob/master/README.md
               about initializing the authenticator of your choice.
        """
        # fall back to environment/credentials-file configuration when the
        # caller did not supply an authenticator explicitly
        if not authenticator:
            authenticator = get_authenticator_from_environment(service_name)
        BaseService.__init__(self,
                             service_url=self.DEFAULT_SERVICE_URL,
                             authenticator=authenticator,
                             disable_ssl_verification=False)
        self.version = version
        self.configure_service(service_name)
#########################
# Collections
#########################
    def list_collections(self, project_id: str, **kwargs) -> 'DetailedResponse':
        """
        List collections.

        Lists existing collections for the specified project.

        :param str project_id: The ID of the project. This information can be found
               from the deploy page of the Discovery administrative tooling.
        :param dict headers: A `dict` containing the request headers
        :return: A `DetailedResponse` containing the result, headers and HTTP status code.
        :rtype: DetailedResponse
        """
        if project_id is None:
            raise ValueError('project_id must be provided')

        # caller-supplied headers first, then the SDK analytics headers
        headers = {}
        if 'headers' in kwargs:
            headers.update(kwargs.get('headers'))
        sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                      service_version='V2',
                                      operation_id='list_collections')
        headers.update(sdk_headers)

        params = {'version': self.version}

        url = '/v2/projects/{0}/collections'.format(
            *self._encode_path_vars(project_id))
        request = self.prepare_request(method='GET',
                                       url=url,
                                       headers=headers,
                                       params=params)

        response = self.send(request)
        return response
#########################
# Queries
#########################
    def query(self,
              project_id: str,
              *,
              collection_ids: List[str] = None,
              filter: str = None,
              query: str = None,
              natural_language_query: str = None,
              aggregation: str = None,
              count: int = None,
              return_: List[str] = None,
              offset: int = None,
              sort: str = None,
              highlight: bool = None,
              spelling_suggestions: bool = None,
              table_results: 'QueryLargeTableResults' = None,
              suggested_refinements: 'QueryLargeSuggestedRefinements' = None,
              passages: 'QueryLargePassages' = None,
              **kwargs) -> 'DetailedResponse':
        """
        Query a project.

        By using this method, you can construct queries. For details, see the [Discovery
        documentation](https://cloud.ibm.com/docs/discovery-data?topic=discovery-data-query-concepts).

        :param str project_id: The ID of the project. This information can be found
               from the deploy page of the Discovery administrative tooling.
        :param List[str] collection_ids: (optional) A comma-separated list of
               collection IDs to be queried against.
        :param str filter: (optional) A cacheable query that excludes documents
               that don't mention the query content. Filter searches are better for
               metadata-type searches and for assessing the concepts in the data set.
        :param str query: (optional) A query search returns all documents in your
               data set with full enrichments and full text, but with the most relevant
               documents listed first. Use a query search when you want to find the most
               relevant search results.
        :param str natural_language_query: (optional) A natural language query that
               returns relevant documents by utilizing training data and natural language
               understanding.
        :param str aggregation: (optional) An aggregation search that returns an
               exact answer by combining query search with filters. Useful for
               applications to build lists, tables, and time series. For a full list of
               possible aggregations, see the Query reference.
        :param int count: (optional) Number of results to return.
        :param List[str] return_: (optional) A list of the fields in the document
               hierarchy to return. If this parameter not specified, then all top-level
               fields are returned.
        :param int offset: (optional) The number of query results to skip at the
               beginning. For example, if the total number of results that are returned is
               10 and the offset is 8, it returns the last two results.
        :param str sort: (optional) A comma-separated list of fields in the
               document to sort on. You can optionally specify a sort direction by
               prefixing the field with `-` for descending or `+` for ascending. Ascending
               is the default sort direction if no prefix is specified. This parameter
               cannot be used in the same query as the **bias** parameter.
        :param bool highlight: (optional) When `true`, a highlight field is
               returned for each result which contains the fields which match the query
               with `<em></em>` tags around the matching query terms.
        :param bool spelling_suggestions: (optional) When `true` and the
               **natural_language_query** parameter is used, the
               **natural_language_query** parameter is spell checked. The most likely
               correction is returned in the **suggested_query** field of the response (if
               one exists).
        :param QueryLargeTableResults table_results: (optional) Configuration for
               table retrieval.
        :param QueryLargeSuggestedRefinements suggested_refinements: (optional)
               Configuration for suggested refinements.
        :param QueryLargePassages passages: (optional) Configuration for passage
               retrieval.
        :param dict headers: A `dict` containing the request headers
        :return: A `DetailedResponse` containing the result, headers and HTTP status code.
        :rtype: DetailedResponse
        """
        if project_id is None:
            raise ValueError('project_id must be provided')
        # convert model objects to plain dicts for JSON serialization
        if table_results is not None:
            table_results = self._convert_model(table_results)
        if suggested_refinements is not None:
            suggested_refinements = self._convert_model(suggested_refinements)
        if passages is not None:
            passages = self._convert_model(passages)

        headers = {}
        if 'headers' in kwargs:
            headers.update(kwargs.get('headers'))
        sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                      service_version='V2',
                                      operation_id='query')
        headers.update(sdk_headers)

        params = {'version': self.version}

        # request body; None-valued entries correspond to parameters the
        # caller did not set (presumably pruned before sending -- the
        # base SDK handles serialization)
        data = {
            'collection_ids': collection_ids,
            'filter': filter,
            'query': query,
            'natural_language_query': natural_language_query,
            'aggregation': aggregation,
            'count': count,
            'return': return_,
            'offset': offset,
            'sort': sort,
            'highlight': highlight,
            'spelling_suggestions': spelling_suggestions,
            'table_results': table_results,
            'suggested_refinements': suggested_refinements,
            'passages': passages
        }

        url = '/v2/projects/{0}/query'.format(
            *self._encode_path_vars(project_id))
        request = self.prepare_request(method='POST',
                                       url=url,
                                       headers=headers,
                                       params=params,
                                       data=data)

        response = self.send(request)
        return response
    def get_autocompletion(self,
                           project_id: str,
                           prefix: str,
                           *,
                           collection_ids: List[str] = None,
                           field: str = None,
                           count: int = None,
                           **kwargs) -> 'DetailedResponse':
        """
        Get Autocomplete Suggestions.

        Returns completion query suggestions for the specified prefix.

        :param str project_id: The ID of the project. This information can be found
               from the deploy page of the Discovery administrative tooling.
        :param str prefix: The prefix to use for autocompletion. For example, the
               prefix `Ho` could autocomplete to `Hot`, `Housing`, or `How do I upgrade`.
               Possible completions are.
        :param List[str] collection_ids: (optional) Comma separated list of the
               collection IDs. If this parameter is not specified, all collections in the
               project are used.
        :param str field: (optional) The field in the result documents that
               autocompletion suggestions are identified from.
        :param int count: (optional) The number of autocompletion suggestions to
               return.
        :param dict headers: A `dict` containing the request headers
        :return: A `DetailedResponse` containing the result, headers and HTTP status code.
        :rtype: DetailedResponse
        """
        if project_id is None:
            raise ValueError('project_id must be provided')
        if prefix is None:
            raise ValueError('prefix must be provided')

        headers = {}
        if 'headers' in kwargs:
            headers.update(kwargs.get('headers'))
        sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                      service_version='V2',
                                      operation_id='get_autocompletion')
        headers.update(sdk_headers)

        # collection_ids is sent as a single comma-separated query parameter
        params = {
            'version': self.version,
            'prefix': prefix,
            'collection_ids': self._convert_list(collection_ids),
            'field': field,
            'count': count
        }

        url = '/v2/projects/{0}/autocompletion'.format(
            *self._encode_path_vars(project_id))
        request = self.prepare_request(method='GET',
                                       url=url,
                                       headers=headers,
                                       params=params)

        response = self.send(request)
        return response
def query_notices(self,
project_id: str,
*,
filter: str = None,
query: str = None,
natural_language_query: str = None,
count: int = None,
offset: int = None,
**kwargs) -> 'DetailedResponse':
"""
Query system notices.
Queries for notices (errors or warnings) that might have been generated by the
system. Notices are generated when ingesting documents and performing relevance
training.
:param str project_id: The ID of the project. This information can be found
from the deploy page of the Discovery administrative tooling.
:param str filter: (optional) A cacheable query that excludes documents
that don't mention the query content. Filter searches are better for
metadata-type searches and for assessing the concepts in the data set.
:param str query: (optional) A query search returns all documents in your
data set with full enrichments and full text, but with the most relevant
documents listed first.
:param str natural_language_query: (optional) A natural language query that
returns relevant documents by utilizing training data and natural language
understanding.
:param int count: (optional) Number of results to return. The maximum for
the **count** and **offset** values together in any one query is **10000**.
:param int offset: (optional) The number of query results to skip at the
beginning. For example, if the total number of results that are returned is
10 and the offset is 8, it returns the last two results. The maximum for
the **count** and **offset** values together in any one query is **10000**.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if project_id is None:
raise ValueError('project_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V2',
operation_id='query_notices')
headers.update(sdk_headers)
params = {
'version': self.version,
'filter': filter,
'query': query,
'natural_language_query': natural_language_query,
'count': count,
'offset': offset
}
url = '/v2/projects/{0}/notices'.format(
*self._encode_path_vars(project_id))
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def list_fields(self,
project_id: str,
*,
collection_ids: List[str] = None,
**kwargs) -> 'DetailedResponse':
"""
List fields.
Gets a list of the unique fields (and their types) stored in the the specified
collections.
:param str project_id: The ID of the project. This information can be found
from the deploy page of the Discovery administrative tooling.
:param List[str] collection_ids: (optional) Comma separated list of the
collection IDs. If this parameter is not specified, all collections in the
project are used.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if project_id is None:
raise ValueError('project_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V2',
operation_id='list_fields')
headers.update(sdk_headers)
params = {
'version': self.version,
'collection_ids': self._convert_list(collection_ids)
}
url = '/v2/projects/{0}/fields'.format(
*self._encode_path_vars(project_id))
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
#########################
# Component settings
#########################
def get_component_settings(self, project_id: str,
**kwargs) -> 'DetailedResponse':
"""
Configuration settings for components.
Returns default configuration settings for components.
:param str project_id: The ID of the project. This information can be found
from the deploy page of the Discovery administrative tooling.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if project_id is None:
raise ValueError('project_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V2',
operation_id='get_component_settings')
headers.update(sdk_headers)
params = {'version': self.version}
url = '/v2/projects/{0}/component_settings'.format(
*self._encode_path_vars(project_id))
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
#########################
# Documents
#########################
    def add_document(self,
                     project_id: str,
                     collection_id: str,
                     *,
                     file: BinaryIO = None,
                     filename: str = None,
                     file_content_type: str = None,
                     metadata: str = None,
                     x_watson_discovery_force: bool = None,
                     **kwargs) -> 'DetailedResponse':
        """
        Add a document.

        Add a document to a collection with optional metadata, as a multipart
        upload. Returns immediately after the system has accepted the document
        for processing.
        * The user must provide document content, metadata, or both. If the
        request is missing both document content and metadata, it is rejected.
        * The user can set the **Content-Type** parameter on the **file** part
        to indicate the media type of the document. If the **Content-Type**
        parameter is missing or is one of the generic media types (for example,
        `application/octet-stream`), then the service attempts to automatically
        detect the document's media type.
        * The following field names are reserved and will be filtered out if
        present after normalization: `id`, `score`, `highlight`, and any field
        with the prefix of: `_`, `+`, or `-`
        * Fields with empty name values after normalization are filtered out
        before indexing.
        * Fields containing the following characters after normalization are
        filtered out before indexing: `#` and `,`
        If the document is uploaded to a collection that has it's data shared
        with another collection, the **X-Watson-Discovery-Force** header must
        be set to `true`.
        **Note:** Documents can be added with a specific **document_id** by
        using the
        **_/v2/projects/{project_id}/collections/{collection_id}/documents**
        method.
        **Note:** This operation only works on collections created to accept
        direct file uploads. It cannot be used to modify a collection that
        connects to an external source such as Microsoft SharePoint.

        :param str project_id: The ID of the project. This information can be found
               from the deploy page of the Discovery administrative tooling.
        :param str collection_id: The ID of the collection.
        :param BinaryIO file: (optional) The content of the document to ingest. The
               maximum supported file size when adding a file to a collection is 50
               megabytes, the maximum supported file size when testing a configuration is
               1 megabyte. Files larger than the supported size are rejected.
        :param str filename: (optional) The filename for file.
        :param str file_content_type: (optional) The content type of file.
        :param str metadata: (optional) The maximum supported metadata file size is
               1 MB. Metadata parts larger than 1 MB are rejected. Example: ``` {
               "Creator": "<NAME>",
               "Subject": "Apples"
               } ```.
        :param bool x_watson_discovery_force: (optional) When `true`, the uploaded
               document is added to the collection even if the data for that collection is
               shared with other collections.
        :param dict headers: A `dict` containing the request headers
        :return: A `DetailedResponse` containing the result, headers and HTTP status code.
        :rtype: DetailedResponse
        """
        if project_id is None:
            raise ValueError('project_id must be provided')
        if collection_id is None:
            raise ValueError('collection_id must be provided')
        # Operation-specific header; may be None here.
        # NOTE(review): assumes the base-service request machinery drops
        # None-valued headers before sending — confirm.
        headers = {'X-Watson-Discovery-Force': x_watson_discovery_force}
        if 'headers' in kwargs:
            headers.update(kwargs.get('headers'))
        # SDK analytics headers are merged last so they cannot be overridden
        # by caller-supplied headers.
        sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                      service_version='V2',
                                      operation_id='add_document')
        headers.update(sdk_headers)
        params = {'version': self.version}
        # Build the multipart body: an optional 'file' part and an optional
        # 'metadata' part.
        form_data = []
        if file:
            # Derive the filename from the file object when not given
            # explicitly; a multipart file part requires one.
            if not filename and hasattr(file, 'name'):
                filename = basename(file.name)
            if not filename:
                raise ValueError('filename must be provided')
            form_data.append(('file', (filename, file, file_content_type or
                                       'application/octet-stream')))
        if metadata:
            metadata = str(metadata)
            form_data.append(('metadata', (None, metadata, 'text/plain')))
        url = '/v2/projects/{0}/collections/{1}/documents'.format(
            *self._encode_path_vars(project_id, collection_id))
        request = self.prepare_request(method='POST',
                                       url=url,
                                       headers=headers,
                                       params=params,
                                       files=form_data)
        response = self.send(request)
        return response
    def update_document(self,
                        project_id: str,
                        collection_id: str,
                        document_id: str,
                        *,
                        file: BinaryIO = None,
                        filename: str = None,
                        file_content_type: str = None,
                        metadata: str = None,
                        x_watson_discovery_force: bool = None,
                        **kwargs) -> 'DetailedResponse':
        """
        Update a document.

        Replace an existing document or add a document with a specified
        **document_id**. Starts ingesting a document with optional metadata.
        If the document is uploaded to a collection that has it's data shared
        with another collection, the **X-Watson-Discovery-Force** header must
        be set to `true`.
        **Note:** When uploading a new document with this method it
        automatically replaces any document stored with the same
        **document_id** if it exists.
        **Note:** This operation only works on collections created to accept
        direct file uploads. It cannot be used to modify a collection that
        connects to an external source such as Microsoft SharePoint.
        **Note:** If an uploaded document is segmented, all segments will be
        overwritten, even if the updated version of the document has fewer
        segments.

        :param str project_id: The ID of the project. This information can be found
               from the deploy page of the Discovery administrative tooling.
        :param str collection_id: The ID of the collection.
        :param str document_id: The ID of the document.
        :param BinaryIO file: (optional) The content of the document to ingest. The
               maximum supported file size when adding a file to a collection is 50
               megabytes, the maximum supported file size when testing a configuration is
               1 megabyte. Files larger than the supported size are rejected.
        :param str filename: (optional) The filename for file.
        :param str file_content_type: (optional) The content type of file.
        :param str metadata: (optional) The maximum supported metadata file size is
               1 MB. Metadata parts larger than 1 MB are rejected. Example: ``` {
               "Creator": "<NAME>",
               "Subject": "Apples"
               } ```.
        :param bool x_watson_discovery_force: (optional) When `true`, the uploaded
               document is added to the collection even if the data for that collection is
               shared with other collections.
        :param dict headers: A `dict` containing the request headers
        :return: A `DetailedResponse` containing the result, headers and HTTP status code.
        :rtype: DetailedResponse
        """
        if project_id is None:
            raise ValueError('project_id must be provided')
        if collection_id is None:
            raise ValueError('collection_id must be provided')
        if document_id is None:
            raise ValueError('document_id must be provided')
        # Operation-specific header; may be None here.
        # NOTE(review): assumes the base-service request machinery drops
        # None-valued headers before sending — confirm.
        headers = {'X-Watson-Discovery-Force': x_watson_discovery_force}
        if 'headers' in kwargs:
            headers.update(kwargs.get('headers'))
        # SDK analytics headers are merged last so they cannot be overridden
        # by caller-supplied headers.
        sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
                                      service_version='V2',
                                      operation_id='update_document')
        headers.update(sdk_headers)
        params = {'version': self.version}
        # Build the multipart body: an optional 'file' part and an optional
        # 'metadata' part.
        form_data = []
        if file:
            # Derive the filename from the file object when not given
            # explicitly; a multipart file part requires one.
            if not filename and hasattr(file, 'name'):
                filename = basename(file.name)
            if not filename:
                raise ValueError('filename must be provided')
            form_data.append(('file', (filename, file, file_content_type or
                                       'application/octet-stream')))
        if metadata:
            metadata = str(metadata)
            form_data.append(('metadata', (None, metadata, 'text/plain')))
        url = '/v2/projects/{0}/collections/{1}/documents/{2}'.format(
            *self._encode_path_vars(project_id, collection_id, document_id))
        request = self.prepare_request(method='POST',
                                       url=url,
                                       headers=headers,
                                       params=params,
                                       files=form_data)
        response = self.send(request)
        return response
def delete_document(self,
project_id: str,
collection_id: str,
document_id: str,
*,
x_watson_discovery_force: bool = None,
**kwargs) -> 'DetailedResponse':
"""
Delete a document.
If the given document ID is invalid, or if the document is not found, then the a
success response is returned (HTTP status code `200`) with the status set to
'deleted'.
**Note:** This operation only works on collections created to accept direct file
uploads. It cannot be used to modify a collection that connects to an external
source such as Microsoft SharePoint.
**Note:** Segments of an uploaded document cannot be deleted individually. Delete
all segments by deleting using the `parent_document_id` of a segment result.
:param str project_id: The ID of the project. This information can be found
from the deploy page of the Discovery administrative tooling.
:param str collection_id: The ID of the collection.
:param str document_id: The ID of the document.
:param bool x_watson_discovery_force: (optional) When `true`, the uploaded
document is added to the collection even if the data for that collection is
shared with other collections.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if project_id is None:
raise ValueError('project_id must be provided')
if collection_id is None:
raise ValueError('collection_id must be provided')
if document_id is None:
raise ValueError('document_id must be provided')
headers = {'X-Watson-Discovery-Force': x_watson_discovery_force}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V2',
operation_id='delete_document')
headers.update(sdk_headers)
params = {'version': self.version}
url = '/v2/projects/{0}/collections/{1}/documents/{2}'.format(
*self._encode_path_vars(project_id, collection_id, document_id))
request = self.prepare_request(method='DELETE',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
#########################
# Training data
#########################
def list_training_queries(self, project_id: str,
**kwargs) -> 'DetailedResponse':
"""
List training queries.
List the training queries for the specified project.
:param str project_id: The ID of the project. This information can be found
from the deploy page of the Discovery administrative tooling.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if project_id is None:
raise ValueError('project_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V2',
operation_id='list_training_queries')
headers.update(sdk_headers)
params = {'version': self.version}
url = '/v2/projects/{0}/training_data/queries'.format(
*self._encode_path_vars(project_id))
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def delete_training_queries(self, project_id: str,
**kwargs) -> 'DetailedResponse':
"""
Delete training queries.
Removes all training queries for the specified project.
:param str project_id: The ID of the project. This information can be found
from the deploy page of the Discovery administrative tooling.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if project_id is None:
raise ValueError('project_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V2',
operation_id='delete_training_queries')
headers.update(sdk_headers)
params = {'version': self.version}
url = '/v2/projects/{0}/training_data/queries'.format(
*self._encode_path_vars(project_id))
request = self.prepare_request(method='DELETE',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def create_training_query(self,
project_id: str,
natural_language_query: str,
examples: List['TrainingExample'],
*,
filter: str = None,
**kwargs) -> 'DetailedResponse':
"""
Create training query.
Add a query to the training data for this project. The query can contain a filter
and natural language query.
:param str project_id: The ID of the project. This information can be found
from the deploy page of the Discovery administrative tooling.
:param str natural_language_query: The natural text query for the training
query.
:param List[TrainingExample] examples: Array of training examples.
:param str filter: (optional) The filter used on the collection before the
**natural_language_query** is applied.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if project_id is None:
raise ValueError('project_id must be provided')
if natural_language_query is None:
raise ValueError('natural_language_query must be provided')
if examples is None:
raise ValueError('examples must be provided')
examples = [self._convert_model(x) for x in examples]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V2',
operation_id='create_training_query')
headers.update(sdk_headers)
params = {'version': self.version}
data = {
'natural_language_query': natural_language_query,
'examples': examples,
'filter': filter
}
url = '/v2/projects/{0}/training_data/queries'.format(
*self._encode_path_vars(project_id))
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params,
data=data)
response = self.send(request)
return response
def get_training_query(self, project_id: str, query_id: str,
**kwargs) -> 'DetailedResponse':
"""
Get a training data query.
Get details for a specific training data query, including the query string and all
examples.
:param str project_id: The ID of the project. This information can be found
from the deploy page of the Discovery administrative tooling.
:param str query_id: The ID of the query used for training.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if project_id is None:
raise ValueError('project_id must be provided')
if query_id is None:
raise ValueError('query_id must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V2',
operation_id='get_training_query')
headers.update(sdk_headers)
params = {'version': self.version}
url = '/v2/projects/{0}/training_data/queries/{1}'.format(
*self._encode_path_vars(project_id, query_id))
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def update_training_query(self,
project_id: str,
query_id: str,
natural_language_query: str,
examples: List['TrainingExample'],
*,
filter: str = None,
**kwargs) -> 'DetailedResponse':
"""
Update a training query.
Updates an existing training query and it's examples.
:param str project_id: The ID of the project. This information can be found
from the deploy page of the Discovery administrative tooling.
:param str query_id: The ID of the query used for training.
:param str natural_language_query: The natural text query for the training
query.
:param List[TrainingExample] examples: Array of training examples.
:param str filter: (optional) The filter used on the collection before the
**natural_language_query** is applied.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if project_id is None:
raise ValueError('project_id must be provided')
if query_id is None:
raise ValueError('query_id must be provided')
if natural_language_query is None:
raise ValueError('natural_language_query must be provided')
if examples is None:
raise ValueError('examples must be provided')
examples = [self._convert_model(x) for x in examples]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V2',
operation_id='update_training_query')
headers.update(sdk_headers)
params = {'version': self.version}
data = {
'natural_language_query': natural_language_query,
'examples': examples,
'filter': filter
}
url = '/v2/projects/{0}/training_data/queries/{1}'.format(
*self._encode_path_vars(project_id, query_id))
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params,
data=data)
response = self.send(request)
return response
class AddDocumentEnums(object):
    """Enum constants used by the `add_document` operation."""

    class FileContentType(Enum):
        """
        The content type of file.
        """
        APPLICATION_JSON = 'application/json'
        APPLICATION_MSWORD = 'application/msword'
        APPLICATION_VND_OPENXMLFORMATS_OFFICEDOCUMENT_WORDPROCESSINGML_DOCUMENT = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
        APPLICATION_PDF = 'application/pdf'
        TEXT_HTML = 'text/html'
        APPLICATION_XHTML_XML = 'application/xhtml+xml'
class UpdateDocumentEnums(object):
    """Enum constants used by the `update_document` operation."""

    class FileContentType(Enum):
        """
        The content type of file.
        """
        APPLICATION_JSON = 'application/json'
        APPLICATION_MSWORD = 'application/msword'
        APPLICATION_VND_OPENXMLFORMATS_OFFICEDOCUMENT_WORDPROCESSINGML_DOCUMENT = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
        APPLICATION_PDF = 'application/pdf'
        TEXT_HTML = 'text/html'
        APPLICATION_XHTML_XML = 'application/xhtml+xml'
##############################################################################
# Models
##############################################################################
class Collection():
    """
    A collection for storing documents.

    :attr str collection_id: (optional) The unique identifier of the collection.
    :attr str name: (optional) The name of the collection.
    """

    # All serializable attributes of this model.
    _ATTRS = ('collection_id', 'name')

    def __init__(self, *, collection_id: str = None, name: str = None) -> None:
        """
        Initialize a Collection object.

        :param str collection_id: (optional) The unique identifier of the
               collection.
        :param str name: (optional) The name of the collection.
        """
        self.collection_id = collection_id
        self.name = name

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'Collection':
        """Initialize a Collection object from a json dictionary."""
        unexpected = [k for k in _dict if k not in cls._ATTRS]
        if unexpected:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class Collection: '
                + ', '.join(unexpected))
        kwargs = {key: _dict[key] for key in cls._ATTRS if key in _dict}
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a Collection object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        for attr in self._ATTRS:
            value = getattr(self, attr, None)
            if value is not None:
                serialized[attr] = value
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this Collection object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'Collection') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other,
                          self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'Collection') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class Completions():
    """
    An object containing an array of autocompletion suggestions.

    :attr List[str] completions: (optional) Array of autocomplete suggestions
          based on the provided prefix.
    """

    def __init__(self, *, completions: List[str] = None) -> None:
        """
        Initialize a Completions object.

        :param List[str] completions: (optional) Array of autocomplete
               suggestions based on the provided prefix.
        """
        self.completions = completions

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'Completions':
        """Initialize a Completions object from a json dictionary."""
        unexpected = [k for k in _dict if k != 'completions']
        if unexpected:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class Completions: '
                + ', '.join(unexpected))
        kwargs = {}
        if 'completions' in _dict:
            kwargs['completions'] = _dict['completions']
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a Completions object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        value = getattr(self, 'completions', None)
        return {} if value is None else {'completions': value}

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this Completions object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'Completions') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other,
                          self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'Completions') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class ComponentSettingsAggregation():
    """
    Display settings for aggregations.

    :attr str name: (optional) Identifier used to map aggregation settings to
          aggregation configuration.
    :attr str label: (optional) User-friendly alias for the aggregation.
    :attr bool multiple_selections_allowed: (optional) Whether users are allowed
          to select more than one of the aggregation terms.
    :attr str visualization_type: (optional) Type of visualization to use when
          rendering the aggregation.
    """

    # All serializable attributes of this model.
    _ATTRS = ('name', 'label', 'multiple_selections_allowed',
              'visualization_type')

    def __init__(self,
                 *,
                 name: str = None,
                 label: str = None,
                 multiple_selections_allowed: bool = None,
                 visualization_type: str = None) -> None:
        """
        Initialize a ComponentSettingsAggregation object.

        :param str name: (optional) Identifier used to map aggregation settings
               to aggregation configuration.
        :param str label: (optional) User-friendly alias for the aggregation.
        :param bool multiple_selections_allowed: (optional) Whether users are
               allowed to select more than one of the aggregation terms.
        :param str visualization_type: (optional) Type of visualization to use
               when rendering the aggregation.
        """
        self.name = name
        self.label = label
        self.multiple_selections_allowed = multiple_selections_allowed
        self.visualization_type = visualization_type

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'ComponentSettingsAggregation':
        """Initialize a ComponentSettingsAggregation object from a json dictionary."""
        unexpected = [k for k in _dict if k not in cls._ATTRS]
        if unexpected:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class ComponentSettingsAggregation: '
                + ', '.join(unexpected))
        kwargs = {key: _dict[key] for key in cls._ATTRS if key in _dict}
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ComponentSettingsAggregation object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        for attr in self._ATTRS:
            value = getattr(self, attr, None)
            if value is not None:
                serialized[attr] = value
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this ComponentSettingsAggregation object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'ComponentSettingsAggregation') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other,
                          self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'ComponentSettingsAggregation') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other

    class VisualizationTypeEnum(Enum):
        """
        Type of visualization to use when rendering the aggregation.
        """
        AUTO = "auto"
        FACET_TABLE = "facet_table"
        WORD_CLOUD = "word_cloud"
        MAP = "map"
class ComponentSettingsFieldsShown():
    """
    Fields shown in the results section of the UI.

    :attr ComponentSettingsFieldsShownBody body: (optional) Body label.
    :attr ComponentSettingsFieldsShownTitle title: (optional) Title label.
    """

    def __init__(self,
                 *,
                 body: 'ComponentSettingsFieldsShownBody' = None,
                 title: 'ComponentSettingsFieldsShownTitle' = None) -> None:
        """
        Initialize a ComponentSettingsFieldsShown object.

        :param ComponentSettingsFieldsShownBody body: (optional) Body label.
        :param ComponentSettingsFieldsShownTitle title: (optional) Title label.
        """
        self.body = body
        self.title = title

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'ComponentSettingsFieldsShown':
        """Initialize a ComponentSettingsFieldsShown object from a json dictionary."""
        unexpected = [k for k in _dict if k not in ('body', 'title')]
        if unexpected:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class ComponentSettingsFieldsShown: '
                + ', '.join(unexpected))
        kwargs = {}
        # Nested labels are themselves models and must be deserialized.
        if 'body' in _dict:
            kwargs['body'] = ComponentSettingsFieldsShownBody._from_dict(
                _dict['body'])
        if 'title' in _dict:
            kwargs['title'] = ComponentSettingsFieldsShownTitle._from_dict(
                _dict['title'])
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ComponentSettingsFieldsShown object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        for attr in ('body', 'title'):
            value = getattr(self, attr, None)
            if value is not None:
                # Nested models serialize themselves.
                serialized[attr] = value._to_dict()
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this ComponentSettingsFieldsShown object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'ComponentSettingsFieldsShown') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other,
                          self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'ComponentSettingsFieldsShown') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class ComponentSettingsFieldsShownBody():
    """
    Body label.

    :attr bool use_passage: (optional) Use the whole passage as the body.
    :attr str field: (optional) Use a specific field as the title.
    """

    def __init__(self, *, use_passage: bool = None, field: str = None) -> None:
        """
        Initialize a ComponentSettingsFieldsShownBody object.

        :param bool use_passage: (optional) Use the whole passage as the body.
        :param str field: (optional) Use a specific field as the title.
        """
        self.use_passage = use_passage
        self.field = field

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'ComponentSettingsFieldsShownBody':
        """Initialize a ComponentSettingsFieldsShownBody object from a json dictionary."""
        known = ('use_passage', 'field')
        unknown = set(_dict) - set(known)
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class ComponentSettingsFieldsShownBody: '
                + ', '.join(unknown))
        return cls(**{key: _dict[key] for key in known if key in _dict})

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ComponentSettingsFieldsShownBody object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        for attr in ('use_passage', 'field'):
            value = getattr(self, attr, None)
            if value is not None:
                serialized[attr] = value
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this ComponentSettingsFieldsShownBody object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'ComponentSettingsFieldsShownBody') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'ComponentSettingsFieldsShownBody') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self.__eq__(other)
class ComponentSettingsFieldsShownTitle():
    """
    Title label.

    :attr str field: (optional) Use a specific field as the title.
    """

    def __init__(self, *, field: str = None) -> None:
        """
        Initialize a ComponentSettingsFieldsShownTitle object.

        :param str field: (optional) Use a specific field as the title.
        """
        self.field = field

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'ComponentSettingsFieldsShownTitle':
        """Initialize a ComponentSettingsFieldsShownTitle object from a json dictionary."""
        unknown = set(_dict) - {'field'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class ComponentSettingsFieldsShownTitle: '
                + ', '.join(unknown))
        return cls(**{key: _dict[key] for key in ('field',) if key in _dict})

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ComponentSettingsFieldsShownTitle object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'field', None) is not None:
            serialized['field'] = self.field
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this ComponentSettingsFieldsShownTitle object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'ComponentSettingsFieldsShownTitle') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'ComponentSettingsFieldsShownTitle') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self.__eq__(other)
class ComponentSettingsResponse():
    """
    A response containing the default component settings.

    :attr ComponentSettingsFieldsShown fields_shown: (optional) Fields shown in the
          results section of the UI.
    :attr bool autocomplete: (optional) Whether or not autocomplete is enabled.
    :attr bool structured_search: (optional) Whether or not structured search is
          enabled.
    :attr int results_per_page: (optional) Number or results shown per page.
    :attr List[ComponentSettingsAggregation] aggregations: (optional) a list of
          component setting aggregations.
    """

    def __init__(
            self,
            *,
            fields_shown: 'ComponentSettingsFieldsShown' = None,
            autocomplete: bool = None,
            structured_search: bool = None,
            results_per_page: int = None,
            aggregations: List['ComponentSettingsAggregation'] = None) -> None:
        """
        Initialize a ComponentSettingsResponse object.

        :param ComponentSettingsFieldsShown fields_shown: (optional) Fields shown
               in the results section of the UI.
        :param bool autocomplete: (optional) Whether or not autocomplete is
               enabled.
        :param bool structured_search: (optional) Whether or not structured search
               is enabled.
        :param int results_per_page: (optional) Number or results shown per page.
        :param List[ComponentSettingsAggregation] aggregations: (optional) a list
               of component setting aggregations.
        """
        self.fields_shown = fields_shown
        self.autocomplete = autocomplete
        self.structured_search = structured_search
        self.results_per_page = results_per_page
        self.aggregations = aggregations

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'ComponentSettingsResponse':
        """Initialize a ComponentSettingsResponse object from a json dictionary."""
        known = ('fields_shown', 'autocomplete', 'structured_search',
                 'results_per_page', 'aggregations')
        unknown = set(_dict) - set(known)
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class ComponentSettingsResponse: '
                + ', '.join(unknown))
        # Plain scalar fields copy through unchanged.
        kwargs = {
            key: _dict[key]
            for key in ('autocomplete', 'structured_search', 'results_per_page')
            if key in _dict
        }
        # Nested model fields are deserialized through their model classes.
        if 'fields_shown' in _dict:
            kwargs['fields_shown'] = ComponentSettingsFieldsShown._from_dict(
                _dict['fields_shown'])
        if 'aggregations' in _dict:
            kwargs['aggregations'] = [
                ComponentSettingsAggregation._from_dict(item)
                for item in _dict['aggregations']
            ]
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ComponentSettingsResponse object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'fields_shown', None) is not None:
            serialized['fields_shown'] = self.fields_shown._to_dict()
        for attr in ('autocomplete', 'structured_search', 'results_per_page'):
            value = getattr(self, attr, None)
            if value is not None:
                serialized[attr] = value
        if getattr(self, 'aggregations', None) is not None:
            serialized['aggregations'] = [
                item._to_dict() for item in self.aggregations
            ]
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this ComponentSettingsResponse object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'ComponentSettingsResponse') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'ComponentSettingsResponse') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self.__eq__(other)
class DeleteDocumentResponse():
    """
    Information returned when a document is deleted.

    :attr str document_id: (optional) The unique identifier of the document.
    :attr str status: (optional) Status of the document. A deleted document has the
          status deleted.
    """

    def __init__(self, *, document_id: str = None, status: str = None) -> None:
        """
        Initialize a DeleteDocumentResponse object.

        :param str document_id: (optional) The unique identifier of the document.
        :param str status: (optional) Status of the document. A deleted document
               has the status deleted.
        """
        self.document_id = document_id
        self.status = status

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'DeleteDocumentResponse':
        """Initialize a DeleteDocumentResponse object from a json dictionary."""
        known = ('document_id', 'status')
        unknown = set(_dict) - set(known)
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class DeleteDocumentResponse: '
                + ', '.join(unknown))
        return cls(**{key: _dict[key] for key in known if key in _dict})

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a DeleteDocumentResponse object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        for attr in ('document_id', 'status'):
            value = getattr(self, attr, None)
            if value is not None:
                serialized[attr] = value
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this DeleteDocumentResponse object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'DeleteDocumentResponse') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'DeleteDocumentResponse') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self.__eq__(other)

    class StatusEnum(Enum):
        """
        Status of the document. A deleted document has the status deleted.
        """
        DELETED = "deleted"
class DocumentAccepted():
    """
    Information returned after an uploaded document is accepted.

    :attr str document_id: (optional) The unique identifier of the ingested
          document.
    :attr str status: (optional) Status of the document in the ingestion process. A
          status of `processing` is returned for documents that are ingested with a
          *version* date before `2019-01-01`. The `pending` status is returned for all
          others.
    """

    def __init__(self, *, document_id: str = None, status: str = None) -> None:
        """
        Initialize a DocumentAccepted object.

        :param str document_id: (optional) The unique identifier of the ingested
               document.
        :param str status: (optional) Status of the document in the ingestion
               process. A status of `processing` is returned for documents that are
               ingested with a *version* date before `2019-01-01`. The `pending` status is
               returned for all others.
        """
        self.document_id = document_id
        self.status = status

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'DocumentAccepted':
        """Initialize a DocumentAccepted object from a json dictionary."""
        known = ('document_id', 'status')
        unknown = set(_dict) - set(known)
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class DocumentAccepted: '
                + ', '.join(unknown))
        return cls(**{key: _dict[key] for key in known if key in _dict})

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a DocumentAccepted object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        for attr in ('document_id', 'status'):
            value = getattr(self, attr, None)
            if value is not None:
                serialized[attr] = value
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this DocumentAccepted object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'DocumentAccepted') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'DocumentAccepted') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self.__eq__(other)

    class StatusEnum(Enum):
        """
        Status of the document in the ingestion process. A status of `processing` is
        returned for documents that are ingested with a *version* date before
        `2019-01-01`. The `pending` status is returned for all others.
        """
        PROCESSING = "processing"
        PENDING = "pending"
class DocumentAttribute():
    """
    List of document attributes.

    :attr str type: (optional) The type of attribute.
    :attr str text: (optional) The text associated with the attribute.
    :attr TableElementLocation location: (optional) The numeric location of the
          identified element in the document, represented with two integers labeled
          `begin` and `end`.
    """

    def __init__(self,
                 *,
                 type: str = None,
                 text: str = None,
                 location: 'TableElementLocation' = None) -> None:
        """
        Initialize a DocumentAttribute object.

        :param str type: (optional) The type of attribute.
        :param str text: (optional) The text associated with the attribute.
        :param TableElementLocation location: (optional) The numeric location of
               the identified element in the document, represented with two integers
               labeled `begin` and `end`.
        """
        self.type = type
        self.text = text
        self.location = location

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'DocumentAttribute':
        """Initialize a DocumentAttribute object from a json dictionary."""
        unknown = set(_dict) - {'type', 'text', 'location'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class DocumentAttribute: '
                + ', '.join(unknown))
        kwargs = {key: _dict[key] for key in ('type', 'text') if key in _dict}
        # The location sub-object has its own model class.
        if 'location' in _dict:
            kwargs['location'] = TableElementLocation._from_dict(
                _dict['location'])
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a DocumentAttribute object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        for attr in ('type', 'text'):
            value = getattr(self, attr, None)
            if value is not None:
                serialized[attr] = value
        if getattr(self, 'location', None) is not None:
            serialized['location'] = self.location._to_dict()
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this DocumentAttribute object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'DocumentAttribute') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'DocumentAttribute') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self.__eq__(other)
class Field():
    """
    Object containing field details.

    :attr str field: (optional) The name of the field.
    :attr str type: (optional) The type of the field.
    :attr str collection_id: (optional) The collection Id of the collection where
          the field was found.
    """

    def __init__(self,
                 *,
                 field: str = None,
                 type: str = None,
                 collection_id: str = None) -> None:
        """
        Initialize a Field object.

        :param str field: (optional) The name of the field.
        :param str type: (optional) The type of the field.
        :param str collection_id: (optional) The collection Id of the collection
               where the field was found.
        """
        self.field = field
        self.type = type
        self.collection_id = collection_id

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'Field':
        """Initialize a Field object from a json dictionary."""
        known = ('field', 'type', 'collection_id')
        unknown = set(_dict) - set(known)
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class Field: ' +
                ', '.join(unknown))
        return cls(**{key: _dict[key] for key in known if key in _dict})

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a Field object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        for attr in ('field', 'type', 'collection_id'):
            value = getattr(self, attr, None)
            if value is not None:
                serialized[attr] = value
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this Field object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'Field') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'Field') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self.__eq__(other)

    class TypeEnum(Enum):
        """
        The type of the field.
        """
        NESTED = "nested"
        STRING = "string"
        DATE = "date"
        LONG = "long"
        INTEGER = "integer"
        SHORT = "short"
        BYTE = "byte"
        DOUBLE = "double"
        FLOAT = "float"
        BOOLEAN = "boolean"
        BINARY = "binary"
class ListCollectionsResponse():
    """
    Response object containing an array of collection details.

    :attr List[Collection] collections: (optional) An array containing information
          about each collection in the project.
    """

    def __init__(self, *, collections: List['Collection'] = None) -> None:
        """
        Initialize a ListCollectionsResponse object.

        :param List[Collection] collections: (optional) An array containing
               information about each collection in the project.
        """
        self.collections = collections

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'ListCollectionsResponse':
        """Initialize a ListCollectionsResponse object from a json dictionary."""
        unknown = set(_dict) - {'collections'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class ListCollectionsResponse: '
                + ', '.join(unknown))
        kwargs = {}
        if 'collections' in _dict:
            kwargs['collections'] = [
                Collection._from_dict(item)
                for item in _dict['collections']
            ]
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ListCollectionsResponse object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'collections', None) is not None:
            serialized['collections'] = [
                item._to_dict() for item in self.collections
            ]
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this ListCollectionsResponse object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'ListCollectionsResponse') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'ListCollectionsResponse') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self.__eq__(other)
class ListFieldsResponse():
    """
    The list of fetched fields.

    The fields are returned using a fully qualified name format, however, the format
    differs slightly from that used by the query operations.
      * Fields which contain nested objects are assigned a type of "nested".
      * Fields which belong to a nested object are prefixed with `.properties` (for
        example, `warnings.properties.severity` means that the `warnings` object has a
        property called `severity`).

    :attr List[Field] fields: (optional) An array containing information about each
          field in the collections.
    """

    def __init__(self, *, fields: List['Field'] = None) -> None:
        """
        Initialize a ListFieldsResponse object.

        :param List[Field] fields: (optional) An array containing information about
               each field in the collections.
        """
        self.fields = fields

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'ListFieldsResponse':
        """Initialize a ListFieldsResponse object from a json dictionary."""
        unknown = set(_dict) - {'fields'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class ListFieldsResponse: '
                + ', '.join(unknown))
        kwargs = {}
        if 'fields' in _dict:
            kwargs['fields'] = [
                Field._from_dict(item) for item in _dict['fields']
            ]
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a ListFieldsResponse object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'fields', None) is not None:
            serialized['fields'] = [item._to_dict() for item in self.fields]
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this ListFieldsResponse object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'ListFieldsResponse') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'ListFieldsResponse') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self.__eq__(other)
class Notice():
    """
    A notice produced for the collection.

    :attr str notice_id: (optional) Identifies the notice. Many notices might have
          the same ID. This field exists so that user applications can programmatically
          identify a notice and take automatic corrective action. Typical notice IDs
          include: `index_failed`, `index_failed_too_many_requests`,
          `index_failed_incompatible_field`, `index_failed_cluster_unavailable`,
          `ingestion_timeout`, `ingestion_error`, `bad_request`, `internal_error`,
          `missing_model`, `unsupported_model`,
          `smart_document_understanding_failed_incompatible_field`,
          `smart_document_understanding_failed_internal_error`,
          `smart_document_understanding_failed_warning`,
          `smart_document_understanding_page_error`,
          `smart_document_understanding_page_warning`. **Note:** This is not a complete
          list, other values might be returned.
    :attr datetime created: (optional) The creation date of the collection in the
          format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'.
    :attr str document_id: (optional) Unique identifier of the document.
    :attr str collection_id: (optional) Unique identifier of the collection.
    :attr str query_id: (optional) Unique identifier of the query used for relevance
          training.
    :attr str severity: (optional) Severity level of the notice.
    :attr str step: (optional) Ingestion or training step in which the notice
          occurred.
    :attr str description: (optional) The description of the notice.
    """

    # Scalar attributes that copy through (de)serialization unchanged, in the
    # order they appear in the serialized form; `created` is handled
    # separately because it is converted to/from a datetime.
    _SCALAR_KEYS = ('notice_id', 'document_id', 'collection_id', 'query_id',
                    'severity', 'step', 'description')

    def __init__(self,
                 *,
                 notice_id: str = None,
                 created: datetime = None,
                 document_id: str = None,
                 collection_id: str = None,
                 query_id: str = None,
                 severity: str = None,
                 step: str = None,
                 description: str = None) -> None:
        """
        Initialize a Notice object.

        :param str notice_id: (optional) Identifies the notice; see the class
               docstring for typical ID values.
        :param datetime created: (optional) The creation date of the collection in
               the format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'.
        :param str document_id: (optional) Unique identifier of the document.
        :param str collection_id: (optional) Unique identifier of the collection.
        :param str query_id: (optional) Unique identifier of the query used for
               relevance training.
        :param str severity: (optional) Severity level of the notice.
        :param str step: (optional) Ingestion or training step in which the notice
               occurred.
        :param str description: (optional) The description of the notice.
        """
        self.notice_id = notice_id
        self.created = created
        self.document_id = document_id
        self.collection_id = collection_id
        self.query_id = query_id
        self.severity = severity
        self.step = step
        self.description = description

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'Notice':
        """Initialize a Notice object from a json dictionary."""
        known = cls._SCALAR_KEYS + ('created',)
        unknown = set(_dict) - set(known)
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class Notice: ' +
                ', '.join(unknown))
        kwargs = {key: _dict[key] for key in cls._SCALAR_KEYS if key in _dict}
        if 'created' in _dict:
            # Timestamps arrive as strings and are parsed to datetime.
            kwargs['created'] = string_to_datetime(_dict['created'])
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a Notice object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'notice_id', None) is not None:
            serialized['notice_id'] = self.notice_id
        if getattr(self, 'created', None) is not None:
            # Datetimes are rendered back to their string wire format.
            serialized['created'] = datetime_to_string(self.created)
        for attr in ('document_id', 'collection_id', 'query_id', 'severity',
                     'step', 'description'):
            value = getattr(self, attr, None)
            if value is not None:
                serialized[attr] = value
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this Notice object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'Notice') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'Notice') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self.__eq__(other)

    class SeverityEnum(Enum):
        """
        Severity level of the notice.
        """
        WARNING = "warning"
        ERROR = "error"
class QueryAggregation():
    """
    An abstract aggregation type produced by Discovery to analyze the input provided.

    Concrete aggregation kinds are dispatched via the `type` discriminator in
    `from_dict` (see `_get_class_by_discriminator`).

    :attr str type: The type of aggregation command used. Options include: term,
          histogram, timeslice, nested, filter, min, max, sum, average, unique_count, and
          top_hits.
    """

    def __init__(self, type: str) -> None:
        """
        Initialize a QueryAggregation object.

        :param str type: The type of aggregation command used. Options include:
               term, histogram, timeslice, nested, filter, min, max, sum, average,
               unique_count, and top_hits.
        """
        self.type = type

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QueryAggregation':
        """Initialize a QueryAggregation object from a json dictionary.

        Delegates to the concrete subclass selected by the `type`
        discriminator when one resolves; otherwise builds a plain
        QueryAggregation.

        :raises ValueError: if `type` is missing or unrecognized keys are
                present.
        """
        disc_class = cls._get_class_by_discriminator(_dict)
        if disc_class != cls:
            return disc_class.from_dict(_dict)
        args = {}
        valid_keys = ['type']
        bad_keys = set(_dict.keys()) - set(valid_keys)
        if bad_keys:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class QueryAggregation: '
                + ', '.join(bad_keys))
        if 'type' in _dict:
            args['type'] = _dict.get('type')
        else:
            raise ValueError(
                'Required property \'type\' not present in QueryAggregation JSON'
            )
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a QueryAggregation object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        _dict = {}
        if hasattr(self, 'type') and self.type is not None:
            _dict['type'] = self.type
        return _dict

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this QueryAggregation object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'QueryAggregation') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other: 'QueryAggregation') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other

    @classmethod
    def _get_class_by_discriminator(cls, _dict: Dict) -> object:
        """Resolve the model class named by the `type` discriminator.

        Unknown discriminator values fall back to this base class so that
        unrecognized aggregation types still deserialize.

        :raises ValueError: if the `type` discriminator is absent.
        :raises TypeError: if the resolved module attribute is not a class.
        """
        mapping = {}
        mapping['term'] = 'QueryTermAggregation'
        mapping['histogram'] = 'QueryHistogramAggregation'
        mapping['timeslice'] = 'QueryTimesliceAggregation'
        mapping['nested'] = 'QueryNestedAggregation'
        mapping['filter'] = 'QueryFilterAggregation'
        mapping['min'] = 'QueryCalculationAggregation'
        mapping['max'] = 'QueryCalculationAggregation'
        mapping['sum'] = 'QueryCalculationAggregation'
        mapping['average'] = 'QueryCalculationAggregation'
        mapping['unique_count'] = 'QueryCalculationAggregation'
        mapping['top_hits'] = 'QueryTopHitsAggregation'
        disc_value = _dict.get('type')
        if disc_value is None:
            raise ValueError(
                'Discriminator property \'type\' not found in QueryAggregation JSON'
            )
        class_name = mapping.get(disc_value, disc_value)
        try:
            disc_class = getattr(sys.modules[__name__], class_name)
        except AttributeError:
            # No module attribute by that name: fall back to the base class.
            disc_class = cls
        # BUG FIX: the original guard was `isinstance(disc_class, object)`,
        # which is True for *every* Python object, so the TypeError below was
        # unreachable dead code. The intended check is that the resolved
        # attribute is actually a class.
        if isinstance(disc_class, type):
            return disc_class
        raise TypeError('%s is not a discriminator class' % class_name)
class QueryHistogramAggregationResult():
    """
    Histogram numeric interval result.

    :attr int key: The value of the upper bound for the numeric segment.
    :attr int matching_results: Number of documents with the specified key as the
          upper bound.
    :attr List[QueryAggregation] aggregations: (optional) An array of sub
          aggregations.
    """

    def __init__(self,
                 key: int,
                 matching_results: int,
                 *,
                 aggregations: List['QueryAggregation'] = None) -> None:
        """
        Initialize a QueryHistogramAggregationResult object.

        :param int key: The value of the upper bound for the numeric segment.
        :param int matching_results: Number of documents with the specified key as
               the upper bound.
        :param List[QueryAggregation] aggregations: (optional) An array of sub
               aggregations.
        """
        self.key = key
        self.matching_results = matching_results
        self.aggregations = aggregations

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QueryHistogramAggregationResult':
        """Initialize a QueryHistogramAggregationResult object from a json dictionary."""
        unknown = set(_dict) - {'key', 'matching_results', 'aggregations'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class QueryHistogramAggregationResult: '
                + ', '.join(unknown))
        # Guard clauses for the two required properties.
        if 'key' not in _dict:
            raise ValueError(
                'Required property \'key\' not present in QueryHistogramAggregationResult JSON'
            )
        if 'matching_results' not in _dict:
            raise ValueError(
                'Required property \'matching_results\' not present in QueryHistogramAggregationResult JSON'
            )
        args = {'key': _dict['key'], 'matching_results': _dict['matching_results']}
        if 'aggregations' in _dict:
            args['aggregations'] = [
                QueryAggregation._from_dict(item) for item in _dict['aggregations']
            ]
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a QueryHistogramAggregationResult object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        for name in ('key', 'matching_results'):
            value = getattr(self, name, None)
            if value is not None:
                serialized[name] = value
        aggregations = getattr(self, 'aggregations', None)
        if aggregations is not None:
            serialized['aggregations'] = [item._to_dict() for item in aggregations]
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this QueryHistogramAggregationResult object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'QueryHistogramAggregationResult') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'QueryHistogramAggregationResult') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class QueryLargePassages():
    """
    Configuration for passage retrieval.

    :attr bool enabled: (optional) A passages query that returns the most relevant
          passages from the results.
    :attr bool per_document: (optional) When `true`, passages will be returned
          within their respective result.
    :attr int max_per_document: (optional) Maximum number of passages to return per
          result.
    :attr List[str] fields: (optional) A list of fields that passages are drawn
          from. If this parameter not specified, then all top-level fields are included.
    :attr int count: (optional) The maximum number of passages to return. The search
          returns fewer passages if the requested total is not found. The default is
          `10`. The maximum is `100`.
    :attr int characters: (optional) The approximate number of characters that any
          one passage will have.
    """

    def __init__(self,
                 *,
                 enabled: bool = None,
                 per_document: bool = None,
                 max_per_document: int = None,
                 fields: List[str] = None,
                 count: int = None,
                 characters: int = None) -> None:
        """
        Initialize a QueryLargePassages object.

        :param bool enabled: (optional) A passages query that returns the most
               relevant passages from the results.
        :param bool per_document: (optional) When `true`, passages will be returned
               within their respective result.
        :param int max_per_document: (optional) Maximum number of passages to
               return per result.
        :param List[str] fields: (optional) A list of fields that passages are
               drawn from. If this parameter not specified, then all top-level
               fields are included.
        :param int count: (optional) The maximum number of passages to return. The
               search returns fewer passages if the requested total is not found.
               The default is `10`. The maximum is `100`.
        :param int characters: (optional) The approximate number of characters that
               any one passage will have.
        """
        self.enabled = enabled
        self.per_document = per_document
        self.max_per_document = max_per_document
        self.fields = fields
        self.count = count
        self.characters = characters

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QueryLargePassages':
        """Initialize a QueryLargePassages object from a json dictionary."""
        known = ('enabled', 'per_document', 'max_per_document', 'fields',
                 'count', 'characters')
        unknown = set(_dict) - set(known)
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class QueryLargePassages: '
                + ', '.join(unknown))
        # Every property is optional: forward whichever ones are present.
        return cls(**{name: _dict[name] for name in known if name in _dict})

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a QueryLargePassages object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        names = ('enabled', 'per_document', 'max_per_document', 'fields',
                 'count', 'characters')
        return {
            name: getattr(self, name)
            for name in names
            if getattr(self, name, None) is not None
        }

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this QueryLargePassages object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'QueryLargePassages') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'QueryLargePassages') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class QueryLargeSuggestedRefinements():
    """
    Configuration for suggested refinements.

    :attr bool enabled: (optional) Whether to perform suggested refinements.
    :attr int count: (optional) Maximum number of suggested refinements texts to be
          returned. The default is `10`. The maximum is `100`.
    """

    def __init__(self, *, enabled: bool = None, count: int = None) -> None:
        """
        Initialize a QueryLargeSuggestedRefinements object.

        :param bool enabled: (optional) Whether to perform suggested refinements.
        :param int count: (optional) Maximum number of suggested refinements texts
               to be returned. The default is `10`. The maximum is `100`.
        """
        self.enabled = enabled
        self.count = count

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QueryLargeSuggestedRefinements':
        """Initialize a QueryLargeSuggestedRefinements object from a json dictionary."""
        unknown = set(_dict) - {'enabled', 'count'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class QueryLargeSuggestedRefinements: '
                + ', '.join(unknown))
        # Both properties are optional: forward whichever ones are present.
        return cls(**{name: _dict[name]
                      for name in ('enabled', 'count') if name in _dict})

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a QueryLargeSuggestedRefinements object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        return {
            name: getattr(self, name)
            for name in ('enabled', 'count')
            if getattr(self, name, None) is not None
        }

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this QueryLargeSuggestedRefinements object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'QueryLargeSuggestedRefinements') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'QueryLargeSuggestedRefinements') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class QueryLargeTableResults():
    """
    Configuration for table retrieval.

    :attr bool enabled: (optional) Whether to enable table retrieval.
    :attr int count: (optional) Maximum number of tables to return.
    """

    def __init__(self, *, enabled: bool = None, count: int = None) -> None:
        """
        Initialize a QueryLargeTableResults object.

        :param bool enabled: (optional) Whether to enable table retrieval.
        :param int count: (optional) Maximum number of tables to return.
        """
        self.enabled = enabled
        self.count = count

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QueryLargeTableResults':
        """Initialize a QueryLargeTableResults object from a json dictionary."""
        unknown = set(_dict) - {'enabled', 'count'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class QueryLargeTableResults: '
                + ', '.join(unknown))
        # Both properties are optional: forward whichever ones are present.
        return cls(**{name: _dict[name]
                      for name in ('enabled', 'count') if name in _dict})

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a QueryLargeTableResults object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        return {
            name: getattr(self, name)
            for name in ('enabled', 'count')
            if getattr(self, name, None) is not None
        }

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this QueryLargeTableResults object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'QueryLargeTableResults') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'QueryLargeTableResults') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class QueryNoticesResponse():
    """
    Object containing notice query results.

    :attr int matching_results: (optional) The number of matching results.
    :attr List[Notice] notices: (optional) Array of document results that match the
          query.
    """

    def __init__(self,
                 *,
                 matching_results: int = None,
                 notices: List['Notice'] = None) -> None:
        """
        Initialize a QueryNoticesResponse object.

        :param int matching_results: (optional) The number of matching results.
        :param List[Notice] notices: (optional) Array of document results that
               match the query.
        """
        self.matching_results = matching_results
        self.notices = notices

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QueryNoticesResponse':
        """Initialize a QueryNoticesResponse object from a json dictionary."""
        unknown = set(_dict) - {'matching_results', 'notices'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class QueryNoticesResponse: '
                + ', '.join(unknown))
        args = {}
        if 'matching_results' in _dict:
            args['matching_results'] = _dict['matching_results']
        if 'notices' in _dict:
            args['notices'] = [
                Notice._from_dict(item) for item in _dict['notices']
            ]
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a QueryNoticesResponse object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'matching_results', None) is not None:
            serialized['matching_results'] = self.matching_results
        notices = getattr(self, 'notices', None)
        if notices is not None:
            serialized['notices'] = [item._to_dict() for item in notices]
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this QueryNoticesResponse object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'QueryNoticesResponse') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'QueryNoticesResponse') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class QueryResponse():
"""
A response containing the documents and aggregations for the query.
:attr int matching_results: (optional) The number of matching results for the
query.
:attr List[QueryResult] results: (optional) Array of document results for the
query.
:attr List[QueryAggregation] aggregations: (optional) Array of aggregations for
the query.
:attr RetrievalDetails retrieval_details: (optional) An object contain retrieval
type information.
:attr str suggested_query: (optional) Suggested correction to the submitted
**natural_language_query** value.
:attr List[QuerySuggestedRefinement] suggested_refinements: (optional) Array of
suggested refinements.
:attr List[QueryTableResult] table_results: (optional) Array of table results.
"""
def __init__(self,
*,
matching_results: int = None,
results: List['QueryResult'] = None,
aggregations: List['QueryAggregation'] = None,
retrieval_details: 'RetrievalDetails' = None,
suggested_query: str = None,
suggested_refinements: List['QuerySuggestedRefinement'] = None,
table_results: List['QueryTableResult'] = None) -> None:
"""
Initialize a QueryResponse object.
:param int matching_results: (optional) The number of matching results for
the query.
:param List[QueryResult] results: (optional) Array of document results for
the query.
:param List[QueryAggregation] aggregations: (optional) Array of
aggregations for the query.
:param RetrievalDetails retrieval_details: (optional) An object contain
retrieval type information.
:param str suggested_query: (optional) Suggested correction to the
submitted **natural_language_query** value.
:param List[QuerySuggestedRefinement] suggested_refinements: (optional)
Array of suggested refinements.
:param List[QueryTableResult] table_results: (optional) Array of table
results.
"""
self.matching_results = matching_results
self.results = results
self.aggregations = aggregations
self.retrieval_details = retrieval_details
self.suggested_query = suggested_query
self.suggested_refinements = suggested_refinements
self.table_results = table_results
@classmethod
def from_dict(cls, _dict: Dict) -> 'QueryResponse':
"""Initialize a QueryResponse object from a json dictionary."""
args = {}
valid_keys = [
'matching_results', 'results', 'aggregations', 'retrieval_details',
'suggested_query', 'suggested_refinements', 'table_results'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class QueryResponse: '
+ ', '.join(bad_keys))
if 'matching_results' in _dict:
args['matching_results'] = _dict.get('matching_results')
if 'results' in _dict:
args['results'] = [
QueryResult._from_dict(x) for x in (_dict.get('results'))
]
if 'aggregations' in _dict:
args['aggregations'] = [
QueryAggregation._from_dict(x)
for x in (_dict.get('aggregations'))
]
if 'retrieval_details' in _dict:
args['retrieval_details'] = RetrievalDetails._from_dict(
_dict.get('retrieval_details'))
if 'suggested_query' in _dict:
args['suggested_query'] = _dict.get('suggested_query')
if 'suggested_refinements' in _dict:
args['suggested_refinements'] = [
QuerySuggestedRefinement._from_dict(x)
for x in (_dict.get('suggested_refinements'))
]
if 'table_results' in _dict:
args['table_results'] = [
QueryTableResult._from_dict(x)
for x in (_dict.get('table_results'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a QueryResponse object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'matching_results') and self.matching_results is not None:
_dict['matching_results'] = self.matching_results
if hasattr(self, 'results') and self.results is not None:
_dict['results'] = [x._to_dict() for x in self.results]
if hasattr(self, 'aggregations') and self.aggregations is not None:
_dict['aggregations'] = [x._to_dict() for x in self.aggregations]
if hasattr(self,
'retrieval_details') and self.retrieval_details is not None:
_dict['retrieval_details'] = self.retrieval_details._to_dict()
if hasattr(self,
'suggested_query') and self.suggested_query is not None:
_dict['suggested_query'] = self.suggested_query
if hasattr(self, 'suggested_refinements'
) and self.suggested_refinements is not None:
_dict['suggested_refinements'] = [
x._to_dict() for x in self.suggested_refinements
]
if hasattr(self, 'table_results') and self.table_results is not None:
_dict['table_results'] = [x._to_dict() for x in self.table_results]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this QueryResponse object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'QueryResponse') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'QueryResponse') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class QueryResult():
    """
    Result document for the specified query.

    Unlike most models in this module, a query result may carry arbitrary
    top-level fields in addition to the declared properties; unknown keys are
    kept as dynamic attributes (tracked in ``_additionalProperties``) so that
    ``to_dict`` can round-trip them.

    :attr str document_id: The unique identifier of the document.
    :attr dict metadata: (optional) Metadata of the document.
    :attr QueryResultMetadata result_metadata: Metadata of a query result.
    :attr List[QueryResultPassage] document_passages: (optional) Passages returned
          by Discovery.
    """

    def __init__(self,
                 document_id: str,
                 result_metadata: 'QueryResultMetadata',
                 *,
                 metadata: dict = None,
                 document_passages: List['QueryResultPassage'] = None,
                 **kwargs) -> None:
        """
        Initialize a QueryResult object.

        :param str document_id: The unique identifier of the document.
        :param QueryResultMetadata result_metadata: Metadata of a query result.
        :param dict metadata: (optional) Metadata of the document.
        :param List[QueryResultPassage] document_passages: (optional) Passages
               returned by Discovery.
        :param **kwargs: (optional) Any additional properties.
        """
        self.document_id = document_id
        self.metadata = metadata
        self.result_metadata = result_metadata
        self.document_passages = document_passages
        # Extra keyword arguments become dynamic attributes; the custom
        # __setattr__ below records their names for serialization.
        for _key, _value in kwargs.items():
            setattr(self, _key, _value)

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QueryResult':
        """Initialize a QueryResult object from a json dictionary."""
        args = {}
        # Work on a copy: each recognized key is deleted from `xtra` so that
        # whatever remains is forwarded to __init__ as additional properties.
        xtra = _dict.copy()
        if 'document_id' in _dict:
            args['document_id'] = _dict.get('document_id')
            del xtra['document_id']
        else:
            raise ValueError(
                'Required property \'document_id\' not present in QueryResult JSON'
            )
        if 'metadata' in _dict:
            args['metadata'] = _dict.get('metadata')
            del xtra['metadata']
        if 'result_metadata' in _dict:
            args['result_metadata'] = QueryResultMetadata._from_dict(
                _dict.get('result_metadata'))
            del xtra['result_metadata']
        else:
            raise ValueError(
                'Required property \'result_metadata\' not present in QueryResult JSON'
            )
        if 'document_passages' in _dict:
            args['document_passages'] = [
                QueryResultPassage._from_dict(x)
                for x in (_dict.get('document_passages'))
            ]
            del xtra['document_passages']
        args.update(xtra)
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a QueryResult object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        _dict = {}
        if hasattr(self, 'document_id') and self.document_id is not None:
            _dict['document_id'] = self.document_id
        if hasattr(self, 'metadata') and self.metadata is not None:
            _dict['metadata'] = self.metadata
        if hasattr(self,
                   'result_metadata') and self.result_metadata is not None:
            _dict['result_metadata'] = self.result_metadata._to_dict()
        if hasattr(self,
                   'document_passages') and self.document_passages is not None:
            _dict['document_passages'] = [
                x._to_dict() for x in self.document_passages
            ]
        # Serialize dynamic attributes recorded by __setattr__. Note these
        # are emitted as-is (no nested model conversion).
        if hasattr(self, '_additionalProperties'):
            for _key in self._additionalProperties:
                _value = getattr(self, _key, None)
                if _value is not None:
                    _dict[_key] = _value
        return _dict

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __setattr__(self, name: str, value: object) -> None:
        # Track every name outside the declared schema so to_dict can
        # serialize dynamic document fields alongside declared properties.
        properties = {
            'document_id', 'metadata', 'result_metadata', 'document_passages'
        }
        if not hasattr(self, '_additionalProperties'):
            super(QueryResult, self).__setattr__('_additionalProperties', set())
        if name not in properties:
            self._additionalProperties.add(name)
        super(QueryResult, self).__setattr__(name, value)

    def __str__(self) -> str:
        """Return a `str` version of this QueryResult object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'QueryResult') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other: 'QueryResult') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class QueryResultMetadata():
    """
    Metadata of a query result.

    :attr str document_retrieval_source: (optional) The document retrieval source
          that produced this search result.
    :attr str collection_id: The collection id associated with this training data
          set.
    :attr float confidence: (optional) The confidence score for the given result.
          Calculated based on how relevant the result is estimated to be. confidence
          can range from `0.0` to `1.0`. The higher the number, the more relevant the
          document. The `confidence` value for a result was calculated using the
          model specified in the `document_retrieval_strategy` field of the result
          set. This field is only returned if the **natural_language_query**
          parameter is specified in the query.
    """

    def __init__(self,
                 collection_id: str,
                 *,
                 document_retrieval_source: str = None,
                 confidence: float = None) -> None:
        """
        Initialize a QueryResultMetadata object.

        :param str collection_id: The collection id associated with this training
               data set.
        :param str document_retrieval_source: (optional) The document retrieval
               source that produced this search result.
        :param float confidence: (optional) The confidence score for the given
               result. Calculated based on how relevant the result is estimated to
               be. confidence can range from `0.0` to `1.0`. The higher the number,
               the more relevant the document.
        """
        self.document_retrieval_source = document_retrieval_source
        self.collection_id = collection_id
        self.confidence = confidence

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QueryResultMetadata':
        """Initialize a QueryResultMetadata object from a json dictionary."""
        unknown = set(_dict) - {
            'document_retrieval_source', 'collection_id', 'confidence'
        }
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class QueryResultMetadata: '
                + ', '.join(unknown))
        # collection_id is the only required property.
        if 'collection_id' not in _dict:
            raise ValueError(
                'Required property \'collection_id\' not present in QueryResultMetadata JSON'
            )
        args = {'collection_id': _dict['collection_id']}
        for name in ('document_retrieval_source', 'confidence'):
            if name in _dict:
                args[name] = _dict[name]
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a QueryResultMetadata object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        names = ('document_retrieval_source', 'collection_id', 'confidence')
        return {
            name: getattr(self, name)
            for name in names
            if getattr(self, name, None) is not None
        }

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this QueryResultMetadata object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'QueryResultMetadata') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'QueryResultMetadata') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
    class DocumentRetrievalSourceEnum(Enum):
        """
        The document retrieval source that produced this search result.
        """
        # Allowed discriminator values for `document_retrieval_source`.
        SEARCH = "search"
        CURATION = "curation"
class QueryResultPassage():
    """
    A passage query result.

    :attr str passage_text: (optional) The content of the extracted passage.
    :attr int start_offset: (optional) The position of the first character of the
          extracted passage in the originating field.
    :attr int end_offset: (optional) The position of the last character of the
          extracted passage in the originating field.
    :attr str field: (optional) The label of the field from which the passage has
          been extracted.
    """

    def __init__(self,
                 *,
                 passage_text: str = None,
                 start_offset: int = None,
                 end_offset: int = None,
                 field: str = None) -> None:
        """
        Initialize a QueryResultPassage object.

        :param str passage_text: (optional) The content of the extracted passage.
        :param int start_offset: (optional) The position of the first character of
               the extracted passage in the originating field.
        :param int end_offset: (optional) The position of the last character of the
               extracted passage in the originating field.
        :param str field: (optional) The label of the field from which the passage
               has been extracted.
        """
        self.passage_text = passage_text
        self.start_offset = start_offset
        self.end_offset = end_offset
        self.field = field

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QueryResultPassage':
        """Initialize a QueryResultPassage object from a json dictionary."""
        known = ('passage_text', 'start_offset', 'end_offset', 'field')
        unknown = set(_dict) - set(known)
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class QueryResultPassage: '
                + ', '.join(unknown))
        # Every property is optional: forward whichever ones are present.
        return cls(**{name: _dict[name] for name in known if name in _dict})

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a QueryResultPassage object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        names = ('passage_text', 'start_offset', 'end_offset', 'field')
        return {
            name: getattr(self, name)
            for name in names
            if getattr(self, name, None) is not None
        }

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this QueryResultPassage object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'QueryResultPassage') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'QueryResultPassage') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class QuerySuggestedRefinement():
    """
    A suggested additional query term or terms user to filter results.

    :attr str text: (optional) The text used to filter.
    """

    def __init__(self, *, text: str = None) -> None:
        """
        Initialize a QuerySuggestedRefinement object.

        :param str text: (optional) The text used to filter.
        """
        self.text = text

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QuerySuggestedRefinement':
        """Initialize a QuerySuggestedRefinement object from a json dictionary."""
        unknown = set(_dict) - {'text'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class QuerySuggestedRefinement: '
                + ', '.join(unknown))
        # The single property is optional: forward it when present.
        return cls(**{name: _dict[name] for name in ('text',) if name in _dict})

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a QuerySuggestedRefinement object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        return {
            name: getattr(self, name)
            for name in ('text',)
            if getattr(self, name, None) is not None
        }

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this QuerySuggestedRefinement object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'QuerySuggestedRefinement') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'QuerySuggestedRefinement') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class QueryTableResult():
    """
    A table whose content or context matches a search query.

    :attr str table_id: (optional) The identifier for the retrieved table.
    :attr str source_document_id: (optional) The identifier of the document the
          table was retrieved from.
    :attr str collection_id: (optional) The identifier of the collection the
          table was retrieved from.
    :attr str table_html: (optional) HTML snippet of the table info.
    :attr int table_html_offset: (optional) The offset of the table html snippet
          in the original document html.
    :attr TableResultTable table: (optional) Full table object retrieved from
          Table Understanding Enrichment.
    """

    def __init__(self,
                 *,
                 table_id: str = None,
                 source_document_id: str = None,
                 collection_id: str = None,
                 table_html: str = None,
                 table_html_offset: int = None,
                 table: 'TableResultTable' = None) -> None:
        """
        Initialize a QueryTableResult object.

        :param str table_id: (optional) The identifier for the retrieved table.
        :param str source_document_id: (optional) The identifier of the document
               the table was retrieved from.
        :param str collection_id: (optional) The identifier of the collection
               the table was retrieved from.
        :param str table_html: (optional) HTML snippet of the table info.
        :param int table_html_offset: (optional) The offset of the table html
               snippet in the original document html.
        :param TableResultTable table: (optional) Full table object retrieved
               from Table Understanding Enrichment.
        """
        self.table_id = table_id
        self.source_document_id = source_document_id
        self.collection_id = collection_id
        self.table_html = table_html
        self.table_html_offset = table_html_offset
        self.table = table

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QueryTableResult':
        """Initialize a QueryTableResult object from a json dictionary."""
        plain_keys = ('table_id', 'source_document_id', 'collection_id',
                      'table_html', 'table_html_offset')
        unknown = set(_dict.keys()) - set(plain_keys) - {'table'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class QueryTableResult: '
                + ', '.join(unknown))
        # Primitive fields are copied through; 'table' is a nested model.
        kwargs = {key: _dict[key] for key in plain_keys if key in _dict}
        if 'table' in _dict:
            kwargs['table'] = TableResultTable._from_dict(_dict['table'])
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a QueryTableResult object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        # Keys are emitted in canonical order; unset (None) values are skipped.
        for key in ('table_id', 'source_document_id', 'collection_id',
                    'table_html', 'table_html_offset'):
            value = getattr(self, key, None)
            if value is not None:
                serialized[key] = value
        table = getattr(self, 'table', None)
        if table is not None:
            serialized['table'] = table._to_dict()
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this QueryTableResult object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'QueryTableResult') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'QueryTableResult') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class QueryTermAggregationResult():
    """
    Top value result for the term aggregation.

    :attr str key: Value of the field with a non-zero frequency in the document set.
    :attr int matching_results: Number of documents containing the 'key'.
    :attr List[QueryAggregation] aggregations: (optional) An array of sub
          aggregations.
    """

    def __init__(self,
                 key: str,
                 matching_results: int,
                 *,
                 aggregations: List['QueryAggregation'] = None) -> None:
        """
        Initialize a QueryTermAggregationResult object.

        :param str key: Value of the field with a non-zero frequency in the
               document set.
        :param int matching_results: Number of documents containing the 'key'.
        :param List[QueryAggregation] aggregations: (optional) An array of sub
               aggregations.
        """
        self.key = key
        self.matching_results = matching_results
        self.aggregations = aggregations

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QueryTermAggregationResult':
        """Initialize a QueryTermAggregationResult object from a json dictionary."""
        unknown = set(_dict.keys()) - {'key', 'matching_results', 'aggregations'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class QueryTermAggregationResult: '
                + ', '.join(unknown))
        kwargs = {}
        # 'key' and 'matching_results' are mandatory in the JSON payload.
        for required in ('key', 'matching_results'):
            if required not in _dict:
                raise ValueError(
                    'Required property \'%s\' not present in QueryTermAggregationResult JSON'
                    % required)
            kwargs[required] = _dict[required]
        if 'aggregations' in _dict:
            kwargs['aggregations'] = [
                QueryAggregation._from_dict(item)
                for item in _dict['aggregations']
            ]
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a QueryTermAggregationResult object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        for key in ('key', 'matching_results'):
            value = getattr(self, key, None)
            if value is not None:
                serialized[key] = value
        aggregations = getattr(self, 'aggregations', None)
        if aggregations is not None:
            serialized['aggregations'] = [item._to_dict() for item in aggregations]
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this QueryTermAggregationResult object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'QueryTermAggregationResult') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'QueryTermAggregationResult') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class QueryTimesliceAggregationResult():
    """
    A timeslice interval segment.

    :attr str key_as_string: String date value of the upper bound for the
          timeslice interval in ISO-8601 format.
    :attr int key: Numeric date value of the upper bound for the timeslice
          interval in UNIX milliseconds since epoch.
    :attr int matching_results: Number of documents with the specified key as
          the upper bound.
    :attr List[QueryAggregation] aggregations: (optional) An array of sub
          aggregations.
    """

    def __init__(self,
                 key_as_string: str,
                 key: int,
                 matching_results: int,
                 *,
                 aggregations: List['QueryAggregation'] = None) -> None:
        """
        Initialize a QueryTimesliceAggregationResult object.

        :param str key_as_string: String date value of the upper bound for the
               timeslice interval in ISO-8601 format.
        :param int key: Numeric date value of the upper bound for the timeslice
               interval in UNIX milliseconds since epoch.
        :param int matching_results: Number of documents with the specified key
               as the upper bound.
        :param List[QueryAggregation] aggregations: (optional) An array of sub
               aggregations.
        """
        self.key_as_string = key_as_string
        self.key = key
        self.matching_results = matching_results
        self.aggregations = aggregations

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QueryTimesliceAggregationResult':
        """Initialize a QueryTimesliceAggregationResult object from a json dictionary."""
        unknown = set(_dict.keys()) - {
            'key_as_string', 'key', 'matching_results', 'aggregations'
        }
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class QueryTimesliceAggregationResult: '
                + ', '.join(unknown))
        kwargs = {}
        # All three interval properties are mandatory in the JSON payload.
        for required in ('key_as_string', 'key', 'matching_results'):
            if required not in _dict:
                raise ValueError(
                    'Required property \'%s\' not present in QueryTimesliceAggregationResult JSON'
                    % required)
            kwargs[required] = _dict[required]
        if 'aggregations' in _dict:
            kwargs['aggregations'] = [
                QueryAggregation._from_dict(item)
                for item in _dict['aggregations']
            ]
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a QueryTimesliceAggregationResult object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        for key in ('key_as_string', 'key', 'matching_results'):
            value = getattr(self, key, None)
            if value is not None:
                serialized[key] = value
        aggregations = getattr(self, 'aggregations', None)
        if aggregations is not None:
            serialized['aggregations'] = [item._to_dict() for item in aggregations]
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this QueryTimesliceAggregationResult object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'QueryTimesliceAggregationResult') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'QueryTimesliceAggregationResult') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class QueryTopHitsAggregationResult():
    """
    A query response containing the matching documents for the preceding aggregations.

    :attr int matching_results: Number of matching results.
    :attr List[dict] hits: (optional) An array of the document results.
    """

    def __init__(self,
                 matching_results: int,
                 *,
                 hits: List[dict] = None) -> None:
        """
        Initialize a QueryTopHitsAggregationResult object.

        :param int matching_results: Number of matching results.
        :param List[dict] hits: (optional) An array of the document results.
        """
        self.matching_results = matching_results
        self.hits = hits

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QueryTopHitsAggregationResult':
        """Initialize a QueryTopHitsAggregationResult object from a json dictionary."""
        unknown = set(_dict.keys()) - {'matching_results', 'hits'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class QueryTopHitsAggregationResult: '
                + ', '.join(unknown))
        if 'matching_results' not in _dict:
            raise ValueError(
                'Required property \'matching_results\' not present in QueryTopHitsAggregationResult JSON'
            )
        kwargs = {'matching_results': _dict['matching_results']}
        # 'hits' are plain dictionaries, so no nested-model conversion is done.
        if 'hits' in _dict:
            kwargs['hits'] = _dict['hits']
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a QueryTopHitsAggregationResult object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'matching_results', None) is not None:
            serialized['matching_results'] = self.matching_results
        if getattr(self, 'hits', None) is not None:
            serialized['hits'] = self.hits
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this QueryTopHitsAggregationResult object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'QueryTopHitsAggregationResult') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'QueryTopHitsAggregationResult') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class RetrievalDetails():
    """
    An object contain retrieval type information.

    :attr str document_retrieval_strategy: (optional) Identifies the document
          retrieval strategy used for this query. `relevancy_training` indicates
          that the results were returned using a relevancy trained model.
          **Note**: In the event of trained collections being queried, but the
          trained model is not used to return results, the
          **document_retrieval_strategy** will be listed as `untrained`.
    """

    def __init__(self, *, document_retrieval_strategy: str = None) -> None:
        """
        Initialize a RetrievalDetails object.

        :param str document_retrieval_strategy: (optional) Identifies the
               document retrieval strategy used for this query.
               `relevancy_training` indicates that the results were returned
               using a relevancy trained model.
               **Note**: In the event of trained collections being queried, but
               the trained model is not used to return results, the
               **document_retrieval_strategy** will be listed as `untrained`.
        """
        self.document_retrieval_strategy = document_retrieval_strategy

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'RetrievalDetails':
        """Initialize a RetrievalDetails object from a json dictionary."""
        unknown = set(_dict.keys()) - {'document_retrieval_strategy'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class RetrievalDetails: '
                + ', '.join(unknown))
        kwargs = {}
        if 'document_retrieval_strategy' in _dict:
            kwargs['document_retrieval_strategy'] = _dict[
                'document_retrieval_strategy']
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a RetrievalDetails object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        strategy = getattr(self, 'document_retrieval_strategy', None)
        if strategy is not None:
            serialized['document_retrieval_strategy'] = strategy
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this RetrievalDetails object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'RetrievalDetails') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'RetrievalDetails') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other

    class DocumentRetrievalStrategyEnum(Enum):
        """
        Identifies the document retrieval strategy used for this query.
        `relevancy_training` indicates that the results were returned using a
        relevancy trained model.
        **Note**: In the event of trained collections being queried, but the
        trained model is not used to return results, the
        **document_retrieval_strategy** will be listed as `untrained`.
        """
        UNTRAINED = "untrained"
        RELEVANCY_TRAINING = "relevancy_training"
class TableBodyCells():
    """
    Cells that are not table header, column header, or row header cells.

    :attr str cell_id: (optional) The unique ID of the cell in the current table.
    :attr TableElementLocation location: (optional) The numeric location of the
          identified element in the document, represented with two integers
          labeled `begin` and `end`.
    :attr str text: (optional) The textual contents of this cell from the input
          document without associated markup content.
    :attr int row_index_begin: (optional) The `begin` index of this cell's `row`
          location in the current table.
    :attr int row_index_end: (optional) The `end` index of this cell's `row`
          location in the current table.
    :attr int column_index_begin: (optional) The `begin` index of this cell's
          `column` location in the current table.
    :attr int column_index_end: (optional) The `end` index of this cell's
          `column` location in the current table.
    :attr List[TableRowHeaderIds] row_header_ids: (optional) A list of table row
          header ids.
    :attr List[TableRowHeaderTexts] row_header_texts: (optional) A list of table
          row header texts.
    :attr List[TableRowHeaderTextsNormalized] row_header_texts_normalized:
          (optional) A list of table row header texts normalized.
    :attr List[TableColumnHeaderIds] column_header_ids: (optional) A list of
          table column header ids.
    :attr List[TableColumnHeaderTexts] column_header_texts: (optional) A list of
          table column header texts.
    :attr List[TableColumnHeaderTextsNormalized] column_header_texts_normalized:
          (optional) A list of table column header texts normalized.
    :attr List[DocumentAttribute] attributes: (optional) A list of document
          attributes.
    """

    def __init__(self,
                 *,
                 cell_id: str = None,
                 location: 'TableElementLocation' = None,
                 text: str = None,
                 row_index_begin: int = None,
                 row_index_end: int = None,
                 column_index_begin: int = None,
                 column_index_end: int = None,
                 row_header_ids: List['TableRowHeaderIds'] = None,
                 row_header_texts: List['TableRowHeaderTexts'] = None,
                 row_header_texts_normalized: List[
                     'TableRowHeaderTextsNormalized'] = None,
                 column_header_ids: List['TableColumnHeaderIds'] = None,
                 column_header_texts: List['TableColumnHeaderTexts'] = None,
                 column_header_texts_normalized: List[
                     'TableColumnHeaderTextsNormalized'] = None,
                 attributes: List['DocumentAttribute'] = None) -> None:
        """
        Initialize a TableBodyCells object.

        :param str cell_id: (optional) The unique ID of the cell in the current
               table.
        :param TableElementLocation location: (optional) The numeric location of
               the identified element in the document, represented with two
               integers labeled `begin` and `end`.
        :param str text: (optional) The textual contents of this cell from the
               input document without associated markup content.
        :param int row_index_begin: (optional) The `begin` index of this cell's
               `row` location in the current table.
        :param int row_index_end: (optional) The `end` index of this cell's
               `row` location in the current table.
        :param int column_index_begin: (optional) The `begin` index of this
               cell's `column` location in the current table.
        :param int column_index_end: (optional) The `end` index of this cell's
               `column` location in the current table.
        :param List[TableRowHeaderIds] row_header_ids: (optional) A list of
               table row header ids.
        :param List[TableRowHeaderTexts] row_header_texts: (optional) A list of
               table row header texts.
        :param List[TableRowHeaderTextsNormalized] row_header_texts_normalized:
               (optional) A list of table row header texts normalized.
        :param List[TableColumnHeaderIds] column_header_ids: (optional) A list
               of table column header ids.
        :param List[TableColumnHeaderTexts] column_header_texts: (optional) A
               list of table column header texts.
        :param List[TableColumnHeaderTextsNormalized]
               column_header_texts_normalized: (optional) A list of table column
               header texts normalized.
        :param List[DocumentAttribute] attributes: (optional) A list of document
               attributes.
        """
        self.cell_id = cell_id
        self.location = location
        self.text = text
        self.row_index_begin = row_index_begin
        self.row_index_end = row_index_end
        self.column_index_begin = column_index_begin
        self.column_index_end = column_index_end
        self.row_header_ids = row_header_ids
        self.row_header_texts = row_header_texts
        self.row_header_texts_normalized = row_header_texts_normalized
        self.column_header_ids = column_header_ids
        self.column_header_texts = column_header_texts
        self.column_header_texts_normalized = column_header_texts_normalized
        self.attributes = attributes

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TableBodyCells':
        """Initialize a TableBodyCells object from a json dictionary."""
        recognized = {
            'cell_id', 'location', 'text', 'row_index_begin', 'row_index_end',
            'column_index_begin', 'column_index_end', 'row_header_ids',
            'row_header_texts', 'row_header_texts_normalized',
            'column_header_ids', 'column_header_texts',
            'column_header_texts_normalized', 'attributes'
        }
        unknown = set(_dict.keys()) - recognized
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TableBodyCells: '
                + ', '.join(unknown))
        # JSON-primitive fields are copied straight through.
        kwargs = {
            key: _dict[key]
            for key in ('cell_id', 'text', 'row_index_begin', 'row_index_end',
                        'column_index_begin', 'column_index_end')
            if key in _dict
        }
        # Nested model fields are converted through their model classes.
        if 'location' in _dict:
            kwargs['location'] = TableElementLocation._from_dict(
                _dict['location'])
        if 'row_header_ids' in _dict:
            kwargs['row_header_ids'] = [
                TableRowHeaderIds._from_dict(item)
                for item in _dict['row_header_ids']
            ]
        if 'row_header_texts' in _dict:
            kwargs['row_header_texts'] = [
                TableRowHeaderTexts._from_dict(item)
                for item in _dict['row_header_texts']
            ]
        if 'row_header_texts_normalized' in _dict:
            kwargs['row_header_texts_normalized'] = [
                TableRowHeaderTextsNormalized._from_dict(item)
                for item in _dict['row_header_texts_normalized']
            ]
        if 'column_header_ids' in _dict:
            kwargs['column_header_ids'] = [
                TableColumnHeaderIds._from_dict(item)
                for item in _dict['column_header_ids']
            ]
        if 'column_header_texts' in _dict:
            kwargs['column_header_texts'] = [
                TableColumnHeaderTexts._from_dict(item)
                for item in _dict['column_header_texts']
            ]
        if 'column_header_texts_normalized' in _dict:
            kwargs['column_header_texts_normalized'] = [
                TableColumnHeaderTextsNormalized._from_dict(item)
                for item in _dict['column_header_texts_normalized']
            ]
        if 'attributes' in _dict:
            kwargs['attributes'] = [
                DocumentAttribute._from_dict(item)
                for item in _dict['attributes']
            ]
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a TableBodyCells object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        plain = {'cell_id', 'text', 'row_index_begin', 'row_index_end',
                 'column_index_begin', 'column_index_end'}
        serialized = {}
        # Emit keys in canonical order; unset (None) attributes are skipped.
        for key in ('cell_id', 'location', 'text', 'row_index_begin',
                    'row_index_end', 'column_index_begin', 'column_index_end',
                    'row_header_ids', 'row_header_texts',
                    'row_header_texts_normalized', 'column_header_ids',
                    'column_header_texts', 'column_header_texts_normalized',
                    'attributes'):
            value = getattr(self, key, None)
            if value is None:
                continue
            if key in plain:
                serialized[key] = value
            elif key == 'location':
                serialized[key] = value._to_dict()
            else:
                # Remaining fields are lists of nested model objects.
                serialized[key] = [item._to_dict() for item in value]
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this TableBodyCells object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'TableBodyCells') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'TableBodyCells') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class TableCellKey():
    """
    A key in a key-value pair.

    :attr str cell_id: (optional) The unique ID of the key in the table.
    :attr TableElementLocation location: (optional) The numeric location of the
          identified element in the document, represented with two integers
          labeled `begin` and `end`.
    :attr str text: (optional) The text content of the table cell without HTML
          markup.
    """

    def __init__(self,
                 *,
                 cell_id: str = None,
                 location: 'TableElementLocation' = None,
                 text: str = None) -> None:
        """
        Initialize a TableCellKey object.

        :param str cell_id: (optional) The unique ID of the key in the table.
        :param TableElementLocation location: (optional) The numeric location of
               the identified element in the document, represented with two
               integers labeled `begin` and `end`.
        :param str text: (optional) The text content of the table cell without
               HTML markup.
        """
        self.cell_id = cell_id
        self.location = location
        self.text = text

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TableCellKey':
        """Initialize a TableCellKey object from a json dictionary."""
        unknown = set(_dict.keys()) - {'cell_id', 'location', 'text'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TableCellKey: '
                + ', '.join(unknown))
        kwargs = {}
        if 'cell_id' in _dict:
            kwargs['cell_id'] = _dict['cell_id']
        # 'location' is a nested model and is converted through its class.
        if 'location' in _dict:
            kwargs['location'] = TableElementLocation._from_dict(
                _dict['location'])
        if 'text' in _dict:
            kwargs['text'] = _dict['text']
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a TableCellKey object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'cell_id', None) is not None:
            serialized['cell_id'] = self.cell_id
        location = getattr(self, 'location', None)
        if location is not None:
            serialized['location'] = location._to_dict()
        if getattr(self, 'text', None) is not None:
            serialized['text'] = self.text
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this TableCellKey object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'TableCellKey') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'TableCellKey') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class TableCellValues():
    """
    A value in a key-value pair.

    :attr str cell_id: (optional) The unique ID of the value in the table.
    :attr TableElementLocation location: (optional) The numeric location of the
          identified element in the document, represented with two integers
          labeled `begin` and `end`.
    :attr str text: (optional) The text content of the table cell without HTML
          markup.
    """

    def __init__(self,
                 *,
                 cell_id: str = None,
                 location: 'TableElementLocation' = None,
                 text: str = None) -> None:
        """
        Initialize a TableCellValues object.

        :param str cell_id: (optional) The unique ID of the value in the table.
        :param TableElementLocation location: (optional) The numeric location of
               the identified element in the document, represented with two
               integers labeled `begin` and `end`.
        :param str text: (optional) The text content of the table cell without
               HTML markup.
        """
        self.cell_id = cell_id
        self.location = location
        self.text = text

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TableCellValues':
        """Initialize a TableCellValues object from a json dictionary."""
        unknown = set(_dict.keys()) - {'cell_id', 'location', 'text'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TableCellValues: '
                + ', '.join(unknown))
        kwargs = {}
        if 'cell_id' in _dict:
            kwargs['cell_id'] = _dict['cell_id']
        # 'location' is a nested model and is converted through its class.
        if 'location' in _dict:
            kwargs['location'] = TableElementLocation._from_dict(
                _dict['location'])
        if 'text' in _dict:
            kwargs['text'] = _dict['text']
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a TableCellValues object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'cell_id', None) is not None:
            serialized['cell_id'] = self.cell_id
        location = getattr(self, 'location', None)
        if location is not None:
            serialized['location'] = location._to_dict()
        if getattr(self, 'text', None) is not None:
            serialized['text'] = self.text
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this TableCellValues object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'TableCellValues') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'TableCellValues') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class TableColumnHeaderIds():
    """
    An array of values, each being the `id` value of a column header that is
    applicable to the current cell.

    :attr str id: (optional) The `id` value of a column header.
    """

    def __init__(self, *, id: str = None) -> None:
        """
        Initialize a TableColumnHeaderIds object.

        :param str id: (optional) The `id` value of a column header.
        """
        self.id = id

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TableColumnHeaderIds':
        """Initialize a TableColumnHeaderIds object from a json dictionary."""
        unknown = set(_dict.keys()) - {'id'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TableColumnHeaderIds: '
                + ', '.join(unknown))
        kwargs = {}
        if 'id' in _dict:
            kwargs['id'] = _dict['id']
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a TableColumnHeaderIds object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        if getattr(self, 'id', None) is not None:
            serialized['id'] = self.id
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this TableColumnHeaderIds object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'TableColumnHeaderIds') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'TableColumnHeaderIds') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class TableColumnHeaderTexts():
    """
    An array of values, each being the `text` value of a column header that is applicable
    to the current cell.

    :attr str text: (optional) The `text` value of a column header.
    """

    def __init__(self, *, text: str = None) -> None:
        """
        Initialize a TableColumnHeaderTexts object.

        :param str text: (optional) The `text` value of a column header.
        """
        self.text = text

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TableColumnHeaderTexts':
        """Initialize a TableColumnHeaderTexts object from a json dictionary."""
        # Reject any key other than the single supported field.
        unrecognized = {key for key in _dict if key != 'text'}
        if unrecognized:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TableColumnHeaderTexts: '
                + ', '.join(unrecognized))
        if 'text' in _dict:
            return cls(text=_dict.get('text'))
        return cls()

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a TableColumnHeaderTexts object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        current = getattr(self, 'text', None)
        return {} if current is None else {'text': current}

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this TableColumnHeaderTexts object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'TableColumnHeaderTexts') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'TableColumnHeaderTexts') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class TableColumnHeaderTextsNormalized():
    """
    If you provide customization input, the normalized version of the column header texts
    according to the customization; otherwise, the same value as `column_header_texts`.

    :attr str text_normalized: (optional) The normalized version of a column header
    text.
    """

    def __init__(self, *, text_normalized: str = None) -> None:
        """
        Initialize a TableColumnHeaderTextsNormalized object.

        :param str text_normalized: (optional) The normalized version of a column
               header text.
        """
        self.text_normalized = text_normalized

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TableColumnHeaderTextsNormalized':
        """Initialize a TableColumnHeaderTextsNormalized object from a json dictionary."""
        # Reject any key other than the single supported field.
        unrecognized = {key for key in _dict if key != 'text_normalized'}
        if unrecognized:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TableColumnHeaderTextsNormalized: '
                + ', '.join(unrecognized))
        if 'text_normalized' in _dict:
            return cls(text_normalized=_dict.get('text_normalized'))
        return cls()

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a TableColumnHeaderTextsNormalized object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        current = getattr(self, 'text_normalized', None)
        return {} if current is None else {'text_normalized': current}

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this TableColumnHeaderTextsNormalized object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'TableColumnHeaderTextsNormalized') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'TableColumnHeaderTextsNormalized') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class TableColumnHeaders():
    """
    Column-level cells, each applicable as a header to other cells in the same column as
    itself, of the current table.

    :attr str cell_id: (optional) The unique ID of the cell in the current table.
    :attr object location: (optional) The location of the column header cell in the
          current table as defined by its `begin` and `end` offsets, respectfully, in
          the input document.
    :attr str text: (optional) The textual contents of this cell from the input
          document without associated markup content.
    :attr str text_normalized: (optional) If you provide customization input, the
          normalized version of the cell text according to the customization;
          otherwise, the same value as `text`.
    :attr int row_index_begin: (optional) The `begin` index of this cell's `row`
          location in the current table.
    :attr int row_index_end: (optional) The `end` index of this cell's `row` location
          in the current table.
    :attr int column_index_begin: (optional) The `begin` index of this cell's
          `column` location in the current table.
    :attr int column_index_end: (optional) The `end` index of this cell's `column`
          location in the current table.
    """

    # Every field is copied verbatim to/from the json dictionary.
    # NOTE(review): unlike TableRowHeaders, `location` here is kept as a raw
    # object (not converted to TableElementLocation) — presumably mirroring the
    # service's API schema; confirm before unifying.
    _FIELD_NAMES = None  # populated lazily below is avoided; names are inlined.

    def __init__(self,
                 *,
                 cell_id: str = None,
                 location: object = None,
                 text: str = None,
                 text_normalized: str = None,
                 row_index_begin: int = None,
                 row_index_end: int = None,
                 column_index_begin: int = None,
                 column_index_end: int = None) -> None:
        """
        Initialize a TableColumnHeaders object.

        :param str cell_id: (optional) The unique ID of the cell in the current
               table.
        :param object location: (optional) The location of the column header cell
               in the current table as defined by its `begin` and `end` offsets,
               respectfully, in the input document.
        :param str text: (optional) The textual contents of this cell from the
               input document without associated markup content.
        :param str text_normalized: (optional) If you provide customization input,
               the normalized version of the cell text according to the
               customization; otherwise, the same value as `text`.
        :param int row_index_begin: (optional) The `begin` index of this cell's
               `row` location in the current table.
        :param int row_index_end: (optional) The `end` index of this cell's `row`
               location in the current table.
        :param int column_index_begin: (optional) The `begin` index of this cell's
               `column` location in the current table.
        :param int column_index_end: (optional) The `end` index of this cell's
               `column` location in the current table.
        """
        self.cell_id = cell_id
        self.location = location
        self.text = text
        self.text_normalized = text_normalized
        self.row_index_begin = row_index_begin
        self.row_index_end = row_index_end
        self.column_index_begin = column_index_begin
        self.column_index_end = column_index_end

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TableColumnHeaders':
        """Initialize a TableColumnHeaders object from a json dictionary."""
        known = ('cell_id', 'location', 'text', 'text_normalized',
                 'row_index_begin', 'row_index_end', 'column_index_begin',
                 'column_index_end')
        bad_keys = set(_dict.keys()) - set(known)
        if bad_keys:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TableColumnHeaders: '
                + ', '.join(bad_keys))
        # All fields are passed through unchanged.
        return cls(**{name: _dict[name] for name in known if name in _dict})

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a TableColumnHeaders object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        for name in ('cell_id', 'location', 'text', 'text_normalized',
                     'row_index_begin', 'row_index_end', 'column_index_begin',
                     'column_index_end'):
            value = getattr(self, name, None)
            if value is not None:
                serialized[name] = value
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this TableColumnHeaders object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'TableColumnHeaders') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'TableColumnHeaders') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class TableElementLocation():
    """
    The numeric location of the identified element in the document, represented with two
    integers labeled `begin` and `end`.

    :attr int begin: The element's `begin` index.
    :attr int end: The element's `end` index.
    """

    def __init__(self, begin: int, end: int) -> None:
        """
        Initialize a TableElementLocation object.

        :param int begin: The element's `begin` index.
        :param int end: The element's `end` index.
        """
        self.begin = begin
        self.end = end

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TableElementLocation':
        """Initialize a TableElementLocation object from a json dictionary."""
        bad_keys = set(_dict.keys()) - {'begin', 'end'}
        if bad_keys:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TableElementLocation: '
                + ', '.join(bad_keys))
        # Both properties are mandatory in the wire format.
        for required in ('begin', 'end'):
            if required not in _dict:
                raise ValueError(
                    'Required property \'' + required +
                    '\' not present in TableElementLocation JSON')
        return cls(begin=_dict.get('begin'), end=_dict.get('end'))

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a TableElementLocation object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        for name in ('begin', 'end'):
            value = getattr(self, name, None)
            if value is not None:
                serialized[name] = value
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this TableElementLocation object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'TableElementLocation') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'TableElementLocation') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class TableHeaders():
    """
    The contents of the current table's header.

    :attr str cell_id: (optional) The unique ID of the cell in the current table.
    :attr object location: (optional) The location of the table header cell in the
          current table as defined by its `begin` and `end` offsets, respectfully, in
          the input document.
    :attr str text: (optional) The textual contents of the cell from the input
          document without associated markup content.
    :attr int row_index_begin: (optional) The `begin` index of this cell's `row`
          location in the current table.
    :attr int row_index_end: (optional) The `end` index of this cell's `row` location
          in the current table.
    :attr int column_index_begin: (optional) The `begin` index of this cell's
          `column` location in the current table.
    :attr int column_index_end: (optional) The `end` index of this cell's `column`
          location in the current table.
    """

    def __init__(self,
                 *,
                 cell_id: str = None,
                 location: object = None,
                 text: str = None,
                 row_index_begin: int = None,
                 row_index_end: int = None,
                 column_index_begin: int = None,
                 column_index_end: int = None) -> None:
        """
        Initialize a TableHeaders object.

        :param str cell_id: (optional) The unique ID of the cell in the current
               table.
        :param object location: (optional) The location of the table header cell in
               the current table as defined by its `begin` and `end` offsets,
               respectfully, in the input document.
        :param str text: (optional) The textual contents of the cell from the input
               document without associated markup content.
        :param int row_index_begin: (optional) The `begin` index of this cell's
               `row` location in the current table.
        :param int row_index_end: (optional) The `end` index of this cell's `row`
               location in the current table.
        :param int column_index_begin: (optional) The `begin` index of this cell's
               `column` location in the current table.
        :param int column_index_end: (optional) The `end` index of this cell's
               `column` location in the current table.
        """
        self.cell_id = cell_id
        self.location = location
        self.text = text
        self.row_index_begin = row_index_begin
        self.row_index_end = row_index_end
        self.column_index_begin = column_index_begin
        self.column_index_end = column_index_end

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TableHeaders':
        """Initialize a TableHeaders object from a json dictionary."""
        known = ('cell_id', 'location', 'text', 'row_index_begin',
                 'row_index_end', 'column_index_begin', 'column_index_end')
        bad_keys = set(_dict.keys()) - set(known)
        if bad_keys:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TableHeaders: '
                + ', '.join(bad_keys))
        # All fields are passed through unchanged (location stays a raw object).
        return cls(**{name: _dict[name] for name in known if name in _dict})

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a TableHeaders object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        for name in ('cell_id', 'location', 'text', 'row_index_begin',
                     'row_index_end', 'column_index_begin', 'column_index_end'):
            value = getattr(self, name, None)
            if value is not None:
                serialized[name] = value
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this TableHeaders object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'TableHeaders') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'TableHeaders') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class TableKeyValuePairs():
    """
    Key-value pairs detected across cell boundaries.

    :attr TableCellKey key: (optional) A key in a key-value pair.
    :attr List[TableCellValues] value: (optional) A list of values in a key-value
    pair.
    """

    def __init__(self,
                 *,
                 key: 'TableCellKey' = None,
                 value: List['TableCellValues'] = None) -> None:
        """
        Initialize a TableKeyValuePairs object.

        :param TableCellKey key: (optional) A key in a key-value pair.
        :param List[TableCellValues] value: (optional) A list of values in a
               key-value pair.
        """
        self.key = key
        self.value = value

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TableKeyValuePairs':
        """Initialize a TableKeyValuePairs object from a json dictionary."""
        bad_keys = set(_dict.keys()) - {'key', 'value'}
        if bad_keys:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TableKeyValuePairs: '
                + ', '.join(bad_keys))
        args = {}
        # Nested models are hydrated through their own deserializers.
        if 'key' in _dict:
            args['key'] = TableCellKey._from_dict(_dict.get('key'))
        if 'value' in _dict:
            args['value'] = [
                TableCellValues._from_dict(entry)
                for entry in _dict.get('value')
            ]
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a TableKeyValuePairs object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        key = getattr(self, 'key', None)
        if key is not None:
            serialized['key'] = key._to_dict()
        value = getattr(self, 'value', None)
        if value is not None:
            serialized['value'] = [entry._to_dict() for entry in value]
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this TableKeyValuePairs object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'TableKeyValuePairs') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'TableKeyValuePairs') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class TableResultTable():
    """
    Full table object retrieved from Table Understanding Enrichment.

    :attr TableElementLocation location: (optional) The numeric location of the
          identified element in the document, represented with two integers labeled
          `begin` and `end`.
    :attr str text: (optional) The textual contents of the current table from the
          input document without associated markup content.
    :attr TableTextLocation section_title: (optional) Text and associated location
          within a table.
    :attr TableTextLocation title: (optional) Text and associated location within a
          table.
    :attr List[TableHeaders] table_headers: (optional) An array of table-level cells
          that apply as headers to all the other cells in the current table.
    :attr List[TableRowHeaders] row_headers: (optional) An array of row-level cells,
          each applicable as a header to other cells in the same row as itself, of
          the current table.
    :attr List[TableColumnHeaders] column_headers: (optional) An array of
          column-level cells, each applicable as a header to other cells in the same
          column as itself, of the current table.
    :attr List[TableKeyValuePairs] key_value_pairs: (optional) An array of key-value
          pairs identified in the current table.
    :attr List[TableBodyCells] body_cells: (optional) An array of cells that are
          neither table header nor column header nor row header cells, of the
          current table with corresponding row and column header associations.
    :attr List[TableTextLocation] contexts: (optional) An array of lists of textual
          entries across the document related to the current table being parsed.
    """

    def __init__(self,
                 *,
                 location: 'TableElementLocation' = None,
                 text: str = None,
                 section_title: 'TableTextLocation' = None,
                 title: 'TableTextLocation' = None,
                 table_headers: List['TableHeaders'] = None,
                 row_headers: List['TableRowHeaders'] = None,
                 column_headers: List['TableColumnHeaders'] = None,
                 key_value_pairs: List['TableKeyValuePairs'] = None,
                 body_cells: List['TableBodyCells'] = None,
                 contexts: List['TableTextLocation'] = None) -> None:
        """
        Initialize a TableResultTable object.

        :param TableElementLocation location: (optional) The numeric location of
               the identified element in the document, represented with two
               integers labeled `begin` and `end`.
        :param str text: (optional) The textual contents of the current table from
               the input document without associated markup content.
        :param TableTextLocation section_title: (optional) Text and associated
               location within a table.
        :param TableTextLocation title: (optional) Text and associated location
               within a table.
        :param List[TableHeaders] table_headers: (optional) An array of table-level
               cells that apply as headers to all the other cells in the current
               table.
        :param List[TableRowHeaders] row_headers: (optional) An array of row-level
               cells, each applicable as a header to other cells in the same row as
               itself, of the current table.
        :param List[TableColumnHeaders] column_headers: (optional) An array of
               column-level cells, each applicable as a header to other cells in
               the same column as itself, of the current table.
        :param List[TableKeyValuePairs] key_value_pairs: (optional) An array of
               key-value pairs identified in the current table.
        :param List[TableBodyCells] body_cells: (optional) An array of cells that
               are neither table header nor column header nor row header cells, of
               the current table with corresponding row and column header
               associations.
        :param List[TableTextLocation] contexts: (optional) An array of lists of
               textual entries across the document related to the current table
               being parsed.
        """
        self.location = location
        self.text = text
        self.section_title = section_title
        self.title = title
        self.table_headers = table_headers
        self.row_headers = row_headers
        self.column_headers = column_headers
        self.key_value_pairs = key_value_pairs
        self.body_cells = body_cells
        self.contexts = contexts

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TableResultTable':
        """Initialize a TableResultTable object from a json dictionary."""
        known = ('location', 'text', 'section_title', 'title', 'table_headers',
                 'row_headers', 'column_headers', 'key_value_pairs',
                 'body_cells', 'contexts')
        bad_keys = set(_dict.keys()) - set(known)
        if bad_keys:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TableResultTable: '
                + ', '.join(bad_keys))
        args = {}
        # Scalar and single-model fields.  Each nested model class is referenced
        # only when its key is present, so absent fields never touch the class.
        if 'location' in _dict:
            args['location'] = TableElementLocation._from_dict(_dict['location'])
        if 'text' in _dict:
            args['text'] = _dict['text']
        if 'section_title' in _dict:
            args['section_title'] = TableTextLocation._from_dict(
                _dict['section_title'])
        if 'title' in _dict:
            args['title'] = TableTextLocation._from_dict(_dict['title'])
        # List-of-model fields.
        if 'table_headers' in _dict:
            args['table_headers'] = [
                TableHeaders._from_dict(item)
                for item in _dict['table_headers']
            ]
        if 'row_headers' in _dict:
            args['row_headers'] = [
                TableRowHeaders._from_dict(item)
                for item in _dict['row_headers']
            ]
        if 'column_headers' in _dict:
            args['column_headers'] = [
                TableColumnHeaders._from_dict(item)
                for item in _dict['column_headers']
            ]
        if 'key_value_pairs' in _dict:
            args['key_value_pairs'] = [
                TableKeyValuePairs._from_dict(item)
                for item in _dict['key_value_pairs']
            ]
        if 'body_cells' in _dict:
            args['body_cells'] = [
                TableBodyCells._from_dict(item)
                for item in _dict['body_cells']
            ]
        if 'contexts' in _dict:
            args['contexts'] = [
                TableTextLocation._from_dict(item)
                for item in _dict['contexts']
            ]
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a TableResultTable object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        location = getattr(self, 'location', None)
        if location is not None:
            serialized['location'] = location._to_dict()
        text = getattr(self, 'text', None)
        if text is not None:
            serialized['text'] = text
        section_title = getattr(self, 'section_title', None)
        if section_title is not None:
            serialized['section_title'] = section_title._to_dict()
        title = getattr(self, 'title', None)
        if title is not None:
            serialized['title'] = title._to_dict()
        # Every list-valued field serializes element-wise.
        for name in ('table_headers', 'row_headers', 'column_headers',
                     'key_value_pairs', 'body_cells', 'contexts'):
            items = getattr(self, name, None)
            if items is not None:
                serialized[name] = [item._to_dict() for item in items]
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this TableResultTable object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'TableResultTable') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'TableResultTable') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class TableRowHeaderIds():
    """
    An array of values, each being the `id` value of a row header that is applicable to
    this body cell.

    :attr str id: (optional) The `id` values of a row header.
    """

    def __init__(self, *, id: str = None) -> None:
        """
        Initialize a TableRowHeaderIds object.

        :param str id: (optional) The `id` values of a row header.
        """
        self.id = id

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TableRowHeaderIds':
        """Initialize a TableRowHeaderIds object from a json dictionary."""
        # Reject any key other than the single supported field.
        unrecognized = {key for key in _dict if key != 'id'}
        if unrecognized:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TableRowHeaderIds: '
                + ', '.join(unrecognized))
        if 'id' in _dict:
            return cls(id=_dict.get('id'))
        return cls()

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a TableRowHeaderIds object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        current = getattr(self, 'id', None)
        return {} if current is None else {'id': current}

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this TableRowHeaderIds object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'TableRowHeaderIds') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'TableRowHeaderIds') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class TableRowHeaderTexts():
    """
    An array of values, each being the `text` value of a row header that is applicable to
    this body cell.

    :attr str text: (optional) The `text` value of a row header.
    """

    def __init__(self, *, text: str = None) -> None:
        """
        Initialize a TableRowHeaderTexts object.

        :param str text: (optional) The `text` value of a row header.
        """
        self.text = text

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TableRowHeaderTexts':
        """Initialize a TableRowHeaderTexts object from a json dictionary."""
        # Reject any key other than the single supported field.
        unrecognized = {key for key in _dict if key != 'text'}
        if unrecognized:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TableRowHeaderTexts: '
                + ', '.join(unrecognized))
        if 'text' in _dict:
            return cls(text=_dict.get('text'))
        return cls()

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a TableRowHeaderTexts object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        current = getattr(self, 'text', None)
        return {} if current is None else {'text': current}

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this TableRowHeaderTexts object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'TableRowHeaderTexts') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'TableRowHeaderTexts') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class TableRowHeaderTextsNormalized():
    """
    If you provide customization input, the normalized version of the row header texts
    according to the customization; otherwise, the same value as `row_header_texts`.

    :attr str text_normalized: (optional) The normalized version of a row header
    text.
    """

    def __init__(self, *, text_normalized: str = None) -> None:
        """
        Initialize a TableRowHeaderTextsNormalized object.

        :param str text_normalized: (optional) The normalized version of a row
               header text.
        """
        self.text_normalized = text_normalized

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TableRowHeaderTextsNormalized':
        """Initialize a TableRowHeaderTextsNormalized object from a json dictionary."""
        # Reject any key other than the single supported field.
        unrecognized = {key for key in _dict if key != 'text_normalized'}
        if unrecognized:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TableRowHeaderTextsNormalized: '
                + ', '.join(unrecognized))
        if 'text_normalized' in _dict:
            return cls(text_normalized=_dict.get('text_normalized'))
        return cls()

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a TableRowHeaderTextsNormalized object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        current = getattr(self, 'text_normalized', None)
        return {} if current is None else {'text_normalized': current}

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this TableRowHeaderTextsNormalized object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'TableRowHeaderTextsNormalized') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'TableRowHeaderTextsNormalized') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class TableRowHeaders():
    """
    Row-level cells of the current table, each applicable as a header to other
    cells in the same row as itself.

    :attr str cell_id: (optional) The unique ID of the cell in the current table.
    :attr TableElementLocation location: (optional) The numeric location of the
          identified element in the document, represented with two integers
          labeled `begin` and `end`.
    :attr str text: (optional) The textual contents of this cell from the input
          document without associated markup content.
    :attr str text_normalized: (optional) If you provide customization input, the
          normalized version of the cell text according to the customization;
          otherwise, the same value as `text`.
    :attr int row_index_begin: (optional) The `begin` index of this cell's `row`
          location in the current table.
    :attr int row_index_end: (optional) The `end` index of this cell's `row`
          location in the current table.
    :attr int column_index_begin: (optional) The `begin` index of this cell's
          `column` location in the current table.
    :attr int column_index_end: (optional) The `end` index of this cell's
          `column` location in the current table.
    """

    # Scalar keys handled uniformly by from_dict/to_dict; 'location' needs
    # model conversion and is handled separately.
    _SIMPLE_KEYS = ('cell_id', 'text', 'text_normalized', 'row_index_begin',
                    'row_index_end', 'column_index_begin', 'column_index_end')

    def __init__(self,
                 *,
                 cell_id: str = None,
                 location: 'TableElementLocation' = None,
                 text: str = None,
                 text_normalized: str = None,
                 row_index_begin: int = None,
                 row_index_end: int = None,
                 column_index_begin: int = None,
                 column_index_end: int = None) -> None:
        """
        Initialize a TableRowHeaders object.

        :param str cell_id: (optional) The unique ID of the cell in the current
               table.
        :param TableElementLocation location: (optional) The numeric location of
               the identified element in the document, represented with two
               integers labeled `begin` and `end`.
        :param str text: (optional) The textual contents of this cell from the
               input document without associated markup content.
        :param str text_normalized: (optional) If you provide customization
               input, the normalized version of the cell text according to the
               customization; otherwise, the same value as `text`.
        :param int row_index_begin: (optional) The `begin` index of this cell's
               `row` location in the current table.
        :param int row_index_end: (optional) The `end` index of this cell's
               `row` location in the current table.
        :param int column_index_begin: (optional) The `begin` index of this
               cell's `column` location in the current table.
        :param int column_index_end: (optional) The `end` index of this cell's
               `column` location in the current table.
        """
        self.cell_id = cell_id
        self.location = location
        self.text = text
        self.text_normalized = text_normalized
        self.row_index_begin = row_index_begin
        self.row_index_end = row_index_end
        self.column_index_begin = column_index_begin
        self.column_index_end = column_index_end

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TableRowHeaders':
        """Create a TableRowHeaders object from a json dictionary."""
        unknown = set(_dict.keys()) - set(cls._SIMPLE_KEYS) - {'location'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TableRowHeaders: '
                + ', '.join(unknown))
        kwargs = {key: _dict[key] for key in cls._SIMPLE_KEYS if key in _dict}
        if 'location' in _dict:
            kwargs['location'] = TableElementLocation._from_dict(
                _dict.get('location'))
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Create a TableRowHeaders object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Serialize this model into a json dictionary."""
        serialized = {}
        if getattr(self, 'cell_id', None) is not None:
            serialized['cell_id'] = self.cell_id
        if getattr(self, 'location', None) is not None:
            # Nested model: delegate to its own serializer.
            serialized['location'] = self.location._to_dict()
        for key in self._SIMPLE_KEYS[1:]:
            value = getattr(self, key, None)
            if value is not None:
                serialized[key] = value
        return serialized

    def _to_dict(self):
        """Serialize this model into a json dictionary."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a pretty-printed JSON `str` version of this object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'TableRowHeaders') -> bool:
        """Return True when self and other are equal, False otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other: 'TableRowHeaders') -> bool:
        """Return True when self and other are not equal, False otherwise."""
        return not (self == other)
class TableTextLocation():
    """
    A piece of text from a table together with its location.

    :attr str text: (optional) The text retrieved.
    :attr TableElementLocation location: (optional) The numeric location of the
          identified element in the document, represented with two integers
          labeled `begin` and `end`.
    """

    def __init__(self,
                 *,
                 text: str = None,
                 location: 'TableElementLocation' = None) -> None:
        """
        Initialize a TableTextLocation object.

        :param str text: (optional) The text retrieved.
        :param TableElementLocation location: (optional) The numeric location of
               the identified element in the document, represented with two
               integers labeled `begin` and `end`.
        """
        self.text = text
        self.location = location

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TableTextLocation':
        """Create a TableTextLocation object from a json dictionary."""
        unknown = set(_dict.keys()) - {'text', 'location'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TableTextLocation: '
                + ', '.join(unknown))
        kwargs = {}
        if 'text' in _dict:
            kwargs['text'] = _dict['text']
        if 'location' in _dict:
            # Nested model: convert via its own factory.
            kwargs['location'] = TableElementLocation._from_dict(
                _dict.get('location'))
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Create a TableTextLocation object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Serialize this model into a json dictionary."""
        serialized = {}
        if getattr(self, 'text', None) is not None:
            serialized['text'] = self.text
        if getattr(self, 'location', None) is not None:
            serialized['location'] = self.location._to_dict()
        return serialized

    def _to_dict(self):
        """Serialize this model into a json dictionary."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a pretty-printed JSON `str` version of this object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'TableTextLocation') -> bool:
        """Return True when self and other are equal, False otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other: 'TableTextLocation') -> bool:
        """Return True when self and other are not equal, False otherwise."""
        return not (self == other)
class TrainingExample():
    """
    Example response details for a training query.

    :attr str document_id: The document ID associated with this training example.
    :attr str collection_id: The collection ID associated with this training
          example.
    :attr int relevance: The relevance of the training example.
    :attr datetime created: (optional) The date and time the example was created.
    :attr datetime updated: (optional) The date and time the example was updated.
    """

    def __init__(self,
                 document_id: str,
                 collection_id: str,
                 relevance: int,
                 *,
                 created: datetime = None,
                 updated: datetime = None) -> None:
        """
        Initialize a TrainingExample object.

        :param str document_id: The document ID associated with this training
               example.
        :param str collection_id: The collection ID associated with this
               training example.
        :param int relevance: The relevance of the training example.
        :param datetime created: (optional) The date and time the example was
               created.
        :param datetime updated: (optional) The date and time the example was
               updated.
        """
        self.document_id = document_id
        self.collection_id = collection_id
        self.relevance = relevance
        self.created = created
        self.updated = updated

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TrainingExample':
        """Create a TrainingExample object from a json dictionary."""
        unknown = set(_dict.keys()) - {
            'document_id', 'collection_id', 'relevance', 'created', 'updated'
        }
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TrainingExample: '
                + ', '.join(unknown))
        kwargs = {}
        # Guard clauses for the three required properties.
        if 'document_id' not in _dict:
            raise ValueError(
                'Required property \'document_id\' not present in TrainingExample JSON'
            )
        kwargs['document_id'] = _dict['document_id']
        if 'collection_id' not in _dict:
            raise ValueError(
                'Required property \'collection_id\' not present in TrainingExample JSON'
            )
        kwargs['collection_id'] = _dict['collection_id']
        if 'relevance' not in _dict:
            raise ValueError(
                'Required property \'relevance\' not present in TrainingExample JSON'
            )
        kwargs['relevance'] = _dict['relevance']
        # Optional timestamps arrive as strings and are parsed to datetimes.
        if 'created' in _dict:
            kwargs['created'] = string_to_datetime(_dict.get('created'))
        if 'updated' in _dict:
            kwargs['updated'] = string_to_datetime(_dict.get('updated'))
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Create a TrainingExample object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Serialize this model into a json dictionary."""
        serialized = {}
        for key in ('document_id', 'collection_id', 'relevance'):
            value = getattr(self, key, None)
            if value is not None:
                serialized[key] = value
        if getattr(self, 'created', None) is not None:
            serialized['created'] = datetime_to_string(self.created)
        if getattr(self, 'updated', None) is not None:
            serialized['updated'] = datetime_to_string(self.updated)
        return serialized

    def _to_dict(self):
        """Serialize this model into a json dictionary."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a pretty-printed JSON `str` version of this object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'TrainingExample') -> bool:
        """Return True when self and other are equal, False otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other: 'TrainingExample') -> bool:
        """Return True when self and other are not equal, False otherwise."""
        return not (self == other)
class TrainingQuery():
    """
    Details of a single training query.

    :attr str query_id: (optional) The query ID associated with the training query.
    :attr str natural_language_query: The natural text query for the training query.
    :attr str filter: (optional) The filter used on the collection before the
          **natural_language_query** is applied.
    :attr datetime created: (optional) The date and time the query was created.
    :attr datetime updated: (optional) The date and time the query was updated.
    :attr List[TrainingExample] examples: Array of training examples.
    """

    def __init__(self,
                 natural_language_query: str,
                 examples: List['TrainingExample'],
                 *,
                 query_id: str = None,
                 filter: str = None,
                 created: datetime = None,
                 updated: datetime = None) -> None:
        """
        Initialize a TrainingQuery object.

        :param str natural_language_query: The natural text query for the
               training query.
        :param List[TrainingExample] examples: Array of training examples.
        :param str query_id: (optional) The query ID associated with the
               training query.
        :param str filter: (optional) The filter used on the collection before
               the **natural_language_query** is applied.
        :param datetime created: (optional) The date and time the query was
               created.
        :param datetime updated: (optional) The date and time the query was
               updated.
        """
        self.query_id = query_id
        self.natural_language_query = natural_language_query
        self.filter = filter
        self.created = created
        self.updated = updated
        self.examples = examples

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TrainingQuery':
        """Create a TrainingQuery object from a json dictionary."""
        unknown = set(_dict.keys()) - {
            'query_id', 'natural_language_query', 'filter', 'created',
            'updated', 'examples'
        }
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TrainingQuery: '
                + ', '.join(unknown))
        kwargs = {}
        if 'query_id' in _dict:
            kwargs['query_id'] = _dict['query_id']
        if 'natural_language_query' not in _dict:
            raise ValueError(
                'Required property \'natural_language_query\' not present in TrainingQuery JSON'
            )
        kwargs['natural_language_query'] = _dict['natural_language_query']
        if 'filter' in _dict:
            kwargs['filter'] = _dict['filter']
        # Optional timestamps arrive as strings and are parsed to datetimes.
        if 'created' in _dict:
            kwargs['created'] = string_to_datetime(_dict.get('created'))
        if 'updated' in _dict:
            kwargs['updated'] = string_to_datetime(_dict.get('updated'))
        if 'examples' not in _dict:
            raise ValueError(
                'Required property \'examples\' not present in TrainingQuery JSON'
            )
        kwargs['examples'] = [
            TrainingExample._from_dict(x) for x in _dict['examples']
        ]
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Create a TrainingQuery object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Serialize this model into a json dictionary."""
        serialized = {}
        for key in ('query_id', 'natural_language_query', 'filter'):
            value = getattr(self, key, None)
            if value is not None:
                serialized[key] = value
        if getattr(self, 'created', None) is not None:
            serialized['created'] = datetime_to_string(self.created)
        if getattr(self, 'updated', None) is not None:
            serialized['updated'] = datetime_to_string(self.updated)
        if getattr(self, 'examples', None) is not None:
            serialized['examples'] = [x._to_dict() for x in self.examples]
        return serialized

    def _to_dict(self):
        """Serialize this model into a json dictionary."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a pretty-printed JSON `str` version of this object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'TrainingQuery') -> bool:
        """Return True when self and other are equal, False otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other: 'TrainingQuery') -> bool:
        """Return True when self and other are not equal, False otherwise."""
        return not (self == other)
class TrainingQuerySet():
    """
    The training queries contained in the identified training set.

    :attr List[TrainingQuery] queries: (optional) Array of training queries.
    """

    def __init__(self, *, queries: List['TrainingQuery'] = None) -> None:
        """
        Initialize a TrainingQuerySet object.

        :param List[TrainingQuery] queries: (optional) Array of training
               queries.
        """
        self.queries = queries

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TrainingQuerySet':
        """Create a TrainingQuerySet object from a json dictionary."""
        unknown = set(_dict.keys()) - {'queries'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TrainingQuerySet: '
                + ', '.join(unknown))
        kwargs = {}
        if 'queries' in _dict:
            # Each entry is converted through the TrainingQuery factory.
            kwargs['queries'] = [
                TrainingQuery._from_dict(x) for x in _dict['queries']
            ]
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Create a TrainingQuerySet object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Serialize this model into a json dictionary."""
        serialized = {}
        if getattr(self, 'queries', None) is not None:
            serialized['queries'] = [x._to_dict() for x in self.queries]
        return serialized

    def _to_dict(self):
        """Serialize this model into a json dictionary."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a pretty-printed JSON `str` version of this object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'TrainingQuerySet') -> bool:
        """Return True when self and other are equal, False otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other: 'TrainingQuerySet') -> bool:
        """Return True when self and other are not equal, False otherwise."""
        return not (self == other)
class QueryCalculationAggregation(QueryAggregation):
    """
    A scalar calculation across all documents for the specified field. Possible
    calculations include min, max, sum, average, and unique_count.

    :attr str field: The field to perform the calculation on.
    :attr float value: (optional) The value of the calculation.
    """

    def __init__(self, type: str, field: str, *, value: float = None) -> None:
        """
        Initialize a QueryCalculationAggregation object.

        :param str type: The type of aggregation command used. Options include:
               term, histogram, timeslice, nested, filter, min, max, sum,
               average, unique_count, and top_hits.
        :param str field: The field to perform the calculation on.
        :param float value: (optional) The value of the calculation.
        """
        self.type = type
        self.field = field
        self.value = value

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QueryCalculationAggregation':
        """Create a QueryCalculationAggregation object from a json dictionary."""
        unknown = set(_dict.keys()) - {'type', 'field', 'value'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class QueryCalculationAggregation: '
                + ', '.join(unknown))
        kwargs = {}
        # Guard clauses for required properties.
        if 'type' not in _dict:
            raise ValueError(
                'Required property \'type\' not present in QueryCalculationAggregation JSON'
            )
        kwargs['type'] = _dict['type']
        if 'field' not in _dict:
            raise ValueError(
                'Required property \'field\' not present in QueryCalculationAggregation JSON'
            )
        kwargs['field'] = _dict['field']
        if 'value' in _dict:
            kwargs['value'] = _dict['value']
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Create a QueryCalculationAggregation object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Serialize this model into a json dictionary."""
        serialized = {}
        for key in ('type', 'field', 'value'):
            attr = getattr(self, key, None)
            if attr is not None:
                serialized[key] = attr
        return serialized

    def _to_dict(self):
        """Serialize this model into a json dictionary."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a pretty-printed JSON `str` version of this object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'QueryCalculationAggregation') -> bool:
        """Return True when self and other are equal, False otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other: 'QueryCalculationAggregation') -> bool:
        """Return True when self and other are not equal, False otherwise."""
        return not (self == other)
class QueryFilterAggregation(QueryAggregation):
    """
    A modifier that narrows down the document set of the sub aggregations it
    precedes.

    :attr str match: The filter written in Discovery Query Language syntax applied
          to the documents before sub aggregations are run.
    :attr int matching_results: Number of documents matching the filter.
    :attr List[QueryAggregation] aggregations: (optional) An array of sub
          aggregations.
    """

    def __init__(self,
                 type: str,
                 match: str,
                 matching_results: int,
                 *,
                 aggregations: List['QueryAggregation'] = None) -> None:
        """
        Initialize a QueryFilterAggregation object.

        :param str type: The type of aggregation command used. Options include:
               term, histogram, timeslice, nested, filter, min, max, sum,
               average, unique_count, and top_hits.
        :param str match: The filter written in Discovery Query Language syntax
               applied to the documents before sub aggregations are run.
        :param int matching_results: Number of documents matching the filter.
        :param List[QueryAggregation] aggregations: (optional) An array of sub
               aggregations.
        """
        self.type = type
        self.match = match
        self.matching_results = matching_results
        self.aggregations = aggregations

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QueryFilterAggregation':
        """Create a QueryFilterAggregation object from a json dictionary."""
        unknown = set(_dict.keys()) - {
            'type', 'match', 'matching_results', 'aggregations'
        }
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class QueryFilterAggregation: '
                + ', '.join(unknown))
        kwargs = {}
        # Guard clauses for required properties.
        if 'type' not in _dict:
            raise ValueError(
                'Required property \'type\' not present in QueryFilterAggregation JSON'
            )
        kwargs['type'] = _dict['type']
        if 'match' not in _dict:
            raise ValueError(
                'Required property \'match\' not present in QueryFilterAggregation JSON'
            )
        kwargs['match'] = _dict['match']
        if 'matching_results' not in _dict:
            raise ValueError(
                'Required property \'matching_results\' not present in QueryFilterAggregation JSON'
            )
        kwargs['matching_results'] = _dict['matching_results']
        if 'aggregations' in _dict:
            kwargs['aggregations'] = [
                QueryAggregation._from_dict(x)
                for x in _dict['aggregations']
            ]
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Create a QueryFilterAggregation object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Serialize this model into a json dictionary."""
        serialized = {}
        for key in ('type', 'match', 'matching_results'):
            attr = getattr(self, key, None)
            if attr is not None:
                serialized[key] = attr
        if getattr(self, 'aggregations', None) is not None:
            serialized['aggregations'] = [
                x._to_dict() for x in self.aggregations
            ]
        return serialized

    def _to_dict(self):
        """Serialize this model into a json dictionary."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a pretty-printed JSON `str` version of this object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'QueryFilterAggregation') -> bool:
        """Return True when self and other are equal, False otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other: 'QueryFilterAggregation') -> bool:
        """Return True when self and other are not equal, False otherwise."""
        return not (self == other)
class QueryHistogramAggregation(QueryAggregation):
    """
    Numeric interval segments that categorize documents by using field values from
    a single numeric field to describe the category.

    :attr str field: The numeric field name used to create the histogram.
    :attr int interval: The size of the sections the results are split into.
    :attr str name: (optional) Identifier specified in the query request of this
          aggregation.
    :attr List[QueryHistogramAggregationResult] results: (optional) Array of
          numeric intervals.
    """

    def __init__(
            self,
            type: str,
            field: str,
            interval: int,
            *,
            name: str = None,
            results: List['QueryHistogramAggregationResult'] = None) -> None:
        """
        Initialize a QueryHistogramAggregation object.

        :param str type: The type of aggregation command used. Options include:
               term, histogram, timeslice, nested, filter, min, max, sum,
               average, unique_count, and top_hits.
        :param str field: The numeric field name used to create the histogram.
        :param int interval: The size of the sections the results are split
               into.
        :param str name: (optional) Identifier specified in the query request of
               this aggregation.
        :param List[QueryHistogramAggregationResult] results: (optional) Array
               of numeric intervals.
        """
        self.type = type
        self.field = field
        self.interval = interval
        self.name = name
        self.results = results

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QueryHistogramAggregation':
        """Create a QueryHistogramAggregation object from a json dictionary."""
        unknown = set(_dict.keys()) - {
            'type', 'field', 'interval', 'name', 'results'
        }
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class QueryHistogramAggregation: '
                + ', '.join(unknown))
        kwargs = {}
        # Guard clauses for required properties.
        if 'type' not in _dict:
            raise ValueError(
                'Required property \'type\' not present in QueryHistogramAggregation JSON'
            )
        kwargs['type'] = _dict['type']
        if 'field' not in _dict:
            raise ValueError(
                'Required property \'field\' not present in QueryHistogramAggregation JSON'
            )
        kwargs['field'] = _dict['field']
        if 'interval' not in _dict:
            raise ValueError(
                'Required property \'interval\' not present in QueryHistogramAggregation JSON'
            )
        kwargs['interval'] = _dict['interval']
        if 'name' in _dict:
            kwargs['name'] = _dict['name']
        if 'results' in _dict:
            kwargs['results'] = [
                QueryHistogramAggregationResult._from_dict(x)
                for x in _dict['results']
            ]
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Create a QueryHistogramAggregation object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Serialize this model into a json dictionary."""
        serialized = {}
        for key in ('type', 'field', 'interval', 'name'):
            attr = getattr(self, key, None)
            if attr is not None:
                serialized[key] = attr
        if getattr(self, 'results', None) is not None:
            serialized['results'] = [x._to_dict() for x in self.results]
        return serialized

    def _to_dict(self):
        """Serialize this model into a json dictionary."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a pretty-printed JSON `str` version of this object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'QueryHistogramAggregation') -> bool:
        """Return True when self and other are equal, False otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other: 'QueryHistogramAggregation') -> bool:
        """Return True when self and other are not equal, False otherwise."""
        return not (self == other)
class QueryNestedAggregation(QueryAggregation):
    """
    A restriction that alters the document set used by the sub aggregations it
    precedes to nested documents found in the field specified.

    :attr str path: The path to the document field to scope sub aggregations to.
    :attr int matching_results: Number of nested documents found in the specified
          field.
    :attr List[QueryAggregation] aggregations: (optional) An array of sub
          aggregations.
    """

    def __init__(self,
                 type: str,
                 path: str,
                 matching_results: int,
                 *,
                 aggregations: List['QueryAggregation'] = None) -> None:
        """
        Initialize a QueryNestedAggregation object.

        :param str type: The type of aggregation command used. Options include:
               term, histogram, timeslice, nested, filter, min, max, sum,
               average, unique_count, and top_hits.
        :param str path: The path to the document field to scope sub
               aggregations to.
        :param int matching_results: Number of nested documents found in the
               specified field.
        :param List[QueryAggregation] aggregations: (optional) An array of sub
               aggregations.
        """
        self.type = type
        self.path = path
        self.matching_results = matching_results
        self.aggregations = aggregations

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QueryNestedAggregation':
        """Create a QueryNestedAggregation object from a json dictionary."""
        unknown = set(_dict.keys()) - {
            'type', 'path', 'matching_results', 'aggregations'
        }
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class QueryNestedAggregation: '
                + ', '.join(unknown))
        kwargs = {}
        # Guard clauses for required properties.
        if 'type' not in _dict:
            raise ValueError(
                'Required property \'type\' not present in QueryNestedAggregation JSON'
            )
        kwargs['type'] = _dict['type']
        if 'path' not in _dict:
            raise ValueError(
                'Required property \'path\' not present in QueryNestedAggregation JSON'
            )
        kwargs['path'] = _dict['path']
        if 'matching_results' not in _dict:
            raise ValueError(
                'Required property \'matching_results\' not present in QueryNestedAggregation JSON'
            )
        kwargs['matching_results'] = _dict['matching_results']
        if 'aggregations' in _dict:
            kwargs['aggregations'] = [
                QueryAggregation._from_dict(x)
                for x in _dict['aggregations']
            ]
        return cls(**kwargs)

    @classmethod
    def _from_dict(cls, _dict):
        """Create a QueryNestedAggregation object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Serialize this model into a json dictionary."""
        serialized = {}
        for key in ('type', 'path', 'matching_results'):
            attr = getattr(self, key, None)
            if attr is not None:
                serialized[key] = attr
        if getattr(self, 'aggregations', None) is not None:
            serialized['aggregations'] = [
                x._to_dict() for x in self.aggregations
            ]
        return serialized

    def _to_dict(self):
        """Serialize this model into a json dictionary."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a pretty-printed JSON `str` version of this object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'QueryNestedAggregation') -> bool:
        """Return True when self and other are equal, False otherwise."""
        return isinstance(other, self.__class__) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other: 'QueryNestedAggregation') -> bool:
        """Return True when self and other are not equal, False otherwise."""
        return not (self == other)
class QueryTermAggregation(QueryAggregation):
"""
Returns the top values for the field specified.
:attr str field: The field in the document used to generate top values from.
:attr int count: (optional) The number of top values returned.
:attr str name: (optional) Identifier specified in the query request of this
aggregation.
:attr List[QueryTermAggregationResult] results: (optional) Array of top values
for the field.
"""
    def __init__(self,
                 type: str,
                 field: str,
                 *,
                 count: int = None,
                 name: str = None,
                 results: List['QueryTermAggregationResult'] = None) -> None:
        """
        Initialize a QueryTermAggregation object.

        :param str type: The type of aggregation command used. Options include:
               term, histogram, timeslice, nested, filter, min, max, sum, average,
               unique_count, and top_hits.
        :param str field: The field in the document used to generate top values
               from.
        :param int count: (optional) The number of top values returned.
        :param str name: (optional) Identifier specified in the query request of
               this aggregation.
        :param List[QueryTermAggregationResult] results: (optional) Array of top
               values for the field.
        """
        # All attributes are stored as provided; optional ones default to None.
        self.type = type
        self.field = field
        self.count = count
        self.name = name
        self.results = results
@classmethod
def from_dict(cls, _dict: Dict) -> 'QueryTermAggregation':
"""Initialize a QueryTermAggregation object from a json dictionary."""
args = {}
valid_keys = ['type', 'field', 'count', 'name', 'results']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class QueryTermAggregation: '
+ ', '.join(bad_keys))
if 'type' in _dict:
args['type'] = _dict.get('type')
else:
raise ValueError(
'Required property \'type\' not present in QueryTermAggregation JSON'
)
if 'field' in _dict:
args['field'] = _dict.get('field')
else:
raise ValueError(
'Required property \'field\' not present in QueryTermAggregation JSON'
)
if 'count' in _dict:
args['count'] = _dict.get('count')
if 'name' in _dict:
args['name'] = _dict.get('name')
if 'results' in _dict:
args['results'] = [
QueryTermAggregationResult._from_dict(x)
for x in (_dict.get('results'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a QueryTermAggregation object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'type') and self.type is not None:
_dict['type'] = self.type
if hasattr(self, 'field') and self.field is not None:
_dict['field'] = self.field
if hasattr(self, 'count') and self.count is not None:
_dict['count'] = self.count
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'results') and self.results is not None:
_dict['results'] = [x._to_dict() for x in self.results]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this QueryTermAggregation object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'QueryTermAggregation') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'QueryTermAggregation') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class QueryTimesliceAggregation(QueryAggregation):
    """
    A specialized histogram aggregation that uses dates to create interval segments.

    :attr str field: The date field name used to create the timeslice.
    :attr str interval: The date interval value. Valid values are seconds, minutes,
          hours, days, weeks, and years.
    :attr str name: (optional) Identifier specified in the query request of this
          aggregation.
    :attr List[QueryTimesliceAggregationResult] results: (optional) Array of
          aggregation results.
    """

    def __init__(
            self,
            type: str,
            field: str,
            interval: str,
            *,
            name: str = None,
            results: List['QueryTimesliceAggregationResult'] = None) -> None:
        """
        Initialize a QueryTimesliceAggregation object.

        :param str type: The type of aggregation command used. Options include:
               term, histogram, timeslice, nested, filter, min, max, sum, average,
               unique_count, and top_hits.
        :param str field: The date field name used to create the timeslice.
        :param str interval: The date interval value. Valid values are seconds,
               minutes, hours, days, weeks, and years.
        :param str name: (optional) Identifier specified in the query request of
               this aggregation.
        :param List[QueryTimesliceAggregationResult] results: (optional) Array of
               aggregation results.
        """
        self.type = type
        self.field = field
        self.interval = interval
        self.name = name
        self.results = results

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QueryTimesliceAggregation':
        """Initialize a QueryTimesliceAggregation object from a json dictionary."""
        # Reject any key this model does not declare.
        unknown = set(_dict.keys()) - {'type', 'field', 'interval', 'name', 'results'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class QueryTimesliceAggregation: '
                + ', '.join(unknown))
        args = {}
        # 'type', 'field' and 'interval' are mandatory properties.
        for required in ('type', 'field', 'interval'):
            if required not in _dict:
                raise ValueError(
                    'Required property \'{}\' not present in QueryTimesliceAggregation JSON'.format(
                        required))
            args[required] = _dict.get(required)
        if 'name' in _dict:
            args['name'] = _dict.get('name')
        if 'results' in _dict:
            # Each entry is itself a model and is deserialized recursively.
            args['results'] = [
                QueryTimesliceAggregationResult._from_dict(item)
                for item in (_dict.get('results'))
            ]
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a QueryTimesliceAggregation object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        # Plain attributes are emitted verbatim when present and not None.
        for attr in ('type', 'field', 'interval', 'name'):
            value = getattr(self, attr, None)
            if value is not None:
                serialized[attr] = value
        # Nested models serialize through their own _to_dict().
        if getattr(self, 'results', None) is not None:
            serialized['results'] = [item._to_dict() for item in self.results]
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this QueryTimesliceAggregation object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'QueryTimesliceAggregation') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'QueryTimesliceAggregation') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class QueryTopHitsAggregation(QueryAggregation):
    """
    Returns the top documents ranked by the score of the query.

    :attr int size: The number of documents to return.
    :attr str name: (optional) Identifier specified in the query request of this
          aggregation.
    :attr QueryTopHitsAggregationResult hits: (optional)
    """

    def __init__(self,
                 type: str,
                 size: int,
                 *,
                 name: str = None,
                 hits: 'QueryTopHitsAggregationResult' = None) -> None:
        """
        Initialize a QueryTopHitsAggregation object.

        :param str type: The type of aggregation command used. Options include:
               term, histogram, timeslice, nested, filter, min, max, sum, average,
               unique_count, and top_hits.
        :param int size: The number of documents to return.
        :param str name: (optional) Identifier specified in the query request of
               this aggregation.
        :param QueryTopHitsAggregationResult hits: (optional)
        """
        self.type = type
        self.size = size
        self.name = name
        self.hits = hits

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'QueryTopHitsAggregation':
        """Initialize a QueryTopHitsAggregation object from a json dictionary."""
        # Reject any key this model does not declare.
        unknown = set(_dict.keys()) - {'type', 'size', 'name', 'hits'}
        if unknown:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class QueryTopHitsAggregation: '
                + ', '.join(unknown))
        args = {}
        # 'type' and 'size' are mandatory properties of this aggregation.
        for required in ('type', 'size'):
            if required not in _dict:
                raise ValueError(
                    'Required property \'{}\' not present in QueryTopHitsAggregation JSON'.format(
                        required))
            args[required] = _dict.get(required)
        if 'name' in _dict:
            args['name'] = _dict.get('name')
        if 'hits' in _dict:
            # 'hits' is a single nested model, not a list.
            args['hits'] = QueryTopHitsAggregationResult._from_dict(
                _dict.get('hits'))
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a QueryTopHitsAggregation object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        serialized = {}
        # Plain attributes are emitted verbatim when present and not None.
        for attr in ('type', 'size', 'name'):
            value = getattr(self, attr, None)
            if value is not None:
                serialized[attr] = value
        # The nested model serializes through its own _to_dict().
        if getattr(self, 'hits', None) is not None:
            serialized['hits'] = self.hits._to_dict()
        return serialized

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this QueryTopHitsAggregation object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'QueryTopHitsAggregation') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'QueryTopHitsAggregation') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
| StarcoderdataPython |
11375766 | <gh_stars>1-10
from typing import List, Dict, Any, Callable, Union, Tuple, Optional
from multiprocessing.connection import Connection
from functools import partial
import multiprocessing
import textwrap
import gym
import numpy as np
import torch
import torch.nn as nn
from regym.rl_algorithms.replay_buffers import Storage
from regym.networks import Convolutional2DBody, FCBody, CategoricalActorCriticNet, SequentialBody, PolicyInferenceActorCriticNet
from regym.networks.preprocessing import (turn_into_single_element_batch,
batch_vector_observation,
parse_preprocessing_fn)
from regym.networks.utils import parse_gating_fn
from regym.util.data_augmentation import parse_data_augmentation_fn, apply_data_augmentation_to_experiences
from regym.rl_algorithms.agents import Agent, build_MCTS_Agent, MCTSAgent
from regym.rl_algorithms.expert_iteration import ExpertIterationAlgorithm
from regym.networks.servers.neural_net_server import NeuralNetServerHandler
from regym.networks.servers import request_prediction_from_server
class ExpertIterationAgent(Agent):
    '''
    Agent implementing Expert Iteration (ExIt): an MCTS expert generates
    trajectories and policy/value targets which are distilled into an
    apprentice neural network by :attr:`algorithm`.
    '''

    def __init__(self, algorithm: ExpertIterationAlgorithm,
                 name: str,
                 expert: MCTSAgent, apprentice: nn.Module,
                 use_apprentice_in_expert: bool,
                 use_agent_modelling: bool,
                 use_true_agent_models_in_mcts: bool,
                 use_learnt_opponent_models_in_mcts: bool,
                 request_observed_action: bool,
                 average_episode_returns_with_mcts_values: bool,
                 action_dim: int,
                 observation_dim: Tuple[int],
                 num_opponents: int,
                 temperature: float = 1.,
                 drop_temperature_after_n_moves: int = np.inf,
                 state_preprocess_fn: Optional[Callable] = turn_into_single_element_batch,
                 server_state_preprocess_fn: Optional[Callable] = batch_vector_observation,
                 data_augmnentation_fn: Optional[Callable] = None,
                 use_cuda: bool = False):
        '''
        :param algorithm: ExpertIterationAlgorithm which will be fed
                          trajectories computed by this agent, in turn
                          this will update the parameters of :param: apprentice
        :param name: String identifier for the agent
        :param expert: Agent used to take actions in the environment
                       and create optimization targets for the apprentice
        :param apprentice: Neural network used inside of the expert.
                           It will be used to compute priors for MCTS nodes
                           and values to backpropagate.
                           This neural net will be changed by :param: algorithm
        :param use_apprentice_in_expert: whether to bias MCTS's selection
                                         phase and expansion phase with the
                                         apprentice. If False, this algorithm
                                         is equivalent to DAGGER
        :param use_agent_modelling: Whether to model other agent's actions as
                                    an auxiliary task. As in DPIQN paper
        :param use_true_agent_models_in_mcts: Whether to use opponent modelling inside of MCTS.
                                              During training: creates a NeuralNetServerHandler
                                              containing the policy for the other agent.
                                              Requires the other agent's model.
                                              During inference: uses opponent modelling head
                                              of self.apprentice.
        :param use_learnt_opponent_models_in_mcts: Whether to use learnt opponent models, by querying
                                                   the head of apprentice (nn.Module) which is
                                                   trained to model opponent actions
                                                   (key from prediction dictionary 'policy_0')
        :param request_observed_action: When using opponent modelling. Whether to request
                                        to store one-hot encoded observed actions.
                                        Otherwise the full distribution over actions is stored
        :param average_episode_returns_with_mcts_values: Whether to average
                        the episode returns with Q values of MCTS' root
                        node, to serve as targets for the apprentice's
                        value head.
        :param action_dim: Shape of actions, use to generate placeholder values
        :param observation_dim: Shape of observations, use to generate placeholder values
        :param num_opponents: Number of opponents that will be playing in an environment
        :param temperature: Exponent applied to MCTS visitations before they are
                            normalized into a policy target (exploration knob)
        :param drop_temperature_after_n_moves: Episode step after which the
                            temperature is dropped to 0.1 (near-greedy targets)
        :param state_preprocess_fn: Function to pre-process observations before they
                                    are fed into the apprentice (an nn.Module)
        :param server_state_preprocess_fn: Same as :param: state_preprocess_fn, but this fn
                                           will be given to underlying NeuralNetServer
        :param data_augmnentation_fn: Function used to augment experiences (create new ones)
                                      Currently only implemented for handle_multiple_experiences.
        :param use_cuda: Whether to load neural net to a cuda device for action predictions
        '''
        super().__init__(name=name, requires_environment_model=True)
        self.use_cuda = use_cuda
        self.requires_self_prediction = True

        self.temperature: float = temperature
        self.drop_temperature_after_n_moves: int = drop_temperature_after_n_moves

        self.algorithm: ExpertIterationAlgorithm = algorithm
        self.expert: Agent = expert
        self.apprentice: nn.Module = apprentice
        if self.use_cuda: self.apprentice = self.apprentice.cuda()

        self.average_episode_returns_with_mcts_values = average_episode_returns_with_mcts_values

        #### Algorithmic variations ####
        self._use_true_agent_models_in_mcts = use_true_agent_models_in_mcts
        self._use_learnt_opponent_models_in_mcts = use_learnt_opponent_models_in_mcts
        if self.use_true_agent_models_in_mcts and (not self.use_learnt_opponent_models_in_mcts):
            # We will need to set up a server for other agent's models
            # Inside MCTS, we make the evaluation_fn point to the right
            # server (with the other agent's models) on opponent's nodes.
            self.requires_acess_to_other_agents = True
            self.expert.requires_acess_to_other_agents = True

        self.use_agent_modelling: bool = use_agent_modelling
        self.num_opponents: int = num_opponents
        if self.use_agent_modelling:
            self.action_dim = action_dim
            self.observation_dim = observation_dim
            self.requires_opponents_prediction = True
            # Key used to extract opponent policy from extra_info in handle_experience
            self.request_observed_action = request_observed_action
            self.extra_info_key = 'a' if self.request_observed_action else 'probs'

        # If FALSE, this algorithm is equivalent to DAgger
        self.use_apprentice_in_expert: bool = use_apprentice_in_expert
        if self.use_apprentice_in_expert:
            self.multi_action_requires_server = True
            self.embed_apprentice_in_expert()
        ####

        # Replay buffer style storage
        # Doesn't matter, should not really be using a Storage
        # In tasks with single environments
        self.storage: Storage = self.init_storage(size=100)
        # In vectorized (multiple) environments
        # Mapping from env_id to trajectory storage
        self.storages: Dict[int, Storage] = {}

        # Set state preprocessing functions
        self.state_preprocess_fn = state_preprocess_fn
        self.server_state_preprocess_fn = server_state_preprocess_fn

        self.data_augmnentation_fn = data_augmnentation_fn

        # BUGFIX: start with one entry so the single-environment path
        # (env index 0) can be tracked before `num_actors` is ever set.
        # The `num_actors` setter resizes this list for vectorized envs.
        self.current_episode_lengths = [0]

    @property
    def use_true_agent_models_in_mcts(self):
        return self._use_true_agent_models_in_mcts

    @property
    def use_learnt_opponent_models_in_mcts(self):
        return self._use_learnt_opponent_models_in_mcts

    @use_learnt_opponent_models_in_mcts.setter
    def use_learnt_opponent_models_in_mcts(self, value: bool):
        '''
        If set, during MCTS search, in order to compute action priors for
        opponent, the learnt opponent model from this agent' apprentice will be queried.
        '''
        if value:
            # The two opponent-model sources are mutually exclusive.
            self._use_true_agent_models_in_mcts = False
            self.requires_acess_to_other_agents = False
            self.expert.requires_acess_to_other_agents = False
        self._use_learnt_opponent_models_in_mcts = value
        self.embed_apprentice_in_expert()

    @use_true_agent_models_in_mcts.setter
    def use_true_agent_models_in_mcts(self, value: bool):
        '''
        If set, during MCTS search, in order to compute action priors for
        opponent, the true opponent model will be queried.
        '''
        if value:
            # The two opponent-model sources are mutually exclusive.
            self._use_learnt_opponent_models_in_mcts = False
        self._use_true_agent_models_in_mcts = value
        self.requires_acess_to_other_agents = value
        self.expert.requires_acess_to_other_agents = value
        self.embed_apprentice_in_expert()

    @Agent.num_actors.setter
    def num_actors(self, n):
        # We would like to just say: super().num_actors = n
        # But python is really annoying when it comes to property setters
        # See: https://bugs.python.org/issue14965
        super(self.__class__, self.__class__).num_actors.fset(self, n)
        self.expert.num_actors = n
        self.current_episode_lengths = [0 for _ in range(self.num_actors)]

    @Agent.summary_writer.setter
    def summary_writer(self, summary_writer):
        self._summary_writer = summary_writer
        self.algorithm.summary_writer = summary_writer

    def access_other_agents(self, other_agents_vector: List[Agent], task: 'Task', num_envs):
        '''
        Gives the underlying expert access to the other agents' models so
        that true opponent policies can be queried during MCTS search.
        '''
        assert self.use_true_agent_models_in_mcts
        self.expert.access_other_agents(other_agents_vector, task, num_envs)

    def embed_apprentice_in_expert(self):
        '''Wires the apprentice's policy/value heads into the MCTS expert.'''
        # Non-parallel environments
        # TODO: code different variations of brexit for non-parallel envs
        self.expert.policy_fn = self.policy_fn
        self.expert.evaluation_fn = self.evaluation_fn
        # Parallel environments
        if self.use_true_agent_models_in_mcts:
            # For this agent's nodes: Use the apprentice's actor policy head
            # For opponent nodes: Use true opponent model from opponent NeuralNetServerHandler
            self.expert.server_based_policy_fn = \
                self.__class__.opponent_aware_server_based_policy_fn
        elif self.use_learnt_opponent_models_in_mcts:
            # For this agent's nodes: Use the apprentice's actor policy head
            # For opponent nodes: Use the apprentice's opponent model head
            self.expert.server_based_policy_fn = \
                self.__class__.learnt_opponent_model_aware_server_based_policy_fn
        else:
            # For this agent's nodes: Use the apprentice's actor policy head
            # For opponent nodes: Use the apprentice's actor policy head
            self.expert.server_based_policy_fn = partial(
                request_prediction_from_server,
                key='probs')
        self.expert.server_based_evaluation_fn = partial(
            request_prediction_from_server,
            key='V')

    def init_storage(self, size: int):
        '''Creates a Storage with the extra keys ExIt training targets need.'''
        storage = Storage(size=size)
        storage.add_key('normalized_child_visitations')  # \pi_{MCTS} policy
        if self.use_agent_modelling:
            storage.add_key('opponent_s')       # s
            storage.add_key('opponent_policy')  # \pi_{opponent}(.|s)
        return storage

    def reset_after_episodes(self):
        ''' Resets storages, in case they were half-full between training runs '''
        self.storages: Dict[int, Storage] = {}

    def handle_experience(self, o, a, r: float, succ_s, done=False,
                          extra_info: Dict[int, Dict[str, Any]] = {}):
        super().handle_experience(o, a, r, succ_s, done)
        expert_child_visitations = self.expert.current_prediction['child_visitations']
        expert_state_value_prediction = self.expert.current_prediction['V']
        # BUGFIX: the original call omitted the required `env_i` argument,
        # which shifted every later positional argument (the Storage landed in
        # `env_i`) and raised a TypeError. The single-environment interaction
        # loop always corresponds to env index 0.
        self._handle_experience(o, a, r, succ_s, done, extra_info,
                                0,
                                self.storage,
                                expert_child_visitations, expert_state_value_prediction)

    def handle_multiple_experiences(self, experiences: List, env_ids: List[int]):
        super().handle_multiple_experiences(experiences, env_ids)
        if self.data_augmnentation_fn:
            experiences, env_ids = apply_data_augmentation_to_experiences(experiences, env_ids, self.data_augmnentation_fn)
        for (o, a, r, succ_o, done, extra_info), e_i in zip(experiences, env_ids):
            # Lazily create one trajectory Storage per environment id.
            self.storages[e_i] = self.storages.get(e_i, self.init_storage(size=100))
            expert_child_visitations = extra_info['self']['child_visitations']
            expert_state_value_prediction = extra_info['self']['V']
            self._handle_experience(o, a, r, succ_o, done, extra_info,
                                    e_i,
                                    self.storages[e_i],
                                    expert_child_visitations, expert_state_value_prediction)

    def _handle_experience(self, o, a, r, succ_s, done: bool,
                           extra_info: Dict[int, Dict[str, Any]],
                           env_i: int,
                           storage: Storage,
                           expert_child_visitations: torch.FloatTensor,
                           expert_state_value_prediction: torch.FloatTensor):
        '''Processes one transition of env :param: env_i into :param: storage.'''
        self.current_episode_lengths[env_i] += 1
        # Preprocessing all variables
        o, r = self.process_environment_signals(o, r)
        normalized_visits = self._normalize_visitations_with_temperature(
            visitations=expert_child_visitations,
            temperature=(self.temperature if self.current_episode_lengths[env_i] <= self.drop_temperature_after_n_moves else 0.1)
        )

        if self.use_agent_modelling: opponent_policy, opponent_obs = self.process_extra_info(extra_info)
        else: opponent_policy, opponent_obs = {}, []

        self.update_storage(storage, o, r, done,
                            opponent_policy=opponent_policy,
                            opponent_s=opponent_obs,
                            mcts_policy=normalized_visits,
                            expert_state_value_prediction=expert_state_value_prediction)
        if done: self.handle_end_of_episode(storage, env_i)
        self.expert.handle_experience(o, a, r, succ_s, done)

    def _normalize_visitations_with_temperature(self,
                                                visitations: torch.Tensor,
                                                temperature: float):
        '''
        Artificially changing the value of :param: visitations
        via :param: temperature:
          - If temperature > 1: Increases entropy, encouraging exploration
          - If temperature == 1: No changes
          - If temperature < 1: Decreases entropy, encouraging exploitation
        Dropping temperature to 0.1 is equivalent to greedily selecting most visited action
        '''
        expert_child_visitations_with_temperature = visitations ** (1 / temperature)
        normalized_visitations = expert_child_visitations_with_temperature / expert_child_visitations_with_temperature.sum()
        return normalized_visitations.clamp(min=0., max=1.)

    def handle_end_of_episode(self, storage: Storage, env_i: int):
        self.current_episode_lengths[env_i] = 0  # We are now beginning a new episode
        self.algorithm.add_episode_trajectory(storage)
        storage.reset()
        if self.algorithm.should_train():
            self.algorithm.train(self.apprentice)
            if self.use_apprentice_in_expert:
                # We need to update the neural net in server used by MCTS
                assert self.expert.server_handler, 'NeuralNetServerHandler missing while trying to update its neural net'
                self.expert.server_handler.update_neural_net(self.apprentice)

    def process_environment_signals(self, o, r: float):
        '''Preprocesses observation and wraps the reward into a tensor.'''
        processed_s = self.state_preprocess_fn(o)
        processed_r = torch.Tensor([r]).float()
        return processed_s, processed_r

    def process_extra_info(self, extra_info: Dict[str, Any]) \
            -> Tuple[torch.Tensor, torch.Tensor]:
        '''
        Extracts (opponent policy, opponent observation) targets from
        :param: extra_info, emitting NaN placeholders when the opponent's
        information is absent from this timestep.
        '''
        # At most there is information about 1 agent
        # Because opponent modelling is only supported
        # For tasks with two agents
        assert len(extra_info) <= 2, ('There can be at most information about 2 agents'
                                      '\'Self\' and 1 other agent')
        if len(extra_info) == 1:  # If dictionary only contains info about this agent
            # First argument to `torch.full` might create an issue (might break for non 1D actions)
            processed_opponent_policy = torch.full((self.action_dim,), float('nan'))
            # Adding batch dimension
            processed_opponent_obs = torch.full(self.observation_dim, float('nan'))
            processed_opponent_obs = self.state_preprocess_fn(processed_opponent_obs)
        else:
            opponent_index = list(filter(lambda key: key != 'self',
                                         extra_info.keys()))[0]  # Not super pretty
            opponent_policy = extra_info[opponent_index][self.extra_info_key]
            processed_opponent_obs = self.state_preprocess_fn(extra_info[opponent_index]['s'])
            if self.extra_info_key == 'a':  # Observing only single actions
                processed_opponent_policy = nn.functional.one_hot(torch.LongTensor([opponent_policy]), num_classes=self.action_dim).squeeze(0)
            elif self.extra_info_key == 'probs':  # Observing full action distribution
                processed_opponent_policy = torch.FloatTensor(opponent_policy)
            else: raise RuntimeError(f'Could not process extra_info_key: {self.extra_info_key}')
        return processed_opponent_policy, processed_opponent_obs

    def update_storage(self, storage: Storage,
                       o: torch.Tensor,
                       r: torch.Tensor,
                       done: bool,
                       opponent_policy: torch.Tensor,
                       opponent_s: torch.Tensor,
                       mcts_policy: torch.Tensor,
                       expert_state_value_prediction: torch.Tensor):
        storage.add({'normalized_child_visitations': mcts_policy,
                     's': o})
        if self.use_agent_modelling:
            storage.add({'opponent_policy': opponent_policy,
                         'opponent_s': opponent_s})
        if self.average_episode_returns_with_mcts_values:
            storage.add({'V': expert_state_value_prediction})
        if done:
            # Hendrik idea:
            # Using MCTS value for current search might be better?
            if self.average_episode_returns_with_mcts_values:
                # Average all previously estimated values with episode return
                storage.V = [(value + r) / 2 for value in storage.V]
            else:
                # Use episodic return for all points
                for _ in range(len(storage.s)): storage.add({'V': r})

    def model_based_take_action(self, env: Union[gym.Env, List[gym.Env]],
                                observation, player_index: int, multi_action: bool):
        action = self.expert.model_based_take_action(env, observation,
                                                     player_index,
                                                     multi_action)
        # Expose the expert's search statistics (visitations, V) to callers.
        self.current_prediction = self.expert.current_prediction
        return action

    def model_free_take_action(self, state, legal_actions: List[int], multi_action: bool = False):
        if self.training: raise RuntimeError('ExpertIterationAgent.model_free_take_action() cannot be called when training is True')
        prediction = self.apprentice(self.state_preprocess_fn(state),
                                     legal_actions=legal_actions)
        return prediction['a']

    def start_server(self, num_connections: int):
        ''' Explain that this is needed because different MCTS experts need to send requests '''
        if num_connections == -1: num_connections = multiprocessing.cpu_count()
        self.expert.server_handler = NeuralNetServerHandler(
            num_connections=num_connections,
            net=self.apprentice,
            preprocess_fn=self.server_state_preprocess_fn
        )

    def close_server(self):
        self.expert.close_server()

    @torch.no_grad()
    def policy_fn(self, observation, legal_actions, self_player_index: int = None, requested_player_index: int = None):
        '''Apprentice policy head, used as MCTS node prior.'''
        processed_obs = self.state_preprocess_fn(observation)
        return self.apprentice(processed_obs, legal_actions=legal_actions)['probs'].squeeze(0).numpy()

    @torch.no_grad()
    def evaluation_fn(self, observation, legal_actions):
        '''Apprentice value head, used to evaluate MCTS leaf nodes.'''
        processed_obs = self.state_preprocess_fn(observation)
        return self.apprentice(processed_obs, legal_actions=legal_actions)['V'].squeeze(0).numpy()

    def clone(self):
        raise NotImplementedError('Cloning ExpertIterationAgent not supported')

    def __repr__(self):
        basic_stats = f'Name: {self.name}\nRequires access to other agents: {self.requires_acess_to_other_agents}\n'
        agent_stats = (f'Agent modelling: {self.use_agent_modelling}\n'
                       f'Use apprentice in expert: {self.use_apprentice_in_expert}\n'
                       f'Use agent modelling in mcts: {self.use_true_agent_models_in_mcts}\n'
                       f'Use learnt opponent models in mcts: {self.use_learnt_opponent_models_in_mcts}\n'
                       f'Average episode returns with MCTS values: {self.average_episode_returns_with_mcts_values}\n'
                       f'State processing fn: {self.state_preprocess_fn}\n'
                       f'Server based State processing fn: {self.server_state_preprocess_fn}'
                       )
        agent = f"Agent:\n{textwrap.indent(str(agent_stats), '    ')}\n"
        expert = f"Expert:\n{textwrap.indent(str(self.expert), '    ')}\n"
        algorithm = f"Algorithm:\n{textwrap.indent(str(self.algorithm), '    ')}"
        return basic_stats + agent + expert + algorithm

    #####
    # This is a dirty HACK but oh well...
    @staticmethod
    def opponent_aware_server_based_policy_fn(observation,
                                              legal_actions: List[int],
                                              self_player_index: int,
                                              requested_player_index: int,
                                              connection: Connection,
                                              opponent_connection: Connection) -> np.ndarray:
        # Route the request to our own server for our nodes and to the
        # opponent's (true model) server for opponent nodes.
        key = 'probs'
        target_connection = connection if requested_player_index == self_player_index else opponent_connection
        return request_prediction_from_server(
            observation, legal_actions, target_connection, key)

    @staticmethod
    def learnt_opponent_model_aware_server_based_policy_fn(observation,
                                                           legal_actions: List[int],
                                                           self_player_index: int,
                                                           requested_player_index: int,
                                                           connection: Connection) -> np.ndarray:
        # Same server; different head: our policy ('probs') for our nodes,
        # the learnt opponent-model head ('policy_0') for opponent nodes.
        key = 'probs' if requested_player_index == self_player_index else 'policy_0'
        return request_prediction_from_server(
            observation, legal_actions, connection, key)
    #
    ####
def choose_feature_extractor(task, config: Dict):
    '''
    Builds the feature-extracting torso of the apprentice network.

    :param task: Task whose observations the extractor will process
                 (currently unused; kept for signature compatibility).
    :param config: Agent configuration dictionary. Only 'CNN' is supported
                   as 'feature_extractor_arch'.
    :returns: A Convolutional2DBody built from :param: config.
    :raises ValueError: If 'feature_extractor_arch' is not 'CNN'.
    '''
    if config['feature_extractor_arch'] != 'CNN':
        # BUGFIX: the original `return`ed the ValueError instance instead of
        # raising it, silently handing callers an exception object.
        raise ValueError('Only convolutional architectures are supported for ExpertIterationAgent')
    return Convolutional2DBody(input_shape=config['preprocessed_input_dimensions'],
                               channels=config['channels'],
                               kernel_sizes=config['kernel_sizes'],
                               paddings=config['paddings'],
                               strides=config['strides'],
                               final_feature_dim=config['final_feature_dim'],
                               residual_connections=config.get('residual_connections', []),
                               use_batch_normalization=config['use_batch_normalization'])
def build_apprentice_model(task, config: Dict) -> nn.Module:
    '''
    Builds the apprentice neural network for an ExpertIterationAgent,
    dispatching on config['use_agent_modelling'] between an apprentice with
    an auxiliary opponent-modelling head and a plain actor-critic one.

    :param task: Task whose action/observation spaces shape the network.
    :param config: Agent configuration dictionary.
    :raises ValueError: If :param: task has a continuous action space.
    '''
    if task.action_type == 'Continuous':
        raise ValueError(f'Only Discrete action type tasks are supported. Task {task.name} has a Continuous action_type')
    feature_extractor = choose_feature_extractor(task, config)
    # NOTE(review): the two builders take (config, task) in different orders;
    # kept as-is for compatibility, but worth unifying upstream.
    if config['use_agent_modelling']:
        return build_apprentice_with_agent_modelling(feature_extractor, task, config)
    return build_apprentice_no_agent_modelling(feature_extractor, config, task)
def build_apprentice_no_agent_modelling(feature_extractor, config, task) -> nn.Module:
    '''
    Builds a plain actor-critic apprentice: the feature extractor followed by
    a fully connected embedding, feeding a categorical actor-critic head.
    '''
    # Hidden layer sizes for the post-extractor embedding (default: two of 64).
    hidden_units = config.get('post_feature_extractor_hidden_units', [64, 64])
    embedding = FCBody(state_dim=feature_extractor.feature_dim,
                       hidden_units=hidden_units)
    torso = SequentialBody([feature_extractor, embedding])
    gate_fn = parse_gating_fn(config.get('critic_gate_fn', None))
    return CategoricalActorCriticNet(state_dim=torso.feature_dim,
                                     action_dim=task.action_dim,
                                     critic_gate_fn=gate_fn,
                                     body=torso)
def build_apprentice_with_agent_modelling(feature_extractor, task, config):
    '''
    Builds an apprentice with an auxiliary opponent-modelling (policy
    inference) head alongside the usual actor-critic head, as in DPIQN.
    '''
    default_hidden = [64]
    inference_body = FCBody(
        feature_extractor.feature_dim,
        hidden_units=config.get(
            'post_feature_extractor_policy_inference_hidden_units',
            default_hidden
        )
    )
    ac_body = FCBody(
        feature_extractor.feature_dim,
        hidden_units=config.get(
            'post_feature_extractor_actor_critic_hidden_units',
            default_hidden
        )
    )
    # We model all agents but ourselves
    num_modelled_agents = task.num_agents - 1
    if not isinstance(task.action_dim, (int, float)):
        raise ValueError('number of actions must be an integer (1D)')
    return PolicyInferenceActorCriticNet(feature_extractor=feature_extractor,
                                         num_policies=num_modelled_agents,
                                         num_actions=task.action_dim,
                                         policy_inference_body=inference_body,
                                         actor_critic_body=ac_body)
def build_expert(task, config: Dict, expert_name: str) -> MCTSAgent:
    '''
    Builds the MCTS expert used to generate targets for the apprentice.
    PUCT selection is used when the apprentice biases the search, UCB1
    otherwise.
    '''
    selection_phase = 'puct' if config['use_apprentice_in_expert'] else 'ucb1'
    expert_config = {
        'budget': config['mcts_budget'],
        'rollout_budget': config.get('mcts_rollout_budget', 0.),
        'selection_phase': selection_phase,
        'use_dirichlet': config.get('mcts_use_dirichlet', False),
        # Exploration-factor key name depends on the selection strategy.
        f'exploration_factor_{selection_phase}': config['mcts_exploration_factor'],
        'dirichlet_alpha': config['mcts_dirichlet_alpha'],
        'dirichlet_strength': config.get('mcts_dirichlet_strength', 1.)
    }
    return build_MCTS_Agent(task, expert_config, agent_name=expert_name)
def check_parameter_validity(task: 'Task', config: Dict[str, Any]):
    ''' Checks whether :param: config is compatible with :param: task '''
    models_opponents = config.get('use_agent_modelling', False)
    # Opponent modelling requires exactly one opponent (a 2-agent task).
    if models_opponents and task.num_agents != 2:
        raise NotImplementedError('ExpertIterationAgent with agent modelling '
                                  'is only supported with tasks with 2 agents '
                                  '(one agent is this ExpertIterationAgent and the other '
                                  f'will be the opponent). Given task {task.name} '
                                  f'features {task.num_agents} agents.')
    learnt_models = config.get('use_learnt_opponent_models_in_mcts', False)
    true_models = config.get('use_true_agent_models_in_mcts', False)
    # The two opponent-model sources inside MCTS are mutually exclusive.
    if learnt_models and true_models:
        raise ValueError("Both flags 'use_true_agent_models_in_mcts' and "
                         "'use_learnt_opponent_models_in_mcts' were set, which "
                         "is conflicting. One represents "
                         "using true opponent models inside of MCTS, the other "
                         "using learnt opponent models. Read build_ExpertIteration_Agent "
                         "documentation for further info."
                         )
def generate_preprocessing_functions(config) -> Tuple[Callable, Callable]:
    """Parse optional preprocessing functions out of :param: config.

    Returns a (state_preprocess_fn, server_state_preprocess_fn) pair;
    each element is None when the corresponding key is absent.
    """
    state_fn = None
    server_fn = None
    if 'state_preprocessing_fn' in config:
        state_fn = parse_preprocessing_fn(config['state_preprocessing_fn'])
    if 'server_state_preprocessing_fn' in config:
        server_fn = parse_preprocessing_fn(
            config['server_state_preprocessing_fn'])
    return state_fn, server_fn
def build_ExpertIteration_Agent(task: 'Task',
                                config: Dict[str, Any],
                                agent_name: str = 'ExIt') -> ExpertIterationAgent:
    '''
    Builds an ExpertIterationAgent: an MCTS expert plus a neural-network
    apprentice trained on the expert's games (AlphaZero-style loop).

    TODO: Check all params to make sure they are up to date
    :param task: Environment specific configuration
    :param agent_name: String identifier for the agent
    :param config: Dict contain hyperparameters for the ExpertIterationAgent:
        Higher level params:
        - 'use_apprentice_in_expert': (Bool) whether to bias MCTS's selection
                                      phase and expansion phase with the apprentice.
                                      If False, Expert Iteration becomes the
                                      DAGGER algorithm:
                                      https://www.cs.cmu.edu/~sross1/publications/Ross-AIStats11-NoRegret.pdf
                                      If True, PUCT will be used as a selection
                                      strategy in MCTS, otherwise UCB1 will be used
        - 'use_agent_modelling': (Bool) Whether to model other agent's actions
                                 as an auxiliary task. As in DPIQN paper
        - 'use_true_agent_models_in_mcts': (Bool) Whether to use true agent models
                                           to compute priors for MCTS nodes.
        - 'use_learnt_opponent_models_in_mcts': (Bool) Whether to use learnt agent models
                                                to compute priors for MCTS nodes.
        - 'request_observed_action': Whether to observe one hot encoded actions, otherwise full policy will be requested.
                                     Only meaningful when :param: use_agent_modelling is set.
        - 'average_episode_returns_with_mcts_values': (Bool) Whether to average
                           the episode returns with Q values of MCTS' root
                           node, to serve as targets for the apprentice's
                           value head. Idea taken from: https://medium.com/oracledevs/lessons-from-alphazero-part-4-improving-the-training-target-6efba2e71628
        MCTS params:
        - 'mcts_budget': (Int) Number of iterations of the MCTS loop that will be carried
                         out before an action is selected.
        - 'mcts_rollout_budget': (Int) Number of steps to simulate during
                                 rollout_phase
        - 'mcts_exploration_factor': (Float) PUCT exploration constant
        - 'mcts_use_dirichlet': (Bool) Whether to add dirichlet noise to the
                                MCTS rootnode's action probabilities (see PUCT)
        - 'mcts_dirichlet_alpha': Parameter of Dirichlet distribution
        - 'temperature': Value by which MCTS child visitations will be
                         inversely exponentiated to (N^(1/temperature))
        - 'drop_temperature_after_n_moves': Number of moves after which
                                            temperature parameter will dropped
                                            to a very small value (around 0.01)
        (Collected) Dataset params:
        - 'initial_max_generations_in_memory': (Int) Initial number of generations to be allowed
                                               in replay buffer
        - 'increase_memory_every_n_generations': (Int) Number of iterations to elapse before increasing dataset size.
        - 'memory_increase_step': Number of extra generations to allow in the
                                  algorithm's dataset everytime the dataset's
                                  capacity increases, as dictated by
                                  :param: increase_memory_every_n_generations
        - 'final_max_generations_in_memory': (Int) Ceiling on the size of replay buffer
        - 'num_epochs_per_iteration': (Int) Training epochs to over the game dataset per iteration
        - 'games_per_iteration': (Int) Number of episodes to collect before doing a training
        - 'batch_size': (Int) Minibatch size used during training
        Neural Network params:
        - 'learning_rate': (Float) Learning rate for neural network optimizer
        - 'feature_extractor_arch': (str) Architecture for the feature extractor
            + For Convolutional2DBody:
            - 'residual_connections': List[Tuple[int, int]] Which layers should have residual skip connections
            - 'preprocessed_input_dimensions': Tuple[int] Input dimensions for each channel
            - 'channels': Tuple[int]
            - 'kernel_sizes': Tuple[int]
            - 'paddings': Tuple[int]
            - 'final_feature_dim': int. Dimensionality of the final, fully connected layer of a convolutional body
        - 'critic_gate_fn': Gating function to be applied to critic's
                            output head. Supported: ['None', 'tanh']
        - 'data_augmnentation_fn': (optional) data augmentation to apply to
                                   collected samples. NOTE: the key is
                                   misspelled ("augmnentation") but kept for
                                   backwards compatibility with existing configs.
    '''
    # Fail fast on incompatible flag/task combinations.
    check_parameter_validity(task, config)
    apprentice = build_apprentice_model(task, config)
    expert = build_expert(task, config, expert_name=f'Expert:{agent_name}')
    (state_preprocess_fn, server_state_preprocess_fn) = \
        generate_preprocessing_functions(config)
    # NOTE(review): config key 'data_augmnentation_fn' is misspelled but is
    # part of the public config interface; do not rename without migration.
    data_augmnentation_fn = parse_data_augmentation_fn(config['data_augmnentation_fn']) \
        if 'data_augmnentation_fn' in config else None
    algorithm = ExpertIterationAlgorithm(
        model_to_train=apprentice,
        batch_size=config['batch_size'],
        num_epochs_per_iteration=config['num_epochs_per_iteration'],
        learning_rate=config['learning_rate'],
        games_per_iteration=config['games_per_iteration'],
        initial_max_generations_in_memory=config['initial_max_generations_in_memory'],
        final_max_generations_in_memory=config['final_max_generations_in_memory'],
        increase_memory_every_n_generations=config['increase_memory_every_n_generations'],
        memory_increase_step=config['memory_increase_step'],
        use_agent_modelling=config['use_agent_modelling'],
        num_opponents=(task.num_agents - 1),  # We don't model ourselves
        use_cuda=config.get('use_cuda', False)
    )
    return ExpertIterationAgent(
        name=agent_name,
        algorithm=algorithm,
        expert=expert,
        apprentice=apprentice,
        use_apprentice_in_expert=config['use_apprentice_in_expert'],
        use_agent_modelling=config['use_agent_modelling'],
        use_true_agent_models_in_mcts=config['use_true_agent_models_in_mcts'],
        use_learnt_opponent_models_in_mcts=config['use_learnt_opponent_models_in_mcts'],
        request_observed_action=config.get('request_observed_action', False),
        average_episode_returns_with_mcts_values=config.get('average_episode_returns_with_mcts_values', False),
        action_dim=task.action_dim,
        observation_dim=task.observation_dim,
        num_opponents=(task.num_agents - 1),
        state_preprocess_fn=state_preprocess_fn,
        server_state_preprocess_fn=server_state_preprocess_fn,
        use_cuda=config.get('use_cuda', False),
        temperature=config.get('temperature', 1.),
        drop_temperature_after_n_moves=config.get('drop_temperature_after_n_moves', np.inf),
        data_augmnentation_fn=data_augmnentation_fn
    )
| StarcoderdataPython |
1942774 | <filename>testbed/scripts/calculate_algorand_throughput.py
#!/usr/local/bin/python3
import sys
import math


def calculate_block_parameters(block_size):
    """Convert a nominal block size (bytes) into Algorand benchmark settings.

    :param block_size: nominal block size in bytes
    :return: (size_in_txns, deadline_ms) tuple, where the txn count scales
             at 429 txns per 100000 bytes and the deadline at 250 ms per
             3521 txns (empirical calibration constants).
    """
    size_in_txns = int(block_size / 100000 * 429)
    deadline = int(size_in_txns / 3521 * 250)
    return size_in_txns, deadline


if __name__ == '__main__':
    # Usage: calculate_algorand_throughput.py <block_size_in_bytes>
    block_size = int(sys.argv[1])
    size_in_txns, deadline = calculate_block_parameters(block_size)
    print("Actual block size: {} txns, deadline: {} ms".format(size_in_txns, deadline))
| StarcoderdataPython |
1781288 | from django.contrib import admin
from .models import ImageModel
# Expose ImageModel in the Django admin site using the default ModelAdmin.
admin.site.register(ImageModel)
| StarcoderdataPython |
11367390 | <filename>pinocchio/decorator.py
"""
decorator extension for 'nose'.
Allows you to decorate functions, classes, and methods with attributes
without modifying the actual source code. Particularly useful in
conjunction with the 'attrib' extension package.
"""
import sys
err = sys.stderr
import logging
import os
from nose.plugins.base import Plugin
log = logging.getLogger(__name__)
class Decorator(Plugin):
    """Nose plugin that attaches attributes (read from a decorator file) to
    matching functions, classes and methods without modifying their source.

    Decorator file format, one entry per line:
        dotted.name: attr1, attr2=value
    """

    # High score so this plugin's want* callbacks run before other plugins
    # that may filter tests on the attributes we attach.
    score = 99999

    def __init__(self):
        Plugin.__init__(self)

    def add_options(self, parser, env=os.environ):
        """Register the --decorator-file command line option."""
        parser.add_option("--decorator-file",
                          action="store",
                          dest="decorator_file",
                          default=None,
                          help="Apply attributes in this file to matching functions, classes, and methods")

    def configure(self, options, config):
        """Set up logging verbosity; enable plugin if a decorator file was given."""
        self.conf = config

        ### configure logging
        logger = logging.getLogger(__name__)
        logger.propagate = 0
        handler = logging.StreamHandler(err)
        logger.addHandler(handler)

        lvl = logging.WARNING
        if options.verbosity >= 5:
            lvl = 0
        elif options.verbosity >= 4:
            lvl = logging.DEBUG
        elif options.verbosity >= 3:
            lvl = logging.INFO
        logger.setLevel(lvl)

        ### enable plugin & save decorator file name, if given.
        if options.decorator_file:
            self.enabled = True
            self.decorator_file = options.decorator_file

    def begin(self):
        """
        Called before any tests are run: parse the decorator file into
        the ``self.curtains`` mapping of {dotted name: [attributes]}.
        """
        filename = self.decorator_file
        curtains = {}
        # FIX: use a context manager so the file handle is always closed
        # (the original opened the file and never closed it).
        with open(filename) as fp:
            for line in fp:
                # skip empty lines or lines with comments ('#')
                line = line.strip()
                if not line or line.startswith('#'):
                    continue

                # parse attributes...
                name, attribs = line.split(':')
                name = name.strip()
                attribs = [a.strip() for a in attribs.split(',')]

                # ...and store 'em, accumulating across duplicate names.
                l = curtains.get(name, [])
                l.extend(attribs)
                curtains[name] = l

        # save the attributes in 'self.curtains'.
        self.curtains = curtains

    ######

    def wantClass(self, cls):
        """
        wantClass -- attach matching attributes to the class.
        """
        fullname = '%s.%s' % (cls.__module__, cls.__name__,)
        self._attach_attributes(fullname, cls)

        # indicate no preferences re running this test...
        return None

    def wantMethod(self, method):
        """
        wantMethod -- attach matching attributes to this method.
        """
        # 'im_class' exists on Python 2 bound methods only; fall back to
        # the Python 3 spelling via __self__.
        if hasattr(method, 'im_class'):
            klass = method.im_class.__name__
        else:
            klass = method.__self__.__class__.__name__
        fullname = '%s.%s.%s' % (method.__module__, klass, method.__name__)
        self._attach_attributes(fullname, method)

        # indicate no preference re running this test...
        return None

    def wantFunction(self, func):
        """
        wantFunction -- attach matching attributes to this function.
        """
        fullname = '%s.%s' % (func.__module__,
                              func.__name__)
        self._attach_attributes(fullname, func)

        # indicate no preferences re running this test.
        return None

    def _attach_attributes(self, fullname, obj):
        """
        Attach attributes matching 'fullname' to the object 'obj'.

        Attributes of the form "key=val" are set as strings; bare names
        are set to True.
        """
        attribs = self.curtains.get(fullname, [])
        log.info('_attach_attributes: %s, %s' % (fullname, attribs,))
        for a in attribs:
            try:
                key, val = a.split("=")
            except ValueError:
                key, val = (a, True)

            try:
                obj.__dict__[key] = val
            except TypeError:
                # Some objects (e.g. classes with mappingproxy dicts)
                # reject direct __dict__ assignment; use setattr instead.
                setattr(obj, key, val)
3279424 | <reponame>GreyElaina/TransQualityControl
#!/usr/bin/python3
import src.branch_error_check.branch_main
import src.duplicate_key_check.duplicate_main
import src.format_char_check.format_main

# Run all three translation quality checks over the project's asset files
# and print each report to stdout.
if __name__ == '__main__':
    branch_json = src.branch_error_check.branch_main.branch_check('./project/assets')
    duplicate_json = src.duplicate_key_check.duplicate_main.duplicate_main('./project/assets')
    format_json = src.format_char_check.format_main.format_main('./project/assets')
    print(branch_json)
    print(duplicate_json)
    print(format_json)
| StarcoderdataPython |
9627303 | <reponame>therealAJ/CoachMe<filename>server/server.py
# NOTE(review): this module is Python 2 code -- see the bare `print str(...)`
# statement below; it will not parse under Python 3.
from flask import Flask, request, render_template
from predict import makeprediction, makegridprediction

app = Flask(__name__)

@app.route('/')
def home():
    # Landing page with the shot-input form.
    return render_template('home.html')

@app.route('/prediction', methods=["POST"])
def startPrediction():
    # Read form fields: time_remaining, loc_x, loc_y (strings from the form).
    time_remaining = request.form["time_remaining"]
    loc_x = request.form["loc_x"]
    loc_y = request.form["loc_y"]
    # Delegate to the model wrapper and return the raw prediction as text.
    result = makeprediction(time_remaining, loc_x, loc_y)
    print str(result)
    return str(result)
    #return render_template('second.html', combined_shot_type = combined_shot_type)

@app.route('/heatmap', methods=["POST"])
def heatMap():
    # Same inputs as /prediction, but returns a grid of predictions
    # suitable for rendering a heat map.
    time_remaining = request.form["time_remaining"]
    loc_x = request.form["loc_x"]
    loc_y = request.form["loc_y"]
    result = makegridprediction(time_remaining, loc_x, loc_y)
    print(result)
    return str(result)

if __name__ == '__main__':
    app.run(debug=True)
3465941 | <filename>model/models.py
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
class CRNet(nn.Module):
    """
    CRNet: a pretrained ResNet-18 backbone feeding two heads -- a scalar
    regressor and a 5-way classifier -- on the pooled backbone features.
    """
    def __init__(self):
        super(CRNet, self).__init__()
        # Preprocessing metadata (per-channel mean/std and expected input size).
        # NOTE(review): values look like VGGFace-style BGR means -- confirm
        # against the training pipeline.
        self.meta = {'mean': [131.45376586914062, 103.98748016357422, 91.46234893798828],
                     'std': [1, 1, 1],
                     'imageSize': [224, 224, 3]}
        model_ft = models.resnet18(pretrained=True)
        self.model = model_ft
        # Both heads size their first layer from model_ft.fc.in_features.
        self.regressor = Regressor(model_ft)
        self.classifier = Classifier(model_ft, num_cls=5)

    def forward(self, x):
        # Run every backbone layer except the final 'fc' classifier,
        # then flatten and feed both heads.
        for name, module in self.model.named_children():
            if name != 'fc':
                x = module(x)
        reg_out = self.regressor.forward(x.view(-1, self.num_flat_features(x)))
        cls_out = self.classifier.forward(x.view(-1, self.num_flat_features(x)))
        # Returns (regression_output, classification_logits).
        return reg_out, cls_out

    def num_flat_features(self, x):
        """Number of features per sample: product of all non-batch dims."""
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
class Regressor(nn.Module):
    """Three-layer MLP head producing one scalar per sample.

    The first layer is sized from ``model.fc.in_features`` so the head
    plugs directly onto the backbone's pooled features. Dropout (p=0.5)
    is applied after each hidden layer during training only.
    """

    def __init__(self, model):
        super(Regressor, self).__init__()
        in_features = model.fc.in_features
        self.fc1 = nn.Linear(in_features, 256)
        self.fc2 = nn.Linear(256, 64)
        self.fc3 = nn.Linear(64, 1)

    def forward(self, x):
        hidden = F.dropout(F.relu(self.fc1(x)), p=0.5, training=self.training)
        hidden = F.dropout(F.relu(self.fc2(hidden)), p=0.5, training=self.training)
        return self.fc3(hidden)

    def num_flat_features(self, x):
        """Number of features per sample: product of all non-batch dims."""
        total = 1
        for dim in x.size()[1:]:
            total *= dim
        return total
class Classifier(nn.Module):
    """Three-layer MLP head producing ``num_cls`` logits per sample.

    The first layer is sized from ``model.fc.in_features``. Dropout (p=0.5)
    follows each hidden layer during training only.
    """

    def __init__(self, model, num_cls=5):
        super(Classifier, self).__init__()
        in_features = model.fc.in_features
        self.fc1 = nn.Linear(in_features, 256)
        self.fc2 = nn.Linear(256, 64)
        self.fc3 = nn.Linear(64, num_cls)

    def forward(self, x):
        hidden = F.dropout(F.relu(self.fc1(x)), p=0.5, training=self.training)
        hidden = F.dropout(F.relu(self.fc2(hidden)), p=0.5, training=self.training)
        return self.fc3(hidden)

    def num_flat_features(self, x):
        """Number of features per sample: product of all non-batch dims."""
        total = 1
        for dim in x.size()[1:]:
            total *= dim
        return total
| StarcoderdataPython |
3275459 | <gh_stars>0
#!/usr/bin/env python
#
# rm_pycache.py
#
from __future__ import print_function
import argparse
import os
import sys
import re
import shutil
class CleanPyCacheApp(object):
    """Recursively remove ``__pycache__`` directories and ``*.pyc`` files
    from one or more python source trees."""

    def __init__(self):
        # When True, each removal is echoed to stdout before it happens.
        self._verbose = True

    def main(self):
        """Entry point: parse sys.argv and clean every requested directory."""
        target_dirs = self._parse_command_line(sys.argv[1:])
        for target in target_dirs:
            self._remove_pycache(target)

    def _parse_command_line(self, argv):
        """Validate argv and return the list of directories as absolute paths.

        Exits with status 1 if any argument is missing or not a directory.
        """
        parser = argparse.ArgumentParser()
        parser.add_argument("-q", action="store_true", dest="quiet", default=False,
                            help="suppress messages which cache file will be removed.")
        parser.add_argument("python_dirs", action="store", nargs='+',
                            help="one or more top-level python directories to search for cache files recursively.")
        args = parser.parse_args(argv)

        if args.quiet:
            self._verbose = False

        resolved = []
        for dname in args.python_dirs:
            if not os.path.exists(dname):
                print("Path %s does not exist." % dname)
                sys.exit(1)
            if not os.path.isdir(dname):
                print("Path %s is not a directory." % dname)
                sys.exit(1)
            resolved.append(dname if os.path.isabs(dname)
                            else os.path.abspath(dname))
        return resolved

    def _remove_pycache(self, py_dir_root):
        """Walk *py_dir_root*, deleting __pycache__ dirs and .pyc files."""
        os.chdir(py_dir_root)
        pyc_pattern = re.compile(r'.+\.pyc$', re.IGNORECASE)
        for root, dirs, files in os.walk(py_dir_root):
            if '__pycache__' in dirs:
                cache_dir = os.path.join(root, '__pycache__')
                if self._verbose:
                    print("rm -rf %s" % cache_dir)
                shutil.rmtree(cache_dir)
                # Prune so os.walk does not descend into the removed dir.
                dirs.remove('__pycache__')
            for fname in files:
                if pyc_pattern.match(fname):
                    target = os.path.join(root, fname)
                    if self._verbose:
                        print("rm %s" % target)
                    os.remove(target)


if __name__ == '__main__':
    app = CleanPyCacheApp()
    app.main()
| StarcoderdataPython |
3229323 | from openbabel import OBMol, OBConversion
def convert_str(str_data, in_format, out_format):
    """Convert a chemical structure string between formats via Open Babel.

    :return: (converted_string, output_mime_type) tuple.
    """
    conv = OBConversion()
    conv.SetInFormat(in_format)
    conv.SetOutFormat(out_format)
    molecule = OBMol()
    conv.ReadString(molecule, str_data)
    return (conv.WriteString(molecule), conv.GetOutFormat().GetMIMEType())
def to_inchi(str_data, in_format):
    """Convert a structure string to (InChI, InChIKey) via Open Babel,
    round-tripping through XYZ first."""
    mol = OBMol()
    conv = OBConversion()
    conv.SetInFormat(in_format)
    # Hackish for now, convert to xyz first...
    conv.SetOutFormat('xyz')
    conv.ReadString(mol, str_data)
    xyz = conv.WriteString(mol)

    # Now convert to inchi and inchikey.
    mol = OBMol()
    conv.SetInFormat('xyz')
    conv.ReadString(mol, xyz)
    conv.SetOutFormat('inchi')
    inchi = conv.WriteString(mol).rstrip()
    # The "K" output option makes the inchi writer emit an InChIKey
    # on the next WriteString call.
    conv.SetOptions("K", conv.OUTOPTIONS)
    inchikey = conv.WriteString(mol).rstrip()

    return (inchi, inchikey)
def atom_count(str_data, in_format):
    """Return the number of atoms in the structure described by *str_data*."""
    conv = OBConversion()
    conv.SetInFormat(in_format)
    molecule = OBMol()
    conv.ReadString(molecule, str_data)
    return molecule.NumAtoms()
11277475 | from .constants import config, Ingredient
from pyrebase import pyrebase
from .database import Database
from psycopg2 import Error
firebase = pyrebase.initialize_app(config)
db = firebase.database() # Get a reference to the database service
class BagOfIngredients:
    """
    BagOfIngredient class. Creates a BagOfIngredient object that can be used to
    add ingredients, update ingredients, and delete ingredients for a certain
    user.

    SECURITY NOTE(review): every query in this class builds SQL by string
    concatenation/format with caller-supplied values -- an SQL injection
    risk. Prefer parameterized queries if the Database wrapper supports them.
    """
    def __init__(self, username):
        """
        Constructor for BagOfIngredients. Takes in a username and setups the
        bag.
        :param username: String. Username (email) of the User.
        """
        # Username is stored pre-quoted for direct interpolation into SQL.
        self.username = "'" + username + "'"  # use a session variable
        self.ingredients = []
        self.number_of_ingredients = 0
        self.boi = None
        self.db = Database()
        self.db.open()

    def get_boi(self):
        """
        Queries the database for the user's bag of ingredients and returns a
        list of ingredients
        :return: list(Ingredient). A list of ingredient objects.
        """
        self.ingredients = self.db.get(
            "BagOfIngredients", "*", where="user_id=" + self.username
        )
        return self.ingredients

    def push_boi(self, ing: Ingredient):
        """
        Function used to add ingredient to database.
        :param ing: Ingredient. Ingredient to be added
        :return: Boolean. True if ingredient was added, false otherwise
        """
        columns = "user_id, ingredient, ingredient_name, amount, unit"
        # Values are interpolated directly into the row string (see class
        # security note above).
        data = "{0},'{1}','{2}',{3},'{4}'".format(
            self.username, ing.ingredient_full, ing.ingredient, ing.amount,
            ing.units
        )
        push_success = self.db.write("BagOfIngredients", columns, data)
        # Keep the in-memory view in sync with the database write.
        self.number_of_ingredients += 1
        self.ingredients.append(ing)
        return push_success

    def delete_ingredient(self, ingredient_name):
        """
        Function used to delete a single ingredient from the ingredient list.
        :param ingredient_name: String. Name of ingredient to be deleted
                                (expected pre-quoted for SQL).
        :return: Boolean. True if ingredient was deleted, false otherwise.
        """
        # Deletes one ingredient
        try:
            delete_query = (
                "DELETE FROM bagofingredients WHERE user_id="
                + self.username
                + " AND ingredient_name="
                + ingredient_name
                + ";"
            )
            print(delete_query)
            check = self.db.query(delete_query)
        # NOTE(review): (Error, Exception) swallows every exception type,
        # not just database errors.
        except (Error, Exception):
            print("ERROR OCCURED IN DELETION!")
            return False
        return check

    def update_ingredient(self, ingredient_name, new_quantity):
        """
        Function to update the quantity of a certain ingredient.
        :param ingredient_name: String. Name of ingredient to be updated
                                (expected pre-quoted for SQL).
        :param new_quantity: String. New quantity of ingredient.
        :return: Boolean. True if ingredient was updated, false otherwise.
        """
        # Updates ingredient with new quantity
        try:
            # Fetch the unit so the human-readable 'ingredient' column
            # ("<amount> <unit> <name>") can be rebuilt.
            unit = self.db.get(table="bagofingredients", columns="unit",
                               where=("user_id = {} AND ingredient_name = "
                                      "{}").format(self.username,
                                                   ingredient_name))
            full = new_quantity.replace("'", "") + " " + unit[0][0] + " " + \
                ingredient_name.replace("'", "")
            update_query = ("UPDATE bagofingredients SET amount = {}, ingredie"
                            "nt = '{}' WHERE user_id = {} AND ingredient_name"
                            "= {}").format(new_quantity, full, self.username,
                                           ingredient_name)
            check = self.db.query(update_query)
        except (Error, Exception):
            print("ERROR OCCURED IN UPDATING!")
            return False
        return check
18243 | <reponame>esgomezm/spec-bioimage-io
import shutil
import traceback
from pathlib import Path
from pprint import pprint
from typing import List, Optional, Union
from marshmallow import ValidationError
from bioimageio.spec import export_resource_package, load_raw_resource_description
from bioimageio.spec.shared.raw_nodes import URI
from bioimageio.spec.shared.utils import resolve_uri
def package(
    rdf_source: Union[Path, str, URI, dict],
    path: Path = Path() / "{src_name}-package.zip",
    update_format: bool = False,
    weights_priority_order: Optional[List[str]] = None,
    verbose: bool = False,
) -> int:
    """Package a BioImage.IO resource described by a BioImage.IO Resource Description File (RDF).

    :param rdf_source: RDF as path/URI/string or an already-parsed dict
    :param path: output zip path; '{src_name}' is filled with the RDF stem
    :param update_format: also update the RDF to the current format version
    :param weights_priority_order: preferred model weight formats, in order
    :param verbose: print tracebacks on failure
    :return: 0 on success, non-zero error code otherwise (CLI-style).
    """
    # Validate first; refuse to package an invalid RDF.
    code = validate(rdf_source, update_format=update_format, update_format_inner=update_format, verbose=verbose)
    source_name = rdf_source.get("name") if isinstance(rdf_source, dict) else rdf_source
    if code:
        print(f"Cannot export invalid BioImage.IO RDF {source_name}")
        return code

    try:
        tmp_package_path = export_resource_package(
            rdf_source, update_to_current_format=update_format, weights_priority_order=weights_priority_order
        )
    except Exception as e:
        print(f"Failed to package {source_name} due to: {e}")
        if verbose:
            traceback.print_exc()
        return 1

    try:
        # Move the temporary package into place, substituting the RDF's
        # file stem into the '{src_name}' placeholder of the target name.
        rdf_local_source = resolve_uri(rdf_source)
        path = path.with_name(path.name.format(src_name=rdf_local_source.stem))
        shutil.move(tmp_package_path, path)
    except Exception as e:
        print(f"Failed to move package from {tmp_package_path} to {path} due to: {e}")
        if verbose:
            traceback.print_exc()
        return 1

    print(f"exported bioimageio package from {source_name} to {path}")
    return 0
def validate(
    rdf_source: Union[Path, str, URI, dict],
    update_format: bool = False,
    update_format_inner: bool = None,
    verbose: bool = False,
) -> int:
    """Validate a BioImage.IO Resource Description File (RDF).

    Collections are validated recursively: each inner resource across the
    known categories is validated and failures are counted.

    :param rdf_source: RDF as path/URI/string or an already-parsed dict
    :param update_format: update the top-level RDF to the current format
    :param update_format_inner: same for nested resources; defaults to
                                :param: update_format when None
    :param verbose: print tracebacks on unexpected errors
    :return: 0 when valid, otherwise the number of failures encountered.
    """
    if update_format_inner is None:
        update_format_inner = update_format

    source_name = rdf_source.get("name") if isinstance(rdf_source, dict) else rdf_source
    try:
        raw_rd = load_raw_resource_description(rdf_source, update_to_current_format=update_format)
    except ValidationError as e:
        # Schema violations: print the normalized marshmallow messages.
        print(f"Invalid {source_name}:")
        pprint(e.normalized_messages())
        return 1
    except Exception as e:
        # Anything else (I/O, parsing, ...) counts as a validation failure.
        print(f"Could not validate {source_name}:")
        pprint(e)
        if verbose:
            traceback.print_exc()
        return 1

    code = 0
    if raw_rd.type == "collection":
        for inner_category in ["application", "collection", "dataset", "model", "notebook"]:
            for inner in getattr(raw_rd, inner_category) or []:
                try:
                    inner_source = inner.source
                except Exception as e:
                    pprint(e)
                    code += 1
                else:
                    # Recurse with the inner-format flag for both levels.
                    code += validate(inner_source, update_format_inner, update_format_inner, verbose)

        if code:
            print(f"Found invalid RDFs in collection {source_name}.")

    if not code:
        print(f"successfully verified {raw_rd.type} {source_name}")
    return code
| StarcoderdataPython |
5101267 | <filename>utils/jenkinshelper.py
'''
helper functions for jenkins automatic processes
'''
import datetime
import os
import ssl
import requests
from blazarclient import exception as blazarexception
import chi
from croniter import croniter
from dateutil import tz
import json
import traceback
from urllib import parse as urlparse
import xml.etree.ElementTree as et
JENKINS_JOB_CONFIG_FILE = 'jobs/{job_name}/config.xml'
JENKINS_SERVICE_ACCOUNT_CREDENTIAL_FILE = 'Chameleon-{site}-service-account.sh'
JENKINS_SERVICE_USER_CREDENTIAL_FILE = 'Chameleon-jenkins-service-user-credentials.json'
# JENKINS_URL = 'https://{username}:{password}@jenkins.chameleoncloud.org'
JENKINS_URL = 'http://{username}:{password}@127.0.0.1:8080'
JENKINS_RELOAD_JOB_URL = JENKINS_URL + '/view/appliances/job/{job_name}/reload'
ADVANCED_RESERVATION_MAX_DAY = 15
SITE_KEY_NAME_MAP = {'uc': 'default',
'tacc': 'jenkins'}
BLAZAR_TIME_FORMAT = '%Y-%m-%d %H:%M'
def update_env_variables_from_file(file):
    """Load ``export KEY=VALUE`` lines from a shell script into os.environ.

    Comment lines and lines without an ``export`` statement are ignored;
    double quotes are stripped from values.

    :param file: path of the shell script / credential file to parse
    """
    with open(file, 'r') as f:
        for line in f:
            line = line.strip()
            # FIX: strip before the comment check so indented comment
            # lines are skipped too (the original tested the raw line).
            if line.startswith('#'):
                continue
            if 'export' not in line:
                continue
            key, value = line.replace('export ', '', 1).split('=', 1)
            os.environ[key.strip()] = value.replace('"', '')
def reserve_resource(booking_site, node_type, lease_name_prefix,
                     job_name, job_config_file, lease_duration_in_hour,
                     searching_feq_in_min, exec_command,
                     jenkins_location, node_count=1):
    """Reserve Chameleon hardware via Blazar and schedule a Jenkins build
    to run shortly after the lease starts.

    :param booking_site: site name, used only in log/error messages
    :param node_type: Blazar '$node_type' resource property to match
    :param lease_name_prefix: prefix for the Blazar lease name
    :param job_name: Jenkins job whose config is rewritten and reloaded
    :param job_config_file: path to the job's config.xml on disk
    :param lease_duration_in_hour: lease length
    :param searching_feq_in_min: step between candidate start times
    :param exec_command: shell command template with a {lease_id} slot
    :param jenkins_location: directory holding the Jenkins credential file
    :param node_count: number of hosts to reserve
    """
    now = datetime.datetime.now(tz=tz.tzutc())
    # check if there already exists a scheduled Jenkins build
    config_xml_tree = et.parse(job_config_file)
    cron_time = None
    for t in config_xml_tree.getroot().findall('triggers'):
        for tt in t.findall('hudson.triggers.TimerTrigger'):
            for s in tt.findall('spec'):
                cron_time = s.text
    if cron_time:
        # NOTE(review): 'iter' shadows the builtin of the same name.
        iter = croniter(cron_time, now)
        if iter.get_next(datetime.datetime) <= now + datetime.timedelta(
                days=ADVANCED_RESERVATION_MAX_DAY):
            print('A reservation exists and waits for executing; '
                  'no need to reset.')
            return
    # try booking every `searching_feq_in_min` minutes, for at most
    # ADVANCED_RESERVATION_MAX_DAY days into the future
    blazarclient = chi.blazar()
    reservation_args = {'name': lease_name_prefix + job_name,
                        'start': None,
                        'end': None,
                        'reservations': [
                            {'resource_type': 'physical:host',
                             'resource_properties': json.dumps(
                                 ['=', '$node_type', node_type]
                             ),
                             'hypervisor_properties': '',
                             'min': node_count,
                             'max': node_count}],
                        'events': []}
    start = now + datetime.timedelta(seconds=70)
    max_try_end = start + datetime.timedelta(days=ADVANCED_RESERVATION_MAX_DAY)
    lease = None
    while start < max_try_end:
        reservation_args['start'] = start.strftime(BLAZAR_TIME_FORMAT)
        reservation_args['end'] = (start + datetime.timedelta(
            hours=lease_duration_in_hour)).strftime(BLAZAR_TIME_FORMAT)
        try:
            lease = blazarclient.lease.create(**reservation_args)
            break
        except blazarexception.BlazarClientException as bce:
            # Capacity errors: slide the window forward and retry.
            # Anything else is unexpected and re-raised.
            if 'Not enough hosts available' in str(bce):
                start = start + \
                    datetime.timedelta(minutes=searching_feq_in_min)
            else:
                traceback.print_exc()
                raise bce
    if lease:
        lease_id = lease['id']
        # release will start 10 minutes later (cron spec is written in
        # the Jenkins server's America/Chicago local time)
        release_start_time = (
            start + datetime.timedelta(minutes=10)).astimezone(
            tz.gettz('America/Chicago'))
        cron_time = [release_start_time.minute, release_start_time.hour,
                     release_start_time.day, release_start_time.month, '*']
        # schedule Jenkins test: replace any existing trigger with a
        # one-shot TimerTrigger at the computed time
        for t in config_xml_tree.getroot().findall('triggers'):
            for child in t:
                t.remove(child)
            trigger_element = et.Element('hudson.triggers.TimerTrigger')
            trigger_spec_element = et.Element('spec')
            trigger_spec_element.text = ' '.join(str(x) for x in cron_time)
            trigger_element.append(trigger_spec_element)
            t.append(trigger_element)
        # replace lease id in the job's shell build step
        for b in config_xml_tree.getroot().findall('builders'):
            for shell in b.findall('hudson.tasks.Shell'):
                for command in shell.findall('command'):
                    command.text = exec_command.format(lease_id=lease_id)
        config_xml_tree.write(job_config_file)
        # jenkins reload configuration from disk
        # we have CSRF protection, so we need to get crumb token first before reload
        # crumbs are only valid within a created web session, see
        # https://github.com/spinnaker/spinnaker/issues/2067 and
        # https://github.com/spinnaker/spinnaker.github.io/pull/1512
        service_user_cred = {}
        with open(os.path.join(jenkins_location,
                               JENKINS_SERVICE_USER_CREDENTIAL_FILE)
                  ) as cred_json:
            service_user_cred = json.load(cred_json)
        jenkins_url = JENKINS_URL.format(
            username=service_user_cred['USERNAME'],
            password=urlparse.quote(service_user_cred['PASSWORD']))
        sess = requests.Session()
        crumb = sess.get(
            '{jenkins_url}/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)'
            .format(jenkins_url=jenkins_url)).text
        headers = {crumb.split(':')[0]: crumb.split(':')[1]}
        r = sess.post(JENKINS_RELOAD_JOB_URL.format(
            username=service_user_cred['USERNAME'],
            password=urlparse.quote(
                service_user_cred['PASSWORD']),
            job_name=job_name),
            headers=headers)
        if r.status_code != 200:
            raise RuntimeError('Lease created with id {} at site {}, '
                               'but failed to reload Jenkins page.'
                               .format(lease_id, booking_site))
        print('Lease created with id {} at site {} and will start on {} '
              '(task will start 10 minutes later than lease start time.)'
              .format(lease_id, booking_site, start))
    else:
        raise RuntimeError('Reserve resource {node_type} on {site} failed!'
                           .format(node_type=node_type, site=booking_site))
316596 | import pywikibot, re, os, requests, json
from datetime import date, datetime, timedelta
#from customFuncs import basic_petscan
site = pywikibot.Site("lv", "wikipedia")
site.login()
site.get_tokens('edit')
def do_api_req(wikipedia, title, cont=''):
    """Fetch a page's revision history (newest first) from the MediaWiki API.

    :param wikipedia: db-name style wiki id (e.g. 'lvwiki'); 'wiki' is
                      stripped and underscores become hyphens to form the
                      subdomain
    :param title: page title to query
    :param cont: optional 'rvcontinue' token for paging through history
    :return: decoded JSON API response as a dict
    """
    params = {
        "action": "query",
        "format": "json",
        "prop": "revisions",
        "titles": title,
        "rvprop": "timestamp|comment|user|ids|content",
        "rvlimit": "max",
        "rvdir": "older"
    }
    wikipedia = wikipedia.replace('wiki', '').replace('_', '-')
    if cont != '':
        params.update({"rvcontinue": cont})
    r = requests.get('https://{}.wikipedia.org/w/api.php?'.format(wikipedia),
                     params=params)
    r.encoding = 'utf-8'
    # FIX: parse the body as JSON instead of eval()-ing it. eval() on an
    # HTTP response is a remote-code-execution risk and breaks on JSON
    # literals such as true/false/null.
    return r.json()
#
def put_notif(title, user_orig, start_date, last_edit_obj):
    """Post a talk-page reminder (in Latvian) pinging the user who added the
    {{inuse}} template, unless a reminder was already posted.

    :param title: article title; notification goes to 'Diskusija:<title>'
    :param user_orig: username to ping
    :param start_date: datetime when the template was added
    :param last_edit_obj: datetime of the user's last edit to the page
    :return: 0 when a reminder already exists (nothing posted), else None
    """
    pagetosave = pywikibot.Page(site, 'Diskusija:'+title)
    pgtext = pagetosave.text
    # Skip if the talk page already contains the reminder phrase.
    if 'pievienojis šai lapai veidni' in pgtext: return 0
    cur_date = datetime.now()
    text = """{{{{ping|{}}}}} Tu esi pievienojis šai lapai veidni {{{{tl|inuse}}}} un pēdējā laikā neesi aktīvi darbojies: veidne ielikta pirms {} dienām un pēdējo labojumu esi veicis pirms {} dienām. Lūdzu izvērtē, vai Tu tiešām '''aktīvi''' strādā pie šīs lapas un tai ir tiešām nepieciešama {{{{tl|inuse}}}} veidne. Ja ne, tad lūgums lapu uzlabot un noņemt veidni. Paldies! --~~~~""".format(
        user_orig, (cur_date-start_date).days, (cur_date-last_edit_obj).days
    )#user_orig,
    # Append a new talk-page section via the MediaWiki edit API.
    r = pywikibot.data.api.Request(site=site, action='edit', format='json', bot=0, title='Diskusija:'+title, section='new', sectiontitle='Inuse veidne', text=text,token=site.tokens['edit'], summary='inuse veidnes lietojums').submit()
#
inuseregex = '{{\s*(template:|veidne:)?\s*(inuse|under[_ ]construction|labošanā|in[_ ]use|inuseuntil|lietošanā|rediģēšanā)\s*}}'#|ilgstošā[_ ]labošanā|ilgstošā[_ ]lietošanā|ilgstošā[_ ]rediģēšanā|inuse2|long[_ ]inuse|underconstruction
def parse_one_article(title, file):
    """Check whether the user who added an {{inuse}}-style template to
    *title* has gone inactive, and post a talk-page reminder if so.

    :param file: MediaWiki API revisions response for the article
                 (revisions ordered newest first)

    A reminder is posted when the template was added more than 10 days ago
    and the adding user has not edited the page for more than 7 days.
    """
    file = file['query']['pages']
    theid = list(file.keys())[0]
    # Walk revisions newest-to-oldest: the oldest revision in the
    # unbroken run that still contains the template is the one that
    # introduced it.
    firstRevWithTpl = None
    for rev in file[theid]['revisions']:
        if not re.search(inuseregex, rev['*'], re.I):
            break
        firstRevWithTpl = rev
    if firstRevWithTpl is None:
        # Template not found in the fetched revisions -- nothing to do.
        # (The original code would crash with a TypeError here.)
        return
    date_from = firstRevWithTpl['timestamp']
    user_orig = firstRevWithTpl['user']
    # Most recent edit on this page by the user who added the template.
    last_edit = ''
    for rev in file[theid]['revisions']:
        if rev['user'] == user_orig:
            last_edit = rev['timestamp']
            break
    cur_date = datetime.now()
    start_date = datetime.strptime(date_from, '%Y-%m-%dT%H:%M:%SZ')
    last_edit_obj = datetime.strptime(last_edit, '%Y-%m-%dT%H:%M:%SZ')
    print({'user_last': user_orig, 'date_first_edit': date_from,
           'date_last_edit_by_user': last_edit,
           'diff': (cur_date-start_date).days,
           'diff2': (cur_date-last_edit_obj).days})
    if (cur_date-start_date).days > 10 and (cur_date-last_edit_obj).days > 7:
        print('larger, will put notif')
        put_notif(title, user_orig, start_date, last_edit_obj)
#
def getAllPages():
    """Return titles (space->underscore) of all members of the Latvian
    Wikipedia category 'pages currently being reworked'."""
    params = {
        "action": "query",
        "format": "json",
        "list": "categorymembers",
        "utf8": 1,
        "formatversion": "2",
        "cmtitle": "Kategorija:Lapas, ko šobrīd pārstrādā",
        "cmprop": "title",
        "cmlimit": "max"
    }
    wikipedia = 'lv'
    r = requests.get('https://{}.wikipedia.org/w/api.php?'.format(wikipedia),params = params)
    r.encoding = 'utf-8'
    json_data = json.loads(r.text)['query']['categorymembers']
    # Underscored titles match the format used elsewhere in this script.
    json_data = [f['title'].replace(' ','_') for f in json_data]
    return json_data
def main():
    """Fetch every article in the 'being reworked' category and check its
    {{inuse}} template usage, posting reminders where appropriate."""
    # Latvian bird species list - ignore for now
    #listarticles = basic_petscan('4534095')
    #listarticles = [f['title'] for f in listarticles]
    listarticles = getAllPages()
    for onearticle in listarticles:
        #if onearticle=='Rokenrola_slavas_zāle': continue
        apivesture = do_api_req('lvwiki',onearticle)
        #rchanges = apivesture['query']["pages"]
        #theid = list(rchanges.keys())
        print('*'*60)
        pywikibot.output(onearticle)
        parse_one_article(onearticle,apivesture)
    #
# Runs immediately on import/execution (no __main__ guard in the original).
main()
| StarcoderdataPython |
9628825 | from django.db import models
from django_analyses.models.input.definitions.input_definitions import \
InputDefinitions
from django_analyses.models.input.definitions.number_input_definition import \
NumberInputDefinition
from django_analyses.models.input.types.float_input import FloatInput
class FloatInputDefinition(NumberInputDefinition):
    """Input definition for floating point number parameters."""

    # Optional bounds and default value; all nullable so a definition may
    # leave any of them unconstrained.
    min_value = models.FloatField(blank=True, null=True)
    max_value = models.FloatField(blank=True, null=True)
    default = models.FloatField(blank=True, null=True)

    # Concrete input model instantiated for values of this definition.
    input_class = FloatInput

    def get_type(self) -> InputDefinitions:
        """Return the enum member identifying this definition as float-typed."""
        return InputDefinitions.FLT
| StarcoderdataPython |
5123396 | <reponame>seroanlph/BinnedFit
#!/usr/bin/python
import iminuit as imin
import numpy as np
import inspect
import sys
from os import system
from scipy.stats import kstest
class UnbinnedLLH():
    """Unbinned maximum-likelihood fit of a probability model to raw data.

    The model is called as ``model(x, *params)`` and must return the
    probability (density) of each data point; the fit minimizes
    ``-2 * sum(log(model(x, *params)))`` with iminuit.
    """

    def __init__(self, model, x, start):
        """
        :param model: callable(x, *params) returning per-point probabilities
        :param x: data points (list, ndarray or masked array; masked entries
            are dropped via ``compressed()``)
        :param start: initial parameter guess — a dict of name -> value, or a
            sequence whose names are inferred from the model signature
        """
        if np.ma.isMaskedArray(x):
            self.x = x.compressed()
        elif isinstance(x, list):
            # Fix: the original used `type(x) == list`, which rejects list
            # subclasses; isinstance is the correct check.
            self.x = np.array(x)
        else:
            self.x = x
        self.model = model
        if isinstance(start, dict):
            self.par0 = start
        else:
            # Infer parameter names from the model signature, skipping the
            # first argument (the data).
            self.par0 = dict(
                zip(inspect.getfullargspec(self.model).args[1:], start))

    def likelihood(self, *params):
        """Return -2 log L for the given parameter values."""
        return -2 * np.log(self.model(self.x, *params)).sum()

    def run(self, verbose=True):
        """
        Run the fit, returns the minimized minimizer which stores the minimum and
        the values.
        """
        self.m = imin.Minuit(self.likelihood,
                             **self.par0,
                             name=self.par0.keys())
        self.m.print_level = int(verbose) * 2
        # errordef = 1 corresponds to a -2logL / chi-square style statistic.
        self.m.errordef = 1
        self.m.migrad()
        self.m.hesse()
        return self.m

    def goodness_of_fit(self):
        """Kolmogorov-Smirnov p-value of the data against the fitted model."""
        ks_ts, p = kstest(self.x, self.model, args=self.m.values)
        return (p)
class LLHFit(UnbinnedLLH):
    """
    General template for binned likelihood fitting.
    Use it only if you have to implement your own edge case -
    otherwise GeneralLLHFit is probably what you're looking for.
    """

    def __init__(self, model, x, n_meas, start):
        """
        Read in model and data to fit.
        -----------
        parameters:
        model: function
            Model that predicts n_meas. Should take the data as an numpy array
            as first argument and arbitrary *params after that.
        x: numpy array
            The data used to create a prediction which is then compared to n_meas
        n_meas: numpy array with dtype int
            number of entries in the histogram, which is used to fit.
        start: array or dict:
            Initial guess on the parameters to fit. If an array is provided,
            the names of the parameters are tried to be inferred from the model.
            Otherwise a dictionary can be used to provide names for the parameter.
        """
        # Keep x and n_meas aligned bin-by-bin by combining their masks.
        if np.ma.isMaskedArray(x) and np.ma.isMaskedArray(n_meas):
            or_mask = np.ma.mask_or(n_meas.mask, x.mask)
            # NOTE(review): this branch stores n_meas as a *masked* array while
            # the branches below store a compressed/plain array — confirm that
            # subclasses handle both representations consistently.
            self.n_meas = np.ma.masked_where(or_mask, n_meas)
            UnbinnedLLH.__init__(self, model, np.ma.masked_where(or_mask, x),
                                 start)
        elif np.ma.isMaskedArray(n_meas):
            self.n_meas = n_meas.compressed()
            # Propagate the n_meas mask onto x so the same bins are dropped.
            UnbinnedLLH.__init__(self, model,
                                 np.ma.masked_where(n_meas.mask, x), start)
        elif np.ma.isMaskedArray(x):
            # Propagate the x mask onto n_meas, then drop the masked bins.
            self.n_meas = np.ma.masked_where(x.mask, n_meas).compressed()
            UnbinnedLLH.__init__(self, model, x, start)
        else:
            self.n_meas = n_meas
            UnbinnedLLH.__init__(self, model, x, start)

    def likelihood(self, *params):
        """
        Dummy function for the likelihood calculation.
        Implement your own, if there is no fitting implementation
        """
        pass
class ChiSquare(LLHFit):
    """Standard chi-square fit with externally supplied per-point errors."""

    def __init__(self, model, x, y, yerr, start):
        """
        :param yerr: uncertainty on y; either an array of the same length as
            y, or a scalar broadcast to every point
        """
        LLHFit.__init__(self, model, x, y, start)
        try:
            # Array-like yerr: boolean-mask indexing with one True per data
            # point both validates the length against y and copies the array.
            self.yerr = yerr[[True for y in y]]
        except TypeError:
            # Scalar yerr: broadcast to one entry per data point.
            self.yerr = np.array([yerr for item in y])
        except IndexError:
            raise IndexError('len of y and yerr have to be the same!')

    def likelihood(self, *params):
        """Return chi2 = sum((y - model)^2 / yerr^2).

        Bug fix: the original omitted the parentheses around the residual and
        therefore summed ``y - model**2 / yerr**2`` instead of the squared,
        error-weighted residuals.
        """
        residual = self.n_meas - self.model(self.x, *params)
        return np.sum(residual ** 2 / self.yerr ** 2)
class NeymanChi2Fit(LLHFit):
    """Neyman chi-square: the variance is estimated from the observed counts."""

    def likelihood(self, *params):
        """Return sum((n_meas - model)^2 / n_meas).

        Bug fix: the original omitted the parentheses around the residual and
        therefore summed ``n_meas - model**2 / n_meas``.
        """
        residual = self.n_meas - self.model(self.x, *params)
        return np.sum(residual ** 2 / self.n_meas)
class PearsonChi2Fit(LLHFit):
    """Pearson chi-square: the variance is taken from the model prediction."""

    def likelihood(self, *params):
        """Return sum((n_meas - predicted)^2 / predicted)."""
        predicted = self.model(self.x, *params)
        squared_residual = (self.n_meas - predicted) ** 2
        return np.sum(squared_residual / predicted)
class PoissonFit(LLHFit):
    """Binned fit assuming Poisson-distributed bin contents."""

    def __likelihood(self, expected):
        # Per-bin Poisson log-likelihood up to a parameter-independent term.
        return self.n_meas * np.log(expected) - expected

    def likelihood(self, *params):
        """Return -2 log L for the predicted bin contents."""
        expected = self.model(self.x, *params)
        return -2 * self.__likelihood(expected).sum()

    def goodness_of_fit(self):
        """Likelihood ratio of the fit result vs. the saturated model."""
        saturated = self.__likelihood(self.n_meas).sum()
        return (self.m.fval + 2 * saturated)
class BinomialFit(LLHFit):
    """Binned fit of a proportion/efficiency using a binomial likelihood."""

    def __init__(self, model, x, n_meas, N, start):
        """
        :param N: number of trials per bin; the measured proportion
            p_meas = n_meas / N is precomputed here.
        """
        LLHFit.__init__(self, model, x, n_meas, start)
        self.N = N
        self.p_meas = self.n_meas / N

    def likelihood(self, *params):
        """Return -2 N log L; the model must predict proportions in (0, 1)."""
        p = np.ma.array(self.model(self.x, *params))
        likelihood = self.__likelihood(p)
        return -2 * self.N * likelihood.sum()

    def goodness_of_fit(self):
        """Likelihood ratio of the fit result vs. the saturated model
        (model proportions equal to the measured ones)."""
        L = self.m.fval
        L_exp = -2*self.N * \
            self.__likelihood(self.p_meas).sum()
        return (L - L_exp)

    def __likelihood(self, p):
        # Mask predicted p in {0, 1} so the logs stay finite; the measured
        # p_meas == 0 and p_meas == 1 edge cases use their reduced forms.
        p = np.ma.masked_where((p == 0) | (p == 1), p)
        q = 1 - p
        likelihood = np.zeros_like(p)
        # Generic bins: log q + p_meas*log p - p_meas*log q  (per trial).
        likelihood += np.where((self.p_meas != 0) & (self.p_meas != 1),
                               np.ma.log(q) + self.p_meas * np.ma.log(p) -
                               self.p_meas * np.ma.log(q), np.zeros_like(p))
        # Bins with no successes contribute log q only.
        likelihood += np.where(self.p_meas == 0, np.ma.log(q),
                               np.zeros_like(q))
        # Bins with all successes contribute log p only.
        likelihood += np.where(self.p_meas == 1, np.ma.log(p),
                               np.zeros_like(p))
        return (likelihood)
class GeneralLLHFit(LLHFit):
    """Binned fit with a user-supplied probability distribution."""

    def __init__(self, model, x, n_meas, start, distribution, *args):
        """
        :param distribution: callable(measurement, prediction, *args)
            returning the probability of the measurement given the prediction;
            the extra *args are frozen into it here.
        """
        self.distribution = lambda measurement, model: distribution(
            measurement, model, *args)
        LLHFit.__init__(self, model, x, n_meas, start)

    def likelihood(self, *params):
        """Return -2 log L for the predicted bin contents.

        Bug fix: the original called ``__likelihood(n_model)`` without
        ``self.``; due to name mangling that bare name does not exist and
        raised NameError at runtime.
        """
        n_model = self.model(self.x, *params)
        return -2 * self.__likelihood(n_model).sum()

    def __likelihood(self, n_model):
        # Per-bin log-probability under the user-supplied distribution.
        return np.log(self.distribution(self.n_meas, n_model))

    def goodness_of_fit(self):
        """Likelihood ratio of the fit result vs. the saturated model."""
        return 2 * (self.__likelihood(self.n_meas).sum() - self.m.fval)
def install():
    """Copy fit.py into the active interpreter's site-packages directory.

    Fixes two defects in the original: `destination` was left unbound (a
    confusing NameError) when no site-packages entry was found, and the copy
    shelled out via ``os.system('cp ...')``, which is neither portable nor
    safe. ``shutil.copy`` does the same job cross-platform.

    Raises:
        RuntimeError: if no site-packages directory is found on sys.path.
    """
    import shutil
    destination = next((p for p in sys.path if "site-packages" in p), None)
    if destination is None:
        raise RuntimeError("no site-packages directory found on sys.path")
    shutil.copy('./fit.py', destination)
if __name__ == "__main__":
    # Self-install when the module is executed directly.
    install()
    # Typo fix in the user-facing message ("successfull" -> "successful").
    print("setup successful")
| StarcoderdataPython |
3290354 | <reponame>phisolani/optimal_wifi_slicing
from gekko import GEKKO

# Small mixed-integer nonlinear programming (MINLP) demo: two continuous and
# two integer variables, one product inequality and one sum-of-squares
# equality constraint, solved with the APOPT solver.
m = GEKKO() # Initialize gekko
m.options.SOLVER=1  # APOPT is an MINLP solver

# optional solver settings with APOPT
m.solver_options = ['minlp_maximum_iterations 500', \
                    # minlp iterations with integer solution
                    'minlp_max_iter_with_int_sol 10', \
                    # treat minlp as nlp
                    'minlp_as_nlp 1', \
                    # nlp sub-problem max iterations
                    'nlp_maximum_iterations 50', \
                    # 1 = depth first, 2 = breadth first
                    'minlp_branch_method 1', \
                    # maximum deviation from whole number
                    'minlp_integer_tol 0.05', \
                    # covergence tolerance
                    'minlp_gap_tol 0.01']

# Initialize variables (value = initial guess, lb/ub = bounds).
x1 = m.Var(value=1,lb=1,ub=5)
x2 = m.Var(value=5,lb=1,ub=5)
# Integer constraints for x3 and x4
x3 = m.Var(value=5,lb=1,ub=5,integer=True)
x4 = m.Var(value=1,lb=1,ub=5,integer=True)

# Equations
m.Equation(x1*x2*x3*x4>=25)
m.Equation(x1**2+x2**2+x3**2+x4**2==40)
m.Obj(x1*x4*(x1+x2+x3)+x3) # Objective
m.solve(disp=False) # Solve

print('Results')
print('x1: ' + str(x1.value))
print('x2: ' + str(x2.value))
print('x3: ' + str(x3.value))
print('x4: ' + str(x4.value))
print('Objective: ' + str(m.options.objfcnval))
print('Solve Time: ' + str(m.options.SOLVETIME)) | StarcoderdataPython |
4906631 | ######################################################################################################################
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Amazon Software License (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://aws.amazon.com/asl/ #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import os
import time
import types
import uuid
from datetime import datetime
import actions
import handlers
import services
class TaskTracker(object):
    """
    Class that implements logic to create and update the status of action in a dynamodb table.

    Items are buffered in memory and flushed when the instance is used as a
    context manager.
    """

    def __init__(self):
        # Buffered task-tracking items; see add_task_action/flush.
        self._task_items = []

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.flush()

    def add_task_action(self, task, assumed_role, action_resources, task_datetime, source, task_group=None):
        """Build a tracking item for one task action, make it DynamoDB-safe
        and buffer it.

        :param task: task definition dict (handlers.TASK_* keys)
        :param assumed_role: role ARN to assume, or None/"" for own account
        :param action_resources: resources the action will operate on
        :param task_datetime: scheduled datetime of the task
        :param source: origin of the task (event/schedule/...)
        :param task_group: optional group id used by get_task_items_for_job
        :return: the buffered item dict
        """
        item = {
            handlers.TASK_TR_ID: str(uuid.uuid4()),
            handlers.TASK_TR_NAME: task[handlers.TASK_NAME],
            handlers.TASK_TR_ACTION: task[handlers.TASK_ACTION],
            handlers.TASK_TR_CREATED: datetime.now().isoformat(),
            handlers.TASK_TR_CREATED_TS: time.time(),
            handlers.TASK_TR_SOURCE: source,
            handlers.TASK_TR_DT: task_datetime,
            handlers.TASK_TR_STATUS: handlers.STATUS_PENDING,
            handlers.TASK_TR_DEBUG: task[handlers.TASK_DEBUG],
            handlers.TASK_TR_NOTIFICATIONS: task[handlers.TASK_NOTIFICATIONS],
            handlers.TASK_TR_HAS_COMPLETION: getattr(actions.get_action_class(task[handlers.TASK_ACTION]),
                                                     handlers.COMPLETION_METHOD, None) is not None,
            handlers.TASK_TR_METRICS: task[handlers.TASK_METRICS],
            handlers.TASK_TR_DRYRUN: task[handlers.TASK_DRYRUN],
            handlers.TASK_TR_INTERNAL: task[handlers.TASK_INTERNAL],
            handlers.TASK_INTERVAL: task[handlers.TASK_INTERVAL],
            handlers.TASK_TR_TIMEOUT: task[handlers.TASK_TIMEOUT],
            handlers.TASK_TR_TIMEZONE: task[handlers.TASK_TIMEZONE],
            handlers.TASK_TR_STARTED_TS: int(time.time()),
            handlers.TASK_TR_EXECUTE_SIZE: task[handlers.TASK_EXECUTE_SIZE],
            handlers.TASK_TR_SELECT_SIZE: task[handlers.TASK_SELECT_SIZE],
            handlers.TASK_TR_COMPLETION_SIZE: task[handlers.TASK_COMPLETION_SIZE],
            handlers.TASK_TR_TAGFILTER: task[handlers.TASK_TAG_FILTER],
            handlers.TASK_TR_EVENTS: task.get(handlers.TASK_EVENTS, {}),
            handlers.TASK_TR_RUN_LOCAL: True,
            handlers.TASK_TR_GROUP: task_group
        }

        # Account is derived from the assumed role when present, otherwise
        # from the Ops Automator account environment variable.
        if assumed_role not in [None, ""]:
            item[handlers.TASK_TR_ASSUMED_ROLE] = assumed_role
            item[handlers.TASK_TR_ACCOUNT] = services.account_from_role_arn(assumed_role)
        else:
            item[handlers.TASK_TR_ACCOUNT] = os.getenv(handlers.ENV_OPS_AUTOMATOR_ACCOUNT)

        if len(task[handlers.TASK_PARAMETERS]) > 0:
            item[handlers.TASK_TR_PARAMETERS] = task[handlers.TASK_PARAMETERS]

        parameters = item.get(handlers.TASK_TR_PARAMETERS, None)
        if parameters is not None:
            item[handlers.TASK_TR_PARAMETERS] = parameters

        # check if the class has a field or static method that returns true if the action class needs completion
        # this way we can make completion dependent of parameter values
        has_completion = getattr(actions.get_action_class(task[handlers.TASK_ACTION]), actions.ACTION_PARAM_HAS_COMPLETION, None)
        if has_completion is not None:
            # if it is static method call it passing the task parameters
            if isinstance(has_completion, types.FunctionType):
                has_completion = has_completion(parameters)
        else:
            # if it does not have this method test if the class has an is_complete method
            has_completion = getattr(actions.get_action_class(task[handlers.TASK_ACTION]),
                                     handlers.COMPLETION_METHOD, None) is not None

        item[handlers.TASK_TR_HAS_COMPLETION] = has_completion

        item[handlers.TASK_TR_RESOURCES] = action_resources

        self._task_items.append(self.dynamo_safe_attribute_types(item))
        return item

    def update_action(self, action_id, _, __, status=None, status_data=None):
        """Update the status and/or status data of a buffered item.

        :param action_id: TASK_TR_ID of the item to update
        :param status: new status value, if any
        :param status_data: dict merged into the item, if any
        :raises RuntimeError: if no buffered item matches action_id
        """
        try:
            item = [t for t in self._task_items if t[handlers.TASK_TR_ID] == action_id][0]
        except Exception as ex:
            # Bug fix: the original raised a plain string (itself a TypeError,
            # since exceptions must derive from BaseException) and silently
            # dropped the underlying exception from the message.
            raise RuntimeError(
                "Error updating task {} in task tracking: {}".format(action_id, ex))

        if status is not None:
            item[handlers.TASK_TR_STATUS] = status

        if status_data is not None:
            self.dynamo_safe_attribute_types(status_data)
            item.update(status_data)

    @property
    def items(self):
        """Number of buffered task items."""
        return len(self._task_items)

    def get_task_items_for_job(self, task_group):
        """Return all buffered items that belong to the given task group."""
        return [i for i in self._task_items if i.get(handlers.TASK_TR_GROUP, None) == task_group]

    @property
    def task_items(self):
        """The raw list of buffered task items."""
        return self._task_items

    @classmethod
    def dynamo_safe_attribute_types(cls, data):
        """Recursively convert values so they can be stored in DynamoDB:
        datetimes become ISO strings and empty-string attributes are removed
        (DynamoDB historically rejected empty strings). Mutates and returns
        `data`, which may be a dict or a list of dicts.
        """

        def check_attributes(d):
            # Bug fix: iterate a snapshot of the keys — the loop deletes
            # entries, and mutating a dict while iterating its live key view
            # raises RuntimeError on Python 3 (the original relied on
            # Python 2's list-returning dict.keys()).
            for attr in list(d.keys()):
                if isinstance(d[attr], datetime):
                    d[attr] = d[attr].isoformat()
                    continue
                # NOTE(review): `basestring` exists only on Python 2; on
                # Python 3 this branch raises NameError for string values —
                # confirm the target runtime before porting.
                if isinstance(d[attr], basestring) and d[attr].strip() == "":
                    del d[attr]
                    continue
                if isinstance(d[attr], dict):
                    d[attr] = cls.dynamo_safe_attribute_types(d[attr])
                    continue

        if isinstance(data, list):
            for i in data:
                check_attributes(i)
        else:
            check_attributes(data)
        return data

    def flush(self, timeout_event=None):
        """Persist buffered items; no-op in this in-memory implementation."""
        pass
| StarcoderdataPython |
4951781 | #!/usr/bin/python
import re
import zipfile
import tempfile
import shutil
import os
import unittest
import subprocess
class generator_zip(unittest.TestCase):
    """Integration tests for the zip packaging of ./generate-polls.

    Each test shells out to generate-polls twice with identical seeds and
    asserts the produced zip archives are byte-for-byte reproducible.

    Fix applied throughout: ``assertEquals`` is a deprecated alias that was
    removed in Python 3.12; replaced with ``assertEqual``.
    """

    def setUp(self):
        self.tmp_dir = tempfile.mkdtemp('generator_zip')

    def tearDown(self):
        shutil.rmtree(self.tmp_dir)

    def read_zip(self, zip_filename):
        """Return a list of (member_name, member_bytes) for the archive."""
        contents = []
        with zipfile.ZipFile(zip_filename, 'r') as zip_fh:
            for filename in zip_fh.namelist():
                contents.append((filename, zip_fh.read(filename)))
        return contents

    def expected_names(self, results, rounds, count):
        """Assert the member names follow GEN_<round>_<index>.xml order."""
        expected = []
        for i in range(rounds):
            expected += ['GEN_%05d_%05d.xml' % (i, x) for x in range(count)]
        names = [x[0] for x in results]
        self.assertEqual(expected, names)

    def expected_content_reuse(self,
                               results,
                               rounds,
                               repeat,
                               duplicate,
                               strip_seed=False):
        """Assert the number of duplicated poll contents is within the bounds
        implied by the --repeat/--duplicate options; with strip_seed the
        embedded <seed> element is ignored when comparing."""
        dup_min = 0
        dup_max = 0
        if repeat > 0:
            dup_min = rounds
            dup_max = rounds * repeat * duplicate

        contents = []
        dups = []
        for filename, content in results:
            if strip_seed is True:
                stripped = re.sub(r"<seed>.*</seed>", "", content)
                if stripped not in contents:
                    contents.append(stripped)
                else:
                    dups.append(stripped)
            else:
                if content not in contents:
                    contents.append(content)
                else:
                    dups.append(content)

        self.assertTrue(len(dups) >= dup_min)
        self.assertTrue(len(dups) <= dup_max)

    def test_zip(self):
        """Same seed twice -> identical single-round archives."""
        count = 30
        subprocess.check_output(
            ['./generate-polls', '--seed', '10', '--count', '%d' % count,
             '--package', 'poll-1.zip', 'examples/dfars/dfars.py',
             'examples/dfars/state-graph.yaml', self.tmp_dir])
        subprocess.check_output(
            ['./generate-polls', '--seed', '10', '--count', '%d' % count,
             '--package', 'poll-2.zip', 'examples/dfars/dfars.py',
             'examples/dfars/state-graph.yaml', self.tmp_dir])
        a = self.read_zip(os.path.join(self.tmp_dir, '0-poll-1.zip'))
        b = self.read_zip(os.path.join(self.tmp_dir, '0-poll-2.zip'))
        self.assertEqual(a, b)
        self.assertEqual(len(a), 30)
        self.expected_names(a, 1, count)
        self.expected_content_reuse(a, 1, 0, 0)

    def test_zip_rounds(self):
        """Multiple rounds produce one archive per round, reproducibly."""
        rounds = 2
        count = 30
        subprocess.check_output(
            ['./generate-polls', '--round', '%d' % rounds, '--seed', '10',
             '--count', '%d' % count, '--package', 'poll-1.zip',
             'examples/dfars/dfars.py', 'examples/dfars/state-graph.yaml',
             self.tmp_dir])
        subprocess.check_output(
            ['./generate-polls', '--round', '%d' % rounds, '--seed', '10',
             '--count', '%d' % count, '--package', 'poll-2.zip',
             'examples/dfars/dfars.py', 'examples/dfars/state-graph.yaml',
             self.tmp_dir])
        a = self.read_zip(os.path.join(self.tmp_dir, '0-poll-1.zip'))
        a += self.read_zip(os.path.join(self.tmp_dir, '1-poll-1.zip'))
        b = self.read_zip(os.path.join(self.tmp_dir, '0-poll-2.zip'))
        b += self.read_zip(os.path.join(self.tmp_dir, '1-poll-2.zip'))
        self.assertEqual(a, b)
        self.assertEqual(len(a), rounds * count)
        self.expected_names(a, rounds, count)
        self.expected_content_reuse(a, 2, 0, 0)

    def test_zip_repeat(self):
        """--repeat/--duplicate allow bounded content reuse across rounds."""
        rounds = 2
        count = 30
        subprocess.check_output(
            ['./generate-polls', '--repeat', '5', '--duplicate', '5',
             '--round', '%d' % rounds, '--seed', '10', '--count', '%d' % count,
             '--package', 'poll-1.zip', 'examples/dfars/dfars.py',
             'examples/dfars/state-graph.yaml', self.tmp_dir])
        subprocess.check_output(
            ['./generate-polls', '--repeat', '5', '--duplicate', '5',
             '--round', '%d' % rounds, '--seed', '10', '--count', '%d' % count,
             '--package', 'poll-2.zip', 'examples/dfars/dfars.py',
             'examples/dfars/state-graph.yaml', self.tmp_dir])
        a = self.read_zip(os.path.join(self.tmp_dir, '0-poll-1.zip'))
        a += self.read_zip(os.path.join(self.tmp_dir, '1-poll-1.zip'))
        b = self.read_zip(os.path.join(self.tmp_dir, '0-poll-2.zip'))
        b += self.read_zip(os.path.join(self.tmp_dir, '1-poll-2.zip'))
        self.assertEqual(a, b)
        self.assertEqual(len(a), rounds * count)
        self.expected_names(a, rounds, count)
        self.expected_content_reuse(a, 2, 5, 5)

    def test_zip_repeat_with_seed(self):
        """With --store_seed, repeated polls differ only in their <seed>."""
        rounds = 2
        count = 30
        subprocess.check_output(
            ['./generate-polls', '--store_seed', '--repeat', '5',
             '--duplicate', '5', '--round', '%d' % rounds, '--seed', '10',
             '--count', '%d' % count, '--package', 'poll-1.zip',
             'examples/dfars/dfars.py', 'examples/dfars/state-graph.yaml',
             self.tmp_dir])
        subprocess.check_output(
            ['./generate-polls', '--store_seed', '--repeat', '5',
             '--duplicate', '5', '--round', '%d' % rounds, '--seed', '10',
             '--count', '%d' % count, '--package', 'poll-2.zip',
             'examples/dfars/dfars.py', 'examples/dfars/state-graph.yaml',
             self.tmp_dir])
        a = self.read_zip(os.path.join(self.tmp_dir, '0-poll-1.zip'))
        a += self.read_zip(os.path.join(self.tmp_dir, '1-poll-1.zip'))
        b = self.read_zip(os.path.join(self.tmp_dir, '0-poll-2.zip'))
        b += self.read_zip(os.path.join(self.tmp_dir, '1-poll-2.zip'))
        self.assertEqual(a, b)
        self.assertEqual(len(a), rounds * count)
        self.expected_names(a, rounds, count)
        self.expected_content_reuse(a, 0, 0, 0)
        self.expected_content_reuse(a, 2, 5, 5, strip_seed=True)

    def test_zip_magic_page_reuse(self):
        # in this case, the polls should use the magic seed, which prevents the poll from being repeatedly used.
        rounds = 2
        count = 50
        subprocess.check_output(
            ['./generate-polls', '--store_seed', '--repeat', '5',
             '--duplicate', '5', '--round', '%d' % rounds, '--seed', '10',
             '--count', '%d' % count, '--package', 'poll-1.zip',
             'examples/hangman/machine.py',
             'examples/hangman/state-graph.yaml', self.tmp_dir])
        subprocess.check_output(
            ['./generate-polls', '--store_seed', '--repeat', '5',
             '--duplicate', '5', '--round', '%d' % rounds, '--seed', '10',
             '--count', '%d' % count, '--package', 'poll-2.zip',
             'examples/hangman/machine.py',
             'examples/hangman/state-graph.yaml', self.tmp_dir])
        a = self.read_zip(os.path.join(self.tmp_dir, '0-poll-1.zip'))
        a += self.read_zip(os.path.join(self.tmp_dir, '1-poll-1.zip'))
        b = self.read_zip(os.path.join(self.tmp_dir, '0-poll-2.zip'))
        b += self.read_zip(os.path.join(self.tmp_dir, '1-poll-2.zip'))
        self.assertEqual(a, b)
        self.assertEqual(len(a), rounds * count)
        self.expected_names(a, rounds, count)
        self.expected_content_reuse(a, 2, 0, 0)
        self.expected_content_reuse(a, 2, 0, 0, strip_seed=True)
if __name__ == '__main__':
    # Allow running this integration-test module directly.
    unittest.main()
| StarcoderdataPython |
3312866 | <gh_stars>0
import mysql.connector

# NOTE(review): hard-coded root user with an empty password — move real
# credentials to configuration before any non-local use.
conn = mysql.connector.connect(user="root", password="", database="python")
try:
    cursor = conn.cursor()
    # Parameterized query (placeholder + args tuple) — safe against injection.
    cursor.execute('select * from user where id = %s', ('1',))
    values = cursor.fetchall()
    print(values)
    cursor.execute('select * from user')
    values = cursor.fetchall()
    print(values)
    conn.commit()  # no-op for SELECT statements; kept from the original flow
    cursor.close()
finally:
    # Bug fix: the original closed the cursor but leaked the connection.
    conn.close()
| StarcoderdataPython |
265601 | from models.split_googlenet import Split_googlenet
from models.split_resnet import Split_ResNet18,Split_ResNet34,Split_ResNet50,Split_ResNet101
from models.split_densenet import Split_densenet121,Split_densenet161,Split_densenet169,Split_densenet201
__all__ = [
"Split_ResNet18",
"Split_ResNet34",
"Split_ResNet50",
"Split_ResNet101",
"Split_googlenet",
"Split_densenet121",
"Split_densenet161",
"Split_densenet169",
"Split_densenet201",
] | StarcoderdataPython |
4898276 | <filename>chromusic.py
"""####################################################################
# Copyright (C) #
# 2020 <NAME>(<EMAIL>) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
####################################################################"""
import concurrent.futures
import random
import secrets
import textwrap
from typing import Union, List, Any
from datetime import timedelta, datetime
from uuid import uuid4
import schedule
from elasticsearch import Elasticsearch, helpers
from pyrogram import Client, Filters, InlineKeyboardMarkup, \
InputTextMessageContent, InlineQueryResultArticle
# from search_module.search_handling import file_retrieve_handler
from pyrogram.api import functions, types
from pyrogram.errors import FloodWait, SlowmodeWait
from static.emoji import _floppy_emoji, _clock_emoji
from index.analyzer import Analyzer
from index.dataGenerator import *
# from languages.persian import result_list_handler
from languages import english, persian
from mapping import *
def telegramAPI_connect():
    """
    This function makes multiple telegram app clients and their respective global variables to make them accessible
    through the entire module.
    You can customise this function and make app clients as much as you want.
    Obtaining api_id and api_hash (Telegram guides):
    In order to obtain an API id and develop your own application using the Telegram API you need to do the following:
        1. Sign up for Telegram using any application.
        2. Log in to your Telegram core: https://my.telegram.org.
        3. Go to 'API development tools' and fill out the form.
        4. You will get basic addresses as well as the api_id and api_hash parameters required for user authorization.
        5. For the moment each number can only have one api_id connected to it.
    :return:
    """
    global executor, app, app2, bot, adbot, indexer_list
    indexer_list = []
    # NOTE(review): the credential values below are placeholders — they must
    # be replaced with real api_id/api_hash pairs (ideally loaded from
    # configuration, not hard-coded) before the clients can start.
    chromusic_indexer_7693_api_id = "api_id_1"
    chromusic_indexer_7693_api_hash = "api_hash_1"

    shelbycobra2016_api_id = "api_id_2"
    shelbycobra2016_api_hash = "api_hash_2"

    api_id_0762 = "api_id_3"
    api_hash_0762 = "api_hash_3"

    api_id_0765 = "api_id_4"
    api_hash_0765 = "api_hash_4"
    BOT_TOKEN = "Your <PASSWORD> toke here"  # chromusic_bot bot token

    # app_me = client_connect("shelbycobra2016", shelbycobra2016_api_id, shelbycobra2016_api_hash)
    # Two user clients, one dedicated indexer client and one bot client.
    app = client_connect("Chromusic_1", api_id_0762, api_hash_0762)
    app2 = client_connect("Chromusic_2", api_id_0765, api_hash_0765)
    chromusic_indexer_7693 = client_connect("Chromusic_indexer_7693", chromusic_indexer_7693_api_id,
                                            chromusic_indexer_7693_api_hash)
    # adbot = adbot_connect(BOT_TOKEN, api_hash, api_id)
    # bot = bot_connect("soranpythonbot", BOT_TOKEN, shelbycobra2016_api_hash, shelbycobra2016_api_id)
    bot = bot_connect("chromusic_bot", shelbycobra2016_api_id, shelbycobra2016_api_hash, BOT_TOKEN)
    indexer_list.append(chromusic_indexer_7693)
def client_connect(
        session_name: str = "chromusic",
        api_id: Union[int, str] = None,
        api_hash: Union[int, str] = None):
    """Start a Pyrogram user-client session and return it.

    :param session_name: name of the on-disk session file (use ":memory:"
        for a throwaway in-memory session)
    :param api_id: the api_id part of the Telegram API key
    :param api_hash: the api_hash part of the Telegram API key
    :return: the started Client instance
    :raises ConnectionError: if the client is already connected
    """
    user_client = Client(session_name, api_id, api_hash)
    user_client.start()
    print(f"Client {session_name} session running ...")
    return user_client
def bot_connect(
        session_name: str = "chromusic_bot",
        api_id: Union[int, str] = None,
        api_hash: Union[int, str] = None,
        bot_token: str = None):
    """Start a Pyrogram bot session and return it.

    :param session_name: name of the on-disk session file (use ":memory:"
        for a throwaway in-memory session)
    :param api_id: the api_id part of the Telegram API key
    :param api_hash: the api_hash part of the Telegram API key
    :param bot_token: Bot API token obtained from @BotFather (only used
        when the session is created)
    :return: the started bot Client instance
    :raises ConnectionError: if the client is already connected
    """
    bot_client = Client(session_name, api_id, api_hash, bot_token=bot_token)
    bot_client.start()
    print(f"Bot: {session_name} session running ...")
    return bot_client
def db_connect():
    """Connect to the local Elasticsearch node.

    Exposes the connection as the module-level global `es` used by every
    search/index helper in this module.
    """
    global es
    node = {"host": "localhost", "port": 9200}
    es = Elasticsearch([node])
    print("Elasticsearch database running ...")
def exception_handler(func: Any) -> object:
    """
    Wraps a function and handles non-handled exceptions

    NOTE(review): despite the name, `func` is NOT a callable here — call
    sites pass an already-evaluated expression, e.g.
    ``exception_handler(bot.send_message(...))``. By the time execution
    enters this function the call has already happened, so the try/except
    below can never catch an exception raised by it, and "retrying" by
    returning `func` just returns the same value again. Also note the
    success path falls through without a return statement and yields None.
    A real fix requires changing every call site to pass a callable.

    :param func: Input function
    :return: Input function's output
    """
    try:
        func
    except SlowmodeWait as e:
        # Back off for the server-mandated wait before "returning" the value.
        time.sleep(e.x)
        return func
    except FloodWait as e:
        time.sleep(e.x)
        return func
    except Exception as e:
        time.sleep(10)
        return func
def check_new_member_join_count(channel_id: int):
    """
    Check if any changes has happened in the number of subscribers
    :param channel_id: Channel ID in which this client is an admin
    :return: -
    """
    try:
        # Compare the member count cached in Elasticsearch with the live one.
        current_db_members_count = int(es.get("admin_log_control", id=channel_id)["_source"]["members_count"])
        current_members_count = int(app.get_chat_members_count(channel_id))
        if not current_db_members_count == current_members_count:
            # The count changed: reconcile join/leave events, then store the
            # fresh count (ignore=409 tolerates concurrent version conflicts).
            check_joining_status(channel_id)
            res = es.update(index="admin_log_control", id=channel_id, body={
                "script":
                    {
                        "source": "ctx._source.members_count = params.count",
                        "lang": "painless",
                        "params": {
                            "count": int(current_members_count)
                        }
                    }
            }, ignore=409)
    except FloodWait as e:
        # Telegram rate limit: wait as instructed before the next attempt.
        print("floodwait from check_new_member:", e)
        print("sleeping for", e.x)
        time.sleep(e.x)
    except Exception as e:
        print("exception from check_new_member:", e)
def check_joining_status(channel_id):
    """
    Check if a user is a subscriber or not
    :param channel_id: Channel ID in which this client is an admin
    :return: -
    """
    try:
        res = get_admin_log(channel_id)  # chromusic channel ID
        # Only events newer than the stored offset are processed.
        current_last_date = es.get("admin_log_control", id=channel_id)["_source"]["last_offset_date"]
        _last_event = None
        # Events arrive newest-first; reversed() processes them in
        # chronological order so the final role reflects the latest action.
        for event in reversed(res["events"]):
            if event["date"] > current_last_date:
                if es.exists("user", id=event["user_id"]):
                    if str(event["action"]).__contains__("Leave"):
                        # User left the channel: demote to limited searcher.
                        try:
                            es.update("user", id=event["user_id"], body={
                                "script":
                                    {
                                        "inline": "ctx._source.role = params.role; ctx._source.limited = params.limited;",
                                        "lang": "painless",
                                        "params": {
                                            "role": "searcher",
                                            "limited": True
                                        }
                                    }
                            }, ignore=409)
                        except Exception as e:
                            print("exception from updating join/leave check joining status: ", e)
                    elif str(event["action"]).__contains__("Join"):
                        # User joined: promote to unrestricted subscriber.
                        es.update("user", id=event["user_id"], body={
                            "script":
                                {
                                    "inline": "ctx._source.role = params.role; ctx._source.limited = params.limited;",
                                    "lang": "painless",
                                    "params": {
                                        "role": "subscriber",
                                        "limited": False
                                    }
                                }
                        }, ignore=409)
                    _last_event = event
        if _last_event:
            # Persist the newest processed event date so the next run skips
            # everything handled here.
            es.update(index="admin_log_control", id=channel_id, body={
                "script":
                    {
                        "source": "ctx._source.last_offset_date = params.date_offset",
                        "lang": "painless",
                        "params": {
                            "date_offset": int(_last_event["date"])
                        }
                    }
            }, ignore=409)
    except Exception as e:
        print("from check joining status:", e)
def language_handler(
        func: str = None,
        lang: str = "en",
        *args: list,
        **kwargs: dict):
    """
    Routes the functions to their respective languages.

    :param func: Name of the text-builder function to look up on the
        language module
    :param lang: Two-letter language code ("en" or "fa"); any other value
        falls back to English, matching the original if/elif/else chain
    :param args: [list] Other arguments forwarded to the function
    :param kwargs: [dict] Other key-value arguments forwarded to the function
    :return: The result of the queried function in 'lang' language
    """
    # Cleanup: the original initialized a dead `text = ""` and duplicated the
    # English branch in both the `if` and the `else`; a single dispatch keeps
    # the exact same behavior.
    module = persian if lang == "fa" else english
    return getattr(module, func)(*args, **kwargs)
def get_admin_log(peer: Union[int, str] = None) -> list:
    """
    Get a list of logs from the admin-logs. This method gets 'Join' and 'Leave' events by default, but you can
    uncomment the commented items and add them to your result list.
    :param peer: Union: [id, username]. Peer username or ID.
        ex. get_admin_log("chromusic")
    :return: list[object] - A list of recent join/leave activities
    """
    # Raw MTProto call: fetch up to 10000 admin-log events, filtered to
    # join/leave only (the other event kinds are left here, commented, as a
    # menu of what can be enabled).
    res = app.send(functions.channels.GetAdminLog(
        channel=app.resolve_peer(peer),
        q='',
        max_id=0,
        min_id=0,
        limit=10000,
        # events_filter=types.ChannelAdminLogEventsFilter()
        events_filter=types.ChannelAdminLogEventsFilter(
            join=True,
            leave=True
            # invite=False,
            # ban=False,
            # unban=False,
            # kick=False,
            # unkick=False,
            # promote=False,
            # demote=False,
            # info=False,
            # settings=False,
            # pinned=False,
            # edit=False
        )
    ))
    return res
def download_guide(user):
    """
    Send a 'How to search and download' example to new users. Automatically picks the user's language and returns
    the example with respect to their languages.
    :param user: User object
    :return:
    """
    try:
        user_data = es.get("user", id=user.id)["_source"]
        # Only first-time users (no downloads yet) receive the tutorial.
        if user_data["downloaded_audio_count"] == 0:
            lang_code = user_data["lang_code"]
            help_keyboard_text = language_handler("example_message", lang_code)
            help_markup_keyboard = language_handler("example_message_keyboard", user_data["lang_code"])
            bot.send_message(chat_id=user.id, text=help_keyboard_text,
                             reply_markup=InlineKeyboardMarkup(help_markup_keyboard),
                             parse_mode='HTML')
    except FloodWait as e:
        # Rate-limited: throttle the chat, report to the log channel, back off.
        res = bot.set_slow_mode(user.id, 2)
        text = f"floodwait occured in the download_guide! \n\n{e}\n\nresult: {res}"
        app.send_message(chromusic_log_id, text)
        time.sleep(e.x)
    except SlowmodeWait as e:
        res = bot.set_slow_mode(user.id, 2)
        text = f"SlowmodeWait occured in the download_guide! \n\n{e}\n\nresult: {res}"
        app.send_message(chromusic_log_id, text)
        time.sleep(e.x)
    except Exception as e:
        print(f"from download_guide exception: {e}")
def search_handler(bot: object, message: object):
    """
    1. Search the query among the elasticsearch documents
    2. Handles the activity and membership status of users
    :param bot: Bot object - Connects to and deals with Telegram servers
    :param message: Message object containing the query
    :return: A message containing the results, in case there were any result in the database; otherwise a 'no result'
        text
    """
    # Ignore one-character queries — too short to search meaningfully.
    if len(str(message.text)) > 1:
        user = message.from_user
        try:
            is_member(user)
            query = message.text
            # Underscores come from deep-link formatting; search on spaces.
            processed_query = str(query).replace("_", " ")
            res = es.search(index="audio_files", body={"query": {
                "multi_match": {
                    "query": processed_query,
                    "type": "best_fields",
                    "fields": ["title", "file_name", "performer"],  # , "caption"],
                    # "fuzziness": "AUTO", # play with search parameters to satisfy your desired results
                    # "tie_breaker": 0.5,
                    "minimum_should_match": "60%"
                }}})
            user_data = es.get("user", id=user.id)["_source"]
            lang_code = user_data["lang_code"]
            # Seconds since the user's last recorded activity.
            last_active_date = int(time.time()) - int(user_data["last_active_date"])
            if last_active_date > timedelta(days=7).total_seconds():
                # Returning user after a week or more: greet with a
                # welcome-back message (larger bonus after two weeks).
                help_markup_keyboard = language_handler("help_markup_keyboard", user_data["lang_code"])
                if last_active_date > timedelta(days=14).total_seconds():
                    help_keyboard_text = language_handler("long_time_not_active", lang_code, user.first_name, 15)
                else:
                    help_keyboard_text = language_handler("long_time_not_active", lang_code, user.first_name, 5)
                exception_handler(bot.send_message(chat_id=user.id, text=help_keyboard_text,
                                                   reply_markup=InlineKeyboardMarkup(help_markup_keyboard),
                                                   parse_mode='HTML'))
            # Localized result list + keyboard, sent as a reply to the query.
            _text = language_handler("result_list_handler", lang_code, query, res)
            search_list_keyboard = language_handler("search_list_keyboard", lang_code, processed_query)
            exception_handler(
                bot.send_message(message.chat.id, _text, parse_mode='html', disable_web_page_preview=True,
                                 reply_to_message_id=message.message_id,
                                 reply_markup=InlineKeyboardMarkup(search_list_keyboard)))
            # Refresh the last-active timestamp (ignore=409: version conflicts
            # from concurrent updates are acceptable here).
            es.update(index="user", id=user.id, body={
                "script": {
                    "source": "ctx._source.last_active_date = params.last_active_date",
                    "lang": "painless",
                    "params": {
                        "last_active_date": int(time.time())
                    }
                }
            }, ignore=409)
            return _text
        except FloodWait as e:
            res = bot.set_slow_mode(user.id, 2)
            text = f"floodwait occured in the search handler! \n\n{e}\n\nresult: {res}"
            app.send_message(chromusic_log_id, text)
            time.sleep(e.x)
        except SlowmodeWait as e:
            res = bot.set_slow_mode(user.id, 2)
            text = f"SlowmodeWait occured in the search handler! \n\n{e}\n\nresult: {res}"
            app.send_message(chromusic_log_id, text)
            time.sleep(e.x)
        except Exception as e:
            print(f"from search handler exception: {e}")
def is_member(user: object):
    """
    Check whether a user is already a member of the channel; a non-member
    "searcher" who has exceeded five downloads gets their 'limited' status
    switched to True.
    :param user: User object
    :return:
    """
    try:
        profile = es.get("user", id=user.id)["_source"]
        if profile["role"] != "searcher":
            return
        print("he is a searcher")
        if profile["limited"] == False and profile["downloaded_audio_count"] > 4:
            # Mark the user as limited; role stays "searcher".
            limit_script = {
                "script": {
                    "inline": "ctx._source.limited = params.limited;"
                              "ctx._source.role = params.role;",
                    "lang": "painless",
                    "params": {
                        "limited": True,
                        "role": "searcher"
                    }
                }
            }
            es.update(index="user", id=user.id, body=limit_script, ignore=409)
    except FloodWait as flood_error:
        # Back off a little longer than Telegram asked for.
        time.sleep(flood_error.x + 5)
    except Exception as err:
        print(f"from is member: {err}")
def file_retrieve_handler(message: object) -> str:
    """
    Retrieve an audio file from its source channel using the audio-file ID
    chosen by the user from the result list.

    Flow:
      1. Parse the document ID out of the "dl_..." command text.
      2. Look up chat/message coordinates in the "audio_files" index.
      3. Fetch the message with the bot client (by username first, then by
         numeric chat id — Telegram can't resolve an id the client has never
         "seen").
      4. On failure, fall back to the user client (``app``), relay the file
         through the datacenter channel and serve it from there.
      5. Limited users are shown a join keyboard and made to wait 60 seconds
         before delivery.

    :param message: Message object containing the ID of the audio file
    :return: On success returns the generated audio-file caption
    """
    try:
        query = str(message.text).split("dl_")[1]
        if len(query) < 8:
            # Command without the "dl_" separator: strip the leading "/dl_".
            query = str(message.text)[4:]
        user = message.from_user
        is_member(user)
        res = es.search(index="audio_files", body={
            "query": {
                "match": {
                    "_id": query
                }
            }
        })
        chat_id = int(res['hits']['hits'][0]['_source']['chat_id'])
        chat_username = res['hits']['hits'][0]['_source']['chat_username']
        message_id = int(res['hits']['hits'][0]['_source']['message_id'])
        print(f"{40 * '='}", chat_id, ' && ', message.chat)
        try:
            try:
                audio_track = bot.get_messages(chat_username, message_id)
            except Exception as e:
                audio_track = bot.get_messages(chat_id, message_id)
            user_data = es.get("user", id=user.id)["_source"]
            lang_code = user_data["lang_code"]
            collaboration_request_message = language_handler("collaboration_request", lang_code)
            # Roughly 20% of subscribers get shown a collaboration request.
            probability = 1 if random.random() > 0.8 else 0
            _caption = language_handler("file_caption", lang_code, audio_track, audio_track.message_id)
            music_file_keyboard = language_handler("music_file_keyboard", lang_code, query)
            if user_data["limited"] == False:
                exception_handler(bot.send_audio(message.chat.id, audio_track.audio.file_id,
                                                 caption=_caption,
                                                 reply_markup=InlineKeyboardMarkup(music_file_keyboard),
                                                 parse_mode="HTML"))
                if user_data["role"] == "subscriber":
                    if probability == 1:
                        sent_collaboration_request_message = bot.send_message(message.chat.id,
                                                                              collaboration_request_message)
                retrieve_updater(query, user, chat_id)
            else:
                # Limited user: show the joining keyboard and delay delivery.
                keyboard = language_handler("button_joining_request_keyboard", lang=lang_code)
                text = language_handler("send_in_1_min", user_data["lang_code"], user_data["first_name"])
                send1min = bot.send_message(message.chat.id, text,
                                            reply_markup=InlineKeyboardMarkup(keyboard))
                time.sleep(60)
                exception_handler(bot.send_audio(message.chat.id, audio_track.audio.file_id,
                                                 caption=_caption,
                                                 reply_markup=InlineKeyboardMarkup(music_file_keyboard),
                                                 parse_mode="HTML"))
                send1min.delete()
                retrieve_updater(query, user, chat_id)
            return _caption
        except Exception as e:
            # Fallback: the bot client couldn't fetch the message; use the
            # user client and relay the file through the datacenter channel.
            try:
                try:
                    audio_track = app.get_messages(chat_username, message_id)
                except Exception as e:
                    audio_track = app.get_messages(chat_id, message_id)
                user_data = es.get("user", id=user.id)["_source"]
                lang_code = user_data["lang_code"]
                # BUGFIX: these two values were previously defined only in the
                # primary path above, so reaching this fallback raised a
                # NameError for subscribers (misreported as "file removed").
                collaboration_request_message = language_handler("collaboration_request", lang_code)
                probability = 1 if random.random() > 0.8 else 0
                music_file_keyboard = language_handler("music_file_keyboard", lang_code, query)
                _caption = language_handler("file_caption", lang_code, audio_track, audio_track.message_id)
                sent_to_datacenter = app.send_audio(datacenter_id, audio_track.audio.file_id,
                                                    audio_track.audio.file_ref,
                                                    caption=_caption)
                message_id = sent_to_datacenter.message_id
                audio_track = bot.get_messages(datacenter_id, message_id)
                if user_data["limited"] == False:
                    exception_handler(bot.send_audio(message.chat.id, audio_track.audio.file_id,
                                                     caption=_caption,
                                                     reply_markup=InlineKeyboardMarkup(music_file_keyboard),
                                                     parse_mode="HTML"))
                    if user_data["role"] == "subscriber":
                        if probability == 1:
                            sent_collaboration_request_message = bot.send_message(message.chat.id,
                                                                                  collaboration_request_message)
                    retrieve_updater(query, user, chat_id)
                else:
                    keyboard = language_handler("button_joining_request_keyboard", lang=lang_code)
                    text = language_handler("send_in_1_min", user_data["lang_code"], user_data["first_name"])
                    send1min = bot.send_message(message.chat.id, text,
                                                reply_markup=InlineKeyboardMarkup(keyboard))
                    time.sleep(60)
                    exception_handler(bot.send_audio(message.chat.id, audio_track.audio.file_id,
                                                     caption=_caption,
                                                     reply_markup=InlineKeyboardMarkup(music_file_keyboard),
                                                     parse_mode="HTML"))
                    send1min.delete()
                    retrieve_updater(query, user, chat_id)
                sent_to_datacenter.delete()
                return _caption
            except Exception as e:
                # Last resort: the file is gone or the channel went private.
                try:
                    finale_ex = ""
                    bot.send_message(message.chat.id, f"Hey {message.from_user.first_name}, unfortunately, the source "
                                                      f"channel has removed the file or the channel has converted "
                                                      f"to a private channel")
                except Exception as e:
                    finale_ex = f"last exception occured as well {e}"
                error_text = f"Exception from exception from file retriever .. maybe file has been removed:\n\n{e}\n\n" \
                             f"File ID: {message.text}\n" \
                             f"Sender: {user.first_name} - {user.username}\n" \
                             f"Channel username: {chat_username}\n\n" \
                             f"{finale_ex}"
                app.send_message(chromusic_log_id, error_text)
    except Exception as e:
        text = f"outer exception from file retrieve: {e}"
        app.send_message(chromusic_log_id, text)
def retrieve_updater(query: str, user: object, channel: str) -> bool:
    """
    Update database indices after a file has been retrieved: bump the user's
    download counter, remember the file in the user's download history, and
    bump the per-file and per-channel download counters.
    :param query: Audio-file ID to update its information
    :param user: User object to update its information
    :param channel: ChannelID to update its information
    :return: True on success
    """

    def _increment_body(field_name):
        # Painless script body that adds one to the given counter field.
        return {
            "script": {
                "inline": "ctx._source." + field_name + "+=params.count_inc;",
                "lang": "painless",
                "params": {
                    "count_inc": 1,
                }
            }
        }

    es.update(index="user", id=user.id,
              body=_increment_body("downloaded_audio_count"), ignore=409)
    # Keep at most the 50 most recent file IDs in the user's download list;
    # do nothing if this file is already recorded.
    es.update(index="user_lists", id=user.id, body={
        "script": {
            "source": "if (ctx._source.downloaded_audio_id_list.contains(params.file_id)) {ctx.op = 'none'} else "
                      "{if (ctx._source.downloaded_audio_id_list.size()>49) "
                      "{ctx._source.downloaded_audio_id_list.remove(0);"
                      "ctx._source.downloaded_audio_id_list.add(params.file_id);} "
                      "else {ctx._source.downloaded_audio_id_list.add(params.file_id);}}",
            "lang": "painless",
            "params": {
                "file_id": query
            }
        }
    }, ignore=409)
    es.update(index="audio_files", id=query,
              body=_increment_body("times_downloaded"), ignore=409)
    try:
        es.update(index="channel", id=channel,
                  body=_increment_body("downloaded_from_count"), ignore=409)
        return True
    except Exception:
        # Unknown channel: try to register it, then credit the shared
        # users-files channel for this download instead.
        try:
            time.sleep(3)
            chat = app.get_chat(channel)
            time.sleep(3)
            es.create("channel", id=chat.id, body={
                "title": chat.title,
                "username": chat.username,
                "importance": 5,
                "indexed_from_audio_count": 0,
                "last_indexed_offset_date": 0,
                "downloaded_from_count": 0,
            }, refresh=True, ignore=409)
        except Exception as e:
            print(f"chat: {e}")
        es.update(index="channel", id=chromusic_users_files_id,
                  body=_increment_body("downloaded_from_count"), ignore=409)
        return True
def channel_name_extractor(client: object, text: str) -> list:
    """
    Extract channel usernames (``@name`` mentions) from a text.

    Optimistic extraction: candidate usernames are buffered in the
    "channel_buffer" index first and only validated as real channels later,
    which avoids ``get_chat`` requests and hence reduces the chance of being
    rate-limited/banned by Telegram.

    :param client: Telegram client (unused here; kept for interface parity)
    :param text: Text to be checked. Maybe caption, file name, etc.
    :return: A list of extracted channel names or empty list if there were no channel name in the text.
    """
    try:
        import re
        wrong_characters = ["?", "-", "%", "#", "*", "+", "$", "^", ".", "=", "!", "/"]
        if "@" not in str(text):
            return []
        tokens = re.split(":|,| |\n|-|;|؛", text)
        channels_username = []
        for token in tokens:
            if "@" not in token:
                continue
            candidate = token.replace("@", "")
            # Telegram usernames are at least 5 characters long.
            if len(candidate) <= 4:
                continue
            try:
                if any(x in candidate for x in wrong_characters):
                    continue
                # Only buffer usernames not already in the "channel" index.
                if es.count(index="channel", body={
                    "query": {
                        "match": {
                            "username": candidate
                        }
                    }
                })["count"] == 0:
                    res = es.create(index="channel_buffer", id=candidate,
                                    body={},
                                    ignore=[409, 400])
                    print(f"from channel_name_extractor to channel_buffer: {res} ")
                    channels_username.append(candidate)
            except Exception as e:
                # Fixed: message previously named the wrong function.
                print(f"exception from channel_name_extractor() function inside"
                      f" the per-token check: {e}")
        return channels_username
    except Exception as e:
        print(f"exception from channel_name_extractor() function {e}")
        return []
def forwarded_from_channel_extractor(client: object, message: object) -> bool:
    """
    Register the source channel of a forwarded message for future indexing.
    If the message was forwarded from a channel not yet present in the
    "channel" index, queue its username in "future_channel".
    :param client: Telegram client
    :param message: Message object containing the ID of the audio file
    :return: True on success; otherwise False
    """
    try:
        origin_chat = message.forward_from_chat
        if origin_chat.type == "channel":
            if not es.exists(index="channel", id=origin_chat.id):
                es.create(index="future_channel", id=origin_chat.username,
                          body={"id": origin_chat.id}, ignore=409)
            return True
    except Exception as e:
        print(f"exception from forwarded_from_channel_extractor() function: it may swapped to private "
              f"though unavailable: {e}")
    return False
def caption_entities_channel_extractor(client: object, message: object) -> list:
    """
    Extract channel usernames from a message's caption entities and embedded
    web-page URL, and optimistically queue the new ones in "channel_buffer".

    A ``t.me/<name>`` link splits into 4 parts on "/"; a
    ``t.me/<name>/<post>`` link splits into 5 parts with the username second
    to last.

    BUGFIX: the 5-part branches previously validated ``url[-1]`` against the
    disallowed-character list but buffered ``url[-2]``; the check now applies
    to the same component that is buffered.

    :param client: Telegram client
    :param message: Message object containing the caption of the audio file
    :return: A list of extracted channel usernames on success; otherwise an empty list
    """
    try:
        channels_username = []
        wrong_characters = ["?", "-", "%", "#", "*", "+", "$", "^", ".", "=", "!", "/"]

        def _candidate_from_url(raw_url):
            # Return the username component of a t.me-style link, or None.
            parts = str(raw_url).split("/")
            if len(parts) == 4:
                return parts[-1]
            if len(parts) == 5:
                return parts[-2]
            return None

        def _buffer_username(username, log_tag):
            # Queue *username* in "channel_buffer" unless it contains
            # disallowed characters or is already a known channel.
            if any(x in username for x in wrong_characters):
                return
            if es.count(index="channel", body={
                "query": {
                    "match": {
                        "username": username
                    }
                }
            })["count"] == 0:
                res = es.create(index="channel_buffer", id=username,
                                body={},
                                ignore=[409, 400])
                channels_username.append(username)
                print(f"from caption_entities_channel_extractor to channel_buffer: {res} ")

        entities = message.caption_entities
        if entities == None:
            entities = message.entities
        for entity in entities:
            if entity.type == "text_link":
                try:
                    candidate = _candidate_from_url(entity.url)
                    if candidate is not None:
                        _buffer_username(candidate, "text_link")
                except Exception as e:
                    print(
                        f"exception from caption_entities_channel_extractor() function entity.type == 'text_link': part: {e}")
        if message.web_page:
            try:
                candidate = _candidate_from_url(message.web_page.url)
                if candidate is not None:
                    _buffer_username(candidate, "web_page")
            except Exception as e:
                print(f"exception from caption_entities_channel_extractor() function message.web_page part: {e}")
        return channels_username
    except Exception as e:
        print(f"exception from caption_entities_channel_extractor() function: {e}")
        return []
# print(channel_to_index_set)
def channel_re_analyzer() -> list:
    """
    Re-analyze channels and re-score their importance.
    The actual re-scoring logic is still a placeholder.
    :return: Scan cursor over the last batch of matched channels (still not completed)
    """
    scan_cursor = None
    for importance_level in range(1):
        # Stream all channels at this importance level from Elasticsearch.
        scan_cursor = helpers.scan(
            client=es,
            query={"query": {"match": {"importance": importance_level}}},
            size=10000,
            scroll='5m',
            index="channel"
        )
        for _channel in scan_cursor:
            # Do the re-scoring stuff here
            _channel
    return scan_cursor
def daily_gathered_channels_controller(client: object) -> bool:
    """
    Prepare the list of newly gathered channel usernames and hand it to
    new_channel_indexer(). The "future_channel" index acts as a buffer: a
    username stays there untouched until it has actually been indexed, and is
    removed only afterwards.
    :param client: Telegram client
    :return: True on success otherwise False
    """
    try:
        while 1:
            try:
                print("existing channels now running ...")
                es.indices.refresh("future_channel")
                scan_results = helpers.scan(es,
                                            query={"query": {"match_all": {}}},
                                            index="future_channel")
                pending_usernames = [hit["_id"] for hit in scan_results]
                new_channel_indexer(client, pending_usernames, "future_channel")
                return True
            except Exception as inner_error:
                text = f"exception handled form daily_gathered_channels_controller() function: \n\n{inner_error}"
                client.send_message(chromusic_log_id, text)
            finally:
                # Throttle each pass (also runs before the successful return).
                time.sleep(30)
    except Exception as outer_error:
        text = f"exception handled from out of while in daily_gathered_channels_controller() function: \n\n{outer_error}"
        client.send_message(chromusic_log_id, text)
        # Restart the controller after an unexpected escape from the loop.
        daily_gathered_channels_controller(client)
        return False
def existing_channels_handler_by_importance(client: object, importance: int):
    """
    This function retrieves channels from DB by importance and updates their indexing status.

    Channels are fetched sorted by their last indexed offset date (oldest
    first). For each channel a "global_control"/"indexing_flag" document is
    used as a cross-process mutex so concurrent crawlers skip channels that
    are already being indexed. Runs forever; on an unexpected escape from the
    loop it restarts itself via tail recursion (see the outer ``finally``).
    :param client: Telegram client
    :param importance: The target importance of the channels to be retrieved from DB
    :return: -
    """
    try:
        while 1:
            try:
                # Oldest-indexed channels of this importance first.
                res = es.search(index="channel", body={
                    "query": {
                        "match": {"importance": importance}
                    },
                    "sort": {
                        "last_indexed_offset_date": "asc"
                    }
                })
                starting_time = int(time.time())
                for _channel in res["hits"]["hits"]:
                    print(f"_channel: {_channel}")
                    # Every time only lets the crawler to work 3 hours at max
                    try:
                        # After ~2 hours of continuous work, pause 15 minutes
                        # to avoid hammering Telegram, then reset the clock.
                        if int(time.time()) - starting_time > timedelta(hours=2).total_seconds():
                            if importance > 0:
                                delay = timedelta(minutes=15).total_seconds()
                                time.sleep(delay)
                                starting_time = int(time.time())
                        try:
                            # Acquire the per-channel indexing flag; skip the
                            # channel if another worker already holds it.
                            es.indices.refresh(index="global_control")
                            status_res = es.get(index="global_control", doc_type="indexing_flag", id=_channel["_id"])
                            is_being_indexed = status_res["_source"]["indexing"]
                            print("is being indexed: ", is_being_indexed)
                            if is_being_indexed == True:
                                continue
                            else:
                                flag_update_res = es.update(index="global_control", doc_type="indexing_flag",
                                                            id=_channel["_id"], body={
                                    "script": {
                                        "inline": "ctx._source.indexing = params.indexing;",
                                        "lang": "painless",
                                        "params": {
                                            "indexing": True,
                                        }
                                    }
                                }, ignore=409)
                                es.index(index="global_control", doc_type="indexing_flag", id=_channel["_id"],
                                         body={
                                             "indexing": True,
                                             "name": _channel["_source"]["username"],
                                             "importance": _channel["_source"]["importance"]
                                         }, refresh=True)
                        except Exception as e:
                            # Flag document didn't exist yet — create it set.
                            es.create(index="global_control", doc_type="indexing_flag", id=_channel["_id"], body={
                                "indexing": True,
                                "name": _channel["_source"]["username"],
                                "importance": _channel["_source"]["importance"]
                            }, refresh=True, ignore=409)
                        existing_channel_indexer(client, channel_id=int(_channel["_id"]))
                        # Release the indexing flag once done.
                        flag_update_res = es.update(index="global_control", doc_type="indexing_flag",
                                                    id=_channel["_id"], body={
                            "script": {
                                "inline": "ctx._source.indexing = params.indexing;",
                                "lang": "painless",
                                "params": {
                                    "indexing": False,
                                }
                            }
                        }, ignore=409)
                        time.sleep(10)
                    except Exception as e:
                        text = f"exception handled form existing_channels_handler_by_importance() function <b>for loop</b>: \n\n{e}"
                        client.send_message(chromusic_log_id, text)
                        time.sleep(15)
            except Exception as e:
                text = f"exception handled form existing_channels_handler_by_importance() function: \n\n{e}"
                client.send_message(chromusic_log_id, text)
            finally:
                text = f"existing_channels_handler_by_importance finished and will start again soon\n\n" \
                       f"importance: {importance}"
                # client.send_message("me", text)
                time.sleep(30)
    except Exception as e:
        text = f"out of the while in the existing_channels_handler_by_importance handled and will revoked again in 15 sec.\n\n" \
               f"importance: {importance}\n\n" \
               f"exception details:\n" \
               f"{e}"
        client.send_message(chromusic_log_id, text)
    finally:
        # Self-restart: keeps the crawler alive indefinitely.
        time.sleep(15)
        existing_channels_handler_by_importance(client, importance)
def existing_channels_handler_by_importance_recent_messages(client: object, importance: int):
    """
    This function retrieves channels from DB by importance and updates their indexing in reverse mode (from recent
    to previous).

    Same flag-based mutual exclusion as existing_channels_handler_by_importance,
    but existing_channel_indexer is invoked with the "recently" argument so
    only the newest messages are crawled. Runs forever; restarts itself via
    tail recursion on an unexpected escape from the loop.
    :param client: Telegram client
    :param importance: The target importance of the channels to be retrieved from DB
    :return: -
    """
    try:
        while 1:
            try:
                print("existing_channels_handler_by_importance_recent_messages started ...")
                # Oldest-indexed channels of this importance first.
                res = es.search(index="channel", body={
                    "query": {
                        "match": {"importance": importance}
                    },
                    "sort": {
                        "last_indexed_offset_date": "asc"
                    }
                })
                starting_time = int(time.time())
                args = "recently"
                for _channel in res["hits"]["hits"]:
                    try:
                        # Every time only lets the crawler to work 3 hours at max
                        if int(time.time()) - starting_time > timedelta(hours=2).total_seconds():
                            if importance > 0:
                                delay = timedelta(minutes=20).total_seconds()
                                time.sleep(delay)
                                # break
                        channel_db = es.get('channel', id=_channel['_id'], ignore=404)
                        print(f"after existing indexer with client {client}\n{channel_db}")
                        # Zero-importance channels are skipped entirely.
                        if int(channel_db["_source"]["importance"]) > 0:
                            try:
                                # Acquire the per-channel indexing flag; skip
                                # if another worker already holds it.
                                es.indices.refresh(index="global_control")
                                status_res = es.get(index="global_control", doc_type="indexing_flag",
                                                    id=_channel["_id"])
                                is_being_indexed = status_res["_source"]["indexing"]
                                print("is being indexed: ", is_being_indexed)
                                if is_being_indexed == True:
                                    continue
                                else:
                                    flag_update_res = es.update(index="global_control", doc_type="indexing_flag",
                                                                id=_channel["_id"], body={
                                        "script": {
                                            "inline": "ctx._source.indexing = params.indexing;",
                                            "lang": "painless",
                                            "params": {
                                                "indexing": True,
                                            }
                                        }
                                    }, ignore=409)
                                    es.index(index="global_control", doc_type="indexing_flag", id=_channel["_id"],
                                             body={
                                                 "indexing": True,
                                                 "name": _channel["_source"]["username"],
                                                 "importance": _channel["_source"]["importance"]
                                             }, refresh=True)
                            except Exception as e:
                                # Flag document didn't exist yet — create it set.
                                es.create(index="global_control", doc_type="indexing_flag", id=_channel["_id"], body={
                                    "indexing": True,
                                    "name": _channel["_source"]["username"],
                                    "importance": _channel["_source"]["importance"]
                                }, refresh=True, ignore=409)
                            existing_channel_indexer(client, int(_channel["_id"]), args)
                            # Release the indexing flag once done.
                            flag_update_res = es.update(index="global_control", doc_type="indexing_flag",
                                                        id=_channel["_id"], body={
                                "script": {
                                    "inline": "ctx._source.indexing = params.indexing;",
                                    "lang": "painless",
                                    "params": {
                                        "indexing": False,
                                    }
                                }
                            }, ignore=409)
                            time.sleep(20)
                    except Exception as e:
                        text = f"exception handled form existing_channels_handler_by_importance_recent_messages() function <b>for loop</b>: \n\n{e}"
                        client.send_message(chromusic_log_id, text)
                    finally:
                        time.sleep(15)
            except Exception as e:
                text = f"exception handled form existing_channels_handler_by_importance_recent_messages() function: \n\n{e}"
                client.send_message(chromusic_log_id, text)
            finally:
                text = f"existing_channels_handler_by_importance_recent_messages finished and will start again soon\n\n" \
                       f"importance: {importance}"
                # client.send_message("me", text)
                time.sleep(30)
    except Exception as e:
        text = f"out of the while in the existing_channels_handler_by_importance_recent_messages handled and will revoked again in 15 sec.\n\n" \
               f"importance: {importance}\n\n" \
               f"exception details:\n" \
               f"{e}"
        client.send_message(chromusic_log_id, text)
    finally:
        # Self-restart: keeps the crawler alive indefinitely.
        time.sleep(15)
        existing_channels_handler_by_importance_recent_messages(client, importance)
def existing_channel_indexer(client: object, channel_id: int, *args: list) -> bool:
    """
    Index a channel that already exists in the database and update its last
    indexing status. Channels with fewer than 100 posts are deleted from the
    "channel" index instead of being indexed.
    :param client: Telegram client
    :param channel_id: ID of the previously stored target channel to continue its indexing
    :param args: Other arguments to pass (forwarded to audio_file_indexer)
    :return: True on success otherwise, False
    """
    try:
        print("existing channel indexer started ...")
        channel_doc = es.get(index="channel", id=channel_id)
        print(f"_ch_from_es: {channel_doc}")
        last_offset_date = int(channel_doc['_source']['last_indexed_offset_date'])
        importance = int(channel_doc['_source']['importance'])
        channel_username = channel_doc['_source']['username']
        history_length = len(client.get_history(channel_username))
        text = f"channel_id: {channel_id}\n\n" \
               f"ch from es: {channel_doc}\n\n" \
               f"importance: {importance}\n\n" \
               f"channel_username: {channel_username}\n\n" \
               f"len of history: {history_length}"
        time.sleep(3)
        if importance > 0:
            if history_length < 100:
                # Too few posts to be worth indexing — drop the channel.
                es.delete(index="channel", id=channel_id, ignore=[409, 400])
            else:
                audio_file_indexer(client, channel_id, last_offset_date, *args)
        print("existing channel indexer finished ...")
        time.sleep(3)
        return True
    except Exception as e:
        text = f"exception handled form existing_channel_indexer() function: \n\n{e}"
        # Don't spam the log channel with expected not-found errors.
        if not ("NotFoundError(404," in str(e) or
                "not supported" in str(e)):
            client.send_message(chromusic_log_id, text)
        return False
def new_channel_indexer(client: object, channels_username: list, db_index: str):
    """
    Index brand new channels (not existing in the database).

    For each username: verify it is unknown, fetch chat info and member count
    (with generous sleeps to avoid Telegram rate limits), score it with
    Analyzer, register it in the "channel" index, set the global indexing
    flag, crawl it with audio_file_indexer, and finally remove the username
    from the buffer index. Usernames that fail any step are dropped from the
    buffer so they are not retried forever.
    :param client: Telegram client
    :param channels_username: A list of channels' usernames to be indexed
    :param db_index: Database index (Either from 'future_channel' or 'channel_buffer')
    :return: -
    """
    try:
        if len(channels_username) > 0:
            print(f"new channel indexer started ... {channels_username}")
            starting_time = int(time.time())
            for channel_username in channels_username:
                print(f"channel_username: {channel_username}")
                # Every time only lets the crawler to work 3 hours at max
                try:
                    # channel_id = es.get(index=db_index, id=channel_username)["_source"]["id"]
                    # After ~4 hours of continuous work, pause 13 minutes.
                    if int(time.time()) - starting_time > timedelta(hours=4).total_seconds():
                        delay = timedelta(minutes=13).total_seconds()
                        time.sleep(delay)
                        # break
                    # print("in the new indexer")
                    # Only proceed if this username isn't a known channel yet.
                    if int(es.count(index="channel", body={"query": {
                        "match": {
                            "username": channel_username
                        }
                    }})['count']) == 0:
                        # print("sleeping for 5 seconds after getting channel info ...")
                        time.sleep(4)
                        try:
                            members_count = client.get_chat_members_count(channel_username)
                            # continue
                        except Exception as e:
                            # Not resolvable — drop it from the buffer.
                            es.delete(db_index, id=channel_username, ignore=404)
                            print(f"couldn't find the username in {db_index} index")
                            continue
                        try:
                            time.sleep(5)
                            chat = client.get_chat(channel_username)
                            time.sleep(5)
                        except Exception as e:
                            print(f"handled exception from new_channel_indexer(): {e}")
                            try:
                                es.delete(db_index, id=channel_username, ignore=404)
                                # continue
                            except Exception as e:
                                print(f"couldn't find the username in {db_index} index")
                            # continue
                        # app.send_message("me", f"time spent indexing {chat} " # up
                        #                        f"channel is {starting_time - int(time.time())} seconds")
                        print(f"indexing: {chat.username}")
                        # analyze it
                        # print("sleeping for 1 seconds after getting channel members ...")
                        time.sleep(1)
                        # Score the channel from its history and member count.
                        channel_analyse = Analyzer(client.get_history(channel_username), members_count)
                        importance = channel_analyse.channel_analyzer()
                        # Channels need at least 100 posts to be indexed.
                        if len(client.get_history(chat.id)) > 99:
                            es.indices.refresh(index="channel")
                            res = es.create("channel", id=chat.id, body={
                                "title": chat.title,
                                "username": chat.username,
                                "importance": importance,
                                "indexed_from_audio_count": 0,
                                "last_indexed_offset_date": 0,
                                "downloaded_from_count": 0,
                            }, refresh=True, ignore=409)
                            # time.sleep(3)
                            if importance > 0:
                                # ----------- new_changes -----------------
                                try:
                                    # Acquire the global indexing flag; skip
                                    # if another worker already holds it.
                                    es.indices.refresh(index="global_control")
                                    status_res = es.get(index="global_control", doc_type="indexing_flag",
                                                        id=chat.id)
                                    is_being_indexed = status_res["_source"]["indexing"]
                                    print("is being indexed: ", is_being_indexed)
                                    if is_being_indexed == True:
                                        continue
                                    else:
                                        flag_update_res = es.update(index="global_control", doc_type="indexing_flag",
                                                                    id=chat.id, body={
                                            "script": {
                                                "inline": "ctx._source.indexing = params.indexing;",
                                                "lang": "painless",
                                                "params": {
                                                    "indexing": True,
                                                }
                                            }
                                        }, ignore=409)
                                except Exception as e:
                                    es.create(index="global_control", doc_type="indexing_flag", id=chat.id,
                                              body={
                                                  "indexing": True,
                                                  "name": chat.username,
                                                  "importance": importance
                                              }, refresh=True, ignore=409)
                                # ----------- new_changes -----------------
                                audio_file_indexer(client, chat.id, offset_date=0)
                                # ----------- new_changes -----------------
                                # Release the indexing flag once done.
                                flag_update_res = es.update(index="global_control", doc_type="indexing_flag",
                                                            id=chat.id, body={
                                    "script": {
                                        "inline": "ctx._source.indexing = params.indexing;",
                                        "lang": "painless",
                                        "params": {
                                            "indexing": False,
                                        }
                                    }
                                }, ignore=409)
                                # ----------- new_changes ----------------
                            else:
                                try:
                                    es.delete(db_index, id=channel_username, ignore=404)
                                    # continue
                                except Exception as e:
                                    text = f"it's not in the {db_index} two to the last except in the main if in the for loop \n\n{e}"
                                    client.send_message(chromusic_log_id, text)
                                    print(
                                        "it's not in the future_channel/channel_buffer two to the fffflast except in the main if in the for loop")
                                # continue
                        else:
                            # print("The number of the shared posts is less than 100")
                            try:
                                es.delete(db_index, id=channel_username, ignore=404)
                                # continue
                            except Exception as e:
                                text = f"it's not in the future_channel/channel_buffer one to the last except in the main if in the for loop \n\n{e}"
                                client.send_message(chromusic_log_id, text)
                                print(
                                    f"it's not in the future_channel/channel_buffer one to the last except in the main if in the for loop")
                            # continue
                        # print("channel successfully deleted from future_channe/channel_bufferl")
                    else:
                        # print("this is an existing channel")
                        try:
                            es.delete(db_index, id=channel_username, ignore=404)
                            # continue
                        except Exception as e:
                            text = f"it's not in the future_channel/channel_buffer last except in the main if in the for loop \n\n{e}"
                            client.send_message(chromusic_log_id, text)
                        # print(f"it's not in the future_channel/channel_buffer last except in the main if in the for loop")
                        # continue
                        # print("channel successfully deleted from future_channe/channel_bufferl")
                except Exception as e:
                    text = f"exception handled form new_channel_indexer() function <b>for loop</b>: \n\n{e}"
                    # Don't spam the log channel with expected not-found errors.
                    if not (str(e).__contains__("NotFoundError(404,") or
                            str(e).__contains__("not supported")):
                        client.send_message(chromusic_log_id, text)
                    # continue
                finally:
                    time.sleep(5)
    except Exception as e:
        text = f"exception handled form new_channel_indexer() function: \n\n{e}"
        app.send_message(chromusic_log_id, text)
    finally:
        text = f"new_channel_indexer() finished and will start again ..."
        # client.send_message(chromusic_log_id, text)
        time.sleep(30)
def audio_file_indexer(client: object, channel_id: int, offset_date: int, *args: str) -> bool:
    """
    Crawl and index audio files within channels.

    Iterates a channel's message history (oldest-to-newest from `offset_date`,
    or only the 20 newest messages when called with "recently"), batches audio
    messages and bulk-indexes them into Elasticsearch every 10 messages, and
    opportunistically extracts new channel candidates from forwards, captions
    and message text along the way.

    :param client: Telegram client
    :param channel_id: ID of the current channel being indexed
    :param offset_date: Offset date of the last indexed message from the current channel
    :param args: Extra arguments: possibly contains "recently": whether to index from recent messages (reversed) or not
    :return: True on success; False on failure.
        NOTE(review): returns None (not False) when the channel has fewer than
        100 posts and is deleted instead — confirm callers treat None as falsy.
    """
    try:
        _messages = []  # batch of audio messages awaiting the next bulk index
        s = int(time.time())  # start timestamp (bookkeeping; not read below)
        _last_message = None  # most recently processed message
        _counter = 0  # messages seen since the last batch flush
        speed_limiter_counter = 0  # messages seen since the last throttle sleep
        limit = 0  # 0 = unlimited history; set to 20 in "recently" mode
        reverse_index = True  # oldest-to-newest unless in "recently" mode
        _ch_from_es = es.get(index="channel", id=channel_id)
        channel_username = _ch_from_es['_source']['username']
        if len(args) > 0:
            if args[0] == "recently":
                # print("from_bottom_up_indexing", es.get(index="channel", id=channel))
                # "recently" mode: scan only the 20 newest messages, newest first,
                # ignoring any stored offset.
                limit = 20
                offset_date = 0
                reverse_index = False
                # _ch_from_es = es.get(index="channel", id=channel_id)
                # channel_username = _ch_from_es['_source']['username']
                # Channels with fewer than 100 posts are dropped from the index
                # entirely instead of being crawled.
                if len(client.get_history(channel_username)) < 100:
                    print("channel_with_less_than_100_posts_deleted: ", es.get(index="channel", id=channel_id))
                    res = es.delete("channel", id=channel_id, ignore=404)
                    # print("deleted_with_less_than_100", res)
                    return None
        indexed_from_counter = 0
        # print("from audio, indexing: ", client.get_chat(channel_username))
        for message in client.iter_history(channel_username, limit=limit, offset_date=offset_date,
                                           reverse=reverse_index):
            try:
                _date = int(message.date)
                # sleep 2 seconds every 35 iteration
                if speed_limiter_counter > 99:
                    speed_limiter_counter = 0
                    time.sleep(2)
                # Every 10 messages: persist the resume offset, then flush the
                # accumulated batch to Elasticsearch.
                if _counter > 9:
                    # client.send_message("shelbycobra2016", f"https://t.me/{_last_message.chat.username}/{_last_message.message_id}")
                    # client.send_message("shelbycobra2016", f"{_last_message.message_id}")
                    # print("in counter 34 ...")
                    try:
                        # print("before getting _date ...")
                        # Only forward (oldest-to-newest) crawls record an offset:
                        # a later run resumes from the last indexed date.
                        if reverse_index and not _last_message == None:
                            # _date = int(_last_message.date)
                            # print("date: ", _date)
                            response = es.update(index="channel", id=channel_id, body={
                                "script": {
                                    "inline": "ctx._source.last_indexed_offset_date = params.last_indexed_offset_date;",
                                    "lang": "painless",
                                    "params": {
                                        "last_indexed_offset_date": _date,
                                    }
                                }
                            }, ignore=409)
                            # print("response: ", response)
                            # print(es.get("channel", id=channel))
                    except Exception as e:
                        print(f"exception from counter: {e}")
                    # print(f"from counter if: {response}")
                    # this if is meant to slow down the indexing rate to 35 messages per sec. at max
                    if len(_messages) > 0:
                        # if not reverse_index:
                        #     print("len(_messages) > 0: ", _messages)
                        # Bulk-index the batch, then bump the channel's counter of
                        # files indexed from its own history.
                        helpers.bulk(es, audio_data_generator(_messages))
                        # print("after bulk", _messages[0])
                        response = es.update(index="channel", id=channel_id, body={
                            "script": {
                                "inline": "ctx._source.indexed_from_audio_count += params.indexed_from_audio_count",
                                "lang": "painless",
                                "params": {
                                    "indexed_from_audio_count": len(_messages)
                                }
                            }
                        }, ignore=409)
                        # This channel is now (partially) indexed; remove it from
                        # both candidate queues if still present.
                        try:
                            if es.exists("future_channel", id=channel_username):
                                es.delete("future_channel", id=channel_username, ignore=404)
                            if es.exists("channel_buffer", id=channel_username):
                                es.delete("channel_buffer", id=channel_username, ignore=404)
                            # print(f"deleted {channel} from database successfully")
                        except Exception as e:
                            print("query didn't match any document id --> from future_channel - new channel indexer")
                            print(f"exact exception: \n{e}")
                        # print(es.get("channel", id=channel))
                        time.sleep(1)
                    _messages = []
                    _counter = 0
                    # time.sleep(1)
                if message.audio:
                    # if limit == 20:
                    #     print("from_bottom_up_indexing message added", message.chat.username,
                    #           es.count(index="audio", body={"query": {
                    #               "match": {
                    #                   "file_id": message.audio.file_id
                    #               }}}), message.audio.title)
                    # --> following if is as an alternative
                    # if not es.exists(index="audio", id=str(message.audio.file_id[8:30:3]).replace("-", "d"))
                    # De-duplicate: only queue the file if no document with this
                    # file_id exists in "audio_files" yet.
                    if int(es.count(index="audio_files", body={"query": {
                        "match": {
                            "file_id": message.audio.file_id
                        }
                    }})['count']) == 0:
                        _messages.append(message)
                        # if limit == 20:
                        #     print("message appended", len(_messages))
                # ----- following 3 ifs are for extracting channels: ----------
                if message.forward_from_chat:
                    forwarded_from_channel_extractor(client, message)  # this func will extract channels' IDs
                if message.caption_entities:
                    caption_entities_channel_extractor(client, message)
                if message.text:
                    channel_name_extractor(client, message.text)
                if message.caption:
                    channel_name_extractor(client, message.caption)
                _counter += 1
                _last_message = message
                speed_limiter_counter += 1
            except FloodWait as e:
                text = f"FloodWait from audio_file_indexer: \n\n{e}"
                client.send_message(chromusic_log_id, text)
                # print("from audio file indexer: Flood wait exception: ", e)
                time.sleep(e.x)
            except SlowmodeWait as e:
                text = f"SlowmodeWait from audio_file_indexer: \n\n{e}"
                client.send_message(chromusic_log_id, text)
                # print("from audio file indexer: Slowmodewait exception: ", e)
                time.sleep(e.x)
            except TimeoutError as e:
                text = f"TimeoutError from audio_file_indexer: \n\n{e}"
                client.send_message(chromusic_log_id, text)
                # print("Timeout error: sleeping for 20 seconds: ", e)
                time.sleep(20)
                # pass
            except ConnectionError as e:
                text = f"ConnectionError from audio_file_indexer: \n\n{e}"
                client.send_message(chromusic_log_id, text)
                # print("Connection error - sleeping for 40 seconds: ", e)
            except Exception as e:
                client.send_message(chromusic_log_id,
                                    f"from audio_file_indexer: maybe encountered a service message in the for loop\n\n {e}")
                print("from audio file indexer: ", e)
        return True
    except FloodWait as e:
        text = f"FloodWait from audio_file_indexer. outer try/except: \n\n{e}"
        client.send_message(chromusic_log_id, text)
        # print("from audio file indexer: Flood wait exception: ", e)
        time.sleep(e.x)
        return False
    except SlowmodeWait as e:
        text = f"SlowmodeWait from audio_file_indexer. outer try/except: \n\n{e}"
        client.send_message(chromusic_log_id, text)
        # print("from audio file indexer: Slowmodewait exception: ", e)
        time.sleep(e.x)
        return False
    except TimeoutError as e:
        text = f"TimeoutError from audio_file_indexer. outer try/except: \n\n{e}"
        client.send_message(chromusic_log_id, text)
        # print("Timeout error: sleeping for 20 seconds: ", e)
        time.sleep(20)
        return False
    except ConnectionError as e:
        text = f"ConnectionError from audio_file_indexer. outer try/except: \n\n{e}"
        client.send_message(chromusic_log_id, text)
        # print("Connection error - sleeping for 40 seconds: ", e)
        return False
    except Exception as e:
        client.send_message(chromusic_log_id,
                            f" outer try/except from audio_file_indexer: maybe encountered a service message\n\n {e}")
        print("from audio file indexer: ", e)
        return False
def main_join_left_checker_controller():
    """
    Poll members' joining/leaving status forever and report any exceptions.

    On a fatal error outside the polling loop, the controller logs it and
    restarts itself recursively; the stop notice is always sent on exit.
    :return: -
    """
    try:
        while True:
            try:
                pause_seconds = random.randint(5, 7)
                # check_new_member_join_count(chromusic_id)
                time.sleep(1)
                check_new_member_join_count(chromusic_fa_id)
                time.sleep(pause_seconds)
            except Exception as e:
                app.send_message(
                    chromusic_log_id,
                    f"exception handled form main_join_left_checker_controller() function <b>for loop</b>: \n\n{e}")
    except Exception as e:
        app.send_message(
            chromusic_log_id,
            f"exception handled form main_join_left_checker_controller() function <b>for loop</b>: \n\n{e}")
        main_join_left_checker_controller()
    finally:
        app.send_message("shelbycobra2016", f"join/left checker controller has stopped: \n\n")
def main_index_scheduler_controller():
    """
    Manually register all recurring indexing jobs and run the scheduler loop.

    Jobs fall into three families:
      * member join/left counting (every few seconds),
      * daily/periodic processing of newly gathered channels,
      * re-indexing of existing channels, keyed by an importance level 0-6
        (higher importance -> more frequent runs, both full and recent-only).
    Blocks forever in the `schedule.run_pending()` loop at the end.
    :return: -
    """
    try:
        print("index scheduler controller")
        # schedule.every(5).to(7).seconds.do(check_new_member_join_count, chromusic_id)
        schedule.every(5).to(7).seconds.do(check_new_member_join_count, chromusic_fa_id)
        schedule.every(1).seconds.do(check_new_member_join_count, chromusic_id)
        schedule.every(1).seconds.do(check_new_member_join_count, chromusic_fa_id)
        # -------------------------- Daily new gathered channels ------------------------
        schedule.every(1).day.at("00:00").do(daily_gathered_channels_controller)
        schedule.every(1).day.at("03:30").do(daily_gathered_channels_controller)
        schedule.every(1).day.at("07:00").do(daily_gathered_channels_controller)
        schedule.every(1).day.at("10:30").do(daily_gathered_channels_controller)
        schedule.every(1).day.at("14:00").do(daily_gathered_channels_controller)
        schedule.every(1).day.at("17:30").do(daily_gathered_channels_controller)
        schedule.every(1).day.at("21:00").do(daily_gathered_channels_controller)
        schedule.every(1).day.at("01:30").do(existing_channels_handler_by_importance, 6)
        # importance 5 channels schedule: sat, sun, mon, tue, fri
        # NOTE(review): the two lines below overlap the fixed daily times above
        # (every minute / every 30 minutes) — confirm this duplication is intended.
        schedule.every(1).minutes.do(daily_gathered_channels_controller)
        schedule.every(30).minutes.do(daily_gathered_channels_controller)
        # Interval-based re-index of recent messages, by importance level.
        schedule.every(3).hours.do(existing_channels_handler_by_importance_recent_messages, 5)
        schedule.every(6).hours.do(existing_channels_handler_by_importance_recent_messages, 4)
        schedule.every(10).hours.do(existing_channels_handler_by_importance_recent_messages, 3)
        schedule.every(15).hours.do(existing_channels_handler_by_importance_recent_messages, 2)
        schedule.every(24).hours.do(existing_channels_handler_by_importance_recent_messages, 1)
        # Interval-based full re-index, by importance level.
        schedule.every(30).minutes.do(existing_channels_handler_by_importance, 5)
        schedule.every(1).hours.do(existing_channels_handler_by_importance, 4)
        schedule.every(3).hours.do(existing_channels_handler_by_importance, 3)
        schedule.every(5).hours.do(existing_channels_handler_by_importance, 2)
        schedule.every(10).hours.do(existing_channels_handler_by_importance, 1)
        schedule.every().saturday.at("01:30").do(existing_channels_handler_by_importance, 5)
        # schedule.every().saturday.at("01:30").do(existing_channels_handler_by_importance_recent_messages, 5)
        schedule.every().saturday.at("04:30").do(existing_channels_handler_by_importance, 5)
        # schedule.every().saturday.at("04:30").do(existing_channels_handler_by_importance_recent_messages, 5)
        schedule.every().saturday.at("09:30").do(existing_channels_handler_by_importance, 5)
        schedule.every().saturday.at("14:30").do(existing_channels_handler_by_importance, 5)
        schedule.every().sunday.at("01:30").do(existing_channels_handler_by_importance, 5)
        # schedule.every().sunday.at("01:30").do(existing_channels_handler_by_importance_recent_messages, 5)
        schedule.every().sunday.at("04:30").do(existing_channels_handler_by_importance, 5)
        schedule.every().sunday.at("09:30").do(existing_channels_handler_by_importance, 5)
        schedule.every().sunday.at("09:30").do(existing_channels_handler_by_importance_recent_messages, 5)
        schedule.every().sunday.at("14:30").do(existing_channels_handler_by_importance, 5)
        schedule.every().monday.at("01:30").do(existing_channels_handler_by_importance, 5)
        # schedule.every().monday.at("01:30").do(existing_channels_handler_by_importance_recent_messages, 5)
        schedule.every().monday.at("04:30").do(existing_channels_handler_by_importance, 5)
        schedule.every().monday.at("09:30").do(existing_channels_handler_by_importance, 5)
        schedule.every().monday.at("14:30").do(existing_channels_handler_by_importance, 5)
        # schedule.every().monday.at("14:30").do(existing_channels_handler_by_importance_recent_messages, 5)
        schedule.every().tuesday.at("01:30").do(existing_channels_handler_by_importance, 5)
        # schedule.every().tuesday.at("01:30").do(existing_channels_handler_by_importance_recent_messages, 5)
        schedule.every().tuesday.at("04:30").do(existing_channels_handler_by_importance, 5)
        schedule.every().tuesday.at("09:30").do(existing_channels_handler_by_importance, 5)
        # schedule.every().tuesday.at("09:30").do(existing_channels_handler_by_importance_recent_messages, 5)
        schedule.every().tuesday.at("14:30").do(existing_channels_handler_by_importance, 5)
        schedule.every().wednesday.at("01:30").do(existing_channels_handler_by_importance, 5)
        # schedule.every().wednesday.at("01:30").do(existing_channels_handler_by_importance_recent_messages, 5)
        schedule.every().wednesday.at("04:30").do(existing_channels_handler_by_importance, 5)
        schedule.every().wednesday.at("09:30").do(existing_channels_handler_by_importance, 5)
        schedule.every().wednesday.at("14:30").do(existing_channels_handler_by_importance, 5)
        # schedule.every().wednesday.at("14:30").do(existing_channels_handler_by_importance_recent_messages, 5)
        schedule.every().thursday.at("01:30").do(existing_channels_handler_by_importance, 5)
        # schedule.every().thursday.at("01:30").do(existing_channels_handler_by_importance_recent_messages, 5)
        schedule.every().thursday.at("04:30").do(existing_channels_handler_by_importance, 5)
        schedule.every().thursday.at("09:30").do(existing_channels_handler_by_importance, 5)
        schedule.every().thursday.at("14:30").do(existing_channels_handler_by_importance, 5)
        # schedule.every().thursday.at("14:30").do(existing_channels_handler_by_importance_recent_messages, 5)
        schedule.every().friday.at("01:30").do(existing_channels_handler_by_importance, 5)
        # schedule.every().friday.at("01:30").do(existing_channels_handler_by_importance_recent_messages, 5)
        schedule.every().friday.at("04:30").do(existing_channels_handler_by_importance, 5)
        schedule.every().friday.at("09:30").do(existing_channels_handler_by_importance, 5)
        schedule.every().friday.at("14:30").do(existing_channels_handler_by_importance, 5)
        # schedule.every().friday.at("14:30").do(existing_channels_handler_by_importance_recent_messages, 5)
        # ---------------------end-----------------------------
        # importance 4 channels schedule: sat , mon, wed, thr
        schedule.every().saturday.at("07:30").do(existing_channels_handler_by_importance, 4)
        schedule.every(1).hours.do(existing_channels_handler_by_importance_recent_messages, 4)
        # schedule.every().saturday.at("07:30").do(existing_channels_handler_by_importance_recent_messages, 4)
        schedule.every().saturday.at("11:30").do(existing_channels_handler_by_importance, 4)
        schedule.every().saturday.at("15:30").do(existing_channels_handler_by_importance, 4)
        schedule.every().monday.at("07:30").do(existing_channels_handler_by_importance, 4)
        # schedule.every().monday.at("07:30").do(existing_channels_handler_by_importance_recent_messages, 4)
        schedule.every().monday.at("11:30").do(existing_channels_handler_by_importance, 4)
        schedule.every().monday.at("15:30").do(existing_channels_handler_by_importance, 4)
        schedule.every().wednesday.at("07:30").do(existing_channels_handler_by_importance, 4)
        # schedule.every().wednesday.at("07:30").do(existing_channels_handler_by_importance_recent_messages, 4)
        schedule.every().wednesday.at("11:30").do(existing_channels_handler_by_importance, 4)
        schedule.every().wednesday.at("15:30").do(existing_channels_handler_by_importance, 4)
        schedule.every().thursday.at("07:30").do(existing_channels_handler_by_importance, 4)
        # schedule.every().thursday.at("07:30").do(existing_channels_handler_by_importance_recent_messages, 4)
        schedule.every().thursday.at("11:30").do(existing_channels_handler_by_importance, 4)
        schedule.every().thursday.at("15:30").do(existing_channels_handler_by_importance, 4)
        # ---------------------end-----------------------------
        # importance 3 channels schedule: sun , tue, thr
        schedule.every().sunday.at("07:30").do(existing_channels_handler_by_importance, 3)
        schedule.every(3).hours.do(existing_channels_handler_by_importance_recent_messages, 3)
        # schedule.every().sunday.at("07:30").do(existing_channels_handler_by_importance_recent_messages, 3)
        schedule.every().sunday.at("11:30").do(existing_channels_handler_by_importance, 3)
        schedule.every().sunday.at("15:30").do(existing_channels_handler_by_importance, 3)
        schedule.every().tuesday.at("07:30").do(existing_channels_handler_by_importance, 3)
        # schedule.every().tuesday.at("07:30").do(existing_channels_handler_by_importance_recent_messages, 3)
        schedule.every().tuesday.at("11:30").do(existing_channels_handler_by_importance, 3)
        schedule.every().tuesday.at("15:30").do(existing_channels_handler_by_importance, 3)
        schedule.every().thursday.at("01:30").do(existing_channels_handler_by_importance, 3)
        # schedule.every().thursday.at("01:30").do(existing_channels_handler_by_importance_recent_messages, 3)
        schedule.every().thursday.at("11:30").do(existing_channels_handler_by_importance, 3)
        schedule.every().thursday.at("15:30").do(existing_channels_handler_by_importance, 3)
        # ---------------------end-----------------------------
        # importance 2 channels schedule: tue, fri
        schedule.every().tuesday.at("10:30").do(existing_channels_handler_by_importance, 2)
        schedule.every(10).hours.do(existing_channels_handler_by_importance_recent_messages, 2)
        schedule.every().tuesday.at("10:30").do(existing_channels_handler_by_importance_recent_messages, 2)
        schedule.every().friday.at("07:30").do(existing_channels_handler_by_importance, 2)
        # ---------------------end-----------------------------
        # importance 1 channels schedule: wed
        schedule.every().wednesday.at("01:30").do(existing_channels_handler_by_importance, 1)
        schedule.every(20).hours.do(existing_channels_handler_by_importance_recent_messages, 1)
        schedule.every().wednesday.at("01:30").do(existing_channels_handler_by_importance_recent_messages, 1)
        # ---------------------end-----------------------------
        # importance 1 channels schedule: wed
        schedule.every(15).days.at("14:30").do(existing_channels_handler_by_importance, 0)
        # ---------------------end-----------------------------
        # Blocking dispatch loop: runs due jobs once per second, forever.
        while 1:
            schedule.run_pending()
            time.sleep(1)
    except Exception as e:
        app.send_message(chromusic_log_id, f"exception from scheduler \n {e}")
        # main_index_scheduler_controller()
def reset_last_index_offset_date():
    """
    Zero out the resume offset and the indexed-files counter on every channel
    document once an indexing pass has finished.
    :return: True on success
    """
    # Painless script that resets both bookkeeping fields in one update call.
    reset_body = {
        "script": {
            "inline": ("ctx._source.last_indexed_offset_date = params.last_indexed_offset_date;"
                       "ctx._source.indexed_from_audio_count = params.indexed_from_audio_count;"),
            "lang": "painless",
            "params": {
                "last_indexed_offset_date": 0,
                "indexed_from_audio_count": 0,
            }
        }
    }
    channel_docs = helpers.scan(es,
                                query={"query": {"match_all": {}}},
                                index="channel", size=10000,
                                scroll='2m', )
    for channel_doc in channel_docs:
        es.update(index="channel", id=channel_doc["_id"], body=reset_body, ignore=409)
    return True
def buffer_gathered_channels_controller(client):
    """
    Check the gathered channel candidates in the buffer and index the valid ones.

    Candidate usernames are gathered optimistically (they may or may not be
    valid Telegram usernames); a candidate whose indexing raises an unexpected
    error is removed from the "channel_buffer" index.
    :param client: Telegram client
    :return: -
    """
    try:
        while 1:
            try:
                _channels_list_username = []
                _channel_username = None  # last candidate seen; referenced by the error handler below
                print("existing channels now running ...")
                res = helpers.scan(es,
                                   query={"query": {"match_all": {}}},
                                   index="channel_buffer")
                for buffered_candidate_channel in res:
                    # BUG FIX: the original assigned list.append()'s return value
                    # (always None) to _channel_username, so the except handler
                    # below tried to delete document id=None instead of the
                    # failing candidate.
                    _channel_username = buffered_candidate_channel["_id"]
                    _channels_list_username.append(_channel_username)
                    try:
                        # NOTE(review): invoked once per candidate with the
                        # still-growing list, so earlier candidates are passed
                        # again each iteration — confirm new_channel_indexer()
                        # de-duplicates already-indexed usernames.
                        new_channel_indexer(client, _channels_list_username, "channel_buffer")
                    except FloodWait as e:
                        text = f"FloodWait from buffer_gathered_channels_controller \n\n{e}"
                        client.send_message(chromusic_log_id, text)
                        # print("from audio file indexer: Flood wait exception: ", e)
                        time.sleep(e.x)
                    except SlowmodeWait as e:
                        text = f"SlowmodeWait from buffer_gathered_channels_controller \n\n{e}"
                        client.send_message(chromusic_log_id, text)
                        # print("from audio file indexer: Slowmodewait exception: ", e)
                        time.sleep(e.x)
                    except TimeoutError as e:
                        text = f"TimeoutError from buffer_gathered_channels_controller \n\n{e}"
                        client.send_message(chromusic_log_id, text)
                        # print("Timeout error: sleeping for 20 seconds: ", e)
                        time.sleep(20)
                        # pass
                    except ConnectionError as e:
                        text = f"ConnectionError from buffer_gathered_channels_controller\n\n{e}"
                        client.send_message(chromusic_log_id, text)
                        # print("Connection error - sleeping for 40 seconds: ", e)
                    except Exception as e:
                        # Unexpected failure: drop this candidate from the buffer
                        # so it is not retried forever.
                        print(f"got exception in buffer_gathered_channels_controller(): {_channel_username} \n\n{e}")
                        es.delete(index="channel_buffer", id=_channel_username, ignore=404)
                        time.sleep(30)
                # print("new indexing channels started ... !")
            except Exception as e:
                text = f"exception handled form buffer_gathered_channels_controller() function: \n\n{e}"
                client.send_message(chromusic_log_id, text)
                # continue
            finally:
                time.sleep(30)
    except Exception as e:
        text = f"Exception handled out of the while in the buffer_gathered_channels_controller() \n\n{e}"
        client.send_message(chromusic_log_id, text)
        buffer_gathered_channels_controller(client)
def invalid_title_performer_remover(client):
    """
    Detect and remove fake audio-title and audio-performer information (in case
    they were set to the channel's username or title instead of real metadata).

    Matching fields are blanked out (replaced with a single space) directly in
    the "audio_files" index. Runs forever, re-querying the newest-indexed page
    of audio documents on each pass.
    :param client: Telegram client
    :return: -
    """
    try:
        while 1:
            try:
                # Fetch a page of audio docs, most recently indexed first.
                res = es.search(index="audio_files", body={
                    "query": {
                        "match_all": {}
                    },
                    "sort": {
                        "last_indexed_offset_date": "desc"
                    }
                })
                starting_time = int(time.time())
                for audio_file in res["hits"]["hits"]:
                    audio = audio_file["_source"]
                    # print(f"_channel: {_channel}")
                    # Every time only lets the crawler to work 3 hours at max
                    try:
                        # Compare the channel username (underscores normalized to
                        # spaces) against the stored performer / title.
                        if str(audio["chat_username"]).replace("_", " ") in audio["performer"]:
                            # BUG FIX: removed a stray `es.update(index="audio_files", )`
                            # call (no document id/body) that raised TypeError on
                            # every match and was silently swallowed below.
                            es.update(index="audio_files", id=audio_file["_id"], body={
                                "script": {
                                    "inline": "ctx._source.performer = params.performer;",
                                    "lang": "painless",
                                    "params": {
                                        "performer": " "
                                    }
                                }
                            })
                        elif str(audio["chat_username"]).replace("_", " ") in audio["title"]:
                            es.update(index="audio_files", id=audio_file["_id"], body={
                                "script": {
                                    "inline": "ctx._source.title = params.title;",
                                    "lang": "painless",
                                    "params": {
                                        "title": " "
                                    }
                                }
                            })
                    except Exception as e:
                        text = f"Exception in the for loop from invalid_title_performer_remover() \n\n{e}"
                        client.send_message(chromusic_log_id, text)
            except Exception as e:
                text = f"Exception in the while loop from invalid_title_performer_remover() \n\n{e}"
                client.send_message(chromusic_log_id, text)
    except Exception as e:
        text = f"encountered exception out of the while loop in the invalid_title_performer_remover()\n\n{e}"
        print(text)
        client.send_message(chromusic_log_id, text)
def audio_file_forwarder(client):
    """
    Forward audio files to a channel as backup (* optional to use).

    Walks the source channel's history from the oldest message, forwarding
    every audio message to the "Audiowarehouse" channel; pauses briefly every
    65 forwards and logs progress every 5000.
    :param client: Telegram client
    :return: -
    """
    source_channel = -1001381641403
    forwarded_count = 0
    for history_message in client.iter_history(source_channel, reverse=True):
        try:
            if not history_message.audio:
                continue
            if forwarded_count % 5000 == 0:
                print(f"{forwarded_count} audio files forwarded so far!")
            if forwarded_count % 65 == 0:
                time.sleep(2)
            client.forward_messages("Audiowarehouse", source_channel, history_message.message_id)
            forwarded_count += 1
        except FloodWait as e:
            client.send_message(chromusic_log_id, f"FloodWait from audio_file_forwarder: \n\n{e}")
            # print("from audio file indexer: Flood wait exception: ", e)
            time.sleep(e.x)
        except SlowmodeWait as e:
            client.send_message(chromusic_log_id, f"SlowmodeWait from audio_file_forwarder: \n\n{e}")
            # print("from audio file indexer: Slowmodewait exception: ", e)
            time.sleep(e.x)
        except TimeoutError as e:
            client.send_message(chromusic_log_id, f"TimeoutError from audio_file_forwarder: \n\n{e}")
            # print("Timeout error: sleeping for 20 seconds: ", e)
            time.sleep(20)
        except ConnectionError as e:
            client.send_message(chromusic_log_id, f"ConnectionError from audio_file_forwarder: \n\n{e}")
            # print("Connection error - sleeping for 40 seconds: ", e)
        except Exception as e:
            client.send_message(chromusic_log_id,
                                f"from audio_file_forwarder: \n\n {e}")
            print("from audio file indexer: ", e)
def main():
    """
    Main function of the search engine.

    Creates every Elasticsearch index the engine uses (ignoring the 400
    "already exists" response), seeds the admin-log and owner-user documents,
    clears any stale `indexing` flags left over from a previous run, and then
    hands off to main_functions_revoker() to start the worker threads.
    :return: -
    """
    # executor.submit(daily_gathered_channels_controller)
    # executor.submit(main_index_scheduler_controller)
    # -1001007590017
    starting_time = int(time.time())  # startup timestamp (bookkeeping)
    # Commented-out blocks below are destructive maintenance helpers (index
    # deletion / counting) kept for manual use only.
    # res = es.indices.delete(index="to_index")
    # res = es.indices.delete(index="audio_files")
    # res = es.indices.delete(index="channel")
    # es.indices.delete("admin_log_control")
    # res = es.count(index="channel_buffer", body={
    #     "query": {
    #         "match_all": {}
    #     }
    # })["count"]
    # print(f"count of channel_buffer: {res}")
    # Log the names of all existing indices at startup.
    for i in es.indices.get("*"):
        print("index name: ", i)
        # res = es.indices.delete(index=i)
    # --- Create all indices used by the engine (400 = already exists). ---
    to_index = es.indices.create(
        index="to_index",
        body=to_index_mapping,
        ignore=400  # ignore 400 already exists code
    )
    future_channel = es.indices.create(
        index="future_channel",
        body=future_channel_mapping,
        ignore=400  # ignore 400 already exists code
    )
    channel_buffer = es.indices.create(
        index="channel_buffer",
        body=channel_buffer_mapping,
        ignore=400  # ignore 400 already exists code
    )
    # audio = es.indices.create(
    #     index="audio",
    #     body=audio_files_mapping,
    #     ignore=400  # ignore 400 already exists code
    # )
    audio_files = es.indices.create(
        index="audio_files",
        body=audio_files_mapping,
        ignore=400  # ignore 400 already exists code
    )
    channel = es.indices.create(
        index="channel",
        body=channel_mapping,
        ignore=400  # ignore 400 already exists code
    )
    # NOTE(review): the "user" index reuses channel_mapping — confirm this is
    # intentional rather than a copy/paste of the mapping name.
    user = es.indices.create(
        index="user",
        body=channel_mapping,
        ignore=400  # ignore 400 already exists code
    )
    admin_log = es.indices.create(
        index="admin_log_control",
        body=admin_log_control_mapping,
        ignore=400
    )
    # es.indices.delete("user_lists")
    # es.indices.delete("playlist")
    user_lists = es.indices.create(
        index="user_lists",
        body=user_list_mapping,
        ignore=400
    )
    playlists = es.indices.create(
        index="playlist",
        body=playlist_mapping,
        ignore=400
    )
    # --- Seed per-channel admin-log documents (409 = already created). ---
    res = es.create(index="admin_log_control", id=chromusic_id, body={  # Chromusic channel ID: -1001357823954
        "last_offset_date": 0,
        "members_count": 0
    }, ignore=409)
    # print(f"from main: ", res)
    es.create(index="admin_log_control", id=chromusic_fa_id, body={  # Chromusic channel ID: -1001357823954
        "last_offset_date": 0,
        "members_count": 0
    }, ignore=409)
    # --- Seed the owner's user document and force its role to "owner". ---
    # es.delete("user", id=165802777)
    es.create(index="user", id=165802777, body={  # my ID: 165802777 --> username: shelbycobra2016
        "first_name": "Soran",
        "username": "shelbycobra2016",
        "date_joined": int(time.time()),
        "downloaded_audio_count": 0,
        "lang_code": "en",
        "limited": False,
        "role": "owner",
        "coins": 0,
        "last_active_date": int(time.time()),
        "is_admin": True,
        "sex": "neutral",
        "country": "CA"
    }, ignore=409)
    es.update(index="user", id=165802777, body={
        "script": {
            "inline": "ctx._source.role = params.role;",
            "lang": "painless",
            "params": {
                "role": "owner"
            }
        }
    }, ignore=409)
    # --- Clear stale indexing flags left set by a previous (crashed) run. ---
    try:
        res = helpers.scan(es,
                           query={"query": {"match": {
                               "indexing": True
                           }}},
                           index="global_control",
                           )
        for _channel in res:
            flag_update_res = es.update(index="global_control", doc_type="indexing_flag", id=_channel["_id"], body={
                "script": {
                    "inline": "ctx._source.indexing = params.indexing;",
                    "lang": "painless",
                    "params": {
                        "indexing": False,
                    }
                }
            }, ignore=409)
    except Exception as e:
        print(e)
    print("before revoker() ...")
    main_functions_revoker()
def main_functions_revoker():
    """
    Revoke indexers, joining-status handlers, etc. and associate respective clients (manually) (* needs to be
    re-written dynamically.

    Submits each long-running controller to the shared thread-pool executor,
    spreading the work across the `app`, `app2` and `indexer_list` clients.
    :return: -
    """
    executor.submit(main_join_left_checker_controller)
    executor.submit(daily_gathered_channels_controller, app)
    executor.submit(daily_gathered_channels_controller, app2)
    # executor.submit(buffer_gathered_channels_controller, app2)
    for app_instance in indexer_list:
        executor.submit(buffer_gathered_channels_controller, app_instance)
        executor.submit(daily_gathered_channels_controller, app_instance)
        # executor.submit(audio_file_forwarder, app_instance)
    # NOTE(review): `app_instance` below is the loop variable leaked from the
    # for loop above, i.e. the LAST client in indexer_list — confirm that is
    # the intended client for these jobs (raises NameError if the list is empty).
    executor.submit(existing_channels_handler_by_importance, app, 5)
    executor.submit(existing_channels_handler_by_importance, app2, 4)
    executor.submit(existing_channels_handler_by_importance, app_instance, 4)
    executor.submit(existing_channels_handler_by_importance, app, 3)
    executor.submit(existing_channels_handler_by_importance, app2, 2)
    # executor.submit(existing_channels_handler_by_importance, app_instance, 2)
    executor.submit(existing_channels_handler_by_importance, app2, 1)
    executor.submit(existing_channels_handler_by_importance_recent_messages, app, 5)
    # executor.submit(existing_channels_handler_by_importance_recent_messages, app_instance, 5)
    executor.submit(existing_channels_handler_by_importance_recent_messages, app2, 4)
    executor.submit(existing_channels_handler_by_importance_recent_messages, app_instance, 4)
    executor.submit(existing_channels_handler_by_importance_recent_messages, app, 3)
    executor.submit(existing_channels_handler_by_importance_recent_messages, app2, 2)
    # executor.submit(existing_channels_handler_by_importance_recent_messages, app_instance, 2)
    executor.submit(existing_channels_handler_by_importance_recent_messages, app, 1)
    # time.sleep(20)
    # executor.submit(daily_gathered_channels_controller)
    # time.sleep(20)
    # executor.submit(daily_gathered_channels_controller)
def initialize():
    """
    Define and initialize the module-level globals, connect the Telegram
    clients and the database, then launch main() on the executor.
    :return: True on success
    """
    global editing_flag, executor, chromusic_id, chromusic_fa_id, \
        datacenter_id, chromusic_log_id, chromusic_users_files_id, speed_limiter
    # Runtime flags / counters.
    speed_limiter = 0
    editing_flag = False
    # Well-known Telegram chat IDs used throughout the engine.
    chromusic_log_id = -1001279436688
    datacenter_id = -1001380565527
    chromusic_users_files_id = -1001288746290
    chromusic_id = -1001357823954
    chromusic_fa_id = -1001243615671
    # Shared thread pool for every background controller.
    executor = concurrent.futures.ThreadPoolExecutor(max_workers=27)
    telegramAPI_connect()
    # print(app.get_me())
    db_connect()
    try:
        executor.submit(main)
    except Exception as e:
        app.send_message(chromusic_log_id, str(e))
    return True
# Script entry point: set up globals/connections and launch the search engine.
if __name__ == '__main__':
    initialize()
def choose_language(bot, message):
    """
    Present the user with an inline menu of the available languages.

    Falls back to English when the user's stored language cannot be read.
    :param bot: Telegram bot client
    :param message: Telegram message object
    :return: True on success
    """
    requester = message.from_user
    try:
        lang_code = es.get(index="user", id=requester.id)["_source"]["lang_code"]
    except Exception as e:
        print("exception from choose language", e)
        lang_code = "en"
    buttons = language_handler("button_language_list", lang_code)
    prompt = language_handler("choose_language_text", lang_code, requester.first_name)
    exception_handler(bot.send_message(chat_id=requester.id,
                                       text=prompt,
                                       reply_markup=InlineKeyboardMarkup(buttons),
                                       parse_mode='HTML'))
    return True
@bot.on_inline_query()
def inine_res(bot: object, query: object) -> object:
    """
    Handle the coming inline messages.

    options:
        1. more_results: Return more results (paged continuation of a search)
        2. addtopl: Add an audio file to a playlist (via the "more" button under
           each search result)
        3. history: Show the last downloaded audio files ("history" button in
           the "home" and "help" menus)
        4. myplaylists: Show a list of the user's playlists
        5. showfiles: Show audio files within a playlist
        6. edit_title: Edit the title of a playlist
        7. edit_description: Edit the description of a playlist
        8. help_catalog: Return a list of URLs where tutorial contents live
        9. default: Search audio files (plain query text without any prefix)
    :param bot: Telegram bot client
    :param query: Telegram query object
    :return: Query answers on success, report the problem otherwise
    """
    print("got inline")
    results = []
    user = query.from_user
    # Zero-width-ish filler used to force LTR rendering of mixed-script titles.
    hidden_character = " "
    # BUG FIX: default the language up front — when the user lookup below
    # fails, execution continues past the except block and previously hit an
    # unbound `lang_code` at the `back_text` lookup.
    lang_code = "en"
    try:
        user_source = es.get("user", id=user.id)["_source"]
        lang_code = user_source["lang_code"]
        if user_source["role"] == "searcher":
            # Users who have not joined the channel only get a "join" prompt.
            item_title = language_handler("inline_join_channel_title_text", lang_code)
            item_description = language_handler("inline_join_channel_description_text", lang_code)
            item_content = language_handler("inline_join_channel_content_text", lang_code)
            results.append(InlineQueryResultArticle(
                title=item_title,
                description=item_description,
                thumb_url="https://telegra.ph/file/cd08f00005cb527e6bcdb.jpg",
                input_message_content=InputTextMessageContent(item_content, parse_mode="HTML")))
            exception_handler(
                bot.answer_inline_query(query.id, results=results,
                                        cache_time=10, switch_pm_text="Chromusic",
                                        switch_pm_parameter="back_to_the_bot"))
    except Exception:
        # Unknown user (no DB record): invite them to start the bot first.
        try:
            item_title = language_handler("inline_start_bot_title_text", "en")
            item_description = language_handler("inline_start_bot_description_text", "en")
            item_content = language_handler("inline_start_bot_content_text", "en")
            results.append(InlineQueryResultArticle(
                title=item_title,
                description=item_description,
                thumb_url="https://telegra.ph/file/cd08f00005cb527e6bcdb.jpg",
                input_message_content=InputTextMessageContent(item_content, parse_mode="HTML")))
            exception_handler(
                bot.answer_inline_query(query.id, results=results,
                                        cache_time=10, switch_pm_text="Start", switch_pm_parameter="back_to_the_bot"))
        except Exception as e:
            print(f"exception from first inline result exception handler: {e}")
    back_text = language_handler("back_to_the_bot", lang_code)
    if "#more_results:" in str(query.query):
        # Paged continuation of a previous search: re-run the query and skip
        # the hits the first page already showed.
        processed_query = str(str(query.query).split("#more_results:")[-1]).replace("_", " ")
        results_list = es.search(index="audio_files", body={"query": {
            "multi_match": {
                "query": processed_query,
                "type": "best_fields",
                "fields": ["title", "file_name", "performer"],
                "minimum_should_match": "60%"
            }}}, from_=0, size=50)
        res_len = int(results_list["hits"]["total"]["value"])
        # The first page showed up to 10 hits; only list the rest here.
        if res_len > 10:
            first_index_subtract_from = 10
        else:
            first_index_subtract_from = res_len
        for index, hit in enumerate(results_list["hits"]["hits"]):
            if index + 1 > first_index_subtract_from:
                duration = timedelta(seconds=int(hit['_source']['duration']))
                d = datetime(1, 1, 1) + duration
                temp_perf_res = hit["_source"]["performer"]
                temp_titl_res = hit["_source"]["title"]
                temp_filnm_res = hit["_source"]["file_name"]
                # Fall back to the file name when performer/title are empty.
                _performer = temp_perf_res if len(temp_perf_res) > 1 else temp_filnm_res
                _performer = textwrap.shorten(_performer, width=34, placeholder='...')
                _title = temp_titl_res if len(temp_titl_res) > 1 else temp_filnm_res
                _title = textwrap.shorten(_title, width=34, placeholder='...')
                item_describtion = f"{hidden_character}{_performer}\n" \
                                   f"{hidden_character}{_floppy_emoji} | {round(int(hit['_source']['file_size']) / 1_048_576, 1)} MB " \
                                   f"{_clock_emoji} | {str(d.hour) + ':' if d.hour > 0 else ''}{d.minute}:{d.second}"
                item_title = hidden_character + str(index + 1) + '. ' + _title
                results.append(InlineQueryResultArticle(
                    title=item_title,
                    description=item_describtion,
                    thumb_url="https://telegra.ph/file/cd08f00005cb527e6bcdb.jpg",
                    input_message_content=InputTextMessageContent(f"/dl_{hit['_id']}", parse_mode="HTML")))
        if res_len < 40:
            # Too few exact hits: pad the answer with a fuzzy search.
            results_list = es.search(index="audio_files", body={"query": {
                "multi_match": {
                    "query": processed_query,
                    "type": "best_fields",
                    "fields": ["title", "file_name", "performer"],
                    "fuzziness": "AUTO",
                    "minimum_should_match": "50%"
                }}}, size=50 - res_len)
            for index, hit in enumerate(results_list["hits"]["hits"]):
                duration = timedelta(seconds=int(hit['_source']['duration']))
                d = datetime(1, 1, 1) + duration
                temp_perf_res = hit["_source"]["performer"]
                temp_titl_res = hit["_source"]["title"]
                temp_filnm_res = hit["_source"]["file_name"]
                _performer = temp_perf_res if len(temp_perf_res) > 0 else temp_filnm_res
                _performer = textwrap.shorten(_performer, width=34, placeholder='...')
                _title = temp_titl_res if len(temp_titl_res) > 1 else temp_filnm_res
                _title = textwrap.shorten(_title, width=34, placeholder='...')
                _caption_content = language_handler("inline_file_caption", lang_code, hit)
                item_describtion = f"{hidden_character}{_performer}\n" \
                                   f"{hidden_character}{_floppy_emoji} | {round(int(hit['_source']['file_size']) / 1_048_576, 1)} MB " \
                                   f"{_clock_emoji} | {str(d.hour) + ':' if d.hour > 0 else ''}{d.minute}:{d.second}"
                # Continue numbering after the exact-match results.
                item_title = hidden_character + str(index + res_len + 1) + '. ' + _title
                results.append(InlineQueryResultArticle(
                    title=item_title,
                    description=item_describtion,
                    thumb_url="https://telegra.ph/file/cd08f00005cb527e6bcdb.jpg",
                    input_message_content=InputTextMessageContent(_caption_content, parse_mode="HTML")))
        exception_handler(
            bot.answer_inline_query(query.id, results=results,
                                    cache_time=10, switch_pm_text=back_text, switch_pm_parameter="back_to_the_bot"))
    elif "#addtopl:" in str(query.query):
        try:
            file_id = str(query.query).split(" ")[1]
            audio_file = es.get("audio_files", id=file_id)
            print("audio file in on_inline:", audio_file)
            number_of_playlists = es.count(index="playlist", body={
                "query": {
                    "match": {
                        "author_id": user.id
                    }
                }
            })["count"]
            # Users may hold at most 5 playlists; hide the "new playlist"
            # header once the cap is reached.
            new_pl_header = True
            if number_of_playlists > 4:
                new_pl_header = False
            playlists = list(es.search(index="playlist", body={
                "query": {
                    "match": {
                        "author_id": user.id
                    }
                }
            })["hits"]["hits"])
            print("\n\n\n\n\nplaylists", playlists)
            func = "addpl"
            playlist_inline_keyboard = language_handler("playlist_keyboard", lang_code, playlists, audio_file,
                                                        new_pl_header, func)
            bot.answer_inline_query(query.id, results=playlist_inline_keyboard,
                                    cache_time=1, switch_pm_text=back_text, switch_pm_parameter="back_to_the_bot")
            print("playlists:", playlists)
        except Exception as e:
            print("from inline query- #addtopl: ", e)
    elif str(query.query) == "#history":
        download_history = []
        history_result = es.get(index="user_lists", id=user.id)["_source"]["downloaded_audio_id_list"]
        for audio_id in history_result:
            try:
                audio_file = es.get(index="audio_files", id=audio_id)
                download_history.append(audio_file)
                print(audio_file)
            except Exception as e:
                # Referenced file may have been deleted from the index.
                print("exception from get_history ", e)
                continue
        func = "history"
        show_add_pl_header = False
        playlist_inline_keyboard = language_handler("playlist_keyboard", lang_code, download_history, "audio_file",
                                                    show_add_pl_header, func)
        bot.answer_inline_query(query.id, results=playlist_inline_keyboard,
                                cache_time=1, switch_pm_text=back_text, switch_pm_parameter="back_to_the_bot")
    elif str(query.query) == "#myplaylists":
        playlists = list(es.search(index="playlist", body={
            "query": {
                "match": {
                    "author_id": user.id
                }
            }
        })["hits"]["hits"])
        func = "playlists"
        show_add_pl_header = False
        playlist_inline_keyboard = language_handler("playlist_keyboard", lang_code, playlists, "audio_file",
                                                    show_add_pl_header, func)
        bot.answer_inline_query(query.id, results=playlist_inline_keyboard,
                                cache_time=1, switch_pm_text=back_text, switch_pm_parameter="back_to_the_bot")
    elif "#showfiles" in str(query.query):
        try:
            playlist_id = str(query.query).split(" ")[1]
            results_list = es.get(index="playlist", id=playlist_id)["_source"]
            for index, file_id in enumerate(results_list["list"]):
                res = es.get(index="audio_files", id=file_id)["_source"]
                item_title = f"{str(index + 1)}. {res['title']}"
                item_title = hidden_character + item_title
                item_description = f"{hidden_character}{res['performer']}"
                results.append(InlineQueryResultArticle(
                    title=item_title,
                    description=item_description,
                    thumb_url="https://telegra.ph/file/6e6831bdd89011688bddb.jpg",
                    input_message_content=InputTextMessageContent(f"/dl_{file_id}", parse_mode="HTML")))
            exception_handler(
                bot.answer_inline_query(query.id, results=results,
                                        cache_time=10, switch_pm_text=back_text, switch_pm_parameter="back_to_the_bot"))
        except Exception as e:
            print("print from show files: ", e)
    elif "#edit_title" in str(query.query):
        try:
            playlist_id = str(query.query).split(" ")[1]

            def unpack(s):
                # Re-join the remaining query words into a single string.
                return " ".join(map(str, s))

            print("from edit title:", query)
            if len(str(query.query).split(" ")) > 2:
                # The user has typed a new title after the playlist id.
                args = str(query.query).split(' ')[2:]
                new_title = f"{hidden_character}{unpack(args)}"
                results.append(InlineQueryResultArticle(
                    title="Save",
                    description=new_title,
                    thumb_url="https://www.howtogeek.com/wp-content/uploads/2017/09/img_59b89568ec308.jpg",
                    input_message_content=InputTextMessageContent(f"/edit_pl_title {playlist_id} {new_title}",
                                                                  parse_mode="HTML")))
                exception_handler(
                    bot.answer_inline_query(query.id, results=results,
                                            cache_time=1, switch_pm_text=back_text,
                                            switch_pm_parameter="back_to_the_bot"))
            else:
                # No new title yet: show the how-to guide item.
                title = language_handler("edit_playlist_information_guide", lang_code, "title")
                description = language_handler("edit_playlist_information_guide", lang_code, "description")
                results.append(InlineQueryResultArticle(
                    title=title,
                    description=description,
                    thumb_url="https://www.howtogeek.com/wp-content/uploads/2017/09/img_59b89568ec308.jpg",
                    input_message_content=InputTextMessageContent(f"/edit_pl_title {playlist_id} default title",
                                                                  parse_mode="HTML")))
                exception_handler(
                    bot.answer_inline_query(query.id, results=results,
                                            cache_time=1, switch_pm_text=back_text,
                                            switch_pm_parameter="back_to_the_bot"))
        except Exception as e:
            # BUG FIX: was a bare `except:` that then referenced an unbound `e`.
            print("from #editfile inline: ", e)
    elif "#edit_description" in str(query.query):
        try:
            print("query ", query.query)
            playlist_id = str(query.query).split(" ")[1]

            def unpack(s):
                # Re-join the remaining query words into a single string.
                return " ".join(map(str, s))

            if len(str(query.query).split(" ")) > 2:
                # The user has typed a new description after the playlist id.
                args = str(query.query).split(' ')[2:]
                new_title = f"{hidden_character}{unpack(args)}"
                results.append(InlineQueryResultArticle(
                    title="Save",
                    description=new_title,
                    thumb_url="https://www.howtogeek.com/wp-content/uploads/2017/09/img_59b89568ec308.jpg",
                    input_message_content=InputTextMessageContent(f"/edit_pl_description {playlist_id} {new_title}",
                                                                  parse_mode="HTML")))
                exception_handler(
                    bot.answer_inline_query(query.id, results=results,
                                            cache_time=1, switch_pm_text=back_text,
                                            switch_pm_parameter="back_to_the_bot"))
            else:
                # No new description yet: show the how-to guide item.
                title = language_handler("edit_playlist_information_guide", lang_code, "title")
                description = language_handler("edit_playlist_information_guide", lang_code, "description")
                results.append(InlineQueryResultArticle(
                    title=title,
                    description=description,
                    thumb_url="https://www.howtogeek.com/wp-content/uploads/2017/09/img_59b89568ec308.jpg",
                    input_message_content=InputTextMessageContent(f"/edit_pl_title {playlist_id} default description",
                                                                  parse_mode="HTML")))
                exception_handler(
                    bot.answer_inline_query(query.id, results=results,
                                            cache_time=1, switch_pm_text=back_text,
                                            switch_pm_parameter="back_to_the_bot"))
        except Exception as e:
            print("from #editfile inline: ", e)
    elif str(query.query) == "#help_catalog":
        print("got help_catalog")
        try:
            help_inline_keyboard = language_handler("help_inline_keyboard_list", lang_code)
            bot.answer_inline_query(query.id, results=help_inline_keyboard,
                                    cache_time=1, switch_pm_text=back_text, switch_pm_parameter="back_to_the_bot")
        except Exception as e:
            print("exception from help_catalog: ", e)
    else:
        # Plain text search (no '#' prefix in the query).
        # NOTE(review): from_=1 skips the highest-ranked hit — confirm intended.
        results_list = es.search(index="audio_files", body={"query": {
            "multi_match": {
                "query": str(query.query).split("#more_results:")[-1],
                "type": "best_fields",
                "fields": ["title", "file_name", "performer"],
                "minimum_should_match": "70%"
            }}}, from_=1, size=50)
        for index, hit in enumerate(results_list["hits"]["hits"]):
            duration = timedelta(seconds=int(hit['_source']['duration']))
            d = datetime(1, 1, 1) + duration
            temp_perf_res = hit["_source"]["performer"]
            temp_titl_res = hit["_source"]["title"]
            temp_filnm_res = hit["_source"]["file_name"]
            _performer = temp_perf_res if len(temp_perf_res) > 0 else temp_filnm_res
            _performer = textwrap.shorten(_performer, width=34, placeholder='...')
            _title = temp_titl_res if len(temp_titl_res) > 0 else temp_filnm_res
            _title = textwrap.shorten(_title, width=34, placeholder='...')
            _caption_content = language_handler("inline_file_caption", lang_code, hit)
            item_describtion = f"{hidden_character}{_performer}\n" \
                               f"{hidden_character}{_floppy_emoji} | {round(int(hit['_source']['file_size']) / 1_048_576, 1)} MB " \
                               f"{_clock_emoji} | {str(d.hour) + ':' if d.hour > 0 else ''}{d.minute}:{d.second}"
            item_title = hidden_character + str(index + 1) + '. ' + _title
            results.append(InlineQueryResultArticle(
                title=item_title,
                description=item_describtion,
                thumb_url="https://telegra.ph/file/cd08f00005cb527e6bcdb.jpg",
                input_message_content=InputTextMessageContent(_caption_content, parse_mode="HTML")))
        # BUG FIX: hits.total is an object in Elasticsearch 7.x — read its
        # "value" field (consistent with the #more_results branch above);
        # int() on the dict raised TypeError before.
        res_len = int(results_list["hits"]["total"]["value"])
        if res_len < 50:
            # Too few exact hits: pad the answer with a fuzzy search.
            results_list = es.search(index="audio_files", body={"query": {
                "multi_match": {
                    "query": str(query.query).split("#more_results:")[-1],
                    "type": "best_fields",
                    "fields": ["title", "file_name", "performer"],
                    "fuzziness": "AUTO",
                    "minimum_should_match": "50%"
                }}}, size=50 - res_len)
            for index, hit in enumerate(results_list["hits"]["hits"]):
                duration = timedelta(seconds=int(hit['_source']['duration']))
                d = datetime(1, 1, 1) + duration
                temp_perf_res = hit["_source"]["performer"]
                temp_titl_res = hit["_source"]["title"]
                temp_filnm_res = hit["_source"]["file_name"]
                _performer = temp_perf_res if len(temp_perf_res) > 0 else temp_filnm_res
                _performer = textwrap.shorten(_performer, width=34, placeholder='...')
                _title = temp_titl_res if len(temp_titl_res) > 0 else temp_filnm_res
                _title = textwrap.shorten(_title, width=34, placeholder='...')
                _caption_content = language_handler("inline_file_caption", lang_code, hit)
                item_describtion = f"{hidden_character}{_performer}\n" \
                                   f"{hidden_character}{_floppy_emoji} | {round(int(hit['_source']['file_size']) / 1_048_576, 1)} MB " \
                                   f"{_clock_emoji} | {str(d.hour) + ':' if d.hour > 0 else ''}{d.minute}:{d.second}"
                item_title = hidden_character + str(index + 1) + '. ' + _title
                results.append(InlineQueryResultArticle(
                    title=item_title,
                    description=item_describtion,
                    thumb_url="https://telegra.ph/file/cd08f00005cb527e6bcdb.jpg",
                    input_message_content=InputTextMessageContent(_caption_content, parse_mode="HTML")))
        exception_handler(
            bot.answer_inline_query(query.id, results=results,
                                    cache_time=10, switch_pm_text=back_text, switch_pm_parameter="back_to_the_bot"))
@bot.on_callback_query()
def callback_query_handler(bot, query):
    """
    Handle callback queries.

    options:
        1. "lang": Show the list of available languages
        2. [language code]: Register the chosen language for the user
        3. "joined": Check whether the user has joined the Telegram channel
        4. get_list: Send the audio files of a playlist as a text message
        5. delete / ydelete / ndelete / adelete / afdelete: Remove a playlist or
           an audio file after a 2-step confirmation
        6. edit (editpl): Edit playlist meta-data
        7. showplaylist: Show the menu for a single playlist
        8. showmyplaylists: Show the list of playlists created by the user
        9. home: Show the "Home" menu
        10. help: Show the "Help" menu
    :param bot: Telegram bot object
    :param query: Telegram query object
    :return: True on success
    """
    user = query.from_user
    user_data = es.get(index="user", id=user.id)["_source"]
    lang_code = user_data["lang_code"]
    joined_status = ["joined"]
    lang_list = ["en", "fa", "hi", "ru", "ar"]
    if query.data == "lang":
        choose_language(bot, query)
    if query.data in lang_list:
        print("got query")
        # The callback data is the language code itself.
        lang_code = query.data
        es.update("user", id=query.from_user.id, body={
            "script":
                {
                    "inline": "ctx._source.lang_code = params.lang_code;",
                    "lang": "painless",
                    "params": {
                        "lang_code": lang_code
                    }
                }
        }, ignore=409)
        text = language_handler("lang_register_alert", lang_code, query.from_user.first_name)
        exception_handler(bot.answer_callback_query(
            query.id,
            text=text,
            show_alert=True
        ))
        query.message.delete()
        try:
            # If the previous message was the pre-registration welcome,
            # replace it with the localized welcome text.
            prev_message = bot.get_messages(query.from_user.id, int(query.message.message_id) - 1)
            if prev_message.text:
                print("before contains:", prev_message.text)
                if "Take your audio searching to the speed" in str(prev_message.text):
                    print("after contains:", prev_message.text)
                    text = language_handler("welcome", lang_code, query.from_user.first_name)
                    exception_handler(prev_message.edit_text(text))
                print(bot.get_messages(query.from_user.id, int(query.message.message_id) - 1))
        except Exception as e:
            print("from editing welcome message: ", e)
        print(query)
    elif query.data in joined_status:
        if query.data == "joined":
            try:
                # get_chat_member raises when the user is not in the channel.
                if user_data["lang_code"] == "fa":
                    user = exception_handler(app.get_chat_member(chromusic_fa_id, query.from_user.id))
                else:
                    user = exception_handler(app.get_chat_member(chromusic_id, query.from_user.id))
                es.update("user", id=query.from_user.id, body={
                    "script":
                        {
                            "inline": "ctx._source.role = params.role;"
                                      "ctx._source.limited = params.limited;",
                            "lang": "painless",
                            "params": {
                                "role": "subscriber",
                                "limited": False
                            }
                        }
                }, ignore=409)
                user_data = es.get("user", id=query.from_user.id)["_source"]
                text = language_handler("has_joined", user_data["lang_code"], user_data["first_name"])
            except Exception:
                # Not a member (or the lookup failed): keep them limited.
                user_data = es.get("user", id=query.from_user.id)["_source"]
                text = language_handler("not_joined", user_data["lang_code"], user_data["first_name"])
            exception_handler(bot.answer_callback_query(
                query.id,
                text=text,
                show_alert=True
            ))
        else:
            exception_handler(bot.answer_callback_query(
                query.id,
                text=""
            ))
    elif "get_list" in str(query.data):
        playlist_id = str(query.data).split(" ")[1]
        results_list = es.get(index="playlist", id=playlist_id)["_source"]
        results = []
        for file_id in results_list["list"]:
            res = es.get(index="audio_files", id=file_id)
            results.append(res)
            print("\n\nresults\n", res)
        text = language_handler("playlist_result_list_handler", lang_code, results, results_list["title"])
        print("text", text)
        exception_handler(bot.answer_callback_query(
            query.id,
            text=''
        ))
        exception_handler(bot.send_message(query.from_user.id, text=text, parse_mode="HTML"))
    elif "delete" in str(query.data):
        print(query)
        operation = str(query.data).split(" ")[0]
        playlist_id = str(query.data).split(" ")[1]
        if operation == "delete":
            # Step 1: ask for confirmation before deleting the playlist.
            es.get(index="playlist", id=playlist_id)  # raises if the playlist is gone
            func = "playlist"
            text = language_handler("delete_playlist_validation_text", lang_code, func)
            markup_list = language_handler("delete_playlist_validation_keyboard", lang_code, playlist_id, func)
            exception_handler(query.edit_message_text(
                text=f"<b>{text}</b>",
                reply_markup=InlineKeyboardMarkup(markup_list),
                parse_mode='HTML'))
        elif operation == "ydelete":
            # Step 2 confirmed. With a third token the target is one audio file
            # inside the playlist; otherwise the playlist itself is deleted.
            try:
                file_retrieve_id = str(query.data).split(" ")[2]
                es.update(index="playlist", id=playlist_id, body={
                    "script": {
                        "source": "if (ctx._source.list.contains(params.file_id)) "
                                  "{ctx._source.list.remove(ctx._source.list.indexOf(params.file_id))} "
                                  "else {ctx.op = 'none'}",
                        "lang": "painless",
                        "params": {
                            "file_id": file_retrieve_id
                        }
                    }
                }, ignore=409)
                text = language_handler("file_deleted_from_playlist", user_data["lang_code"])
                exception_handler(query.answer(
                    text=text,
                    show_alert=True))
                bot.delete_messages(user.id, query.message.message_id)
            except Exception:
                # No file token present (IndexError): delete the whole playlist.
                es.delete(index="playlist", id=playlist_id)
                text = language_handler("playlist_deleted_text", lang_code)
                exception_handler(query.answer(
                    text=f"{text}",
                    show_alert=True))
                bot.delete_messages(user.id, query.message.message_id)
        elif operation == "ndelete":
            # Declined: re-render the single-playlist menu.
            playlist_files = es.get(index="playlist", id=playlist_id)["_source"]
            single_playlist_markup_list = language_handler("single_playlist_markup_list", user_data["lang_code"],
                                                           playlist_id)
            single_playlist_text = language_handler("single_playlist_text", user_data["lang_code"], playlist_files)
            exception_handler(query.edit_message_text(text=single_playlist_text,
                                                      reply_markup=InlineKeyboardMarkup(single_playlist_markup_list),
                                                      parse_mode='HTML'))
        elif operation == "adelete":
            # Show a keyboard of the playlist's files to pick one for deletion.
            results_list = es.get(index="playlist", id=playlist_id)
            results = []
            for _audio_file_id in results_list["_source"]["list"]:
                results.append(es.get(index="audio_files", id=_audio_file_id))
            print("result list:", results_list)
            print("results:", results)
            text = language_handler("delete_audio_file_text", lang_code)
            delete_audio_guide_text = language_handler("delete_audio_guide_text", lang_code)
            exception_handler(bot.answer_callback_query(
                query.id,
                text=delete_audio_guide_text,
                show_alert=True
            ))
            da_markup_keyborad = language_handler("delete_audio_murkup_keyboard", lang_code, playlist_id, results)
            print("da_markup_keyborad", da_markup_keyborad)
            exception_handler(query.edit_message_text(text=text, parse_mode="HTML",
                                                      reply_markup=InlineKeyboardMarkup(da_markup_keyborad)))
        elif operation == "afdelete":
            # Ask for confirmation before removing one audio file.
            audio_file_id = str(query.data).split(" ")[2]
            print("got delete query: ", query)
            func = "audio_file"
            text = language_handler("delete_playlist_validation_text", lang_code, func)
            markup_list = language_handler("delete_playlist_validation_keyboard", lang_code, playlist_id, func,
                                           audio_file_id)
            exception_handler(query.edit_message_text(
                text=f"<b>{text}</b>",
                reply_markup=InlineKeyboardMarkup(markup_list),
                parse_mode='HTML'))
    elif "edit" in str(query.data):
        _query = str(query.data).split(" ")[0]
        playlist_id = str(query.data).split(" ")[1]
        if _query == "editpl":
            try:
                print(query)
                playlist = es.get(index="playlist", id=playlist_id)
                print(playlist)
                text = language_handler("edit_playlist_text", lang_code, playlist)
                markup_list = language_handler("edit_playlist_keyboard", lang_code, playlist_id)
                exception_handler(query.edit_message_text(
                    text=f"{text}",
                    reply_markup=InlineKeyboardMarkup(markup_list),
                    parse_mode='HTML'))
            except Exception as e:
                print("exception from edit playlist: ", e)
    elif "showplaylist" in str(query.data):
        show_playlist(query, user_data)
    elif "showmyplaylists" in str(query.data):
        playlist_id = str(query.data).split(" ")[1]
        playlist_files = es.get(index="playlist", id=playlist_id)["_source"]
        markup_list = language_handler("playlists_buttons", user_data["lang_code"])
        mylists_menu_text = language_handler("mylists_menu_text", user_data["lang_code"])
        print(playlist_files)
        exception_handler(query.edit_message_text(text=mylists_menu_text,
                                                  reply_markup=InlineKeyboardMarkup(markup_list),
                                                  parse_mode='HTML'))
    elif str(query.data) == "home":
        home_markup_keyboard = language_handler("home_markup_keyboard", user_data["lang_code"])
        home_keyboard_text = language_handler("home_keyboard_text", user_data["lang_code"])
        exception_handler(query.edit_message_text(text=home_keyboard_text,
                                                  reply_markup=InlineKeyboardMarkup(home_markup_keyboard),
                                                  parse_mode='HTML'))
    elif str(query.data) == "help":
        help_markup_keyboard = language_handler("help_markup_keyboard", user_data["lang_code"])
        help_keyboard_text = language_handler("help_keyboard_text", user_data["lang_code"])
        exception_handler(query.edit_message_text(text=help_keyboard_text,
                                                  reply_markup=InlineKeyboardMarkup(help_markup_keyboard),
                                                  parse_mode='HTML'))
    return True
def show_playlist(query, user_data):
    """
    Render the single-playlist menu for the playlist referenced in the query data.

    Rebuilds the inline keyboard (audio files list, get list, edit, delete,
    home, back) and the playlist summary text in the user's language, then
    edits the current message in place.

    :param query: Callback query whose data contains "showplaylist <playlist_id>"
    :param user_data: The user's document from the database
    :return: True on success; False otherwise
    """
    try:
        query.answer(f"Back to the playlist ...")
        lang = user_data["lang_code"]
        target_playlist_id = str(query.data).split(" ")[1]
        playlist_doc = es.get(index="playlist", id=target_playlist_id)["_source"]
        keyboard = language_handler("single_playlist_markup_list", lang,
                                    target_playlist_id)
        menu_text = language_handler("single_playlist_text", lang, playlist_doc)
        print(playlist_doc)
        exception_handler(query.edit_message_text(
            text=menu_text,
            reply_markup=InlineKeyboardMarkup(keyboard),
            parse_mode='HTML'))
    except Exception as e:
        print("from showplaylist:", e)
        return False
    return True
@bot.on_message(Filters.command(["users", "promote", "reset_channel", "index"]))
def users_log(bot, message):
    """
    Owner-only maintenance commands:
     1. "users": send a summary of document counts per index (quick static
        inspection; otherwise use Kibana).
     2. "promote": change a channel's importance in the indexer waiting list.
     3. "reset_channel": reset a channel's indexing offset so it is re-indexed
        from the beginning.
     4. "index": index a channel immediately without waiting in the indexer queue.
    Commands from non-owner users are silently ignored.
    :param bot: Telegram bot object
    :param message: Telegram message object
    :return: True on success
    """
    user = message.from_user
    user_data = es.get(index="user", id=user.id)["_source"]
    if user_data["role"] == "owner":
        command = message.command[0]
        if command == "users":
            _send_db_status(bot, user)
        elif command == "promote":
            try:
                channel_username = message.command[1]
                new_importance = message.command[2]
                _update_channel_field(
                    bot, user, channel_username,
                    script="ctx._source.importance = params.importance;",
                    params={"importance": new_importance},
                    success_header=f"the result of promoting @{channel_username} "
                                   f"to importance {new_importance}:",
                    not_found_text=f"Channel with this username doesn't exist in the database\n\n"
                                   f"Channel username: @{channel_username}\n"
                                   f"New importance: {new_importance}")
            except Exception as e:
                text = f"exception occurred while trying to promote channels: \n\n{e}"
                exception_handler(bot.send_message(user.id, text, parse_mode="html"))
        elif command == "reset_channel":
            try:
                channel_username = message.command[1]
                _update_channel_field(
                    bot, user, channel_username,
                    script="ctx._source.last_indexed_offset_date = params.last_indexed_offset_date;",
                    params={"last_indexed_offset_date": 0},
                    success_header=f"the result of resetting @{channel_username}:",
                    not_found_text=f"Channel with this username doesn't exist in the database\n\n"
                                   f"Channel username: @{channel_username}")
            except Exception as e:
                # fixed: this branch previously reported it was "promoting" channels
                text = f"exception occurred while trying to reset channels: \n\n{e}"
                exception_handler(bot.send_message(user.id, text, parse_mode="html"))
        elif command == "index":
            try:
                channel_username = message.command[1]
                urgent_index(channel_username, user)
            except Exception as e:
                text = f"exception occurred while trying to index channels: \n\n{e}"
                exception_handler(bot.send_message(user.id, text, parse_mode="html"))
    return True


# Reusable match-all query body for the stats counts below.
_MATCH_ALL = {"query": {"match_all": {}}}


def _es_count(index, body):
    """Return only the integer hit count of a count query against *index*."""
    return es.count(index=index, body=body)["count"]


def _match_query(field, value):
    """Build a simple single-field match query body."""
    return {"query": {"match": {field: value}}}


def _send_db_status(bot, user):
    """Collect per-index document counts and send them to *user* as one HTML message."""
    audio_count = _es_count("audio", _MATCH_ALL)
    audio_files_count = _es_count("audio_files", _MATCH_ALL)
    users_count = _es_count("user", _MATCH_ALL)
    # users grouped by interface language
    lang_counts = {code: _es_count("user", _match_query("lang_code", code))
                   for code in ("en", "hi", "ru", "fa", "ar")}
    channel_count = _es_count("channel", _MATCH_ALL)
    # channels grouped by indexer importance (0 .. 5)
    importance_counts = {imp: _es_count("channel", _match_query("importance", imp))
                         for imp in range(6)}
    to_index_count = _es_count("to_index", _MATCH_ALL)
    future_channel_count = _es_count("future_channel", _MATCH_ALL)
    channel_buffer_count = _es_count("channel_buffer", _MATCH_ALL)
    # NOTE(review): the previous version also created a helpers.scan generator
    # over "user_lists" but never consumed it (a no-op), so it was removed.
    # Section numbering fixed: the old text labelled three sections "3.".
    counts_text = f"<b>Number of indexed docs in each index</b>\n\n" \
                  f"<b>1. Audio:</b> {audio_count}\n" \
                  f"<b> Audio_files:</b> {audio_files_count}\n\n" \
                  f"<b>2. Users:</b> {users_count}\n" \
                  f" users by language:\n" \
                  f" en: {lang_counts['en']}\n" \
                  f" hi: {lang_counts['hi']}\n" \
                  f" ru: {lang_counts['ru']}\n" \
                  f" fa: {lang_counts['fa']}\n" \
                  f" ar: {lang_counts['ar']}\n\n" \
                  f"<b>3. To_index:</b> {to_index_count}\n\n" \
                  f"<b>4. future_channel:</b> {future_channel_count}\n\n" \
                  f"<b>5. channel_buffer:</b> {channel_buffer_count}\n\n" \
                  f"<b>6. Channels:</b> {channel_count}\n" \
                  f" channel by importance:\n" \
                  f" 0: {importance_counts[0]}\n" \
                  f" 1: {importance_counts[1]}\n" \
                  f" 2: {importance_counts[2]}\n" \
                  f" 3: {importance_counts[3]}\n" \
                  f" 4: {importance_counts[4]}\n" \
                  f" 5: {importance_counts[5]}\n\n"
    exception_handler(bot.send_message(user.id, counts_text, parse_mode="html"))


def _update_channel_field(bot, user, channel_username, script, params,
                          success_header, not_found_text):
    """
    Find a channel document by username and run a painless update *script* on it.
    The raw search response and the update result (or *not_found_text* when the
    channel is unknown) are reported back to the admin.
    """
    search_res = es.search(index="channel", body=_match_query("username", channel_username))
    hits = search_res["hits"]["hits"]
    if len(hits) > 0:
        res_text = f"search results: \n\n{search_res}"
        exception_handler(bot.send_message(user.id, res_text, parse_mode="html"))
        res = es.update(index="channel", id=hits[0]["_id"], body={
            "script": {
                "inline": script,
                "lang": "painless",
                "params": params
            }
        }, ignore=409)  # 409: ignore version conflicts, matching the original behavior
        result_text = f"{success_header}\n\n{res['result']}"
        exception_handler(bot.send_message(user.id, result_text, parse_mode="html"))
    else:
        exception_handler(bot.send_message(user.id, not_found_text, parse_mode="html"))
def urgent_index(channel_username: str, user: object):
    """
    Index the requested channel immediately, bypassing the indexer queue.

    If the channel is already registered but nothing has been indexed from it
    yet, re-index it from its first message; if it is unknown, index it as a
    new channel. Progress and every failure are reported to the requesting
    admin via private messages.

    :param channel_username: Channel username [*str]
    :param user: Telegram user object (the admin who issued the command)
    :return: True on success
    """
    _channel_instance_db = es.search(index="channel", body={
        "query": {
            "match": {
                "username": channel_username
            }
        }
    })
    if len(_channel_instance_db["hits"]["hits"]) > 0:
        # Channel is already known; only re-index if nothing has been indexed from it yet.
        res_text = f"This channel has already been indexed\n\nSearch results: \n\n{_channel_instance_db}"
        exception_handler(bot.send_message(user.id, res_text, parse_mode="html"))
        # db_importance = _channel_instance_db["hits"]["hits"][0]["_source"]["importance"]
        db_downloaded_from_count = _channel_instance_db["hits"]["hits"][0]["_source"]["indexed_from_audio_count"]
        if db_downloaded_from_count == 0:
            _channel_id = _channel_instance_db["hits"]["hits"][0]["_id"]
            starting_text = f"Indexing @{channel_username} started ...\n\nIt might take several minutes."
            exception_handler(bot.send_message(user.id, starting_text, parse_mode="html"))
            #
            # Offset 0: start indexing from the channel's first message.
            audio_file_indexer(app, _channel_id, 0)
            #
            finishing_text = f"Indexing @{channel_username} finished successfully"
            exception_handler(bot.send_message(user.id, finishing_text, parse_mode="html"))
    else:
        # Unknown channel: notify the admin, then try to index it as a brand-new channel.
        result_text = f"Channel with this username doesn't exist in the database.\n" \
                      f"Checking it on telegram...\n\n" \
                      f"Channel username: @{channel_username}"
        exception_handler(bot.send_message(user.id, result_text, parse_mode="html"))
        try:
            # time.sleep(10)
            # current_chat = app2.get_chat(channel_username)
            # time.sleep(3)
            # if current_chat.type == "channel":
            starting_text = f"Indexing @{channel_username} started ...\n\nIt might take several minutes."
            exception_handler(bot.send_message(user.id, starting_text, parse_mode="html"))
            # audio_file_indexer(app2, current_chat.id, 0)
            # random_indexer = random.choice([app, app2, indexer_list[0]])
            new_channel_indexer(app, [channel_username], "future_channel")
            finishing_text = f"Indexing @{channel_username} finished successfully"
            exception_handler(bot.send_message(user.id, finishing_text, parse_mode="html"))
        except FloodWait as e:
            # Honor Telegram's flood-wait: e.x is the required wait in seconds.
            result_text = f"FloodWait from manual indexer: \n\n{e}"
            exception_handler(bot.send_message(user.id, result_text, parse_mode="html"))
            # print("from audio file indexer: Flood wait exception: ", e)
            time.sleep(e.x)
        except SlowmodeWait as e:
            result_text = f"SlowmodeWait from manual indexer: \n\n{e}"
            exception_handler(bot.send_message(user.id, result_text, parse_mode="html"))
            # print("from audio file indexer: Slowmodewait exception: ", e)
            time.sleep(e.x)
        except TimeoutError as e:
            result_text = f"TimeoutError from manual indexer: \n\n{e}"
            exception_handler(bot.send_message(user.id, result_text, parse_mode="html"))
            # print("Timeout error: sleeping for 20 seconds: ", e)
            time.sleep(20)
            # pass
        except ConnectionError as e:
            result_text = f"ConnectionError from manual indexer: \n\n{e}"
            exception_handler(bot.send_message(user.id, result_text, parse_mode="html"))
            # print("Connection error - sleeping for 40 seconds: ", e)
        except Exception as e:
            # Anything else: assume the username is not a valid channel.
            result_text = f"Channel with this username doesn't seem to be valid\n\n" \
                          f"Channel username: @{channel_username}\n\n{e}"
            exception_handler(bot.send_message(user.id, result_text, parse_mode="html"))
            time.sleep(5)
    return True
@bot.on_message(Filters.private & Filters.command("start"))
def index_user(bot, message):
    """
    Handle /start in a private chat: register the user (and their lists
    document) if new, greet them, ask for a language, and upgrade channel
    members from "searcher" to "subscriber". Joining status of both official
    channels is refreshed at the end.
    :param bot: Telegram bot object
    :param message: Telegram message object
    :return: -
    """
    # different roles: searcher, subscriber, recom_subscriber, admin, CEO, maintainer
    # es.bulk({ "create" : user_data_generator(message)})
    print("start")
    print(message)
    if "back_to_the_bot" in message.command:
        # Deep-link payload from a "back to the bot" button: nothing to do but tidy up.
        message.delete()
    else:
        try:
            user = message.from_user
            # if es.exists(index="user", id=user.id):
            #     es.delete("user", id=user.id)
            if not es.exists(index="user", id=user.id):
                # First /start from this user: create their profile document.
                res_u = es.create(index="user", id=user.id, body={
                    "first_name": user.first_name,
                    "username": user.username,
                    "date_joined": int(time.time()),
                    "downloaded_audio_count": 0,
                    "lang_code": "en",
                    "limited": False,
                    "role": "searcher",
                    "coins": 0,
                    "last_active_date": int(time.time()),
                    "is_admin": False,
                    "sex": "neutral",
                    "country": "-"
                }, ignore=409)
                # print(res)
                # es.delete(index="user_lists", id=user.id)
                # Companion document holding the user's history and playlists.
                res_ul = es.create(index="user_lists", id=user.id, body={
                    "downloaded_audio_id_list": [],
                    "playlists": []
                }, ignore=409)
                welcome_en = language_handler("welcome", "en", user.first_name)
                print(welcome_en)
                exception_handler(bot.send_message(message.chat.id, welcome_en, parse_mode="html"))
                choose_language(bot, message)
                # Refresh so the freshly created document is visible to the get below.
                es.indices.refresh("user")
                user_data = es.get(index="user", id=user.id)["_source"]
                try:
                    # apt = apl[0]
                    # get_chat_member raises if the user is not a member of the official channel,
                    # which skips the role upgrade below.
                    lang_code = user_data["lang_code"]
                    if lang_code == "fa":
                        user_status = app.get_chat_member(chromusic_fa_id, user.id)
                    else:
                        user_status = app.get_chat_member(chromusic_id, user.id)
                    if user_data["role"] == "searcher":
                        # Channel member: lift limits and promote to subscriber.
                        es.update(index="user", id=user.id, body={
                            "script": {
                                "inline": "ctx._source.limited = params.limited;"
                                          "ctx._source.role = params.role;",
                                "lang": "painless",
                                "params": {
                                    "limited": False,
                                    "role": "subscriber"
                                }
                            }
                        }, ignore=409)
                except Exception as e:
                    print(e)
                    # NOTE(review): presumably a back-off for rate limiting -- confirm.
                    time.sleep(15)
                es.indices.refresh("user")
                user_data = es.get(index="user", id=user.id)["_source"]
                lang_code = user_data["lang_code"]
                example_message = language_handler("example_message", lang_code, user.first_name)
                exception_handler(bot.send_message(message.chat.id, example_message, parse_mode="html"))
        except Exception as e:
            print(f"Exception from index_user: {e}")
        finally:
            # Always re-check channel membership for both official channels.
            check_joining_status(chromusic_id)
            check_joining_status(chromusic_fa_id)
@bot.on_message(Filters.command(["lang", "help", "home"]))
def commands_handler(bot, message):
    """
    Dispatch the /lang, /help and /home commands to their respective keyboards:
     1. Lang: show the language chooser
     2. Help: show the help menu
     3. Home: show the home menu
    :param bot: Bot object
    :param message: Message object
    :return: True on success; False otherwise
    """
    command = message.command[0]
    if command == "lang":
        # english.languages_list()
        message.delete()
        choose_language(bot, message)
    elif command == "help":
        try:
            lang_code = es.get(index="user", id=message.chat.id)["_source"]["lang_code"]
            keyboard = language_handler("help_markup_keyboard", lang_code)
            menu_text = language_handler("help_keyboard_text", lang_code)
            message.delete()
            exception_handler(bot.send_message(
                message.chat.id, text=menu_text,
                reply_markup=InlineKeyboardMarkup(keyboard),
                parse_mode='HTML'))
        except Exception as e:
            print("from search on_message: ", e)
            return False
    elif command == "home":
        try:
            lang_code = es.get(index="user", id=message.chat.id)["_source"]["lang_code"]
            keyboard = language_handler("home_markup_keyboard", lang_code)
            menu_text = language_handler("home_keyboard_text", lang_code)
            message.delete()
            exception_handler(bot.send_message(
                message.chat.id, text=menu_text,
                reply_markup=InlineKeyboardMarkup(keyboard),
                parse_mode='HTML'))
        except Exception as e:
            print("from on_message: ", e)
            return False
    return True
@bot.on_message(Filters.command(["addnewpl", "addtoexistpl", "myplaylists",
                                 "showplaylist", "edit_pl_title", "edit_pl_description"]))
def playlist_commands_handler(bot, message):
    """
    Handle the playlist management commands:
     1. "addnewpl": Create a new playlist and add the audio file to it
     2. "addtoexistpl": Add the audio file to an existing playlist
     3. "myplaylists": Show all playlists for the current user
     4. "showplaylist": Show all audio files within the chosen playlist
     5. "edit_pl_title": Edit the chosen playlist's title
     6. "edit_pl_description": Edit the chosen playlist's description
    :param bot: Telegram bot object
    :param message: Telegram message object
    :return: True on success; False on exception
    """
    user = message.from_user
    user_data = es.get(index="user", id=user.id)["_source"]
    lang_code = user_data["lang_code"]
    if message.command[0] == "addnewpl":
        try:
            # The audio file's retrieve id is the command's only argument.
            file_ret_id = str(message.text).split(" ")[1]
            playlist_id = str(uuid4())[:6].replace("-", "d")
            print("playlist id: ", playlist_id)
            audio_file_db_data = es.get("audio_files", id=file_ret_id)["_source"]
            # Placeholder data used only to render the confirmation message.
            data = {"id": playlist_id,
                    "title": audio_file_db_data["title"],
                    "description": "New Playlist",
                    "list": []}
            func = "addnewpl"
            added_success_text = language_handler("added_to_playlist_success_text", lang_code, func, data)
            markup_keyboard = language_handler("playlists_buttons", lang_code)
            message.reply_text(text=added_success_text, quote=False, parse_mode="HTML",
                               reply_markup=InlineKeyboardMarkup(markup_keyboard))
            # Playlist name fallback chain: title -> performer -> file_name.
            playlist_title = audio_file_db_data["title"] if not audio_file_db_data["title"] == None else \
                audio_file_db_data["performer"] if not audio_file_db_data["performer"] == None \
                    else audio_file_db_data["file_name"]
            # resl = es.update(index="user_lists", id=user.id, body={
            #     "script": {
            #         "source": "if (ctx._source.playlists.contains(params.file_id)) {ctx.op = 'none'} else "
            #                   "{if (ctx._source.downloaded_audio_id_list.size()>49) "  #
            #                   "{ctx._source.downloaded_audio_id_list.remove(0);"
            #                   "ctx._source.downloaded_audio_id_list.add(params.file_id);} "
            #                   "else {ctx._source.downloaded_audio_id_list.add(params.file_id);}}",  # ctx.op = 'none'}",
            #         "lang": "painless",
            #         "params": {
            #             "file_id": file_ret_id
            #         }
            #     }
            # })
            # The actual stored playlist id is a URL-safe token, not the uuid above.
            base64urlsafe_playlist_id = secrets.token_urlsafe(6)
            print("generated id", base64urlsafe_playlist_id)
            # res = es.create(index="playlist", )
            number_of_user_playlists = es.count(index="playlist", body={
                "query": {
                    "match": {
                        "author_id": user.id
                    }
                }
            })
            print("number_of_user_playlists", number_of_user_playlists)
            # Hard cap: at most 5 playlists per user.
            if int(number_of_user_playlists["count"]) < 5:
                create_new_playlist_res = es.create(index="playlist", id=base64urlsafe_playlist_id, body={
                    "author_id": user.id,
                    "title": playlist_title,
                    "description": "New playlist",
                    "list": [file_ret_id]
                })
                print("create_new_playlist_res", create_new_playlist_res)
                # Register the new playlist id on the user's lists document.
                res = es.update(index="user_lists", id=user.id, body={
                    "script": {
                        "inline": "ctx._source.playlists.add(params.playlist_id);",
                        "lang": "painless",
                        "params": {
                            "playlist_id": base64urlsafe_playlist_id
                        }
                    }
                }, ignore=409)
            message.delete()
            return True
        except Exception as e:
            print("from playlists handling: ", e)
            return False
    elif message.command[0] == "addtoexistpl":
        try:
            playlist_id = message.command[1]
            file_retrieve_id = message.command[2]
            # print("playlist id: ", playlist_id)
            audio_file_db_data = es.get("audio_files", id=file_retrieve_id)["_source"]
            data = {"id": playlist_id,
                    "title": audio_file_db_data["title"],
                    "description": "New Playlist",
                    "list": []}
            playlist = es.get(index="playlist", id=playlist_id)
            print(playlist)
            # Painless script: skip duplicates and cap the playlist at 15 entries.
            res = es.update(index="playlist", id=playlist_id, body={
                "script": {
                    "source": "if (ctx._source.list.contains(params.file_id)) {ctx.op = 'none'} else "
                              "{if (ctx._source.list.size()>14) "
                              "{ctx.op = 'none'}"
                              "else {ctx._source.list.add(params.file_id);}}",
                    # "if (ctx._source.list.size()<20){"
                    # "if (ctx._source.list.contains(params.file_id))"
                    # "{ctx.op = 'none';} "
                    # "else {ctx._source.list.add(params.file_id);}}"
                    # "else{ctx.op = 'none'}",
                    "lang": "painless",
                    "params": {
                        "file_id": file_retrieve_id
                    }
                }
            }, ignore=409)
            func = "addtoexistpl"
            added_success_text = language_handler("added_to_playlist_success_text", lang_code, func, data, playlist)
            markup_keyboard = language_handler("playlists_buttons", lang_code)
            message.reply_text(text=added_success_text, quote=False, parse_mode="HTML",
                               reply_markup=InlineKeyboardMarkup(markup_keyboard))
            # bot.send_message(user.id, str(playlist))
            message.delete()
            return True
        except Exception as e:
            print("from playlists - adding to existing playlist: ", e)
            return False
    elif message.command[0] == "myplaylists":
        print(message)
        user = message.from_user
        pl_results = es.search(index="playlist", body={
            "query": {
                "match": {
                    "author_id": user.id
                }
            }
        })
        # print(pl_results)
        for pl in pl_results["hits"]["hits"]:
            print(pl)
        markup_list = language_handler("playlists_buttons", user_data["lang_code"])
        playlist_text = language_handler("mylists_menu_text", user_data["lang_code"])
        exception_handler(message.reply_text(text=playlist_text, reply_markup=InlineKeyboardMarkup(markup_list),
                                             parse_mode='HTML'))
        message.delete()
        return True
    elif message.command[0] == "showplaylist":
        try:
            playlist_id = message.command[1]
            playlist_files = es.get(index="playlist", id=playlist_id)["_source"]
            single_playlist_markup_list = language_handler("single_playlist_markup_list", user_data["lang_code"],
                                                           playlist_id, message.message_id)
            single_playlist_text = language_handler("single_playlist_text", user_data["lang_code"], playlist_files)
            print(playlist_files)
            exception_handler(message.reply_text(text=single_playlist_text,
                                                 reply_markup=InlineKeyboardMarkup(single_playlist_markup_list),
                                                 parse_mode='HTML'))
            message.delete()
            return True
        except Exception as e:
            print("from showplaylist:", e)
            return False
    elif message.command[0] == "edit_pl_title":
        playlist_id = str(message.command[1])
        # prev_query_id = str(message.command[1]).split(":")[1]
        # Everything after the playlist id is the (possibly multi-word) new title.
        new_title = message.command[2:]
        print(message.command)
        def unpack(s):
            # Re-join the remaining command words into a single string.
            return " ".join(map(str, s))
        new_title = unpack(new_title)
        res = es.update(index="playlist", id=playlist_id, body={
            "script": {
                "source": "ctx._source.title = params.new_title",
                "lang": "painless",
                "params": {
                    "new_title": new_title
                }
            }
        }, ignore=409)
        func = "title_update"
        text = language_handler("playlist_updated_text", user_data["lang_code"], func)
        exception_handler(bot.send_message(user.id, text))
        # bot.answer_callback_query(callback_query_id=prev_query_id, text=text, show_alert=True)
        message.delete()
        return True
    elif message.command[0] == "edit_pl_description":
        playlist_id = str(message.command[1])
        # prev_query_id = str(message.command[1]).split(":")[1]
        # Everything after the playlist id is the (possibly multi-word) new description.
        new_description = message.command[2:]
        print("commands", message.command)
        def unpack(s):
            # Re-join the remaining command words into a single string.
            return " ".join(map(str, s))
        new_description = unpack(new_description)
        res = es.update(index="playlist", id=playlist_id, body={
            "script": {
                "source": "ctx._source.description = params.new_description",
                "lang": "painless",
                "params": {
                    "new_description": new_description
                }
            }
        }, ignore=409)
        func = "description_update"
        text = language_handler("playlist_updated_text", user_data["lang_code"], func)
        exception_handler(bot.send_message(user.id, text))
        # bot.answer_callback_query(callback_query_id=prev_query_id, text=text, show_alert=True)
        message.delete()
        return True
@bot.on_message(Filters.private & Filters.regex("dl_"))
def download_handler(bot, message):
    """
    Handle private messages containing a "dl_" download token: submit the
    message to file_retrieve_handler on the worker thread pool so the handler
    returns immediately and the bot stays responsive.
    :param bot: Telegram bot object
    :param message: Telegram message object
    :return: True on success
    """
    executor.submit(file_retrieve_handler, message)
    return True
def _safe_urgent_index(bot, user, channel_username):
    """
    Run urgent_index() for one channel username, reporting every failure class
    to the admin instead of letting it propagate. Flood/slow-mode waits are
    honored with time.sleep; this was previously duplicated inline twice.
    """
    try:
        urgent_index(channel_username, user)
    except FloodWait as e:
        result_text = f"FloodWait from manual indexer: \n\n{e}"
        exception_handler(bot.send_message(user.id, result_text, parse_mode="html"))
        # e.x is the wait Telegram demands, in seconds
        time.sleep(e.x)
    except SlowmodeWait as e:
        result_text = f"SlowmodeWait from manual indexer: \n\n{e}"
        exception_handler(bot.send_message(user.id, result_text, parse_mode="html"))
        time.sleep(e.x)
    except TimeoutError as e:
        result_text = f"TimeoutError from manual indexer: \n\n{e}"
        exception_handler(bot.send_message(user.id, result_text, parse_mode="html"))
        time.sleep(20)
    except ConnectionError as e:
        result_text = f"ConnectionError from manual indexer: \n\n{e}"
        exception_handler(bot.send_message(user.id, result_text, parse_mode="html"))
    except Exception as e:
        result_text = f"Channel with this username doesn't seem to be valid\n\n" \
                      f"Channel username: @{channel_username}\n\n{e}"
        exception_handler(bot.send_message(user.id, result_text, parse_mode="html"))


@bot.on_message(~Filters.via_bot & ~Filters.bot &
                Filters.private & (Filters.forwarded | Filters.regex("@") | Filters.web_page | Filters.regex("https")))
def get_channel(bot, message):
    """
    Harvest channel usernames from a forwarded or linked private message.
    For the owner, every extracted channel is indexed immediately via
    urgent_index(); for other users, unknown channels are counted and the user
    is thanked for the contribution.
    :param bot: Telegram bot API
    :param message: Telegram message object
    :return: True on success
    """
    channels_username = []
    user_data = es.get(index="user", id=message.chat.id)
    user = message.from_user
    # print(message.text)
    if user_data["_source"]["role"] == "owner":
        # 1) usernames referenced by caption entities
        channels_usernames = caption_entities_channel_extractor(app, message)
        for _channel_username in channels_usernames:
            _safe_urgent_index(bot, user, _channel_username)
        # 2) usernames mentioned in the text/caption body; if neither is
        #    present the entity list from step 1 is looped again (as before)
        if message.text:
            channels_usernames = channel_name_extractor(app, message.text)
        elif message.caption:
            channels_usernames = channel_name_extractor(app, message.caption)
        for _channel_username in channels_usernames:
            _safe_urgent_index(bot, user, _channel_username)
        # 3) the channel the message was forwarded from, if any
        if message.forward_from_chat:
            forwarded_from_channel_extractor(app, message)
    else:
        check_message = bot.send_message(message.chat.id,
                                         language_handler("checking_items_started", user_data["_source"]["lang_code"]))
        if message.text:
            channels_username = channel_name_extractor(app, message.text)
        elif message.caption:
            channels_username = channel_name_extractor(app, message.caption)
        if message.forward_from_chat:
            forwarded_from_channel_extractor(app, message)
        t = caption_extractor(message)
        print("caption: ", t)
        # Count only channels that are in neither the main index nor the queue.
        count = 0
        for _channel_username in channels_username:
            # if not es.exists(index="channel", id=_channel):
            if es.count(index="channel", body={
                "query": {
                    "match": {
                        "username": _channel_username
                    }
                }
            })["count"] == 0:
                if not es.exists(index="future_channel", id=_channel_username):
                    count += 1
        if count > 0:
            registered = True
            check_message.edit_text(
                language_handler("contribution_thanks", user_data["_source"]["lang_code"], message.chat.first_name,
                                 registered, count))
        else:
            registered = False
            check_message.edit_text(language_handler("contribution_thanks", user_data["_source"]["lang_code"],
                                                     message.chat.first_name, registered))
    return True
@bot.on_message(Filters.private & Filters.audio & ~Filters.bot)
def save_audio(bot, message):
    """
    Store an audio file sent by a user if it is not already in the database,
    ignore it otherwise; in both cases reply with a "thanks for contributing"
    message that says whether the file was newly registered.
    :param bot: Telegram bot API
    :param message: Telegram message object
    :return: True on success
    """
    audio = message.audio
    print(message, caption_extractor(message))
    user_data = es.get("user", id=message.chat.id)["_source"]
    print("from file ret - lang code:")
    lang_code = user_data["lang_code"]
    if message.caption:
        # NOTE(review): the extracted usernames are not used here; presumably
        # channel_name_extractor has side effects -- confirm before removing.
        channels_username = channel_name_extractor(app, message.caption)
    # Document id: a short deterministic slice of the Telegram file_id,
    # computed once instead of being rebuilt at every use.
    doc_id = str(audio.file_id[8:30:3]).replace("-", "d")
    if not es.exists(index="audio_files", id=doc_id):
        print(es.exists(index="audio_files", id=doc_id))
        # Archive the file in the users-files channel so it can be served later.
        sent_to_user_sent_channel = bot.forward_messages(chromusic_users_files_id, message.chat.id,
                                                         message.message_id)
        # fixed: this f-string previously contained a broken "<EMAIL>" placeholder
        # that was not a valid expression; the sender's username is the
        # plausible intent -- confirm against the archive channel's needs.
        sender_info = f"Sender: \n{message.chat.first_name} \n@{message.chat.username}"
        bot.send_message(chromusic_users_files_id, sender_info,
                         reply_to_message_id=sent_to_user_sent_channel.message_id)
        # bot.forward_messages(chromusic_users_files_id, audio_track.chat.id, audio_track.message_id)
        print("sent file: ", sent_to_user_sent_channel)
        res = es.create(index="audio_files", id=doc_id, body={
            "chat_id": sent_to_user_sent_channel.chat.id,
            "chat_username": sent_to_user_sent_channel.chat.username,
            "message_id": int(sent_to_user_sent_channel.message_id),
            "file_id": audio.file_id,
            # "_" and "@" are stripped so they don't pollute later searches
            "file_name": str(audio.file_name).replace("_", " ").replace("@", " "),
            "file_size": audio.file_size,
            "duration": audio.duration,
            "performer": str(audio.performer).replace("_", " ").replace("@", " "),
            "title": str(audio.title).replace("_", " ").replace("@", " "),
            "times_downloaded": 0,
            "caption": str(caption_extractor(message)),
            "copyright": False
        }, ignore=409)
        es.indices.refresh("audio_files")
        print("registered: ", res, sent_to_user_sent_channel)
        print(es.get(index="audio_files", id=doc_id))
        registered = True
    else:
        registered = False
    # Both branches previously duplicated this reply verbatim.
    user_data = es.get(index="user", id=message.chat.id)
    bot.send_message(message.chat.id, language_handler("contribution_thanks", user_data["_source"]["lang_code"],
                                                       message.chat.first_name, registered))
    return True
@bot.on_message(Filters.private & Filters.text & ~Filters.edited & ~Filters.bot & ~Filters.via_bot)
def message_handler(bot, message):
    """
    Handle plain private text messages: text longer than one character with no
    entities is treated as a search query and handed to the search handler on
    the thread pool; anything else gets the "Help" menu.
    :param bot: Telegram bot API
    :param message: Telegram message object
    :return: True on success
    """
    # idiom fix: `is None` instead of `== None` (behavior unchanged)
    if message.text and message.entities is None:
        if len(message.text) > 1:
            # Run the search off the handler thread so the bot stays responsive.
            executor.submit(search_handler, bot, message)
        else:
            try:
                user_data = es.get(index="user", id=message.chat.id)["_source"]
                help_markup_keyboard = language_handler("help_markup_keyboard", user_data["lang_code"])
                help_keyboard_text = language_handler("help_keyboard_text", user_data["lang_code"])
                exception_handler(bot.send_message(message.chat.id, text=help_keyboard_text,
                                                   reply_markup=InlineKeyboardMarkup(help_markup_keyboard),
                                                   parse_mode='HTML'))
            except Exception as e:
                print("from search on_message: ", e)
    return True
# @app.on_message()
def client_handler(app, message):
    """
    This is for development purposes and not a part of the bot. (uncomment the hooker in case you wanted to conduct
    tests)

    Admin-only command console: messages from `valid_usernames` whose first word
    is 'ch', 'delete', 'index' or 'count' trigger Elasticsearch maintenance actions.

    :param app: Telegram app API
    :param message: Telegram message object
    :return: -
    """
    pool_id = 'poolmachinelearning_x123' # ID to get command from: Here is the pool channel admins
    # NOTE(review): `commands`, `supported_message_types` and `destinations` are
    # defined but not used below -- presumably kept for the commented-out experiments.
    commands = ["/f", "/sf", "/d", "/v", "/clean", "/i", "/s"]
    supported_message_types = ["text", "photo", "video", "document", "audio", "animation",
                               "voice", "poll", "sticker", "web_page"]
    valid_usernames = ["shelbycobra2016", "cmusic_self"]  # , pool_id] # , "Pudax"]
    destinations = {"ce": "cedeeplearning", "pool": pool_id, "me": "shelbycobra2016"}
    # if message.chat.id == "61709467":
    #     message_handler(app, message)
    try:
        if message.chat.username in valid_usernames:
            # print(apl[0].get_users(34497745))
            # print(message)
            # Echo every admin message to the debug chat.
            app.send_message("cmusic_self", str(message))
            # h = app.get_history("shanbemag", limit=1)
            # print(h[0])
            # for i in app.iter_history('BBC_6_Minute', limit=10):
            #     if i.audio:
            #         print(i)
            if str(message.text).split(' ')[0] == 'ch':
                # 'ch': dump channels with importance 5, oldest indexed first.
                # print(es.search(index="channel", body={
                #     "query": {
                #         "match_all": {}
                #     }
                # }))
                # es.bulk({ "create" : audio_data_generator() })
                # res = es.get(index="channel", id=app.get_chat('cedeeplearning').id)
                # print(res)
                res = es.search(index="channel", body={
                    "query": {
                        "match": {"importance": 5}
                    },
                    "sort": {
                        "last_indexed_offset_date": "asc"
                    }
                })
                print("started...")
                # print("his mes ", [ i for i in app.iter_history("me", limit=1)])#, datetime(app.get_history("me", limit=1)[-1].date).timestamp())
                # app.send_message("me", "indexing started ...")
                # app.terminate()
                # bot.restart()
                # es.get(index="future_channel", id=app.get_chat("kurdi4").id)
                # es.get(index="future_channel", id=app.get_chat("ahangify").id)
                # channel_to_index_set_consume = channel_to_index_set
                # [channel_to_index_set_consume.add(ch) for ch in list(channel_to_index_set)]
                # _channels_list = list(channel_to_index_set_consume)
                # print("cunsume set before: ", channel_to_index_set_consume)
                # new_channel_indexer(_channels_list)
                # app.send_message("me", res["hits"]["hits"])
                for item in res["hits"]["hits"]:
                    print(item)
                # res = helpers.scan(
                #     client=es,
                #     query={"query": {"match_all": {}}},
                #     size=10000,
                #     scroll='2m',
                #     index="channel"
                # )
                # for i in res:
                #     print(f"indexing: {i}")
                #     existing_channel_indexer(channel=int(i['_id']))
                #
                # app.send_message("me", "existing channels indexed successfully ")
                # caption_entities_channel_extractor(message)
            elif str(message.text).split(' ')[0] == 'delete':
                # 'delete': drop and recreate the Elasticsearch indexes.
                try:
                    es.indices.delete('audio')
                    es.indices.delete('channel')
                    # app.send_message("shelbycobra2016", text=f"deleted 2 indexes:\n1. audio\n2. channel")
                    # audio = es.indices.create(
                    #     index="audio",
                    #     body=audio_mapping,
                    #     ignore=400  # ignore 400 already exists code
                    # )
                    channel = es.indices.create(
                        index="channel",
                        body=channel_mapping,
                        ignore=400  # ignore 400 already exists code
                    )
                    app.send_message('shelbycobra2016', 'created again')
                except:
                    app.send_message('shelbycobra2016', "There's no audio index!")
                # for index in es.indices.get('*'):
                #     print(index)
                # NOTE(review): indentation was lost in this file; the statements
                # below are assumed to belong to the 'delete' branch -- confirm.
                print(es.search(index="datacenter", body={
                    "query": {"match_all": {}}
                }))
                # elif str(message.text).split(' ')[0] == 'create':
                #     audio = es.indices.create(
                #         index="audio",
                #         body=audio_mapping,
                #         ignore=400  # ignore 400 already exists code
                #     )
                #     user = es.indices.create(
                #         index="user",
                #         body=audio_mapping,
                #         ignore=400  # ignore 400 already exists code
                #     )
                #     channel = es.indices.create(
                #         index="channel",
                #         body=audio_mapping,
                #         ignore=400  # ignore 400 already exists code
                #     )
                # Promote the hard-coded admin user to "owner".
                response = es.update(index="user", id=165802777, body={
                    "script": {
                        "source": "ctx._source.role = params.role_p",
                        "lang": "painless",
                        "params": {
                            "role_p": "owner"
                        }
                    }
                }, refresh=True, ignore=409)
                # app.send_message("shelbycobra2016",
                #                  text=f"created 3 indexes:\n\n1. audio: {audio}\n2. user: {user}\n\n3. channel: {channel}\n\n4. update responce: {response}")
                print(es.get(index="user_role", id=165802777))
            elif str(message.text).split(' ')[0] == 'index':
                # 'index <chat>': index a channel's audio files.
                time.sleep(3)
                current_chat = app.get_chat(str(message.text).split(' ')[1])
                time.sleep(3)
                if current_chat.type == "channel":
                    audio_file_indexer(app, current_chat, 0)
            elif str(message.text).split(' ')[0] == 'count':
                # 'count': report the number of indexed audio documents.
                print(es.count(index='audio_files'))
    except Exception as e:
        print(f"from client handler: {e}")
| StarcoderdataPython |
5096529 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.abstract}, a collection of APIs for implementing
reactors.
"""
from __future__ import division, absolute_import
from twisted.trial.unittest import SynchronousTestCase
from twisted.internet.abstract import isIPv6Address
class IPv6AddressTests(SynchronousTestCase):
    """
    Tests for L{isIPv6Address}, a function for determining if a particular
    string is an IPv6 address literal.
    """
    def test_empty(self):
        """
        The empty string is rejected.
        """
        self.assertFalse(isIPv6Address(""))

    def test_colon(self):
        """
        A lone C{":"} is rejected.
        """
        self.assertFalse(isIPv6Address(":"))

    def test_loopback(self):
        """
        The IPv6 loopback literal C{"::1"} is accepted.
        """
        self.assertTrue(isIPv6Address("::1"))

    def test_scopeID(self):
        """
        A valid literal may additionally carry a C{"%"} followed by an
        arbitrary scope identifier (interface name or number).
        """
        for literal in ["fe80::1%eth0", "fe80::2%1", "fe80::3%en2"]:
            self.assertTrue(isIPv6Address(literal))

    def test_invalidWithScopeID(self):
        """
        A trailing scope identifier does not make an otherwise invalid
        literal valid.
        """
        for literal in ["%eth0", ":%eth0", "hello%eth0"]:
            self.assertFalse(isIPv6Address(literal))

    def test_unicodeAndBytes(self):
        """
        Both ASCII-encoded bytes and text are evaluated.
        """
        for valid in [b"fe80::2%1", u"fe80::2%1"]:
            self.assertTrue(isIPv6Address(valid))
        for invalid in [u"\u4321", u"hello%eth0", b"hello%eth0"]:
            self.assertFalse(isIPv6Address(invalid))
| StarcoderdataPython |
6463330 | <gh_stars>10-100
import json as json_handler
from balebot.models.constants.errors import Error
from balebot.models.factories import message_factory
class BotApiQuotedMessage:
    """A quoted (replied-to) message contained in a Bale bot API update.

    The constructor accepts either an already-parsed dict or a JSON string;
    any other type is rejected.
    """

    def __init__(self, json):
        # Normalize the input to a dict, parsing JSON text when needed.
        if isinstance(json, str):
            payload = json_handler.loads(json)
        elif isinstance(json, dict):
            payload = json
        else:
            raise ValueError(Error.unacceptable_json)
        self.message_id = payload.get("messageId", None)
        self.public_group_id = payload.get("publicGroupId", None)
        self.sender_id = payload.get("senderId", None)
        self.messageDate = payload.get("messageDate", None)
        # Delegate construction of the nested message object to the factory.
        self.message = message_factory.MessageFactory.create_message(payload.get("message"))
| StarcoderdataPython |
395977 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 23 16:05:17 2020
@author: yuanh
"""
import os
import shutil
from openpyxl import load_workbook
import numpy as np

# Collect the geometry PDFs produced for optimization iteration `it`:
# flag == 1 gathers only the selected "MU" designs listed in Positions.xlsx,
# otherwise the whole population of nPop designs is gathered.
it = 6
flag = 0
nPop = 100

BASE_DIR = "J:/Coupler_optimization/MOPSO_CAE/"

if flag == 1:
    # Read the n selected design indices from column 1 of the '2_mu' sheet
    # (data starts on row 2).
    n = 9
    index = np.zeros((n, 1))
    sheet = load_workbook('Positions.xlsx')['2_mu']
    for i in range(n):
        index[i, 0] = sheet.cell(row=i + 2, column=1).value

if flag == 1:
    os.mkdir(str(it) + '_MU_all')
    for hh in range(n):
        tag = str(it) + "_" + str(int(index[hh, 0])) + "_MU"
        source = BASE_DIR + tag + "/" + tag + "_geo.pdf"
        destination = BASE_DIR + str(it) + "_MU_all" + "/" + tag + "_geo.pdf"
        shutil.copyfile(source, destination)
else:
    os.mkdir(str(it) + '_all')
    for hh in range(nPop):
        tag = str(it) + "_" + str(hh)
        source = BASE_DIR + tag + "/" + tag + "_geo.pdf"
        destination = BASE_DIR + str(it) + "_all" + "/" + tag + "_geo.pdf"
        shutil.copyfile(source, destination)
| StarcoderdataPython |
8069931 | <filename>learner_model/SeqRNN.py
"""
The core model
"""
import os
from os import path
import time
import settings
import keras
from keras.models import Model
from keras.preprocessing import sequence
from keras.layers import Input, Dense, Dropout, Embedding, LSTM, TimeDistributed
import keras.backend as K
import numpy as np
from meta_model.model_session import ModelSession
from pprint import pprint as pr
embed_dim=10
from keras import backend as K
def relative_error(y_true, y_pred):
    """Custom Keras metric: element-wise relative error
    |y_true - y_pred| / |y_true|, used for testing/monitoring.
    """
    absolute_error = K.abs(y_true - y_pred)
    return absolute_error / K.abs(y_true)
class Sequence_RNN_Model_Session(ModelSession):
    """Model session for a stacked-LSTM next-token classifier.

    Wraps a Keras model and provides one-hot preprocessing of token-index
    sequences, per-batch training, evaluation, and temperature-based
    sampling/generation.
    """

    def __init__(self, model, args):
        """Register an already-built Keras model and the run arguments
        (``args.class_num`` is the vocabulary/one-hot size)."""
        self.model = model
        print('computational graph registered')
        self.args = args
        print('arguments loaded')
        print('session building complete')

    @staticmethod
    def create_graph(class_num=120):
        """Build the graph: three stacked LSTM(49) layers over 40-step
        one-hot sequences, followed by a softmax over ``class_num``."""
        input_sequences = Input(shape=(40, class_num))
        network = LSTM(49, return_sequences=True)(input_sequences)
        network = LSTM(49, return_sequences=True)(network)
        network = LSTM(49)(network)
        prediction = Dense(class_num, activation='softmax')(network)
        model = Model(inputs=input_sequences, outputs=prediction)
        return model

    @staticmethod
    def compile_model(model):
        """Compile the model for categorical cross-entropy with Adam."""
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy')
        return model

    @classmethod
    def restore(cls, checkpoint_directory, cus=None):
        """Restore a session from ``checkpoint_directory``.

        ``cus`` (custom objects) is currently ignored; see the commented
        variant that passed custom metrics through.
        """
        # custom_objects={'relative_error':relative_error}
        # return super().restore(checkpoint_directory,cus=custom_objects)
        return super().restore(checkpoint_directory)

    def preprocess(self, text):
        """Slice ``text`` (a sequence of token indices) into overlapping
        40-step windows and one-hot encode inputs and next-token targets.

        Returns ``(X, y)`` with shapes ``(num_windows, 40, class_num)``
        and ``(num_windows, class_num)``. Also records ``self.maxlen``
        for later use by :meth:`generate`.
        """
        maxlen = 40
        self.maxlen = 40
        step = 1
        sentences = []
        next_chars = []
        for i in range(0, len(text) - maxlen, step):
            sentences.append(text[i: i + maxlen])
            next_chars.append(text[i + maxlen])
        # `bool` instead of the removed `np.bool` alias (NumPy >= 1.24);
        # np.bool was an alias of the builtin, so behavior is unchanged.
        X = np.zeros((len(sentences), maxlen, self.args.class_num), dtype=bool)
        y = np.zeros((len(sentences), self.args.class_num), dtype=bool)
        for i, sentence in enumerate(sentences):
            for t, char_index in enumerate(sentence):
                X[i, t, char_index] = 1
            y[i, next_chars[i]] = 1
        return X, y

    def train(self, x, y=None):
        """Run one training batch.

        BUG FIX: the class previously defined ``train`` twice; the second
        definition (raw-text input) silently shadowed the first
        (pre-encoded ``x``/``y``). The two are merged backward-compatibly:
        when ``y`` is omitted, ``x`` is treated as raw token-index text and
        encoded via :meth:`preprocess`; otherwise ``x``/``y`` are used as-is.
        """
        if y is None:
            x, y = self.preprocess(x)
        self.model.train_on_batch(x, y)

    def evaluate(self, x, batch_size=32):
        """Encode raw text ``x`` and return the model's evaluation loss."""
        x, y = self.preprocess(x)
        result = self.model.evaluate(x, y, batch_size=batch_size, verbose=0)
        return result

    def sample(self, preds, temperature=1.0):
        """Sample an index from a probability array, reweighted by
        ``temperature`` (lower values are greedier)."""
        preds = np.asarray(preds).astype('float64')
        preds = np.log(preds) / temperature
        exp_preds = np.exp(preds)
        preds = exp_preds / np.sum(exp_preds)
        probas = np.random.multinomial(1, preds, 1)
        return np.argmax(probas)

    def generate(self, random_sentence_start=None, file_directory=None):
        """Generate 400 tokens seeded by the first 40 entries of
        ``random_sentence_start``, write them to ``<file_directory>tmp.txt``
        and return the generated string.

        NOTE: relies on ``preprocess()`` having set ``self.maxlen`` and on
        ``register_dictionary``/``register_index`` having been called.
        """
        diversity = 0.2  # sampling temperature
        # Seed sentence: map the first 40 token indices to their tokens.
        sentence = [self.dictionary[it] for it in random_sentence_start[:40]]
        generated = []
        for i in range(400):
            x = np.zeros((1, self.maxlen, self.args.class_num))
            for t, char_index in enumerate(sentence):
                x[0, t, self.index[char_index]] = 1.
            # Predict the next-token distribution and sample from it.
            preds = self.model.predict(x, verbose=0)[0]
            next_index = self.sample(preds, diversity)
            next_char = self.dictionary[next_index]
            generated.append(next_char)
            # Slide the window forward by one token.
            sentence = sentence[1:] + [next_char]
        generated = "".join(generated)
        with open(file_directory + "tmp.txt", 'w') as f:
            f.write(generated + "\n")
        return generated

    def set_one_hot_depth(self, depth):
        """Record the one-hot depth (not read elsewhere in this class)."""
        self.one_hot_depth = depth
        print('one hot depth okay')

    def register_dictionary(self, dictionary):
        """Register the index -> token mapping used by :meth:`generate`."""
        self.dictionary = dictionary

    def register_index(self, index):
        """Register the token -> index mapping used by :meth:`generate`."""
        self.index = index
| StarcoderdataPython |
3311188 | <reponame>mineo/edaboweb
#!/usr/bin/env python
# coding: utf-8
# Copyright © 2015 <NAME>
# License: MIT, see LICENSE for details
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.dialects.postgresql import JSONB, UUID
db = SQLAlchemy()
class Playlist(db.Model):
    """A stored playlist: integer primary key, external UUID, JSONB payload."""
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # External UUID identifying the playlist (presumably a MusicBrainz-style
    # gid -- TODO confirm against the code that populates it).
    gid = db.Column(UUID, nullable=False)
    # Arbitrary playlist content stored as PostgreSQL JSONB.
    data = db.Column(JSONB, nullable=False)
| StarcoderdataPython |
1675582 | <filename>mafiaTokenStore.py
import json
import os
import boto3
from slack import WebClient
from slack.errors import SlackApiError
from util.env import getEnvVar
from util.slack_payload_parser import parse_payload
APP_CLIENT_ID = getEnvVar('APP_CLIENT_ID')
APP_CLIENT_SECRET = getEnvVar('APP_CLIENT_SECRET')
TOKEN_SOURCE = getEnvVar('TOKEN_SOURCE')
def extractParameters(event):
    """Extract the OAuth ``code`` query parameter from an API Gateway event.

    Returns None when the query string, or the code parameter itself, is
    absent. (API Gateway sends ``"queryStringParameters": null`` when the
    request carries no query string; the previous version crashed with
    AttributeError in that case.)
    """
    query_params = event.get("queryStringParameters") or {}
    return query_params.get('code')
def getTokenAndTeam(code):
    """Exchange the OAuth ``code`` for a workspace access token via Slack's
    oauth.v2.access endpoint and return ``(access_token, team_id)``.
    """
    response = WebClient().oauth_v2_access(
        client_id=APP_CLIENT_ID,
        client_secret=APP_CLIENT_SECRET,
        code=code,
    )
    print(response)
    token = response['access_token']
    team_id = response['team']['id']
    return token, team_id
def lambda_handler(event, context):
    """AWS Lambda entry point for the Slack app-install OAuth callback:
    exchange the code carried by the API Gateway event for a bot token and
    persist it per-team in the DynamoDB table named by TOKEN_SOURCE.
    """
    print(f"Received event:\n{json.dumps(event)}\nWith context:\n{context}")
    table = boto3.resource('dynamodb').Table(TOKEN_SOURCE)
    code = extractParameters(event)
    token, team_id = getTokenAndTeam(code)
    # One item per workspace, keyed by the Slack team id.
    table.put_item(Item={'_id': team_id, 'token': token})
    return {
        'statusCode': 200,
        'headers': {},
        'body': 'Installation is Complete!'
    }
| StarcoderdataPython |
9663131 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neural_compressor.utils.utility import dump_elapsed_time
from ..graph_base import GraphRewriterBase
from ..graph_util import GraphAnalyzer
from ..graph_util import GraphRewriterHelper as Helper
class MetaInfoChangingMemOpOptimizer(GraphRewriterBase):
    """Fuse the pattern like Dequantize + MetaOp + Quantize into MetaOp (set its
    type to int8). With such changes, the Quantize and Dequantize OPs will be
    removed for better performance.
    """
    # Meta ops this pass may hop over when walking from Quantize back to Dequantize.
    meta_op_type_list = ['Reshape', 'Squeeze']

    def __init__(self, model):
        super().__init__(model)
        self.graph_analyzer = GraphAnalyzer()
        self.graph_analyzer.graph = self.model
        self.graph_info = self.graph_analyzer.parse_graph()

    def _check_and_apply_transform(self, quantize_node_name):
        """Starting from a QuantizeV2 node, walk producer edges upward through
        consecutive meta ops; if the chain begins at a single-consumer
        Dequantize with the same quantization mode, rewire the quantized
        consumer to bypass the Dequantize/Quantize pair and retype the meta
        ops to the quantized dtype.
        """
        cur_node = self.graph_info[quantize_node_name].node
        # `res` accumulates the node chain bottom-up: it ends as
        # [dequantize, meta_op..., quantize] when a match is found.
        res = [quantize_node_name]
        found_meta_op_flag = False
        while cur_node.input:
            pre_node_name = Helper.node_name_from_input(cur_node.input[0])
            pre_node = self.graph_info[pre_node_name].node
            pre_node_op = pre_node.op
            if pre_node_op in self.meta_op_type_list:
                res.insert(0, pre_node_name)
                cur_node = pre_node
            elif pre_node_op == 'Dequantize' and len(self.graph_info[pre_node_name].outputs) == 1:
                # Only fold a Dequantize whose sole consumer is this chain.
                res.insert(0, pre_node_name)
                found_meta_op_flag = True
                break
            else:
                break
        dequantize_node_name = res[0]
        quantize_node_name = res[-1]
        deq_node = self.graph_info[dequantize_node_name].node
        quant_node = self.graph_info[quantize_node_name].node
        # len(res) > 2 requires at least one meta op between the pair; the
        # quantization modes (e.g. MIN_FIRST/SCALED) must agree to reuse ranges.
        if found_meta_op_flag and len(res) > 2 and \
                quant_node.attr['mode'].s.decode() == deq_node.attr['mode'].s.decode():
            deq_min_range = self.graph_info[dequantize_node_name].node.input[1]
            deq_max_range = self.graph_info[dequantize_node_name].node.input[2]
            quant_output_min = quantize_node_name + ':1'
            quant_output_max = quantize_node_name + ':2'
            quantize_input_name = res[-2]
            quantized_node_name = self.graph_info[quantize_node_name].outputs[0]
            # Rewire the quantized consumer: take data from the last meta op and
            # min/max ranges from the original Dequantize instead of the Quantize.
            for index, value in enumerate(self.graph_info[quantized_node_name].node.input):
                if value == quant_output_min:
                    self.graph_info[quantized_node_name].node.input[index] = deq_min_range
                if value == quant_output_max:
                    self.graph_info[quantized_node_name].node.input[index] = deq_max_range
                if index == 0:
                    self.graph_info[quantized_node_name].node.input[index] = quantize_input_name
            # Retype the meta ops (res[1:-1]) to the Dequantize's quantized dtype.
            new_dtype = self.graph_info[dequantize_node_name].node.attr['T'].type
            for node_name in res[1: -1]:
                self.graph_info[node_name].node.attr['T'].type = new_dtype
            if 'T1' in self.graph_info[quantized_node_name].node.attr:
                self.graph_info[quantized_node_name].node.attr['T1'].type = new_dtype
            if 'Tinput' in self.graph_info[quantized_node_name].node.attr:
                self.graph_info[quantized_node_name].node.attr['Tinput'].type = new_dtype
            if 'T' in self.graph_info[quantized_node_name].node.attr:
                self.graph_info[quantized_node_name].node.attr['T'].type = new_dtype
            # First meta op now reads the Dequantize's quantized input directly.
            self.graph_info[res[1]
                            ].node.input[0] = self.graph_info[dequantize_node_name].node.input[0]
            self.graph_analyzer.remove_node(dequantize_node_name)
            self.graph_analyzer.remove_node(self.graph_info[quantize_node_name].node.input[1])
            self.graph_analyzer.remove_node(self.graph_info[quantize_node_name].node.input[2])
            self.graph_analyzer.remove_node(quantize_node_name)

    @dump_elapsed_time("Pass MetaOpOptimizer")
    def do_transformation(self):
        """Apply the rewrite to every QuantizeV2 node and return the graph."""
        # NOTE(review): despite the name, `deq_node` holds QuantizeV2 matches.
        deq_node = self.graph_analyzer.query_fusion_pattern_nodes([['QuantizeV2']])
        for i in deq_node:
            self._check_and_apply_transform(i[0])
        return GraphAnalyzer().dump_graph()
| StarcoderdataPython |
def is_k_anonymous(partition, k):
    """Return True if the partition satisfies k-anonymity, i.e. it contains
    at least k records."""
    return len(partition) >= k
def is_l_diverse(df, partition, sensitive_column, l):
    """Return True if the partition holds at least `l` distinct values of
    the sensitive column (l-diversity)."""
    return df.loc[partition][sensitive_column].nunique() >= l
def is_t_close(df, partition, sensitive_column, global_freqs, p):
    """Return True if the partition satisfies t-closeness with threshold `p`:
    the maximum absolute difference between the partition's per-value
    frequency and the global frequency must not exceed `p`.

    BUG FIX: the loop previously reassigned the threshold parameter `p` to
    each group's frequency, so the final comparison used the last group's
    frequency instead of the caller's threshold.

    NOTE(review): values absent from the partition contribute nothing to
    d_max (their 0-vs-global gap is ignored), matching the original's
    behavior -- confirm this is the intended distance.
    """
    total_count = float(len(partition))
    d_max = None
    group_counts = (
        df.loc[partition].groupby(sensitive_column)[sensitive_column].agg("count")
    )
    for value, count in group_counts.to_dict().items():
        freq = count / total_count
        d = abs(freq - global_freqs[value])
        if d_max is None or d > d_max:
            d_max = d
    # An empty partition has no frequency deviation; treat it as trivially
    # t-close (the previous code raised TypeError comparing None).
    if d_max is None:
        return True
    return d_max <= p
def get_global_freq(df, sensitive_column):
    """Return a mapping value -> relative frequency of that value in the
    sensitive column over the whole dataframe."""
    total = float(len(df))
    counts = df.groupby(sensitive_column)[sensitive_column].agg("count")
    return {value: count / total for value, count in counts.to_dict().items()}
| StarcoderdataPython |
61182 | <gh_stars>0
import numpy as np
# activation functions and its derivatives
def tanh(x):
    """Hyperbolic tangent activation (thin wrapper around np.tanh)."""
    return np.tanh(x)
def tanh_prime(x):
    """Derivative of tanh: 1 - tanh(x)^2."""
    t = np.tanh(x)
    return 1 - t * t
def sigmoid(x):
    """Logistic sigmoid activation: 1 / (1 + e^(-x))."""
    exp_neg = np.exp(-x)
    return 1 / (1 + exp_neg)
def sigmoid_prime(x):
    """Derivative of the sigmoid: s(x) * (1 - s(x)).

    Computes sigmoid(x) once instead of twice (the previous version
    evaluated the exponential two times for the same input).
    """
    s = sigmoid(x)
    return s * (1 - s)
| StarcoderdataPython |
3242378 | <reponame>theotherphp/relay
"""
Main app - setup, signal handling and graceful shutdown
"""
import os
from signal import signal, SIGTERM, SIGINT
from functools import partial
from tornado.ioloop import IOLoop
from tornado.web import Application, StaticFileHandler
from tornado.httpserver import HTTPServer
from relay_db import RelayDB
from relay_rest import MainHandler, TagsHandler, TeamHandler, TeamsHandler, \
WalkerHandler, ZeroHandler
from relay_feeds import LeaderboardWSHandler, LapsWSHandler
from relay_config import cfg
import logging
logging.basicConfig(
name=__name__,
filename='relay.log',
level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(module)s %(message)s'
)
def sig_handler(server, sig, frame):
    """Handle SIGTERM/SIGINT: stop the IOLoop so run_app() can fall through
    to its clean database close.

    :param server: the HTTPServer bound via functools.partial; currently
        unused in the body (kept so the server could be stopped here later)
    :param sig: signal number received
    :param frame: current stack frame (standard signal-handler argument)
    """
    # Stopping the app is nice, but closing the DB cleanly is the main point
    logging.debug('caught signal %d' % sig)
    IOLoop.instance().stop()
def run_app():
    """Open the database, build the Tornado application and its routes,
    start the HTTP server on cfg.app_port, block in the IOLoop until a
    signal stops it, then close the database cleanly."""
    db = RelayDB()
    IOLoop.instance().run_sync(db.open)
    # Shared constructor argument handed to every request handler.
    handler_args = dict(db=db)
    app_settings = {
        'static_path': os.path.join(os.path.dirname(__file__), 'static'),
        'pure_path': os.path.join(os.path.dirname(__file__), 'static', 'pure'),
        'viewer_path': os.path.join(os.path.dirname(__file__), 'static', 'Lap-Counter-Viewer'),
        'debug': True
    }
    app = Application([
        # Leaderboard support
        (r'/(index.*\.html)', StaticFileHandler, dict(path=app_settings['viewer_path'])),
        (r'/(css/index.*\.css)', StaticFileHandler, dict(path=app_settings['viewer_path'])),
        (r'/(js/(.*)\.js)', StaticFileHandler, dict(path=app_settings['viewer_path'])),
        (r'/(.*\.mp3)', StaticFileHandler, dict(path=app_settings['static_path'])),
        (r'/', MainHandler, handler_args),
        (r'/leaderboard_ws', LeaderboardWSHandler, handler_args),
        (r'/laps_ws', LapsWSHandler, handler_args),
        (r'/tags', TagsHandler, handler_args),
        # Admin web pages using https://purecss.io
        (r'/(pure-min\.css)', StaticFileHandler, dict(path=app_settings['pure_path'])),
        (r'/(side-menu\.css)', StaticFileHandler, dict(path=app_settings['pure_path'])),
        (r'/(ui\.js)', StaticFileHandler, dict(path=app_settings['pure_path'])),
        (r'/teams/', TeamsHandler, handler_args),
        (r'/team/(.*)', TeamHandler, handler_args),
        (r'/walker/(.*)', WalkerHandler, handler_args),
        (r'/zero/', ZeroHandler, handler_args)
    ], autoreload=True, **app_settings)
    server = HTTPServer(app)
    server.listen(cfg.app_port)
    # Bind the server into the handler; both signals trigger a clean stop.
    signal(SIGTERM, partial(sig_handler, server))
    signal(SIGINT, partial(sig_handler, server))
    IOLoop.current().start()
    IOLoop.current().run_sync(db.close)  # Close the DB cleanly to avoid corruption
if __name__ == '__main__':
logging.info('starting')
run_app()
logging.info('exiting')
| StarcoderdataPython |
4988159 | from random import randint
from retrying import retry
import apysc as ap
from apysc._display.x_interface import XInterface
from apysc._expression import expression_data_util
from apysc._type import value_util
class TestXInterface:
    """Unit tests for XInterface, the mixin providing the `x` coordinate.

    Every test is wrapped in @retry with randomized backoff -- the convention
    used across these tests (the underlying reason is not visible here).
    """
    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test_x(self) -> None:
        """The x getter/setter round-trips values and returns a copy."""
        x_interface = XInterface()
        x_interface.variable_name = 'test_x_interface'
        x_interface.x = ap.Int(100)
        assert x_interface.x == 100
        x: ap.Int = x_interface.x
        # The getter's value equals the internal _x but carries a distinct
        # variable name, i.e. it is a copy rather than the same object.
        assert x == x_interface._x
        assert x.variable_name != x_interface._x.variable_name
        x_interface.x = 200  # type: ignore
        assert x_interface.x == 200

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test__append_x_update_expression(self) -> None:
        """Setting x appends a `<name>.x(<value>)` call to the expression."""
        x_interface = XInterface()
        x_interface.variable_name = 'test_x_interface'
        expression_data_util.empty_expression()
        x_interface.x = ap.Int(200)
        expression: str = expression_data_util.get_current_expression()
        value_str: str = value_util.get_value_str_for_expression(
            value=x_interface._x)
        expected: str = f'test_x_interface.x({value_str});'
        assert expected in expression

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test__initialize_x_if_not_initialized(self) -> None:
        """Initialization defaults x to 0 and never overwrites a set value."""
        x_interface = XInterface()
        x_interface.variable_name = 'test_x_interface'
        x_interface._initialize_x_if_not_initialized()
        assert x_interface.x == 0
        x_interface.x = ap.Int(100)
        x_interface._initialize_x_if_not_initialized()
        assert x_interface.x == 100

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test__make_snapshot(self) -> None:
        """A snapshot records x once; re-snapshotting under the same name
        does not overwrite the stored value."""
        x_interface = XInterface()
        x_interface.variable_name = 'test_x_interface'
        x_interface.x = ap.Int(100)
        snapshot_name: str = 'snapshot_1'
        x_interface._run_all_make_snapshot_methods(
            snapshot_name=snapshot_name)
        assert x_interface._x_snapshots[snapshot_name] == 100
        x_interface.x = ap.Int(150)
        x_interface._run_all_make_snapshot_methods(
            snapshot_name=snapshot_name)
        assert x_interface._x_snapshots[snapshot_name] == 100

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test__revert(self) -> None:
        """Revert restores the snapshotted x once; a second revert of the
        same snapshot leaves the current value untouched."""
        x_interface = XInterface()
        x_interface.variable_name = 'test_x_interface'
        x_interface.x = ap.Int(100)
        snapshot_name: str = 'snapshot_1'
        x_interface._run_all_make_snapshot_methods(
            snapshot_name=snapshot_name)
        x_interface.x = ap.Int(150)
        x_interface._run_all_revert_methods(
            snapshot_name=snapshot_name)
        assert x_interface.x == 100
        x_interface.x = ap.Int(150)
        x_interface._run_all_revert_methods(
            snapshot_name=snapshot_name)
        assert x_interface.x == 150

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test__append_x_attr_linking_setting(self) -> None:
        """Initializing x registers the initial Int(0) in the attr-linking stack."""
        x_interface = XInterface()
        x_interface.variable_name = 'test_x_interface'
        x_interface._initialize_x_if_not_initialized()
        assert x_interface._attr_linking_stack['x'] == [ap.Int(0)]
| StarcoderdataPython |
3399630 | # richard -- video index system
# Copyright (C) 2012, 2013 richard contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import patterns, url
urlpatterns = patterns(
'',
url(r'^$', 'richard.base.views.home', name='home'),
url(r'^login-failure$', 'richard.base.views.login_failure',
name='login_failure'),
url(r'^new_user$', 'new_user', name='new_user'),
url(r'^stats/$', 'richard.base.views.stats', name='stats'),
)
| StarcoderdataPython |
5169821 | import inspect
from abc import ABCMeta, abstractmethod
from functools import partial
from typing import List, get_type_hints
from guniflask.beans.constructor_resolver import ConstructorResolver
from guniflask.beans.definition import BeanDefinition
from guniflask.beans.definition_registry import BeanDefinitionRegistry
from guniflask.beans.errors import BeanTypeNotDeclaredError, BeanTypeNotAllowedError, BeanCreationError, \
BeanNotOfRequiredTypeError, NoUniqueBeanDefinitionError
from guniflask.beans.errors import NoSuchBeanDefinitionError, BeanDefinitionStoreError
from guniflask.beans.factory import BeanFactoryAware, BeanNameAware, ConfigurableBeanFactory
from guniflask.beans.lifecycle import InitializingBean, SmartInitializingSingleton, DisposableBean
from guniflask.beans.post_processor import BeanPostProcessor
class AbstractBeanFactory(ConfigurableBeanFactory, metaclass=ABCMeta):
    """Base bean factory: resolves beans by name/type, manages post
    processors, and delegates definition storage and bean creation to
    subclasses via the abstract methods at the bottom."""

    def __init__(self):
        super().__init__()
        self._bean_post_processors = []

    def add_bean_post_processor(self, bean_post_processor: BeanPostProcessor):
        """Register a post processor; remove-then-append keeps it unique and
        moves it to the end of the chain if it was already registered."""
        try:
            self._bean_post_processors.remove(bean_post_processor)
        except ValueError:
            pass
        self._bean_post_processors.append(bean_post_processor)

    @property
    def bean_post_processors(self) -> List[BeanPostProcessor]:
        return self._bean_post_processors

    def get_bean(self, bean_name, required_type: type = None):
        """Return the bean registered under `bean_name`, creating a singleton
        lazily on first access. Raises BeanNotOfRequiredTypeError when
        `required_type` is given and the instance does not match it."""
        bean = None
        share_instance = self.get_singleton(bean_name)
        if share_instance is not None:
            bean = share_instance
        else:
            bean_definition = self.get_bean_definition(bean_name)
            if bean_definition.is_singleton():
                # The factory callable is only invoked if no cached singleton exists.
                bean = self.get_singleton_from_factory(
                    bean_name,
                    partial(self.create_bean, bean_name, bean_definition),
                )
            elif bean_definition.is_prototype():
                # FIXME: support prototype
                raise RuntimeError('Do not support prototype now')
        # Check if required type matches the type of the actual bean instance.
        if bean is not None and required_type is not None:
            if not issubclass(type(bean), required_type):
                raise BeanNotOfRequiredTypeError(bean, required_type, type(bean))
        return bean

    def get_bean_of_type(self, required_type: type):
        """Return the single bean matching `required_type`; None when there is
        no candidate; raises NoUniqueBeanDefinitionError on ambiguity."""
        candidates = self.get_beans_of_type(required_type)
        if len(candidates) == 1:
            return list(candidates.values())[0]
        if len(candidates) > 1:
            raise NoUniqueBeanDefinitionError(required_type)

    def get_beans_of_type(self, required_type: type):
        """Return a name -> bean dict of all beans matching `required_type`
        (instantiating them as a side effect)."""
        names = self.get_bean_names_for_type(required_type)
        result = {}
        for name in names:
            bean = self.get_bean(name, required_type=required_type)
            result[name] = bean
        return result

    def is_type_match(self, bean_name: str, type_to_match: type) -> bool:
        """Check the named bean against `type_to_match` -- via isinstance when
        an instance already exists, otherwise via the declared bean type."""
        bean = self.get_singleton(bean_name)
        if bean is not None:
            return isinstance(bean, type_to_match)
        if not self.contains_bean_definition(bean_name):
            return False
        bean_type = self._resolve_bean_type(bean_name, self.get_bean_definition(bean_name))
        if bean_type is None:
            return False
        return issubclass(bean_type, type_to_match)

    def _resolve_bean_type(self, bean_name: str, bean_definition: BeanDefinition) -> type:
        """Resolve the bean's type from its definition source: the class
        itself, or a factory function's declared return annotation. Returns
        None (implicitly) for any other kind of source. Raises
        BeanTypeNotDeclaredError when a factory has no return annotation and
        BeanTypeNotAllowedError when the annotation is not a class."""
        source = bean_definition.source
        if inspect.isclass(source):
            return source
        if inspect.isfunction(source) or inspect.ismethod(source):
            hints = get_type_hints(source)
            if 'return' not in hints:
                raise BeanTypeNotDeclaredError(bean_name)
            bean_type = hints['return']
            if not inspect.isclass(bean_type):
                raise BeanTypeNotAllowedError(bean_name, bean_type)
            return bean_type

    @abstractmethod
    def create_bean(self, bean_name: str, bean_definition: BeanDefinition):
        """Instantiate a bean from its definition (subclass responsibility)."""
        pass  # pragma: no cover

    @abstractmethod
    def contains_bean_definition(self, bean_name: str) -> bool:
        """Whether a definition is registered under `bean_name`."""
        pass  # pragma: no cover

    @abstractmethod
    def get_bean_definition(self, bean_name: str) -> BeanDefinition:
        """Return the definition registered under `bean_name`."""
        pass  # pragma: no cover
class DefaultBeanFactory(AbstractBeanFactory, BeanDefinitionRegistry):
def __init__(self):
AbstractBeanFactory.__init__(self)
self._constructor_resolver = ConstructorResolver(self)
self._bean_definition_map = {}
self._allow_bean_definition_overriding = True
def get_bean_names_for_type(self, required_type: type) -> List[str]:
names = []
for bean_name in self._bean_definition_map:
if self.is_type_match(bean_name, required_type):
names.append(bean_name)
return names
def pre_instantiate_singletons(self):
bean_names = self.get_bean_definition_names()
for bean_name in bean_names:
bean_definition = self.get_bean_definition(bean_name)
if bean_definition.is_singleton():
self.get_bean(bean_name)
for bean_name in bean_names:
singleton = self.get_singleton(bean_name)
if isinstance(singleton, SmartInitializingSingleton):
singleton.after_singletons_instantiated()
def create_bean(self, bean_name: str, bean_definition: BeanDefinition):
bean = self._resolve_before_instantiation(bean_name, bean_definition)
if bean is not None:
return bean
bean = self._do_create_bean(bean_name, bean_definition)
return bean
def _do_create_bean(self, bean_name: str, bean_definition: BeanDefinition):
bean = self._create_bean_instance(bean_name, bean_definition)
bean = self._initialize_bean(bean, bean_name)
self._register_disposable_bean_if_necessary(bean_name, bean, bean_definition)
return bean
def _create_bean_instance(self, bean_name: str, bean_definition: BeanDefinition):
"""
1. Find the bean with the same name as arg if the required bean type is missing.
2. Find beans which matches the required bean type.
3. If there is only one matched bean, set it to arg.
4. If there are more than one matched beans, choose the bean which has the same name with arg.
Raise error if no such bean.
5. Set arg to its declared default value.
6. Raise error if there is any unassigned arg.
Note: self and cls are special.
"""
source = bean_definition.source
factory_bean = None
if bean_definition.factory_bean_name is not None:
factory_bean = self.get_bean(bean_definition.factory_bean_name)
if inspect.isclass(source):
func = source
elif inspect.isfunction(source) or inspect.ismethod(source):
if factory_bean is None:
func = source
else:
func = getattr(factory_bean, source.__name__)
else:
raise BeanCreationError(bean_name)
try:
bean = self._constructor_resolver.instantiate(func)
except Exception as e:
raise BeanCreationError(bean_name, message=f'Cannot create bean named "{bean_name}"\n{e}')
return bean
def _resolve_bean_type(self, bean_name: str, bean_definition: BeanDefinition) -> type:
source = bean_definition.source
if inspect.isclass(source):
return source
if inspect.isfunction(source) or inspect.ismethod(source):
hints = get_type_hints(source)
if 'return' not in hints:
raise BeanTypeNotDeclaredError(bean_name)
bean_type = hints['return']
if not inspect.isclass(bean_type):
raise BeanTypeNotAllowedError(bean_name, bean_type)
return bean_type
def _resolve_before_instantiation(self, bean_name: str, bean_definition: BeanDefinition):
bean = None
bean_type = self._resolve_bean_type(bean_name, bean_definition)
if bean_type is not None:
bean = self._apply_bean_post_processors_before_instantiation(bean_type, bean_name)
if bean is not None:
bean = self._apply_bean_post_processors_after_initialization(bean, bean_name)
return bean
def _initialize_bean(self, bean, bean_name: str):
self._invoke_aware_methods(bean, bean_name)
wrapped_bean = bean
wrapped_bean = self._apply_bean_post_processors_before_initialization(wrapped_bean, bean_name)
self._invoke_init_methods(wrapped_bean)
wrapped_bean = self._apply_bean_post_processors_after_initialization(wrapped_bean, bean_name)
return wrapped_bean
def _invoke_aware_methods(self, bean, bean_name: str):
    """Inject framework context into beans implementing the *Aware interfaces."""
    if isinstance(bean, BeanNameAware):
        bean.set_bean_name(bean_name)
    if isinstance(bean, BeanFactoryAware):
        # The factory itself is handed to the bean.
        bean.set_bean_factory(self)
def _invoke_init_methods(self, bean):
    """Run the bean's post-construction hook if it opts in via InitializingBean."""
    if isinstance(bean, InitializingBean):
        bean.after_properties_set()
def _apply_bean_post_processors_before_instantiation(self, bean_type: type, bean_name: str):
    """Return the first non-None result of the pre-instantiation hook across
    the registered post-processors, or None (implicit fall-through) if every
    processor declines to short-circuit creation.
    """
    for post_processor in self.bean_post_processors:
        result = post_processor.post_process_before_instantiation(bean_type, bean_name)
        if result is not None:
            return result
def _apply_bean_post_processors_before_initialization(self, bean, bean_name: str):
for post_processor in self.bean_post_processors:
result = post_processor.post_process_before_initialization(bean, bean_name)
if result is not None:
bean = result
return bean
def _apply_bean_post_processors_after_initialization(self, bean, bean_name: str):
for post_processor in self.bean_post_processors:
result = post_processor.post_process_after_initialization(bean, bean_name)
if result is not None:
bean = result
return bean
def _register_disposable_bean_if_necessary(self, bean_name: str, bean, bean_definition: BeanDefinition):
    """Track singleton beans that need a destruction callback.

    Only singletons are registered: prototype beans are not managed after
    creation, so their disposal is the caller's responsibility.
    """
    if bean_definition.is_singleton() and isinstance(bean, DisposableBean):
        self.register_disposable_bean(bean_name, bean)
def set_allow_bean_definition_overriding(self, allow_bean_definition_overriding: bool):
    """Configure whether register_bean_definition may silently replace an
    existing definition with the same name."""
    self._allow_bean_definition_overriding = allow_bean_definition_overriding
@property
def is_allow_bean_definition_overriding(self) -> bool:
    """Whether re-registering an existing bean name is permitted.

    Read-only property; use set_allow_bean_definition_overriding to change it.
    """
    return self._allow_bean_definition_overriding
def register_bean_definition(self, bean_name: str, bean_definition: BeanDefinition):
    """Bind *bean_definition* under *bean_name*, honoring the overriding policy.

    Raises:
        BeanDefinitionStoreError: the name is already bound and overriding
            is disabled.
    """
    already_bound = self._bean_definition_map.get(bean_name) is not None
    if already_bound and not self.is_allow_bean_definition_overriding:
        raise BeanDefinitionStoreError(f'A bean named "{bean_name}" is already bound')
    self._bean_definition_map[bean_name] = bean_definition
def get_bean_definition(self, bean_name: str) -> BeanDefinition:
    """Return the definition bound to *bean_name*.

    Raises:
        NoSuchBeanDefinitionError: no definition is bound to the name.
    """
    bean_definition = self._bean_definition_map.get(bean_name)
    if bean_definition is None:
        raise NoSuchBeanDefinitionError(bean_name)
    return bean_definition
def get_bean_definition_names(self) -> List[str]:
    """Return the names of all registered bean definitions, in insertion order."""
    return [*self._bean_definition_map]
def contains_bean_definition(self, bean_name: str) -> bool:
    """Return True if a definition is registered under *bean_name*."""
    return bean_name in self._bean_definition_map
def remove_bean_definition(self, bean_name: str):
    """Unregister the definition bound to *bean_name*.

    Raises:
        NoSuchBeanDefinitionError: no definition is bound to the name.
    """
    if bean_name not in self._bean_definition_map:
        raise NoSuchBeanDefinitionError(bean_name)
    del self._bean_definition_map[bean_name]
| StarcoderdataPython |
1944981 | <filename>scripts/arcresthelper/__init__.py<gh_stars>1-10
"""arcresthelper package: re-exports the helper submodules."""
# NOTE(review): these are implicit relative imports (Python 2 only);
# Python 3 would require "from . import common" etc. — confirm target runtime.
import common
import securityhandlerhelper
import featureservicetools
import orgtools
import portalautomation
import publishingtools
import resettools

# Package release version string.
__version__ = "3.0.1"
| StarcoderdataPython |
6517780 | from tkinter import Tk, Button, Label
from tkinter import Canvas
from random import randint
# --- module-level UI and game state (runs at import time) ---
root = Tk()
root.title("Catch the ball Game")
root.resizable(False, False)
# for defining the canvas
canvas = Canvas(root, width=600, height=600)
canvas.pack()
# variable for the vertical distance travelled by ball
limit = 0
# variable for horizontal distance of bar from x-axis
dist = 5
# variable for score
score = 0
# Class for the Creating and moving ball
class Ball:
    """A falling ball drawn on the module-level canvas.

    Movement depends on the module globals ``limit`` (vertical distance
    fallen), ``dist`` (bar x position) and ``score``.
    """

    # for creation of ball on the canvas
    def __init__(self, canvas, x1, y1, x2, y2):
        self.x1 = x1
        self.y1 = y1
        self.x2 = x2
        self.y2 = y2
        self.canvas = canvas
        # for creation of ball object
        self.ball = canvas.create_oval(self.x1, self.y1, self.x2, self.y2, fill="red", tags='dot1')

    # for moving the ball
    def move_ball(self):
        """Advance the ball one pixel; on landing either score (caught by
        the bar) or end the round (missed)."""
        # defining offset
        offset = 10
        global limit
        # checking if ball lands ground or bar
        if limit >= 510:
            global dist, score, next
            # checking that ball falls on the bar
            if (dist - offset <= self.x1 and dist + 40 + offset >= self.x2):
                # incrementing the score
                score += 10
                # dissappear the ball
                canvas.delete('dot1')
                # calling the function for again creation of ball object
                ball_set()
            else:
                # dissappear the ball
                canvas.delete('dot1')
                # NOTE(review): passes a Ball as 'self' to bar.delete_bar;
                # harmless because delete_bar only touches the canvas global.
                bar.delete_bar(self)
                # display the score
                score_board()
            return
        # incrementing the vertical distance travelled by ball by deltay
        limit += 1
        # moving the ball in vertical direction by taking x=0 and y=deltay
        self.canvas.move(self.ball, 0, 1)
        # for continuous moving of ball again call move_ball
        self.canvas.after(10, self.move_ball)
# class for creating and moving bar
class bar:
    """The player-controlled paddle (lowercase class name kept for
    compatibility with existing callers)."""

    # method for creating bar
    def __init__(self, canvas, x1, y1, x2, y2):
        self.x1 = x1
        self.y1 = y1
        self.x2 = x2
        self.y2 = y2
        self.canvas = canvas
        # for creating bar using create_rectangle
        self.rod = canvas.create_rectangle(self.x1, self.y1, self.x2, self.y2, fill="grey", tags='dot2')

    # method for moving the bar
    def move_bar(self, num):
        """Shift the bar 20px right (num == 1) or left (otherwise),
        keeping the module-global ``dist`` in sync."""
        global dist
        # checking the forward or backward button
        if (num == 1):
            # moving the bar in forward direction by taking x-axis positive distance and taking vertical distance y=0
            self.canvas.move(self.rod, 20, 0)
            # incrementing the distance of bar from x-axis
            dist += 20
        else:
            # moving the bar in backward direction by taking x-axis negative distance and taking vertical distance y=0
            self.canvas.move(self.rod, -20, 0)
            # decrementing the distance of bar from x-axis
            dist -= 20

    def delete_bar(self):
        """Remove the bar from the canvas (uses the module-global canvas)."""
        canvas.delete('dot2')
# Function to define the dimensions of the ball
def ball_set():
    """Reset the fall distance and drop a new ball from a random x position."""
    global limit
    limit = 0
    # for random x-axis distance from where the ball starts to fall
    value = randint(0, 570)
    # define the dimensions of the ball
    ball1 = Ball(canvas, value, 20, value + 30, 50)
    # call function for moving of the ball
    ball1.move_ball()
# Function for displaying the score after getting over of the game
def score_board():
    """Open a game-over window showing the score with replay/exit buttons.

    NOTE(review): this creates a second Tk() root while the first is still
    alive; multiple Tk instances are fragile in tkinter — consider Toplevel.
    """
    root2 = Tk()
    root2.title("Catch the ball Game")
    root2.resizable(False, False)
    canvas2 = Canvas(root2, width=300, height=300)
    canvas2.pack()
    w = Label(canvas2, text="\nOOPS...GAME IS OVER\n\nYOUR SCORE = " + str(score) + "\n\n")
    w.pack()
    button3 = Button(canvas2, text="PLAY AGAIN", bg="green", command=lambda: play_again(root2))
    button3.pack()
    button4 = Button(canvas2, text="EXIT", bg="green", command=lambda: exit_handler(root2))
    button4.pack()
# Function for handling the play again request
def play_again(root2):
    """Close the game-over window and restart a round."""
    root2.destroy()
    main()
# Function for handling exit request
def exit_handler(root2):
    """Close both the game-over window and the main game window."""
    root2.destroy()
    root.destroy()
# Main function
def main():
    """(Re)start a round: reset score and bar position, build the control
    buttons, drop the first ball, and enter the Tk event loop."""
    global score, dist
    score = 0
    dist = 0
    # defining the dimensions of bar
    bar1 = bar(canvas, 5, 560, 45, 575)
    # defining the text,colour of buttons and also define the action after click on the button by calling suitable methods
    button = Button(canvas, text="->", bg="green", command=lambda: bar1.move_bar(1))
    # placing the buttons at suitable location on the canvas
    button.place(x=300, y=580)
    button2 = Button(canvas, text="<-", bg="green", command=lambda: bar1.move_bar(0))
    button2.place(x=260, y=580)
    # calling the function for defining the dimensions of ball
    ball_set()
    root.mainloop()
# Driver code: launch the game only when run as a script
# (redundant parentheses around the condition removed — non-idiomatic).
if __name__ == "__main__":
    main()
| StarcoderdataPython |
6644971 | #!/usr/bin/env python3
import click
import logging

from mujoco_worldgen.util.envs import EnvViewer, examine_env
from mujoco_worldgen.util.path import worldgen_path
from mujoco_worldgen.util.parse_arguments import parse_arguments

# Module-level logger for this CLI tool.
logger = logging.getLogger(__name__)

# See main()'s docstring below for the expected argv format.
@click.command()
@click.argument('argv', nargs=-1, required=False)
def main(argv):
    '''
    examine.py is used to display environments
    Example uses:
        bin/examine.py simple_particle
        bin/examine.py examples/particle_gather.py
        bin/examine.py particle_gather n_food=5 floorsize=5
        bin/examine.py example_env_examine.jsonnet
    '''
    env_names, env_kwargs = parse_arguments(argv)
    # Exactly one environment may be examined per invocation.
    assert len(env_names) == 1, 'You must provide exactly 1 environment to examine.'
    env_name = env_names[0]
    examine_env(env_name, env_kwargs,
                core_dir=worldgen_path(), envs_dir='examples', xmls_dir='xmls',
                env_viewer=EnvViewer)
# NOTE(review): this executes at import time as well as on direct run;
# it looks like intentional usage help — confirm it shouldn't live inside
# the __main__ guard.
print(main.__doc__)
if __name__ == '__main__':
    # Reset any handlers installed by imports before configuring logging.
    logging.getLogger('').handlers = []
    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
    main()
| StarcoderdataPython |
8096174 | # Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.helm.goals import lint, package, publish, tailor
from pants.backend.helm.target_types import (
HelmArtifactTarget,
HelmChartTarget,
HelmUnitTestTestsGeneratorTarget,
HelmUnitTestTestTarget,
)
from pants.backend.helm.target_types import rules as target_types_rules
from pants.backend.helm.test.unittest import rules as test_rules
from pants.backend.helm.util_rules import chart, sources, tool
def target_types():
    """Target types contributed by the Helm backend to the Pants engine."""
    return [
        HelmArtifactTarget,
        HelmChartTarget,
        HelmUnitTestTestTarget,
        HelmUnitTestTestsGeneratorTarget,
    ]
def rules():
    """All engine rules contributed by the Helm backend (goals, util rules,
    and target-type rules), flattened into a single list."""
    return [
        *chart.rules(),
        *lint.rules(),
        *package.rules(),
        *publish.rules(),
        *tailor.rules(),
        *test_rules(),
        *sources.rules(),
        *tool.rules(),
        *target_types_rules(),
    ]
| StarcoderdataPython |
4987740 | <reponame>CamelKing1997/AICVTools
import os
import shutil
import sys
from stat import *
import argparse
def walkfolder(srcdir, tardir):
    """Recursively copy every file under *srcdir* flat into *tardir*,
    logging a running count.

    Note: files sharing a basename in different subfolders overwrite each
    other in *tardir* (the tool's intended flattening behavior).
    """
    index = 1
    for root, dirs, files in os.walk(srcdir, True):
        for file in files:
            shutil.copy(os.path.join(root, file), tardir)
            # Log before advancing the counter: the original incremented
            # first, so the very first file was reported as [num:2].
            print(f'[num:{index}] COPY {os.path.join(root, file)} TO {os.path.join(tardir, file)}')
            index += 1
def parse_opt():
    """Parse --srcdir/--tardir from sys.argv; both default to ''."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--srcdir', type=str, default='')
    parser.add_argument('--tardir', type=str, default='')
    opt = parser.parse_args()
    return opt
if __name__ == '__main__':
    # CLI entry point: copy everything under --srcdir flat into --tardir.
    opt = parse_opt()
    srcdir = opt.srcdir
    tardir = opt.tardir
    walkfolder(srcdir, tardir)
| StarcoderdataPython |
40056 | #!/usr/bin/env python3.5
import sys
import os
import logging
import numpy as np
import musm
from sklearn.utils import check_random_state
from textwrap import dedent
# Section 1: social-choice experiment setup.
_LOG = musm.get_logger('adt17')

# Maps CLI problem names to problem classes.
PROBLEMS = {
    'synthetic': musm.Synthetic,
    'pc': musm.PC,
}

# Maps response-model names to simulated-user classes.
USERS = {
    'noiseless': musm.NoiselessUser,
    'pl': musm.PlackettLuceUser,
}
def get_results_path(args):
    """Build the results pickle path, encoding every experiment setting in
    the filename (same field order as always)."""
    keys = (
        'problem', 'num_groups', 'num_clusters_per_group',
        'num_users_per_group', 'max_iters', 'set_size',
        'pick', 'transform', 'tau', 'lmbda',
        'enable_cv', 'min_regret', 'distrib',
        'density', 'response_model', 'noise', 'seed',
    )
    basename = '_'.join(str(args[key]) for key in keys)
    return os.path.join('results', basename + '.pickle')
def _sparsify(w, density, rng):
if not (0 < density <= 1):
raise ValueError('density must be in (0, 1], got {}'.format(density))
w = np.array(w, copy=True)
perm = rng.permutation(w.shape[1])
num_zeros = round((1 - density) * w.shape[1])
w[:,perm[:min(num_zeros, w.shape[1] - 1)]] = 0
return w
def sample_cluster(problem, num_users=5, distrib='normal', density=1, rng=0):
    """Sample a cluster of *num_users* weight vectors for *problem*.

    Fixes the unusable default rng=0: a seed-like value (int or None) is
    now promoted to a numpy RandomState (matching sklearn's
    check_random_state semantics); previously calling with the default
    raised AttributeError on rng.uniform.

    Raises:
        ValueError: unknown *distrib* (must be 'uniform' or 'normal').
    """
    if not hasattr(rng, 'uniform'):
        # Promote a seed (int or None) to a proper random state.
        rng = np.random.RandomState(rng)
    num_attributes = problem.num_attributes
    if hasattr(problem, 'cost_matrix'):
        num_attributes += problem.cost_matrix.shape[0]
    if distrib == 'uniform':
        w_mean = rng.uniform(0, 1, size=num_attributes)
    elif distrib == 'normal':
        # NOTE(review): despite the name, this draws uniform(-1, 1);
        # kept as-is to preserve experiment behavior.
        w_mean = rng.uniform(-1, 1, size=num_attributes)
    else:
        raise ValueError('invalid distrib, got {}'.format(distrib))
    if True:  # XXX deliberate toggle: identical weights within a cluster
        w = w_mean + np.zeros((num_users, num_attributes))
    else:
        w = w_mean + rng.uniform(0, 25, size=(num_users, num_attributes))
    return _sparsify(np.abs(w), density, rng)
def generate_user_groups(problem, args):
    """Create args['num_groups'] groups of simulated users, each group made
    of args['num_clusters_per_group'] clusters of similar users."""
    User = USERS[args['response_model']]
    # NOTE(review): fixed seed 0, not args['seed'] — confirm intentional.
    rng = check_random_state(0)
    num_users_per_cluster = max(1, round(args['num_users_per_group'] /
                                         args['num_clusters_per_group']))
    user_groups = []
    for gid in range(args['num_groups']):
        w_star = []
        for cid in range(1, args['num_clusters_per_group'] + 1):
            if cid == args['num_clusters_per_group']:
                # The last cluster absorbs the rounding remainder so the
                # group totals exactly num_users_per_group users.
                num_users_in_cluster = args['num_users_per_group'] - len(w_star)
            else:
                num_users_in_cluster = num_users_per_cluster
            temp = sample_cluster(problem,
                                  num_users=num_users_in_cluster,
                                  distrib=args['distrib'],
                                  density=args['density'],
                                  rng=rng)
            ttemp = temp
            if hasattr(problem, 'cost_matrix'):
                # Fold the cost weights back onto the boolean attributes.
                num_costs = problem.cost_matrix.shape[0]
                temp_bools = temp[:, :-num_costs]
                temp_costs = temp[:, -num_costs:]
                ttemp = temp_bools + np.dot(temp_costs, problem.cost_matrix)
            _LOG.debug(dedent('''\
                CLUSTER {cid}:
                true user weights =
                {temp}
                true user weights transformed by cost matrix =
                {ttemp}
                ''').format(**locals()))
            if len(w_star) == 0:
                w_star = ttemp
            else:
                w_star = np.append(w_star, ttemp, axis=0)
        user_groups.append([User(problem,
                                 w_star[uid],
                                 min_regret=args['min_regret'],
                                 noise=args['noise'],
                                 rng=rng)
                            for uid in range(args['num_users_per_group'])])
    return user_groups
def run(args):
    """Run the elicitation experiment described by *args* and dump the
    per-group traces to the results path."""
    problem = PROBLEMS[args['problem']]()

    try:
        user_groups = musm.load(args['groups'])
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt; kept
        # as-is to preserve best-effort loading of the cached groups file.
        user_groups = generate_user_groups(problem,
                                           musm.subdict(args, nokeys={'problem'}))
        if args['groups'] is not None:
            musm.dump(args['groups'], user_groups)

    # NOTE(review): rng is created but never used below (musm.musm is
    # called with rng=0) — confirm whether it should be passed through.
    rng = check_random_state(args['seed'])

    traces = []
    for gid in range(args['num_groups']):
        traces.append(musm.musm(problem,
                                user_groups[gid],
                                gid,
                                set_size=args['set_size'],
                                max_iters=args['max_iters'],
                                enable_cv=args['enable_cv'],
                                pick=args['pick'],
                                transform=args['transform'],
                                tau=args['tau'],
                                lmbda=args['lmbda'],
                                rng=0))

    musm.dump(get_results_path(args), {'args': args, 'traces': traces})
def main():
    """CLI entry point: build the argument parser, configure numpy error
    handling and logging, then run the experiment."""
    import argparse

    np.seterr(all='raise')
    np.set_printoptions(precision=2, linewidth=1000000)

    fmt = argparse.ArgumentDefaultsHelpFormatter
    parser = argparse.ArgumentParser(formatter_class=fmt)
    group = parser.add_argument_group('Experiment')
    group.add_argument('problem', type=str,
                       help='the problem, any of {}'
                       .format(sorted(PROBLEMS.keys())))
    group.add_argument('-N', '--num-groups', type=int, default=20,
                       help='number of user groups')
    group.add_argument('-C', '--num-clusters-per-group', type=int, default=1,
                       help='number of clusters in a group')
    group.add_argument('-M', '--num-users-per-group', type=int, default=5,
                       help='number of users in a group')
    group.add_argument('-T', '--max-iters', type=int, default=100,
                       help='maximum number of elicitation iterations')
    group.add_argument('-s', '--seed', type=int, default=0,
                       help='RNG seed')
    group.add_argument('-v', '--verbose', action='store_true',
                       help='enable debug spew')
    group = parser.add_argument_group('Algorithm')
    group.add_argument('-K', '--set-size', type=int, default=2,
                       help='set size')
    # (sic) 'critertion' typo kept: it is runtime help text.
    group.add_argument('-P', '--pick', type=str, default='maxvar',
                       help='critertion used for picking users')
    group.add_argument('-F', '--transform', type=str, default='indep',
                       help='user-user transformation to use')
    group.add_argument('-t', '--tau', type=float, default=0.25,
                       help='kernel inverse temperature parameter')
    group.add_argument('-L', '--lmbda', type=float, default=0.5,
                       help='transform importance')
    group.add_argument('-X', '--enable-cv', action='store_true',
                       help='enable hyperparameter cross-validation')
    group = parser.add_argument_group('User Simulation')
    group.add_argument('--min-regret', type=float, default=0,
                       help='minimum regret for satisfaction')
    group.add_argument('-G', '--groups', type=str, default=None,
                       help='path to pickle with user weights')
    group.add_argument('-u', '--distrib', type=str, default='normal',
                       help='distribution of user weights')
    group.add_argument('-d', '--density', type=float, default=1,
                       help='proportion of non-zero user weights')
    group.add_argument('-R', '--response-model', type=str, default='pl',
                       help='user response model for choice queries')
    group.add_argument('-n', '--noise', type=float, default=1,
                       help='amount of user response noise')
    args = parser.parse_args()

    handlers = []
    if args.verbose:
        handlers.append(logging.StreamHandler(sys.stdout))
    logging.basicConfig(level=logging.DEBUG, handlers=handlers,
                        format='%(levelname)-6s %(name)-6s %(funcName)-12s: %(message)s')

    run(vars(args))


if __name__ == '__main__':
    main()
| StarcoderdataPython |
12815134 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2022-04-08 00:20 bucktoothsir <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""
"""
import capstone as ct
import keystone as kt
# Maps an architecture name to its [capstone, keystone] constant pair;
# index 0 is the capstone (disassembler) constant, index 1 keystone's.
ARCH_DIC = {'x86': [ct.CS_ARCH_X86, kt.KS_ARCH_X86],
            'arm': [ct.CS_ARCH_ARM, kt.KS_ARCH_ARM],
            'arm64': [ct.CS_ARCH_ARM64, kt.KS_ARCH_ARM64],
            'mips': [ct.CS_ARCH_MIPS, kt.KS_ARCH_MIPS]
            }

# Maps a CPU-mode name to its [capstone, keystone] constant pair.
MOD_DIC = {'16': [ct.CS_MODE_16, kt.KS_MODE_16],
           '32': [ct.CS_MODE_32, kt.KS_MODE_32],
           '64': [ct.CS_MODE_64, kt.KS_MODE_64],
           'arm': [ct.CS_MODE_ARM, kt.KS_MODE_ARM],
           'bigendian': [ct.CS_MODE_BIG_ENDIAN, kt.KS_MODE_BIG_ENDIAN],
           'littleendian': [ct.CS_MODE_LITTLE_ENDIAN, kt.KS_MODE_LITTLE_ENDIAN]
           }
def get_ct_arch(arch_str):
    """Return the capstone arch constant for *arch_str*, or None if unknown."""
    entry = ARCH_DIC.get(arch_str)
    return entry[0] if entry else None
def get_ct_mod(mod_str):
    """Return the capstone mode constant for *mod_str*, or None if unknown."""
    entry = MOD_DIC.get(mod_str)
    return entry[0] if entry else None
def get_kt_arch(arch_str):
    """Return the keystone arch constant for *arch_str*, or None if unknown."""
    entry = ARCH_DIC.get(arch_str)
    return entry[1] if entry else None
def get_kt_mod(mod_str):
    """Return the keystone mode constant for *mod_str*, or None if unknown."""
    entry = MOD_DIC.get(mod_str)
    return entry[1] if entry else None
def page(str, keywords=None, lines=25):
    """Print *str* a screenful at a time, ANSI-highlighting each keyword;
    typing 'q' at the --More-- prompt stops early.

    The mutable default argument (keywords=[]) was replaced with the None
    sentinel; behavior is unchanged.

    NOTE: the 'str' parameter shadows the builtin; kept for keyword-arg
    call compatibility.
    """
    if keywords is None:
        keywords = []
    for k in keywords:
        str = str.replace(k, highlight(k))
    text = str.split('\n')
    length = len(text)
    for linenum in range(length):
        print(text[linenum])
        # Pause every `lines` printed lines, skipping the first screen.
        if linenum % lines == 0 and linenum >= lines:
            key = input('--More-- (%d/%d)' % (linenum-1, length))
            if key == 'q':
                break
# linux ansicolor highlighting
def highlight(word, color='green'):
    """Wrap *word* in ANSI escape codes for *color* ('green' is the only
    supported color); any other color yields an empty string, matching the
    original behavior."""
    if color != "green":
        return ""
    return "\033[1;32m" + word + "\033[0m"
4861245 | <gh_stars>1-10
import sys
from tqdm import tqdm
# Sample run:
#   python meta_file_parser.py test_for_meta_parsing.txt meta_data/meta_file_monthly_ids_range.tsv

# argv[1]: the country/language tweet-id file to extract.
opened_file = open(sys.argv[1])
# argv[2]: the meta-file (monthly id ranges) as provided in the repo.
meta_file_opened = open(sys.argv[2])
# Maps monthly-file name -> [min_tweet_id, max_tweet_id].
meta_file_dict = {}
output_file = open(str(sys.argv[1].split('.')[0]) + "_required_monthly_files.txt", "w+")
# Insert/overwrite a value in a dictionary.
def insert_in_dict(dictionary, key, value):
    """Set dictionary[key] = value unless *key* is None and not already
    present, then return the dictionary.

    The original's duplicated branches and `!= None` comparison were
    collapsed; the quirk that an *existing* None key IS overwritten while
    a brand-new None key is ignored is preserved.
    """
    if key in dictionary or key is not None:
        dictionary[key] = value
    return dictionary
# start from the second line (skip the TSV header row)
next(meta_file_opened)
# Loading the meta-file into the name -> [min_id, max_id] dictionary.
for meta_line in meta_file_opened:
    meta_array = meta_line.strip().split('\t')
    # print(meta_array)
    insert_in_dict(meta_file_dict, meta_array[0], [int(meta_array[1]), int(meta_array[2])])
meta_file_opened.close()
print("\n")
print("File 'required_monthly_files.txt' is being generated, please wait...")
print("\n")
# Which monthly files need to be downloaded.
files_to_be_downloaded = set()
# Count lines by re-opening the file so opened_file is not exhausted.
num_lines = sum(1 for line in open(sys.argv[1],'r'))
# num_lines = sum(1 for line in opened_file)
# Determine which files are needed: for each tweet id, find every month
# whose [start_id, end_id] range contains it.
# with Bar('Processing...') as bar:
# for i in tqdm(range(100)):
for line in tqdm(opened_file, total=num_lines):
    tweet_id = int(line.strip())
    # NOTE(review): linear scan of every month per tweet id; fine for
    # small meta files, quadratic-ish for large inputs.
    for key in meta_file_dict:
        start_id = meta_file_dict[key][0]
        end_id = meta_file_dict[key][1]
        if(tweet_id >= start_id and tweet_id <= end_id):
            #files_to_be_downloaded.add(key.split('_')[0].capitalize() + " "+ key.split('_')[1])
            files_to_be_downloaded.add(key)
    # bar.next()
# Emit the matched monthly file names, sorted, one per line.
list_files = []
for value in files_to_be_downloaded:
    list_files.append(value)
list_files.sort()
for i in range(0, len(list_files)):
    # output in a text file
    output_file.write(list_files[i] + '\n')
opened_file.close()
output_file.close()
| StarcoderdataPython |
1820520 | from .common import AlterResource, CreateResource, DeleteResource, GetResource, ListResources
from .. import config, serializers
class StorageProviderMixin(object):
    """Shared serializer/URL plumbing for the storage-provider endpoints."""

    SERIALIZER_CLS = serializers.StorageProviderSchema

    @staticmethod
    def _get_api_url(**kwargs):
        # All storage-provider calls go to the config host.
        return config.config.CONFIG_HOST

    @staticmethod
    def get_request_url(**kwargs):
        """Return the collection URL, or the item URL when 'id' is given."""
        url = "/storageProviders"
        if kwargs.get("id"):
            url += "/{}".format(kwargs["id"])
        return url
class ListStorageProviders(StorageProviderMixin, ListResources):
    """GET the paginated, name-sorted list of storage providers."""

    def _get_request_params(self, kwargs):
        # NOTE(review): `or` treats an explicit limit of 0 as "unset"
        # (falling back to 20) — presumably fine since limit=0 is useless.
        return {
            "filter[limit]": kwargs.get("limit") or 20,
            "filter[skip]": kwargs.get("offset") or 0,
            "filter[order][]": "name ASC",
        }
class CreateStorageProvider(StorageProviderMixin, CreateResource):
    """POST a new storage provider; the created resource is handled by 'id'."""
    HANDLE_FIELD = "id"
class GetStorageProvider(StorageProviderMixin, GetResource):
    """GET a single storage provider by id (all behavior inherited)."""
    pass
class UpdateStorageProvider(StorageProviderMixin, AlterResource):
    """Update a storage provider; the JSON payload is kwargs minus 'id'."""

    def _get_request_json(self, kwargs):
        # The resource id travels in the URL, never in the body.
        return {key: value for key, value in kwargs.items() if key != "id"}
class DeleteStorageProvider(StorageProviderMixin, DeleteResource):
    """DELETE a storage provider by id (all behavior inherited)."""
    pass
| StarcoderdataPython |
4827491 | from typing import List
def cube(x):
    """Return x cubed (replaces the lambda assignment flagged by PEP 8 E731)."""
    return x ** 3
def fibonacci(n: int) -> List[int]:
    """Return the first *n* Fibonacci numbers, starting 0, 1."""
    seq: List[int] = []
    a, b = 0, 1
    for _ in range(n):
        seq.append(a)
        a, b = b, a + b
    return seq
if __name__ == '__main__':
    # Read n from stdin and print the cubes of the first n Fibonacci numbers.
    n = int(input())
    print(list(map(cube, fibonacci(n))))
9784084 | <reponame>AndreasKaratzas/stonne
from tools.codegen.model import *
from tools.codegen.api.types import TensorOptionsArguments, LegacyDispatcherArgument, ThisArgument
import tools.codegen.api.cpp as cpp
from typing import Union, Sequence
# This file describes the translation of JIT schema to the legacy
# dispatcher API. This looks a lot like the C++ API (which
# makes historical sense, because historically the dispatcher API
# and the C++ API exactly matched), but over time we have
# evolved the C++ API without actually changing our native::
# kernels. To be deleted eventually. Dispatcher calls use
# this when you are not use_c10_dispatcher: full.
def name(func: FunctionSchema) -> str:
    """Compute the legacy-dispatcher name for *func*: the base name plus an
    '_out' suffix for out-variants and the overload suffix when present."""
    result = str(func.name.name)
    # TODO: delete this!
    if func.is_out_fn():
        result += '_out'
    if func.name.overload_name:
        result += f'_{func.name.overload_name}'
    return result
def argumenttype_type(t: Type, *, mutable: bool) -> str:
    """Map a JIT type to its legacy-dispatcher C++ type string.

    Optional tensors and lists of optional tensors deliberately diverge
    from the modern C++ API (no optional wrapper here); everything else
    defers to the C++ lowering.
    """
    if str(t) == 'Tensor?':
        if mutable:
            return 'Tensor &'
        else:
            return 'const Tensor &'
    elif str(t) == 'Tensor?[]':
        return 'TensorList'
    return cpp.argumenttype_type(t, mutable=mutable)
def returns_type(rs: Sequence[Return]) -> str:
    """Legacy return types match the C++ API exactly; delegate."""
    return cpp.returns_type(rs)
def argument_type(a: Argument) -> str:
    """C++ type string for a single argument (mutable iff it is written to)."""
    return argumenttype_type(a.type, mutable=a.is_write)
def argument(a: Union[Argument, ThisArgument, TensorOptionsArguments]) -> LegacyDispatcherArgument:
    """Lower one JIT-schema argument to its legacy-dispatcher representation."""
    if isinstance(a, Argument):
        return LegacyDispatcherArgument(
            type=argument_type(a),
            name=a.name,
            default=cpp.default_expr(a.default, a.type) if a.default is not None else None,
            argument=a,
        )
    elif isinstance(a, ThisArgument):
        # Erase ThisArgument from the distinction: 'self' is an ordinary
        # argument in the legacy dispatcher.
        return LegacyDispatcherArgument(
            type=argument_type(a.argument),
            name=a.argument.name,
            default=None,
            argument=a.argument,
        )
    elif isinstance(a, TensorOptionsArguments):
        # TODO: expunge this logic entirely
        default = None
        if all(x.default == "None" for x in a.all()):
            default = '{}'
        elif a.dtype.default == "long":
            default = 'at::kLong'  # TODO: this is wrong
        return LegacyDispatcherArgument(
            type='const TensorOptions &',
            name='options',
            default=default,
            argument=a,
        )
    else:
        assert_never(a)
def arguments(func: FunctionSchema) -> Sequence[LegacyDispatcherArgument]:
    """Lower every grouped argument of *func* to its legacy representation."""
    return [argument(grouped) for grouped in cpp.group_arguments(func)]
| StarcoderdataPython |
6697182 | <filename>scripts/slave/build_scan_db.py<gh_stars>0
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Contains configuration and setup of a build scan database.
The database is a versioned JSON file built of NamedTuples.
"""
import collections
import json
import logging
import optparse
import os
import sys
from common import chromium_utils
from slave import gatekeeper_ng_config
# Directory containing this script; used to locate the default gatekeeper.json.
DATA_DIR = os.path.dirname(os.path.abspath(__file__))

# Bump each time there is an incompatible change in build_db.
BUILD_DB_VERSION = 5
# Raw record shapes; the JsonNode-mixing subclasses below are the public types.
_BuildDB = collections.namedtuple('BuildDB', [
    'build_db_version',  # An int representing the build_db version.
    'masters',  # {mastername: {buildername: {buildnumber: BuildDBBuild}}}}
    'sections',  # {section_hash: human_readable_json_of_gatekeeper_section}
    'aux',  # Dictionary to keep auxiliary information such as triggered
            # revisions.
])

_BuildDBBuild = collections.namedtuple('BuildDBBuild', [
    'finished',  # True if the build has finished, False otherwise.
    'succeeded',  # True if finished and would have not closed the tree.
    'triggered',  # {section: [steps which triggered the section]}
])
class JsonNode(object):
  """Allows for serialization of NamedTuples to JSON."""

  def _asdict(self):  # pylint: disable=R0201
    # Subclasses mix this in alongside a namedtuple whose own _asdict
    # takes precedence; this stub is the fallback.
    return {}

  # TODO(stip): recursively encode child nodes.
  def asJson(self):
    """Return a plain dict, recursing one level into JsonNode-like children."""
    nodes_to_encode = [(k, v) for k, v in self._asdict().iteritems()
                       if hasattr(v, 'asJson')]
    standard_nodes = [(k, v) for k, v in self._asdict().iteritems()
                      if not hasattr(v, 'asJson')]
    newly_encoded_nodes = [(k, v.asJson()) for k, v in nodes_to_encode]
    return dict(standard_nodes + newly_encoded_nodes)
class BuildDB(_BuildDB, JsonNode):
  """Top-level build database record (fields documented on _BuildDB)."""
  pass
class BuildDBBuild(_BuildDBBuild, JsonNode):
  """Per-build record (fields documented on _BuildDBBuild)."""
  pass
class BadConf(Exception):
  """Raised when a stored build_db file has an incompatible version."""
  pass
def gen_db(**kwargs):
  """Helper function to generate a default database.

  Any BuildDB field not supplied in kwargs is filled with its default;
  fresh containers are created per call so defaults are never shared.
  """
  defaults = {
      'build_db_version': BUILD_DB_VERSION,
      'masters': {},
      'sections': {},
      'aux': {},
  }
  for key, default in defaults.items():
    kwargs.setdefault(key, default)
  return BuildDB(**kwargs)
def gen_build(**kwargs):
  """Helper function to generate a default build.

  Any BuildDBBuild field not supplied in kwargs is filled with its default.
  """
  defaults = {
      'finished': False,
      'succeeded': False,
      'triggered': {},
  }
  for key, default in defaults.items():
    kwargs.setdefault(key, default)
  return BuildDBBuild(**kwargs)
def load_from_json(f):
  """Load a build from a JSON stream.

  Raises:
    BadConf: the stream's build_db_version does not match BUILD_DB_VERSION.
  """
  json_build_db = json.load(f)
  if json_build_db.get('build_db_version') != BUILD_DB_VERSION:
    raise BadConf('file is an older db version: %r (expecting %d)' % (
        json_build_db.get('build_db_version'), BUILD_DB_VERSION))
  masters = json_build_db.get('masters', {})
  # Convert build dicts into BuildDBBuilds.
  build_db = gen_db()
  for mastername, master in masters.iteritems():
    build_db.masters.setdefault(mastername, {})
    for buildername, builder in master.iteritems():
      build_db.masters[mastername].setdefault(buildername, {})
      for buildnumber, build in builder.iteritems():
        # Note that buildnumber is forced to be an int here, and
        # we use * instead of ** -- until the serializer is recursive,
        # BuildDBBuild will be written as a value list (tuple).
        build_db.masters[mastername][buildername][
            int(buildnumber)] = BuildDBBuild(*build)
  if 'aux' in json_build_db:
    build_db.aux.update(json_build_db['aux'])
  return build_db
def get_build_db(filename):
  """Open the build_db file.

  filename: the filename of the build db.

  Corrupt or version-incompatible files are moved aside to '<filename>.old'
  and a fresh empty database is returned instead of failing.
  """
  build_db = gen_db()

  if os.path.isfile(filename):
    print 'loading build_db from', filename
    try:
      with open(filename) as f:
        build_db = load_from_json(f)
    except BadConf as e:
      new_fn = '%s.old' % filename
      logging.warn('error loading %s: %s, moving to %s' % (
          filename, e, new_fn))
      chromium_utils.MoveFile(filename, new_fn)

  return build_db
def convert_db_to_json(build_db_data, gatekeeper_config, f):
  """Converts build_db to a format suitable for JSON encoding and writes it."""
  # Remove all but the last finished build.
  for builders in build_db_data.masters.values():
    for builder in builders:
      unfinished = [(k, v) for k, v in builders[builder].iteritems()
                    if not v.finished]
      finished = [(k, v) for k, v in builders[builder].iteritems()
                  if v.finished]
      builders[builder] = dict(unfinished)
      if finished:
        # Keep only the finished build with the highest build number.
        max_finished = max(finished, key=lambda x: x[0])
        builders[builder][max_finished[0]] = max_finished[1]

  build_db = gen_db(masters=build_db_data.masters, aux=build_db_data.aux)

  # Output the gatekeeper sections we're operating with, so a human reading the
  # file can debug issues. This is discarded by the parser in get_build_db.
  used_sections = set([])
  for masters in build_db_data.masters.values():
    for builder in masters.values():
      used_sections |= set(t for b in builder.values() for t in b.triggered)

  for master in gatekeeper_config.values():
    for section in master:
      section_hash = gatekeeper_ng_config.gatekeeper_section_hash(section)
      if section_hash in used_sections:
        build_db.sections[section_hash] = section

  json.dump(build_db.asJson(), f, cls=gatekeeper_ng_config.SetEncoder,
            sort_keys=True)
def save_build_db(build_db_data, gatekeeper_config, filename):
  """Save the build_db file.

  build_db: dictionary to jsonize and store as build_db.
  gatekeeper_config: the gatekeeper config used for this pass.
  filename: the filename of the build db.
  """
  print 'saving build_db to', filename
  with open(filename, 'wb') as f:
    convert_db_to_json(build_db_data, gatekeeper_config, f)
def main():
  """Parse CLI options, load the db and gatekeeper config, dump JSON to stdout."""
  prog_desc = 'Parses the build_db and outputs to stdout.'
  usage = '%prog [options]'
  parser = optparse.OptionParser(usage=(usage + '\n\n' + prog_desc))
  parser.add_option('--json', default=os.path.join(DATA_DIR, 'gatekeeper.json'),
                    help='location of gatekeeper configuration file')
  parser.add_option('--build-db', default='build_db.json',
                    help='records the build status information for builders')
  options, _ = parser.parse_args()

  build_db = get_build_db(options.build_db)
  gatekeeper_config = gatekeeper_ng_config.load_gatekeeper_config(options.json)

  convert_db_to_json(build_db, gatekeeper_config, sys.stdout)
  print

  return 0


if __name__ == '__main__':
  sys.exit(main())
| StarcoderdataPython |
6636607 | <reponame>AbinavRavi/PipelineDP
import abc
import typing
import pickle
from dataclasses import dataclass
from functools import reduce
import pipeline_dp
@dataclass
class AccumulatorParams:
    """Recipe for constructing one Accumulator: the concrete class plus the
    parameters to hand to its constructor."""
    # Concrete Accumulator subclass to instantiate.
    accumulator_type: type
    # Opaque constructor parameters forwarded to accumulator_type.
    constructor_params: typing.Any
def merge(accumulators: typing.Iterable['Accumulator']) -> 'Accumulator':
    """Merges the accumulators left-to-right into the first one.

    An empty iterable raises TypeError (from reduce), as before.
    """
    def _combine(left, right):
        return left.add_accumulator(right)
    return reduce(_combine, accumulators)
def create_accumulator_params(
    aggregation_params: pipeline_dp.AggregateParams,
    budget_accountant: pipeline_dp.BudgetAccountant
) -> typing.List[AccumulatorParams]:
    """Derive the AccumulatorParams list for the requested aggregation.

    Bug fix: the original `raise NotImplemented()` actually raised
    TypeError ('NotImplementedType' object is not callable) — NotImplemented
    is a sentinel value, not an exception. NotImplementedError is the
    correct placeholder exception.
    """
    raise NotImplementedError()  # implementation will be done later
class Accumulator(abc.ABC):
    """Base class for all accumulators.

    Accumulators are objects that encapsulate aggregations and computations of
    differential private metrics.
    """

    @abc.abstractmethod
    def add_value(self, value):
        """Adds the value to each of the accumulator.

        Args:
            value: value to be added.

        Returns: self.
        """
        pass

    @abc.abstractmethod
    def add_accumulator(self, accumulator: 'Accumulator') -> 'Accumulator':
        """Merges the accumulator to self and returns self.

        Sub-class implementation is responsible for checking that types of
        self and accumulator are the same.

        Args:
            accumulator: the other accumulator to fold into self.

        Returns: self
        """
        pass

    @abc.abstractmethod
    def compute_metrics(self):
        """Computes and returns the result of aggregation."""
        pass

    def serialize(self):
        """Return a pickled byte representation of this accumulator.

        NOTE(review): pickle is unsafe on untrusted data; only deserialize
        bytes produced by this pipeline.
        """
        return pickle.dumps(self)

    @classmethod
    def deserialize(cls, serialized_obj: str):
        """Unpickle *serialized_obj*, verifying the result is a *cls* instance."""
        deserialized_obj = pickle.loads(serialized_obj)
        if not isinstance(deserialized_obj, cls):
            raise TypeError("The deserialized object is not of the right type.")
        return deserialized_obj
class CompoundAccumulator(Accumulator):
    """Accumulator for computing multiple metrics.

    CompoundAccumulator contains one or more accumulators of other types for
    computing multiple metrics.
    For example it can contain [CountAccumulator, SumAccumulator].
    CompoundAccumulator delegates all operations to the internal accumulators.
    """

    def __init__(self, accumulators: typing.Iterable['Accumulator']):
        self.accumulators = accumulators

    def add_value(self, value):
        """Fan *value* out to every internal accumulator; returns self."""
        for accumulator in self.accumulators:
            accumulator.add_value(value)
        return self

    def add_accumulator(self, accumulator: 'CompoundAccumulator') -> \
            'CompoundAccumulator':
        """Merges the accumulators of the CompoundAccumulators.

        The expectation is that the internal accumulators are of the same type and
        are in the same order."""
        # Validate size and pairwise types first so a partial merge never occurs.
        if len(accumulator.accumulators) != len(self.accumulators):
            raise ValueError(
                "Accumulators in the input are not of the same size." +
                f" Expected size = {len(self.accumulators)}" +
                f" received size = {len(accumulator.accumulators)}.")
        for pos, (base_accumulator_type, to_add_accumulator_type) in enumerate(
                zip(self.accumulators, accumulator.accumulators)):
            if type(base_accumulator_type) != type(to_add_accumulator_type):
                raise TypeError(
                    "The type of the accumulators don't match at "
                    f"index {pos}. {type(base_accumulator_type).__name__} "
                    f"!= {type(to_add_accumulator_type).__name__}.")

        for (base_accumulator,
             to_add_accumulator) in zip(self.accumulators,
                                        accumulator.accumulators):
            base_accumulator.add_accumulator(to_add_accumulator)
        return self

    def compute_metrics(self):
        """Computes and returns a list of metrics computed by internal
        accumulators."""
        return [
            accumulator.compute_metrics() for accumulator in self.accumulators
        ]
class AccumulatorFactory:
    """Creates the Accumulator matching the configured AggregateParams and
    BudgetAccountant."""

    def __init__(self, params: pipeline_dp.AggregateParams,
                 budget_accountant: pipeline_dp.BudgetAccountant):
        self._params = params
        self._budget_accountant = budget_accountant

    def initialize(self):
        """Resolves the per-metric AccumulatorParams; call before create()."""
        self._accumulator_params = create_accumulator_params(
            self._params, self._budget_accountant)

    def create(self, values: typing.List) -> Accumulator:
        """Instantiates one accumulator per configured metric over `values`."""
        accumulators = [
            accumulator_param.accumulator_type(
                accumulator_param.constructor_params, values)
            for accumulator_param in self._accumulator_params
        ]
        # A lone accumulator does not need the CompoundAccumulator wrapper.
        if len(accumulators) == 1:
            return accumulators[0]
        return CompoundAccumulator(accumulators)
@dataclass
class CountParams:
    """Configuration for CountAccumulator; no options yet (placeholder)."""
    pass
class CountAccumulator(Accumulator):
    """Accumulator counting the number of values seen (not yet private)."""

    def __init__(self, params: CountParams, values):
        self._count = len(values)

    def add_value(self, value):
        """Counts one more value; returns self."""
        self._count += 1
        # BUG FIX: the base-class contract (and merge(), which chains the
        # return values) requires add_value to return self.
        return self

    def add_accumulator(self,
                        accumulator: 'CountAccumulator') -> 'CountAccumulator':
        self._count += accumulator._count
        return self

    def compute_metrics(self) -> float:
        # TODO: add differential privacy
        return self._count
@dataclass
class SumParams:
    """Configuration for SumAccumulator; no options yet (placeholder)."""
    pass
class SumAccumulator(Accumulator):
    """Accumulator computing the sum of values (not yet private)."""

    def __init__(self, params: SumParams, values):
        self._sum = sum(values)

    def add_value(self, value):
        self._sum += value
        # BUG FIX: base-class contract requires add_value to return self.
        return self

    def add_accumulator(self,
                        accumulator: 'SumAccumulator') -> 'SumAccumulator':
        self._sum += accumulator._sum
        # BUG FIX: without this return, merge() (which reduces over the
        # return values of add_accumulator) ends up returning None.
        return self

    def compute_metrics(self) -> float:
        # TODO: add differential privacy
        return self._sum
| StarcoderdataPython |
249805 |
# <NAME>
# # idealista HTML Selenium Scrapping to MongoDB
import pandas as pd
from selenium import webdriver
from bs4 import BeautifulSoup
from pymongo import MongoClient
from pprint import pprint
import datetime
import time
from selenium.webdriver.firefox.options import Options
client = MongoClient('')  # MongoDB instance URL
db = client['ub-ads']  # DB name
collection = db['idealistaAPI']  # Collection Name
#result = db.collection.create_index([('url', 1)],unique=True) #Create unique index
df = pd.read_csv('idealistaAPI-2018-Oct-30-1713.csv')  # Read CSV with all URLs to scrape
for url in df.url:
    user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.50 Safari/537.36'
    options = Options()
    options.headless = False
    # BUG FIX: the argument was the literal text 'user-agent={"user_agent"}';
    # interpolate the actual user_agent value instead.
    options.add_argument('user-agent={}'.format(user_agent))
    # NOTE(review): the first positional argument of webdriver.Firefox is not
    # the executable path (and the file points at a *chrome*driver) -- confirm.
    wd = webdriver.Firefox('/Users/marcoremane/Desktop/chromedriver', options=options)
    wd.get(url)
    html = wd.page_source
    parser = BeautifulSoup(html, "html.parser")
    all_features = []
    # Defensive default so the length checks below never see an unbound name.
    parser_HTML = []
    try:
        # Try to find our divs
        parser_HTML = parser.findAll('div', attrs={'class': 'details-property_features'})
    except AttributeError:
        # Can't find them? Feature list will be empty.
        pass
    # Found something? One div only?
    if len(parser_HTML) == 1:
        x = parser_HTML[0].findAll('li')
        basic_features = [item.get_text() for item in x]
        all_features = basic_features
    # Found something? Two divs?
    if len(parser_HTML) == 2:
        x = parser_HTML[0].findAll('li')
        y = parser_HTML[1].findAll('li')
        basic_features = [item.get_text() for item in x]
        extras = [item.get_text() for item in y]
        all_features = basic_features + extras
    # NOTE(review): "feautures" is misspelled but is the key actually stored
    # in MongoDB -- renaming it would change the collection schema; confirm
    # before fixing.
    doc = {
        "url": url,
        "feautures": all_features,
        "scrape_date": datetime.datetime.utcnow()
    }
    document_id = db.collection.insert_one(doc).inserted_id
    # BUG FIX: log the URL that was actually scraped instead of a hard-coded
    # listing URL.
    print(" #{0} - URL scrapped: {1} with features: {2}".format(document_id, url, all_features))
    time.sleep(60)
    wd.quit()
| StarcoderdataPython |
1718849 | """
Addon that restricts proxied traffic to a whitelist of URL patterns.
Requests whose URL matches none of the configured regexp patterns receive a
stub response with the configured status code instead of being forwarded.
(NOTE: this header previously described mitmproxy's har_dump.py example
script and did not match the code below.)
"""
import json
import base64
import typing
import tempfile
import re
from datetime import datetime
from datetime import timezone
import falcon
from mitmproxy import ctx
from mitmproxy import connections
from mitmproxy import version
from mitmproxy.utils import strutils
from mitmproxy.net.http import cookies
from mitmproxy import http
class WhiteListResource:
    """Falcon resource that configures the whitelist of a WhiteListAddOn."""

    def addon_path(self):
        return "whitelist"

    def __init__(self, white_list_addon):
        self.white_list_addon = white_list_addon

    def on_get(self, req, resp, method_name):
        # Dispatches GET /whitelist/<method_name> to the matching on_* handler.
        getattr(self, "on_" + method_name)(req, resp)

    def on_whitelist_requests(self, req, resp):
        """Enables the whitelist with the given patterns and status code."""
        raw_url_patterns = req.get_param('urlPatterns')
        status_code = req.get_param('statusCode')
        url_patterns = raw_url_patterns.strip("[]").split(",")
        url_patterns_compiled = []
        try:
            for raw_pattern in url_patterns:
                url_patterns_compiled.append(self.parse_regexp(raw_pattern))
        except re.error:
            raise falcon.HTTPBadRequest("Invalid regexp patterns")
        self.white_list_addon.white_list = {
            "status_code": status_code,
            "url_patterns": url_patterns_compiled
        }

    def on_add_whitelist_pattern(self, req, resp):
        """Appends a single compiled pattern to an already-enabled whitelist."""
        url_pattern = req.get_param('urlPattern')
        # BUG FIX: white_list is a dict, so key presence must be checked with
        # `in`; hasattr() always returned False for dict keys, which made this
        # endpoint reject every request even when the whitelist was enabled.
        if "status_code" not in self.white_list_addon.white_list \
                or "url_patterns" not in self.white_list_addon.white_list:
            raise falcon.HTTPBadRequest("Whitelist is disabled. Cannot add patterns to a disabled whitelist.")
        # BUG FIX: compile the pattern like on_whitelist_requests does;
        # WhiteListAddOn.request() calls .match() on every stored entry,
        # which a raw string does not support.
        try:
            compiled_pattern = self.parse_regexp(url_pattern)
        except re.error:
            raise falcon.HTTPBadRequest("Invalid regexp patterns")
        self.white_list_addon.white_list["url_patterns"].append(compiled_pattern)

    def on_enable_empty_whitelist(self, req, resp):
        """Enables the whitelist with no patterns (all requests filtered)."""
        status_code = req.get_param('statusCode')
        self.white_list_addon.white_list["url_patterns"] = []
        self.white_list_addon.white_list["status_code"] = status_code

    def on_disable_whitelist(self, req, resp):
        """Disables the whitelist entirely."""
        self.white_list_addon.white_list = {}

    def parse_regexp(self, raw_regexp):
        """Anchors the pattern with ^...$ (if not already) and compiles it."""
        if not raw_regexp.startswith('^'):
            raw_regexp = '^' + raw_regexp
        if not raw_regexp.endswith('$'):
            raw_regexp = raw_regexp + '$'
        return re.compile(raw_regexp)
class WhiteListAddOn:
    """mitmproxy addon that short-circuits requests not on the whitelist."""

    def __init__(self):
        self.num = 0
        self.white_list = {}

    def get_resource(self):
        """Returns the REST resource that manages this addon."""
        return WhiteListResource(self)

    def is_whitelist_enabled(self):
        """The whitelist is active once both configuration keys are set."""
        return ('status_code' in self.white_list
                and 'url_patterns' in self.white_list)

    def request(self, flow):
        if not self.is_whitelist_enabled():
            return
        matched = any(
            pattern.match(flow.request.url)
            for pattern in self.white_list['url_patterns'])
        if not matched:
            # Replace the response instead of forwarding the request upstream.
            flow.response = http.HTTPResponse.make(
                int(self.white_list['status_code']),
                b"",
                {"Content-Type": "text/html"}
            )
            flow.metadata['WhiteListFiltered'] = True
# Entry point read by mitmproxy's addon loader when the script is loaded
# (e.g. via `mitmdump -s ...`).
addons = [
    WhiteListAddOn()
]
| StarcoderdataPython |
8033787 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from datetime import timedelta
from uuid import uuid4
from django.test import TestCase
from django.utils.timezone import now
from kolibri.core.auth.test.test_api import FacilityFactory
from kolibri.core.auth.test.test_api import FacilityUserFactory
from kolibri.core.exams.models import Exam
from kolibri.core.logger import models
from kolibri.core.logger.utils.exam_log_migration import migrate_from_exam_logs
class SimpleForwardMigrateTestCase(TestCase):
    """Checks a single run of the ExamLog -> new-log-models migration."""

    @classmethod
    def setUpTestData(cls):
        cls.facility = FacilityFactory.create()
        coach = FacilityUserFactory.create(facility=cls.facility)
        cls.exam = Exam.objects.create(
            title="quiz", question_count=5, collection=cls.facility, creator=coach
        )
        # Three learners with four attempts apiece.
        for _ in range(3):
            learner = FacilityUserFactory.create(facility=cls.facility)
            exam_log = models.ExamLog.objects.create(user=learner, exam=cls.exam)
            for attempt_idx in range(4):
                models.ExamAttemptLog.objects.create(
                    item=str(attempt_idx),
                    user=learner,
                    examlog=exam_log,
                    start_timestamp=now(),
                    end_timestamp=now(),
                    correct=attempt_idx % 2,
                    content_id=uuid4().hex,
                    answer={"question": {"radio 1": {"numCorrect": 1}}},
                    interaction_history=[{"history_a": 1}, {"history_b": 1}],
                )
        migrate_from_exam_logs(models.ExamLog.objects.all())

    def test_masterylogs(self):
        mastery_logs = models.MasteryLog.objects.all()
        self.assertEqual(mastery_logs.count(), 3)
        self.assertEqual(mastery_logs.filter(complete=False).count(), 3)
        for mastery_log in mastery_logs:
            self.assertTrue(mastery_log.mastery_criterion["coach_assigned"])

    def test_attemptlogs(self):
        self.assertEqual(models.AttemptLog.objects.count(), 12)
        first_attempt = models.AttemptLog.objects.first()
        # Migrated items are namespaced as "<content_id>:<item>".
        self.assertEqual(len(first_attempt.item.split(":")), 2)
        for json_field in ("answer", "interaction_history"):
            self.assertNotIsInstance(getattr(first_attempt, json_field), (str,))

    def test_contentsessionlogs(self):
        session_logs = models.ContentSessionLog.objects
        self.assertEqual(session_logs.count(), 3)
        self.assertEqual(session_logs.filter(progress=0).count(), 3)

    def test_contentsummarylogs(self):
        summary_logs = models.ContentSummaryLog.objects
        self.assertEqual(summary_logs.count(), 3)
        self.assertEqual(summary_logs.filter(progress=0).count(), 3)
class RepeatedForwardMigrateTestCase(TestCase):
    """Verifies that running the exam-log migration twice is idempotent."""

    @classmethod
    def setUpTestData(cls):
        cls.facility = FacilityFactory.create()
        coach = FacilityUserFactory.create(facility=cls.facility)
        cls.exam = Exam.objects.create(
            title="quiz", question_count=5, collection=cls.facility, creator=coach
        )
        # Three learners with four attempts apiece.
        for _ in range(3):
            learner = FacilityUserFactory.create(facility=cls.facility)
            exam_log = models.ExamLog.objects.create(user=learner, exam=cls.exam)
            for attempt_idx in range(4):
                models.ExamAttemptLog.objects.create(
                    item=str(attempt_idx),
                    user=learner,
                    examlog=exam_log,
                    start_timestamp=now(),
                    end_timestamp=now(),
                    correct=attempt_idx % 2,
                    content_id=uuid4().hex,
                )
        # Run the migration twice; the second run must not duplicate logs.
        migrate_from_exam_logs(models.ExamLog.objects.all())
        migrate_from_exam_logs(models.ExamLog.objects.all())

    def test_masterylogs(self):
        mastery_logs = models.MasteryLog.objects
        self.assertEqual(mastery_logs.count(), 3)
        self.assertEqual(mastery_logs.filter(complete=False).count(), 3)

    def test_attemptlogs(self):
        self.assertEqual(models.AttemptLog.objects.count(), 12)

    def test_contentsessionlogs(self):
        session_logs = models.ContentSessionLog.objects
        self.assertEqual(session_logs.count(), 3)
        self.assertEqual(session_logs.filter(progress=0).count(), 3)

    def test_contentsummarylogs(self):
        summary_logs = models.ContentSummaryLog.objects
        self.assertEqual(summary_logs.count(), 3)
        self.assertEqual(summary_logs.filter(progress=0).count(), 3)
class UpdatedForwardMigrateTestCase(TestCase):
    """Migration run twice, with edits and new attempts between the runs.

    After the first migration the exam logs are closed, two existing attempts
    per learner are edited (one with a future end_timestamp, one with a past
    one) and one new attempt is added; the second run must pick up only the
    newer changes.
    """

    @classmethod
    def setUpTestData(cls):
        cls.facility = FacilityFactory.create()
        coach = FacilityUserFactory.create(facility=cls.facility)
        cls.exam = Exam.objects.create(
            title="quiz", question_count=5, collection=cls.facility, creator=coach
        )
        # Three learners with four attempts each.
        for i in range(0, 3):
            user = FacilityUserFactory.create(facility=cls.facility)
            examlog = models.ExamLog.objects.create(user=user, exam=cls.exam)
            for j in range(0, 4):
                models.ExamAttemptLog.objects.create(
                    item=str(j),
                    user=user,
                    examlog=examlog,
                    start_timestamp=now(),
                    end_timestamp=now(),
                    correct=j % 2,
                    content_id=uuid4().hex,
                )
        migrate_from_exam_logs(models.ExamLog.objects.all())
        # Close the exams, then mutate attempts to simulate later activity.
        models.ExamLog.objects.all().update(closed=True)
        for examlog in models.ExamLog.objects.all():
            # Edit with a *future* end_timestamp -- test_attemptlogs expects
            # this one to be propagated by the second migration.
            oldattempt = examlog.attemptlogs.first()
            oldattempt.end_timestamp = now() + timedelta(hours=1)
            oldattempt.answer = {"something": "something"}
            oldattempt.simple_answer = "test_filter"
            oldattempt.correct = True
            oldattempt.save()
            # Edit with a *past* end_timestamp -- test_attemptlogs expects
            # this one NOT to be propagated (count of 0 below).
            olderattempt = examlog.attemptlogs.last()
            olderattempt.end_timestamp = now() - timedelta(hours=1)
            olderattempt.answer = {"nothing": "nothing"}
            olderattempt.simple_answer = "test_none_filter"
            olderattempt.correct = 0
            olderattempt.save()
            # NOTE(review): `j` is the loop variable leaked from the inner
            # loop above (== 3 here) -- presumably unintentional; confirm.
            models.ExamAttemptLog.objects.create(
                item=str(j),
                user=examlog.user,
                examlog=examlog,
                start_timestamp=now(),
                end_timestamp=now(),
                correct=0,
                content_id=uuid4().hex,
            )
        migrate_from_exam_logs(models.ExamLog.objects.all())

    def test_masterylogs(self):
        self.assertEqual(models.MasteryLog.objects.all().count(), 3)
        # Exams were closed, so the mastery logs must now be complete.
        self.assertEqual(models.MasteryLog.objects.filter(complete=True).count(), 3)

    def test_attemptlogs(self):
        # 12 original + 3 added between migration runs.
        self.assertEqual(models.AttemptLog.objects.all().count(), 15)
        modified_attempts = models.AttemptLog.objects.filter(
            simple_answer="test_filter"
        )
        self.assertEqual(modified_attempts.count(), 3)
        for attempt in modified_attempts:
            self.assertEqual(attempt.answer, {"something": "something"})
            self.assertEqual(attempt.correct, True)
        # The stale (past-timestamped) edits must not have been migrated.
        unmodified_attempts = models.AttemptLog.objects.filter(
            simple_answer="test_none_filter"
        )
        self.assertEqual(unmodified_attempts.count(), 0)

    def test_contentsessionlogs(self):
        self.assertEqual(
            models.ContentSessionLog.objects.all().count(),
            3,
        )
        self.assertEqual(
            models.ContentSessionLog.objects.filter(progress=1).count(),
            3,
        )

    def test_contentsummarylogs(self):
        self.assertEqual(
            models.ContentSummaryLog.objects.all().count(),
            3,
        )
        self.assertEqual(
            models.ContentSummaryLog.objects.filter(progress=1).count(),
            3,
        )
class UpdatedExamAttemptLogOnlyForwardMigrateTestCase(TestCase):
    """Second migration driven only by explicit attempt-log ids.

    Unlike UpdatedForwardMigrateTestCase, the second migration call passes an
    empty ExamLog queryset plus the ids of the touched ExamAttemptLogs via
    `source_attempt_log_ids`.
    """

    @classmethod
    def setUpTestData(cls):
        cls.facility = FacilityFactory.create()
        coach = FacilityUserFactory.create(facility=cls.facility)
        cls.exam = Exam.objects.create(
            title="quiz", question_count=5, collection=cls.facility, creator=coach
        )
        # Three learners with four attempts each.
        for i in range(0, 3):
            user = FacilityUserFactory.create(facility=cls.facility)
            examlog = models.ExamLog.objects.create(user=user, exam=cls.exam)
            for j in range(0, 4):
                models.ExamAttemptLog.objects.create(
                    item=str(j),
                    user=user,
                    examlog=examlog,
                    start_timestamp=now(),
                    end_timestamp=now(),
                    correct=j % 2,
                    content_id=uuid4().hex,
                )
        migrate_from_exam_logs(models.ExamLog.objects.all())
        # Track every attempt touched after the first migration.
        updated_ids = []
        for examlog in models.ExamLog.objects.all():
            # Future-timestamped edit: expected to be migrated.
            oldattempt = examlog.attemptlogs.first()
            oldattempt.end_timestamp = now() + timedelta(hours=1)
            oldattempt.answer = {"something": "something"}
            oldattempt.simple_answer = "test_filter"
            oldattempt.correct = True
            oldattempt.save()
            updated_ids.append(oldattempt.id)
            # Past-timestamped edit: expected NOT to be migrated.
            olderattempt = examlog.attemptlogs.last()
            olderattempt.end_timestamp = now() - timedelta(hours=1)
            olderattempt.answer = {"nothing": "nothing"}
            olderattempt.simple_answer = "test_none_filter"
            olderattempt.correct = 0
            olderattempt.save()
            updated_ids.append(olderattempt.id)
            # NOTE(review): `j` is the loop variable leaked from the inner
            # loop above (== 3 here) -- presumably unintentional; confirm.
            newattempt = models.ExamAttemptLog.objects.create(
                item=str(j),
                user=examlog.user,
                examlog=examlog,
                start_timestamp=now(),
                end_timestamp=now(),
                correct=0,
                content_id=uuid4().hex,
            )
            updated_ids.append(newattempt.id)
        # Re-migrate only the explicitly listed attempt logs.
        migrate_from_exam_logs(
            models.ExamLog.objects.none(), source_attempt_log_ids=updated_ids
        )

    def test_masterylogs(self):
        self.assertEqual(models.MasteryLog.objects.all().count(), 3)

    def test_attemptlogs(self):
        # 12 original + 3 added between migration runs.
        self.assertEqual(models.AttemptLog.objects.all().count(), 15)
        modified_attempts = models.AttemptLog.objects.filter(
            simple_answer="test_filter"
        )
        self.assertEqual(modified_attempts.count(), 3)
        for attempt in modified_attempts:
            self.assertEqual(attempt.answer, {"something": "something"})
            self.assertEqual(attempt.correct, True)
        # The stale (past-timestamped) edits must not have been migrated.
        unmodified_attempts = models.AttemptLog.objects.filter(
            simple_answer="test_none_filter"
        )
        self.assertEqual(unmodified_attempts.count(), 0)

    def test_contentsessionlogs(self):
        self.assertEqual(
            models.ContentSessionLog.objects.all().count(),
            3,
        )

    def test_contentsummarylogs(self):
        self.assertEqual(
            models.ContentSummaryLog.objects.all().count(),
            3,
        )
| StarcoderdataPython |
11390514 | <filename>azure-mgmt-datafactory/azure/mgmt/datafactory/operations/__init__.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .operations import Operations
from .factories_operations import FactoriesOperations
from .integration_runtimes_operations import IntegrationRuntimesOperations
from .integration_runtime_nodes_operations import IntegrationRuntimeNodesOperations
from .linked_services_operations import LinkedServicesOperations
from .datasets_operations import DatasetsOperations
from .pipelines_operations import PipelinesOperations
from .pipeline_runs_operations import PipelineRunsOperations
from .activity_runs_operations import ActivityRunsOperations
from .triggers_operations import TriggersOperations
# Names re-exported as the public surface of this operations package;
# kept in sync with the imports above.
__all__ = [
    'Operations',
    'FactoriesOperations',
    'IntegrationRuntimesOperations',
    'IntegrationRuntimeNodesOperations',
    'LinkedServicesOperations',
    'DatasetsOperations',
    'PipelinesOperations',
    'PipelineRunsOperations',
    'ActivityRunsOperations',
    'TriggersOperations',
]
| StarcoderdataPython |
8009678 | from collections import deque
import sys
import numpy as np
import pylab as pl
def findpeaks_naive(f, W=100):
    """Yield the indices of peaks in `f`, brute force.

    An index is a peak when its value is >= every value within W positions
    on either side; only indices whose full window lies inside the data are
    considered. O(N*W) reference implementation.

    Args:
        f: the data
        W: (optional) half-width of the comparison window.

    Yields:
        int: index of each peak, in increasing order.
    """
    n = len(f)
    for mid in range(W, n - W):
        window = range(mid - W, mid + W + 1)
        if all(f[mid] >= f[k] for k in window):
            yield mid
def findpeaks(f, W=100):
    """
    Iterator to find the peaks in a set of data points.

    The algorithm uses a double ended queue. The front of the queue
    contains the index of a running maximum. When this index differs from
    the index at the back of the queue (the current position) by half a
    window width, a peak is located. The double ended queue method of
    computing a running maximum is described by
    Leet: http://articles.leetcode.com/sliding-window-maximum/

    Args:
        f: the data
        W: (optional) The half-width of the window. A
            peak must be greater than W elements to its left and W
            elements to its right.

    Yields:
        int: index of each peak, one at a time (streaming).
    """
    deq = deque()
    N = len(f)
    WW = 2 * W + 1  # full window width
    if N < WW:
        # BUG FIX: was `raise StopIteration`. Under PEP 479 (Python 3.7+)
        # raising StopIteration inside a generator becomes a RuntimeError;
        # a plain return ends the iterator cleanly. (A duplicate WW
        # assignment was also removed.)
        return
    for i in range(WW - 1):
        while len(deq) > 0 and f[i] > f[deq[-1]]:
            deq.pop()  # bump smaller values off the back
        deq.append(i)
    for i in range(WW - 1, N):
        while len(deq) > 0 and f[i] > f[deq[-1]]:
            deq.pop()  # bump smaller values off the back
        while len(deq) > 0 and deq[0] <= i - WW:
            deq.popleft()  # remove the front because the window is past
        deq.append(i)
        if deq[0] == i - W:
            yield deq[0]  # it's a peak if it's in the middle of the window
if __name__ == '__main__':
    usage = """
    Speed test:
    naive find peaks
    (sigproc) Joe ~/work $ time python {} 1
    real 0m9.287s
    user 0m9.224s
    sys 0m0.042s
    better find peaks
    (sigproc) Joe ~/work $ time python {} 2
    real 0m0.507s
    user 0m0.456s
    sys 0m0.037s
    """.format(sys.argv[0], sys.argv[0])
    showPlots = False  # make true to see data and peaks
    # Synthetic test signal: sum of two sinusoids over N samples.
    N = 200000
    W = 1000
    t = np.arange(N)
    T = 2000.0
    f = np.sin(2*np.pi*t/T) + 3*np.sin(3*np.pi*t/T)
    if len(sys.argv) > 1:
        choice = int(sys.argv[1])
    else:
        print(usage)
        sys.exit(0)
    if choice == 1:  # naive
        peaks = np.array(list(findpeaks_naive(f, W)))
    elif choice == 2:  # fast
        peaks = np.array(list(findpeaks(f, W)))
    else:
        # BUG FIX: this called the undefined name `printf` (NameError), and
        # execution then fell through to the code below with `peaks` unbound;
        # report the error and exit instead.
        print('Not a valid choice ', choice)
        sys.exit(1)
    if showPlots:
        pl.plot(t, f)
        pl.plot(peaks, f[peaks], 'y*')
        pl.show()
    print(peaks)
| StarcoderdataPython |
4894427 | <reponame>sonntagsgesicht/auxilium
# -*- coding: utf-8 -*-
# auxilium
# --------
# Python project for an automated test and deploy toolkit.
#
# Author: sonntagsgesicht
# Version: 0.2.4, copyright Wednesday, 20 October 2021
# Website: https://github.com/sonntagsgesicht/auxilium
# License: Apache License 2.0 (see LICENSE file)
from logging import log, INFO, DEBUG
from os import getcwd
from auxilium.tools.const import ICONS
from auxilium.tools.system_tools import module
# Log level used for the twine upload step below.
LEVEL = DEBUG
def deploy(usr, pwd, path=getcwd(), venv=None):
    """Upload the built distributions in ``dist/`` to pypi.org.

    Args:
        usr: PyPI user name.
        pwd: PyPI password.
            NOTE(review): the password is interpolated into the twine command
            line and may be visible in process listings -- consider twine's
            environment-variable or keyring authentication instead.
        path: project directory containing ``dist/``.
            NOTE(review): the default ``getcwd()`` is evaluated once at import
            time, not per call -- confirm that is intended.
        venv: virtual environment to run the tools in, if any.

    Returns:
        The result of the ``twine upload`` invocation (see `module`).
    """
    log(INFO, ICONS["deploy"] + 'deploy release on `pypi.org`')
    # check dist
    module('twine', 'check --strict dist/*', path=path, venv=venv)
    # push to pypi.org
    return module("twine", "upload -u %s -p %s dist/*" % (usr, pwd),
                  level=LEVEL, path=path, venv=venv)
| StarcoderdataPython |
5124108 | <reponame>BrancoLab/LocomotionControl<gh_stars>0
from math import sin, cos, atan2, sqrt, pi, hypot, acos
import numpy as np
from scipy.spatial.transform import Rotation as Rot
import sys
sys.path.append("./")
from control.paths.utils import mod2pi, pi_2_pi
from control.paths.waypoints import Waypoints, Waypoint
from geometry import Path
"""
Code adapted from: https://github.com/zhm-real/CurvesGenerator
zhm-real shared code to create different types of paths under an MIT license.
The logic of the code is left un-affected here, I've just refactored it.
"""
# utility
def interpolate(ind, l, m, maxc, ox, oy, oyaw, px, py, pyaw, directions):
    """Write the pose reached after travelling `l` along one segment.

    Fills slot `ind` of the output arrays (mutated in place) with the pose
    reached from the segment-start pose (ox, oy, oyaw).

    Args:
        ind: index in the output arrays to fill.
        l: signed distance travelled; for straights it is divided by `maxc`,
            for arcs it is used as the swept angle.
        m: segment type -- "S" straight, "L" left turn, "R" right turn.
        maxc: maximum curvature (1 / turning radius).
        ox, oy, oyaw: pose at the start of the segment.
        px, py, pyaw, directions: output arrays, mutated in place.

    Returns:
        The same px, py, pyaw, directions lists.
    """
    if m == "S":
        px[ind] = ox + l / maxc * cos(oyaw)
        py[ind] = oy + l / maxc * sin(oyaw)
        pyaw[ind] = oyaw
    else:
        # Local displacement of the arc end-point, then rotate into the
        # segment's frame and translate by its origin.
        ldx = sin(l) / maxc
        if m == "L":
            ldy = (1.0 - cos(l)) / maxc
        elif m == "R":
            ldy = (1.0 - cos(l)) / (-maxc)
        gdx = cos(-oyaw) * ldx + sin(-oyaw) * ldy
        gdy = -sin(-oyaw) * ldx + cos(-oyaw) * ldy
        px[ind] = ox + gdx
        py[ind] = oy + gdy
        if m == "L":
            pyaw[ind] = oyaw + l
        elif m == "R":
            pyaw[ind] = oyaw - l
    # Direction flag: +1 forward, -1 backward, from the sign of l.
    if l > 0.0:
        directions[ind] = 1
    else:
        directions[ind] = -1
    return px, py, pyaw, directions
# ---------------------------------------------------------------------------- #
# PLANNERS #
# ---------------------------------------------------------------------------- #
class Planner:
    """Base class for the six Dubins word planners (LSL, RSR, ...)."""

    tag = ["N", "a", "N"]

    def __call__(self, alpha, beta, dist):
        """Run the planner; returns (t, p, q, tag)."""
        segment_lengths = self.fit(alpha, beta, dist)
        return (
            segment_lengths[0],
            segment_lengths[1],
            segment_lengths[2],
            self.tag,
        )

    @staticmethod
    def _calc_sines(alpha, beta):
        """Pre-compute the trig terms shared by every planner."""
        return (
            sin(alpha),
            sin(beta),
            cos(alpha),
            cos(beta),
            cos(alpha - beta),
        )

    def fit(self, alpha, beta, dist):
        """Segment lengths (t, p, q) for this word; subclasses implement."""
        raise NotImplementedError
class LSL(Planner):
    """Left-Straight-Left Dubins word."""

    tag = ["L", "S", "L"]

    def fit(self, alpha, beta, dist):
        """Return (t, p, q), or (None, None, None) when infeasible."""
        sin_a, sin_b, cos_a, cos_b, cos_a_b = self._calc_sines(alpha, beta)
        p_squared = 2 + dist ** 2 - 2 * cos_a_b + 2 * dist * (sin_a - sin_b)
        if p_squared < 0:
            return None, None, None
        heading = atan2(cos_b - cos_a, dist + sin_a - sin_b)
        return (
            mod2pi(-alpha + heading),
            sqrt(p_squared),
            mod2pi(beta - heading),
        )
class RSR(Planner):
    """Right-Straight-Right Dubins word."""

    tag = ["R", "S", "R"]

    def fit(self, alpha, beta, dist):
        """Return (t, p, q), or (None, None, None) when infeasible."""
        sin_a, sin_b, cos_a, cos_b, cos_a_b = self._calc_sines(alpha, beta)
        p_squared = 2 + dist ** 2 - 2 * cos_a_b + 2 * dist * (sin_b - sin_a)
        if p_squared < 0:
            return None, None, None
        heading = atan2(cos_a - cos_b, dist - sin_a + sin_b)
        return (
            mod2pi(alpha - heading),
            sqrt(p_squared),
            mod2pi(-beta + heading),
        )
class LSR(Planner):
    """Left-Straight-Right Dubins word."""

    tag = ["L", "S", "R"]

    def fit(self, alpha, beta, dist):
        """Return (t, p, q), or (None, None, None) when infeasible."""
        sin_a, sin_b, cos_a, cos_b, cos_a_b = self._calc_sines(alpha, beta)
        p_squared = -2 + dist ** 2 + 2 * cos_a_b + 2 * dist * (sin_a + sin_b)
        if p_squared < 0:
            return None, None, None
        straight = sqrt(p_squared)
        rec = atan2(-cos_a - cos_b, dist + sin_a + sin_b) - atan2(-2.0, straight)
        return (
            mod2pi(-alpha + rec),
            straight,
            mod2pi(-mod2pi(beta) + rec),
        )
class RSL(Planner):
    """Right-Straight-Left Dubins word."""

    tag = ["R", "S", "L"]

    def fit(self, alpha, beta, dist):
        """Return (t, p, q), or (None, None, None) when infeasible."""
        sin_a, sin_b, cos_a, cos_b, cos_a_b = self._calc_sines(alpha, beta)
        p_squared = -2 + dist ** 2 + 2 * cos_a_b - 2 * dist * (sin_a + sin_b)
        if p_squared < 0:
            return None, None, None
        straight = sqrt(p_squared)
        rec = atan2(cos_a + cos_b, dist - sin_a - sin_b) - atan2(2.0, straight)
        return (
            mod2pi(alpha - rec),
            straight,
            mod2pi(beta - rec),
        )
class RLR(Planner):
    """Right-Left-Right Dubins word."""

    tag = ["R", "L", "R"]

    def fit(self, alpha, beta, dist):
        """Return (t, p, q), or (None, None, None) when infeasible."""
        sin_a, sin_b, cos_a, cos_b, cos_a_b = self._calc_sines(alpha, beta)
        rec = (
            6.0 - dist ** 2 + 2.0 * cos_a_b + 2.0 * dist * (sin_a - sin_b)
        ) / 8.0
        if abs(rec) > 1.0:
            return None, None, None
        middle = mod2pi(2 * pi - acos(rec))
        first = mod2pi(
            alpha
            - atan2(cos_a - cos_b, dist - sin_a + sin_b)
            + mod2pi(middle / 2.0)
        )
        last = mod2pi(alpha - beta - first + mod2pi(middle))
        return first, middle, last
class LRL(Planner):
    """Left-Right-Left Dubins word."""

    tag = ["L", "R", "L"]

    def fit(self, alpha, beta, dist):
        """Return (t, p, q), or (None, None, None) when infeasible."""
        sin_a, sin_b, cos_a, cos_b, cos_a_b = self._calc_sines(alpha, beta)
        rec = (
            6.0 - dist ** 2 + 2.0 * cos_a_b + 2.0 * dist * (sin_b - sin_a)
        ) / 8.0
        if abs(rec) > 1.0:
            return None, None, None
        middle = mod2pi(2 * pi - acos(rec))
        first = mod2pi(
            -alpha - atan2(cos_a - cos_b, dist + sin_a - sin_b) + middle / 2.0
        )
        last = mod2pi(mod2pi(beta) - alpha - first + mod2pi(middle))
        return first, middle, last
# ---------------------------------------------------------------------------- #
# DUBIN #
# ---------------------------------------------------------------------------- #
class DubinPath:
    """Piecewise Dubins path through an ordered list of waypoints.

    Each consecutive waypoint pair is connected with the cheapest of the six
    Dubins words (LSL, RSR, LSR, RSL, RLR, LRL), subject to `max_curvature`.
    """

    def __init__(self, waypoints: Waypoints, max_curvature: float = 0.2):
        self.waypoints = waypoints
        self.max_curvature = max_curvature  # 1 / minimum turning radius
        self.planners = [LSL(), RSR(), LSR(), RSL(), RLR(), LRL()]
        # store path variables
        self.x = []
        self.y = []
        self.theta = []
        self.lengths = []  # length of each segment
        self.mode = []  # type of each segment

    def generate_local_course(self, L, lengths, mode, step_size: float = 0.1):
        """Sample poses along the word described by `lengths`/`mode`.

        Args:
            L: total (curvature-normalised) length of the word.
            lengths: per-segment lengths (t, p, q).
            mode: per-segment types, e.g. ["L", "S", "L"].
            step_size: sampling interval along the path.

        Returns:
            (px, py, pyaw, directions) lists; all empty when no point could
            be generated.
        """
        point_num = int(L / step_size) + len(lengths) + 3
        px = [0.0 for _ in range(point_num)]
        py = [0.0 for _ in range(point_num)]
        pyaw = [0.0 for _ in range(point_num)]
        directions = [0 for _ in range(point_num)]
        ind = 1
        if lengths[0] > 0.0:
            directions[0] = 1
        else:
            directions[0] = -1
        if lengths[0] > 0.0:
            d = step_size
        else:
            d = -step_size
        ll = 0.0
        for m, l, i in zip(mode, lengths, range(len(mode))):
            # Step sign follows the sign of this segment's length.
            if l > 0.0:
                d = step_size
            else:
                d = -step_size
            ox, oy, oyaw = px[ind], py[ind], pyaw[ind]
            ind -= 1
            # Carry over the remainder `ll` from the previous segment;
            # NOTE(review): the index bookkeeping here (ind -= 1 then
            # re-increment inside the loop) is delicate -- do not reorder.
            if i >= 1 and (lengths[i - 1] * lengths[i]) > 0:
                pd = -d - ll
            else:
                pd = d - ll
            while abs(pd) <= abs(l):
                ind += 1
                px, py, pyaw, directions = interpolate(
                    ind,
                    pd,
                    m,
                    self.max_curvature,
                    ox,
                    oy,
                    oyaw,
                    px,
                    py,
                    pyaw,
                    directions,
                )
                pd += d
            ll = l - pd - d  # calc remain length
            ind += 1
            # Always emit the exact segment end-point.
            px, py, pyaw, directions = interpolate(
                ind,
                l,
                m,
                self.max_curvature,
                ox,
                oy,
                oyaw,
                px,
                py,
                pyaw,
                directions,
            )
        if len(px) <= 1:
            return [], [], [], []
        # remove unused data
        while len(px) >= 1 and px[-1] == 0.0:
            px.pop()
            py.pop()
            pyaw.pop()
            directions.pop()
        return px, py, pyaw, directions

    def planning_from_origin(self, gx, gy, gtheta):
        """Plan a Dubins path from the origin pose to (gx, gy, gtheta).

        Tries all six planners and keeps the word with the smallest total
        normalised length.

        Returns:
            (x_list, y_list, theta_list, best_mode, best_cost)
        """
        D = hypot(gx, gy)
        d = D * self.max_curvature  # distance in curvature-normalised units
        theta = mod2pi(atan2(gy, gx))
        alpha = mod2pi(-theta)
        beta = mod2pi(gtheta - theta)
        best_cost = float("inf")
        bt, bp, bq, best_mode = None, None, None, None
        for planner in self.planners:
            t, p, q, mode = planner(alpha, beta, d)
            # t is None when this word is infeasible for these endpoints.
            if t is None:
                continue
            cost = abs(t) + abs(p) + abs(q)
            if best_cost > cost:
                bt, bp, bq, best_mode = t, p, q, mode
                best_cost = cost
        lengths = [bt, bp, bq]
        x_list, y_list, theta_list, directions = self.generate_local_course(
            sum(lengths), lengths, best_mode,
        )
        return x_list, y_list, theta_list, best_mode, best_cost

    def fit_segment(self, wp1: Waypoint, wp2: Waypoint):
        """Plan the Dubins segment wp1 -> wp2 and append it to the path.

        The goal is expressed in wp1's local frame, planned from the origin,
        then rotated/translated back into world coordinates.
        """
        gx = wp2.x - wp1.x
        gy = wp2.y - wp1.y
        theta1, theta2 = np.radians(wp1.theta), np.radians(wp2.theta)
        l_rot = Rot.from_euler("z", theta1).as_matrix()[0:2, 0:2]
        le_xy = np.stack([gx, gy]).T @ l_rot
        le_theta = theta2 - theta1
        # NOTE(review): planning_from_origin returns the path *cost* as its
        # last value, so self.lengths accumulates costs, not geometric
        # lengths -- confirm the naming.
        lp_x, lp_y, lp_theta, mode, lengths = self.planning_from_origin(
            le_xy[0], le_xy[1], le_theta
        )
        rot = Rot.from_euler("z", -theta1).as_matrix()[0:2, 0:2]
        converted_xy = np.stack([lp_x, lp_y]).T @ rot
        x = converted_xy[:, 0] + wp1.x
        y = converted_xy[:, 1] + wp1.y
        theta = [pi_2_pi(i_yaw + theta1) for i_yaw in lp_theta]
        # add to path (end-points trimmed to avoid duplicates between segments)
        self.x.extend(list(x)[1:-1])
        self.y.extend(list(y)[1:-1])
        self.theta.extend(list(theta)[1:-1])
        self.mode.extend(list(mode)[1:-1])
        self.lengths.append(lengths)

    def fit(self) -> Path:
        """Fit segments between consecutive waypoints; return the Path."""
        for i in range(len(self.waypoints) - 1):
            self.fit_segment(self.waypoints[i], self.waypoints[i + 1])
        return Path(self.x, self.y, self.theta)
if __name__ == "__main__":
    # Demo: overlay a fitted Dubins path on recorded locomotion tracking.
    import sys
    sys.path.append("./")
    import matplotlib.pyplot as plt
    import pandas as pd
    import draw
    f, ax = plt.subplots(figsize=(7, 10))
    # load and draw tracking data
    from fcutils.path import files
    # NOTE(review): hard-coded absolute user path; this demo only runs on
    # the author's machine.
    for fp in files(
        "/Users/federicoclaudi/Dropbox (UCL)/Rotation_vte/Locomotion/analysis/control/",
        "*.h5",
    ):
        tracking = pd.read_hdf(fp, key="hdf")
        # Mirror the x coordinate about x = 20 (i.e. x -> 40 - x).
        tracking.x = 20 - tracking.x + 20
        # draw.Tracking.scatter(tracking.x, tracking.y, c=tracking.theta, vmin=0, vmax=360, cmap='bwr', lw=1, ec='k')
        draw.Tracking(tracking.x, tracking.y, alpha=0.7)
    # draw hairpin arena
    draw.Hairpin(ax)
    # draw waypoints
    wps = Waypoints()
    for wp in wps:
        draw.Arrow(wp.x, wp.y, wp.theta, 2, width=4, color="g")
    # fit dubin path
    dubin = DubinPath(wps).fit()
    draw.Tracking(dubin.x, dubin.y, lw=2, color="k")
    plt.show()
| StarcoderdataPython |
3206276 | <gh_stars>1-10
import csv
import os.path
import matplotlib
matplotlib.rcParams.update({'font.size': 20})
from matplotlib import pyplot as plt
import numpy as np

# Evaluation-plot configuration: which metrics to read from result.csv,
# which experiment/policies to compare, and plot styling.
itr_interval = 200
max_itr = 1e4
# Metric columns to plot (commented entries are available but disabled).
fields = [
    # 'Return',
    'Success Rate',
    # 'Collision Rate',
    'Inference Accuracy'
]
# Human-readable names matching `fields` one-to-one.
field_names = [
    # 'Evaluation Average Return',
    'Evaluation Success Rate',
    # 'Evaluation Collision Rate',
    'Latent Inference Accuracy',
]
itr_name = 'epoch'
min_loss = [-1000]*100
max_loss = [1000]*100
exp_name = "t_intersection_lstm4noise0.05yld0.5ds0.1"
prepath = "./Data/"+exp_name+"/Eval"
plot_path = "./Data/"+exp_name+"/Eval"
# Alternative sweep configurations (yld drift / dfd drift) are kept below,
# commented out; the active sweep is the front-gap-interval (dfi) drift.
# result_paths = [
# 'noise0.05yld0.2ds0.1dfd0.1dfi0.3epoch500',
# 'noise0.05yld0.4ds0.1dfd0.1dfi0.3epoch500',
# 'noise0.05yld0.6ds0.1dfd0.1dfi0.3epoch500',
# 'noise0.05yld0.8ds0.1dfd0.1dfi0.3epoch500',]
# ylabels = ['Reletive Success Rate','Reletive Inference Accuracy']
# xlabel = 'P(CONSERVATIVE)'
# xs = [0.2,0.4,0.6,0.8]
# extra_name = 'yld_drift'
result_paths = [
    'noise0.05yld0.5ds0.1dfd0.1dfi0.1epoch500',
    'noise0.05yld0.5ds0.1dfd0.1dfi0.3epoch500',
    'noise0.05yld0.5ds0.1dfd0.1dfi0.5epoch500',
    ]
# NOTE(review): "Reletive" is a typo for "Relative" in the displayed labels.
ylabels = ['Reletive Success Rate','Reletive Inference Accuracy']
xlabel = 'Front Gap Sample Interval'
xs = [0.1,0.3,0.5]
extra_name = 'dfi_drift'
# result_paths = [
# 'noise0.05yld0.5ds0.1dfd0.05dfi0.3epoch500',
# 'noise0.05yld0.5ds0.1dfd0.1dfi0.3epoch500',
# 'noise0.05yld0.5ds0.1dfd0.3dfi0.3epoch500',
# 'noise0.05yld0.5ds0.1dfd0.5dfi0.3epoch500',
# ]
# ylabels = ['Reletive Success Rate','Reletive Inference Accuracy']
# xlabel = 'Front Gap Mean Difference'
# xs = [0.05,0.1,0.3,0.5]
# extra_name = 'dfd_drift'
# Policy run directories (names encode architecture and hyperparameters).
policies = [
    'PPOlayer1hidden48ep5000',
    'PPOSupVanillalayer1hidden48ep5000',
    'PPOSuplayer1hidden48ep5000',
    'PPOSupSep2layer1hidden28ep5000',
    'PPOGNN2llayer1hidden24GSagenode24glayer3actreluep5000',
    'PPOSupVanillaGNN2llayer1hidden24GSagenode24glayer3actreluep5000',
    'PPOSupGNN2llayer1hidden24GSagenode24glayer3actreluep5000',
    'PPOSupSep2GNN2llayer1hidden18GSagenode18glayer3actreluep5000',
    'PPOSupSep2LSTMGNN2layer1hidden28GSagenode18glayer3suphidden18suplayer1actreluep5000',
    ]
# Legend labels matching `policies` one-to-one.
policy_names = [
    'PPO + LSTM',
    'PPO + LSTM \nShared Inference + LSTM',
    'PPO + LSTM \nCoupled Inference + LSTM',
    'PPO + LSTM \nSeparated Inference + LSTM',
    'PPO + STGSage',
    'PPO + STGSage \nShared Inference + STGSage',
    'PPO + STGSage \nCoupled Inference + STGSage',
    'PPO + STGSage \nSeparated Inference + STGSage',
    'PPO + LSTM \nSeparated Inference + STGSage',
    ]
# One matplotlib color per policy.
colors = [
    'C0',
    'C1',
    'C2',
    'C3',
    'C4',
    'C5',
    'C6',
    'C7',
    'C8',
    ]
seeds = [0,1,2]
pre_name = ''
post_name = ''
# Collect per-policy metric values from each experiment's result.csv.
# results[policy][field] ends up as a flat list ordered by (x value, seed).
results = {}
for policy in policies:
    results[policy] = {}
    for field in fields:
        results[policy][field] = []
for result_path in result_paths:
    file_path = prepath + '/' + result_path + '/result.csv'
    with open(file_path) as csv_file:
        # Some result files contain stray NUL bytes which break csv.reader.
        # Bug fix: probe for them through the already-open handle instead of
        # `open(file_path).read()`, which leaked a second file descriptor.
        if '\0' in csv_file.read():
            print("you have null bytes in your input file")
            csv_file.seek(0)
            csv_reader = csv.reader(x.replace('\0', '') for x in csv_file)
        else:
            csv_file.seek(0)
            csv_reader = csv.reader(csv_file, delimiter=',')
        for (i, row) in enumerate(csv_reader):
            if i == 0:
                # Header row: map column names to their indices.
                entry_dict = {}
                for index in range(len(row)):
                    entry_dict[row[index]] = index
                # print(entry_dict)
            else:
                # "Policy" column encodes "<policy>_<seed>_<suffix>".
                policy, seed, _ = row[entry_dict['Policy']].split('_')
                if policy in policies:
                    for field in fields:
                        results[policy][field].append(float(row[entry_dict[field]]))
fig = plt.figure(figsize=(10*len(fields),5))
plot_names = []
# Per-field mean over all policies and seeds, used to normalise every curve.
avg_losses = {}
for field in fields:
    Base_Losses = []
    for p in policies:
        # Skip policies without data for this field.
        if np.sum(results[p][field]) == 0:
            continue
        # Reshape the flat list into (seeds, xs).
        Base_Losses.append(np.reshape(results[p][field],(len(xs),len(seeds))).transpose())
    avg_losses[field] = np.mean(np.mean(Base_Losses, 0), 0)
# One subplot per field: mean +/- std error bars per policy, normalised.
for fid,field in enumerate(fields):
    print(field)
    plt.subplot(1,len(fields),fid+1)
    # fig = plt.figure(fid)
    legends = []
    plts = []
    plot_names.append(extra_name+field_names[fid])
    for (policy_index,policy) in enumerate(policies):
        Losses = np.reshape(results[policy][field],(len(xs),len(seeds))).transpose()
        Losses = Losses/avg_losses[field]
        if np.sum(Losses) == 0:
            continue
        # print(policy,field,Losses)
        # Mean and std across seeds (axis 0).
        y = np.mean(Losses,0)
        yerr = np.std(Losses,0)
        # plot, = plt.plot(itrs,y,colors[policy_index])
        plt.errorbar(xs,y,yerr,color=colors[policy_index], fmt='o-', markersize=3, capsize=10,label=policy_names[policy_index])
        # plot, = plt.plot(xs,y,colors[policy_index],label=policy_names[policy_index],marker='o')
        # plt.fill_between(xs,y+yerr,y-yerr,linewidth=0,
        #                  facecolor=colors[policy_index],alpha=0.3)
        legends.append(policy_names[policy_index])
    # if fid == len(fields)-1:
    #     plt.legend(plts,legends, bbox_to_anchor=(1.01, 1), loc='upper left')
    plt.xlabel(xlabel)
    plt.ylabel(ylabels[fid])
    # fig.savefig(plot_path+'/'+plot_names[fid]+'.pdf',bbox_inches='tight')
    # plt.close(fig)
# Shared legend above the subplots, then save the combined figure.
ax = plt.gca()
handles, labels = ax.get_legend_handles_labels()
# plt.legend(handles, labels, bbox_to_anchor=(1.01, 1), loc='upper left')
# fig.legend(handles, labels, bbox_to_anchor=(0.5, 1.1), loc='upper center', ncol=len(labels))
fig.legend(handles, labels, bbox_to_anchor=(0.5, 1.3), loc='upper center', ncol=4)
fig.savefig(plot_path+'/'+extra_name+'.pdf',bbox_inches='tight')
plt.close(fig)
1621331 | <reponame>ncrubin/chemftr
"""Test cases for util.py
"""
from chemftr.utils import QR, QI, QR2, QI2, power_two
def test_QR():
    """QR(L, M) gives the minimum cost for a QROM over L values of size M."""
    # Expected values checked against the Mathematica notebook `costingTHC.nb`;
    # the arguments themselves are otherwise arbitrary.
    cases = (
        ((12341234, 5670), (6, 550042)),
        ((12201990, 520199), (2, 4611095)),
    )
    for args, expected in cases:
        assert QR(*args) == expected
def test_QI():
    """QI(L) gives the minimum cost for an inverse QROM over L values."""
    # Expected values checked against the Mathematica notebook `costingTHC.nb`.
    cases = (
        (987654, (10, 1989)),
        (8052021, (11, 5980)),
    )
    for arg, expected in cases:
        assert QI(arg) == expected
def test_QR2():
    """QR2 gives the minimum cost for a QROM with two registers."""
    # Expected values checked against the Mathematica notebook `costingsf.nb`.
    cases = (
        ((12, 34, 81), (2, 2, 345)),
        ((712, 340111, 72345), (4, 16, 8341481)),
    )
    for args, expected in cases:
        assert QR2(*args) == expected
def test_QI2():
    """QI2 gives the minimum cost for an inverse QROM with two registers."""
    # Expected values checked against the Mathematica notebook `costingsf.nb`.
    cases = (
        ((1234, 5678), (32, 64, 5519)),
        ((7120, 1340111), (4, 32768, 204052)),
    )
    for args, expected in cases:
        assert QI2(*args) == expected
def test_power_two():
    """power_two(m) returns the exponent of the largest power of 2 dividing m."""
    # Bug fix: the original wrapped the negative-input call in try/except but
    # would silently pass when no AssertionError was raised; fail explicitly.
    try:
        power_two(-1234)
    except AssertionError:
        pass
    else:
        raise AssertionError("power_two(-1234) should raise for negative input")
    assert power_two(0) == 0
    assert power_two(2) == 1
    assert power_two(3) == 0
    assert power_two(104) == 3  # 2**3 * 13
    assert power_two(128) == 7  # 2**7
    assert power_two(393120) == 5  # 2**5 * 3**3 * 5 * 7 * 13
| StarcoderdataPython |
5189803 | from ....expressions import Symbol, Constant, FunctionApplication as Fa
from ....expression_walker import PatternWalker, add_match
from ..chart_parser import (
Grammar,
Rule,
RootRule,
ChartParser,
_lu,
DictLexicon,
)
# Grammar non-terminals.
S = Symbol("S")
NP = Symbol("NP")
PN = Symbol("PN")
VP = Symbol("VP")
V = Symbol("V")
# Unification variables used in the rules' feature slots.
a = Symbol("a")
b = Symbol("b")
w = Symbol("w")
# Number-agreement feature values.
plural = Constant("plural")
singular = Constant("singular")

# Toy English fragment: subject NP and VP must agree in number, and a
# conjoined NP ("X and Y") is always plural.
TestGrammar = Grammar(
    (
        RootRule(S(a), (NP(a), VP(a))),
        Rule(VP(a), (V(a), NP(b))),
        Rule(NP(plural), (NP(a), Constant("and"), NP(b))),
        Rule(NP(a), (PN(a),)),
    )
)

# Word -> lexical categories (with number features).
test_lexicon = DictLexicon(
    {
        "owns": (V(singular),),
        "own": (V(plural),),
        "Jones": (PN(singular),),
        "Smith": (PN(singular),),
        "Ulysses": (PN(singular),),
    }
)
def test_mgu():
    """Unification succeeds for a bare variable and for applied terms."""
    assert _lu.unify(a, Constant("hehe")) is not None
    assert _lu.unify(NP(a), NP(Constant("hoho"))) is not None
def test_recognize():
    """The parser accepts number-agreeing sentences and rejects mismatches."""
    parser = ChartParser(TestGrammar, test_lexicon)
    assert parser.recognize("Jones owns Ulysses")
    # Singular subject with a plural verb must be rejected.
    assert not parser.recognize("Jones own Ulysses")
    # A conjoined (plural) subject takes the plural verb.
    assert parser.recognize("Jones and Smith own Ulysses")
def test_parse():
    """Parsing a grammatical sentence yields the expected derivation tree."""
    parser = ChartParser(TestGrammar, test_lexicon)
    subject = NP(singular)(PN(singular)(Constant("Jones")))
    obj = NP(singular)(PN(singular)(Constant("Ulysses")))
    predicate = VP(singular)(V(singular)(Constant("owns")), obj)
    expected = S(singular)(subject, predicate)
    assert parser.parse("Jones owns Ulysses")[0] == expected
class TestGrammarWalker(PatternWalker):
    """Pattern walker that flattens a parse tree back into its sentence string."""

    @add_match(Fa(Fa(S, ...), ...))
    def s(self, exp):
        # Sentence: subject NP followed by the VP.
        (np, vp) = exp.args
        return self.walk(np) + " " + self.walk(vp)

    @add_match(Fa(Fa(NP, ...), ...))
    def np_proper(self, exp):
        # Noun phrase built from a single proper noun.
        (pn,) = exp.args
        return self.walk(pn)

    @add_match(Fa(Fa(VP, ...), ...))
    def vp(self, exp):
        # Verb phrase: verb followed by the object NP.
        (v, np) = exp.args
        return self.walk(v) + " " + self.walk(np)

    @add_match(Fa(Fa(PN, ...), ...))
    def pn(self, exp):
        # Proper noun: return the underlying word.
        (w,) = exp.args
        return w.value

    @add_match(Fa(Fa(V, ...), ...))
    def v(self, exp):
        # Verb: return the underlying word.
        (w,) = exp.args
        return w.value
def test_walk_parsed():
    """Walking a parse tree reconstructs the original sentence."""
    sentence = "Jones owns Ulysses"
    parser = ChartParser(TestGrammar, test_lexicon)
    tree = parser.parse(sentence)[0]
    assert TestGrammarWalker().walk(tree) == sentence
class TestGrammarWalker2(TestGrammarWalker):
    """Walker variant that abstracts words into category codes:
    SN/PN for singular/plural nouns, SV/PV for singular/plural verbs."""

    @add_match(Fa(V(singular), ...))
    def v_sing(self, exp):
        return "SV"

    @add_match(Fa(Fa(NP, ...), (..., "and", ...,),))
    def np_conj(self, exp):
        # Conjoined NP: keep the literal "and" between the abstracted parts.
        (pn1, c, pn2) = exp.args
        return self.walk(pn1) + " and " + self.walk(pn2)

    @add_match(Fa(V(plural), ...))
    def v_plur(self, exp):
        return "PV"

    @add_match(Fa(Fa(PN, ...), ...))
    def pn_(self, exp):
        # Abstract the proper noun by its number feature only.
        (num,) = exp.functor.args
        return "SN" if num == singular else "PN"
def test_walk_parsed_2():
    """The abstracting walker maps words to number/verb category codes."""
    cp = ChartParser(TestGrammar, test_lexicon)
    tree = cp.parse("Jones owns Ulysses")[0]
    r = TestGrammarWalker2().walk(tree)
    assert "SN SV SN" == r
    tree = cp.parse("Jones and Smith own Ulysses")[0]
    r = TestGrammarWalker2().walk(tree)
    # Bug fix: the original `assert "SN and SN PV SN"` asserted a non-empty
    # string literal (always true) instead of comparing against the result.
    assert "SN and SN PV SN" == r
| StarcoderdataPython |
3336726 | <filename>src/botservice/azext_bot/botservice/models/__init__.py<gh_stars>0
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sku import Sku
from .resource import Resource
from .bot_properties import BotProperties
from .bot import Bot
from .channel import Channel
from .bot_channel import BotChannel
from .facebook_page import FacebookPage
from .facebook_channel_properties import FacebookChannelProperties
from .facebook_channel import FacebookChannel
from .email_channel_properties import EmailChannelProperties
from .email_channel import EmailChannel
from .ms_teams_channel_properties import MsTeamsChannelProperties
from .ms_teams_channel import MsTeamsChannel
from .skype_channel_properties import SkypeChannelProperties
from .skype_channel import SkypeChannel
from .kik_channel_properties import KikChannelProperties
from .kik_channel import KikChannel
from .web_chat_site import WebChatSite
from .web_chat_channel_properties import WebChatChannelProperties
from .web_chat_channel import WebChatChannel
from .direct_line_site import DirectLineSite
from .direct_line_channel_properties import DirectLineChannelProperties
from .direct_line_channel import DirectLineChannel
from .telegram_channel_properties import TelegramChannelProperties
from .telegram_channel import TelegramChannel
from .sms_channel_properties import SmsChannelProperties
from .sms_channel import SmsChannel
from .slack_channel_properties import SlackChannelProperties
from .slack_channel import SlackChannel
from .connection_item_name import ConnectionItemName
from .connection_setting_parameter import ConnectionSettingParameter
from .connection_setting_properties import ConnectionSettingProperties
from .connection_setting import ConnectionSetting
from .service_provider_parameter import ServiceProviderParameter
from .service_provider_properties import ServiceProviderProperties
from .service_provider import ServiceProvider
from .service_provider_response_list import ServiceProviderResponseList
from .error_body import ErrorBody
from .error import Error, ErrorException
from .operation_display_info import OperationDisplayInfo
from .operation_entity import OperationEntity
from .check_name_availability_request_body import CheckNameAvailabilityRequestBody
from .check_name_availability_response_body import CheckNameAvailabilityResponseBody
from .bot_paged import BotPaged
from .bot_channel_paged import BotChannelPaged
from .operation_entity_paged import OperationEntityPaged
from .connection_setting_paged import ConnectionSettingPaged
from .azure_bot_service_enums import (
SkuName,
SkuTier,
Kind,
ChannelName,
)
# Public API of the botservice models package (mirrors the imports above).
__all__ = [
    'Sku',
    'Resource',
    'BotProperties',
    'Bot',
    'Channel',
    'BotChannel',
    'FacebookPage',
    'FacebookChannelProperties',
    'FacebookChannel',
    'EmailChannelProperties',
    'EmailChannel',
    'MsTeamsChannelProperties',
    'MsTeamsChannel',
    'SkypeChannelProperties',
    'SkypeChannel',
    'KikChannelProperties',
    'KikChannel',
    'WebChatSite',
    'WebChatChannelProperties',
    'WebChatChannel',
    'DirectLineSite',
    'DirectLineChannelProperties',
    'DirectLineChannel',
    'TelegramChannelProperties',
    'TelegramChannel',
    'SmsChannelProperties',
    'SmsChannel',
    'SlackChannelProperties',
    'SlackChannel',
    'ConnectionItemName',
    'ConnectionSettingParameter',
    'ConnectionSettingProperties',
    'ConnectionSetting',
    'ServiceProviderParameter',
    'ServiceProviderProperties',
    'ServiceProvider',
    'ServiceProviderResponseList',
    'ErrorBody',
    'Error', 'ErrorException',
    'OperationDisplayInfo',
    'OperationEntity',
    'CheckNameAvailabilityRequestBody',
    'CheckNameAvailabilityResponseBody',
    'BotPaged',
    'BotChannelPaged',
    'OperationEntityPaged',
    'ConnectionSettingPaged',
    'SkuName',
    'SkuTier',
    'Kind',
    'ChannelName',
]
| StarcoderdataPython |
6406428 | #coding=utf-8
import json
from core.helper.crypt import pwd_crypt
from core.helper.globalvar import global_const
from copy import copy
import os
class config_parser:
    """Manage server connection records stored as a JSON list in a config file.

    Each record is a dict with ``ip``, ``port``, ``user`` and an encrypted
    ``password`` (via pwd_crypt), plus an optional ``name`` and a ``tags``
    dict of string metadata.  Iterating the parser yields *copies* of the
    records with the password decrypted.
    """

    # Custom exception: no record matching the requested ip was found.
    class ErrorNotFind(BaseException):
        def __init__(self):
            pass
        def __str__(self):
            return 'Can not find the record'

    # Custom exception: a tag value was not a string.
    class ErrorIncorrectType(BaseException):
        def __init__(self):
            pass
        def __str__(self):
            return 'Incorrect type of value'

    def __init__(self,config_path=None):
        # Default config location is <BASEDIR>/etc/config.json.
        if config_path == None:
            self._config_file_path = "%s/etc/config.json" % (global_const().get_value("BASEDIR"))
        else:
            self._config_file_path = config_path
        self._parse()

    # Read the JSON config from the file and keep it in a member variable.
    def _parse(self):
        # "a+" creates the file if it does not exist yet.
        with open(self._config_file_path, "a+") as f:
            try:
                f.seek(0)
                self._config = json.loads(f.read())
            except Exception as e:
                # Empty or corrupt file: start with an empty record list.
                self._config = []

    # Write the in-memory data back to the file.
    def _sync(self):
        with open(self._config_file_path, "w+") as f:
            f.write(json.dumps(self._config))

    # Iterator protocol.
    def __iter__(self):
        self._current = 0
        return self

    # Python 2 iterator compatibility.
    def next(self):
        return self.__next__()

    # Iterator protocol: yield record copies with decrypted passwords.
    def __next__(self):
        if self._config == None:
            raise StopIteration
        elif self._current < len(self._config):
            cur_record = copy(self._config[self._current])
            self._current+=1
            crypto = pwd_crypt()
            password = crypto.decrypt(cur_record['password'])
            cur_record['password'] = password
            return cur_record
        else:
            raise StopIteration

    # Return the record dict itself (mutable), or None if ip is not found.
    def _get_writable_record(self, ip):
        find_record = None
        for record in self._config:
            if record['ip'] == ip:
                find_record = record
        return find_record

    # Add a server record.
    # Tags were originally passed as a list of pairs, e.g.
    # [('area','NA3'),('role','p2p'),('group','kp2p')] -- see add_tag.
    def add_record(self, ip, port, user, password, name=None):
        # If a record with this ip already exists, do nothing.
        if self._get_writable_record(ip) != None:
            return
        crypto = pwd_crypt()
        obj = {"ip":ip, "port": port, "user":user, "password":crypto.encrypt(password)}
        if name != None:
            obj['name'] = name
        self._config.append(obj)
        self._sync()

    # Add tags to a record; tags are passed as keyword arguments and must
    # all be strings.
    def add_tag(self, ip, **kwargs):
        for tag in kwargs:
            if type(kwargs[tag]) is not str:
                raise self.ErrorIncorrectType
        record = self._get_writable_record(ip)
        if None == record:
            return
        # Record found; initialise its 'tags' dict if it has none yet.
        if 'tags' not in record:
            record['tags'] = {}
        # Write the tags, overwriting entries with the same name.
        for tag in kwargs:
            record['tags'][tag] = kwargs[tag]
        self._sync()

    # Delete the named tags from a record (best effort).
    def del_tag(self, ip, *args):
        record = self._get_writable_record(ip)
        # Record not found; nothing to do.
        if record == None:
            return
        try:
            for tagname in args:
                del record['tags'][tagname]
            self._sync()
        except:
            # Missing 'tags' dict or unknown tag names are silently
            # ignored -- deliberate best-effort semantics.
            pass

    # Update tags on a record, adding any that are absent.
    # NOTE(review): unlike add_tag, a missing record raises TypeError here
    # because `record` can be None -- confirm whether that is intended.
    def update_tag(self, ip, **kwargs):
        record = self._get_writable_record(ip)
        if 'tags' not in record:
            record['tags'] = {}
        for tagname in kwargs:
            record['tags'][tagname] = kwargs[tagname]
        self._sync()

    # Remove a server record by ip.
    def remove_record(self, ip):
        index = None
        for record in self._config:
            if record['ip'] == ip:
                index = self._config.index(record)
        if None != index:
            del self._config[index]
        self._sync()

    # Return a copy of the record for ip with its password decrypted,
    # or None if not found.
    def get_record(self, ip):
        ret_record = None
        for record in self._config:
            if record['ip'] == ip:
                crypto = pwd_crypt()
                password = crypto.decrypt(record['password'])
                ret_record = copy(record)
                ret_record['password'] = password
        return ret_record

    # Modify fields of an existing record; raises ErrorNotFind for an
    # unknown ip.  Only arguments that are not None are applied.
    def modify_record(self, ip, port = None, user = None, password = None, name = None ):
        # Encrypt the new password before storing it.
        if password != None:
            crypto = pwd_crypt()
            password = crypto.encrypt(password)
        a_record = 0
        for record in self._config:
            if record['ip'] == ip:
                # Found the matching record.
                a_record= 1
                fields = ['port', 'user', 'password', 'name']
                for key in fields:
                    # If the same-named local argument is set (not None),
                    # copy its value into the record via locals().
                    if key in locals() and locals()[key] != None:
                        record[key] = locals()[key]
                self._sync()
                return
        if a_record == 0:
            raise self.ErrorNotFind
| StarcoderdataPython |
11326762 | <gh_stars>100-1000
import numpy as np
import matplotlib.pylab as plt
import copy
# add angler to path (not necessary if pip installed)
import sys
sys.path.append("..")
# import the main simulation and optimization classes
from angler import Simulation, Optimization
from angler.plot import Temp_plt
# import some structure generators
from angler.structures import three_port, two_port, ortho_port
# Simulation constants.
lambda0 = 2e-6              # free space wavelength (m)
c0 = 3e8                    # speed of light in vacuum (m/s)
omega = 2*np.pi*c0/lambda0  # angular frequency (2pi/s)
dl = 0.4e-1                 # grid size (L0)
NPML = [25, 25]             # number of pml grid points on x and y borders
pol = 'Ez'                  # polarization (either 'Hz' or 'Ez')
source_amp = 6              # amplitude of modal source (A/L0^2?)

# material constants
n_index = 2.44              # refractive index
eps_m = n_index**2          # relative permittivity
chi3 = 4.1*1e-19            # Al2S3 from Boyd (m^2/V^2)
# max_ind_shift = 5.8e-3    # maximum allowed nonlinear refractive index shift (computed from damage threshold)

# geometric parameters
L1 = 6          # length waveguides in design region (L0)
L2 = 6          # width of box (L0)
H1 = 6          # height waveguides in design region (L0)
H2 = 6          # height of box (L0)
w = .3          # width of waveguides (L0)
l = 3           # length of waveguide from PML to box (L0)
spc = 2         # space between box and PML (L0)

# define permittivity of three port system
eps_r, design_region = ortho_port(L1, L2, H1, H2, w, l, dl, NPML, eps_m)
(Nx, Ny) = eps_r.shape
nx, ny = int(Nx/2), int(Ny/2)  # halfway grid points

# NOTE(review): this Simulation instance is immediately overwritten below.
simulation = Simulation(omega,eps_r,dl,NPML,pol)

# set the modal source and probes
simulation = Simulation(omega, eps_r, dl, NPML, 'Ez')
simulation.add_mode(np.sqrt(eps_m), 'x', [NPML[0]+int(l/2/dl), ny], int(H1/2/dl), scale=source_amp)
simulation.setup_modes()

# right-port modal profile (negative x index counts from the far edge)
right = Simulation(omega, eps_r, dl, NPML, 'Ez')
right.add_mode(np.sqrt(eps_m), 'x', [-NPML[0]-int(l/2/dl), ny], int(H1/2/dl))
right.setup_modes()
J_right = np.abs(right.src)

# top modal profile
top = Simulation(omega, eps_r, dl, NPML, 'Ez')
top.add_mode(np.sqrt(eps_m), 'y', [nx, -NPML[1]-int(l/2/dl)], int(L1/2/dl))
top.setup_modes()
J_top = np.abs(top.src)

# compute straight line simulation (two-port reference waveguide)
eps_r_wg, _ = two_port(L1, H1, w, l, spc, dl, NPML, eps_start=eps_m)
(Nx_wg, Ny_wg) = eps_r_wg.shape
nx_wg, ny_wg = int(Nx_wg/2), int(Ny_wg/2)  # halfway grid points
simulation_wg = Simulation(omega, eps_r_wg, dl, NPML, 'Ez')
simulation_wg.add_mode(np.sqrt(eps_m), 'x', [NPML[0]+int(l/2/dl), ny_wg], int(Ny/3), scale=source_amp)
simulation_wg.setup_modes()

# compute normalization: output-port overlap in the straight waveguide
sim_out = Simulation(omega, eps_r_wg, dl, NPML, 'Ez')
sim_out.add_mode(np.sqrt(eps_m), 'x', [-NPML[0]-int(l/2/dl), ny], int(Ny/3))
sim_out.setup_modes()
J_out = np.abs(sim_out.src)

(_, _, Ez_wg) = simulation_wg.solve_fields()
# Normalise the port overlap integrals by the straight-waveguide transmission.
SCALE = np.sum(np.square(np.abs(Ez_wg))*J_out)
J_out = J_out
J_right = J_right / SCALE
J_top = J_top / SCALE

# changes design region. 'style' can be in {'full', 'empty', 'halfway', 'random'}
np.random.seed(0)
simulation.init_design_region(design_region, eps_m, style='halfway')

# add nonlinearity over (a copy of) the design region
nl_region = copy.deepcopy(design_region)
simulation.nonlinearity = []  # This is needed in case you re-run this cell, for example (or you can re-initialize simulation every time)
simulation.add_nl(chi3, nl_region, eps_scale=True, eps_max=eps_m)
import autograd.numpy as npa
def J(e, e_nl):
linear_right = 1*npa.sum(npa.square(npa.abs(e))*J_right)
linear_top = -1*npa.sum(npa.square(npa.abs(e))*J_top)
nonlinear_right = -1*npa.sum(npa.square(npa.abs(e_nl))*J_right)
nonlinear_top = 1*npa.sum(npa.square(npa.abs(e_nl))*J_top)
objfn = (linear_right + linear_top + nonlinear_right + nonlinear_top)/2
return objfn
# make optimization object
R = 5         # filter radius of curvature (pixels) (takes a while to set up as R > 5-10)
beta = 500    # projection strength
eta= 0.50     # projection halfway
# Live plotting of permittivity and linear/nonlinear fields every iteration.
temp_plt = Temp_plt(it_plot=1, plot_what=('eps', 'elin', 'enl'), folder='figs/data/temp_im/',
                    figsize=(14,4), dpi=100)

optimization = Optimization(J=J, simulation=simulation, design_region=design_region, eps_m=eps_m, R=R, beta=beta, eta=eta)
# Sanity-check the adjoint gradient against a numerical finite difference.
(grad_avm, grad_num) = optimization.check_deriv(Npts=5, d_rho=5e-4)
# print('adjoint gradient = {}\nnumerical gradient = {}'.format(grad_avm, grad_num))

optimization.run(method='lbfgs', Nsteps=400, temp_plt=temp_plt)
5169541 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import os
import pytest
import time
from azure import eventhub
from azure.eventhub import EventData, EventHubClient, Offset
@pytest.mark.liveTest
def test_iothub_receive_sync(iot_connection_str, device_id):
    """Live test: connect to an IoT Hub-backed Event Hub, check the expected
    partition layout and that an idle partition yields no events within the
    timeout.  Requires live credentials (fixtures) and network access."""
    client = EventHubClient.from_iothub_connection_string(iot_connection_str, debug=True)
    receiver = client.add_receiver("$default", "0", operation='/messages/events')
    try:
        client.run()
        partitions = client.get_eventhub_info()
        assert partitions["partition_ids"] == ["0", "1", "2", "3"]
        received = receiver.receive(timeout=5)
        assert len(received) == 0
    finally:
        # Always tear the connection down, even on assertion failure.
        client.stop()
2749 | """
Module for higher level SharePoint REST api actions - utilize methods in the api.py module
"""
class Site():
    """Higher-level read helpers over the SharePoint REST API.

    ``sp`` is the low-level client (see api.py) exposing ``get``/``post``
    and a ``contextinfo`` property.
    """

    def __init__(self, sp):
        self.sp = sp

    def _get_json(self, endpoint):
        # GET an endpoint and return the decoded JSON body.
        return self.sp.get(endpoint).json()

    def _get_value(self, endpoint):
        # GET an endpoint and return the 'value' collection of the response.
        return self._get_json(endpoint).get('value')

    @property
    def info(self):
        """Site metadata (_api/site)."""
        return self._get_json("_api/site")

    @property
    def web(self):
        """Web metadata (_api/web)."""
        return self._get_json("_api/web")

    @property
    def contextinfo(self):
        """Request context info from the underlying client."""
        return self.sp.contextinfo

    @property
    def contenttypes(self):
        """Content types of the web (_api/web/contenttypes)."""
        return self._get_value("_api/web/contenttypes")

    @property
    def eventreceivers(self):
        """Event receivers of the web (_api/web/eventreceivers)."""
        return self._get_value("_api/web/eventreceivers")

    @property
    def features(self):
        """Activated features (_api/web/features)."""
        return self._get_value("_api/web/features")

    @property
    def fields(self):
        """Site columns (_api/web/fields)."""
        return self._get_value("_api/web/fields")

    @property
    def lists(self):
        """Lists of the web (_api/web/lists)."""
        return self._get_value("_api/web/lists")

    @property
    def siteusers(self):
        """Site users (_api/web/siteusers)."""
        return self._get_value("_api/web/siteusers")

    @property
    def groups(self):
        """Site groups (_api/web/sitegroups)."""
        return self._get_value("_api/web/sitegroups")

    @property
    def roleassignments(self):
        """Role assignments (_api/web/roleassignments)."""
        return self._get_value("_api/web/roleassignments")

    # def set_title_field_to_optional(self, list_title):
    #     """Sets the Title field in the given list to optional
    #     :param list_title: str: title of SharePoint list
    #     """
    #     # TODO - this likely is not necessary anymore, since we are not creating new lists
    #     field_rec = [x for x in self.get_field(list_title)
    #                  if x['InternalName'] == "Title"][0]
    #     if field_rec and field_rec.get('Required'):
    #         body = {'Required': False}
    #         self.update_list_field(field_rec, list_title, body)

    # def check_field_exists(self, list_title, field_title):
    #     """Check that a field exists to avoid error from attempting to access non-existent field
    #     :param list_title: str: title of SharePoint list
    #     :param field_title: str: title of field in SharePoint list
    #     :returns: bool
    #     """
    #     field_rec = self._get_first_or_none(
    #         "InternalName", field_title, list_data=self.get_list_fields(list_title))
    #     return field_rec is not None

    # def update_list_field(self, field_rec, list_title, body):
    #     """Given a field record, a list title, and the json body to update with, updates the SharePoint list field
    #     :param field_rec: dict: field record from SharePoint field query
    #     :param list_title: str: title of SharePoint list
    #     :param body: dict: dictionary structured for SharePoint REST api fields endpoint
    #     """
    #     field_id = field_rec.get('Id')
    #     update_field_url = "_api/web/lists/GetByTitle('{0}')/fields('{1}')".format(
    #         list_title, field_id)
    #     response = self.sp.post(url=update_field_url, json=body)
    #     response.raise_for_status()

    # def get_email_from_sharepoint_id(self, sharepoint_id: int):
    #     """Returns email address from a SharePoint integer user id value
    #     :param sp_user_id: int: SharePoint user id
    #     :returns: str
    #     """
    #     return self._get_first_or_none("Id", sharepoint_id, list_data=self.siteusers).get("Email")

    # def get_sharepoint_id_from_email(self, email):
    #     """Returns SharePoint integer user ID from an email address
    #     :param username: str: email address
    #     :returns: int
    #     """
    #     return self._get_first_or_none("Email", email, list_data=self.siteusers).get("Id")

    def _get_first_or_none(self, compare_column, compare_value, list_data=None, url=None):
        """Return the first record whose ``compare_column`` equals
        ``compare_value``, or None when there is no match.

        Records come from ``list_data`` if given, otherwise from a GET on
        ``url``.  Raises ValueError when neither source is provided.
        """
        if not list_data and not url:
            # Bug fix: the original *returned* a ValueError instance instead
            # of raising it, so callers received an exception object.
            raise ValueError("either list_data or url must be provided")
        if not list_data:
            list_data = self.sp.get(url).json().get('value')
        try:
            return [x for x in list_data if x[compare_column] == compare_value][0]
        except IndexError:
            return None
# TODO Add large file upload with chunking
# https://github.com/JonathanHolvey/sharepy/issues/23
| StarcoderdataPython |
9719596 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-05-01 17:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds a many-to-many "files" relation from
    # problems.Problem to file_repository.FileModel.

    dependencies = [
        ('file_repository', '0006_auto_20161108_2010'),
        ('problems', '0079_merge'),
    ]

    operations = [
        migrations.AddField(
            model_name='problem',
            name='files',
            field=models.ManyToManyField(to='file_repository.FileModel', verbose_name='problem files'),
        ),
    ]
| StarcoderdataPython |
11241656 | <reponame>jkklapp/sheila2<filename>sheila2_test.py
import unittest
import os
from sheila2 import Sheila
class BasicTestCase(unittest.TestCase):
    """Shared fixture: each test gets a fresh Sheila instance backed by
    the on-disk store 'test.cst', removed again on teardown."""

    def setUp(self):
        self.sheila = Sheila("testdb", "test.cst")

    def tearDown(self):
        # Best-effort removal: the store file may not exist if the test
        # never triggered a write.
        try:
            os.remove("test.cst")
        except OSError:
            pass
        self.sheila.destroy()
class TestBasicInsertion(BasicTestCase):
    """Insertion behaviour of the column-store table layout."""

    def testTableEntryExpansion(self):
        """Records whose keys overlap an existing table reuse that table."""
        for record in ({"a": 1, "b": 2}, {"a": 12}, {"a": 1, "b": 2, "c": 3}):
            self.sheila.insert(record)
            self.assertEqual(len(self.sheila.cst.tables()), 1)

    def testTableExpansion(self):
        """A record with a disjoint key set creates a second table."""
        expectations = (
            ({"a": 1, "b": 2}, 1),
            ({"c": 12}, 2),
            ({"b": 2, "c": 3}, 2),
        )
        for record, table_count in expectations:
            self.sheila.insert(record)
            self.assertEqual(len(self.sheila.cst.tables()), table_count)
class TestBasicQuery(BasicTestCase):
    """Query behaviour: inserted records are retrievable by key/value."""

    def testGetData(self):
        record = {"a": 1, "b": 2}
        self.sheila.insert(record)
        self.assertIn(record, self.sheila.query({"a": 1}))
# Allow running the tests directly with `python sheila2_test.py`.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
12856527 |
import gym
import numpy as np
import tensorflow as tf
import time
from actor_critic.policy import A2CBuilder
from actor_critic.util import discount_with_dones, cat_entropy, fix_tf_name
from common.model import NetworkBase
from common.multiprocessing_env import SubprocVecEnv
from tqdm import tqdm
class ActorCritic(NetworkBase):
    """Advantage actor-critic (A2C) model.

    Builds two A2CBuilder graphs sharing weights -- a step model for acting
    in the environments and a train model for parameter updates -- plus the
    combined policy-gradient / value / entropy loss and an RMSProp training
    op with global-norm gradient clipping.
    """

    def __init__(self, sess, a2c_arch, ob_space, ac_space,
                 pg_coeff=1.0, vf_coeff=0.5, ent_coeff=0.01, max_grad_norm=0.5,
                 lr=7e-4, alpha=0.99, epsilon=1e-5, summarize=False):
        self.sess = sess
        self.nact = ac_space.n
        self.ob_space = ob_space

        # Actions, Advantages, and Reward placeholders (one entry per sample)
        self.actions = tf.placeholder(tf.int32, [None], name='actions')
        self.advantages = tf.placeholder(tf.float32, [None], name='advantages')
        self.rewards = tf.placeholder(tf.float32, [None], name='rewards')
        self.depth = tf.placeholder(tf.float32, [None], name='scramble_depth')

        # setup the models: step model acts, train model reuses its weights
        self.step_model = A2CBuilder(self.sess, a2c_arch, ob_space, ac_space, reuse=False)
        self.train_model = A2CBuilder(self.sess, a2c_arch, ob_space, ac_space, reuse=True)

        # Negative log probs of the taken actions
        neglogpac = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=self.train_model.pi, labels=self.actions)

        # Policy Gradients Loss, Value Function Loss, Entropy, and Full Loss
        self.pg_loss = tf.reduce_mean(self.advantages * neglogpac)
        self.vf_loss = tf.reduce_mean(tf.square(tf.squeeze(self.train_model.vf) - self.rewards) / 2.0)
        self.entropy = tf.reduce_mean(cat_entropy(self.train_model.pi))
        self.loss = pg_coeff*self.pg_loss - ent_coeff*self.entropy + vf_coeff*self.vf_loss

        # Scalars tracked for summaries / monitoring only
        self.mean_rew= tf.reduce_mean(self.rewards)
        self.mean_depth = tf.reduce_mean(self.depth)

        # Find the model parameters and their (optionally clipped) gradients
        with tf.variable_scope('a2c_model'):
            self.params = tf.trainable_variables()
            grads = tf.gradients(self.loss, self.params)
            if max_grad_norm is not None:
                grads, grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
            grads = list(zip(grads, self.params))

        # Setup the optimizer
        trainer = tf.train.RMSPropOptimizer(learning_rate=lr, decay=alpha, epsilon=epsilon)
        self.opt = trainer.apply_gradients(grads)

        # For some awesome tensorboard stuff
        if summarize:
            tf.summary.scalar('Loss', self.loss)
            tf.summary.scalar('Entropy', self.entropy)
            tf.summary.scalar('Policy Gradient Loss', self.pg_loss)
            tf.summary.scalar('Value Function Loss', self.vf_loss)
            tf.summary.scalar('Rewards', self.mean_rew)
            tf.summary.scalar('Depth', self.mean_depth)

        # fix tf scopes if we are loading a scope that is different from the saved instance
        #name_scope = tf.contrib.framework.get_name_scope()
        #if len(name_scope) != 0:
        #    self.params = { fix_tf_name(v.name, name_scope): v for v in self.params }
        #else:
        #    self.params = { fix_tf_name(v.name): v for v in self.params }

        # Initialize the tensorflow saver
        self.saver = tf.train.Saver(self.params, max_to_keep=5)

    # Single training step
    def train(self, obs, rewards, masks, actions, values, depth, step, summary_op=None):
        """Run one optimisation step and return the loss components
        (plus the evaluated summary when summary_op is given).
        NOTE(review): `masks` and `step` are unused here -- presumably kept
        for interface compatibility with other trainers; confirm with callers.
        """
        advantages = rewards - values
        feed_dict = {
            self.actions: actions,
            self.advantages: advantages,
            self.rewards: rewards,
            self.depth: depth,
        }
        # Map the (possibly multi-part) observation onto the model inputs.
        inputs = self.train_model.get_inputs()
        mapped_input = self.train_model.transform_input(obs)
        for transformed_input, inp in zip(mapped_input, inputs):
            feed_dict[inp] = transformed_input
        ret_vals = [
            self.loss,
            self.pg_loss,
            self.vf_loss,
            self.entropy,
            self.mean_rew,
            self.mean_depth,
            self.opt,
        ]
        if summary_op is not None:
            ret_vals.append(summary_op)
        return self.sess.run(ret_vals, feed_dict=feed_dict)

    # Given an observation, perform an action
    def act(self, obs, stochastic=True):
        return self.step_model.step(obs, stochastic=stochastic)

    # Return the value of the value function
    def critique(self, obs):
        return self.step_model.value(obs)
# The function that trains the a2c model
def train(env_fn = None,
          spectrum = False,
          a2c_arch = None,
          nenvs = 16,
          nsteps = 100,
          max_iters = 1e6,
          gamma = 0.99,
          pg_coeff = 1.0,
          vf_coeff = 0.5,
          ent_coeff = 0.01,
          max_grad_norm = 0.5,
          lr = 7e-4,
          alpha = 0.99,
          epsilon = 1e-5,
          log_interval = 100,
          summarize = True,
          load_path = None,
          log_path = None,
          cpu_cores = 1):
    """Train an A2C actor-critic on `nenvs` parallel copies of `env_fn`.

    Each iteration rolls out `nsteps` environment steps per worker, computes
    discounted returns with `gamma`, and performs one optimizer step.
    Checkpoints to `log_path` every `log_interval` iterations and writes
    TensorBoard summaries there when `summarize` is True. `load_path`, if
    given, restores a previously saved model before training.
    """
    # Construct the vectorized parallel environments
    envs = [ env_fn for _ in range(nenvs) ]
    envs = SubprocVecEnv(envs)
    # Set some random seeds for the environment
    envs.seed(0)
    if spectrum:
        envs.spectrum()
    ob_space = envs.observation_space.shape
    nw, nh, nc = ob_space
    ac_space = envs.action_space
    obs = envs.reset()
    tf_config = tf.ConfigProto(
        inter_op_parallelism_threads=cpu_cores,
        intra_op_parallelism_threads=cpu_cores )
    tf_config.gpu_options.allow_growth = True
    with tf.Session(config=tf_config) as sess:
        actor_critic = ActorCritic(sess, a2c_arch, ob_space, ac_space,
                                   pg_coeff, vf_coeff, ent_coeff, max_grad_norm,
                                   lr, alpha, epsilon, summarize)
        load_count = 0
        # NOTE(review): load_count stays 0 even after a restore, so iteration
        # numbering always restarts at 1 -- confirm this is intended.
        if load_path is not None:
            actor_critic.load(load_path)
            print('Loaded a2c')
        summary_op = tf.summary.merge_all()
        writer = tf.summary.FileWriter(log_path, graph=sess.graph)
        sess.run(tf.global_variables_initializer())
        batch_ob_shape = (-1, nw, nh, nc)
        dones = [False for _ in range(nenvs)]
        episode_rewards = np.zeros((nenvs, ))
        final_rewards = np.zeros((nenvs, ))
        print('a2c Training Start!')
        print('Model will be saved on intervals of %i' % (log_interval))
        for i in tqdm(range(load_count + 1, int(max_iters) + 1), ascii=True, desc='ActorCritic'):
            # Create the minibatch lists
            mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_depth = [], [], [], [], [], []
            total_reward = 0
            for n in range(nsteps):
                # Get the actions and values from the actor critic, we don't need neglogp
                actions, values, neglogp = actor_critic.act(obs)
                mb_obs.append(np.copy(obs))
                mb_actions.append(actions)
                mb_values.append(values)
                mb_dones.append(dones)
                obs, rewards, dones, info = envs.step(actions)
                total_reward += np.sum(rewards)
                episode_rewards += rewards
                # Zero the running episode reward for workers that just finished,
                # after copying it into final_rewards.
                masks = 1 - np.array(dones)
                final_rewards *= masks
                final_rewards += (1 - masks) * episode_rewards
                episode_rewards *= masks
                mb_rewards.append(rewards)
                mb_depth.append(np.array([ info_item['scramble_depth'] for info_item in info ]))
            # One extra dones entry (nsteps + 1 total) so masks/dones can be
            # offset by one step against each other below.
            mb_dones.append(dones)
            # Convert batch steps to batch rollouts
            mb_obs = np.asarray(mb_obs, dtype=np.float32).swapaxes(1,0).reshape(batch_ob_shape)
            mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1,0)
            mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1,0)
            mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1,0)
            mb_dones = np.asarray(mb_dones, dtype=np.float32).swapaxes(1,0)
            mb_depth = np.asarray(mb_depth, dtype=np.int32).swapaxes(1,0)
            mb_masks = mb_dones[:, :-1]
            mb_dones = mb_dones[:, 1:]
            # Bootstrap the return of unfinished episodes with the critic's value.
            last_values = actor_critic.critique(obs).tolist()
            # discounting
            for n, (rewards, d, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):
                rewards = rewards.tolist()
                d = d.tolist()
                if d[-1] == 0:
                    rewards = discount_with_dones(rewards+[value], d+[0], gamma)[:-1]
                else:
                    rewards = discount_with_dones(rewards, d, gamma)
                mb_rewards[n] = rewards
            # Flatten the whole minibatch
            mb_rewards = mb_rewards.flatten()
            mb_actions = mb_actions.flatten()
            mb_values = mb_values.flatten()
            # NOTE(review): mb_masks is passed to actor_critic.train, which does
            # not feed it into the graph -- see the method above.
            mb_masks = mb_masks.flatten()
            mb_depth = mb_depth.flatten()
            # Save the information to tensorboard
            if summarize:
                loss, policy_loss, value_loss, policy_ent, mrew, mdp, _, summary = actor_critic.train(
                    mb_obs, mb_rewards, mb_masks, mb_actions, mb_values, mb_depth, i, summary_op)
                writer.add_summary(summary, i)
            else:
                loss, policy_loss, value_loss, policy_ent, mrew, mdp, _ = actor_critic.train(
                    mb_obs, mb_rewards, mb_masks, mb_actions, mb_values, mb_depth, i)
            if i % log_interval == 0:
                actor_critic.save(log_path, i)
        actor_critic.save(log_path, 'final')
    print('a2c model is finished training')
| StarcoderdataPython |
6462514 | <filename>src/menu.py
# _*_coding:utf-8_*_
from os import environ
if 'PYGAME_HIDE_SUPPORT_PROMPT' not in environ:
environ['PYGAME_HIDE_SUPPORT_PROMPT'] = ''
del environ
import PIL
import pygame
from PIL import Image
from pygame import Surface
from life_game import GridUniverse
from typing import Tuple, List, Optional, Union
# Color spec accepted by pygame drawing calls: a single int or an (r, g, b) triple.
Color = Union[int, Tuple[int, int, int]]
def draw_image_in_rect(destiny: Surface, image: Surface, pos: Tuple[int, int], rect: List[int], color: Color, border_color: Color) -> None:
    """Draw a framed rectangle on `destiny`, then blit `image` at `pos`.

    `rect` is [x, y, width, height, border_size]: the outer rectangle is filled
    with `color`, then the area inset by `border_size` is filled with
    `border_color`, leaving a `border_size`-wide frame of `color`.
    """
    [x, y, width, height, border_size] = rect
    pygame.draw.rect(destiny, color, [x, y, width, height], 0)
    pygame.draw.rect(destiny, border_color, [
        x + border_size, y + border_size, width - 2 * border_size, height - 2 * border_size], 0)
    destiny.blit(image, pos)
def pillImageResize(filename: str, size: Optional[Tuple[int, int]] = (60, 60)) -> Surface:
    """Load an image with Pillow, resize it, and convert it to a pygame Surface."""
    # Resize with Pillow's antialias filter, then hand the raw bytes to pygame.
    resized = Image.open(filename).resize(size, PIL.Image.ANTIALIAS)
    return pygame.image.fromstring(resized.tobytes(), resized.size, resized.mode)
class SettingsDisplay:
    """Top bar of the life-game window: play/pause button, pencil/eraser icon,
    and the population / generation / block-size counters."""

    def __init__(self, drawing_mode: Optional[bool] = True, x: Optional[int] = 0, y: Optional[int] = 0) -> None:
        # Button image per run state: True -> pause icon, False -> play icon.
        self.image_dict = {False: pygame.image.load('images/play.png').convert_alpha(),
                           True: pygame.image.load('images/pause.png').convert_alpha()}
        # Tool icon per edit mode: True -> pencil (draw), False -> eraser.
        self.icons = {False: pillImageResize('images/eraser.png').convert_alpha(),
                      True: pillImageResize('images/pencil.png').convert_alpha()}
        self.x = x
        self.y = y
        self.drawing = True
        self.drawing_mode = drawing_mode
        self.font = pygame.font.SysFont('Verdana', 20)
        self.font.set_bold(True)

    def draw(self, destiny: Surface, grid: GridUniverse) -> None:
        """Render the two control buttons and the grid statistics onto `destiny`."""
        draw_image_in_rect(destiny, self.image_dict[self.drawing_mode], (30, 28),
                           [16, 18, 64, 64, 2], (0), (140, 140, 140))
        draw_image_in_rect(destiny, self.icons[self.drawing], (108, 20),
                           [106, 18, 64, 64, 2], (0), (120, 120, 120))
        # Statistics are rendered white at fixed offsets.
        captions = ((f"population : {grid.population_size}", (200, 5)),
                    (f"generation : {grid.geration}", (197, 37)),
                    (f"block size : {grid.block_size}", (207, 70)))
        for caption, position in captions:
            destiny.blit(self.font.render(caption, True, (255, 255, 255)), position)
| StarcoderdataPython |
11293318 | <filename>Python Pandas.py
# -*- coding: utf-8 -*-
"""
Pandas walkthrough: Series, DataFrames, selection, missing data, groupby,
merge/concat, and file I/O.
Created on Fri Jun 15 18:16:26 2018
@author: Manoj.Prabhakar
"""
import numpy as np
import pandas as pd
from numpy.random import randn
import os
# Series
labels = ['a','b','d','e']
data = [10,30,50,70]
ar = np.array(data)
d={'a':10,'b':30,'c':50,'d':70}
pd.Series(data=data)
pd.Series(data=data,index=labels)
pd.Series(data,labels)
pd.Series(d)
series1 = pd.Series([1,2,3,4],['India','Afghanistan','China','Pakistan'])
series2 = pd.Series([1,2,7,4],['India','Afghanistan','Bangladesh','Pakistan'])
series1['India']
series3 = pd.Series(data=labels)
series3[0]
series2[2]
# Addition aligns on index labels; labels present in only one series give NaN.
series1+series2
# DataFrame - Pandas series object with an index
np.random.seed(1000)
df=pd.DataFrame(randn(5,3),['A','B','C','D','E'],['W','X','Y'])
df['W']
type(df['W'])
type(df)
df[['W','X']]
df['new'] = df['W']+df['Y']
df
df.drop('new',axis=1,inplace=True) # axis = 0 means the row and axis = 1 means column, inplace = True - for changes to occur
df.drop('E')
df.shape # 5 denotes the number of rows and 3 denotes the number of columns
# Access rows
df.loc['A']
df.iloc[0]
df.loc['A','Y']
df.loc[['A','B'],['W','X']]
# Conditional Selection
df1 = df>0
df[df1>0]
df
# Filtering rows - Selecting only those rows which are Not NA's
df[df['X']>0]
# Multiple Conditions
df[(df['W']>0) & (df['X']<1)] # And Operator
df[(df['W']>0) | (df['X']<1)] # Or Operator
newst = 'CA NA NR OS MA'.split()
df['States'] = newst
df
# Missing Data
d = ({'A':[1,2,np.nan],'B':[6,np.nan,np.nan],'C':[1,4,5]})
df = pd.DataFrame(d)
df
df.dropna() # For Rows
df.dropna(axis=1) # For Columns
df.dropna(thresh=2) # Keep only rows that have at least 2 non-missing values
df['A'].fillna(value=df['A'].mean())
# Group BY
data = {'Company':['Goog','MSFT','ABINB','DMART','TCS','TCS'],
        'Person':['Ra','Ma','Ta','Ka','Ga','Fa'],
        'Sales': [200,1000,100001,232,445,567]}
df = pd.DataFrame(data)
df
r=df.groupby('Company')
r.mean()
r.sum()
r.std()
df.groupby('Company').count()
df.groupby('Company').max()
df.groupby('Company').describe()
# Merging and Concatenating
df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
                    'B': ['B0', 'B1', 'B2', 'B3'],
                    'C': ['C0', 'C1', 'C2', 'C3'],
                    'D': ['D0', 'D1', 'D2', 'D3']},
                   index=[0, 1, 2, 3])
df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'],
                    'B': ['B4', 'B5', 'B6', 'B7'],
                    'C': ['C4', 'C5', 'C6', 'C7'],
                    'D': ['D4', 'D5', 'D6', 'D7']},
                   index=[4, 5, 6, 7])
df3 = pd.DataFrame({'A': ['A8', 'A9', 'A10', 'A11'],
                    'B': ['B8', 'B9', 'B10', 'B11'],
                    'C': ['C8', 'C9', 'C10', 'C11'],
                    'D': ['D8', 'D9', 'D10', 'D11']},
                   index=[8, 9, 10, 11])
# Similar to Rbind in R
pd.concat([df1,df2,df3])
# Similar to Cbind in R
pd.concat([df1,df2,df3],axis=1)
left = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
                     'A': ['A0', 'A1', 'A2', 'A3'],
                     'B': ['B0', 'B1', 'B2', 'B3']})
right = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
                      'C': ['C0', 'C1', 'C2', 'C3'],
                      'D': ['D0', 'D1', 'D2', 'D3']})
pd.merge(left,right,how='inner',on='key')
# BUG FIX: these frames are keyed on 'key' (not 'key1'/'key2');
# merging on the missing columns raised KeyError.
pd.merge(left, right, how='outer', on='key')
pd.merge(left, right, how='right', on='key')
pd.merge(left, right, how='left', on='key')
# Data Input and Output
os.chdir("E:/PRESENTATION")
# BUG FIX: pd.readc_csv is not a pandas function -> pd.read_csv
df = pd.read_csv('train.csv',encoding='latin1')
df.to_csv('Output.csv')
# BUG FIX: the read_excel keyword is sheet_name ('sheetname' was removed from pandas)
df = pd.read_excel('Train and Test.xlsx',sheet_name='Train')
df = pd.read_excel('Train and Test.xlsx',sheet_name='Test')
df.to_excel('Output.xlsx',sheet_name='Sheet1')
df = pd.read_html('http://www.fdic.gov/bank/individual/failed/banklist.html')
| StarcoderdataPython |
11381221 | from django.conf import settings
from elasticsearch_dsl import DocType, Date, String, Long, Ip, Nested, \
Object, Index, MetaField, analyzer, FacetedSearch, Q, TermsFacet, \
InnerObjectWrapper, DateHistogramFacet, SF
class Client(InnerObjectWrapper):
    """Wrapper class for the nested `client` object stored on DnsRecord."""
    pass
class DnsRecord(DocType):
    """Elasticsearch document for a single DNS answer record.

    All fields use index='not_analyzed' so they are stored as exact values
    and can be filtered/aggregated without tokenization.
    """
    domain = String(index='not_analyzed')
    rtype = String(index='not_analyzed')  # presumably the resource-record type -- confirm with the producer
    rdata = String(index='not_analyzed')  # presumably the answer payload -- confirm with the producer
    ttl = Long(index='not_analyzed')
    timestamp = Date(index='not_analyzed')
    # The client that issued the query, stored as a nested object (see Client).
    client = Nested(
        doc_class=Client,
        properties={
            'service_type': String(index='not_analyzed'),
            'ip': Ip(index='not_analyzed')
        }
    )
# create an index and register the doc types
# Index name and settings come from Django settings so deployments can differ.
index = Index(settings.ES_INDEX)
index.settings(**settings.ES_INDEX_SETTINGS)
index.doc_type(DnsRecord)
| StarcoderdataPython |
1989911 | <reponame>data-science-misis/rec-sys
import pandas as pd
from data_provider import database, popular
# Hard cap on how many items any predictor may return.
MAX_RECOMMENDATION_COUNT = 100
def predict_popular(k=10):
    """Return the `k` most popular items, never more than MAX_RECOMMENDATION_COUNT."""
    limit = min(k, MAX_RECOMMENDATION_COUNT)
    return popular()[:limit]
def get_user_ids():
    """All distinct user ids present in the interactions database, as a list."""
    unique_ids = database()['user_id'].unique()
    return unique_ids.tolist()
def predict_collaborative_filtering(user_id, k=10):
    """Collaborative-filtering recommendations for `user_id`.

    NOTE(review): currently a stub -- it ignores both arguments and always
    returns an empty DataFrame.
    """
    return pd.DataFrame()
| StarcoderdataPython |
1754492 | <filename>rllib/policy/q_function_policy/epsilon_greedy.py
"""Epsilon Greedy Policy."""
import torch
from rllib.util.neural_networks import get_batch_size
from .abstract_q_function_policy import AbstractQFunctionPolicy
class EpsGreedy(AbstractQFunctionPolicy):
    """Implementation of Epsilon Greedy Policy.
    An epsilon greedy exploration strategy chooses the greedy strategy with probability
    1-epsilon, and a random action with probability epsilon.
    If eps_end and eps_decay are not set, then epsilon will be always eps_start.
    If not, epsilon will decay exponentially at rate eps_decay from eps_start to
    eps_end.
    """
    @property
    def epsilon(self):
        """Return epsilon."""
        return self.param()
    def forward(self, state):
        """See `AbstractQFunctionPolicy.forward'.

        Returns the log-probabilities of the epsilon-greedy action
        distribution for `state`.
        """
        batch_size = get_batch_size(state, self.dim_state)
        # Unbatched states get a plain (num_actions,) output instead of (1, num_actions).
        aux_size = () if len(batch_size) == 0 else batch_size
        # Epsilon part.
        # Spread the exploration mass epsilon uniformly across all actions.
        eps = torch.true_divide(self.epsilon, self.num_actions)
        probabilities = eps * torch.ones(*aux_size, self.num_actions)
        greedy = (1 - self.epsilon) * torch.ones(*aux_size, self.num_actions)
        # Greedy part.
        # Add the remaining (1 - epsilon) mass onto each state's argmax-Q action.
        a = torch.argmax(self.q_function(state), dim=-1)
        probabilities.scatter_add_(dim=-1, index=a.unsqueeze(-1), src=greedy)
        if not batch_size:
            probabilities = probabilities.squeeze(0)
        # Return log-probabilities, matching the policy interface.
        return torch.log(probabilities)
| StarcoderdataPython |
#!/usr/bin/python
"""Replace gap characters ('-') with 'N' in every record of a FASTA file.

Usage: script.py <fasta-file>   (writes FASTA-formatted output to stdout)
"""
from sys import argv, exit
from Bio import SeqIO

# Fail with a usage message instead of an IndexError when no file is given.
if len(argv) != 2:
    exit("usage: %s <fasta-file>" % argv[0])

# Context manager guarantees the handle is closed even if parsing fails.
with open(argv[1], 'r') as f:
    for r in SeqIO.parse(f, 'fasta'):
        seq = str(r.seq).replace('-', 'N')
        print(">%s\n%s\n" % (r.description, seq))
| StarcoderdataPython |
9708075 |
# Minimal Django settings for running the django_email_changer app.
# Development-only flags: never ship with DEBUG enabled.
DEBUG = True
TEMPLATE_DEBUG = True
SITE_ID = 1
# A local SQLite file is enough for development/tests.
DATABASES = {
    'default': {
        'NAME': 'db.sqlite',
        'ENGINE': 'django.db.backends.sqlite3',
    }
}
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django_email_changer',
)
ROOT_URLCONF = 'django_email_changer.urls'
# NOTE(review): placeholder value -- a real, random secret must be configured
# before any non-test use.
SECRET_KEY = '<PASSWORD>_<PASSWORD>_<PASSWORD>'
USE_TZ = True
| StarcoderdataPython |
8037671 | # Generated by Django 3.2 on 2021-05-09 17:35
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: removes the `landmark` field from ShippingAddress."""
    dependencies = [
        ('store', '0002_product_image'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='shippingaddress',
            name='landmark',
        ),
    ]
| StarcoderdataPython |
3228636 | from kingfisher_scrapy.base_spider import SimpleSpider
from kingfisher_scrapy.util import components
class NicaraguaSolidWaste(SimpleSpider):
    """
    Domain
        Solid Waste Mitigation Platform (SWMP)
    Spider arguments
        from_date
            Download only data from this date onward (YYYY-MM-DD format). Defaults to '2000-01-01'.
        until_date
            Download only data until this date (YYYY-MM-DD format). Defaults to today.
    """
    name = 'nicaragua_solid_waste'
    data_type = 'release_package'
    default_from_date = '2000-01-01'
    date_required = True
    url = 'http://www.gekoware.com/swmp/api/ocds/{}/{}'

    def start_requests(self):
        # Fill both date slots of the URL template, producing e.g.
        # http://www.gekoware.com/swmp/api/ocds/20190101/20201005
        request_url = self.url.format(self.from_date.strftime("%Y%m%d"),
                                      self.until_date.strftime("%Y%m%d"))
        yield self.build_request(request_url, formatter=components(-2))
| StarcoderdataPython |
1973311 | "Unit tests of larry.__getitem__"
from nose.tools import assert_raises, assert_equal
import numpy as np
from numpy.testing import assert_array_equal
nan = np.nan  # module-level shorthand used by the fixtures and tests below
from la import larry
from la.util.testing import assert_larry_equal as ale
def make_larrys():
    """Fixtures shared by the getitem tests: a 3x2 float larry containing one
    NaN, and a 1d integer larry of [0, 1, 2, 3]."""
    arr2d = np.array([[1.0, nan],
                      [3.0, 4.0],
                      [5.0, 6.0]])
    arr1d = np.array([0, 1, 2, 3])
    return larry(arr2d), larry(arr1d)
def test_getitem_01():
    "larry.getitem #01"
    # Integer index on axis 0 drops that axis and returns the row.
    desired = larry([3.0, 4.0])
    lar1, lar2 = make_larrys()
    actual = lar1[1]
    ale(actual, desired)
def test_getitem_02():
    "larry.getitem #02"
    desired = larry([3.0, 4.0])
    lar1, lar2 = make_larrys()
    actual = lar1[1,:]
    ale(actual, desired)
def test_getitem_03():
    "larry.getitem #03"
    desired = larry([3.0, 4.0])
    lar1, lar2 = make_larrys()
    actual = lar1[1,0:2]
    ale(actual, desired)
def test_getitem_04():
    "larry.getitem #04"
    # Scalar result: indexing both axes returns a numpy scalar, not a larry.
    desired = np.array([4.0])[0]
    lar1, lar2 = make_larrys()
    actual = lar1[1,1]
    ale(actual, desired)
def test_getitem05():
    "larry.getitem #05"
    # Boolean ndarray selects rows; labels of the kept rows are preserved.
    label = [[1, 2], [0, 1]]
    desired = larry([[3.0, 4.0],
                     [5.0, 6.0]],
                    label)
    lar1, lar2 = make_larrys()
    idx = np.array([False, True, True])
    actual = lar1[idx,:]
    ale(actual, desired, original=lar1)
def test_getitem_06():
    "larry.getitem #06"
    desired = larry([[1.0, nan],
                     [3.0, 4.0]])
    lar1, lar2 = make_larrys()
    actual = lar1[0:2,0:2]
    ale(actual, desired)
def test_getitem_07():
    "larry.getitem #07"
    desired = larry([[ 3.0, 4.0],
                     [ 5.0, 6.0]], [[1, 2], [0, 1]])
    lar1, lar2 = make_larrys()
    actual = lar1[np.array([1, 2])]
    ale(actual, desired, original=lar1)
def test_getitem_08():
    "larry.getitem #08"
    # Out-of-range and non-index arguments must raise IndexError.
    lar1, lar2 = make_larrys()
    assert_raises(IndexError, lar1.__getitem__, 100)
def test_getitem_09():
    "larry.getitem #09"
    lar1, lar2 = make_larrys()
    assert_raises(IndexError, lar1.__getitem__, 'a')
def test_getitem_10():
    "larry.getitem #10"
    desired = np.array([1])[0]
    lar1, lar2 = make_larrys()
    actual = lar2[1]
    ale(actual, desired)
def test_getitem_11():
    "larry.getitem #11"
    desired = larry([0, 1])
    lar1, lar2 = make_larrys()
    actual = lar2[:2]
    ale(actual, desired)
def test_getitem_12():
    "larry.getitem #12"
    # Fancy (list) indexing reorders rows and their labels.
    label = [[0, 2, 1], [0, 1]]
    desired = larry([[ 1.0, nan],
                     [ 5.0, 6.0],
                     [ 3.0, 4.0]],
                    label)
    lar1, lar2 = make_larrys()
    actual = lar1[[0, 2, 1]]
    #ale(actual, desired, original=lar1) fails: axis 1 label is not a copy
    ale(actual, desired)
def test_getitem_13():
    "larry.getitem #13"
    # Float indices are truncated to integers.
    label = [[0, 2, 1], [0, 1]]
    desired = larry([[ 1.0, nan],
                     [ 5.0, 6.0],
                     [ 3.0, 4.0]],
                    label)
    lar1, lar2 = make_larrys()
    actual = lar1[[0.99, 2.6, 1.78]]
    #ale(actual, desired, original=lar1) fails: axis 1 label is not a copy
    ale(actual, desired)
def test_getitem_14():
    "larry.getitem #14"
    label = [[1, 0], [0, 1]]
    desired = larry([[ 3.0, 4.0],
                     [ 1.0, nan]],
                    label)
    lar1, lar2 = make_larrys()
    idx = [True, False]
    actual = lar1[idx]
    #ale(actual, desired, original=lar1) fails: axis 1 label is not a copy
    ale(actual, desired)
def test_getitem_15():
    "larry.getitem #15"
    # Duplicate indices are rejected (labels must stay unique).
    lar1, lar2 = make_larrys()
    assert_raises(IndexError, lar1.__getitem__, [0,1,0])
def test_getitem_16():
    "larry.getitem #16"
    desired = larry([[ 1.0, nan]])
    lar1, lar2 = make_larrys()
    idx = np.array([True, False])
    actual = lar1[idx,:]
    ale(actual, desired, original=lar1)
def test_getitem_17():
    "larry.getitem #17"
    desired = larry([[ 1.0],
                     [ 3.0],
                     [ 5.0]])
    lar1, lar2 = make_larrys()
    idx = np.array([True, False])
    actual = lar1[:, idx]
    ale(actual, desired, original=lar1)
def test_getitem_18():
    "larry.getitem #18"
    desired = larry([3.0, 4.0])
    lar1, lar2 = make_larrys()
    actual = lar1[1.9]
    ale(actual, desired)
def test_getitem_19():
    "larry.getitem #19"
    desired = np.array([4.0])[0]
    lar1, lar2 = make_larrys()
    actual = lar1[1.1, 1.1]
    ale(actual, desired)
def test_getitem_20():
    "larry.getitem #20"
    desired = larry([[ 3.0, 4.0],
                     [ 5.0, 6.0]], [[1, 2], [0, 1]])
    lar1, lar2 = make_larrys()
    actual = lar1[np.array([1, 2]),:]
    ale(actual, desired, original=lar1)
def test_getitem_21():
    "larry.getitem #21"
    desired = larry([ 3.0, 5.0], [[1, 2]])
    lar1, lar2 = make_larrys()
    actual = lar1[np.array([1, 2]), 0]
    ale(actual, desired, original=lar1)
def test_getitem_22():
    "larry.getitem #22"
    # Object dtype: a stored None must come back unchanged.
    lar = larry([None, None])
    desired = None
    actual = lar[0]
    assert_equal(actual, desired, "Indexing object dtype failed.")
def test_getitem_23():
    "larry.getitem #23"
    # Object dtype holding arrays: indexing must return the stored array itself.
    a = np.empty(2, dtype=object)
    a[0] = np.array([1, 2, 3])
    a[1] = np.array([4, 5, 6])
    lar = larry(a)
    desired = np.array([4, 5, 6])
    actual = lar[1]
    err_msg = "Indexing 1d object dtype (array of arrays) failed."
    assert_array_equal(actual, desired, err_msg=err_msg)
def test_getitem_24():
    "larry.getitem #24"
    a = np.empty((2,1), dtype=object)
    a[0,0] = np.array([1, 2, 3])
    a[1,0] = np.array([4, 5, 6])
    lar = larry(a)
    desired = np.array([4, 5, 6])
    actual = lar[1,0]
    err_msg = "Indexing 2d object dtype (array of arrays) failed."
    assert_array_equal(actual, desired, err_msg=err_msg)
def test_getitem_25():
    "larry.getitem #25"
    # Empty slice yields a larry with a zero-length axis, not an error.
    desired = larry(np.ones((3,0)))
    lar1, lar2 = make_larrys()
    actual = lar1[:,1:1]
    ale(actual, desired, original=lar1)
# ------------------------------------------------------------------------
# indexing with bool larrys
def test_getitem_26():
    "larry.getitem #26"
    lar1, lar2 = make_larrys()
    idx1 = larry([True, False, True])
    actual = lar1[idx1]
    arr = lar1.x[idx1.x]
    assert_array_equal(arr, actual.x)
    idx2 = [0, 2]
    desired = lar1[idx2]
    ale(actual, desired, original=lar1)
def test_getitem_27():
    "larry.getitem #27"
    lar1, lar2 = make_larrys()
    idx1 = larry([True, False])
    actual = lar1[:,idx1]
    arr = lar1.x[:,idx1.x]
    assert_array_equal(arr, actual.x)
    idx2 = [0]
    desired = lar1[:,idx2]
    ale(actual, desired, original=lar1)
def test_getitem_28():
    "larry.getitem #28"
    lar1, lar2 = make_larrys()
    idx1 = larry([True, False, True, False])
    actual = lar2[idx1]
    arr = lar2.x[idx1.x]
    assert_array_equal(arr, actual.x)
    idx2 = [0, 2]
    desired = lar2[idx2]
    ale(actual, desired, original=lar2)
def test_getitem_29():
    "larry.getitem #29"
    # Misaligned labels or non-bool larry indices must be rejected.
    lar1, lar2 = make_larrys()
    idx = larry([True, False, True, False])
    assert_raises(IndexError, lar1.__getitem__, idx[::-1])
    assert_raises(IndexError, lar2.__getitem__, idx[::-1])
    idx = larry([0, 1, 2, 3])
    assert_raises(IndexError, lar1.__getitem__, idx)
    assert_raises(IndexError, lar2.__getitem__, idx)
    idx = larry([True, False])
    assert_raises(IndexError, lar1.__getitem__, (slice(None), idx[::-1]))
def test_getitem_30():
    "larry.getitem #30"
    # Bool larry index with matching (string) labels.
    lar1, lar2 = make_larrys()
    lar1.label[0] = ['a', 'b', 'c']
    lar1.label[1] = ['A', 'B']
    lar2.label[0] = ['a', 'b', 'c', 'd']
    idx1 = larry([True, False, True], label=[['a', 'b', 'c']])
    actual = lar1[idx1]
    arr = lar1.x[idx1.x]
    assert_array_equal(arr, actual.x)
    idx2 = [0, 2]
    desired = lar1[idx2]
    ale(actual, desired, original=lar1)
def test_getitem_31():
    "larry.getitem #31"
    lar1, lar2 = make_larrys()
    lar1.label[0] = ['a', 'b', 'c']
    lar1.label[1] = ['A', 'B']
    lar2.label[0] = ['a', 'b', 'c', 'd']
    idx1 = larry([True, False], label=[['A', 'B']])
    actual = lar1[:,idx1]
    arr = lar1.x[:,idx1.x]
    assert_array_equal(arr, actual.x)
    idx2 = [0]
    desired = lar1[:,idx2]
    ale(actual, desired, original=lar1)
| StarcoderdataPython |
9669382 | # Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
"""Sigma Delta neuron."""
import torch
from . import base
from ..dendrite import Sigma
from ..axon import Delta
def neuron_params(device_params, scale=1 << 6):
    """Translate device parameters into neuron parameters.

    Parameters
    ----------
    device_params : dict
        device parameter specification (must contain 'vThMant').
    scale : int, optional
        neuron state scale. Defaults to 1 << 6.

    Returns
    -------
    dict
        keyword arguments suitable for initializing the Neuron class.
    """
    # The hardware threshold mantissa maps to a real-valued threshold by
    # dividing out the state scale.
    threshold = device_params['vThMant'] / scale
    return {'threshold': threshold}
class Neuron(base.Neuron):
    """This is the implementation of Sigma-Delta wrapper neuron.
    The internal state representations are scaled down compared to
    the actual hardware implementation. This allows for a natural range of
    synaptic weight values as well as the gradient parameters.
    The neuron parameters like threshold, decays are represented as real
    values. They internally get converted to fixed precision representation of
    the hardware. It also provides properties to access the neuron
    parameters in fixed precision states. The parameters are internally clamped
    to the valid range.
    Parameters
    ----------
    threshold : float
        neuron threshold.
    activation: fx-ptr or lambda
        The neuron activation class instance that needs to be wrapped
        by sigma-delta unit. For e.g. ``torch.nn.functional.relu`` would
        give sigma-delta-relu unit.
    tau_grad : float, optional
        time constant of spike function derivative. Defaults to 1.
    scale_grad : float, optional
        scale of spike function derivative. Defaults to 1.
    scale : int, optional
        scale of the internal state. `scale=1` will result in values in the
        range expected from the of Loihi hardware. Defaults to 1 << 6.
    cum_error : bool, optional
        flag to enable/disable residual state of delta unit. Defaults to False.
    norm : fx-ptr or lambda, optional
        normalization function on the dendrite output. None means no
        normalization. Defaults to None.
    dropout : fx-ptr or lambda, optional
        neuron dropout method. None means no normalization. Defaults to None.
    shared_param : bool, optional
        flag to enable/disable shared parameter neuron group. If it is
        False, individual parameters are assigned on a per-channel basis.
        Defaults to True.
    persistent_state : bool, optional
        flag to enable/disable persistent state between iterations.
        Defaults to False.
    requires_grad : bool, optional
        flag to enable/disable learning on neuron parameter. Defaults to False.
    """
    def __init__(
        self, threshold, activation,
        tau_grad=1, scale_grad=1,
        scale=1 << 6, cum_error=False,
        norm=None, dropout=None,
        shared_param=True, persistent_state=False, requires_grad=False
    ):
        """ """
        super(Neuron, self).__init__(
            threshold=threshold,
            w_scale=scale,
            # state scale is the weight scale multiplied by another 1 << 6
            s_scale=scale * (1 << 6),
            norm=norm,
            dropout=dropout,
            persistent_state=persistent_state,
            shared_param=shared_param,
            requires_grad=requires_grad
        )
        # Sigma-delta messages are graded (real-valued), not unit spikes.
        self.graded_spike = True
        self.activation = activation
        self.sigma = Sigma(persistent_state=self.persistent_state)
        self.delta = Delta(
            threshold=self._threshold,
            min_threshold=1 / self.w_scale,
            # so that effective minimum quantized threshold will be 1
            tau_grad=tau_grad,
            scale_grad=scale_grad,
            cum_error=cum_error,
            shared_param=self.shared_param,
            persistent_state=self.persistent_state,
            requires_grad=self.requires_grad
        )
        # set_bias() must be called before the bias is applied in forward().
        self.bias_is_set = False
        self.register_parameter(
            'bias',
            torch.nn.Parameter(
                torch.FloatTensor([0]),
                requires_grad=self.requires_grad
            )
        )
    @property
    def device(self):
        """The device memory (cpu/cuda) where the object lives."""
        return self.delta.pre_state.device
    # @property
    # def shape(self):
    #     """The shape of the layer
    #     """
    #     assert self.sigma.shape == self.delta.shape, \
    #         f'The shape of sigma and delta do not match. '\
    #         f'Found {self.sigma.shape=} and {self.delta.shape=}.'
    #     return self.sigma.shape
    @property
    def threshold(self):
        """Neuron threshold"""
        return self.delta.threshold
    @property
    def scale(self):
        """Scale difference between slayer representation and hardware
        representation of the variable states."""
        return self.w_scale
    @property
    def device_params(self):
        """Dictionary of device parameters."""
        return {
            'type': 'SDNN',
            'activation': self.activation.__name__,
            'vThMant': self.v_th_mant,
        }
    def set_bias(self, bias):
        """Sets the bias for sigma-delta unit
        Parameters
        ----------
        bias : torch tensor
            bias corresponding to each neuron.
        """
        # Before the layer shape is known, store the bias as given; after,
        # reshape it to match the neuron layout.
        if self.shape is None:
            self.bias.data = bias.to(self.bias.device)
        else:
            self.bias.data = bias.reshape(self.shape).to(self.bias.device)
        self.bias_is_set = True
    def forward(self, input):
        """Computes the full response of the neuron instance to an input.
        The input shape must match with the neuron shape. For the first time,
        the neuron shape is determined from the input automatically.
        Parameters
        ----------
        input : torch tensor
            Input tensor.
        Returns
        -------
        torch tensor
            graded spike response of the neuron.
        """
        # dendrite computation
        dend = self.sigma(input)
        if self.norm is not None:
            dend = self.norm(dend)
        # neuron computation
        if self.bias_is_set:
            # Add leading and trailing singleton dims so the per-neuron bias
            # broadcasts (presumably over batch and time -- confirm).
            axon = self.delta(
                self.activation(
                    dend + torch.unsqueeze(
                        torch.unsqueeze(self.bias, dim=0),
                        dim=-1
                    )
                ))
        else:
            axon = self.delta(self.activation(dend))
        # axon = self.quantize_8bit(axon*2)/2
        # axon computation
        if self.drop is not None:
            axon = self.drop(axon)
        # Lazily record the layer shape on first use, after checking that the
        # sigma and delta units agree on it.
        if self.shape is None:
            if self.sigma.shape != self.delta.shape:
                raise AssertionError(
                    f'The shape of sigma and delta do not match. '
                    f'Found {self.sigma.shape=} and {self.delta.shape=}.'
                )
            self.shape = self.sigma.shape
        return axon
| StarcoderdataPython |
6529433 | from django.db import DatabaseError
from django.test import TestCase
from app.models import SmallInteger
class SmallIntegerTests(TestCase):
    """Create/read and boundary-value tests for SmallInteger.small_integer."""
    def setUp(self):
        # Two fixture rows; only their ids are kept so each test re-fetches fresh.
        self.int0_id = SmallInteger.objects.create(small_integer=0).id
        self.int1_id = SmallInteger.objects.create(small_integer=1111).id
    def test_create_integer(self):
        int0 = SmallInteger.objects.get(id=self.int0_id)
        int1 = SmallInteger.objects.get(id=self.int1_id)
        self.assertEqual(int0.small_integer, 0)
        self.assertEqual(int1.small_integer, 1111)
        self.assertLess(int0.small_integer, int1.small_integer)
        self.assertGreater(int1.small_integer, int0.small_integer)
    def test_extremal_values(self):
        # 18446744073709551615 == 2**64 - 1 and -9223372036854775808 == -2**63;
        # values beyond these bounds must be rejected by the backend.
        int_biggest = SmallInteger.objects.create(small_integer=18446744073709551615)
        self.assertEqual(int_biggest.small_integer, 18446744073709551615)
        int_smallest = SmallInteger.objects.create(small_integer=-9223372036854775808)
        self.assertEqual(int_smallest.small_integer, -9223372036854775808)
        self.assertLess(int_smallest.small_integer, int_biggest.small_integer)
        with self.assertRaises(Exception):
            SmallInteger.objects.create(small_integer=18446744073709551616)
        with self.assertRaises(Exception):
            SmallInteger.objects.create(small_integer=-9223372036854776840)
| StarcoderdataPython |
3399722 | <reponame>lochbrunner/ant-challenge
from dataclasses import dataclass
from typing import List, Optional
from abc import ABC, abstractmethod
from enum import Enum
class Semantic(Enum):
    """What an ant's view ray can hit in the world."""
    ALY = 1
    ENEMY = 2
    SUGAR = 3
    OWN_HILL = 4
    OTHERS_HILL = 5
@dataclass
class ViewRay:
    """One ray of the ant's vision: how far away, and what was seen."""
    distance: float
    # NOTE(review): "sematic" looks like a typo for "semantic"; renaming the
    # field would break existing users, so it is only flagged here.
    sematic: Semantic
@dataclass
class Smell:
    """A scent marker perceived at the ant's position."""
    strength: float  # 0 to 1
    ally_code: int  # custom codes
    enemy_code: int  # custom codes
@dataclass
class Perception:
    """Everything an ant senses in a single tick."""
    touch: bool  # did the agent hit something
    velocity: float  # current velocity of ant
    view: List[ViewRay]  # fixed order of angles
    smell: List[Smell]  # unordered list of smells
class Activity(Enum):
    """What the ant's body is doing during the tick."""
    NONE = 1
    CARRY = 2
    FIGHT = 3
@dataclass
class Action:
    """The decision an ant returns for one tick."""
    turn: float  # angle
    accelerate: float
    activity: Activity
    create_smell: Optional[int]  # custom codes
class BaseAnt(ABC):
    """Interface that every ant agent implementation must satisfy."""
    @abstractmethod
    def think(self, perception: Perception) -> Action:
        """Map the current perception to the next action."""
        pass
| StarcoderdataPython |
# Package version marker; running the file directly prints it.
Version = "2.0"

if __name__ == "__main__":
    print(Version)
| StarcoderdataPython |
9700291 | # Generated by Django 3.0.7 on 2020-08-07 03:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration creating the Message model
    (content, timestamp, author FK to syndication_app.User).

    NOTE(review): declared `initial = True` yet it depends on an existing
    syndication_app migration -- confirm that combination is intended.
    """
    initial = True
    dependencies = [
        ('syndication_app', '0005_auto_20200807_0011'),
    ]
    operations = [
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='author_messages', to='syndication_app.User')),
            ],
        ),
    ]
| StarcoderdataPython |
3287711 | <filename>app/urls.py
from django.urls import path
from .views import *
# CRUD endpoints for tasks; the view callables come from the star import above.
urlpatterns = [
    path('', apioverview),
    path('task-list/', tasklist),
    path('task-detail/<int:pk>/', taskdetail),
    path('task-create/', taskcreate),
    path('task-update/<int:pk>/', taskupdate),
    path('task-delete/<int:pk>/', taskdelete),
]
| StarcoderdataPython |
358403 | # Generated by Django 3.2.5 on 2021-07-21 22:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration creating the Survey model."""
    dependencies = [
        ('accounts', '0003_user_staff'),
    ]
    operations = [
        migrations.CreateModel(
            name='Survey',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('finished', models.BooleanField(default=False, verbose_name='finished')),
                ('link', models.URLField(max_length=1024, verbose_name='link_encuesta')),
                ('name', models.CharField(max_length=100, verbose_name='nombre_encuesta')),
                ('avg_duration', models.FloatField(default=5.0, verbose_name='duracion_promedio')),
                ('prize', models.CharField(default='', max_length=200, verbose_name='premio')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='fecha_de_creacion')),
                ('updated_at', models.DateTimeField(auto_now=True, verbose_name='fecha_ultima_actualizacion')),
            ],
        ),
    ]
| StarcoderdataPython |
23785 | <reponame>misantroop/jsonpickle
# -*- coding: utf-8 -*-
"""Test miscellaneous objects from the standard library"""
import uuid
import unittest
import jsonpickle
class UUIDTestCase(unittest.TestCase):
    """uuid.UUID objects must survive a jsonpickle encode/decode round trip."""
    def test_random_uuid(self):
        u = uuid.uuid4()
        encoded = jsonpickle.encode(u)
        decoded = jsonpickle.decode(encoded)
        expect = u.hex
        actual = decoded.hex
        self.assertEqual(expect, actual)
    def test_known_uuid(self):
        # Fixed value so a failure is reproducible.
        expect = '28b56adbd18f44e2a5556bba2f23e6f6'
        exemplar = uuid.UUID(expect)
        encoded = jsonpickle.encode(exemplar)
        decoded = jsonpickle.decode(encoded)
        actual = decoded.hex
        self.assertEqual(expect, actual)
class BytesTestCase(unittest.TestCase):
    """Arbitrary byte strings must survive a jsonpickle round trip."""
    def test_bytestream(self):
        # Binary blob (starts with the HDF5 magic bytes) exercising
        # non-UTF-8 byte sequences.
        expect = (b'\x89HDF\r\n\x1a\n\x00\x00\x00\x00\x00\x08\x08\x00'
                  b'\x04\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00'
                  b'\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xffh'
                  b'\x848\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff'
                  b'\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00`\x00\x00'
                  b'\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00'
                  b'\x00\x88\x00\x00\x00\x00\x00\x00\x00\xa8\x02\x00'
                  b'\x00\x00\x00\x00\x00\x01\x00\x01\x00')
        encoded = jsonpickle.encode(expect)
        actual = jsonpickle.decode(encoded)
        self.assertEqual(expect, actual)
def suite():
    """Build the test suite for this module.

    Uses ``TestLoader.loadTestsFromTestCase`` instead of
    ``unittest.makeSuite``, which was deprecated in Python 3.11 and
    removed in Python 3.13.
    """
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    suite.addTest(loader.loadTestsFromTestCase(UUIDTestCase))
    suite.addTest(loader.loadTestsFromTestCase(BytesTestCase))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| StarcoderdataPython |
159073 | from aiida_firecrest.scheduler import FirecrestScheduler
from aiida_firecrest.transport import FirecrestTransport
def test_init_scheduler():
    """Smoke test: FirecrestScheduler can be constructed with no arguments."""
    FirecrestScheduler()
def init_transport(firecrest_server):
    """Build a FirecrestTransport from the test fixture's connection details."""
    return FirecrestTransport(
        url=firecrest_server.url,
        token_uri=firecrest_server.token_uri,
        client_id=firecrest_server.client_id,
        client_secret=firecrest_server.client_secret,
        machine=firecrest_server.machine,
    )
def test_init_transport(firecrest_server):
    """Smoke test: the transport can be built from the server fixture."""
    init_transport(firecrest_server)
def test_path_exists(firecrest_server):
    """An existing directory is reported present; a missing file is not."""
    transport = init_transport(firecrest_server)
    scratch = firecrest_server.scratch_path
    assert transport.path_exists(scratch)
    assert not transport.path_exists(scratch + "/file.txt")
def test_isdir(firecrest_server):
    """The scratch path is a directory; a non-existent subfolder is not."""
    transport = init_transport(firecrest_server)
    scratch = firecrest_server.scratch_path
    assert transport.isdir(scratch)
    assert not transport.isdir(scratch + "/other")
def test_mkdir(firecrest_server):
    """A freshly created directory becomes visible via isdir."""
    transport = init_transport(firecrest_server)
    new_dir = firecrest_server.scratch_path + "/test"
    transport.mkdir(new_dir)
    assert transport.isdir(new_dir)
def test_putfile(firecrest_server, tmp_path):
    """Uploading a local file makes it appear on the remote side."""
    transport = init_transport(firecrest_server)
    remote_path = firecrest_server.scratch_path + "/file.txt"
    assert not transport.isfile(remote_path)
    local_path = tmp_path / "file.txt"
    local_path.write_text("test")
    transport.putfile(str(local_path), remote_path)
    assert transport.isfile(remote_path)
def test_listdir(firecrest_server):
    """A fresh scratch directory lists as empty."""
    transport = init_transport(firecrest_server)
    assert transport.listdir(firecrest_server.scratch_path) == []
    # TODO make file/folder then re-test
| StarcoderdataPython |
8077161 |
from . import fnoUtils as utils
def getDailyData(instrumentType, dailyData):
    '''
    Returns all daily data in ascending date order after doing the following conversion.

    Current in-memory dailyData format (a dict keyed by date string, each value an
    object whose getDailyDataInfo() returns the per-day field dict):
        {
            date1-string : {date: date1, open: open1, high: high1, ...,
                            changeInOpenInterest: changeInOpenInterest1},
            date2-string : {date: date2, open: open2, high: high2, ...,
                            changeInOpenInterest: changeInOpenInterest2},
            ...
        }

    Returned format (every list in ascending date order):
        {
            date : [date1-string, date2-string, date3-string, ...],
            open : [open1, open2, open3, ...],
            ...
            changeInOpenInterest : [changeInOpenInterest1, changeInOpenInterest2, ...]
        }
    '''
    returnData = {}
    # Pick the field-name map for the requested instrument type.  Unknown
    # instrument types leave the map empty (the 'date' lookup below will then
    # raise KeyError, matching the original if/elif behaviour).
    fieldMapByInstrument = {
        utils.instruments['stockOptions']: utils.stkOptDailyDataFields,
        utils.instruments['stockFutures']: utils.stkFutDailyDataFields,
        utils.instruments['indexOptions']: utils.idxOptDailyDataFields,
        utils.instruments['indexFutures']: utils.idxFutDailyDataFields,
    }
    dailyDataFields = fieldMapByInstrument.get(instrumentType, {})
    # Sort chronologically via real date objects (plain string sorting would be
    # wrong for most date formats), then convert back to strings for lookups.
    sortedDtDateList = sorted(utils.convertStringToDate(sDate) for sDate in dailyData.keys())
    sortedStrDateList = [utils.convertDateToString(dtDate) for dtDate in sortedDtDateList]
    # First row of the result: the sorted date strings.
    returnData.update({dailyDataFields['date']: sortedStrDateList})
    # One chronologically ordered column per remaining field.
    # BUGFIX: the 'date' field is now actually skipped here -- the original
    # loop's comment claimed to leave it out but the code re-added it,
    # duplicating (or shadowing) the date row built above.
    for field in dailyDataFields:
        if field == 'date':
            continue
        returnData.update({field: [dailyData[strDate].getDailyDataInfo()[field]
                                   for strDate in sortedStrDateList]})
    return returnData
def getDailyDataForAInterval(instrumentType, dailyData, startDate, endDate):
    '''
    Same conversion as getDailyData(), restricted to dates in
    [startDate, endDate] (both inclusive).

    startDate: datetime.date() object
    endDate: datetime.date() object

    Returned format (every list in ascending date order):
        {
            date : [date1-string, date2-string, date3-string, ...],
            open : [open1, open2, open3, ...],
            ...
            changeInOpenInterest : [changeInOpenInterest1, changeInOpenInterest2, ...]
        }
    '''
    returnData = {}
    # Pick the field-name map for the requested instrument type (empty map for
    # unknown types, matching the original if/elif behaviour).
    fieldMapByInstrument = {
        utils.instruments['stockOptions']: utils.stkOptDailyDataFields,
        utils.instruments['stockFutures']: utils.stkFutDailyDataFields,
        utils.instruments['indexOptions']: utils.idxOptDailyDataFields,
        utils.instruments['indexFutures']: utils.idxFutDailyDataFields,
    }
    dailyDataFields = fieldMapByInstrument.get(instrumentType, {})
    # Keep only dates inside the requested closed interval, then sort them
    # chronologically via real date objects.
    dtDateList = [utils.convertStringToDate(sDate) for sDate in dailyData.keys()]
    selectedDtDates = [dtDate for dtDate in dtDateList if startDate <= dtDate <= endDate]
    sortedDtDateList = sorted(selectedDtDates)
    sortedStrDateList = [utils.convertDateToString(dtDate) for dtDate in sortedDtDateList]
    # First row of the result: the sorted date strings.
    returnData.update({dailyDataFields['date']: sortedStrDateList})
    # One chronologically ordered column per remaining field.
    # BUGFIX: the 'date' field is now actually skipped here -- the original
    # loop's comment claimed to leave it out but the code re-added it,
    # duplicating (or shadowing) the date row built above.
    for field in dailyDataFields:
        if field == 'date':
            continue
        returnData.update({field: [dailyData[strDate].getDailyDataInfo()[field]
                                   for strDate in sortedStrDateList]})
    return returnData
| StarcoderdataPython |
12842139 | <filename>codes/scripts/generate_mod_LR_bic.py
import os
import sys
import cv2
import numpy as np
import torch
try:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from data.util import imresize_np
from utils import util
except ImportError:
pass
def generate_mod_LR_bic():
    """Generate the image sets needed for IKC-style SR experiments.

    For every PNG in ``sourcedir`` this writes, under ``savedir``:
      - HR:     the image mod-cropped to a multiple of ``mod_scale``
      - LRblur: the HR image blurred with a Gaussian kernel and downscaled
      - LR:     the bicubically downscaled HR image
      - Bic:    the LR image bicubically upscaled back to HR size
    and finally saves the per-image blur-kernel PCA codes as a tensor.
    """
    def _ensure_dir(path, warn_if_exists=False):
        # Create *path* if missing; for leaf output dirs, warn that existing
        # content will be overwritten (same message as the original script).
        if not os.path.isdir(path):
            os.mkdir(path)
        elif warn_if_exists:
            print('It will cover ' + str(path))

    # set parameters
    up_scale = 4
    mod_scale = 4
    # set data dir (NOTE: hard-coded, machine-specific paths)
    sourcedir = '/mnt/yjchai/SR_data/Set5' #'/mnt/yjchai/SR_data/DIV2K_test_HR' #'/mnt/yjchai/SR_data/Flickr2K/Flickr2K_HR'
    savedir = '/mnt/yjchai/SR_data/Set5_test' #'/mnt/yjchai/SR_data/DIV2K_test' #'/mnt/yjchai/SR_data/Flickr2K_train'
    # set random seed so the generated kernels are reproducible
    util.set_random_seed(0)
    # load PCA matrix used to project 21x21 blur kernels to 10-D codes
    print('load PCA matrix')
    pca_matrix = torch.load('/media/sdc/yjchai/IKC/codes/pca_matrix.pth', map_location=lambda storage, loc: storage)
    print('PCA matrix shape: {}'.format(pca_matrix.shape))
    saveHRpath = os.path.join(savedir, 'HR', 'x' + str(mod_scale))
    saveLRpath = os.path.join(savedir, 'LR', 'x' + str(up_scale))
    saveBicpath = os.path.join(savedir, 'Bic', 'x' + str(up_scale))
    saveLRblurpath = os.path.join(savedir, 'LRblur', 'x' + str(up_scale))
    if not os.path.isdir(sourcedir):
        print('Error: No source data found')
        # BUGFIX: exit with a non-zero status on error (was exit(0), which
        # reported success to the calling shell).
        sys.exit(1)
    # Build the output tree: parents first, then the leaf 'x<scale>' dirs
    # (leaf dirs warn when they already exist, as before).
    for parent in (savedir,
                   os.path.join(savedir, 'HR'),
                   os.path.join(savedir, 'LR'),
                   os.path.join(savedir, 'Bic'),
                   os.path.join(savedir, 'LRblur')):
        _ensure_dir(parent)
    for leaf in (saveHRpath, saveLRpath, saveBicpath, saveLRblurpath):
        _ensure_dir(leaf, warn_if_exists=True)
    filepaths = sorted([f for f in os.listdir(sourcedir) if f.endswith('.png')])
    print(filepaths)
    num_files = len(filepaths)
    kernel_map_tensor = torch.zeros((num_files, 1, 10))  # each kernel map: 1*10
    # prepare data with augmentation
    for i in range(num_files):
        filename = filepaths[i]
        print('No.{} -- Processing {}'.format(i, filename))
        # read image
        image = cv2.imread(os.path.join(sourcedir, filename))
        width = int(np.floor(image.shape[1] / mod_scale))
        height = int(np.floor(image.shape[0] / mod_scale))
        # modcrop so both dimensions are exact multiples of mod_scale
        if len(image.shape) == 3:
            image_HR = image[0:mod_scale * height, 0:mod_scale * width, :]
        else:
            image_HR = image[0:mod_scale * height, 0:mod_scale * width]
        # LR_blur, by a (here fixed-sigma) gaussian kernel
        img_HR = util.img2tensor(image_HR)
        C, H, W = img_HR.size()
        # sig_list = [1.8, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0, 3.2]
        sig = 2.6
        prepro = util.SRMDPreprocessing(up_scale, pca_matrix, random=False, para_input=10, kernel=21, noise=False,
                                        cuda=True, sig=sig, sig_min=0.2, sig_max=4.0, rate_iso=1.0, scaling=3,
                                        rate_cln=0.2, noise_high=0.0) #random(sig_min, sig_max) | stable kernel(sig)
        LR_img, ker_map = prepro(img_HR.view(1, C, H, W))
        image_LR_blur = util.tensor2img(LR_img)
        cv2.imwrite(os.path.join(saveLRblurpath, 'sig{}_'.format(str(sig)) + filename), image_LR_blur)
        # LR
        image_LR = imresize_np(image_HR, 1 / up_scale, True)
        # bic
        image_Bic = imresize_np(image_LR, up_scale, True)
        cv2.imwrite(os.path.join(saveHRpath, filename), image_HR)
        cv2.imwrite(os.path.join(saveLRpath, filename), image_LR)
        cv2.imwrite(os.path.join(saveBicpath, filename), image_Bic)
        kernel_map_tensor[i] = ker_map
    # save dataset corresponding kernel maps
    torch.save(kernel_map_tensor, './Set5_sig2.6_kermap.pth')
    # BUGFIX: typo in the final status message ("Down smaple" -> "Down sample")
    print("Image Blurring & Down sample Done: X" + str(up_scale))


if __name__ == "__main__":
    generate_mod_LR_bic()
| StarcoderdataPython |
5104158 | # -*- coding: utf-8 -*-
"""One-component GEMC through RaspaBaseWorkChain"""
from __future__ import absolute_import
from __future__ import print_function

import sys

import click

from aiida.common import NotExistent
from aiida.engine import run, submit
from aiida.orm import Code, Dict
from aiida.orm import Str  # needed by main(): builder.molecule = Str('xenon')
from aiida.plugins import WorkflowFactory
VLCCWorkChain = WorkflowFactory('matdis.vlcc') # pylint: disable=invalid-name
@click.command('cli')
@click.argument('codelabel')
def main(codelabel):
    """Run a one-component VLCC workchain for xenon through RaspaBaseWorkChain,
    using the Raspa code identified by ``codelabel``.
    """
    # pylint: disable=no-member
    try:
        code = Code.get_from_string(codelabel)
    except NotExistent:
        print("The code '{}' does not exist".format(codelabel))
        sys.exit(1)
    # BUGFIX: the message now describes what is actually run (xenon VLCC,
    # not "methane GEMC").
    print("Testing RASPA xenon VLCC through RaspaBaseWorkChain ...")
    # Constructing builder
    builder = VLCCWorkChain.get_builder()
    # Specifying the code
    builder.raspa_base.raspa.code = code
    # Fixer invoked by the base workchain to sanity-check the GEMC boxes
    builder.raspa_base.fixers = {
        'fixer_001': ('aiida_raspa.utils', 'check_gemc_box')
    }
    # BUGFIX: `Str` was used here without being imported (NameError at
    # runtime); it is now imported from aiida.orm at module level.
    builder.molecule = Str('xenon')
    builder.parameters = Dict(
        dict={
            'raspa_init_cycles': 200,  # Default: 1e3
            'raspa_prod_cycles': 200,  # Default: 1e4
            'box_one_nmols': 100,
            'box_two_nmols': 100,
            'box_one_length': 30,
            'box_two_length': 30,
            'temperature_list': [170],
            # 'T_min': 280,
            # 'T_max': 300,
            # 'dT': 10,
        })
    # Specifying the scheduler options
    builder.raspa_base.raspa.metadata.options = {
        "resources": {
            "num_machines": 1,
            "num_mpiprocs_per_machine": 1,
        },
        "max_wallclock_seconds": 1 * 30 * 60,  # 30 min
        "withmpi": False,
    }
    run(builder)


if __name__ == '__main__':
    main()  # pylint: disable=no-value-for-parameter
# EOF
| StarcoderdataPython |
9728202 | from batchglm.models.base import _Estimator_Base, _EstimatorStore_XArray_Base
from batchglm.models.base import _InputData_Base
from batchglm.models.base import _Model_Base, _Model_XArray_Base
from batchglm.models.base import _Simulator_Base
from batchglm.models.base import INPUT_DATA_PARAMS
import batchglm.data as data_utils
from batchglm.utils.linalg import groupwise_solve_lm
from batchglm.utils.numeric import weighted_mean, weighted_variance | StarcoderdataPython |
1683502 | """
This module contains the implementation of the Classes: ModelGenerationMushroomOnline, ModelGenerationMushroomOnlineDQN,
ModelGenerationMushroomOnlineAC, ModelGenerationMushroomOnlinePPO, ModelGenerationMushroomOnlineSAC,
ModelGenerationMushroomOnlineDDPG and ModelGenerationMushroomOnlineGPOMDP.
The Class ModelGenerationMushroomOnline inherits from the Class ModelGeneration.
The Classes ModelGenerationMushroomOnlineDQN, ModelGenerationMushroomOnlineAC and ModelGenerationMushroomOnlineGPOMDP inherit
from the Class ModelGenerationMushroomOnline.
The Classes ModelGenerationMushroomOnlinePPO, ModelGenerationMushroomOnlineSAC and ModelGenerationMushroomOnlineDDPG inherit
from the Class ModelGenerationMushroomOnlineAC.
"""
import copy
import numpy as np
from abc import abstractmethod
import matplotlib.pyplot as plt
from mushroom_rl.utils.spaces import Discrete
from mushroom_rl.policy import EpsGreedy, GaussianTorchPolicy, BoltzmannTorchPolicy, StateStdGaussianPolicy
from mushroom_rl.policy import OrnsteinUhlenbeckPolicy
from mushroom_rl.utils.parameters import LinearParameter
from mushroom_rl.algorithms.value.dqn import DQN
from mushroom_rl.algorithms.actor_critic.deep_actor_critic import PPO, SAC, DDPG
from mushroom_rl.algorithms.policy_search import GPOMDP
from mushroom_rl.utils.replay_memory import ReplayMemory
from mushroom_rl.approximators.parametric import TorchApproximator
from mushroom_rl.approximators.parametric import LinearApproximator
from mushroom_rl.approximators.regressor import Regressor
from mushroom_rl.utils.optimizers import AdaptiveOptimizer
from mushroom_rl.core import Core
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from ARLO.block.block_output import BlockOutput
from ARLO.block.model_generation import ModelGeneration
from ARLO.hyperparameter.hyperparameter import Real, Integer, Categorical
class ModelGenerationMushroomOnline(ModelGeneration):
    """
    This Class is used to contain all the common methods for the online model generation algorithms that are implemented in
    MushroomRL: the learn/evaluate loop, evaluation bookkeeping, plotting and saving.
    """
    def __repr__(self):
        # Verbose single-line representation listing every configuration member.
        return str(self.__class__.__name__)+'('+'eval_metric='+str(self.eval_metric)+', obj_name='+str(self.obj_name)\
               +', seeder='+ str(self.seeder)+', local_prng='+ str(self.local_prng)+', model='+str(self.model)\
               +', algo_params='+str(self.algo_params)+', log_mode='+str(self.log_mode)\
               +', checkpoint_log_path='+str(self.checkpoint_log_path)+', verbosity='+str(self.verbosity)\
               +', n_jobs='+str(self.n_jobs)+', job_type='+str(self.job_type)\
               +', deterministic_output_policy='+str(self.deterministic_output_policy)\
               +', works_on_online_rl='+str(self.works_on_online_rl)+', works_on_offline_rl='+str(self.works_on_offline_rl)\
               +', works_on_box_action_space='+str(self.works_on_box_action_space)\
               +', works_on_discrete_action_space='+str(self.works_on_discrete_action_space)\
               +', works_on_box_observation_space='+str(self.works_on_box_observation_space)\
               +', works_on_discrete_observation_space='+str(self.works_on_discrete_observation_space)\
               +', pipeline_type='+str(self.pipeline_type)+', is_learn_successful='+str(self.is_learn_successful)\
               +', is_parametrised='+str(self.is_parametrised)+', block_eval='+str(self.block_eval)\
               +', algo_params_upon_instantiation='+str(self.algo_params_upon_instantiation)\
               +', logger='+str(self.logger)+', fully_instantiated='+str(self.fully_instantiated)\
               +', info_MDP='+str(self.info_MDP)+')'
    def learn(self, train_data=None, env=None):
        """
        Parameters
        ----------
        train_data: This can be a dataset that will be used for training. It must be an object of a Class inheriting from
                    Class BaseDataSet.
                    The default is None.
        env: This must be a simulator/environment. It must be an object of a Class inheriting from Class BaseEnvironment.
             The default is None.
        Returns
        -------
        res: This is an object of Class BlockOutput containing the learnt policy. If something went wrong in the execution
             of the method the object of Class BlockOutput is empty.
        This method alternates between learning the RL algorithm and evaluating it, once per epoch.
        """
        #resets is_learn_successful to False, checks pipeline_type, checks the types of train_data and env, and makes sure
        #that they are not both None and selects the right inputs:
        starting_train_data_and_env = super().learn(train_data=train_data, env=env)
        #if super().learn() returned something that is of Class BlockOutput it means that up in the chain there was an error
        #and i need to return here the empty object of Class BlockOutput
        if(isinstance(starting_train_data_and_env, BlockOutput)):
            return BlockOutput(obj_name=self.obj_name)
        #since this is an online block we only have an environment, which is the second element of the list
        #starting_train_data_and_env
        starting_env = starting_train_data_and_env[1]
        #if i have a method called _default_network() it means I am using a PyTorch network. This is ok: MushroomRL does not
        #allow the use of other deep learning frameworks so there are not going to be issues:
        if(hasattr(self, '_default_network')):
            #sets torch number of threads
            torch.set_num_threads(self.n_jobs)
        #create core object with starting_env:
        self._create_core(env=starting_env)
        self.dict_of_evals = {}
        #if the algorithm has a replay buffer i fill it randomly:
        if('initial_replay_size' in list(self.algo_params.keys())):
            #fill replay memory with random dataset
            self.core.learn(n_steps=self.algo_params['initial_replay_size'].current_actual_value,
                            n_steps_per_fit=self.algo_params['initial_replay_size'].current_actual_value,
                            quiet=True)
        #evaluation step before any training (epoch 0 baseline):
        res = BlockOutput(obj_name=str(self.obj_name)+'_result', log_mode=self.log_mode,
                          checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity,
                          policy=self.construct_policy(policy=self.algo_object.policy, regressor_type=self.regressor_type))
        if(self.deterministic_output_policy):
            #If this method is called then in the metric DiscountedReward you can use batch_eval
            res.make_policy_deterministic()
        starting_eval = self.eval_metric.evaluate(block_res=res, env=starting_env)
        #update dict_of_evals:
        self.update_dict_of_evals(current_epoch=0, single_episodes_eval=self.eval_metric.single_episode_evaluations,
                                  env=starting_env)
        self.logger.info(msg='Starting evaluation: '+str(starting_eval))
        for n_epoch in range(self.algo_params['n_epochs'].current_actual_value):
            self.logger.info(msg='Epoch: '+str(n_epoch))
            #learning step:
            self.core.learn(n_steps=self.algo_params['n_steps'].current_actual_value,
                            n_steps_per_fit=self.algo_params['n_steps_per_fit'].current_actual_value,
                            n_episodes=self.algo_params['n_episodes'].current_actual_value,
                            n_episodes_per_fit=self.algo_params['n_episodes_per_fit'].current_actual_value,
                            quiet=True)
            #evaluation step: wrap the current policy in a fresh BlockOutput and score it
            res = BlockOutput(obj_name=str(self.obj_name)+'_result', log_mode=self.log_mode,
                              checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity,
                              policy=self.construct_policy(policy=self.algo_object.policy, regressor_type=self.regressor_type))
            if(self.deterministic_output_policy):
                #If this method is called then in the metric DiscountedReward you can use batch_eval
                res.make_policy_deterministic()
            tmp_eval = self.eval_metric.evaluate(block_res=res, env=starting_env)
            self.logger.info(msg='Current evaluation: '+str(tmp_eval))
            #update dict_of_evals
            self.update_dict_of_evals(current_epoch=n_epoch+1, single_episodes_eval=self.eval_metric.single_episode_evaluations,
                                      env=starting_env)
        self.is_learn_successful = True
        self.logger.info(msg='\''+str(self.__class__.__name__)+'\' object learnt successfully!')
        return res
    def plot_dict_of_evals(self):
        """
        This method plots the dict_of_evals of the block (mean discounted reward per
        environment step, with a standard-deviation band when more than one episode
        was evaluated per epoch).
        """
        #NOTE(review): despite the original docstring claiming so, nothing is written
        #to disk here -- only plt.show() is called.
        x = np.array(list(self.dict_of_evals.keys()))
        if(len(x) == 0):
            exc_msg = 'The \'dict_of_evals\' is empty!'
            self.logger.exception(msg=exc_msg)
            raise ValueError(exc_msg)
        evals_values = list(self.dict_of_evals.values())
        y = np.array([np.mean(evals_values[i]) for i in range(len(evals_values))])
        std_dev = np.array([np.std(evals_values[i]) for i in range(len(evals_values))])
        plt.figure()
        plt.xlabel('Environment Steps')
        plt.ylabel('Average Discounted Reward')
        plt.title('Average Discounted Reward and Standard Deviation for '+str(self.obj_name))
        plt.grid(True)
        plt.plot(x, y, color='#FF9860')
        if(len(evals_values[0]) > 1):
            plt.fill_between(x, y-std_dev, y+std_dev, alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9860')
        plt.show()
    def update_dict_of_evals(self, current_epoch, single_episodes_eval, env):
        """
        Parameters
        ----------
        current_epoch: This is a non-negative integer and it represents the current epoch.
        single_episodes_eval: This is a list of floats containing the evaluation of the agent over the single episodes, for
                              as many episodes as specified by the eval_metric.
        env: This is the environment in which we are acting. It must be an object of a Class inheriting from the Class
             BaseEnvironment.
        This method updates the dict_of_evals, keyed by the cumulative number of environment steps.
        """
        number_of_steps = self.algo_params['n_steps'].current_actual_value
        if(number_of_steps is None):
            #episode-based training: approximate steps per epoch as horizon * episodes
            number_of_steps = env.horizon*self.algo_params['n_episodes'].current_actual_value
        new_dict = {current_epoch*number_of_steps: single_episodes_eval}
        if(len(list(self.dict_of_evals.keys())) == 0):
            self.dict_of_evals = new_dict
        else:
            self.dict_of_evals = {**self.dict_of_evals, **new_dict}
    def _create_core(self, env):
        """
        Parameters
        ---------
        env: This is the environment in which we are acting. It must be an object of a Class inheriting from the Class
             BaseEnvironment.
        This method updates the value of the core member by creating an object of Class mushroom_rl.core.Core.
        """
        self.core = Core(agent=self.algo_object, mdp=env)
    def analyse(self):
        """
        This method is not yet implemented.
        """
        raise NotImplementedError
    def save(self):
        """
        This method saves to a pickle file the object. Before saving it the core and the algo_object are cleared since these
        two can weigh quite a bit.
        """
        #clean up the core and algo_object: these two, in algorithms that have ReplayMemory, are going to make the output
        #file, created when calling the method save, be very heavy.
        #I need to clean these in a deep copy: otherwise erasing algo_object I cannot call twice in a row the learn method
        #because the algo_object is set in the method set_params
        copy_to_save = copy.deepcopy(self)
        copy_to_save.core = None
        copy_to_save.algo_object = None
        #calls method save() implemented in base Class ModelGeneration of the instance copy_to_save
        super(ModelGenerationMushroomOnline, copy_to_save).save()
class ModelGenerationMushroomOnlineDQN(ModelGenerationMushroomOnline):
"""
This Class implements a specific online model generation algorithm: DQN. This Class wraps the DQN method implemented in
MushroomRL.
cf. https://github.com/MushroomRL/mushroom-rl/blob/dev/mushroom_rl/algorithms/value/dqn/dqn.py
This Class inherits from the Class ModelGenerationMushroomOnline.
"""
    def __init__(self, eval_metric, obj_name, regressor_type='q_regressor', seeder=2, algo_params=None, log_mode='console',
                 checkpoint_log_path=None, verbosity=3, n_jobs=1, job_type='process', deterministic_output_policy=True):
        """
        Parameters
        ----------
        algo_params: This is either None or a dictionary containing all the needed parameters.
                     The default is None.
                     If None then the following parameters will be used:
                     'epsilon': LinearParameter(value=1, threshold_value=0.01, n=1000000)
                     'policy': EpsGreedy(epsilon=LinearParameter(value=1, threshold_value=0.01, n=1000000)),
                     'approximator': TorchApproximator,
                     'network': one hidden layer, 16 neurons,
                     'input_shape': self.info_MDP.observation_space.shape,
                     'n_actions': self.info_MDP.action_space.n,
                     'output_shape': (self.info_MDP.action_space.n,),
                     'optimizer': Adam,
                     'lr': 0.0001,
                     'critic_loss': smooth_l1_loss,
                     'batch_size': 32,
                     'target_update_frequency': 250,
                     'replay_memory': ReplayMemory,
                     'initial_replay_size': 50000,
                     'max_replay_size': 1000000,
                     'clip_reward': False,
                     'n_epochs': 10,
                     'n_steps': None,
                     'n_steps_per_fit': None,
                     'n_episodes': 500,
                     'n_episodes_per_fit': 50,
        regressor_type: This is a string and it can either be: 'action_regressor', 'q_regressor' or 'generic_regressor'.
                        This is used to pick one of the 3 possible kinds of regressor made available by MushroomRL.
                        Note that if you want to use a 'q_regressor' then the picked regressor must be able to perform
                        multi-target regression, as a single regressor is used for all actions.
                        The default is 'q_regressor'.
        deterministic_output_policy: If this is True then the output policy will be rendered deterministic else if False
                                     nothing will be done. Note that the policy is made deterministic only at the end of
                                     the learn() method.
        Non-Parameters Members
        ----------------------
        fully_instantiated: This is True if the block is fully instantiated, False otherwise. It is mainly used to make sure
                            that when we call the learn method the model generation blocks have been fully instantiated as
                            they undergo two stage initialisation being info_MDP unknown at the beginning of the pipeline.
        info_MDP: This is a dictionary compliant with the parameters needed in input to all MushroomRL model generation
                  algorithms. It contains the observation space, the action space, the MDP horizon and the MDP gamma.
        algo_object: This is the object containing the actual model generation algorithm.
        algo_params_upon_instantiation: This is a copy of the original value of algo_params, namely the value of
                                        algo_params that the object got upon creation. This is needed for re-loading
                                        objects.
        model: This is used in set_params in the generic Class ModelGenerationMushroomOnline. With this member we avoid
               re-writing for each Class inheriting from the Class ModelGenerationMushroomOnline the set_params method.
               In this Class this member equals to DQN, which is the Class of MushroomRL implementing DQN.
        core: This is used to contain the Core object of MushroomRL needed to run online RL algorithms.
        The other parameters and non-parameters members are described in the Class Block.
        """
        super().__init__(eval_metric=eval_metric, obj_name=obj_name, seeder=seeder, log_mode=log_mode,
                         checkpoint_log_path=checkpoint_log_path, verbosity=verbosity, n_jobs=n_jobs, job_type=job_type)
        # Capability flags: DQN is an online algorithm for discrete action spaces.
        self.works_on_online_rl = True
        self.works_on_offline_rl = False
        self.works_on_box_action_space = False
        self.works_on_discrete_action_space = True
        self.works_on_box_observation_space = True
        self.works_on_discrete_observation_space = True
        self.regressor_type = regressor_type
        #this block has parameters and I may want to tune them:
        self.is_parametrised = True
        self.algo_params = algo_params
        self.deterministic_output_policy = deterministic_output_policy
        # Two-stage initialisation: full instantiation happens later, once info_MDP is known.
        self.fully_instantiated = False
        self.info_MDP = None
        self.algo_object = None
        self.algo_params_upon_instantiation = copy.deepcopy(self.algo_params)
        self.model = DQN
        self.core = None
        #seeds torch
        torch.manual_seed(self.seeder)
        torch.cuda.manual_seed(self.seeder)
        #this seeding is needed for the policy of MushroomRL. Indeed the evaluation at the start of the learn method is done
        #using the policy and in the method draw_action, np.random is called!
        np.random.seed(self.seeder)
def _default_network(self):
"""
This method creates a default Network with 1 hidden layer and ReLU activation functions.
Returns
-------
Network: the Class wrapper representing the default network.
"""
class Network(nn.Module):
def __init__(self, input_shape, output_shape, **kwargs):
super().__init__()
n_input = input_shape[-1]
n_output = output_shape[0]
self.hl0 = nn.Linear(n_input, 16)
self.hl1 = nn.Linear(16, 16)
self.hl2 = nn.Linear(16, n_output)
nn.init.xavier_uniform_(self.hl0.weight, gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self.hl1.weight, gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self.hl2.weight, gain=nn.init.calculate_gain('relu'))
def forward(self, state, action=None):
h = F.relu(self.hl0(state.float()))
h = F.relu(self.hl1(h))
q = self.hl2(h)
if action is None:
return q
else:
q_acted = torch.squeeze(q.gather(1, action.long()))
return q_acted
return Network
    def full_block_instantiation(self, info_MDP):
        """
        Parameters
        ----------
        info_MDP: This is an object of Class mushroom_rl.environment.MDPInfo. It contains the action and observation spaces,
                  gamma and the horizon of the MDP.
        Returns
        -------
        This method returns True if the algo_params were set successfully, and False otherwise.
        """
        self.info_MDP = info_MDP
        # Build the default hyperparameter set only when none was supplied at construction time.
        if(self.algo_params is None):
            approximator = Categorical(hp_name='approximator', obj_name='approximator_'+str(self.model.__name__),
                                       current_actual_value=TorchApproximator)
            network = Categorical(hp_name='network', obj_name='network_'+str(self.model.__name__),
                                  current_actual_value=self._default_network())
            optimizer_class = Categorical(hp_name='class', obj_name='optimizer_class_'+str(self.model.__name__),
                                          current_actual_value=optim.Adam)
            # Tunable hyperparameters are created with to_mutate=True and a search range.
            lr = Real(hp_name='lr', obj_name='optimizer_lr_'+str(self.model.__name__),
                      current_actual_value=0.0001, range_of_values=[1e-5, 1e-3], to_mutate=True, seeder=self.seeder,
                      log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            critic_loss = Categorical(hp_name='critic_loss', obj_name='critic_loss_'+str(self.model.__name__),
                                      current_actual_value=F.smooth_l1_loss)
            batch_size = Integer(hp_name='batch_size', obj_name='batch_size_'+str(self.model.__name__),
                                 current_actual_value=32, range_of_values=[16, 128], to_mutate=True, seeder=self.seeder,
                                 log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            target_update_frequency = Integer(hp_name='target_update_frequency', current_actual_value=250,
                                              range_of_values=[100,1000], to_mutate=True,
                                              obj_name='target_update_frequency_'+str(self.model.__name__), seeder=self.seeder,
                                              log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                              verbosity=self.verbosity)
            initial_replay_size = Integer(hp_name='initial_replay_size', current_actual_value=50000,
                                          range_of_values=[10000, 100000],
                                          obj_name='initial_replay_size_'+str(self.model.__name__),
                                          to_mutate=True, seeder=self.seeder, log_mode=self.log_mode,
                                          checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            max_replay_size = Integer(hp_name='max_replay_size', current_actual_value=1000000, range_of_values=[10000, 1000000],
                                      obj_name='max_replay_size_'+str(self.model.__name__), to_mutate=True, seeder=self.seeder,
                                      log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                      verbosity=self.verbosity)
            # The replay memory is instantiated here with the current size values.
            replay_memory = Categorical(hp_name='replay_memory', obj_name='replay_memory_'+str(self.model.__name__),
                                        current_actual_value=ReplayMemory(initial_size=initial_replay_size.current_actual_value,
                                                                          max_size=max_replay_size.current_actual_value))
            clip_reward = Categorical(hp_name='clip_reward', obj_name='clip_reward_'+str(self.model.__name__),
                                      current_actual_value=False, possible_values=[True, False], to_mutate=True,
                                      seeder=self.seeder, log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                      verbosity=self.verbosity)
            n_epochs = Integer(hp_name='n_epochs', current_actual_value=10, range_of_values=[1,50], to_mutate=True,
                               obj_name='n_epochs_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
                               checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            # n_steps/n_steps_per_fit default to None: training is episode-based by default.
            n_steps = Integer(hp_name='n_steps', current_actual_value=None, to_mutate=False,
                              obj_name='n_steps_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
                              checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            n_steps_per_fit = Integer(hp_name='n_steps_per_fit', current_actual_value=None,
                                      to_mutate=False, obj_name='n_steps_per_fit_'+str(self.model.__name__), seeder=self.seeder,
                                      log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                      verbosity=self.verbosity)
            n_episodes = Integer(hp_name='n_episodes', current_actual_value=500, range_of_values=[10,1000], to_mutate=True,
                                 obj_name='n_episodes_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
                                 checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            n_episodes_per_fit = Integer(hp_name='n_episodes_per_fit', current_actual_value=50, range_of_values=[1,100],
                                         to_mutate=True, obj_name='n_episodes_per_fit_'+str(self.model.__name__),
                                         seeder=self.seeder, log_mode=self.log_mode,
                                         checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            epsilon = Categorical(hp_name='epsilon', obj_name='epsilon_'+str(self.model.__name__),
                                  current_actual_value=LinearParameter(value=1, threshold_value=0.01, n=1000000))
            dict_of_params = {'approximator': approximator,
                              'network': network,
                              'class': optimizer_class,
                              'lr': lr,
                              'loss': critic_loss,
                              'batch_size': batch_size,
                              'target_update_frequency': target_update_frequency,
                              'replay_memory': replay_memory,
                              'initial_replay_size': initial_replay_size,
                              'max_replay_size': max_replay_size,
                              'clip_reward': clip_reward,
                              'n_epochs': n_epochs,
                              'n_steps': n_steps,
                              'n_steps_per_fit': n_steps_per_fit,
                              'n_episodes': n_episodes,
                              'n_episodes_per_fit': n_episodes_per_fit,
                              'epsilon': epsilon
                              }
            self.algo_params = dict_of_params
        is_set_param_success = self.set_params(new_params=self.algo_params)
        if(not is_set_param_success):
            err_msg = 'There was an error setting the parameters of a'+'\''+str(self.__class__.__name__)+'\' object!'
            self.logger.error(msg=err_msg)
            self.fully_instantiated = False
            self.is_learn_successful = False
            return False
        self.logger.info(msg='\''+str(self.__class__.__name__)+'\' object fully instantiated!')
        self.fully_instantiated = True
        return True
def set_params(self, new_params):
"""
Parameters
----------
new_params: The new parameters to be used in the specific model generation algorithm. It must be a dictionary that does
not contain any dictionaries(i.e: all parameters must be at the same level).
We need to create the dictionary in the right form for MushroomRL. Then it needs to update self.algo_params.
Then it needs to update the object self.algo_object: to this we need to pass the actual values and not
the Hyperparameter objects.
We call _select_current_actual_value_from_hp_classes: to this method we need to pass the dictionary already
in its final form.
Returns
-------
bool: This method returns True if new_params is set correctly, and False otherwise.
"""
if(new_params is not None):
mdp_info = Categorical(hp_name='mdp_info', obj_name='mdp_info_'+str(self.model.__name__),
current_actual_value=self.info_MDP)
input_shape = Categorical(hp_name='input_shape', obj_name='input_shape_'+str(self.model.__name__),
current_actual_value=self.info_MDP.observation_space.shape)
if(self.regressor_type == 'action_regressor'):
output_shape = Categorical(hp_name='output_shape', obj_name='output_shape_'+str(self.model.__name__),
current_actual_value=(1,))
n_actions = Categorical(hp_name='n_actions', obj_name='n_actions_'+str(self.model.__name__),
current_actual_value=self.info_MDP.action_space.n)
elif(self.regressor_type == 'q_regressor'):
output_shape = Categorical(hp_name='output_shape', obj_name='output_shape_'+str(self.model.__name__),
current_actual_value=(self.info_MDP.action_space.n,))
n_actions = Categorical(hp_name='n_actions', obj_name='n_actions_'+str(self.model.__name__),
current_actual_value=self.info_MDP.action_space.n)
elif(self.regressor_type == 'generic_regressor'):
output_shape = Categorical(hp_name='output_shape', obj_name='output_shape_'+str(self.model.__name__),
current_actual_value=self.info_MDP.action_space.shape)
#to have a generic regressor I must not specify n_actions
n_actions = Categorical(hp_name='n_actions', obj_name='n_actions_'+str(self.model.__name__),
current_actual_value=None)
policy = Categorical(hp_name='policy', obj_name='policy_'+str(self.model.__name__),
current_actual_value=EpsGreedy(new_params['epsilon'].current_actual_value))
tmp_structured_algo_params = {'mdp_info': mdp_info,
'policy': policy,
'approximator_params': {'input_shape': input_shape,
'n_actions': n_actions,
'output_shape': output_shape,
'optimizer': {'class': None, 'params': {'lr': None}},
}
}
for tmp_key in list(new_params.keys()):
#i do not want to change mdp_info or policy
if(tmp_key in ['approximator', 'batch_size', 'target_update_frequency', 'replay_memory',
'initial_replay_size', 'max_replay_size', 'clip_reward']):
tmp_structured_algo_params.update({tmp_key: new_params[tmp_key]})
if(tmp_key in ['network', 'loss']):
tmp_structured_algo_params['approximator_params'].update({tmp_key: new_params[tmp_key]})
if(tmp_key in ['class']):
tmp_structured_algo_params['approximator_params']['optimizer'].update({tmp_key: new_params[tmp_key]})
if(tmp_key in ['lr']):
new_dict_to_add = {tmp_key: new_params[tmp_key]}
tmp_structured_algo_params['approximator_params']['optimizer']['params'].update(new_dict_to_add)
structured_dict_of_values = self._select_current_actual_value_from_hp_classes(params_structured_dict=
tmp_structured_algo_params)
#i need to un-pack structured_dict_of_values for DQN
self.algo_object = DQN(**structured_dict_of_values)
final_dict_of_params = tmp_structured_algo_params
#add n_epochs, n_steps, n_steps_per_fit, n_episodes, n_episodes_per_fit:
dict_to_add = {'n_epochs': new_params['n_epochs'],
'n_steps': new_params['n_steps'],
'n_steps_per_fit': new_params['n_steps_per_fit'],
'n_episodes': new_params['n_episodes'],
'n_episodes_per_fit': new_params['n_episodes_per_fit'],
'epsilon': new_params['epsilon']
}
final_dict_of_params = {**final_dict_of_params, **dict_to_add}
self.algo_params = final_dict_of_params
tmp_new_params = self.get_params()
if(tmp_new_params is not None):
self.algo_params_upon_instantiation = copy.deepcopy(tmp_new_params)
else:
self.logger.error(msg='There was an error getting the parameters!')
return False
return True
else:
self.logger.error(msg='Cannot set parameters: \'new_params\' is \'None\'!')
return False
class ModelGenerationMushroomOnlineAC(ModelGenerationMushroomOnline):
    """
    This Class is used as base Class for actor critic methods implemented in MushroomRL. Specifically it is used to contain
    some common methods that would have the same implementation across different actor critic methods.

    This Class inherits from the Class ModelGenerationMushroomOnline.
    """

    @abstractmethod
    def model_specific_set_params(self, new_params, mdp_info, input_shape, output_shape, n_actions):
        """
        Hook implemented by each concrete actor critic block: it builds the structured parameters dictionary required by
        the wrapped MushroomRL algorithm and it instantiates self.algo_object.

        Parameters
        ----------
        new_params: A flat dictionary of HyperParameter objects with the new parameters of the algorithm.

        mdp_info: The MDP information wrapped in a Categorical hyperparameter.

        input_shape: The shape of the observation space wrapped in a Categorical hyperparameter.

        output_shape: The shape of the output wrapped in a Categorical hyperparameter.

        n_actions: The number of actions (or None for a generic regressor) wrapped in a Categorical hyperparameter.

        Returns
        -------
        tmp_structured_algo_params: A structured dictionary with the parameters that are strictly part of the RL algorithm.

        dict_to_add: A flat dictionary with the parameters needed by learn() that are not part of the RL algorithm itself.
        """
        raise NotImplementedError

    def set_params(self, new_params):
        """
        Parameters
        ----------
        new_params: The new parameters to be used in the specific model generation algorithm. It must be a dictionary that does
                    not contain any dictionaries(i.e: all parameters must be at the same level).

                    We need to create the dictionary in the right form for MushroomRL. Then it needs to update self.algo_params.
                    Then it needs to update the object self.algo_object: to this we need to pass the actual values and not
                    the Hyperparameter objects.

                    We call _select_current_actual_value_from_hp_classes: to this method we need to pass the dictionary already
                    in its final form.
        Returns
        -------
        bool: This method returns True if new_params is set correctly, and False otherwise.
        """

        #guard clause: nothing to set
        if(new_params is None):
            self.logger.error(msg='Cannot set parameters: \'new_params\' is \'None\'!')
            return False

        mdp_info = Categorical(hp_name='mdp_info', obj_name='mdp_info_'+str(self.model.__name__),
                               current_actual_value=self.info_MDP)

        input_shape = Categorical(hp_name='input_shape', obj_name='input_shape_'+str(self.model.__name__),
                                  current_actual_value=self.info_MDP.observation_space.shape)

        if(self.regressor_type == 'action_regressor'):
            #one regressor per action: each regressor outputs a single value
            output_shape = Categorical(hp_name='output_shape', obj_name='output_shape_'+str(self.model.__name__),
                                       current_actual_value=(1,))
            n_actions = Categorical(hp_name='n_actions', obj_name='n_actions_'+str(self.model.__name__),
                                    current_actual_value=self.info_MDP.action_space.n)
        elif(self.regressor_type == 'q_regressor'):
            #a single regressor outputs the values of all the actions at once
            output_shape = Categorical(hp_name='output_shape', obj_name='output_shape_'+str(self.model.__name__),
                                       current_actual_value=(self.info_MDP.action_space.n,))
            n_actions = Categorical(hp_name='n_actions', obj_name='n_actions_'+str(self.model.__name__),
                                    current_actual_value=self.info_MDP.action_space.n)
        elif(self.regressor_type == 'generic_regressor'):
            output_shape = Categorical(hp_name='output_shape', obj_name='output_shape_'+str(self.model.__name__),
                                       current_actual_value=self.info_MDP.action_space.shape)
            #to have a generic regressor I must not specify n_actions
            n_actions = Categorical(hp_name='n_actions', obj_name='n_actions_'+str(self.model.__name__),
                                    current_actual_value=None)
        else:
            #previously an unknown regressor_type crashed later with a NameError on 'output_shape': fail explicitly instead
            self.logger.error(msg='The \'regressor_type\' must be one of: \'action_regressor\', \'q_regressor\','
                                  ' \'generic_regressor\'!')
            return False

        #delegate the algorithm-specific wiring to the concrete sub-class:
        tmp_structured_algo_params, dict_to_add = self.model_specific_set_params(new_params=new_params, mdp_info=mdp_info,
                                                                                 input_shape=input_shape,
                                                                                 output_shape=output_shape,
                                                                                 n_actions=n_actions)

        final_dict_of_params = {**tmp_structured_algo_params, **dict_to_add}

        self.algo_params = final_dict_of_params

        tmp_new_params = self.get_params()

        if(tmp_new_params is not None):
            self.algo_params_upon_instantiation = copy.deepcopy(tmp_new_params)
        else:
            self.logger.error(msg='There was an error getting the parameters!')
            return False

        return True
class ModelGenerationMushroomOnlinePPO(ModelGenerationMushroomOnlineAC):
    """
    This Class implements a specific online model generation algorithm: PPO. This Class wraps the PPO method
    implemented in MushroomRL.

    cf. https://github.com/MushroomRL/mushroom-rl/blob/dev/mushroom_rl/algorithms/actor_critic/deep_actor_critic/ppo.py

    This Class inherits from the Class ModelGenerationMushroomOnlineAC.
    """

    def __init__(self, eval_metric, obj_name, regressor_type='generic_regressor', seeder=2, algo_params=None, log_mode='console',
                 checkpoint_log_path=None, verbosity=3, n_jobs=1, job_type='process', deterministic_output_policy=True):
        """
        Parameters
        ----------
        algo_params: This is either None or a dictionary containing all the needed parameters.

                     The default is None.

                     If None then the following parameters will be used:
                     'policy': either BoltzmannTorchPolicy(beta=0.001) or GaussianTorchPolicy(std_0=1),
                     'network': one hidden layer, 16 neurons,
                     'input_shape': self.info_MDP.observation_space.shape,
                     'n_actions': None,
                     'output_shape': self.info_MDP.action_space.shape,
                     'actor_class': Adam,
                     'actor_lr': 3e-4,
                     'critic_class': Adam,
                     'critic_lr': 3e-4,
                     'loss': F.mse_loss,
                     'n_epochs_policy': 10,
                     'batch_size': 64,
                     'eps_ppo': 0.2,
                     'lam': 0.95,
                     'ent_coeff': 0,
                     'n_epochs': 10,
                     'n_steps': None,
                     'n_steps_per_fit': None,
                     'n_episodes': 500,
                     'n_episodes_per_fit': 50.

        regressor_type: This is a string and it can either be: 'action_regressor', 'q_regressor' or 'generic_regressor'. This is
                        used to pick one of the 3 possible kind of regressor made available by MushroomRL.

                        Note that if you want to use a 'q_regressor' then the picked regressor must be able to perform
                        multi-target regression, as a single regressor is used for all actions.

                        The default is 'generic_regressor'.

        deterministic_output_policy: If this is True then the output policy will be rendered deterministic else if False nothing
                                     will be done. Note that the policy is made deterministic only at the end of the learn()
                                     method.

        Non-Parameters Members
        ----------------------
        fully_instantiated: This is True if the block is fully instantiated, False otherwise. It is mainly used to make sure that
                            when we call the learn method the model generation blocks have been fully instantiated as they
                            undergo two stage initialisation being info_MDP unknown at the beginning of the pipeline.

        info_MDP: This is a dictionary compliant with the parameters needed in input to all MushroomRL model generation
                  algorithms. It containts the observation space, the action space, the MDP horizon and the MDP gamma.

        algo_object: This is the object containing the actual model generation algorithm.

        algo_params_upon_instantiation: This a copy of the original value of algo_params, namely the value of
                                        algo_params that the object got upon creation. This is needed for re-loading
                                        objects.

        model: This is used in set_params in the generic Class ModelGenerationMushroomOnline. With this member we avoid
               re-writing for each Class inheriting from the Class ModelGenerationMushroomOnline the set_params method.
               In this Class this member equals to PPO, which is the Class of MushroomRL implementing PPO.

        core: This is used to contain the Core object of MushroomRL needed to run online RL algorithms.

        The other parameters and non-parameters members are described in the Class Block.
        """

        super().__init__(eval_metric=eval_metric, obj_name=obj_name, seeder=seeder, log_mode=log_mode,
                         checkpoint_log_path=checkpoint_log_path, verbosity=verbosity, n_jobs=n_jobs, job_type=job_type)

        #compatibility flags: PPO is an online algorithm and handles both box and discrete action/observation spaces
        self.works_on_online_rl = True
        self.works_on_offline_rl = False
        self.works_on_box_action_space = True
        self.works_on_discrete_action_space = True
        self.works_on_box_observation_space = True
        self.works_on_discrete_observation_space = True

        self.regressor_type = regressor_type

        #this block has parameters and I may want to tune them:
        self.is_parametrised = True

        self.algo_params = algo_params

        self.deterministic_output_policy = deterministic_output_policy

        #two-stage initialisation: full_block_instantiation() completes the setup once info_MDP is known
        self.fully_instantiated = False
        self.info_MDP = None
        self.algo_object = None
        self.algo_params_upon_instantiation = copy.deepcopy(self.algo_params)

        self.model = PPO

        self.core = None

        #seeds torch
        torch.manual_seed(self.seeder)
        torch.cuda.manual_seed(self.seeder)

        #remember whether CUDA is available: used when building the torch policies in model_specific_set_params
        if torch.cuda.is_available():
            self.can_use_cuda = True
        else:
            self.can_use_cuda = False

        #this seeding is needed for the policy of MushroomRL. Indeed the evaluation at the start of the learn method is done
        #using the policy and in the method draw_action, np.random is called!
        np.random.seed(self.seeder)

    def _default_network(self):
        """
        This method creates a default Network with 1 hidden layer and ReLU activation functions.

        Returns
        -------
        Network: the Class wrapper representing the default network.
        """

        class Network(nn.Module):
            def __init__(self, input_shape, output_shape, **kwargs):
                super(Network, self).__init__()

                n_input = input_shape[-1]
                n_output = output_shape[0]

                #two hidden layers of 16 units each, Xavier-initialised for ReLU activations
                self.hl0 = nn.Linear(n_input, 16)
                self.hl1 = nn.Linear(16, 16)
                self.hl2 = nn.Linear(16, n_output)

                nn.init.xavier_uniform_(self.hl0.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.hl1.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.hl2.weight, gain=nn.init.calculate_gain('relu'))

            def forward(self, state, **kwargs):
                h = F.relu(self.hl0(state.float()))
                h = F.relu(self.hl1(h))

                return self.hl2(h)

        return Network

    def full_block_instantiation(self, info_MDP):
        """
        Parameters
        ----------
        info_MDP: This is an object of Class mushroom_rl.environment.MDPInfo. It contains the action and observation spaces,
                  gamma and the horizon of the MDP.

        Returns
        -------
        This method returns True if the algo_params were set successfully, and False otherwise.
        """

        self.info_MDP = info_MDP

        #if the user did not supply algo_params, build the default hyperparameter set:
        if(self.algo_params is None):
            network = Categorical(hp_name='network', obj_name='network_'+str(self.model.__name__),
                                  current_actual_value=self._default_network())

            #actor optimizer hyperparameters:
            actor_class = Categorical(hp_name='actor_class', obj_name='actor_class_'+str(self.model.__name__),
                                      current_actual_value=optim.Adam)

            actor_lr = Real(hp_name='actor_lr', obj_name='actor_lr_'+str(self.model.__name__),
                            current_actual_value=3e-4, range_of_values=[1e-5, 1e-3], to_mutate=True, seeder=self.seeder,
                            log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)

            #critic optimizer hyperparameters:
            critic_class = Categorical(hp_name='critic_class', obj_name='critic_class_'+str(self.model.__name__),
                                       current_actual_value=optim.Adam)

            critic_lr = Real(hp_name='critic_lr', obj_name='critic_lr_'+str(self.model.__name__),
                             current_actual_value=3e-4, range_of_values=[1e-5, 1e-3], to_mutate=True, seeder=self.seeder,
                             log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)

            loss = Categorical(hp_name='loss', obj_name='loss_'+str(self.model.__name__),
                               current_actual_value=F.mse_loss)

            #PPO-specific hyperparameters:
            n_epochs_policy = Integer(hp_name='n_epochs_policy', obj_name='n_epochs_policy_'+str(self.model.__name__),
                                      current_actual_value=10, range_of_values=[1, 100], to_mutate=True, seeder=self.seeder,
                                      log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                      verbosity=self.verbosity)

            batch_size = Integer(hp_name='batch_size', obj_name='batch_size_'+str(self.model.__name__),
                                 current_actual_value=64, range_of_values=[8, 64], to_mutate=True, seeder=self.seeder,
                                 log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)

            eps_ppo = Real(hp_name='eps_ppo', obj_name='eps_ppo_'+str(self.model.__name__), current_actual_value=0.2,
                           range_of_values=[0.08,0.35], to_mutate=True, seeder=self.seeder, log_mode=self.log_mode,
                           checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)

            lam = Real(hp_name='lam', obj_name='lam_'+str(self.model.__name__), current_actual_value=0.95,
                       range_of_values=[0.85, 0.99], to_mutate=True, seeder=self.seeder, log_mode=self.log_mode,
                       checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)

            ent_coeff = Real(hp_name='ent_coeff', obj_name='ent_coeff_'+str(self.model.__name__), current_actual_value=0,
                             range_of_values=[0, 0.02], to_mutate=True, seeder=self.seeder, log_mode=self.log_mode,
                             checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)

            #learn() loop hyperparameters (not part of the MushroomRL PPO constructor):
            n_epochs = Integer(hp_name='n_epochs', current_actual_value=10, range_of_values=[1,50], to_mutate=True,
                               obj_name='n_epochs_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
                               checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)

            n_steps = Integer(hp_name='n_steps', current_actual_value=None, to_mutate=False,
                              obj_name='n_steps_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
                              checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)

            n_steps_per_fit = Integer(hp_name='n_steps_per_fit', current_actual_value=None, to_mutate=False,
                                      obj_name='n_steps_per_fit_'+str(self.model.__name__), seeder=self.seeder,
                                      log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                      verbosity=self.verbosity)

            n_episodes = Integer(hp_name='n_episodes', current_actual_value=500, range_of_values=[10,1000], to_mutate=True,
                                 obj_name='n_episodes_'+str(self.model.__name__), seeder=self.seeder,
                                 log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                 verbosity=self.verbosity)

            n_episodes_per_fit = Integer(hp_name='n_episodes_per_fit', current_actual_value=50, range_of_values=[1,1000],
                                         to_mutate=True, obj_name='n_episodes_per_fit_'+str(self.model.__name__),
                                         seeder=self.seeder, log_mode=self.log_mode,
                                         checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)

            dict_of_params = {'actor_class': actor_class,
                              'actor_lr': actor_lr,
                              'network': network,
                              'critic_class': critic_class,
                              'critic_lr': critic_lr,
                              'loss': loss,
                              'n_epochs_policy': n_epochs_policy,
                              'batch_size': batch_size,
                              'eps_ppo': eps_ppo,
                              'lam': lam,
                              'ent_coeff': ent_coeff,
                              'n_epochs': n_epochs,
                              'n_steps': n_steps,
                              'n_steps_per_fit': n_steps_per_fit,
                              'n_episodes': n_episodes,
                              'n_episodes_per_fit': n_episodes_per_fit
                             }

            self.algo_params = dict_of_params

        is_set_param_success = self.set_params(new_params=self.algo_params)

        if(not is_set_param_success):
            err_msg = 'There was an error setting the parameters of a'+'\''+str(self.__class__.__name__)+'\' object!'
            self.logger.error(msg=err_msg)
            self.fully_instantiated = False
            self.is_learn_successful = False
            return False

        self.logger.info(msg='\''+str(self.__class__.__name__)+'\' object fully instantiated!')
        self.fully_instantiated = True
        return True

    def model_specific_set_params(self, new_params, mdp_info, input_shape, output_shape, n_actions):
        """
        Parameters
        ----------
        new_params: These are the new parameters to set in the RL algorithm. It is a flat dictionary containing objects of Class
                    HyperParameter.

        mdp_info: This is an object of Class mushroom_rl.environment.MDPInfo: it contains the action space, the observation space
                  and gamma and the horizon of the MDP.

        input_shape: The shape of the observation space.

        output_shape: The shape of the action space.

        n_actions: If the space is Discrete this is the number of actions.

        Returns
        -------
        tmp_structured_algo_params: A structured dictionary containing the parameters that are strictly part of the RL algorithm.

        dict_to_add: A flat dictionary containing parameters needed in the method learn() that are not strictly part of the RL
                     algorithm, like the number of epochs and the number of episodes.
        """

        #discrete action spaces use a Boltzmann policy, continuous (box) action spaces use a Gaussian policy:
        if(isinstance(self.info_MDP.action_space, Discrete)):
            #check if there is the beta parameter for the BoltzmannTorchPolicy
            if('beta' not in list(new_params.keys())):
                new_params['beta'] = Real(hp_name='beta', obj_name='beta_'+str(self.model.__name__), current_actual_value=0.001,
                                          range_of_values=[0.0001, 0.9], to_mutate=False, seeder=self.seeder,
                                          log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                          verbosity=self.verbosity)

            o_policy = BoltzmannTorchPolicy(network=new_params['network'].current_actual_value,
                                            input_shape=input_shape.current_actual_value,
                                            output_shape=output_shape.current_actual_value,
                                            beta=new_params['beta'].current_actual_value, use_cuda=self.can_use_cuda,
                                            n_actions=n_actions.current_actual_value, n_models=None)
        else:
            #check if there is the std deviation parameter for the GaussianTorchPolicy
            if('std' not in list(new_params.keys())):
                new_params['std'] = Real(hp_name='std', obj_name='std_'+str(self.model.__name__), current_actual_value=5,
                                         range_of_values=[0.1, 20], to_mutate=True, seeder=self.seeder,
                                         log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                         verbosity=self.verbosity)

            o_policy = GaussianTorchPolicy(network=new_params['network'].current_actual_value,
                                           input_shape=input_shape.current_actual_value,
                                           output_shape=output_shape.current_actual_value,
                                           std_0=new_params['std'].current_actual_value, use_cuda=self.can_use_cuda,
                                           n_actions=n_actions.current_actual_value, n_models=None)

        policy = Categorical(hp_name='policy', obj_name='policy_'+str(self.model.__name__), current_actual_value=o_policy)

        #MushroomRL expects the generic keys 'class' and 'lr' inside the optimizer sub-dictionaries:
        tmp_structured_algo_params = {'mdp_info': mdp_info,
                                      'policy': policy,
                                      'actor_optimizer': {'class': None, 'params': {'lr': None}},
                                      'critic_params': {'input_shape': input_shape,
                                                        'n_actions': n_actions,
                                                        'output_shape': output_shape,
                                                        'optimizer': {'class': None, 'params': {'lr': None}}
                                                       }
                                     }

        for tmp_key in list(new_params.keys()):
            #i do not want to change mdp_info or policy
            if(tmp_key in ['n_epochs_policy', 'batch_size', 'eps_ppo', 'lam', 'ent_coeff']):
                tmp_structured_algo_params.update({tmp_key: new_params[tmp_key]})

            if(tmp_key in ['network', 'loss']):
                tmp_structured_algo_params['critic_params'].update({tmp_key: new_params[tmp_key]})

            if(tmp_key == 'critic_class'):
                tmp_structured_algo_params['critic_params']['optimizer'].update({'class': new_params[tmp_key]})

            if(tmp_key == 'critic_lr'):
                tmp_structured_algo_params['critic_params']['optimizer']['params'].update({'lr': new_params[tmp_key]})

            if(tmp_key == 'actor_class'):
                tmp_structured_algo_params['actor_optimizer'].update({'class': new_params[tmp_key]})

            if(tmp_key == 'actor_lr'):
                tmp_structured_algo_params['actor_optimizer']['params'].update({'lr': new_params[tmp_key]})

        structured_dict_of_values = self._select_current_actual_value_from_hp_classes(params_structured_dict=
                                                                                      tmp_structured_algo_params)

        #i need to un-pack structured_dict_of_values for PPO
        self.algo_object = PPO(**structured_dict_of_values)

        #now that i have created the PPO object i can resolve the conflict between the 'actor_class', 'actor_lr',
        #'critic_class' and 'critic_lr'. To resolve it, i need to change their keys from generic 'class' and 'lr', that are
        #needed for MushroomRL, to 'actor_class', 'actor_lr', 'critic_class' and 'critic_lr':
        new_val = tmp_structured_algo_params['critic_params']['optimizer']['class']
        tmp_structured_algo_params['critic_params']['optimizer']['critic_class'] = new_val
        del tmp_structured_algo_params['critic_params']['optimizer']['class']

        new_val = tmp_structured_algo_params['critic_params']['optimizer']['params']['lr']
        tmp_structured_algo_params['critic_params']['optimizer']['params']['critic_lr'] = new_val
        del tmp_structured_algo_params['critic_params']['optimizer']['params']['lr']

        tmp_structured_algo_params['actor_optimizer']['actor_class'] = tmp_structured_algo_params['actor_optimizer']['class']
        del tmp_structured_algo_params['actor_optimizer']['class']

        new_val = tmp_structured_algo_params['actor_optimizer']['params']['lr']
        tmp_structured_algo_params['actor_optimizer']['params']['actor_lr'] = new_val
        del tmp_structured_algo_params['actor_optimizer']['params']['lr']

        #add n_epochs, n_steps, n_steps_per_fit, n_episodes, n_episodes_per_fit:
        dict_to_add = {'n_epochs': new_params['n_epochs'],
                       'n_steps': new_params['n_steps'],
                       'n_steps_per_fit': new_params['n_steps_per_fit'],
                       'n_episodes': new_params['n_episodes'],
                       'n_episodes_per_fit': new_params['n_episodes_per_fit']
                      }

        #keep the policy hyperparameter (beta or std) so it survives a save/re-load cycle:
        if(isinstance(self.info_MDP.action_space, Discrete)):
            dict_to_add.update({'beta': new_params['beta']})
        else:
            dict_to_add.update({'std': new_params['std']})

        return tmp_structured_algo_params, dict_to_add
class ModelGenerationMushroomOnlineSAC(ModelGenerationMushroomOnlineAC):
"""
This Class implements a specific online model generation algorithm: SAC. This Class wraps the SAC method
implemented in MushroomRL.
cf. https://github.com/MushroomRL/mushroom-rl/blob/dev/mushroom_rl/algorithms/actor_critic/deep_actor_critic/sac.py
This Class inherits from the Class ModelGenerationMushroomOnlineAC.
"""
def __init__(self, eval_metric, obj_name, regressor_type='generic_regressor', seeder=2, algo_params=None, log_mode='console',
checkpoint_log_path=None, verbosity=3, n_jobs=1, job_type='process', deterministic_output_policy=True):
"""
Parameters
----------
algo_params: This is either None or a dictionary containing all the needed parameters.
The default is None.
If None then the following parameters will be used:
'input_shape': self.info_MDP.observation_space.shape,
'n_actions': None,
'output_shape': self.info_MDP.action_space.shape,
'actor_network': one hidden layer, 16 neurons,
'actor_class': Adam,
'actor_lr': 3e-4,
'critic_network': one hidden layer, 16 neurons,
'critic_class': Adam,
'critic_lr': 3e-4,
'loss': F.mse_loss,
'batch_size': 256,
'initial_replay_size': 50000,
'max_replay_size': 1000000,
'warmup_transitions': 100,
'tau': 0.005,
'lr_alpha': 3e-4,
'log_std_min': -20,
'log_std_max': 2,
'target_entropy': None,
'n_epochs': 10,
'n_steps': None,
'n_steps_per_fit': None,
'n_episodes': 500,
'n_episodes_per_fit': 50
regressor_type: This is a string and it can either be: 'action_regressor', 'q_regressor' or 'generic_regressor'. This is
used to pick one of the 3 possible kind of regressor made available by MushroomRL.
Note that if you want to use a 'q_regressor' then the picked regressor must be able to perform
multi-target regression, as a single regressor is used for all actions.
The default is 'generic_regressor'.
deterministic_output_policy: If this is True then the output policy will be rendered deterministic else if False nothing
will be done. Note that the policy is made deterministic only at the end of the learn()
method.
Non-Parameters Members
----------------------
fully_instantiated: This is True if the block is fully instantiated, False otherwise. It is mainly used to make sure that
when we call the learn method the model generation blocks have been fully instantiated as they
undergo two stage initialisation being info_MDP unknown at the beginning of the pipeline.
info_MDP: This is a dictionary compliant with the parameters needed in input to all MushroomRL model generation
algorithms. It containts the observation space, the action space, the MDP horizon and the MDP gamma.
algo_object: This is the object containing the actual model generation algorithm.
algo_params_upon_instantiation: This a copy of the original value of algo_params, namely the value of
algo_params that the object got upon creation. This is needed for re-loading
objects.
model: This is used in set_params in the generic Class ModelGenerationMushroomOnline. With this member we avoid
re-writing for each Class inheriting from the Class ModelGenerationMushroomOnline the set_params method.
In this Class this member equals to SAC, which is the Class of MushroomRL implementing SAC.
core: This is used to contain the Core object of MushroomRL needed to run online RL algorithms.
The other parameters and non-parameters members are described in the Class Block.
"""
super().__init__(eval_metric=eval_metric, obj_name=obj_name, seeder=seeder, log_mode=log_mode,
checkpoint_log_path=checkpoint_log_path, verbosity=verbosity, n_jobs=n_jobs, job_type=job_type)
self.works_on_online_rl = True
self.works_on_offline_rl = False
self.works_on_box_action_space = True
self.works_on_discrete_action_space = False
self.works_on_box_observation_space = True
self.works_on_discrete_observation_space = True
self.regressor_type = regressor_type
#this block has parameters and I may want to tune them:
self.is_parametrised = True
self.algo_params = algo_params
self.deterministic_output_policy = deterministic_output_policy
self.fully_instantiated = False
self.info_MDP = None
self.algo_object = None
self.algo_params_upon_instantiation = copy.deepcopy(self.algo_params)
self.model = SAC
self.core = None
#seeds torch
torch.manual_seed(self.seeder)
torch.cuda.manual_seed(self.seeder)
#this seeding is needed for the policy of MushroomRL. Indeed the evaluation at the start of the learn method is done
#using the policy and in the method draw_action, np.random is called!
np.random.seed(self.seeder)
def _default_network(self):
"""
This method creates a default CriticNetwork with 1 hidden layer and ReLU activation functions and a default ActorNetwork
with 1 hidden layer and ReLU activation functions.
Returns
-------
CriticNetwork, ActorNetwork: the Class wrappers representing the default CriticNetwork and ActorNetwork.
"""
class CriticNetwork(nn.Module):
def __init__(self, input_shape, output_shape, **kwargs):
super().__init__()
n_input = input_shape[-1]
n_output = output_shape[0]
self.hl0 = nn.Linear(n_input, 16)
self.hl1 = nn.Linear(16, 16)
self.hl2 = nn.Linear(16, n_output)
nn.init.xavier_uniform_(self.hl0.weight, gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self.hl1.weight, gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self.hl2.weight, gain=nn.init.calculate_gain('relu'))
def forward(self, state, action, **kwargs):
state_action = torch.cat((state.float(), action.float()), dim=1)
h = F.relu(self.hl0(state_action))
h = F.relu(self.hl1(h))
q = self.hl2(h)
return torch.squeeze(q)
class ActorNetwork(nn.Module):
def __init__(self, input_shape, output_shape, **kwargs):
super(ActorNetwork, self).__init__()
n_input = input_shape[-1]
n_output = output_shape[0]
self.hl0 = nn.Linear(n_input, 16)
self.hl1 = nn.Linear(16, 16)
self.hl2 = nn.Linear(16, n_output)
nn.init.xavier_uniform_(self.hl0.weight, gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self.hl1.weight, gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self.hl2.weight, gain=nn.init.calculate_gain('relu'))
def forward(self, state, **kwargs):
h = F.relu(self.hl0(torch.squeeze(state, 1).float()))
h = F.relu(self.hl1(h))
return self.hl2(h)
return CriticNetwork, ActorNetwork
def full_block_instantiation(self, info_MDP):
    """
    Second stage of the two-stage initialisation: stores the MDP information and, when no algo_params were supplied at
    construction time, builds the default SAC hyper-parameter configuration and applies it via set_params.

    Parameters
    ----------
    info_MDP: This is an object of Class mushroom_rl.environment.MDPInfo. It contains the action and observation spaces,
              gamma and the horizon of the MDP.

    Returns
    -------
    This method returns True if the algo_params were set successfully, and False otherwise.
    """
    self.info_MDP = info_MDP
    #default hyper-parameters are built only when the user did not pass algo_params upon object creation:
    if(self.algo_params is None):
        critic, actor = self._default_network()
        #actor: SAC uses two actor networks, one for the mean and one for the std of the policy; the sigma network is a
        #deep copy of the default actor so that the two networks do not share weights:
        actor_network_mu = Categorical(hp_name='actor_network_mu', obj_name='actor_network_mu_'+str(self.model.__name__),
                                       current_actual_value=actor)
        actor_network_sigma = Categorical(hp_name='actor_network_sigma',
                                          obj_name='actor_network_sigma_'+str(self.model.__name__),
                                          current_actual_value=copy.deepcopy(actor))
        actor_class = Categorical(hp_name='actor_class', obj_name='actor_class_'+str(self.model.__name__),
                                  current_actual_value=optim.Adam)
        #hyper-parameters created with to_mutate=True are exposed to automatic tuning; the others stay fixed:
        actor_lr = Real(hp_name='actor_lr', obj_name='actor_lr_'+str(self.model.__name__),
                        current_actual_value=3e-4, range_of_values=[1e-5, 1e-3], to_mutate=True, seeder=self.seeder,
                        log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
        #critic:
        critic_network = Categorical(hp_name='critic_network', obj_name='critic_network_'+str(self.model.__name__),
                                     current_actual_value=critic)
        critic_class = Categorical(hp_name='critic_class', obj_name='critic_class_'+str(self.model.__name__),
                                   current_actual_value=optim.Adam)
        critic_lr = Real(hp_name='critic_lr', obj_name='critic_lr_'+str(self.model.__name__),
                         current_actual_value=3e-4, range_of_values=[1e-5, 1e-3], to_mutate=True, seeder=self.seeder,
                         log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
        critic_loss = Categorical(hp_name='loss', obj_name='loss_'+str(self.model.__name__),
                                  current_actual_value=F.mse_loss)
        batch_size = Integer(hp_name='batch_size', obj_name='batch_size_'+str(self.model.__name__),
                             current_actual_value=256, range_of_values=[8, 256], to_mutate=True, seeder=self.seeder,
                             log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
        initial_replay_size = Integer(hp_name='initial_replay_size', current_actual_value=50000,
                                      obj_name='initial_replay_size_'+str(self.model.__name__))
        max_replay_size = Integer(hp_name='max_replay_size', current_actual_value=1000000,
                                  obj_name='max_replay_size_'+str(self.model.__name__))
        warmup_transitions = Integer(hp_name='warmup_transitions', current_actual_value=100,
                                     obj_name='warmup_transitions_'+str(self.model.__name__))
        tau = Real(hp_name='tau', current_actual_value=0.005, obj_name='tau_'+str(self.model.__name__))
        lr_alpha = Real(hp_name='lr_alpha', current_actual_value=3e-4, obj_name='lr_alpha_'+str(self.model.__name__))
        log_std_min = Real(hp_name='log_std_min', current_actual_value=-20, obj_name='log_std_min_'+str(self.model.__name__))
        log_std_max = Real(hp_name='log_std_max', current_actual_value=2, obj_name='log_std_max_'+str(self.model.__name__))
        target_entropy = Real(hp_name='target_entropy', current_actual_value=None,
                              obj_name='target_entropy_'+str(self.model.__name__))
        #the following parameters are consumed by the learn() method rather than by the SAC constructor:
        n_epochs = Integer(hp_name='n_epochs', current_actual_value=10, range_of_values=[1,50], to_mutate=True,
                           obj_name='n_epochs_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
                           checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
        n_steps = Integer(hp_name='n_steps', current_actual_value=None, to_mutate=False,
                          obj_name='n_steps_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
                          checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
        n_steps_per_fit = Integer(hp_name='n_steps_per_fit', current_actual_value=None, to_mutate=False,
                                  obj_name='n_steps_per_fit_'+str(self.model.__name__), seeder=self.seeder,
                                  log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                  verbosity=self.verbosity)
        n_episodes = Integer(hp_name='n_episodes', current_actual_value=500, range_of_values=[10,1000], to_mutate=True,
                             obj_name='n_episodes_'+str(self.model.__name__), seeder=self.seeder,
                             log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                             verbosity=self.verbosity)
        n_episodes_per_fit = Integer(hp_name='n_episodes_per_fit', current_actual_value=50, range_of_values=[1,1000],
                                     to_mutate=True, obj_name='n_episodes_per_fit_'+str(self.model.__name__),
                                     seeder=self.seeder, log_mode=self.log_mode,
                                     checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
        dict_of_params = {'actor_network_mu': actor_network_mu,
                          'actor_network_sigma': actor_network_sigma,
                          'actor_class': actor_class,
                          'actor_lr': actor_lr,
                          'critic_network': critic_network,
                          'critic_class': critic_class,
                          'critic_lr': critic_lr,
                          'loss': critic_loss,
                          'batch_size': batch_size,
                          'initial_replay_size': initial_replay_size,
                          'max_replay_size': max_replay_size,
                          'warmup_transitions': warmup_transitions,
                          'tau': tau,
                          'lr_alpha': lr_alpha,
                          'log_std_min': log_std_min,
                          'log_std_max': log_std_max,
                          'target_entropy': target_entropy,
                          'n_epochs': n_epochs,
                          'n_steps': n_steps,
                          'n_steps_per_fit': n_steps_per_fit,
                          'n_episodes': n_episodes,
                          'n_episodes_per_fit': n_episodes_per_fit
                          }
        self.algo_params = dict_of_params
    #set_params is called in any case: either on the defaults built above or on the user-supplied algo_params:
    is_set_param_success = self.set_params(new_params=self.algo_params)
    if(not is_set_param_success):
        err_msg = 'There was an error setting the parameters of a'+'\''+str(self.__class__.__name__)+'\' object!'
        self.logger.error(msg=err_msg)
        self.fully_instantiated = False
        self.is_learn_successful = False
        return False
    self.logger.info(msg='\''+str(self.__class__.__name__)+'\' object fully instantiated!')
    self.fully_instantiated = True
    return True
def model_specific_set_params(self, new_params, mdp_info, input_shape, output_shape, n_actions):
    """
    Re-organises the flat dictionary of hyper-parameters into the nested structure expected by the MushroomRL SAC
    constructor, instantiates the SAC object, and splits off the parameters that are only consumed by learn().

    Parameters
    ----------
    new_params: These are the new parameters to set in the RL algorithm. It is a flat dictionary containing objects of Class
                HyperParameter.
    mdp_info: This is an object of Class mushroom_rl.environment.MDPInfo: it contains the action space, the observation space
              and gamma and the horizon of the MDP.
    input_shape: The shape of the observation space.
    output_shape: The shape of the action space.
    n_actions: If the space is Discrete this is the number of actions.

    Returns
    -------
    tmp_structured_algo_params: A structured dictionary containing the parameters that are strictly part of the RL algorithm.
    dict_to_add: A flat dictionary containing parameters needed in the method learn() that are not strictly part of the RL
                 algorithm, like the number of epochs and the number of episodes.
    """
    #the critic consumes the concatenation of state and action, so its input size is the observation dimension plus the
    #action dimension (cf. torch.cat in the critic's forward):
    critic_input_shape = Categorical(hp_name='critic_input_shape', obj_name='critic_input_shape_'+str(self.model.__name__),
                                     current_actual_value=(input_shape.current_actual_value[0]+
                                                           self.info_MDP.action_space.shape[0],))
    #the critic outputs a single Q-value:
    critic_output_shape = Categorical(hp_name='critic_output_shape', current_actual_value=(1,),
                                      obj_name='critic_output_shape_'+str(self.model.__name__))
    #skeleton of the nested structure required by the MushroomRL SAC constructor; the None placeholders are filled in
    #by the loop below:
    tmp_structured_algo_params = {'mdp_info': mdp_info,
                                  'actor_mu_params': {'input_shape': input_shape,
                                                      'n_actions': n_actions,
                                                      'output_shape': output_shape
                                                      },
                                  'actor_sigma_params': {'input_shape': input_shape,
                                                         'n_actions': n_actions,
                                                         'output_shape': output_shape
                                                         },
                                  'actor_optimizer': {'class': None, 'params': {'lr': None}},
                                  'critic_params': {'input_shape': critic_input_shape,
                                                    'output_shape': critic_output_shape,
                                                    'optimizer': {'class': None, 'params': {'lr': None}}
                                                    }
                                  }
    #route each flat key into its place in the nested structure:
    for tmp_key in list(new_params.keys()):
        #i do not want to change mdp_info
        if(tmp_key in ['batch_size', 'initial_replay_size', 'max_replay_size', 'warmup_transitions', 'tau', 'lr_alpha',
                       'log_std_min', 'log_std_max', 'target_entropy']):
            tmp_structured_algo_params.update({tmp_key: new_params[tmp_key]})
        if(tmp_key == 'loss'):
            tmp_structured_algo_params['critic_params'].update({tmp_key: new_params[tmp_key]})
        if(tmp_key == 'critic_network'):
            tmp_structured_algo_params['critic_params'].update({'network': new_params[tmp_key]})
        if(tmp_key == 'critic_class'):
            tmp_structured_algo_params['critic_params']['optimizer'].update({'class': new_params[tmp_key]})
        if(tmp_key == 'critic_lr'):
            tmp_structured_algo_params['critic_params']['optimizer']['params'].update({'lr': new_params[tmp_key]})
        if(tmp_key == 'actor_network_mu'):
            tmp_structured_algo_params['actor_mu_params'].update({'network': new_params[tmp_key]})
        if(tmp_key == 'actor_network_sigma'):
            tmp_structured_algo_params['actor_sigma_params'].update({'network': new_params[tmp_key]})
        if(tmp_key == 'actor_class'):
            tmp_structured_algo_params['actor_optimizer'].update({'class': new_params[tmp_key]})
        if(tmp_key == 'actor_lr'):
            tmp_structured_algo_params['actor_optimizer']['params'].update({'lr': new_params[tmp_key]})
    #extract the raw values out of the HyperParameter wrappers before handing them to MushroomRL:
    structured_dict_of_values = self._select_current_actual_value_from_hp_classes(params_structured_dict=
                                                                                  tmp_structured_algo_params)
    #i need to un-pack structured_dict_of_values for SAC
    self.algo_object = SAC(**structured_dict_of_values)
    #now that i have created the SAC object i can resolve the conflict between the 'actor_class', 'actor_lr', 'actor_network',
    #'critic_class', 'critic_lr' and 'critic_network'. To resolve it, i need to change their keys from generic 'class'
    #'lr' and 'network', that are needed for MushroomRL, to 'actor_class', 'actor_lr', 'actor_network', 'critic_class',
    #critic_lr' and 'critic_network':
    tmp_structured_algo_params['critic_params']['critic_network'] = tmp_structured_algo_params['critic_params']['network']
    del tmp_structured_algo_params['critic_params']['network']
    new_val = tmp_structured_algo_params['critic_params']['optimizer']['class']
    tmp_structured_algo_params['critic_params']['optimizer']['critic_class'] = new_val
    del tmp_structured_algo_params['critic_params']['optimizer']['class']
    new_val = tmp_structured_algo_params['critic_params']['optimizer']['params']['lr']
    tmp_structured_algo_params['critic_params']['optimizer']['params']['critic_lr'] = new_val
    del tmp_structured_algo_params['critic_params']['optimizer']['params']['lr']
    new_val = tmp_structured_algo_params['actor_mu_params']['network']
    tmp_structured_algo_params['actor_mu_params']['actor_network_mu'] = new_val
    del tmp_structured_algo_params['actor_mu_params']['network']
    new_val = tmp_structured_algo_params['actor_sigma_params']['network']
    tmp_structured_algo_params['actor_sigma_params']['actor_network_sigma'] = new_val
    del tmp_structured_algo_params['actor_sigma_params']['network']
    tmp_structured_algo_params['actor_optimizer']['actor_class'] = tmp_structured_algo_params['actor_optimizer']['class']
    del tmp_structured_algo_params['actor_optimizer']['class']
    new_val = tmp_structured_algo_params['actor_optimizer']['params']['lr']
    tmp_structured_algo_params['actor_optimizer']['params']['actor_lr'] = new_val
    del tmp_structured_algo_params['actor_optimizer']['params']['lr']
    #add n_epochs, n_steps, n_steps_per_fit, n_episodes, n_episodes_per_fit:
    dict_to_add = {'n_epochs': new_params['n_epochs'],
                   'n_steps': new_params['n_steps'],
                   'n_steps_per_fit': new_params['n_steps_per_fit'],
                   'n_episodes': new_params['n_episodes'],
                   'n_episodes_per_fit': new_params['n_episodes_per_fit']
                   }
    return tmp_structured_algo_params, dict_to_add
class ModelGenerationMushroomOnlineDDPG(ModelGenerationMushroomOnlineAC):
    """
    This Class implements a specific online model generation algorithm: DDPG. This Class wraps the DDPG method
    implemented in MushroomRL.
    cf. https://github.com/MushroomRL/mushroom-rl/blob/dev/mushroom_rl/algorithms/actor_critic/deep_actor_critic/ddpg.py
    This Class inherits from the Class ModelGenerationMushroomOnlineAC.
    """

    def __init__(self, eval_metric, obj_name, regressor_type='generic_regressor', seeder=2, algo_params=None, log_mode='console',
                 checkpoint_log_path=None, verbosity=3, n_jobs=1, job_type='process', deterministic_output_policy=True):
        """
        Parameters
        ----------
        algo_params: This is either None or a dictionary containing all the needed parameters.
                     The default is None.
                     If None then the following parameters will be used:
                     'input_shape': self.info_MDP.observation_space.shape,
                     'n_actions': None,
                     'output_shape': self.info_MDP.action_space.shape,
                     'policy': OrnsteinUhlenbeckPolicy(sigma=0.2*np.ones(1), theta=0.15, dt=1e-2)
                     'actor_network': one hidden layer, 16 neurons,
                     'actor_class': Adam,
                     'actor_lr': 1e-3,
                     'critic_network': one hidden layer, 16 neurons,
                     'critic_class': Adam,
                     'critic_lr': 1e-3,
                     'loss': F.mse_loss,
                     'batch_size': 100,
                     'initial_replay_size': 50000,
                     'max_replay_size': 1000000,
                     'tau': 0.005,
                     'policy_delay': 1,
                     'n_epochs': 10,
                     'n_steps': None,
                     'n_steps_per_fit': None,
                     'n_episodes': 500,
                     'n_episodes_per_fit': 50
        regressor_type: This is a string and it can either be: 'action_regressor', 'q_regressor' or 'generic_regressor'. This is
                        used to pick one of the 3 possible kind of regressor made available by MushroomRL.
                        Note that if you want to use a 'q_regressor' then the picked regressor must be able to perform
                        multi-target regression, as a single regressor is used for all actions.
                        The default is 'generic_regressor'.
        deterministic_output_policy: If this is True then the output policy will be rendered deterministic else if False nothing
                                     will be done. Note that the policy is made deterministic only at the end of the learn()
                                     method.

        Non-Parameters Members
        ----------------------
        fully_instantiated: This is True if the block is fully instantiated, False otherwise. It is mainly used to make sure that
                            when we call the learn method the model generation blocks have been fully instantiated as they
                            undergo two stage initialisation being info_MDP unknown at the beginning of the pipeline.
        info_MDP: This is a dictionary compliant with the parameters needed in input to all MushroomRL model generation
                  algorithms. It contains the observation space, the action space, the MDP horizon and the MDP gamma.
        algo_object: This is the object containing the actual model generation algorithm.
        algo_params_upon_instantiation: This is a copy of the original value of algo_params, namely the value of
                                        algo_params that the object got upon creation. This is needed for re-loading
                                        objects.
        model: This is used in set_params in the generic Class ModelGenerationMushroomOnline. With this member we avoid
               re-writing for each Class inheriting from the Class ModelGenerationMushroomOnline the set_params method.
               In this Class this member equals to DDPG, which is the Class of MushroomRL implementing DDPG.
        core: This is used to contain the Core object of MushroomRL needed to run online RL algorithms.

        The other parameters and non-parameters members are described in the Class Block.
        """
        super().__init__(eval_metric=eval_metric, obj_name=obj_name, seeder=seeder, log_mode=log_mode,
                         checkpoint_log_path=checkpoint_log_path, verbosity=verbosity, n_jobs=n_jobs, job_type=job_type)
        #capability flags: DDPG works online only, and only on continuous (Box) action spaces:
        self.works_on_online_rl = True
        self.works_on_offline_rl = False
        self.works_on_box_action_space = True
        self.works_on_discrete_action_space = False
        self.works_on_box_observation_space = True
        self.works_on_discrete_observation_space = True
        self.regressor_type = regressor_type
        #this block has parameters and I may want to tune them:
        self.is_parametrised = True
        self.algo_params = algo_params
        self.deterministic_output_policy = deterministic_output_policy
        self.fully_instantiated = False
        self.info_MDP = None
        self.algo_object = None
        #keep an untouched copy of the user-supplied parameters for object re-loading:
        self.algo_params_upon_instantiation = copy.deepcopy(self.algo_params)
        self.model = DDPG
        self.core = None
        #seeds torch
        torch.manual_seed(self.seeder)
        torch.cuda.manual_seed(self.seeder)
        #this seeding is needed for the policy of MushroomRL. Indeed the evaluation at the start of the learn method is done
        #using the policy and in the method draw_action, np.random is called!
        np.random.seed(self.seeder)

    def _default_network(self):
        """
        This method creates a default CriticNetwork with 1 hidden layer and ReLU activation functions and a default ActorNetwork
        with 1 hidden layer and ReLU activation functions.

        Returns
        -------
        CriticNetwork, ActorNetwork: the Class wrappers representing the default CriticNetwork and ActorNetwork.
        """
        class CriticNetwork(nn.Module):
            #Q-network: takes the concatenation of state and action and outputs a single Q-value.
            def __init__(self, input_shape, output_shape, **kwargs):
                super().__init__()
                n_input = input_shape[-1]
                n_output = output_shape[0]
                self.hl0 = nn.Linear(n_input, 16)
                self.hl1 = nn.Linear(16, 16)
                self.hl2 = nn.Linear(16, n_output)
                nn.init.xavier_uniform_(self.hl0.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.hl1.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.hl2.weight, gain=nn.init.calculate_gain('relu'))

            def forward(self, state, action, **kwargs):
                #state and action are concatenated along the feature dimension before the dense layers:
                state_action = torch.cat((state.float(), action.float()), dim=1)
                h = F.relu(self.hl0(state_action))
                h = F.relu(self.hl1(h))
                q = self.hl2(h)
                #drop the trailing singleton dimension of the 1-unit output:
                return torch.squeeze(q)

        class ActorNetwork(nn.Module):
            #policy network: maps a state to an action vector.
            def __init__(self, input_shape, output_shape, **kwargs):
                super(ActorNetwork, self).__init__()
                n_input = input_shape[-1]
                n_output = output_shape[0]
                self.hl0 = nn.Linear(n_input, 16)
                self.hl1 = nn.Linear(16, 16)
                self.hl2 = nn.Linear(16, n_output)
                nn.init.xavier_uniform_(self.hl0.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.hl1.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.hl2.weight, gain=nn.init.calculate_gain('relu'))

            def forward(self, state, **kwargs):
                #squeeze out a possible singleton dimension at axis 1 of the state before the dense layers:
                h = F.relu(self.hl0(torch.squeeze(state, 1).float()))
                h = F.relu(self.hl1(h))
                return self.hl2(h)

        return CriticNetwork, ActorNetwork

    def full_block_instantiation(self, info_MDP):
        """
        Second stage of the two-stage initialisation: stores the MDP information and, when no algo_params were supplied
        at construction time, builds the default DDPG hyper-parameter configuration and applies it via set_params.

        Parameters
        ----------
        info_MDP: This is an object of Class mushroom_rl.environment.MDPInfo. It contains the action and observation spaces,
                  gamma and the horizon of the MDP.

        Returns
        -------
        This method returns True if the algo_params were set successfully, and False otherwise.
        """
        self.info_MDP = info_MDP
        #default hyper-parameters are built only when the user did not pass algo_params upon object creation:
        if(self.algo_params is None):
            #DDPG is deterministic, hence exploration comes from the Ornstein-Uhlenbeck noise process:
            policy_class = Categorical(hp_name='policy_class', obj_name='policy_class_'+str(self.model.__name__),
                                       current_actual_value=OrnsteinUhlenbeckPolicy)
            sigma = Real(hp_name='sigma', current_actual_value=0.2, obj_name='sigma_'+str(self.model.__name__))
            theta = Real(hp_name='theta', current_actual_value=0.15, obj_name='theta_'+str(self.model.__name__))
            dt = Real(hp_name='dt', current_actual_value=1e-2, obj_name='dt_'+str(self.model.__name__))
            critic, actor = self._default_network()
            #actor:
            actor_network = Categorical(hp_name='actor_network', obj_name='actor_network_'+str(self.model.__name__),
                                        current_actual_value=actor)
            actor_class = Categorical(hp_name='actor_class', obj_name='actor_class_'+str(self.model.__name__),
                                      current_actual_value=optim.Adam)
            actor_lr = Real(hp_name='actor_lr', obj_name='actor_lr_'+str(self.model.__name__),
                            current_actual_value=1e-3, range_of_values=[1e-5, 1e-3], to_mutate=True, seeder=self.seeder,
                            log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            #critic:
            critic_network = Categorical(hp_name='critic_network', obj_name='critic_network_'+str(self.model.__name__),
                                         current_actual_value=critic)
            critic_class = Categorical(hp_name='critic_class', obj_name='critic_class_'+str(self.model.__name__),
                                       current_actual_value=optim.Adam)
            critic_lr = Real(hp_name='critic_lr', obj_name='critic_lr_'+str(self.model.__name__),
                             current_actual_value=1e-3, range_of_values=[1e-5, 1e-3], to_mutate=True, seeder=self.seeder,
                             log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            critic_loss = Categorical(hp_name='loss', obj_name='loss_'+str(self.model.__name__),
                                      current_actual_value=F.mse_loss)
            batch_size = Integer(hp_name='batch_size', obj_name='batch_size_'+str(self.model.__name__),
                                 current_actual_value=100, range_of_values=[8, 128], to_mutate=True, seeder=self.seeder,
                                 log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            #NOTE(review): the current value 50000 lies outside range_of_values [1000, 10000] — confirm this is intended:
            initial_replay_size = Integer(hp_name='initial_replay_size', current_actual_value=50000,
                                          range_of_values=[1000, 10000], to_mutate=True, seeder=self.seeder,
                                          log_mode=self.log_mode, obj_name='initial_replay_size_'+str(self.model.__name__))
            max_replay_size = Integer(hp_name='max_replay_size', current_actual_value=1000000, range_of_values=[10000, 1000000],
                                      to_mutate=True, seeder=self.seeder, log_mode=self.log_mode,
                                      obj_name='max_replay_size_'+str(self.model.__name__))
            tau = Real(hp_name='tau', current_actual_value=0.005, obj_name='tau_'+str(self.model.__name__))
            policy_delay = Integer(hp_name='policy_delay', current_actual_value=1,
                                   obj_name='policy_delay_'+str(self.model.__name__))
            #the following parameters are consumed by the learn() method rather than by the DDPG constructor:
            n_epochs = Integer(hp_name='n_epochs', current_actual_value=10, range_of_values=[1,50], to_mutate=True,
                               obj_name='n_epochs_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
                               checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            n_steps = Integer(hp_name='n_steps', current_actual_value=None, obj_name='n_steps_'+str(self.model.__name__),
                              seeder=self.seeder, log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                              verbosity=self.verbosity)
            n_steps_per_fit = Integer(hp_name='n_steps_per_fit', current_actual_value=None, to_mutate=False,
                                      obj_name='n_steps_per_fit_'+str(self.model.__name__), seeder=self.seeder,
                                      log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                      verbosity=self.verbosity)
            n_episodes = Integer(hp_name='n_episodes', current_actual_value=500, range_of_values=[10,1000], to_mutate=True,
                                 obj_name='n_episodes_'+str(self.model.__name__), seeder=self.seeder,
                                 log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
                                 verbosity=self.verbosity)
            n_episodes_per_fit = Integer(hp_name='n_episodes_per_fit', current_actual_value=50, range_of_values=[1,1000],
                                         to_mutate=True, obj_name='n_episodes_per_fit_'+str(self.model.__name__),
                                         seeder=self.seeder, log_mode=self.log_mode,
                                         checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
            dict_of_params = {'policy_class': policy_class,
                              'sigma': sigma,
                              'theta': theta,
                              'dt': dt,
                              'actor_network': actor_network,
                              'actor_class': actor_class,
                              'actor_lr': actor_lr,
                              'critic_network': critic_network,
                              'critic_class': critic_class,
                              'critic_lr': critic_lr,
                              'loss': critic_loss,
                              'batch_size': batch_size,
                              'initial_replay_size': initial_replay_size,
                              'max_replay_size': max_replay_size,
                              'tau': tau,
                              'policy_delay': policy_delay,
                              'n_epochs': n_epochs,
                              'n_steps': n_steps,
                              'n_steps_per_fit': n_steps_per_fit,
                              'n_episodes': n_episodes,
                              'n_episodes_per_fit': n_episodes_per_fit
                              }
            self.algo_params = dict_of_params
        #set_params is called in any case: either on the defaults built above or on the user-supplied algo_params:
        is_set_param_success = self.set_params(new_params=self.algo_params)
        if(not is_set_param_success):
            err_msg = 'There was an error setting the parameters of a'+'\''+str(self.__class__.__name__)+'\' object!'
            self.logger.error(msg=err_msg)
            self.fully_instantiated = False
            self.is_learn_successful = False
            return False
        self.logger.info(msg='\''+str(self.__class__.__name__)+'\' object fully instantiated!')
        self.fully_instantiated = True
        return True

    def model_specific_set_params(self, new_params, mdp_info, input_shape, output_shape, n_actions):
        """
        Re-organises the flat dictionary of hyper-parameters into the nested structure expected by the MushroomRL DDPG
        constructor, instantiates the DDPG object, and splits off the parameters that are only consumed by learn().

        Parameters
        ----------
        new_params: These are the new parameters to set in the RL algorithm. It is a flat dictionary containing objects of Class
                    HyperParameter.
        mdp_info: This is an object of Class mushroom_rl.environment.MDPInfo: it contains the action space, the observation space
                  and gamma and the horizon of the MDP.
        input_shape: The shape of the observation space.
        output_shape: The shape of the action space.
        n_actions: If the space is Discrete this is the number of actions.

        Returns
        -------
        tmp_structured_algo_params: A structured dictionary containing the parameters that are strictly part of the RL algorithm.
        dict_to_add: A flat dictionary containing parameters needed in the method learn() that are not strictly part of the RL
                     algorithm, like the number of epochs and the number of episodes.
        """
        #the critic consumes the concatenation of state and action, so its input size is the observation dimension plus the
        #action dimension (cf. torch.cat in the critic's forward):
        critic_input_shape = Categorical(hp_name='critic_input_shape', obj_name='critic_input_shape_'+str(self.model.__name__),
                                         current_actual_value=(input_shape.current_actual_value[0]+
                                                               self.info_MDP.action_space.shape[0],))
        #the critic outputs a single Q-value:
        critic_output_shape = Categorical(hp_name='critic_output_shape', current_actual_value=(1,),
                                          obj_name='critic_output_shape_'+str(self.model.__name__))
        #skeleton of the nested structure required by the MushroomRL DDPG constructor; the None placeholders are filled in
        #by the loop below:
        tmp_structured_algo_params = {'mdp_info': mdp_info,
                                      'actor_params': {'input_shape': input_shape,
                                                       'n_actions': n_actions,
                                                       'output_shape': output_shape
                                                       },
                                      'actor_optimizer': {'class': None, 'params': {'lr': None}},
                                      'critic_params': {'input_shape': critic_input_shape,
                                                        'output_shape': critic_output_shape,
                                                        'optimizer': {'class': None, 'params': {'lr': None}}
                                                        }
                                      }
        #either np.ones(1) or np.ones(self.info_MDP.action_space.shape[0])
        new_sigma = np.ones(1)*new_params['sigma'].current_actual_value
        policy_params_dict = dict(sigma=new_sigma, theta=new_params['theta'].current_actual_value,
                                  dt=new_params['dt'].current_actual_value)
        policy_params = Categorical(hp_name='policy_params', current_actual_value=policy_params_dict,
                                    obj_name='policy_params_'+str(self.model.__name__))
        new_params.update({'policy_params': policy_params})
        #route each flat key into its place in the nested structure:
        for tmp_key in list(new_params.keys()):
            #i do not want to change mdp_info
            if(tmp_key in ['policy_class', 'policy_params', 'batch_size', 'initial_replay_size', 'max_replay_size', 'tau',
                           'policy_delay']):
                tmp_structured_algo_params.update({tmp_key: new_params[tmp_key]})
            if(tmp_key == 'loss'):
                tmp_structured_algo_params['critic_params'].update({tmp_key: new_params[tmp_key]})
            if(tmp_key == 'critic_network'):
                tmp_structured_algo_params['critic_params'].update({'network': new_params[tmp_key]})
            if(tmp_key == 'critic_class'):
                tmp_structured_algo_params['critic_params']['optimizer'].update({'class': new_params[tmp_key]})
            if(tmp_key == 'critic_lr'):
                tmp_structured_algo_params['critic_params']['optimizer']['params'].update({'lr': new_params[tmp_key]})
            if(tmp_key == 'actor_network'):
                tmp_structured_algo_params['actor_params'].update({'network': new_params[tmp_key]})
            if(tmp_key == 'actor_class'):
                tmp_structured_algo_params['actor_optimizer'].update({'class': new_params[tmp_key]})
            if(tmp_key == 'actor_lr'):
                tmp_structured_algo_params['actor_optimizer']['params'].update({'lr': new_params[tmp_key]})
        #extract the raw values out of the HyperParameter wrappers before handing them to MushroomRL:
        structured_dict_of_values = self._select_current_actual_value_from_hp_classes(params_structured_dict=
                                                                                      tmp_structured_algo_params)
        #i need to un-pack structured_dict_of_values for DDPG
        self.algo_object = DDPG(**structured_dict_of_values)
        #now that i have created the DDPG object i can resolve the conflict between the 'actor_class', 'actor_lr',
        #'actor_network', 'critic_class', 'critic_lr' and 'critic_network'. To resolve it, i need to change their keys from
        #generic 'class', 'lr' and 'network', that are needed for MushroomRL, to 'actor_class', 'actor_lr', 'actor_network',
        #'critic_class', critic_lr' and 'critic_network':
        tmp_structured_algo_params['critic_params']['critic_network'] = tmp_structured_algo_params['critic_params']['network']
        del tmp_structured_algo_params['critic_params']['network']
        new_val = tmp_structured_algo_params['critic_params']['optimizer']['class']
        tmp_structured_algo_params['critic_params']['optimizer']['critic_class'] = new_val
        del tmp_structured_algo_params['critic_params']['optimizer']['class']
        new_val = tmp_structured_algo_params['critic_params']['optimizer']['params']['lr']
        tmp_structured_algo_params['critic_params']['optimizer']['params']['critic_lr'] = new_val
        del tmp_structured_algo_params['critic_params']['optimizer']['params']['lr']
        new_val = tmp_structured_algo_params['actor_params']['network']
        tmp_structured_algo_params['actor_params']['actor_network'] = new_val
        del tmp_structured_algo_params['actor_params']['network']
        tmp_structured_algo_params['actor_optimizer']['actor_class'] = tmp_structured_algo_params['actor_optimizer']['class']
        del tmp_structured_algo_params['actor_optimizer']['class']
        new_val = tmp_structured_algo_params['actor_optimizer']['params']['lr']
        tmp_structured_algo_params['actor_optimizer']['params']['actor_lr'] = new_val
        del tmp_structured_algo_params['actor_optimizer']['params']['lr']
        #delete policy_params: this is constructed new each time here:
        del tmp_structured_algo_params['policy_params']
        #add n_epochs, n_steps, n_steps_per_fit, n_episodes, n_episodes_per_fit, sigma, theta, dt:
        dict_to_add = {'n_epochs': new_params['n_epochs'],
                       'n_steps': new_params['n_steps'],
                       'n_steps_per_fit': new_params['n_steps_per_fit'],
                       'n_episodes': new_params['n_episodes'],
                       'n_episodes_per_fit': new_params['n_episodes_per_fit'],
                       'sigma': new_params['sigma'],
                       'theta': new_params['theta'],
                       'dt': new_params['dt']
                       }
        return tmp_structured_algo_params, dict_to_add
class ModelGenerationMushroomOnlineGPOMDP(ModelGenerationMushroomOnline):
"""
This Class implements a specific online model generation algorithm: GPOMDP. This Class wraps the GPOMDP method implemented in
MushroomRL.
cf. https://github.com/MushroomRL/mushroom-rl/blob/dev/mushroom_rl/algorithms/policy_search/policy_gradient/gpomdp.py
This Class inherits from the Class ModelGenerationMushroomOnline.
"""
def __init__(self, eval_metric, obj_name, regressor_type='generic_regressor', seeder=2, algo_params=None, log_mode='console',
             checkpoint_log_path=None, verbosity=3, n_jobs=1, job_type='process', deterministic_output_policy=True):
    """
    Parameters
    ----------
    algo_params: This is either None or a dictionary containing all the needed parameters.
                 The default is None.
                 If None then the following parameters will be used:
                 'policy': StateStdGaussianPolicy,
                 'approximator': LinearApproximator,
                 'input_shape': self.info_MDP.observation_space.shape,
                 'n_actions': None,
                 'output_shape': self.info_MDP.action_space.shape,
                 'optimizer': AdaptiveOptimizer,
                 'eps': 1e-2,
                 'n_epochs': 10,
                 'n_steps': None,
                 'n_steps_per_fit': None,
                 'n_episodes': 500,
                 'n_episodes_per_fit': 50
    regressor_type: This is a string and it can either be: 'action_regressor', 'q_regressor' or 'generic_regressor'. This is
                    used to pick one of the 3 possible kind of regressor made available by MushroomRL.
                    Note that if you want to use a 'q_regressor' then the picked regressor must be able to perform
                    multi-target regression, as a single regressor is used for all actions.
                    The default is 'generic_regressor'.
    deterministic_output_policy: If this is True then the output policy will be rendered deterministic else if False nothing
                                 will be done. Note that the policy is made deterministic only at the end of the learn()
                                 method.

    Non-Parameters Members
    ----------------------
    fully_instantiated: This is True if the block is fully instantiated, False otherwise. It is mainly used to make sure that
                        when we call the learn method the model generation blocks have been fully instantiated as they
                        undergo two stage initialisation being info_MDP unknown at the beginning of the pipeline.
    info_MDP: This is a dictionary compliant with the parameters needed in input to all MushroomRL model generation
              algorithms. It contains the observation space, the action space, the MDP horizon and the MDP gamma.
    algo_object: This is the object containing the actual model generation algorithm.
    algo_params_upon_instantiation: This is a copy of the original value of algo_params, namely the value of
                                    algo_params that the object got upon creation. This is needed for re-loading
                                    objects.
    model: This is used in set_params in the generic Class ModelGenerationMushroomOnline. With this member we avoid
           re-writing for each Class inheriting from the Class ModelGenerationMushroomOnline the set_params method.
           In this Class this member equals to GPOMDP, which is the Class of MushroomRL implementing GPOMDP.
    core: This is used to contain the Core object of MushroomRL needed to run online RL algorithms.

    The other parameters and non-parameters members are described in the Class Block.
    """
    super().__init__(eval_metric=eval_metric, obj_name=obj_name, seeder=seeder, log_mode=log_mode,
                     checkpoint_log_path=checkpoint_log_path, verbosity=verbosity, n_jobs=n_jobs, job_type=job_type)
    #capability flags: GPOMDP works online only, and only on continuous (Box) action spaces:
    self.works_on_online_rl = True
    self.works_on_offline_rl = False
    self.works_on_box_action_space = True
    self.works_on_discrete_action_space = False
    self.works_on_box_observation_space = True
    self.works_on_discrete_observation_space = True
    self.regressor_type = regressor_type
    #this block has parameters and I may want to tune them:
    self.is_parametrised = True
    self.algo_params = algo_params
    self.deterministic_output_policy = deterministic_output_policy
    self.fully_instantiated = False
    self.info_MDP = None
    self.algo_object = None
    #keep an untouched copy of the user-supplied parameters for object re-loading:
    self.algo_params_upon_instantiation = copy.deepcopy(self.algo_params)
    self.model = GPOMDP
    self.core = None
    #this seeding is needed for the policy of MushroomRL. Indeed the evaluation at the start of the learn method is done
    #using the policy and in the method draw_action, np.random is called!
    np.random.seed(self.seeder)
def full_block_instantiation(self, info_MDP):
"""
Parameters
----------
info_MDP: This is an object of Class mushroom_rl.environment.MDPInfo. It contains the action and observation spaces,
gamma and the horizon of the MDP.
Returns
-------
This method returns True if the algo_params were set successfully, and False otherwise.
"""
self.info_MDP = info_MDP
if(self.algo_params is None):
optimizer = Categorical(hp_name='optimizer', obj_name='optimizer_'+str(self.model.__name__),
current_actual_value=AdaptiveOptimizer)
eps = Real(hp_name='eps', obj_name='eps_'+str(self.model.__name__), current_actual_value=1e-2,
range_of_values=[1e-4, 1e-1], to_mutate=True, seeder=self.seeder, log_mode=self.log_mode,
checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
to_maximize = Categorical(hp_name='maximize', obj_name='maximize_'+str(self.model.__name__),
current_actual_value=True, to_mutate=False, seeder=self.seeder, log_mode=self.log_mode,
checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
n_epochs = Integer(hp_name='n_epochs', current_actual_value=10, range_of_values=[1,50], to_mutate=True,
obj_name='n_epochs_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
n_steps = Integer(hp_name='n_steps', current_actual_value=None, to_mutate=False,
obj_name='n_steps_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
n_steps_per_fit = Integer(hp_name='n_steps_per_fit', current_actual_value=None, to_mutate=False,
obj_name='n_steps_per_fit_'+str(self.model.__name__), seeder=self.seeder,
log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path,
verbosity=self.verbosity)
n_episodes = Integer(hp_name='n_episodes', current_actual_value=500, range_of_values=[10,1000], to_mutate=True,
obj_name='n_episodes_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode,
checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
n_episodes_per_fit = Integer(hp_name='n_episodes_per_fit', current_actual_value=50, range_of_values=[1,100],
to_mutate=True, obj_name='n_episodes_per_fit_'+str(self.model.__name__),
seeder=self.seeder, log_mode=self.log_mode,
checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity)
dict_of_params = {'optimizer': optimizer,
'eps': eps,
'maximize': to_maximize,
'n_epochs': n_epochs,
'n_steps': n_steps,
'n_steps_per_fit': n_steps_per_fit,
'n_episodes': n_episodes,
'n_episodes_per_fit': n_episodes_per_fit
}
self.algo_params = dict_of_params
is_set_param_success = self.set_params(new_params=self.algo_params)
if(not is_set_param_success):
err_msg = 'There was an error setting the parameters of a'+'\''+str(self.__class__.__name__)+'\' object!'
self.logger.error(msg=err_msg)
self.fully_instantiated = False
self.is_learn_successful = False
return False
self.logger.info(msg='\''+str(self.__class__.__name__)+'\' object fully instantiated!')
self.fully_instantiated = True
return True
def _create_policy(self, input_shape, n_actions, output_shape):
"""
Parameters
----------
input_shape: The shape of the observation space.
n_actions: If the space is Discrete this is the number of actions.
output_shape: The shape of the action space.
Returns
-------
policy: This is an object of Class Categorical and in the current_actual_value it contains a mushroom_rl policy object.
"""
approximator_value = Regressor(LinearApproximator, input_shape=input_shape.current_actual_value,
output_shape=output_shape.current_actual_value,
n_actions=n_actions.current_actual_value)
approximator = Categorical(hp_name='approximator', obj_name='approximator_'+str(self.model.__name__),
current_actual_value=approximator_value)
sigma_value = Regressor(LinearApproximator, input_shape=input_shape.current_actual_value,
output_shape=output_shape.current_actual_value, n_actions=n_actions.current_actual_value)
sigma = Categorical(hp_name='sigma', obj_name='sigma_'+str(self.model.__name__), current_actual_value=sigma_value)
sigma_weights = 0.25*np.ones(sigma.current_actual_value.weights_size)
sigma.current_actual_value.set_weights(sigma_weights)
policy_value = StateStdGaussianPolicy(mu=approximator.current_actual_value, std=sigma.current_actual_value)
policy = Categorical(hp_name='policy', obj_name='policy_'+str(self.model.__name__), current_actual_value=policy_value)
return policy
    def set_params(self, new_params):
        """
        Build the parameter dictionary in the form expected by MushroomRL, re-create self.algo_object
        from it, and store the flat parameter dictionary back into self.algo_params.

        Parameters
        ----------
        new_params: The new parameters to be used in the specific model generation algorithm. It must be a
                    dictionary that does not contain any dictionaries (i.e: all parameters must be at the same
                    level).

                    We need to create the dictionary in the right form for MushroomRL. Then it needs to update
                    self.algo_params. Then it needs to update the object self.algo_object: to this we need to
                    pass the actual values and not the Hyperparameter objects.

                    We call _select_current_actual_value_from_hp_classes: to this method we need to pass the
                    dictionary already in its final form.

        Returns
        -------
        bool: This method returns True if new_params is set correctly, and False otherwise.
        """
        if(new_params is not None):
            # wrap the MDP info and the regressor input shape as hyper-parameter objects, as the
            # structured dictionary built below expects:
            mdp_info = Categorical(hp_name='mdp_info', obj_name='mdp_info_'+str(self.model.__name__),
                                   current_actual_value=self.info_MDP)
            input_shape = Categorical(hp_name='input_shape', obj_name='input_shape_'+str(self.model.__name__),
                                      current_actual_value=self.info_MDP.observation_space.shape)
            # the regressor layout decides the output shape and whether n_actions is specified.
            # NOTE(review): the 'action_regressor' and 'q_regressor' branches read
            # self.info_MDP.action_space.n, which only discrete action spaces provide — confirm which
            # regressor types are actually reachable for this block.
            if(self.regressor_type == 'action_regressor'):
                # one regressor per action: scalar output, n_actions needed
                output_shape = Categorical(hp_name='output_shape', obj_name='output_shape_'+str(self.model.__name__),
                                           current_actual_value=(1,))
                n_actions = Categorical(hp_name='n_actions', obj_name='n_actions_'+str(self.model.__name__),
                                        current_actual_value=self.info_MDP.action_space.n)
            elif(self.regressor_type == 'q_regressor'):
                # single regressor with one output per action
                output_shape = Categorical(hp_name='output_shape', obj_name='output_shape_'+str(self.model.__name__),
                                           current_actual_value=(self.info_MDP.action_space.n,))
                n_actions = Categorical(hp_name='n_actions', obj_name='n_actions_'+str(self.model.__name__),
                                        current_actual_value=self.info_MDP.action_space.n)
            elif(self.regressor_type == 'generic_regressor'):
                output_shape = Categorical(hp_name='output_shape', obj_name='output_shape_'+str(self.model.__name__),
                                           current_actual_value=self.info_MDP.action_space.shape)
                #to have a generic regressor I must not specify n_actions
                n_actions = Categorical(hp_name='n_actions', obj_name='n_actions_'+str(self.model.__name__),
                                        current_actual_value=None)
            # NOTE(review): any other regressor_type leaves output_shape/n_actions unbound and the call to
            # _create_policy below raises NameError — presumably validated upstream; confirm.

            tmp_structured_algo_params = {'mdp_info': mdp_info}

            #By subclassing this Class and changing the method _create_policy() one can specify a specific policy:
            policy = self._create_policy(input_shape=input_shape, n_actions=n_actions, output_shape=output_shape)
            tmp_structured_algo_params.update({'policy': policy})

            # split new_params: the optimizer goes into the structured dict; every key that is not a
            # constructor or learn-loop parameter is treated as an optimizer parameter (e.g. 'eps', 'maximize'):
            opt_params = {}
            for tmp_key in list(new_params.keys()):
                if(tmp_key == 'optimizer'):
                    tmp_structured_algo_params.update({tmp_key: new_params[tmp_key]})
                if(tmp_key not in ['mdp_info', 'policy', 'optimizer', 'n_epochs', 'n_steps', 'n_steps_per_fit', 'n_episodes',
                                   'n_episodes_per_fit']):
                    opt_params.update({tmp_key: new_params[tmp_key]})

            # the optimizer hyper-parameter holds the optimizer *class*: temporarily replace it with an
            # instance built from the actual optimizer parameter values (needed by the GPOMDP constructor):
            optimizer_vals = self._select_current_actual_value_from_hp_classes(params_structured_dict=opt_params)
            opt = tmp_structured_algo_params['optimizer'].current_actual_value
            tmp_structured_algo_params['optimizer'].current_actual_value = opt(**optimizer_vals)

            # extract the plain values from the hyper-parameter objects:
            structured_dict_of_values = self._select_current_actual_value_from_hp_classes(params_structured_dict=
                                                                                          tmp_structured_algo_params)
            #i need to un-pack structured_dict_of_values for GPOMDP
            self.algo_object = GPOMDP(**structured_dict_of_values)

            final_dict_of_params = tmp_structured_algo_params
            #remove the optimizer object (that is needed for MushroomRL) and insert the optimizer Class instead:
            final_dict_of_params['optimizer'].current_actual_value = opt
            #add n_epochs, n_steps, n_steps_per_fit, n_episodes, n_episodes_per_fit:
            dict_to_add = {'n_epochs': new_params['n_epochs'],
                           'n_steps': new_params['n_steps'],
                           'n_steps_per_fit': new_params['n_steps_per_fit'],
                           'n_episodes': new_params['n_episodes'],
                           'n_episodes_per_fit': new_params['n_episodes_per_fit']
                          }
            final_dict_of_params = {**final_dict_of_params, **dict_to_add, **opt_params}
            self.algo_params = final_dict_of_params

            # keep a pristine deep copy of the parameters for re-loading the object later:
            tmp_new_params = self.get_params()
            if(tmp_new_params is not None):
                self.algo_params_upon_instantiation = copy.deepcopy(tmp_new_params)
            else:
                self.logger.error(msg='There was an error getting the parameters!')
                return False
            return True
        else:
            self.logger.error(msg='Cannot set parameters: \'new_params\' is \'None\'!')
            return False
| StarcoderdataPython |
# -*- coding: utf-8 -*-
"""MIME type constants and helpers."""

TEXT = "text/plain"
HTML = "text/html"
CSS = "text/css"
JS = "application/javascript"
XML = "application/xml"
XHTML = "application/xhtml+xml"
JSON = "application/json"
SVG = "image/svg+xml"

# MIME types that should be served with an explicit charset parameter.
_CHARSET_REQUIRED = (TEXT, HTML)


def needs_charset(mime_type):
    """Return True when the given MIME type requires a charset declaration."""
    return mime_type in _CHARSET_REQUIRED
| StarcoderdataPython |
3442530 | <reponame>bearcatt/mega.pytorch
import random
import sys
import numpy as np
from PIL import Image
from .vid import VIDDataset
from mega_core.config import cfg
# modified from torchvision to add support for max size
def get_size(min_size, max_size, image_size):
    """
    Compute the (height, width) a PIL image of ``image_size`` should be resized to.

    A target length is randomly drawn from ``min_size`` and applied to the shorter
    image side, scaling the longer side to preserve the aspect ratio; if that would
    push the longer side past ``max_size``, the target is shrunk accordingly.
    (Modified from torchvision to add support for ``max_size``.)

    Parameters
    ----------
    min_size: a single int, or a list/tuple of ints from which one is picked at
        random as the target length of the shorter side.
    max_size: optional int upper bound for the longer side; ``None`` disables the cap.
    image_size: PIL-style ``(width, height)`` of the input image.

    Returns
    -------
    tuple: the target ``(height, width)``.
    """
    if not isinstance(min_size, (list, tuple)):
        min_size = (min_size,)
    w, h = image_size
    size = random.choice(min_size)
    # shrink the target so that the scaled longer side does not exceed max_size
    # (the redundant `max_size = max_size` self-assignment was removed)
    if max_size is not None:
        min_original_size = float(min((w, h)))
        max_original_size = float(max((w, h)))
        if max_original_size / min_original_size * size > max_size:
            size = int(round(max_size * min_original_size / max_original_size))
    # shorter side already has the target length: keep the image size as-is
    if (w <= h and w == size) or (h <= w and h == size):
        return (h, w)
    # scale the shorter side to `size`, keeping the aspect ratio
    if w < h:
        ow = size
        oh = int(size * h / w)
    else:
        oh = size
        ow = int(size * w / h)
    return (oh, ow)
class VIDMEGADataset(VIDDataset):
    """
    VID dataset variant for MEGA: alongside the current frame it loads local, memory and
    global reference frames, as configured under cfg.MODEL.VID.MEGA.
    """

    def __init__(self, image_set, data_dir, img_dir, anno_path, img_index, transforms, is_train=True):
        super(VIDMEGADataset, self).__init__(image_set, data_dir, img_dir, anno_path, img_index, transforms, is_train=is_train)
        if not self.is_train:
            # start_index: dataset ids at which a new video starts (frame id 0);
            # start_id[i]: the start id of the video that sample i belongs to.
            self.start_index = []
            self.start_id = []
            if cfg.MODEL.VID.MEGA.GLOBAL.ENABLE:
                # per-video (optionally shuffled) frame ordering, used at test time to
                # pick global reference frames; keyed by the video's start id as a string
                self.shuffled_index = {}
            for id, image_index in enumerate(self.image_set_index):
                # assumes the frame id is the last path component of the index entry
                frame_id = int(image_index.split("/")[-1])
                if frame_id == 0:
                    self.start_index.append(id)
                    if cfg.MODEL.VID.MEGA.GLOBAL.ENABLE:
                        shuffled_index = np.arange(self.frame_seg_len[id])
                        if cfg.MODEL.VID.MEGA.GLOBAL.SHUFFLE:
                            np.random.shuffle(shuffled_index)
                        self.shuffled_index[str(id)] = shuffled_index

                    self.start_id.append(id)
                else:
                    self.start_id.append(self.start_index[-1])

    def _get_train(self, idx):
        """Return (images, target, idx) for training; images holds the current frame plus
        local ('ref_l'), memory ('ref_m') and global ('ref_g') reference frame lists."""
        filename = self.image_set_index[idx]
        img = Image.open(self._img_dir % filename).convert("RGB")

        # if a video dataset
        img_refs_l = []
        img_refs_m = []
        img_refs_g = []
        if hasattr(self, "pattern"):
            # local reference frames: random offsets in [MIN_OFFSET, MAX_OFFSET] around the
            # current frame, clamped to the video bounds
            offsets = np.random.choice(cfg.MODEL.VID.MEGA.MAX_OFFSET - cfg.MODEL.VID.MEGA.MIN_OFFSET + 1,
                                       cfg.MODEL.VID.MEGA.REF_NUM_LOCAL, replace=False) + cfg.MODEL.VID.MEGA.MIN_OFFSET
            for i in range(len(offsets)):
                ref_id = min(max(self.frame_seg_id[idx] + offsets[i], 0), self.frame_seg_len[idx] - 1)
                ref_filename = self.pattern[idx] % ref_id
                img_ref = Image.open(self._img_dir % ref_filename).convert("RGB")
                img_refs_l.append(img_ref)
            # memory frames: same random-offset scheme, but centred ALL_FRAME_INTERVAL frames back
            if cfg.MODEL.VID.MEGA.MEMORY.ENABLE:
                ref_id_center = max(self.frame_seg_id[idx] - cfg.MODEL.VID.MEGA.ALL_FRAME_INTERVAL, 0)
                offsets = np.random.choice(cfg.MODEL.VID.MEGA.MAX_OFFSET - cfg.MODEL.VID.MEGA.MIN_OFFSET + 1,
                                           cfg.MODEL.VID.MEGA.REF_NUM_MEM, replace=False) + cfg.MODEL.VID.MEGA.MIN_OFFSET
                for i in range(len(offsets)):
                    ref_id = min(max(ref_id_center + offsets[i], 0), self.frame_seg_len[idx] - 1)
                    ref_filename = self.pattern[idx] % ref_id
                    img_ref = Image.open(self._img_dir % ref_filename).convert("RGB")
                    img_refs_m.append(img_ref)
            # global frames: sampled uniformly from the whole video
            if cfg.MODEL.VID.MEGA.GLOBAL.ENABLE:
                ref_ids = np.random.choice(self.frame_seg_len[idx], cfg.MODEL.VID.MEGA.REF_NUM_GLOBAL, replace=False)
                for ref_id in ref_ids:
                    ref_filename = self.pattern[idx] % ref_id
                    img_ref = Image.open(self._img_dir % ref_filename).convert("RGB")
                    img_refs_g.append(img_ref)
        else:
            # still-image sample: duplicate the current image as every reference frame
            for i in range(cfg.MODEL.VID.MEGA.REF_NUM_LOCAL):
                img_refs_l.append(img.copy())
            if cfg.MODEL.VID.MEGA.MEMORY.ENABLE:
                for i in range(cfg.MODEL.VID.MEGA.REF_NUM_MEM):
                    img_refs_m.append(img.copy())
            if cfg.MODEL.VID.MEGA.GLOBAL.ENABLE:
                for i in range(cfg.MODEL.VID.MEGA.REF_NUM_GLOBAL):
                    img_refs_g.append(img.copy())

        target = self.get_groundtruth(idx)
        target = target.clip_to_image(remove_empty=True)

        # draw one resize target for this sample so the current and all reference frames share it
        # NOTE(review): the lambda path assumes self.transforms accepts (image, size, target) — confirm
        if len(cfg.INPUT.MIN_SIZE_TRAIN) > 1:
            size = get_size(cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN, img.size)
            transform_fn = lambda image, target: self.transforms(image, size, target)
        else:
            transform_fn = self.transforms
        if self.transforms is not None:
            img, target = transform_fn(img, target)
            for i in range(len(img_refs_l)):
                img_refs_l[i], _ = transform_fn(img_refs_l[i], None)
            for i in range(len(img_refs_m)):
                img_refs_m[i], _ = transform_fn(img_refs_m[i], None)
            for i in range(len(img_refs_g)):
                img_refs_g[i], _ = transform_fn(img_refs_g[i], None)

        images = {}
        images["cur"] = img
        images["ref_l"] = img_refs_l
        images["ref_m"] = img_refs_m
        images["ref_g"] = img_refs_g
        return images, target, idx

    def _get_test(self, idx):
        """Return (images, target, idx) for testing; besides the frames, images carries the
        bookkeeping (frame category, video length, filename pattern, transforms) the test
        loop needs to maintain its feature queues."""
        filename = self.image_set_index[idx]
        img_cur = Image.open(self._img_dir % filename).convert("RGB")

        # give the current frame a category. 0 for start, 1 for normal
        frame_id = int(filename.split("/")[-1])
        frame_category = 0
        if frame_id != 0:
            frame_category = 1

        img_refs_l = []
        # reading other images of the queue (not necessary to be the last one, but last one here)
        ref_id = min(self.frame_seg_len[idx] - 1, frame_id + cfg.MODEL.VID.MEGA.MAX_OFFSET)
        ref_filename = self.pattern[idx] % ref_id
        img_ref = Image.open(self._img_dir % ref_filename).convert("RGB")
        img_refs_l.append(img_ref)

        img_refs_g = []
        if cfg.MODEL.VID.MEGA.GLOBAL.ENABLE:
            # at the first frame of a video pre-load the whole global pool (GLOBAL.SIZE frames);
            # afterwards only one new global frame is added per step
            size = cfg.MODEL.VID.MEGA.GLOBAL.SIZE if frame_id == 0 else 1
            shuffled_index = self.shuffled_index[str(self.start_id[idx])]
            for id in range(size):
                # walk the (shuffled) per-video ordering, wrapping around at the video length
                filename = self.pattern[idx] % shuffled_index[
                    (idx - self.start_id[idx] + cfg.MODEL.VID.MEGA.GLOBAL.SIZE - id - 1) % self.frame_seg_len[idx]]
                img_ref = Image.open(self._img_dir % filename).convert("RGB")
                img_refs_g.append(img_ref)

        target = self.get_groundtruth(idx)
        target = target.clip_to_image(remove_empty=True)

        if self.transforms is not None:
            img_cur, target = self.transforms(img_cur, target)
            for i in range(len(img_refs_l)):
                img_refs_l[i], _ = self.transforms(img_refs_l[i], None)
            for i in range(len(img_refs_g)):
                img_refs_g[i], _ = self.transforms(img_refs_g[i], None)

        images = {}
        images["cur"] = img_cur
        images["ref_l"] = img_refs_l
        images["ref_g"] = img_refs_g
        images["frame_category"] = frame_category
        images["seg_len"] = self.frame_seg_len[idx]
        images["pattern"] = self.pattern[idx]
        images["img_dir"] = self._img_dir
        images["transforms"] = self.transforms
        return images, target, idx
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.