code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(5):
print(tej[i])
<|reserved_special_token_1|>
tej = 'votary'
for i in range(5):
print(tej[i])
<|reserved_special_token_1|>
tej="votary"
for i in range(5):
print(tej[i])
|
flexible
|
{
"blob_id": "1f385fda1bdc0008ff91b935998c95c8ffcbd297",
"index": 2797,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(5):\n print(tej[i])\n",
"step-3": "tej = 'votary'\nfor i in range(5):\n print(tej[i])\n",
"step-4": "tej=\"votary\"\nfor i in range(5):\n\tprint(tej[i])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Library for Stalker project
#Libraries
import pandas as pd
import seaborn as sns
from IPython.display import Image, display
import matplotlib.pyplot as plt
# Google search
from googlesearch import search
# Tldextract to get domain of url
import tldextract as tld
# BeautifulSoup
from bs4 import BeautifulSoup as bs
from bs4.element import Comment
import urllib.request
# NLTK to analyze webs
import nltk
from nltk.corpus import stopwords
from nltk import FreqDist
from nltk.tokenize import word_tokenize
# Find close matches
from difflib import get_close_matches
# Sentiment analysis
from textblob import TextBlob
# Twitter sentiment analysis
import tweepy
# News API
from newsapi import NewsApiClient
# Credentials
import credentials as cd
# Finding info in APIs
# NOTE(review): these module-level statements run at import time and require a
# local `credentials.py` exposing `news_credentials` and `twitter_credentials`
# dicts with valid keys — importing this module without them will raise.
newsapi = NewsApiClient(api_key=cd.news_credentials['api_key'])
# Comma-separated outlet whitelist passed as `sources=` to NewsApiClient queries.
news_sources = 'the-verge,buzzfeed,engadget,hacker-news,mashable,reddit-r-all,wired,techcrunch'
# Twitter API: OAuth 1.0a handshake using keys from the credentials module.
consumer_key = cd.twitter_credentials['consumer_key']
consumer_key_secret = cd.twitter_credentials['consumer_key_secret']
access_token = cd.twitter_credentials['access_token']
access_token_secret = cd.twitter_credentials['access_token_secret']
auth = tweepy.OAuthHandler(consumer_key, consumer_key_secret)
auth.set_access_token(access_token, access_token_secret)
# Shared tweepy client used by the search/sentiment helpers below.
api = tweepy.API(auth)
# Finding query on Google
# Finding related urls
def find_webs(query):
    """Google `query` and return up to 10 result URLs, skipping social networks
    and repeated domains.

    Returns a list of URL strings (may be shorter than 10 when few results
    qualify).
    """
    urls = []
    social_networks = ('facebook', 'twitter', 'linkedin', 'instagram',
                       'youtube', 'pinterest', 'angel')
    seen_domains = set()
    for result in search(query, tld="com", num=30, stop=30, pause=3, lang='en'):
        # FIX: stop fetching further results once 10 URLs are collected; the
        # old loop kept iterating (and hitting Google) while adding nothing.
        if len(urls) >= 10:
            break
        domain = tld.extract(result).domain
        # FIX: the domain-dedup test was needlessly re-evaluated once per
        # social-network keyword; hoisted out of the keyword scan.
        skip = domain in seen_domains or any(rs in result for rs in social_networks)
        if not skip and result not in urls:
            urls.append(result)
            seen_domains.add(domain)
    return urls
def tag_visible(element):
    """Return True when a BeautifulSoup text node is user-visible page text.

    Text inside style/script/head/title/meta (or the document root) and HTML
    comments is considered invisible.
    """
    hidden_parents = ('style', 'script', 'head', 'title', 'meta', '[document]')
    if element.parent.name in hidden_parents:
        return False
    return not isinstance(element, Comment)
def text_from_html(body):
    """Extract the visible text of an HTML document as one space-joined string."""
    soup = bs(body, 'html.parser')
    fragments = soup.findAll(text=True)
    return " ".join(piece.strip() for piece in fragments if tag_visible(piece))
def cleaning_urls_text(url):
    """Fetch `url`, extract its visible text, and return the tokens that are
    not English stopwords.

    Best effort: returns [] on any fetch/parse/tokenize failure.
    """
    try:
        page_text = text_from_html(urllib.request.urlopen(url).read())
        stop_words = set(stopwords.words('english'))
        # `w not in` instead of the non-idiomatic `not w in`.
        return [w for w in word_tokenize(page_text) if w not in stop_words]
    except Exception:
        # FIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        # The deliberate best-effort fallback is kept for real errors.
        return []
def filter_warning_words(sentence):
    """Return the tokens of `sentence` (a list of words) that are warning words.

    Input order and multiplicity are preserved.
    """
    # FIX: a frozenset gives O(1) membership per token instead of a linear
    # scan over the list, and de-duplicates the 'fraud' entry the original
    # list held twice.
    warning_words = frozenset([
        'lie', 'fraud', 'scam', 'extortion', 'deceit', 'crime', 'arson',
        'assault', 'bigamy', 'blackmail', 'bribery', 'burglary', 'child abuse',
        'conspiracy', 'espionage', 'forgery', 'genocide', 'hijacking',
        'homicide', 'kidnapping', 'manslaughter', 'mugging', 'murder',
        'perjury', 'rape', 'riot', 'robbery', 'shoplifting', 'slander',
        'smuggling', 'treason', 'trespassing'])
    return [word for word in sentence if word in warning_words]
def warnings_count(url):
    """Count warning words on the page at `url`.

    Returns (url, count) when at least one warning word is found, else None.
    """
    matched = filter_warning_words(cleaning_urls_text(url))
    return (url, len(matched)) if matched else None
def most_warnings(urls, look_for):
    """Score each URL by its warning-word count and print the top two.

    urls: list of URL strings to scan; look_for: the name being investigated
    (only used in the "nothing found" message).
    """
    scored = list(map(warnings_count, urls))
    # FIX: compare against None with `is not None`, not `!=`.
    scored_clean = [item for item in scored if item is not None]
    scored_clean.sort(key=lambda item: item[1], reverse=True)
    top_urls = [url for url, length in scored_clean[:2]]
    if len(top_urls) > 1:
        print(f"""
We found something sketchy. You might want to check these links:
- {top_urls[0]}
- {top_urls[1]}
""")
    elif len(top_urls) == 1:
        print(f"""
We found something sketchy. You might want to check this link:
{top_urls[0]}
""")
    else:
        print(f"We couldn't find anything worrying about {look_for} on Google. Nice!")
# Input correction
def retrieve_name(my_name, companies):
    """Validate `my_name` against the known company names, offering the
    closest fuzzy match interactively when there is no exact hit.

    companies: DataFrame with a 'name' column (NaNs allowed, they are dropped).
    Returns the confirmed name, the user's original input, an apology string
    on unrecognised input, or None when nothing resembles `my_name`.
    """
    companies_list = companies.dropna(subset=['name'])['name'].tolist()
    if my_name in companies_list:
        return my_name
    # FIX: get_close_matches was recomputed up to three times (len test,
    # prompt, return); compute it once.
    matches = get_close_matches(my_name, companies_list)
    if matches:
        action = input("Did you mean %s instead? [y or n]: " % matches[0])
        if action == "y":
            return matches[0]
        elif action == "n":
            return my_name
        else:
            return ("we don't understand you. Apologies.")
def retrieve_sector(my_sector, investments):
    """Validate `my_sector` against the sectors found in the investments
    table, offering the closest fuzzy match interactively on a near miss.

    investments: DataFrame with 'raised_amount_usd' and
    'company_category_list' columns; category cells may hold several sectors
    separated by ", ".
    Returns the confirmed sector, the user's original input, or None when
    nothing resembles `my_sector`.
    """
    investments = investments.dropna(subset=['raised_amount_usd',
                                             'company_category_list'])
    # FIX: the original built a list-of-lists and flattened it in a second
    # pass; extend() flattens in one pass with identical split semantics.
    sector_list = []
    for item in investments['company_category_list']:
        if ',' in item:
            sector_list.extend(item.split(sep=', '))
        else:
            sector_list.append(item)
    if my_sector in sector_list:
        return my_sector
    # FIX: get_close_matches was recomputed up to three times; compute once.
    matches = get_close_matches(my_sector, sector_list)
    if matches:
        action = input("Did you mean %s instead? [y or n]: " % matches[0])
        if action == "y":
            return matches[0]
        else:
            return my_sector
# Sentiment analysis tweeter
def tw_sent_sector(public_tweets, sector):
    """Print whether recent tweets about `sector` lean Positive, Negative or
    Neutral, based on the summed TextBlob polarity of the tweet texts."""
    total = sum(TextBlob(tweet.text).sentiment[0] for tweet in public_tweets)
    if total > 0:
        sent = 'Positive'
    elif total < 0:
        sent = 'Negative'
    else:
        sent = 'Neutral'
    print(f"The sentiment about {sector} industry in Twitter is {sent}")
# Sentiment analysis news
def news_sentiment_sector(public_news, sector):
    """Print the overall sentiment of recent news about `sector`.

    public_news: NewsAPI 'everything' response dict whose 'articles' entry is
    a list of {'title': ..., 'description': ...} dicts. Polarity of every
    title and description is summed to pick Positive/Negative/Neutral.
    """
    news_list = []
    for article in public_news['articles']:
        news_list.append(TextBlob(article['title']).sentiment[0])
        news_list.append(TextBlob(article['description']).sentiment[0])
    if sum(news_list) > 0:
        news_sent = 'Positive'
    elif sum(news_list) < 0:
        news_sent = 'Negative'
    else:
        news_sent = 'Neutral'
    # BUG FIX: the old message used len(public_news) — the number of keys in
    # the response dict — instead of the number of articles.
    print(f"There have been {len(public_news['articles'])} news pieces about {sector} industry recently and are in general {news_sent}")
# Look for data about sector
def category(sector, investments):
    """Report on a sector: Twitter and news sentiment plus two investment
    trend charts and a printed growth summary.

    sector: category name matched as a substring of company_category_list.
    investments: DataFrame with at least 'company_category_list',
    'raised_amount_usd', 'funded_at', 'company_name' and an 'index' column.
    """
    # Gather tweets
    public_tweets = api.search(sector)
    # Gather news
    public_news = newsapi.get_everything(q=sector, sources=news_sources, language='en')
    # Prepare the data for the sector
    investments = investments.dropna(subset=['company_category_list'])
    sector_investments = investments[investments['company_category_list'].str.contains(sector)].drop('index', axis=1)
    # BUG FIX: reset_index returns a new frame; the old code discarded it.
    sector_investments = sector_investments.reset_index(drop=True)
    sector_investments['funded_at'] = pd.to_datetime(sector_investments['funded_at'])
    sector_investments['Year'] = sector_investments['funded_at'].apply(lambda x: x.year)
    sector_investments['Month'] = sector_investments['funded_at'].apply(lambda x: x.month)
    sector_investments['Day'] = sector_investments['funded_at'].apply(lambda x: x.day)
    # Sentiment analysis Twitter
    tw_sent_sector(public_tweets, sector)
    # Sentiment analysis News
    news_sentiment_sector(public_news, sector)
    # Yearly totals for (up to) the last 10 years
    sector_year = sector_investments.groupby(['Year']).sum()[-10:]
    first_total = sector_year.raised_amount_usd.iloc[0]
    last_total = sector_year.raised_amount_usd.iloc[len(sector_year) - 1]
    movement = (last_total - first_total) / first_total * 100
    # BUG FIX: the old test summed the first and last yearly totals (both
    # non-negative), so it always reported "increased"; the sign of the
    # percentage movement is what distinguishes growth from decline.
    if movement >= 0:
        in_dec = 'increased'
    else:
        in_dec = 'decreased'
    # BUG FIX: removed `movement = movement[1:]` — slicing a float raises
    # TypeError on every call; the summary below already takes abs(movement).
    sns.lineplot(x=sector_year.index, y=sector_year.raised_amount_usd).set_title(f'Evolution of the amount invested in {sector}')
    investments_per_year = sector_investments.groupby(['Year']).count()
    peak_year = sector_year.index[sector_year['raised_amount_usd'] == max(sector_year.raised_amount_usd)].to_list()
    peak_amount = max(sector_year.raised_amount_usd)
    low_amount = min(sector_year.raised_amount_usd)
    # Kept for the (currently disabled) top-3 report at the bottom.
    most_invested_companies = sector_investments.groupby(by='company_name').sum().sort_values(by='raised_amount_usd', ascending=False)
    low_year = sector_year.index[sector_year['raised_amount_usd'] == min(sector_year.raised_amount_usd)].to_list()
    format_doll = ',.2f'
    print(f"""The amount of money invested in {sector} companies has {in_dec} by {format(abs(movement),format_doll)}% in the last {len(sector_year)} years.
It peaked in year {peak_year[0]} with ${format(peak_amount,format_doll)} invested and its lowest point was in year {low_year[0]} with ${format(low_amount,format_doll)} invested.
""")
    plt.ylabel('Raised amount in USD')
    plt.show()
    sns.lineplot(x=investments_per_year.index[-10:], y=investments_per_year.Day[-10:]).set_title(f'Evolution of the number of investment in {sector}')
    plt.ylabel('Number of investments')
    # BUG FIX: a stray `""")` here used to open a string literal that swallowed
    # the plt.show() below, so the second chart was never displayed.
    plt.show()
    # print(f"""
    # The Top 3 companies with biggest investments are:
    # - {most_invested_companies.index[0]} with ${most_invested_companies.raised_amount_usd[0]} raised,
    # - {most_invested_companies.index[1]} with ${most_invested_companies.raised_amount_usd[1]} raised and
    # - {most_invested_companies.index[2]} with ${most_invested_companies.raised_amount_usd[2]} raised
    # """)
# Sentiment analysis founder
def tw_analysis_founder(public_tweets, founder):
    """Print whether recent tweets about `founder` lean Positive, Negative or
    Neutral, based on summed TextBlob polarity."""
    polarity = 0
    for tweet in public_tweets:
        polarity += TextBlob(tweet.text).sentiment[0]
    if polarity > 0:
        sent = 'Positive'
    elif polarity < 0:
        sent = 'Negative'
    else:
        sent = 'Neutral'
    print(f"The sentiment about {founder} in Twitter is {sent}")
# Look for data about the founder
def founders(founder, people):
    """Look up a founder in the people table, print their profile, then run
    Twitter sentiment and a Google warning-word scan.

    founder: "First Last" string (assumes at least two space-separated parts —
    a single-word name raises IndexError; TODO confirm desired handling).
    people: DataFrame with first_name/last_name/title/organization/location
    and social-URL columns.
    NOTE(review): mixes positional .iloc[i] with label-based [i] lookups;
    assumes a default RangeIndex — confirm against the caller.
    """
    full_name = founder.split()
    public_tweets = api.search(founder)
    # What to search on Google
    look_for = founder
    for i in range(len(people)):
        if people.first_name.iloc[i] == full_name[0] and people.last_name.iloc[i] == full_name[1]:
            display(Image(url=people.profile_image_url[i]))
            print(f'We found this information about {founder}:')
            print(f"Founder's name: {people.first_name[i]} {people.last_name[i]} ")
            print(f"Title: {people.title[i]}")
            print(f"Organization: {people.organization[i]}")
            print(f"Location: {people.location_city[i]}, {people.location_region[i]}, {people.location_country_code[i]}")
            # BUG FIX: missing values in pandas are NaN, not None, so the old
            # `!= None` checks always passed and printed "nan" URLs.
            if pd.notna(people.twitter_url[i]):
                print(f"Twitter URL: {people.twitter_url[i]}")
            if pd.notna(people.linkedin_url[i]):
                print(f"Linkedin URL: {people.linkedin_url[i]}")
            if pd.notna(people.facebook_url[i]):
                print(f"Facebook URL: {people.facebook_url[i]}")
    # Twitter analysis
    tw_analysis_founder(public_tweets, founder)
    # Google search
    most_warnings(find_webs(founder), look_for)
# Look for data about company
def find_companies_by_size(size, companies, name, sector, company):
    """Sample one open, funded company in `sector` from the requested funding
    size bucket ('small'/'medium'/'big'), as a one-row DataFrame.

    When `name` is a known company, competitors are additionally restricted
    to the same country as `company` (a one-row DataFrame for that company).
    Result is random: .sample() picks one qualifying row.
    """
    company_nan = companies.dropna()
    company_sector = company_nan[company_nan['category_list'].str.contains(sector)].drop('index', axis=1).dropna()
    # Bucket by total-funding quartiles: bottom 25% small, middle 50% medium,
    # top 25% big.
    company_sector['total_funding_size'] = pd.qcut(company_sector.funding_total_usd, q=[0, .25, .75, 1], labels=['small', 'medium', 'big'])
    base = ((company_sector['total_funding_size'] == size)
            & (company_sector['funding_total_usd'] > 100000)
            & (company_sector['status'] != 'closed'))
    # BUG FIX: `name in company_nan['name']` tested membership in the Series
    # *index*, not its values, so the same-country branch was effectively dead.
    if name in company_nan['name'].values:
        return company_sector[base & (company_sector['country_code'] == company.country_code)].sample()
    else:
        return company_sector[base].sample()
def competitor_info(company):
    """Print a short profile for `company`, a one-row DataFrame with name,
    funding, rounds, homepage, country, status and founding-date columns."""
    raised = format(company.funding_total_usd.item(), ',.2f')
    print(f"Company name: {company.name.item()}")
    print(f"Total money raised: ${raised}")
    print(f"Total rounds: {company.funding_rounds.item()}")
    print(f"Webpage: {company.homepage_url.item()}")
    print(f"Country: {company.country_code.item()}")
    print(f"Status: {company.status.item()}")
    print(f"Founded in: {company.founded_at.item()}")
# Sentiment analysis company
def tw_analysis_company(public_tweets, company):
    """Print the aggregate Twitter sentiment for `company` (summed TextBlob
    polarity of the tweet texts)."""
    total = sum(TextBlob(tweet.text).sentiment[0] for tweet in public_tweets)
    if total > 0:
        verdict = 'Positive'
    elif total < 0:
        verdict = 'Negative'
    else:
        verdict = 'Neutral'
    print(f"The sentiment about {company} in Twitter is {verdict}")
def startup(name, companies, sector):
    """Print a Crunchbase-style profile for `name`, three sampled competitors
    (small/medium/big funding buckets), Twitter sentiment, and a Google
    warning-word scan.

    Falls back to a "couldn't find" message when the company row is missing
    or malformed.
    """
    company = companies[companies['name'] == name]
    # What to search on Google
    look_for = name
    # Gather tweets
    public_tweets = api.search(name)
    try:
        print(f"Company name: {company.name.item()}")
        print(f"Total money raised: ${format(company.funding_total_usd.item(),',.2f')}")
        print(f"Total rounds: {company.funding_rounds.item()}")
        print(f"Webpage: {company.homepage_url.item()}")
        print(f"Country: {company.country_code.item()}")
        print(f"Status: {company.status.item()}")
        # Find competitors
        print('\n')
        print(f"Competitors similar to {company.name.item()}:")
        print('\n')
        competitor_info(find_companies_by_size('small', companies, name, sector, company))
        print('\n')
        competitor_info(find_companies_by_size('medium', companies, name, sector, company))
        print('\n')
        competitor_info(find_companies_by_size('big', companies, name, sector, company))
    except Exception:
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; keep the deliberate best-effort fallback otherwise.
        print(f"We couldn't find information about {name} in Crunchbase")
    # Twitter sentiment analysis for company
    tw_analysis_company(public_tweets, name)
    # Google search
    most_warnings(find_webs(name), look_for)
|
normal
|
{
"blob_id": "9c7ecd3c878d43633606439aa63f840176f20dee",
"index": 7941,
"step-1": "<mask token>\n\n\ndef find_webs(query):\n urls = []\n rrss = ['facebook', 'twitter', 'linkedin', 'instagram', 'youtube',\n 'pinterest', 'angel']\n sites = []\n red_social = False\n for s in search(query, tld='com', num=30, stop=30, pause=3, lang='en'):\n if len(urls) < 10:\n for rs in rrss:\n if rs in s or tld.extract(s).domain in sites:\n red_social = True\n if not red_social and s not in urls:\n urls.append(s)\n sites.append(tld.extract(s).domain)\n red_social = False\n return urls\n\n\ndef tag_visible(element):\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta',\n '[document]']:\n return False\n if isinstance(element, Comment):\n return False\n return True\n\n\n<mask token>\n\n\ndef cleaning_urls_text(url):\n try:\n html = text_from_html(urllib.request.urlopen(url).read())\n stop_words = set(stopwords.words('english'))\n word_tokens = word_tokenize(html)\n return [w for w in word_tokens if not w in stop_words]\n except:\n return []\n\n\ndef filter_warning_words(sentence):\n warning_word = ['lie', 'fraud', 'scam', 'extortion', 'deceit', 'crime',\n 'arson', 'assault', 'bigamy', 'blackmail', 'bribery', 'burglary',\n 'child abuse', 'conspiracy', 'espionage', 'forgery', 'fraud',\n 'genocide', 'hijacking', 'homicide', 'kidnapping', 'manslaughter',\n 'mugging', 'murder', 'perjury', 'rape', 'riot', 'robbery',\n 'shoplifting', 'slander', 'smuggling', 'treason', 'trespassing']\n return list(filter(lambda word: word in warning_word, sentence))\n\n\ndef warnings_count(url):\n clean_sentence = cleaning_urls_text(url)\n length = len(filter_warning_words(clean_sentence))\n return (url, length) if length != 0 else None\n\n\n<mask token>\n\n\ndef retrieve_sector(my_sector, investments):\n investments = investments.dropna(subset=['raised_amount_usd',\n 'company_category_list'])\n sector_list0 = []\n sector_list = []\n for item in investments['company_category_list']:\n if ',' in item:\n sector_list0.append(item.split(sep=', '))\n else:\n 
sector_list0.append(item)\n for i in sector_list0:\n if type(i) == list:\n for sec in i:\n sector_list.append(sec)\n else:\n sector_list.append(i)\n if my_sector in sector_list:\n return my_sector\n elif len(get_close_matches(my_sector, sector_list)) > 0:\n action = input('Did you mean %s instead? [y or n]: ' %\n get_close_matches(my_sector, sector_list)[0])\n if action == 'y':\n return get_close_matches(my_sector, sector_list)[0]\n else:\n return my_sector\n\n\ndef tw_sent_sector(public_tweets, sector):\n sentiment_list = []\n for tweet in public_tweets:\n analysis = TextBlob(tweet.text)\n sentiment_list.append(analysis.sentiment[0])\n if sum(sentiment_list) > 0:\n sent = 'Positive'\n elif sum(sentiment_list) < 0:\n sent = 'Negative'\n else:\n sent = 'Neutral'\n print(f'The sentiment about {sector} industry in Twitter is {sent}')\n\n\n<mask token>\n\n\ndef category(sector, investments):\n public_tweets = api.search(sector)\n public_news = newsapi.get_everything(q=sector, sources=news_sources,\n language='en')\n investments = investments.dropna(subset=['company_category_list'])\n sector_investments = investments[investments['company_category_list'].\n str.contains(sector)].drop('index', axis=1)\n sector_investments.reset_index(drop=True)\n sector_investments['funded_at'] = pd.to_datetime(sector_investments[\n 'funded_at'])\n sector_investments['Year'] = sector_investments['funded_at'].apply(lambda\n x: x.year)\n sector_investments['Month'] = sector_investments['funded_at'].apply(lambda\n x: x.month)\n sector_investments['Day'] = sector_investments['funded_at'].apply(lambda\n x: x.day)\n tw_sent_sector(public_tweets, sector)\n news_sentiment_sector(public_news, sector)\n sector_year = sector_investments.groupby(['Year']).sum()[-10:]\n movement = (sector_year.raised_amount_usd.iloc[len(sector_year) - 1] -\n sector_year.raised_amount_usd.iloc[0]\n ) / sector_year.raised_amount_usd.iloc[0] * 100\n if sector_year.raised_amount_usd.iloc[0\n ] + 
sector_year.raised_amount_usd.iloc[len(sector_year) - 1] >= 0:\n in_dec = 'increased'\n grow = 'growing'\n else:\n in_dec = 'decreased'\n grow = 'falling'\n movement = movement[1:]\n sns.lineplot(x=sector_year.index, y=sector_year.raised_amount_usd\n ).set_title(f'Evolution of the amount invested in {sector}')\n investments_per_year = sector_investments.groupby(['Year']).count()\n peak_year = sector_year.index[sector_year['raised_amount_usd'] == max(\n sector_year.raised_amount_usd)].to_list()\n peak_amount = max(sector_year.raised_amount_usd)\n low_amount = min(sector_year.raised_amount_usd)\n most_invested_companies = sector_investments.groupby(by='company_name'\n ).sum().sort_values(by='raised_amount_usd', ascending=False)\n low_year = sector_year.index[sector_year['raised_amount_usd'] == min(\n sector_year.raised_amount_usd)].to_list()\n format_doll = ',.2f'\n print(\n f\"\"\"The amount of money invested in {sector} companies has {in_dec} by {format(abs(movement), format_doll)}% in the last {len(sector_year)} years. 
\nIt peaked in year {peak_year[0]} with ${format(peak_amount, format_doll)} invested and its lowest point was in year {low_year[0]} with ${format(low_amount, format_doll)} invested.\n\"\"\"\n )\n plt.ylabel('Raised amount in USD')\n plt.show()\n sns.lineplot(x=investments_per_year.index[-10:], y=investments_per_year\n .Day[-10:]).set_title(\n f'Evolution of the number of investment in {sector}')\n plt.ylabel('Number of investments')\n \"\"\")\n plt.show()\n #print(f\"\"\"\n\n\ndef tw_analysis_founder(public_tweets, founder):\n sentiment_list = []\n for tweet in public_tweets:\n analysis = TextBlob(tweet.text)\n sentiment_list.append(analysis.sentiment[0])\n if sum(sentiment_list) > 0:\n sent = 'Positive'\n elif sum(sentiment_list) < 0:\n sent = 'Negative'\n else:\n sent = 'Neutral'\n print(f'The sentiment about {founder} in Twitter is {sent}')\n\n\ndef founders(founder, people):\n full_name = founder.split()\n public_tweets = api.search(founder)\n look_for = founder\n for i in range(len(people)):\n if people.first_name.iloc[i] == full_name[0] and people.last_name.iloc[\n i] == full_name[1]:\n display(Image(url=people.profile_image_url[i]))\n print(f'We found this information about {founder}:')\n print(\n f\"Founder's name: {people.first_name[i]} {people.last_name[i]} \"\n )\n print(f'Title: {people.title[i]}')\n print(f'Organization: {people.organization[i]}')\n print(\n f'Location: {people.location_city[i]}, {people.location_region[i]}, {people.location_country_code[i]}'\n )\n if people.twitter_url[i] != None:\n print(f'Twitter URL: {people.twitter_url[i]}')\n if people.linkedin_url[i] != None:\n print(f'Linkedin URL: {people.linkedin_url[i]}')\n if people.facebook_url[i] != None:\n print(f'Facebook URL: {people.facebook_url[i]}')\n tw_analysis_founder(public_tweets, founder)\n most_warnings(find_webs(founder), look_for)\n\n\ndef find_companies_by_size(size, companies, name, sector, company):\n company_nan = companies.dropna()\n company_sector = 
company_nan[company_nan['category_list'].str.contains(\n sector)].drop('index', axis=1).dropna()\n company_sector['total_funding_size'] = pd.qcut(company_sector.\n funding_total_usd, q=[0, 0.25, 0.75, 1], labels=['small', 'medium',\n 'big'])\n if name in company_nan['name']:\n return company_sector[(company_sector['total_funding_size'] == size\n ) & (company_sector['funding_total_usd'] > 100000) & (\n company_sector['status'] != 'closed') & (company_sector[\n 'country_code'] == company.country_code)].sample()\n else:\n return company_sector[(company_sector['total_funding_size'] == size\n ) & (company_sector['funding_total_usd'] > 100000) & (\n company_sector['status'] != 'closed')].sample()\n\n\ndef competitor_info(company):\n print(f'Company name: {company.name.item()}')\n print(\n f\"Total money raised: ${format(company.funding_total_usd.item(), ',.2f')}\"\n )\n print(f'Total rounds: {company.funding_rounds.item()}')\n print(f'Webpage: {company.homepage_url.item()}')\n print(f'Country: {company.country_code.item()}')\n print(f'Status: {company.status.item()}')\n print(f'Founded in: {company.founded_at.item()}')\n\n\ndef tw_analysis_company(public_tweets, company):\n sentiment_list = []\n for tweet in public_tweets:\n analysis = TextBlob(tweet.text)\n sentiment_list.append(analysis.sentiment[0])\n if sum(sentiment_list) > 0:\n sent = 'Positive'\n elif sum(sentiment_list) < 0:\n sent = 'Negative'\n else:\n sent = 'Neutral'\n print(f'The sentiment about {company} in Twitter is {sent}')\n\n\ndef startup(name, companies, sector):\n company = companies[companies['name'] == name]\n look_for = name\n public_tweets = api.search(name)\n try:\n print(f'Company name: {company.name.item()}')\n print(\n f\"Total money raised: ${format(company.funding_total_usd.item(), ',.2f')}\"\n )\n print(f'Total rounds: {company.funding_rounds.item()}')\n print(f'Webpage: {company.homepage_url.item()}')\n print(f'Country: {company.country_code.item()}')\n print(f'Status: 
{company.status.item()}')\n print('\\n')\n print(f'Competitors similar to {company.name.item()}:')\n print('\\n')\n competitor_info(find_companies_by_size('small', companies, name,\n sector, company))\n print('\\n')\n competitor_info(find_companies_by_size('medium', companies, name,\n sector, company))\n print('\\n')\n competitor_info(find_companies_by_size('big', companies, name,\n sector, company))\n except:\n print(f\"We couldn't find information about {name} in Crunchbase\")\n tw_analysis_company(public_tweets, name)\n most_warnings(find_webs(name), look_for)\n",
"step-2": "<mask token>\n\n\ndef find_webs(query):\n urls = []\n rrss = ['facebook', 'twitter', 'linkedin', 'instagram', 'youtube',\n 'pinterest', 'angel']\n sites = []\n red_social = False\n for s in search(query, tld='com', num=30, stop=30, pause=3, lang='en'):\n if len(urls) < 10:\n for rs in rrss:\n if rs in s or tld.extract(s).domain in sites:\n red_social = True\n if not red_social and s not in urls:\n urls.append(s)\n sites.append(tld.extract(s).domain)\n red_social = False\n return urls\n\n\ndef tag_visible(element):\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta',\n '[document]']:\n return False\n if isinstance(element, Comment):\n return False\n return True\n\n\n<mask token>\n\n\ndef cleaning_urls_text(url):\n try:\n html = text_from_html(urllib.request.urlopen(url).read())\n stop_words = set(stopwords.words('english'))\n word_tokens = word_tokenize(html)\n return [w for w in word_tokens if not w in stop_words]\n except:\n return []\n\n\ndef filter_warning_words(sentence):\n warning_word = ['lie', 'fraud', 'scam', 'extortion', 'deceit', 'crime',\n 'arson', 'assault', 'bigamy', 'blackmail', 'bribery', 'burglary',\n 'child abuse', 'conspiracy', 'espionage', 'forgery', 'fraud',\n 'genocide', 'hijacking', 'homicide', 'kidnapping', 'manslaughter',\n 'mugging', 'murder', 'perjury', 'rape', 'riot', 'robbery',\n 'shoplifting', 'slander', 'smuggling', 'treason', 'trespassing']\n return list(filter(lambda word: word in warning_word, sentence))\n\n\ndef warnings_count(url):\n clean_sentence = cleaning_urls_text(url)\n length = len(filter_warning_words(clean_sentence))\n return (url, length) if length != 0 else None\n\n\n<mask token>\n\n\ndef retrieve_name(my_name, companies):\n companies_list = []\n for i in companies.dropna(subset=['name']).name:\n companies_list.append(i)\n if my_name in companies_list:\n return my_name\n elif len(get_close_matches(my_name, companies_list)) > 0:\n action = input('Did you mean %s instead? 
[y or n]: ' %\n get_close_matches(my_name, companies_list)[0])\n if action == 'y':\n return get_close_matches(my_name, companies_list)[0]\n elif action == 'n':\n return my_name\n else:\n return \"we don't understand you. Apologies.\"\n\n\ndef retrieve_sector(my_sector, investments):\n investments = investments.dropna(subset=['raised_amount_usd',\n 'company_category_list'])\n sector_list0 = []\n sector_list = []\n for item in investments['company_category_list']:\n if ',' in item:\n sector_list0.append(item.split(sep=', '))\n else:\n sector_list0.append(item)\n for i in sector_list0:\n if type(i) == list:\n for sec in i:\n sector_list.append(sec)\n else:\n sector_list.append(i)\n if my_sector in sector_list:\n return my_sector\n elif len(get_close_matches(my_sector, sector_list)) > 0:\n action = input('Did you mean %s instead? [y or n]: ' %\n get_close_matches(my_sector, sector_list)[0])\n if action == 'y':\n return get_close_matches(my_sector, sector_list)[0]\n else:\n return my_sector\n\n\ndef tw_sent_sector(public_tweets, sector):\n sentiment_list = []\n for tweet in public_tweets:\n analysis = TextBlob(tweet.text)\n sentiment_list.append(analysis.sentiment[0])\n if sum(sentiment_list) > 0:\n sent = 'Positive'\n elif sum(sentiment_list) < 0:\n sent = 'Negative'\n else:\n sent = 'Neutral'\n print(f'The sentiment about {sector} industry in Twitter is {sent}')\n\n\n<mask token>\n\n\ndef category(sector, investments):\n public_tweets = api.search(sector)\n public_news = newsapi.get_everything(q=sector, sources=news_sources,\n language='en')\n investments = investments.dropna(subset=['company_category_list'])\n sector_investments = investments[investments['company_category_list'].\n str.contains(sector)].drop('index', axis=1)\n sector_investments.reset_index(drop=True)\n sector_investments['funded_at'] = pd.to_datetime(sector_investments[\n 'funded_at'])\n sector_investments['Year'] = sector_investments['funded_at'].apply(lambda\n x: x.year)\n 
sector_investments['Month'] = sector_investments['funded_at'].apply(lambda\n x: x.month)\n sector_investments['Day'] = sector_investments['funded_at'].apply(lambda\n x: x.day)\n tw_sent_sector(public_tweets, sector)\n news_sentiment_sector(public_news, sector)\n sector_year = sector_investments.groupby(['Year']).sum()[-10:]\n movement = (sector_year.raised_amount_usd.iloc[len(sector_year) - 1] -\n sector_year.raised_amount_usd.iloc[0]\n ) / sector_year.raised_amount_usd.iloc[0] * 100\n if sector_year.raised_amount_usd.iloc[0\n ] + sector_year.raised_amount_usd.iloc[len(sector_year) - 1] >= 0:\n in_dec = 'increased'\n grow = 'growing'\n else:\n in_dec = 'decreased'\n grow = 'falling'\n movement = movement[1:]\n sns.lineplot(x=sector_year.index, y=sector_year.raised_amount_usd\n ).set_title(f'Evolution of the amount invested in {sector}')\n investments_per_year = sector_investments.groupby(['Year']).count()\n peak_year = sector_year.index[sector_year['raised_amount_usd'] == max(\n sector_year.raised_amount_usd)].to_list()\n peak_amount = max(sector_year.raised_amount_usd)\n low_amount = min(sector_year.raised_amount_usd)\n most_invested_companies = sector_investments.groupby(by='company_name'\n ).sum().sort_values(by='raised_amount_usd', ascending=False)\n low_year = sector_year.index[sector_year['raised_amount_usd'] == min(\n sector_year.raised_amount_usd)].to_list()\n format_doll = ',.2f'\n print(\n f\"\"\"The amount of money invested in {sector} companies has {in_dec} by {format(abs(movement), format_doll)}% in the last {len(sector_year)} years. 
\nIt peaked in year {peak_year[0]} with ${format(peak_amount, format_doll)} invested and its lowest point was in year {low_year[0]} with ${format(low_amount, format_doll)} invested.\n\"\"\"\n )\n plt.ylabel('Raised amount in USD')\n plt.show()\n sns.lineplot(x=investments_per_year.index[-10:], y=investments_per_year\n .Day[-10:]).set_title(\n f'Evolution of the number of investment in {sector}')\n plt.ylabel('Number of investments')\n \"\"\")\n plt.show()\n #print(f\"\"\"\n\n\ndef tw_analysis_founder(public_tweets, founder):\n sentiment_list = []\n for tweet in public_tweets:\n analysis = TextBlob(tweet.text)\n sentiment_list.append(analysis.sentiment[0])\n if sum(sentiment_list) > 0:\n sent = 'Positive'\n elif sum(sentiment_list) < 0:\n sent = 'Negative'\n else:\n sent = 'Neutral'\n print(f'The sentiment about {founder} in Twitter is {sent}')\n\n\ndef founders(founder, people):\n full_name = founder.split()\n public_tweets = api.search(founder)\n look_for = founder\n for i in range(len(people)):\n if people.first_name.iloc[i] == full_name[0] and people.last_name.iloc[\n i] == full_name[1]:\n display(Image(url=people.profile_image_url[i]))\n print(f'We found this information about {founder}:')\n print(\n f\"Founder's name: {people.first_name[i]} {people.last_name[i]} \"\n )\n print(f'Title: {people.title[i]}')\n print(f'Organization: {people.organization[i]}')\n print(\n f'Location: {people.location_city[i]}, {people.location_region[i]}, {people.location_country_code[i]}'\n )\n if people.twitter_url[i] != None:\n print(f'Twitter URL: {people.twitter_url[i]}')\n if people.linkedin_url[i] != None:\n print(f'Linkedin URL: {people.linkedin_url[i]}')\n if people.facebook_url[i] != None:\n print(f'Facebook URL: {people.facebook_url[i]}')\n tw_analysis_founder(public_tweets, founder)\n most_warnings(find_webs(founder), look_for)\n\n\ndef find_companies_by_size(size, companies, name, sector, company):\n company_nan = companies.dropna()\n company_sector = 
company_nan[company_nan['category_list'].str.contains(\n sector)].drop('index', axis=1).dropna()\n company_sector['total_funding_size'] = pd.qcut(company_sector.\n funding_total_usd, q=[0, 0.25, 0.75, 1], labels=['small', 'medium',\n 'big'])\n if name in company_nan['name']:\n return company_sector[(company_sector['total_funding_size'] == size\n ) & (company_sector['funding_total_usd'] > 100000) & (\n company_sector['status'] != 'closed') & (company_sector[\n 'country_code'] == company.country_code)].sample()\n else:\n return company_sector[(company_sector['total_funding_size'] == size\n ) & (company_sector['funding_total_usd'] > 100000) & (\n company_sector['status'] != 'closed')].sample()\n\n\ndef competitor_info(company):\n print(f'Company name: {company.name.item()}')\n print(\n f\"Total money raised: ${format(company.funding_total_usd.item(), ',.2f')}\"\n )\n print(f'Total rounds: {company.funding_rounds.item()}')\n print(f'Webpage: {company.homepage_url.item()}')\n print(f'Country: {company.country_code.item()}')\n print(f'Status: {company.status.item()}')\n print(f'Founded in: {company.founded_at.item()}')\n\n\ndef tw_analysis_company(public_tweets, company):\n sentiment_list = []\n for tweet in public_tweets:\n analysis = TextBlob(tweet.text)\n sentiment_list.append(analysis.sentiment[0])\n if sum(sentiment_list) > 0:\n sent = 'Positive'\n elif sum(sentiment_list) < 0:\n sent = 'Negative'\n else:\n sent = 'Neutral'\n print(f'The sentiment about {company} in Twitter is {sent}')\n\n\ndef startup(name, companies, sector):\n company = companies[companies['name'] == name]\n look_for = name\n public_tweets = api.search(name)\n try:\n print(f'Company name: {company.name.item()}')\n print(\n f\"Total money raised: ${format(company.funding_total_usd.item(), ',.2f')}\"\n )\n print(f'Total rounds: {company.funding_rounds.item()}')\n print(f'Webpage: {company.homepage_url.item()}')\n print(f'Country: {company.country_code.item()}')\n print(f'Status: 
{company.status.item()}')\n print('\\n')\n print(f'Competitors similar to {company.name.item()}:')\n print('\\n')\n competitor_info(find_companies_by_size('small', companies, name,\n sector, company))\n print('\\n')\n competitor_info(find_companies_by_size('medium', companies, name,\n sector, company))\n print('\\n')\n competitor_info(find_companies_by_size('big', companies, name,\n sector, company))\n except:\n print(f\"We couldn't find information about {name} in Crunchbase\")\n tw_analysis_company(public_tweets, name)\n most_warnings(find_webs(name), look_for)\n",
"step-3": "<mask token>\n\n\ndef find_webs(query):\n urls = []\n rrss = ['facebook', 'twitter', 'linkedin', 'instagram', 'youtube',\n 'pinterest', 'angel']\n sites = []\n red_social = False\n for s in search(query, tld='com', num=30, stop=30, pause=3, lang='en'):\n if len(urls) < 10:\n for rs in rrss:\n if rs in s or tld.extract(s).domain in sites:\n red_social = True\n if not red_social and s not in urls:\n urls.append(s)\n sites.append(tld.extract(s).domain)\n red_social = False\n return urls\n\n\ndef tag_visible(element):\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta',\n '[document]']:\n return False\n if isinstance(element, Comment):\n return False\n return True\n\n\n<mask token>\n\n\ndef cleaning_urls_text(url):\n try:\n html = text_from_html(urllib.request.urlopen(url).read())\n stop_words = set(stopwords.words('english'))\n word_tokens = word_tokenize(html)\n return [w for w in word_tokens if not w in stop_words]\n except:\n return []\n\n\ndef filter_warning_words(sentence):\n warning_word = ['lie', 'fraud', 'scam', 'extortion', 'deceit', 'crime',\n 'arson', 'assault', 'bigamy', 'blackmail', 'bribery', 'burglary',\n 'child abuse', 'conspiracy', 'espionage', 'forgery', 'fraud',\n 'genocide', 'hijacking', 'homicide', 'kidnapping', 'manslaughter',\n 'mugging', 'murder', 'perjury', 'rape', 'riot', 'robbery',\n 'shoplifting', 'slander', 'smuggling', 'treason', 'trespassing']\n return list(filter(lambda word: word in warning_word, sentence))\n\n\ndef warnings_count(url):\n clean_sentence = cleaning_urls_text(url)\n length = len(filter_warning_words(clean_sentence))\n return (url, length) if length != 0 else None\n\n\n<mask token>\n\n\ndef retrieve_name(my_name, companies):\n companies_list = []\n for i in companies.dropna(subset=['name']).name:\n companies_list.append(i)\n if my_name in companies_list:\n return my_name\n elif len(get_close_matches(my_name, companies_list)) > 0:\n action = input('Did you mean %s instead? 
[y or n]: ' %\n get_close_matches(my_name, companies_list)[0])\n if action == 'y':\n return get_close_matches(my_name, companies_list)[0]\n elif action == 'n':\n return my_name\n else:\n return \"we don't understand you. Apologies.\"\n\n\ndef retrieve_sector(my_sector, investments):\n investments = investments.dropna(subset=['raised_amount_usd',\n 'company_category_list'])\n sector_list0 = []\n sector_list = []\n for item in investments['company_category_list']:\n if ',' in item:\n sector_list0.append(item.split(sep=', '))\n else:\n sector_list0.append(item)\n for i in sector_list0:\n if type(i) == list:\n for sec in i:\n sector_list.append(sec)\n else:\n sector_list.append(i)\n if my_sector in sector_list:\n return my_sector\n elif len(get_close_matches(my_sector, sector_list)) > 0:\n action = input('Did you mean %s instead? [y or n]: ' %\n get_close_matches(my_sector, sector_list)[0])\n if action == 'y':\n return get_close_matches(my_sector, sector_list)[0]\n else:\n return my_sector\n\n\ndef tw_sent_sector(public_tweets, sector):\n sentiment_list = []\n for tweet in public_tweets:\n analysis = TextBlob(tweet.text)\n sentiment_list.append(analysis.sentiment[0])\n if sum(sentiment_list) > 0:\n sent = 'Positive'\n elif sum(sentiment_list) < 0:\n sent = 'Negative'\n else:\n sent = 'Neutral'\n print(f'The sentiment about {sector} industry in Twitter is {sent}')\n\n\ndef news_sentiment_sector(public_news, sector):\n news_list = []\n for piece in range(len(public_news['articles'])):\n news_list.append(TextBlob(public_news['articles'][piece]['title']).\n sentiment[0])\n news_list.append(TextBlob(public_news['articles'][piece][\n 'description']).sentiment[0])\n if sum(news_list) > 0:\n news_sent = 'Positive'\n elif sum(news_list) < 0:\n news_sent = 'Negative'\n else:\n news_sent = 'Neutral'\n print(\n f'There have been {len(public_news)} news pieces about {sector} industry recently and are in general {news_sent}'\n )\n\n\ndef category(sector, investments):\n 
public_tweets = api.search(sector)\n public_news = newsapi.get_everything(q=sector, sources=news_sources,\n language='en')\n investments = investments.dropna(subset=['company_category_list'])\n sector_investments = investments[investments['company_category_list'].\n str.contains(sector)].drop('index', axis=1)\n sector_investments.reset_index(drop=True)\n sector_investments['funded_at'] = pd.to_datetime(sector_investments[\n 'funded_at'])\n sector_investments['Year'] = sector_investments['funded_at'].apply(lambda\n x: x.year)\n sector_investments['Month'] = sector_investments['funded_at'].apply(lambda\n x: x.month)\n sector_investments['Day'] = sector_investments['funded_at'].apply(lambda\n x: x.day)\n tw_sent_sector(public_tweets, sector)\n news_sentiment_sector(public_news, sector)\n sector_year = sector_investments.groupby(['Year']).sum()[-10:]\n movement = (sector_year.raised_amount_usd.iloc[len(sector_year) - 1] -\n sector_year.raised_amount_usd.iloc[0]\n ) / sector_year.raised_amount_usd.iloc[0] * 100\n if sector_year.raised_amount_usd.iloc[0\n ] + sector_year.raised_amount_usd.iloc[len(sector_year) - 1] >= 0:\n in_dec = 'increased'\n grow = 'growing'\n else:\n in_dec = 'decreased'\n grow = 'falling'\n movement = movement[1:]\n sns.lineplot(x=sector_year.index, y=sector_year.raised_amount_usd\n ).set_title(f'Evolution of the amount invested in {sector}')\n investments_per_year = sector_investments.groupby(['Year']).count()\n peak_year = sector_year.index[sector_year['raised_amount_usd'] == max(\n sector_year.raised_amount_usd)].to_list()\n peak_amount = max(sector_year.raised_amount_usd)\n low_amount = min(sector_year.raised_amount_usd)\n most_invested_companies = sector_investments.groupby(by='company_name'\n ).sum().sort_values(by='raised_amount_usd', ascending=False)\n low_year = sector_year.index[sector_year['raised_amount_usd'] == min(\n sector_year.raised_amount_usd)].to_list()\n format_doll = ',.2f'\n print(\n f\"\"\"The amount of money invested in 
{sector} companies has {in_dec} by {format(abs(movement), format_doll)}% in the last {len(sector_year)} years. \nIt peaked in year {peak_year[0]} with ${format(peak_amount, format_doll)} invested and its lowest point was in year {low_year[0]} with ${format(low_amount, format_doll)} invested.\n\"\"\"\n )\n plt.ylabel('Raised amount in USD')\n plt.show()\n sns.lineplot(x=investments_per_year.index[-10:], y=investments_per_year\n .Day[-10:]).set_title(\n f'Evolution of the number of investment in {sector}')\n plt.ylabel('Number of investments')\n \"\"\")\n plt.show()\n #print(f\"\"\"\n\n\ndef tw_analysis_founder(public_tweets, founder):\n sentiment_list = []\n for tweet in public_tweets:\n analysis = TextBlob(tweet.text)\n sentiment_list.append(analysis.sentiment[0])\n if sum(sentiment_list) > 0:\n sent = 'Positive'\n elif sum(sentiment_list) < 0:\n sent = 'Negative'\n else:\n sent = 'Neutral'\n print(f'The sentiment about {founder} in Twitter is {sent}')\n\n\ndef founders(founder, people):\n full_name = founder.split()\n public_tweets = api.search(founder)\n look_for = founder\n for i in range(len(people)):\n if people.first_name.iloc[i] == full_name[0] and people.last_name.iloc[\n i] == full_name[1]:\n display(Image(url=people.profile_image_url[i]))\n print(f'We found this information about {founder}:')\n print(\n f\"Founder's name: {people.first_name[i]} {people.last_name[i]} \"\n )\n print(f'Title: {people.title[i]}')\n print(f'Organization: {people.organization[i]}')\n print(\n f'Location: {people.location_city[i]}, {people.location_region[i]}, {people.location_country_code[i]}'\n )\n if people.twitter_url[i] != None:\n print(f'Twitter URL: {people.twitter_url[i]}')\n if people.linkedin_url[i] != None:\n print(f'Linkedin URL: {people.linkedin_url[i]}')\n if people.facebook_url[i] != None:\n print(f'Facebook URL: {people.facebook_url[i]}')\n tw_analysis_founder(public_tweets, founder)\n most_warnings(find_webs(founder), look_for)\n\n\ndef 
find_companies_by_size(size, companies, name, sector, company):\n company_nan = companies.dropna()\n company_sector = company_nan[company_nan['category_list'].str.contains(\n sector)].drop('index', axis=1).dropna()\n company_sector['total_funding_size'] = pd.qcut(company_sector.\n funding_total_usd, q=[0, 0.25, 0.75, 1], labels=['small', 'medium',\n 'big'])\n if name in company_nan['name']:\n return company_sector[(company_sector['total_funding_size'] == size\n ) & (company_sector['funding_total_usd'] > 100000) & (\n company_sector['status'] != 'closed') & (company_sector[\n 'country_code'] == company.country_code)].sample()\n else:\n return company_sector[(company_sector['total_funding_size'] == size\n ) & (company_sector['funding_total_usd'] > 100000) & (\n company_sector['status'] != 'closed')].sample()\n\n\ndef competitor_info(company):\n print(f'Company name: {company.name.item()}')\n print(\n f\"Total money raised: ${format(company.funding_total_usd.item(), ',.2f')}\"\n )\n print(f'Total rounds: {company.funding_rounds.item()}')\n print(f'Webpage: {company.homepage_url.item()}')\n print(f'Country: {company.country_code.item()}')\n print(f'Status: {company.status.item()}')\n print(f'Founded in: {company.founded_at.item()}')\n\n\ndef tw_analysis_company(public_tweets, company):\n sentiment_list = []\n for tweet in public_tweets:\n analysis = TextBlob(tweet.text)\n sentiment_list.append(analysis.sentiment[0])\n if sum(sentiment_list) > 0:\n sent = 'Positive'\n elif sum(sentiment_list) < 0:\n sent = 'Negative'\n else:\n sent = 'Neutral'\n print(f'The sentiment about {company} in Twitter is {sent}')\n\n\ndef startup(name, companies, sector):\n company = companies[companies['name'] == name]\n look_for = name\n public_tweets = api.search(name)\n try:\n print(f'Company name: {company.name.item()}')\n print(\n f\"Total money raised: ${format(company.funding_total_usd.item(), ',.2f')}\"\n )\n print(f'Total rounds: {company.funding_rounds.item()}')\n print(f'Webpage: 
{company.homepage_url.item()}')\n print(f'Country: {company.country_code.item()}')\n print(f'Status: {company.status.item()}')\n print('\\n')\n print(f'Competitors similar to {company.name.item()}:')\n print('\\n')\n competitor_info(find_companies_by_size('small', companies, name,\n sector, company))\n print('\\n')\n competitor_info(find_companies_by_size('medium', companies, name,\n sector, company))\n print('\\n')\n competitor_info(find_companies_by_size('big', companies, name,\n sector, company))\n except:\n print(f\"We couldn't find information about {name} in Crunchbase\")\n tw_analysis_company(public_tweets, name)\n most_warnings(find_webs(name), look_for)\n",
"step-4": "<mask token>\nnewsapi = NewsApiClient(api_key=cd.news_credentials['api_key'])\nnews_sources = (\n 'the-verge,buzzfeed,engadget,hacker-news,mashable,reddit-r-all,wired,techcrunch'\n )\nconsumer_key = cd.twitter_credentials['consumer_key']\nconsumer_key_secret = cd.twitter_credentials['consumer_key_secret']\naccess_token = cd.twitter_credentials['access_token']\naccess_token_secret = cd.twitter_credentials['access_token_secret']\nauth = tweepy.OAuthHandler(consumer_key, consumer_key_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth)\n\n\ndef find_webs(query):\n urls = []\n rrss = ['facebook', 'twitter', 'linkedin', 'instagram', 'youtube',\n 'pinterest', 'angel']\n sites = []\n red_social = False\n for s in search(query, tld='com', num=30, stop=30, pause=3, lang='en'):\n if len(urls) < 10:\n for rs in rrss:\n if rs in s or tld.extract(s).domain in sites:\n red_social = True\n if not red_social and s not in urls:\n urls.append(s)\n sites.append(tld.extract(s).domain)\n red_social = False\n return urls\n\n\ndef tag_visible(element):\n if element.parent.name in ['style', 'script', 'head', 'title', 'meta',\n '[document]']:\n return False\n if isinstance(element, Comment):\n return False\n return True\n\n\ndef text_from_html(body):\n soup = bs(body, 'html.parser')\n texts = soup.findAll(text=True)\n visible_texts = filter(tag_visible, texts)\n return u' '.join(t.strip() for t in visible_texts)\n\n\ndef cleaning_urls_text(url):\n try:\n html = text_from_html(urllib.request.urlopen(url).read())\n stop_words = set(stopwords.words('english'))\n word_tokens = word_tokenize(html)\n return [w for w in word_tokens if not w in stop_words]\n except:\n return []\n\n\ndef filter_warning_words(sentence):\n warning_word = ['lie', 'fraud', 'scam', 'extortion', 'deceit', 'crime',\n 'arson', 'assault', 'bigamy', 'blackmail', 'bribery', 'burglary',\n 'child abuse', 'conspiracy', 'espionage', 'forgery', 'fraud',\n 'genocide', 'hijacking', 
'homicide', 'kidnapping', 'manslaughter',\n 'mugging', 'murder', 'perjury', 'rape', 'riot', 'robbery',\n 'shoplifting', 'slander', 'smuggling', 'treason', 'trespassing']\n return list(filter(lambda word: word in warning_word, sentence))\n\n\ndef warnings_count(url):\n clean_sentence = cleaning_urls_text(url)\n length = len(filter_warning_words(clean_sentence))\n return (url, length) if length != 0 else None\n\n\ndef most_warnings(urls, look_for):\n list_len_tup = list(map(warnings_count, urls))\n list_len_tup_clean = list(filter(lambda item: item != None, list_len_tup))\n list_len_tup_clean.sort(key=lambda item: item[1], reverse=True)\n top_urls = [url for url, length in list_len_tup_clean[:2]]\n if len(top_urls) > 1:\n print(\n f\"\"\"\n We found something sketchy. You might want to check these links:\n \n - {top_urls[0]}\n \n - {top_urls[1]}\n \"\"\"\n )\n elif len(top_urls) == 1:\n print(\n f\"\"\"\n We found something sketchy. You might want to check this link:\n {top_urls[0]}\n \"\"\"\n )\n else:\n print(\n f\"We couldn't find anything worrying about {look_for} on Google. Nice!\"\n )\n\n\ndef retrieve_name(my_name, companies):\n companies_list = []\n for i in companies.dropna(subset=['name']).name:\n companies_list.append(i)\n if my_name in companies_list:\n return my_name\n elif len(get_close_matches(my_name, companies_list)) > 0:\n action = input('Did you mean %s instead? [y or n]: ' %\n get_close_matches(my_name, companies_list)[0])\n if action == 'y':\n return get_close_matches(my_name, companies_list)[0]\n elif action == 'n':\n return my_name\n else:\n return \"we don't understand you. 
Apologies.\"\n\n\ndef retrieve_sector(my_sector, investments):\n investments = investments.dropna(subset=['raised_amount_usd',\n 'company_category_list'])\n sector_list0 = []\n sector_list = []\n for item in investments['company_category_list']:\n if ',' in item:\n sector_list0.append(item.split(sep=', '))\n else:\n sector_list0.append(item)\n for i in sector_list0:\n if type(i) == list:\n for sec in i:\n sector_list.append(sec)\n else:\n sector_list.append(i)\n if my_sector in sector_list:\n return my_sector\n elif len(get_close_matches(my_sector, sector_list)) > 0:\n action = input('Did you mean %s instead? [y or n]: ' %\n get_close_matches(my_sector, sector_list)[0])\n if action == 'y':\n return get_close_matches(my_sector, sector_list)[0]\n else:\n return my_sector\n\n\ndef tw_sent_sector(public_tweets, sector):\n sentiment_list = []\n for tweet in public_tweets:\n analysis = TextBlob(tweet.text)\n sentiment_list.append(analysis.sentiment[0])\n if sum(sentiment_list) > 0:\n sent = 'Positive'\n elif sum(sentiment_list) < 0:\n sent = 'Negative'\n else:\n sent = 'Neutral'\n print(f'The sentiment about {sector} industry in Twitter is {sent}')\n\n\ndef news_sentiment_sector(public_news, sector):\n news_list = []\n for piece in range(len(public_news['articles'])):\n news_list.append(TextBlob(public_news['articles'][piece]['title']).\n sentiment[0])\n news_list.append(TextBlob(public_news['articles'][piece][\n 'description']).sentiment[0])\n if sum(news_list) > 0:\n news_sent = 'Positive'\n elif sum(news_list) < 0:\n news_sent = 'Negative'\n else:\n news_sent = 'Neutral'\n print(\n f'There have been {len(public_news)} news pieces about {sector} industry recently and are in general {news_sent}'\n )\n\n\ndef category(sector, investments):\n public_tweets = api.search(sector)\n public_news = newsapi.get_everything(q=sector, sources=news_sources,\n language='en')\n investments = investments.dropna(subset=['company_category_list'])\n sector_investments = 
investments[investments['company_category_list'].\n str.contains(sector)].drop('index', axis=1)\n sector_investments.reset_index(drop=True)\n sector_investments['funded_at'] = pd.to_datetime(sector_investments[\n 'funded_at'])\n sector_investments['Year'] = sector_investments['funded_at'].apply(lambda\n x: x.year)\n sector_investments['Month'] = sector_investments['funded_at'].apply(lambda\n x: x.month)\n sector_investments['Day'] = sector_investments['funded_at'].apply(lambda\n x: x.day)\n tw_sent_sector(public_tweets, sector)\n news_sentiment_sector(public_news, sector)\n sector_year = sector_investments.groupby(['Year']).sum()[-10:]\n movement = (sector_year.raised_amount_usd.iloc[len(sector_year) - 1] -\n sector_year.raised_amount_usd.iloc[0]\n ) / sector_year.raised_amount_usd.iloc[0] * 100\n if sector_year.raised_amount_usd.iloc[0\n ] + sector_year.raised_amount_usd.iloc[len(sector_year) - 1] >= 0:\n in_dec = 'increased'\n grow = 'growing'\n else:\n in_dec = 'decreased'\n grow = 'falling'\n movement = movement[1:]\n sns.lineplot(x=sector_year.index, y=sector_year.raised_amount_usd\n ).set_title(f'Evolution of the amount invested in {sector}')\n investments_per_year = sector_investments.groupby(['Year']).count()\n peak_year = sector_year.index[sector_year['raised_amount_usd'] == max(\n sector_year.raised_amount_usd)].to_list()\n peak_amount = max(sector_year.raised_amount_usd)\n low_amount = min(sector_year.raised_amount_usd)\n most_invested_companies = sector_investments.groupby(by='company_name'\n ).sum().sort_values(by='raised_amount_usd', ascending=False)\n low_year = sector_year.index[sector_year['raised_amount_usd'] == min(\n sector_year.raised_amount_usd)].to_list()\n format_doll = ',.2f'\n print(\n f\"\"\"The amount of money invested in {sector} companies has {in_dec} by {format(abs(movement), format_doll)}% in the last {len(sector_year)} years. 
\nIt peaked in year {peak_year[0]} with ${format(peak_amount, format_doll)} invested and its lowest point was in year {low_year[0]} with ${format(low_amount, format_doll)} invested.\n\"\"\"\n )\n plt.ylabel('Raised amount in USD')\n plt.show()\n sns.lineplot(x=investments_per_year.index[-10:], y=investments_per_year\n .Day[-10:]).set_title(\n f'Evolution of the number of investment in {sector}')\n plt.ylabel('Number of investments')\n \"\"\")\n plt.show()\n #print(f\"\"\"\n\n\ndef tw_analysis_founder(public_tweets, founder):\n sentiment_list = []\n for tweet in public_tweets:\n analysis = TextBlob(tweet.text)\n sentiment_list.append(analysis.sentiment[0])\n if sum(sentiment_list) > 0:\n sent = 'Positive'\n elif sum(sentiment_list) < 0:\n sent = 'Negative'\n else:\n sent = 'Neutral'\n print(f'The sentiment about {founder} in Twitter is {sent}')\n\n\ndef founders(founder, people):\n full_name = founder.split()\n public_tweets = api.search(founder)\n look_for = founder\n for i in range(len(people)):\n if people.first_name.iloc[i] == full_name[0] and people.last_name.iloc[\n i] == full_name[1]:\n display(Image(url=people.profile_image_url[i]))\n print(f'We found this information about {founder}:')\n print(\n f\"Founder's name: {people.first_name[i]} {people.last_name[i]} \"\n )\n print(f'Title: {people.title[i]}')\n print(f'Organization: {people.organization[i]}')\n print(\n f'Location: {people.location_city[i]}, {people.location_region[i]}, {people.location_country_code[i]}'\n )\n if people.twitter_url[i] != None:\n print(f'Twitter URL: {people.twitter_url[i]}')\n if people.linkedin_url[i] != None:\n print(f'Linkedin URL: {people.linkedin_url[i]}')\n if people.facebook_url[i] != None:\n print(f'Facebook URL: {people.facebook_url[i]}')\n tw_analysis_founder(public_tweets, founder)\n most_warnings(find_webs(founder), look_for)\n\n\ndef find_companies_by_size(size, companies, name, sector, company):\n company_nan = companies.dropna()\n company_sector = 
company_nan[company_nan['category_list'].str.contains(\n sector)].drop('index', axis=1).dropna()\n company_sector['total_funding_size'] = pd.qcut(company_sector.\n funding_total_usd, q=[0, 0.25, 0.75, 1], labels=['small', 'medium',\n 'big'])\n if name in company_nan['name']:\n return company_sector[(company_sector['total_funding_size'] == size\n ) & (company_sector['funding_total_usd'] > 100000) & (\n company_sector['status'] != 'closed') & (company_sector[\n 'country_code'] == company.country_code)].sample()\n else:\n return company_sector[(company_sector['total_funding_size'] == size\n ) & (company_sector['funding_total_usd'] > 100000) & (\n company_sector['status'] != 'closed')].sample()\n\n\ndef competitor_info(company):\n print(f'Company name: {company.name.item()}')\n print(\n f\"Total money raised: ${format(company.funding_total_usd.item(), ',.2f')}\"\n )\n print(f'Total rounds: {company.funding_rounds.item()}')\n print(f'Webpage: {company.homepage_url.item()}')\n print(f'Country: {company.country_code.item()}')\n print(f'Status: {company.status.item()}')\n print(f'Founded in: {company.founded_at.item()}')\n\n\ndef tw_analysis_company(public_tweets, company):\n sentiment_list = []\n for tweet in public_tweets:\n analysis = TextBlob(tweet.text)\n sentiment_list.append(analysis.sentiment[0])\n if sum(sentiment_list) > 0:\n sent = 'Positive'\n elif sum(sentiment_list) < 0:\n sent = 'Negative'\n else:\n sent = 'Neutral'\n print(f'The sentiment about {company} in Twitter is {sent}')\n\n\ndef startup(name, companies, sector):\n company = companies[companies['name'] == name]\n look_for = name\n public_tweets = api.search(name)\n try:\n print(f'Company name: {company.name.item()}')\n print(\n f\"Total money raised: ${format(company.funding_total_usd.item(), ',.2f')}\"\n )\n print(f'Total rounds: {company.funding_rounds.item()}')\n print(f'Webpage: {company.homepage_url.item()}')\n print(f'Country: {company.country_code.item()}')\n print(f'Status: 
{company.status.item()}')\n print('\\n')\n print(f'Competitors similar to {company.name.item()}:')\n print('\\n')\n competitor_info(find_companies_by_size('small', companies, name,\n sector, company))\n print('\\n')\n competitor_info(find_companies_by_size('medium', companies, name,\n sector, company))\n print('\\n')\n competitor_info(find_companies_by_size('big', companies, name,\n sector, company))\n except:\n print(f\"We couldn't find information about {name} in Crunchbase\")\n tw_analysis_company(public_tweets, name)\n most_warnings(find_webs(name), look_for)\n",
"step-5": "# Library for Stalker project\n\n#Libraries \nimport pandas as pd\nimport seaborn as sns\nfrom IPython.display import Image, display\nimport matplotlib.pyplot as plt\n# Google search\nfrom googlesearch import search\n# Tldextract to get domain of url\nimport tldextract as tld\n# BeautifulSoup\nfrom bs4 import BeautifulSoup as bs\nfrom bs4.element import Comment\nimport urllib.request\n# NLTK to analyze webs\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk import FreqDist\nfrom nltk.tokenize import word_tokenize\n# Find close matches\nfrom difflib import get_close_matches\n# Sentiment analysis\nfrom textblob import TextBlob\n# Twitter sentiment analysis\nimport tweepy\n# News API\nfrom newsapi import NewsApiClient\n# Credentials\nimport credentials as cd\n\n# Finding info in APIs\nnewsapi = NewsApiClient(api_key=cd.news_credentials['api_key'])\nnews_sources = 'the-verge,buzzfeed,engadget,hacker-news,mashable,reddit-r-all,wired,techcrunch'\n\n# Twitter API\nconsumer_key = cd.twitter_credentials['consumer_key']\nconsumer_key_secret = cd.twitter_credentials['consumer_key_secret']\naccess_token = cd.twitter_credentials['access_token']\naccess_token_secret = cd.twitter_credentials['access_token_secret']\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_key_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth)\n\n# Finding query on Google\n\n# Finding related urls\n\ndef find_webs(query):\n urls = []\n rrss = ['facebook', 'twitter', 'linkedin', 'instagram', 'youtube','pinterest','angel']\n sites = []\n red_social = False\n for s in search(query, tld=\"com\", num=30, stop=30, pause=3, lang='en'):\n\n if len(urls)<10:\n for rs in rrss:\n if rs in s or tld.extract(s).domain in sites:\n red_social = True\n if not red_social and s not in urls:\n urls.append(s)\n sites.append(tld.extract(s).domain) \n\n red_social = False\n return urls\n\ndef tag_visible(element):\n if element.parent.name in ['style', 'script', 'head', 
'title', 'meta', '[document]']:\n return False\n if isinstance(element, Comment):\n return False\n return True\n\n\ndef text_from_html(body):\n soup = bs(body, 'html.parser')\n texts = soup.findAll(text=True)\n visible_texts = filter(tag_visible, texts) \n return u\" \".join(t.strip() for t in visible_texts)\n\ndef cleaning_urls_text(url):\n try:\n html = text_from_html(urllib.request.urlopen(url).read())\n stop_words = set(stopwords.words('english')) \n word_tokens = word_tokenize(html)\n return [w for w in word_tokens if not w in stop_words]\n except:\n return []\n\ndef filter_warning_words(sentence):\n warning_word = ['lie', 'fraud', 'scam', 'extortion', 'deceit', 'crime','arson', 'assault', 'bigamy', 'blackmail',\n 'bribery', 'burglary', 'child abuse', 'conspiracy', 'espionage', 'forgery', 'fraud', 'genocide', \n 'hijacking','homicide', 'kidnapping', 'manslaughter', 'mugging', 'murder', 'perjury', 'rape', 'riot',\n 'robbery', 'shoplifting', 'slander', 'smuggling', 'treason', 'trespassing']\n return list(filter(lambda word: word in warning_word, sentence))\n\ndef warnings_count(url):\n clean_sentence = cleaning_urls_text(url)\n length = len(filter_warning_words(clean_sentence))\n return (url, length) if length != 0 else None \n\ndef most_warnings(urls, look_for):\n list_len_tup = list(map(warnings_count, urls))\n list_len_tup_clean = list(filter(lambda item: item != None, list_len_tup))\n list_len_tup_clean.sort(key = lambda item: item[1], reverse=True)\n top_urls = [url for url, length in list_len_tup_clean[:2]]\n \n if len(top_urls) > 1:\n print(f\"\"\"\n We found something sketchy. You might want to check these links:\n \n - {top_urls[0]}\n \n - {top_urls[1]}\n \"\"\")\n elif len(top_urls) == 1:\n print(f\"\"\"\n We found something sketchy. You might want to check this link:\n {top_urls[0]}\n \"\"\")\n else:\n print(f\"We couldn't find anything worrying about {look_for} on Google. 
Nice!\")\n \n \n# Input correction\ndef retrieve_name(my_name, companies):\n companies_list = []\n for i in companies.dropna(subset=['name']).name:\n companies_list.append(i)\n \n if my_name in companies_list:\n return my_name\n elif len(get_close_matches(my_name, companies_list)) > 0:\n action = input(\"Did you mean %s instead? [y or n]: \" % get_close_matches(my_name, companies_list)[0])\n if (action == \"y\"):\n return get_close_matches(my_name, companies_list)[0]\n elif (action == \"n\"):\n return my_name\n else:\n return(\"we don't understand you. Apologies.\")\n\ndef retrieve_sector(my_sector, investments):\n investments = investments.dropna(subset=['raised_amount_usd', 'company_category_list'])\n sector_list0 = []\n sector_list = []\n for item in investments['company_category_list']:\n if ',' in item:\n sector_list0.append(item.split(sep=', '))\n else:\n sector_list0.append(item)\n for i in sector_list0:\n if type(i) == list:\n for sec in i:\n sector_list.append(sec)\n\n else:\n sector_list.append(i)\n if my_sector in sector_list:\n return my_sector\n elif len(get_close_matches(my_sector, sector_list)) > 0:\n action = input(\"Did you mean %s instead? 
[y or n]: \" % get_close_matches(my_sector, sector_list) [0])\n if (action == \"y\"):\n return get_close_matches(my_sector, sector_list)[0]\n else:\n return my_sector\n\n # Sentiment analysis tweeter\ndef tw_sent_sector(public_tweets, sector): \n sentiment_list = []\n for tweet in public_tweets:\n analysis = TextBlob(tweet.text)\n sentiment_list.append(analysis.sentiment[0])\n if sum(sentiment_list)>0:\n sent = 'Positive'\n elif sum(sentiment_list)<0:\n sent = 'Negative'\n else:\n sent = 'Neutral'\n print(f\"The sentiment about {sector} industry in Twitter is {sent}\")\n \n \n# Sentiment analysis news\ndef news_sentiment_sector(public_news, sector):\n news_list = []\n for piece in range(len(public_news['articles'])):\n news_list.append(TextBlob(public_news['articles'][piece]['title']).sentiment[0])\n news_list.append(TextBlob(public_news['articles'][piece]['description']).sentiment[0]) \n if sum(news_list)>0:\n news_sent = 'Positive'\n elif sum(news_list)<0:\n news_sent = 'Negative'\n else:\n news_sent = 'Neutral'\n print(f\"There have been {len(public_news)} news pieces about {sector} industry recently and are in general {news_sent}\")\n\n# Look for data about sector \ndef category(sector, investments):\n # Gather tweets\n public_tweets = api.search(sector)\n \n # Gather news\n public_news = newsapi.get_everything(q=sector,sources=news_sources,language='en')\n \n # Prepare the data for the sector\n investments = investments.dropna(subset=['company_category_list'])\n sector_investments = investments[investments['company_category_list'].str.contains(sector)].drop('index',axis=1)\n sector_investments.reset_index(drop=True)\n sector_investments['funded_at'] = pd.to_datetime(sector_investments['funded_at'])\n sector_investments['Year'] = sector_investments['funded_at'].apply(lambda x: x.year )\n sector_investments['Month'] = sector_investments['funded_at'].apply(lambda x: x.month )\n sector_investments['Day'] = sector_investments['funded_at'].apply(lambda x: x.day )\n 
\n # Sentiment analysis Twitter\n tw_sent_sector(public_tweets, sector)\n \n # Sentiment analysis News\n news_sentiment_sector(public_news, sector)\n \n # create plot\n sector_year = sector_investments.groupby(['Year']).sum()[-10:]\n movement = ((sector_year.raised_amount_usd.iloc[len(sector_year)-1] -sector_year.raised_amount_usd.iloc[0])/sector_year.raised_amount_usd.iloc[0]*100)\n if sector_year.raised_amount_usd.iloc[0] + sector_year.raised_amount_usd.iloc[len(sector_year)-1] >= 0:\n in_dec = 'increased'\n grow = 'growing'\n else:\n in_dec = 'decreased'\n grow = 'falling'\n movement = movement[1:]\n sns.lineplot(x=sector_year.index, y=sector_year.raised_amount_usd).set_title(f'Evolution of the amount invested in {sector}')\n investments_per_year = sector_investments.groupby(['Year']).count()\n peak_year = sector_year.index[sector_year['raised_amount_usd']== max(sector_year.raised_amount_usd)].to_list()\n peak_amount = max(sector_year.raised_amount_usd)\n #peak_year_invest = investments_per_year.index[investments_per_year['raised_amount_usd']== max(investments_per_year.raised_amount_usd)].to_list()\n low_amount = min(sector_year.raised_amount_usd)\n most_invested_companies = sector_investments.groupby(by='company_name').sum().sort_values(by='raised_amount_usd', ascending=False)\n low_year = sector_year.index[sector_year['raised_amount_usd']== min(sector_year.raised_amount_usd)].to_list()\n format_doll = ',.2f'\n print(f\"\"\"The amount of money invested in {sector} companies has {in_dec} by {format(abs(movement),format_doll)}% in the last {len(sector_year)} years. 
\nIt peaked in year {peak_year[0]} with ${format(peak_amount,format_doll)} invested and its lowest point was in year {low_year[0]} with ${format(low_amount,format_doll)} invested.\n\"\"\")\n \n plt.ylabel('Raised amount in USD')\n plt.show()\n \n sns.lineplot(x=investments_per_year.index[-10:], y=investments_per_year.Day[-10:]).set_title(f'Evolution of the number of investment in {sector}')\n plt.ylabel('Number of investments')\n \n #print(\"\"\"Plot explanaition average investment\n \n \"\"\")\n plt.show()\n #print(f\"\"\"\n \n # The Top 3 companies with biggest investments are:\n #- {most_invested_companies.index[0]} with ${most_invested_companies.raised_amount_usd[0]} raised,\n #- {most_invested_companies.index[1]} with ${most_invested_companies.raised_amount_usd[1]} raised and\n #- {most_invested_companies.index[2]} with ${most_invested_companies.raised_amount_usd[2]} raised\n \n #\"\"\")\n \n # Sentiment analysis founder\ndef tw_analysis_founder(public_tweets, founder):\n sentiment_list = []\n for tweet in public_tweets:\n analysis = TextBlob(tweet.text)\n sentiment_list.append(analysis.sentiment[0])\n if sum(sentiment_list)>0:\n sent = 'Positive'\n elif sum(sentiment_list)<0:\n sent = 'Negative'\n else:\n sent = 'Neutral'\n print(f\"The sentiment about {founder} in Twitter is {sent}\")\n \n\n# Look for data about the founder\ndef founders(founder, people):\n full_name = founder.split()\n public_tweets = api.search(founder)\n # What to search on Google\n look_for = founder\n for i in range(len(people)):\n if people.first_name.iloc[i] == full_name[0] and people.last_name.iloc[i]==full_name[1]:\n display(Image(url=people.profile_image_url[i]))\n print(f'We found this information about {founder}:')\n print(f\"Founder's name: {people.first_name[i]} {people.last_name[i]} \")\n print(f\"Title: {people.title[i]}\")\n print(f\"Organization: {people.organization[i]}\")\n print(f\"Location: {people.location_city[i]}, {people.location_region[i]}, 
{people.location_country_code[i]}\")\n if people.twitter_url[i] != None:\n print(f\"Twitter URL: {people.twitter_url[i]}\")\n if people.linkedin_url[i] != None:\n print(f\"Linkedin URL: {people.linkedin_url[i]}\")\n if people.facebook_url[i] != None:\n print(f\"Facebook URL: {people.facebook_url[i]}\")\n # Twitter analysis\n tw_analysis_founder(public_tweets, founder)\n # Google search\n \n most_warnings(find_webs(founder), look_for)\n \n \n\n# Look for data about company\ndef find_companies_by_size(size, companies, name, sector, company):\n company_nan = companies.dropna()\n company_sector = company_nan[company_nan['category_list'].str.contains(sector)].drop('index',axis=1).dropna()\n company_sector['total_funding_size']=pd.qcut(company_sector.funding_total_usd, q=[0, .25, .75, 1], labels=['small', 'medium', 'big'])\n if name in company_nan['name']:\n return company_sector[(company_sector['total_funding_size']==size)& (company_sector['funding_total_usd'] > 100000) & (company_sector['status'] != 'closed')& (company_sector['country_code']==company.country_code)].sample()\n else: \n return company_sector[(company_sector['total_funding_size']==size)& (company_sector['funding_total_usd'] > 100000) & (company_sector['status'] != 'closed')].sample()\n \n\n\ndef competitor_info(company):\n print(f\"Company name: {company.name.item()}\")\n print(f\"Total money raised: ${format(company.funding_total_usd.item(),',.2f')}\")\n print(f\"Total rounds: {company.funding_rounds.item()}\")\n print(f\"Webpage: {company.homepage_url.item()}\")\n print(f\"Country: {company.country_code.item()}\")\n print(f\"Status: {company.status.item()}\")\n print(f\"Founded in: {company.founded_at.item()}\")\n\n# Sentiment analysis company\ndef tw_analysis_company(public_tweets, company):\n sentiment_list = []\n for tweet in public_tweets:\n analysis = TextBlob(tweet.text)\n sentiment_list.append(analysis.sentiment[0])\n if sum(sentiment_list)>0:\n sent = 'Positive'\n elif sum(sentiment_list)<0:\n 
sent = 'Negative'\n else:\n sent = 'Neutral'\n print(f\"The sentiment about {company} in Twitter is {sent}\")\n \n\ndef startup(name, companies, sector):\n company = companies[companies['name'] == name]\n # What to search on Google\n look_for = name\n # Gather tweets\n public_tweets = api.search(name)\n try:\n print(f\"Company name: {company.name.item()}\")\n print(f\"Total money raised: ${format(company.funding_total_usd.item(),',.2f')}\")\n print(f\"Total rounds: {company.funding_rounds.item()}\")\n print(f\"Webpage: {company.homepage_url.item()}\")\n print(f\"Country: {company.country_code.item()}\")\n print(f\"Status: {company.status.item()}\")\n \n # Find competitors\n print('\\n')\n print(f\"Competitors similar to {company.name.item()}:\")\n print('\\n')\n competitor_info(find_companies_by_size('small', companies, name, sector, company))\n print('\\n') \n competitor_info(find_companies_by_size('medium', companies, name, sector, company))\n print('\\n') \n competitor_info(find_companies_by_size('big', companies, name, sector, company))\n except: \n print(f\"We couldn't find information about {name} in Crunchbase\")\n \n #Twitter sentiment analysis for company\n tw_analysis_company(public_tweets, name)\n # Google search\n most_warnings(find_webs(name), look_for)\n ",
"step-ids": [
14,
15,
16,
20,
22
]
}
|
[
14,
15,
16,
20,
22
] |
<|reserved_special_token_0|>
class DownVoteHandler(MainHandler):
def get(self):
user = self.get_user()
if user:
post_id = self.request.get('post_id')
post = PostData.get_by_id(int(post_id))
voter_list = post.voter_list
if post.author == user:
error = 'cant vote for self'
self.render('mainpage.html', error=error)
elif user in voter_list:
error = 'cant vote twice'
self.render('mainpage.html', error=error)
else:
post.downscore += 1
voter_list.append(user)
post.put()
self.redirect('/blog/' + post_id)
else:
self.redirect('/')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UpVoteHandler(MainHandler):
<|reserved_special_token_0|>
class DownVoteHandler(MainHandler):
def get(self):
user = self.get_user()
if user:
post_id = self.request.get('post_id')
post = PostData.get_by_id(int(post_id))
voter_list = post.voter_list
if post.author == user:
error = 'cant vote for self'
self.render('mainpage.html', error=error)
elif user in voter_list:
error = 'cant vote twice'
self.render('mainpage.html', error=error)
else:
post.downscore += 1
voter_list.append(user)
post.put()
self.redirect('/blog/' + post_id)
else:
self.redirect('/')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UpVoteHandler(MainHandler):
def get(self):
user = self.get_user()
if user:
post_id = self.request.get('post_id')
post = PostData.get_by_id(int(post_id))
voter_list = post.voter_list
if post.author == user:
error = 'cant vote for self'
self.render('mainpage.html', error=error)
elif user in voter_list:
error = 'cant vote twice'
self.render('mainpage.html', error=error)
else:
post.upscore += 1
voter_list.append(user)
post.put()
self.redirect('/blog/' + post_id)
else:
self.redirect('/')
class DownVoteHandler(MainHandler):
def get(self):
user = self.get_user()
if user:
post_id = self.request.get('post_id')
post = PostData.get_by_id(int(post_id))
voter_list = post.voter_list
if post.author == user:
error = 'cant vote for self'
self.render('mainpage.html', error=error)
elif user in voter_list:
error = 'cant vote twice'
self.render('mainpage.html', error=error)
else:
post.downscore += 1
voter_list.append(user)
post.put()
self.redirect('/blog/' + post_id)
else:
self.redirect('/')
<|reserved_special_token_1|>
from mainhandler import MainHandler
from sec.data import *
class UpVoteHandler(MainHandler):
def get(self):
user = self.get_user()
if user:
post_id = self.request.get('post_id')
post = PostData.get_by_id(int(post_id))
voter_list = post.voter_list
if post.author == user:
error = 'cant vote for self'
self.render('mainpage.html', error=error)
elif user in voter_list:
error = 'cant vote twice'
self.render('mainpage.html', error=error)
else:
post.upscore += 1
voter_list.append(user)
post.put()
self.redirect('/blog/' + post_id)
else:
self.redirect('/')
class DownVoteHandler(MainHandler):
def get(self):
user = self.get_user()
if user:
post_id = self.request.get('post_id')
post = PostData.get_by_id(int(post_id))
voter_list = post.voter_list
if post.author == user:
error = 'cant vote for self'
self.render('mainpage.html', error=error)
elif user in voter_list:
error = 'cant vote twice'
self.render('mainpage.html', error=error)
else:
post.downscore += 1
voter_list.append(user)
post.put()
self.redirect('/blog/' + post_id)
else:
self.redirect('/')
<|reserved_special_token_1|>
# [BEGIN IMPORTS]
from mainhandler import MainHandler
from sec.data import *
# [END IMPORTS]
class UpVoteHandler (MainHandler):
def get(self):
user = self.get_user()
if user:
post_id = self.request.get('post_id')
post = PostData.get_by_id(int(post_id))
voter_list = post.voter_list
if post.author == user:
error = "cant vote for self"
self.render('mainpage.html', error=error)
elif user in voter_list:
error = "cant vote twice"
self.render('mainpage.html', error=error)
else:
post.upscore += 1
voter_list.append(user)
post.put()
self.redirect('/blog/' + post_id)
else:
self.redirect('/')
class DownVoteHandler (MainHandler):
def get(self):
user = self.get_user()
if user:
post_id = self.request.get('post_id')
post = PostData.get_by_id(int(post_id))
voter_list = post.voter_list
if post.author == user:
error = "cant vote for self"
self.render('mainpage.html', error=error)
elif user in voter_list:
error = "cant vote twice"
self.render('mainpage.html', error=error)
else:
post.downscore += 1
voter_list.append(user)
post.put()
self.redirect('/blog/' + post_id)
else:
self.redirect('/')
|
flexible
|
{
"blob_id": "5711613df0bda10512466f147febcffacfe1607b",
"index": 7794,
"step-1": "<mask token>\n\n\nclass DownVoteHandler(MainHandler):\n\n def get(self):\n user = self.get_user()\n if user:\n post_id = self.request.get('post_id')\n post = PostData.get_by_id(int(post_id))\n voter_list = post.voter_list\n if post.author == user:\n error = 'cant vote for self'\n self.render('mainpage.html', error=error)\n elif user in voter_list:\n error = 'cant vote twice'\n self.render('mainpage.html', error=error)\n else:\n post.downscore += 1\n voter_list.append(user)\n post.put()\n self.redirect('/blog/' + post_id)\n else:\n self.redirect('/')\n",
"step-2": "<mask token>\n\n\nclass UpVoteHandler(MainHandler):\n <mask token>\n\n\nclass DownVoteHandler(MainHandler):\n\n def get(self):\n user = self.get_user()\n if user:\n post_id = self.request.get('post_id')\n post = PostData.get_by_id(int(post_id))\n voter_list = post.voter_list\n if post.author == user:\n error = 'cant vote for self'\n self.render('mainpage.html', error=error)\n elif user in voter_list:\n error = 'cant vote twice'\n self.render('mainpage.html', error=error)\n else:\n post.downscore += 1\n voter_list.append(user)\n post.put()\n self.redirect('/blog/' + post_id)\n else:\n self.redirect('/')\n",
"step-3": "<mask token>\n\n\nclass UpVoteHandler(MainHandler):\n\n def get(self):\n user = self.get_user()\n if user:\n post_id = self.request.get('post_id')\n post = PostData.get_by_id(int(post_id))\n voter_list = post.voter_list\n if post.author == user:\n error = 'cant vote for self'\n self.render('mainpage.html', error=error)\n elif user in voter_list:\n error = 'cant vote twice'\n self.render('mainpage.html', error=error)\n else:\n post.upscore += 1\n voter_list.append(user)\n post.put()\n self.redirect('/blog/' + post_id)\n else:\n self.redirect('/')\n\n\nclass DownVoteHandler(MainHandler):\n\n def get(self):\n user = self.get_user()\n if user:\n post_id = self.request.get('post_id')\n post = PostData.get_by_id(int(post_id))\n voter_list = post.voter_list\n if post.author == user:\n error = 'cant vote for self'\n self.render('mainpage.html', error=error)\n elif user in voter_list:\n error = 'cant vote twice'\n self.render('mainpage.html', error=error)\n else:\n post.downscore += 1\n voter_list.append(user)\n post.put()\n self.redirect('/blog/' + post_id)\n else:\n self.redirect('/')\n",
"step-4": "from mainhandler import MainHandler\nfrom sec.data import *\n\n\nclass UpVoteHandler(MainHandler):\n\n def get(self):\n user = self.get_user()\n if user:\n post_id = self.request.get('post_id')\n post = PostData.get_by_id(int(post_id))\n voter_list = post.voter_list\n if post.author == user:\n error = 'cant vote for self'\n self.render('mainpage.html', error=error)\n elif user in voter_list:\n error = 'cant vote twice'\n self.render('mainpage.html', error=error)\n else:\n post.upscore += 1\n voter_list.append(user)\n post.put()\n self.redirect('/blog/' + post_id)\n else:\n self.redirect('/')\n\n\nclass DownVoteHandler(MainHandler):\n\n def get(self):\n user = self.get_user()\n if user:\n post_id = self.request.get('post_id')\n post = PostData.get_by_id(int(post_id))\n voter_list = post.voter_list\n if post.author == user:\n error = 'cant vote for self'\n self.render('mainpage.html', error=error)\n elif user in voter_list:\n error = 'cant vote twice'\n self.render('mainpage.html', error=error)\n else:\n post.downscore += 1\n voter_list.append(user)\n post.put()\n self.redirect('/blog/' + post_id)\n else:\n self.redirect('/')\n",
"step-5": "# [BEGIN IMPORTS]\nfrom mainhandler import MainHandler\nfrom sec.data import *\n# [END IMPORTS]\n\n\nclass UpVoteHandler (MainHandler):\n\n def get(self):\n user = self.get_user()\n if user:\n post_id = self.request.get('post_id')\n post = PostData.get_by_id(int(post_id))\n\n voter_list = post.voter_list\n\n if post.author == user:\n error = \"cant vote for self\"\n self.render('mainpage.html', error=error)\n elif user in voter_list:\n error = \"cant vote twice\"\n self.render('mainpage.html', error=error)\n else:\n post.upscore += 1\n voter_list.append(user)\n post.put()\n self.redirect('/blog/' + post_id)\n else:\n self.redirect('/')\n\n\nclass DownVoteHandler (MainHandler):\n\n def get(self):\n user = self.get_user()\n if user:\n post_id = self.request.get('post_id')\n post = PostData.get_by_id(int(post_id))\n\n voter_list = post.voter_list\n\n if post.author == user:\n error = \"cant vote for self\"\n self.render('mainpage.html', error=error)\n elif user in voter_list:\n error = \"cant vote twice\"\n self.render('mainpage.html', error=error)\n else:\n post.downscore += 1\n voter_list.append(user)\n post.put()\n self.redirect('/blog/' + post_id)\n else:\n self.redirect('/')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('Asha', '0005_baby')]
operations = [migrations.AlterField(model_name='baby', name='Auth_Id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
to='Asha.BasicDetails'))]
<|reserved_special_token_1|>
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [('Asha', '0005_baby')]
operations = [migrations.AlterField(model_name='baby', name='Auth_Id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
to='Asha.BasicDetails'))]
<|reserved_special_token_1|>
# Generated by Django 3.0.8 on 2021-03-25 13:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Asha', '0005_baby'),
]
operations = [
migrations.AlterField(
model_name='baby',
name='Auth_Id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Asha.BasicDetails'),
),
]
|
flexible
|
{
"blob_id": "e14b8d0f85042ceda955022bee08b3b3b4c2361d",
"index": 7367,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('Asha', '0005_baby')]\n operations = [migrations.AlterField(model_name='baby', name='Auth_Id',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n to='Asha.BasicDetails'))]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('Asha', '0005_baby')]\n operations = [migrations.AlterField(model_name='baby', name='Auth_Id',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n to='Asha.BasicDetails'))]\n",
"step-5": "# Generated by Django 3.0.8 on 2021-03-25 13:47\r\n\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('Asha', '0005_baby'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='baby',\r\n name='Auth_Id',\r\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Asha.BasicDetails'),\r\n ),\r\n ]\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 18 18:53:02 2020
@author: vinhe
I followed below tutorial to push newly created csv to google sheets:
https://medium.com/craftsmenltd/from-csv-to-google-sheet-using-python-ef097cb014f9
"""
import gspread
from oauth2client.service_account import ServiceAccountCredentials
scope = ["https://spreadsheets.google.com/feeds",
"https://www.googleapis.com/auth/spreadsheets",
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/drive"]
credentials = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)
client = gspread.authorize(credentials)
spreadsheet = client.open('golf-csv-to-sheets')
with open('C:/users/vinhe/code/projects/golf/golf_stats.csv', 'r') as file_obj:
content = file_obj.read()
client.import_csv(spreadsheet.id, data=content)
|
normal
|
{
"blob_id": "ac2edcd6ea71ebdc5b1df5fd4211632b5d8e2704",
"index": 3019,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('C:/users/vinhe/code/projects/golf/golf_stats.csv', 'r') as file_obj:\n content = file_obj.read()\n client.import_csv(spreadsheet.id, data=content)\n",
"step-3": "<mask token>\nscope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/spreadsheets',\n 'https://www.googleapis.com/auth/drive.file',\n 'https://www.googleapis.com/auth/drive']\ncredentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'client_secret.json', scope)\nclient = gspread.authorize(credentials)\nspreadsheet = client.open('golf-csv-to-sheets')\nwith open('C:/users/vinhe/code/projects/golf/golf_stats.csv', 'r') as file_obj:\n content = file_obj.read()\n client.import_csv(spreadsheet.id, data=content)\n",
"step-4": "<mask token>\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\nscope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/spreadsheets',\n 'https://www.googleapis.com/auth/drive.file',\n 'https://www.googleapis.com/auth/drive']\ncredentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'client_secret.json', scope)\nclient = gspread.authorize(credentials)\nspreadsheet = client.open('golf-csv-to-sheets')\nwith open('C:/users/vinhe/code/projects/golf/golf_stats.csv', 'r') as file_obj:\n content = file_obj.read()\n client.import_csv(spreadsheet.id, data=content)\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 18 18:53:02 2020\r\n\r\n@author: vinhe\r\n\r\nI followed below tutorial to push newly created csv to google sheets:\r\nhttps://medium.com/craftsmenltd/from-csv-to-google-sheet-using-python-ef097cb014f9\r\n\r\n\"\"\"\r\n\r\n\r\nimport gspread\r\nfrom oauth2client.service_account import ServiceAccountCredentials\r\n\r\nscope = [\"https://spreadsheets.google.com/feeds\", \r\n \"https://www.googleapis.com/auth/spreadsheets\",\r\n \"https://www.googleapis.com/auth/drive.file\", \r\n \"https://www.googleapis.com/auth/drive\"]\r\n\r\ncredentials = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)\r\nclient = gspread.authorize(credentials)\r\n\r\nspreadsheet = client.open('golf-csv-to-sheets')\r\n\r\nwith open('C:/users/vinhe/code/projects/golf/golf_stats.csv', 'r') as file_obj:\r\n content = file_obj.read()\r\n client.import_csv(spreadsheet.id, data=content)\r\n\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
num=5
a=5
for row in range(num,0,-1):
for col in range(row,0,-1):
print(a,end="")
a-=1
print()
|
normal
|
{
"blob_id": "a567a2dc1dbb59979d849a5a772e4592910a9f27",
"index": 2783,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor row in range(num, 0, -1):\n for col in range(row, 0, -1):\n print(a, end='')\n a -= 1\n print()\n",
"step-3": "num = 5\na = 5\nfor row in range(num, 0, -1):\n for col in range(row, 0, -1):\n print(a, end='')\n a -= 1\n print()\n",
"step-4": "num=5\r\na=5\r\nfor row in range(num,0,-1):\r\n for col in range(row,0,-1):\r\n print(a,end=\"\")\r\n a-=1\r\n print()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_token_lookups_with_full_data():
token_lookup = mango.SplTokenLookup.load(mango.SplTokenLookup.
DefaultDataFilepath)
assert token_lookup.find_by_symbol('BTC').mint == PublicKey(
'9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E')
assert token_lookup.find_by_symbol('ETH').mint == PublicKey(
'2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk')
assert token_lookup.find_by_mint(
'AKJHspCwDhABucCxNLXUSfEzb7Ny62RqFtC9uNjJi4fq').symbol == 'SRM-SOL'
assert token_lookup.find_by_mint(
'Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB').symbol == 'USDT'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_token_lookup():
data = {'tokens': [{'address':
'So11111111111111111111111111111111111111112', 'symbol': 'SOL',
'name': 'Wrapped SOL', 'decimals': 9}, {'address':
'EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v', 'symbol': 'USDC',
'name': 'USD Coin', 'decimals': 6}, {'address':
'9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E', 'symbol': 'BTC',
'name': 'Wrapped Bitcoin (Sollet)', 'decimals': 6}, {'address':
'2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk', 'symbol': 'ETH',
'name': 'Wrapped Ethereum (Sollet)', 'decimals': 6}]}
actual = mango.SplTokenLookup('test-filename', data)
assert actual is not None
assert actual.logger is not None
assert actual.find_by_symbol('ETH') is not None
assert actual.find_by_symbol('ETH').name == 'Wrapped Ethereum (Sollet)'
assert actual.find_by_symbol('BTC') is not None
assert actual.find_by_symbol('BTC').name == 'Wrapped Bitcoin (Sollet)'
def test_token_lookups_with_full_data():
token_lookup = mango.SplTokenLookup.load(mango.SplTokenLookup.
DefaultDataFilepath)
assert token_lookup.find_by_symbol('BTC').mint == PublicKey(
'9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E')
assert token_lookup.find_by_symbol('ETH').mint == PublicKey(
'2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk')
assert token_lookup.find_by_mint(
'AKJHspCwDhABucCxNLXUSfEzb7Ny62RqFtC9uNjJi4fq').symbol == 'SRM-SOL'
assert token_lookup.find_by_mint(
'Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB').symbol == 'USDT'
<|reserved_special_token_1|>
from .context import mango
from solana.publickey import PublicKey
def test_token_lookup():
data = {'tokens': [{'address':
'So11111111111111111111111111111111111111112', 'symbol': 'SOL',
'name': 'Wrapped SOL', 'decimals': 9}, {'address':
'EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v', 'symbol': 'USDC',
'name': 'USD Coin', 'decimals': 6}, {'address':
'9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E', 'symbol': 'BTC',
'name': 'Wrapped Bitcoin (Sollet)', 'decimals': 6}, {'address':
'2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk', 'symbol': 'ETH',
'name': 'Wrapped Ethereum (Sollet)', 'decimals': 6}]}
actual = mango.SplTokenLookup('test-filename', data)
assert actual is not None
assert actual.logger is not None
assert actual.find_by_symbol('ETH') is not None
assert actual.find_by_symbol('ETH').name == 'Wrapped Ethereum (Sollet)'
assert actual.find_by_symbol('BTC') is not None
assert actual.find_by_symbol('BTC').name == 'Wrapped Bitcoin (Sollet)'
def test_token_lookups_with_full_data():
token_lookup = mango.SplTokenLookup.load(mango.SplTokenLookup.
DefaultDataFilepath)
assert token_lookup.find_by_symbol('BTC').mint == PublicKey(
'9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E')
assert token_lookup.find_by_symbol('ETH').mint == PublicKey(
'2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk')
assert token_lookup.find_by_mint(
'AKJHspCwDhABucCxNLXUSfEzb7Ny62RqFtC9uNjJi4fq').symbol == 'SRM-SOL'
assert token_lookup.find_by_mint(
'Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB').symbol == 'USDT'
<|reserved_special_token_1|>
from .context import mango
from solana.publickey import PublicKey
def test_token_lookup():
data = {
"tokens": [
{
"address": "So11111111111111111111111111111111111111112",
"symbol": "SOL",
"name": "Wrapped SOL",
"decimals": 9,
},
{
"address": "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v",
"symbol": "USDC",
"name": "USD Coin",
"decimals": 6,
},
{
"address": "9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E",
"symbol": "BTC",
"name": "Wrapped Bitcoin (Sollet)",
"decimals": 6,
},
{
"address": "2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk",
"symbol": "ETH",
"name": "Wrapped Ethereum (Sollet)",
"decimals": 6,
}]
}
actual = mango.SplTokenLookup("test-filename", data)
assert actual is not None
assert actual.logger is not None
assert actual.find_by_symbol("ETH") is not None
assert actual.find_by_symbol("ETH").name == "Wrapped Ethereum (Sollet)"
assert actual.find_by_symbol("BTC") is not None
assert actual.find_by_symbol("BTC").name == "Wrapped Bitcoin (Sollet)"
def test_token_lookups_with_full_data():
token_lookup = mango.SplTokenLookup.load(mango.SplTokenLookup.DefaultDataFilepath)
assert token_lookup.find_by_symbol("BTC").mint == PublicKey("9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E")
assert token_lookup.find_by_symbol("ETH").mint == PublicKey("2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk")
assert token_lookup.find_by_mint("AKJHspCwDhABucCxNLXUSfEzb7Ny62RqFtC9uNjJi4fq").symbol == "SRM-SOL"
assert token_lookup.find_by_mint("Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB").symbol == "USDT"
|
flexible
|
{
"blob_id": "5e7a589af69a604021ed9558fcce721a8e254fee",
"index": 5269,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_token_lookups_with_full_data():\n token_lookup = mango.SplTokenLookup.load(mango.SplTokenLookup.\n DefaultDataFilepath)\n assert token_lookup.find_by_symbol('BTC').mint == PublicKey(\n '9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E')\n assert token_lookup.find_by_symbol('ETH').mint == PublicKey(\n '2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk')\n assert token_lookup.find_by_mint(\n 'AKJHspCwDhABucCxNLXUSfEzb7Ny62RqFtC9uNjJi4fq').symbol == 'SRM-SOL'\n assert token_lookup.find_by_mint(\n 'Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB').symbol == 'USDT'\n",
"step-3": "<mask token>\n\n\ndef test_token_lookup():\n data = {'tokens': [{'address':\n 'So11111111111111111111111111111111111111112', 'symbol': 'SOL',\n 'name': 'Wrapped SOL', 'decimals': 9}, {'address':\n 'EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v', 'symbol': 'USDC',\n 'name': 'USD Coin', 'decimals': 6}, {'address':\n '9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E', 'symbol': 'BTC',\n 'name': 'Wrapped Bitcoin (Sollet)', 'decimals': 6}, {'address':\n '2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk', 'symbol': 'ETH',\n 'name': 'Wrapped Ethereum (Sollet)', 'decimals': 6}]}\n actual = mango.SplTokenLookup('test-filename', data)\n assert actual is not None\n assert actual.logger is not None\n assert actual.find_by_symbol('ETH') is not None\n assert actual.find_by_symbol('ETH').name == 'Wrapped Ethereum (Sollet)'\n assert actual.find_by_symbol('BTC') is not None\n assert actual.find_by_symbol('BTC').name == 'Wrapped Bitcoin (Sollet)'\n\n\ndef test_token_lookups_with_full_data():\n token_lookup = mango.SplTokenLookup.load(mango.SplTokenLookup.\n DefaultDataFilepath)\n assert token_lookup.find_by_symbol('BTC').mint == PublicKey(\n '9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E')\n assert token_lookup.find_by_symbol('ETH').mint == PublicKey(\n '2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk')\n assert token_lookup.find_by_mint(\n 'AKJHspCwDhABucCxNLXUSfEzb7Ny62RqFtC9uNjJi4fq').symbol == 'SRM-SOL'\n assert token_lookup.find_by_mint(\n 'Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB').symbol == 'USDT'\n",
"step-4": "from .context import mango\nfrom solana.publickey import PublicKey\n\n\ndef test_token_lookup():\n data = {'tokens': [{'address':\n 'So11111111111111111111111111111111111111112', 'symbol': 'SOL',\n 'name': 'Wrapped SOL', 'decimals': 9}, {'address':\n 'EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v', 'symbol': 'USDC',\n 'name': 'USD Coin', 'decimals': 6}, {'address':\n '9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E', 'symbol': 'BTC',\n 'name': 'Wrapped Bitcoin (Sollet)', 'decimals': 6}, {'address':\n '2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk', 'symbol': 'ETH',\n 'name': 'Wrapped Ethereum (Sollet)', 'decimals': 6}]}\n actual = mango.SplTokenLookup('test-filename', data)\n assert actual is not None\n assert actual.logger is not None\n assert actual.find_by_symbol('ETH') is not None\n assert actual.find_by_symbol('ETH').name == 'Wrapped Ethereum (Sollet)'\n assert actual.find_by_symbol('BTC') is not None\n assert actual.find_by_symbol('BTC').name == 'Wrapped Bitcoin (Sollet)'\n\n\ndef test_token_lookups_with_full_data():\n token_lookup = mango.SplTokenLookup.load(mango.SplTokenLookup.\n DefaultDataFilepath)\n assert token_lookup.find_by_symbol('BTC').mint == PublicKey(\n '9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E')\n assert token_lookup.find_by_symbol('ETH').mint == PublicKey(\n '2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk')\n assert token_lookup.find_by_mint(\n 'AKJHspCwDhABucCxNLXUSfEzb7Ny62RqFtC9uNjJi4fq').symbol == 'SRM-SOL'\n assert token_lookup.find_by_mint(\n 'Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB').symbol == 'USDT'\n",
"step-5": "from .context import mango\n\nfrom solana.publickey import PublicKey\n\n\ndef test_token_lookup():\n data = {\n \"tokens\": [\n {\n \"address\": \"So11111111111111111111111111111111111111112\",\n \"symbol\": \"SOL\",\n \"name\": \"Wrapped SOL\",\n \"decimals\": 9,\n },\n {\n \"address\": \"EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v\",\n \"symbol\": \"USDC\",\n \"name\": \"USD Coin\",\n \"decimals\": 6,\n },\n {\n \"address\": \"9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E\",\n \"symbol\": \"BTC\",\n \"name\": \"Wrapped Bitcoin (Sollet)\",\n \"decimals\": 6,\n },\n {\n \"address\": \"2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk\",\n \"symbol\": \"ETH\",\n \"name\": \"Wrapped Ethereum (Sollet)\",\n \"decimals\": 6,\n }]\n }\n actual = mango.SplTokenLookup(\"test-filename\", data)\n assert actual is not None\n assert actual.logger is not None\n assert actual.find_by_symbol(\"ETH\") is not None\n assert actual.find_by_symbol(\"ETH\").name == \"Wrapped Ethereum (Sollet)\"\n assert actual.find_by_symbol(\"BTC\") is not None\n assert actual.find_by_symbol(\"BTC\").name == \"Wrapped Bitcoin (Sollet)\"\n\n\ndef test_token_lookups_with_full_data():\n token_lookup = mango.SplTokenLookup.load(mango.SplTokenLookup.DefaultDataFilepath)\n assert token_lookup.find_by_symbol(\"BTC\").mint == PublicKey(\"9n4nbM75f5Ui33ZbPYXn59EwSgE8CGsHtAeTH5YFeJ9E\")\n assert token_lookup.find_by_symbol(\"ETH\").mint == PublicKey(\"2FPyTwcZLUg1MDrwsyoP4D6s1tM7hAkHYRjkNb5w6Pxk\")\n assert token_lookup.find_by_mint(\"AKJHspCwDhABucCxNLXUSfEzb7Ny62RqFtC9uNjJi4fq\").symbol == \"SRM-SOL\"\n assert token_lookup.find_by_mint(\"Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB\").symbol == \"USDT\"\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from igbot import InstaBot
from settings import username, pw
from sys import argv
def execute_script(InstaBot):
InstaBot.get_unfollowers()
#InstaBot.unfollow()
#InstaBot.follow()
#InstaBot.remove_followers()
def isheadless():
if len(argv) > 1:
if argv[1] == 'head':
return False
else:
raise ValueError("optional arg must be : 'head'")
return True
if __name__ == '__main__':
bot = None
headless = isheadless()
if headless:
bot = InstaBot(username, pw, True)
else:
bot = InstaBot(username, pw)
if bot.legal:
execute_script(bot)
bot.close_session()
|
normal
|
{
"blob_id": "f379092cefe83a0a449789fbc09af490081b00a4",
"index": 3818,
"step-1": "<mask token>\n\n\ndef isheadless():\n if len(argv) > 1:\n if argv[1] == 'head':\n return False\n else:\n raise ValueError(\"optional arg must be : 'head'\")\n return True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef execute_script(InstaBot):\n InstaBot.get_unfollowers()\n\n\ndef isheadless():\n if len(argv) > 1:\n if argv[1] == 'head':\n return False\n else:\n raise ValueError(\"optional arg must be : 'head'\")\n return True\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef execute_script(InstaBot):\n InstaBot.get_unfollowers()\n\n\ndef isheadless():\n if len(argv) > 1:\n if argv[1] == 'head':\n return False\n else:\n raise ValueError(\"optional arg must be : 'head'\")\n return True\n\n\nif __name__ == '__main__':\n bot = None\n headless = isheadless()\n if headless:\n bot = InstaBot(username, pw, True)\n else:\n bot = InstaBot(username, pw)\n if bot.legal:\n execute_script(bot)\n bot.close_session()\n",
"step-4": "from igbot import InstaBot\nfrom settings import username, pw\nfrom sys import argv\n\n\ndef execute_script(InstaBot):\n InstaBot.get_unfollowers()\n\n\ndef isheadless():\n if len(argv) > 1:\n if argv[1] == 'head':\n return False\n else:\n raise ValueError(\"optional arg must be : 'head'\")\n return True\n\n\nif __name__ == '__main__':\n bot = None\n headless = isheadless()\n if headless:\n bot = InstaBot(username, pw, True)\n else:\n bot = InstaBot(username, pw)\n if bot.legal:\n execute_script(bot)\n bot.close_session()\n",
"step-5": "from igbot import InstaBot\nfrom settings import username, pw\nfrom sys import argv\n\ndef execute_script(InstaBot):\n\tInstaBot.get_unfollowers()\n\t#InstaBot.unfollow()\n\t#InstaBot.follow()\n\t#InstaBot.remove_followers()\n\ndef isheadless():\n\tif len(argv) > 1:\n\t\tif argv[1] == 'head':\n\t\t\treturn False\n\t\telse:\n\t\t\traise ValueError(\"optional arg must be : 'head'\")\n\treturn True\n\nif __name__ == '__main__':\n\tbot = None\n\theadless = isheadless()\n\tif headless:\n\t\tbot = InstaBot(username, pw, True)\n\telse:\n\t\tbot = InstaBot(username, pw)\n\n\tif bot.legal:\n\t\texecute_script(bot)\n\t\tbot.close_session()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def parse_config_file_from_disk(path, confname='diskconf.json'):
json_path = str(path) + '/' + str(confname)
if not os.path.exists(json_path):
module_print('\tPath not exists: ' + str(json_path))
return None
try:
with open(json_path, 'r') as f:
data = json.load(f)
module_print('config: ' + str(confname) + ' => ' + str(data))
except Exception as e:
module_print('Json parse error: ' + str(e))
return None
return data
def write_state_config_file_from_disk(path, data, confname='diskconf.json'):
json_path = str(path) + '/' + str(confname)
try:
if os.path.exists(json_path):
module_print('\tWrite back format state to ' + str(json_path))
with open(json_path, 'w') as f:
if str(data['is_formatted']).lower() == 'false':
data['is_formatted'] = 'True'
json.dump(data, f, indent=2)
module_print('\t\tSUCCESS')
else:
module_print('State already set')
else:
module_print('diskconf not exists: ' + str(json_path))
except Exception as e:
module_print('\t\tFAILED')
module_print('Write back format state to disk failed:' + str(e))
def save_diskconf_file(path, confname='diskconf.json'):
json_path = str(path) + '/' + str(confname)
save_path = '/tmp'
cmd = 'sudo cp {} {}'.format(json_path, save_path)
exitcode, stdout, stderr = LocalMachine.run_command(cmd)
BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)
def restore_diskconf_file(path, confname='diskconf.json'):
json_path = str(path) + '/' + str(confname)
save_path = '/tmp/' + str(confname)
cmd = 'sudo cp {} {}'.format(save_path, json_path)
exitcode, stdout, stderr = LocalMachine.run_command(cmd)
BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)
cmd = 'sudo rm -f {}'.format(save_path)
exitcode, stdout, stderr = LocalMachine.run_command(cmd)
BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)
def safe_format_disk_check_force_mode(json_data, dev):
dev_data_modified = False
dev_data = BlockDeviceHandler.get_device_info_data(dev)
if json_data['label'] != dev_data['label']:
dev_data_modified = True
if json_data['format'] != dev_data['filesystem']:
dev_data_modified = True
if str(json_data['is_formatted']).lower() == 'false':
if str(json_data['force']).lower(
) == 'true' and dev_data_modified is False:
module_print(
'[i] [format] Block device paramaters not changed but force mode is ON'
)
return True
elif dev_data_modified is True:
module_print(
'[i] [format] Requested block device parameter(s) changed - format'
)
return True
else:
module_print(
'[i] [Skip format] Blockdevice format not needed - label and system not changed'
)
return False
else:
module_print('[i] [is_formatted:True] Blockdevice already formatted.')
return False
def format_device_based_on_config_file(dev, premount_path):
module_print('Format device')
diskconf_path = premount_path
data = parse_config_file_from_disk(diskconf_path)
if data is not None:
if safe_format_disk_check_force_mode(data, dev):
module_print('\tSave disk config file before formatting')
save_diskconf_file(diskconf_path)
module_print('\tUnmount device before formatting')
BlockDeviceHandler.unmount_device(dev)
module_print('\tFormat device')
BlockDeviceHandler.format_ex4(dev, data['label'])
module_print('\tMount formatted device')
mount_point = BlockDeviceHandler.mount_device(dev)
module_print('\tRestore config file to disk after formating')
restore_diskconf_file(mount_point)
module_print('\tSave back the the config file with the new state')
write_state_config_file_from_disk(mount_point, data)
else:
module_print('\tDisk already formatted: {}:{}'.format(dev,
premount_path))
module_print('mount device: ' + str(dev))
mount_point = BlockDeviceHandler.mount_device(dev)
def prepare_block_device():
if BlockDeviceHandler.is_any_device_avaible():
module_print('Block device exists')
devices = BlockDeviceHandler.list_connected_devices()
for dev in devices:
premount_path = BlockDeviceHandler.premount_device(dev)
format_device_based_on_config_file(dev, premount_path)
BlockDeviceHandler.unmount_all_premounted_devices()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def module_print(text):
print_text = '[ autoformat disk ] ' + str(text)
print(print_text)
def parse_config_file_from_disk(path, confname='diskconf.json'):
json_path = str(path) + '/' + str(confname)
if not os.path.exists(json_path):
module_print('\tPath not exists: ' + str(json_path))
return None
try:
with open(json_path, 'r') as f:
data = json.load(f)
module_print('config: ' + str(confname) + ' => ' + str(data))
except Exception as e:
module_print('Json parse error: ' + str(e))
return None
return data
def write_state_config_file_from_disk(path, data, confname='diskconf.json'):
json_path = str(path) + '/' + str(confname)
try:
if os.path.exists(json_path):
module_print('\tWrite back format state to ' + str(json_path))
with open(json_path, 'w') as f:
if str(data['is_formatted']).lower() == 'false':
data['is_formatted'] = 'True'
json.dump(data, f, indent=2)
module_print('\t\tSUCCESS')
else:
module_print('State already set')
else:
module_print('diskconf not exists: ' + str(json_path))
except Exception as e:
module_print('\t\tFAILED')
module_print('Write back format state to disk failed:' + str(e))
def save_diskconf_file(path, confname='diskconf.json'):
json_path = str(path) + '/' + str(confname)
save_path = '/tmp'
cmd = 'sudo cp {} {}'.format(json_path, save_path)
exitcode, stdout, stderr = LocalMachine.run_command(cmd)
BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)
def restore_diskconf_file(path, confname='diskconf.json'):
json_path = str(path) + '/' + str(confname)
save_path = '/tmp/' + str(confname)
cmd = 'sudo cp {} {}'.format(save_path, json_path)
exitcode, stdout, stderr = LocalMachine.run_command(cmd)
BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)
cmd = 'sudo rm -f {}'.format(save_path)
exitcode, stdout, stderr = LocalMachine.run_command(cmd)
BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)
def safe_format_disk_check_force_mode(json_data, dev):
dev_data_modified = False
dev_data = BlockDeviceHandler.get_device_info_data(dev)
if json_data['label'] != dev_data['label']:
dev_data_modified = True
if json_data['format'] != dev_data['filesystem']:
dev_data_modified = True
if str(json_data['is_formatted']).lower() == 'false':
if str(json_data['force']).lower(
) == 'true' and dev_data_modified is False:
module_print(
'[i] [format] Block device paramaters not changed but force mode is ON'
)
return True
elif dev_data_modified is True:
module_print(
'[i] [format] Requested block device parameter(s) changed - format'
)
return True
else:
module_print(
'[i] [Skip format] Blockdevice format not needed - label and system not changed'
)
return False
else:
module_print('[i] [is_formatted:True] Blockdevice already formatted.')
return False
def format_device_based_on_config_file(dev, premount_path):
module_print('Format device')
diskconf_path = premount_path
data = parse_config_file_from_disk(diskconf_path)
if data is not None:
if safe_format_disk_check_force_mode(data, dev):
module_print('\tSave disk config file before formatting')
save_diskconf_file(diskconf_path)
module_print('\tUnmount device before formatting')
BlockDeviceHandler.unmount_device(dev)
module_print('\tFormat device')
BlockDeviceHandler.format_ex4(dev, data['label'])
module_print('\tMount formatted device')
mount_point = BlockDeviceHandler.mount_device(dev)
module_print('\tRestore config file to disk after formating')
restore_diskconf_file(mount_point)
module_print('\tSave back the the config file with the new state')
write_state_config_file_from_disk(mount_point, data)
else:
module_print('\tDisk already formatted: {}:{}'.format(dev,
premount_path))
module_print('mount device: ' + str(dev))
mount_point = BlockDeviceHandler.mount_device(dev)
def prepare_block_device():
if BlockDeviceHandler.is_any_device_avaible():
module_print('Block device exists')
devices = BlockDeviceHandler.list_connected_devices()
for dev in devices:
premount_path = BlockDeviceHandler.premount_device(dev)
format_device_based_on_config_file(dev, premount_path)
BlockDeviceHandler.unmount_all_premounted_devices()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def module_print(text):
print_text = '[ autoformat disk ] ' + str(text)
print(print_text)
def parse_config_file_from_disk(path, confname='diskconf.json'):
json_path = str(path) + '/' + str(confname)
if not os.path.exists(json_path):
module_print('\tPath not exists: ' + str(json_path))
return None
try:
with open(json_path, 'r') as f:
data = json.load(f)
module_print('config: ' + str(confname) + ' => ' + str(data))
except Exception as e:
module_print('Json parse error: ' + str(e))
return None
return data
def write_state_config_file_from_disk(path, data, confname='diskconf.json'):
json_path = str(path) + '/' + str(confname)
try:
if os.path.exists(json_path):
module_print('\tWrite back format state to ' + str(json_path))
with open(json_path, 'w') as f:
if str(data['is_formatted']).lower() == 'false':
data['is_formatted'] = 'True'
json.dump(data, f, indent=2)
module_print('\t\tSUCCESS')
else:
module_print('State already set')
else:
module_print('diskconf not exists: ' + str(json_path))
except Exception as e:
module_print('\t\tFAILED')
module_print('Write back format state to disk failed:' + str(e))
def save_diskconf_file(path, confname='diskconf.json'):
json_path = str(path) + '/' + str(confname)
save_path = '/tmp'
cmd = 'sudo cp {} {}'.format(json_path, save_path)
exitcode, stdout, stderr = LocalMachine.run_command(cmd)
BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)
def restore_diskconf_file(path, confname='diskconf.json'):
json_path = str(path) + '/' + str(confname)
save_path = '/tmp/' + str(confname)
cmd = 'sudo cp {} {}'.format(save_path, json_path)
exitcode, stdout, stderr = LocalMachine.run_command(cmd)
BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)
cmd = 'sudo rm -f {}'.format(save_path)
exitcode, stdout, stderr = LocalMachine.run_command(cmd)
BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)
def safe_format_disk_check_force_mode(json_data, dev):
dev_data_modified = False
dev_data = BlockDeviceHandler.get_device_info_data(dev)
if json_data['label'] != dev_data['label']:
dev_data_modified = True
if json_data['format'] != dev_data['filesystem']:
dev_data_modified = True
if str(json_data['is_formatted']).lower() == 'false':
if str(json_data['force']).lower(
) == 'true' and dev_data_modified is False:
module_print(
'[i] [format] Block device paramaters not changed but force mode is ON'
)
return True
elif dev_data_modified is True:
module_print(
'[i] [format] Requested block device parameter(s) changed - format'
)
return True
else:
module_print(
'[i] [Skip format] Blockdevice format not needed - label and system not changed'
)
return False
else:
module_print('[i] [is_formatted:True] Blockdevice already formatted.')
return False
def format_device_based_on_config_file(dev, premount_path):
module_print('Format device')
diskconf_path = premount_path
data = parse_config_file_from_disk(diskconf_path)
if data is not None:
if safe_format_disk_check_force_mode(data, dev):
module_print('\tSave disk config file before formatting')
save_diskconf_file(diskconf_path)
module_print('\tUnmount device before formatting')
BlockDeviceHandler.unmount_device(dev)
module_print('\tFormat device')
BlockDeviceHandler.format_ex4(dev, data['label'])
module_print('\tMount formatted device')
mount_point = BlockDeviceHandler.mount_device(dev)
module_print('\tRestore config file to disk after formating')
restore_diskconf_file(mount_point)
module_print('\tSave back the the config file with the new state')
write_state_config_file_from_disk(mount_point, data)
else:
module_print('\tDisk already formatted: {}:{}'.format(dev,
premount_path))
module_print('mount device: ' + str(dev))
mount_point = BlockDeviceHandler.mount_device(dev)
def prepare_block_device():
    """Premount every connected device, format on demand, then unmount all."""
    if not BlockDeviceHandler.is_any_device_avaible():
        return
    module_print('Block device exists')
    for device in BlockDeviceHandler.list_connected_devices():
        premount_path = BlockDeviceHandler.premount_device(device)
        format_device_based_on_config_file(device, premount_path)
    BlockDeviceHandler.unmount_all_premounted_devices()
if __name__ == '__main__':
    # Script entry point: run the autoformat workflow for attached disks.
    prepare_block_device()
<|reserved_special_token_1|>
import BlockDeviceHandler
import json
import LocalMachine
import os
<|reserved_special_token_0|>
def module_print(text):
    """Print *text* prefixed with this module's log tag."""
    print('[ autoformat disk ] ' + str(text))
def parse_config_file_from_disk(path, confname='diskconf.json'):
    """Load and parse <path>/<confname> as JSON.

    Returns the parsed configuration dict, or None when the file is
    missing or the JSON cannot be decoded (both cases are logged, never
    raised).
    """
    config_path = str(path) + '/' + str(confname)
    if not os.path.exists(config_path):
        module_print('\tPath not exists: ' + str(config_path))
        return None
    try:
        with open(config_path, 'r') as config_file:
            parsed = json.load(config_file)
            module_print('config: ' + str(confname) + ' => ' + str(parsed))
    except Exception as error:
        module_print('Json parse error: ' + str(error))
        return None
    return parsed
def write_state_config_file_from_disk(path, data, confname='diskconf.json'):
    """Persist the formatted state back into <path>/<confname>.

    When data['is_formatted'] is the string 'false' it is flipped to
    'True' and the whole config is rewritten with json.dump; otherwise
    the file is left untouched.  All errors are logged instead of raised.

    Bug fix: the original opened the file in 'w' mode *before* checking
    the state, which truncated the config file to zero bytes whenever the
    state was already set.  The file is now only opened for writing when
    an update is actually needed.
    """
    json_path = str(path) + '/' + str(confname)
    try:
        if os.path.exists(json_path):
            if str(data['is_formatted']).lower() == 'false':
                module_print('\tWrite back format state to ' + str(json_path))
                data['is_formatted'] = 'True'
                with open(json_path, 'w') as f:
                    json.dump(data, f, indent=2)
                module_print('\t\tSUCCESS')
            else:
                module_print('State already set')
        else:
            module_print('diskconf not exists: ' + str(json_path))
    except Exception as e:
        module_print('\t\tFAILED')
        module_print('Write back format state to disk failed:' + str(e))
def save_diskconf_file(path, confname='diskconf.json'):
    """Copy <path>/<confname> into /tmp so it survives a disk format."""
    source = str(path) + '/' + str(confname)
    backup_dir = '/tmp'
    copy_cmd = 'sudo cp {} {}'.format(source, backup_dir)
    exitcode, stdout, stderr = LocalMachine.run_command(copy_cmd)
    BlockDeviceHandler.check_exitcode(copy_cmd, exitcode, stderr)
def restore_diskconf_file(path, confname='diskconf.json'):
    """Copy the /tmp backup of *confname* back under *path*, then delete it."""
    target = str(path) + '/' + str(confname)
    backup = '/tmp/' + str(confname)
    # First restore the backup, then remove the temporary copy.
    for command in ('sudo cp {} {}'.format(backup, target),
                    'sudo rm -f {}'.format(backup)):
        exitcode, stdout, stderr = LocalMachine.run_command(command)
        BlockDeviceHandler.check_exitcode(command, exitcode, stderr)
def safe_format_disk_check_force_mode(json_data, dev):
    """Decide whether block device *dev* must be (re)formatted.

    Returns True only when the config says the disk is not yet formatted
    AND either a device parameter (label/filesystem) changed or force
    mode is enabled; False otherwise.
    """
    dev_data = BlockDeviceHandler.get_device_info_data(dev)
    params_changed = (json_data['label'] != dev_data['label'] or
                      json_data['format'] != dev_data['filesystem'])
    if str(json_data['is_formatted']).lower() != 'false':
        module_print('[i] [is_formatted:True] Blockdevice already formatted.')
        return False
    if params_changed:
        module_print(
            '[i] [format] Requested block device parameter(s) changed - format'
        )
        return True
    if str(json_data['force']).lower() == 'true':
        module_print(
            '[i] [format] Block device paramaters not changed but force mode is ON'
        )
        return True
    module_print(
        '[i] [Skip format] Blockdevice format not needed - label and system not changed'
    )
    return False
def format_device_based_on_config_file(dev, premount_path):
    """Format *dev* if its diskconf.json (found under *premount_path*) asks for it.

    Workflow when a format is needed: back up the config to /tmp, unmount,
    format to ext4 with the configured label, remount, restore the config,
    then record the new formatted state back into the file.  When no
    format is needed the device is simply mounted.  With no config file
    the device is left untouched.
    """
    module_print('Format device')
    diskconf_path = premount_path
    data = parse_config_file_from_disk(diskconf_path)
    if data is not None:
        if safe_format_disk_check_force_mode(data, dev):
            # Formatting wipes the disk, so stash the config file in /tmp first.
            module_print('\tSave disk config file before formatting')
            save_diskconf_file(diskconf_path)
            module_print('\tUnmount device before formatting')
            BlockDeviceHandler.unmount_device(dev)
            module_print('\tFormat device')
            BlockDeviceHandler.format_ex4(dev, data['label'])
            module_print('\tMount formatted device')
            mount_point = BlockDeviceHandler.mount_device(dev)
            # Put the config back on the fresh filesystem and persist the
            # new is_formatted state.
            module_print('\tRestore config file to disk after formating')
            restore_diskconf_file(mount_point)
            module_print('\tSave back the the config file with the new state')
            write_state_config_file_from_disk(mount_point, data)
        else:
            # Nothing to format - just make sure the device ends up mounted.
            module_print('\tDisk already formatted: {}:{}'.format(dev,
                premount_path))
            module_print('mount device: ' + str(dev))
            mount_point = BlockDeviceHandler.mount_device(dev)
def prepare_block_device():
    """Premount every connected device, format on demand, then unmount all."""
    if not BlockDeviceHandler.is_any_device_avaible():
        return
    module_print('Block device exists')
    for device in BlockDeviceHandler.list_connected_devices():
        premount_path = BlockDeviceHandler.premount_device(device)
        format_device_based_on_config_file(device, premount_path)
    BlockDeviceHandler.unmount_all_premounted_devices()
if __name__ == '__main__':
    # Script entry point: run the autoformat workflow for attached disks.
    prepare_block_device()
<|reserved_special_token_1|>
import BlockDeviceHandler
import json
import LocalMachine
import os
""" This module automaticly format the disk based on diskconf.json """
def module_print(text):
    """Print *text* prefixed with this module's log tag."""
    print_text = "[ autoformat disk ] " + str(text)
    print(print_text)
def parse_config_file_from_disk(path, confname="diskconf.json"):
    """Load and parse <path>/<confname> as JSON.

    Returns the parsed configuration dict, or None when the file does not
    exist or cannot be decoded (both cases are logged, never raised).
    """
    json_path = str(path) + "/" + str(confname)
    if not os.path.exists(json_path):
        module_print("\tPath not exists: " + str(json_path))
        return None
    try:
        with open(json_path, "r") as f:
            data = json.load(f)
            module_print("config: " + str(confname) + " => " + str(data))
    except Exception as e:
        module_print("Json parse error: " + str(e))
        return None
    return data
def write_state_config_file_from_disk(path, data, confname="diskconf.json"):
    """Persist the formatted state back into <path>/<confname>.

    When data['is_formatted'] is the string "false" it is flipped to
    "True" and the whole config is rewritten with json.dump; otherwise
    the file is left untouched.  All errors are logged instead of raised.

    Bug fix: the original opened the file in "w" mode *before* checking
    the state, which truncated the config file to zero bytes whenever the
    state was already set.  The file is now only opened for writing when
    an update is actually needed.
    """
    json_path = str(path) + "/" + str(confname)
    try:
        if os.path.exists(json_path):
            if str(data['is_formatted']).lower() == "false":
                module_print("\tWrite back format state to " + str(json_path))
                data['is_formatted'] = "True"
                with open(json_path, "w") as f:
                    json.dump(data, f, indent=2)
                module_print("\t\tSUCCESS")
            else:
                module_print("State already set")
        else:
            module_print("diskconf not exists: " + str(json_path))
    except Exception as e:
        module_print("\t\tFAILED")
        module_print("Write back format state to disk failed:" + str(e))
def save_diskconf_file(path, confname="diskconf.json"):
    """Copy <path>/<confname> into /tmp so it survives a disk format."""
    json_path = str(path) + "/" + str(confname)
    save_path = "/tmp"
    cmd = "sudo cp {} {}".format(json_path, save_path)
    exitcode, stdout, stderr = LocalMachine.run_command(cmd)
    BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)
def restore_diskconf_file(path, confname="diskconf.json"):
    """Copy the /tmp backup of *confname* back under *path*, then delete it."""
    json_path = str(path) + "/" + str(confname)
    save_path = "/tmp/" + str(confname)
    cmd = "sudo cp {} {}".format(save_path, json_path)
    exitcode, stdout, stderr = LocalMachine.run_command(cmd)
    BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)
    # Remove the temporary backup once it has been restored.
    cmd = "sudo rm -f {}".format(save_path)
    exitcode, stdout, stderr = LocalMachine.run_command(cmd)
    BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)
def safe_format_disk_check_force_mode(json_data, dev):
    """Return True when block device *dev* has to be (re)formatted.

    Compares the desired state in *json_data* (from diskconf.json) with
    the live device state; formatting is requested when the config is not
    yet marked formatted AND either label/filesystem differ or force mode
    is on.
    """
    dev_data_modified = False
    # Compare the desired label/filesystem against the device's current state.
    dev_data = BlockDeviceHandler.get_device_info_data(dev)
    if json_data['label'] != dev_data['label']:
        dev_data_modified = True
    if json_data['format'] != dev_data['filesystem']:
        dev_data_modified = True
    if str(json_data['is_formatted']).lower() == "false":
        # Force mode formats even when no parameter changed.
        if str(json_data['force']).lower() == "true" and dev_data_modified is False:
            module_print("[i] [format] Block device paramaters not changed but force mode is ON")
            return True
        elif dev_data_modified is True:
            module_print("[i] [format] Requested block device parameter(s) changed - format")
            return True
        else:
            module_print("[i] [Skip format] Blockdevice format not needed - label and system not changed")
            return False
    else:
        module_print("[i] [is_formatted:True] Blockdevice already formatted.")
        return False
def format_device_based_on_config_file(dev, premount_path):
    """Format *dev* if its diskconf.json (found under *premount_path*) asks for it.

    Workflow when a format is needed: back up the config to /tmp, unmount,
    format to ext4 with the configured label, remount, restore the config,
    then record the new formatted state back into the file.  When no
    format is needed the device is simply mounted.  With no config file
    the device is left untouched.
    """
    module_print("Format device")
    diskconf_path = premount_path
    data = parse_config_file_from_disk(diskconf_path)
    if data is not None:
        if safe_format_disk_check_force_mode(data, dev):
            # Formatting wipes the disk, so stash the config file in /tmp first.
            module_print("\tSave disk config file before formatting")
            save_diskconf_file(diskconf_path)
            module_print("\tUnmount device before formatting")
            BlockDeviceHandler.unmount_device(dev)
            module_print("\tFormat device")
            BlockDeviceHandler.format_ex4(dev, data['label'])
            module_print("\tMount formatted device")
            mount_point = BlockDeviceHandler.mount_device(dev)
            # Put the config back on the fresh filesystem and persist the
            # new is_formatted state.
            module_print("\tRestore config file to disk after formating")
            restore_diskconf_file(mount_point)
            module_print("\tSave back the the config file with the new state")
            write_state_config_file_from_disk(mount_point, data)
        else:
            # Nothing to format - just make sure the device ends up mounted.
            module_print("\tDisk already formatted: {}:{}".format(dev, premount_path))
            module_print("mount device: " + str(dev))
            mount_point = BlockDeviceHandler.mount_device(dev)
def prepare_block_device():
    """Premount every connected block device, format on demand, then unmount.

    For each connected device the on-disk diskconf.json decides whether it
    gets formatted; afterwards all premounted devices are unmounted again.
    """
    if BlockDeviceHandler.is_any_device_avaible():
        module_print("Block device exists")
        devices = BlockDeviceHandler.list_connected_devices()
        for dev in devices:
            premount_path = BlockDeviceHandler.premount_device(dev)
            format_device_based_on_config_file(dev, premount_path)
        BlockDeviceHandler.unmount_all_premounted_devices()
if __name__ == "__main__":
prepare_block_device()
#BlockDeviceHandler.unmount_all_devices(del_mount_point=True)
|
flexible
|
{
"blob_id": "927470fe0087b17e5fe67a9b8b3cc13a40d8be1a",
"index": 7554,
"step-1": "<mask token>\n\n\ndef parse_config_file_from_disk(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n if not os.path.exists(json_path):\n module_print('\\tPath not exists: ' + str(json_path))\n return None\n try:\n with open(json_path, 'r') as f:\n data = json.load(f)\n module_print('config: ' + str(confname) + ' => ' + str(data))\n except Exception as e:\n module_print('Json parse error: ' + str(e))\n return None\n return data\n\n\ndef write_state_config_file_from_disk(path, data, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n try:\n if os.path.exists(json_path):\n module_print('\\tWrite back format state to ' + str(json_path))\n with open(json_path, 'w') as f:\n if str(data['is_formatted']).lower() == 'false':\n data['is_formatted'] = 'True'\n json.dump(data, f, indent=2)\n module_print('\\t\\tSUCCESS')\n else:\n module_print('State already set')\n else:\n module_print('diskconf not exists: ' + str(json_path))\n except Exception as e:\n module_print('\\t\\tFAILED')\n module_print('Write back format state to disk failed:' + str(e))\n\n\ndef save_diskconf_file(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n save_path = '/tmp'\n cmd = 'sudo cp {} {}'.format(json_path, save_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n\n\ndef restore_diskconf_file(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n save_path = '/tmp/' + str(confname)\n cmd = 'sudo cp {} {}'.format(save_path, json_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n cmd = 'sudo rm -f {}'.format(save_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n\n\ndef safe_format_disk_check_force_mode(json_data, dev):\n dev_data_modified = False\n dev_data = 
BlockDeviceHandler.get_device_info_data(dev)\n if json_data['label'] != dev_data['label']:\n dev_data_modified = True\n if json_data['format'] != dev_data['filesystem']:\n dev_data_modified = True\n if str(json_data['is_formatted']).lower() == 'false':\n if str(json_data['force']).lower(\n ) == 'true' and dev_data_modified is False:\n module_print(\n '[i] [format] Block device paramaters not changed but force mode is ON'\n )\n return True\n elif dev_data_modified is True:\n module_print(\n '[i] [format] Requested block device parameter(s) changed - format'\n )\n return True\n else:\n module_print(\n '[i] [Skip format] Blockdevice format not needed - label and system not changed'\n )\n return False\n else:\n module_print('[i] [is_formatted:True] Blockdevice already formatted.')\n return False\n\n\ndef format_device_based_on_config_file(dev, premount_path):\n module_print('Format device')\n diskconf_path = premount_path\n data = parse_config_file_from_disk(diskconf_path)\n if data is not None:\n if safe_format_disk_check_force_mode(data, dev):\n module_print('\\tSave disk config file before formatting')\n save_diskconf_file(diskconf_path)\n module_print('\\tUnmount device before formatting')\n BlockDeviceHandler.unmount_device(dev)\n module_print('\\tFormat device')\n BlockDeviceHandler.format_ex4(dev, data['label'])\n module_print('\\tMount formatted device')\n mount_point = BlockDeviceHandler.mount_device(dev)\n module_print('\\tRestore config file to disk after formating')\n restore_diskconf_file(mount_point)\n module_print('\\tSave back the the config file with the new state')\n write_state_config_file_from_disk(mount_point, data)\n else:\n module_print('\\tDisk already formatted: {}:{}'.format(dev,\n premount_path))\n module_print('mount device: ' + str(dev))\n mount_point = BlockDeviceHandler.mount_device(dev)\n\n\ndef prepare_block_device():\n if BlockDeviceHandler.is_any_device_avaible():\n module_print('Block device exists')\n devices = 
BlockDeviceHandler.list_connected_devices()\n for dev in devices:\n premount_path = BlockDeviceHandler.premount_device(dev)\n format_device_based_on_config_file(dev, premount_path)\n BlockDeviceHandler.unmount_all_premounted_devices()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef module_print(text):\n print_text = '[ autoformat disk ] ' + str(text)\n print(print_text)\n\n\ndef parse_config_file_from_disk(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n if not os.path.exists(json_path):\n module_print('\\tPath not exists: ' + str(json_path))\n return None\n try:\n with open(json_path, 'r') as f:\n data = json.load(f)\n module_print('config: ' + str(confname) + ' => ' + str(data))\n except Exception as e:\n module_print('Json parse error: ' + str(e))\n return None\n return data\n\n\ndef write_state_config_file_from_disk(path, data, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n try:\n if os.path.exists(json_path):\n module_print('\\tWrite back format state to ' + str(json_path))\n with open(json_path, 'w') as f:\n if str(data['is_formatted']).lower() == 'false':\n data['is_formatted'] = 'True'\n json.dump(data, f, indent=2)\n module_print('\\t\\tSUCCESS')\n else:\n module_print('State already set')\n else:\n module_print('diskconf not exists: ' + str(json_path))\n except Exception as e:\n module_print('\\t\\tFAILED')\n module_print('Write back format state to disk failed:' + str(e))\n\n\ndef save_diskconf_file(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n save_path = '/tmp'\n cmd = 'sudo cp {} {}'.format(json_path, save_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n\n\ndef restore_diskconf_file(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n save_path = '/tmp/' + str(confname)\n cmd = 'sudo cp {} {}'.format(save_path, json_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n cmd = 'sudo rm -f {}'.format(save_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, 
stderr)\n\n\ndef safe_format_disk_check_force_mode(json_data, dev):\n dev_data_modified = False\n dev_data = BlockDeviceHandler.get_device_info_data(dev)\n if json_data['label'] != dev_data['label']:\n dev_data_modified = True\n if json_data['format'] != dev_data['filesystem']:\n dev_data_modified = True\n if str(json_data['is_formatted']).lower() == 'false':\n if str(json_data['force']).lower(\n ) == 'true' and dev_data_modified is False:\n module_print(\n '[i] [format] Block device paramaters not changed but force mode is ON'\n )\n return True\n elif dev_data_modified is True:\n module_print(\n '[i] [format] Requested block device parameter(s) changed - format'\n )\n return True\n else:\n module_print(\n '[i] [Skip format] Blockdevice format not needed - label and system not changed'\n )\n return False\n else:\n module_print('[i] [is_formatted:True] Blockdevice already formatted.')\n return False\n\n\ndef format_device_based_on_config_file(dev, premount_path):\n module_print('Format device')\n diskconf_path = premount_path\n data = parse_config_file_from_disk(diskconf_path)\n if data is not None:\n if safe_format_disk_check_force_mode(data, dev):\n module_print('\\tSave disk config file before formatting')\n save_diskconf_file(diskconf_path)\n module_print('\\tUnmount device before formatting')\n BlockDeviceHandler.unmount_device(dev)\n module_print('\\tFormat device')\n BlockDeviceHandler.format_ex4(dev, data['label'])\n module_print('\\tMount formatted device')\n mount_point = BlockDeviceHandler.mount_device(dev)\n module_print('\\tRestore config file to disk after formating')\n restore_diskconf_file(mount_point)\n module_print('\\tSave back the the config file with the new state')\n write_state_config_file_from_disk(mount_point, data)\n else:\n module_print('\\tDisk already formatted: {}:{}'.format(dev,\n premount_path))\n module_print('mount device: ' + str(dev))\n mount_point = BlockDeviceHandler.mount_device(dev)\n\n\ndef prepare_block_device():\n if 
BlockDeviceHandler.is_any_device_avaible():\n module_print('Block device exists')\n devices = BlockDeviceHandler.list_connected_devices()\n for dev in devices:\n premount_path = BlockDeviceHandler.premount_device(dev)\n format_device_based_on_config_file(dev, premount_path)\n BlockDeviceHandler.unmount_all_premounted_devices()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef module_print(text):\n print_text = '[ autoformat disk ] ' + str(text)\n print(print_text)\n\n\ndef parse_config_file_from_disk(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n if not os.path.exists(json_path):\n module_print('\\tPath not exists: ' + str(json_path))\n return None\n try:\n with open(json_path, 'r') as f:\n data = json.load(f)\n module_print('config: ' + str(confname) + ' => ' + str(data))\n except Exception as e:\n module_print('Json parse error: ' + str(e))\n return None\n return data\n\n\ndef write_state_config_file_from_disk(path, data, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n try:\n if os.path.exists(json_path):\n module_print('\\tWrite back format state to ' + str(json_path))\n with open(json_path, 'w') as f:\n if str(data['is_formatted']).lower() == 'false':\n data['is_formatted'] = 'True'\n json.dump(data, f, indent=2)\n module_print('\\t\\tSUCCESS')\n else:\n module_print('State already set')\n else:\n module_print('diskconf not exists: ' + str(json_path))\n except Exception as e:\n module_print('\\t\\tFAILED')\n module_print('Write back format state to disk failed:' + str(e))\n\n\ndef save_diskconf_file(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n save_path = '/tmp'\n cmd = 'sudo cp {} {}'.format(json_path, save_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n\n\ndef restore_diskconf_file(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n save_path = '/tmp/' + str(confname)\n cmd = 'sudo cp {} {}'.format(save_path, json_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n cmd = 'sudo rm -f {}'.format(save_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, 
stderr)\n\n\ndef safe_format_disk_check_force_mode(json_data, dev):\n dev_data_modified = False\n dev_data = BlockDeviceHandler.get_device_info_data(dev)\n if json_data['label'] != dev_data['label']:\n dev_data_modified = True\n if json_data['format'] != dev_data['filesystem']:\n dev_data_modified = True\n if str(json_data['is_formatted']).lower() == 'false':\n if str(json_data['force']).lower(\n ) == 'true' and dev_data_modified is False:\n module_print(\n '[i] [format] Block device paramaters not changed but force mode is ON'\n )\n return True\n elif dev_data_modified is True:\n module_print(\n '[i] [format] Requested block device parameter(s) changed - format'\n )\n return True\n else:\n module_print(\n '[i] [Skip format] Blockdevice format not needed - label and system not changed'\n )\n return False\n else:\n module_print('[i] [is_formatted:True] Blockdevice already formatted.')\n return False\n\n\ndef format_device_based_on_config_file(dev, premount_path):\n module_print('Format device')\n diskconf_path = premount_path\n data = parse_config_file_from_disk(diskconf_path)\n if data is not None:\n if safe_format_disk_check_force_mode(data, dev):\n module_print('\\tSave disk config file before formatting')\n save_diskconf_file(diskconf_path)\n module_print('\\tUnmount device before formatting')\n BlockDeviceHandler.unmount_device(dev)\n module_print('\\tFormat device')\n BlockDeviceHandler.format_ex4(dev, data['label'])\n module_print('\\tMount formatted device')\n mount_point = BlockDeviceHandler.mount_device(dev)\n module_print('\\tRestore config file to disk after formating')\n restore_diskconf_file(mount_point)\n module_print('\\tSave back the the config file with the new state')\n write_state_config_file_from_disk(mount_point, data)\n else:\n module_print('\\tDisk already formatted: {}:{}'.format(dev,\n premount_path))\n module_print('mount device: ' + str(dev))\n mount_point = BlockDeviceHandler.mount_device(dev)\n\n\ndef prepare_block_device():\n if 
BlockDeviceHandler.is_any_device_avaible():\n module_print('Block device exists')\n devices = BlockDeviceHandler.list_connected_devices()\n for dev in devices:\n premount_path = BlockDeviceHandler.premount_device(dev)\n format_device_based_on_config_file(dev, premount_path)\n BlockDeviceHandler.unmount_all_premounted_devices()\n\n\nif __name__ == '__main__':\n prepare_block_device()\n",
"step-4": "import BlockDeviceHandler\nimport json\nimport LocalMachine\nimport os\n<mask token>\n\n\ndef module_print(text):\n print_text = '[ autoformat disk ] ' + str(text)\n print(print_text)\n\n\ndef parse_config_file_from_disk(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n if not os.path.exists(json_path):\n module_print('\\tPath not exists: ' + str(json_path))\n return None\n try:\n with open(json_path, 'r') as f:\n data = json.load(f)\n module_print('config: ' + str(confname) + ' => ' + str(data))\n except Exception as e:\n module_print('Json parse error: ' + str(e))\n return None\n return data\n\n\ndef write_state_config_file_from_disk(path, data, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n try:\n if os.path.exists(json_path):\n module_print('\\tWrite back format state to ' + str(json_path))\n with open(json_path, 'w') as f:\n if str(data['is_formatted']).lower() == 'false':\n data['is_formatted'] = 'True'\n json.dump(data, f, indent=2)\n module_print('\\t\\tSUCCESS')\n else:\n module_print('State already set')\n else:\n module_print('diskconf not exists: ' + str(json_path))\n except Exception as e:\n module_print('\\t\\tFAILED')\n module_print('Write back format state to disk failed:' + str(e))\n\n\ndef save_diskconf_file(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n save_path = '/tmp'\n cmd = 'sudo cp {} {}'.format(json_path, save_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n\n\ndef restore_diskconf_file(path, confname='diskconf.json'):\n json_path = str(path) + '/' + str(confname)\n save_path = '/tmp/' + str(confname)\n cmd = 'sudo cp {} {}'.format(save_path, json_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n cmd = 'sudo rm -f {}'.format(save_path)\n exitcode, stdout, stderr = 
LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n\n\ndef safe_format_disk_check_force_mode(json_data, dev):\n dev_data_modified = False\n dev_data = BlockDeviceHandler.get_device_info_data(dev)\n if json_data['label'] != dev_data['label']:\n dev_data_modified = True\n if json_data['format'] != dev_data['filesystem']:\n dev_data_modified = True\n if str(json_data['is_formatted']).lower() == 'false':\n if str(json_data['force']).lower(\n ) == 'true' and dev_data_modified is False:\n module_print(\n '[i] [format] Block device paramaters not changed but force mode is ON'\n )\n return True\n elif dev_data_modified is True:\n module_print(\n '[i] [format] Requested block device parameter(s) changed - format'\n )\n return True\n else:\n module_print(\n '[i] [Skip format] Blockdevice format not needed - label and system not changed'\n )\n return False\n else:\n module_print('[i] [is_formatted:True] Blockdevice already formatted.')\n return False\n\n\ndef format_device_based_on_config_file(dev, premount_path):\n module_print('Format device')\n diskconf_path = premount_path\n data = parse_config_file_from_disk(diskconf_path)\n if data is not None:\n if safe_format_disk_check_force_mode(data, dev):\n module_print('\\tSave disk config file before formatting')\n save_diskconf_file(diskconf_path)\n module_print('\\tUnmount device before formatting')\n BlockDeviceHandler.unmount_device(dev)\n module_print('\\tFormat device')\n BlockDeviceHandler.format_ex4(dev, data['label'])\n module_print('\\tMount formatted device')\n mount_point = BlockDeviceHandler.mount_device(dev)\n module_print('\\tRestore config file to disk after formating')\n restore_diskconf_file(mount_point)\n module_print('\\tSave back the the config file with the new state')\n write_state_config_file_from_disk(mount_point, data)\n else:\n module_print('\\tDisk already formatted: {}:{}'.format(dev,\n premount_path))\n module_print('mount device: ' + str(dev))\n mount_point = 
BlockDeviceHandler.mount_device(dev)\n\n\ndef prepare_block_device():\n if BlockDeviceHandler.is_any_device_avaible():\n module_print('Block device exists')\n devices = BlockDeviceHandler.list_connected_devices()\n for dev in devices:\n premount_path = BlockDeviceHandler.premount_device(dev)\n format_device_based_on_config_file(dev, premount_path)\n BlockDeviceHandler.unmount_all_premounted_devices()\n\n\nif __name__ == '__main__':\n prepare_block_device()\n",
"step-5": "import BlockDeviceHandler\nimport json\nimport LocalMachine\nimport os\n\n\"\"\" This module automaticly format the disk based on diskconf.json \"\"\"\n\ndef module_print(text):\n print_text = \"[ autoformat disk ] \" + str(text)\n print(print_text)\n\ndef parse_config_file_from_disk(path, confname=\"diskconf.json\"):\n json_path = str(path) + \"/\" + str(confname)\n if not os.path.exists(json_path):\n module_print(\"\\tPath not exists: \" + str(json_path))\n return None\n try:\n with open(json_path, \"r\") as f:\n data = json.load(f)\n module_print(\"config: \" + str(confname) + \" => \" + str(data))\n except Exception as e:\n module_print(\"Json parse error: \" + str(e))\n return None\n return data\n\ndef write_state_config_file_from_disk(path, data, confname=\"diskconf.json\"):\n json_path = str(path) + \"/\" + str(confname)\n try:\n if os.path.exists(json_path):\n module_print(\"\\tWrite back format state to \" + str(json_path))\n with open(json_path, \"w\") as f:\n if str(data['is_formatted']).lower() == \"false\":\n data['is_formatted'] = \"True\"\n json.dump(data, f, indent=2)\n module_print(\"\\t\\tSUCCESS\")\n else:\n module_print(\"State already set\")\n else:\n module_print(\"diskconf not exists: \" + str(json_path))\n except Exception as e:\n module_print(\"\\t\\tFAILED\")\n module_print(\"Write back format state to disk failed:\" + str(e))\n\ndef save_diskconf_file(path, confname=\"diskconf.json\"):\n json_path = str(path) + \"/\" + str(confname)\n save_path = \"/tmp\"\n cmd = \"sudo cp {} {}\".format(json_path, save_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n\ndef restore_diskconf_file(path, confname=\"diskconf.json\"):\n json_path = str(path) + \"/\" + str(confname)\n save_path = \"/tmp/\" + str(confname)\n cmd = \"sudo cp {} {}\".format(save_path, json_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, 
exitcode, stderr)\n cmd = \"sudo rm -f {}\".format(save_path)\n exitcode, stdout, stderr = LocalMachine.run_command(cmd)\n BlockDeviceHandler.check_exitcode(cmd, exitcode, stderr)\n\ndef safe_format_disk_check_force_mode(json_data, dev):\n dev_data_modified = False\n # disk is not formatted\n dev_data = BlockDeviceHandler.get_device_info_data(dev)\n if json_data['label'] != dev_data['label']:\n dev_data_modified = True\n if json_data['format'] != dev_data['filesystem']:\n dev_data_modified = True\n\n if str(json_data['is_formatted']).lower() == \"false\":\n if str(json_data['force']).lower() == \"true\" and dev_data_modified is False:\n module_print(\"[i] [format] Block device paramaters not changed but force mode is ON\")\n return True\n elif dev_data_modified is True:\n module_print(\"[i] [format] Requested block device parameter(s) changed - format\")\n return True\n else:\n module_print(\"[i] [Skip format] Blockdevice format not needed - label and system not changed\")\n return False\n else:\n module_print(\"[i] [is_formatted:True] Blockdevice already formatted.\")\n return False\n\ndef format_device_based_on_config_file(dev, premount_path):\n module_print(\"Format device\")\n diskconf_path = premount_path\n data = parse_config_file_from_disk(diskconf_path)\n if data is not None:\n if safe_format_disk_check_force_mode(data, dev):\n module_print(\"\\tSave disk config file before formatting\")\n save_diskconf_file(diskconf_path)\n module_print(\"\\tUnmount device before formatting\")\n BlockDeviceHandler.unmount_device(dev)\n module_print(\"\\tFormat device\")\n BlockDeviceHandler.format_ex4(dev, data['label'])\n module_print(\"\\tMount formatted device\")\n mount_point = BlockDeviceHandler.mount_device(dev)\n module_print(\"\\tRestore config file to disk after formating\")\n restore_diskconf_file(mount_point)\n module_print(\"\\tSave back the the config file with the new state\")\n write_state_config_file_from_disk(mount_point, data)\n else:\n 
module_print(\"\\tDisk already formatted: {}:{}\".format(dev, premount_path))\n module_print(\"mount device: \" + str(dev))\n mount_point = BlockDeviceHandler.mount_device(dev)\n\ndef prepare_block_device():\n if BlockDeviceHandler.is_any_device_avaible():\n module_print(\"Block device exists\")\n devices = BlockDeviceHandler.list_connected_devices()\n for dev in devices:\n premount_path = BlockDeviceHandler.premount_device(dev)\n format_device_based_on_config_file(dev, premount_path)\n BlockDeviceHandler.unmount_all_premounted_devices()\n\nif __name__ == \"__main__\":\n prepare_block_device()\n #BlockDeviceHandler.unmount_all_devices(del_mount_point=True)\n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
@pytest.mark.django_db
class TestImage(TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def setUp(self):
file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))
attachment1 = G(Attachment, original_filename=
'user_friendly_filename1.jpg', file=file1)
self.mock_image_mapper = Mock()
self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper,
attachments=[attachment1])
self.expected_html = (
'<figure><img conf="dummy.jpg" alt="alt_text" /><figcaption>alt_text</figcaption></figure>'
)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.django_db
class TestImage(TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def setUp(self):
file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))
attachment1 = G(Attachment, original_filename=
'user_friendly_filename1.jpg', file=file1)
self.mock_image_mapper = Mock()
self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper,
attachments=[attachment1])
self.expected_html = (
'<figure><img conf="dummy.jpg" alt="alt_text" /><figcaption>alt_text</figcaption></figure>'
)
def test_render_image(self):
self.mock_image_mapper.return_value = self.MOCK_SRC
html = self.renderer.image(self.MOCK_SRC, self.MOCK_TITLE, self.
MOCK_ALT_TEXT)
self.mock_image_mapper.assert_called_with(self.MOCK_SRC, self.
renderer.attachments)
assert html == self.expected_html
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.django_db
class TestImage(TestCase):
    """Tests for XMDRenderer.image(): mapper-resolved HTML <figure> output."""
    # Inputs handed to the renderer under test.
    MOCK_SRC = 'dummy.jpg'
    MOCK_TITLE = 'title'
    MOCK_ALT_TEXT = 'alt_text'

    def setUp(self):
        """Build a renderer wired to a mocked image mapper and one attachment."""
        # Minimal in-memory upload backing the Attachment fixture.
        file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))
        attachment1 = G(Attachment, original_filename=
            'user_friendly_filename1.jpg', file=file1)
        # The mapper is mocked so the test controls the resolved image path.
        self.mock_image_mapper = Mock()
        self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper,
            attachments=[attachment1])
        # NOTE(review): the attribute is literally `conf=` rather than `src=`;
        # assumed intentional for XMDRenderer's output — confirm against the renderer.
        self.expected_html = (
            '<figure><img conf="dummy.jpg" alt="alt_text" /><figcaption>alt_text</figcaption></figure>'
            )

    def test_render_image(self):
        """image() must consult the mapper and wrap the result in <figure>."""
        self.mock_image_mapper.return_value = self.MOCK_SRC
        html = self.renderer.image(self.MOCK_SRC, self.MOCK_TITLE, self.
            MOCK_ALT_TEXT)
        # The mapper receives the raw src plus the renderer's attachment list.
        self.mock_image_mapper.assert_called_with(self.MOCK_SRC, self.
            renderer.attachments)
        assert html == self.expected_html
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from io import BytesIO
from unittest import TestCase
from unittest.mock import patch, Mock
import pytest
from django.core.files import File
from django_dynamic_fixture import G
from magplan.models import Attachment
from magplan.xmd.renderer import XMDRenderer
from magplan.xmd.mappers import plan_internal_mapper
@pytest.mark.django_db
class TestImage(TestCase):
    """Verify XMDRenderer.image() renders mapper output as a <figure> element."""
    # Fixed arguments used by the test method below.
    MOCK_SRC = 'dummy.jpg'
    MOCK_TITLE = 'title'
    MOCK_ALT_TEXT = 'alt_text'

    def setUp(self):
        """Prepare an XMDRenderer whose image mapper is a Mock."""
        # Tiny in-memory file so the Attachment fixture has real content.
        file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))
        attachment1 = G(Attachment, original_filename=
            'user_friendly_filename1.jpg', file=file1)
        self.mock_image_mapper = Mock()
        self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper,
            attachments=[attachment1])
        # NOTE(review): attribute name is `conf`, not `src` — verify this
        # matches XMDRenderer's actual markup contract.
        self.expected_html = (
            '<figure><img conf="dummy.jpg" alt="alt_text" /><figcaption>alt_text</figcaption></figure>'
            )

    def test_render_image(self):
        """The rendered markup should embed the mapper-resolved src and alt text."""
        self.mock_image_mapper.return_value = self.MOCK_SRC
        html = self.renderer.image(self.MOCK_SRC, self.MOCK_TITLE, self.
            MOCK_ALT_TEXT)
        # Mapper must be called with the original src and the attachments.
        self.mock_image_mapper.assert_called_with(self.MOCK_SRC, self.
            renderer.attachments)
        assert html == self.expected_html
<|reserved_special_token_1|>
"""
Every block element test will be automatically
wrapped inside `<p></p>\n`. Thats why every block
test should include this wrapper tag.
"""
from io import BytesIO
from unittest import TestCase
from unittest.mock import patch, Mock
import pytest
from django.core.files import File
from django_dynamic_fixture import G
from magplan.models import Attachment
from magplan.xmd.renderer import XMDRenderer
from magplan.xmd.mappers import plan_internal_mapper
@pytest.mark.django_db
class TestImage(TestCase):
    """Unit tests for XMDRenderer.image()."""

    MOCK_SRC = 'dummy.jpg'
    MOCK_TITLE = 'title'
    MOCK_ALT_TEXT = 'alt_text'

    def setUp(self):
        """Create a renderer with a mocked image mapper and a single attachment."""
        backing_file = File(name='file1.jpg', file=BytesIO(b'abcdef'))
        fixture_attachment = G(
            Attachment,
            original_filename='user_friendly_filename1.jpg',
            file=backing_file,
        )

        self.mock_image_mapper = Mock()
        self.renderer = XMDRenderer(
            image_mapper=self.mock_image_mapper,
            attachments=[fixture_attachment],
        )

        self.expected_html = (
            '<figure>'
            '<img conf="dummy.jpg" alt="alt_text" /><figcaption>alt_text</figcaption>'
            '</figure>'
        )

    def test_render_image(self):
        """The rendered markup must be built from the mapper-resolved src."""
        self.mock_image_mapper.return_value = self.MOCK_SRC

        html = self.renderer.image(self.MOCK_SRC, self.MOCK_TITLE, self.MOCK_ALT_TEXT)

        self.mock_image_mapper.assert_called_with(self.MOCK_SRC, self.renderer.attachments)
        assert html == self.expected_html
|
flexible
|
{
"blob_id": "e5bf57e7a171f7e42928b78d09dda7593a231cf9",
"index": 9841,
"step-1": "<mask token>\n\n\n@pytest.mark.django_db\nclass TestImage(TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def setUp(self):\n file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))\n attachment1 = G(Attachment, original_filename=\n 'user_friendly_filename1.jpg', file=file1)\n self.mock_image_mapper = Mock()\n self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper,\n attachments=[attachment1])\n self.expected_html = (\n '<figure><img conf=\"dummy.jpg\" alt=\"alt_text\" /><figcaption>alt_text</figcaption></figure>'\n )\n <mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.mark.django_db\nclass TestImage(TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def setUp(self):\n file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))\n attachment1 = G(Attachment, original_filename=\n 'user_friendly_filename1.jpg', file=file1)\n self.mock_image_mapper = Mock()\n self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper,\n attachments=[attachment1])\n self.expected_html = (\n '<figure><img conf=\"dummy.jpg\" alt=\"alt_text\" /><figcaption>alt_text</figcaption></figure>'\n )\n\n def test_render_image(self):\n self.mock_image_mapper.return_value = self.MOCK_SRC\n html = self.renderer.image(self.MOCK_SRC, self.MOCK_TITLE, self.\n MOCK_ALT_TEXT)\n self.mock_image_mapper.assert_called_with(self.MOCK_SRC, self.\n renderer.attachments)\n assert html == self.expected_html\n",
"step-3": "<mask token>\n\n\n@pytest.mark.django_db\nclass TestImage(TestCase):\n MOCK_SRC = 'dummy.jpg'\n MOCK_TITLE = 'title'\n MOCK_ALT_TEXT = 'alt_text'\n\n def setUp(self):\n file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))\n attachment1 = G(Attachment, original_filename=\n 'user_friendly_filename1.jpg', file=file1)\n self.mock_image_mapper = Mock()\n self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper,\n attachments=[attachment1])\n self.expected_html = (\n '<figure><img conf=\"dummy.jpg\" alt=\"alt_text\" /><figcaption>alt_text</figcaption></figure>'\n )\n\n def test_render_image(self):\n self.mock_image_mapper.return_value = self.MOCK_SRC\n html = self.renderer.image(self.MOCK_SRC, self.MOCK_TITLE, self.\n MOCK_ALT_TEXT)\n self.mock_image_mapper.assert_called_with(self.MOCK_SRC, self.\n renderer.attachments)\n assert html == self.expected_html\n",
"step-4": "<mask token>\nfrom io import BytesIO\nfrom unittest import TestCase\nfrom unittest.mock import patch, Mock\nimport pytest\nfrom django.core.files import File\nfrom django_dynamic_fixture import G\nfrom magplan.models import Attachment\nfrom magplan.xmd.renderer import XMDRenderer\nfrom magplan.xmd.mappers import plan_internal_mapper\n\n\n@pytest.mark.django_db\nclass TestImage(TestCase):\n MOCK_SRC = 'dummy.jpg'\n MOCK_TITLE = 'title'\n MOCK_ALT_TEXT = 'alt_text'\n\n def setUp(self):\n file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))\n attachment1 = G(Attachment, original_filename=\n 'user_friendly_filename1.jpg', file=file1)\n self.mock_image_mapper = Mock()\n self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper,\n attachments=[attachment1])\n self.expected_html = (\n '<figure><img conf=\"dummy.jpg\" alt=\"alt_text\" /><figcaption>alt_text</figcaption></figure>'\n )\n\n def test_render_image(self):\n self.mock_image_mapper.return_value = self.MOCK_SRC\n html = self.renderer.image(self.MOCK_SRC, self.MOCK_TITLE, self.\n MOCK_ALT_TEXT)\n self.mock_image_mapper.assert_called_with(self.MOCK_SRC, self.\n renderer.attachments)\n assert html == self.expected_html\n",
"step-5": "\"\"\"\nEvery block element test will be automatically\nwrapped inside `<p></p>\\n`. Thats why every block\n\ntest should include this wrapper tag.\n\"\"\"\nfrom io import BytesIO\nfrom unittest import TestCase\nfrom unittest.mock import patch, Mock\n\nimport pytest\nfrom django.core.files import File\nfrom django_dynamic_fixture import G\n\nfrom magplan.models import Attachment\nfrom magplan.xmd.renderer import XMDRenderer\nfrom magplan.xmd.mappers import plan_internal_mapper\n\n\n@pytest.mark.django_db\nclass TestImage(TestCase):\n MOCK_SRC = 'dummy.jpg'\n MOCK_TITLE = 'title'\n MOCK_ALT_TEXT = 'alt_text'\n\n def setUp(self):\n file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))\n attachment1 = G(Attachment, original_filename='user_friendly_filename1.jpg', file=file1)\n\n self.mock_image_mapper = Mock()\n\n self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper, attachments=[attachment1])\n\n self.expected_html = (\n '<figure>'\n '<img conf=\"dummy.jpg\" alt=\"alt_text\" /><figcaption>alt_text</figcaption>'\n '</figure>'\n )\n\n def test_render_image(self):\n self.mock_image_mapper.return_value = self.MOCK_SRC\n\n html = self.renderer.image(self.MOCK_SRC, self.MOCK_TITLE, self.MOCK_ALT_TEXT)\n\n self.mock_image_mapper.assert_called_with(self.MOCK_SRC, self.renderer.attachments)\n assert html == self.expected_html\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import os,sys
import logging
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
def create_app():
    """Construct the Flask application and attach the Bootstrap extension.

    Returns:
        Flask: the freshly created application instance.
    """
    application = Flask(__name__)
    Bootstrap(application)
    return application
# Module-level application bootstrap: runs on import, and order matters —
# all config keys must be set before SQLAlchemy binds to the app.
logging.basicConfig(level=logging.DEBUG)
app = create_app()
app.config['WTF_CSRF_ENABLED'] = True
# SECURITY: hard-coded secret key; load from the environment in production.
app.config['SECRET_KEY'] = 'you-will-never-guess'
# SQLite database lives at <project root>/db/micro_scrabble.db
# (two directory levels above this file).
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))),'db','micro_scrabble.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
#app.config.from_object('flask_config')

# Imported last so the views module can import `app` and `db` from this
# package without triggering a circular import.
from . import views
|
normal
|
{
"blob_id": "bd726c86bdecd0b63eb48d056932706d3ecf147d",
"index": 7665,
"step-1": "<mask token>\n\n\ndef create_app():\n app = Flask(__name__)\n Bootstrap(app)\n return app\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_app():\n app = Flask(__name__)\n Bootstrap(app)\n return app\n\n\nlogging.basicConfig(level=logging.DEBUG)\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef create_app():\n app = Flask(__name__)\n Bootstrap(app)\n return app\n\n\nlogging.basicConfig(level=logging.DEBUG)\napp = create_app()\napp.config['WTF_CSRF_ENABLED'] = True\napp.config['SECRET_KEY'] = 'you-will-never-guess'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(os.path\n .dirname(os.path.dirname(os.path.realpath(__file__))), 'db',\n 'micro_scrabble.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\ndb = SQLAlchemy(app)\n<mask token>\n",
"step-4": "import os, sys\nimport logging\nfrom flask import Flask\nfrom flask_bootstrap import Bootstrap\nfrom flask_sqlalchemy import SQLAlchemy\n\n\ndef create_app():\n app = Flask(__name__)\n Bootstrap(app)\n return app\n\n\nlogging.basicConfig(level=logging.DEBUG)\napp = create_app()\napp.config['WTF_CSRF_ENABLED'] = True\napp.config['SECRET_KEY'] = 'you-will-never-guess'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(os.path\n .dirname(os.path.dirname(os.path.realpath(__file__))), 'db',\n 'micro_scrabble.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\ndb = SQLAlchemy(app)\nfrom . import views\n",
"step-5": "import os,sys\nimport logging\nfrom flask import Flask\nfrom flask_bootstrap import Bootstrap\nfrom flask_sqlalchemy import SQLAlchemy\n\ndef create_app():\n app = Flask(__name__)\n Bootstrap(app)\n return app\n\nlogging.basicConfig(level=logging.DEBUG)\napp = create_app()\napp.config['WTF_CSRF_ENABLED'] = True\napp.config['SECRET_KEY'] = 'you-will-never-guess'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))),'db','micro_scrabble.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\ndb = SQLAlchemy(app)\n#app.config.from_object('flask_config')\n\nfrom . import views\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
orange.eat()
apple.eat()
<|reserved_special_token_1|>
# Pull every public name (orange, apple, ...) from the fruits package.
# NOTE(review): star imports hide where these names come from — assumed the
# fruit objects are module-level instances in foods.fruits; confirm there.
from foods.fruits import *
# Exercise the imported fruit objects' eat() behaviour.
orange.eat()
apple.eat()
|
flexible
|
{
"blob_id": "ad84a5bfcf82dff1f4a7e8f08f3c4243ad24de52",
"index": 7318,
"step-1": "<mask token>\n",
"step-2": "<mask token>\norange.eat()\napple.eat()\n",
"step-3": "from foods.fruits import *\norange.eat()\napple.eat()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for j in range(1, 5):
for k in range(1, 14):
if j == 1:
cardlist.append(['S', '{}'.format(k)])
elif j == 2:
cardlist.append(['H', '{}'.format(k)])
elif j == 3:
cardlist.append(['C', '{}'.format(k)])
elif j == 4:
cardlist.append(['D', '{}'.format(k)])
<|reserved_special_token_0|>
for i in range(num):
card.append(input().split())
for i in range(num):
cardlist.remove(card[i])
for i in range(52 - num):
print('{0} {1}'.format(cardlist[i][0], cardlist[i][1]))
<|reserved_special_token_1|>
# Build the full 52-card deck as [suit, rank-string] pairs, in the fixed
# suit order S, H, C, D with ranks 1..13 inside each suit.
cardlist = []
card = []  # cards reported as already held, read from stdin
for j in range(1, 5):
    for k in range(1, 14):
        if j == 1:
            cardlist.append(['S', '{}'.format(k)])
        elif j == 2:
            cardlist.append(['H', '{}'.format(k)])
        elif j == 3:
            cardlist.append(['C', '{}'.format(k)])
        elif j == 4:
            cardlist.append(['D', '{}'.format(k)])
# First input line: number of held cards; each following line is "<suit> <rank>".
num = int(input())
for i in range(num):
    card.append(input().split())
# Remove every held card from the deck (assumes each input card exists once).
for i in range(num):
    cardlist.remove(card[i])
# Print the 52 - num missing cards in deck order.
for i in range(52 - num):
    print('{0} {1}'.format(cardlist[i][0], cardlist[i][1]))
<|reserved_special_token_1|>
# Enumerate the standard deck in S, H, C, D order (ranks 1..13 per suit),
# subtract the cards listed on stdin, and print whatever is missing.
cardlist = []
card = []
for suit in "SHCD":
    for rank in range(1, 14):
        cardlist.append([suit, "{}".format(rank)])

# First line: how many held cards follow; each next line is "<suit> <rank>".
num = int(input())
for _ in range(num):
    card.append(input().split())

# Drop every held card from the deck.
for held in card:
    cardlist.remove(held)

# Emit the remaining 52 - num cards in deck order.
for remaining in cardlist[:52 - num]:
    print("{0} {1}".format(remaining[0], remaining[1]))
|
flexible
|
{
"blob_id": "937a101cf5c7e943fc62d18b77357eea151fdfaf",
"index": 7789,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor j in range(1, 5):\n for k in range(1, 14):\n if j == 1:\n cardlist.append(['S', '{}'.format(k)])\n elif j == 2:\n cardlist.append(['H', '{}'.format(k)])\n elif j == 3:\n cardlist.append(['C', '{}'.format(k)])\n elif j == 4:\n cardlist.append(['D', '{}'.format(k)])\n<mask token>\nfor i in range(num):\n card.append(input().split())\nfor i in range(num):\n cardlist.remove(card[i])\nfor i in range(52 - num):\n print('{0} {1}'.format(cardlist[i][0], cardlist[i][1]))\n",
"step-3": "cardlist = []\ncard = []\nfor j in range(1, 5):\n for k in range(1, 14):\n if j == 1:\n cardlist.append(['S', '{}'.format(k)])\n elif j == 2:\n cardlist.append(['H', '{}'.format(k)])\n elif j == 3:\n cardlist.append(['C', '{}'.format(k)])\n elif j == 4:\n cardlist.append(['D', '{}'.format(k)])\nnum = int(input())\nfor i in range(num):\n card.append(input().split())\nfor i in range(num):\n cardlist.remove(card[i])\nfor i in range(52 - num):\n print('{0} {1}'.format(cardlist[i][0], cardlist[i][1]))\n",
"step-4": "cardlist = []\ncard = []\n\nfor j in range(1,5):\n for k in range(1,14):\n if j == 1:\n cardlist.append([\"S\", \"{}\".format(k)])\n elif j == 2:\n cardlist.append([\"H\", \"{}\".format(k)])\n elif j == 3:\n cardlist.append([\"C\", \"{}\".format(k)])\n elif j == 4:\n cardlist.append([\"D\", \"{}\".format(k)])\n\nnum = int(input())\n\nfor i in range(num):\n card.append(input().split())\n\nfor i in range(num):\n cardlist.remove(card[i])\n\nfor i in range(52-num):\n print(\"{0} {1}\".format(cardlist[i][0], cardlist[i][1]))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
table.py [-m] base1 base2 ... baseN
Combines output from base1.txt, base2.txt, etc., which are created by
the TestDriver (such as timcv.py) output, and displays tabulated
comparison statistics to stdout. Each input file is represented by
one column in the table.
Optional argument -m shows a final column with the mean value of each
statistic.
"""
def suck(f):
    """Parse one TestDriver output stream and extract its summary statistics.

    Args:
        f: an open text file (or file-like object) positioned at the start
           of the driver output.

    Returns:
        A 12-tuple: (ham tested, spam tested, false positives,
        false negatives, unsures, fp %, fn %, unsure %, actual cost,
        best cost, (ham mean, ham sdev), (spam mean, spam sdev)).
        Fields missing from the stream keep their zero defaults.
    """
    hamdevall = spamdevall = (0.0, 0.0)
    cost = 0.0
    bestcost = 0.0
    fp = 0
    fn = 0
    un = 0
    fpp = 0.0
    fnp = 0.0
    unp = 0.0
    htest = 0
    stest = 0
    get = f.readline
    while 1:
        line = get()
        if not line:
            # BUGFIX: readline() returns '' at EOF; without this check a
            # truncated file (no "all runs cost" line) spun forever.
            break
        if line.startswith('-> <stat> tested'):
            print(line, end=' ')
        elif line.find(' items; mean ') > 0 and line.find('for all runs') > 0:
            # e.g. "-> <stat> Ham distribution for all runs: 100 items; mean 1.5; sdev 0.5"
            vals = line.split(';')
            mean = float(vals[1].split()[-1])
            sdev = float(vals[2].split()[-1])
            val = (mean, sdev)
            ntested = int(vals[0].split()[-2])
            typ = vals[0].split()[2]
            if line.find('for all runs') != -1:
                if typ == 'Ham':
                    hamdevall = val
                    htest = ntested
                else:
                    spamdevall = val
                    stest = ntested
        elif line.startswith('-> best cost for all runs: $'):
            bestcost = float(line.split('$')[-1])
        elif line.startswith('-> <stat> all runs false positives: '):
            fp = int(line.split()[-1])
        elif line.startswith('-> <stat> all runs false negatives: '):
            fn = int(line.split()[-1])
        elif line.startswith('-> <stat> all runs unsure: '):
            un = int(line.split()[-1])
        elif line.startswith('-> <stat> all runs false positive %: '):
            fpp = float(line.split()[-1])
        elif line.startswith('-> <stat> all runs false negative %: '):
            fnp = float(line.split()[-1])
        elif line.startswith('-> <stat> all runs unsure %: '):
            unp = float(line.split()[-1])
        elif line.startswith('-> <stat> all runs cost: '):
            # The cost line is last in the driver output; stop scanning here.
            cost = float(line.split('$')[-1])
            break
    return (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost,
            hamdevall, spamdevall)
def windowsfy(fn):
    """Return *fn* with a '.txt' suffix when that file exists, else *fn* unchanged."""
    import os
    candidate = fn + '.txt'
    return candidate if os.path.exists(candidate) else fn
def table():
    """Read each TestDriver output file named on the command line and print
    a column-per-file comparison table of its statistics to stdout.

    Optional flag -m appends a final column holding the mean of each row.
    """
    import getopt, sys
    showMean = 0
    # Row label prefixes; each input file appends one 12-character cell.
    fname = "filename: "
    fnam2 = "          "
    ratio = "ham:spam: "
    rat2 = "          "
    fptot = "fp total: "
    fpper = "fp %:     "
    fntot = "fn total: "
    fnper = "fn %:     "
    untot = "unsure t: "
    unper = "unsure %: "
    rcost = "real cost:"
    bcost = "best cost:"
    hmean = "h mean:   "
    hsdev = "h sdev:   "
    smean = "s mean:   "
    ssdev = "s sdev:   "
    meand = "mean diff:"
    kval = "k:        "
    # Running totals feeding the optional mean column.
    tfptot = tfpper = tfntot = tfnper = tuntot = tunper = trcost = tbcost = \
        thmean = thsdev = tsmean = tssdev = tmeand = tkval = 0
    args, fileargs = getopt.getopt(sys.argv[1:], 'm')
    for arg, _ in args:
        if arg == "-m":
            showMean = 1
    for filename in fileargs:
        filename = windowsfy(filename)
        # BUGFIX: `file()` is a Python 2 builtin and raises NameError under
        # Python 3 (the rest of this module already uses print()).  Open the
        # file explicitly and close it deterministically instead of leaking it.
        with open(filename) as statfile:
            (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost,
             hamdevall, spamdevall) = suck(statfile)
        # Column header is the bare base name, without extension or directories.
        if filename.endswith('.txt'):
            filename = filename[:-4]
        filename = filename[filename.rfind('/')+1:]
        filename = filename[filename.rfind("\\")+1:]
        # Long names alternate between the two header rows so they don't collide.
        if len(fname) > len(fnam2):
            fname += " "
            fname = fname[0:(len(fnam2) + 12)]
            fnam2 += " %11s" % filename
        else:
            fnam2 += " "
            fnam2 = fnam2[0:(len(fname) + 12)]
            fname += " %11s" % filename
        if len(ratio) > len(rat2):
            ratio += " "
            ratio = ratio[0:(len(rat2) + 12)]
            rat2 += " %11s" % ("%d:%d" % (htest, stest))
        else:
            rat2 += " "
            rat2 = rat2[0:(len(ratio) + 12)]
            ratio += " %11s" % ("%d:%d" % (htest, stest))
        fptot += "%12d" % fp
        tfptot += fp
        fpper += "%12.2f" % fpp
        tfpper += fpp
        fntot += "%12d" % fn
        tfntot += fn
        fnper += "%12.2f" % fnp
        tfnper += fnp
        untot += "%12d" % un
        tuntot += un
        unper += "%12.2f" % unp
        tunper += unp
        rcost += "%12s" % ("$%.2f" % cost)
        trcost += cost
        bcost += "%12s" % ("$%.2f" % bestcost)
        tbcost += bestcost
        hmean += "%12.2f" % hamdevall[0]
        thmean += hamdevall[0]
        hsdev += "%12.2f" % hamdevall[1]
        thsdev += hamdevall[1]
        smean += "%12.2f" % spamdevall[0]
        tsmean += spamdevall[0]
        ssdev += "%12.2f" % spamdevall[1]
        tssdev += spamdevall[1]
        meand += "%12.2f" % (spamdevall[0] - hamdevall[0])
        tmeand += (spamdevall[0] - hamdevall[0])
        # k measures ham/spam separation relative to the combined spread.
        k = (spamdevall[0] - hamdevall[0]) / (spamdevall[1] + hamdevall[1])
        kval += "%12.2f" % k
        tkval += k
    nfiles = len(fileargs)
    if nfiles and showMean:
        # Append the mean-of-columns cell to every row.
        fptot += "%12d" % (tfptot/nfiles)
        fpper += "%12.2f" % (tfpper/nfiles)
        fntot += "%12d" % (tfntot/nfiles)
        fnper += "%12.2f" % (tfnper/nfiles)
        untot += "%12d" % (tuntot/nfiles)
        unper += "%12.2f" % (tunper/nfiles)
        rcost += "%12s" % ("$%.2f" % (trcost/nfiles))
        bcost += "%12s" % ("$%.2f" % (tbcost/nfiles))
        hmean += "%12.2f" % (thmean/nfiles)
        hsdev += "%12.2f" % (thsdev/nfiles)
        smean += "%12.2f" % (tsmean/nfiles)
        ssdev += "%12.2f" % (tssdev/nfiles)
        meand += "%12.2f" % (tmeand/nfiles)
        kval += "%12.2f" % (tkval/nfiles)
    print(fname)
    # The overflow header/ratio rows only print when something spilled into them.
    if len(fnam2.strip()) > 0:
        print(fnam2)
    print(ratio)
    if len(rat2.strip()) > 0:
        print(rat2)
    print(fptot)
    print(fpper)
    print(fntot)
    print(fnper)
    print(untot)
    print(unper)
    print(rcost)
    print(bcost)
    print(hmean)
    print(hsdev)
    print(smean)
    print(ssdev)
    print(meand)
    print(kval)
# Script entry point: run the tabulation only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    table()
|
normal
|
{
"blob_id": "4e94e9e2b45d3786aa86be800be882cc3d5a80b5",
"index": 8328,
"step-1": "<mask token>\n\n\ndef suck(f):\n hamdevall = spamdevall = 0.0, 0.0\n cost = 0.0\n bestcost = 0.0\n fp = 0\n fn = 0\n un = 0\n fpp = 0.0\n fnp = 0.0\n unp = 0.0\n htest = 0\n stest = 0\n get = f.readline\n while 1:\n line = get()\n if line.startswith('-> <stat> tested'):\n print(line, end=' ')\n elif line.find(' items; mean ') > 0 and line.find('for all runs') > 0:\n vals = line.split(';')\n mean = float(vals[1].split()[-1])\n sdev = float(vals[2].split()[-1])\n val = mean, sdev\n ntested = int(vals[0].split()[-2])\n typ = vals[0].split()[2]\n if line.find('for all runs') != -1:\n if typ == 'Ham':\n hamdevall = val\n htest = ntested\n else:\n spamdevall = val\n stest = ntested\n elif line.startswith('-> best cost for all runs: $'):\n bestcost = float(line.split('$')[-1])\n elif line.startswith('-> <stat> all runs false positives: '):\n fp = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs false negatives: '):\n fn = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs unsure: '):\n un = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs false positive %: '):\n fpp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs false negative %: '):\n fnp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs unsure %: '):\n unp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs cost: '):\n cost = float(line.split('$')[-1])\n break\n return (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost,\n hamdevall, spamdevall)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef suck(f):\n hamdevall = spamdevall = 0.0, 0.0\n cost = 0.0\n bestcost = 0.0\n fp = 0\n fn = 0\n un = 0\n fpp = 0.0\n fnp = 0.0\n unp = 0.0\n htest = 0\n stest = 0\n get = f.readline\n while 1:\n line = get()\n if line.startswith('-> <stat> tested'):\n print(line, end=' ')\n elif line.find(' items; mean ') > 0 and line.find('for all runs') > 0:\n vals = line.split(';')\n mean = float(vals[1].split()[-1])\n sdev = float(vals[2].split()[-1])\n val = mean, sdev\n ntested = int(vals[0].split()[-2])\n typ = vals[0].split()[2]\n if line.find('for all runs') != -1:\n if typ == 'Ham':\n hamdevall = val\n htest = ntested\n else:\n spamdevall = val\n stest = ntested\n elif line.startswith('-> best cost for all runs: $'):\n bestcost = float(line.split('$')[-1])\n elif line.startswith('-> <stat> all runs false positives: '):\n fp = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs false negatives: '):\n fn = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs unsure: '):\n un = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs false positive %: '):\n fpp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs false negative %: '):\n fnp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs unsure %: '):\n unp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs cost: '):\n cost = float(line.split('$')[-1])\n break\n return (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost,\n hamdevall, spamdevall)\n\n\ndef windowsfy(fn):\n import os\n if os.path.exists(fn + '.txt'):\n return fn + '.txt'\n else:\n return fn\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef suck(f):\n hamdevall = spamdevall = 0.0, 0.0\n cost = 0.0\n bestcost = 0.0\n fp = 0\n fn = 0\n un = 0\n fpp = 0.0\n fnp = 0.0\n unp = 0.0\n htest = 0\n stest = 0\n get = f.readline\n while 1:\n line = get()\n if line.startswith('-> <stat> tested'):\n print(line, end=' ')\n elif line.find(' items; mean ') > 0 and line.find('for all runs') > 0:\n vals = line.split(';')\n mean = float(vals[1].split()[-1])\n sdev = float(vals[2].split()[-1])\n val = mean, sdev\n ntested = int(vals[0].split()[-2])\n typ = vals[0].split()[2]\n if line.find('for all runs') != -1:\n if typ == 'Ham':\n hamdevall = val\n htest = ntested\n else:\n spamdevall = val\n stest = ntested\n elif line.startswith('-> best cost for all runs: $'):\n bestcost = float(line.split('$')[-1])\n elif line.startswith('-> <stat> all runs false positives: '):\n fp = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs false negatives: '):\n fn = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs unsure: '):\n un = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs false positive %: '):\n fpp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs false negative %: '):\n fnp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs unsure %: '):\n unp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs cost: '):\n cost = float(line.split('$')[-1])\n break\n return (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost,\n hamdevall, spamdevall)\n\n\ndef windowsfy(fn):\n import os\n if os.path.exists(fn + '.txt'):\n return fn + '.txt'\n else:\n return fn\n\n\ndef table():\n import getopt, sys\n showMean = 0\n fname = 'filename: '\n fnam2 = ' '\n ratio = 'ham:spam: '\n rat2 = ' '\n fptot = 'fp total: '\n fpper = 'fp %: '\n fntot = 'fn total: '\n fnper = 'fn %: '\n untot = 'unsure t: '\n unper = 'unsure %: '\n rcost = 'real cost:'\n bcost = 'best cost:'\n hmean = 'h mean: '\n hsdev = 'h sdev: '\n smean = 
's mean: '\n ssdev = 's sdev: '\n meand = 'mean diff:'\n kval = 'k: '\n (tfptot) = (tfpper) = (tfntot) = (tfnper) = (tuntot) = (tunper) = (trcost\n ) = (tbcost) = (thmean) = (thsdev) = (tsmean) = (tssdev) = (tmeand) = (\n tkval) = 0\n args, fileargs = getopt.getopt(sys.argv[1:], 'm')\n for arg, val in args:\n if arg == '-m':\n showMean = 1\n for filename in fileargs:\n filename = windowsfy(filename)\n (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost, hamdevall,\n spamdevall) = suck(file(filename))\n if filename.endswith('.txt'):\n filename = filename[:-4]\n filename = filename[filename.rfind('/') + 1:]\n filename = filename[filename.rfind('\\\\') + 1:]\n if len(fname) > len(fnam2):\n fname += ' '\n fname = fname[0:len(fnam2) + 12]\n fnam2 += ' %11s' % filename\n else:\n fnam2 += ' '\n fnam2 = fnam2[0:len(fname) + 12]\n fname += ' %11s' % filename\n if len(ratio) > len(rat2):\n ratio += ' '\n ratio = ratio[0:len(rat2) + 12]\n rat2 += ' %11s' % ('%d:%d' % (htest, stest))\n else:\n rat2 += ' '\n rat2 = rat2[0:len(ratio) + 12]\n ratio += ' %11s' % ('%d:%d' % (htest, stest))\n fptot += '%12d' % fp\n tfptot += fp\n fpper += '%12.2f' % fpp\n tfpper += fpp\n fntot += '%12d' % fn\n tfntot += fn\n fnper += '%12.2f' % fnp\n tfnper += fnp\n untot += '%12d' % un\n tuntot += un\n unper += '%12.2f' % unp\n tunper += unp\n rcost += '%12s' % ('$%.2f' % cost)\n trcost += cost\n bcost += '%12s' % ('$%.2f' % bestcost)\n tbcost += bestcost\n hmean += '%12.2f' % hamdevall[0]\n thmean += hamdevall[0]\n hsdev += '%12.2f' % hamdevall[1]\n thsdev += hamdevall[1]\n smean += '%12.2f' % spamdevall[0]\n tsmean += spamdevall[0]\n ssdev += '%12.2f' % spamdevall[1]\n tssdev += spamdevall[1]\n meand += '%12.2f' % (spamdevall[0] - hamdevall[0])\n tmeand += spamdevall[0] - hamdevall[0]\n k = (spamdevall[0] - hamdevall[0]) / (spamdevall[1] + hamdevall[1])\n kval += '%12.2f' % k\n tkval += k\n nfiles = len(fileargs)\n if nfiles and showMean:\n fptot += '%12d' % (tfptot / nfiles)\n fpper += 
'%12.2f' % (tfpper / nfiles)\n fntot += '%12d' % (tfntot / nfiles)\n fnper += '%12.2f' % (tfnper / nfiles)\n untot += '%12d' % (tuntot / nfiles)\n unper += '%12.2f' % (tunper / nfiles)\n rcost += '%12s' % ('$%.2f' % (trcost / nfiles))\n bcost += '%12s' % ('$%.2f' % (tbcost / nfiles))\n hmean += '%12.2f' % (thmean / nfiles)\n hsdev += '%12.2f' % (thsdev / nfiles)\n smean += '%12.2f' % (tsmean / nfiles)\n ssdev += '%12.2f' % (tssdev / nfiles)\n meand += '%12.2f' % (tmeand / nfiles)\n kval += '%12.2f' % (tkval / nfiles)\n print(fname)\n if len(fnam2.strip()) > 0:\n print(fnam2)\n print(ratio)\n if len(rat2.strip()) > 0:\n print(rat2)\n print(fptot)\n print(fpper)\n print(fntot)\n print(fnper)\n print(untot)\n print(unper)\n print(rcost)\n print(bcost)\n print(hmean)\n print(hsdev)\n print(smean)\n print(ssdev)\n print(meand)\n print(kval)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef suck(f):\n hamdevall = spamdevall = 0.0, 0.0\n cost = 0.0\n bestcost = 0.0\n fp = 0\n fn = 0\n un = 0\n fpp = 0.0\n fnp = 0.0\n unp = 0.0\n htest = 0\n stest = 0\n get = f.readline\n while 1:\n line = get()\n if line.startswith('-> <stat> tested'):\n print(line, end=' ')\n elif line.find(' items; mean ') > 0 and line.find('for all runs') > 0:\n vals = line.split(';')\n mean = float(vals[1].split()[-1])\n sdev = float(vals[2].split()[-1])\n val = mean, sdev\n ntested = int(vals[0].split()[-2])\n typ = vals[0].split()[2]\n if line.find('for all runs') != -1:\n if typ == 'Ham':\n hamdevall = val\n htest = ntested\n else:\n spamdevall = val\n stest = ntested\n elif line.startswith('-> best cost for all runs: $'):\n bestcost = float(line.split('$')[-1])\n elif line.startswith('-> <stat> all runs false positives: '):\n fp = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs false negatives: '):\n fn = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs unsure: '):\n un = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs false positive %: '):\n fpp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs false negative %: '):\n fnp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs unsure %: '):\n unp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs cost: '):\n cost = float(line.split('$')[-1])\n break\n return (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost,\n hamdevall, spamdevall)\n\n\ndef windowsfy(fn):\n import os\n if os.path.exists(fn + '.txt'):\n return fn + '.txt'\n else:\n return fn\n\n\ndef table():\n import getopt, sys\n showMean = 0\n fname = 'filename: '\n fnam2 = ' '\n ratio = 'ham:spam: '\n rat2 = ' '\n fptot = 'fp total: '\n fpper = 'fp %: '\n fntot = 'fn total: '\n fnper = 'fn %: '\n untot = 'unsure t: '\n unper = 'unsure %: '\n rcost = 'real cost:'\n bcost = 'best cost:'\n hmean = 'h mean: '\n hsdev = 'h sdev: '\n smean = 
's mean: '\n ssdev = 's sdev: '\n meand = 'mean diff:'\n kval = 'k: '\n (tfptot) = (tfpper) = (tfntot) = (tfnper) = (tuntot) = (tunper) = (trcost\n ) = (tbcost) = (thmean) = (thsdev) = (tsmean) = (tssdev) = (tmeand) = (\n tkval) = 0\n args, fileargs = getopt.getopt(sys.argv[1:], 'm')\n for arg, val in args:\n if arg == '-m':\n showMean = 1\n for filename in fileargs:\n filename = windowsfy(filename)\n (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost, hamdevall,\n spamdevall) = suck(file(filename))\n if filename.endswith('.txt'):\n filename = filename[:-4]\n filename = filename[filename.rfind('/') + 1:]\n filename = filename[filename.rfind('\\\\') + 1:]\n if len(fname) > len(fnam2):\n fname += ' '\n fname = fname[0:len(fnam2) + 12]\n fnam2 += ' %11s' % filename\n else:\n fnam2 += ' '\n fnam2 = fnam2[0:len(fname) + 12]\n fname += ' %11s' % filename\n if len(ratio) > len(rat2):\n ratio += ' '\n ratio = ratio[0:len(rat2) + 12]\n rat2 += ' %11s' % ('%d:%d' % (htest, stest))\n else:\n rat2 += ' '\n rat2 = rat2[0:len(ratio) + 12]\n ratio += ' %11s' % ('%d:%d' % (htest, stest))\n fptot += '%12d' % fp\n tfptot += fp\n fpper += '%12.2f' % fpp\n tfpper += fpp\n fntot += '%12d' % fn\n tfntot += fn\n fnper += '%12.2f' % fnp\n tfnper += fnp\n untot += '%12d' % un\n tuntot += un\n unper += '%12.2f' % unp\n tunper += unp\n rcost += '%12s' % ('$%.2f' % cost)\n trcost += cost\n bcost += '%12s' % ('$%.2f' % bestcost)\n tbcost += bestcost\n hmean += '%12.2f' % hamdevall[0]\n thmean += hamdevall[0]\n hsdev += '%12.2f' % hamdevall[1]\n thsdev += hamdevall[1]\n smean += '%12.2f' % spamdevall[0]\n tsmean += spamdevall[0]\n ssdev += '%12.2f' % spamdevall[1]\n tssdev += spamdevall[1]\n meand += '%12.2f' % (spamdevall[0] - hamdevall[0])\n tmeand += spamdevall[0] - hamdevall[0]\n k = (spamdevall[0] - hamdevall[0]) / (spamdevall[1] + hamdevall[1])\n kval += '%12.2f' % k\n tkval += k\n nfiles = len(fileargs)\n if nfiles and showMean:\n fptot += '%12d' % (tfptot / nfiles)\n fpper += 
'%12.2f' % (tfpper / nfiles)\n fntot += '%12d' % (tfntot / nfiles)\n fnper += '%12.2f' % (tfnper / nfiles)\n untot += '%12d' % (tuntot / nfiles)\n unper += '%12.2f' % (tunper / nfiles)\n rcost += '%12s' % ('$%.2f' % (trcost / nfiles))\n bcost += '%12s' % ('$%.2f' % (tbcost / nfiles))\n hmean += '%12.2f' % (thmean / nfiles)\n hsdev += '%12.2f' % (thsdev / nfiles)\n smean += '%12.2f' % (tsmean / nfiles)\n ssdev += '%12.2f' % (tssdev / nfiles)\n meand += '%12.2f' % (tmeand / nfiles)\n kval += '%12.2f' % (tkval / nfiles)\n print(fname)\n if len(fnam2.strip()) > 0:\n print(fnam2)\n print(ratio)\n if len(rat2.strip()) > 0:\n print(rat2)\n print(fptot)\n print(fpper)\n print(fntot)\n print(fnper)\n print(untot)\n print(unper)\n print(rcost)\n print(bcost)\n print(hmean)\n print(hsdev)\n print(smean)\n print(ssdev)\n print(meand)\n print(kval)\n\n\nif __name__ == '__main__':\n table()\n",
"step-5": "\"\"\"\ntable.py [-m] base1 base2 ... baseN\nCombines output from base1.txt, base2.txt, etc., which are created by\nthe TestDriver (such as timcv.py) output, and displays tabulated\ncomparison statistics to stdout. Each input file is represented by\none column in the table.\nOptional argument -m shows a final column with the mean value of each\nstatistic.\n\"\"\"\ndef suck(f):\n hamdevall = spamdevall = (0.0, 0.0)\n cost = 0.0\n bestcost = 0.0\n fp = 0\n fn = 0\n un = 0\n fpp = 0.0\n fnp = 0.0\n unp = 0.0\n htest = 0\n stest = 0\n get = f.readline\n while 1:\n line = get()\n if line.startswith('-> <stat> tested'):\n print(line, end=' ')\n elif line.find(' items; mean ') > 0 and line.find('for all runs') > 0:\n vals = line.split(';')\n mean = float(vals[1].split()[-1])\n sdev = float(vals[2].split()[-1])\n val = (mean, sdev)\n ntested = int(vals[0].split()[-2])\n typ = vals[0].split()[2]\n if line.find('for all runs') != -1:\n if typ == 'Ham':\n hamdevall = val\n htest = ntested\n else:\n spamdevall = val\n stest = ntested\n elif line.startswith('-> best cost for all runs: $'):\n bestcost = float(line.split('$')[-1])\n elif line.startswith('-> <stat> all runs false positives: '):\n fp = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs false negatives: '):\n fn = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs unsure: '):\n un = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs false positive %: '):\n fpp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs false negative %: '):\n fnp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs unsure %: '):\n unp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs cost: '):\n cost = float(line.split('$')[-1])\n break\n return (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost,\n hamdevall, spamdevall)\ndef windowsfy(fn):\n import os\n if os.path.exists(fn + '.txt'):\n return fn + '.txt'\n else:\n return fn\ndef 
table():\n import getopt, sys\n showMean = 0\n fname = \"filename: \"\n fnam2 = \" \"\n ratio = \"ham:spam: \"\n rat2 = \" \"\n fptot = \"fp total: \"\n fpper = \"fp %: \"\n fntot = \"fn total: \"\n fnper = \"fn %: \"\n untot = \"unsure t: \"\n unper = \"unsure %: \"\n rcost = \"real cost:\"\n bcost = \"best cost:\"\n hmean = \"h mean: \"\n hsdev = \"h sdev: \"\n smean = \"s mean: \"\n ssdev = \"s sdev: \"\n meand = \"mean diff:\"\n kval = \"k: \"\n tfptot = tfpper = tfntot = tfnper = tuntot = tunper = trcost = tbcost = \\\n thmean = thsdev = tsmean = tssdev = tmeand = tkval = 0\n args, fileargs = getopt.getopt(sys.argv[1:], 'm')\n for arg, val in args:\n if arg == \"-m\":\n showMean = 1\n for filename in fileargs:\n filename = windowsfy(filename)\n (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost,\n hamdevall, spamdevall) = suck(file(filename))\n if filename.endswith('.txt'):\n filename = filename[:-4]\n filename = filename[filename.rfind('/')+1:]\n filename = filename[filename.rfind(\"\\\\\")+1:]\n if len(fname) > len(fnam2):\n fname += \" \"\n fname = fname[0:(len(fnam2) + 12)]\n fnam2 += \" %11s\" % filename\n else:\n fnam2 += \" \"\n fnam2 = fnam2[0:(len(fname) + 12)]\n fname += \" %11s\" % filename\n if len(ratio) > len(rat2):\n ratio += \" \"\n ratio = ratio[0:(len(rat2) + 12)]\n rat2 += \" %11s\" % (\"%d:%d\" % (htest, stest))\n else:\n rat2 += \" \"\n rat2 = rat2[0:(len(ratio) + 12)]\n ratio += \" %11s\" % (\"%d:%d\" % (htest, stest))\n fptot += \"%12d\" % fp\n tfptot += fp\n fpper += \"%12.2f\" % fpp\n tfpper += fpp\n fntot += \"%12d\" % fn\n tfntot += fn\n fnper += \"%12.2f\" % fnp\n tfnper += fnp\n untot += \"%12d\" % un\n tuntot += un\n unper += \"%12.2f\" % unp\n tunper += unp\n rcost += \"%12s\" % (\"$%.2f\" % cost)\n trcost += cost\n bcost += \"%12s\" % (\"$%.2f\" % bestcost)\n tbcost += bestcost\n hmean += \"%12.2f\" % hamdevall[0]\n thmean += hamdevall[0]\n hsdev += \"%12.2f\" % hamdevall[1]\n thsdev += hamdevall[1]\n smean += \"%12.2f\" % 
spamdevall[0]\n tsmean += spamdevall[0]\n ssdev += \"%12.2f\" % spamdevall[1]\n tssdev += spamdevall[1]\n meand += \"%12.2f\" % (spamdevall[0] - hamdevall[0])\n tmeand += (spamdevall[0] - hamdevall[0])\n k = (spamdevall[0] - hamdevall[0]) / (spamdevall[1] + hamdevall[1])\n kval += \"%12.2f\" % k\n tkval += k\n nfiles = len(fileargs)\n if nfiles and showMean:\n fptot += \"%12d\" % (tfptot/nfiles)\n fpper += \"%12.2f\" % (tfpper/nfiles)\n fntot += \"%12d\" % (tfntot/nfiles)\n fnper += \"%12.2f\" % (tfnper/nfiles)\n untot += \"%12d\" % (tuntot/nfiles)\n unper += \"%12.2f\" % (tunper/nfiles)\n rcost += \"%12s\" % (\"$%.2f\" % (trcost/nfiles))\n bcost += \"%12s\" % (\"$%.2f\" % (tbcost/nfiles))\n hmean += \"%12.2f\" % (thmean/nfiles)\n hsdev += \"%12.2f\" % (thsdev/nfiles)\n smean += \"%12.2f\" % (tsmean/nfiles)\n ssdev += \"%12.2f\" % (tssdev/nfiles)\n meand += \"%12.2f\" % (tmeand/nfiles)\n kval += \"%12.2f\" % (tkval/nfiles)\n print(fname)\n if len(fnam2.strip()) > 0:\n print(fnam2)\n print(ratio)\n if len(rat2.strip()) > 0:\n print(rat2)\n print(fptot)\n print(fpper)\n print(fntot)\n print(fnper)\n print(untot)\n print(unper)\n print(rcost)\n print(bcost)\n print(hmean)\n print(hsdev)\n print(smean)\n print(ssdev)\n print(meand)\n print(kval)\nif __name__ == \"__main__\":\n table()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class CalibraterBase:
    """Common base for calibraters.

    Loads the model (with shape inference), selects which tensors to observe,
    and declares the augment/collect/compute workflow that subclasses implement.
    """

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path="augmented_model.onnx",
        symmetric=False,
        use_external_data_format=False,
    ):
        """
        :param model_path: ONNX model to calibrate. It should be a model file path
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        """
        if isinstance(model_path, (str, Path)):
            # Path(Path(...)) is a no-op, so both accepted kinds share one branch.
            self.model = load_model_with_shape_infer(Path(model_path))
        else:
            raise ValueError('model_path should be model path.')
        self.op_types_to_calibrate = op_types_to_calibrate
        self.augmented_model_path = augmented_model_path
        self.symmetric = symmetric
        self.use_external_data_format = use_external_data_format
        self.augment_model = None
        self.infer_session = None
        self.execution_providers = ['CPUExecutionProvider']

    def set_execution_providers(self, execution_providers=None):
        """
        Reset the execution providers used to execute collect_data; triggers
        re-creation of the inference session.

        :param execution_providers: sequence of provider names. Defaults to
            ["CPUExecutionProvider"] when None (a None sentinel replaces the
            previous mutable-list default argument).
        """
        self.execution_providers = (
            execution_providers if execution_providers is not None else ['CPUExecutionProvider']
        )
        self.create_inference_session()

    def create_inference_session(self):
        """
        Create an OnnxRuntime InferenceSession over the augmented model.
        Graph optimizations are disabled so the augmentation nodes survive.
        """
        sess_options = onnxruntime.SessionOptions()
        sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL
        self.infer_session = onnxruntime.InferenceSession(
            self.augmented_model_path,
            sess_options=sess_options,
            providers=self.execution_providers,
        )

    def select_tensors_to_calibrate(self, model: ModelProto):
        """
        Select input/output tensors of candidate nodes to calibrate.
        Only tensors with a known value_info of elem_type float32
        (TensorProto.FLOAT) that are not initializers qualify.

        returns:
            tensors (set): set of tensor name.
            value_infos (dict): tensor name to value info.
        """
        value_infos = {vi.name: vi for vi in model.graph.value_info}
        value_infos.update({ot.name: ot for ot in model.graph.output})
        value_infos.update({it.name: it for it in model.graph.input})
        initializer = {init.name for init in model.graph.initializer}
        tensors_to_calibrate = set()
        tensor_type_to_calibrate = {TensorProto.FLOAT}
        for node in model.graph.node:
            # An empty op_types_to_calibrate means "all ops".
            if not self.op_types_to_calibrate or node.op_type in self.op_types_to_calibrate:
                for tensor_name in itertools.chain(node.input, node.output):
                    if tensor_name in value_infos:
                        vi = value_infos[tensor_name]
                        if (
                            vi.type.HasField('tensor_type')
                            and vi.type.tensor_type.elem_type in tensor_type_to_calibrate
                            and tensor_name not in initializer
                        ):
                            tensors_to_calibrate.add(tensor_name)
        return tensors_to_calibrate, value_infos

    def get_augment_model(self):
        """
        return: augmented onnx model. Call after calling augment_graph
        """
        return self.model

    def augment_graph(self):
        """
        abstract method: augment the input model to prepare for collecting data. It will:
            1. augment the model to be able to collect desired statistics data
            2. save augmented model to augmented_model_paths
        """
        raise NotImplementedError

    def collect_data(self, data_reader: CalibrationDataReader):
        """
        abstract method: collect the tensors that will be used for range computation. It can be called multiple times.
        """
        raise NotImplementedError

    def compute_data(self) -> TensorsData:
        """
        abstract method: compute data based on the calibration method stored in TensorsData
        """
        raise NotImplementedError
class MinMaxCalibrater(CalibraterBase):
    """Calibrater computing a per-tensor (min, max) range, optionally as an
    exponential moving average over calibration batches."""

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path="augmented_model.onnx",
        symmetric=False,
        use_external_data_format=False,
        moving_average=False,
        averaging_constant=0.01,
    ):
        """
        :param model_path: ONNX model to calibrate. It is a model path
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        :param moving_average: compute the moving average of the minimum and maximum values instead of the global minimum and maximum.
        :param averaging_constant: constant smoothing factor to use when computing the moving average.
        """
        super().__init__(
            model_path,
            op_types_to_calibrate=op_types_to_calibrate,
            augmented_model_path=augmented_model_path,
            symmetric=symmetric,
            use_external_data_format=use_external_data_format,
        )
        self.intermediate_outputs = []
        self.calibrate_tensors_range = None
        self.num_model_outputs = len(self.model.graph.output)
        self.model_original_outputs = {output.name for output in self.model.graph.output}
        self.moving_average = moving_average
        # Chained comparison replaces the original (x < 0 or x > 1) check.
        if moving_average and not 0 <= averaging_constant <= 1:
            raise ValueError('Invalid averaging constant, which should not be < 0 or > 1.')
        self.averaging_constant = averaging_constant

    def augment_graph(self):
        """
        Adds ReduceMin and ReduceMax nodes to all quantization_candidates op type nodes in
        model and ensures their outputs are stored as part of the graph output
        :return: augmented ONNX model
        """
        tensors, _ = self.select_tensors_to_calibrate(self.model)
        reshape_shape_name = str(uuid.uuid4())
        # Shared [1] shape initializer: reshapes each scalar reduce output to
        # rank-1 so it can be exposed as a graph output.
        reshape_shape = numpy_helper.from_array(np.array([1], dtype=np.int64), reshape_shape_name)
        self.model.graph.initializer.append(reshape_shape)

        def add_reduce_min_max(tensor_name, reduce_op_name):
            # Append "<tensor>_<ReduceOp>" -> Reshape([1]) and register the
            # result as a model output so the session returns it.
            keepdims = 1
            reduce_output = tensor_name + '_' + reduce_op_name
            intermediate_output = reduce_output + '_Reshape'
            reduce_node = onnx.helper.make_node(
                reduce_op_name, [tensor_name], [intermediate_output], keepdims=keepdims, name=reduce_output
            )
            reshape_node = onnx.helper.make_node(
                'Reshape',
                inputs=[intermediate_output, reshape_shape_name],
                outputs=[reduce_output],
                name=intermediate_output,
            )
            self.model.graph.node.extend([reduce_node, reshape_node])
            self.model.graph.output.append(helper.make_tensor_value_info(reduce_output, TensorProto.FLOAT, [1]))

        for tensor in tensors:
            add_reduce_min_max(tensor, 'ReduceMin')
            add_reduce_min_max(tensor, 'ReduceMax')
        onnx.save(self.model, self.augmented_model_path, save_as_external_data=self.use_external_data_format)

    def clear_collected_data(self):
        """Drop the raw per-batch outputs gathered by collect_data."""
        self.intermediate_outputs = []

    def collect_data(self, data_reader: CalibrationDataReader):
        """Run the augmented model over every batch from *data_reader* and fold
        the observed reduce outputs into self.calibrate_tensors_range."""
        while True:
            inputs = data_reader.get_next()
            if not inputs:
                break
            self.intermediate_outputs.append(self.infer_session.run(None, inputs))
        if len(self.intermediate_outputs) == 0:
            raise ValueError('No data is collected.')
        t = self.compute_data()
        if not isinstance(t, TensorsData):
            raise TypeError(f'compute_data must return a TensorsData not {type(t)}.')
        self.clear_collected_data()

    def merge_range(self, old_range, new_range):
        """Merge per-tensor (min, max) pairs from *old_range* into *new_range*.

        Uses an exponential moving average when self.moving_average is set,
        otherwise widens to the global min/max. Mutates and returns *new_range*.
        """
        if not old_range:
            return new_range
        for key, value in old_range.items():
            if self.moving_average:
                min_value = value[0] + self.averaging_constant * (new_range[key][0] - value[0])
                max_value = value[1] + self.averaging_constant * (new_range[key][1] - value[1])
            else:
                min_value = min(value[0], new_range[key][0])
                max_value = max(value[1], new_range[key][1])
            new_range[key] = min_value, max_value
        return new_range

    def compute_data(self) -> TensorsData:
        """
        Compute the min-max range of tensor
        :return: dictionary mapping: {added node names: (ReduceMin, ReduceMax) pairs }
        """
        if len(self.intermediate_outputs) == 0:
            return self.calibrate_tensors_range
        output_names = [self.infer_session.get_outputs()[i].name for i in range(len(self.intermediate_outputs[0]))]
        output_dicts_list = [
            dict(zip(output_names, intermediate_output)) for intermediate_output in self.intermediate_outputs
        ]
        merged_output_dict = {}
        for d in output_dicts_list:
            for k, v in d.items():
                merged_output_dict.setdefault(k, []).append(v)
        # Outputs appended by augment_graph come after the model's original
        # outputs, alternating "<tensor>_ReduceMin" / "<tensor>_ReduceMax".
        added_output_names = output_names[self.num_model_outputs:]
        calibrate_tensor_names = [
            added_output_names[i].rpartition('_')[0] for i in range(0, len(added_output_names), 2)
        ]
        merged_added_output_dict = {
            i: merged_output_dict[i] for i in merged_output_dict if i not in self.model_original_outputs
        }
        pairs = []
        for i in range(0, len(added_output_names), 2):
            min_value = 0
            max_value = 0
            if self.moving_average:
                min_value_array = np.mean(merged_added_output_dict[added_output_names[i]], axis=0)
                max_value_array = np.mean(merged_added_output_dict[added_output_names[i + 1]], axis=0)
            else:
                min_value_array = min(merged_added_output_dict[added_output_names[i]])
                max_value_array = max(merged_added_output_dict[added_output_names[i + 1]])
            # isinstance replaces the non-idiomatic `type(...) == int` check
            # (robust to int subclasses); arrays must be non-empty to convert.
            if isinstance(min_value_array, int) or min_value_array.size > 0:
                min_value = float(min_value_array)
            if isinstance(max_value_array, int) or max_value_array.size > 0:
                max_value = float(max_value_array)
            if self.symmetric:
                max_absolute_value = max(abs(min_value), abs(max_value))
                pairs.append((-max_absolute_value, max_absolute_value))
            else:
                pairs.append((min_value, max_value))
        new_calibrate_tensors_range = TensorsData(CalibrationMethod.MinMax, dict(zip(calibrate_tensor_names, pairs)))
        if self.calibrate_tensors_range:
            self.calibrate_tensors_range = self.merge_range(self.calibrate_tensors_range, new_calibrate_tensors_range)
        else:
            self.calibrate_tensors_range = new_calibrate_tensors_range
        return self.calibrate_tensors_range
class HistogramCalibrater(CalibraterBase):
    # Shared implementation for histogram-based calibraters (entropy,
    # percentile, distribution): runs the augmented model, accumulates
    # per-tensor histograms in a HistogramCollector, then reduces them.
    def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:
        Optional[Sequence[str]]=None, augmented_model_path=
        'augmented_model.onnx', use_external_data_format=False, method=
        'percentile', symmetric=False, num_bins=128, num_quantized_bins=
        2048, percentile=99.999, scenario='same'):
        """
        :param model_path: ONNX model to calibrate. It is a model path.
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        :param method: A string. One of ['entropy', 'percentile'].
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param num_bins: number of bins to create a new histogram for collecting tensor values.
        :param num_quantized_bins: number of quantized bins. Default 128.
        :param percentile: A float number between [0, 100]. Default 99.99.
        :param scenario: see :class:`DistributionCalibrater`
        """
        super().__init__(model_path, op_types_to_calibrate=
            op_types_to_calibrate, augmented_model_path=
            augmented_model_path, symmetric=symmetric,
            use_external_data_format=use_external_data_format)
        self.intermediate_outputs = []
        self.calibrate_tensors_range = None
        self.num_model_outputs = len(self.model.graph.output)
        self.model_original_outputs = {output.name for output in self.model
            .graph.output}
        # HistogramCollector created lazily on the first collect_data call.
        self.collector = None
        self.method = method
        self.num_bins = num_bins
        self.num_quantized_bins = num_quantized_bins
        self.percentile = percentile
        # Filled by augment_graph; used to filter session outputs in collect_data.
        self.tensors_to_calibrate = None
        self.scenario = scenario

    def augment_graph(self):
        """
        make all quantization_candidates op type nodes as part of the graph output.
        :return: augmented ONNX model
        """
        self.tensors_to_calibrate, value_infos = (self.
            select_tensors_to_calibrate(self.model))
        # Expose each selected tensor as a graph output (unless it already is).
        for tensor in self.tensors_to_calibrate:
            if tensor not in self.model_original_outputs:
                self.model.graph.output.append(value_infos[tensor])
        onnx.save(self.model, self.augmented_model_path,
            save_as_external_data=self.use_external_data_format)

    def clear_collected_data(self):
        # Free raw per-batch outputs once they are folded into the collector.
        self.intermediate_outputs = []

    def collect_data(self, data_reader: CalibrationDataReader):
        """
        Entropy Calibrator collects operators' tensors as well as generates tensor histogram for each operator.
        """
        while True:
            inputs = data_reader.get_next()
            if not inputs:
                break
            self.intermediate_outputs.append(self.infer_session.run(None,
                inputs))
        if len(self.intermediate_outputs) == 0:
            raise ValueError('No data is collected.')
        output_names = [self.infer_session.get_outputs()[i].name for i in
            range(len(self.intermediate_outputs[0]))]
        output_dicts_list = [dict(zip(output_names, intermediate_output)) for
            intermediate_output in self.intermediate_outputs]
        # Group every batch's value for the same output name into one list.
        merged_dict = {}
        for d in output_dicts_list:
            for k, v in d.items():
                merged_dict.setdefault(k, []).append(v)
        # Keep only the tensors selected by augment_graph.
        clean_merged_dict = {i: merged_dict[i] for i in merged_dict if i in
            self.tensors_to_calibrate}
        if not self.collector:
            self.collector = HistogramCollector(method=self.method,
                symmetric=self.symmetric, num_bins=self.num_bins,
                num_quantized_bins=self.num_quantized_bins, percentile=self
                .percentile, scenario=self.scenario)
        self.collector.collect(clean_merged_dict)
        self.clear_collected_data()

    def compute_data(self) ->TensorsData:
        """
        Compute the min-max range of tensor
        :return: dictionary mapping: {tensor name: (min value, max value)}
        """
        if not self.collector:
            raise ValueError(
                "No collector created and can't generate calibration data.")
        # Tag the result with the CalibrationMethod of the concrete subclass.
        if isinstance(self, EntropyCalibrater):
            cal = CalibrationMethod.Entropy
        elif isinstance(self, PercentileCalibrater):
            cal = CalibrationMethod.Percentile
        elif isinstance(self, DistributionCalibrater):
            cal = CalibrationMethod.Distribution
        else:
            raise TypeError(
                f'Unknown calibrater {type(self)}. This method must be overwritten.'
                )
        return TensorsData(cal, self.collector.compute_collection_result())
class EntropyCalibrater(HistogramCalibrater):
    """Histogram-based calibrater that picks thresholds via the entropy
    (KL-divergence) criterion.

    Thin wrapper over :class:`HistogramCalibrater` preconfigured with
    method='entropy'.
    """

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path="augmented_model.onnx",
        use_external_data_format=False,
        method="entropy",
        symmetric=False,
        num_bins=128,
        num_quantized_bins=128,
    ):
        """
        :param model_path: path of the ONNX model to calibrate.
        :param op_types_to_calibrate: operator types to calibrate; all float32/float16 tensors by default.
        :param augmented_model_path: where the augmented model is saved.
        :param use_external_data_format: store the model with external data (models >= 2Gb).
        :param method: histogram method name; 'entropy' for this class.
        :param symmetric: make the tensor range symmetric around 0.
        :param num_bins: number of bins for each newly created histogram.
        :param num_quantized_bins: number of quantized bins (default 128).
        """
        super().__init__(
            model_path,
            op_types_to_calibrate=op_types_to_calibrate,
            augmented_model_path=augmented_model_path,
            use_external_data_format=use_external_data_format,
            method=method,
            symmetric=symmetric,
            num_bins=num_bins,
            num_quantized_bins=num_quantized_bins,
        )
class PercentileCalibrater(HistogramCalibrater):
    """Histogram-based calibrater that clips ranges at a given percentile.

    Thin wrapper over :class:`HistogramCalibrater` preconfigured with
    method='percentile'.
    """

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path="augmented_model.onnx",
        use_external_data_format=False,
        method="percentile",
        symmetric=False,
        num_bins=2048,
        percentile=99.999,
    ):
        """
        :param model_path: path of the ONNX model to calibrate.
        :param op_types_to_calibrate: operator types to calibrate; all float32/float16 tensors by default.
        :param augmented_model_path: where the augmented model is saved.
        :param use_external_data_format: store the model with external data (models >= 2Gb).
        :param method: histogram method name; 'percentile' for this class.
        :param symmetric: make the tensor range symmetric around 0.
        :param num_bins: number of bins for each newly created histogram.
        :param percentile: percentage of values the range must cover, in [0, 100].
        """
        super().__init__(
            model_path,
            op_types_to_calibrate=op_types_to_calibrate,
            augmented_model_path=augmented_model_path,
            use_external_data_format=use_external_data_format,
            method=method,
            symmetric=symmetric,
            num_bins=num_bins,
            percentile=percentile,
        )
class DistributionCalibrater(HistogramCalibrater):
    """Histogram-based calibrater summarizing each tensor's distribution.

    Thin wrapper over :class:`HistogramCalibrater` preconfigured with
    method='distribution'.
    """

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path="augmented_model.onnx",
        use_external_data_format=False,
        method="distribution",
        num_bins=128,
        scenario="same",
    ):
        """
        :param model_path: path of the ONNX model to calibrate.
        :param op_types_to_calibrate: operator types to calibrate; all float32/float16 tensors by default.
        :param augmented_model_path: where the augmented model is saved.
        :param use_external_data_format: store the model with external data (models >= 2Gb).
        :param method: histogram method name; 'distribution' for this class.
        :param num_bins: number of bins for each newly created histogram.
        :param scenario: for float 8 only. 'same': weights and float 8 follow
            the same distribution; 'p3': weights follow a gaussian law and
            float 8 ~ X^3 where X is a gaussian law.
        """
        super().__init__(
            model_path,
            op_types_to_calibrate=op_types_to_calibrate,
            augmented_model_path=augmented_model_path,
            use_external_data_format=use_external_data_format,
            method=method,
            num_bins=num_bins,
            scenario=scenario,
        )
class CalibrationDataCollector(metaclass=abc.ABCMeta):
    """Abstract interface for calibration statistics collectors.

    Concrete collectors accumulate statistics from batches of tensor data
    and later reduce them into a per-tensor calibration result used by
    calibration-based quantization.
    """

    @abc.abstractmethod
    def collect(self, name_to_arr):
        """Accumulate statistics from *name_to_arr* (tensor name -> NDArray data)."""
        raise NotImplementedError

    @abc.abstractmethod
    def compute_collection_result(self):
        """Reduce everything collected so far into the optimal final result."""
        raise NotImplementedError
class HistogramCollector(CalibrationDataCollector):
"""
Collecting histogram for each tensor. Percentile and Entropy method are supported.
ref: https://github.com//apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py
ref: https://docs.nvidia.com/deeplearning/tensorrt/pytorch-quantization-toolkit/docs/_modules/
pytorch_quantization/calib/histogram.html
"""
    def __init__(self, method, symmetric, num_bins, num_quantized_bins,
        percentile, scenario):
        # tensor name -> histogram data; the tuple layout depends on the
        # collection path (see collect_absolute_value / collect_value).
        self.histogram_dict = {}
        self.method = method  # 'entropy', 'percentile' or 'distribution' (dispatched in collect)
        self.symmetric = symmetric  # percentile method: collect |values| when True
        self.num_bins = num_bins  # bin count used for newly created histograms
        self.num_quantized_bins = num_quantized_bins  # entropy method only
        self.percentile = percentile  # percentile method only, in [0, 100]
        self.scenario = scenario  # distribution method only: 'same' or 'p3'
    def get_histogram_dict(self):
        # Accessor for the raw per-tensor histogram data collected so far.
        return self.histogram_dict
def collect(self, name_to_arr):
print('Collecting tensor data and making histogram ...')
if self.method in {'distribution', 'entropy'}:
return self.collect_value(name_to_arr)
elif self.method == 'percentile':
if self.symmetric:
return self.collect_absolute_value(name_to_arr)
else:
return self.collect_value(name_to_arr)
else:
raise ValueError(
"Only 'entropy', 'percentile' or 'distribution' methods are supported"
)
    def collect_absolute_value(self, name_to_arr):
        """
        Collect histogram on absolute value
        """
        for tensor, data_arr in name_to_arr.items():
            data_arr = np.asarray(data_arr)
            data_arr = data_arr.flatten()
            if data_arr.size > 0:
                min_value = np.min(data_arr)
                max_value = np.max(data_arr)
            else:
                # Empty batch: record a degenerate 0 range.
                min_value = 0
                max_value = 0
            # The histogram is built over |values|; the signed min/max are
            # kept alongside it in the entry tuple.
            data_arr = np.absolute(data_arr)
            if tensor not in self.histogram_dict:
                hist, hist_edges = np.histogram(data_arr, bins=self.num_bins)
                # Entry layout: (hist, hist_edges, min_value, max_value).
                self.histogram_dict[tensor
                    ] = hist, hist_edges, min_value, max_value
            else:
                old_histogram = self.histogram_dict[tensor]
                old_min = old_histogram[2]
                old_max = old_histogram[3]
                old_hist = old_histogram[0]
                old_hist_edges = old_histogram[1]
                temp_amax = np.max(data_arr)
                if temp_amax > old_hist_edges[-1]:
                    # New data exceeds the current edges: extend with bins of
                    # the same width until temp_amax is covered.
                    width = old_hist_edges[1] - old_hist_edges[0]
                    new_bin_edges = np.arange(old_hist_edges[-1] + width,
                        temp_amax + width, width)
                    old_hist_edges = np.hstack((old_hist_edges, new_bin_edges))
                # Re-bin the new data on the (possibly extended) edges, then
                # add the previous counts back into the leading bins.
                hist, hist_edges = np.histogram(data_arr, bins=old_hist_edges)
                hist[:len(old_hist)] += old_hist
                self.histogram_dict[tensor] = hist, hist_edges, min(old_min,
                    min_value), max(old_max, max_value)
    def collect_value(self, name_to_arr):
        """
        Collect histogram on real value
        """
        for tensor, data_arr in name_to_arr.items():
            data_arr = np.asarray(data_arr)
            data_arr = data_arr.flatten()
            if data_arr.size > 0:
                min_value = np.min(data_arr)
                max_value = np.max(data_arr)
            else:
                # Empty batch: record a degenerate 0 range.
                min_value = 0
                max_value = 0
            # Histograms are kept symmetric around 0: range (-threshold, threshold).
            threshold = max(abs(min_value), abs(max_value))
            if tensor in self.histogram_dict:
                old_histogram = self.histogram_dict[tensor]
                self.histogram_dict[tensor] = self.merge_histogram(
                    old_histogram, data_arr, min_value, max_value, threshold)
            else:
                hist, hist_edges = np.histogram(data_arr, self.num_bins,
                    range=(-threshold, threshold))
                # Entry layout: (hist, hist_edges, min, max, threshold).
                self.histogram_dict[tensor
                    ] = hist, hist_edges, min_value, max_value, threshold
    def merge_histogram(self, old_histogram, data_arr, new_min, new_max,
        new_threshold):
        # Fold a new batch into an existing symmetric histogram, widening the
        # binning when the new threshold exceeds the old one. Returns a new
        # (hist, hist_edges, min, max, threshold) tuple.
        old_hist, old_hist_edges, old_min, old_max, old_threshold = (
            old_histogram)
        if new_threshold <= old_threshold:
            # New data fits inside the old range: re-bin on the old range and
            # accumulate the counts.
            new_hist, _ = np.histogram(data_arr, len(old_hist), range=(-
                old_threshold, old_threshold))
            return new_hist + old_hist, old_hist_edges, min(old_min, new_min
                ), max(old_max, new_max), old_threshold
        else:
            if old_threshold == 0:
                # Degenerate old histogram (zero range): re-bin over the new
                # range and add the old counts bin-for-bin.
                hist, hist_edges = np.histogram(data_arr, len(old_hist),
                    range=(-new_threshold, new_threshold))
                hist += old_hist
            else:
                # Grow symmetrically by whole bins of the old stride so the
                # old counts can be copied into the middle unchanged.
                old_num_bins = len(old_hist)
                old_stride = 2 * old_threshold / old_num_bins
                half_increased_bins = int((new_threshold - old_threshold) //
                    old_stride + 1)
                new_num_bins = old_num_bins + 2 * half_increased_bins
                new_threshold = (half_increased_bins * old_stride +
                    old_threshold)
                hist, hist_edges = np.histogram(data_arr, new_num_bins,
                    range=(-new_threshold, new_threshold))
                hist[half_increased_bins:new_num_bins - half_increased_bins
                    ] += old_hist
            return hist, hist_edges, min(old_min, new_min), max(old_max,
                new_max), new_threshold
def compute_collection_result(self):
if not self.histogram_dict or len(self.histogram_dict) == 0:
raise ValueError(
'Histogram has not been collected. Please run collect() first.'
)
print(
f'Finding optimal threshold for each tensor using {self.method} algorithm ...'
)
if self.method == 'entropy':
return self.compute_entropy()
elif self.method == 'percentile':
return self.compute_percentile()
elif self.method == 'distribution':
return self.compute_distribution()
else:
raise ValueError(
"Only 'entropy', 'percentile' or 'distribution' methods are supported"
)
    def compute_percentile(self):
        # Derive (low, high) thresholds per tensor such that `percentile`
        # percent of the observed mass falls inside them.
        if self.percentile < 0 or self.percentile > 100:
            raise ValueError(
                'Invalid percentile. Must be in range 0 <= percentile <= 100.')
        histogram_dict = self.histogram_dict
        percentile = self.percentile
        thresholds_dict = {}
        print(f'Number of tensors : {len(histogram_dict)}')
        print(f'Number of histogram bins : {self.num_bins}')
        print(f'Percentile : ({100.0 - percentile},{percentile})')
        for tensor, histogram in histogram_dict.items():
            hist = histogram[0]
            hist_edges = histogram[1]
            total = hist.sum()
            cdf = np.cumsum(hist / total)
            if self.symmetric:
                # Symmetric: one-sided cut, mirrored to get the lower bound.
                idx_right = np.searchsorted(cdf, percentile / 100.0)
                thresholds_dict[tensor] = -float(hist_edges[idx_right]), float(
                    hist_edges[idx_right])
            else:
                # Asymmetric: cut half of the excluded mass on each tail.
                percent_to_cut_one_side = (100.0 - percentile) / 200.0
                idx_right = np.searchsorted(cdf, 1.0 - percent_to_cut_one_side)
                idx_left = np.searchsorted(cdf, percent_to_cut_one_side)
                thresholds_dict[tensor] = float(hist_edges[idx_left]), float(
                    hist_edges[idx_right])
            min_value = histogram[2]
            max_value = histogram[3]
            # Clamp the thresholds to the actually observed min/max.
            if thresholds_dict[tensor][0] < min_value:
                thresholds_dict[tensor] = min_value, thresholds_dict[tensor][1]
            if thresholds_dict[tensor][1] > max_value:
                thresholds_dict[tensor] = thresholds_dict[tensor][0], max_value
            # Append the first two histogram entries to the stored tuple.
            thresholds_dict[tensor] = *thresholds_dict[tensor], *hist[:2]
            if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):
                apply_plot(hist, hist_edges)
        return thresholds_dict
    def compute_entropy(self):
        # Pick per-tensor thresholds by minimizing KL divergence between the
        # original and quantized distributions (see get_entropy_threshold).
        histogram_dict = self.histogram_dict
        num_quantized_bins = self.num_quantized_bins
        thresholds_dict = {}
        print(f'Number of tensors : {len(histogram_dict)}')
        print(
            'Number of histogram bins : {} (The number may increase depends on the data it collects)'
            .format(self.num_bins))
        print(f'Number of quantized bins : {self.num_quantized_bins}')
        for tensor, histogram in histogram_dict.items():
            optimal_threshold = self.get_entropy_threshold(histogram,
                num_quantized_bins)
            thresholds_dict[tensor] = optimal_threshold
            # Store (low, high, hist, hist_edges) for downstream consumers.
            thresholds_dict[tensor] = *optimal_threshold, *histogram[:2]
            if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):
                apply_plot(histogram[0], histogram[1])
        return thresholds_dict
@staticmethod
def _avg_std(hist, hist_edges, power=1):
if power <= 0:
raise ValueError(f'power={power} <= 0 is invalid.')
values = (hist_edges[:-1] + hist_edges[1:]) * 0.5
if power == 1:
avg = (hist * values).sum() / hist.sum()
std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5
return avg, std
if int(power) == power and int(power) % 2 == 1:
avg = (hist * values ** power).sum() / hist.sum()
std = ((hist * (values ** power - avg) ** 2).sum() / hist.sum()
) ** 0.5
return avg, std
fact = np.abs(values) / values
fact[np.isnan(fact)] = 1
fact[np.isinf(fact)] = 1
values = np.abs(values) ** power * fact
avg = (hist * values).sum() / hist.sum()
std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5
return avg, std
    def compute_distribution(self):
        # Summarize each tensor's histogram as (avg, std) of the bin centers,
        # with the power transform selected by self.scenario.
        if self.num_bins < 512:
            raise ValueError(
                'Invalid num_bins. Must be in range 512 <= num_bins.')
        histogram_dict = self.histogram_dict
        thresholds_dict = {}
        print(f'Number of tensors : {len(histogram_dict)}')
        print(f'Number of histogram bins : {self.num_bins}')
        print(f'Scenario : {self.scenario!r})')
        for tensor, histogram in histogram_dict.items():
            hist = histogram[0]
            hist_edges = histogram[1]
            if self.scenario == 'same':
                avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1)
            elif self.scenario == 'p3':
                # 'p3': cube-root transform of the bin centers.
                avg_coef, std_coef = self._avg_std(hist, hist_edges, power=
                    1.0 / 3.0)
            else:
                raise ValueError("Invalid scenario. Must be in {'same', 'p3'}."
                    )
            thresholds_dict[tensor] = TensorData(avg=avg_coef, std=std_coef,
                hist=hist, hist_edges=hist_edges)
            if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):
                apply_plot(hist, hist_edges)
        return thresholds_dict
    def get_entropy_threshold(self, histogram, num_quantized_bins):
        """Given a dataset, find the optimal threshold for quantizing it.
        The reference distribution is `q`, and the candidate distribution is `p`.
        `q` is a truncated version of the original distribution.
        Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf

        :param histogram: tuple (hist, hist_edges, min, max[, threshold]) as stored
            by collect_value().
        :param num_quantized_bins: number of bins of the quantized distribution.
        :return: (low, high) threshold pair minimizing KL divergence, clamped to
            the observed data range.
        """
        import copy
        from scipy.stats import entropy
        hist = histogram[0]
        hist_edges = histogram[1]
        num_bins = hist.size
        # Histograms from collect_value are centered on zero, so the zero bin
        # is in the middle; windows grow symmetrically around it.
        zero_bin_index = num_bins // 2
        num_half_quantized_bin = num_quantized_bins // 2
        kl_divergence = np.zeros(zero_bin_index - num_half_quantized_bin + 1)
        thresholds = [(0, 0) for i in range(kl_divergence.size)]
        for i in range(num_half_quantized_bin, zero_bin_index + 1, 1):
            start_index = zero_bin_index - i
            end_index = (zero_bin_index + i + 1 if zero_bin_index + i + 1 <=
                num_bins else num_bins)
            thresholds[i - num_half_quantized_bin] = float(hist_edges[
                start_index]), float(hist_edges[end_index])
            sliced_distribution = copy.deepcopy(hist[start_index:end_index])
            # p: reference distribution with out-of-window counts folded into the end bins.
            p = sliced_distribution.copy()
            left_outliers_count = sum(hist[:start_index])
            right_outliers_count = sum(hist[end_index:])
            p[0] += left_outliers_count
            p[-1] += right_outliers_count
            nonzeros = (p != 0).astype(np.int64)
            # q: p down-sampled to num_quantized_bins, then expanded back,
            # spreading each quantized count over its non-zero source bins.
            quantized_bins = np.zeros(num_quantized_bins, dtype=np.int64)
            num_merged_bins = sliced_distribution.size // num_quantized_bins
            for index in range(num_quantized_bins):
                start = index * num_merged_bins
                end = start + num_merged_bins
                quantized_bins[index] = sum(sliced_distribution[start:end])
            quantized_bins[-1] += sum(sliced_distribution[
                num_quantized_bins * num_merged_bins:])
            q = np.zeros(p.size, dtype=np.int64)
            for index in range(num_quantized_bins):
                start = index * num_merged_bins
                end = start + num_merged_bins
                norm = sum(nonzeros[start:end])
                if norm != 0:
                    q[start:end] = float(quantized_bins[index]) / float(norm)
            p = smooth_distribution(p)
            q = smooth_distribution(q)
            # smooth_distribution may fail to return an array (degenerate input);
            # treat that candidate as infinitely bad.
            if isinstance(q, np.ndarray):
                kl_divergence[i - num_half_quantized_bin] = entropy(p, q)
            else:
                kl_divergence[i - num_half_quantized_bin] = float('inf')
        min_kl_divergence_idx = np.argmin(kl_divergence)
        optimal_threshold = thresholds[min_kl_divergence_idx]
        min_value = histogram[2]
        max_value = histogram[3]
        # Clamp the threshold to the actually observed value range.
        if optimal_threshold[0] < min_value:
            optimal_threshold = min_value, optimal_threshold[1]
        if optimal_threshold[1] > max_value:
            optimal_threshold = optimal_threshold[0], max_value
        return optimal_threshold
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TensorsData:
def __init__(self, calibration_method, data: Dict[str, Union[TensorData,
Tuple]]):
self.calibration_method = calibration_method
self.data = {}
for k, v in data.items():
if not isinstance(k, str):
raise TypeError(f'Keys must be strings not {type(k)}.')
if isinstance(v, tuple):
if calibration_method == CalibrationMethod.MinMax and len(v
) == 2:
self.data[k] = TensorData(lowest=v[0], highest=v[1])
continue
if len(v) == 4:
self.data[k] = TensorData(lowest=v[0], highest=v[1],
histogram=v[2], bins=v[3])
continue
raise TypeError(
f'Unexpected tuple for {k:r}, it has {len(v)} elements: {v}.'
)
if not isinstance(v, TensorData):
raise TypeError(f'Values must be TensorData not {type(v)}.')
self.data[k] = v
    def __iter__(self):
        """Iterate over the tensor names (keys of the underlying dict)."""
        yield from self.data
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class CalibrationMethod(Enum):
    """Calibration algorithms supported by the calibraters in this module."""
    MinMax = 0  # global or moving-average min/max (MinMaxCalibrater)
    Entropy = 1  # KL-divergence-based thresholds (EntropyCalibrater)
    Percentile = 2  # percentile clipping (PercentileCalibrater)
    Distribution = 3  # avg/std histogram summary (DistributionCalibrater)
class CalibrationDataReader(metaclass=abc.ABCMeta):
    """Iterator-style source of calibration inputs.

    Subclasses implement :meth:`get_next`, returning one input-feed dict per
    call and ``None`` when exhausted; this base class layers the iterator
    protocol on top. Any object exposing a callable ``get_next`` is accepted
    as a virtual subclass.
    """

    @classmethod
    def __subclasshook__(cls, subclass):
        # Duck-typed virtual subclass check: a callable `get_next` is enough.
        if hasattr(subclass, 'get_next') and callable(subclass.get_next):
            return True
        return NotImplemented

    @abc.abstractmethod
    def get_next(self) -> dict:
        """generate the input data dict for ONNXinferenceSession run"""
        raise NotImplementedError

    def __iter__(self):
        return self

    def __next__(self):
        batch = self.get_next()
        if batch is None:
            raise StopIteration
        return batch
class CalibraterBase:
    """Common machinery for all calibraters.

    A calibrater augments an ONNX model so statistics can be gathered while
    representative data is fed through it, then reduces those statistics to
    per-tensor ranges. Subclasses implement :meth:`augment_graph`,
    :meth:`collect_data` and :meth:`compute_data`.
    """

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path='augmented_model.onnx',
        symmetric=False,
        use_external_data_format=False,
    ):
        """
        :param model_path: ONNX model to calibrate. It should be a model file path
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        """
        if isinstance(model_path, str):
            self.model = load_model_with_shape_infer(Path(model_path))
        elif isinstance(model_path, Path):
            self.model = load_model_with_shape_infer(model_path)
        else:
            raise ValueError('model_path should be model path.')

        self.op_types_to_calibrate = op_types_to_calibrate
        self.augmented_model_path = augmented_model_path
        self.symmetric = symmetric
        self.use_external_data_format = use_external_data_format

        self.augment_model = None
        self.infer_session = None
        self.execution_providers = ['CPUExecutionProvider']

    def set_execution_providers(self, execution_providers=None):
        """
        reset the execution providers to execute the collect_data. It triggers to re-creating inference session.

        :param execution_providers: list of execution provider names; defaults
            to ``["CPUExecutionProvider"]``.
        """
        # BUG FIX: the default used to be a mutable list literal shared between
        # calls; a None sentinel is backward compatible and avoids aliasing.
        if execution_providers is None:
            execution_providers = ['CPUExecutionProvider']
        self.execution_providers = execution_providers
        self.create_inference_session()

    def create_inference_session(self):
        """
        create an OnnxRuntime InferenceSession.
        """
        sess_options = onnxruntime.SessionOptions()
        # Graph optimizations are disabled — presumably so the augmented
        # statistics nodes survive session creation (TODO confirm).
        sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL
        self.infer_session = onnxruntime.InferenceSession(
            self.augmented_model_path,
            sess_options=sess_options,
            providers=self.execution_providers,
        )

    def select_tensors_to_calibrate(self, model: ModelProto):
        """
        select input/output tensors of candidate nodes to calibrate.
        returns:
            tensors (set): set of tensor name.
            value_infos (dict): tensor name to value info.
        """
        value_infos = {vi.name: vi for vi in model.graph.value_info}
        value_infos.update({ot.name: ot for ot in model.graph.output})
        value_infos.update({it.name: it for it in model.graph.input})
        initializer = {init.name for init in model.graph.initializer}

        tensors_to_calibrate = set()
        tensor_type_to_calibrate = {TensorProto.FLOAT}  # only float32 tensors are selected here

        for node in model.graph.node:
            # An empty op_types_to_calibrate means "calibrate every node".
            if not self.op_types_to_calibrate or node.op_type in self.op_types_to_calibrate:
                for tensor_name in itertools.chain(node.input, node.output):
                    if tensor_name in value_infos:
                        vi = value_infos[tensor_name]
                        if (
                            vi.type.HasField('tensor_type')
                            and vi.type.tensor_type.elem_type in tensor_type_to_calibrate
                            and tensor_name not in initializer
                        ):
                            tensors_to_calibrate.add(tensor_name)
        return tensors_to_calibrate, value_infos

    def get_augment_model(self):
        """
        return: augmented onnx model. Call after calling augment_graph
        """
        return self.model

    def augment_graph(self):
        """
        abstract method: augment the input model to prepare for collecting data. It will:
            1. augment the model to be able to collect desired statistics data
            2. save augmented model to augmented_model_paths
        """
        raise NotImplementedError

    def collect_data(self, data_reader: CalibrationDataReader):
        """
        abstract method: collect the tensors that will be used for range computation. It can be called multiple times.
        """
        raise NotImplementedError

    def compute_data(self) -> TensorsData:
        """
        abstract method: compute data based on the calibration method stored in TensorsData
        """
        raise NotImplementedError
class MinMaxCalibrater(CalibraterBase):
    """Calibrater that records each tensor's (possibly moving-average) min/max.

    The graph is augmented with a ReduceMin/ReduceMax pair per selected tensor,
    so running the augmented model yields the per-tensor ranges directly.
    """

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path='augmented_model.onnx',
        symmetric=False,
        use_external_data_format=False,
        moving_average=False,
        averaging_constant=0.01,
    ):
        """
        :param model_path: ONNX model to calibrate. It is a model path
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        :param moving_average: compute the moving average of the minimum and maximum values instead of the global minimum and maximum.
        :param averaging_constant: constant smoothing factor to use when computing the moving average.
        """
        super().__init__(
            model_path,
            op_types_to_calibrate=op_types_to_calibrate,
            augmented_model_path=augmented_model_path,
            symmetric=symmetric,
            use_external_data_format=use_external_data_format,
        )
        self.intermediate_outputs = []
        self.calibrate_tensors_range = None
        self.num_model_outputs = len(self.model.graph.output)
        self.model_original_outputs = {output.name for output in self.model.graph.output}
        self.moving_average = moving_average
        if moving_average and (averaging_constant < 0 or averaging_constant > 1):
            raise ValueError('Invalid averaging constant, which should not be < 0 or > 1.')
        self.averaging_constant = averaging_constant

    def augment_graph(self):
        """
        Adds ReduceMin and ReduceMax nodes to all quantization_candidates op type nodes in
        model and ensures their outputs are stored as part of the graph output
        :return: augmented ONNX model
        """
        tensors, _ = self.select_tensors_to_calibrate(self.model)
        reshape_shape_name = str(uuid.uuid4())
        reshape_shape = numpy_helper.from_array(np.array([1], dtype=np.int64), reshape_shape_name)
        self.model.graph.initializer.append(reshape_shape)

        def add_reduce_min_max(tensor_name, reduce_op_name):
            # Reduce to a scalar, then Reshape to [1] so every added output has
            # a fixed, known shape.
            keepdims = 1
            reduce_output = tensor_name + '_' + reduce_op_name
            intermediate_output = reduce_output + '_Reshape'
            reduce_node = onnx.helper.make_node(
                reduce_op_name, [tensor_name], [intermediate_output], keepdims=keepdims, name=reduce_output
            )
            reshape_node = onnx.helper.make_node(
                'Reshape',
                inputs=[intermediate_output, reshape_shape_name],
                outputs=[reduce_output],
                name=intermediate_output,
            )
            self.model.graph.node.extend([reduce_node, reshape_node])
            self.model.graph.output.append(helper.make_tensor_value_info(reduce_output, TensorProto.FLOAT, [1]))

        for tensor in tensors:
            add_reduce_min_max(tensor, 'ReduceMin')
            add_reduce_min_max(tensor, 'ReduceMax')
        onnx.save(self.model, self.augmented_model_path, save_as_external_data=self.use_external_data_format)

    def clear_collected_data(self):
        """Drop cached inference outputs between collect_data() calls."""
        self.intermediate_outputs = []

    def collect_data(self, data_reader: CalibrationDataReader):
        """Run every batch from ``data_reader`` through the augmented model and
        fold the observed ranges into ``self.calibrate_tensors_range``."""
        while True:
            inputs = data_reader.get_next()
            if not inputs:
                break
            self.intermediate_outputs.append(self.infer_session.run(None, inputs))
        if len(self.intermediate_outputs) == 0:
            raise ValueError('No data is collected.')
        t = self.compute_data()
        if not isinstance(t, TensorsData):
            raise TypeError(f'compute_data must return a TensorsData not {type(t)}.')
        self.clear_collected_data()

    def merge_range(self, old_range, new_range):
        """Merge a freshly computed range into the running one.

        Uses a global min/max by default, or an exponential moving average when
        ``moving_average`` is set.
        """
        if not old_range:
            return new_range
        # NOTE(review): assumes the range containers expose dict-like
        # items()/indexing — confirm against TensorsData's definition.
        for key, value in old_range.items():
            if self.moving_average:
                # Exponential moving average toward the new observation.
                min_value = value[0] + self.averaging_constant * (new_range[key][0] - value[0])
                max_value = value[1] + self.averaging_constant * (new_range[key][1] - value[1])
            else:
                min_value = min(value[0], new_range[key][0])
                max_value = max(value[1], new_range[key][1])
            new_range[key] = (min_value, max_value)
        return new_range

    def compute_data(self) -> TensorsData:
        """
        Compute the min-max range of tensor
        :return: dictionary mapping: {added node names: (ReduceMin, ReduceMax) pairs }
        """
        if len(self.intermediate_outputs) == 0:
            return self.calibrate_tensors_range
        output_names = [self.infer_session.get_outputs()[i].name for i in range(len(self.intermediate_outputs[0]))]
        output_dicts_list = [
            dict(zip(output_names, intermediate_output)) for intermediate_output in self.intermediate_outputs
        ]
        merged_output_dict = {}
        for d in output_dicts_list:
            for k, v in d.items():
                merged_output_dict.setdefault(k, []).append(v)
        # Outputs appended by augment_graph follow the model's own outputs, in
        # (ReduceMin, ReduceMax) pairs per tensor; strip the '_ReduceX' suffix
        # to recover the tensor name.
        added_output_names = output_names[self.num_model_outputs:]
        calibrate_tensor_names = [added_output_names[i].rpartition('_')[0] for i in range(0, len(added_output_names), 2)]
        merged_added_output_dict = {
            i: merged_output_dict[i] for i in merged_output_dict if i not in self.model_original_outputs
        }
        pairs = []
        for i in range(0, len(added_output_names), 2):
            min_value = 0
            max_value = 0
            if self.moving_average:
                min_value_array = np.mean(merged_added_output_dict[added_output_names[i]], axis=0)
                max_value_array = np.mean(merged_added_output_dict[added_output_names[i + 1]], axis=0)
            else:
                min_value_array = min(merged_added_output_dict[added_output_names[i]])
                max_value_array = max(merged_added_output_dict[added_output_names[i + 1]])
            # IDIOM FIX: isinstance() instead of `type(...) == int`.
            if isinstance(min_value_array, int) or min_value_array.size > 0:
                min_value = float(min_value_array)
            if isinstance(max_value_array, int) or max_value_array.size > 0:
                max_value = float(max_value_array)
            if self.symmetric:
                max_absolute_value = max(abs(min_value), abs(max_value))
                pairs.append((-max_absolute_value, max_absolute_value))
            else:
                pairs.append((min_value, max_value))
        new_calibrate_tensors_range = TensorsData(CalibrationMethod.MinMax, dict(zip(calibrate_tensor_names, pairs)))
        if self.calibrate_tensors_range:
            self.calibrate_tensors_range = self.merge_range(self.calibrate_tensors_range, new_calibrate_tensors_range)
        else:
            self.calibrate_tensors_range = new_calibrate_tensors_range
        return self.calibrate_tensors_range
class HistogramCalibrater(CalibraterBase):
    """Base class for histogram-based calibraters (entropy/percentile/distribution).

    Collects full tensor values through a HistogramCollector, then reduces the
    accumulated histograms in compute_data().
    """

    def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:
        Optional[Sequence[str]]=None, augmented_model_path=
        'augmented_model.onnx', use_external_data_format=False, method=
        'percentile', symmetric=False, num_bins=128, num_quantized_bins=
        2048, percentile=99.999, scenario='same'):
        """
        :param model_path: ONNX model to calibrate. It is a model path.
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        :param method: A string. One of ['entropy', 'percentile', 'distribution'].
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param num_bins: number of bins to create a new histogram for collecting tensor values. Default 128.
        :param num_quantized_bins: number of quantized bins. Default 2048.
        :param percentile: A float number between [0, 100]. Default 99.999.
        :param scenario: see :class:`DistributionCalibrater`
        """
        super().__init__(model_path, op_types_to_calibrate=
            op_types_to_calibrate, augmented_model_path=
            augmented_model_path, symmetric=symmetric,
            use_external_data_format=use_external_data_format)
        self.intermediate_outputs = []
        self.calibrate_tensors_range = None
        self.num_model_outputs = len(self.model.graph.output)
        self.model_original_outputs = {output.name for output in self.model
            .graph.output}
        # Created lazily on the first collect_data() call.
        self.collector = None
        self.method = method
        self.num_bins = num_bins
        self.num_quantized_bins = num_quantized_bins
        self.percentile = percentile
        # Filled in by augment_graph(); collect_data() relies on it.
        self.tensors_to_calibrate = None
        self.scenario = scenario
    def augment_graph(self):
        """
        make all quantization_candidates op type nodes as part of the graph output.
        :return: augmented ONNX model
        """
        self.tensors_to_calibrate, value_infos = (self.
            select_tensors_to_calibrate(self.model))
        for tensor in self.tensors_to_calibrate:
            if tensor not in self.model_original_outputs:
                self.model.graph.output.append(value_infos[tensor])
        onnx.save(self.model, self.augmented_model_path,
            save_as_external_data=self.use_external_data_format)
    def clear_collected_data(self):
        """Drop cached inference outputs between collect_data() calls."""
        self.intermediate_outputs = []
    def collect_data(self, data_reader: CalibrationDataReader):
        """
        Collects operators' tensors and folds them into the per-tensor histograms.
        Must be called after augment_graph() and session creation.
        """
        while True:
            inputs = data_reader.get_next()
            if not inputs:
                break
            self.intermediate_outputs.append(self.infer_session.run(None,
                inputs))
        if len(self.intermediate_outputs) == 0:
            raise ValueError('No data is collected.')
        output_names = [self.infer_session.get_outputs()[i].name for i in
            range(len(self.intermediate_outputs[0]))]
        output_dicts_list = [dict(zip(output_names, intermediate_output)) for
            intermediate_output in self.intermediate_outputs]
        # Group outputs by tensor name across batches.
        merged_dict = {}
        for d in output_dicts_list:
            for k, v in d.items():
                merged_dict.setdefault(k, []).append(v)
        # Keep only the tensors selected for calibration.
        clean_merged_dict = {i: merged_dict[i] for i in merged_dict if i in
            self.tensors_to_calibrate}
        if not self.collector:
            self.collector = HistogramCollector(method=self.method,
                symmetric=self.symmetric, num_bins=self.num_bins,
                num_quantized_bins=self.num_quantized_bins, percentile=self
                .percentile, scenario=self.scenario)
        self.collector.collect(clean_merged_dict)
        self.clear_collected_data()
    def compute_data(self) ->TensorsData:
        """
        Compute the min-max range of tensor
        :return: dictionary mapping: {tensor name: (min value, max value)}
        """
        if not self.collector:
            raise ValueError(
                "No collector created and can't generate calibration data.")
        # Tag the result with the calibration method of the concrete subclass.
        if isinstance(self, EntropyCalibrater):
            cal = CalibrationMethod.Entropy
        elif isinstance(self, PercentileCalibrater):
            cal = CalibrationMethod.Percentile
        elif isinstance(self, DistributionCalibrater):
            cal = CalibrationMethod.Distribution
        else:
            raise TypeError(
                f'Unknown calibrater {type(self)}. This method must be overwritten.'
                )
        return TensorsData(cal, self.collector.compute_collection_result())
class EntropyCalibrater(HistogramCalibrater):
    """Histogram calibrater that chooses thresholds by minimizing KL divergence
    (method='entropy')."""

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path='augmented_model.onnx',
        use_external_data_format=False,
        method='entropy',
        symmetric=False,
        num_bins=128,
        num_quantized_bins=128,
    ):
        """
        :param model_path: ONNX model to calibrate. It is a model path
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        :param method: A string. One of ['entropy', 'percentile', 'distribution'].
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param num_bins: number of bins to create a new histogram for collecting tensor values. Default 128.
        :param num_quantized_bins: number of quantized bins. Default 128.
        """
        super().__init__(
            model_path,
            op_types_to_calibrate,
            augmented_model_path,
            use_external_data_format,
            method=method,
            symmetric=symmetric,
            num_bins=num_bins,
            num_quantized_bins=num_quantized_bins,
        )
class PercentileCalibrater(HistogramCalibrater):
    """Histogram calibrater that clips ranges at a given percentile
    (method='percentile')."""

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path='augmented_model.onnx',
        use_external_data_format=False,
        method='percentile',
        symmetric=False,
        num_bins=2048,
        percentile=99.999,
    ):
        """
        :param model_path: ONNX model to calibrate. It is a model path
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        :param method: A string. One of ['entropy', 'percentile', 'distribution'].
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param num_bins: number of bins for the collected histograms. Default 2048.
        :param percentile: A float number between [0, 100]. Default 99.999.
        """
        super().__init__(
            model_path,
            op_types_to_calibrate,
            augmented_model_path,
            use_external_data_format,
            method=method,
            symmetric=symmetric,
            num_bins=num_bins,
            percentile=percentile,
        )
class DistributionCalibrater(HistogramCalibrater):
    """Histogram calibrater that summarizes each tensor as (avg, std)
    (method='distribution'), intended for float 8 quantization."""

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path='augmented_model.onnx',
        use_external_data_format=False,
        method='distribution',
        num_bins=128,
        scenario='same',
    ):
        """
        :param model_path: ONNX model to calibrate. It is a model path
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        :param method: A string. One of ['entropy', 'percentile', 'distribution'].
        :param num_bins: number of bins to create a new histogram for collecting tensor values.
        :param scenario: for float 8 only, if `scenario="same"`,
            the algorithm weights and float 8 follow the same distribution,
            if `scenario="p3"`, it assumes the weights follow
            a gaussian law and float 8 ~ X^3 where X is a gaussian law
        """
        super().__init__(
            model_path,
            op_types_to_calibrate,
            augmented_model_path,
            use_external_data_format,
            method=method,
            num_bins=num_bins,
            scenario=scenario,
        )
class CalibrationDataCollector(metaclass=abc.ABCMeta):
    """
    Base class for collecting data for calibration-based quantization.

    Concrete implementation in this module: HistogramCollector.
    """
    @abc.abstractmethod
    def collect(self, name_to_arr):
        """
        Generate informative data based on given data.
            name_to_arr : dict
                tensor name to NDArray data
        """
        raise NotImplementedError
    @abc.abstractmethod
    def compute_collection_result(self):
        """
        Get the optimal result among collection data.
        """
        raise NotImplementedError
class HistogramCollector(CalibrationDataCollector):
    """
    Collecting histogram for each tensor. Percentile and Entropy method are supported.

    ref: https://github.com//apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py
    ref: https://docs.nvidia.com/deeplearning/tensorrt/pytorch-quantization-toolkit/docs/_modules/
         pytorch_quantization/calib/histogram.html
    """

    def __init__(self, method, symmetric, num_bins, num_quantized_bins, percentile, scenario):
        # tensor name -> accumulated histogram tuple:
        #   (hist, hist_edges, min, max)            for absolute-value histograms,
        #   (hist, hist_edges, min, max, threshold) for signed-value histograms.
        self.histogram_dict = {}
        self.method = method
        self.symmetric = symmetric
        self.num_bins = num_bins
        self.num_quantized_bins = num_quantized_bins
        self.percentile = percentile
        self.scenario = scenario

    def get_histogram_dict(self):
        """Return the raw histogram accumulator (see __init__ for its layout)."""
        return self.histogram_dict

    def collect(self, name_to_arr):
        """Accumulate histograms for each entry of ``name_to_arr`` (name -> array-like)."""
        print('Collecting tensor data and making histogram ...')
        # 'entropy' and 'distribution' always need signed-value histograms;
        # 'percentile' uses absolute values only in the symmetric case.
        if self.method in {'distribution', 'entropy'}:
            return self.collect_value(name_to_arr)
        elif self.method == 'percentile':
            if self.symmetric:
                return self.collect_absolute_value(name_to_arr)
            else:
                return self.collect_value(name_to_arr)
        else:
            raise ValueError("Only 'entropy', 'percentile' or 'distribution' methods are supported")

    def collect_absolute_value(self, name_to_arr):
        """
        Collect histogram on absolute value
        """
        for tensor, data_arr in name_to_arr.items():
            data_arr = np.asarray(data_arr)
            data_arr = data_arr.flatten()
            if data_arr.size > 0:
                min_value = np.min(data_arr)
                max_value = np.max(data_arr)
            else:
                min_value = 0
                max_value = 0
            data_arr = np.absolute(data_arr)  # only consider absolute values
            if tensor not in self.histogram_dict:
                hist, hist_edges = np.histogram(data_arr, bins=self.num_bins)
                self.histogram_dict[tensor] = (hist, hist_edges, min_value, max_value)
            else:
                old_histogram = self.histogram_dict[tensor]
                old_min = old_histogram[2]
                old_max = old_histogram[3]
                old_hist = old_histogram[0]
                old_hist_edges = old_histogram[1]
                temp_amax = np.max(data_arr)
                if temp_amax > old_hist_edges[-1]:
                    # New data exceeds the old range: extend the edges with
                    # equally-sized bins instead of rebinning the old counts.
                    width = old_hist_edges[1] - old_hist_edges[0]
                    new_bin_edges = np.arange(old_hist_edges[-1] + width, temp_amax + width, width)
                    old_hist_edges = np.hstack((old_hist_edges, new_bin_edges))
                hist, hist_edges = np.histogram(data_arr, bins=old_hist_edges)
                hist[: len(old_hist)] += old_hist
                self.histogram_dict[tensor] = (hist, hist_edges, min(old_min, min_value), max(old_max, max_value))

    def collect_value(self, name_to_arr):
        """
        Collect histogram on real value
        """
        for tensor, data_arr in name_to_arr.items():
            data_arr = np.asarray(data_arr)
            data_arr = data_arr.flatten()
            if data_arr.size > 0:
                min_value = np.min(data_arr)
                max_value = np.max(data_arr)
            else:
                min_value = 0
                max_value = 0
            # Symmetric range so the zero bin sits in the middle of the histogram.
            threshold = max(abs(min_value), abs(max_value))
            if tensor in self.histogram_dict:
                old_histogram = self.histogram_dict[tensor]
                self.histogram_dict[tensor] = self.merge_histogram(
                    old_histogram, data_arr, min_value, max_value, threshold
                )
            else:
                hist, hist_edges = np.histogram(data_arr, self.num_bins, range=(-threshold, threshold))
                self.histogram_dict[tensor] = (hist, hist_edges, min_value, max_value, threshold)

    def merge_histogram(self, old_histogram, data_arr, new_min, new_max, new_threshold):
        """Merge a new batch into an existing signed-value histogram, widening it if needed."""
        old_hist, old_hist_edges, old_min, old_max, old_threshold = old_histogram
        if new_threshold <= old_threshold:
            # New data fits the existing range: histogram with the old edges and add.
            new_hist, _ = np.histogram(data_arr, len(old_hist), range=(-old_threshold, old_threshold))
            return (
                new_hist + old_hist,
                old_hist_edges,
                min(old_min, new_min),
                max(old_max, new_max),
                old_threshold,
            )
        else:
            # The range must grow symmetrically; add bins on both sides so the
            # old counts can be copied into the middle unchanged.
            if old_threshold == 0:
                hist, hist_edges = np.histogram(data_arr, len(old_hist), range=(-new_threshold, new_threshold))
                hist += old_hist
            else:
                old_num_bins = len(old_hist)
                old_stride = 2 * old_threshold / old_num_bins
                half_increased_bins = int((new_threshold - old_threshold) // old_stride + 1)
                new_num_bins = old_num_bins + 2 * half_increased_bins
                new_threshold = half_increased_bins * old_stride + old_threshold
                hist, hist_edges = np.histogram(data_arr, new_num_bins, range=(-new_threshold, new_threshold))
                hist[half_increased_bins : new_num_bins - half_increased_bins] += old_hist
            return hist, hist_edges, min(old_min, new_min), max(old_max, new_max), new_threshold

    def compute_collection_result(self):
        """Reduce the accumulated histograms according to ``self.method``."""
        if not self.histogram_dict or len(self.histogram_dict) == 0:
            raise ValueError('Histogram has not been collected. Please run collect() first.')
        print(f'Finding optimal threshold for each tensor using {self.method} algorithm ...')
        if self.method == 'entropy':
            return self.compute_entropy()
        elif self.method == 'percentile':
            return self.compute_percentile()
        elif self.method == 'distribution':
            return self.compute_distribution()
        else:
            raise ValueError("Only 'entropy', 'percentile' or 'distribution' methods are supported")

    def compute_percentile(self):
        """Derive per-tensor (low, high, hist, hist_edges) by percentile clipping."""
        if self.percentile < 0 or self.percentile > 100:
            raise ValueError('Invalid percentile. Must be in range 0 <= percentile <= 100.')
        histogram_dict = self.histogram_dict
        percentile = self.percentile
        thresholds_dict = {}
        print(f'Number of tensors : {len(histogram_dict)}')
        print(f'Number of histogram bins : {self.num_bins}')
        print(f'Percentile : ({100.0 - percentile},{percentile})')
        for tensor, histogram in histogram_dict.items():
            hist = histogram[0]
            hist_edges = histogram[1]
            total = hist.sum()
            cdf = np.cumsum(hist / total)
            if self.symmetric:
                # Absolute-value histogram: clip only the upper tail and mirror it.
                idx_right = np.searchsorted(cdf, percentile / 100.0)
                thresholds_dict[tensor] = (-float(hist_edges[idx_right]), float(hist_edges[idx_right]))
            else:
                # Signed histogram: cut half the excluded mass from each tail.
                percent_to_cut_one_side = (100.0 - percentile) / 200.0
                idx_right = np.searchsorted(cdf, 1.0 - percent_to_cut_one_side)
                idx_left = np.searchsorted(cdf, percent_to_cut_one_side)
                thresholds_dict[tensor] = (float(hist_edges[idx_left]), float(hist_edges[idx_right]))
            min_value = histogram[2]
            max_value = histogram[3]
            # Clamp to the observed data range.
            if thresholds_dict[tensor][0] < min_value:
                thresholds_dict[tensor] = (min_value, thresholds_dict[tensor][1])
            if thresholds_dict[tensor][1] > max_value:
                thresholds_dict[tensor] = (thresholds_dict[tensor][0], max_value)
            # BUG FIX: append the histogram counts and edges (histogram[:2]) as
            # elements 2 and 3, matching compute_entropy and the 4-tuple layout
            # (lowest, highest, histogram, bins) expected by TensorsData.
            # Previously this appended hist[:2] — the first two bin counts.
            thresholds_dict[tensor] = (*thresholds_dict[tensor], *histogram[:2])
            if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):
                apply_plot(hist, hist_edges)
        return thresholds_dict

    def compute_entropy(self):
        """Derive per-tensor (low, high, hist, hist_edges) by KL-divergence search."""
        histogram_dict = self.histogram_dict
        num_quantized_bins = self.num_quantized_bins
        thresholds_dict = {}
        print(f'Number of tensors : {len(histogram_dict)}')
        print(
            'Number of histogram bins : {} (The number may increase depends on the data it collects)'.format(
                self.num_bins
            )
        )
        print(f'Number of quantized bins : {self.num_quantized_bins}')
        for tensor, histogram in histogram_dict.items():
            optimal_threshold = self.get_entropy_threshold(histogram, num_quantized_bins)
            thresholds_dict[tensor] = optimal_threshold
            # Append the raw (counts, edges) so TensorsData can build TensorData.
            thresholds_dict[tensor] = (*optimal_threshold, *histogram[:2])
            if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):
                apply_plot(histogram[0], histogram[1])
        return thresholds_dict

    @staticmethod
    def _avg_std(hist, hist_edges, power=1):
        """Return (mean, std) of the histogrammed distribution.

        ``power`` transforms the bin centers first: 1 = plain mean/std, odd
        integer p = values**p, any other positive power = sign-preserving
        |values|**power.
        """
        if power <= 0:
            raise ValueError(f'power={power} <= 0 is invalid.')
        values = (hist_edges[:-1] + hist_edges[1:]) * 0.5  # bin centers
        if power == 1:
            avg = (hist * values).sum() / hist.sum()
            std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5
            return avg, std
        if int(power) == power and int(power) % 2 == 1:
            # Odd integer powers preserve sign.
            avg = (hist * values ** power).sum() / hist.sum()
            std = ((hist * (values ** power - avg) ** 2).sum() / hist.sum()) ** 0.5
            return avg, std
        # Other powers: apply to |values| and restore the sign.
        fact = np.abs(values) / values
        fact[np.isnan(fact)] = 1  # 0/0 at a zero-centered bin -> +1
        fact[np.isinf(fact)] = 1
        values = np.abs(values) ** power * fact
        avg = (hist * values).sum() / hist.sum()
        std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5
        return avg, std

    def compute_distribution(self):
        """Summarize every histogram as TensorData(avg, std, hist, hist_edges)."""
        if self.num_bins < 512:
            raise ValueError('Invalid num_bins. Must be in range 512 <= num_bins.')
        histogram_dict = self.histogram_dict
        thresholds_dict = {}
        print(f'Number of tensors : {len(histogram_dict)}')
        print(f'Number of histogram bins : {self.num_bins}')
        # BUG FIX: the literal previously contained a stray ')' after {self.scenario!r}.
        print(f'Scenario : {self.scenario!r}')
        for tensor, histogram in histogram_dict.items():
            hist = histogram[0]
            hist_edges = histogram[1]
            if self.scenario == 'same':
                avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1)
            elif self.scenario == 'p3':
                avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1.0 / 3.0)
            else:
                raise ValueError("Invalid scenario. Must be in {'same', 'p3'}.")
            thresholds_dict[tensor] = TensorData(avg=avg_coef, std=std_coef, hist=hist, hist_edges=hist_edges)
            if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):
                apply_plot(hist, hist_edges)
        return thresholds_dict

    def get_entropy_threshold(self, histogram, num_quantized_bins):
        """Given a dataset, find the optimal threshold for quantizing it.
        The reference distribution is `q`, and the candidate distribution is `p`.
        `q` is a truncated version of the original distribution.
        Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
        """
        import copy
        from scipy.stats import entropy

        hist = histogram[0]
        hist_edges = histogram[1]
        num_bins = hist.size
        # Signed histograms are zero-centered; grow the window around the middle.
        zero_bin_index = num_bins // 2
        num_half_quantized_bin = num_quantized_bins // 2
        kl_divergence = np.zeros(zero_bin_index - num_half_quantized_bin + 1)
        thresholds = [(0, 0) for i in range(kl_divergence.size)]
        for i in range(num_half_quantized_bin, zero_bin_index + 1, 1):
            start_index = zero_bin_index - i
            end_index = zero_bin_index + i + 1 if zero_bin_index + i + 1 <= num_bins else num_bins
            thresholds[i - num_half_quantized_bin] = (float(hist_edges[start_index]), float(hist_edges[end_index]))
            sliced_distribution = copy.deepcopy(hist[start_index:end_index])
            # p: reference distribution with out-of-window counts folded into the end bins.
            p = sliced_distribution.copy()
            left_outliers_count = sum(hist[:start_index])
            right_outliers_count = sum(hist[end_index:])
            p[0] += left_outliers_count
            p[-1] += right_outliers_count
            nonzeros = (p != 0).astype(np.int64)
            # q: p down-sampled to num_quantized_bins, then expanded back over
            # its non-zero source bins.
            quantized_bins = np.zeros(num_quantized_bins, dtype=np.int64)
            num_merged_bins = sliced_distribution.size // num_quantized_bins
            for index in range(num_quantized_bins):
                start = index * num_merged_bins
                end = start + num_merged_bins
                quantized_bins[index] = sum(sliced_distribution[start:end])
            quantized_bins[-1] += sum(sliced_distribution[num_quantized_bins * num_merged_bins :])
            q = np.zeros(p.size, dtype=np.int64)
            for index in range(num_quantized_bins):
                start = index * num_merged_bins
                end = start + num_merged_bins
                norm = sum(nonzeros[start:end])
                if norm != 0:
                    q[start:end] = float(quantized_bins[index]) / float(norm)
            p = smooth_distribution(p)
            q = smooth_distribution(q)
            # smooth_distribution may not return an array for degenerate input;
            # treat that candidate window as infinitely bad.
            if isinstance(q, np.ndarray):
                kl_divergence[i - num_half_quantized_bin] = entropy(p, q)
            else:
                kl_divergence[i - num_half_quantized_bin] = float('inf')
        min_kl_divergence_idx = np.argmin(kl_divergence)
        optimal_threshold = thresholds[min_kl_divergence_idx]
        min_value = histogram[2]
        max_value = histogram[3]
        # Clamp to the observed data range.
        if optimal_threshold[0] < min_value:
            optimal_threshold = (min_value, optimal_threshold[1])
        if optimal_threshold[1] > max_value:
            optimal_threshold = (optimal_threshold[0], max_value)
        return optimal_threshold
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TensorsData:
def __init__(self, calibration_method, data: Dict[str, Union[TensorData,
Tuple]]):
self.calibration_method = calibration_method
self.data = {}
for k, v in data.items():
if not isinstance(k, str):
raise TypeError(f'Keys must be strings not {type(k)}.')
if isinstance(v, tuple):
if calibration_method == CalibrationMethod.MinMax and len(v
) == 2:
self.data[k] = TensorData(lowest=v[0], highest=v[1])
continue
if len(v) == 4:
self.data[k] = TensorData(lowest=v[0], highest=v[1],
histogram=v[2], bins=v[3])
continue
raise TypeError(
f'Unexpected tuple for {k:r}, it has {len(v)} elements: {v}.'
)
if not isinstance(v, TensorData):
raise TypeError(f'Values must be TensorData not {type(v)}.')
self.data[k] = v
def __iter__(self):
yield from self.data
<|reserved_special_token_0|>
    def __getitem__(self, key):
        """Return the calibration result stored for *key* (raises KeyError if absent)."""
        return self.data[key]
def __setitem__(self, key, value):
if key not in self.data:
raise RuntimeError(
f'Only an existing tensor can be modified, {key!r} is not.')
self.data[key] = value
    def values(self):
        """Return a live view of the stored TensorData values."""
        return self.data.values()
class CalibrationMethod(Enum):
    # Strategy used to convert collected activation statistics into
    # quantization thresholds (see the corresponding *Calibrater classes).
    MinMax = 0  # global (or moving-average) min/max per tensor
    Entropy = 1  # KL-divergence-minimizing clipping thresholds
    Percentile = 2  # clip tails beyond a configured percentile
    Distribution = 3  # mean/std summary of the distribution (float 8 scenarios)
class CalibrationDataReader(metaclass=abc.ABCMeta):
    """Iterable source of input-feed dicts consumed by an InferenceSession during calibration."""

    @classmethod
    def __subclasshook__(cls, subclass):
        # Structural check: any class exposing a callable ``get_next`` counts
        # as a CalibrationDataReader (duck typing via ABCMeta).
        candidate = getattr(subclass, 'get_next', None)
        return callable(candidate) or NotImplemented

    @abc.abstractmethod
    def get_next(self) -> dict:
        """generate the input data dict for ONNXinferenceSession run"""
        raise NotImplementedError

    def __iter__(self):
        return self

    def __next__(self):
        # ``get_next`` signals exhaustion with None; translate that into the
        # standard iterator protocol.
        batch = self.get_next()
        if batch is None:
            raise StopIteration
        return batch
class CalibraterBase:
    """
    Common scaffolding for calibrators: loads the model, owns the augmented
    model path and the InferenceSession used to observe tensor statistics.
    """

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path='augmented_model.onnx',
        symmetric=False,
        use_external_data_format=False,
    ):
        """
        :param model_path: ONNX model to calibrate. It should be a model file path
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        """
        if isinstance(model_path, str):
            self.model = load_model_with_shape_infer(Path(model_path))
        elif isinstance(model_path, Path):
            self.model = load_model_with_shape_infer(model_path)
        else:
            raise ValueError('model_path should be model path.')
        self.op_types_to_calibrate = op_types_to_calibrate
        self.augmented_model_path = augmented_model_path
        self.symmetric = symmetric
        self.use_external_data_format = use_external_data_format
        # Populated later by augment_graph() / create_inference_session().
        self.augment_model = None
        self.infer_session = None
        self.execution_providers = ['CPUExecutionProvider']

    def set_execution_providers(self, execution_providers=None):
        """
        reset the execution providers to execute the collect_data. It triggers to re-creating inference session.

        :param execution_providers: list of execution provider names; None selects CPU only.
        """
        # BUG FIX: the default used to be a shared mutable list literal
        # (mutable-default-argument pitfall); a None sentinel is safe and
        # backward-compatible.
        if execution_providers is None:
            execution_providers = ['CPUExecutionProvider']
        self.execution_providers = execution_providers
        self.create_inference_session()

    def create_inference_session(self):
        """
        create an OnnxRuntime InferenceSession.
        """
        sess_options = onnxruntime.SessionOptions()
        # Disable all graph optimizations so the extra outputs added for
        # calibration are not folded away.
        sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL
        self.infer_session = onnxruntime.InferenceSession(
            self.augmented_model_path,
            sess_options=sess_options,
            providers=self.execution_providers,
        )

    def select_tensors_to_calibrate(self, model: ModelProto):
        """
        select input/output tensors of candidate nodes to calibrate.
        returns:
            tensors (set): set of tensor name.
            value_infos (dict): tensor name to value info.
        """
        value_infos = {vi.name: vi for vi in model.graph.value_info}
        value_infos.update({ot.name: ot for ot in model.graph.output})
        value_infos.update({it.name: it for it in model.graph.input})
        initializer = {init.name for init in model.graph.initializer}
        tensors_to_calibrate = set()
        tensor_type_to_calibrate = {TensorProto.FLOAT}
        for node in model.graph.node:
            if not self.op_types_to_calibrate or node.op_type in self.op_types_to_calibrate:
                for tensor_name in itertools.chain(node.input, node.output):
                    if tensor_name in value_infos:
                        vi = value_infos[tensor_name]
                        # Only float tensors that are not initializers qualify.
                        if (
                            vi.type.HasField('tensor_type')
                            and vi.type.tensor_type.elem_type in tensor_type_to_calibrate
                            and tensor_name not in initializer
                        ):
                            tensors_to_calibrate.add(tensor_name)
        return tensors_to_calibrate, value_infos

    def get_augment_model(self):
        """
        return: augmented onnx model. Call after calling augment_graph
        """
        return self.model

    def augment_graph(self):
        """
        abstract method: augment the input model to prepare for collecting data. It will:
            1. augment the model to be able to collect desired statistics data
            2. save augmented model to augmented_model_paths
        """
        raise NotImplementedError

    def collect_data(self, data_reader: CalibrationDataReader):
        """
        abstract method: collect the tensors that will be used for range computation. It can be called multiple times.
        """
        raise NotImplementedError

    def compute_data(self) -> TensorsData:
        """
        abstract method: compute data based on the calibration method stored in TensorsData
        """
        raise NotImplementedError
class MinMaxCalibrater(CalibraterBase):
    """Calibrater that tracks per-tensor (min, max) ranges via added ReduceMin/ReduceMax nodes."""

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path='augmented_model.onnx',
        symmetric=False,
        use_external_data_format=False,
        moving_average=False,
        averaging_constant=0.01,
    ):
        """
        :param model_path: ONNX model to calibrate. It is a model path
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        :param moving_average: compute the moving average of the minimum and maximum values instead of the global minimum and maximum.
        :param averaging_constant: constant smoothing factor to use when computing the moving average.
        """
        super().__init__(
            model_path,
            op_types_to_calibrate=op_types_to_calibrate,
            augmented_model_path=augmented_model_path,
            symmetric=symmetric,
            use_external_data_format=use_external_data_format,
        )
        self.intermediate_outputs = []
        self.calibrate_tensors_range = None
        self.num_model_outputs = len(self.model.graph.output)
        self.model_original_outputs = {output.name for output in self.model.graph.output}
        self.moving_average = moving_average
        if moving_average and (averaging_constant < 0 or averaging_constant > 1):
            raise ValueError('Invalid averaging constant, which should not be < 0 or > 1.')
        self.averaging_constant = averaging_constant

    def augment_graph(self):
        """
        Adds ReduceMin and ReduceMax nodes to all quantization_candidates op type nodes in
        model and ensures their outputs are stored as part of the graph output
        :return: augmented ONNX model
        """
        tensors, _ = self.select_tensors_to_calibrate(self.model)
        reshape_shape_name = str(uuid.uuid4())
        reshape_shape = numpy_helper.from_array(np.array([1], dtype=np.int64), reshape_shape_name)
        self.model.graph.initializer.append(reshape_shape)

        def add_reduce_min_max(tensor_name, reduce_op_name):
            # Reduce over all axes, then Reshape to [1] so the added graph
            # output always has a fixed, known shape.
            keepdims = 1
            reduce_output = tensor_name + '_' + reduce_op_name
            intermediate_output = reduce_output + '_Reshape'
            reduce_node = onnx.helper.make_node(
                reduce_op_name, [tensor_name], [intermediate_output], keepdims=keepdims, name=reduce_output
            )
            reshape_node = onnx.helper.make_node(
                'Reshape',
                inputs=[intermediate_output, reshape_shape_name],
                outputs=[reduce_output],
                name=intermediate_output,
            )
            self.model.graph.node.extend([reduce_node, reshape_node])
            self.model.graph.output.append(helper.make_tensor_value_info(reduce_output, TensorProto.FLOAT, [1]))

        for tensor in tensors:
            add_reduce_min_max(tensor, 'ReduceMin')
            add_reduce_min_max(tensor, 'ReduceMax')
        onnx.save(self.model, self.augmented_model_path, save_as_external_data=self.use_external_data_format)

    def clear_collected_data(self):
        """Drop the intermediate outputs accumulated so far."""
        self.intermediate_outputs = []

    def collect_data(self, data_reader: CalibrationDataReader):
        """Run the augmented model on every batch and fold the results into the running ranges."""
        while True:
            inputs = data_reader.get_next()
            if not inputs:
                break
            self.intermediate_outputs.append(self.infer_session.run(None, inputs))
        if len(self.intermediate_outputs) == 0:
            raise ValueError('No data is collected.')
        t = self.compute_data()
        if not isinstance(t, TensorsData):
            raise TypeError(f'compute_data must return a TensorsData not {type(t)}.')
        self.clear_collected_data()

    def merge_range(self, old_range, new_range):
        """Merge a previous range dict into a new one (global min/max or moving average)."""
        if not old_range:
            return new_range
        for key, value in old_range.items():
            if self.moving_average:
                # Exponential moving average toward the newly observed range.
                min_value = value[0] + self.averaging_constant * (new_range[key][0] - value[0])
                max_value = value[1] + self.averaging_constant * (new_range[key][1] - value[1])
            else:
                min_value = min(value[0], new_range[key][0])
                max_value = max(value[1], new_range[key][1])
            new_range[key] = (min_value, max_value)
        return new_range

    def compute_data(self) -> TensorsData:
        """
        Compute the min-max range of tensor
        :return: dictionary mapping: {added node names: (ReduceMin, ReduceMax) pairs }
        """
        if len(self.intermediate_outputs) == 0:
            return self.calibrate_tensors_range
        output_names = [self.infer_session.get_outputs()[i].name for i in range(len(self.intermediate_outputs[0]))]
        output_dicts_list = [
            dict(zip(output_names, intermediate_output)) for intermediate_output in self.intermediate_outputs
        ]
        merged_output_dict = {}
        for d in output_dicts_list:
            for k, v in d.items():
                merged_output_dict.setdefault(k, []).append(v)
        # Outputs beyond the model's original ones come in (ReduceMin, ReduceMax) pairs.
        added_output_names = output_names[self.num_model_outputs:]
        calibrate_tensor_names = [
            added_output_names[i].rpartition('_')[0] for i in range(0, len(added_output_names), 2)
        ]
        merged_added_output_dict = {
            i: merged_output_dict[i] for i in merged_output_dict if i not in self.model_original_outputs
        }
        pairs = []
        for i in range(0, len(added_output_names), 2):
            min_value = 0
            max_value = 0
            if self.moving_average:
                min_value_array = np.mean(merged_added_output_dict[added_output_names[i]], axis=0)
                max_value_array = np.mean(merged_added_output_dict[added_output_names[i + 1]], axis=0)
            else:
                min_value_array = min(merged_added_output_dict[added_output_names[i]])
                max_value_array = max(merged_added_output_dict[added_output_names[i + 1]])
            # BUG FIX: was ``type(x) == int`` — use isinstance so int subclasses
            # (e.g. bool, IntEnum) are handled the idiomatic way.
            if isinstance(min_value_array, int) or min_value_array.size > 0:
                min_value = float(min_value_array)
            if isinstance(max_value_array, int) or max_value_array.size > 0:
                max_value = float(max_value_array)
            if self.symmetric:
                max_absolute_value = max(abs(min_value), abs(max_value))
                pairs.append(tuple([-max_absolute_value, max_absolute_value]))
            else:
                pairs.append(tuple([min_value, max_value]))
        new_calibrate_tensors_range = TensorsData(CalibrationMethod.MinMax, dict(zip(calibrate_tensor_names, pairs)))
        if self.calibrate_tensors_range:
            self.calibrate_tensors_range = self.merge_range(self.calibrate_tensors_range, new_calibrate_tensors_range)
        else:
            self.calibrate_tensors_range = new_calibrate_tensors_range
        return self.calibrate_tensors_range
class HistogramCalibrater(CalibraterBase):
    # Shared base of the entropy/percentile/distribution calibraters: exposes
    # each candidate tensor as an extra graph output and aggregates the
    # observed activations into per-tensor histograms via HistogramCollector.

    def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:
        Optional[Sequence[str]]=None, augmented_model_path=
        'augmented_model.onnx', use_external_data_format=False, method=
        'percentile', symmetric=False, num_bins=128, num_quantized_bins=
        2048, percentile=99.999, scenario='same'):
        """
        :param model_path: ONNX model to calibrate. It is a model path.
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        :param method: A string. One of ['entropy', 'percentile'].
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param num_bins: number of bins to create a new histogram for collecting tensor values.
        :param num_quantized_bins: number of quantized bins. Default 128.
        :param percentile: A float number between [0, 100]. Default 99.99.
        :param scenario: see :class:`DistributionCalibrater`
        """
        super().__init__(model_path, op_types_to_calibrate=
            op_types_to_calibrate, augmented_model_path=
            augmented_model_path, symmetric=symmetric,
            use_external_data_format=use_external_data_format)
        self.intermediate_outputs = []  # raw session outputs, one entry per batch
        self.calibrate_tensors_range = None
        self.num_model_outputs = len(self.model.graph.output)
        self.model_original_outputs = {output.name for output in self.model
            .graph.output}
        self.collector = None  # HistogramCollector, created lazily on first collect_data()
        self.method = method
        self.num_bins = num_bins
        self.num_quantized_bins = num_quantized_bins
        self.percentile = percentile
        self.tensors_to_calibrate = None  # filled in by augment_graph()
        self.scenario = scenario

    def augment_graph(self):
        """
        make all quantization_candidates op type nodes as part of the graph output.
        :return: augmented ONNX model
        """
        self.tensors_to_calibrate, value_infos = (self.
            select_tensors_to_calibrate(self.model))
        for tensor in self.tensors_to_calibrate:
            # Original outputs are already observable; only add the rest.
            if tensor not in self.model_original_outputs:
                self.model.graph.output.append(value_infos[tensor])
        onnx.save(self.model, self.augmented_model_path,
            save_as_external_data=self.use_external_data_format)

    def clear_collected_data(self):
        # Reset the per-batch buffer (histograms in the collector are kept).
        self.intermediate_outputs = []

    def collect_data(self, data_reader: CalibrationDataReader):
        """
        Entropy Calibrator collects operators' tensors as well as generates tensor histogram for each operator.
        """
        while True:
            inputs = data_reader.get_next()
            if not inputs:
                break
            self.intermediate_outputs.append(self.infer_session.run(None,
                inputs))
        if len(self.intermediate_outputs) == 0:
            raise ValueError('No data is collected.')
        output_names = [self.infer_session.get_outputs()[i].name for i in
            range(len(self.intermediate_outputs[0]))]
        output_dicts_list = [dict(zip(output_names, intermediate_output)) for
            intermediate_output in self.intermediate_outputs]
        # Group every batch's value per output name, then keep only the
        # tensors selected for calibration.
        merged_dict = {}
        for d in output_dicts_list:
            for k, v in d.items():
                merged_dict.setdefault(k, []).append(v)
        clean_merged_dict = {i: merged_dict[i] for i in merged_dict if i in
            self.tensors_to_calibrate}
        if not self.collector:
            self.collector = HistogramCollector(method=self.method,
                symmetric=self.symmetric, num_bins=self.num_bins,
                num_quantized_bins=self.num_quantized_bins, percentile=self
                .percentile, scenario=self.scenario)
        self.collector.collect(clean_merged_dict)
        self.clear_collected_data()

    def compute_data(self) ->TensorsData:
        """
        Compute the min-max range of tensor
        :return: dictionary mapping: {tensor name: (min value, max value)}
        """
        if not self.collector:
            raise ValueError(
                "No collector created and can't generate calibration data.")
        # The calibration method is derived from the concrete subclass.
        if isinstance(self, EntropyCalibrater):
            cal = CalibrationMethod.Entropy
        elif isinstance(self, PercentileCalibrater):
            cal = CalibrationMethod.Percentile
        elif isinstance(self, DistributionCalibrater):
            cal = CalibrationMethod.Distribution
        else:
            raise TypeError(
                f'Unknown calibrater {type(self)}. This method must be overwritten.'
                )
        return TensorsData(cal, self.collector.compute_collection_result())
class EntropyCalibrater(HistogramCalibrater):
    """Histogram-based calibrater whose thresholds minimize KL divergence (entropy method)."""

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path='augmented_model.onnx',
        use_external_data_format=False,
        method='entropy',
        symmetric=False,
        num_bins=128,
        num_quantized_bins=128,
    ):
        """
        :param model_path: ONNX model to calibrate. It is a model path
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        :param method: A string. One of ['entropy', 'percentile', 'distribution'].
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param num_bins: number of bins to create a new histogram for collecting tensor values.
        :param num_quantized_bins: number of quantized bins. Default 128.
        """
        histogram_options = {
            'method': method,
            'symmetric': symmetric,
            'num_bins': num_bins,
            'num_quantized_bins': num_quantized_bins,
        }
        super().__init__(
            model_path,
            op_types_to_calibrate,
            augmented_model_path,
            use_external_data_format,
            **histogram_options,
        )
class PercentileCalibrater(HistogramCalibrater):
    """Histogram-based calibrater that clips the distribution tails at a percentile."""

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path='augmented_model.onnx',
        use_external_data_format=False,
        method='percentile',
        symmetric=False,
        num_bins=2048,
        percentile=99.999,
    ):
        """
        :param model_path: ONNX model to calibrate. It is a model path
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        :param method: A string. One of ['entropy', 'percentile', 'distribution'].
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param num_quantized_bins: number of quantized bins. Default 128.
        :param percentile: A float number between [0, 100]. Default 99.99.
        """
        histogram_options = {
            'method': method,
            'symmetric': symmetric,
            'num_bins': num_bins,
            'percentile': percentile,
        }
        super().__init__(
            model_path,
            op_types_to_calibrate,
            augmented_model_path,
            use_external_data_format,
            **histogram_options,
        )
class DistributionCalibrater(HistogramCalibrater):
    """Histogram-based calibrater that summarizes each tensor by its distribution (mean/std)."""

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path='augmented_model.onnx',
        use_external_data_format=False,
        method='distribution',
        num_bins=128,
        scenario='same',
    ):
        """
        :param model_path: ONNX model to calibrate. It is a model path
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        :param method: A string. One of ['entropy', 'percentile', 'distribution'].
        :param num_bins: number of bins to create a new histogram for collecting tensor values.
        :param scenario: for float 8 only, if `scenario="same"`,
            the algorithm weights and float 8 follow the same distribution,
            if `scenario="p3"`, it assumes the weights follow
            a gaussian law and float 8 ~ X^3 where X is a gaussian law
        """
        histogram_options = {'method': method, 'num_bins': num_bins, 'scenario': scenario}
        super().__init__(
            model_path,
            op_types_to_calibrate,
            augmented_model_path,
            use_external_data_format,
            **histogram_options,
        )
class CalibrationDataCollector(metaclass=abc.ABCMeta):
    """
    Base class for collecting data for calibration-based quantization.
    """

    @abc.abstractmethod
    def collect(self, name_to_arr):
        """
        Generate informative data based on given data.
            name_to_arr : dict
                tensor name to NDArray data
        """
        raise NotImplementedError

    @abc.abstractmethod
    def compute_collection_result(self):
        """
        Get the optimal result among collection data.
        """
        raise NotImplementedError
class HistogramCollector(CalibrationDataCollector):
    """
    Collecting histogram for each tensor. Percentile and Entropy method are supported.

    ref: https://github.com//apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py
    ref: https://docs.nvidia.com/deeplearning/tensorrt/pytorch-quantization-toolkit/docs/_modules/
         pytorch_quantization/calib/histogram.html
    """

    def __init__(self, method, symmetric, num_bins, num_quantized_bins, percentile, scenario):
        # histogram_dict maps tensor name -> (hist, hist_edges, min, max[, threshold]);
        # the 5th element exists only for histograms built by collect_value().
        self.histogram_dict = {}
        self.method = method
        self.symmetric = symmetric
        self.num_bins = num_bins
        self.num_quantized_bins = num_quantized_bins
        self.percentile = percentile
        self.scenario = scenario

    def get_histogram_dict(self):
        """Return the raw per-tensor histogram dictionary."""
        return self.histogram_dict

    def collect(self, name_to_arr):
        """Dispatch to the histogram builder matching ``self.method``."""
        print('Collecting tensor data and making histogram ...')
        # 'entropy' and 'distribution' need signed-value histograms;
        # 'percentile' histograms absolute values only in the symmetric case.
        if self.method in {'distribution', 'entropy'}:
            return self.collect_value(name_to_arr)
        elif self.method == 'percentile':
            if self.symmetric:
                return self.collect_absolute_value(name_to_arr)
            else:
                return self.collect_value(name_to_arr)
        else:
            raise ValueError("Only 'entropy', 'percentile' or 'distribution' methods are supported")

    def collect_absolute_value(self, name_to_arr):
        """
        Collect histogram on absolute value
        """
        for tensor, data_arr in name_to_arr.items():
            data_arr = np.asarray(data_arr)
            data_arr = data_arr.flatten()
            if data_arr.size > 0:
                min_value = np.min(data_arr)
                max_value = np.max(data_arr)
            else:
                min_value = 0
                max_value = 0
            data_arr = np.absolute(data_arr)  # only histogram the magnitudes
            if tensor not in self.histogram_dict:
                hist, hist_edges = np.histogram(data_arr, bins=self.num_bins)
                self.histogram_dict[tensor] = (hist, hist_edges, min_value, max_value)
            else:
                old_histogram = self.histogram_dict[tensor]
                old_min = old_histogram[2]
                old_max = old_histogram[3]
                old_hist = old_histogram[0]
                old_hist_edges = old_histogram[1]
                temp_amax = np.max(data_arr)
                if temp_amax > old_hist_edges[-1]:
                    # Extend the histogram rightwards with equally sized bins
                    # until the new maximum is covered.
                    width = old_hist_edges[1] - old_hist_edges[0]
                    new_bin_edges = np.arange(old_hist_edges[-1] + width, temp_amax + width, width)
                    old_hist_edges = np.hstack((old_hist_edges, new_bin_edges))
                hist, hist_edges = np.histogram(data_arr, bins=old_hist_edges)
                hist[: len(old_hist)] += old_hist
                self.histogram_dict[tensor] = (hist, hist_edges, min(old_min, min_value), max(old_max, max_value))

    def collect_value(self, name_to_arr):
        """
        Collect histogram on real value
        """
        for tensor, data_arr in name_to_arr.items():
            data_arr = np.asarray(data_arr)
            data_arr = data_arr.flatten()
            if data_arr.size > 0:
                min_value = np.min(data_arr)
                max_value = np.max(data_arr)
            else:
                min_value = 0
                max_value = 0
            # Histogram is kept symmetric around 0 over (-threshold, threshold).
            threshold = max(abs(min_value), abs(max_value))
            if tensor in self.histogram_dict:
                old_histogram = self.histogram_dict[tensor]
                self.histogram_dict[tensor] = self.merge_histogram(
                    old_histogram, data_arr, min_value, max_value, threshold
                )
            else:
                hist, hist_edges = np.histogram(data_arr, self.num_bins, range=(-threshold, threshold))
                self.histogram_dict[tensor] = (hist, hist_edges, min_value, max_value, threshold)

    def merge_histogram(self, old_histogram, data_arr, new_min, new_max, new_threshold):
        """Fold a new batch into an existing symmetric histogram, widening it if needed."""
        old_hist, old_hist_edges, old_min, old_max, old_threshold = old_histogram
        if new_threshold <= old_threshold:
            # New data fits in the existing range: histogram it with the same
            # bins and add the counts.
            new_hist, _ = np.histogram(data_arr, len(old_hist), range=(-old_threshold, old_threshold))
            return (
                new_hist + old_hist,
                old_hist_edges,
                min(old_min, new_min),
                max(old_max, new_max),
                old_threshold,
            )
        else:
            if old_threshold == 0:
                hist, hist_edges = np.histogram(data_arr, len(old_hist), range=(-new_threshold, new_threshold))
                hist += old_hist
            else:
                # Grow symmetrically by whole old-size bins so the old counts
                # can be added into the center of the new histogram.
                old_num_bins = len(old_hist)
                old_stride = 2 * old_threshold / old_num_bins
                half_increased_bins = int((new_threshold - old_threshold) // old_stride + 1)
                new_num_bins = old_num_bins + 2 * half_increased_bins
                new_threshold = half_increased_bins * old_stride + old_threshold
                hist, hist_edges = np.histogram(data_arr, new_num_bins, range=(-new_threshold, new_threshold))
                hist[half_increased_bins : new_num_bins - half_increased_bins] += old_hist
            return hist, hist_edges, min(old_min, new_min), max(old_max, new_max), new_threshold

    def compute_collection_result(self):
        """Turn the collected histograms into per-tensor thresholds for ``self.method``."""
        if not self.histogram_dict or len(self.histogram_dict) == 0:
            raise ValueError('Histogram has not been collected. Please run collect() first.')
        print(f'Finding optimal threshold for each tensor using {self.method} algorithm ...')
        if self.method == 'entropy':
            return self.compute_entropy()
        elif self.method == 'percentile':
            return self.compute_percentile()
        elif self.method == 'distribution':
            return self.compute_distribution()
        else:
            raise ValueError("Only 'entropy', 'percentile' or 'distribution' methods are supported")

    def compute_percentile(self):
        """Clip each tensor's range at ``self.percentile`` of the observed CDF."""
        if self.percentile < 0 or self.percentile > 100:
            raise ValueError('Invalid percentile. Must be in range 0 <= percentile <= 100.')
        histogram_dict = self.histogram_dict
        percentile = self.percentile
        thresholds_dict = {}
        print(f'Number of tensors : {len(histogram_dict)}')
        print(f'Number of histogram bins : {self.num_bins}')
        print(f'Percentile : ({100.0 - percentile},{percentile})')
        for tensor, histogram in histogram_dict.items():
            hist = histogram[0]
            hist_edges = histogram[1]
            total = hist.sum()
            cdf = np.cumsum(hist / total)
            if self.symmetric:
                idx_right = np.searchsorted(cdf, percentile / 100.0)
                thresholds_dict[tensor] = -float(hist_edges[idx_right]), float(hist_edges[idx_right])
            else:
                # Cut half the excluded mass from each tail.
                percent_to_cut_one_side = (100.0 - percentile) / 200.0
                idx_right = np.searchsorted(cdf, 1.0 - percent_to_cut_one_side)
                idx_left = np.searchsorted(cdf, percent_to_cut_one_side)
                thresholds_dict[tensor] = float(hist_edges[idx_left]), float(hist_edges[idx_right])
            # Clamp thresholds to the actually observed range.
            min_value = histogram[2]
            max_value = histogram[3]
            if thresholds_dict[tensor][0] < min_value:
                thresholds_dict[tensor] = (min_value, thresholds_dict[tensor][1])
            if thresholds_dict[tensor][1] > max_value:
                thresholds_dict[tensor] = (thresholds_dict[tensor][0], max_value)
            # BUG FIX: was ``*hist[:2]`` (the first two bin *counts*); the
            # intent — mirroring compute_entropy and TensorsData's
            # (min, max, histogram, bins) layout — is to append the histogram
            # and its edges.
            thresholds_dict[tensor] = (*thresholds_dict[tensor], *histogram[:2])
            if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):
                apply_plot(hist, hist_edges)
        return thresholds_dict

    def compute_entropy(self):
        """Pick per-tensor thresholds minimizing KL divergence (TensorRT-style entropy calibration)."""
        histogram_dict = self.histogram_dict
        num_quantized_bins = self.num_quantized_bins
        thresholds_dict = {}
        print(f'Number of tensors : {len(histogram_dict)}')
        print(
            'Number of histogram bins : {} (The number may increase depends on the data it collects)'.format(
                self.num_bins
            )
        )
        print(f'Number of quantized bins : {self.num_quantized_bins}')
        for tensor, histogram in histogram_dict.items():
            optimal_threshold = self.get_entropy_threshold(histogram, num_quantized_bins)
            thresholds_dict[tensor] = optimal_threshold
            thresholds_dict[tensor] = (*optimal_threshold, *histogram[:2])
            if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):
                apply_plot(histogram[0], histogram[1])
        return thresholds_dict

    @staticmethod
    def _avg_std(hist, hist_edges, power=1):
        """Weighted mean/std of the bin centers, optionally raised to ``power`` (sign-preserving)."""
        if power <= 0:
            raise ValueError(f'power={power} <= 0 is invalid.')
        values = (hist_edges[:-1] + hist_edges[1:]) * 0.5  # bin centers
        if power == 1:
            avg = (hist * values).sum() / hist.sum()
            std = ((hist * values**2).sum() / hist.sum() - avg**2) ** 0.5
            return avg, std
        if int(power) == power and int(power) % 2 == 1:
            # Odd integer power preserves sign directly.
            avg = (hist * values**power).sum() / hist.sum()
            std = ((hist * (values**power - avg) ** 2).sum() / hist.sum()) ** 0.5
            return avg, std
        # General case: apply the power to |values| and restore the sign.
        fact = np.abs(values) / values
        fact[np.isnan(fact)] = 1
        fact[np.isinf(fact)] = 1
        values = np.abs(values) ** power * fact
        avg = (hist * values).sum() / hist.sum()
        std = ((hist * values**2).sum() / hist.sum() - avg**2) ** 0.5
        return avg, std

    def compute_distribution(self):
        """Summarize each tensor's histogram as (avg, std) per ``self.scenario``."""
        if self.num_bins < 512:
            raise ValueError('Invalid num_bins. Must be in range 512 <= num_bins.')
        histogram_dict = self.histogram_dict
        thresholds_dict = {}
        print(f'Number of tensors : {len(histogram_dict)}')
        print(f'Number of histogram bins : {self.num_bins}')
        # FIX: removed a stray ')' that was accidentally inside the f-string.
        print(f'Scenario : {self.scenario!r}')
        for tensor, histogram in histogram_dict.items():
            hist = histogram[0]
            hist_edges = histogram[1]
            if self.scenario == 'same':
                avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1)
            elif self.scenario == 'p3':
                # float 8 ~ X^3 scenario: use the cubic-root moment.
                avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1.0 / 3.0)
            else:
                raise ValueError("Invalid scenario. Must be in {'same', 'p3'}.")
            thresholds_dict[tensor] = TensorData(avg=avg_coef, std=std_coef, hist=hist, hist_edges=hist_edges)
            if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):
                apply_plot(hist, hist_edges)
        return thresholds_dict

    def get_entropy_threshold(self, histogram, num_quantized_bins):
        """Given a dataset, find the optimal threshold for quantizing it.
        The reference distribution is `q`, and the candidate distribution is `p`.
        `q` is a truncated version of the original distribution.
        Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
        """
        import copy

        from scipy.stats import entropy

        hist = histogram[0]
        hist_edges = histogram[1]
        num_bins = hist.size
        zero_bin_index = num_bins // 2
        num_half_quantized_bin = num_quantized_bins // 2
        kl_divergence = np.zeros(zero_bin_index - num_half_quantized_bin + 1)
        thresholds = [(0, 0) for i in range(kl_divergence.size)]
        # Widen the candidate clipping window symmetrically around the center
        # bin and keep the window with minimal KL divergence.
        for i in range(num_half_quantized_bin, zero_bin_index + 1, 1):
            start_index = zero_bin_index - i
            end_index = zero_bin_index + i + 1 if zero_bin_index + i + 1 <= num_bins else num_bins
            thresholds[i - num_half_quantized_bin] = (float(hist_edges[start_index]), float(hist_edges[end_index]))
            sliced_distribution = copy.deepcopy(hist[start_index:end_index])
            # p: reference distribution with outliers folded into the edge bins.
            p = sliced_distribution.copy()
            left_outliers_count = sum(hist[:start_index])
            right_outliers_count = sum(hist[end_index:])
            p[0] += left_outliers_count
            p[-1] += right_outliers_count
            nonzeros = (p != 0).astype(np.int64)
            # q: candidate distribution, quantized to num_quantized_bins then
            # expanded back over the nonzero support of p.
            quantized_bins = np.zeros(num_quantized_bins, dtype=np.int64)
            num_merged_bins = sliced_distribution.size // num_quantized_bins
            for index in range(num_quantized_bins):
                start = index * num_merged_bins
                end = start + num_merged_bins
                quantized_bins[index] = sum(sliced_distribution[start:end])
            quantized_bins[-1] += sum(sliced_distribution[num_quantized_bins * num_merged_bins :])
            q = np.zeros(p.size, dtype=np.int64)
            for index in range(num_quantized_bins):
                start = index * num_merged_bins
                end = start + num_merged_bins
                norm = sum(nonzeros[start:end])
                if norm != 0:
                    q[start:end] = float(quantized_bins[index]) / float(norm)
            p = smooth_distribution(p)
            q = smooth_distribution(q)
            if isinstance(q, np.ndarray):
                kl_divergence[i - num_half_quantized_bin] = entropy(p, q)
            else:
                kl_divergence[i - num_half_quantized_bin] = float('inf')
        min_kl_divergence_idx = np.argmin(kl_divergence)
        optimal_threshold = thresholds[min_kl_divergence_idx]
        # Clamp to the actually observed range.
        min_value = histogram[2]
        max_value = histogram[3]
        if optimal_threshold[0] < min_value:
            optimal_threshold = (min_value, optimal_threshold[1])
        if optimal_threshold[1] > max_value:
            optimal_threshold = (optimal_threshold[0], max_value)
        return optimal_threshold
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TensorsData:
    """
    Mapping-like container of calibration results: one ``TensorData`` per tensor name.
    Accepts raw tuples from the calibraters and normalizes them on construction.
    """

    def __init__(self, calibration_method, data: Dict[str, Union[TensorData, Tuple]]):
        """
        :param calibration_method: ``CalibrationMethod`` member describing how *data* was produced.
        :param data: maps tensor name to a ``TensorData``, a ``(min, max)`` tuple
            (MinMax calibration only) or a ``(min, max, histogram, bins)`` tuple.
        :raises TypeError: on non-string keys or values of an unexpected shape.
        """
        self.calibration_method = calibration_method
        self.data = {}
        for k, v in data.items():
            if not isinstance(k, str):
                raise TypeError(f'Keys must be strings not {type(k)}.')
            if isinstance(v, tuple):
                # len() is checked first so malformed tuples fail fast with
                # the intended TypeError below.
                if len(v) == 2 and calibration_method == CalibrationMethod.MinMax:
                    self.data[k] = TensorData(lowest=v[0], highest=v[1])
                    continue
                if len(v) == 4:
                    self.data[k] = TensorData(lowest=v[0], highest=v[1], histogram=v[2], bins=v[3])
                    continue
                # BUG FIX: ``{k:r}`` is an invalid format spec and raised
                # ValueError while building this message; ``{k!r}`` is intended.
                raise TypeError(f'Unexpected tuple for {k!r}, it has {len(v)} elements: {v}.')
            if not isinstance(v, TensorData):
                raise TypeError(f'Values must be TensorData not {type(v)}.')
            self.data[k] = v

    def __iter__(self):
        yield from self.data

    def __contains__(self, key):
        return key in self.data

    def __getitem__(self, key):
        return self.data[key]

    def __setitem__(self, key, value):
        # Only updates of already-known tensors are permitted.
        if key not in self.data:
            raise RuntimeError(f'Only an existing tensor can be modified, {key!r} is not.')
        self.data[key] = value

    def values(self):
        return self.data.values()
class CalibrationMethod(Enum):
    # Strategy used to convert collected activation statistics into
    # quantization thresholds (see the corresponding *Calibrater classes).
    MinMax = 0  # global (or moving-average) min/max per tensor
    Entropy = 1  # KL-divergence-minimizing clipping thresholds
    Percentile = 2  # clip tails beyond a configured percentile
    Distribution = 3  # mean/std summary of the distribution (float 8 scenarios)
class CalibrationDataReader(metaclass=abc.ABCMeta):
    """Iterable source of input-feed dicts consumed by an InferenceSession during calibration."""

    @classmethod
    def __subclasshook__(cls, subclass):
        # Structural check: any class exposing a callable ``get_next`` counts
        # as a CalibrationDataReader (duck typing via ABCMeta).
        candidate = getattr(subclass, 'get_next', None)
        return callable(candidate) or NotImplemented

    @abc.abstractmethod
    def get_next(self) -> dict:
        """generate the input data dict for ONNXinferenceSession run"""
        raise NotImplementedError

    def __iter__(self):
        return self

    def __next__(self):
        # ``get_next`` signals exhaustion with None; translate that into the
        # standard iterator protocol.
        batch = self.get_next()
        if batch is None:
            raise StopIteration
        return batch
class CalibraterBase:
    """Base class shared by all calibraters.

    Loads the model (with shape inference), selects which tensors to
    calibrate, and owns the ONNX Runtime session that executes the augmented
    model.  Subclasses implement :meth:`augment_graph`, :meth:`collect_data`
    and :meth:`compute_data`.
    """

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path='augmented_model.onnx',
        symmetric=False,
        use_external_data_format=False,
    ):
        """
        :param model_path: ONNX model to calibrate. It should be a model file path
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        """
        # Accept either str or Path; shape inference is run at load time so
        # value_info is populated for tensor selection.
        if isinstance(model_path, str):
            self.model = load_model_with_shape_infer(Path(model_path))
        elif isinstance(model_path, Path):
            self.model = load_model_with_shape_infer(model_path)
        else:
            raise ValueError('model_path should be model path.')
        self.op_types_to_calibrate = op_types_to_calibrate
        self.augmented_model_path = augmented_model_path
        self.symmetric = symmetric
        self.use_external_data_format = use_external_data_format
        # Populated later by augment_graph() / create_inference_session().
        self.augment_model = None
        self.infer_session = None
        self.execution_providers = ['CPUExecutionProvider']

    def set_execution_providers(self, execution_providers=None):
        """
        reset the execution providers to execute the collect_data. It triggers to re-creating inference session.

        :param execution_providers: list of execution-provider names; None
            means the default ``['CPUExecutionProvider']``.
        """
        # BUG FIX: the previous default was a mutable list literal, shared
        # across every call and instance (ruff B006); a None sentinel keeps
        # the same behavior for callers without that hazard.
        if execution_providers is None:
            execution_providers = ['CPUExecutionProvider']
        self.execution_providers = execution_providers
        self.create_inference_session()

    def create_inference_session(self):
        """
        create an OnnxRuntime InferenceSession.
        """
        # Graph optimizations are disabled so the augmented nodes/outputs are
        # executed exactly as written.
        sess_options = onnxruntime.SessionOptions()
        sess_options.graph_optimization_level = (onnxruntime.
            GraphOptimizationLevel.ORT_DISABLE_ALL)
        self.infer_session = onnxruntime.InferenceSession(
            self.augmented_model_path,
            sess_options=sess_options,
            providers=self.execution_providers,
        )

    def select_tensors_to_calibrate(self, model: ModelProto):
        """
        select input/output tensors of candidate nodes to calibrate.
        returns:
            tensors (set): set of tensor name.
            value_infos (dict): tensor name to value info.
        """
        # Graph outputs/inputs override value_info entries of the same name.
        value_infos = {vi.name: vi for vi in model.graph.value_info}
        value_infos.update({ot.name: ot for ot in model.graph.output})
        value_infos.update({it.name: it for it in model.graph.input})
        initializer = {init.name for init in model.graph.initializer}
        tensors_to_calibrate = set()
        tensor_type_to_calibrate = {TensorProto.FLOAT}
        for node in model.graph.node:
            if not self.op_types_to_calibrate or node.op_type in self.op_types_to_calibrate:
                for tensor_name in itertools.chain(node.input, node.output):
                    # Only float tensors with known value_info that are not
                    # initializers (weights) are calibrated.
                    if tensor_name in value_infos:
                        vi = value_infos[tensor_name]
                        if (vi.type.HasField('tensor_type')
                                and vi.type.tensor_type.elem_type in tensor_type_to_calibrate
                                and tensor_name not in initializer):
                            tensors_to_calibrate.add(tensor_name)
        return tensors_to_calibrate, value_infos

    def get_augment_model(self):
        """
        return: augmented onnx model. Call after calling augment_graph
        """
        return self.model

    def augment_graph(self):
        """
        abstract method: augment the input model to prepare for collecting data. It will:
            1. augment the model to be able to collect desired statistics data
            2. save augmented model to augmented_model_paths
        """
        raise NotImplementedError

    def collect_data(self, data_reader: CalibrationDataReader):
        """
        abstract method: collect the tensors that will be used for range computation. It can be called multiple times.
        """
        raise NotImplementedError

    def compute_data(self) -> TensorsData:
        """
        abstract method: compute data based on the calibration method stored in TensorsData
        """
        raise NotImplementedError
class MinMaxCalibrater(CalibraterBase):
    """Calibrater that records per-tensor ranges by augmenting the graph with
    ReduceMin/ReduceMax nodes and reading them back as extra graph outputs."""

    def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:
        Optional[Sequence[str]]=None, augmented_model_path=
        'augmented_model.onnx', symmetric=False, use_external_data_format=
        False, moving_average=False, averaging_constant=0.01):
        """
        :param model_path: ONNX model to calibrate. It is a model path
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        :param moving_average: compute the moving average of the minimum and maximum values instead of the global minimum and maximum.
        :param averaging_constant: constant smoothing factor to use when computing the moving average.
        """
        super().__init__(model_path, op_types_to_calibrate=
            op_types_to_calibrate, augmented_model_path=
            augmented_model_path, symmetric=symmetric,
            use_external_data_format=use_external_data_format)
        self.intermediate_outputs = []
        self.calibrate_tensors_range = None
        # The original outputs are remembered so the ReduceMin/ReduceMax
        # outputs appended by augment_graph can be told apart from them.
        self.num_model_outputs = len(self.model.graph.output)
        self.model_original_outputs = {output.name for output in self.model
            .graph.output}
        self.moving_average = moving_average
        if moving_average and (averaging_constant < 0 or averaging_constant > 1
            ):
            raise ValueError(
                'Invalid averaging constant, which should not be < 0 or > 1.')
        self.averaging_constant = averaging_constant

    def augment_graph(self):
        """
        Adds ReduceMin and ReduceMax nodes to all quantization_candidates op type nodes in
        model and ensures their outputs are stored as part of the graph output
        :return: augmented ONNX model
        """
        tensors, _ = self.select_tensors_to_calibrate(self.model)
        reshape_shape_name = str(uuid.uuid4())
        # Shared [1] shape initializer so every reduced scalar is reshaped to
        # a rank-1 tensor of size 1 before being exposed as a graph output.
        reshape_shape = numpy_helper.from_array(np.array([1], dtype=np.
            int64), reshape_shape_name)
        self.model.graph.initializer.append(reshape_shape)

        def add_reduce_min_max(tensor_name, reduce_op_name):
            # Appends '<tensor>_<ReduceOp>' (via an intermediate Reshape) as a
            # new FLOAT[1] graph output.
            keepdims = 1
            reduce_output = tensor_name + '_' + reduce_op_name
            intermediate_output = reduce_output + '_Reshape'
            reduce_node = onnx.helper.make_node(reduce_op_name, [
                tensor_name], [intermediate_output], keepdims=keepdims,
                name=reduce_output)
            reshape_node = onnx.helper.make_node('Reshape', inputs=[
                intermediate_output, reshape_shape_name], outputs=[
                reduce_output], name=intermediate_output)
            self.model.graph.node.extend([reduce_node, reshape_node])
            self.model.graph.output.append(helper.make_tensor_value_info(
                reduce_output, TensorProto.FLOAT, [1]))
        for tensor in tensors:
            add_reduce_min_max(tensor, 'ReduceMin')
            add_reduce_min_max(tensor, 'ReduceMax')
        onnx.save(self.model, self.augmented_model_path,
            save_as_external_data=self.use_external_data_format)

    def clear_collected_data(self):
        # Drop buffered inference results between compute_data() rounds.
        self.intermediate_outputs = []

    def collect_data(self, data_reader: CalibrationDataReader):
        """Run the augmented model on every batch from data_reader and fold the
        observed ReduceMin/ReduceMax outputs into the running range."""
        while True:
            inputs = data_reader.get_next()
            if not inputs:
                break
            self.intermediate_outputs.append(self.infer_session.run(None,
                inputs))
        if len(self.intermediate_outputs) == 0:
            raise ValueError('No data is collected.')
        t = self.compute_data()
        if not isinstance(t, TensorsData):
            raise TypeError(
                f'compute_data must return a TensorsData not {type(t)}.')
        self.clear_collected_data()

    def merge_range(self, old_range, new_range):
        # Combine a previously computed range with a new one: either a moving
        # average update or widening to the envelope of both.
        if not old_range:
            return new_range
        for key, value in old_range.items():
            if self.moving_average:
                min_value = value[0] + self.averaging_constant * (new_range
                    [key][0] - value[0])
                max_value = value[1] + self.averaging_constant * (new_range
                    [key][1] - value[1])
            else:
                min_value = min(value[0], new_range[key][0])
                max_value = max(value[1], new_range[key][1])
            new_range[key] = min_value, max_value
        return new_range

    def compute_data(self) ->TensorsData:
        """
        Compute the min-max range of tensor
        :return: dictionary mapping: {added node names: (ReduceMin, ReduceMax) pairs }
        """
        if len(self.intermediate_outputs) == 0:
            return self.calibrate_tensors_range
        output_names = [self.infer_session.get_outputs()[i].name for i in
            range(len(self.intermediate_outputs[0]))]
        output_dicts_list = [dict(zip(output_names, intermediate_output)) for
            intermediate_output in self.intermediate_outputs]
        merged_output_dict = {}
        for d in output_dicts_list:
            for k, v in d.items():
                merged_output_dict.setdefault(k, []).append(v)
        # Outputs appended by augment_graph come after the model's own outputs
        # and alternate ReduceMin/ReduceMax per tensor; rpartition strips the
        # '_ReduceMin'/'_ReduceMax' suffix to recover the tensor name.
        added_output_names = output_names[self.num_model_outputs:]
        calibrate_tensor_names = [added_output_names[i].rpartition('_')[0] for
            i in range(0, len(added_output_names), 2)]
        merged_added_output_dict = {i: merged_output_dict[i] for i in
            merged_output_dict if i not in self.model_original_outputs}
        pairs = []
        for i in range(0, len(added_output_names), 2):
            min_value = 0
            max_value = 0
            if self.moving_average:
                # Average the per-batch min/max instead of taking extremes.
                min_value_array = np.mean(merged_added_output_dict[
                    added_output_names[i]], axis=0)
                max_value_array = np.mean(merged_added_output_dict[
                    added_output_names[i + 1]], axis=0)
            else:
                min_value_array = min(merged_added_output_dict[
                    added_output_names[i]])
                max_value_array = max(merged_added_output_dict[
                    added_output_names[i + 1]])
            # NOTE(review): isinstance(...) would be the idiomatic check here.
            if type(min_value_array) == int or min_value_array.size > 0:
                min_value = float(min_value_array)
            if type(max_value_array) == int or max_value_array.size > 0:
                max_value = float(max_value_array)
            if self.symmetric:
                max_absolute_value = max(abs(min_value), abs(max_value))
                pairs.append(tuple([-max_absolute_value, max_absolute_value]))
            else:
                pairs.append(tuple([min_value, max_value]))
        new_calibrate_tensors_range = TensorsData(CalibrationMethod.MinMax,
            dict(zip(calibrate_tensor_names, pairs)))
        if self.calibrate_tensors_range:
            self.calibrate_tensors_range = self.merge_range(self.
                calibrate_tensors_range, new_calibrate_tensors_range)
        else:
            self.calibrate_tensors_range = new_calibrate_tensors_range
        return self.calibrate_tensors_range
class HistogramCalibrater(CalibraterBase):
    """Shared implementation for histogram-based calibraters (entropy,
    percentile, distribution): exposes candidate tensors as extra graph
    outputs and feeds the collected values into a HistogramCollector."""

    def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:
        Optional[Sequence[str]]=None, augmented_model_path=
        'augmented_model.onnx', use_external_data_format=False, method=
        'percentile', symmetric=False, num_bins=128, num_quantized_bins=
        2048, percentile=99.999, scenario='same'):
        """
        :param model_path: ONNX model to calibrate. It is a model path.
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        :param method: A string. One of ['entropy', 'percentile'].
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param num_bins: number of bins to create a new histogram for collecting tensor values.
        :param num_quantized_bins: number of quantized bins. Default 128.
        :param percentile: A float number between [0, 100]. Default 99.99.
        :param scenario: see :class:`DistributionCalibrater`
        """
        super().__init__(model_path, op_types_to_calibrate=
            op_types_to_calibrate, augmented_model_path=
            augmented_model_path, symmetric=symmetric,
            use_external_data_format=use_external_data_format)
        self.intermediate_outputs = []
        self.calibrate_tensors_range = None
        self.num_model_outputs = len(self.model.graph.output)
        self.model_original_outputs = {output.name for output in self.model
            .graph.output}
        self.collector = None  # lazily created on the first collect_data()
        self.method = method
        self.num_bins = num_bins
        self.num_quantized_bins = num_quantized_bins
        self.percentile = percentile
        self.tensors_to_calibrate = None  # filled in by augment_graph()
        self.scenario = scenario

    def augment_graph(self):
        """
        make all quantization_candidates op type nodes as part of the graph output.
        :return: augmented ONNX model
        """
        self.tensors_to_calibrate, value_infos = (self.
            select_tensors_to_calibrate(self.model))
        for tensor in self.tensors_to_calibrate:
            if tensor not in self.model_original_outputs:
                self.model.graph.output.append(value_infos[tensor])
        onnx.save(self.model, self.augmented_model_path,
            save_as_external_data=self.use_external_data_format)

    def clear_collected_data(self):
        # Drop buffered inference results between collection rounds.
        self.intermediate_outputs = []

    def collect_data(self, data_reader: CalibrationDataReader):
        """
        Entropy Calibrator collects operators' tensors as well as generates tensor histogram for each operator.
        """
        while True:
            inputs = data_reader.get_next()
            if not inputs:
                break
            self.intermediate_outputs.append(self.infer_session.run(None,
                inputs))
        if len(self.intermediate_outputs) == 0:
            raise ValueError('No data is collected.')
        output_names = [self.infer_session.get_outputs()[i].name for i in
            range(len(self.intermediate_outputs[0]))]
        output_dicts_list = [dict(zip(output_names, intermediate_output)) for
            intermediate_output in self.intermediate_outputs]
        merged_dict = {}
        for d in output_dicts_list:
            for k, v in d.items():
                merged_dict.setdefault(k, []).append(v)
        # Keep only the tensors selected for calibration; any other graph
        # outputs are ignored by the collector.
        clean_merged_dict = {i: merged_dict[i] for i in merged_dict if i in
            self.tensors_to_calibrate}
        if not self.collector:
            self.collector = HistogramCollector(method=self.method,
                symmetric=self.symmetric, num_bins=self.num_bins,
                num_quantized_bins=self.num_quantized_bins, percentile=self
                .percentile, scenario=self.scenario)
        self.collector.collect(clean_merged_dict)
        self.clear_collected_data()

    def compute_data(self) ->TensorsData:
        """
        Compute the min-max range of tensor
        :return: dictionary mapping: {tensor name: (min value, max value)}
        """
        if not self.collector:
            raise ValueError(
                "No collector created and can't generate calibration data.")
        # Map the concrete subclass to the CalibrationMethod tag recorded in
        # the returned TensorsData.
        if isinstance(self, EntropyCalibrater):
            cal = CalibrationMethod.Entropy
        elif isinstance(self, PercentileCalibrater):
            cal = CalibrationMethod.Percentile
        elif isinstance(self, DistributionCalibrater):
            cal = CalibrationMethod.Distribution
        else:
            raise TypeError(
                f'Unknown calibrater {type(self)}. This method must be overwritten.'
                )
        return TensorsData(cal, self.collector.compute_collection_result())
class EntropyCalibrater(HistogramCalibrater):
    """Histogram calibrater that selects thresholds with the KL-divergence
    (entropy) criterion.  This is a thin configuration wrapper: all of the
    actual work happens in HistogramCalibrater / HistogramCollector."""

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path='augmented_model.onnx',
        use_external_data_format=False,
        method='entropy',
        symmetric=False,
        num_bins=128,
        num_quantized_bins=128,
    ):
        """
        :param model_path: ONNX model to calibrate. It is a model path
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        :param method: A string. One of ['entropy', 'percentile', 'distribution'].
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param num_bins: number of bins to create a new histogram for collecting tensor values.
        :param num_quantized_bins: number of quantized bins. Default 128.
        """
        super().__init__(
            model_path,
            op_types_to_calibrate,
            augmented_model_path,
            use_external_data_format,
            method=method,
            symmetric=symmetric,
            num_bins=num_bins,
            num_quantized_bins=num_quantized_bins,
        )
class PercentileCalibrater(HistogramCalibrater):
    """Histogram calibrater that clips ranges at a configurable percentile of
    the observed distribution.  Thin configuration wrapper around
    HistogramCalibrater / HistogramCollector."""

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path='augmented_model.onnx',
        use_external_data_format=False,
        method='percentile',
        symmetric=False,
        num_bins=2048,
        percentile=99.999,
    ):
        """
        :param model_path: ONNX model to calibrate. It is a model path
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        :param method: A string. One of ['entropy', 'percentile', 'distribution'].
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param num_quantized_bins: number of quantized bins. Default 128.
        :param percentile: A float number between [0, 100]. Default 99.99.
        """
        super().__init__(
            model_path,
            op_types_to_calibrate,
            augmented_model_path,
            use_external_data_format,
            method=method,
            symmetric=symmetric,
            num_bins=num_bins,
            percentile=percentile,
        )
class DistributionCalibrater(HistogramCalibrater):
    """Histogram calibrater that characterizes each tensor by the average and
    standard deviation of its histogram.  Thin configuration wrapper around
    HistogramCalibrater / HistogramCollector."""

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path='augmented_model.onnx',
        use_external_data_format=False,
        method='distribution',
        num_bins=128,
        scenario='same',
    ):
        """
        :param model_path: ONNX model to calibrate. It is a model path
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        :param method: A string. One of ['entropy', 'percentile', 'distribution'].
        :param num_bins: number of bins to create a new histogram for collecting tensor values.
        :param scenario: for float 8 only, if `scenario="same"`,
            the algorithm weights and float 8 follow the same distribution,
            if `scenario="p3"`, it assumes the weights follow
            a gaussian law and float 8 ~ X^3 where X is a gaussian law
        """
        super().__init__(
            model_path,
            op_types_to_calibrate,
            augmented_model_path,
            use_external_data_format,
            method=method,
            num_bins=num_bins,
            scenario=scenario,
        )
class CalibrationDataCollector(metaclass=abc.ABCMeta):
    """
    Base class for collecting data for calibration-based quantization.
    """

    @abc.abstractmethod
    def collect(self, name_to_arr):
        """
        Generate informative data based on given data.
            name_to_arr : dict
                tensor name to NDArray data
        """
        raise NotImplementedError

    @abc.abstractmethod
    def compute_collection_result(self):
        """
        Get the optimal result among collection data.
        """
        raise NotImplementedError
class HistogramCollector(CalibrationDataCollector):
    """
    Collecting histogram for each tensor. Percentile and Entropy method are supported.
    ref: https://github.com//apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py
    ref: https://docs.nvidia.com/deeplearning/tensorrt/pytorch-quantization-toolkit/docs/_modules/
    pytorch_quantization/calib/histogram.html
    """

    def __init__(self, method, symmetric, num_bins, num_quantized_bins,
        percentile, scenario):
        # histogram_dict maps tensor name -> histogram state tuple; its shape
        # depends on the collection mode: (hist, edges, min, max) from
        # collect_absolute_value, (hist, edges, min, max, threshold) from
        # collect_value.
        self.histogram_dict = {}
        self.method = method
        self.symmetric = symmetric
        self.num_bins = num_bins
        self.num_quantized_bins = num_quantized_bins
        self.percentile = percentile
        self.scenario = scenario

    def get_histogram_dict(self):
        return self.histogram_dict

    def collect(self, name_to_arr):
        # Dispatch to the collection strategy implied by method/symmetric.
        print('Collecting tensor data and making histogram ...')
        if self.method in {'distribution', 'entropy'}:
            return self.collect_value(name_to_arr)
        elif self.method == 'percentile':
            if self.symmetric:
                return self.collect_absolute_value(name_to_arr)
            else:
                return self.collect_value(name_to_arr)
        else:
            raise ValueError(
                "Only 'entropy', 'percentile' or 'distribution' methods are supported"
                )

    def collect_absolute_value(self, name_to_arr):
        """
        Collect histogram on absolute value
        """
        for tensor, data_arr in name_to_arr.items():
            data_arr = np.asarray(data_arr)
            data_arr = data_arr.flatten()
            if data_arr.size > 0:
                min_value = np.min(data_arr)
                max_value = np.max(data_arr)
            else:
                min_value = 0
                max_value = 0
            # The histogram is built over |x|; the signed min/max observed
            # above are still recorded alongside it.
            data_arr = np.absolute(data_arr)
            if tensor not in self.histogram_dict:
                hist, hist_edges = np.histogram(data_arr, bins=self.num_bins)
                self.histogram_dict[tensor
                    ] = hist, hist_edges, min_value, max_value
            else:
                old_histogram = self.histogram_dict[tensor]
                old_min = old_histogram[2]
                old_max = old_histogram[3]
                old_hist = old_histogram[0]
                old_hist_edges = old_histogram[1]
                temp_amax = np.max(data_arr)
                if temp_amax > old_hist_edges[-1]:
                    # Extend the edges (same bin width) to cover the new max
                    # before re-binning, so old counts can be added in place.
                    width = old_hist_edges[1] - old_hist_edges[0]
                    new_bin_edges = np.arange(old_hist_edges[-1] + width,
                        temp_amax + width, width)
                    old_hist_edges = np.hstack((old_hist_edges, new_bin_edges))
                hist, hist_edges = np.histogram(data_arr, bins=old_hist_edges)
                hist[:len(old_hist)] += old_hist
                self.histogram_dict[tensor] = hist, hist_edges, min(old_min,
                    min_value), max(old_max, max_value)

    def collect_value(self, name_to_arr):
        """
        Collect histogram on real value
        """
        for tensor, data_arr in name_to_arr.items():
            data_arr = np.asarray(data_arr)
            data_arr = data_arr.flatten()
            if data_arr.size > 0:
                min_value = np.min(data_arr)
                max_value = np.max(data_arr)
            else:
                min_value = 0
                max_value = 0
            # The histogram range is symmetric around zero in this mode.
            threshold = max(abs(min_value), abs(max_value))
            if tensor in self.histogram_dict:
                old_histogram = self.histogram_dict[tensor]
                self.histogram_dict[tensor] = self.merge_histogram(
                    old_histogram, data_arr, min_value, max_value, threshold)
            else:
                hist, hist_edges = np.histogram(data_arr, self.num_bins,
                    range=(-threshold, threshold))
                self.histogram_dict[tensor
                    ] = hist, hist_edges, min_value, max_value, threshold

    def merge_histogram(self, old_histogram, data_arr, new_min, new_max,
        new_threshold):
        # Merge a new batch into an existing (hist, edges, min, max,
        # threshold) state, widening the symmetric range when the new
        # threshold exceeds the old one.
        old_hist, old_hist_edges, old_min, old_max, old_threshold = (
            old_histogram)
        if new_threshold <= old_threshold:
            # New data fits in the existing range: bin it there and add.
            new_hist, _ = np.histogram(data_arr, len(old_hist), range=(-
                old_threshold, old_threshold))
            return new_hist + old_hist, old_hist_edges, min(old_min, new_min
                ), max(old_max, new_max), old_threshold
        else:
            if old_threshold == 0:
                hist, hist_edges = np.histogram(data_arr, len(old_hist),
                    range=(-new_threshold, new_threshold))
                hist += old_hist
            else:
                # Grow the range by whole old-width bins on both sides so the
                # old counts can be added back in place.
                old_num_bins = len(old_hist)
                old_stride = 2 * old_threshold / old_num_bins
                half_increased_bins = int((new_threshold - old_threshold) //
                    old_stride + 1)
                new_num_bins = old_num_bins + 2 * half_increased_bins
                new_threshold = (half_increased_bins * old_stride +
                    old_threshold)
                hist, hist_edges = np.histogram(data_arr, new_num_bins,
                    range=(-new_threshold, new_threshold))
                hist[half_increased_bins:new_num_bins - half_increased_bins
                    ] += old_hist
            return hist, hist_edges, min(old_min, new_min), max(old_max,
                new_max), new_threshold

    def compute_collection_result(self):
        # Dispatch the threshold search to the configured method.
        if not self.histogram_dict or len(self.histogram_dict) == 0:
            raise ValueError(
                'Histogram has not been collected. Please run collect() first.'
                )
        print(
            f'Finding optimal threshold for each tensor using {self.method} algorithm ...'
            )
        if self.method == 'entropy':
            return self.compute_entropy()
        elif self.method == 'percentile':
            return self.compute_percentile()
        elif self.method == 'distribution':
            return self.compute_distribution()
        else:
            raise ValueError(
                "Only 'entropy', 'percentile' or 'distribution' methods are supported"
                )

    def compute_percentile(self):
        if self.percentile < 0 or self.percentile > 100:
            raise ValueError(
                'Invalid percentile. Must be in range 0 <= percentile <= 100.')
        histogram_dict = self.histogram_dict
        percentile = self.percentile
        thresholds_dict = {}
        print(f'Number of tensors : {len(histogram_dict)}')
        print(f'Number of histogram bins : {self.num_bins}')
        print(f'Percentile : ({100.0 - percentile},{percentile})')
        for tensor, histogram in histogram_dict.items():
            hist = histogram[0]
            hist_edges = histogram[1]
            total = hist.sum()
            cdf = np.cumsum(hist / total)
            if self.symmetric:
                # Symmetric mode binned |x|, so only the upper tail is cut.
                idx_right = np.searchsorted(cdf, percentile / 100.0)
                thresholds_dict[tensor] = -float(hist_edges[idx_right]), float(
                    hist_edges[idx_right])
            else:
                # Cut (100 - percentile)/2 percent from each tail.
                percent_to_cut_one_side = (100.0 - percentile) / 200.0
                idx_right = np.searchsorted(cdf, 1.0 - percent_to_cut_one_side)
                idx_left = np.searchsorted(cdf, percent_to_cut_one_side)
                thresholds_dict[tensor] = float(hist_edges[idx_left]), float(
                    hist_edges[idx_right])
            min_value = histogram[2]
            max_value = histogram[3]
            # Clamp thresholds into the actually observed [min, max] range.
            if thresholds_dict[tensor][0] < min_value:
                thresholds_dict[tensor] = min_value, thresholds_dict[tensor][1]
            if thresholds_dict[tensor][1] > max_value:
                thresholds_dict[tensor] = thresholds_dict[tensor][0], max_value
            # NOTE(review): compute_entropy appends *histogram[:2] (the hist
            # array and its edges) at this point, while this line appends
            # hist[:2] (the first two bin counts) — looks inconsistent with
            # the 4-tuple (lowest, highest, histogram, bins) expected by
            # TensorsData; confirm which is intended.
            thresholds_dict[tensor] = *thresholds_dict[tensor], *hist[:2]
            if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):
                apply_plot(hist, hist_edges)
        return thresholds_dict

    def compute_entropy(self):
        histogram_dict = self.histogram_dict
        num_quantized_bins = self.num_quantized_bins
        thresholds_dict = {}
        print(f'Number of tensors : {len(histogram_dict)}')
        print(
            'Number of histogram bins : {} (The number may increase depends on the data it collects)'
            .format(self.num_bins))
        print(f'Number of quantized bins : {self.num_quantized_bins}')
        for tensor, histogram in histogram_dict.items():
            optimal_threshold = self.get_entropy_threshold(histogram,
                num_quantized_bins)
            thresholds_dict[tensor] = optimal_threshold
            # Append the raw histogram (counts and edges) after the threshold
            # pair, forming the 4-tuple consumed by TensorsData.
            thresholds_dict[tensor] = *optimal_threshold, *histogram[:2]
            if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):
                apply_plot(histogram[0], histogram[1])
        return thresholds_dict

    @staticmethod
    def _avg_std(hist, hist_edges, power=1):
        # Weighted mean / standard deviation of the bin centers, with the
        # histogram counts acting as weights.  power != 1 first transforms the
        # centers; odd integer powers keep the sign naturally, other powers
        # use |x|**power with the sign restored via `fact`.
        if power <= 0:
            raise ValueError(f'power={power} <= 0 is invalid.')
        values = (hist_edges[:-1] + hist_edges[1:]) * 0.5
        if power == 1:
            avg = (hist * values).sum() / hist.sum()
            std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5
            return avg, std
        if int(power) == power and int(power) % 2 == 1:
            avg = (hist * values ** power).sum() / hist.sum()
            std = ((hist * (values ** power - avg) ** 2).sum() / hist.sum()
                ) ** 0.5
            return avg, std
        # NaN/inf from a zero-valued center (0/0) are mapped to sign 1.
        fact = np.abs(values) / values
        fact[np.isnan(fact)] = 1
        fact[np.isinf(fact)] = 1
        values = np.abs(values) ** power * fact
        avg = (hist * values).sum() / hist.sum()
        std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5
        return avg, std

    def compute_distribution(self):
        if self.num_bins < 512:
            raise ValueError(
                'Invalid num_bins. Must be in range 512 <= num_bins.')
        histogram_dict = self.histogram_dict
        thresholds_dict = {}
        print(f'Number of tensors : {len(histogram_dict)}')
        print(f'Number of histogram bins : {self.num_bins}')
        print(f'Scenario : {self.scenario!r})')
        for tensor, histogram in histogram_dict.items():
            hist = histogram[0]
            hist_edges = histogram[1]
            if self.scenario == 'same':
                avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1)
            elif self.scenario == 'p3':
                # Cube-root power, see the DistributionCalibrater docstring.
                avg_coef, std_coef = self._avg_std(hist, hist_edges, power=
                    1.0 / 3.0)
            else:
                raise ValueError("Invalid scenario. Must be in {'same', 'p3'}."
                    )
            thresholds_dict[tensor] = TensorData(avg=avg_coef, std=std_coef,
                hist=hist, hist_edges=hist_edges)
            if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):
                apply_plot(hist, hist_edges)
        return thresholds_dict

    def get_entropy_threshold(self, histogram, num_quantized_bins):
        """Given a dataset, find the optimal threshold for quantizing it.
        The reference distribution is `q`, and the candidate distribution is `p`.
        `q` is a truncated version of the original distribution.
        Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
        """
        import copy
        from scipy.stats import entropy
        hist = histogram[0]
        hist_edges = histogram[1]
        num_bins = hist.size
        zero_bin_index = num_bins // 2
        num_half_quantized_bin = num_quantized_bins // 2
        kl_divergence = np.zeros(zero_bin_index - num_half_quantized_bin + 1)
        thresholds = [(0, 0) for i in range(kl_divergence.size)]
        # Widen the candidate window symmetrically around the center bin and
        # measure KL(p || q) for each candidate threshold pair.
        for i in range(num_half_quantized_bin, zero_bin_index + 1, 1):
            start_index = zero_bin_index - i
            end_index = (zero_bin_index + i + 1 if zero_bin_index + i + 1 <=
                num_bins else num_bins)
            thresholds[i - num_half_quantized_bin] = float(hist_edges[
                start_index]), float(hist_edges[end_index])
            sliced_distribution = copy.deepcopy(hist[start_index:end_index])
            # p: the candidate window with outliers folded into its edge bins.
            p = sliced_distribution.copy()
            left_outliers_count = sum(hist[:start_index])
            right_outliers_count = sum(hist[end_index:])
            p[0] += left_outliers_count
            p[-1] += right_outliers_count
            nonzeros = (p != 0).astype(np.int64)
            # q: p re-quantized down to num_quantized_bins, then expanded back
            # by spreading each quantized count uniformly over the nonzero
            # source bins it was merged from.
            quantized_bins = np.zeros(num_quantized_bins, dtype=np.int64)
            num_merged_bins = sliced_distribution.size // num_quantized_bins
            for index in range(num_quantized_bins):
                start = index * num_merged_bins
                end = start + num_merged_bins
                quantized_bins[index] = sum(sliced_distribution[start:end])
            quantized_bins[-1] += sum(sliced_distribution[
                num_quantized_bins * num_merged_bins:])
            q = np.zeros(p.size, dtype=np.int64)
            for index in range(num_quantized_bins):
                start = index * num_merged_bins
                end = start + num_merged_bins
                norm = sum(nonzeros[start:end])
                if norm != 0:
                    q[start:end] = float(quantized_bins[index]) / float(norm)
            p = smooth_distribution(p)
            q = smooth_distribution(q)
            if isinstance(q, np.ndarray):
                kl_divergence[i - num_half_quantized_bin] = entropy(p, q)
            else:
                # A non-array from smooth_distribution (presumably a failure
                # signal — confirm) makes this candidate infinitely bad.
                kl_divergence[i - num_half_quantized_bin] = float('inf')
        min_kl_divergence_idx = np.argmin(kl_divergence)
        optimal_threshold = thresholds[min_kl_divergence_idx]
        min_value = histogram[2]
        max_value = histogram[3]
        # Clamp the winning thresholds into the observed data range.
        if optimal_threshold[0] < min_value:
            optimal_threshold = min_value, optimal_threshold[1]
        if optimal_threshold[1] > max_value:
            optimal_threshold = optimal_threshold[0], max_value
        return optimal_threshold
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/env python
# -------------------------------------------------------------------------
# Copyright (c) Microsoft, Intel Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import abc
import itertools
import os
import uuid
from enum import Enum
from pathlib import Path
from typing import Dict, Optional, Sequence, Tuple, Union
import numpy as np
import onnx
from onnx import ModelProto, TensorProto, helper, numpy_helper
import onnxruntime
from .quant_utils import apply_plot, load_model_with_shape_infer, smooth_distribution
class TensorData:
    """Statistics recorded for a single tensor during calibration.

    Only the attribute names in ``_allowed`` may be set; ``range_value`` and
    ``avg_std`` expose the (lowest, highest) and (avg, std) pairs when the
    corresponding attributes were provided.
    """

    _allowed = frozenset(["avg", "std", "lowest", "highest", "hist", "hist_edges"])

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            if name in TensorData._allowed:
                setattr(self, name, value)
            else:
                raise ValueError(f"Unexpected value {name!r} not in {TensorData._allowed}.")

    @property
    def range_value(self):
        """(lowest, highest) pair; raises AttributeError when either is absent."""
        if hasattr(self, "lowest") and hasattr(self, "highest"):
            return (self.lowest, self.highest)
        raise AttributeError(f"Attributes 'lowest' and/or 'highest' missing in {dir(self)}.")

    @property
    def avg_std(self):
        """(avg, std) pair; raises AttributeError when either is absent."""
        if hasattr(self, "avg") and hasattr(self, "std"):
            return (self.avg, self.std)
        raise AttributeError(f"Attributes 'avg' and/or 'std' missing in {dir(self)}.")
class TensorsData:
    """Mapping of tensor name to :class:`TensorData` produced by calibration.

    Tuple values are accepted as a convenience and converted: a 2-tuple
    ``(lowest, highest)`` when the calibration method is MinMax, or a 4-tuple
    ``(lowest, highest, histogram, bins)`` for histogram-based methods.
    """

    def __init__(self, calibration_method, data: Dict[str, Union[TensorData, Tuple]]):
        """
        :param calibration_method: a :class:`CalibrationMethod` member.
        :param data: tensor name -> TensorData (or tuple, see class docstring).
        :raises TypeError: for non-string keys or unsupported value types/shapes.
        """
        self.calibration_method = calibration_method
        self.data = {}
        for k, v in data.items():
            if not isinstance(k, str):
                raise TypeError(f"Keys must be strings not {type(k)}.")
            if isinstance(v, tuple):
                if calibration_method == CalibrationMethod.MinMax and len(v) == 2:
                    self.data[k] = TensorData(lowest=v[0], highest=v[1])
                    continue
                if len(v) == 4:
                    self.data[k] = TensorData(lowest=v[0], highest=v[1], histogram=v[2], bins=v[3])
                    continue
                # BUG FIX: '{k:r}' is an invalid format spec for str and made
                # this line raise ValueError instead of the intended TypeError;
                # '{k!r}' is the correct repr conversion.
                raise TypeError(f"Unexpected tuple for {k!r}, it has {len(v)} elements: {v}.")
            if not isinstance(v, TensorData):
                raise TypeError(f"Values must be TensorData not {type(v)}.")
            self.data[k] = v

    def __iter__(self):
        # Iterate over tensor names, dict-style.
        yield from self.data

    def __contains__(self, key):
        return key in self.data

    def __getitem__(self, key):
        return self.data[key]

    def __setitem__(self, key, value):
        # Only existing entries may be overwritten; new names are rejected.
        if key not in self.data:
            raise RuntimeError(f"Only an existing tensor can be modified, {key!r} is not.")
        self.data[key] = value

    def values(self):
        return self.data.values()
class CalibrationMethod(Enum):
    # Algorithms available for computing per-tensor quantization ranges.
    MinMax = 0  # running (or moving-average) min/max of observed values
    Entropy = 1  # KL-divergence-minimizing thresholds
    Percentile = 2  # clip at a percentile of the observed histogram
    Distribution = 3  # avg/std characterization of the histogram
class CalibrationDataReader(metaclass=abc.ABCMeta):
    """Abstract source of calibration inputs.

    Subclasses implement :meth:`get_next`, returning one input-feed dict per
    call and None when exhausted; instances are also directly iterable.
    """

    @classmethod
    def __subclasshook__(cls, subclass):
        # Structural check: any class with a callable get_next() is treated as
        # a CalibrationDataReader for isinstance/issubclass purposes.
        return hasattr(subclass, "get_next") and callable(subclass.get_next) or NotImplemented

    @abc.abstractmethod
    def get_next(self) -> dict:
        """generate the input data dict for ONNXinferenceSession run"""
        raise NotImplementedError

    def __iter__(self):
        return self

    def __next__(self):
        # Iterator protocol: a None from get_next() signals exhaustion.
        result = self.get_next()
        if result is None:
            raise StopIteration
        return result
class CalibraterBase:
    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path="augmented_model.onnx",
        symmetric=False,
        use_external_data_format=False,
    ):
        """
        :param model_path: ONNX model to calibrate. It should be a model file path
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        """
        # Accept either str or Path; shape inference is run at load time so
        # value_info is populated for tensor selection.
        if isinstance(model_path, str):
            self.model = load_model_with_shape_infer(Path(model_path))
        elif isinstance(model_path, Path):
            self.model = load_model_with_shape_infer(model_path)
        else:
            raise ValueError("model_path should be model path.")
        self.op_types_to_calibrate = op_types_to_calibrate
        self.augmented_model_path = augmented_model_path
        self.symmetric = symmetric
        self.use_external_data_format = use_external_data_format
        # Populated later by augment_graph() / create_inference_session().
        self.augment_model = None
        self.infer_session = None
        self.execution_providers = ["CPUExecutionProvider"]
def set_execution_providers(self, execution_providers=["CPUExecutionProvider"]): # noqa: B006
"""
reset the execution providers to execute the collect_data. It triggers to re-creating inference session.
"""
self.execution_providers = execution_providers
self.create_inference_session()
def create_inference_session(self):
"""
create an OnnxRuntime InferenceSession.
"""
sess_options = onnxruntime.SessionOptions()
sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL
self.infer_session = onnxruntime.InferenceSession(
self.augmented_model_path,
sess_options=sess_options,
providers=self.execution_providers,
)
def select_tensors_to_calibrate(self, model: ModelProto):
"""
select input/output tensors of candidate nodes to calibrate.
returns:
tensors (set): set of tensor name.
value_infos (dict): tensor name to value info.
"""
value_infos = {vi.name: vi for vi in model.graph.value_info}
value_infos.update({ot.name: ot for ot in model.graph.output})
value_infos.update({it.name: it for it in model.graph.input})
initializer = {init.name for init in model.graph.initializer}
tensors_to_calibrate = set()
tensor_type_to_calibrate = {TensorProto.FLOAT}
for node in model.graph.node:
if not self.op_types_to_calibrate or node.op_type in self.op_types_to_calibrate:
for tensor_name in itertools.chain(node.input, node.output):
if tensor_name in value_infos:
vi = value_infos[tensor_name]
if (
vi.type.HasField("tensor_type")
and (vi.type.tensor_type.elem_type in tensor_type_to_calibrate)
and (tensor_name not in initializer)
):
tensors_to_calibrate.add(tensor_name)
return tensors_to_calibrate, value_infos
def get_augment_model(self):
"""
return: augmented onnx model. Call after calling augment_graph
"""
return self.model
def augment_graph(self):
"""
abstract method: augment the input model to prepare for collecting data. It will:
1. augment the model to be able to collect desired statistics data
2. save augmented model to augmented_model_paths
"""
raise NotImplementedError
def collect_data(self, data_reader: CalibrationDataReader):
"""
abstract method: collect the tensors that will be used for range computation. It can be called multiple times.
"""
raise NotImplementedError
def compute_data(self) -> TensorsData:
"""
abstract method: compute data based on the calibration method stored in TensorsData
"""
raise NotImplementedError
class MinMaxCalibrater(CalibraterBase):
    # Calibrater that tracks a per-tensor (min, max) range by appending
    # ReduceMin/ReduceMax observer nodes whose outputs become graph outputs.
    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path="augmented_model.onnx",
        symmetric=False,
        use_external_data_format=False,
        moving_average=False,
        averaging_constant=0.01,
    ):
        """
        :param model_path: ONNX model to calibrate. It is a model path
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        :param moving_average: compute the moving average of the minimum and maximum values instead of the global minimum and maximum.
        :param averaging_constant: constant smoothing factor to use when computing the moving average.
        """
        super().__init__(
            model_path,
            op_types_to_calibrate=op_types_to_calibrate,
            augmented_model_path=augmented_model_path,
            symmetric=symmetric,
            use_external_data_format=use_external_data_format,
        )
        self.intermediate_outputs = []
        self.calibrate_tensors_range = None
        # Remember the original output count/names so observer outputs added
        # by augment_graph() can be separated out later in compute_data().
        self.num_model_outputs = len(self.model.graph.output)
        self.model_original_outputs = {output.name for output in self.model.graph.output}
        self.moving_average = moving_average
        if moving_average and (averaging_constant < 0 or averaging_constant > 1):
            raise ValueError("Invalid averaging constant, which should not be < 0 or > 1.")
        self.averaging_constant = averaging_constant
    def augment_graph(self):
        """
        Adds ReduceMin and ReduceMax nodes to all quantization_candidates op type nodes in
        model and ensures their outputs are stored as part of the graph output
        :return: augmented ONNX model
        """
        tensors, _ = self.select_tensors_to_calibrate(self.model)
        # Shared [1] shape initializer (random name to avoid collisions) used
        # by every Reshape observer node below.
        reshape_shape_name = str(uuid.uuid4())
        reshape_shape = numpy_helper.from_array(np.array([1], dtype=np.int64), reshape_shape_name)
        self.model.graph.initializer.append(reshape_shape)
        def add_reduce_min_max(tensor_name, reduce_op_name):
            # When doing ReduceMax/ReduceMin, ORT can't reduce on dim with value of 0 if 'keepdims' is false.
            # To make the code simple, we always let keepdims to be 1.
            keepdims = 1
            # Adding ReduceMin/ReduceMax nodes: ReduceMin/ReduceMax -> Reshape-> (output)
            reduce_output = tensor_name + "_" + reduce_op_name
            intermediate_output = reduce_output + "_Reshape"
            reduce_node = onnx.helper.make_node(
                reduce_op_name, [tensor_name], [intermediate_output], keepdims=keepdims, name=reduce_output
            )
            reshape_node = onnx.helper.make_node(
                "Reshape",
                inputs=[intermediate_output, reshape_shape_name],
                outputs=[reduce_output],
                name=intermediate_output,
            )
            self.model.graph.node.extend([reduce_node, reshape_node])
            self.model.graph.output.append(helper.make_tensor_value_info(reduce_output, TensorProto.FLOAT, [1]))
        for tensor in tensors:
            # Observer outputs are appended in (min, max) pairs; compute_data()
            # relies on this pairing when it walks the added outputs two at a time.
            add_reduce_min_max(tensor, "ReduceMin")
            add_reduce_min_max(tensor, "ReduceMax")
        onnx.save(
            self.model,
            self.augmented_model_path,
            save_as_external_data=self.use_external_data_format,
        )
    def clear_collected_data(self):
        # Drop raw session outputs once they have been folded into the range.
        self.intermediate_outputs = []
    def collect_data(self, data_reader: CalibrationDataReader):
        # Run the augmented model over every feed from the reader, then fold
        # the observer outputs into self.calibrate_tensors_range.
        while True:
            inputs = data_reader.get_next()
            if not inputs:
                break
            self.intermediate_outputs.append(self.infer_session.run(None, inputs))
        if len(self.intermediate_outputs) == 0:
            raise ValueError("No data is collected.")
        t = self.compute_data()
        if not isinstance(t, TensorsData):
            raise TypeError(f"compute_data must return a TensorsData not {type(t)}.")
        self.clear_collected_data()
    def merge_range(self, old_range, new_range):
        # Combine a previously computed range with a fresh one, either by
        # exponential moving average or by widening to the union.
        if not old_range:
            return new_range
        for key, value in old_range.items():
            if self.moving_average:
                min_value = value[0] + self.averaging_constant * (new_range[key][0] - value[0])
                max_value = value[1] + self.averaging_constant * (new_range[key][1] - value[1])
            else:
                min_value = min(value[0], new_range[key][0])
                max_value = max(value[1], new_range[key][1])
            new_range[key] = (min_value, max_value)
        return new_range
    def compute_data(self) -> TensorsData:
        """
        Compute the min-max range of tensor
        :return: dictionary mapping: {added node names: (ReduceMin, ReduceMax) pairs }
        """
        if len(self.intermediate_outputs) == 0:
            return self.calibrate_tensors_range
        output_names = [self.infer_session.get_outputs()[i].name for i in range(len(self.intermediate_outputs[0]))]
        output_dicts_list = [
            dict(zip(output_names, intermediate_output)) for intermediate_output in self.intermediate_outputs
        ]
        # Group every run's value per output name.
        merged_output_dict = {}
        for d in output_dicts_list:
            for k, v in d.items():
                merged_output_dict.setdefault(k, []).append(v)
        # Outputs past the original count are the observer outputs added by
        # augment_graph(), laid out as (min, max) pairs per tensor.
        added_output_names = output_names[self.num_model_outputs :]
        calibrate_tensor_names = [
            added_output_names[i].rpartition("_")[0] for i in range(0, len(added_output_names), 2)
        ]  # output names
        merged_added_output_dict = {
            i: merged_output_dict[i] for i in merged_output_dict if i not in self.model_original_outputs
        }
        pairs = []
        for i in range(0, len(added_output_names), 2):
            min_value = 0
            max_value = 0
            if self.moving_average:
                min_value_array = np.mean(merged_added_output_dict[added_output_names[i]], axis=0)
                max_value_array = np.mean(merged_added_output_dict[added_output_names[i + 1]], axis=0)
            else:
                min_value_array = min(merged_added_output_dict[added_output_names[i]])
                max_value_array = max(merged_added_output_dict[added_output_names[i + 1]])
            # NOTE(review): ``type(x) == int`` should normally be
            # ``isinstance(x, int)``; kept as-is to preserve behavior.
            if type(min_value_array) == int or min_value_array.size > 0:
                min_value = float(min_value_array)
            if type(max_value_array) == int or max_value_array.size > 0:
                max_value = float(max_value_array)
            if self.symmetric:
                # Symmetric range: widen both sides to the larger magnitude.
                max_absolute_value = max(abs(min_value), abs(max_value))
                pairs.append(tuple([-max_absolute_value, max_absolute_value]))
            else:
                pairs.append(tuple([min_value, max_value]))
        new_calibrate_tensors_range = TensorsData(CalibrationMethod.MinMax, dict(zip(calibrate_tensor_names, pairs)))
        if self.calibrate_tensors_range:
            self.calibrate_tensors_range = self.merge_range(self.calibrate_tensors_range, new_calibrate_tensors_range)
        else:
            self.calibrate_tensors_range = new_calibrate_tensors_range
        return self.calibrate_tensors_range
class HistogramCalibrater(CalibraterBase):
    # Base class for histogram-driven calibraters (entropy / percentile /
    # distribution): exposes every candidate tensor as a graph output and
    # feeds the observed values into a HistogramCollector.
    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path="augmented_model.onnx",
        use_external_data_format=False,
        method="percentile",
        symmetric=False,
        num_bins=128,
        num_quantized_bins=2048,
        percentile=99.999,
        scenario="same",
    ):
        """
        :param model_path: ONNX model to calibrate. It is a model path.
        :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
        :param augmented_model_path: save augmented model to this path.
        :param use_external_data_format: use external data format to store model which size is >= 2Gb
        :param method: A string. One of ['entropy', 'percentile'].
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param num_bins: number of bins to create a new histogram for collecting tensor values.
        :param num_quantized_bins: number of quantized bins. Default 128.
        :param percentile: A float number between [0, 100]. Default 99.99.
        :param scenario: see :class:`DistributionCalibrater`
        """
        super().__init__(
            model_path,
            op_types_to_calibrate=op_types_to_calibrate,
            augmented_model_path=augmented_model_path,
            symmetric=symmetric,
            use_external_data_format=use_external_data_format,
        )
        self.intermediate_outputs = []
        self.calibrate_tensors_range = None
        self.num_model_outputs = len(self.model.graph.output)
        self.model_original_outputs = {output.name for output in self.model.graph.output}
        # Lazily created on the first collect_data() call.
        self.collector = None
        self.method = method
        self.num_bins = num_bins
        self.num_quantized_bins = num_quantized_bins
        self.percentile = percentile
        self.tensors_to_calibrate = None
        self.scenario = scenario
    def augment_graph(self):
        """
        make all quantization_candidates op type nodes as part of the graph output.
        :return: augmented ONNX model
        """
        self.tensors_to_calibrate, value_infos = self.select_tensors_to_calibrate(self.model)
        for tensor in self.tensors_to_calibrate:
            # Promote intermediates to graph outputs so the session returns them.
            if tensor not in self.model_original_outputs:
                self.model.graph.output.append(value_infos[tensor])
        onnx.save(
            self.model,
            self.augmented_model_path,
            save_as_external_data=self.use_external_data_format,
        )
    def clear_collected_data(self):
        # Raw outputs are discarded after being folded into the collector.
        self.intermediate_outputs = []
    def collect_data(self, data_reader: CalibrationDataReader):
        """
        Entropy Calibrator collects operators' tensors as well as generates tensor histogram for each operator.
        """
        while True:
            inputs = data_reader.get_next()
            if not inputs:
                break
            self.intermediate_outputs.append(self.infer_session.run(None, inputs))
        if len(self.intermediate_outputs) == 0:
            raise ValueError("No data is collected.")
        output_names = [self.infer_session.get_outputs()[i].name for i in range(len(self.intermediate_outputs[0]))]
        output_dicts_list = [
            dict(zip(output_names, intermediate_output)) for intermediate_output in self.intermediate_outputs
        ]
        # Group every run's value per output name, then keep only the tensors
        # selected for calibration.
        merged_dict = {}
        for d in output_dicts_list:
            for k, v in d.items():
                merged_dict.setdefault(k, []).append(v)
        clean_merged_dict = {i: merged_dict[i] for i in merged_dict if i in self.tensors_to_calibrate}
        if not self.collector:
            self.collector = HistogramCollector(
                method=self.method,
                symmetric=self.symmetric,
                num_bins=self.num_bins,
                num_quantized_bins=self.num_quantized_bins,
                percentile=self.percentile,
                scenario=self.scenario,
            )
        self.collector.collect(clean_merged_dict)
        self.clear_collected_data()
    def compute_data(self) -> TensorsData:
        """
        Compute the min-max range of tensor
        :return: dictionary mapping: {tensor name: (min value, max value)}
        """
        if not self.collector:
            raise ValueError("No collector created and can't generate calibration data.")
        # Dispatch on the concrete subclass to tag the result with the right
        # CalibrationMethod.
        if isinstance(self, EntropyCalibrater):
            cal = CalibrationMethod.Entropy
        elif isinstance(self, PercentileCalibrater):
            cal = CalibrationMethod.Percentile
        elif isinstance(self, DistributionCalibrater):
            cal = CalibrationMethod.Distribution
        else:
            raise TypeError(f"Unknown calibrater {type(self)}. This method must be overwritten.")
        return TensorsData(cal, self.collector.compute_collection_result())
class EntropyCalibrater(HistogramCalibrater):
    """Histogram calibrater that picks thresholds by minimizing KL divergence."""

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path="augmented_model.onnx",
        use_external_data_format=False,
        method="entropy",
        symmetric=False,
        num_bins=128,
        num_quantized_bins=128,
    ):
        """
        :param model_path: ONNX model to calibrate (a model file path).
        :param op_types_to_calibrate: operator types to calibrate; all float32/float16 tensors by default.
        :param augmented_model_path: where to save the augmented model.
        :param use_external_data_format: store the model with external data if it is >= 2Gb.
        :param method: calibration method name; "entropy" for this class.
        :param symmetric: make the tensor range symmetric (central point is 0).
        :param num_bins: number of bins used when creating a new histogram.
        :param num_quantized_bins: number of quantized bins. Default 128.
        """
        # Forward everything to the histogram base; only the defaults differ.
        super().__init__(
            model_path,
            op_types_to_calibrate=op_types_to_calibrate,
            augmented_model_path=augmented_model_path,
            use_external_data_format=use_external_data_format,
            method=method,
            symmetric=symmetric,
            num_bins=num_bins,
            num_quantized_bins=num_quantized_bins,
        )
class PercentileCalibrater(HistogramCalibrater):
    """Histogram calibrater that clips the range at a given percentile."""

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path="augmented_model.onnx",
        use_external_data_format=False,
        method="percentile",
        symmetric=False,
        num_bins=2048,
        percentile=99.999,
    ):
        """
        :param model_path: ONNX model to calibrate (a model file path).
        :param op_types_to_calibrate: operator types to calibrate; all float32/float16 tensors by default.
        :param augmented_model_path: where to save the augmented model.
        :param use_external_data_format: store the model with external data if it is >= 2Gb.
        :param method: calibration method name; "percentile" for this class.
        :param symmetric: make the tensor range symmetric (central point is 0).
        :param num_bins: number of bins used when creating a new histogram. Default 2048.
        :param percentile: a float in [0, 100] giving the clipping percentile. Default 99.999.
        """
        # Forward everything to the histogram base; only the defaults differ.
        super().__init__(
            model_path,
            op_types_to_calibrate=op_types_to_calibrate,
            augmented_model_path=augmented_model_path,
            use_external_data_format=use_external_data_format,
            method=method,
            symmetric=symmetric,
            num_bins=num_bins,
            percentile=percentile,
        )
class DistributionCalibrater(HistogramCalibrater):
    """Histogram calibrater that fits a distribution (mean/std) per tensor."""

    def __init__(
        self,
        model_path: Union[str, Path],
        op_types_to_calibrate: Optional[Sequence[str]] = None,
        augmented_model_path="augmented_model.onnx",
        use_external_data_format=False,
        method="distribution",
        num_bins=128,
        scenario="same",
    ):
        """
        :param model_path: ONNX model to calibrate (a model file path).
        :param op_types_to_calibrate: operator types to calibrate; all float32/float16 tensors by default.
        :param augmented_model_path: where to save the augmented model.
        :param use_external_data_format: store the model with external data if it is >= 2Gb.
        :param method: calibration method name; "distribution" for this class.
        :param num_bins: number of bins used when creating a new histogram.
        :param scenario: for float 8 only, if `scenario="same"`,
            the algorithm weights and float 8 follow the same distribution,
            if `scenario="p3"`, it assumes the weights follow
            a gaussian law and float 8 ~ X^3 where X is a gaussian law
        """
        # Forward everything to the histogram base; only the defaults differ.
        super().__init__(
            model_path,
            op_types_to_calibrate=op_types_to_calibrate,
            augmented_model_path=augmented_model_path,
            use_external_data_format=use_external_data_format,
            method=method,
            num_bins=num_bins,
            scenario=scenario,
        )
class CalibrationDataCollector(metaclass=abc.ABCMeta):
    """Abstract interface for accumulating calibration statistics.

    Implementations ingest batches of named arrays via :meth:`collect` and
    later produce the per-tensor result via :meth:`compute_collection_result`.
    """

    @abc.abstractmethod
    def collect(self, name_to_arr):
        """
        Generate informative data based on given data.
            name_to_arr : dict
                tensor name to NDArray data
        """
        raise NotImplementedError

    @abc.abstractmethod
    def compute_collection_result(self):
        """
        Get the optimal result among collection data.
        """
        raise NotImplementedError
class HistogramCollector(CalibrationDataCollector):
    """
    Collecting histogram for each tensor. Percentile and Entropy method are supported.
    ref: https://github.com//apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py
    ref: https://docs.nvidia.com/deeplearning/tensorrt/pytorch-quantization-toolkit/docs/_modules/
         pytorch_quantization/calib/histogram.html
    """
    def __init__(self, method, symmetric, num_bins, num_quantized_bins, percentile, scenario):
        # Maps tensor name -> histogram tuple. The tuple layout differs by
        # collection path: (hist, hist_edges, min, max) for absolute-value
        # histograms, (hist, hist_edges, min, max, threshold) for real-value.
        self.histogram_dict = {}
        self.method = method
        self.symmetric = symmetric
        self.num_bins = num_bins
        self.num_quantized_bins = num_quantized_bins
        self.percentile = percentile
        self.scenario = scenario
    def get_histogram_dict(self):
        # Accessor for the raw per-tensor histograms.
        return self.histogram_dict
    def collect(self, name_to_arr):
        print("Collecting tensor data and making histogram ...")
        # TODO: Currently we have different collect() for entropy and percentile method respectively.
        #       Need unified collect in the future.
        if self.method in {"distribution", "entropy"}:
            return self.collect_value(name_to_arr)
        elif self.method == "percentile":
            # Symmetric percentile only needs magnitudes, so a cheaper
            # absolute-value histogram suffices.
            if self.symmetric:
                return self.collect_absolute_value(name_to_arr)
            else:
                return self.collect_value(name_to_arr)
        else:
            raise ValueError("Only 'entropy', 'percentile' or 'distribution' methods are supported")
    def collect_absolute_value(self, name_to_arr):
        """
        Collect histogram on absolute value
        """
        for tensor, data_arr in name_to_arr.items():
            data_arr = np.asarray(data_arr)  # noqa: PLW2901
            data_arr = data_arr.flatten()  # noqa: PLW2901
            if data_arr.size > 0:
                min_value = np.min(data_arr)
                max_value = np.max(data_arr)
            else:
                min_value = 0
                max_value = 0
            data_arr = np.absolute(data_arr)  # only consider absolute value  # noqa: PLW2901
            if tensor not in self.histogram_dict:
                # first time it uses num_bins to compute histogram.
                hist, hist_edges = np.histogram(data_arr, bins=self.num_bins)
                self.histogram_dict[tensor] = (hist, hist_edges, min_value, max_value)
            else:
                # Subsequent batches: reuse the existing bin edges, extending
                # them with equal-width bins if the new data exceeds the range.
                old_histogram = self.histogram_dict[tensor]
                old_min = old_histogram[2]
                old_max = old_histogram[3]
                old_hist = old_histogram[0]
                old_hist_edges = old_histogram[1]
                temp_amax = np.max(data_arr)
                if temp_amax > old_hist_edges[-1]:
                    # increase the number of bins
                    width = old_hist_edges[1] - old_hist_edges[0]
                    # NOTE: np.arange may create an extra bin after the one containing temp_amax
                    new_bin_edges = np.arange(old_hist_edges[-1] + width, temp_amax + width, width)
                    old_hist_edges = np.hstack((old_hist_edges, new_bin_edges))
                hist, hist_edges = np.histogram(data_arr, bins=old_hist_edges)
                # Accumulate old counts into the (possibly wider) histogram.
                hist[: len(old_hist)] += old_hist
                self.histogram_dict[tensor] = (hist, hist_edges, min(old_min, min_value), max(old_max, max_value))
    def collect_value(self, name_to_arr):
        """
        Collect histogram on real value
        """
        for tensor, data_arr in name_to_arr.items():
            data_arr = np.asarray(data_arr)  # noqa: PLW2901
            data_arr = data_arr.flatten()  # noqa: PLW2901
            if data_arr.size > 0:
                min_value = np.min(data_arr)
                max_value = np.max(data_arr)
            else:
                min_value = 0
                max_value = 0
            # Histogram is always centered on 0 with range (-threshold, threshold).
            threshold = max(abs(min_value), abs(max_value))
            if tensor in self.histogram_dict:
                old_histogram = self.histogram_dict[tensor]
                self.histogram_dict[tensor] = self.merge_histogram(
                    old_histogram, data_arr, min_value, max_value, threshold
                )
            else:
                hist, hist_edges = np.histogram(data_arr, self.num_bins, range=(-threshold, threshold))
                self.histogram_dict[tensor] = (
                    hist,
                    hist_edges,
                    min_value,
                    max_value,
                    threshold,
                )
    def merge_histogram(self, old_histogram, data_arr, new_min, new_max, new_threshold):
        # Merge a new batch into an existing symmetric histogram. If the new
        # data fits inside the old range, just rebin and add counts; otherwise
        # grow the range symmetrically by whole old-width bins so old counts
        # align exactly with a slice of the new histogram.
        (old_hist, old_hist_edges, old_min, old_max, old_threshold) = old_histogram
        if new_threshold <= old_threshold:
            new_hist, _ = np.histogram(data_arr, len(old_hist), range=(-old_threshold, old_threshold))
            return (
                new_hist + old_hist,
                old_hist_edges,
                min(old_min, new_min),
                max(old_max, new_max),
                old_threshold,
            )
        else:
            if old_threshold == 0:
                hist, hist_edges = np.histogram(data_arr, len(old_hist), range=(-new_threshold, new_threshold))
                hist += old_hist
            else:
                old_num_bins = len(old_hist)
                old_stride = 2 * old_threshold / old_num_bins
                half_increased_bins = int((new_threshold - old_threshold) // old_stride + 1)
                new_num_bins = old_num_bins + 2 * half_increased_bins
                new_threshold = half_increased_bins * old_stride + old_threshold
                hist, hist_edges = np.histogram(data_arr, new_num_bins, range=(-new_threshold, new_threshold))
                # Old counts occupy the centered slice of the enlarged histogram.
                hist[half_increased_bins : new_num_bins - half_increased_bins] += old_hist
            return (
                hist,
                hist_edges,
                min(old_min, new_min),
                max(old_max, new_max),
                new_threshold,
            )
    def compute_collection_result(self):
        if not self.histogram_dict or len(self.histogram_dict) == 0:
            raise ValueError("Histogram has not been collected. Please run collect() first.")
        print(f"Finding optimal threshold for each tensor using {self.method} algorithm ...")
        if self.method == "entropy":
            return self.compute_entropy()
        elif self.method == "percentile":
            return self.compute_percentile()
        elif self.method == "distribution":
            return self.compute_distribution()
        else:
            raise ValueError("Only 'entropy', 'percentile' or 'distribution' methods are supported")
    def compute_percentile(self):
        if self.percentile < 0 or self.percentile > 100:
            raise ValueError("Invalid percentile. Must be in range 0 <= percentile <= 100.")
        histogram_dict = self.histogram_dict
        percentile = self.percentile
        thresholds_dict = {}  # per tensor thresholds
        print(f"Number of tensors : {len(histogram_dict)}")
        print(f"Number of histogram bins : {self.num_bins}")
        print(f"Percentile : ({100.0 - percentile},{percentile})")
        for tensor, histogram in histogram_dict.items():
            hist = histogram[0]
            hist_edges = histogram[1]
            total = hist.sum()
            # Cumulative distribution over the bins; thresholds are read off
            # where the CDF crosses the requested percentile.
            cdf = np.cumsum(hist / total)
            if self.symmetric:
                idx_right = np.searchsorted(cdf, percentile / 100.0)
                thresholds_dict[tensor] = (
                    -float(hist_edges[idx_right]),
                    float(hist_edges[idx_right]),
                )
            else:
                # Split the clipped mass evenly across both tails.
                percent_to_cut_one_side = (100.0 - percentile) / 200.0
                idx_right = np.searchsorted(cdf, 1.0 - percent_to_cut_one_side)
                idx_left = np.searchsorted(cdf, percent_to_cut_one_side)
                thresholds_dict[tensor] = (
                    float(hist_edges[idx_left]),
                    float(hist_edges[idx_right]),
                )
            # Clamp thresholds into the observed [min, max] of the tensor.
            min_value = histogram[2]
            max_value = histogram[3]
            if thresholds_dict[tensor][0] < min_value:
                thresholds_dict[tensor] = (min_value, thresholds_dict[tensor][1])
            if thresholds_dict[tensor][1] > max_value:
                thresholds_dict[tensor] = (thresholds_dict[tensor][0], max_value)
            thresholds_dict[tensor] = (*thresholds_dict[tensor], *hist[:2])
            # Plot histogram for debug only
            if os.environ.get("QUANTIZATION_DEBUG", 0) in (1, "1"):
                apply_plot(hist, hist_edges)
        return thresholds_dict
    def compute_entropy(self):
        histogram_dict = self.histogram_dict
        num_quantized_bins = self.num_quantized_bins
        thresholds_dict = {}  # per tensor thresholds
        print(f"Number of tensors : {len(histogram_dict)}")
        print(
            "Number of histogram bins : {} (The number may increase depends on the data it collects)".format(
                self.num_bins
            )
        )
        print(f"Number of quantized bins : {self.num_quantized_bins}")
        for tensor, histogram in histogram_dict.items():
            # KL-divergence search over candidate clipping windows.
            optimal_threshold = self.get_entropy_threshold(histogram, num_quantized_bins)
            thresholds_dict[tensor] = optimal_threshold
            thresholds_dict[tensor] = (*optimal_threshold, *histogram[:2])
            # Plot histogram for debug only
            if os.environ.get("QUANTIZATION_DEBUG", 0) in (1, "1"):
                apply_plot(histogram[0], histogram[1])
        return thresholds_dict
    @staticmethod
    def _avg_std(hist, hist_edges, power=1):
        # Weighted mean/stddev of the bin centers (optionally raised to
        # *power*), using the histogram counts as weights.
        if power <= 0:
            raise ValueError(f"power={power} <= 0 is invalid.")
        values = (hist_edges[:-1] + hist_edges[1:]) * 0.5
        if power == 1:
            avg = (hist * values).sum() / hist.sum()
            std = ((hist * values**2).sum() / hist.sum() - avg**2) ** 0.5
            return avg, std
        if int(power) == power and int(power) % 2 == 1:
            avg = (hist * values**power).sum() / hist.sum()
            std = ((hist * (values**power - avg) ** 2).sum() / hist.sum()) ** 0.5
            return avg, std
        # Fractional power: apply it to |values| while preserving sign.
        fact = np.abs(values) / values
        fact[np.isnan(fact)] = 1
        fact[np.isinf(fact)] = 1
        values = np.abs(values) ** power * fact
        avg = (hist * values).sum() / hist.sum()
        std = ((hist * values**2).sum() / hist.sum() - avg**2) ** 0.5
        return avg, std
    def compute_distribution(self):
        if self.num_bins < 512:
            raise ValueError("Invalid num_bins. Must be in range 512 <= num_bins.")
        histogram_dict = self.histogram_dict
        thresholds_dict = {}  # per tensor thresholds
        print(f"Number of tensors : {len(histogram_dict)}")
        print(f"Number of histogram bins : {self.num_bins}")
        print(f"Scenario : {self.scenario!r})")
        for tensor, histogram in histogram_dict.items():
            hist = histogram[0]
            hist_edges = histogram[1]
            if self.scenario == "same":
                avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1)
            elif self.scenario == "p3":
                # Cube-root moments for the float8 "p3" scenario.
                avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1.0 / 3.0)
            else:
                raise ValueError("Invalid scenario. Must be in {'same', 'p3'}.")
            thresholds_dict[tensor] = TensorData(avg=avg_coef, std=std_coef, hist=hist, hist_edges=hist_edges)
            # Plot histogram for debug only
            if os.environ.get("QUANTIZATION_DEBUG", 0) in (1, "1"):
                apply_plot(hist, hist_edges)
        return thresholds_dict
    def get_entropy_threshold(self, histogram, num_quantized_bins):
        """Given a dataset, find the optimal threshold for quantizing it.
        The reference distribution is `q`, and the candidate distribution is `p`.
        `q` is a truncated version of the original distribution.
        Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
        """
        import copy
        from scipy.stats import entropy
        hist = histogram[0]
        hist_edges = histogram[1]
        num_bins = hist.size
        zero_bin_index = num_bins // 2
        num_half_quantized_bin = num_quantized_bins // 2
        kl_divergence = np.zeros(zero_bin_index - num_half_quantized_bin + 1)
        thresholds = [(0, 0) for i in range(kl_divergence.size)]
        # <------------ num bins ---------------->
        #        <--- quantized bins ---->
        # |======|===========|===========|=======|
        #              zero bin index
        #        ^                       ^
        #        |                       |
        #   start index               end index          (start of iteration)
        #     ^                             ^
        #     |                             |
        #  start index                  end index               ...
        # ^                                      ^
        # |                                      |
        # start index                    end index       (end of iteration)
        for i in range(num_half_quantized_bin, zero_bin_index + 1, 1):
            start_index = zero_bin_index - i
            end_index = zero_bin_index + i + 1 if (zero_bin_index + i + 1) <= num_bins else num_bins
            thresholds[i - num_half_quantized_bin] = (
                float(hist_edges[start_index]),
                float(hist_edges[end_index]),
            )
            sliced_distribution = copy.deepcopy(hist[start_index:end_index])
            # reference distribution p
            p = sliced_distribution.copy()  # a copy of np array
            # Fold everything outside the window into the boundary bins.
            left_outliers_count = sum(hist[:start_index])
            right_outliers_count = sum(hist[end_index:])
            p[0] += left_outliers_count
            p[-1] += right_outliers_count
            # nonzeros[i] incidates whether p[i] is non-zero
            nonzeros = (p != 0).astype(np.int64)
            # quantize p.size bins into quantized bins (default 128 bins)
            quantized_bins = np.zeros(num_quantized_bins, dtype=np.int64)
            num_merged_bins = sliced_distribution.size // num_quantized_bins
            # merge bins into quantized bins
            for index in range(num_quantized_bins):
                start = index * num_merged_bins
                end = start + num_merged_bins
                quantized_bins[index] = sum(sliced_distribution[start:end])
            quantized_bins[-1] += sum(sliced_distribution[num_quantized_bins * num_merged_bins :])
            # in order to compare p and q, we need to make length of q equals to length of p
            # expand quantized bins into p.size bins
            q = np.zeros(p.size, dtype=np.int64)
            for index in range(num_quantized_bins):
                start = index * num_merged_bins
                end = start + num_merged_bins
                norm = sum(nonzeros[start:end])
                if norm != 0:
                    q[start:end] = float(quantized_bins[index]) / float(norm)
            p = smooth_distribution(p)
            q = smooth_distribution(q)
            # smooth_distribution may fail (return non-array); treat that
            # candidate as infinitely bad instead of crashing.
            if isinstance(q, np.ndarray):
                kl_divergence[i - num_half_quantized_bin] = entropy(p, q)
            else:
                kl_divergence[i - num_half_quantized_bin] = float("inf")
        min_kl_divergence_idx = np.argmin(kl_divergence)
        optimal_threshold = thresholds[min_kl_divergence_idx]
        # Clamp thresholds into the observed [min, max] of the tensor.
        min_value = histogram[2]
        max_value = histogram[3]
        if optimal_threshold[0] < min_value:
            optimal_threshold = (min_value, optimal_threshold[1])
        if optimal_threshold[1] > max_value:
            optimal_threshold = (optimal_threshold[0], max_value)
        return optimal_threshold
def create_calibrator(
    model: Union[str, Path],
    op_types_to_calibrate: Optional[Sequence[str]] = None,
    augmented_model_path="augmented_model.onnx",
    calibrate_method=CalibrationMethod.MinMax,
    use_external_data_format=False,
    extra_options=None,
):
    """
    Build, augment and prepare a calibrater for the given model.

    :param model: ONNX model to calibrate (a model file path).
    :param op_types_to_calibrate: operator types to calibrate; all float32/float16 tensors by default.
    :param augmented_model_path: where to save the augmented model.
    :param calibrate_method: one of CalibrationMethod.{MinMax, Entropy, Percentile, Distribution}.
    :param use_external_data_format: store the model with external data if it is >= 2Gb.
    :param extra_options: optional dict of per-method settings, e.g. "symmetric",
        "moving_average", "averaging_constant", "num_bins", "num_quantized_bins",
        "percentile", "scenario".
    :return: a calibrater whose graph has been augmented and whose inference session is created.
    :raises ValueError: if calibrate_method is not a supported method.
    """
    # Fix for the mutable-default-argument pitfall: a shared {} default could
    # be mutated across calls, so normalize a None sentinel to a fresh dict.
    if extra_options is None:
        extra_options = {}
    calibrator = None
    if calibrate_method == CalibrationMethod.MinMax:
        # default settings for min-max algorithm
        symmetric = extra_options.get("symmetric", False)
        moving_average = extra_options.get("moving_average", False)
        averaging_constant = extra_options.get("averaging_constant", 0.01)
        calibrator = MinMaxCalibrater(
            model,
            op_types_to_calibrate,
            augmented_model_path,
            use_external_data_format=use_external_data_format,
            symmetric=symmetric,
            moving_average=moving_average,
            averaging_constant=averaging_constant,
        )
    elif calibrate_method == CalibrationMethod.Entropy:
        # default settings for entropy algorithm
        num_bins = extra_options.get("num_bins", 128)
        num_quantized_bins = extra_options.get("num_quantized_bins", 128)
        symmetric = extra_options.get("symmetric", False)
        calibrator = EntropyCalibrater(
            model,
            op_types_to_calibrate,
            augmented_model_path,
            use_external_data_format=use_external_data_format,
            symmetric=symmetric,
            num_bins=num_bins,
            num_quantized_bins=num_quantized_bins,
        )
    elif calibrate_method == CalibrationMethod.Percentile:
        # default settings for percentile algorithm
        num_bins = extra_options.get("num_bins", 2048)
        percentile = extra_options.get("percentile", 99.999)
        symmetric = extra_options.get("symmetric", True)
        calibrator = PercentileCalibrater(
            model,
            op_types_to_calibrate,
            augmented_model_path,
            use_external_data_format=use_external_data_format,
            symmetric=symmetric,
            num_bins=num_bins,
            percentile=percentile,
        )
    elif calibrate_method == CalibrationMethod.Distribution:
        # default settings for distribution algorithm
        num_bins = extra_options.get("num_bins", 2048)
        scenario = extra_options.get("scenario", "same")
        calibrator = DistributionCalibrater(
            model,
            op_types_to_calibrate,
            augmented_model_path,
            use_external_data_format=use_external_data_format,
            num_bins=num_bins,
            scenario=scenario,
        )
    if calibrator:
        # Prepare the calibrater fully before handing it back.
        calibrator.augment_graph()
        calibrator.create_inference_session()
        return calibrator
    raise ValueError(f"Unsupported calibration method {calibrate_method}")
|
flexible
|
{
"blob_id": "a61132d2d504ed31d4e1e7889bde670853968559",
"index": 5739,
"step-1": "<mask token>\n\n\nclass CalibraterBase:\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', symmetric=False, use_external_data_format=False\n ):\n \"\"\"\n :param model_path: ONNX model to calibrate. It should be a model file path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n \"\"\"\n if isinstance(model_path, str):\n self.model = load_model_with_shape_infer(Path(model_path))\n elif isinstance(model_path, Path):\n self.model = load_model_with_shape_infer(model_path)\n else:\n raise ValueError('model_path should be model path.')\n self.op_types_to_calibrate = op_types_to_calibrate\n self.augmented_model_path = augmented_model_path\n self.symmetric = symmetric\n self.use_external_data_format = use_external_data_format\n self.augment_model = None\n self.infer_session = None\n self.execution_providers = ['CPUExecutionProvider']\n\n def set_execution_providers(self, execution_providers=[\n 'CPUExecutionProvider']):\n \"\"\"\n reset the execution providers to execute the collect_data. 
It triggers to re-creating inference session.\n \"\"\"\n self.execution_providers = execution_providers\n self.create_inference_session()\n\n def create_inference_session(self):\n \"\"\"\n create an OnnxRuntime InferenceSession.\n \"\"\"\n sess_options = onnxruntime.SessionOptions()\n sess_options.graph_optimization_level = (onnxruntime.\n GraphOptimizationLevel.ORT_DISABLE_ALL)\n self.infer_session = onnxruntime.InferenceSession(self.\n augmented_model_path, sess_options=sess_options, providers=self\n .execution_providers)\n\n def select_tensors_to_calibrate(self, model: ModelProto):\n \"\"\"\n select input/output tensors of candidate nodes to calibrate.\n returns:\n tensors (set): set of tensor name.\n value_infos (dict): tensor name to value info.\n \"\"\"\n value_infos = {vi.name: vi for vi in model.graph.value_info}\n value_infos.update({ot.name: ot for ot in model.graph.output})\n value_infos.update({it.name: it for it in model.graph.input})\n initializer = {init.name for init in model.graph.initializer}\n tensors_to_calibrate = set()\n tensor_type_to_calibrate = {TensorProto.FLOAT}\n for node in model.graph.node:\n if (not self.op_types_to_calibrate or node.op_type in self.\n op_types_to_calibrate):\n for tensor_name in itertools.chain(node.input, node.output):\n if tensor_name in value_infos:\n vi = value_infos[tensor_name]\n if (vi.type.HasField('tensor_type') and vi.type.\n tensor_type.elem_type in\n tensor_type_to_calibrate and tensor_name not in\n initializer):\n tensors_to_calibrate.add(tensor_name)\n return tensors_to_calibrate, value_infos\n\n def get_augment_model(self):\n \"\"\"\n return: augmented onnx model. Call after calling augment_graph\n \"\"\"\n return self.model\n\n def augment_graph(self):\n \"\"\"\n abstract method: augment the input model to prepare for collecting data. It will:\n 1. augment the model to be able to collect desired statistics data\n 2. 
save augmented model to augmented_model_paths\n \"\"\"\n raise NotImplementedError\n\n def collect_data(self, data_reader: CalibrationDataReader):\n \"\"\"\n abstract method: collect the tensors that will be used for range computation. It can be called multiple times.\n \"\"\"\n raise NotImplementedError\n\n def compute_data(self) ->TensorsData:\n \"\"\"\n abstract method: compute data based on the calibration method stored in TensorsData\n \"\"\"\n raise NotImplementedError\n\n\nclass MinMaxCalibrater(CalibraterBase):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', symmetric=False, use_external_data_format=\n False, moving_average=False, averaging_constant=0.01):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param moving_average: compute the moving average of the minimum and maximum values instead of the global minimum and maximum.\n :param averaging_constant: constant smoothing factor to use when computing the moving average.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate=\n op_types_to_calibrate, augmented_model_path=\n augmented_model_path, symmetric=symmetric,\n use_external_data_format=use_external_data_format)\n self.intermediate_outputs = []\n self.calibrate_tensors_range = None\n self.num_model_outputs = len(self.model.graph.output)\n self.model_original_outputs = {output.name for output in self.model\n .graph.output}\n self.moving_average = moving_average\n if moving_average and (averaging_constant < 0 or averaging_constant > 1\n ):\n raise ValueError(\n 
'Invalid averaging constant, which should not be < 0 or > 1.')\n self.averaging_constant = averaging_constant\n\n def augment_graph(self):\n \"\"\"\n Adds ReduceMin and ReduceMax nodes to all quantization_candidates op type nodes in\n model and ensures their outputs are stored as part of the graph output\n :return: augmented ONNX model\n \"\"\"\n tensors, _ = self.select_tensors_to_calibrate(self.model)\n reshape_shape_name = str(uuid.uuid4())\n reshape_shape = numpy_helper.from_array(np.array([1], dtype=np.\n int64), reshape_shape_name)\n self.model.graph.initializer.append(reshape_shape)\n\n def add_reduce_min_max(tensor_name, reduce_op_name):\n keepdims = 1\n reduce_output = tensor_name + '_' + reduce_op_name\n intermediate_output = reduce_output + '_Reshape'\n reduce_node = onnx.helper.make_node(reduce_op_name, [\n tensor_name], [intermediate_output], keepdims=keepdims,\n name=reduce_output)\n reshape_node = onnx.helper.make_node('Reshape', inputs=[\n intermediate_output, reshape_shape_name], outputs=[\n reduce_output], name=intermediate_output)\n self.model.graph.node.extend([reduce_node, reshape_node])\n self.model.graph.output.append(helper.make_tensor_value_info(\n reduce_output, TensorProto.FLOAT, [1]))\n for tensor in tensors:\n add_reduce_min_max(tensor, 'ReduceMin')\n add_reduce_min_max(tensor, 'ReduceMax')\n onnx.save(self.model, self.augmented_model_path,\n save_as_external_data=self.use_external_data_format)\n\n def clear_collected_data(self):\n self.intermediate_outputs = []\n\n def collect_data(self, data_reader: CalibrationDataReader):\n while True:\n inputs = data_reader.get_next()\n if not inputs:\n break\n self.intermediate_outputs.append(self.infer_session.run(None,\n inputs))\n if len(self.intermediate_outputs) == 0:\n raise ValueError('No data is collected.')\n t = self.compute_data()\n if not isinstance(t, TensorsData):\n raise TypeError(\n f'compute_data must return a TensorsData not {type(t)}.')\n self.clear_collected_data()\n\n def 
merge_range(self, old_range, new_range):\n if not old_range:\n return new_range\n for key, value in old_range.items():\n if self.moving_average:\n min_value = value[0] + self.averaging_constant * (new_range\n [key][0] - value[0])\n max_value = value[1] + self.averaging_constant * (new_range\n [key][1] - value[1])\n else:\n min_value = min(value[0], new_range[key][0])\n max_value = max(value[1], new_range[key][1])\n new_range[key] = min_value, max_value\n return new_range\n\n def compute_data(self) ->TensorsData:\n \"\"\"\n Compute the min-max range of tensor\n :return: dictionary mapping: {added node names: (ReduceMin, ReduceMax) pairs }\n \"\"\"\n if len(self.intermediate_outputs) == 0:\n return self.calibrate_tensors_range\n output_names = [self.infer_session.get_outputs()[i].name for i in\n range(len(self.intermediate_outputs[0]))]\n output_dicts_list = [dict(zip(output_names, intermediate_output)) for\n intermediate_output in self.intermediate_outputs]\n merged_output_dict = {}\n for d in output_dicts_list:\n for k, v in d.items():\n merged_output_dict.setdefault(k, []).append(v)\n added_output_names = output_names[self.num_model_outputs:]\n calibrate_tensor_names = [added_output_names[i].rpartition('_')[0] for\n i in range(0, len(added_output_names), 2)]\n merged_added_output_dict = {i: merged_output_dict[i] for i in\n merged_output_dict if i not in self.model_original_outputs}\n pairs = []\n for i in range(0, len(added_output_names), 2):\n min_value = 0\n max_value = 0\n if self.moving_average:\n min_value_array = np.mean(merged_added_output_dict[\n added_output_names[i]], axis=0)\n max_value_array = np.mean(merged_added_output_dict[\n added_output_names[i + 1]], axis=0)\n else:\n min_value_array = min(merged_added_output_dict[\n added_output_names[i]])\n max_value_array = max(merged_added_output_dict[\n added_output_names[i + 1]])\n if type(min_value_array) == int or min_value_array.size > 0:\n min_value = float(min_value_array)\n if type(max_value_array) == 
int or max_value_array.size > 0:\n max_value = float(max_value_array)\n if self.symmetric:\n max_absolute_value = max(abs(min_value), abs(max_value))\n pairs.append(tuple([-max_absolute_value, max_absolute_value]))\n else:\n pairs.append(tuple([min_value, max_value]))\n new_calibrate_tensors_range = TensorsData(CalibrationMethod.MinMax,\n dict(zip(calibrate_tensor_names, pairs)))\n if self.calibrate_tensors_range:\n self.calibrate_tensors_range = self.merge_range(self.\n calibrate_tensors_range, new_calibrate_tensors_range)\n else:\n self.calibrate_tensors_range = new_calibrate_tensors_range\n return self.calibrate_tensors_range\n\n\nclass HistogramCalibrater(CalibraterBase):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'percentile', symmetric=False, num_bins=128, num_quantized_bins=\n 2048, percentile=99.999, scenario='same'):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path.\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param num_quantized_bins: number of quantized bins. Default 128.\n :param percentile: A float number between [0, 100]. 
Default 99.99.\n :param scenario: see :class:`DistributionCalibrater`\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate=\n op_types_to_calibrate, augmented_model_path=\n augmented_model_path, symmetric=symmetric,\n use_external_data_format=use_external_data_format)\n self.intermediate_outputs = []\n self.calibrate_tensors_range = None\n self.num_model_outputs = len(self.model.graph.output)\n self.model_original_outputs = {output.name for output in self.model\n .graph.output}\n self.collector = None\n self.method = method\n self.num_bins = num_bins\n self.num_quantized_bins = num_quantized_bins\n self.percentile = percentile\n self.tensors_to_calibrate = None\n self.scenario = scenario\n\n def augment_graph(self):\n \"\"\"\n make all quantization_candidates op type nodes as part of the graph output.\n :return: augmented ONNX model\n \"\"\"\n self.tensors_to_calibrate, value_infos = (self.\n select_tensors_to_calibrate(self.model))\n for tensor in self.tensors_to_calibrate:\n if tensor not in self.model_original_outputs:\n self.model.graph.output.append(value_infos[tensor])\n onnx.save(self.model, self.augmented_model_path,\n save_as_external_data=self.use_external_data_format)\n\n def clear_collected_data(self):\n self.intermediate_outputs = []\n\n def collect_data(self, data_reader: CalibrationDataReader):\n \"\"\"\n Entropy Calibrator collects operators' tensors as well as generates tensor histogram for each operator.\n \"\"\"\n while True:\n inputs = data_reader.get_next()\n if not inputs:\n break\n self.intermediate_outputs.append(self.infer_session.run(None,\n inputs))\n if len(self.intermediate_outputs) == 0:\n raise ValueError('No data is collected.')\n output_names = [self.infer_session.get_outputs()[i].name for i in\n range(len(self.intermediate_outputs[0]))]\n output_dicts_list = [dict(zip(output_names, intermediate_output)) for\n intermediate_output in self.intermediate_outputs]\n merged_dict = {}\n for d in output_dicts_list:\n for k, v in 
d.items():\n merged_dict.setdefault(k, []).append(v)\n clean_merged_dict = {i: merged_dict[i] for i in merged_dict if i in\n self.tensors_to_calibrate}\n if not self.collector:\n self.collector = HistogramCollector(method=self.method,\n symmetric=self.symmetric, num_bins=self.num_bins,\n num_quantized_bins=self.num_quantized_bins, percentile=self\n .percentile, scenario=self.scenario)\n self.collector.collect(clean_merged_dict)\n self.clear_collected_data()\n\n def compute_data(self) ->TensorsData:\n \"\"\"\n Compute the min-max range of tensor\n :return: dictionary mapping: {tensor name: (min value, max value)}\n \"\"\"\n if not self.collector:\n raise ValueError(\n \"No collector created and can't generate calibration data.\")\n if isinstance(self, EntropyCalibrater):\n cal = CalibrationMethod.Entropy\n elif isinstance(self, PercentileCalibrater):\n cal = CalibrationMethod.Percentile\n elif isinstance(self, DistributionCalibrater):\n cal = CalibrationMethod.Distribution\n else:\n raise TypeError(\n f'Unknown calibrater {type(self)}. This method must be overwritten.'\n )\n return TensorsData(cal, self.collector.compute_collection_result())\n\n\nclass EntropyCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'entropy', symmetric=False, num_bins=128, num_quantized_bins=128):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. 
One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param num_quantized_bins: number of quantized bins. Default 128.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n symmetric=symmetric, num_bins=num_bins, num_quantized_bins=\n num_quantized_bins)\n\n\nclass PercentileCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'percentile', symmetric=False, num_bins=2048, percentile=99.999):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_quantized_bins: number of quantized bins. Default 128.\n :param percentile: A float number between [0, 100]. Default 99.99.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n symmetric=symmetric, num_bins=num_bins, percentile=percentile)\n\n\nclass DistributionCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'distribution', num_bins=128, scenario='same'):\n \"\"\"\n :param model_path: ONNX model to calibrate. 
It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param scenario: for float 8 only, if `scenario=\"same\"`,\n the algorithm weights and float 8 follow the same distribution,\n if `scenario=\"p3\"`, it assumes the weights follow\n a gaussian law and float 8 ~ X^3 where X is a gaussian law\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n num_bins=num_bins, scenario=scenario)\n\n\nclass CalibrationDataCollector(metaclass=abc.ABCMeta):\n \"\"\"\n Base class for collecting data for calibration-based quantization.\n \"\"\"\n\n @abc.abstractmethod\n def collect(self, name_to_arr):\n \"\"\"\n Generate informative data based on given data.\n name_to_arr : dict\n tensor name to NDArray data\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def compute_collection_result(self):\n \"\"\"\n Get the optimal result among collection data.\n \"\"\"\n raise NotImplementedError\n\n\nclass HistogramCollector(CalibrationDataCollector):\n \"\"\"\n Collecting histogram for each tensor. 
Percentile and Entropy method are supported.\n\n ref: https://github.com//apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py\n ref: https://docs.nvidia.com/deeplearning/tensorrt/pytorch-quantization-toolkit/docs/_modules/\n pytorch_quantization/calib/histogram.html\n \"\"\"\n\n def __init__(self, method, symmetric, num_bins, num_quantized_bins,\n percentile, scenario):\n self.histogram_dict = {}\n self.method = method\n self.symmetric = symmetric\n self.num_bins = num_bins\n self.num_quantized_bins = num_quantized_bins\n self.percentile = percentile\n self.scenario = scenario\n\n def get_histogram_dict(self):\n return self.histogram_dict\n\n def collect(self, name_to_arr):\n print('Collecting tensor data and making histogram ...')\n if self.method in {'distribution', 'entropy'}:\n return self.collect_value(name_to_arr)\n elif self.method == 'percentile':\n if self.symmetric:\n return self.collect_absolute_value(name_to_arr)\n else:\n return self.collect_value(name_to_arr)\n else:\n raise ValueError(\n \"Only 'entropy', 'percentile' or 'distribution' methods are supported\"\n )\n\n def collect_absolute_value(self, name_to_arr):\n \"\"\"\n Collect histogram on absolute value\n \"\"\"\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr)\n data_arr = data_arr.flatten()\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n data_arr = np.absolute(data_arr)\n if tensor not in self.histogram_dict:\n hist, hist_edges = np.histogram(data_arr, bins=self.num_bins)\n self.histogram_dict[tensor\n ] = hist, hist_edges, min_value, max_value\n else:\n old_histogram = self.histogram_dict[tensor]\n old_min = old_histogram[2]\n old_max = old_histogram[3]\n old_hist = old_histogram[0]\n old_hist_edges = old_histogram[1]\n temp_amax = np.max(data_arr)\n if temp_amax > old_hist_edges[-1]:\n width = old_hist_edges[1] - old_hist_edges[0]\n new_bin_edges = 
np.arange(old_hist_edges[-1] + width, \n temp_amax + width, width)\n old_hist_edges = np.hstack((old_hist_edges, new_bin_edges))\n hist, hist_edges = np.histogram(data_arr, bins=old_hist_edges)\n hist[:len(old_hist)] += old_hist\n self.histogram_dict[tensor] = hist, hist_edges, min(old_min,\n min_value), max(old_max, max_value)\n\n def collect_value(self, name_to_arr):\n \"\"\"\n Collect histogram on real value\n \"\"\"\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr)\n data_arr = data_arr.flatten()\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n threshold = max(abs(min_value), abs(max_value))\n if tensor in self.histogram_dict:\n old_histogram = self.histogram_dict[tensor]\n self.histogram_dict[tensor] = self.merge_histogram(\n old_histogram, data_arr, min_value, max_value, threshold)\n else:\n hist, hist_edges = np.histogram(data_arr, self.num_bins,\n range=(-threshold, threshold))\n self.histogram_dict[tensor\n ] = hist, hist_edges, min_value, max_value, threshold\n\n def merge_histogram(self, old_histogram, data_arr, new_min, new_max,\n new_threshold):\n old_hist, old_hist_edges, old_min, old_max, old_threshold = (\n old_histogram)\n if new_threshold <= old_threshold:\n new_hist, _ = np.histogram(data_arr, len(old_hist), range=(-\n old_threshold, old_threshold))\n return new_hist + old_hist, old_hist_edges, min(old_min, new_min\n ), max(old_max, new_max), old_threshold\n else:\n if old_threshold == 0:\n hist, hist_edges = np.histogram(data_arr, len(old_hist),\n range=(-new_threshold, new_threshold))\n hist += old_hist\n else:\n old_num_bins = len(old_hist)\n old_stride = 2 * old_threshold / old_num_bins\n half_increased_bins = int((new_threshold - old_threshold) //\n old_stride + 1)\n new_num_bins = old_num_bins + 2 * half_increased_bins\n new_threshold = (half_increased_bins * old_stride +\n old_threshold)\n hist, hist_edges = np.histogram(data_arr, 
new_num_bins,\n range=(-new_threshold, new_threshold))\n hist[half_increased_bins:new_num_bins - half_increased_bins\n ] += old_hist\n return hist, hist_edges, min(old_min, new_min), max(old_max,\n new_max), new_threshold\n\n def compute_collection_result(self):\n if not self.histogram_dict or len(self.histogram_dict) == 0:\n raise ValueError(\n 'Histogram has not been collected. Please run collect() first.'\n )\n print(\n f'Finding optimal threshold for each tensor using {self.method} algorithm ...'\n )\n if self.method == 'entropy':\n return self.compute_entropy()\n elif self.method == 'percentile':\n return self.compute_percentile()\n elif self.method == 'distribution':\n return self.compute_distribution()\n else:\n raise ValueError(\n \"Only 'entropy', 'percentile' or 'distribution' methods are supported\"\n )\n\n def compute_percentile(self):\n if self.percentile < 0 or self.percentile > 100:\n raise ValueError(\n 'Invalid percentile. Must be in range 0 <= percentile <= 100.')\n histogram_dict = self.histogram_dict\n percentile = self.percentile\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(f'Number of histogram bins : {self.num_bins}')\n print(f'Percentile : ({100.0 - percentile},{percentile})')\n for tensor, histogram in histogram_dict.items():\n hist = histogram[0]\n hist_edges = histogram[1]\n total = hist.sum()\n cdf = np.cumsum(hist / total)\n if self.symmetric:\n idx_right = np.searchsorted(cdf, percentile / 100.0)\n thresholds_dict[tensor] = -float(hist_edges[idx_right]), float(\n hist_edges[idx_right])\n else:\n percent_to_cut_one_side = (100.0 - percentile) / 200.0\n idx_right = np.searchsorted(cdf, 1.0 - percent_to_cut_one_side)\n idx_left = np.searchsorted(cdf, percent_to_cut_one_side)\n thresholds_dict[tensor] = float(hist_edges[idx_left]), float(\n hist_edges[idx_right])\n min_value = histogram[2]\n max_value = histogram[3]\n if thresholds_dict[tensor][0] < min_value:\n thresholds_dict[tensor] = min_value, 
thresholds_dict[tensor][1]\n if thresholds_dict[tensor][1] > max_value:\n thresholds_dict[tensor] = thresholds_dict[tensor][0], max_value\n thresholds_dict[tensor] = *thresholds_dict[tensor], *hist[:2]\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(hist, hist_edges)\n return thresholds_dict\n\n def compute_entropy(self):\n histogram_dict = self.histogram_dict\n num_quantized_bins = self.num_quantized_bins\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(\n 'Number of histogram bins : {} (The number may increase depends on the data it collects)'\n .format(self.num_bins))\n print(f'Number of quantized bins : {self.num_quantized_bins}')\n for tensor, histogram in histogram_dict.items():\n optimal_threshold = self.get_entropy_threshold(histogram,\n num_quantized_bins)\n thresholds_dict[tensor] = optimal_threshold\n thresholds_dict[tensor] = *optimal_threshold, *histogram[:2]\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(histogram[0], histogram[1])\n return thresholds_dict\n\n @staticmethod\n def _avg_std(hist, hist_edges, power=1):\n if power <= 0:\n raise ValueError(f'power={power} <= 0 is invalid.')\n values = (hist_edges[:-1] + hist_edges[1:]) * 0.5\n if power == 1:\n avg = (hist * values).sum() / hist.sum()\n std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5\n return avg, std\n if int(power) == power and int(power) % 2 == 1:\n avg = (hist * values ** power).sum() / hist.sum()\n std = ((hist * (values ** power - avg) ** 2).sum() / hist.sum()\n ) ** 0.5\n return avg, std\n fact = np.abs(values) / values\n fact[np.isnan(fact)] = 1\n fact[np.isinf(fact)] = 1\n values = np.abs(values) ** power * fact\n avg = (hist * values).sum() / hist.sum()\n std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5\n return avg, std\n\n def compute_distribution(self):\n if self.num_bins < 512:\n raise ValueError(\n 'Invalid num_bins. 
Must be in range 512 <= num_bins.')\n histogram_dict = self.histogram_dict\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(f'Number of histogram bins : {self.num_bins}')\n print(f'Scenario : {self.scenario!r})')\n for tensor, histogram in histogram_dict.items():\n hist = histogram[0]\n hist_edges = histogram[1]\n if self.scenario == 'same':\n avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1)\n elif self.scenario == 'p3':\n avg_coef, std_coef = self._avg_std(hist, hist_edges, power=\n 1.0 / 3.0)\n else:\n raise ValueError(\"Invalid scenario. Must be in {'same', 'p3'}.\"\n )\n thresholds_dict[tensor] = TensorData(avg=avg_coef, std=std_coef,\n hist=hist, hist_edges=hist_edges)\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(hist, hist_edges)\n return thresholds_dict\n\n def get_entropy_threshold(self, histogram, num_quantized_bins):\n \"\"\"Given a dataset, find the optimal threshold for quantizing it.\n The reference distribution is `q`, and the candidate distribution is `p`.\n `q` is a truncated version of the original distribution.\n Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf\n \"\"\"\n import copy\n from scipy.stats import entropy\n hist = histogram[0]\n hist_edges = histogram[1]\n num_bins = hist.size\n zero_bin_index = num_bins // 2\n num_half_quantized_bin = num_quantized_bins // 2\n kl_divergence = np.zeros(zero_bin_index - num_half_quantized_bin + 1)\n thresholds = [(0, 0) for i in range(kl_divergence.size)]\n for i in range(num_half_quantized_bin, zero_bin_index + 1, 1):\n start_index = zero_bin_index - i\n end_index = (zero_bin_index + i + 1 if zero_bin_index + i + 1 <=\n num_bins else num_bins)\n thresholds[i - num_half_quantized_bin] = float(hist_edges[\n start_index]), float(hist_edges[end_index])\n sliced_distribution = copy.deepcopy(hist[start_index:end_index])\n p = sliced_distribution.copy()\n left_outliers_count = 
sum(hist[:start_index])\n right_outliers_count = sum(hist[end_index:])\n p[0] += left_outliers_count\n p[-1] += right_outliers_count\n nonzeros = (p != 0).astype(np.int64)\n quantized_bins = np.zeros(num_quantized_bins, dtype=np.int64)\n num_merged_bins = sliced_distribution.size // num_quantized_bins\n for index in range(num_quantized_bins):\n start = index * num_merged_bins\n end = start + num_merged_bins\n quantized_bins[index] = sum(sliced_distribution[start:end])\n quantized_bins[-1] += sum(sliced_distribution[\n num_quantized_bins * num_merged_bins:])\n q = np.zeros(p.size, dtype=np.int64)\n for index in range(num_quantized_bins):\n start = index * num_merged_bins\n end = start + num_merged_bins\n norm = sum(nonzeros[start:end])\n if norm != 0:\n q[start:end] = float(quantized_bins[index]) / float(norm)\n p = smooth_distribution(p)\n q = smooth_distribution(q)\n if isinstance(q, np.ndarray):\n kl_divergence[i - num_half_quantized_bin] = entropy(p, q)\n else:\n kl_divergence[i - num_half_quantized_bin] = float('inf')\n min_kl_divergence_idx = np.argmin(kl_divergence)\n optimal_threshold = thresholds[min_kl_divergence_idx]\n min_value = histogram[2]\n max_value = histogram[3]\n if optimal_threshold[0] < min_value:\n optimal_threshold = min_value, optimal_threshold[1]\n if optimal_threshold[1] > max_value:\n optimal_threshold = optimal_threshold[0], max_value\n return optimal_threshold\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TensorsData:\n\n def __init__(self, calibration_method, data: Dict[str, Union[TensorData,\n Tuple]]):\n self.calibration_method = calibration_method\n self.data = {}\n for k, v in data.items():\n if not isinstance(k, str):\n raise TypeError(f'Keys must be strings not {type(k)}.')\n if isinstance(v, tuple):\n if calibration_method == CalibrationMethod.MinMax and len(v\n ) == 2:\n self.data[k] = TensorData(lowest=v[0], highest=v[1])\n continue\n if len(v) == 4:\n self.data[k] = TensorData(lowest=v[0], highest=v[1],\n histogram=v[2], bins=v[3])\n continue\n raise TypeError(\n f'Unexpected tuple for {k:r}, it has {len(v)} elements: {v}.'\n )\n if not isinstance(v, TensorData):\n raise TypeError(f'Values must be TensorData not {type(v)}.')\n self.data[k] = v\n\n def __iter__(self):\n yield from self.data\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass CalibrationMethod(Enum):\n MinMax = 0\n Entropy = 1\n Percentile = 2\n Distribution = 3\n\n\nclass CalibrationDataReader(metaclass=abc.ABCMeta):\n\n @classmethod\n def __subclasshook__(cls, subclass):\n return hasattr(subclass, 'get_next') and callable(subclass.get_next\n ) or NotImplemented\n\n @abc.abstractmethod\n def get_next(self) ->dict:\n \"\"\"generate the input data dict for ONNXinferenceSession run\"\"\"\n raise NotImplementedError\n\n def __iter__(self):\n return self\n\n def __next__(self):\n result = self.get_next()\n if result is None:\n raise StopIteration\n return result\n\n\nclass CalibraterBase:\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', symmetric=False, use_external_data_format=False\n ):\n \"\"\"\n :param model_path: ONNX model to calibrate. It should be a model file path\n :param op_types_to_calibrate: operator types to calibrate. 
By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n \"\"\"\n if isinstance(model_path, str):\n self.model = load_model_with_shape_infer(Path(model_path))\n elif isinstance(model_path, Path):\n self.model = load_model_with_shape_infer(model_path)\n else:\n raise ValueError('model_path should be model path.')\n self.op_types_to_calibrate = op_types_to_calibrate\n self.augmented_model_path = augmented_model_path\n self.symmetric = symmetric\n self.use_external_data_format = use_external_data_format\n self.augment_model = None\n self.infer_session = None\n self.execution_providers = ['CPUExecutionProvider']\n\n def set_execution_providers(self, execution_providers=[\n 'CPUExecutionProvider']):\n \"\"\"\n reset the execution providers to execute the collect_data. 
It triggers to re-creating inference session.\n \"\"\"\n self.execution_providers = execution_providers\n self.create_inference_session()\n\n def create_inference_session(self):\n \"\"\"\n create an OnnxRuntime InferenceSession.\n \"\"\"\n sess_options = onnxruntime.SessionOptions()\n sess_options.graph_optimization_level = (onnxruntime.\n GraphOptimizationLevel.ORT_DISABLE_ALL)\n self.infer_session = onnxruntime.InferenceSession(self.\n augmented_model_path, sess_options=sess_options, providers=self\n .execution_providers)\n\n def select_tensors_to_calibrate(self, model: ModelProto):\n \"\"\"\n select input/output tensors of candidate nodes to calibrate.\n returns:\n tensors (set): set of tensor name.\n value_infos (dict): tensor name to value info.\n \"\"\"\n value_infos = {vi.name: vi for vi in model.graph.value_info}\n value_infos.update({ot.name: ot for ot in model.graph.output})\n value_infos.update({it.name: it for it in model.graph.input})\n initializer = {init.name for init in model.graph.initializer}\n tensors_to_calibrate = set()\n tensor_type_to_calibrate = {TensorProto.FLOAT}\n for node in model.graph.node:\n if (not self.op_types_to_calibrate or node.op_type in self.\n op_types_to_calibrate):\n for tensor_name in itertools.chain(node.input, node.output):\n if tensor_name in value_infos:\n vi = value_infos[tensor_name]\n if (vi.type.HasField('tensor_type') and vi.type.\n tensor_type.elem_type in\n tensor_type_to_calibrate and tensor_name not in\n initializer):\n tensors_to_calibrate.add(tensor_name)\n return tensors_to_calibrate, value_infos\n\n def get_augment_model(self):\n \"\"\"\n return: augmented onnx model. Call after calling augment_graph\n \"\"\"\n return self.model\n\n def augment_graph(self):\n \"\"\"\n abstract method: augment the input model to prepare for collecting data. It will:\n 1. augment the model to be able to collect desired statistics data\n 2. 
save augmented model to augmented_model_paths\n \"\"\"\n raise NotImplementedError\n\n def collect_data(self, data_reader: CalibrationDataReader):\n \"\"\"\n abstract method: collect the tensors that will be used for range computation. It can be called multiple times.\n \"\"\"\n raise NotImplementedError\n\n def compute_data(self) ->TensorsData:\n \"\"\"\n abstract method: compute data based on the calibration method stored in TensorsData\n \"\"\"\n raise NotImplementedError\n\n\nclass MinMaxCalibrater(CalibraterBase):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', symmetric=False, use_external_data_format=\n False, moving_average=False, averaging_constant=0.01):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param moving_average: compute the moving average of the minimum and maximum values instead of the global minimum and maximum.\n :param averaging_constant: constant smoothing factor to use when computing the moving average.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate=\n op_types_to_calibrate, augmented_model_path=\n augmented_model_path, symmetric=symmetric,\n use_external_data_format=use_external_data_format)\n self.intermediate_outputs = []\n self.calibrate_tensors_range = None\n self.num_model_outputs = len(self.model.graph.output)\n self.model_original_outputs = {output.name for output in self.model\n .graph.output}\n self.moving_average = moving_average\n if moving_average and (averaging_constant < 0 or averaging_constant > 1\n ):\n raise ValueError(\n 
'Invalid averaging constant, which should not be < 0 or > 1.')\n self.averaging_constant = averaging_constant\n\n def augment_graph(self):\n \"\"\"\n Adds ReduceMin and ReduceMax nodes to all quantization_candidates op type nodes in\n model and ensures their outputs are stored as part of the graph output\n :return: augmented ONNX model\n \"\"\"\n tensors, _ = self.select_tensors_to_calibrate(self.model)\n reshape_shape_name = str(uuid.uuid4())\n reshape_shape = numpy_helper.from_array(np.array([1], dtype=np.\n int64), reshape_shape_name)\n self.model.graph.initializer.append(reshape_shape)\n\n def add_reduce_min_max(tensor_name, reduce_op_name):\n keepdims = 1\n reduce_output = tensor_name + '_' + reduce_op_name\n intermediate_output = reduce_output + '_Reshape'\n reduce_node = onnx.helper.make_node(reduce_op_name, [\n tensor_name], [intermediate_output], keepdims=keepdims,\n name=reduce_output)\n reshape_node = onnx.helper.make_node('Reshape', inputs=[\n intermediate_output, reshape_shape_name], outputs=[\n reduce_output], name=intermediate_output)\n self.model.graph.node.extend([reduce_node, reshape_node])\n self.model.graph.output.append(helper.make_tensor_value_info(\n reduce_output, TensorProto.FLOAT, [1]))\n for tensor in tensors:\n add_reduce_min_max(tensor, 'ReduceMin')\n add_reduce_min_max(tensor, 'ReduceMax')\n onnx.save(self.model, self.augmented_model_path,\n save_as_external_data=self.use_external_data_format)\n\n def clear_collected_data(self):\n self.intermediate_outputs = []\n\n def collect_data(self, data_reader: CalibrationDataReader):\n while True:\n inputs = data_reader.get_next()\n if not inputs:\n break\n self.intermediate_outputs.append(self.infer_session.run(None,\n inputs))\n if len(self.intermediate_outputs) == 0:\n raise ValueError('No data is collected.')\n t = self.compute_data()\n if not isinstance(t, TensorsData):\n raise TypeError(\n f'compute_data must return a TensorsData not {type(t)}.')\n self.clear_collected_data()\n\n def 
merge_range(self, old_range, new_range):\n if not old_range:\n return new_range\n for key, value in old_range.items():\n if self.moving_average:\n min_value = value[0] + self.averaging_constant * (new_range\n [key][0] - value[0])\n max_value = value[1] + self.averaging_constant * (new_range\n [key][1] - value[1])\n else:\n min_value = min(value[0], new_range[key][0])\n max_value = max(value[1], new_range[key][1])\n new_range[key] = min_value, max_value\n return new_range\n\n def compute_data(self) ->TensorsData:\n \"\"\"\n Compute the min-max range of tensor\n :return: dictionary mapping: {added node names: (ReduceMin, ReduceMax) pairs }\n \"\"\"\n if len(self.intermediate_outputs) == 0:\n return self.calibrate_tensors_range\n output_names = [self.infer_session.get_outputs()[i].name for i in\n range(len(self.intermediate_outputs[0]))]\n output_dicts_list = [dict(zip(output_names, intermediate_output)) for\n intermediate_output in self.intermediate_outputs]\n merged_output_dict = {}\n for d in output_dicts_list:\n for k, v in d.items():\n merged_output_dict.setdefault(k, []).append(v)\n added_output_names = output_names[self.num_model_outputs:]\n calibrate_tensor_names = [added_output_names[i].rpartition('_')[0] for\n i in range(0, len(added_output_names), 2)]\n merged_added_output_dict = {i: merged_output_dict[i] for i in\n merged_output_dict if i not in self.model_original_outputs}\n pairs = []\n for i in range(0, len(added_output_names), 2):\n min_value = 0\n max_value = 0\n if self.moving_average:\n min_value_array = np.mean(merged_added_output_dict[\n added_output_names[i]], axis=0)\n max_value_array = np.mean(merged_added_output_dict[\n added_output_names[i + 1]], axis=0)\n else:\n min_value_array = min(merged_added_output_dict[\n added_output_names[i]])\n max_value_array = max(merged_added_output_dict[\n added_output_names[i + 1]])\n if type(min_value_array) == int or min_value_array.size > 0:\n min_value = float(min_value_array)\n if type(max_value_array) == 
int or max_value_array.size > 0:\n max_value = float(max_value_array)\n if self.symmetric:\n max_absolute_value = max(abs(min_value), abs(max_value))\n pairs.append(tuple([-max_absolute_value, max_absolute_value]))\n else:\n pairs.append(tuple([min_value, max_value]))\n new_calibrate_tensors_range = TensorsData(CalibrationMethod.MinMax,\n dict(zip(calibrate_tensor_names, pairs)))\n if self.calibrate_tensors_range:\n self.calibrate_tensors_range = self.merge_range(self.\n calibrate_tensors_range, new_calibrate_tensors_range)\n else:\n self.calibrate_tensors_range = new_calibrate_tensors_range\n return self.calibrate_tensors_range\n\n\nclass HistogramCalibrater(CalibraterBase):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'percentile', symmetric=False, num_bins=128, num_quantized_bins=\n 2048, percentile=99.999, scenario='same'):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path.\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param num_quantized_bins: number of quantized bins. Default 128.\n :param percentile: A float number between [0, 100]. 
Default 99.99.\n :param scenario: see :class:`DistributionCalibrater`\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate=\n op_types_to_calibrate, augmented_model_path=\n augmented_model_path, symmetric=symmetric,\n use_external_data_format=use_external_data_format)\n self.intermediate_outputs = []\n self.calibrate_tensors_range = None\n self.num_model_outputs = len(self.model.graph.output)\n self.model_original_outputs = {output.name for output in self.model\n .graph.output}\n self.collector = None\n self.method = method\n self.num_bins = num_bins\n self.num_quantized_bins = num_quantized_bins\n self.percentile = percentile\n self.tensors_to_calibrate = None\n self.scenario = scenario\n\n def augment_graph(self):\n \"\"\"\n make all quantization_candidates op type nodes as part of the graph output.\n :return: augmented ONNX model\n \"\"\"\n self.tensors_to_calibrate, value_infos = (self.\n select_tensors_to_calibrate(self.model))\n for tensor in self.tensors_to_calibrate:\n if tensor not in self.model_original_outputs:\n self.model.graph.output.append(value_infos[tensor])\n onnx.save(self.model, self.augmented_model_path,\n save_as_external_data=self.use_external_data_format)\n\n def clear_collected_data(self):\n self.intermediate_outputs = []\n\n def collect_data(self, data_reader: CalibrationDataReader):\n \"\"\"\n Entropy Calibrator collects operators' tensors as well as generates tensor histogram for each operator.\n \"\"\"\n while True:\n inputs = data_reader.get_next()\n if not inputs:\n break\n self.intermediate_outputs.append(self.infer_session.run(None,\n inputs))\n if len(self.intermediate_outputs) == 0:\n raise ValueError('No data is collected.')\n output_names = [self.infer_session.get_outputs()[i].name for i in\n range(len(self.intermediate_outputs[0]))]\n output_dicts_list = [dict(zip(output_names, intermediate_output)) for\n intermediate_output in self.intermediate_outputs]\n merged_dict = {}\n for d in output_dicts_list:\n for k, v in 
d.items():\n merged_dict.setdefault(k, []).append(v)\n clean_merged_dict = {i: merged_dict[i] for i in merged_dict if i in\n self.tensors_to_calibrate}\n if not self.collector:\n self.collector = HistogramCollector(method=self.method,\n symmetric=self.symmetric, num_bins=self.num_bins,\n num_quantized_bins=self.num_quantized_bins, percentile=self\n .percentile, scenario=self.scenario)\n self.collector.collect(clean_merged_dict)\n self.clear_collected_data()\n\n def compute_data(self) ->TensorsData:\n \"\"\"\n Compute the min-max range of tensor\n :return: dictionary mapping: {tensor name: (min value, max value)}\n \"\"\"\n if not self.collector:\n raise ValueError(\n \"No collector created and can't generate calibration data.\")\n if isinstance(self, EntropyCalibrater):\n cal = CalibrationMethod.Entropy\n elif isinstance(self, PercentileCalibrater):\n cal = CalibrationMethod.Percentile\n elif isinstance(self, DistributionCalibrater):\n cal = CalibrationMethod.Distribution\n else:\n raise TypeError(\n f'Unknown calibrater {type(self)}. This method must be overwritten.'\n )\n return TensorsData(cal, self.collector.compute_collection_result())\n\n\nclass EntropyCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'entropy', symmetric=False, num_bins=128, num_quantized_bins=128):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. 
One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param num_quantized_bins: number of quantized bins. Default 128.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n symmetric=symmetric, num_bins=num_bins, num_quantized_bins=\n num_quantized_bins)\n\n\nclass PercentileCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'percentile', symmetric=False, num_bins=2048, percentile=99.999):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_quantized_bins: number of quantized bins. Default 128.\n :param percentile: A float number between [0, 100]. Default 99.99.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n symmetric=symmetric, num_bins=num_bins, percentile=percentile)\n\n\nclass DistributionCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'distribution', num_bins=128, scenario='same'):\n \"\"\"\n :param model_path: ONNX model to calibrate. 
It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param scenario: for float 8 only, if `scenario=\"same\"`,\n the algorithm weights and float 8 follow the same distribution,\n if `scenario=\"p3\"`, it assumes the weights follow\n a gaussian law and float 8 ~ X^3 where X is a gaussian law\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n num_bins=num_bins, scenario=scenario)\n\n\nclass CalibrationDataCollector(metaclass=abc.ABCMeta):\n \"\"\"\n Base class for collecting data for calibration-based quantization.\n \"\"\"\n\n @abc.abstractmethod\n def collect(self, name_to_arr):\n \"\"\"\n Generate informative data based on given data.\n name_to_arr : dict\n tensor name to NDArray data\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def compute_collection_result(self):\n \"\"\"\n Get the optimal result among collection data.\n \"\"\"\n raise NotImplementedError\n\n\nclass HistogramCollector(CalibrationDataCollector):\n \"\"\"\n Collecting histogram for each tensor. 
Percentile and Entropy method are supported.\n\n ref: https://github.com//apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py\n ref: https://docs.nvidia.com/deeplearning/tensorrt/pytorch-quantization-toolkit/docs/_modules/\n pytorch_quantization/calib/histogram.html\n \"\"\"\n\n def __init__(self, method, symmetric, num_bins, num_quantized_bins,\n percentile, scenario):\n self.histogram_dict = {}\n self.method = method\n self.symmetric = symmetric\n self.num_bins = num_bins\n self.num_quantized_bins = num_quantized_bins\n self.percentile = percentile\n self.scenario = scenario\n\n def get_histogram_dict(self):\n return self.histogram_dict\n\n def collect(self, name_to_arr):\n print('Collecting tensor data and making histogram ...')\n if self.method in {'distribution', 'entropy'}:\n return self.collect_value(name_to_arr)\n elif self.method == 'percentile':\n if self.symmetric:\n return self.collect_absolute_value(name_to_arr)\n else:\n return self.collect_value(name_to_arr)\n else:\n raise ValueError(\n \"Only 'entropy', 'percentile' or 'distribution' methods are supported\"\n )\n\n def collect_absolute_value(self, name_to_arr):\n \"\"\"\n Collect histogram on absolute value\n \"\"\"\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr)\n data_arr = data_arr.flatten()\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n data_arr = np.absolute(data_arr)\n if tensor not in self.histogram_dict:\n hist, hist_edges = np.histogram(data_arr, bins=self.num_bins)\n self.histogram_dict[tensor\n ] = hist, hist_edges, min_value, max_value\n else:\n old_histogram = self.histogram_dict[tensor]\n old_min = old_histogram[2]\n old_max = old_histogram[3]\n old_hist = old_histogram[0]\n old_hist_edges = old_histogram[1]\n temp_amax = np.max(data_arr)\n if temp_amax > old_hist_edges[-1]:\n width = old_hist_edges[1] - old_hist_edges[0]\n new_bin_edges = 
np.arange(old_hist_edges[-1] + width, \n temp_amax + width, width)\n old_hist_edges = np.hstack((old_hist_edges, new_bin_edges))\n hist, hist_edges = np.histogram(data_arr, bins=old_hist_edges)\n hist[:len(old_hist)] += old_hist\n self.histogram_dict[tensor] = hist, hist_edges, min(old_min,\n min_value), max(old_max, max_value)\n\n def collect_value(self, name_to_arr):\n \"\"\"\n Collect histogram on real value\n \"\"\"\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr)\n data_arr = data_arr.flatten()\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n threshold = max(abs(min_value), abs(max_value))\n if tensor in self.histogram_dict:\n old_histogram = self.histogram_dict[tensor]\n self.histogram_dict[tensor] = self.merge_histogram(\n old_histogram, data_arr, min_value, max_value, threshold)\n else:\n hist, hist_edges = np.histogram(data_arr, self.num_bins,\n range=(-threshold, threshold))\n self.histogram_dict[tensor\n ] = hist, hist_edges, min_value, max_value, threshold\n\n def merge_histogram(self, old_histogram, data_arr, new_min, new_max,\n new_threshold):\n old_hist, old_hist_edges, old_min, old_max, old_threshold = (\n old_histogram)\n if new_threshold <= old_threshold:\n new_hist, _ = np.histogram(data_arr, len(old_hist), range=(-\n old_threshold, old_threshold))\n return new_hist + old_hist, old_hist_edges, min(old_min, new_min\n ), max(old_max, new_max), old_threshold\n else:\n if old_threshold == 0:\n hist, hist_edges = np.histogram(data_arr, len(old_hist),\n range=(-new_threshold, new_threshold))\n hist += old_hist\n else:\n old_num_bins = len(old_hist)\n old_stride = 2 * old_threshold / old_num_bins\n half_increased_bins = int((new_threshold - old_threshold) //\n old_stride + 1)\n new_num_bins = old_num_bins + 2 * half_increased_bins\n new_threshold = (half_increased_bins * old_stride +\n old_threshold)\n hist, hist_edges = np.histogram(data_arr, 
new_num_bins,\n range=(-new_threshold, new_threshold))\n hist[half_increased_bins:new_num_bins - half_increased_bins\n ] += old_hist\n return hist, hist_edges, min(old_min, new_min), max(old_max,\n new_max), new_threshold\n\n def compute_collection_result(self):\n if not self.histogram_dict or len(self.histogram_dict) == 0:\n raise ValueError(\n 'Histogram has not been collected. Please run collect() first.'\n )\n print(\n f'Finding optimal threshold for each tensor using {self.method} algorithm ...'\n )\n if self.method == 'entropy':\n return self.compute_entropy()\n elif self.method == 'percentile':\n return self.compute_percentile()\n elif self.method == 'distribution':\n return self.compute_distribution()\n else:\n raise ValueError(\n \"Only 'entropy', 'percentile' or 'distribution' methods are supported\"\n )\n\n def compute_percentile(self):\n if self.percentile < 0 or self.percentile > 100:\n raise ValueError(\n 'Invalid percentile. Must be in range 0 <= percentile <= 100.')\n histogram_dict = self.histogram_dict\n percentile = self.percentile\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(f'Number of histogram bins : {self.num_bins}')\n print(f'Percentile : ({100.0 - percentile},{percentile})')\n for tensor, histogram in histogram_dict.items():\n hist = histogram[0]\n hist_edges = histogram[1]\n total = hist.sum()\n cdf = np.cumsum(hist / total)\n if self.symmetric:\n idx_right = np.searchsorted(cdf, percentile / 100.0)\n thresholds_dict[tensor] = -float(hist_edges[idx_right]), float(\n hist_edges[idx_right])\n else:\n percent_to_cut_one_side = (100.0 - percentile) / 200.0\n idx_right = np.searchsorted(cdf, 1.0 - percent_to_cut_one_side)\n idx_left = np.searchsorted(cdf, percent_to_cut_one_side)\n thresholds_dict[tensor] = float(hist_edges[idx_left]), float(\n hist_edges[idx_right])\n min_value = histogram[2]\n max_value = histogram[3]\n if thresholds_dict[tensor][0] < min_value:\n thresholds_dict[tensor] = min_value, 
thresholds_dict[tensor][1]\n if thresholds_dict[tensor][1] > max_value:\n thresholds_dict[tensor] = thresholds_dict[tensor][0], max_value\n thresholds_dict[tensor] = *thresholds_dict[tensor], *hist[:2]\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(hist, hist_edges)\n return thresholds_dict\n\n def compute_entropy(self):\n histogram_dict = self.histogram_dict\n num_quantized_bins = self.num_quantized_bins\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(\n 'Number of histogram bins : {} (The number may increase depends on the data it collects)'\n .format(self.num_bins))\n print(f'Number of quantized bins : {self.num_quantized_bins}')\n for tensor, histogram in histogram_dict.items():\n optimal_threshold = self.get_entropy_threshold(histogram,\n num_quantized_bins)\n thresholds_dict[tensor] = optimal_threshold\n thresholds_dict[tensor] = *optimal_threshold, *histogram[:2]\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(histogram[0], histogram[1])\n return thresholds_dict\n\n @staticmethod\n def _avg_std(hist, hist_edges, power=1):\n if power <= 0:\n raise ValueError(f'power={power} <= 0 is invalid.')\n values = (hist_edges[:-1] + hist_edges[1:]) * 0.5\n if power == 1:\n avg = (hist * values).sum() / hist.sum()\n std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5\n return avg, std\n if int(power) == power and int(power) % 2 == 1:\n avg = (hist * values ** power).sum() / hist.sum()\n std = ((hist * (values ** power - avg) ** 2).sum() / hist.sum()\n ) ** 0.5\n return avg, std\n fact = np.abs(values) / values\n fact[np.isnan(fact)] = 1\n fact[np.isinf(fact)] = 1\n values = np.abs(values) ** power * fact\n avg = (hist * values).sum() / hist.sum()\n std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5\n return avg, std\n\n def compute_distribution(self):\n if self.num_bins < 512:\n raise ValueError(\n 'Invalid num_bins. 
Must be in range 512 <= num_bins.')\n histogram_dict = self.histogram_dict\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(f'Number of histogram bins : {self.num_bins}')\n print(f'Scenario : {self.scenario!r})')\n for tensor, histogram in histogram_dict.items():\n hist = histogram[0]\n hist_edges = histogram[1]\n if self.scenario == 'same':\n avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1)\n elif self.scenario == 'p3':\n avg_coef, std_coef = self._avg_std(hist, hist_edges, power=\n 1.0 / 3.0)\n else:\n raise ValueError(\"Invalid scenario. Must be in {'same', 'p3'}.\"\n )\n thresholds_dict[tensor] = TensorData(avg=avg_coef, std=std_coef,\n hist=hist, hist_edges=hist_edges)\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(hist, hist_edges)\n return thresholds_dict\n\n def get_entropy_threshold(self, histogram, num_quantized_bins):\n \"\"\"Given a dataset, find the optimal threshold for quantizing it.\n The reference distribution is `q`, and the candidate distribution is `p`.\n `q` is a truncated version of the original distribution.\n Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf\n \"\"\"\n import copy\n from scipy.stats import entropy\n hist = histogram[0]\n hist_edges = histogram[1]\n num_bins = hist.size\n zero_bin_index = num_bins // 2\n num_half_quantized_bin = num_quantized_bins // 2\n kl_divergence = np.zeros(zero_bin_index - num_half_quantized_bin + 1)\n thresholds = [(0, 0) for i in range(kl_divergence.size)]\n for i in range(num_half_quantized_bin, zero_bin_index + 1, 1):\n start_index = zero_bin_index - i\n end_index = (zero_bin_index + i + 1 if zero_bin_index + i + 1 <=\n num_bins else num_bins)\n thresholds[i - num_half_quantized_bin] = float(hist_edges[\n start_index]), float(hist_edges[end_index])\n sliced_distribution = copy.deepcopy(hist[start_index:end_index])\n p = sliced_distribution.copy()\n left_outliers_count = 
sum(hist[:start_index])\n right_outliers_count = sum(hist[end_index:])\n p[0] += left_outliers_count\n p[-1] += right_outliers_count\n nonzeros = (p != 0).astype(np.int64)\n quantized_bins = np.zeros(num_quantized_bins, dtype=np.int64)\n num_merged_bins = sliced_distribution.size // num_quantized_bins\n for index in range(num_quantized_bins):\n start = index * num_merged_bins\n end = start + num_merged_bins\n quantized_bins[index] = sum(sliced_distribution[start:end])\n quantized_bins[-1] += sum(sliced_distribution[\n num_quantized_bins * num_merged_bins:])\n q = np.zeros(p.size, dtype=np.int64)\n for index in range(num_quantized_bins):\n start = index * num_merged_bins\n end = start + num_merged_bins\n norm = sum(nonzeros[start:end])\n if norm != 0:\n q[start:end] = float(quantized_bins[index]) / float(norm)\n p = smooth_distribution(p)\n q = smooth_distribution(q)\n if isinstance(q, np.ndarray):\n kl_divergence[i - num_half_quantized_bin] = entropy(p, q)\n else:\n kl_divergence[i - num_half_quantized_bin] = float('inf')\n min_kl_divergence_idx = np.argmin(kl_divergence)\n optimal_threshold = thresholds[min_kl_divergence_idx]\n min_value = histogram[2]\n max_value = histogram[3]\n if optimal_threshold[0] < min_value:\n optimal_threshold = min_value, optimal_threshold[1]\n if optimal_threshold[1] > max_value:\n optimal_threshold = optimal_threshold[0], max_value\n return optimal_threshold\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TensorsData:\n\n def __init__(self, calibration_method, data: Dict[str, Union[TensorData,\n Tuple]]):\n self.calibration_method = calibration_method\n self.data = {}\n for k, v in data.items():\n if not isinstance(k, str):\n raise TypeError(f'Keys must be strings not {type(k)}.')\n if isinstance(v, tuple):\n if calibration_method == CalibrationMethod.MinMax and len(v\n ) == 2:\n self.data[k] = TensorData(lowest=v[0], highest=v[1])\n continue\n if len(v) == 4:\n self.data[k] = TensorData(lowest=v[0], highest=v[1],\n histogram=v[2], bins=v[3])\n continue\n raise TypeError(\n f'Unexpected tuple for {k:r}, it has {len(v)} elements: {v}.'\n )\n if not isinstance(v, TensorData):\n raise TypeError(f'Values must be TensorData not {type(v)}.')\n self.data[k] = v\n\n def __iter__(self):\n yield from self.data\n <mask token>\n\n def __getitem__(self, key):\n return self.data[key]\n\n def __setitem__(self, key, value):\n if key not in self.data:\n raise RuntimeError(\n f'Only an existing tensor can be modified, {key!r} is not.')\n self.data[key] = value\n\n def values(self):\n return self.data.values()\n\n\nclass CalibrationMethod(Enum):\n MinMax = 0\n Entropy = 1\n Percentile = 2\n Distribution = 3\n\n\nclass CalibrationDataReader(metaclass=abc.ABCMeta):\n\n @classmethod\n def __subclasshook__(cls, subclass):\n return hasattr(subclass, 'get_next') and callable(subclass.get_next\n ) or NotImplemented\n\n @abc.abstractmethod\n def get_next(self) ->dict:\n \"\"\"generate the input data dict for ONNXinferenceSession run\"\"\"\n raise NotImplementedError\n\n def __iter__(self):\n return self\n\n def __next__(self):\n result = self.get_next()\n if result is None:\n raise StopIteration\n return result\n\n\nclass CalibraterBase:\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', symmetric=False, use_external_data_format=False\n ):\n \"\"\"\n 
:param model_path: ONNX model to calibrate. It should be a model file path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n \"\"\"\n if isinstance(model_path, str):\n self.model = load_model_with_shape_infer(Path(model_path))\n elif isinstance(model_path, Path):\n self.model = load_model_with_shape_infer(model_path)\n else:\n raise ValueError('model_path should be model path.')\n self.op_types_to_calibrate = op_types_to_calibrate\n self.augmented_model_path = augmented_model_path\n self.symmetric = symmetric\n self.use_external_data_format = use_external_data_format\n self.augment_model = None\n self.infer_session = None\n self.execution_providers = ['CPUExecutionProvider']\n\n def set_execution_providers(self, execution_providers=[\n 'CPUExecutionProvider']):\n \"\"\"\n reset the execution providers to execute the collect_data. 
It triggers to re-creating inference session.\n \"\"\"\n self.execution_providers = execution_providers\n self.create_inference_session()\n\n def create_inference_session(self):\n \"\"\"\n create an OnnxRuntime InferenceSession.\n \"\"\"\n sess_options = onnxruntime.SessionOptions()\n sess_options.graph_optimization_level = (onnxruntime.\n GraphOptimizationLevel.ORT_DISABLE_ALL)\n self.infer_session = onnxruntime.InferenceSession(self.\n augmented_model_path, sess_options=sess_options, providers=self\n .execution_providers)\n\n def select_tensors_to_calibrate(self, model: ModelProto):\n \"\"\"\n select input/output tensors of candidate nodes to calibrate.\n returns:\n tensors (set): set of tensor name.\n value_infos (dict): tensor name to value info.\n \"\"\"\n value_infos = {vi.name: vi for vi in model.graph.value_info}\n value_infos.update({ot.name: ot for ot in model.graph.output})\n value_infos.update({it.name: it for it in model.graph.input})\n initializer = {init.name for init in model.graph.initializer}\n tensors_to_calibrate = set()\n tensor_type_to_calibrate = {TensorProto.FLOAT}\n for node in model.graph.node:\n if (not self.op_types_to_calibrate or node.op_type in self.\n op_types_to_calibrate):\n for tensor_name in itertools.chain(node.input, node.output):\n if tensor_name in value_infos:\n vi = value_infos[tensor_name]\n if (vi.type.HasField('tensor_type') and vi.type.\n tensor_type.elem_type in\n tensor_type_to_calibrate and tensor_name not in\n initializer):\n tensors_to_calibrate.add(tensor_name)\n return tensors_to_calibrate, value_infos\n\n def get_augment_model(self):\n \"\"\"\n return: augmented onnx model. Call after calling augment_graph\n \"\"\"\n return self.model\n\n def augment_graph(self):\n \"\"\"\n abstract method: augment the input model to prepare for collecting data. It will:\n 1. augment the model to be able to collect desired statistics data\n 2. 
save augmented model to augmented_model_paths\n \"\"\"\n raise NotImplementedError\n\n def collect_data(self, data_reader: CalibrationDataReader):\n \"\"\"\n abstract method: collect the tensors that will be used for range computation. It can be called multiple times.\n \"\"\"\n raise NotImplementedError\n\n def compute_data(self) ->TensorsData:\n \"\"\"\n abstract method: compute data based on the calibration method stored in TensorsData\n \"\"\"\n raise NotImplementedError\n\n\nclass MinMaxCalibrater(CalibraterBase):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', symmetric=False, use_external_data_format=\n False, moving_average=False, averaging_constant=0.01):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param moving_average: compute the moving average of the minimum and maximum values instead of the global minimum and maximum.\n :param averaging_constant: constant smoothing factor to use when computing the moving average.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate=\n op_types_to_calibrate, augmented_model_path=\n augmented_model_path, symmetric=symmetric,\n use_external_data_format=use_external_data_format)\n self.intermediate_outputs = []\n self.calibrate_tensors_range = None\n self.num_model_outputs = len(self.model.graph.output)\n self.model_original_outputs = {output.name for output in self.model\n .graph.output}\n self.moving_average = moving_average\n if moving_average and (averaging_constant < 0 or averaging_constant > 1\n ):\n raise ValueError(\n 
'Invalid averaging constant, which should not be < 0 or > 1.')\n self.averaging_constant = averaging_constant\n\n def augment_graph(self):\n \"\"\"\n Adds ReduceMin and ReduceMax nodes to all quantization_candidates op type nodes in\n model and ensures their outputs are stored as part of the graph output\n :return: augmented ONNX model\n \"\"\"\n tensors, _ = self.select_tensors_to_calibrate(self.model)\n reshape_shape_name = str(uuid.uuid4())\n reshape_shape = numpy_helper.from_array(np.array([1], dtype=np.\n int64), reshape_shape_name)\n self.model.graph.initializer.append(reshape_shape)\n\n def add_reduce_min_max(tensor_name, reduce_op_name):\n keepdims = 1\n reduce_output = tensor_name + '_' + reduce_op_name\n intermediate_output = reduce_output + '_Reshape'\n reduce_node = onnx.helper.make_node(reduce_op_name, [\n tensor_name], [intermediate_output], keepdims=keepdims,\n name=reduce_output)\n reshape_node = onnx.helper.make_node('Reshape', inputs=[\n intermediate_output, reshape_shape_name], outputs=[\n reduce_output], name=intermediate_output)\n self.model.graph.node.extend([reduce_node, reshape_node])\n self.model.graph.output.append(helper.make_tensor_value_info(\n reduce_output, TensorProto.FLOAT, [1]))\n for tensor in tensors:\n add_reduce_min_max(tensor, 'ReduceMin')\n add_reduce_min_max(tensor, 'ReduceMax')\n onnx.save(self.model, self.augmented_model_path,\n save_as_external_data=self.use_external_data_format)\n\n def clear_collected_data(self):\n self.intermediate_outputs = []\n\n def collect_data(self, data_reader: CalibrationDataReader):\n while True:\n inputs = data_reader.get_next()\n if not inputs:\n break\n self.intermediate_outputs.append(self.infer_session.run(None,\n inputs))\n if len(self.intermediate_outputs) == 0:\n raise ValueError('No data is collected.')\n t = self.compute_data()\n if not isinstance(t, TensorsData):\n raise TypeError(\n f'compute_data must return a TensorsData not {type(t)}.')\n self.clear_collected_data()\n\n def 
merge_range(self, old_range, new_range):\n if not old_range:\n return new_range\n for key, value in old_range.items():\n if self.moving_average:\n min_value = value[0] + self.averaging_constant * (new_range\n [key][0] - value[0])\n max_value = value[1] + self.averaging_constant * (new_range\n [key][1] - value[1])\n else:\n min_value = min(value[0], new_range[key][0])\n max_value = max(value[1], new_range[key][1])\n new_range[key] = min_value, max_value\n return new_range\n\n def compute_data(self) ->TensorsData:\n \"\"\"\n Compute the min-max range of tensor\n :return: dictionary mapping: {added node names: (ReduceMin, ReduceMax) pairs }\n \"\"\"\n if len(self.intermediate_outputs) == 0:\n return self.calibrate_tensors_range\n output_names = [self.infer_session.get_outputs()[i].name for i in\n range(len(self.intermediate_outputs[0]))]\n output_dicts_list = [dict(zip(output_names, intermediate_output)) for\n intermediate_output in self.intermediate_outputs]\n merged_output_dict = {}\n for d in output_dicts_list:\n for k, v in d.items():\n merged_output_dict.setdefault(k, []).append(v)\n added_output_names = output_names[self.num_model_outputs:]\n calibrate_tensor_names = [added_output_names[i].rpartition('_')[0] for\n i in range(0, len(added_output_names), 2)]\n merged_added_output_dict = {i: merged_output_dict[i] for i in\n merged_output_dict if i not in self.model_original_outputs}\n pairs = []\n for i in range(0, len(added_output_names), 2):\n min_value = 0\n max_value = 0\n if self.moving_average:\n min_value_array = np.mean(merged_added_output_dict[\n added_output_names[i]], axis=0)\n max_value_array = np.mean(merged_added_output_dict[\n added_output_names[i + 1]], axis=0)\n else:\n min_value_array = min(merged_added_output_dict[\n added_output_names[i]])\n max_value_array = max(merged_added_output_dict[\n added_output_names[i + 1]])\n if type(min_value_array) == int or min_value_array.size > 0:\n min_value = float(min_value_array)\n if type(max_value_array) == 
int or max_value_array.size > 0:\n max_value = float(max_value_array)\n if self.symmetric:\n max_absolute_value = max(abs(min_value), abs(max_value))\n pairs.append(tuple([-max_absolute_value, max_absolute_value]))\n else:\n pairs.append(tuple([min_value, max_value]))\n new_calibrate_tensors_range = TensorsData(CalibrationMethod.MinMax,\n dict(zip(calibrate_tensor_names, pairs)))\n if self.calibrate_tensors_range:\n self.calibrate_tensors_range = self.merge_range(self.\n calibrate_tensors_range, new_calibrate_tensors_range)\n else:\n self.calibrate_tensors_range = new_calibrate_tensors_range\n return self.calibrate_tensors_range\n\n\nclass HistogramCalibrater(CalibraterBase):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'percentile', symmetric=False, num_bins=128, num_quantized_bins=\n 2048, percentile=99.999, scenario='same'):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path.\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param num_quantized_bins: number of quantized bins. Default 128.\n :param percentile: A float number between [0, 100]. 
Default 99.99.\n :param scenario: see :class:`DistributionCalibrater`\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate=\n op_types_to_calibrate, augmented_model_path=\n augmented_model_path, symmetric=symmetric,\n use_external_data_format=use_external_data_format)\n self.intermediate_outputs = []\n self.calibrate_tensors_range = None\n self.num_model_outputs = len(self.model.graph.output)\n self.model_original_outputs = {output.name for output in self.model\n .graph.output}\n self.collector = None\n self.method = method\n self.num_bins = num_bins\n self.num_quantized_bins = num_quantized_bins\n self.percentile = percentile\n self.tensors_to_calibrate = None\n self.scenario = scenario\n\n def augment_graph(self):\n \"\"\"\n make all quantization_candidates op type nodes as part of the graph output.\n :return: augmented ONNX model\n \"\"\"\n self.tensors_to_calibrate, value_infos = (self.\n select_tensors_to_calibrate(self.model))\n for tensor in self.tensors_to_calibrate:\n if tensor not in self.model_original_outputs:\n self.model.graph.output.append(value_infos[tensor])\n onnx.save(self.model, self.augmented_model_path,\n save_as_external_data=self.use_external_data_format)\n\n def clear_collected_data(self):\n self.intermediate_outputs = []\n\n def collect_data(self, data_reader: CalibrationDataReader):\n \"\"\"\n Entropy Calibrator collects operators' tensors as well as generates tensor histogram for each operator.\n \"\"\"\n while True:\n inputs = data_reader.get_next()\n if not inputs:\n break\n self.intermediate_outputs.append(self.infer_session.run(None,\n inputs))\n if len(self.intermediate_outputs) == 0:\n raise ValueError('No data is collected.')\n output_names = [self.infer_session.get_outputs()[i].name for i in\n range(len(self.intermediate_outputs[0]))]\n output_dicts_list = [dict(zip(output_names, intermediate_output)) for\n intermediate_output in self.intermediate_outputs]\n merged_dict = {}\n for d in output_dicts_list:\n for k, v in 
d.items():\n merged_dict.setdefault(k, []).append(v)\n clean_merged_dict = {i: merged_dict[i] for i in merged_dict if i in\n self.tensors_to_calibrate}\n if not self.collector:\n self.collector = HistogramCollector(method=self.method,\n symmetric=self.symmetric, num_bins=self.num_bins,\n num_quantized_bins=self.num_quantized_bins, percentile=self\n .percentile, scenario=self.scenario)\n self.collector.collect(clean_merged_dict)\n self.clear_collected_data()\n\n def compute_data(self) ->TensorsData:\n \"\"\"\n Compute the min-max range of tensor\n :return: dictionary mapping: {tensor name: (min value, max value)}\n \"\"\"\n if not self.collector:\n raise ValueError(\n \"No collector created and can't generate calibration data.\")\n if isinstance(self, EntropyCalibrater):\n cal = CalibrationMethod.Entropy\n elif isinstance(self, PercentileCalibrater):\n cal = CalibrationMethod.Percentile\n elif isinstance(self, DistributionCalibrater):\n cal = CalibrationMethod.Distribution\n else:\n raise TypeError(\n f'Unknown calibrater {type(self)}. This method must be overwritten.'\n )\n return TensorsData(cal, self.collector.compute_collection_result())\n\n\nclass EntropyCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'entropy', symmetric=False, num_bins=128, num_quantized_bins=128):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. 
One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param num_quantized_bins: number of quantized bins. Default 128.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n symmetric=symmetric, num_bins=num_bins, num_quantized_bins=\n num_quantized_bins)\n\n\nclass PercentileCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'percentile', symmetric=False, num_bins=2048, percentile=99.999):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_quantized_bins: number of quantized bins. Default 128.\n :param percentile: A float number between [0, 100]. Default 99.99.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n symmetric=symmetric, num_bins=num_bins, percentile=percentile)\n\n\nclass DistributionCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'distribution', num_bins=128, scenario='same'):\n \"\"\"\n :param model_path: ONNX model to calibrate. 
It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param scenario: for float 8 only, if `scenario=\"same\"`,\n the algorithm weights and float 8 follow the same distribution,\n if `scenario=\"p3\"`, it assumes the weights follow\n a gaussian law and float 8 ~ X^3 where X is a gaussian law\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n num_bins=num_bins, scenario=scenario)\n\n\nclass CalibrationDataCollector(metaclass=abc.ABCMeta):\n \"\"\"\n Base class for collecting data for calibration-based quantization.\n \"\"\"\n\n @abc.abstractmethod\n def collect(self, name_to_arr):\n \"\"\"\n Generate informative data based on given data.\n name_to_arr : dict\n tensor name to NDArray data\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def compute_collection_result(self):\n \"\"\"\n Get the optimal result among collection data.\n \"\"\"\n raise NotImplementedError\n\n\nclass HistogramCollector(CalibrationDataCollector):\n \"\"\"\n Collecting histogram for each tensor. 
Percentile and Entropy method are supported.\n\n ref: https://github.com//apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py\n ref: https://docs.nvidia.com/deeplearning/tensorrt/pytorch-quantization-toolkit/docs/_modules/\n pytorch_quantization/calib/histogram.html\n \"\"\"\n\n def __init__(self, method, symmetric, num_bins, num_quantized_bins,\n percentile, scenario):\n self.histogram_dict = {}\n self.method = method\n self.symmetric = symmetric\n self.num_bins = num_bins\n self.num_quantized_bins = num_quantized_bins\n self.percentile = percentile\n self.scenario = scenario\n\n def get_histogram_dict(self):\n return self.histogram_dict\n\n def collect(self, name_to_arr):\n print('Collecting tensor data and making histogram ...')\n if self.method in {'distribution', 'entropy'}:\n return self.collect_value(name_to_arr)\n elif self.method == 'percentile':\n if self.symmetric:\n return self.collect_absolute_value(name_to_arr)\n else:\n return self.collect_value(name_to_arr)\n else:\n raise ValueError(\n \"Only 'entropy', 'percentile' or 'distribution' methods are supported\"\n )\n\n def collect_absolute_value(self, name_to_arr):\n \"\"\"\n Collect histogram on absolute value\n \"\"\"\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr)\n data_arr = data_arr.flatten()\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n data_arr = np.absolute(data_arr)\n if tensor not in self.histogram_dict:\n hist, hist_edges = np.histogram(data_arr, bins=self.num_bins)\n self.histogram_dict[tensor\n ] = hist, hist_edges, min_value, max_value\n else:\n old_histogram = self.histogram_dict[tensor]\n old_min = old_histogram[2]\n old_max = old_histogram[3]\n old_hist = old_histogram[0]\n old_hist_edges = old_histogram[1]\n temp_amax = np.max(data_arr)\n if temp_amax > old_hist_edges[-1]:\n width = old_hist_edges[1] - old_hist_edges[0]\n new_bin_edges = 
np.arange(old_hist_edges[-1] + width, \n temp_amax + width, width)\n old_hist_edges = np.hstack((old_hist_edges, new_bin_edges))\n hist, hist_edges = np.histogram(data_arr, bins=old_hist_edges)\n hist[:len(old_hist)] += old_hist\n self.histogram_dict[tensor] = hist, hist_edges, min(old_min,\n min_value), max(old_max, max_value)\n\n def collect_value(self, name_to_arr):\n \"\"\"\n Collect histogram on real value\n \"\"\"\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr)\n data_arr = data_arr.flatten()\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n threshold = max(abs(min_value), abs(max_value))\n if tensor in self.histogram_dict:\n old_histogram = self.histogram_dict[tensor]\n self.histogram_dict[tensor] = self.merge_histogram(\n old_histogram, data_arr, min_value, max_value, threshold)\n else:\n hist, hist_edges = np.histogram(data_arr, self.num_bins,\n range=(-threshold, threshold))\n self.histogram_dict[tensor\n ] = hist, hist_edges, min_value, max_value, threshold\n\n def merge_histogram(self, old_histogram, data_arr, new_min, new_max,\n new_threshold):\n old_hist, old_hist_edges, old_min, old_max, old_threshold = (\n old_histogram)\n if new_threshold <= old_threshold:\n new_hist, _ = np.histogram(data_arr, len(old_hist), range=(-\n old_threshold, old_threshold))\n return new_hist + old_hist, old_hist_edges, min(old_min, new_min\n ), max(old_max, new_max), old_threshold\n else:\n if old_threshold == 0:\n hist, hist_edges = np.histogram(data_arr, len(old_hist),\n range=(-new_threshold, new_threshold))\n hist += old_hist\n else:\n old_num_bins = len(old_hist)\n old_stride = 2 * old_threshold / old_num_bins\n half_increased_bins = int((new_threshold - old_threshold) //\n old_stride + 1)\n new_num_bins = old_num_bins + 2 * half_increased_bins\n new_threshold = (half_increased_bins * old_stride +\n old_threshold)\n hist, hist_edges = np.histogram(data_arr, 
new_num_bins,\n range=(-new_threshold, new_threshold))\n hist[half_increased_bins:new_num_bins - half_increased_bins\n ] += old_hist\n return hist, hist_edges, min(old_min, new_min), max(old_max,\n new_max), new_threshold\n\n def compute_collection_result(self):\n if not self.histogram_dict or len(self.histogram_dict) == 0:\n raise ValueError(\n 'Histogram has not been collected. Please run collect() first.'\n )\n print(\n f'Finding optimal threshold for each tensor using {self.method} algorithm ...'\n )\n if self.method == 'entropy':\n return self.compute_entropy()\n elif self.method == 'percentile':\n return self.compute_percentile()\n elif self.method == 'distribution':\n return self.compute_distribution()\n else:\n raise ValueError(\n \"Only 'entropy', 'percentile' or 'distribution' methods are supported\"\n )\n\n def compute_percentile(self):\n if self.percentile < 0 or self.percentile > 100:\n raise ValueError(\n 'Invalid percentile. Must be in range 0 <= percentile <= 100.')\n histogram_dict = self.histogram_dict\n percentile = self.percentile\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(f'Number of histogram bins : {self.num_bins}')\n print(f'Percentile : ({100.0 - percentile},{percentile})')\n for tensor, histogram in histogram_dict.items():\n hist = histogram[0]\n hist_edges = histogram[1]\n total = hist.sum()\n cdf = np.cumsum(hist / total)\n if self.symmetric:\n idx_right = np.searchsorted(cdf, percentile / 100.0)\n thresholds_dict[tensor] = -float(hist_edges[idx_right]), float(\n hist_edges[idx_right])\n else:\n percent_to_cut_one_side = (100.0 - percentile) / 200.0\n idx_right = np.searchsorted(cdf, 1.0 - percent_to_cut_one_side)\n idx_left = np.searchsorted(cdf, percent_to_cut_one_side)\n thresholds_dict[tensor] = float(hist_edges[idx_left]), float(\n hist_edges[idx_right])\n min_value = histogram[2]\n max_value = histogram[3]\n if thresholds_dict[tensor][0] < min_value:\n thresholds_dict[tensor] = min_value, 
thresholds_dict[tensor][1]\n if thresholds_dict[tensor][1] > max_value:\n thresholds_dict[tensor] = thresholds_dict[tensor][0], max_value\n thresholds_dict[tensor] = *thresholds_dict[tensor], *hist[:2]\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(hist, hist_edges)\n return thresholds_dict\n\n def compute_entropy(self):\n histogram_dict = self.histogram_dict\n num_quantized_bins = self.num_quantized_bins\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(\n 'Number of histogram bins : {} (The number may increase depends on the data it collects)'\n .format(self.num_bins))\n print(f'Number of quantized bins : {self.num_quantized_bins}')\n for tensor, histogram in histogram_dict.items():\n optimal_threshold = self.get_entropy_threshold(histogram,\n num_quantized_bins)\n thresholds_dict[tensor] = optimal_threshold\n thresholds_dict[tensor] = *optimal_threshold, *histogram[:2]\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(histogram[0], histogram[1])\n return thresholds_dict\n\n @staticmethod\n def _avg_std(hist, hist_edges, power=1):\n if power <= 0:\n raise ValueError(f'power={power} <= 0 is invalid.')\n values = (hist_edges[:-1] + hist_edges[1:]) * 0.5\n if power == 1:\n avg = (hist * values).sum() / hist.sum()\n std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5\n return avg, std\n if int(power) == power and int(power) % 2 == 1:\n avg = (hist * values ** power).sum() / hist.sum()\n std = ((hist * (values ** power - avg) ** 2).sum() / hist.sum()\n ) ** 0.5\n return avg, std\n fact = np.abs(values) / values\n fact[np.isnan(fact)] = 1\n fact[np.isinf(fact)] = 1\n values = np.abs(values) ** power * fact\n avg = (hist * values).sum() / hist.sum()\n std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5\n return avg, std\n\n def compute_distribution(self):\n if self.num_bins < 512:\n raise ValueError(\n 'Invalid num_bins. 
Must be in range 512 <= num_bins.')\n histogram_dict = self.histogram_dict\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(f'Number of histogram bins : {self.num_bins}')\n print(f'Scenario : {self.scenario!r})')\n for tensor, histogram in histogram_dict.items():\n hist = histogram[0]\n hist_edges = histogram[1]\n if self.scenario == 'same':\n avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1)\n elif self.scenario == 'p3':\n avg_coef, std_coef = self._avg_std(hist, hist_edges, power=\n 1.0 / 3.0)\n else:\n raise ValueError(\"Invalid scenario. Must be in {'same', 'p3'}.\"\n )\n thresholds_dict[tensor] = TensorData(avg=avg_coef, std=std_coef,\n hist=hist, hist_edges=hist_edges)\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(hist, hist_edges)\n return thresholds_dict\n\n def get_entropy_threshold(self, histogram, num_quantized_bins):\n \"\"\"Given a dataset, find the optimal threshold for quantizing it.\n The reference distribution is `q`, and the candidate distribution is `p`.\n `q` is a truncated version of the original distribution.\n Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf\n \"\"\"\n import copy\n from scipy.stats import entropy\n hist = histogram[0]\n hist_edges = histogram[1]\n num_bins = hist.size\n zero_bin_index = num_bins // 2\n num_half_quantized_bin = num_quantized_bins // 2\n kl_divergence = np.zeros(zero_bin_index - num_half_quantized_bin + 1)\n thresholds = [(0, 0) for i in range(kl_divergence.size)]\n for i in range(num_half_quantized_bin, zero_bin_index + 1, 1):\n start_index = zero_bin_index - i\n end_index = (zero_bin_index + i + 1 if zero_bin_index + i + 1 <=\n num_bins else num_bins)\n thresholds[i - num_half_quantized_bin] = float(hist_edges[\n start_index]), float(hist_edges[end_index])\n sliced_distribution = copy.deepcopy(hist[start_index:end_index])\n p = sliced_distribution.copy()\n left_outliers_count = 
sum(hist[:start_index])\n right_outliers_count = sum(hist[end_index:])\n p[0] += left_outliers_count\n p[-1] += right_outliers_count\n nonzeros = (p != 0).astype(np.int64)\n quantized_bins = np.zeros(num_quantized_bins, dtype=np.int64)\n num_merged_bins = sliced_distribution.size // num_quantized_bins\n for index in range(num_quantized_bins):\n start = index * num_merged_bins\n end = start + num_merged_bins\n quantized_bins[index] = sum(sliced_distribution[start:end])\n quantized_bins[-1] += sum(sliced_distribution[\n num_quantized_bins * num_merged_bins:])\n q = np.zeros(p.size, dtype=np.int64)\n for index in range(num_quantized_bins):\n start = index * num_merged_bins\n end = start + num_merged_bins\n norm = sum(nonzeros[start:end])\n if norm != 0:\n q[start:end] = float(quantized_bins[index]) / float(norm)\n p = smooth_distribution(p)\n q = smooth_distribution(q)\n if isinstance(q, np.ndarray):\n kl_divergence[i - num_half_quantized_bin] = entropy(p, q)\n else:\n kl_divergence[i - num_half_quantized_bin] = float('inf')\n min_kl_divergence_idx = np.argmin(kl_divergence)\n optimal_threshold = thresholds[min_kl_divergence_idx]\n min_value = histogram[2]\n max_value = histogram[3]\n if optimal_threshold[0] < min_value:\n optimal_threshold = min_value, optimal_threshold[1]\n if optimal_threshold[1] > max_value:\n optimal_threshold = optimal_threshold[0], max_value\n return optimal_threshold\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass TensorsData:\n\n def __init__(self, calibration_method, data: Dict[str, Union[TensorData,\n Tuple]]):\n self.calibration_method = calibration_method\n self.data = {}\n for k, v in data.items():\n if not isinstance(k, str):\n raise TypeError(f'Keys must be strings not {type(k)}.')\n if isinstance(v, tuple):\n if calibration_method == CalibrationMethod.MinMax and len(v\n ) == 2:\n self.data[k] = TensorData(lowest=v[0], highest=v[1])\n continue\n if len(v) == 4:\n self.data[k] = TensorData(lowest=v[0], highest=v[1],\n histogram=v[2], bins=v[3])\n continue\n raise TypeError(\n f'Unexpected tuple for {k:r}, it has {len(v)} elements: {v}.'\n )\n if not isinstance(v, TensorData):\n raise TypeError(f'Values must be TensorData not {type(v)}.')\n self.data[k] = v\n\n def __iter__(self):\n yield from self.data\n\n def __contains__(self, key):\n return key in self.data\n\n def __getitem__(self, key):\n return self.data[key]\n\n def __setitem__(self, key, value):\n if key not in self.data:\n raise RuntimeError(\n f'Only an existing tensor can be modified, {key!r} is not.')\n self.data[key] = value\n\n def values(self):\n return self.data.values()\n\n\nclass CalibrationMethod(Enum):\n MinMax = 0\n Entropy = 1\n Percentile = 2\n Distribution = 3\n\n\nclass CalibrationDataReader(metaclass=abc.ABCMeta):\n\n @classmethod\n def __subclasshook__(cls, subclass):\n return hasattr(subclass, 'get_next') and callable(subclass.get_next\n ) or NotImplemented\n\n @abc.abstractmethod\n def get_next(self) ->dict:\n \"\"\"generate the input data dict for ONNXinferenceSession run\"\"\"\n raise NotImplementedError\n\n def __iter__(self):\n return self\n\n def __next__(self):\n result = self.get_next()\n if result is None:\n raise StopIteration\n return result\n\n\nclass CalibraterBase:\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', symmetric=False, 
use_external_data_format=False\n ):\n \"\"\"\n :param model_path: ONNX model to calibrate. It should be a model file path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n \"\"\"\n if isinstance(model_path, str):\n self.model = load_model_with_shape_infer(Path(model_path))\n elif isinstance(model_path, Path):\n self.model = load_model_with_shape_infer(model_path)\n else:\n raise ValueError('model_path should be model path.')\n self.op_types_to_calibrate = op_types_to_calibrate\n self.augmented_model_path = augmented_model_path\n self.symmetric = symmetric\n self.use_external_data_format = use_external_data_format\n self.augment_model = None\n self.infer_session = None\n self.execution_providers = ['CPUExecutionProvider']\n\n def set_execution_providers(self, execution_providers=[\n 'CPUExecutionProvider']):\n \"\"\"\n reset the execution providers to execute the collect_data. 
It triggers to re-creating inference session.\n \"\"\"\n self.execution_providers = execution_providers\n self.create_inference_session()\n\n def create_inference_session(self):\n \"\"\"\n create an OnnxRuntime InferenceSession.\n \"\"\"\n sess_options = onnxruntime.SessionOptions()\n sess_options.graph_optimization_level = (onnxruntime.\n GraphOptimizationLevel.ORT_DISABLE_ALL)\n self.infer_session = onnxruntime.InferenceSession(self.\n augmented_model_path, sess_options=sess_options, providers=self\n .execution_providers)\n\n def select_tensors_to_calibrate(self, model: ModelProto):\n \"\"\"\n select input/output tensors of candidate nodes to calibrate.\n returns:\n tensors (set): set of tensor name.\n value_infos (dict): tensor name to value info.\n \"\"\"\n value_infos = {vi.name: vi for vi in model.graph.value_info}\n value_infos.update({ot.name: ot for ot in model.graph.output})\n value_infos.update({it.name: it for it in model.graph.input})\n initializer = {init.name for init in model.graph.initializer}\n tensors_to_calibrate = set()\n tensor_type_to_calibrate = {TensorProto.FLOAT}\n for node in model.graph.node:\n if (not self.op_types_to_calibrate or node.op_type in self.\n op_types_to_calibrate):\n for tensor_name in itertools.chain(node.input, node.output):\n if tensor_name in value_infos:\n vi = value_infos[tensor_name]\n if (vi.type.HasField('tensor_type') and vi.type.\n tensor_type.elem_type in\n tensor_type_to_calibrate and tensor_name not in\n initializer):\n tensors_to_calibrate.add(tensor_name)\n return tensors_to_calibrate, value_infos\n\n def get_augment_model(self):\n \"\"\"\n return: augmented onnx model. Call after calling augment_graph\n \"\"\"\n return self.model\n\n def augment_graph(self):\n \"\"\"\n abstract method: augment the input model to prepare for collecting data. It will:\n 1. augment the model to be able to collect desired statistics data\n 2. 
save augmented model to augmented_model_paths\n \"\"\"\n raise NotImplementedError\n\n def collect_data(self, data_reader: CalibrationDataReader):\n \"\"\"\n abstract method: collect the tensors that will be used for range computation. It can be called multiple times.\n \"\"\"\n raise NotImplementedError\n\n def compute_data(self) ->TensorsData:\n \"\"\"\n abstract method: compute data based on the calibration method stored in TensorsData\n \"\"\"\n raise NotImplementedError\n\n\nclass MinMaxCalibrater(CalibraterBase):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', symmetric=False, use_external_data_format=\n False, moving_average=False, averaging_constant=0.01):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param moving_average: compute the moving average of the minimum and maximum values instead of the global minimum and maximum.\n :param averaging_constant: constant smoothing factor to use when computing the moving average.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate=\n op_types_to_calibrate, augmented_model_path=\n augmented_model_path, symmetric=symmetric,\n use_external_data_format=use_external_data_format)\n self.intermediate_outputs = []\n self.calibrate_tensors_range = None\n self.num_model_outputs = len(self.model.graph.output)\n self.model_original_outputs = {output.name for output in self.model\n .graph.output}\n self.moving_average = moving_average\n if moving_average and (averaging_constant < 0 or averaging_constant > 1\n ):\n raise ValueError(\n 
'Invalid averaging constant, which should not be < 0 or > 1.')\n self.averaging_constant = averaging_constant\n\n def augment_graph(self):\n \"\"\"\n Adds ReduceMin and ReduceMax nodes to all quantization_candidates op type nodes in\n model and ensures their outputs are stored as part of the graph output\n :return: augmented ONNX model\n \"\"\"\n tensors, _ = self.select_tensors_to_calibrate(self.model)\n reshape_shape_name = str(uuid.uuid4())\n reshape_shape = numpy_helper.from_array(np.array([1], dtype=np.\n int64), reshape_shape_name)\n self.model.graph.initializer.append(reshape_shape)\n\n def add_reduce_min_max(tensor_name, reduce_op_name):\n keepdims = 1\n reduce_output = tensor_name + '_' + reduce_op_name\n intermediate_output = reduce_output + '_Reshape'\n reduce_node = onnx.helper.make_node(reduce_op_name, [\n tensor_name], [intermediate_output], keepdims=keepdims,\n name=reduce_output)\n reshape_node = onnx.helper.make_node('Reshape', inputs=[\n intermediate_output, reshape_shape_name], outputs=[\n reduce_output], name=intermediate_output)\n self.model.graph.node.extend([reduce_node, reshape_node])\n self.model.graph.output.append(helper.make_tensor_value_info(\n reduce_output, TensorProto.FLOAT, [1]))\n for tensor in tensors:\n add_reduce_min_max(tensor, 'ReduceMin')\n add_reduce_min_max(tensor, 'ReduceMax')\n onnx.save(self.model, self.augmented_model_path,\n save_as_external_data=self.use_external_data_format)\n\n def clear_collected_data(self):\n self.intermediate_outputs = []\n\n def collect_data(self, data_reader: CalibrationDataReader):\n while True:\n inputs = data_reader.get_next()\n if not inputs:\n break\n self.intermediate_outputs.append(self.infer_session.run(None,\n inputs))\n if len(self.intermediate_outputs) == 0:\n raise ValueError('No data is collected.')\n t = self.compute_data()\n if not isinstance(t, TensorsData):\n raise TypeError(\n f'compute_data must return a TensorsData not {type(t)}.')\n self.clear_collected_data()\n\n def 
merge_range(self, old_range, new_range):\n if not old_range:\n return new_range\n for key, value in old_range.items():\n if self.moving_average:\n min_value = value[0] + self.averaging_constant * (new_range\n [key][0] - value[0])\n max_value = value[1] + self.averaging_constant * (new_range\n [key][1] - value[1])\n else:\n min_value = min(value[0], new_range[key][0])\n max_value = max(value[1], new_range[key][1])\n new_range[key] = min_value, max_value\n return new_range\n\n def compute_data(self) ->TensorsData:\n \"\"\"\n Compute the min-max range of tensor\n :return: dictionary mapping: {added node names: (ReduceMin, ReduceMax) pairs }\n \"\"\"\n if len(self.intermediate_outputs) == 0:\n return self.calibrate_tensors_range\n output_names = [self.infer_session.get_outputs()[i].name for i in\n range(len(self.intermediate_outputs[0]))]\n output_dicts_list = [dict(zip(output_names, intermediate_output)) for\n intermediate_output in self.intermediate_outputs]\n merged_output_dict = {}\n for d in output_dicts_list:\n for k, v in d.items():\n merged_output_dict.setdefault(k, []).append(v)\n added_output_names = output_names[self.num_model_outputs:]\n calibrate_tensor_names = [added_output_names[i].rpartition('_')[0] for\n i in range(0, len(added_output_names), 2)]\n merged_added_output_dict = {i: merged_output_dict[i] for i in\n merged_output_dict if i not in self.model_original_outputs}\n pairs = []\n for i in range(0, len(added_output_names), 2):\n min_value = 0\n max_value = 0\n if self.moving_average:\n min_value_array = np.mean(merged_added_output_dict[\n added_output_names[i]], axis=0)\n max_value_array = np.mean(merged_added_output_dict[\n added_output_names[i + 1]], axis=0)\n else:\n min_value_array = min(merged_added_output_dict[\n added_output_names[i]])\n max_value_array = max(merged_added_output_dict[\n added_output_names[i + 1]])\n if type(min_value_array) == int or min_value_array.size > 0:\n min_value = float(min_value_array)\n if type(max_value_array) == 
int or max_value_array.size > 0:\n max_value = float(max_value_array)\n if self.symmetric:\n max_absolute_value = max(abs(min_value), abs(max_value))\n pairs.append(tuple([-max_absolute_value, max_absolute_value]))\n else:\n pairs.append(tuple([min_value, max_value]))\n new_calibrate_tensors_range = TensorsData(CalibrationMethod.MinMax,\n dict(zip(calibrate_tensor_names, pairs)))\n if self.calibrate_tensors_range:\n self.calibrate_tensors_range = self.merge_range(self.\n calibrate_tensors_range, new_calibrate_tensors_range)\n else:\n self.calibrate_tensors_range = new_calibrate_tensors_range\n return self.calibrate_tensors_range\n\n\nclass HistogramCalibrater(CalibraterBase):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'percentile', symmetric=False, num_bins=128, num_quantized_bins=\n 2048, percentile=99.999, scenario='same'):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path.\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param num_quantized_bins: number of quantized bins. Default 128.\n :param percentile: A float number between [0, 100]. 
Default 99.99.\n :param scenario: see :class:`DistributionCalibrater`\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate=\n op_types_to_calibrate, augmented_model_path=\n augmented_model_path, symmetric=symmetric,\n use_external_data_format=use_external_data_format)\n self.intermediate_outputs = []\n self.calibrate_tensors_range = None\n self.num_model_outputs = len(self.model.graph.output)\n self.model_original_outputs = {output.name for output in self.model\n .graph.output}\n self.collector = None\n self.method = method\n self.num_bins = num_bins\n self.num_quantized_bins = num_quantized_bins\n self.percentile = percentile\n self.tensors_to_calibrate = None\n self.scenario = scenario\n\n def augment_graph(self):\n \"\"\"\n make all quantization_candidates op type nodes as part of the graph output.\n :return: augmented ONNX model\n \"\"\"\n self.tensors_to_calibrate, value_infos = (self.\n select_tensors_to_calibrate(self.model))\n for tensor in self.tensors_to_calibrate:\n if tensor not in self.model_original_outputs:\n self.model.graph.output.append(value_infos[tensor])\n onnx.save(self.model, self.augmented_model_path,\n save_as_external_data=self.use_external_data_format)\n\n def clear_collected_data(self):\n self.intermediate_outputs = []\n\n def collect_data(self, data_reader: CalibrationDataReader):\n \"\"\"\n Entropy Calibrator collects operators' tensors as well as generates tensor histogram for each operator.\n \"\"\"\n while True:\n inputs = data_reader.get_next()\n if not inputs:\n break\n self.intermediate_outputs.append(self.infer_session.run(None,\n inputs))\n if len(self.intermediate_outputs) == 0:\n raise ValueError('No data is collected.')\n output_names = [self.infer_session.get_outputs()[i].name for i in\n range(len(self.intermediate_outputs[0]))]\n output_dicts_list = [dict(zip(output_names, intermediate_output)) for\n intermediate_output in self.intermediate_outputs]\n merged_dict = {}\n for d in output_dicts_list:\n for k, v in 
d.items():\n merged_dict.setdefault(k, []).append(v)\n clean_merged_dict = {i: merged_dict[i] for i in merged_dict if i in\n self.tensors_to_calibrate}\n if not self.collector:\n self.collector = HistogramCollector(method=self.method,\n symmetric=self.symmetric, num_bins=self.num_bins,\n num_quantized_bins=self.num_quantized_bins, percentile=self\n .percentile, scenario=self.scenario)\n self.collector.collect(clean_merged_dict)\n self.clear_collected_data()\n\n def compute_data(self) ->TensorsData:\n \"\"\"\n Compute the min-max range of tensor\n :return: dictionary mapping: {tensor name: (min value, max value)}\n \"\"\"\n if not self.collector:\n raise ValueError(\n \"No collector created and can't generate calibration data.\")\n if isinstance(self, EntropyCalibrater):\n cal = CalibrationMethod.Entropy\n elif isinstance(self, PercentileCalibrater):\n cal = CalibrationMethod.Percentile\n elif isinstance(self, DistributionCalibrater):\n cal = CalibrationMethod.Distribution\n else:\n raise TypeError(\n f'Unknown calibrater {type(self)}. This method must be overwritten.'\n )\n return TensorsData(cal, self.collector.compute_collection_result())\n\n\nclass EntropyCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'entropy', symmetric=False, num_bins=128, num_quantized_bins=128):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. 
One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param num_quantized_bins: number of quantized bins. Default 128.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n symmetric=symmetric, num_bins=num_bins, num_quantized_bins=\n num_quantized_bins)\n\n\nclass PercentileCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'percentile', symmetric=False, num_bins=2048, percentile=99.999):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_quantized_bins: number of quantized bins. Default 128.\n :param percentile: A float number between [0, 100]. Default 99.99.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n symmetric=symmetric, num_bins=num_bins, percentile=percentile)\n\n\nclass DistributionCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'distribution', num_bins=128, scenario='same'):\n \"\"\"\n :param model_path: ONNX model to calibrate. 
It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param scenario: for float 8 only, if `scenario=\"same\"`,\n the algorithm weights and float 8 follow the same distribution,\n if `scenario=\"p3\"`, it assumes the weights follow\n a gaussian law and float 8 ~ X^3 where X is a gaussian law\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n num_bins=num_bins, scenario=scenario)\n\n\nclass CalibrationDataCollector(metaclass=abc.ABCMeta):\n \"\"\"\n Base class for collecting data for calibration-based quantization.\n \"\"\"\n\n @abc.abstractmethod\n def collect(self, name_to_arr):\n \"\"\"\n Generate informative data based on given data.\n name_to_arr : dict\n tensor name to NDArray data\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def compute_collection_result(self):\n \"\"\"\n Get the optimal result among collection data.\n \"\"\"\n raise NotImplementedError\n\n\nclass HistogramCollector(CalibrationDataCollector):\n \"\"\"\n Collecting histogram for each tensor. 
Percentile and Entropy method are supported.\n\n ref: https://github.com//apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py\n ref: https://docs.nvidia.com/deeplearning/tensorrt/pytorch-quantization-toolkit/docs/_modules/\n pytorch_quantization/calib/histogram.html\n \"\"\"\n\n def __init__(self, method, symmetric, num_bins, num_quantized_bins,\n percentile, scenario):\n self.histogram_dict = {}\n self.method = method\n self.symmetric = symmetric\n self.num_bins = num_bins\n self.num_quantized_bins = num_quantized_bins\n self.percentile = percentile\n self.scenario = scenario\n\n def get_histogram_dict(self):\n return self.histogram_dict\n\n def collect(self, name_to_arr):\n print('Collecting tensor data and making histogram ...')\n if self.method in {'distribution', 'entropy'}:\n return self.collect_value(name_to_arr)\n elif self.method == 'percentile':\n if self.symmetric:\n return self.collect_absolute_value(name_to_arr)\n else:\n return self.collect_value(name_to_arr)\n else:\n raise ValueError(\n \"Only 'entropy', 'percentile' or 'distribution' methods are supported\"\n )\n\n def collect_absolute_value(self, name_to_arr):\n \"\"\"\n Collect histogram on absolute value\n \"\"\"\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr)\n data_arr = data_arr.flatten()\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n data_arr = np.absolute(data_arr)\n if tensor not in self.histogram_dict:\n hist, hist_edges = np.histogram(data_arr, bins=self.num_bins)\n self.histogram_dict[tensor\n ] = hist, hist_edges, min_value, max_value\n else:\n old_histogram = self.histogram_dict[tensor]\n old_min = old_histogram[2]\n old_max = old_histogram[3]\n old_hist = old_histogram[0]\n old_hist_edges = old_histogram[1]\n temp_amax = np.max(data_arr)\n if temp_amax > old_hist_edges[-1]:\n width = old_hist_edges[1] - old_hist_edges[0]\n new_bin_edges = 
np.arange(old_hist_edges[-1] + width, \n temp_amax + width, width)\n old_hist_edges = np.hstack((old_hist_edges, new_bin_edges))\n hist, hist_edges = np.histogram(data_arr, bins=old_hist_edges)\n hist[:len(old_hist)] += old_hist\n self.histogram_dict[tensor] = hist, hist_edges, min(old_min,\n min_value), max(old_max, max_value)\n\n def collect_value(self, name_to_arr):\n \"\"\"\n Collect histogram on real value\n \"\"\"\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr)\n data_arr = data_arr.flatten()\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n threshold = max(abs(min_value), abs(max_value))\n if tensor in self.histogram_dict:\n old_histogram = self.histogram_dict[tensor]\n self.histogram_dict[tensor] = self.merge_histogram(\n old_histogram, data_arr, min_value, max_value, threshold)\n else:\n hist, hist_edges = np.histogram(data_arr, self.num_bins,\n range=(-threshold, threshold))\n self.histogram_dict[tensor\n ] = hist, hist_edges, min_value, max_value, threshold\n\n def merge_histogram(self, old_histogram, data_arr, new_min, new_max,\n new_threshold):\n old_hist, old_hist_edges, old_min, old_max, old_threshold = (\n old_histogram)\n if new_threshold <= old_threshold:\n new_hist, _ = np.histogram(data_arr, len(old_hist), range=(-\n old_threshold, old_threshold))\n return new_hist + old_hist, old_hist_edges, min(old_min, new_min\n ), max(old_max, new_max), old_threshold\n else:\n if old_threshold == 0:\n hist, hist_edges = np.histogram(data_arr, len(old_hist),\n range=(-new_threshold, new_threshold))\n hist += old_hist\n else:\n old_num_bins = len(old_hist)\n old_stride = 2 * old_threshold / old_num_bins\n half_increased_bins = int((new_threshold - old_threshold) //\n old_stride + 1)\n new_num_bins = old_num_bins + 2 * half_increased_bins\n new_threshold = (half_increased_bins * old_stride +\n old_threshold)\n hist, hist_edges = np.histogram(data_arr, 
new_num_bins,\n range=(-new_threshold, new_threshold))\n hist[half_increased_bins:new_num_bins - half_increased_bins\n ] += old_hist\n return hist, hist_edges, min(old_min, new_min), max(old_max,\n new_max), new_threshold\n\n def compute_collection_result(self):\n if not self.histogram_dict or len(self.histogram_dict) == 0:\n raise ValueError(\n 'Histogram has not been collected. Please run collect() first.'\n )\n print(\n f'Finding optimal threshold for each tensor using {self.method} algorithm ...'\n )\n if self.method == 'entropy':\n return self.compute_entropy()\n elif self.method == 'percentile':\n return self.compute_percentile()\n elif self.method == 'distribution':\n return self.compute_distribution()\n else:\n raise ValueError(\n \"Only 'entropy', 'percentile' or 'distribution' methods are supported\"\n )\n\n def compute_percentile(self):\n if self.percentile < 0 or self.percentile > 100:\n raise ValueError(\n 'Invalid percentile. Must be in range 0 <= percentile <= 100.')\n histogram_dict = self.histogram_dict\n percentile = self.percentile\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(f'Number of histogram bins : {self.num_bins}')\n print(f'Percentile : ({100.0 - percentile},{percentile})')\n for tensor, histogram in histogram_dict.items():\n hist = histogram[0]\n hist_edges = histogram[1]\n total = hist.sum()\n cdf = np.cumsum(hist / total)\n if self.symmetric:\n idx_right = np.searchsorted(cdf, percentile / 100.0)\n thresholds_dict[tensor] = -float(hist_edges[idx_right]), float(\n hist_edges[idx_right])\n else:\n percent_to_cut_one_side = (100.0 - percentile) / 200.0\n idx_right = np.searchsorted(cdf, 1.0 - percent_to_cut_one_side)\n idx_left = np.searchsorted(cdf, percent_to_cut_one_side)\n thresholds_dict[tensor] = float(hist_edges[idx_left]), float(\n hist_edges[idx_right])\n min_value = histogram[2]\n max_value = histogram[3]\n if thresholds_dict[tensor][0] < min_value:\n thresholds_dict[tensor] = min_value, 
thresholds_dict[tensor][1]\n if thresholds_dict[tensor][1] > max_value:\n thresholds_dict[tensor] = thresholds_dict[tensor][0], max_value\n thresholds_dict[tensor] = *thresholds_dict[tensor], *hist[:2]\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(hist, hist_edges)\n return thresholds_dict\n\n def compute_entropy(self):\n histogram_dict = self.histogram_dict\n num_quantized_bins = self.num_quantized_bins\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(\n 'Number of histogram bins : {} (The number may increase depends on the data it collects)'\n .format(self.num_bins))\n print(f'Number of quantized bins : {self.num_quantized_bins}')\n for tensor, histogram in histogram_dict.items():\n optimal_threshold = self.get_entropy_threshold(histogram,\n num_quantized_bins)\n thresholds_dict[tensor] = optimal_threshold\n thresholds_dict[tensor] = *optimal_threshold, *histogram[:2]\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(histogram[0], histogram[1])\n return thresholds_dict\n\n @staticmethod\n def _avg_std(hist, hist_edges, power=1):\n if power <= 0:\n raise ValueError(f'power={power} <= 0 is invalid.')\n values = (hist_edges[:-1] + hist_edges[1:]) * 0.5\n if power == 1:\n avg = (hist * values).sum() / hist.sum()\n std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5\n return avg, std\n if int(power) == power and int(power) % 2 == 1:\n avg = (hist * values ** power).sum() / hist.sum()\n std = ((hist * (values ** power - avg) ** 2).sum() / hist.sum()\n ) ** 0.5\n return avg, std\n fact = np.abs(values) / values\n fact[np.isnan(fact)] = 1\n fact[np.isinf(fact)] = 1\n values = np.abs(values) ** power * fact\n avg = (hist * values).sum() / hist.sum()\n std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5\n return avg, std\n\n def compute_distribution(self):\n if self.num_bins < 512:\n raise ValueError(\n 'Invalid num_bins. 
Must be in range 512 <= num_bins.')\n histogram_dict = self.histogram_dict\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(f'Number of histogram bins : {self.num_bins}')\n print(f'Scenario : {self.scenario!r})')\n for tensor, histogram in histogram_dict.items():\n hist = histogram[0]\n hist_edges = histogram[1]\n if self.scenario == 'same':\n avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1)\n elif self.scenario == 'p3':\n avg_coef, std_coef = self._avg_std(hist, hist_edges, power=\n 1.0 / 3.0)\n else:\n raise ValueError(\"Invalid scenario. Must be in {'same', 'p3'}.\"\n )\n thresholds_dict[tensor] = TensorData(avg=avg_coef, std=std_coef,\n hist=hist, hist_edges=hist_edges)\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(hist, hist_edges)\n return thresholds_dict\n\n def get_entropy_threshold(self, histogram, num_quantized_bins):\n \"\"\"Given a dataset, find the optimal threshold for quantizing it.\n The reference distribution is `q`, and the candidate distribution is `p`.\n `q` is a truncated version of the original distribution.\n Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf\n \"\"\"\n import copy\n from scipy.stats import entropy\n hist = histogram[0]\n hist_edges = histogram[1]\n num_bins = hist.size\n zero_bin_index = num_bins // 2\n num_half_quantized_bin = num_quantized_bins // 2\n kl_divergence = np.zeros(zero_bin_index - num_half_quantized_bin + 1)\n thresholds = [(0, 0) for i in range(kl_divergence.size)]\n for i in range(num_half_quantized_bin, zero_bin_index + 1, 1):\n start_index = zero_bin_index - i\n end_index = (zero_bin_index + i + 1 if zero_bin_index + i + 1 <=\n num_bins else num_bins)\n thresholds[i - num_half_quantized_bin] = float(hist_edges[\n start_index]), float(hist_edges[end_index])\n sliced_distribution = copy.deepcopy(hist[start_index:end_index])\n p = sliced_distribution.copy()\n left_outliers_count = 
sum(hist[:start_index])\n right_outliers_count = sum(hist[end_index:])\n p[0] += left_outliers_count\n p[-1] += right_outliers_count\n nonzeros = (p != 0).astype(np.int64)\n quantized_bins = np.zeros(num_quantized_bins, dtype=np.int64)\n num_merged_bins = sliced_distribution.size // num_quantized_bins\n for index in range(num_quantized_bins):\n start = index * num_merged_bins\n end = start + num_merged_bins\n quantized_bins[index] = sum(sliced_distribution[start:end])\n quantized_bins[-1] += sum(sliced_distribution[\n num_quantized_bins * num_merged_bins:])\n q = np.zeros(p.size, dtype=np.int64)\n for index in range(num_quantized_bins):\n start = index * num_merged_bins\n end = start + num_merged_bins\n norm = sum(nonzeros[start:end])\n if norm != 0:\n q[start:end] = float(quantized_bins[index]) / float(norm)\n p = smooth_distribution(p)\n q = smooth_distribution(q)\n if isinstance(q, np.ndarray):\n kl_divergence[i - num_half_quantized_bin] = entropy(p, q)\n else:\n kl_divergence[i - num_half_quantized_bin] = float('inf')\n min_kl_divergence_idx = np.argmin(kl_divergence)\n optimal_threshold = thresholds[min_kl_divergence_idx]\n min_value = histogram[2]\n max_value = histogram[3]\n if optimal_threshold[0] < min_value:\n optimal_threshold = min_value, optimal_threshold[1]\n if optimal_threshold[1] > max_value:\n optimal_threshold = optimal_threshold[0], max_value\n return optimal_threshold\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python\n# -------------------------------------------------------------------------\n# Copyright (c) Microsoft, Intel Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\nimport abc\nimport itertools\nimport os\nimport uuid\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import Dict, Optional, Sequence, Tuple, Union\n\nimport numpy as np\nimport onnx\nfrom onnx import ModelProto, TensorProto, helper, numpy_helper\n\nimport onnxruntime\n\nfrom .quant_utils import apply_plot, load_model_with_shape_infer, smooth_distribution\n\n\nclass TensorData:\n _allowed = frozenset([\"avg\", \"std\", \"lowest\", \"highest\", \"hist\", \"hist_edges\"])\n\n def __init__(self, **kwargs):\n for k, v in kwargs.items():\n if k not in TensorData._allowed:\n raise ValueError(f\"Unexpected value {k!r} not in {TensorData._allowed}.\")\n setattr(self, k, v)\n\n @property\n def range_value(self):\n if not hasattr(self, \"lowest\") or not hasattr(self, \"highest\"):\n raise AttributeError(f\"Attributes 'lowest' and/or 'highest' missing in {dir(self)}.\")\n return (self.lowest, self.highest)\n\n @property\n def avg_std(self):\n if not hasattr(self, \"avg\") or not hasattr(self, \"std\"):\n raise AttributeError(f\"Attributes 'avg' and/or 'std' missing in {dir(self)}.\")\n return (self.avg, self.std)\n\n\nclass TensorsData:\n def __init__(self, calibration_method, data: Dict[str, Union[TensorData, Tuple]]):\n self.calibration_method = calibration_method\n self.data = {}\n for k, v in data.items():\n if not isinstance(k, str):\n raise TypeError(f\"Keys must be strings not {type(k)}.\")\n if isinstance(v, tuple):\n if calibration_method == CalibrationMethod.MinMax and len(v) == 2:\n self.data[k] = TensorData(lowest=v[0], highest=v[1])\n continue\n if len(v) == 4:\n self.data[k] = 
TensorData(lowest=v[0], highest=v[1], histogram=v[2], bins=v[3])\n continue\n raise TypeError(f\"Unexpected tuple for {k:r}, it has {len(v)} elements: {v}.\")\n if not isinstance(v, TensorData):\n raise TypeError(f\"Values must be TensorData not {type(v)}.\")\n self.data[k] = v\n\n def __iter__(self):\n yield from self.data\n\n def __contains__(self, key):\n return key in self.data\n\n def __getitem__(self, key):\n return self.data[key]\n\n def __setitem__(self, key, value):\n if key not in self.data:\n raise RuntimeError(f\"Only an existing tensor can be modified, {key!r} is not.\")\n self.data[key] = value\n\n def values(self):\n return self.data.values()\n\n\nclass CalibrationMethod(Enum):\n MinMax = 0\n Entropy = 1\n Percentile = 2\n Distribution = 3\n\n\nclass CalibrationDataReader(metaclass=abc.ABCMeta):\n @classmethod\n def __subclasshook__(cls, subclass):\n return hasattr(subclass, \"get_next\") and callable(subclass.get_next) or NotImplemented\n\n @abc.abstractmethod\n def get_next(self) -> dict:\n \"\"\"generate the input data dict for ONNXinferenceSession run\"\"\"\n raise NotImplementedError\n\n def __iter__(self):\n return self\n\n def __next__(self):\n result = self.get_next()\n if result is None:\n raise StopIteration\n return result\n\n\nclass CalibraterBase:\n def __init__(\n self,\n model_path: Union[str, Path],\n op_types_to_calibrate: Optional[Sequence[str]] = None,\n augmented_model_path=\"augmented_model.onnx\",\n symmetric=False,\n use_external_data_format=False,\n ):\n \"\"\"\n :param model_path: ONNX model to calibrate. It should be a model file path\n :param op_types_to_calibrate: operator types to calibrate. 
By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n \"\"\"\n if isinstance(model_path, str):\n self.model = load_model_with_shape_infer(Path(model_path))\n elif isinstance(model_path, Path):\n self.model = load_model_with_shape_infer(model_path)\n else:\n raise ValueError(\"model_path should be model path.\")\n\n self.op_types_to_calibrate = op_types_to_calibrate\n self.augmented_model_path = augmented_model_path\n self.symmetric = symmetric\n self.use_external_data_format = use_external_data_format\n\n self.augment_model = None\n self.infer_session = None\n self.execution_providers = [\"CPUExecutionProvider\"]\n\n def set_execution_providers(self, execution_providers=[\"CPUExecutionProvider\"]): # noqa: B006\n \"\"\"\n reset the execution providers to execute the collect_data. 
It triggers to re-creating inference session.\n \"\"\"\n self.execution_providers = execution_providers\n self.create_inference_session()\n\n def create_inference_session(self):\n \"\"\"\n create an OnnxRuntime InferenceSession.\n \"\"\"\n sess_options = onnxruntime.SessionOptions()\n sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL\n self.infer_session = onnxruntime.InferenceSession(\n self.augmented_model_path,\n sess_options=sess_options,\n providers=self.execution_providers,\n )\n\n def select_tensors_to_calibrate(self, model: ModelProto):\n \"\"\"\n select input/output tensors of candidate nodes to calibrate.\n returns:\n tensors (set): set of tensor name.\n value_infos (dict): tensor name to value info.\n \"\"\"\n value_infos = {vi.name: vi for vi in model.graph.value_info}\n value_infos.update({ot.name: ot for ot in model.graph.output})\n value_infos.update({it.name: it for it in model.graph.input})\n initializer = {init.name for init in model.graph.initializer}\n\n tensors_to_calibrate = set()\n tensor_type_to_calibrate = {TensorProto.FLOAT}\n\n for node in model.graph.node:\n if not self.op_types_to_calibrate or node.op_type in self.op_types_to_calibrate:\n for tensor_name in itertools.chain(node.input, node.output):\n if tensor_name in value_infos:\n vi = value_infos[tensor_name]\n if (\n vi.type.HasField(\"tensor_type\")\n and (vi.type.tensor_type.elem_type in tensor_type_to_calibrate)\n and (tensor_name not in initializer)\n ):\n tensors_to_calibrate.add(tensor_name)\n\n return tensors_to_calibrate, value_infos\n\n def get_augment_model(self):\n \"\"\"\n return: augmented onnx model. Call after calling augment_graph\n \"\"\"\n return self.model\n\n def augment_graph(self):\n \"\"\"\n abstract method: augment the input model to prepare for collecting data. It will:\n 1. augment the model to be able to collect desired statistics data\n 2. 
save augmented model to augmented_model_paths\n \"\"\"\n raise NotImplementedError\n\n def collect_data(self, data_reader: CalibrationDataReader):\n \"\"\"\n abstract method: collect the tensors that will be used for range computation. It can be called multiple times.\n \"\"\"\n raise NotImplementedError\n\n def compute_data(self) -> TensorsData:\n \"\"\"\n abstract method: compute data based on the calibration method stored in TensorsData\n \"\"\"\n raise NotImplementedError\n\n\nclass MinMaxCalibrater(CalibraterBase):\n def __init__(\n self,\n model_path: Union[str, Path],\n op_types_to_calibrate: Optional[Sequence[str]] = None,\n augmented_model_path=\"augmented_model.onnx\",\n symmetric=False,\n use_external_data_format=False,\n moving_average=False,\n averaging_constant=0.01,\n ):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param moving_average: compute the moving average of the minimum and maximum values instead of the global minimum and maximum.\n :param averaging_constant: constant smoothing factor to use when computing the moving average.\n \"\"\"\n super().__init__(\n model_path,\n op_types_to_calibrate=op_types_to_calibrate,\n augmented_model_path=augmented_model_path,\n symmetric=symmetric,\n use_external_data_format=use_external_data_format,\n )\n self.intermediate_outputs = []\n self.calibrate_tensors_range = None\n self.num_model_outputs = len(self.model.graph.output)\n self.model_original_outputs = {output.name for output in self.model.graph.output}\n self.moving_average = moving_average\n if moving_average and (averaging_constant < 0 or averaging_constant > 1):\n raise 
ValueError(\"Invalid averaging constant, which should not be < 0 or > 1.\")\n self.averaging_constant = averaging_constant\n\n def augment_graph(self):\n \"\"\"\n Adds ReduceMin and ReduceMax nodes to all quantization_candidates op type nodes in\n model and ensures their outputs are stored as part of the graph output\n :return: augmented ONNX model\n \"\"\"\n tensors, _ = self.select_tensors_to_calibrate(self.model)\n reshape_shape_name = str(uuid.uuid4())\n reshape_shape = numpy_helper.from_array(np.array([1], dtype=np.int64), reshape_shape_name)\n self.model.graph.initializer.append(reshape_shape)\n\n def add_reduce_min_max(tensor_name, reduce_op_name):\n # When doing ReduceMax/ReduceMin, ORT can't reduce on dim with value of 0 if 'keepdims' is false.\n # To make the code simple, we always let keepdims to be 1.\n keepdims = 1\n\n # Adding ReduceMin/ReduceMax nodes: ReduceMin/ReduceMax -> Reshape-> (output)\n reduce_output = tensor_name + \"_\" + reduce_op_name\n intermediate_output = reduce_output + \"_Reshape\"\n reduce_node = onnx.helper.make_node(\n reduce_op_name, [tensor_name], [intermediate_output], keepdims=keepdims, name=reduce_output\n )\n\n reshape_node = onnx.helper.make_node(\n \"Reshape\",\n inputs=[intermediate_output, reshape_shape_name],\n outputs=[reduce_output],\n name=intermediate_output,\n )\n\n self.model.graph.node.extend([reduce_node, reshape_node])\n self.model.graph.output.append(helper.make_tensor_value_info(reduce_output, TensorProto.FLOAT, [1]))\n\n for tensor in tensors:\n add_reduce_min_max(tensor, \"ReduceMin\")\n add_reduce_min_max(tensor, \"ReduceMax\")\n\n onnx.save(\n self.model,\n self.augmented_model_path,\n save_as_external_data=self.use_external_data_format,\n )\n\n def clear_collected_data(self):\n self.intermediate_outputs = []\n\n def collect_data(self, data_reader: CalibrationDataReader):\n while True:\n inputs = data_reader.get_next()\n if not inputs:\n break\n 
self.intermediate_outputs.append(self.infer_session.run(None, inputs))\n\n if len(self.intermediate_outputs) == 0:\n raise ValueError(\"No data is collected.\")\n\n t = self.compute_data()\n if not isinstance(t, TensorsData):\n raise TypeError(f\"compute_data must return a TensorsData not {type(t)}.\")\n self.clear_collected_data()\n\n def merge_range(self, old_range, new_range):\n if not old_range:\n return new_range\n\n for key, value in old_range.items():\n if self.moving_average:\n min_value = value[0] + self.averaging_constant * (new_range[key][0] - value[0])\n max_value = value[1] + self.averaging_constant * (new_range[key][1] - value[1])\n else:\n min_value = min(value[0], new_range[key][0])\n max_value = max(value[1], new_range[key][1])\n new_range[key] = (min_value, max_value)\n\n return new_range\n\n def compute_data(self) -> TensorsData:\n \"\"\"\n Compute the min-max range of tensor\n :return: dictionary mapping: {added node names: (ReduceMin, ReduceMax) pairs }\n \"\"\"\n\n if len(self.intermediate_outputs) == 0:\n return self.calibrate_tensors_range\n\n output_names = [self.infer_session.get_outputs()[i].name for i in range(len(self.intermediate_outputs[0]))]\n output_dicts_list = [\n dict(zip(output_names, intermediate_output)) for intermediate_output in self.intermediate_outputs\n ]\n\n merged_output_dict = {}\n for d in output_dicts_list:\n for k, v in d.items():\n merged_output_dict.setdefault(k, []).append(v)\n added_output_names = output_names[self.num_model_outputs :]\n calibrate_tensor_names = [\n added_output_names[i].rpartition(\"_\")[0] for i in range(0, len(added_output_names), 2)\n ] # output names\n\n merged_added_output_dict = {\n i: merged_output_dict[i] for i in merged_output_dict if i not in self.model_original_outputs\n }\n\n pairs = []\n for i in range(0, len(added_output_names), 2):\n min_value = 0\n max_value = 0\n if self.moving_average:\n min_value_array = np.mean(merged_added_output_dict[added_output_names[i]], axis=0)\n 
max_value_array = np.mean(merged_added_output_dict[added_output_names[i + 1]], axis=0)\n else:\n min_value_array = min(merged_added_output_dict[added_output_names[i]])\n max_value_array = max(merged_added_output_dict[added_output_names[i + 1]])\n if type(min_value_array) == int or min_value_array.size > 0:\n min_value = float(min_value_array)\n if type(max_value_array) == int or max_value_array.size > 0:\n max_value = float(max_value_array)\n\n if self.symmetric:\n max_absolute_value = max(abs(min_value), abs(max_value))\n pairs.append(tuple([-max_absolute_value, max_absolute_value]))\n else:\n pairs.append(tuple([min_value, max_value]))\n\n new_calibrate_tensors_range = TensorsData(CalibrationMethod.MinMax, dict(zip(calibrate_tensor_names, pairs)))\n if self.calibrate_tensors_range:\n self.calibrate_tensors_range = self.merge_range(self.calibrate_tensors_range, new_calibrate_tensors_range)\n else:\n self.calibrate_tensors_range = new_calibrate_tensors_range\n\n return self.calibrate_tensors_range\n\n\nclass HistogramCalibrater(CalibraterBase):\n def __init__(\n self,\n model_path: Union[str, Path],\n op_types_to_calibrate: Optional[Sequence[str]] = None,\n augmented_model_path=\"augmented_model.onnx\",\n use_external_data_format=False,\n method=\"percentile\",\n symmetric=False,\n num_bins=128,\n num_quantized_bins=2048,\n percentile=99.999,\n scenario=\"same\",\n ):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path.\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. 
One of ['entropy', 'percentile'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param num_quantized_bins: number of quantized bins. Default 128.\n :param percentile: A float number between [0, 100]. Default 99.99.\n :param scenario: see :class:`DistributionCalibrater`\n \"\"\"\n super().__init__(\n model_path,\n op_types_to_calibrate=op_types_to_calibrate,\n augmented_model_path=augmented_model_path,\n symmetric=symmetric,\n use_external_data_format=use_external_data_format,\n )\n self.intermediate_outputs = []\n self.calibrate_tensors_range = None\n self.num_model_outputs = len(self.model.graph.output)\n self.model_original_outputs = {output.name for output in self.model.graph.output}\n self.collector = None\n self.method = method\n self.num_bins = num_bins\n self.num_quantized_bins = num_quantized_bins\n self.percentile = percentile\n self.tensors_to_calibrate = None\n self.scenario = scenario\n\n def augment_graph(self):\n \"\"\"\n make all quantization_candidates op type nodes as part of the graph output.\n :return: augmented ONNX model\n \"\"\"\n self.tensors_to_calibrate, value_infos = self.select_tensors_to_calibrate(self.model)\n for tensor in self.tensors_to_calibrate:\n if tensor not in self.model_original_outputs:\n self.model.graph.output.append(value_infos[tensor])\n\n onnx.save(\n self.model,\n self.augmented_model_path,\n save_as_external_data=self.use_external_data_format,\n )\n\n def clear_collected_data(self):\n self.intermediate_outputs = []\n\n def collect_data(self, data_reader: CalibrationDataReader):\n \"\"\"\n Entropy Calibrator collects operators' tensors as well as generates tensor histogram for each operator.\n \"\"\"\n while True:\n inputs = data_reader.get_next()\n if not inputs:\n break\n self.intermediate_outputs.append(self.infer_session.run(None, inputs))\n\n if len(self.intermediate_outputs) == 0:\n raise 
ValueError(\"No data is collected.\")\n\n output_names = [self.infer_session.get_outputs()[i].name for i in range(len(self.intermediate_outputs[0]))]\n output_dicts_list = [\n dict(zip(output_names, intermediate_output)) for intermediate_output in self.intermediate_outputs\n ]\n\n merged_dict = {}\n for d in output_dicts_list:\n for k, v in d.items():\n merged_dict.setdefault(k, []).append(v)\n\n clean_merged_dict = {i: merged_dict[i] for i in merged_dict if i in self.tensors_to_calibrate}\n\n if not self.collector:\n self.collector = HistogramCollector(\n method=self.method,\n symmetric=self.symmetric,\n num_bins=self.num_bins,\n num_quantized_bins=self.num_quantized_bins,\n percentile=self.percentile,\n scenario=self.scenario,\n )\n self.collector.collect(clean_merged_dict)\n\n self.clear_collected_data()\n\n def compute_data(self) -> TensorsData:\n \"\"\"\n Compute the min-max range of tensor\n :return: dictionary mapping: {tensor name: (min value, max value)}\n \"\"\"\n if not self.collector:\n raise ValueError(\"No collector created and can't generate calibration data.\")\n\n if isinstance(self, EntropyCalibrater):\n cal = CalibrationMethod.Entropy\n elif isinstance(self, PercentileCalibrater):\n cal = CalibrationMethod.Percentile\n elif isinstance(self, DistributionCalibrater):\n cal = CalibrationMethod.Distribution\n else:\n raise TypeError(f\"Unknown calibrater {type(self)}. This method must be overwritten.\")\n return TensorsData(cal, self.collector.compute_collection_result())\n\n\nclass EntropyCalibrater(HistogramCalibrater):\n def __init__(\n self,\n model_path: Union[str, Path],\n op_types_to_calibrate: Optional[Sequence[str]] = None,\n augmented_model_path=\"augmented_model.onnx\",\n use_external_data_format=False,\n method=\"entropy\",\n symmetric=False,\n num_bins=128,\n num_quantized_bins=128,\n ):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. 
By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param num_quantized_bins: number of quantized bins. Default 128.\n \"\"\"\n super().__init__(\n model_path,\n op_types_to_calibrate,\n augmented_model_path,\n use_external_data_format,\n method=method,\n symmetric=symmetric,\n num_bins=num_bins,\n num_quantized_bins=num_quantized_bins,\n )\n\n\nclass PercentileCalibrater(HistogramCalibrater):\n def __init__(\n self,\n model_path: Union[str, Path],\n op_types_to_calibrate: Optional[Sequence[str]] = None,\n augmented_model_path=\"augmented_model.onnx\",\n use_external_data_format=False,\n method=\"percentile\",\n symmetric=False,\n num_bins=2048,\n percentile=99.999,\n ):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_quantized_bins: number of quantized bins. Default 128.\n :param percentile: A float number between [0, 100]. 
Default 99.99.\n \"\"\"\n super().__init__(\n model_path,\n op_types_to_calibrate,\n augmented_model_path,\n use_external_data_format,\n method=method,\n symmetric=symmetric,\n num_bins=num_bins,\n percentile=percentile,\n )\n\n\nclass DistributionCalibrater(HistogramCalibrater):\n def __init__(\n self,\n model_path: Union[str, Path],\n op_types_to_calibrate: Optional[Sequence[str]] = None,\n augmented_model_path=\"augmented_model.onnx\",\n use_external_data_format=False,\n method=\"distribution\",\n num_bins=128,\n scenario=\"same\",\n ):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param scenario: for float 8 only, if `scenario=\"same\"`,\n the algorithm weights and float 8 follow the same distribution,\n if `scenario=\"p3\"`, it assumes the weights follow\n a gaussian law and float 8 ~ X^3 where X is a gaussian law\n \"\"\"\n super().__init__(\n model_path,\n op_types_to_calibrate,\n augmented_model_path,\n use_external_data_format,\n method=method,\n num_bins=num_bins,\n scenario=scenario,\n )\n\n\nclass CalibrationDataCollector(metaclass=abc.ABCMeta):\n \"\"\"\n Base class for collecting data for calibration-based quantization.\n \"\"\"\n\n @abc.abstractmethod\n def collect(self, name_to_arr):\n \"\"\"\n Generate informative data based on given data.\n name_to_arr : dict\n tensor name to NDArray data\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def compute_collection_result(self):\n \"\"\"\n Get the 
optimal result among collection data.\n \"\"\"\n raise NotImplementedError\n\n\nclass HistogramCollector(CalibrationDataCollector):\n \"\"\"\n Collecting histogram for each tensor. Percentile and Entropy method are supported.\n\n ref: https://github.com//apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py\n ref: https://docs.nvidia.com/deeplearning/tensorrt/pytorch-quantization-toolkit/docs/_modules/\n pytorch_quantization/calib/histogram.html\n \"\"\"\n\n def __init__(self, method, symmetric, num_bins, num_quantized_bins, percentile, scenario):\n self.histogram_dict = {}\n self.method = method\n self.symmetric = symmetric\n self.num_bins = num_bins\n self.num_quantized_bins = num_quantized_bins\n self.percentile = percentile\n self.scenario = scenario\n\n def get_histogram_dict(self):\n return self.histogram_dict\n\n def collect(self, name_to_arr):\n print(\"Collecting tensor data and making histogram ...\")\n\n # TODO: Currently we have different collect() for entropy and percentile method respectively.\n # Need unified collect in the future.\n if self.method in {\"distribution\", \"entropy\"}:\n return self.collect_value(name_to_arr)\n elif self.method == \"percentile\":\n if self.symmetric:\n return self.collect_absolute_value(name_to_arr)\n else:\n return self.collect_value(name_to_arr)\n else:\n raise ValueError(\"Only 'entropy', 'percentile' or 'distribution' methods are supported\")\n\n def collect_absolute_value(self, name_to_arr):\n \"\"\"\n Collect histogram on absolute value\n \"\"\"\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr) # noqa: PLW2901\n data_arr = data_arr.flatten() # noqa: PLW2901\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n\n data_arr = np.absolute(data_arr) # only consider absolute value # noqa: PLW2901\n\n if tensor not in self.histogram_dict:\n # first time it uses num_bins to compute histogram.\n hist, 
hist_edges = np.histogram(data_arr, bins=self.num_bins)\n self.histogram_dict[tensor] = (hist, hist_edges, min_value, max_value)\n else:\n old_histogram = self.histogram_dict[tensor]\n old_min = old_histogram[2]\n old_max = old_histogram[3]\n old_hist = old_histogram[0]\n old_hist_edges = old_histogram[1]\n temp_amax = np.max(data_arr)\n if temp_amax > old_hist_edges[-1]:\n # increase the number of bins\n width = old_hist_edges[1] - old_hist_edges[0]\n # NOTE: np.arange may create an extra bin after the one containing temp_amax\n new_bin_edges = np.arange(old_hist_edges[-1] + width, temp_amax + width, width)\n old_hist_edges = np.hstack((old_hist_edges, new_bin_edges))\n hist, hist_edges = np.histogram(data_arr, bins=old_hist_edges)\n hist[: len(old_hist)] += old_hist\n self.histogram_dict[tensor] = (hist, hist_edges, min(old_min, min_value), max(old_max, max_value))\n\n def collect_value(self, name_to_arr):\n \"\"\"\n Collect histogram on real value\n \"\"\"\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr) # noqa: PLW2901\n data_arr = data_arr.flatten() # noqa: PLW2901\n\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n\n threshold = max(abs(min_value), abs(max_value))\n\n if tensor in self.histogram_dict:\n old_histogram = self.histogram_dict[tensor]\n self.histogram_dict[tensor] = self.merge_histogram(\n old_histogram, data_arr, min_value, max_value, threshold\n )\n else:\n hist, hist_edges = np.histogram(data_arr, self.num_bins, range=(-threshold, threshold))\n self.histogram_dict[tensor] = (\n hist,\n hist_edges,\n min_value,\n max_value,\n threshold,\n )\n\n def merge_histogram(self, old_histogram, data_arr, new_min, new_max, new_threshold):\n (old_hist, old_hist_edges, old_min, old_max, old_threshold) = old_histogram\n\n if new_threshold <= old_threshold:\n new_hist, _ = np.histogram(data_arr, len(old_hist), range=(-old_threshold, old_threshold))\n 
return (\n new_hist + old_hist,\n old_hist_edges,\n min(old_min, new_min),\n max(old_max, new_max),\n old_threshold,\n )\n else:\n if old_threshold == 0:\n hist, hist_edges = np.histogram(data_arr, len(old_hist), range=(-new_threshold, new_threshold))\n hist += old_hist\n else:\n old_num_bins = len(old_hist)\n old_stride = 2 * old_threshold / old_num_bins\n half_increased_bins = int((new_threshold - old_threshold) // old_stride + 1)\n new_num_bins = old_num_bins + 2 * half_increased_bins\n new_threshold = half_increased_bins * old_stride + old_threshold\n hist, hist_edges = np.histogram(data_arr, new_num_bins, range=(-new_threshold, new_threshold))\n hist[half_increased_bins : new_num_bins - half_increased_bins] += old_hist\n return (\n hist,\n hist_edges,\n min(old_min, new_min),\n max(old_max, new_max),\n new_threshold,\n )\n\n def compute_collection_result(self):\n if not self.histogram_dict or len(self.histogram_dict) == 0:\n raise ValueError(\"Histogram has not been collected. Please run collect() first.\")\n print(f\"Finding optimal threshold for each tensor using {self.method} algorithm ...\")\n\n if self.method == \"entropy\":\n return self.compute_entropy()\n elif self.method == \"percentile\":\n return self.compute_percentile()\n elif self.method == \"distribution\":\n return self.compute_distribution()\n else:\n raise ValueError(\"Only 'entropy', 'percentile' or 'distribution' methods are supported\")\n\n def compute_percentile(self):\n if self.percentile < 0 or self.percentile > 100:\n raise ValueError(\"Invalid percentile. 
Must be in range 0 <= percentile <= 100.\")\n\n histogram_dict = self.histogram_dict\n percentile = self.percentile\n\n thresholds_dict = {} # per tensor thresholds\n\n print(f\"Number of tensors : {len(histogram_dict)}\")\n print(f\"Number of histogram bins : {self.num_bins}\")\n print(f\"Percentile : ({100.0 - percentile},{percentile})\")\n\n for tensor, histogram in histogram_dict.items():\n hist = histogram[0]\n hist_edges = histogram[1]\n total = hist.sum()\n cdf = np.cumsum(hist / total)\n if self.symmetric:\n idx_right = np.searchsorted(cdf, percentile / 100.0)\n\n thresholds_dict[tensor] = (\n -float(hist_edges[idx_right]),\n float(hist_edges[idx_right]),\n )\n else:\n percent_to_cut_one_side = (100.0 - percentile) / 200.0\n idx_right = np.searchsorted(cdf, 1.0 - percent_to_cut_one_side)\n idx_left = np.searchsorted(cdf, percent_to_cut_one_side)\n thresholds_dict[tensor] = (\n float(hist_edges[idx_left]),\n float(hist_edges[idx_right]),\n )\n min_value = histogram[2]\n max_value = histogram[3]\n if thresholds_dict[tensor][0] < min_value:\n thresholds_dict[tensor] = (min_value, thresholds_dict[tensor][1])\n if thresholds_dict[tensor][1] > max_value:\n thresholds_dict[tensor] = (thresholds_dict[tensor][0], max_value)\n thresholds_dict[tensor] = (*thresholds_dict[tensor], *hist[:2])\n # Plot histogram for debug only\n if os.environ.get(\"QUANTIZATION_DEBUG\", 0) in (1, \"1\"):\n apply_plot(hist, hist_edges)\n\n return thresholds_dict\n\n def compute_entropy(self):\n histogram_dict = self.histogram_dict\n num_quantized_bins = self.num_quantized_bins\n\n thresholds_dict = {} # per tensor thresholds\n\n print(f\"Number of tensors : {len(histogram_dict)}\")\n print(\n \"Number of histogram bins : {} (The number may increase depends on the data it collects)\".format(\n self.num_bins\n )\n )\n print(f\"Number of quantized bins : {self.num_quantized_bins}\")\n\n for tensor, histogram in histogram_dict.items():\n optimal_threshold = 
self.get_entropy_threshold(histogram, num_quantized_bins)\n thresholds_dict[tensor] = optimal_threshold\n thresholds_dict[tensor] = (*optimal_threshold, *histogram[:2])\n\n # Plot histogram for debug only\n if os.environ.get(\"QUANTIZATION_DEBUG\", 0) in (1, \"1\"):\n apply_plot(histogram[0], histogram[1])\n\n return thresholds_dict\n\n @staticmethod\n def _avg_std(hist, hist_edges, power=1):\n if power <= 0:\n raise ValueError(f\"power={power} <= 0 is invalid.\")\n values = (hist_edges[:-1] + hist_edges[1:]) * 0.5\n if power == 1:\n avg = (hist * values).sum() / hist.sum()\n std = ((hist * values**2).sum() / hist.sum() - avg**2) ** 0.5\n return avg, std\n if int(power) == power and int(power) % 2 == 1:\n avg = (hist * values**power).sum() / hist.sum()\n std = ((hist * (values**power - avg) ** 2).sum() / hist.sum()) ** 0.5\n return avg, std\n\n fact = np.abs(values) / values\n fact[np.isnan(fact)] = 1\n fact[np.isinf(fact)] = 1\n values = np.abs(values) ** power * fact\n avg = (hist * values).sum() / hist.sum()\n std = ((hist * values**2).sum() / hist.sum() - avg**2) ** 0.5\n return avg, std\n\n def compute_distribution(self):\n if self.num_bins < 512:\n raise ValueError(\"Invalid num_bins. Must be in range 512 <= num_bins.\")\n\n histogram_dict = self.histogram_dict\n thresholds_dict = {} # per tensor thresholds\n\n print(f\"Number of tensors : {len(histogram_dict)}\")\n print(f\"Number of histogram bins : {self.num_bins}\")\n print(f\"Scenario : {self.scenario!r})\")\n\n for tensor, histogram in histogram_dict.items():\n hist = histogram[0]\n hist_edges = histogram[1]\n\n if self.scenario == \"same\":\n avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1)\n elif self.scenario == \"p3\":\n avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1.0 / 3.0)\n else:\n raise ValueError(\"Invalid scenario. 
Must be in {'same', 'p3'}.\")\n thresholds_dict[tensor] = TensorData(avg=avg_coef, std=std_coef, hist=hist, hist_edges=hist_edges)\n\n # Plot histogram for debug only\n if os.environ.get(\"QUANTIZATION_DEBUG\", 0) in (1, \"1\"):\n apply_plot(hist, hist_edges)\n\n return thresholds_dict\n\n def get_entropy_threshold(self, histogram, num_quantized_bins):\n \"\"\"Given a dataset, find the optimal threshold for quantizing it.\n The reference distribution is `q`, and the candidate distribution is `p`.\n `q` is a truncated version of the original distribution.\n Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf\n \"\"\"\n import copy\n\n from scipy.stats import entropy\n\n hist = histogram[0]\n hist_edges = histogram[1]\n num_bins = hist.size\n zero_bin_index = num_bins // 2\n num_half_quantized_bin = num_quantized_bins // 2\n\n kl_divergence = np.zeros(zero_bin_index - num_half_quantized_bin + 1)\n thresholds = [(0, 0) for i in range(kl_divergence.size)]\n\n # <------------ num bins ---------------->\n # <--- quantized bins ---->\n # |======|===========|===========|=======|\n # zero bin index\n # ^ ^\n # | |\n # start index end index (start of iteration)\n # ^ ^\n # | |\n # start index end index ...\n # ^ ^\n # | |\n # start index end index (end of iteration)\n\n for i in range(num_half_quantized_bin, zero_bin_index + 1, 1):\n start_index = zero_bin_index - i\n end_index = zero_bin_index + i + 1 if (zero_bin_index + i + 1) <= num_bins else num_bins\n\n thresholds[i - num_half_quantized_bin] = (\n float(hist_edges[start_index]),\n float(hist_edges[end_index]),\n )\n\n sliced_distribution = copy.deepcopy(hist[start_index:end_index])\n\n # reference distribution p\n p = sliced_distribution.copy() # a copy of np array\n left_outliers_count = sum(hist[:start_index])\n right_outliers_count = sum(hist[end_index:])\n p[0] += left_outliers_count\n p[-1] += right_outliers_count\n\n # nonzeros[i] incidates whether p[i] is 
non-zero\n nonzeros = (p != 0).astype(np.int64)\n\n # quantize p.size bins into quantized bins (default 128 bins)\n quantized_bins = np.zeros(num_quantized_bins, dtype=np.int64)\n num_merged_bins = sliced_distribution.size // num_quantized_bins\n\n # merge bins into quantized bins\n for index in range(num_quantized_bins):\n start = index * num_merged_bins\n end = start + num_merged_bins\n quantized_bins[index] = sum(sliced_distribution[start:end])\n quantized_bins[-1] += sum(sliced_distribution[num_quantized_bins * num_merged_bins :])\n\n # in order to compare p and q, we need to make length of q equals to length of p\n # expand quantized bins into p.size bins\n q = np.zeros(p.size, dtype=np.int64)\n for index in range(num_quantized_bins):\n start = index * num_merged_bins\n end = start + num_merged_bins\n\n norm = sum(nonzeros[start:end])\n if norm != 0:\n q[start:end] = float(quantized_bins[index]) / float(norm)\n\n p = smooth_distribution(p)\n q = smooth_distribution(q)\n\n if isinstance(q, np.ndarray):\n kl_divergence[i - num_half_quantized_bin] = entropy(p, q)\n else:\n kl_divergence[i - num_half_quantized_bin] = float(\"inf\")\n\n min_kl_divergence_idx = np.argmin(kl_divergence)\n optimal_threshold = thresholds[min_kl_divergence_idx]\n min_value = histogram[2]\n max_value = histogram[3]\n if optimal_threshold[0] < min_value:\n optimal_threshold = (min_value, optimal_threshold[1])\n if optimal_threshold[1] > max_value:\n optimal_threshold = (optimal_threshold[0], max_value)\n return optimal_threshold\n\n\ndef create_calibrator(\n model: Union[str, Path],\n op_types_to_calibrate: Optional[Sequence[str]] = None,\n augmented_model_path=\"augmented_model.onnx\",\n calibrate_method=CalibrationMethod.MinMax,\n use_external_data_format=False,\n extra_options={}, # noqa: B006\n):\n calibrator = None\n if calibrate_method == CalibrationMethod.MinMax:\n # default settings for min-max algorithm\n symmetric = False if \"symmetric\" not in extra_options else 
extra_options[\"symmetric\"]\n moving_average = False if \"moving_average\" not in extra_options else extra_options[\"moving_average\"]\n averaging_constant = 0.01 if \"averaging_constant\" not in extra_options else extra_options[\"averaging_constant\"]\n calibrator = MinMaxCalibrater(\n model,\n op_types_to_calibrate,\n augmented_model_path,\n use_external_data_format=use_external_data_format,\n symmetric=symmetric,\n moving_average=moving_average,\n averaging_constant=averaging_constant,\n )\n elif calibrate_method == CalibrationMethod.Entropy:\n # default settings for entropy algorithm\n num_bins = 128 if \"num_bins\" not in extra_options else extra_options[\"num_bins\"]\n num_quantized_bins = 128 if \"num_quantized_bins\" not in extra_options else extra_options[\"num_quantized_bins\"]\n symmetric = False if \"symmetric\" not in extra_options else extra_options[\"symmetric\"]\n calibrator = EntropyCalibrater(\n model,\n op_types_to_calibrate,\n augmented_model_path,\n use_external_data_format=use_external_data_format,\n symmetric=symmetric,\n num_bins=num_bins,\n num_quantized_bins=num_quantized_bins,\n )\n elif calibrate_method == CalibrationMethod.Percentile:\n # default settings for percentile algorithm\n num_bins = 2048 if \"num_bins\" not in extra_options else extra_options[\"num_bins\"]\n percentile = 99.999 if \"percentile\" not in extra_options else extra_options[\"percentile\"]\n symmetric = True if \"symmetric\" not in extra_options else extra_options[\"symmetric\"]\n calibrator = PercentileCalibrater(\n model,\n op_types_to_calibrate,\n augmented_model_path,\n use_external_data_format=use_external_data_format,\n symmetric=symmetric,\n num_bins=num_bins,\n percentile=percentile,\n )\n\n elif calibrate_method == CalibrationMethod.Distribution:\n # default settings for percentile algorithm\n num_bins = 2048 if \"num_bins\" not in extra_options else extra_options[\"num_bins\"]\n scenario = \"same\" if \"scenario\" not in extra_options else 
extra_options[\"scenario\"]\n\n calibrator = DistributionCalibrater(\n model,\n op_types_to_calibrate,\n augmented_model_path,\n use_external_data_format=use_external_data_format,\n num_bins=num_bins,\n scenario=scenario,\n )\n\n if calibrator:\n calibrator.augment_graph()\n calibrator.create_inference_session()\n return calibrator\n\n raise ValueError(f\"Unsupported calibration method {calibrate_method}\")\n",
"step-ids": [
46,
56,
59,
60,
68
]
}
|
[
46,
56,
59,
60,
68
] |
from .parse_categories import extract_categories
from .parse_sections import extract_sections
from .utils import remove_xml_comments
def parse_page(page):
if 'redirect' in page.keys():
return
page_text = page['revision']['text']['#text']
page_text = remove_xml_comments(page_text)
title = page['title']
categories = extract_categories(page_text)
try:
sections = extract_sections(page_text)
except:
return title, 'Can not parse', None, None
return title, sections, categories
|
normal
|
{
"blob_id": "0ad2e6d7e3fd61943fc1dfe6662110a6f48c1bd5",
"index": 5347,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_page(page):\n if 'redirect' in page.keys():\n return\n page_text = page['revision']['text']['#text']\n page_text = remove_xml_comments(page_text)\n title = page['title']\n categories = extract_categories(page_text)\n try:\n sections = extract_sections(page_text)\n except:\n return title, 'Can not parse', None, None\n return title, sections, categories\n",
"step-3": "from .parse_categories import extract_categories\nfrom .parse_sections import extract_sections\nfrom .utils import remove_xml_comments\n\n\ndef parse_page(page):\n if 'redirect' in page.keys():\n return\n page_text = page['revision']['text']['#text']\n page_text = remove_xml_comments(page_text)\n title = page['title']\n categories = extract_categories(page_text)\n try:\n sections = extract_sections(page_text)\n except:\n return title, 'Can not parse', None, None\n return title, sections, categories\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from features.steps.web.test_home_page import *
from features.steps.mobile.test_home_page import *
from features.steps.web.test_login_page import *
|
normal
|
{
"blob_id": "b09d0806dfc6f4badfd9f2ac9c3f6d17d3df8e8c",
"index": 3254,
"step-1": "<mask token>\n",
"step-2": "from features.steps.web.test_home_page import *\nfrom features.steps.mobile.test_home_page import *\nfrom features.steps.web.test_login_page import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
## This file is the celeryconfig for the Task Worker (scanworker).
from scanworker.commonconfig import *
import sys
sys.path.append('.')
BROKER_CONF = {
'uid' : '{{ mq_user }}',
'pass' : '{{ mq_password }}',
'host' : '{{ mq_host }}',
'port' : '5672',
'vhost' : '{{ mq_vhost }}',
}
BROKER_URL = 'amqp://'+BROKER_CONF['uid']+':'+BROKER_CONF['pass']+'@'+BROKER_CONF['host']+':'+BROKER_CONF['port']+'/'+BROKER_CONF['vhost']
BROKER_HEARTBEAT=True
CELERY_IMPORTS = ('scanworker.tasks',)
from scanworker.tasks import VALID_SCANNERS as vs
VALID_SCANNERS=vs()
CELERY_QUEUES = VALID_SCANNERS.celery_virus_scan_queues()
CELERY_ROUTES = VALID_SCANNERS.celery_virus_scan_routes()
|
normal
|
{
"blob_id": "1a569b88c350124968212cb910bef7b09b166152",
"index": 8990,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append('.')\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append('.')\nBROKER_CONF = {'uid': '{{ mq_user }}', 'pass': '{{ mq_password }}', 'host':\n '{{ mq_host }}', 'port': '5672', 'vhost': '{{ mq_vhost }}'}\nBROKER_URL = 'amqp://' + BROKER_CONF['uid'] + ':' + BROKER_CONF['pass'\n ] + '@' + BROKER_CONF['host'] + ':' + BROKER_CONF['port'\n ] + '/' + BROKER_CONF['vhost']\nBROKER_HEARTBEAT = True\nCELERY_IMPORTS = 'scanworker.tasks',\n<mask token>\nVALID_SCANNERS = vs()\nCELERY_QUEUES = VALID_SCANNERS.celery_virus_scan_queues()\nCELERY_ROUTES = VALID_SCANNERS.celery_virus_scan_routes()\n",
"step-4": "from scanworker.commonconfig import *\nimport sys\nsys.path.append('.')\nBROKER_CONF = {'uid': '{{ mq_user }}', 'pass': '{{ mq_password }}', 'host':\n '{{ mq_host }}', 'port': '5672', 'vhost': '{{ mq_vhost }}'}\nBROKER_URL = 'amqp://' + BROKER_CONF['uid'] + ':' + BROKER_CONF['pass'\n ] + '@' + BROKER_CONF['host'] + ':' + BROKER_CONF['port'\n ] + '/' + BROKER_CONF['vhost']\nBROKER_HEARTBEAT = True\nCELERY_IMPORTS = 'scanworker.tasks',\nfrom scanworker.tasks import VALID_SCANNERS as vs\nVALID_SCANNERS = vs()\nCELERY_QUEUES = VALID_SCANNERS.celery_virus_scan_queues()\nCELERY_ROUTES = VALID_SCANNERS.celery_virus_scan_routes()\n",
"step-5": "\n## This file is the celeryconfig for the Task Worker (scanworker).\nfrom scanworker.commonconfig import *\nimport sys\nsys.path.append('.')\n\n\nBROKER_CONF = {\n 'uid' \t: '{{ mq_user }}',\n 'pass' \t: '{{ mq_password }}',\n 'host' \t: '{{ mq_host }}',\n 'port' \t: '5672',\n 'vhost' \t: '{{ mq_vhost }}',\n}\nBROKER_URL = 'amqp://'+BROKER_CONF['uid']+':'+BROKER_CONF['pass']+'@'+BROKER_CONF['host']+':'+BROKER_CONF['port']+'/'+BROKER_CONF['vhost']\n\nBROKER_HEARTBEAT=True\nCELERY_IMPORTS = ('scanworker.tasks',)\nfrom scanworker.tasks import VALID_SCANNERS as vs\nVALID_SCANNERS=vs()\nCELERY_QUEUES = VALID_SCANNERS.celery_virus_scan_queues()\nCELERY_ROUTES = VALID_SCANNERS.celery_virus_scan_routes()\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Rational:
def __init__(self, numer, denom):
self.numer = numer
self.denom = denom
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __mul__(self, other):
return Rational(self.numer * other.numer, self.denom * other.denom)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Rational:
def __init__(self, numer, denom):
self.numer = numer
self.denom = denom
<|reserved_special_token_0|>
def __sub__(self, other):
return Rational(self.numer * other.denom - other.numer * self.denom,
self.denom * other.denom)
def __mul__(self, other):
return Rational(self.numer * other.numer, self.denom * other.denom)
def __truediv__(self, other):
return Rational(self.numer * other.denom, self.denom * other.numer)
<|reserved_special_token_0|>
def __repr__(self):
return 'Rational({numer}/{denom})'.format(numer=self.numer, denom=
self.denom)
<|reserved_special_token_1|>
class Rational:
def __init__(self, numer, denom):
self.numer = numer
self.denom = denom
<|reserved_special_token_0|>
def __sub__(self, other):
return Rational(self.numer * other.denom - other.numer * self.denom,
self.denom * other.denom)
def __mul__(self, other):
return Rational(self.numer * other.numer, self.denom * other.denom)
def __truediv__(self, other):
return Rational(self.numer * other.denom, self.denom * other.numer)
def __str__(self):
return '{numer}/{denom}'.format(numer=self.numer, denom=self.denom)
def __repr__(self):
return 'Rational({numer}/{denom})'.format(numer=self.numer, denom=
self.denom)
<|reserved_special_token_1|>
class Rational:
def __init__(self, numer, denom):
self.numer = numer
self.denom = denom
def __add__(self, other):
return Rational(self.numer * other.denom + other.numer * self.denom,
self.denom * other.denom)
def __sub__(self, other):
return Rational(self.numer * other.denom - other.numer * self.denom,
self.denom * other.denom)
def __mul__(self, other):
return Rational(self.numer * other.numer, self.denom * other.denom)
def __truediv__(self, other):
return Rational(self.numer * other.denom, self.denom * other.numer)
def __str__(self):
return '{numer}/{denom}'.format(numer=self.numer, denom=self.denom)
def __repr__(self):
return 'Rational({numer}/{denom})'.format(numer=self.numer, denom=
self.denom)
<|reserved_special_token_1|>
# using python3
class Rational:
    """A rational number numer/denom.

    Arithmetic results are NOT reduced to lowest terms; ``1/2 + 1/2``
    yields ``4/4``, matching the original implementation.
    """

    def __init__(self, numer, denom):
        self.numer = numer
        self.denom = denom

    def __add__(self, other):
        # a/b + c/d = (a*d + c*b) / (b*d)
        new_denom = self.denom * other.denom
        new_numer = self.numer * other.denom + other.numer * self.denom
        return Rational(new_numer, new_denom)

    def __sub__(self, other):
        # a/b - c/d = (a*d - c*b) / (b*d)
        new_denom = self.denom * other.denom
        new_numer = self.numer * other.denom - other.numer * self.denom
        return Rational(new_numer, new_denom)

    def __mul__(self, other):
        # a/b * c/d = (a*c) / (b*d)
        return Rational(self.numer * other.numer, self.denom * other.denom)

    def __truediv__(self, other):
        # a/b / c/d = (a*d) / (b*c); no zero check, as in the original.
        return Rational(self.numer * other.denom, self.denom * other.numer)

    def __str__(self):
        return "{numer}/{denom}".format(numer=self.numer, denom=self.denom)

    def __repr__(self):
        return "Rational({numer}/{denom})".format(numer=self.numer, denom=self.denom)
|
flexible
|
{
"blob_id": "8098b9c27689dd4168ef05c03d4ec00f67f8090e",
"index": 4771,
"step-1": "class Rational:\n\n def __init__(self, numer, denom):\n self.numer = numer\n self.denom = denom\n <mask token>\n <mask token>\n\n def __mul__(self, other):\n return Rational(self.numer * other.numer, self.denom * other.denom)\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "class Rational:\n\n def __init__(self, numer, denom):\n self.numer = numer\n self.denom = denom\n <mask token>\n\n def __sub__(self, other):\n return Rational(self.numer * other.denom - other.numer * self.denom,\n self.denom * other.denom)\n\n def __mul__(self, other):\n return Rational(self.numer * other.numer, self.denom * other.denom)\n\n def __truediv__(self, other):\n return Rational(self.numer * other.denom, self.denom * other.numer)\n <mask token>\n\n def __repr__(self):\n return 'Rational({numer}/{denom})'.format(numer=self.numer, denom=\n self.denom)\n",
"step-3": "class Rational:\n\n def __init__(self, numer, denom):\n self.numer = numer\n self.denom = denom\n <mask token>\n\n def __sub__(self, other):\n return Rational(self.numer * other.denom - other.numer * self.denom,\n self.denom * other.denom)\n\n def __mul__(self, other):\n return Rational(self.numer * other.numer, self.denom * other.denom)\n\n def __truediv__(self, other):\n return Rational(self.numer * other.denom, self.denom * other.numer)\n\n def __str__(self):\n return '{numer}/{denom}'.format(numer=self.numer, denom=self.denom)\n\n def __repr__(self):\n return 'Rational({numer}/{denom})'.format(numer=self.numer, denom=\n self.denom)\n",
"step-4": "class Rational:\n\n def __init__(self, numer, denom):\n self.numer = numer\n self.denom = denom\n\n def __add__(self, other):\n return Rational(self.numer * other.denom + other.numer * self.denom,\n self.denom * other.denom)\n\n def __sub__(self, other):\n return Rational(self.numer * other.denom - other.numer * self.denom,\n self.denom * other.denom)\n\n def __mul__(self, other):\n return Rational(self.numer * other.numer, self.denom * other.denom)\n\n def __truediv__(self, other):\n return Rational(self.numer * other.denom, self.denom * other.numer)\n\n def __str__(self):\n return '{numer}/{denom}'.format(numer=self.numer, denom=self.denom)\n\n def __repr__(self):\n return 'Rational({numer}/{denom})'.format(numer=self.numer, denom=\n self.denom)\n",
"step-5": "# using python3\n\n\nclass Rational:\n def __init__(self, numer, denom):\n self.numer = numer\n self.denom = denom\n\n def __add__(self, other):\n return Rational(\n self.numer * other.denom + other.numer * self.denom,\n self.denom * other.denom\n )\n\n def __sub__(self, other):\n return Rational(\n self.numer * other.denom - other.numer * self.denom,\n self.denom * other.denom\n )\n\n def __mul__(self, other):\n return Rational(\n self.numer * other.numer,\n self.denom * other.denom\n )\n\n def __truediv__(self, other):\n return Rational(\n self.numer * other.denom,\n self.denom * other.numer\n )\n\n def __str__(self):\n return \"{numer}/{denom}\".format(\n numer=self.numer, denom=self.denom\n )\n\n def __repr__(self):\n return \"Rational({numer}/{denom})\".format(\n numer=self.numer, denom=self.denom\n )\n\n",
"step-ids": [
3,
6,
7,
8,
9
]
}
|
[
3,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
class EPInfoLight(EP):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EPInfoLight(EP):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def getRequestDescriptionWithPayloadParameters(self):
ret = {}
ret['name'] = EPInfoLight.NAME
ret['url'] = EPInfoLight.URL_ROUTE_PAR_PAYLOAD
ret['method'] = EPInfoLight.METHOD
ret['payload-desc'] = [{}, {}]
ret['payload-desc'][0]['attribute'] = EPInfoLight.ATTR_ACTUATOR_ID
ret['payload-desc'][0]['type'] = 'integer'
ret['payload-desc'][0]['value'] = 1
return ret
def executeByParameters(self, actuatorId) ->dict:
payload = {}
payload[EPInfoLight.ATTR_ACTUATOR_ID] = int(actuatorId)
return self.executeByPayload(payload)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EPInfoLight(EP):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def getRequestDescriptionWithPayloadParameters(self):
ret = {}
ret['name'] = EPInfoLight.NAME
ret['url'] = EPInfoLight.URL_ROUTE_PAR_PAYLOAD
ret['method'] = EPInfoLight.METHOD
ret['payload-desc'] = [{}, {}]
ret['payload-desc'][0]['attribute'] = EPInfoLight.ATTR_ACTUATOR_ID
ret['payload-desc'][0]['type'] = 'integer'
ret['payload-desc'][0]['value'] = 1
return ret
def executeByParameters(self, actuatorId) ->dict:
payload = {}
payload[EPInfoLight.ATTR_ACTUATOR_ID] = int(actuatorId)
return self.executeByPayload(payload)
def executeByPayload(self, payload) ->dict:
actuatorId = int(payload[EPInfoLight.ATTR_ACTUATOR_ID])
if actuatorId == self.web_gadget.getLightId():
actualValue = self.web_gadget.fetchSavedLightValue()
logging.debug("WEB request: {0} {1} ('{2}': {3})".format(
EPInfoLight.METHOD, EPInfoLight.URL, EPInfoLight.
ATTR_ACTUATOR_ID, actuatorId))
return {'value': actualValue, 'thread': self.web_gadget.
getThreadControllerStatus()}
else:
raise InvalidAPIUsage('No such actuator: {0} or value: {1}'.
format(actuatorId, value), error_code=404)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EPInfoLight(EP):
NAME = 'info_light'
URL = '/info'
URL_ROUTE_PAR_PAYLOAD = '/'
URL_ROUTE_PAR_URL = '/actuatorId/<actuatorId>'
METHOD = 'GET'
ATTR_ACTUATOR_ID = 'actuatorId'
def __init__(self, web_gadget):
self.web_gadget = web_gadget
def getRequestDescriptionWithPayloadParameters(self):
ret = {}
ret['name'] = EPInfoLight.NAME
ret['url'] = EPInfoLight.URL_ROUTE_PAR_PAYLOAD
ret['method'] = EPInfoLight.METHOD
ret['payload-desc'] = [{}, {}]
ret['payload-desc'][0]['attribute'] = EPInfoLight.ATTR_ACTUATOR_ID
ret['payload-desc'][0]['type'] = 'integer'
ret['payload-desc'][0]['value'] = 1
return ret
def executeByParameters(self, actuatorId) ->dict:
payload = {}
payload[EPInfoLight.ATTR_ACTUATOR_ID] = int(actuatorId)
return self.executeByPayload(payload)
def executeByPayload(self, payload) ->dict:
actuatorId = int(payload[EPInfoLight.ATTR_ACTUATOR_ID])
if actuatorId == self.web_gadget.getLightId():
actualValue = self.web_gadget.fetchSavedLightValue()
logging.debug("WEB request: {0} {1} ('{2}': {3})".format(
EPInfoLight.METHOD, EPInfoLight.URL, EPInfoLight.
ATTR_ACTUATOR_ID, actuatorId))
return {'value': actualValue, 'thread': self.web_gadget.
getThreadControllerStatus()}
else:
raise InvalidAPIUsage('No such actuator: {0} or value: {1}'.
format(actuatorId, value), error_code=404)
<|reserved_special_token_1|>
import logging
from exceptions.invalid_api_usage import InvalidAPIUsage
from wgadget.endpoints.ep import EP
class EPInfoLight(EP):
    """GET endpoint reporting the currently saved value of the light
    actuator together with the controller-thread status."""

    NAME = 'info_light'
    URL = '/info'

    URL_ROUTE_PAR_PAYLOAD = '/'
    URL_ROUTE_PAR_URL = '/actuatorId/<actuatorId>'

    METHOD = 'GET'

    ATTR_ACTUATOR_ID = 'actuatorId'

    def __init__(self, web_gadget):
        # Facade exposing the light id, its saved value and the
        # controller-thread status.
        self.web_gadget = web_gadget

    def getRequestDescriptionWithPayloadParameters(self):
        """Return a machine-readable description of this endpoint."""
        ret = {}
        ret['name'] = EPInfoLight.NAME
        ret['url'] = EPInfoLight.URL_ROUTE_PAR_PAYLOAD
        ret['method'] = EPInfoLight.METHOD

        # BUG FIX: the original pre-allocated two descriptor dicts
        # ([{},{}]) but only ever filled the first, leaving a stray
        # empty {} in the published description.
        ret['payload-desc'] = [{}]

        ret['payload-desc'][0]['attribute'] = EPInfoLight.ATTR_ACTUATOR_ID
        ret['payload-desc'][0]['type'] = 'integer'
        ret['payload-desc'][0]['value'] = 1

        return ret

    def executeByParameters(self, actuatorId) -> dict:
        """URL-parameter entry point; normalizes into a payload dict."""
        payload = {}
        payload[EPInfoLight.ATTR_ACTUATOR_ID] = int(actuatorId)
        return self.executeByPayload(payload)

    def executeByPayload(self, payload) -> dict:
        """Payload entry point.

        Returns {'value': ..., 'thread': ...} for the light actuator.
        Raises InvalidAPIUsage (404) for any other actuator id.
        """
        actuatorId = int(payload[EPInfoLight.ATTR_ACTUATOR_ID])

        if actuatorId == self.web_gadget.getLightId():
            actualValue = self.web_gadget.fetchSavedLightValue()

            logging.debug("WEB request: {0} {1} ('{2}': {3})".format(
                EPInfoLight.METHOD, EPInfoLight.URL,
                EPInfoLight.ATTR_ACTUATOR_ID, actuatorId)
            )

            return {"value": actualValue,
                    "thread": self.web_gadget.getThreadControllerStatus()}
        else:
            # BUG FIX: the original formatted this message with an
            # undefined name 'value', so the branch raised NameError
            # instead of the intended InvalidAPIUsage.
            raise InvalidAPIUsage(
                "No such actuator: {0}".format(actuatorId), error_code=404)
|
flexible
|
{
"blob_id": "e5abab3f718bbbd25dcfc49290383203d53248c3",
"index": 9464,
"step-1": "<mask token>\n\n\nclass EPInfoLight(EP):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass EPInfoLight(EP):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def getRequestDescriptionWithPayloadParameters(self):\n ret = {}\n ret['name'] = EPInfoLight.NAME\n ret['url'] = EPInfoLight.URL_ROUTE_PAR_PAYLOAD\n ret['method'] = EPInfoLight.METHOD\n ret['payload-desc'] = [{}, {}]\n ret['payload-desc'][0]['attribute'] = EPInfoLight.ATTR_ACTUATOR_ID\n ret['payload-desc'][0]['type'] = 'integer'\n ret['payload-desc'][0]['value'] = 1\n return ret\n\n def executeByParameters(self, actuatorId) ->dict:\n payload = {}\n payload[EPInfoLight.ATTR_ACTUATOR_ID] = int(actuatorId)\n return self.executeByPayload(payload)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass EPInfoLight(EP):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def getRequestDescriptionWithPayloadParameters(self):\n ret = {}\n ret['name'] = EPInfoLight.NAME\n ret['url'] = EPInfoLight.URL_ROUTE_PAR_PAYLOAD\n ret['method'] = EPInfoLight.METHOD\n ret['payload-desc'] = [{}, {}]\n ret['payload-desc'][0]['attribute'] = EPInfoLight.ATTR_ACTUATOR_ID\n ret['payload-desc'][0]['type'] = 'integer'\n ret['payload-desc'][0]['value'] = 1\n return ret\n\n def executeByParameters(self, actuatorId) ->dict:\n payload = {}\n payload[EPInfoLight.ATTR_ACTUATOR_ID] = int(actuatorId)\n return self.executeByPayload(payload)\n\n def executeByPayload(self, payload) ->dict:\n actuatorId = int(payload[EPInfoLight.ATTR_ACTUATOR_ID])\n if actuatorId == self.web_gadget.getLightId():\n actualValue = self.web_gadget.fetchSavedLightValue()\n logging.debug(\"WEB request: {0} {1} ('{2}': {3})\".format(\n EPInfoLight.METHOD, EPInfoLight.URL, EPInfoLight.\n ATTR_ACTUATOR_ID, actuatorId))\n return {'value': actualValue, 'thread': self.web_gadget.\n getThreadControllerStatus()}\n else:\n raise InvalidAPIUsage('No such actuator: {0} or value: {1}'.\n format(actuatorId, value), error_code=404)\n",
"step-4": "<mask token>\n\n\nclass EPInfoLight(EP):\n NAME = 'info_light'\n URL = '/info'\n URL_ROUTE_PAR_PAYLOAD = '/'\n URL_ROUTE_PAR_URL = '/actuatorId/<actuatorId>'\n METHOD = 'GET'\n ATTR_ACTUATOR_ID = 'actuatorId'\n\n def __init__(self, web_gadget):\n self.web_gadget = web_gadget\n\n def getRequestDescriptionWithPayloadParameters(self):\n ret = {}\n ret['name'] = EPInfoLight.NAME\n ret['url'] = EPInfoLight.URL_ROUTE_PAR_PAYLOAD\n ret['method'] = EPInfoLight.METHOD\n ret['payload-desc'] = [{}, {}]\n ret['payload-desc'][0]['attribute'] = EPInfoLight.ATTR_ACTUATOR_ID\n ret['payload-desc'][0]['type'] = 'integer'\n ret['payload-desc'][0]['value'] = 1\n return ret\n\n def executeByParameters(self, actuatorId) ->dict:\n payload = {}\n payload[EPInfoLight.ATTR_ACTUATOR_ID] = int(actuatorId)\n return self.executeByPayload(payload)\n\n def executeByPayload(self, payload) ->dict:\n actuatorId = int(payload[EPInfoLight.ATTR_ACTUATOR_ID])\n if actuatorId == self.web_gadget.getLightId():\n actualValue = self.web_gadget.fetchSavedLightValue()\n logging.debug(\"WEB request: {0} {1} ('{2}': {3})\".format(\n EPInfoLight.METHOD, EPInfoLight.URL, EPInfoLight.\n ATTR_ACTUATOR_ID, actuatorId))\n return {'value': actualValue, 'thread': self.web_gadget.\n getThreadControllerStatus()}\n else:\n raise InvalidAPIUsage('No such actuator: {0} or value: {1}'.\n format(actuatorId, value), error_code=404)\n",
"step-5": "\nimport logging\nfrom exceptions.invalid_api_usage import InvalidAPIUsage\nfrom wgadget.endpoints.ep import EP\n\nclass EPInfoLight(EP):\n\n NAME = 'info_light'\n URL = '/info'\n\n URL_ROUTE_PAR_PAYLOAD = '/'\n URL_ROUTE_PAR_URL = '/actuatorId/<actuatorId>'\n\n METHOD = 'GET'\n\n ATTR_ACTUATOR_ID = 'actuatorId'\n\n def __init__(self, web_gadget):\n self.web_gadget = web_gadget\n\n def getRequestDescriptionWithPayloadParameters(self):\n\n ret = {}\n ret['name'] = EPInfoLight.NAME\n ret['url'] = EPInfoLight.URL_ROUTE_PAR_PAYLOAD\n ret['method'] = EPInfoLight.METHOD\n\n ret['payload-desc'] = [{},{}]\n\n ret['payload-desc'][0]['attribute'] = EPInfoLight.ATTR_ACTUATOR_ID\n ret['payload-desc'][0]['type'] = 'integer'\n ret['payload-desc'][0]['value'] = 1\n\n return ret\n\n def executeByParameters(self, actuatorId) -> dict:\n payload = {}\n payload[EPInfoLight.ATTR_ACTUATOR_ID] = int(actuatorId)\n return self.executeByPayload(payload)\n\n def executeByPayload(self, payload) -> dict:\n\n actuatorId = int(payload[EPInfoLight.ATTR_ACTUATOR_ID])\n\n if actuatorId == self.web_gadget.getLightId():\n\n actualValue = self.web_gadget.fetchSavedLightValue()\n\n logging.debug( \"WEB request: {0} {1} ('{2}': {3})\".format(\n EPInfoLight.METHOD, EPInfoLight.URL,\n EPInfoLight.ATTR_ACTUATOR_ID, actuatorId)\n )\n\n return {\"value\": actualValue, \"thread\": self.web_gadget.getThreadControllerStatus()}\n# return {\"value\": actualValue, \"thread\": {\"inProgress\": False, \"id\":1}}\n\n else:\n raise InvalidAPIUsage(\"No such actuator: {0} or value: {1}\".format(actuatorId, value), error_code=404)\n",
"step-ids": [
1,
3,
4,
6,
8
]
}
|
[
1,
3,
4,
6,
8
] |
# Demonstrate Python's reference semantics: assignment binds names to
# objects; rebinding a name does not affect other names, while mutating
# a shared list is visible through every name bound to it.

def _show_ids(*objs):
    # Print the identity of each object, one per line.
    for obj in objs:
        print(id(obj))

x = 5
y = x          # both names now refer to the same int object
_show_ids(x, y)
print()

y = 3          # rebinding y leaves x untouched
_show_ids(x, y)
print()

z = [1, 4, 3, 25]
w = z          # w is an alias for the very same list object
print(z)
print(w)
_show_ids(z, w)
print()

w[1] = 10      # mutation through w is visible through z as well,
print(z)       # because there is only one list object
print(w)
_show_ids(z, w)
|
normal
|
{
"blob_id": "956adc5961188458393b56564649ad0a3a787669",
"index": 7327,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(id(x))\nprint(id(y))\nprint()\n<mask token>\nprint(id(x))\nprint(id(y))\nprint()\n<mask token>\nprint(z)\nprint(w)\nprint(id(z))\nprint(id(w))\nprint()\n<mask token>\nprint(z)\nprint(w)\nprint(id(z))\nprint(id(w))\n",
"step-3": "x = 5\ny = x\nprint(id(x))\nprint(id(y))\nprint()\ny = 3\nprint(id(x))\nprint(id(y))\nprint()\nz = [1, 4, 3, 25]\nw = z\nprint(z)\nprint(w)\nprint(id(z))\nprint(id(w))\nprint()\nw[1] = 10\nprint(z)\nprint(w)\nprint(id(z))\nprint(id(w))\n",
"step-4": "x = 5\ny = x\n\nprint(id(x))\nprint(id(y))\n\nprint()\n\ny = 3\n\nprint(id(x))\nprint(id(y))\n\nprint()\n\nz = [1, 4, 3, 25]\nw = z\n\nprint(z)\nprint(w)\nprint(id(z))\nprint(id(w))\n\nprint()\n\nw[1] = 10\n\nprint(z)\nprint(w)\nprint(id(z))\nprint(id(w))\n\n# So when you assign a mutable, you're actually assigning a reference to the mutable,\n# and I have the side effect that when I change an element of that list in one place,\n# it gets changed in both places because it's really just one object, and functions work exactly the same way.\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
import pickle
import preprocessor
import pandas as pd
import sys
from scipy import spatial
class Predict:
    """Predict the closest training attribute for a free-text query by
    scoring its TF-IDF vector against every training row."""

    def __init__(self, text):
        """
        Load the artifacts produced at training time.

        :param text: raw user input string to classify
        """
        self.text = text
        # Trained TF-IDF feature matrix (one row per training sample).
        self.train_vec = np.load('feat.npy')
        # BUG FIX: use context managers so the pickle file handles are
        # closed promptly; the original pickle.load(open(...)) left them
        # to the garbage collector.
        with open('mylist.pkl', 'rb') as f:
            self.train_output = pickle.load(f)
        with open('vector.pkl', 'rb') as f:
            self.vec = pickle.load(f)

    def process_text(self):
        """Clean the input text and transform it into a TF-IDF vector
        (stored on self.vec1 for the similarity step)."""
        prp1 = preprocessor.Preprocess()
        processed_text = prp1.clean_data(self.text)
        self.vec1 = self.vec.transform(pd.Series(processed_text))

    def compute_cosine_similarity(self):
        """Score the input vector against every training row and print
        the attribute of the best-scoring one.

        NOTE(review): the rows are multiplied without explicit
        normalisation here, so this is a dot-product score; confirm the
        training vectors are L2-normalised upstream if true cosine
        similarity is intended.
        """
        scores = []
        for i in range(len(self.train_vec)):
            val = self.vec1 * self.train_vec[i]
            scores.append(val[0])
        out = np.argmax(scores)
        print(self.train_output[out])
if __name__ == '__main__':
    # Join all CLI tokens back into a single query string, then run the
    # full predict pipeline: load artifacts, vectorize, score, print.
    query = ' '.join(sys.argv[1:])
    predictor = Predict(query)
    predictor.process_text()
    predictor.compute_cosine_similarity()
|
normal
|
{
"blob_id": "26df6ddf3533a8648b59f0fa2b03f89c93af7491",
"index": 8154,
"step-1": "<mask token>\n\n\nclass Predict:\n\n def __init__(self, text):\n \"\"\"\n taking the user input string\n loading trained feature numpy array\n loading the output for the numpy array\n loading the vectorizer saved during training\n\n :param text:\n \"\"\"\n self.text = text\n self.train_vec = np.load('feat.npy')\n self.train_output = pickle.load(open('mylist.pkl', 'rb'))\n self.vec = pickle.load(open('vector.pkl', 'rb'))\n <mask token>\n\n def compute_cosine_similarity(self):\n \"\"\"\n creating an empty list for storing the cosine values\n multiplying the input vector with every row of training vector\n appending the cosine value to the list\n taking the index of maximum value of the list\n using the index to find the attribute from the output_vector\n\n \"\"\"\n cos_matrix = []\n for i in range(len(self.train_vec)):\n val = self.vec1 * self.train_vec[i]\n cos_matrix.append(val[0])\n out = np.argmax(cos_matrix)\n print(self.train_output[out])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Predict:\n\n def __init__(self, text):\n \"\"\"\n taking the user input string\n loading trained feature numpy array\n loading the output for the numpy array\n loading the vectorizer saved during training\n\n :param text:\n \"\"\"\n self.text = text\n self.train_vec = np.load('feat.npy')\n self.train_output = pickle.load(open('mylist.pkl', 'rb'))\n self.vec = pickle.load(open('vector.pkl', 'rb'))\n\n def process_text(self):\n \"\"\"\n creating an instance of Preprocess class\n applying clean_data function on the text\n transforming the text to tfidf array\n\n \"\"\"\n prp1 = preprocessor.Preprocess()\n processed_text = prp1.clean_data(self.text)\n self.vec1 = self.vec.transform(pd.Series(processed_text))\n\n def compute_cosine_similarity(self):\n \"\"\"\n creating an empty list for storing the cosine values\n multiplying the input vector with every row of training vector\n appending the cosine value to the list\n taking the index of maximum value of the list\n using the index to find the attribute from the output_vector\n\n \"\"\"\n cos_matrix = []\n for i in range(len(self.train_vec)):\n val = self.vec1 * self.train_vec[i]\n cos_matrix.append(val[0])\n out = np.argmax(cos_matrix)\n print(self.train_output[out])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Predict:\n\n def __init__(self, text):\n \"\"\"\n taking the user input string\n loading trained feature numpy array\n loading the output for the numpy array\n loading the vectorizer saved during training\n\n :param text:\n \"\"\"\n self.text = text\n self.train_vec = np.load('feat.npy')\n self.train_output = pickle.load(open('mylist.pkl', 'rb'))\n self.vec = pickle.load(open('vector.pkl', 'rb'))\n\n def process_text(self):\n \"\"\"\n creating an instance of Preprocess class\n applying clean_data function on the text\n transforming the text to tfidf array\n\n \"\"\"\n prp1 = preprocessor.Preprocess()\n processed_text = prp1.clean_data(self.text)\n self.vec1 = self.vec.transform(pd.Series(processed_text))\n\n def compute_cosine_similarity(self):\n \"\"\"\n creating an empty list for storing the cosine values\n multiplying the input vector with every row of training vector\n appending the cosine value to the list\n taking the index of maximum value of the list\n using the index to find the attribute from the output_vector\n\n \"\"\"\n cos_matrix = []\n for i in range(len(self.train_vec)):\n val = self.vec1 * self.train_vec[i]\n cos_matrix.append(val[0])\n out = np.argmax(cos_matrix)\n print(self.train_output[out])\n\n\nif __name__ == '__main__':\n text = sys.argv[1:]\n text = ' '.join(text)\n p1 = Predict(text)\n p1.process_text()\n p1.compute_cosine_similarity()\n",
"step-4": "import numpy as np\nimport pickle\nimport preprocessor\nimport pandas as pd\nimport sys\nfrom scipy import spatial\n\n\nclass Predict:\n\n def __init__(self, text):\n \"\"\"\n taking the user input string\n loading trained feature numpy array\n loading the output for the numpy array\n loading the vectorizer saved during training\n\n :param text:\n \"\"\"\n self.text = text\n self.train_vec = np.load('feat.npy')\n self.train_output = pickle.load(open('mylist.pkl', 'rb'))\n self.vec = pickle.load(open('vector.pkl', 'rb'))\n\n def process_text(self):\n \"\"\"\n creating an instance of Preprocess class\n applying clean_data function on the text\n transforming the text to tfidf array\n\n \"\"\"\n prp1 = preprocessor.Preprocess()\n processed_text = prp1.clean_data(self.text)\n self.vec1 = self.vec.transform(pd.Series(processed_text))\n\n def compute_cosine_similarity(self):\n \"\"\"\n creating an empty list for storing the cosine values\n multiplying the input vector with every row of training vector\n appending the cosine value to the list\n taking the index of maximum value of the list\n using the index to find the attribute from the output_vector\n\n \"\"\"\n cos_matrix = []\n for i in range(len(self.train_vec)):\n val = self.vec1 * self.train_vec[i]\n cos_matrix.append(val[0])\n out = np.argmax(cos_matrix)\n print(self.train_output[out])\n\n\nif __name__ == '__main__':\n text = sys.argv[1:]\n text = ' '.join(text)\n p1 = Predict(text)\n p1.process_text()\n p1.compute_cosine_similarity()\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()  # register every installed app's ModelAdmins

from django.conf import settings
from django.conf.urls.static import static

from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from dajaxice.core import dajaxice_autodiscover, dajaxice_config
dajaxice_autodiscover()  # collect ajax endpoints registered by apps

from spoticle import views

# URL table for the spoticle app. Order matters: Django dispatches to
# the first pattern whose regex matches the request path.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'spoticle.views.home', name='home'),
    # url(r'^spoticle/', include('spoticle.foo.urls')),
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^play/$', views.index, name='play'),
    url(r'^compose/$', views.compose, name='compose'),
    url(r'^random/$', views.random, name='random'),
    url(r'^play/(?P<pk>\d+)/$', views.DetailView.as_view(), name='quiz'),
    url(r'^compose/(?P<pk>\d+)/$', views.UpdateView.as_view()),

    url(r'^clip/(?P<clip_id>\d+)/$', views.clip, name='clip'),

    # Auth: built-in login/logout views with app-specific template/redirects
    url(r'^accounts/login/$', 'django.contrib.auth.views.login', { 'template_name': 'login.html', 'extra_context': { 'next': '/' }}, name='login'),
    url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', { 'next_page': '/' }, name='logout'),

    url(dajaxice_config.dajaxice_url, include('dajaxice.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
# Serve collected static files and user-uploaded media (development helper).
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
normal
|
{
"blob_id": "68a503b2a94304530e20d79baf9fb094024ba67e",
"index": 539,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.autodiscover()\n<mask token>\ndajaxice_autodiscover()\n<mask token>\nurlpatterns += staticfiles_urlpatterns()\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"step-3": "<mask token>\nadmin.autodiscover()\n<mask token>\ndajaxice_autodiscover()\n<mask token>\nurlpatterns = patterns('', url('^$', views.IndexView.as_view(), name=\n 'index'), url('^play/$', views.index, name='play'), url('^compose/$',\n views.compose, name='compose'), url('^random/$', views.random, name=\n 'random'), url('^play/(?P<pk>\\\\d+)/$', views.DetailView.as_view(), name\n ='quiz'), url('^compose/(?P<pk>\\\\d+)/$', views.UpdateView.as_view()),\n url('^clip/(?P<clip_id>\\\\d+)/$', views.clip, name='clip'), url(\n '^accounts/login/$', 'django.contrib.auth.views.login', {\n 'template_name': 'login.html', 'extra_context': {'next': '/'}}, name=\n 'login'), url('^accounts/logout/$', 'django.contrib.auth.views.logout',\n {'next_page': '/'}, name='logout'), url(dajaxice_config.dajaxice_url,\n include('dajaxice.urls')), url('^admin/doc/', include(\n 'django.contrib.admindocs.urls')), url('^admin/', include(admin.site.urls))\n )\nurlpatterns += staticfiles_urlpatterns()\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"step-4": "from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nadmin.autodiscover()\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom dajaxice.core import dajaxice_autodiscover, dajaxice_config\ndajaxice_autodiscover()\nfrom spoticle import views\nurlpatterns = patterns('', url('^$', views.IndexView.as_view(), name=\n 'index'), url('^play/$', views.index, name='play'), url('^compose/$',\n views.compose, name='compose'), url('^random/$', views.random, name=\n 'random'), url('^play/(?P<pk>\\\\d+)/$', views.DetailView.as_view(), name\n ='quiz'), url('^compose/(?P<pk>\\\\d+)/$', views.UpdateView.as_view()),\n url('^clip/(?P<clip_id>\\\\d+)/$', views.clip, name='clip'), url(\n '^accounts/login/$', 'django.contrib.auth.views.login', {\n 'template_name': 'login.html', 'extra_context': {'next': '/'}}, name=\n 'login'), url('^accounts/logout/$', 'django.contrib.auth.views.logout',\n {'next_page': '/'}, name='logout'), url(dajaxice_config.dajaxice_url,\n include('dajaxice.urls')), url('^admin/doc/', include(\n 'django.contrib.admindocs.urls')), url('^admin/', include(admin.site.urls))\n )\nurlpatterns += staticfiles_urlpatterns()\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"step-5": "from django.conf.urls import patterns, include, url\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom dajaxice.core import dajaxice_autodiscover, dajaxice_config\ndajaxice_autodiscover()\n\nfrom spoticle import views\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'spoticle.views.home', name='home'),\n # url(r'^spoticle/', include('spoticle.foo.urls')),\n url(r'^$', views.IndexView.as_view(), name='index'),\n url(r'^play/$', views.index, name='play'),\n url(r'^compose/$', views.compose, name='compose'),\n url(r'^random/$', views.random, name='random'),\n url(r'^play/(?P<pk>\\d+)/$', views.DetailView.as_view(), name='quiz'),\n url(r'^compose/(?P<pk>\\d+)/$', views.UpdateView.as_view()),\n\n url(r'^clip/(?P<clip_id>\\d+)/$', views.clip, name='clip'),\n\n # Auth\n url(r'^accounts/login/$', 'django.contrib.auth.views.login', { 'template_name': 'login.html', 'extra_context': { 'next': '/' }}, name='login'),\n url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', { 'next_page': '/' }, name='logout'),\n\n url(dajaxice_config.dajaxice_url, include('dajaxice.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n)\nurlpatterns += staticfiles_urlpatterns()\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 3.2.8 on 2021-10-20 08:25
from django.db import migrations
class Migration(migrations.Migration):
    """Set singular verbose_name options on several app models."""

    dependencies = [
        ('app', '0006_auto_20211020_0817'),
    ]

    # Model name -> new verbose_name. Dict order is preserved (3.7+),
    # so the generated operations match the original ordering exactly.
    _VERBOSE_NAMES = {
        'currencies': 'Currencie',
        'proposals': 'Proposal',
        'transactions': 'Transaction',
        'tutorials': 'Tutorial',
        'userkyc': 'KYC Document',
    }

    operations = [
        migrations.AlterModelOptions(
            name=model,
            options={'verbose_name': label},
        )
        for model, label in _VERBOSE_NAMES.items()
    ]
|
normal
|
{
"blob_id": "a6cc0078fb37f9c63e119046193f521290c9fb21",
"index": 4634,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('app', '0006_auto_20211020_0817')]\n operations = [migrations.AlterModelOptions(name='currencies', options={\n 'verbose_name': 'Currencie'}), migrations.AlterModelOptions(name=\n 'proposals', options={'verbose_name': 'Proposal'}), migrations.\n AlterModelOptions(name='transactions', options={'verbose_name':\n 'Transaction'}), migrations.AlterModelOptions(name='tutorials',\n options={'verbose_name': 'Tutorial'}), migrations.AlterModelOptions\n (name='userkyc', options={'verbose_name': 'KYC Document'})]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('app', '0006_auto_20211020_0817')]\n operations = [migrations.AlterModelOptions(name='currencies', options={\n 'verbose_name': 'Currencie'}), migrations.AlterModelOptions(name=\n 'proposals', options={'verbose_name': 'Proposal'}), migrations.\n AlterModelOptions(name='transactions', options={'verbose_name':\n 'Transaction'}), migrations.AlterModelOptions(name='tutorials',\n options={'verbose_name': 'Tutorial'}), migrations.AlterModelOptions\n (name='userkyc', options={'verbose_name': 'KYC Document'})]\n",
"step-5": "# Generated by Django 3.2.8 on 2021-10-20 08:25\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0006_auto_20211020_0817'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='currencies',\n options={'verbose_name': 'Currencie'},\n ),\n migrations.AlterModelOptions(\n name='proposals',\n options={'verbose_name': 'Proposal'},\n ),\n migrations.AlterModelOptions(\n name='transactions',\n options={'verbose_name': 'Transaction'},\n ),\n migrations.AlterModelOptions(\n name='tutorials',\n options={'verbose_name': 'Tutorial'},\n ),\n migrations.AlterModelOptions(\n name='userkyc',\n options={'verbose_name': 'KYC Document'},\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Standard library
import json
import os
import urllib.request
from contextlib import closing
from urllib.request import Request, urlopen

# Third-party
from bs4 import BeautifulSoup
from requests import get
from requests.exceptions import RequestException
"""
Web Scraper ======================================================================
"""
def simple_get(url):
    """
    Fetch `url` with an HTTP GET request.

    Returns the raw response body when the server answered with an
    HTML-ish payload (as judged by `is_good_response`); returns None on
    a non-HTML/failed response or on any requests-level exception,
    which is logged via `log_error`.
    """
    try:
        response = get(url, stream=True)
        # closing() guarantees the streamed connection is released even
        # when the body is never consumed.
        with closing(response) as resp:
            return resp.content if is_good_response(resp) else None
    except RequestException as e:
        log_error('Error during requests to {0} : {1}'.format(url, str(e)))
        return None
def is_good_response(resp):
    """
    Return True if `resp` looks like a successful HTML response.

    A response qualifies when its status code is 200 and its
    Content-Type header mentions 'html'.  A missing Content-Type header
    (which previously raised KeyError) now simply yields False.
    """
    # .get() with a '' default avoids a KeyError when the server omits
    # the header; '' also makes the None-check below unnecessary.
    content_type = resp.headers.get('Content-Type', '').lower()
    return (resp.status_code == 200
            and content_type.find('html') > -1)
def log_error(e):
    """
    Report an error encountered while scraping.

    The "log" is currently just stdout; keeping this indirection means
    call sites never change if real logging is wired in later.
    """
    print(e)
def save_json(file, path='gif_list.txt'):
    """
    Write the string `file` to `path`.

    The default keeps the historical hard-coded 'gif_list.txt' target,
    so existing callers are unaffected; new callers can direct the
    output anywhere.

    NOTE(review): despite the name, no JSON serialisation happens here —
    the caller is expected to pass an already-serialised string.
    """
    with open(path, 'w') as f:
        f.write(file)
"""
Scraping
"""
# req = Request('https://play.pokemonshowdown.com/sprites/ani/', headers={'User-Agent': 'Mozilla/5.0'})
# url = urlopen(req).read()
# url = 'https://play.pokemonshowdown.com/sprites/ani/'
# raw_html = simple_get(url)
# soup = BeautifulSoup(url, 'lxml')
# # a = soup.find_all('td', attrs={'valign': 'top'})
# a = soup.find_all('a')
# videolist = []
# print(a)
# for v in a:
# tmp = v['href']
# videolist.append(tmp)
# filename = videolist[5:]
# print(filename)
def dl_img(url, file_path, file_name):
    """
    Download the image at *url* to ``file_path/file_name.gif``.

    Installs a global urllib opener with a browser-like User-Agent
    (some hosts reject the default Python agent) before retrieving.
    NOTE(review): install_opener mutates process-wide urllib state on
    every call.
    """
    destination = '{0}/{1}.gif'.format(file_path, file_name)
    opener = urllib.request.build_opener()
    opener.addheaders = [('User-agent', 'Mozilla/5.0')]
    urllib.request.install_opener(opener)
    urllib.request.urlretrieve(url, destination)
filename = ['abomasnow-f.gif', 'abomasnow-mega.gif', 'abomasnow.gif', 'abra.gif', 'absol-mega.gif', 'absol.gif', 'accelgor.gif', 'aegislash-blade.gif', 'aegislash.gif', 'aerodactyl-mega.gif', 'aerodactyl.gif', 'aggron-mega.gif', 'aggron.gif', 'aipom-f.gif', 'aipom.gif', 'alakazam-mega.gif', 'alakazam.gif', 'alcremie-caramel-swirl-berry.gif', 'alcremie-caramel-swirl-clover.gif', 'alcremie-caramel-swirl-flower.gif', 'alcremie-caramel-swirl-love.gif', 'alcremie-caramel-swirl-ribbon.gif', 'alcremie-caramel-swirl-star.gif', 'alcremie-caramelswirl.gif', 'alcremie-gmax.gif', 'alcremie-lemon-cream-berry.gif', 'alcremie-lemon-cream-clover.gif', 'alcremie-lemon-cream-flower.gif', 'alcremie-lemon-cream-love.gif', 'alcremie-lemon-cream-ribbon.gif', 'alcremie-lemon-cream-star.gif', 'alcremie-lemoncream.gif', 'alcremie-matcha-cream-berry.gif', 'alcremie-matcha-cream-clover.gif', 'alcremie-matcha-cream-flower.gif', 'alcremie-matcha-cream-love.gif', 'alcremie-matcha-cream-ribbon.gif', 'alcremie-matcha-cream-star.gif', 'alcremie-matchacream.gif', 'alcremie-mint-cream-berry.gif', 'alcremie-mint-cream-clover.gif', 'alcremie-mint-cream-flower.gif', 'alcremie-mint-cream-love.gif', 'alcremie-mint-cream-ribbon.gif', 'alcremie-mint-cream-star.gif', 'alcremie-mintcream.gif', 'alcremie-rainbow-swirl-berry.gif', 'alcremie-rainbow-swirl-clover.gif', 'alcremie-rainbow-swirl-flower.gif', 'alcremie-rainbow-swirl-love.gif', 'alcremie-rainbow-swirl-ribbon.gif', 'alcremie-rainbow-swirl-star.gif', 'alcremie-rainbowswirl.gif', 'alcremie-ruby-cream-berry.gif', 'alcremie-ruby-cream-clover.gif', 'alcremie-ruby-cream-flower.gif', 'alcremie-ruby-cream-love.gif', 'alcremie-ruby-cream-ribbon.gif', 'alcremie-ruby-cream-star.gif', 'alcremie-ruby-swirl-berry.gif', 'alcremie-ruby-swirl-clover.gif', 'alcremie-ruby-swirl-flower.gif', 'alcremie-ruby-swirl-love.gif', 'alcremie-ruby-swirl-ribbon.gif', 'alcremie-ruby-swirl-star.gif', 'alcremie-rubycream.gif', 'alcremie-rubyswirl.gif', 
'alcremie-salted-cream-berry.gif', 'alcremie-salted-cream-clover.gif', 'alcremie-salted-cream-flower.gif', 'alcremie-salted-cream-love.gif', 'alcremie-salted-cream-ribbon.gif', 'alcremie-salted-cream-star.gif', 'alcremie-saltedcream.gif', 'alcremie-vanilla-cream-berry.gif', 'alcremie-vanilla-cream-clover.gif', 'alcremie-vanilla-cream-flower.gif', 'alcremie-vanilla-cream-love.gif', 'alcremie-vanilla-cream-ribbon.gif', 'alcremie-vanilla-cream-star.gif', 'alcremie.gif', 'alomomola.gif', 'altaria-mega.gif', 'altaria.gif', 'amaura.gif', 'ambipom-f.gif', 'ambipom.gif', 'amoonguss.gif', 'ampharos-mega.gif', 'ampharos.gif', 'anorith.gif', 'appletun-gmax.gif', 'appletun.gif', 'applin.gif', 'araquanid-totem.gif', 'araquanid.gif', 'arbok.gif', 'arcanine.gif', 'arceus-bug.gif', 'arceus-dark.gif', 'arceus-dragon.gif', 'arceus-electric.gif', 'arceus-fairy.gif', 'arceus-fighting.gif', 'arceus-fire.gif', 'arceus-flying.gif', 'arceus-ghost.gif', 'arceus-grass.gif', 'arceus-ground.gif', 'arceus-ice.gif', 'arceus-poison.gif', 'arceus-psychic.gif', 'arceus-rock.gif', 'arceus-steel.gif', 'arceus-water.gif', 'arceus.gif', 'archen.gif', 'archeops.gif', 'arctovish.gif', 'arctozolt.gif', 'ariados.gif', 'armaldo.gif', 'aromatisse.gif', 'aron.gif', 'arrokuda.gif', 'articuno.gif', 'audino-mega.gif', 'audino.gif', 'aurorus.gif', 'aurumoth.gif', 'avalugg.gif', 'axew.gif', 'azelf.gif', 'azumarill.gif', 'azurill.gif', 'bagon.gif', 'baltoy.gif', 'banette-mega.gif', 'banette.gif', 'barbaracle.gif', 'barboach.gif', 'barboarch.gif', 'barraskewda.gif', 'basculin-bluestriped.gif', 'basculin.gif', 'bastiodon.gif', 'bayleef.gif', 'beartic.gif', 'beautifly-f.gif', 'beautifly.gif', 'beedrill-mega.gif', 'beedrill.gif', 'beheeyem.gif', 'beldum.gif', 'bellossom.gif', 'bellsprout.gif', 'bergmite.gif', 'bewear.gif', 'bibarel-f.gif', 'bibarel.gif', 'bidoof-f.gif', 'bidoof.gif', 'binacle.gif', 'bisharp.gif', 'blacephalon.gif', 'blastoise-mega.gif', 'blastoise.gif', 'blaziken-f.gif', 'blaziken-mega.gif', 
'blaziken.gif', 'blipbug.gif', 'blissey.gif', 'blitzle.gif', 'boldore.gif', 'boltund.gif', 'bonsly.gif', 'bouffalant.gif', 'bounsweet.gif', 'braixen.gif', 'braviary.gif', 'breloom.gif', 'brionne.gif', 'bronzong.gif', 'bronzor.gif', 'bruxish.gif', 'budew.gif', 'buizel-f.gif', 'buizel.gif', 'bulbasaur.gif', 'buneary.gif', 'bunnelby.gif', 'burmy-sandy.gif', 'burmy-trash.gif', 'burmy.gif', 'butterfree-gmax.gif', 'butterfree.gif', 'buzzwole.gif', 'cacnea.gif', 'cacturne-f.gif', 'cacturne.gif', 'camerupt-f.gif', 'camerupt-mega.gif', 'camerupt.gif', 'camperupt-mega.gif', 'carbink.gif', 'caribolt.gif', 'carkol.gif', 'carnivine.gif', 'carracosta.gif', 'carvanha.gif', 'cascoon.gif', 'castform-rainy.gif', 'castform-snowy.gif', 'castform-sunny.gif', 'castform.gif', 'caterpie.gif', 'cawmodore.gif', 'celebi.gif', 'celesteela.gif', 'centiskorch-gmax.gif', 'centiskorch.gif', 'chandelure.gif', 'chansey.gif', 'charizard-gmax.gif', 'charizard-megax.gif', 'charizard-megay.gif', 'charizard.gif', 'charjabug.gif', 'charmander.gif', 'charmeleon.gif', 'chatot.gif', 'cherrim-sunshine.gif', 'cherrim.gif', 'cherubi.gif', 'chesnaught.gif', 'chespin.gif', 'chewtle.gif', 'chikorita.gif', 'chimchar.gif', 'chimecho.gif', 'chinchou.gif', 'chingling.gif', 'cinccino.gif', 'cinderace.gif', 'clamperl.gif', 'clauncher.gif', 'clawitzer.gif', 'claydol.gif', 'clefable.gif', 'clefairy.gif', 'cleffa.gif', 'clobbopus.gif', 'cloyster.gif', 'coalossal-gmax.gif', 'coalossal.gif', 'cobalion.gif', 'cofagrigus.gif', 'combee-f.gif', 'combee.gif', 'combusken-f.gif', 'combusken.gif', 'comfey.gif', 'conkeldurr.gif', 'copperajah-gmax.gif', 'copperajah.gif', 'corphish.gif', 'corsola-galar.gif', 'corsola.gif', 'corviknight-gmax.gif', 'corviknight.gif', 'corvisquire.gif', 'cosmoem.gif', 'cosmog.gif', 'cottonee.gif', 'crabominable.gif', 'crabrawler.gif', 'cradily.gif', 'cramorant-gorging.gif', 'cramorant-gulping.gif', 'cramorant.gif', 'cranidos.gif', 'crawdaunt.gif', 'cresselia.gif', 'croagunk-f.gif', 'croagunk.gif', 
'crobat.gif', 'croconaw.gif', 'crucibelle-mega.gif', 'crucibelle.gif', 'crustle.gif', 'cryogonal.gif', 'cubchoo.gif', 'cubone.gif', 'cufant.gif', 'cursola.gif', 'cutiefly.gif', 'cyndaquil.gif', 'darkrai.gif', 'darmanitan-galar.gif', 'darmanitan-galarzen.gif', 'darmanitan-zen.gif', 'darmanitan.gif', 'dartrix.gif', 'darumaka-galar.gif', 'darumaka.gif', 'decidueye.gif', 'dedenne.gif', 'deerling-autumn.gif', 'deerling-summer.gif', 'deerling-winter.gif', 'deerling.gif', 'deino.gif', 'delcatty.gif', 'delibird.gif', 'delphox.gif', 'deoxys-attack.gif', 'deoxys-defense.gif', 'deoxys-speed.gif', 'deoxys.gif', 'dewgong.gif', 'dewott.gif', 'dewpider.gif', 'dhelmise.gif', 'dialga.gif', 'diancie-mega.gif', 'diancie.gif', 'diggersby.gif', 'diglett-alola.gif', 'diglett.gif', 'ditto.gif', 'dodrio.gif', 'doduo.gif', 'donphan-f.gif', 'donphan.gif', 'dottler.gif', 'doublade.gif', 'dracovish.gif', 'dracozolt.gif', 'dragalge.gif', 'dragapult.gif', 'dragonair.gif', 'dragonite.gif', 'drakloak.gif', 'drampa.gif', 'drapion.gif', 'dratini.gif', 'drednaw-gmax.gif', 'drednaw.gif', 'dreepy.gif', 'drifblim.gif', 'drifloon.gif', 'drilbur.gif', 'drizzile.gif', 'drowzee.gif', 'druddigon.gif', 'dubwool.gif', 'ducklett.gif', 'dugtrio-alola.gif', 'dugtrio.gif', 'dunsparce.gif', 'duosion.gif', 'duraludon-gmax.gif', 'duraludon.gif', 'durant.gif', 'dusclops.gif', 'dusknoir.gif', 'duskull.gif', 'dustox-f.gif', 'dustox.gif', 'dwebble.gif', 'eelektrik.gif', 'eelektross.gif', 'eevee-gmax.gif', 'eevee-starter.gif', 'eevee.gif', 'eiscue-noice.gif', 'eiscue.gif', 'ekans.gif', 'eldegoss.gif', 'electabuzz.gif', 'electivire.gif', 'electrike.gif', 'electrode.gif', 'elekid.gif', 'elgyem.gif', 'emboar.gif', 'emolga.gif', 'empoleon.gif', 'entei.gif', 'equilibra.gif', 'escavalier.gif', 'espeon.gif', 'espurr.gif', 'eternatus-eternamax.gif', 'eternatus.gif', 'excadrill.gif', 'exeggcute.gif', 'exeggutor-alola.gif', 'exeggutor.gif', 'exploud.gif', 'falinks.gif', 'farfetchd-galar.gif', 'farfetchd.gif', 'fearow.gif', 
'feebas.gif', 'fennekin.gif', 'feraligatr.gif', 'ferroseed.gif', 'ferrothorn.gif', 'fidgit.gif', 'finneon-f.gif', 'finneon.gif', 'flaaffy.gif', 'flabebe-blue.gif', 'flabebe-orange.gif', 'flabebe-white.gif', 'flabebe-yellow.gif', 'flabebe.gif', 'flapple-gmax.gif', 'flapple.gif', 'flareon.gif', 'fletchinder.gif', 'fletchling.gif', 'floatzel-f.gif', 'floatzel.gif', 'floette-blue.gif', 'floette-eternal.gif', 'floette-orange.gif', 'floette-white.gif', 'floette-yellow.gif', 'floette.gif', 'florges-blue.gif', 'florges-orange.gif', 'florges-white.gif', 'florges-yellow.gif', 'florges.gif', 'flygon.gif', 'fomantis.gif', 'foongus.gif', 'forretress.gif', 'fraxure.gif', 'frillish-f.gif', 'frillish.gif', 'froakie.gif', 'frogadier.gif', 'froslass.gif', 'frosmoth.gif', 'furfrou-dandy.gif', 'furfrou-debutante.gif', 'furfrou-diamond.gif', 'furfrou-heart.gif', 'furfrou-kabuki.gif', 'furfrou-lareine.gif', 'furfrou-matron.gif', 'furfrou-pharaoh.gif', 'furfrou-star.gif', 'furfrou.gif', 'furret.gif', 'gabite-f.gif', 'gabite.gif', 'gallade-mega.gif', 'gallade.gif', 'galvantula.gif', 'garbodor-gmax.gif', 'garbodor.gif', 'garchomp-f.gif', 'garchomp-mega.gif', 'garchomp.gif', 'gardevoir-mega.gif', 'gardevoir.gif', 'gastly.gif', 'gastrodon-east.gif', 'gastrodon.gif', 'genesect-burn.gif', 'genesect-chill.gif', 'genesect-douse.gif', 'genesect-shock.gif', 'genesect.gif', 'gengar-gmax.gif', 'gengar-mega.gif', 'gengar.gif', 'geodude-alola.gif', 'geodude.gif', 'gible-f.gif', 'gible.gif', 'gigalith.gif', 'girafarig-f.gif', 'girafarig.gif', 'giratina-origin.gif', 'giratina.gif', 'glaceon.gif', 'glalie-mega.gif', 'glalie.gif', 'glameow.gif', 'gligar-f.gif', 'gligar.gif', 'gliscor.gif', 'gloom.gif', 'gogoat.gif', 'golbat-f.gif', 'golbat.gif', 'goldeen.gif', 'golduck.gif', 'golem-alola.gif', 'golem.gif', 'golett.gif', 'golisopod.gif', 'golurk.gif', 'goodra.gif', 'goomy.gif', 'gorebyss.gif', 'gossifleur.gif', 'gothita.gif', 'gothitelle.gif', 'gothorita.gif', 'gourgeist-large.gif', 'gourgeist-small.gif', 
'gourgeist-super.gif', 'gourgeist.gif', 'granbull.gif', 'grapploct.gif', 'graveler-alola.gif', 'graveler.gif', 'greedent.gif', 'greninja-ash.gif', 'greninja.gif', 'grimer-alola.gif', 'grimer.gif', 'grimmsnarl-gmax.gif', 'grimmsnarl.gif', 'grookey.gif', 'grotle.gif', 'groudon-primal.gif', 'groudon.gif', 'grovyle.gif', 'growlithe.gif', 'grubbin.gif', 'grumpig.gif', 'gulpin-f.gif', 'gulpin.gif', 'gumshoos-totem.gif', 'gumshoos.gif', 'gurdurr.gif', 'guzzlord.gif', 'gyarados-mega.gif', 'gyarados.gif', 'hakamoo.gif', 'happiny.gif', 'hariyama.gif', 'hatenna.gif', 'hatterene-gmax.gif', 'hatterene.gif', 'hattrem.gif', 'haunter.gif', 'hawlucha.gif', 'haxorus.gif', 'heatmor.gif', 'heatran.gif', 'heliolisk.gif', 'helioptile.gif', 'heracross-f.gif', 'heracross-mega.gif', 'heracross.gif', 'herdier.gif', 'hippopotas-f.gif', 'hippopotas.gif', 'hippowdon-f.gif', 'hippowdon.gif', 'hitmonchan.gif', 'hitmonlee.gif', 'hitmontop.gif', 'honchkrow.gif', 'honedge.gif', 'hooh.gif', 'hoopa-mega.gif', 'hoopa-unbound.gif', 'hoopa.gif', 'hoothoot.gif', 'hoppip.gif', 'horsea.gif', 'houndoom-f.gif', 'houndoom-mega.gif', 'houndoom.gif', 'houndour.gif', 'huntail.gif', 'hydreigon.gif', 'hypno.gif', 'igglybuff.gif', 'illumise.gif', 'impidimp.gif', 'incineroar.gif', 'indeedee-f.gif', 'indeedee.gif', 'infernape.gif', 'inkay.gif', 'inteleon.gif', 'ivysaur.gif', 'jangmoo.gif', 'jellicent-f.gif', 'jellicent.gif', 'jigglypuff.gif', 'jirachi.gif', 'jolteon.gif', 'joltik.gif', 'jumbao.gif', 'jumpluff.gif', 'jynx.gif', 'kabuto.gif', 'kabutops.gif', 'kadabra.gif', 'kakuna.gif', 'kangaskhan-mega.gif', 'kangaskhan.gif', 'karrablast.gif', 'kartana.gif', 'kecleon.gif', 'keldeo-resolute.gif', 'keldeo.gif', 'kerfluffle-f.gif', 'kerfluffle.gif', 'kingdra.gif', 'kingler-gmax.gif', 'kingler.gif', 'kirlia.gif', 'kitsunoh-f.gif', 'kitsunoh.gif', 'klang.gif', 'klefki.gif', 'klink.gif', 'klinklang.gif', 'koffing.gif', 'komala.gif', 'kommo-o-totem.gif', 'kommoo.gif', 'krabby.gif', 'kricketot-f.gif', 'kricketot.gif', 
'kricketune-f.gif', 'kricketune.gif', 'krilowatt.gif', 'krokorok.gif', 'krookodile.gif', 'kyogre-primal.gif', 'kyogre.gif', 'kyurem-black.gif', 'kyurem-white.gif', 'kyurem.gif', 'lairon.gif', 'lampent.gif', 'landorus-therian.gif', 'landorus.gif', 'lanturn.gif', 'lapras-gmax.gif', 'lapras.gif', 'larvesta.gif', 'larvitar.gif', 'latias-mega.gif', 'latias.gif', 'latios-mega.gif', 'latios.gif', 'leafeon.gif', 'leavanny.gif', 'ledian-f.gif', 'ledian.gif', 'ledyba-f.gif', 'ledyba.gif', 'lickilicky.gif', 'lickitung.gif', 'liepard.gif', 'lileep.gif', 'lilligant.gif', 'lillipup.gif', 'linoone-galar.gif', 'linoone.gif', 'litleo.gif', 'litten.gif', 'litwick.gif', 'lombre.gif', 'lopunny-mega.gif', 'lopunny.gif', 'lotad.gif', 'loudred.gif', 'lucario-mega.gif', 'lucario.gif', 'ludicolo-f.gif', 'ludicolo.gif', 'lugia.gif', 'lumineon-f.gif', 'lumineon.gif', 'lunala.gif', 'lunatone.gif', 'lurantis-totem.gif', 'lurantis.gif', 'luvdisc.gif', 'luxio-f.gif', 'luxio.gif', 'luxray-f.gif', 'luxray.gif', 'lycanroc-dusk.gif', 'lycanroc-midnight.gif', 'lycanroc.gif', 'machamp-gmax.gif', 'machamp.gif', 'machoke.gif', 'machop.gif', 'magby.gif', 'magcargo.gif', 'magearna-original.gif', 'magearna.gif', 'magikarp-f.gif', 'magikarp.gif', 'magmar.gif', 'magmortar.gif', 'magnemite.gif', 'magneton.gif', 'magnezone.gif', 'makuhita.gif', 'malaconda.gif', 'malamar.gif', 'mamoswine-f.gif', 'mamoswine.gif', 'manaphy.gif', 'mandibuzz.gif', 'manectric-mega.gif', 'manectric.gif', 'mankey.gif', 'mantine.gif', 'mantyke.gif', 'maractus.gif', 'mareanie.gif', 'mareep.gif', 'marill.gif', 'marowak-alola-totem.gif', 'marowak-alola.gif', 'marowak-totem.gif', 'marowak.gif', 'marshadow.gif', 'marshtomp.gif', 'masquerain.gif', 'mawile-mega.gif', 'mawile.gif', 'medicham-f.gif', 'medicham-mega.gif', 'medicham.gif', 'meditite-f.gif', 'meditite.gif', 'meganium-f.gif', 'meganium.gif', 'melmetal-gmax.gif', 'melmetal.gif', 'meloetta-pirouette.gif', 'meloetta.gif', 'meltan.gif', 'meowstic-f.gif', 'meowstic.gif', 
'meowth-alola.gif', 'meowth-galar.gif', 'meowth-gmax.gif', 'meowth.gif', 'mesprit.gif', 'metagross-mega.gif', 'metagross.gif', 'metang.gif', 'metapod.gif', 'mew.gif', 'mewtwo-megax.gif', 'mewtwo-megay.gif', 'mewtwo.gif', 'mienfoo.gif', 'mienshao.gif', 'mightyena.gif', 'milcery.gif', 'milotic-f.gif', 'milotic.gif', 'miltank.gif', 'mimejr.gif', 'mimikyu-busted-totem.gif', 'mimikyu-busted.gif', 'mimikyu-totem.gif', 'mimikyu.gif', 'minccino.gif', 'minior-blue.gif', 'minior-green.gif', 'minior-indigo.gif', 'minior-meteor.gif', 'minior-orange.gif', 'minior-violet.gif', 'minior-yellow.gif', 'minior.gif', 'minun.gif', 'misdreavus.gif', 'mismagius.gif', 'mollux.gif', 'moltres.gif', 'monferno.gif', 'morelull.gif', 'morgrem.gif', 'morpeko-hangry.gif', 'morpeko.gif', 'mothim.gif', 'mrmime-galar.gif', 'mrmime.gif', 'mrrime.gif', 'mudbray.gif', 'mudkip.gif', 'mudsdale.gif', 'muk-alola.gif', 'muk.gif', 'munchlax.gif', 'munna.gif', 'murkrow-f.gif', 'murkrow.gif', 'musharna.gif', 'naganadel.gif', 'natu.gif', 'naviathan.gif', 'necrozma-dawnwings.gif', 'necrozma-duskmane.gif', 'necrozma-ultra.gif', 'necrozma.gif', 'necturna.gif', 'nickit.gif', 'nidoking.gif', 'nidoqueen.gif', 'nidoran-f.gif', 'nidoran.gif', 'nidoranf.gif', 'nidoranm.gif', 'nidorina.gif', 'nidorino.gif', 'nihilego.gif', 'nincada.gif', 'ninetales-alola.gif', 'ninetales.gif', 'ninjask.gif', 'noctowl.gif', 'noibat.gif', 'noivern.gif', 'nosepass.gif', 'numel-f.gif', 'numel.gif', 'nuzleaf-f.gif', 'nuzleaf.gif', 'obstagoon.gif', 'octillery-f.gif', 'octillery.gif', 'oddish.gif', 'omanyte.gif', 'omastar.gif', 'onix.gif', 'oranguru.gif', 'orbeetle-gmax.gif', 'orbeetle.gif', 'oricorio-pau.gif', 'oricorio-pompom.gif', 'oricorio-sensu.gif', 'oricorio.gif', 'oshawott.gif', 'pachirisu-f.gif', 'pachirisu.gif', 'pajantom.gif', 'palkia.gif', 'palossand.gif', 'palpitoad.gif', 'pancham.gif', 'pangoro.gif', 'panpour.gif', 'pansage.gif', 'pansear.gif', 'paras.gif', 'parasect.gif', 'passimian.gif', 'patrat.gif', 'pawniard.gif', 
'pelipper.gif', 'perrserker.gif', 'persian-alola.gif', 'persian.gif', 'petilil.gif', 'phanpy.gif', 'phantump.gif', 'pheromosa.gif', 'phione.gif', 'pichu.gif', 'pidgeot-mega.gif', 'pidgeot.gif', 'pidgeotto.gif', 'pidgey.gif', 'pidove.gif', 'pignite.gif', 'pikachu-alola.gif', 'pikachu-belle.gif', 'pikachu-cosplay.gif', 'pikachu-f.gif', 'pikachu-gmax.gif', 'pikachu-hoenn.gif', 'pikachu-kalos.gif', 'pikachu-libre.gif', 'pikachu-original.gif', 'pikachu-partner.gif', 'pikachu-phd.gif', 'pikachu-pop-star.gif', 'pikachu-popstar.gif', 'pikachu-rockstar.gif', 'pikachu-sinnoh.gif', 'pikachu-starter-f.gif', 'pikachu-starter.gif', 'pikachu-unova.gif', 'pikachu.gif', 'pikipek.gif', 'piloswine-f.gif', 'piloswine.gif', 'pincurchin.gif', 'pineco.gif', 'pinsir-mega.gif', 'pinsir.gif', 'piplup.gif', 'plasmanta.gif', 'pluffle.gif', 'plusle.gif', 'poipole.gif', 'pokestarblackbelt.gif', 'pokestarblackdoor.gif', 'pokestarbrycenman.gif', 'pokestarf00.gif', 'pokestarf002.gif', 'pokestargiant.gif', 'pokestarhumanoid.gif', 'pokestarmonster.gif', 'pokestarmt.gif', 'pokestarmt2.gif', 'pokestarsmeargle.gif', 'pokestarspirit.gif', 'pokestartransport.gif', 'pokestarufo-2.gif', 'pokestarufo.gif', 'pokestarufo2.gif', 'pokestarwhitedoor.gif', 'politoed-f.gif', 'politoed.gif', 'poliwag.gif', 'poliwhirl.gif', 'poliwrath.gif', 'polteageist-antique.gif', 'polteageist.gif', 'ponyta-galar.gif', 'ponyta.gif', 'poochyena.gif', 'popplio.gif', 'porygon-z.gif', 'porygon.gif', 'porygon2.gif', 'porygonz.gif', 'primarina.gif', 'primeape.gif', 'prinplup.gif', 'probopass.gif', 'psyduck.gif', 'pumpkaboo-large.gif', 'pumpkaboo-small.gif', 'pumpkaboo-super.gif', 'pumpkaboo.gif', 'pupitar.gif', 'purrloin.gif', 'purugly.gif', 'pyroak.gif', 'pyroar-f.gif', 'pyroar.gif', 'pyukumuku.gif', 'quagsire-f.gif', 'quagsire.gif', 'quilava.gif', 'quilladin.gif', 'qwilfish.gif', 'raboot.gif', 'raichu-alola.gif', 'raichu.gif', 'raikou.gif', 'ralts.gif', 'rampardos.gif', 'rapidash-galar.gif', 'rapidash.gif', 
'raticate-alola-totem.gif', 'raticate-alola.gif', 'raticate-f.gif', 'raticate-totem-a.gif', 'raticate.gif', 'rattata-alola.gif', 'rattata-f.gif', 'rattata.gif', 'rayquaza-mega.gif', 'rayquaza.gif', 'regice.gif', 'regigigas.gif', 'regirock.gif', 'registeel.gif', 'relicanth-f.gif', 'relicanth.gif', 'remoraid.gif', 'reshiram.gif', 'reuniclus.gif', 'rhydon-f.gif', 'rhydon.gif', 'rhyhorn.gif', 'rhyperior-f.gif', 'rhyperior.gif', 'ribombee.gif', 'rillaboom.gif', 'riolu.gif', 'rockruff.gif', 'roggenrola.gif', 'rolycoly.gif', 'rookidee.gif', 'roselia-f.gif', 'roselia.gif', 'roserade-f.gif', 'roserade.gif', 'rotom-f.gif', 'rotom-fan.gif', 'rotom-frost.gif', 'rotom-h.gif', 'rotom-heat.gif', 'rotom-m.gif', 'rotom-mow.gif', 'rotom-s.gif', 'rotom-w.gif', 'rotom-wash.gif', 'rotom.gif', 'rowlet.gif', 'rufflet.gif', 'runerigus.gif', 'sableye-mega.gif', 'sableye.gif', 'salamence-mega.gif', 'salamence.gif', 'salandit.gif', 'salazzle-totem.gif', 'salazzle.gif', 'samurott.gif', 'sandaconda-gmax.gif', 'sandaconda.gif', 'sandile.gif', 'sandshrew-alola.gif', 'sandshrew.gif', 'sandslash-alola.gif', 'sandslash.gif', 'sandygast.gif', 'sawk.gif', 'sawsbuck-autumn.gif', 'sawsbuck-summer.gif', 'sawsbuck-winter.gif', 'sawsbuck.gif', 'scatterbug.gif', 'sceptile-mega.gif', 'sceptile.gif', 'scizor-f.gif', 'scizor-mega.gif', 'scizor.gif', 'scolipede.gif', 'scorbunny.gif', 'scrafty.gif', 'scraggy.gif', 'scratchet.gif', 'scyther-f.gif', 'scyther.gif', 'seadra.gif', 'seaking.gif', 'sealeo.gif', 'seedot.gif', 'seel.gif', 'seismitoad.gif', 'sentret.gif', 'serperior.gif', 'servine.gif', 'seviper.gif', 'sewaddle.gif', 'shapedo-mega.gif', 'sharpedo-mega.gif', 'sharpedo.gif', 'shaymin-sky.gif', 'shaymin.gif', 'shedinja.gif', 'shelgon.gif', 'shellder.gif', 'shellos-east.gif', 'shellos.gif', 'shelmet.gif', 'shieldon.gif', 'shiftry-f.gif', 'shiftry.gif', 'shiinotic.gif', 'shinx-f.gif', 'shinx.gif', 'shroomish.gif', 'shuckle.gif', 'shuppet.gif', 'sigilyph.gif', 'silcoon.gif', 'silicobra.gif', 
'silvally-bug.gif', 'silvally-dark.gif', 'silvally-dragon.gif', 'silvally-electric.gif', 'silvally-fairy.gif', 'silvally-fighting.gif', 'silvally-fire.gif', 'silvally-flying.gif', 'silvally-ghost.gif', 'silvally-grass.gif', 'silvally-ground.gif', 'silvally-ice.gif', 'silvally-poison.gif', 'silvally-psychic.gif', 'silvally-rock.gif', 'silvally-steel.gif', 'silvally-water.gif', 'silvally.gif', 'simipour.gif', 'simisage.gif', 'simisear.gif', 'sinistea-antique.gif', 'sinistea.gif', 'sirfetchd.gif', 'sizzlipede.gif', 'skarmory.gif', 'skiddo.gif', 'skiploom.gif', 'skitty.gif', 'skorupi.gif', 'skrelp.gif', 'skuntank.gif', 'skwovet.gif', 'slaking.gif', 'slakoth.gif', 'sliggoo.gif', 'slowbro-mega.gif', 'slowbro.gif', 'slowking.gif', 'slowpoke-galar.gif', 'slowpoke.gif', 'slugma.gif', 'slurpuff.gif', 'smeargle.gif', 'smogecko.gif', 'smoguana.gif', 'smokomodo.gif', 'smoochum.gif', 'snaelstrom.gif', 'sneasel-f.gif', 'sneasel.gif', 'snivy.gif', 'snom.gif', 'snorlax-gmax.gif', 'snorlax.gif', 'snornut.gif', 'snorunt.gif', 'snover-f.gif', 'snover.gif', 'snubbull.gif', 'sobble.gif', 'solgaleo.gif', 'solosis.gif', 'solrock.gif', 'spearow.gif', 'spewpa.gif', 'spheal.gif', 'spinarak.gif', 'spinda.gif', 'spiritomb.gif', 'spoink.gif', 'spritzee.gif', 'squirtle.gif', 'stakataka.gif', 'stantler.gif', 'staraptor-f.gif', 'staraptor.gif', 'staravia-f.gif', 'staravia.gif', 'starly-f.gif', 'starly.gif', 'starmie.gif', 'staryu.gif', 'steelix-f.gif', 'steelix-mega.gif', 'steelix.gif', 'steenee.gif', 'stonjourner.gif', 'stoutland.gif', 'stratagem.gif', 'stufful.gif', 'stunfisk-galar.gif', 'stunfisk.gif', 'stunky.gif', 'substitute.gif', 'sudowoodo-f.gif', 'sudowoodo.gif', 'suicune.gif', 'sunflora.gif', 'sunkern.gif', 'surskit.gif', 'swablu.gif', 'swadloon.gif', 'swalot-f.gif', 'swalot.gif', 'swampert-mega.gif', 'swampert.gif', 'swanna.gif', 'swellow.gif', 'swinub.gif', 'swirlix.gif', 'swoobat.gif', 'sylveon.gif', 'taillow.gif', 'talonflame.gif', 'tangela.gif', 'tangrowth-f.gif', 'tangrowth.gif', 
'tapubulu.gif', 'tapufini.gif', 'tapukoko.gif', 'tapulele.gif', 'tauros.gif', 'teddiursa.gif', 'tentacool.gif', 'tentacruel.gif', 'tepig.gif', 'terrakion.gif', 'thievul.gif', 'throh.gif', 'thundurus-therian.gif', 'thundurus.gif', 'thwackey.gif', 'timburr.gif', 'tirtouga.gif', 'togedemaru-totem.gif', 'togedemaru.gif', 'togekiss.gif', 'togepi.gif', 'togetic.gif', 'tomohawk.gif', 'torchic-f.gif', 'torchic.gif', 'torkoal.gif', 'tornadus-therian.gif', 'tornadus.gif', 'torracat.gif', 'torterra.gif', 'totodile.gif', 'toucannon.gif', 'toxapex.gif', 'toxel.gif', 'toxicroak-f.gif', 'toxicroak.gif', 'toxtricity-gmax.gif', 'toxtricity-lowkey.gif', 'toxtricity.gif', 'tranquill.gif', 'trapinch.gif', 'treecko.gif', 'trevenant.gif', 'tropius.gif', 'trubbish.gif', 'trumbeak.gif', 'tsareena.gif', 'turtonator.gif', 'turtwig.gif', 'tympole.gif', 'tynamo.gif', 'typenull.gif', 'typhlosion.gif', 'tyranitar-mega.gif', 'tyranitar.gif', 'tyrantrum.gif', 'tyrantum.gif', 'tyrogue.gif', 'tyrunt.gif', 'umbreon.gif', 'unfezant-f.gif', 'unfezant.gif', 'unown-b.gif', 'unown-c.gif', 'unown-d.gif', 'unown-e.gif', 'unown-exclamation.gif', 'unown-f.gif', 'unown-g.gif', 'unown-h.gif', 'unown-i.gif', 'unown-j.gif', 'unown-k.gif', 'unown-l.gif', 'unown-m.gif', 'unown-n.gif', 'unown-o.gif', 'unown-p.gif', 'unown-q.gif', 'unown-question.gif', 'unown-r.gif', 'unown-s.gif', 'unown-t.gif', 'unown-u.gif', 'unown-v.gif', 'unown-w.gif', 'unown-x.gif', 'unown-y.gif', 'unown-z.gif', 'unown.gif', 'ursaring-f.gif', 'ursaring.gif', 'uxie.gif', 'vanillish.gif', 'vanillite.gif', 'vanilluxe.gif', 'vaporeon.gif', 'venipede.gif', 'venomoth.gif', 'venonat.gif', 'venusaur-mega.gif', 'venusaur.gif', 'vespiquen.gif', 'vibrava.gif', 'victini.gif', 'victreebel.gif', 'vigoroth.gif', 'vikavolt-totem.gif', 'vikavolt.gif', 'vileplume.gif', 'virizion.gif', 'vivillon-archipelago.gif', 'vivillon-continental.gif', 'vivillon-elegant.gif', 'vivillon-fancy.gif', 'vivillon-garden.gif', 'vivillon-highplains.gif', 'vivillon-icysnow.gif', 
'vivillon-jungle.gif', 'vivillon-marine.gif', 'vivillon-modern.gif', 'vivillon-monsoon.gif', 'vivillon-ocean.gif', 'vivillon-pokeball.gif', 'vivillon-polar.gif', 'vivillon-river.gif', 'vivillon-sandstorm.gif', 'vivillon-savanna.gif', 'vivillon-sun.gif', 'vivillon-tundra.gif', 'vivillon.gif', 'volbeat.gif', 'volcanion.gif', 'volcarona.gif', 'volkraken.gif', 'voltorb.gif', 'vullaby.gif', 'vulpix-alola.gif', 'vulpix.gif', 'wailmer.gif', 'wailord.gif', 'walrein.gif', 'wartortle.gif', 'watchog.gif', 'weavile-f.gif', 'weavile.gif', 'weedle.gif', 'weepinbell.gif', 'weezing-galar.gif', 'weezing.gif', 'whimsicott.gif', 'whirlipede.gif', 'whiscash.gif', 'whismur.gif', 'wigglytuff.gif', 'wimpod.gif', 'wingull.gif', 'wishiwashi-school.gif', 'wishiwashi.gif', 'wobbuffet-f.gif', 'wobbuffet.gif', 'woobat.gif', 'wooloo.gif', 'wooper-f.gif', 'wooper.gif', 'wormadam-sandy.gif', 'wormadam-trash.gif', 'wormadam.gif', 'wurmple.gif', 'wynaut.gif', 'xatu-f.gif', 'xatu.gif', 'xerneas-neutral.gif', 'xerneas.gif', 'xurkitree.gif', 'yamask-galar.gif', 'yamask.gif', 'yamper.gif', 'yanma.gif', 'yanmega.gif', 'yungoos.gif', 'yveltal.gif', 'zacian-crowned.gif', 'zacian.gif', 'zamazenta-crowned.gif', 'zamazenta.gif', 'zangoose.gif', 'zapdos.gif', 'zebstrika.gif', 'zekrom.gif', 'zeraora.gif', 'zigzagoon-galar.gif', 'zigzagoon.gif', 'zoroark.gif', 'zorua.gif', 'zubat-f.gif', 'zubat.gif', 'zweilous.gif', 'zygarde-10.gif', 'zygarde-complete.gif', 'zygarde.gif']
# Fetch every sprite in the list from Pokemon Showdown into
# files/pokemon/front/<name>.gif (the slice drops the '.gif' suffix,
# which dl_img re-appends).
for gif in filename:
    dl_img('https://play.pokemonshowdown.com/sprites/ani/{}'.format(gif),
           'files/pokemon/front', gif[:-4])
|
normal
|
{
"blob_id": "4c9a3983180cc75c39da41f7f9b595811ba0dc35",
"index": 8390,
"step-1": "<mask token>\n\n\ndef simple_get(url):\n \"\"\"\n Attempts to get the content at `url` by making an HTTP GET request.\n If the content-type of response is some kind of HTML/XML, return the\n text content, otherwise return None.\n \"\"\"\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None\n\n\ndef is_good_response(resp):\n \"\"\"\n Returns True if the response seems to be HTML, False otherwise.\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 and content_type is not None and \n content_type.find('html') > -1)\n\n\ndef log_error(e):\n \"\"\"\n It is always a good idea to log errors.\n This function just prints them, but you can\n make it do anything.\n \"\"\"\n print(e)\n\n\ndef save_json(file):\n with open('gif_list.txt', 'w') as f:\n f.write(file)\n\n\n<mask token>\n\n\ndef dl_img(url, file_path, file_name):\n full_path = file_path + '/' + file_name + '.gif'\n opener = urllib.request.build_opener()\n opener.addheaders = [('User-agent', 'Mozilla/5.0')]\n urllib.request.install_opener(opener)\n urllib.request.urlretrieve(url, full_path)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef simple_get(url):\n \"\"\"\n Attempts to get the content at `url` by making an HTTP GET request.\n If the content-type of response is some kind of HTML/XML, return the\n text content, otherwise return None.\n \"\"\"\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None\n\n\ndef is_good_response(resp):\n \"\"\"\n Returns True if the response seems to be HTML, False otherwise.\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 and content_type is not None and \n content_type.find('html') > -1)\n\n\ndef log_error(e):\n \"\"\"\n It is always a good idea to log errors.\n This function just prints them, but you can\n make it do anything.\n \"\"\"\n print(e)\n\n\ndef save_json(file):\n with open('gif_list.txt', 'w') as f:\n f.write(file)\n\n\n<mask token>\n\n\ndef dl_img(url, file_path, file_name):\n full_path = file_path + '/' + file_name + '.gif'\n opener = urllib.request.build_opener()\n opener.addheaders = [('User-agent', 'Mozilla/5.0')]\n urllib.request.install_opener(opener)\n urllib.request.urlretrieve(url, full_path)\n\n\n<mask token>\nfor i in filename:\n url = 'https://play.pokemonshowdown.com/sprites/ani/{}'.format(i)\n file_name = str(i[:-4])\n dl_img(url, 'files/pokemon/front', file_name)\n",
"step-3": "<mask token>\n\n\ndef simple_get(url):\n \"\"\"\n Attempts to get the content at `url` by making an HTTP GET request.\n If the content-type of response is some kind of HTML/XML, return the\n text content, otherwise return None.\n \"\"\"\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None\n\n\ndef is_good_response(resp):\n \"\"\"\n Returns True if the response seems to be HTML, False otherwise.\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 and content_type is not None and \n content_type.find('html') > -1)\n\n\ndef log_error(e):\n \"\"\"\n It is always a good idea to log errors.\n This function just prints them, but you can\n make it do anything.\n \"\"\"\n print(e)\n\n\ndef save_json(file):\n with open('gif_list.txt', 'w') as f:\n f.write(file)\n\n\n<mask token>\n\n\ndef dl_img(url, file_path, file_name):\n full_path = file_path + '/' + file_name + '.gif'\n opener = urllib.request.build_opener()\n opener.addheaders = [('User-agent', 'Mozilla/5.0')]\n urllib.request.install_opener(opener)\n urllib.request.urlretrieve(url, full_path)\n\n\nfilename = ['abomasnow-f.gif', 'abomasnow-mega.gif', 'abomasnow.gif',\n 'abra.gif', 'absol-mega.gif', 'absol.gif', 'accelgor.gif',\n 'aegislash-blade.gif', 'aegislash.gif', 'aerodactyl-mega.gif',\n 'aerodactyl.gif', 'aggron-mega.gif', 'aggron.gif', 'aipom-f.gif',\n 'aipom.gif', 'alakazam-mega.gif', 'alakazam.gif',\n 'alcremie-caramel-swirl-berry.gif', 'alcremie-caramel-swirl-clover.gif',\n 'alcremie-caramel-swirl-flower.gif', 'alcremie-caramel-swirl-love.gif',\n 'alcremie-caramel-swirl-ribbon.gif', 'alcremie-caramel-swirl-star.gif',\n 'alcremie-caramelswirl.gif', 'alcremie-gmax.gif',\n 'alcremie-lemon-cream-berry.gif', 'alcremie-lemon-cream-clover.gif',\n 
'alcremie-lemon-cream-flower.gif', 'alcremie-lemon-cream-love.gif',\n 'alcremie-lemon-cream-ribbon.gif', 'alcremie-lemon-cream-star.gif',\n 'alcremie-lemoncream.gif', 'alcremie-matcha-cream-berry.gif',\n 'alcremie-matcha-cream-clover.gif', 'alcremie-matcha-cream-flower.gif',\n 'alcremie-matcha-cream-love.gif', 'alcremie-matcha-cream-ribbon.gif',\n 'alcremie-matcha-cream-star.gif', 'alcremie-matchacream.gif',\n 'alcremie-mint-cream-berry.gif', 'alcremie-mint-cream-clover.gif',\n 'alcremie-mint-cream-flower.gif', 'alcremie-mint-cream-love.gif',\n 'alcremie-mint-cream-ribbon.gif', 'alcremie-mint-cream-star.gif',\n 'alcremie-mintcream.gif', 'alcremie-rainbow-swirl-berry.gif',\n 'alcremie-rainbow-swirl-clover.gif',\n 'alcremie-rainbow-swirl-flower.gif', 'alcremie-rainbow-swirl-love.gif',\n 'alcremie-rainbow-swirl-ribbon.gif', 'alcremie-rainbow-swirl-star.gif',\n 'alcremie-rainbowswirl.gif', 'alcremie-ruby-cream-berry.gif',\n 'alcremie-ruby-cream-clover.gif', 'alcremie-ruby-cream-flower.gif',\n 'alcremie-ruby-cream-love.gif', 'alcremie-ruby-cream-ribbon.gif',\n 'alcremie-ruby-cream-star.gif', 'alcremie-ruby-swirl-berry.gif',\n 'alcremie-ruby-swirl-clover.gif', 'alcremie-ruby-swirl-flower.gif',\n 'alcremie-ruby-swirl-love.gif', 'alcremie-ruby-swirl-ribbon.gif',\n 'alcremie-ruby-swirl-star.gif', 'alcremie-rubycream.gif',\n 'alcremie-rubyswirl.gif', 'alcremie-salted-cream-berry.gif',\n 'alcremie-salted-cream-clover.gif', 'alcremie-salted-cream-flower.gif',\n 'alcremie-salted-cream-love.gif', 'alcremie-salted-cream-ribbon.gif',\n 'alcremie-salted-cream-star.gif', 'alcremie-saltedcream.gif',\n 'alcremie-vanilla-cream-berry.gif', 'alcremie-vanilla-cream-clover.gif',\n 'alcremie-vanilla-cream-flower.gif', 'alcremie-vanilla-cream-love.gif',\n 'alcremie-vanilla-cream-ribbon.gif', 'alcremie-vanilla-cream-star.gif',\n 'alcremie.gif', 'alomomola.gif', 'altaria-mega.gif', 'altaria.gif',\n 'amaura.gif', 'ambipom-f.gif', 'ambipom.gif', 'amoonguss.gif',\n 'ampharos-mega.gif', 
'ampharos.gif', 'anorith.gif', 'appletun-gmax.gif',\n 'appletun.gif', 'applin.gif', 'araquanid-totem.gif', 'araquanid.gif',\n 'arbok.gif', 'arcanine.gif', 'arceus-bug.gif', 'arceus-dark.gif',\n 'arceus-dragon.gif', 'arceus-electric.gif', 'arceus-fairy.gif',\n 'arceus-fighting.gif', 'arceus-fire.gif', 'arceus-flying.gif',\n 'arceus-ghost.gif', 'arceus-grass.gif', 'arceus-ground.gif',\n 'arceus-ice.gif', 'arceus-poison.gif', 'arceus-psychic.gif',\n 'arceus-rock.gif', 'arceus-steel.gif', 'arceus-water.gif', 'arceus.gif',\n 'archen.gif', 'archeops.gif', 'arctovish.gif', 'arctozolt.gif',\n 'ariados.gif', 'armaldo.gif', 'aromatisse.gif', 'aron.gif',\n 'arrokuda.gif', 'articuno.gif', 'audino-mega.gif', 'audino.gif',\n 'aurorus.gif', 'aurumoth.gif', 'avalugg.gif', 'axew.gif', 'azelf.gif',\n 'azumarill.gif', 'azurill.gif', 'bagon.gif', 'baltoy.gif',\n 'banette-mega.gif', 'banette.gif', 'barbaracle.gif', 'barboach.gif',\n 'barboarch.gif', 'barraskewda.gif', 'basculin-bluestriped.gif',\n 'basculin.gif', 'bastiodon.gif', 'bayleef.gif', 'beartic.gif',\n 'beautifly-f.gif', 'beautifly.gif', 'beedrill-mega.gif', 'beedrill.gif',\n 'beheeyem.gif', 'beldum.gif', 'bellossom.gif', 'bellsprout.gif',\n 'bergmite.gif', 'bewear.gif', 'bibarel-f.gif', 'bibarel.gif',\n 'bidoof-f.gif', 'bidoof.gif', 'binacle.gif', 'bisharp.gif',\n 'blacephalon.gif', 'blastoise-mega.gif', 'blastoise.gif',\n 'blaziken-f.gif', 'blaziken-mega.gif', 'blaziken.gif', 'blipbug.gif',\n 'blissey.gif', 'blitzle.gif', 'boldore.gif', 'boltund.gif',\n 'bonsly.gif', 'bouffalant.gif', 'bounsweet.gif', 'braixen.gif',\n 'braviary.gif', 'breloom.gif', 'brionne.gif', 'bronzong.gif',\n 'bronzor.gif', 'bruxish.gif', 'budew.gif', 'buizel-f.gif', 'buizel.gif',\n 'bulbasaur.gif', 'buneary.gif', 'bunnelby.gif', 'burmy-sandy.gif',\n 'burmy-trash.gif', 'burmy.gif', 'butterfree-gmax.gif', 'butterfree.gif',\n 'buzzwole.gif', 'cacnea.gif', 'cacturne-f.gif', 'cacturne.gif',\n 'camerupt-f.gif', 'camerupt-mega.gif', 'camerupt.gif',\n 
'camperupt-mega.gif', 'carbink.gif', 'caribolt.gif', 'carkol.gif',\n 'carnivine.gif', 'carracosta.gif', 'carvanha.gif', 'cascoon.gif',\n 'castform-rainy.gif', 'castform-snowy.gif', 'castform-sunny.gif',\n 'castform.gif', 'caterpie.gif', 'cawmodore.gif', 'celebi.gif',\n 'celesteela.gif', 'centiskorch-gmax.gif', 'centiskorch.gif',\n 'chandelure.gif', 'chansey.gif', 'charizard-gmax.gif',\n 'charizard-megax.gif', 'charizard-megay.gif', 'charizard.gif',\n 'charjabug.gif', 'charmander.gif', 'charmeleon.gif', 'chatot.gif',\n 'cherrim-sunshine.gif', 'cherrim.gif', 'cherubi.gif', 'chesnaught.gif',\n 'chespin.gif', 'chewtle.gif', 'chikorita.gif', 'chimchar.gif',\n 'chimecho.gif', 'chinchou.gif', 'chingling.gif', 'cinccino.gif',\n 'cinderace.gif', 'clamperl.gif', 'clauncher.gif', 'clawitzer.gif',\n 'claydol.gif', 'clefable.gif', 'clefairy.gif', 'cleffa.gif',\n 'clobbopus.gif', 'cloyster.gif', 'coalossal-gmax.gif', 'coalossal.gif',\n 'cobalion.gif', 'cofagrigus.gif', 'combee-f.gif', 'combee.gif',\n 'combusken-f.gif', 'combusken.gif', 'comfey.gif', 'conkeldurr.gif',\n 'copperajah-gmax.gif', 'copperajah.gif', 'corphish.gif',\n 'corsola-galar.gif', 'corsola.gif', 'corviknight-gmax.gif',\n 'corviknight.gif', 'corvisquire.gif', 'cosmoem.gif', 'cosmog.gif',\n 'cottonee.gif', 'crabominable.gif', 'crabrawler.gif', 'cradily.gif',\n 'cramorant-gorging.gif', 'cramorant-gulping.gif', 'cramorant.gif',\n 'cranidos.gif', 'crawdaunt.gif', 'cresselia.gif', 'croagunk-f.gif',\n 'croagunk.gif', 'crobat.gif', 'croconaw.gif', 'crucibelle-mega.gif',\n 'crucibelle.gif', 'crustle.gif', 'cryogonal.gif', 'cubchoo.gif',\n 'cubone.gif', 'cufant.gif', 'cursola.gif', 'cutiefly.gif',\n 'cyndaquil.gif', 'darkrai.gif', 'darmanitan-galar.gif',\n 'darmanitan-galarzen.gif', 'darmanitan-zen.gif', 'darmanitan.gif',\n 'dartrix.gif', 'darumaka-galar.gif', 'darumaka.gif', 'decidueye.gif',\n 'dedenne.gif', 'deerling-autumn.gif', 'deerling-summer.gif',\n 'deerling-winter.gif', 'deerling.gif', 'deino.gif', 
'delcatty.gif',\n 'delibird.gif', 'delphox.gif', 'deoxys-attack.gif',\n 'deoxys-defense.gif', 'deoxys-speed.gif', 'deoxys.gif', 'dewgong.gif',\n 'dewott.gif', 'dewpider.gif', 'dhelmise.gif', 'dialga.gif',\n 'diancie-mega.gif', 'diancie.gif', 'diggersby.gif', 'diglett-alola.gif',\n 'diglett.gif', 'ditto.gif', 'dodrio.gif', 'doduo.gif', 'donphan-f.gif',\n 'donphan.gif', 'dottler.gif', 'doublade.gif', 'dracovish.gif',\n 'dracozolt.gif', 'dragalge.gif', 'dragapult.gif', 'dragonair.gif',\n 'dragonite.gif', 'drakloak.gif', 'drampa.gif', 'drapion.gif',\n 'dratini.gif', 'drednaw-gmax.gif', 'drednaw.gif', 'dreepy.gif',\n 'drifblim.gif', 'drifloon.gif', 'drilbur.gif', 'drizzile.gif',\n 'drowzee.gif', 'druddigon.gif', 'dubwool.gif', 'ducklett.gif',\n 'dugtrio-alola.gif', 'dugtrio.gif', 'dunsparce.gif', 'duosion.gif',\n 'duraludon-gmax.gif', 'duraludon.gif', 'durant.gif', 'dusclops.gif',\n 'dusknoir.gif', 'duskull.gif', 'dustox-f.gif', 'dustox.gif',\n 'dwebble.gif', 'eelektrik.gif', 'eelektross.gif', 'eevee-gmax.gif',\n 'eevee-starter.gif', 'eevee.gif', 'eiscue-noice.gif', 'eiscue.gif',\n 'ekans.gif', 'eldegoss.gif', 'electabuzz.gif', 'electivire.gif',\n 'electrike.gif', 'electrode.gif', 'elekid.gif', 'elgyem.gif',\n 'emboar.gif', 'emolga.gif', 'empoleon.gif', 'entei.gif',\n 'equilibra.gif', 'escavalier.gif', 'espeon.gif', 'espurr.gif',\n 'eternatus-eternamax.gif', 'eternatus.gif', 'excadrill.gif',\n 'exeggcute.gif', 'exeggutor-alola.gif', 'exeggutor.gif', 'exploud.gif',\n 'falinks.gif', 'farfetchd-galar.gif', 'farfetchd.gif', 'fearow.gif',\n 'feebas.gif', 'fennekin.gif', 'feraligatr.gif', 'ferroseed.gif',\n 'ferrothorn.gif', 'fidgit.gif', 'finneon-f.gif', 'finneon.gif',\n 'flaaffy.gif', 'flabebe-blue.gif', 'flabebe-orange.gif',\n 'flabebe-white.gif', 'flabebe-yellow.gif', 'flabebe.gif',\n 'flapple-gmax.gif', 'flapple.gif', 'flareon.gif', 'fletchinder.gif',\n 'fletchling.gif', 'floatzel-f.gif', 'floatzel.gif', 'floette-blue.gif',\n 'floette-eternal.gif', 'floette-orange.gif', 
'floette-white.gif',\n 'floette-yellow.gif', 'floette.gif', 'florges-blue.gif',\n 'florges-orange.gif', 'florges-white.gif', 'florges-yellow.gif',\n 'florges.gif', 'flygon.gif', 'fomantis.gif', 'foongus.gif',\n 'forretress.gif', 'fraxure.gif', 'frillish-f.gif', 'frillish.gif',\n 'froakie.gif', 'frogadier.gif', 'froslass.gif', 'frosmoth.gif',\n 'furfrou-dandy.gif', 'furfrou-debutante.gif', 'furfrou-diamond.gif',\n 'furfrou-heart.gif', 'furfrou-kabuki.gif', 'furfrou-lareine.gif',\n 'furfrou-matron.gif', 'furfrou-pharaoh.gif', 'furfrou-star.gif',\n 'furfrou.gif', 'furret.gif', 'gabite-f.gif', 'gabite.gif',\n 'gallade-mega.gif', 'gallade.gif', 'galvantula.gif',\n 'garbodor-gmax.gif', 'garbodor.gif', 'garchomp-f.gif',\n 'garchomp-mega.gif', 'garchomp.gif', 'gardevoir-mega.gif',\n 'gardevoir.gif', 'gastly.gif', 'gastrodon-east.gif', 'gastrodon.gif',\n 'genesect-burn.gif', 'genesect-chill.gif', 'genesect-douse.gif',\n 'genesect-shock.gif', 'genesect.gif', 'gengar-gmax.gif',\n 'gengar-mega.gif', 'gengar.gif', 'geodude-alola.gif', 'geodude.gif',\n 'gible-f.gif', 'gible.gif', 'gigalith.gif', 'girafarig-f.gif',\n 'girafarig.gif', 'giratina-origin.gif', 'giratina.gif', 'glaceon.gif',\n 'glalie-mega.gif', 'glalie.gif', 'glameow.gif', 'gligar-f.gif',\n 'gligar.gif', 'gliscor.gif', 'gloom.gif', 'gogoat.gif', 'golbat-f.gif',\n 'golbat.gif', 'goldeen.gif', 'golduck.gif', 'golem-alola.gif',\n 'golem.gif', 'golett.gif', 'golisopod.gif', 'golurk.gif', 'goodra.gif',\n 'goomy.gif', 'gorebyss.gif', 'gossifleur.gif', 'gothita.gif',\n 'gothitelle.gif', 'gothorita.gif', 'gourgeist-large.gif',\n 'gourgeist-small.gif', 'gourgeist-super.gif', 'gourgeist.gif',\n 'granbull.gif', 'grapploct.gif', 'graveler-alola.gif', 'graveler.gif',\n 'greedent.gif', 'greninja-ash.gif', 'greninja.gif', 'grimer-alola.gif',\n 'grimer.gif', 'grimmsnarl-gmax.gif', 'grimmsnarl.gif', 'grookey.gif',\n 'grotle.gif', 'groudon-primal.gif', 'groudon.gif', 'grovyle.gif',\n 'growlithe.gif', 'grubbin.gif', 'grumpig.gif', 
'gulpin-f.gif',\n 'gulpin.gif', 'gumshoos-totem.gif', 'gumshoos.gif', 'gurdurr.gif',\n 'guzzlord.gif', 'gyarados-mega.gif', 'gyarados.gif', 'hakamoo.gif',\n 'happiny.gif', 'hariyama.gif', 'hatenna.gif', 'hatterene-gmax.gif',\n 'hatterene.gif', 'hattrem.gif', 'haunter.gif', 'hawlucha.gif',\n 'haxorus.gif', 'heatmor.gif', 'heatran.gif', 'heliolisk.gif',\n 'helioptile.gif', 'heracross-f.gif', 'heracross-mega.gif',\n 'heracross.gif', 'herdier.gif', 'hippopotas-f.gif', 'hippopotas.gif',\n 'hippowdon-f.gif', 'hippowdon.gif', 'hitmonchan.gif', 'hitmonlee.gif',\n 'hitmontop.gif', 'honchkrow.gif', 'honedge.gif', 'hooh.gif',\n 'hoopa-mega.gif', 'hoopa-unbound.gif', 'hoopa.gif', 'hoothoot.gif',\n 'hoppip.gif', 'horsea.gif', 'houndoom-f.gif', 'houndoom-mega.gif',\n 'houndoom.gif', 'houndour.gif', 'huntail.gif', 'hydreigon.gif',\n 'hypno.gif', 'igglybuff.gif', 'illumise.gif', 'impidimp.gif',\n 'incineroar.gif', 'indeedee-f.gif', 'indeedee.gif', 'infernape.gif',\n 'inkay.gif', 'inteleon.gif', 'ivysaur.gif', 'jangmoo.gif',\n 'jellicent-f.gif', 'jellicent.gif', 'jigglypuff.gif', 'jirachi.gif',\n 'jolteon.gif', 'joltik.gif', 'jumbao.gif', 'jumpluff.gif', 'jynx.gif',\n 'kabuto.gif', 'kabutops.gif', 'kadabra.gif', 'kakuna.gif',\n 'kangaskhan-mega.gif', 'kangaskhan.gif', 'karrablast.gif',\n 'kartana.gif', 'kecleon.gif', 'keldeo-resolute.gif', 'keldeo.gif',\n 'kerfluffle-f.gif', 'kerfluffle.gif', 'kingdra.gif', 'kingler-gmax.gif',\n 'kingler.gif', 'kirlia.gif', 'kitsunoh-f.gif', 'kitsunoh.gif',\n 'klang.gif', 'klefki.gif', 'klink.gif', 'klinklang.gif', 'koffing.gif',\n 'komala.gif', 'kommo-o-totem.gif', 'kommoo.gif', 'krabby.gif',\n 'kricketot-f.gif', 'kricketot.gif', 'kricketune-f.gif',\n 'kricketune.gif', 'krilowatt.gif', 'krokorok.gif', 'krookodile.gif',\n 'kyogre-primal.gif', 'kyogre.gif', 'kyurem-black.gif',\n 'kyurem-white.gif', 'kyurem.gif', 'lairon.gif', 'lampent.gif',\n 'landorus-therian.gif', 'landorus.gif', 'lanturn.gif',\n 'lapras-gmax.gif', 'lapras.gif', 'larvesta.gif', 
'larvitar.gif',\n 'latias-mega.gif', 'latias.gif', 'latios-mega.gif', 'latios.gif',\n 'leafeon.gif', 'leavanny.gif', 'ledian-f.gif', 'ledian.gif',\n 'ledyba-f.gif', 'ledyba.gif', 'lickilicky.gif', 'lickitung.gif',\n 'liepard.gif', 'lileep.gif', 'lilligant.gif', 'lillipup.gif',\n 'linoone-galar.gif', 'linoone.gif', 'litleo.gif', 'litten.gif',\n 'litwick.gif', 'lombre.gif', 'lopunny-mega.gif', 'lopunny.gif',\n 'lotad.gif', 'loudred.gif', 'lucario-mega.gif', 'lucario.gif',\n 'ludicolo-f.gif', 'ludicolo.gif', 'lugia.gif', 'lumineon-f.gif',\n 'lumineon.gif', 'lunala.gif', 'lunatone.gif', 'lurantis-totem.gif',\n 'lurantis.gif', 'luvdisc.gif', 'luxio-f.gif', 'luxio.gif',\n 'luxray-f.gif', 'luxray.gif', 'lycanroc-dusk.gif',\n 'lycanroc-midnight.gif', 'lycanroc.gif', 'machamp-gmax.gif',\n 'machamp.gif', 'machoke.gif', 'machop.gif', 'magby.gif', 'magcargo.gif',\n 'magearna-original.gif', 'magearna.gif', 'magikarp-f.gif',\n 'magikarp.gif', 'magmar.gif', 'magmortar.gif', 'magnemite.gif',\n 'magneton.gif', 'magnezone.gif', 'makuhita.gif', 'malaconda.gif',\n 'malamar.gif', 'mamoswine-f.gif', 'mamoswine.gif', 'manaphy.gif',\n 'mandibuzz.gif', 'manectric-mega.gif', 'manectric.gif', 'mankey.gif',\n 'mantine.gif', 'mantyke.gif', 'maractus.gif', 'mareanie.gif',\n 'mareep.gif', 'marill.gif', 'marowak-alola-totem.gif',\n 'marowak-alola.gif', 'marowak-totem.gif', 'marowak.gif',\n 'marshadow.gif', 'marshtomp.gif', 'masquerain.gif', 'mawile-mega.gif',\n 'mawile.gif', 'medicham-f.gif', 'medicham-mega.gif', 'medicham.gif',\n 'meditite-f.gif', 'meditite.gif', 'meganium-f.gif', 'meganium.gif',\n 'melmetal-gmax.gif', 'melmetal.gif', 'meloetta-pirouette.gif',\n 'meloetta.gif', 'meltan.gif', 'meowstic-f.gif', 'meowstic.gif',\n 'meowth-alola.gif', 'meowth-galar.gif', 'meowth-gmax.gif', 'meowth.gif',\n 'mesprit.gif', 'metagross-mega.gif', 'metagross.gif', 'metang.gif',\n 'metapod.gif', 'mew.gif', 'mewtwo-megax.gif', 'mewtwo-megay.gif',\n 'mewtwo.gif', 'mienfoo.gif', 'mienshao.gif', 
'mightyena.gif',\n 'milcery.gif', 'milotic-f.gif', 'milotic.gif', 'miltank.gif',\n 'mimejr.gif', 'mimikyu-busted-totem.gif', 'mimikyu-busted.gif',\n 'mimikyu-totem.gif', 'mimikyu.gif', 'minccino.gif', 'minior-blue.gif',\n 'minior-green.gif', 'minior-indigo.gif', 'minior-meteor.gif',\n 'minior-orange.gif', 'minior-violet.gif', 'minior-yellow.gif',\n 'minior.gif', 'minun.gif', 'misdreavus.gif', 'mismagius.gif',\n 'mollux.gif', 'moltres.gif', 'monferno.gif', 'morelull.gif',\n 'morgrem.gif', 'morpeko-hangry.gif', 'morpeko.gif', 'mothim.gif',\n 'mrmime-galar.gif', 'mrmime.gif', 'mrrime.gif', 'mudbray.gif',\n 'mudkip.gif', 'mudsdale.gif', 'muk-alola.gif', 'muk.gif',\n 'munchlax.gif', 'munna.gif', 'murkrow-f.gif', 'murkrow.gif',\n 'musharna.gif', 'naganadel.gif', 'natu.gif', 'naviathan.gif',\n 'necrozma-dawnwings.gif', 'necrozma-duskmane.gif', 'necrozma-ultra.gif',\n 'necrozma.gif', 'necturna.gif', 'nickit.gif', 'nidoking.gif',\n 'nidoqueen.gif', 'nidoran-f.gif', 'nidoran.gif', 'nidoranf.gif',\n 'nidoranm.gif', 'nidorina.gif', 'nidorino.gif', 'nihilego.gif',\n 'nincada.gif', 'ninetales-alola.gif', 'ninetales.gif', 'ninjask.gif',\n 'noctowl.gif', 'noibat.gif', 'noivern.gif', 'nosepass.gif',\n 'numel-f.gif', 'numel.gif', 'nuzleaf-f.gif', 'nuzleaf.gif',\n 'obstagoon.gif', 'octillery-f.gif', 'octillery.gif', 'oddish.gif',\n 'omanyte.gif', 'omastar.gif', 'onix.gif', 'oranguru.gif',\n 'orbeetle-gmax.gif', 'orbeetle.gif', 'oricorio-pau.gif',\n 'oricorio-pompom.gif', 'oricorio-sensu.gif', 'oricorio.gif',\n 'oshawott.gif', 'pachirisu-f.gif', 'pachirisu.gif', 'pajantom.gif',\n 'palkia.gif', 'palossand.gif', 'palpitoad.gif', 'pancham.gif',\n 'pangoro.gif', 'panpour.gif', 'pansage.gif', 'pansear.gif', 'paras.gif',\n 'parasect.gif', 'passimian.gif', 'patrat.gif', 'pawniard.gif',\n 'pelipper.gif', 'perrserker.gif', 'persian-alola.gif', 'persian.gif',\n 'petilil.gif', 'phanpy.gif', 'phantump.gif', 'pheromosa.gif',\n 'phione.gif', 'pichu.gif', 'pidgeot-mega.gif', 'pidgeot.gif',\n 
'pidgeotto.gif', 'pidgey.gif', 'pidove.gif', 'pignite.gif',\n 'pikachu-alola.gif', 'pikachu-belle.gif', 'pikachu-cosplay.gif',\n 'pikachu-f.gif', 'pikachu-gmax.gif', 'pikachu-hoenn.gif',\n 'pikachu-kalos.gif', 'pikachu-libre.gif', 'pikachu-original.gif',\n 'pikachu-partner.gif', 'pikachu-phd.gif', 'pikachu-pop-star.gif',\n 'pikachu-popstar.gif', 'pikachu-rockstar.gif', 'pikachu-sinnoh.gif',\n 'pikachu-starter-f.gif', 'pikachu-starter.gif', 'pikachu-unova.gif',\n 'pikachu.gif', 'pikipek.gif', 'piloswine-f.gif', 'piloswine.gif',\n 'pincurchin.gif', 'pineco.gif', 'pinsir-mega.gif', 'pinsir.gif',\n 'piplup.gif', 'plasmanta.gif', 'pluffle.gif', 'plusle.gif',\n 'poipole.gif', 'pokestarblackbelt.gif', 'pokestarblackdoor.gif',\n 'pokestarbrycenman.gif', 'pokestarf00.gif', 'pokestarf002.gif',\n 'pokestargiant.gif', 'pokestarhumanoid.gif', 'pokestarmonster.gif',\n 'pokestarmt.gif', 'pokestarmt2.gif', 'pokestarsmeargle.gif',\n 'pokestarspirit.gif', 'pokestartransport.gif', 'pokestarufo-2.gif',\n 'pokestarufo.gif', 'pokestarufo2.gif', 'pokestarwhitedoor.gif',\n 'politoed-f.gif', 'politoed.gif', 'poliwag.gif', 'poliwhirl.gif',\n 'poliwrath.gif', 'polteageist-antique.gif', 'polteageist.gif',\n 'ponyta-galar.gif', 'ponyta.gif', 'poochyena.gif', 'popplio.gif',\n 'porygon-z.gif', 'porygon.gif', 'porygon2.gif', 'porygonz.gif',\n 'primarina.gif', 'primeape.gif', 'prinplup.gif', 'probopass.gif',\n 'psyduck.gif', 'pumpkaboo-large.gif', 'pumpkaboo-small.gif',\n 'pumpkaboo-super.gif', 'pumpkaboo.gif', 'pupitar.gif', 'purrloin.gif',\n 'purugly.gif', 'pyroak.gif', 'pyroar-f.gif', 'pyroar.gif',\n 'pyukumuku.gif', 'quagsire-f.gif', 'quagsire.gif', 'quilava.gif',\n 'quilladin.gif', 'qwilfish.gif', 'raboot.gif', 'raichu-alola.gif',\n 'raichu.gif', 'raikou.gif', 'ralts.gif', 'rampardos.gif',\n 'rapidash-galar.gif', 'rapidash.gif', 'raticate-alola-totem.gif',\n 'raticate-alola.gif', 'raticate-f.gif', 'raticate-totem-a.gif',\n 'raticate.gif', 'rattata-alola.gif', 'rattata-f.gif', 'rattata.gif',\n 
'rayquaza-mega.gif', 'rayquaza.gif', 'regice.gif', 'regigigas.gif',\n 'regirock.gif', 'registeel.gif', 'relicanth-f.gif', 'relicanth.gif',\n 'remoraid.gif', 'reshiram.gif', 'reuniclus.gif', 'rhydon-f.gif',\n 'rhydon.gif', 'rhyhorn.gif', 'rhyperior-f.gif', 'rhyperior.gif',\n 'ribombee.gif', 'rillaboom.gif', 'riolu.gif', 'rockruff.gif',\n 'roggenrola.gif', 'rolycoly.gif', 'rookidee.gif', 'roselia-f.gif',\n 'roselia.gif', 'roserade-f.gif', 'roserade.gif', 'rotom-f.gif',\n 'rotom-fan.gif', 'rotom-frost.gif', 'rotom-h.gif', 'rotom-heat.gif',\n 'rotom-m.gif', 'rotom-mow.gif', 'rotom-s.gif', 'rotom-w.gif',\n 'rotom-wash.gif', 'rotom.gif', 'rowlet.gif', 'rufflet.gif',\n 'runerigus.gif', 'sableye-mega.gif', 'sableye.gif',\n 'salamence-mega.gif', 'salamence.gif', 'salandit.gif',\n 'salazzle-totem.gif', 'salazzle.gif', 'samurott.gif',\n 'sandaconda-gmax.gif', 'sandaconda.gif', 'sandile.gif',\n 'sandshrew-alola.gif', 'sandshrew.gif', 'sandslash-alola.gif',\n 'sandslash.gif', 'sandygast.gif', 'sawk.gif', 'sawsbuck-autumn.gif',\n 'sawsbuck-summer.gif', 'sawsbuck-winter.gif', 'sawsbuck.gif',\n 'scatterbug.gif', 'sceptile-mega.gif', 'sceptile.gif', 'scizor-f.gif',\n 'scizor-mega.gif', 'scizor.gif', 'scolipede.gif', 'scorbunny.gif',\n 'scrafty.gif', 'scraggy.gif', 'scratchet.gif', 'scyther-f.gif',\n 'scyther.gif', 'seadra.gif', 'seaking.gif', 'sealeo.gif', 'seedot.gif',\n 'seel.gif', 'seismitoad.gif', 'sentret.gif', 'serperior.gif',\n 'servine.gif', 'seviper.gif', 'sewaddle.gif', 'shapedo-mega.gif',\n 'sharpedo-mega.gif', 'sharpedo.gif', 'shaymin-sky.gif', 'shaymin.gif',\n 'shedinja.gif', 'shelgon.gif', 'shellder.gif', 'shellos-east.gif',\n 'shellos.gif', 'shelmet.gif', 'shieldon.gif', 'shiftry-f.gif',\n 'shiftry.gif', 'shiinotic.gif', 'shinx-f.gif', 'shinx.gif',\n 'shroomish.gif', 'shuckle.gif', 'shuppet.gif', 'sigilyph.gif',\n 'silcoon.gif', 'silicobra.gif', 'silvally-bug.gif', 'silvally-dark.gif',\n 'silvally-dragon.gif', 'silvally-electric.gif', 'silvally-fairy.gif',\n 
'silvally-fighting.gif', 'silvally-fire.gif', 'silvally-flying.gif',\n 'silvally-ghost.gif', 'silvally-grass.gif', 'silvally-ground.gif',\n 'silvally-ice.gif', 'silvally-poison.gif', 'silvally-psychic.gif',\n 'silvally-rock.gif', 'silvally-steel.gif', 'silvally-water.gif',\n 'silvally.gif', 'simipour.gif', 'simisage.gif', 'simisear.gif',\n 'sinistea-antique.gif', 'sinistea.gif', 'sirfetchd.gif',\n 'sizzlipede.gif', 'skarmory.gif', 'skiddo.gif', 'skiploom.gif',\n 'skitty.gif', 'skorupi.gif', 'skrelp.gif', 'skuntank.gif',\n 'skwovet.gif', 'slaking.gif', 'slakoth.gif', 'sliggoo.gif',\n 'slowbro-mega.gif', 'slowbro.gif', 'slowking.gif', 'slowpoke-galar.gif',\n 'slowpoke.gif', 'slugma.gif', 'slurpuff.gif', 'smeargle.gif',\n 'smogecko.gif', 'smoguana.gif', 'smokomodo.gif', 'smoochum.gif',\n 'snaelstrom.gif', 'sneasel-f.gif', 'sneasel.gif', 'snivy.gif',\n 'snom.gif', 'snorlax-gmax.gif', 'snorlax.gif', 'snornut.gif',\n 'snorunt.gif', 'snover-f.gif', 'snover.gif', 'snubbull.gif',\n 'sobble.gif', 'solgaleo.gif', 'solosis.gif', 'solrock.gif',\n 'spearow.gif', 'spewpa.gif', 'spheal.gif', 'spinarak.gif', 'spinda.gif',\n 'spiritomb.gif', 'spoink.gif', 'spritzee.gif', 'squirtle.gif',\n 'stakataka.gif', 'stantler.gif', 'staraptor-f.gif', 'staraptor.gif',\n 'staravia-f.gif', 'staravia.gif', 'starly-f.gif', 'starly.gif',\n 'starmie.gif', 'staryu.gif', 'steelix-f.gif', 'steelix-mega.gif',\n 'steelix.gif', 'steenee.gif', 'stonjourner.gif', 'stoutland.gif',\n 'stratagem.gif', 'stufful.gif', 'stunfisk-galar.gif', 'stunfisk.gif',\n 'stunky.gif', 'substitute.gif', 'sudowoodo-f.gif', 'sudowoodo.gif',\n 'suicune.gif', 'sunflora.gif', 'sunkern.gif', 'surskit.gif',\n 'swablu.gif', 'swadloon.gif', 'swalot-f.gif', 'swalot.gif',\n 'swampert-mega.gif', 'swampert.gif', 'swanna.gif', 'swellow.gif',\n 'swinub.gif', 'swirlix.gif', 'swoobat.gif', 'sylveon.gif',\n 'taillow.gif', 'talonflame.gif', 'tangela.gif', 'tangrowth-f.gif',\n 'tangrowth.gif', 'tapubulu.gif', 'tapufini.gif', 'tapukoko.gif',\n 
'tapulele.gif', 'tauros.gif', 'teddiursa.gif', 'tentacool.gif',\n 'tentacruel.gif', 'tepig.gif', 'terrakion.gif', 'thievul.gif',\n 'throh.gif', 'thundurus-therian.gif', 'thundurus.gif', 'thwackey.gif',\n 'timburr.gif', 'tirtouga.gif', 'togedemaru-totem.gif', 'togedemaru.gif',\n 'togekiss.gif', 'togepi.gif', 'togetic.gif', 'tomohawk.gif',\n 'torchic-f.gif', 'torchic.gif', 'torkoal.gif', 'tornadus-therian.gif',\n 'tornadus.gif', 'torracat.gif', 'torterra.gif', 'totodile.gif',\n 'toucannon.gif', 'toxapex.gif', 'toxel.gif', 'toxicroak-f.gif',\n 'toxicroak.gif', 'toxtricity-gmax.gif', 'toxtricity-lowkey.gif',\n 'toxtricity.gif', 'tranquill.gif', 'trapinch.gif', 'treecko.gif',\n 'trevenant.gif', 'tropius.gif', 'trubbish.gif', 'trumbeak.gif',\n 'tsareena.gif', 'turtonator.gif', 'turtwig.gif', 'tympole.gif',\n 'tynamo.gif', 'typenull.gif', 'typhlosion.gif', 'tyranitar-mega.gif',\n 'tyranitar.gif', 'tyrantrum.gif', 'tyrantum.gif', 'tyrogue.gif',\n 'tyrunt.gif', 'umbreon.gif', 'unfezant-f.gif', 'unfezant.gif',\n 'unown-b.gif', 'unown-c.gif', 'unown-d.gif', 'unown-e.gif',\n 'unown-exclamation.gif', 'unown-f.gif', 'unown-g.gif', 'unown-h.gif',\n 'unown-i.gif', 'unown-j.gif', 'unown-k.gif', 'unown-l.gif',\n 'unown-m.gif', 'unown-n.gif', 'unown-o.gif', 'unown-p.gif',\n 'unown-q.gif', 'unown-question.gif', 'unown-r.gif', 'unown-s.gif',\n 'unown-t.gif', 'unown-u.gif', 'unown-v.gif', 'unown-w.gif',\n 'unown-x.gif', 'unown-y.gif', 'unown-z.gif', 'unown.gif',\n 'ursaring-f.gif', 'ursaring.gif', 'uxie.gif', 'vanillish.gif',\n 'vanillite.gif', 'vanilluxe.gif', 'vaporeon.gif', 'venipede.gif',\n 'venomoth.gif', 'venonat.gif', 'venusaur-mega.gif', 'venusaur.gif',\n 'vespiquen.gif', 'vibrava.gif', 'victini.gif', 'victreebel.gif',\n 'vigoroth.gif', 'vikavolt-totem.gif', 'vikavolt.gif', 'vileplume.gif',\n 'virizion.gif', 'vivillon-archipelago.gif', 'vivillon-continental.gif',\n 'vivillon-elegant.gif', 'vivillon-fancy.gif', 'vivillon-garden.gif',\n 'vivillon-highplains.gif', 
'vivillon-icysnow.gif',\n 'vivillon-jungle.gif', 'vivillon-marine.gif', 'vivillon-modern.gif',\n 'vivillon-monsoon.gif', 'vivillon-ocean.gif', 'vivillon-pokeball.gif',\n 'vivillon-polar.gif', 'vivillon-river.gif', 'vivillon-sandstorm.gif',\n 'vivillon-savanna.gif', 'vivillon-sun.gif', 'vivillon-tundra.gif',\n 'vivillon.gif', 'volbeat.gif', 'volcanion.gif', 'volcarona.gif',\n 'volkraken.gif', 'voltorb.gif', 'vullaby.gif', 'vulpix-alola.gif',\n 'vulpix.gif', 'wailmer.gif', 'wailord.gif', 'walrein.gif',\n 'wartortle.gif', 'watchog.gif', 'weavile-f.gif', 'weavile.gif',\n 'weedle.gif', 'weepinbell.gif', 'weezing-galar.gif', 'weezing.gif',\n 'whimsicott.gif', 'whirlipede.gif', 'whiscash.gif', 'whismur.gif',\n 'wigglytuff.gif', 'wimpod.gif', 'wingull.gif', 'wishiwashi-school.gif',\n 'wishiwashi.gif', 'wobbuffet-f.gif', 'wobbuffet.gif', 'woobat.gif',\n 'wooloo.gif', 'wooper-f.gif', 'wooper.gif', 'wormadam-sandy.gif',\n 'wormadam-trash.gif', 'wormadam.gif', 'wurmple.gif', 'wynaut.gif',\n 'xatu-f.gif', 'xatu.gif', 'xerneas-neutral.gif', 'xerneas.gif',\n 'xurkitree.gif', 'yamask-galar.gif', 'yamask.gif', 'yamper.gif',\n 'yanma.gif', 'yanmega.gif', 'yungoos.gif', 'yveltal.gif',\n 'zacian-crowned.gif', 'zacian.gif', 'zamazenta-crowned.gif',\n 'zamazenta.gif', 'zangoose.gif', 'zapdos.gif', 'zebstrika.gif',\n 'zekrom.gif', 'zeraora.gif', 'zigzagoon-galar.gif', 'zigzagoon.gif',\n 'zoroark.gif', 'zorua.gif', 'zubat-f.gif', 'zubat.gif', 'zweilous.gif',\n 'zygarde-10.gif', 'zygarde-complete.gif', 'zygarde.gif']\nfor i in filename:\n url = 'https://play.pokemonshowdown.com/sprites/ani/{}'.format(i)\n file_name = str(i[:-4])\n dl_img(url, 'files/pokemon/front', file_name)\n",
"step-4": "import urllib.request\nfrom urllib.request import Request, urlopen\nimport json\nfrom requests import get\nfrom requests.exceptions import RequestException\nfrom contextlib import closing\nfrom bs4 import BeautifulSoup\n<mask token>\n\n\ndef simple_get(url):\n \"\"\"\n Attempts to get the content at `url` by making an HTTP GET request.\n If the content-type of response is some kind of HTML/XML, return the\n text content, otherwise return None.\n \"\"\"\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None\n\n\ndef is_good_response(resp):\n \"\"\"\n Returns True if the response seems to be HTML, False otherwise.\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 and content_type is not None and \n content_type.find('html') > -1)\n\n\ndef log_error(e):\n \"\"\"\n It is always a good idea to log errors.\n This function just prints them, but you can\n make it do anything.\n \"\"\"\n print(e)\n\n\ndef save_json(file):\n with open('gif_list.txt', 'w') as f:\n f.write(file)\n\n\n<mask token>\n\n\ndef dl_img(url, file_path, file_name):\n full_path = file_path + '/' + file_name + '.gif'\n opener = urllib.request.build_opener()\n opener.addheaders = [('User-agent', 'Mozilla/5.0')]\n urllib.request.install_opener(opener)\n urllib.request.urlretrieve(url, full_path)\n\n\nfilename = ['abomasnow-f.gif', 'abomasnow-mega.gif', 'abomasnow.gif',\n 'abra.gif', 'absol-mega.gif', 'absol.gif', 'accelgor.gif',\n 'aegislash-blade.gif', 'aegislash.gif', 'aerodactyl-mega.gif',\n 'aerodactyl.gif', 'aggron-mega.gif', 'aggron.gif', 'aipom-f.gif',\n 'aipom.gif', 'alakazam-mega.gif', 'alakazam.gif',\n 'alcremie-caramel-swirl-berry.gif', 'alcremie-caramel-swirl-clover.gif',\n 'alcremie-caramel-swirl-flower.gif', 'alcremie-caramel-swirl-love.gif',\n 
'alcremie-caramel-swirl-ribbon.gif', 'alcremie-caramel-swirl-star.gif',\n 'alcremie-caramelswirl.gif', 'alcremie-gmax.gif',\n 'alcremie-lemon-cream-berry.gif', 'alcremie-lemon-cream-clover.gif',\n 'alcremie-lemon-cream-flower.gif', 'alcremie-lemon-cream-love.gif',\n 'alcremie-lemon-cream-ribbon.gif', 'alcremie-lemon-cream-star.gif',\n 'alcremie-lemoncream.gif', 'alcremie-matcha-cream-berry.gif',\n 'alcremie-matcha-cream-clover.gif', 'alcremie-matcha-cream-flower.gif',\n 'alcremie-matcha-cream-love.gif', 'alcremie-matcha-cream-ribbon.gif',\n 'alcremie-matcha-cream-star.gif', 'alcremie-matchacream.gif',\n 'alcremie-mint-cream-berry.gif', 'alcremie-mint-cream-clover.gif',\n 'alcremie-mint-cream-flower.gif', 'alcremie-mint-cream-love.gif',\n 'alcremie-mint-cream-ribbon.gif', 'alcremie-mint-cream-star.gif',\n 'alcremie-mintcream.gif', 'alcremie-rainbow-swirl-berry.gif',\n 'alcremie-rainbow-swirl-clover.gif',\n 'alcremie-rainbow-swirl-flower.gif', 'alcremie-rainbow-swirl-love.gif',\n 'alcremie-rainbow-swirl-ribbon.gif', 'alcremie-rainbow-swirl-star.gif',\n 'alcremie-rainbowswirl.gif', 'alcremie-ruby-cream-berry.gif',\n 'alcremie-ruby-cream-clover.gif', 'alcremie-ruby-cream-flower.gif',\n 'alcremie-ruby-cream-love.gif', 'alcremie-ruby-cream-ribbon.gif',\n 'alcremie-ruby-cream-star.gif', 'alcremie-ruby-swirl-berry.gif',\n 'alcremie-ruby-swirl-clover.gif', 'alcremie-ruby-swirl-flower.gif',\n 'alcremie-ruby-swirl-love.gif', 'alcremie-ruby-swirl-ribbon.gif',\n 'alcremie-ruby-swirl-star.gif', 'alcremie-rubycream.gif',\n 'alcremie-rubyswirl.gif', 'alcremie-salted-cream-berry.gif',\n 'alcremie-salted-cream-clover.gif', 'alcremie-salted-cream-flower.gif',\n 'alcremie-salted-cream-love.gif', 'alcremie-salted-cream-ribbon.gif',\n 'alcremie-salted-cream-star.gif', 'alcremie-saltedcream.gif',\n 'alcremie-vanilla-cream-berry.gif', 'alcremie-vanilla-cream-clover.gif',\n 'alcremie-vanilla-cream-flower.gif', 'alcremie-vanilla-cream-love.gif',\n 'alcremie-vanilla-cream-ribbon.gif', 
'alcremie-vanilla-cream-star.gif',\n 'alcremie.gif', 'alomomola.gif', 'altaria-mega.gif', 'altaria.gif',\n 'amaura.gif', 'ambipom-f.gif', 'ambipom.gif', 'amoonguss.gif',\n 'ampharos-mega.gif', 'ampharos.gif', 'anorith.gif', 'appletun-gmax.gif',\n 'appletun.gif', 'applin.gif', 'araquanid-totem.gif', 'araquanid.gif',\n 'arbok.gif', 'arcanine.gif', 'arceus-bug.gif', 'arceus-dark.gif',\n 'arceus-dragon.gif', 'arceus-electric.gif', 'arceus-fairy.gif',\n 'arceus-fighting.gif', 'arceus-fire.gif', 'arceus-flying.gif',\n 'arceus-ghost.gif', 'arceus-grass.gif', 'arceus-ground.gif',\n 'arceus-ice.gif', 'arceus-poison.gif', 'arceus-psychic.gif',\n 'arceus-rock.gif', 'arceus-steel.gif', 'arceus-water.gif', 'arceus.gif',\n 'archen.gif', 'archeops.gif', 'arctovish.gif', 'arctozolt.gif',\n 'ariados.gif', 'armaldo.gif', 'aromatisse.gif', 'aron.gif',\n 'arrokuda.gif', 'articuno.gif', 'audino-mega.gif', 'audino.gif',\n 'aurorus.gif', 'aurumoth.gif', 'avalugg.gif', 'axew.gif', 'azelf.gif',\n 'azumarill.gif', 'azurill.gif', 'bagon.gif', 'baltoy.gif',\n 'banette-mega.gif', 'banette.gif', 'barbaracle.gif', 'barboach.gif',\n 'barboarch.gif', 'barraskewda.gif', 'basculin-bluestriped.gif',\n 'basculin.gif', 'bastiodon.gif', 'bayleef.gif', 'beartic.gif',\n 'beautifly-f.gif', 'beautifly.gif', 'beedrill-mega.gif', 'beedrill.gif',\n 'beheeyem.gif', 'beldum.gif', 'bellossom.gif', 'bellsprout.gif',\n 'bergmite.gif', 'bewear.gif', 'bibarel-f.gif', 'bibarel.gif',\n 'bidoof-f.gif', 'bidoof.gif', 'binacle.gif', 'bisharp.gif',\n 'blacephalon.gif', 'blastoise-mega.gif', 'blastoise.gif',\n 'blaziken-f.gif', 'blaziken-mega.gif', 'blaziken.gif', 'blipbug.gif',\n 'blissey.gif', 'blitzle.gif', 'boldore.gif', 'boltund.gif',\n 'bonsly.gif', 'bouffalant.gif', 'bounsweet.gif', 'braixen.gif',\n 'braviary.gif', 'breloom.gif', 'brionne.gif', 'bronzong.gif',\n 'bronzor.gif', 'bruxish.gif', 'budew.gif', 'buizel-f.gif', 'buizel.gif',\n 'bulbasaur.gif', 'buneary.gif', 'bunnelby.gif', 'burmy-sandy.gif',\n 
'burmy-trash.gif', 'burmy.gif', 'butterfree-gmax.gif', 'butterfree.gif',\n 'buzzwole.gif', 'cacnea.gif', 'cacturne-f.gif', 'cacturne.gif',\n 'camerupt-f.gif', 'camerupt-mega.gif', 'camerupt.gif',\n 'camperupt-mega.gif', 'carbink.gif', 'caribolt.gif', 'carkol.gif',\n 'carnivine.gif', 'carracosta.gif', 'carvanha.gif', 'cascoon.gif',\n 'castform-rainy.gif', 'castform-snowy.gif', 'castform-sunny.gif',\n 'castform.gif', 'caterpie.gif', 'cawmodore.gif', 'celebi.gif',\n 'celesteela.gif', 'centiskorch-gmax.gif', 'centiskorch.gif',\n 'chandelure.gif', 'chansey.gif', 'charizard-gmax.gif',\n 'charizard-megax.gif', 'charizard-megay.gif', 'charizard.gif',\n 'charjabug.gif', 'charmander.gif', 'charmeleon.gif', 'chatot.gif',\n 'cherrim-sunshine.gif', 'cherrim.gif', 'cherubi.gif', 'chesnaught.gif',\n 'chespin.gif', 'chewtle.gif', 'chikorita.gif', 'chimchar.gif',\n 'chimecho.gif', 'chinchou.gif', 'chingling.gif', 'cinccino.gif',\n 'cinderace.gif', 'clamperl.gif', 'clauncher.gif', 'clawitzer.gif',\n 'claydol.gif', 'clefable.gif', 'clefairy.gif', 'cleffa.gif',\n 'clobbopus.gif', 'cloyster.gif', 'coalossal-gmax.gif', 'coalossal.gif',\n 'cobalion.gif', 'cofagrigus.gif', 'combee-f.gif', 'combee.gif',\n 'combusken-f.gif', 'combusken.gif', 'comfey.gif', 'conkeldurr.gif',\n 'copperajah-gmax.gif', 'copperajah.gif', 'corphish.gif',\n 'corsola-galar.gif', 'corsola.gif', 'corviknight-gmax.gif',\n 'corviknight.gif', 'corvisquire.gif', 'cosmoem.gif', 'cosmog.gif',\n 'cottonee.gif', 'crabominable.gif', 'crabrawler.gif', 'cradily.gif',\n 'cramorant-gorging.gif', 'cramorant-gulping.gif', 'cramorant.gif',\n 'cranidos.gif', 'crawdaunt.gif', 'cresselia.gif', 'croagunk-f.gif',\n 'croagunk.gif', 'crobat.gif', 'croconaw.gif', 'crucibelle-mega.gif',\n 'crucibelle.gif', 'crustle.gif', 'cryogonal.gif', 'cubchoo.gif',\n 'cubone.gif', 'cufant.gif', 'cursola.gif', 'cutiefly.gif',\n 'cyndaquil.gif', 'darkrai.gif', 'darmanitan-galar.gif',\n 'darmanitan-galarzen.gif', 'darmanitan-zen.gif', 'darmanitan.gif',\n 
'dartrix.gif', 'darumaka-galar.gif', 'darumaka.gif', 'decidueye.gif',\n 'dedenne.gif', 'deerling-autumn.gif', 'deerling-summer.gif',\n 'deerling-winter.gif', 'deerling.gif', 'deino.gif', 'delcatty.gif',\n 'delibird.gif', 'delphox.gif', 'deoxys-attack.gif',\n 'deoxys-defense.gif', 'deoxys-speed.gif', 'deoxys.gif', 'dewgong.gif',\n 'dewott.gif', 'dewpider.gif', 'dhelmise.gif', 'dialga.gif',\n 'diancie-mega.gif', 'diancie.gif', 'diggersby.gif', 'diglett-alola.gif',\n 'diglett.gif', 'ditto.gif', 'dodrio.gif', 'doduo.gif', 'donphan-f.gif',\n 'donphan.gif', 'dottler.gif', 'doublade.gif', 'dracovish.gif',\n 'dracozolt.gif', 'dragalge.gif', 'dragapult.gif', 'dragonair.gif',\n 'dragonite.gif', 'drakloak.gif', 'drampa.gif', 'drapion.gif',\n 'dratini.gif', 'drednaw-gmax.gif', 'drednaw.gif', 'dreepy.gif',\n 'drifblim.gif', 'drifloon.gif', 'drilbur.gif', 'drizzile.gif',\n 'drowzee.gif', 'druddigon.gif', 'dubwool.gif', 'ducklett.gif',\n 'dugtrio-alola.gif', 'dugtrio.gif', 'dunsparce.gif', 'duosion.gif',\n 'duraludon-gmax.gif', 'duraludon.gif', 'durant.gif', 'dusclops.gif',\n 'dusknoir.gif', 'duskull.gif', 'dustox-f.gif', 'dustox.gif',\n 'dwebble.gif', 'eelektrik.gif', 'eelektross.gif', 'eevee-gmax.gif',\n 'eevee-starter.gif', 'eevee.gif', 'eiscue-noice.gif', 'eiscue.gif',\n 'ekans.gif', 'eldegoss.gif', 'electabuzz.gif', 'electivire.gif',\n 'electrike.gif', 'electrode.gif', 'elekid.gif', 'elgyem.gif',\n 'emboar.gif', 'emolga.gif', 'empoleon.gif', 'entei.gif',\n 'equilibra.gif', 'escavalier.gif', 'espeon.gif', 'espurr.gif',\n 'eternatus-eternamax.gif', 'eternatus.gif', 'excadrill.gif',\n 'exeggcute.gif', 'exeggutor-alola.gif', 'exeggutor.gif', 'exploud.gif',\n 'falinks.gif', 'farfetchd-galar.gif', 'farfetchd.gif', 'fearow.gif',\n 'feebas.gif', 'fennekin.gif', 'feraligatr.gif', 'ferroseed.gif',\n 'ferrothorn.gif', 'fidgit.gif', 'finneon-f.gif', 'finneon.gif',\n 'flaaffy.gif', 'flabebe-blue.gif', 'flabebe-orange.gif',\n 'flabebe-white.gif', 'flabebe-yellow.gif', 'flabebe.gif',\n 
'flapple-gmax.gif', 'flapple.gif', 'flareon.gif', 'fletchinder.gif',\n 'fletchling.gif', 'floatzel-f.gif', 'floatzel.gif', 'floette-blue.gif',\n 'floette-eternal.gif', 'floette-orange.gif', 'floette-white.gif',\n 'floette-yellow.gif', 'floette.gif', 'florges-blue.gif',\n 'florges-orange.gif', 'florges-white.gif', 'florges-yellow.gif',\n 'florges.gif', 'flygon.gif', 'fomantis.gif', 'foongus.gif',\n 'forretress.gif', 'fraxure.gif', 'frillish-f.gif', 'frillish.gif',\n 'froakie.gif', 'frogadier.gif', 'froslass.gif', 'frosmoth.gif',\n 'furfrou-dandy.gif', 'furfrou-debutante.gif', 'furfrou-diamond.gif',\n 'furfrou-heart.gif', 'furfrou-kabuki.gif', 'furfrou-lareine.gif',\n 'furfrou-matron.gif', 'furfrou-pharaoh.gif', 'furfrou-star.gif',\n 'furfrou.gif', 'furret.gif', 'gabite-f.gif', 'gabite.gif',\n 'gallade-mega.gif', 'gallade.gif', 'galvantula.gif',\n 'garbodor-gmax.gif', 'garbodor.gif', 'garchomp-f.gif',\n 'garchomp-mega.gif', 'garchomp.gif', 'gardevoir-mega.gif',\n 'gardevoir.gif', 'gastly.gif', 'gastrodon-east.gif', 'gastrodon.gif',\n 'genesect-burn.gif', 'genesect-chill.gif', 'genesect-douse.gif',\n 'genesect-shock.gif', 'genesect.gif', 'gengar-gmax.gif',\n 'gengar-mega.gif', 'gengar.gif', 'geodude-alola.gif', 'geodude.gif',\n 'gible-f.gif', 'gible.gif', 'gigalith.gif', 'girafarig-f.gif',\n 'girafarig.gif', 'giratina-origin.gif', 'giratina.gif', 'glaceon.gif',\n 'glalie-mega.gif', 'glalie.gif', 'glameow.gif', 'gligar-f.gif',\n 'gligar.gif', 'gliscor.gif', 'gloom.gif', 'gogoat.gif', 'golbat-f.gif',\n 'golbat.gif', 'goldeen.gif', 'golduck.gif', 'golem-alola.gif',\n 'golem.gif', 'golett.gif', 'golisopod.gif', 'golurk.gif', 'goodra.gif',\n 'goomy.gif', 'gorebyss.gif', 'gossifleur.gif', 'gothita.gif',\n 'gothitelle.gif', 'gothorita.gif', 'gourgeist-large.gif',\n 'gourgeist-small.gif', 'gourgeist-super.gif', 'gourgeist.gif',\n 'granbull.gif', 'grapploct.gif', 'graveler-alola.gif', 'graveler.gif',\n 'greedent.gif', 'greninja-ash.gif', 'greninja.gif', 'grimer-alola.gif',\n 
'grimer.gif', 'grimmsnarl-gmax.gif', 'grimmsnarl.gif', 'grookey.gif',\n 'grotle.gif', 'groudon-primal.gif', 'groudon.gif', 'grovyle.gif',\n 'growlithe.gif', 'grubbin.gif', 'grumpig.gif', 'gulpin-f.gif',\n 'gulpin.gif', 'gumshoos-totem.gif', 'gumshoos.gif', 'gurdurr.gif',\n 'guzzlord.gif', 'gyarados-mega.gif', 'gyarados.gif', 'hakamoo.gif',\n 'happiny.gif', 'hariyama.gif', 'hatenna.gif', 'hatterene-gmax.gif',\n 'hatterene.gif', 'hattrem.gif', 'haunter.gif', 'hawlucha.gif',\n 'haxorus.gif', 'heatmor.gif', 'heatran.gif', 'heliolisk.gif',\n 'helioptile.gif', 'heracross-f.gif', 'heracross-mega.gif',\n 'heracross.gif', 'herdier.gif', 'hippopotas-f.gif', 'hippopotas.gif',\n 'hippowdon-f.gif', 'hippowdon.gif', 'hitmonchan.gif', 'hitmonlee.gif',\n 'hitmontop.gif', 'honchkrow.gif', 'honedge.gif', 'hooh.gif',\n 'hoopa-mega.gif', 'hoopa-unbound.gif', 'hoopa.gif', 'hoothoot.gif',\n 'hoppip.gif', 'horsea.gif', 'houndoom-f.gif', 'houndoom-mega.gif',\n 'houndoom.gif', 'houndour.gif', 'huntail.gif', 'hydreigon.gif',\n 'hypno.gif', 'igglybuff.gif', 'illumise.gif', 'impidimp.gif',\n 'incineroar.gif', 'indeedee-f.gif', 'indeedee.gif', 'infernape.gif',\n 'inkay.gif', 'inteleon.gif', 'ivysaur.gif', 'jangmoo.gif',\n 'jellicent-f.gif', 'jellicent.gif', 'jigglypuff.gif', 'jirachi.gif',\n 'jolteon.gif', 'joltik.gif', 'jumbao.gif', 'jumpluff.gif', 'jynx.gif',\n 'kabuto.gif', 'kabutops.gif', 'kadabra.gif', 'kakuna.gif',\n 'kangaskhan-mega.gif', 'kangaskhan.gif', 'karrablast.gif',\n 'kartana.gif', 'kecleon.gif', 'keldeo-resolute.gif', 'keldeo.gif',\n 'kerfluffle-f.gif', 'kerfluffle.gif', 'kingdra.gif', 'kingler-gmax.gif',\n 'kingler.gif', 'kirlia.gif', 'kitsunoh-f.gif', 'kitsunoh.gif',\n 'klang.gif', 'klefki.gif', 'klink.gif', 'klinklang.gif', 'koffing.gif',\n 'komala.gif', 'kommo-o-totem.gif', 'kommoo.gif', 'krabby.gif',\n 'kricketot-f.gif', 'kricketot.gif', 'kricketune-f.gif',\n 'kricketune.gif', 'krilowatt.gif', 'krokorok.gif', 'krookodile.gif',\n 'kyogre-primal.gif', 'kyogre.gif', 
'kyurem-black.gif',\n 'kyurem-white.gif', 'kyurem.gif', 'lairon.gif', 'lampent.gif',\n 'landorus-therian.gif', 'landorus.gif', 'lanturn.gif',\n 'lapras-gmax.gif', 'lapras.gif', 'larvesta.gif', 'larvitar.gif',\n 'latias-mega.gif', 'latias.gif', 'latios-mega.gif', 'latios.gif',\n 'leafeon.gif', 'leavanny.gif', 'ledian-f.gif', 'ledian.gif',\n 'ledyba-f.gif', 'ledyba.gif', 'lickilicky.gif', 'lickitung.gif',\n 'liepard.gif', 'lileep.gif', 'lilligant.gif', 'lillipup.gif',\n 'linoone-galar.gif', 'linoone.gif', 'litleo.gif', 'litten.gif',\n 'litwick.gif', 'lombre.gif', 'lopunny-mega.gif', 'lopunny.gif',\n 'lotad.gif', 'loudred.gif', 'lucario-mega.gif', 'lucario.gif',\n 'ludicolo-f.gif', 'ludicolo.gif', 'lugia.gif', 'lumineon-f.gif',\n 'lumineon.gif', 'lunala.gif', 'lunatone.gif', 'lurantis-totem.gif',\n 'lurantis.gif', 'luvdisc.gif', 'luxio-f.gif', 'luxio.gif',\n 'luxray-f.gif', 'luxray.gif', 'lycanroc-dusk.gif',\n 'lycanroc-midnight.gif', 'lycanroc.gif', 'machamp-gmax.gif',\n 'machamp.gif', 'machoke.gif', 'machop.gif', 'magby.gif', 'magcargo.gif',\n 'magearna-original.gif', 'magearna.gif', 'magikarp-f.gif',\n 'magikarp.gif', 'magmar.gif', 'magmortar.gif', 'magnemite.gif',\n 'magneton.gif', 'magnezone.gif', 'makuhita.gif', 'malaconda.gif',\n 'malamar.gif', 'mamoswine-f.gif', 'mamoswine.gif', 'manaphy.gif',\n 'mandibuzz.gif', 'manectric-mega.gif', 'manectric.gif', 'mankey.gif',\n 'mantine.gif', 'mantyke.gif', 'maractus.gif', 'mareanie.gif',\n 'mareep.gif', 'marill.gif', 'marowak-alola-totem.gif',\n 'marowak-alola.gif', 'marowak-totem.gif', 'marowak.gif',\n 'marshadow.gif', 'marshtomp.gif', 'masquerain.gif', 'mawile-mega.gif',\n 'mawile.gif', 'medicham-f.gif', 'medicham-mega.gif', 'medicham.gif',\n 'meditite-f.gif', 'meditite.gif', 'meganium-f.gif', 'meganium.gif',\n 'melmetal-gmax.gif', 'melmetal.gif', 'meloetta-pirouette.gif',\n 'meloetta.gif', 'meltan.gif', 'meowstic-f.gif', 'meowstic.gif',\n 'meowth-alola.gif', 'meowth-galar.gif', 'meowth-gmax.gif', 'meowth.gif',\n 
'mesprit.gif', 'metagross-mega.gif', 'metagross.gif', 'metang.gif',\n 'metapod.gif', 'mew.gif', 'mewtwo-megax.gif', 'mewtwo-megay.gif',\n 'mewtwo.gif', 'mienfoo.gif', 'mienshao.gif', 'mightyena.gif',\n 'milcery.gif', 'milotic-f.gif', 'milotic.gif', 'miltank.gif',\n 'mimejr.gif', 'mimikyu-busted-totem.gif', 'mimikyu-busted.gif',\n 'mimikyu-totem.gif', 'mimikyu.gif', 'minccino.gif', 'minior-blue.gif',\n 'minior-green.gif', 'minior-indigo.gif', 'minior-meteor.gif',\n 'minior-orange.gif', 'minior-violet.gif', 'minior-yellow.gif',\n 'minior.gif', 'minun.gif', 'misdreavus.gif', 'mismagius.gif',\n 'mollux.gif', 'moltres.gif', 'monferno.gif', 'morelull.gif',\n 'morgrem.gif', 'morpeko-hangry.gif', 'morpeko.gif', 'mothim.gif',\n 'mrmime-galar.gif', 'mrmime.gif', 'mrrime.gif', 'mudbray.gif',\n 'mudkip.gif', 'mudsdale.gif', 'muk-alola.gif', 'muk.gif',\n 'munchlax.gif', 'munna.gif', 'murkrow-f.gif', 'murkrow.gif',\n 'musharna.gif', 'naganadel.gif', 'natu.gif', 'naviathan.gif',\n 'necrozma-dawnwings.gif', 'necrozma-duskmane.gif', 'necrozma-ultra.gif',\n 'necrozma.gif', 'necturna.gif', 'nickit.gif', 'nidoking.gif',\n 'nidoqueen.gif', 'nidoran-f.gif', 'nidoran.gif', 'nidoranf.gif',\n 'nidoranm.gif', 'nidorina.gif', 'nidorino.gif', 'nihilego.gif',\n 'nincada.gif', 'ninetales-alola.gif', 'ninetales.gif', 'ninjask.gif',\n 'noctowl.gif', 'noibat.gif', 'noivern.gif', 'nosepass.gif',\n 'numel-f.gif', 'numel.gif', 'nuzleaf-f.gif', 'nuzleaf.gif',\n 'obstagoon.gif', 'octillery-f.gif', 'octillery.gif', 'oddish.gif',\n 'omanyte.gif', 'omastar.gif', 'onix.gif', 'oranguru.gif',\n 'orbeetle-gmax.gif', 'orbeetle.gif', 'oricorio-pau.gif',\n 'oricorio-pompom.gif', 'oricorio-sensu.gif', 'oricorio.gif',\n 'oshawott.gif', 'pachirisu-f.gif', 'pachirisu.gif', 'pajantom.gif',\n 'palkia.gif', 'palossand.gif', 'palpitoad.gif', 'pancham.gif',\n 'pangoro.gif', 'panpour.gif', 'pansage.gif', 'pansear.gif', 'paras.gif',\n 'parasect.gif', 'passimian.gif', 'patrat.gif', 'pawniard.gif',\n 'pelipper.gif', 
'perrserker.gif', 'persian-alola.gif', 'persian.gif',\n 'petilil.gif', 'phanpy.gif', 'phantump.gif', 'pheromosa.gif',\n 'phione.gif', 'pichu.gif', 'pidgeot-mega.gif', 'pidgeot.gif',\n 'pidgeotto.gif', 'pidgey.gif', 'pidove.gif', 'pignite.gif',\n 'pikachu-alola.gif', 'pikachu-belle.gif', 'pikachu-cosplay.gif',\n 'pikachu-f.gif', 'pikachu-gmax.gif', 'pikachu-hoenn.gif',\n 'pikachu-kalos.gif', 'pikachu-libre.gif', 'pikachu-original.gif',\n 'pikachu-partner.gif', 'pikachu-phd.gif', 'pikachu-pop-star.gif',\n 'pikachu-popstar.gif', 'pikachu-rockstar.gif', 'pikachu-sinnoh.gif',\n 'pikachu-starter-f.gif', 'pikachu-starter.gif', 'pikachu-unova.gif',\n 'pikachu.gif', 'pikipek.gif', 'piloswine-f.gif', 'piloswine.gif',\n 'pincurchin.gif', 'pineco.gif', 'pinsir-mega.gif', 'pinsir.gif',\n 'piplup.gif', 'plasmanta.gif', 'pluffle.gif', 'plusle.gif',\n 'poipole.gif', 'pokestarblackbelt.gif', 'pokestarblackdoor.gif',\n 'pokestarbrycenman.gif', 'pokestarf00.gif', 'pokestarf002.gif',\n 'pokestargiant.gif', 'pokestarhumanoid.gif', 'pokestarmonster.gif',\n 'pokestarmt.gif', 'pokestarmt2.gif', 'pokestarsmeargle.gif',\n 'pokestarspirit.gif', 'pokestartransport.gif', 'pokestarufo-2.gif',\n 'pokestarufo.gif', 'pokestarufo2.gif', 'pokestarwhitedoor.gif',\n 'politoed-f.gif', 'politoed.gif', 'poliwag.gif', 'poliwhirl.gif',\n 'poliwrath.gif', 'polteageist-antique.gif', 'polteageist.gif',\n 'ponyta-galar.gif', 'ponyta.gif', 'poochyena.gif', 'popplio.gif',\n 'porygon-z.gif', 'porygon.gif', 'porygon2.gif', 'porygonz.gif',\n 'primarina.gif', 'primeape.gif', 'prinplup.gif', 'probopass.gif',\n 'psyduck.gif', 'pumpkaboo-large.gif', 'pumpkaboo-small.gif',\n 'pumpkaboo-super.gif', 'pumpkaboo.gif', 'pupitar.gif', 'purrloin.gif',\n 'purugly.gif', 'pyroak.gif', 'pyroar-f.gif', 'pyroar.gif',\n 'pyukumuku.gif', 'quagsire-f.gif', 'quagsire.gif', 'quilava.gif',\n 'quilladin.gif', 'qwilfish.gif', 'raboot.gif', 'raichu-alola.gif',\n 'raichu.gif', 'raikou.gif', 'ralts.gif', 'rampardos.gif',\n 
'rapidash-galar.gif', 'rapidash.gif', 'raticate-alola-totem.gif',\n 'raticate-alola.gif', 'raticate-f.gif', 'raticate-totem-a.gif',\n 'raticate.gif', 'rattata-alola.gif', 'rattata-f.gif', 'rattata.gif',\n 'rayquaza-mega.gif', 'rayquaza.gif', 'regice.gif', 'regigigas.gif',\n 'regirock.gif', 'registeel.gif', 'relicanth-f.gif', 'relicanth.gif',\n 'remoraid.gif', 'reshiram.gif', 'reuniclus.gif', 'rhydon-f.gif',\n 'rhydon.gif', 'rhyhorn.gif', 'rhyperior-f.gif', 'rhyperior.gif',\n 'ribombee.gif', 'rillaboom.gif', 'riolu.gif', 'rockruff.gif',\n 'roggenrola.gif', 'rolycoly.gif', 'rookidee.gif', 'roselia-f.gif',\n 'roselia.gif', 'roserade-f.gif', 'roserade.gif', 'rotom-f.gif',\n 'rotom-fan.gif', 'rotom-frost.gif', 'rotom-h.gif', 'rotom-heat.gif',\n 'rotom-m.gif', 'rotom-mow.gif', 'rotom-s.gif', 'rotom-w.gif',\n 'rotom-wash.gif', 'rotom.gif', 'rowlet.gif', 'rufflet.gif',\n 'runerigus.gif', 'sableye-mega.gif', 'sableye.gif',\n 'salamence-mega.gif', 'salamence.gif', 'salandit.gif',\n 'salazzle-totem.gif', 'salazzle.gif', 'samurott.gif',\n 'sandaconda-gmax.gif', 'sandaconda.gif', 'sandile.gif',\n 'sandshrew-alola.gif', 'sandshrew.gif', 'sandslash-alola.gif',\n 'sandslash.gif', 'sandygast.gif', 'sawk.gif', 'sawsbuck-autumn.gif',\n 'sawsbuck-summer.gif', 'sawsbuck-winter.gif', 'sawsbuck.gif',\n 'scatterbug.gif', 'sceptile-mega.gif', 'sceptile.gif', 'scizor-f.gif',\n 'scizor-mega.gif', 'scizor.gif', 'scolipede.gif', 'scorbunny.gif',\n 'scrafty.gif', 'scraggy.gif', 'scratchet.gif', 'scyther-f.gif',\n 'scyther.gif', 'seadra.gif', 'seaking.gif', 'sealeo.gif', 'seedot.gif',\n 'seel.gif', 'seismitoad.gif', 'sentret.gif', 'serperior.gif',\n 'servine.gif', 'seviper.gif', 'sewaddle.gif', 'shapedo-mega.gif',\n 'sharpedo-mega.gif', 'sharpedo.gif', 'shaymin-sky.gif', 'shaymin.gif',\n 'shedinja.gif', 'shelgon.gif', 'shellder.gif', 'shellos-east.gif',\n 'shellos.gif', 'shelmet.gif', 'shieldon.gif', 'shiftry-f.gif',\n 'shiftry.gif', 'shiinotic.gif', 'shinx-f.gif', 'shinx.gif',\n 
'shroomish.gif', 'shuckle.gif', 'shuppet.gif', 'sigilyph.gif',\n 'silcoon.gif', 'silicobra.gif', 'silvally-bug.gif', 'silvally-dark.gif',\n 'silvally-dragon.gif', 'silvally-electric.gif', 'silvally-fairy.gif',\n 'silvally-fighting.gif', 'silvally-fire.gif', 'silvally-flying.gif',\n 'silvally-ghost.gif', 'silvally-grass.gif', 'silvally-ground.gif',\n 'silvally-ice.gif', 'silvally-poison.gif', 'silvally-psychic.gif',\n 'silvally-rock.gif', 'silvally-steel.gif', 'silvally-water.gif',\n 'silvally.gif', 'simipour.gif', 'simisage.gif', 'simisear.gif',\n 'sinistea-antique.gif', 'sinistea.gif', 'sirfetchd.gif',\n 'sizzlipede.gif', 'skarmory.gif', 'skiddo.gif', 'skiploom.gif',\n 'skitty.gif', 'skorupi.gif', 'skrelp.gif', 'skuntank.gif',\n 'skwovet.gif', 'slaking.gif', 'slakoth.gif', 'sliggoo.gif',\n 'slowbro-mega.gif', 'slowbro.gif', 'slowking.gif', 'slowpoke-galar.gif',\n 'slowpoke.gif', 'slugma.gif', 'slurpuff.gif', 'smeargle.gif',\n 'smogecko.gif', 'smoguana.gif', 'smokomodo.gif', 'smoochum.gif',\n 'snaelstrom.gif', 'sneasel-f.gif', 'sneasel.gif', 'snivy.gif',\n 'snom.gif', 'snorlax-gmax.gif', 'snorlax.gif', 'snornut.gif',\n 'snorunt.gif', 'snover-f.gif', 'snover.gif', 'snubbull.gif',\n 'sobble.gif', 'solgaleo.gif', 'solosis.gif', 'solrock.gif',\n 'spearow.gif', 'spewpa.gif', 'spheal.gif', 'spinarak.gif', 'spinda.gif',\n 'spiritomb.gif', 'spoink.gif', 'spritzee.gif', 'squirtle.gif',\n 'stakataka.gif', 'stantler.gif', 'staraptor-f.gif', 'staraptor.gif',\n 'staravia-f.gif', 'staravia.gif', 'starly-f.gif', 'starly.gif',\n 'starmie.gif', 'staryu.gif', 'steelix-f.gif', 'steelix-mega.gif',\n 'steelix.gif', 'steenee.gif', 'stonjourner.gif', 'stoutland.gif',\n 'stratagem.gif', 'stufful.gif', 'stunfisk-galar.gif', 'stunfisk.gif',\n 'stunky.gif', 'substitute.gif', 'sudowoodo-f.gif', 'sudowoodo.gif',\n 'suicune.gif', 'sunflora.gif', 'sunkern.gif', 'surskit.gif',\n 'swablu.gif', 'swadloon.gif', 'swalot-f.gif', 'swalot.gif',\n 'swampert-mega.gif', 'swampert.gif', 'swanna.gif', 
'swellow.gif',\n 'swinub.gif', 'swirlix.gif', 'swoobat.gif', 'sylveon.gif',\n 'taillow.gif', 'talonflame.gif', 'tangela.gif', 'tangrowth-f.gif',\n 'tangrowth.gif', 'tapubulu.gif', 'tapufini.gif', 'tapukoko.gif',\n 'tapulele.gif', 'tauros.gif', 'teddiursa.gif', 'tentacool.gif',\n 'tentacruel.gif', 'tepig.gif', 'terrakion.gif', 'thievul.gif',\n 'throh.gif', 'thundurus-therian.gif', 'thundurus.gif', 'thwackey.gif',\n 'timburr.gif', 'tirtouga.gif', 'togedemaru-totem.gif', 'togedemaru.gif',\n 'togekiss.gif', 'togepi.gif', 'togetic.gif', 'tomohawk.gif',\n 'torchic-f.gif', 'torchic.gif', 'torkoal.gif', 'tornadus-therian.gif',\n 'tornadus.gif', 'torracat.gif', 'torterra.gif', 'totodile.gif',\n 'toucannon.gif', 'toxapex.gif', 'toxel.gif', 'toxicroak-f.gif',\n 'toxicroak.gif', 'toxtricity-gmax.gif', 'toxtricity-lowkey.gif',\n 'toxtricity.gif', 'tranquill.gif', 'trapinch.gif', 'treecko.gif',\n 'trevenant.gif', 'tropius.gif', 'trubbish.gif', 'trumbeak.gif',\n 'tsareena.gif', 'turtonator.gif', 'turtwig.gif', 'tympole.gif',\n 'tynamo.gif', 'typenull.gif', 'typhlosion.gif', 'tyranitar-mega.gif',\n 'tyranitar.gif', 'tyrantrum.gif', 'tyrantum.gif', 'tyrogue.gif',\n 'tyrunt.gif', 'umbreon.gif', 'unfezant-f.gif', 'unfezant.gif',\n 'unown-b.gif', 'unown-c.gif', 'unown-d.gif', 'unown-e.gif',\n 'unown-exclamation.gif', 'unown-f.gif', 'unown-g.gif', 'unown-h.gif',\n 'unown-i.gif', 'unown-j.gif', 'unown-k.gif', 'unown-l.gif',\n 'unown-m.gif', 'unown-n.gif', 'unown-o.gif', 'unown-p.gif',\n 'unown-q.gif', 'unown-question.gif', 'unown-r.gif', 'unown-s.gif',\n 'unown-t.gif', 'unown-u.gif', 'unown-v.gif', 'unown-w.gif',\n 'unown-x.gif', 'unown-y.gif', 'unown-z.gif', 'unown.gif',\n 'ursaring-f.gif', 'ursaring.gif', 'uxie.gif', 'vanillish.gif',\n 'vanillite.gif', 'vanilluxe.gif', 'vaporeon.gif', 'venipede.gif',\n 'venomoth.gif', 'venonat.gif', 'venusaur-mega.gif', 'venusaur.gif',\n 'vespiquen.gif', 'vibrava.gif', 'victini.gif', 'victreebel.gif',\n 'vigoroth.gif', 'vikavolt-totem.gif', 
'vikavolt.gif', 'vileplume.gif',\n 'virizion.gif', 'vivillon-archipelago.gif', 'vivillon-continental.gif',\n 'vivillon-elegant.gif', 'vivillon-fancy.gif', 'vivillon-garden.gif',\n 'vivillon-highplains.gif', 'vivillon-icysnow.gif',\n 'vivillon-jungle.gif', 'vivillon-marine.gif', 'vivillon-modern.gif',\n 'vivillon-monsoon.gif', 'vivillon-ocean.gif', 'vivillon-pokeball.gif',\n 'vivillon-polar.gif', 'vivillon-river.gif', 'vivillon-sandstorm.gif',\n 'vivillon-savanna.gif', 'vivillon-sun.gif', 'vivillon-tundra.gif',\n 'vivillon.gif', 'volbeat.gif', 'volcanion.gif', 'volcarona.gif',\n 'volkraken.gif', 'voltorb.gif', 'vullaby.gif', 'vulpix-alola.gif',\n 'vulpix.gif', 'wailmer.gif', 'wailord.gif', 'walrein.gif',\n 'wartortle.gif', 'watchog.gif', 'weavile-f.gif', 'weavile.gif',\n 'weedle.gif', 'weepinbell.gif', 'weezing-galar.gif', 'weezing.gif',\n 'whimsicott.gif', 'whirlipede.gif', 'whiscash.gif', 'whismur.gif',\n 'wigglytuff.gif', 'wimpod.gif', 'wingull.gif', 'wishiwashi-school.gif',\n 'wishiwashi.gif', 'wobbuffet-f.gif', 'wobbuffet.gif', 'woobat.gif',\n 'wooloo.gif', 'wooper-f.gif', 'wooper.gif', 'wormadam-sandy.gif',\n 'wormadam-trash.gif', 'wormadam.gif', 'wurmple.gif', 'wynaut.gif',\n 'xatu-f.gif', 'xatu.gif', 'xerneas-neutral.gif', 'xerneas.gif',\n 'xurkitree.gif', 'yamask-galar.gif', 'yamask.gif', 'yamper.gif',\n 'yanma.gif', 'yanmega.gif', 'yungoos.gif', 'yveltal.gif',\n 'zacian-crowned.gif', 'zacian.gif', 'zamazenta-crowned.gif',\n 'zamazenta.gif', 'zangoose.gif', 'zapdos.gif', 'zebstrika.gif',\n 'zekrom.gif', 'zeraora.gif', 'zigzagoon-galar.gif', 'zigzagoon.gif',\n 'zoroark.gif', 'zorua.gif', 'zubat-f.gif', 'zubat.gif', 'zweilous.gif',\n 'zygarde-10.gif', 'zygarde-complete.gif', 'zygarde.gif']\nfor i in filename:\n url = 'https://play.pokemonshowdown.com/sprites/ani/{}'.format(i)\n file_name = str(i[:-4])\n dl_img(url, 'files/pokemon/front', file_name)\n",
"step-5": "import urllib.request\r\nfrom urllib.request import Request, urlopen\r\nimport json\r\n\r\nfrom requests import get\r\nfrom requests.exceptions import RequestException\r\nfrom contextlib import closing\r\nfrom bs4 import BeautifulSoup\r\n\r\n\"\"\"\r\nWeb Scraper ======================================================================\r\n\"\"\"\r\n\r\n\r\ndef simple_get(url):\r\n \"\"\"\r\n Attempts to get the content at `url` by making an HTTP GET request.\r\n If the content-type of response is some kind of HTML/XML, return the\r\n text content, otherwise return None.\r\n \"\"\"\r\n try:\r\n with closing(get(url, stream=True)) as resp:\r\n if is_good_response(resp):\r\n return resp.content\r\n else:\r\n return None\r\n\r\n except RequestException as e:\r\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\r\n return None\r\n\r\n\r\ndef is_good_response(resp):\r\n \"\"\"\r\n Returns True if the response seems to be HTML, False otherwise.\r\n \"\"\"\r\n content_type = resp.headers['Content-Type'].lower()\r\n return (resp.status_code == 200\r\n and content_type is not None\r\n and content_type.find('html') > -1)\r\n\r\n\r\ndef log_error(e):\r\n \"\"\"\r\n It is always a good idea to log errors.\r\n This function just prints them, but you can\r\n make it do anything.\r\n \"\"\"\r\n print(e)\r\n\r\n\r\n\r\ndef save_json(file):\r\n with open('gif_list.txt', 'w') as f:\r\n f.write(file)\r\n\r\n\r\n\"\"\"\r\n Scraping\r\n\"\"\"\r\n\r\n# req = Request('https://play.pokemonshowdown.com/sprites/ani/', headers={'User-Agent': 'Mozilla/5.0'})\r\n# url = urlopen(req).read()\r\n# url = 'https://play.pokemonshowdown.com/sprites/ani/'\r\n# raw_html = simple_get(url)\r\n# soup = BeautifulSoup(url, 'lxml')\r\n# # a = soup.find_all('td', attrs={'valign': 'top'})\r\n# a = soup.find_all('a')\r\n# videolist = []\r\n# print(a)\r\n# for v in a:\r\n# tmp = v['href']\r\n# videolist.append(tmp)\r\n# filename = videolist[5:]\r\n# print(filename)\r\n\r\n\r\ndef 
dl_img(url, file_path, file_name):\r\n full_path = file_path + '/' + file_name + '.gif'\r\n opener = urllib.request.build_opener()\r\n opener.addheaders = [('User-agent', 'Mozilla/5.0')]\r\n urllib.request.install_opener(opener)\r\n urllib.request.urlretrieve(url, full_path)\r\n\r\n\r\nfilename = ['abomasnow-f.gif', 'abomasnow-mega.gif', 'abomasnow.gif', 'abra.gif', 'absol-mega.gif', 'absol.gif', 'accelgor.gif', 'aegislash-blade.gif', 'aegislash.gif', 'aerodactyl-mega.gif', 'aerodactyl.gif', 'aggron-mega.gif', 'aggron.gif', 'aipom-f.gif', 'aipom.gif', 'alakazam-mega.gif', 'alakazam.gif', 'alcremie-caramel-swirl-berry.gif', 'alcremie-caramel-swirl-clover.gif', 'alcremie-caramel-swirl-flower.gif', 'alcremie-caramel-swirl-love.gif', 'alcremie-caramel-swirl-ribbon.gif', 'alcremie-caramel-swirl-star.gif', 'alcremie-caramelswirl.gif', 'alcremie-gmax.gif', 'alcremie-lemon-cream-berry.gif', 'alcremie-lemon-cream-clover.gif', 'alcremie-lemon-cream-flower.gif', 'alcremie-lemon-cream-love.gif', 'alcremie-lemon-cream-ribbon.gif', 'alcremie-lemon-cream-star.gif', 'alcremie-lemoncream.gif', 'alcremie-matcha-cream-berry.gif', 'alcremie-matcha-cream-clover.gif', 'alcremie-matcha-cream-flower.gif', 'alcremie-matcha-cream-love.gif', 'alcremie-matcha-cream-ribbon.gif', 'alcremie-matcha-cream-star.gif', 'alcremie-matchacream.gif', 'alcremie-mint-cream-berry.gif', 'alcremie-mint-cream-clover.gif', 'alcremie-mint-cream-flower.gif', 'alcremie-mint-cream-love.gif', 'alcremie-mint-cream-ribbon.gif', 'alcremie-mint-cream-star.gif', 'alcremie-mintcream.gif', 'alcremie-rainbow-swirl-berry.gif', 'alcremie-rainbow-swirl-clover.gif', 'alcremie-rainbow-swirl-flower.gif', 'alcremie-rainbow-swirl-love.gif', 'alcremie-rainbow-swirl-ribbon.gif', 'alcremie-rainbow-swirl-star.gif', 'alcremie-rainbowswirl.gif', 'alcremie-ruby-cream-berry.gif', 'alcremie-ruby-cream-clover.gif', 'alcremie-ruby-cream-flower.gif', 'alcremie-ruby-cream-love.gif', 'alcremie-ruby-cream-ribbon.gif', 
'alcremie-ruby-cream-star.gif', 'alcremie-ruby-swirl-berry.gif', 'alcremie-ruby-swirl-clover.gif', 'alcremie-ruby-swirl-flower.gif', 'alcremie-ruby-swirl-love.gif', 'alcremie-ruby-swirl-ribbon.gif', 'alcremie-ruby-swirl-star.gif', 'alcremie-rubycream.gif', 'alcremie-rubyswirl.gif', 'alcremie-salted-cream-berry.gif', 'alcremie-salted-cream-clover.gif', 'alcremie-salted-cream-flower.gif', 'alcremie-salted-cream-love.gif', 'alcremie-salted-cream-ribbon.gif', 'alcremie-salted-cream-star.gif', 'alcremie-saltedcream.gif', 'alcremie-vanilla-cream-berry.gif', 'alcremie-vanilla-cream-clover.gif', 'alcremie-vanilla-cream-flower.gif', 'alcremie-vanilla-cream-love.gif', 'alcremie-vanilla-cream-ribbon.gif', 'alcremie-vanilla-cream-star.gif', 'alcremie.gif', 'alomomola.gif', 'altaria-mega.gif', 'altaria.gif', 'amaura.gif', 'ambipom-f.gif', 'ambipom.gif', 'amoonguss.gif', 'ampharos-mega.gif', 'ampharos.gif', 'anorith.gif', 'appletun-gmax.gif', 'appletun.gif', 'applin.gif', 'araquanid-totem.gif', 'araquanid.gif', 'arbok.gif', 'arcanine.gif', 'arceus-bug.gif', 'arceus-dark.gif', 'arceus-dragon.gif', 'arceus-electric.gif', 'arceus-fairy.gif', 'arceus-fighting.gif', 'arceus-fire.gif', 'arceus-flying.gif', 'arceus-ghost.gif', 'arceus-grass.gif', 'arceus-ground.gif', 'arceus-ice.gif', 'arceus-poison.gif', 'arceus-psychic.gif', 'arceus-rock.gif', 'arceus-steel.gif', 'arceus-water.gif', 'arceus.gif', 'archen.gif', 'archeops.gif', 'arctovish.gif', 'arctozolt.gif', 'ariados.gif', 'armaldo.gif', 'aromatisse.gif', 'aron.gif', 'arrokuda.gif', 'articuno.gif', 'audino-mega.gif', 'audino.gif', 'aurorus.gif', 'aurumoth.gif', 'avalugg.gif', 'axew.gif', 'azelf.gif', 'azumarill.gif', 'azurill.gif', 'bagon.gif', 'baltoy.gif', 'banette-mega.gif', 'banette.gif', 'barbaracle.gif', 'barboach.gif', 'barboarch.gif', 'barraskewda.gif', 'basculin-bluestriped.gif', 'basculin.gif', 'bastiodon.gif', 'bayleef.gif', 'beartic.gif', 'beautifly-f.gif', 'beautifly.gif', 'beedrill-mega.gif', 'beedrill.gif', 
'beheeyem.gif', 'beldum.gif', 'bellossom.gif', 'bellsprout.gif', 'bergmite.gif', 'bewear.gif', 'bibarel-f.gif', 'bibarel.gif', 'bidoof-f.gif', 'bidoof.gif', 'binacle.gif', 'bisharp.gif', 'blacephalon.gif', 'blastoise-mega.gif', 'blastoise.gif', 'blaziken-f.gif', 'blaziken-mega.gif', 'blaziken.gif', 'blipbug.gif', 'blissey.gif', 'blitzle.gif', 'boldore.gif', 'boltund.gif', 'bonsly.gif', 'bouffalant.gif', 'bounsweet.gif', 'braixen.gif', 'braviary.gif', 'breloom.gif', 'brionne.gif', 'bronzong.gif', 'bronzor.gif', 'bruxish.gif', 'budew.gif', 'buizel-f.gif', 'buizel.gif', 'bulbasaur.gif', 'buneary.gif', 'bunnelby.gif', 'burmy-sandy.gif', 'burmy-trash.gif', 'burmy.gif', 'butterfree-gmax.gif', 'butterfree.gif', 'buzzwole.gif', 'cacnea.gif', 'cacturne-f.gif', 'cacturne.gif', 'camerupt-f.gif', 'camerupt-mega.gif', 'camerupt.gif', 'camperupt-mega.gif', 'carbink.gif', 'caribolt.gif', 'carkol.gif', 'carnivine.gif', 'carracosta.gif', 'carvanha.gif', 'cascoon.gif', 'castform-rainy.gif', 'castform-snowy.gif', 'castform-sunny.gif', 'castform.gif', 'caterpie.gif', 'cawmodore.gif', 'celebi.gif', 'celesteela.gif', 'centiskorch-gmax.gif', 'centiskorch.gif', 'chandelure.gif', 'chansey.gif', 'charizard-gmax.gif', 'charizard-megax.gif', 'charizard-megay.gif', 'charizard.gif', 'charjabug.gif', 'charmander.gif', 'charmeleon.gif', 'chatot.gif', 'cherrim-sunshine.gif', 'cherrim.gif', 'cherubi.gif', 'chesnaught.gif', 'chespin.gif', 'chewtle.gif', 'chikorita.gif', 'chimchar.gif', 'chimecho.gif', 'chinchou.gif', 'chingling.gif', 'cinccino.gif', 'cinderace.gif', 'clamperl.gif', 'clauncher.gif', 'clawitzer.gif', 'claydol.gif', 'clefable.gif', 'clefairy.gif', 'cleffa.gif', 'clobbopus.gif', 'cloyster.gif', 'coalossal-gmax.gif', 'coalossal.gif', 'cobalion.gif', 'cofagrigus.gif', 'combee-f.gif', 'combee.gif', 'combusken-f.gif', 'combusken.gif', 'comfey.gif', 'conkeldurr.gif', 'copperajah-gmax.gif', 'copperajah.gif', 'corphish.gif', 'corsola-galar.gif', 'corsola.gif', 'corviknight-gmax.gif', 
'corviknight.gif', 'corvisquire.gif', 'cosmoem.gif', 'cosmog.gif', 'cottonee.gif', 'crabominable.gif', 'crabrawler.gif', 'cradily.gif', 'cramorant-gorging.gif', 'cramorant-gulping.gif', 'cramorant.gif', 'cranidos.gif', 'crawdaunt.gif', 'cresselia.gif', 'croagunk-f.gif', 'croagunk.gif', 'crobat.gif', 'croconaw.gif', 'crucibelle-mega.gif', 'crucibelle.gif', 'crustle.gif', 'cryogonal.gif', 'cubchoo.gif', 'cubone.gif', 'cufant.gif', 'cursola.gif', 'cutiefly.gif', 'cyndaquil.gif', 'darkrai.gif', 'darmanitan-galar.gif', 'darmanitan-galarzen.gif', 'darmanitan-zen.gif', 'darmanitan.gif', 'dartrix.gif', 'darumaka-galar.gif', 'darumaka.gif', 'decidueye.gif', 'dedenne.gif', 'deerling-autumn.gif', 'deerling-summer.gif', 'deerling-winter.gif', 'deerling.gif', 'deino.gif', 'delcatty.gif', 'delibird.gif', 'delphox.gif', 'deoxys-attack.gif', 'deoxys-defense.gif', 'deoxys-speed.gif', 'deoxys.gif', 'dewgong.gif', 'dewott.gif', 'dewpider.gif', 'dhelmise.gif', 'dialga.gif', 'diancie-mega.gif', 'diancie.gif', 'diggersby.gif', 'diglett-alola.gif', 'diglett.gif', 'ditto.gif', 'dodrio.gif', 'doduo.gif', 'donphan-f.gif', 'donphan.gif', 'dottler.gif', 'doublade.gif', 'dracovish.gif', 'dracozolt.gif', 'dragalge.gif', 'dragapult.gif', 'dragonair.gif', 'dragonite.gif', 'drakloak.gif', 'drampa.gif', 'drapion.gif', 'dratini.gif', 'drednaw-gmax.gif', 'drednaw.gif', 'dreepy.gif', 'drifblim.gif', 'drifloon.gif', 'drilbur.gif', 'drizzile.gif', 'drowzee.gif', 'druddigon.gif', 'dubwool.gif', 'ducklett.gif', 'dugtrio-alola.gif', 'dugtrio.gif', 'dunsparce.gif', 'duosion.gif', 'duraludon-gmax.gif', 'duraludon.gif', 'durant.gif', 'dusclops.gif', 'dusknoir.gif', 'duskull.gif', 'dustox-f.gif', 'dustox.gif', 'dwebble.gif', 'eelektrik.gif', 'eelektross.gif', 'eevee-gmax.gif', 'eevee-starter.gif', 'eevee.gif', 'eiscue-noice.gif', 'eiscue.gif', 'ekans.gif', 'eldegoss.gif', 'electabuzz.gif', 'electivire.gif', 'electrike.gif', 'electrode.gif', 'elekid.gif', 'elgyem.gif', 'emboar.gif', 'emolga.gif', 
'empoleon.gif', 'entei.gif', 'equilibra.gif', 'escavalier.gif', 'espeon.gif', 'espurr.gif', 'eternatus-eternamax.gif', 'eternatus.gif', 'excadrill.gif', 'exeggcute.gif', 'exeggutor-alola.gif', 'exeggutor.gif', 'exploud.gif', 'falinks.gif', 'farfetchd-galar.gif', 'farfetchd.gif', 'fearow.gif', 'feebas.gif', 'fennekin.gif', 'feraligatr.gif', 'ferroseed.gif', 'ferrothorn.gif', 'fidgit.gif', 'finneon-f.gif', 'finneon.gif', 'flaaffy.gif', 'flabebe-blue.gif', 'flabebe-orange.gif', 'flabebe-white.gif', 'flabebe-yellow.gif', 'flabebe.gif', 'flapple-gmax.gif', 'flapple.gif', 'flareon.gif', 'fletchinder.gif', 'fletchling.gif', 'floatzel-f.gif', 'floatzel.gif', 'floette-blue.gif', 'floette-eternal.gif', 'floette-orange.gif', 'floette-white.gif', 'floette-yellow.gif', 'floette.gif', 'florges-blue.gif', 'florges-orange.gif', 'florges-white.gif', 'florges-yellow.gif', 'florges.gif', 'flygon.gif', 'fomantis.gif', 'foongus.gif', 'forretress.gif', 'fraxure.gif', 'frillish-f.gif', 'frillish.gif', 'froakie.gif', 'frogadier.gif', 'froslass.gif', 'frosmoth.gif', 'furfrou-dandy.gif', 'furfrou-debutante.gif', 'furfrou-diamond.gif', 'furfrou-heart.gif', 'furfrou-kabuki.gif', 'furfrou-lareine.gif', 'furfrou-matron.gif', 'furfrou-pharaoh.gif', 'furfrou-star.gif', 'furfrou.gif', 'furret.gif', 'gabite-f.gif', 'gabite.gif', 'gallade-mega.gif', 'gallade.gif', 'galvantula.gif', 'garbodor-gmax.gif', 'garbodor.gif', 'garchomp-f.gif', 'garchomp-mega.gif', 'garchomp.gif', 'gardevoir-mega.gif', 'gardevoir.gif', 'gastly.gif', 'gastrodon-east.gif', 'gastrodon.gif', 'genesect-burn.gif', 'genesect-chill.gif', 'genesect-douse.gif', 'genesect-shock.gif', 'genesect.gif', 'gengar-gmax.gif', 'gengar-mega.gif', 'gengar.gif', 'geodude-alola.gif', 'geodude.gif', 'gible-f.gif', 'gible.gif', 'gigalith.gif', 'girafarig-f.gif', 'girafarig.gif', 'giratina-origin.gif', 'giratina.gif', 'glaceon.gif', 'glalie-mega.gif', 'glalie.gif', 'glameow.gif', 'gligar-f.gif', 'gligar.gif', 'gliscor.gif', 'gloom.gif', 'gogoat.gif', 
'golbat-f.gif', 'golbat.gif', 'goldeen.gif', 'golduck.gif', 'golem-alola.gif', 'golem.gif', 'golett.gif', 'golisopod.gif', 'golurk.gif', 'goodra.gif', 'goomy.gif', 'gorebyss.gif', 'gossifleur.gif', 'gothita.gif', 'gothitelle.gif', 'gothorita.gif', 'gourgeist-large.gif', 'gourgeist-small.gif', 'gourgeist-super.gif', 'gourgeist.gif', 'granbull.gif', 'grapploct.gif', 'graveler-alola.gif', 'graveler.gif', 'greedent.gif', 'greninja-ash.gif', 'greninja.gif', 'grimer-alola.gif', 'grimer.gif', 'grimmsnarl-gmax.gif', 'grimmsnarl.gif', 'grookey.gif', 'grotle.gif', 'groudon-primal.gif', 'groudon.gif', 'grovyle.gif', 'growlithe.gif', 'grubbin.gif', 'grumpig.gif', 'gulpin-f.gif', 'gulpin.gif', 'gumshoos-totem.gif', 'gumshoos.gif', 'gurdurr.gif', 'guzzlord.gif', 'gyarados-mega.gif', 'gyarados.gif', 'hakamoo.gif', 'happiny.gif', 'hariyama.gif', 'hatenna.gif', 'hatterene-gmax.gif', 'hatterene.gif', 'hattrem.gif', 'haunter.gif', 'hawlucha.gif', 'haxorus.gif', 'heatmor.gif', 'heatran.gif', 'heliolisk.gif', 'helioptile.gif', 'heracross-f.gif', 'heracross-mega.gif', 'heracross.gif', 'herdier.gif', 'hippopotas-f.gif', 'hippopotas.gif', 'hippowdon-f.gif', 'hippowdon.gif', 'hitmonchan.gif', 'hitmonlee.gif', 'hitmontop.gif', 'honchkrow.gif', 'honedge.gif', 'hooh.gif', 'hoopa-mega.gif', 'hoopa-unbound.gif', 'hoopa.gif', 'hoothoot.gif', 'hoppip.gif', 'horsea.gif', 'houndoom-f.gif', 'houndoom-mega.gif', 'houndoom.gif', 'houndour.gif', 'huntail.gif', 'hydreigon.gif', 'hypno.gif', 'igglybuff.gif', 'illumise.gif', 'impidimp.gif', 'incineroar.gif', 'indeedee-f.gif', 'indeedee.gif', 'infernape.gif', 'inkay.gif', 'inteleon.gif', 'ivysaur.gif', 'jangmoo.gif', 'jellicent-f.gif', 'jellicent.gif', 'jigglypuff.gif', 'jirachi.gif', 'jolteon.gif', 'joltik.gif', 'jumbao.gif', 'jumpluff.gif', 'jynx.gif', 'kabuto.gif', 'kabutops.gif', 'kadabra.gif', 'kakuna.gif', 'kangaskhan-mega.gif', 'kangaskhan.gif', 'karrablast.gif', 'kartana.gif', 'kecleon.gif', 'keldeo-resolute.gif', 'keldeo.gif', 'kerfluffle-f.gif', 
'kerfluffle.gif', 'kingdra.gif', 'kingler-gmax.gif', 'kingler.gif', 'kirlia.gif', 'kitsunoh-f.gif', 'kitsunoh.gif', 'klang.gif', 'klefki.gif', 'klink.gif', 'klinklang.gif', 'koffing.gif', 'komala.gif', 'kommo-o-totem.gif', 'kommoo.gif', 'krabby.gif', 'kricketot-f.gif', 'kricketot.gif', 'kricketune-f.gif', 'kricketune.gif', 'krilowatt.gif', 'krokorok.gif', 'krookodile.gif', 'kyogre-primal.gif', 'kyogre.gif', 'kyurem-black.gif', 'kyurem-white.gif', 'kyurem.gif', 'lairon.gif', 'lampent.gif', 'landorus-therian.gif', 'landorus.gif', 'lanturn.gif', 'lapras-gmax.gif', 'lapras.gif', 'larvesta.gif', 'larvitar.gif', 'latias-mega.gif', 'latias.gif', 'latios-mega.gif', 'latios.gif', 'leafeon.gif', 'leavanny.gif', 'ledian-f.gif', 'ledian.gif', 'ledyba-f.gif', 'ledyba.gif', 'lickilicky.gif', 'lickitung.gif', 'liepard.gif', 'lileep.gif', 'lilligant.gif', 'lillipup.gif', 'linoone-galar.gif', 'linoone.gif', 'litleo.gif', 'litten.gif', 'litwick.gif', 'lombre.gif', 'lopunny-mega.gif', 'lopunny.gif', 'lotad.gif', 'loudred.gif', 'lucario-mega.gif', 'lucario.gif', 'ludicolo-f.gif', 'ludicolo.gif', 'lugia.gif', 'lumineon-f.gif', 'lumineon.gif', 'lunala.gif', 'lunatone.gif', 'lurantis-totem.gif', 'lurantis.gif', 'luvdisc.gif', 'luxio-f.gif', 'luxio.gif', 'luxray-f.gif', 'luxray.gif', 'lycanroc-dusk.gif', 'lycanroc-midnight.gif', 'lycanroc.gif', 'machamp-gmax.gif', 'machamp.gif', 'machoke.gif', 'machop.gif', 'magby.gif', 'magcargo.gif', 'magearna-original.gif', 'magearna.gif', 'magikarp-f.gif', 'magikarp.gif', 'magmar.gif', 'magmortar.gif', 'magnemite.gif', 'magneton.gif', 'magnezone.gif', 'makuhita.gif', 'malaconda.gif', 'malamar.gif', 'mamoswine-f.gif', 'mamoswine.gif', 'manaphy.gif', 'mandibuzz.gif', 'manectric-mega.gif', 'manectric.gif', 'mankey.gif', 'mantine.gif', 'mantyke.gif', 'maractus.gif', 'mareanie.gif', 'mareep.gif', 'marill.gif', 'marowak-alola-totem.gif', 'marowak-alola.gif', 'marowak-totem.gif', 'marowak.gif', 'marshadow.gif', 'marshtomp.gif', 'masquerain.gif', 
'mawile-mega.gif', 'mawile.gif', 'medicham-f.gif', 'medicham-mega.gif', 'medicham.gif', 'meditite-f.gif', 'meditite.gif', 'meganium-f.gif', 'meganium.gif', 'melmetal-gmax.gif', 'melmetal.gif', 'meloetta-pirouette.gif', 'meloetta.gif', 'meltan.gif', 'meowstic-f.gif', 'meowstic.gif', 'meowth-alola.gif', 'meowth-galar.gif', 'meowth-gmax.gif', 'meowth.gif', 'mesprit.gif', 'metagross-mega.gif', 'metagross.gif', 'metang.gif', 'metapod.gif', 'mew.gif', 'mewtwo-megax.gif', 'mewtwo-megay.gif', 'mewtwo.gif', 'mienfoo.gif', 'mienshao.gif', 'mightyena.gif', 'milcery.gif', 'milotic-f.gif', 'milotic.gif', 'miltank.gif', 'mimejr.gif', 'mimikyu-busted-totem.gif', 'mimikyu-busted.gif', 'mimikyu-totem.gif', 'mimikyu.gif', 'minccino.gif', 'minior-blue.gif', 'minior-green.gif', 'minior-indigo.gif', 'minior-meteor.gif', 'minior-orange.gif', 'minior-violet.gif', 'minior-yellow.gif', 'minior.gif', 'minun.gif', 'misdreavus.gif', 'mismagius.gif', 'mollux.gif', 'moltres.gif', 'monferno.gif', 'morelull.gif', 'morgrem.gif', 'morpeko-hangry.gif', 'morpeko.gif', 'mothim.gif', 'mrmime-galar.gif', 'mrmime.gif', 'mrrime.gif', 'mudbray.gif', 'mudkip.gif', 'mudsdale.gif', 'muk-alola.gif', 'muk.gif', 'munchlax.gif', 'munna.gif', 'murkrow-f.gif', 'murkrow.gif', 'musharna.gif', 'naganadel.gif', 'natu.gif', 'naviathan.gif', 'necrozma-dawnwings.gif', 'necrozma-duskmane.gif', 'necrozma-ultra.gif', 'necrozma.gif', 'necturna.gif', 'nickit.gif', 'nidoking.gif', 'nidoqueen.gif', 'nidoran-f.gif', 'nidoran.gif', 'nidoranf.gif', 'nidoranm.gif', 'nidorina.gif', 'nidorino.gif', 'nihilego.gif', 'nincada.gif', 'ninetales-alola.gif', 'ninetales.gif', 'ninjask.gif', 'noctowl.gif', 'noibat.gif', 'noivern.gif', 'nosepass.gif', 'numel-f.gif', 'numel.gif', 'nuzleaf-f.gif', 'nuzleaf.gif', 'obstagoon.gif', 'octillery-f.gif', 'octillery.gif', 'oddish.gif', 'omanyte.gif', 'omastar.gif', 'onix.gif', 'oranguru.gif', 'orbeetle-gmax.gif', 'orbeetle.gif', 'oricorio-pau.gif', 'oricorio-pompom.gif', 'oricorio-sensu.gif', 
'oricorio.gif', 'oshawott.gif', 'pachirisu-f.gif', 'pachirisu.gif', 'pajantom.gif', 'palkia.gif', 'palossand.gif', 'palpitoad.gif', 'pancham.gif', 'pangoro.gif', 'panpour.gif', 'pansage.gif', 'pansear.gif', 'paras.gif', 'parasect.gif', 'passimian.gif', 'patrat.gif', 'pawniard.gif', 'pelipper.gif', 'perrserker.gif', 'persian-alola.gif', 'persian.gif', 'petilil.gif', 'phanpy.gif', 'phantump.gif', 'pheromosa.gif', 'phione.gif', 'pichu.gif', 'pidgeot-mega.gif', 'pidgeot.gif', 'pidgeotto.gif', 'pidgey.gif', 'pidove.gif', 'pignite.gif', 'pikachu-alola.gif', 'pikachu-belle.gif', 'pikachu-cosplay.gif', 'pikachu-f.gif', 'pikachu-gmax.gif', 'pikachu-hoenn.gif', 'pikachu-kalos.gif', 'pikachu-libre.gif', 'pikachu-original.gif', 'pikachu-partner.gif', 'pikachu-phd.gif', 'pikachu-pop-star.gif', 'pikachu-popstar.gif', 'pikachu-rockstar.gif', 'pikachu-sinnoh.gif', 'pikachu-starter-f.gif', 'pikachu-starter.gif', 'pikachu-unova.gif', 'pikachu.gif', 'pikipek.gif', 'piloswine-f.gif', 'piloswine.gif', 'pincurchin.gif', 'pineco.gif', 'pinsir-mega.gif', 'pinsir.gif', 'piplup.gif', 'plasmanta.gif', 'pluffle.gif', 'plusle.gif', 'poipole.gif', 'pokestarblackbelt.gif', 'pokestarblackdoor.gif', 'pokestarbrycenman.gif', 'pokestarf00.gif', 'pokestarf002.gif', 'pokestargiant.gif', 'pokestarhumanoid.gif', 'pokestarmonster.gif', 'pokestarmt.gif', 'pokestarmt2.gif', 'pokestarsmeargle.gif', 'pokestarspirit.gif', 'pokestartransport.gif', 'pokestarufo-2.gif', 'pokestarufo.gif', 'pokestarufo2.gif', 'pokestarwhitedoor.gif', 'politoed-f.gif', 'politoed.gif', 'poliwag.gif', 'poliwhirl.gif', 'poliwrath.gif', 'polteageist-antique.gif', 'polteageist.gif', 'ponyta-galar.gif', 'ponyta.gif', 'poochyena.gif', 'popplio.gif', 'porygon-z.gif', 'porygon.gif', 'porygon2.gif', 'porygonz.gif', 'primarina.gif', 'primeape.gif', 'prinplup.gif', 'probopass.gif', 'psyduck.gif', 'pumpkaboo-large.gif', 'pumpkaboo-small.gif', 'pumpkaboo-super.gif', 'pumpkaboo.gif', 'pupitar.gif', 'purrloin.gif', 'purugly.gif', 'pyroak.gif', 
'pyroar-f.gif', 'pyroar.gif', 'pyukumuku.gif', 'quagsire-f.gif', 'quagsire.gif', 'quilava.gif', 'quilladin.gif', 'qwilfish.gif', 'raboot.gif', 'raichu-alola.gif', 'raichu.gif', 'raikou.gif', 'ralts.gif', 'rampardos.gif', 'rapidash-galar.gif', 'rapidash.gif', 'raticate-alola-totem.gif', 'raticate-alola.gif', 'raticate-f.gif', 'raticate-totem-a.gif', 'raticate.gif', 'rattata-alola.gif', 'rattata-f.gif', 'rattata.gif', 'rayquaza-mega.gif', 'rayquaza.gif', 'regice.gif', 'regigigas.gif', 'regirock.gif', 'registeel.gif', 'relicanth-f.gif', 'relicanth.gif', 'remoraid.gif', 'reshiram.gif', 'reuniclus.gif', 'rhydon-f.gif', 'rhydon.gif', 'rhyhorn.gif', 'rhyperior-f.gif', 'rhyperior.gif', 'ribombee.gif', 'rillaboom.gif', 'riolu.gif', 'rockruff.gif', 'roggenrola.gif', 'rolycoly.gif', 'rookidee.gif', 'roselia-f.gif', 'roselia.gif', 'roserade-f.gif', 'roserade.gif', 'rotom-f.gif', 'rotom-fan.gif', 'rotom-frost.gif', 'rotom-h.gif', 'rotom-heat.gif', 'rotom-m.gif', 'rotom-mow.gif', 'rotom-s.gif', 'rotom-w.gif', 'rotom-wash.gif', 'rotom.gif', 'rowlet.gif', 'rufflet.gif', 'runerigus.gif', 'sableye-mega.gif', 'sableye.gif', 'salamence-mega.gif', 'salamence.gif', 'salandit.gif', 'salazzle-totem.gif', 'salazzle.gif', 'samurott.gif', 'sandaconda-gmax.gif', 'sandaconda.gif', 'sandile.gif', 'sandshrew-alola.gif', 'sandshrew.gif', 'sandslash-alola.gif', 'sandslash.gif', 'sandygast.gif', 'sawk.gif', 'sawsbuck-autumn.gif', 'sawsbuck-summer.gif', 'sawsbuck-winter.gif', 'sawsbuck.gif', 'scatterbug.gif', 'sceptile-mega.gif', 'sceptile.gif', 'scizor-f.gif', 'scizor-mega.gif', 'scizor.gif', 'scolipede.gif', 'scorbunny.gif', 'scrafty.gif', 'scraggy.gif', 'scratchet.gif', 'scyther-f.gif', 'scyther.gif', 'seadra.gif', 'seaking.gif', 'sealeo.gif', 'seedot.gif', 'seel.gif', 'seismitoad.gif', 'sentret.gif', 'serperior.gif', 'servine.gif', 'seviper.gif', 'sewaddle.gif', 'shapedo-mega.gif', 'sharpedo-mega.gif', 'sharpedo.gif', 'shaymin-sky.gif', 'shaymin.gif', 'shedinja.gif', 'shelgon.gif', 
'shellder.gif', 'shellos-east.gif', 'shellos.gif', 'shelmet.gif', 'shieldon.gif', 'shiftry-f.gif', 'shiftry.gif', 'shiinotic.gif', 'shinx-f.gif', 'shinx.gif', 'shroomish.gif', 'shuckle.gif', 'shuppet.gif', 'sigilyph.gif', 'silcoon.gif', 'silicobra.gif', 'silvally-bug.gif', 'silvally-dark.gif', 'silvally-dragon.gif', 'silvally-electric.gif', 'silvally-fairy.gif', 'silvally-fighting.gif', 'silvally-fire.gif', 'silvally-flying.gif', 'silvally-ghost.gif', 'silvally-grass.gif', 'silvally-ground.gif', 'silvally-ice.gif', 'silvally-poison.gif', 'silvally-psychic.gif', 'silvally-rock.gif', 'silvally-steel.gif', 'silvally-water.gif', 'silvally.gif', 'simipour.gif', 'simisage.gif', 'simisear.gif', 'sinistea-antique.gif', 'sinistea.gif', 'sirfetchd.gif', 'sizzlipede.gif', 'skarmory.gif', 'skiddo.gif', 'skiploom.gif', 'skitty.gif', 'skorupi.gif', 'skrelp.gif', 'skuntank.gif', 'skwovet.gif', 'slaking.gif', 'slakoth.gif', 'sliggoo.gif', 'slowbro-mega.gif', 'slowbro.gif', 'slowking.gif', 'slowpoke-galar.gif', 'slowpoke.gif', 'slugma.gif', 'slurpuff.gif', 'smeargle.gif', 'smogecko.gif', 'smoguana.gif', 'smokomodo.gif', 'smoochum.gif', 'snaelstrom.gif', 'sneasel-f.gif', 'sneasel.gif', 'snivy.gif', 'snom.gif', 'snorlax-gmax.gif', 'snorlax.gif', 'snornut.gif', 'snorunt.gif', 'snover-f.gif', 'snover.gif', 'snubbull.gif', 'sobble.gif', 'solgaleo.gif', 'solosis.gif', 'solrock.gif', 'spearow.gif', 'spewpa.gif', 'spheal.gif', 'spinarak.gif', 'spinda.gif', 'spiritomb.gif', 'spoink.gif', 'spritzee.gif', 'squirtle.gif', 'stakataka.gif', 'stantler.gif', 'staraptor-f.gif', 'staraptor.gif', 'staravia-f.gif', 'staravia.gif', 'starly-f.gif', 'starly.gif', 'starmie.gif', 'staryu.gif', 'steelix-f.gif', 'steelix-mega.gif', 'steelix.gif', 'steenee.gif', 'stonjourner.gif', 'stoutland.gif', 'stratagem.gif', 'stufful.gif', 'stunfisk-galar.gif', 'stunfisk.gif', 'stunky.gif', 'substitute.gif', 'sudowoodo-f.gif', 'sudowoodo.gif', 'suicune.gif', 'sunflora.gif', 'sunkern.gif', 'surskit.gif', 'swablu.gif', 
'swadloon.gif', 'swalot-f.gif', 'swalot.gif', 'swampert-mega.gif', 'swampert.gif', 'swanna.gif', 'swellow.gif', 'swinub.gif', 'swirlix.gif', 'swoobat.gif', 'sylveon.gif', 'taillow.gif', 'talonflame.gif', 'tangela.gif', 'tangrowth-f.gif', 'tangrowth.gif', 'tapubulu.gif', 'tapufini.gif', 'tapukoko.gif', 'tapulele.gif', 'tauros.gif', 'teddiursa.gif', 'tentacool.gif', 'tentacruel.gif', 'tepig.gif', 'terrakion.gif', 'thievul.gif', 'throh.gif', 'thundurus-therian.gif', 'thundurus.gif', 'thwackey.gif', 'timburr.gif', 'tirtouga.gif', 'togedemaru-totem.gif', 'togedemaru.gif', 'togekiss.gif', 'togepi.gif', 'togetic.gif', 'tomohawk.gif', 'torchic-f.gif', 'torchic.gif', 'torkoal.gif', 'tornadus-therian.gif', 'tornadus.gif', 'torracat.gif', 'torterra.gif', 'totodile.gif', 'toucannon.gif', 'toxapex.gif', 'toxel.gif', 'toxicroak-f.gif', 'toxicroak.gif', 'toxtricity-gmax.gif', 'toxtricity-lowkey.gif', 'toxtricity.gif', 'tranquill.gif', 'trapinch.gif', 'treecko.gif', 'trevenant.gif', 'tropius.gif', 'trubbish.gif', 'trumbeak.gif', 'tsareena.gif', 'turtonator.gif', 'turtwig.gif', 'tympole.gif', 'tynamo.gif', 'typenull.gif', 'typhlosion.gif', 'tyranitar-mega.gif', 'tyranitar.gif', 'tyrantrum.gif', 'tyrantum.gif', 'tyrogue.gif', 'tyrunt.gif', 'umbreon.gif', 'unfezant-f.gif', 'unfezant.gif', 'unown-b.gif', 'unown-c.gif', 'unown-d.gif', 'unown-e.gif', 'unown-exclamation.gif', 'unown-f.gif', 'unown-g.gif', 'unown-h.gif', 'unown-i.gif', 'unown-j.gif', 'unown-k.gif', 'unown-l.gif', 'unown-m.gif', 'unown-n.gif', 'unown-o.gif', 'unown-p.gif', 'unown-q.gif', 'unown-question.gif', 'unown-r.gif', 'unown-s.gif', 'unown-t.gif', 'unown-u.gif', 'unown-v.gif', 'unown-w.gif', 'unown-x.gif', 'unown-y.gif', 'unown-z.gif', 'unown.gif', 'ursaring-f.gif', 'ursaring.gif', 'uxie.gif', 'vanillish.gif', 'vanillite.gif', 'vanilluxe.gif', 'vaporeon.gif', 'venipede.gif', 'venomoth.gif', 'venonat.gif', 'venusaur-mega.gif', 'venusaur.gif', 'vespiquen.gif', 'vibrava.gif', 'victini.gif', 'victreebel.gif', 
'vigoroth.gif', 'vikavolt-totem.gif', 'vikavolt.gif', 'vileplume.gif', 'virizion.gif', 'vivillon-archipelago.gif', 'vivillon-continental.gif', 'vivillon-elegant.gif', 'vivillon-fancy.gif', 'vivillon-garden.gif', 'vivillon-highplains.gif', 'vivillon-icysnow.gif', 'vivillon-jungle.gif', 'vivillon-marine.gif', 'vivillon-modern.gif', 'vivillon-monsoon.gif', 'vivillon-ocean.gif', 'vivillon-pokeball.gif', 'vivillon-polar.gif', 'vivillon-river.gif', 'vivillon-sandstorm.gif', 'vivillon-savanna.gif', 'vivillon-sun.gif', 'vivillon-tundra.gif', 'vivillon.gif', 'volbeat.gif', 'volcanion.gif', 'volcarona.gif', 'volkraken.gif', 'voltorb.gif', 'vullaby.gif', 'vulpix-alola.gif', 'vulpix.gif', 'wailmer.gif', 'wailord.gif', 'walrein.gif', 'wartortle.gif', 'watchog.gif', 'weavile-f.gif', 'weavile.gif', 'weedle.gif', 'weepinbell.gif', 'weezing-galar.gif', 'weezing.gif', 'whimsicott.gif', 'whirlipede.gif', 'whiscash.gif', 'whismur.gif', 'wigglytuff.gif', 'wimpod.gif', 'wingull.gif', 'wishiwashi-school.gif', 'wishiwashi.gif', 'wobbuffet-f.gif', 'wobbuffet.gif', 'woobat.gif', 'wooloo.gif', 'wooper-f.gif', 'wooper.gif', 'wormadam-sandy.gif', 'wormadam-trash.gif', 'wormadam.gif', 'wurmple.gif', 'wynaut.gif', 'xatu-f.gif', 'xatu.gif', 'xerneas-neutral.gif', 'xerneas.gif', 'xurkitree.gif', 'yamask-galar.gif', 'yamask.gif', 'yamper.gif', 'yanma.gif', 'yanmega.gif', 'yungoos.gif', 'yveltal.gif', 'zacian-crowned.gif', 'zacian.gif', 'zamazenta-crowned.gif', 'zamazenta.gif', 'zangoose.gif', 'zapdos.gif', 'zebstrika.gif', 'zekrom.gif', 'zeraora.gif', 'zigzagoon-galar.gif', 'zigzagoon.gif', 'zoroark.gif', 'zorua.gif', 'zubat-f.gif', 'zubat.gif', 'zweilous.gif', 'zygarde-10.gif', 'zygarde-complete.gif', 'zygarde.gif']\r\n\r\n\r\nfor i in filename:\r\n url = 'https://play.pokemonshowdown.com/sprites/ani/{}'.format(i)\r\n file_name = str(i[:-4])\r\n dl_img(url, 'files/pokemon/front', file_name)\r\n\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
# 1.闭包
# 2.装饰圈初识
# 3.标准版装饰器
|
normal
|
{
"blob_id": "a1ebb00d7cda65cb528b2253e817d925214cdce3",
"index": 5847,
"step-1": "# 1.闭包\n# 2.装饰圈初识\n# 3.标准版装饰器",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while True:
x, y, z = mc.player.getTilePos()
color = random.randrange(0, 9)
mc.setBlock(x, y, z - 1, 38, color)
time.sleep(0.01)
<|reserved_special_token_1|>
from mcpi.minecraft import Minecraft
import random, time
while True:
x, y, z = mc.player.getTilePos()
color = random.randrange(0, 9)
mc.setBlock(x, y, z - 1, 38, color)
time.sleep(0.01)
|
flexible
|
{
"blob_id": "a2e00af84f743e949b53840ae6d5509e08935486",
"index": 7978,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n x, y, z = mc.player.getTilePos()\n color = random.randrange(0, 9)\n mc.setBlock(x, y, z - 1, 38, color)\n time.sleep(0.01)\n",
"step-3": "from mcpi.minecraft import Minecraft\nimport random, time\nwhile True:\n x, y, z = mc.player.getTilePos()\n color = random.randrange(0, 9)\n mc.setBlock(x, y, z - 1, 38, color)\n time.sleep(0.01)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(numbers + new_numbers)
print(numbers * 5)
<|reserved_special_token_1|>
numbers = [1, 1, 1, 1, 1]
new_numbers = [2, 2, 2, 3, 3]
print(numbers + new_numbers)
print(numbers * 5)
|
flexible
|
{
"blob_id": "843df062702c9abf34cf14d911d927d786f1d912",
"index": 1573,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(numbers + new_numbers)\nprint(numbers * 5)\n",
"step-3": "numbers = [1, 1, 1, 1, 1]\nnew_numbers = [2, 2, 2, 3, 3]\nprint(numbers + new_numbers)\nprint(numbers * 5)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class Binding:
def __init__(self, parent, binding):
self.parent = parent
self.binding = binding
<|reserved_special_token_0|>
def add(self, var_name, value):
self.binding[var_name] = value
<|reserved_special_token_0|>
class FunctionCall:
def __init__(self, func_name, call_args):
self.func_name = func_name
self.call_args = call_args
def eval(self, binding):
if type(self.call_args) == VarReference:
args = self.call_args.eval(binding)
return args[1].eval(binding)
func_binding = Binding(binding.get(self.func_name.value)[2], {})
parameters = self.call_args[0].eval(func_binding)[0]
if func_binding.contains(parameters[0]):
args = self.call_args[1].eval(func_binding)
else:
args = self.call_args[1].eval(binding)
for i in range(len(parameters)):
func_binding.add(parameters[i], args[i])
code = func_binding.get(self.func_name.value)[1]
return code.eval(func_binding)
class FunctionDefinition:
def __init__(self, param_list, code_block):
self.param_list = param_list
self.code_block = code_block
def eval(self, binding):
func_binding = Binding(binding, {})
return self.param_list, self.code_block, func_binding
class CallArguments:
def __init__(self, arguments):
self.arguments = arguments
def eval(self, binding):
arg_list = []
for arg in self.arguments:
arg_list.append(arg.eval(binding))
return arg_list
class VariableDecl:
def __init__(self, declarations):
self.declarations = declarations
def eval(self, binding):
for decl in self.declarations:
temp = decl.eval(binding)
return temp
class Decl:
def __init__(self, var_name, val):
self.var_name = var_name
self.val = val
def eval(self, binding):
var_val = self.val.eval(binding)
binding.add(self.var_name, var_val)
return var_val
class Assignment:
def __init__(self, var_name, val):
self.var_name = var_name
self.val = val
def eval(self, binding):
var_val = self.val.eval(binding)
binding.add(self.var_name, var_val)
return var_val
class VarReference:
def __init__(self, var_name):
self.var_name = var_name
def eval(self, binding):
return binding.get(self.var_name)
class EqualTo(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) == self.right.eval(binding):
return 1
else:
return 0
class NotEqualTo(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) != self.right.eval(binding):
return 1
else:
return 0
class LessThan(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) < self.right.eval(binding):
return 1
else:
return 0
class LessThanOrEqualTo(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) <= self.right.eval(binding):
return 1
else:
return 0
class GreaterThan(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) > self.right.eval(binding):
return 1
else:
return 0
class GreaterThanOrEqualTo(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) >= self.right.eval(binding):
return 1
else:
return 0
class IfExpression:
def __init__(self, bool_expr, if_block):
self.bool_expr = bool_expr
self.if_block = if_block
def eval(self, binding):
bool_result = self.bool_expr.eval(binding)
if type(bool_result) is not int:
bool_result = bool_result[0]
if bool_result == 1:
return self.if_block.eval(binding)
else:
return 0
class IfElseExpression:
def __init__(self, bool_expr, if_block, else_block):
self.bool_expr = bool_expr
self.if_block = if_block
self.else_block = else_block
def eval(self, binding):
bool_result = self.bool_expr.eval(binding)
if type(bool_result) is not int:
bool_result = bool_result[0]
if bool_result == 1:
return self.if_block.eval(binding)
else:
return self.else_block.eval(binding)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Mult(BinaryOp):
<|reserved_special_token_0|>
class Div(BinaryOp):
def eval(self, binding):
return int(self.left.eval(binding) / self.right.eval(binding))
class BuiltInFunction:
def __init__(self, func_name, call_args):
self.func_name = func_name
self.call_args = call_args
def eval(self, binding):
args = self.call_args.eval(binding)
if self.func_name == 'print':
if type(args) == int:
print(args)
else:
print('Print: ')
count = 0
for arg in args:
out = str(arg)
if count != len(args) - 1:
print(out, end='|')
else:
print(out)
count += 1
return
else:
return args[0]
class Binding:
def __init__(self, parent, binding):
self.parent = parent
self.binding = binding
def get(self, name):
if name in self.binding:
return self.binding[name]
return self.parent.get(name)
def add(self, var_name, value):
self.binding[var_name] = value
def contains(self, name):
for i in self.binding:
if i == name:
return True
if self.parent:
return self.parent.contains(name)
return False
class FunctionCall:
def __init__(self, func_name, call_args):
self.func_name = func_name
self.call_args = call_args
def eval(self, binding):
if type(self.call_args) == VarReference:
args = self.call_args.eval(binding)
return args[1].eval(binding)
func_binding = Binding(binding.get(self.func_name.value)[2], {})
parameters = self.call_args[0].eval(func_binding)[0]
if func_binding.contains(parameters[0]):
args = self.call_args[1].eval(func_binding)
else:
args = self.call_args[1].eval(binding)
for i in range(len(parameters)):
func_binding.add(parameters[i], args[i])
code = func_binding.get(self.func_name.value)[1]
return code.eval(func_binding)
class FunctionDefinition:
def __init__(self, param_list, code_block):
self.param_list = param_list
self.code_block = code_block
def eval(self, binding):
func_binding = Binding(binding, {})
return self.param_list, self.code_block, func_binding
class CallArguments:
def __init__(self, arguments):
self.arguments = arguments
def eval(self, binding):
arg_list = []
for arg in self.arguments:
arg_list.append(arg.eval(binding))
return arg_list
class VariableDecl:
def __init__(self, declarations):
self.declarations = declarations
def eval(self, binding):
for decl in self.declarations:
temp = decl.eval(binding)
return temp
class Decl:
def __init__(self, var_name, val):
self.var_name = var_name
self.val = val
def eval(self, binding):
var_val = self.val.eval(binding)
binding.add(self.var_name, var_val)
return var_val
class Assignment:
def __init__(self, var_name, val):
self.var_name = var_name
self.val = val
def eval(self, binding):
var_val = self.val.eval(binding)
binding.add(self.var_name, var_val)
return var_val
class VarReference:
def __init__(self, var_name):
self.var_name = var_name
def eval(self, binding):
return binding.get(self.var_name)
class EqualTo(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) == self.right.eval(binding):
return 1
else:
return 0
class NotEqualTo(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) != self.right.eval(binding):
return 1
else:
return 0
class LessThan(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) < self.right.eval(binding):
return 1
else:
return 0
class LessThanOrEqualTo(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) <= self.right.eval(binding):
return 1
else:
return 0
class GreaterThan(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) > self.right.eval(binding):
return 1
else:
return 0
class GreaterThanOrEqualTo(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) >= self.right.eval(binding):
return 1
else:
return 0
class IfExpression:
def __init__(self, bool_expr, if_block):
self.bool_expr = bool_expr
self.if_block = if_block
def eval(self, binding):
bool_result = self.bool_expr.eval(binding)
if type(bool_result) is not int:
bool_result = bool_result[0]
if bool_result == 1:
return self.if_block.eval(binding)
else:
return 0
class IfElseExpression:
def __init__(self, bool_expr, if_block, else_block):
self.bool_expr = bool_expr
self.if_block = if_block
self.else_block = else_block
def eval(self, binding):
bool_result = self.bool_expr.eval(binding)
if type(bool_result) is not int:
bool_result = bool_result[0]
if bool_result == 1:
return self.if_block.eval(binding)
else:
return self.else_block.eval(binding)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BinaryOp:
<|reserved_special_token_0|>
class Sum(BinaryOp):
def eval(self, binding):
return self.left.eval(binding) + self.right.eval(binding)
class Sub(BinaryOp):
def eval(self, binding):
return self.left.eval(binding) - self.right.eval(binding)
class Mult(BinaryOp):
def eval(self, binding):
return self.left.eval(binding) * self.right.eval(binding)
class Div(BinaryOp):
def eval(self, binding):
return int(self.left.eval(binding) / self.right.eval(binding))
class BuiltInFunction:
def __init__(self, func_name, call_args):
self.func_name = func_name
self.call_args = call_args
def eval(self, binding):
args = self.call_args.eval(binding)
if self.func_name == 'print':
if type(args) == int:
print(args)
else:
print('Print: ')
count = 0
for arg in args:
out = str(arg)
if count != len(args) - 1:
print(out, end='|')
else:
print(out)
count += 1
return
else:
return args[0]
class Binding:
def __init__(self, parent, binding):
self.parent = parent
self.binding = binding
def get(self, name):
if name in self.binding:
return self.binding[name]
return self.parent.get(name)
def add(self, var_name, value):
self.binding[var_name] = value
def contains(self, name):
for i in self.binding:
if i == name:
return True
if self.parent:
return self.parent.contains(name)
return False
class FunctionCall:
def __init__(self, func_name, call_args):
self.func_name = func_name
self.call_args = call_args
def eval(self, binding):
if type(self.call_args) == VarReference:
args = self.call_args.eval(binding)
return args[1].eval(binding)
func_binding = Binding(binding.get(self.func_name.value)[2], {})
parameters = self.call_args[0].eval(func_binding)[0]
if func_binding.contains(parameters[0]):
args = self.call_args[1].eval(func_binding)
else:
args = self.call_args[1].eval(binding)
for i in range(len(parameters)):
func_binding.add(parameters[i], args[i])
code = func_binding.get(self.func_name.value)[1]
return code.eval(func_binding)
class FunctionDefinition:
def __init__(self, param_list, code_block):
self.param_list = param_list
self.code_block = code_block
def eval(self, binding):
func_binding = Binding(binding, {})
return self.param_list, self.code_block, func_binding
class CallArguments:
def __init__(self, arguments):
self.arguments = arguments
def eval(self, binding):
arg_list = []
for arg in self.arguments:
arg_list.append(arg.eval(binding))
return arg_list
class VariableDecl:
def __init__(self, declarations):
self.declarations = declarations
def eval(self, binding):
for decl in self.declarations:
temp = decl.eval(binding)
return temp
class Decl:
def __init__(self, var_name, val):
self.var_name = var_name
self.val = val
def eval(self, binding):
var_val = self.val.eval(binding)
binding.add(self.var_name, var_val)
return var_val
class Assignment:
def __init__(self, var_name, val):
self.var_name = var_name
self.val = val
def eval(self, binding):
var_val = self.val.eval(binding)
binding.add(self.var_name, var_val)
return var_val
class VarReference:
def __init__(self, var_name):
self.var_name = var_name
def eval(self, binding):
return binding.get(self.var_name)
class EqualTo(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) == self.right.eval(binding):
return 1
else:
return 0
class NotEqualTo(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) != self.right.eval(binding):
return 1
else:
return 0
class LessThan(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) < self.right.eval(binding):
return 1
else:
return 0
class LessThanOrEqualTo(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) <= self.right.eval(binding):
return 1
else:
return 0
class GreaterThan(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) > self.right.eval(binding):
return 1
else:
return 0
class GreaterThanOrEqualTo(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) >= self.right.eval(binding):
return 1
else:
return 0
class IfExpression:
def __init__(self, bool_expr, if_block):
self.bool_expr = bool_expr
self.if_block = if_block
def eval(self, binding):
bool_result = self.bool_expr.eval(binding)
if type(bool_result) is not int:
bool_result = bool_result[0]
if bool_result == 1:
return self.if_block.eval(binding)
else:
return 0
class IfElseExpression:
def __init__(self, bool_expr, if_block, else_block):
self.bool_expr = bool_expr
self.if_block = if_block
self.else_block = else_block
def eval(self, binding):
bool_result = self.bool_expr.eval(binding)
if type(bool_result) is not int:
bool_result = bool_result[0]
if bool_result == 1:
return self.if_block.eval(binding)
else:
return self.else_block.eval(binding)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Code:
def __init__(self, statements):
self.statements = statements
<|reserved_special_token_0|>
class Statement:
def __init__(self, statement):
self.statement = statement
def eval(self, binding):
return self.statement.eval(binding)
class Expr:
def __init__(self, expression):
self.expression = expression
def eval(self, binding):
return self.expression.eval(binding)
class Integer:
def __init__(self, value):
self.value = value
def eval(self, binding):
return int(self.value)
class BinaryOp:
def __init__(self, left, right):
self.left = left
self.right = right
class Sum(BinaryOp):
def eval(self, binding):
return self.left.eval(binding) + self.right.eval(binding)
class Sub(BinaryOp):
def eval(self, binding):
return self.left.eval(binding) - self.right.eval(binding)
class Mult(BinaryOp):
def eval(self, binding):
return self.left.eval(binding) * self.right.eval(binding)
class Div(BinaryOp):
def eval(self, binding):
return int(self.left.eval(binding) / self.right.eval(binding))
class BuiltInFunction:
def __init__(self, func_name, call_args):
self.func_name = func_name
self.call_args = call_args
def eval(self, binding):
args = self.call_args.eval(binding)
if self.func_name == 'print':
if type(args) == int:
print(args)
else:
print('Print: ')
count = 0
for arg in args:
out = str(arg)
if count != len(args) - 1:
print(out, end='|')
else:
print(out)
count += 1
return
else:
return args[0]
class Binding:
def __init__(self, parent, binding):
self.parent = parent
self.binding = binding
def get(self, name):
if name in self.binding:
return self.binding[name]
return self.parent.get(name)
def add(self, var_name, value):
self.binding[var_name] = value
def contains(self, name):
for i in self.binding:
if i == name:
return True
if self.parent:
return self.parent.contains(name)
return False
class FunctionCall:
def __init__(self, func_name, call_args):
self.func_name = func_name
self.call_args = call_args
def eval(self, binding):
if type(self.call_args) == VarReference:
args = self.call_args.eval(binding)
return args[1].eval(binding)
func_binding = Binding(binding.get(self.func_name.value)[2], {})
parameters = self.call_args[0].eval(func_binding)[0]
if func_binding.contains(parameters[0]):
args = self.call_args[1].eval(func_binding)
else:
args = self.call_args[1].eval(binding)
for i in range(len(parameters)):
func_binding.add(parameters[i], args[i])
code = func_binding.get(self.func_name.value)[1]
return code.eval(func_binding)
class FunctionDefinition:
def __init__(self, param_list, code_block):
self.param_list = param_list
self.code_block = code_block
def eval(self, binding):
func_binding = Binding(binding, {})
return self.param_list, self.code_block, func_binding
class CallArguments:
def __init__(self, arguments):
self.arguments = arguments
def eval(self, binding):
arg_list = []
for arg in self.arguments:
arg_list.append(arg.eval(binding))
return arg_list
class VariableDecl:
def __init__(self, declarations):
self.declarations = declarations
def eval(self, binding):
for decl in self.declarations:
temp = decl.eval(binding)
return temp
class Decl:
def __init__(self, var_name, val):
self.var_name = var_name
self.val = val
def eval(self, binding):
var_val = self.val.eval(binding)
binding.add(self.var_name, var_val)
return var_val
class Assignment:
def __init__(self, var_name, val):
self.var_name = var_name
self.val = val
def eval(self, binding):
var_val = self.val.eval(binding)
binding.add(self.var_name, var_val)
return var_val
class VarReference:
def __init__(self, var_name):
self.var_name = var_name
def eval(self, binding):
return binding.get(self.var_name)
class EqualTo(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) == self.right.eval(binding):
return 1
else:
return 0
class NotEqualTo(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) != self.right.eval(binding):
return 1
else:
return 0
class LessThan(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) < self.right.eval(binding):
return 1
else:
return 0
class LessThanOrEqualTo(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) <= self.right.eval(binding):
return 1
else:
return 0
class GreaterThan(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) > self.right.eval(binding):
return 1
else:
return 0
class GreaterThanOrEqualTo(BinaryOp):
def eval(self, binding):
if self.left.eval(binding) >= self.right.eval(binding):
return 1
else:
return 0
class IfExpression:
def __init__(self, bool_expr, if_block):
self.bool_expr = bool_expr
self.if_block = if_block
def eval(self, binding):
bool_result = self.bool_expr.eval(binding)
if type(bool_result) is not int:
bool_result = bool_result[0]
if bool_result == 1:
return self.if_block.eval(binding)
else:
return 0
class IfElseExpression:
def __init__(self, bool_expr, if_block, else_block):
self.bool_expr = bool_expr
self.if_block = if_block
self.else_block = else_block
def eval(self, binding):
bool_result = self.bool_expr.eval(binding)
if type(bool_result) is not int:
bool_result = bool_result[0]
if bool_result == 1:
return self.if_block.eval(binding)
else:
return self.else_block.eval(binding)
<|reserved_special_token_1|>
# Interprets the AST
class Program:
def __init__(self, code):
self.code = code
def eval(self, binding):
return self.code.eval(binding)
class Code:
def __init__(self, statements):
self.statements = statements
def eval(self, binding):
val = 0
for statement in self.statements:
val = statement.eval(binding)
return val
class Statement:
def __init__(self, statement):
self.statement = statement
def eval(self, binding):
return self.statement.eval(binding)
class Expr:
    """Wrapper node: an expression evaluates to whatever it contains."""
    def __init__(self, expression):
        self.expression = expression
    def eval(self, binding):
        return self.expression.eval(binding)
class Integer:
    """Integer literal node; the lexeme is stored as-is and parsed on demand."""
    def __init__(self, value):
        # `value` is typically the raw token text, e.g. "42".
        self.value = value
    def eval(self, binding):
        # `binding` is unused: a literal evaluates to itself.
        return int(self.value)
class BinaryOp:
    """Base class for two-operand nodes; subclasses implement eval()."""
    def __init__(self, left, right):
        self.left = left
        self.right = right
class Sum(BinaryOp):
    """Addition node: the sum of both evaluated operands."""
    def eval(self, binding):
        return self.left.eval(binding) + self.right.eval(binding)
class Sub(BinaryOp):
    """Subtraction node: left operand minus right operand."""
    def eval(self, binding):
        return self.left.eval(binding) - self.right.eval(binding)
class Mult(BinaryOp):
    """Multiplication node: the product of both evaluated operands."""
    def eval(self, binding):
        return self.left.eval(binding) * self.right.eval(binding)
class Div(BinaryOp):
    """Integer division node, truncating toward zero (C-style semantics)."""
    def eval(self, binding):
        left = self.left.eval(binding)
        right = self.right.eval(binding)
        # Exact truncating division. The previous `int(left / right)` routed
        # through float true division, which loses precision once operands
        # exceed 2**53 and raises OverflowError for very large integers.
        # Results for all small inputs are unchanged: int(-7/2) == -3 here too.
        quotient = abs(left) // abs(right)
        return quotient if (left < 0) == (right < 0) else -quotient
class BuiltInFunction:
    """Dispatches the interpreter's built-in operations.

    Supported: `print` (writes to stdout, returns None) and the grouping
    operator, whose value is its first evaluated argument.
    """
    def __init__(self, func_name, call_args):
        self.func_name = func_name
        self.call_args = call_args
    def eval(self, binding):
        args = self.call_args.eval(binding)
        if self.func_name != "print":
            # Grouping: the value of `(expr)` is the first argument.
            return args[0]
        if type(args) is int:
            print(args)
        else:
            # Header line, then the arguments separated by '|'.
            print("Print: ")
            print("|".join(str(arg) for arg in args))
        return
class Binding:
    """Lexically scoped symbol table for variables and functions.

    Reads walk up the chain of parent bindings; writes always land in this
    (innermost) binding.
    """
    def __init__(self, parent, binding):
        self.parent = parent    # enclosing scope, or None at the root
        self.binding = binding  # name -> value mapping for this scope
    def get(self, name):
        # Fall back to the parent scope on a miss (an unknown name at the
        # root propagates an error, as before).
        if name in self.binding:
            return self.binding[name]
        return self.parent.get(name)
    def add(self, var_name, value):
        self.binding[var_name] = value
    def contains(self, name):
        # Membership test across the entire scope chain.
        if name in self.binding:
            return True
        return bool(self.parent) and self.parent.contains(name)
class FunctionCall:
    """AST node that invokes a previously defined function.

    The binding maps a function's name to the triple stored by
    FunctionDefinition: (param_list, code_block, definition-time binding).
    """
    def __init__(self, func_name, call_args):
        self.func_name = func_name
        self.call_args = call_args
    def eval(self, binding):
        # Zero-parameter call: call_args is a bare VarReference whose lookup
        # yields the stored triple; element [1] is the function body.
        if type(self.call_args) == VarReference:
            args = self.call_args.eval(binding)
            return args[1].eval(binding)
        # Otherwise build a fresh call frame as a child of the binding
        # captured when the function was defined (lexical scoping).
        func_binding = Binding(binding.get(self.func_name.value)[2], {})
        # Parameter names come from the stored param_list (element [0]).
        parameters = self.call_args[0].eval(func_binding)[0]
        # Recursion support: if the first parameter already exists somewhere
        # in the call frame's chain, argument expressions are evaluated
        # there; otherwise they are evaluated in the caller's binding.
        if func_binding.contains(parameters[0]):
            args = self.call_args[1].eval(func_binding)
        # if not, the argument values are resolved in the global binding
        else:
            args = self.call_args[1].eval(binding)
        # Bind each argument value to its parameter name, positionally.
        for i in range(len(parameters)):
            func_binding.add(parameters[i], args[i])
        # Run the stored body (element [1]) inside the new frame.
        code = func_binding.get(self.func_name.value)[1]
        return code.eval(func_binding)
class FunctionDefinition:
    """AST node for a function definition.

    Evaluating it does not run the body; it packages everything a later
    call needs: (parameter list, body, definition-scope binding).
    """
    def __init__(self, param_list, code_block):
        self.param_list = param_list
        self.code_block = code_block
    def eval(self, binding):
        # Capture the defining scope so calls get a fresh child binding of
        # the environment where the function was written (lexical scope).
        closure_scope = Binding(binding, {})
        return self.param_list, self.code_block, closure_scope
class CallArguments:
    """AST node holding a call's argument expressions."""
    def __init__(self, arguments):
        self.arguments = arguments
    def eval(self, binding):
        # Evaluate each argument expression, left to right.
        return [argument.eval(binding) for argument in self.arguments]
class VariableDecl:
    """AST node for a list of variable declarations.

    Evaluates every declaration in order and yields the last one's value.
    """
    def __init__(self, declarations):
        self.declarations = declarations
    def eval(self, binding):
        # Default to 0 so an empty declaration list evaluates cleanly.
        # Previously the result variable was only assigned inside the loop,
        # so an empty list raised UnboundLocalError.
        value = 0
        for decl in self.declarations:
            value = decl.eval(binding)
        return value
class Decl:
    """A single `name = expr` declaration: stores the value and yields it."""
    def __init__(self, var_name, val):
        self.var_name = var_name
        self.val = val
    def eval(self, binding):
        result = self.val.eval(binding)
        # Declarations always write into the current (innermost) scope.
        binding.add(self.var_name, result)
        return result
class Assignment:
    """`name = expr` assignment: evaluates, stores, and yields the value."""
    def __init__(self, var_name, val):
        self.var_name = var_name
        self.val = val
    def eval(self, binding):
        result = self.val.eval(binding)
        # Like Decl, this writes into the current scope via Binding.add.
        binding.add(self.var_name, result)
        return result
class VarReference:
    """Variable read: looks the name up through the binding chain."""
    def __init__(self, var_name):
        self.var_name = var_name
    def eval(self, binding):
        # Binding.get walks parent scopes; behavior for an undefined name
        # depends on how the root binding is constructed — presumably an
        # error propagates (TODO confirm against the interpreter's entry point).
        return binding.get(self.var_name)
class EqualTo(BinaryOp):
    """Equality node: 1 when both operands evaluate equal, else 0."""
    def eval(self, binding):
        # int() maps the boolean comparison onto the language's 1/0 truth values.
        return int(self.left.eval(binding) == self.right.eval(binding))
class NotEqualTo(BinaryOp):
    """Inequality node: 1 when the operands differ, else 0."""
    def eval(self, binding):
        # int() maps the boolean comparison onto the language's 1/0 truth values.
        return int(self.left.eval(binding) != self.right.eval(binding))
class LessThan(BinaryOp):
    """Strict less-than node: 1 when left < right, else 0."""
    def eval(self, binding):
        # int() maps the boolean comparison onto the language's 1/0 truth values.
        return int(self.left.eval(binding) < self.right.eval(binding))
class LessThanOrEqualTo(BinaryOp):
    """Less-than-or-equal node: 1 when left <= right, else 0."""
    def eval(self, binding):
        # int() maps the boolean comparison onto the language's 1/0 truth values.
        return int(self.left.eval(binding) <= self.right.eval(binding))
class GreaterThan(BinaryOp):
    """Strict greater-than node: 1 when left > right, else 0."""
    def eval(self, binding):
        # int() maps the boolean comparison onto the language's 1/0 truth values.
        return int(self.left.eval(binding) > self.right.eval(binding))
class GreaterThanOrEqualTo(BinaryOp):
    """Greater-than-or-equal node: 1 when left >= right, else 0."""
    def eval(self, binding):
        # int() maps the boolean comparison onto the language's 1/0 truth values.
        return int(self.left.eval(binding) >= self.right.eval(binding))
class IfExpression:
    """`if` without an `else`: the block's value when the test is 1, else 0."""
    def __init__(self, bool_expr, if_block):
        self.bool_expr = bool_expr
        self.if_block = if_block
    def eval(self, binding):
        truth = self.bool_expr.eval(binding)
        # Non-int results (e.g. stored function triples) carry their truth
        # value in the first element.
        if type(truth) is not int:
            truth = truth[0]
        return self.if_block.eval(binding) if truth == 1 else 0
class IfElseExpression:
    """`if`/`else` node: evaluates exactly one of its two blocks."""
    def __init__(self, bool_expr, if_block, else_block):
        self.bool_expr = bool_expr
        self.if_block = if_block
        self.else_block = else_block
    def eval(self, binding):
        truth = self.bool_expr.eval(binding)
        # Non-int results carry their truth value in the first element.
        if type(truth) is not int:
            truth = truth[0]
        if truth == 1:
            return self.if_block.eval(binding)
        return self.else_block.eval(binding)
|
flexible
|
{
"blob_id": "5fa91a5061a5e87a4a2b8fece0378299e87e5a48",
"index": 6694,
"step-1": "<mask token>\n\n\nclass Binding:\n\n def __init__(self, parent, binding):\n self.parent = parent\n self.binding = binding\n <mask token>\n\n def add(self, var_name, value):\n self.binding[var_name] = value\n <mask token>\n\n\nclass FunctionCall:\n\n def __init__(self, func_name, call_args):\n self.func_name = func_name\n self.call_args = call_args\n\n def eval(self, binding):\n if type(self.call_args) == VarReference:\n args = self.call_args.eval(binding)\n return args[1].eval(binding)\n func_binding = Binding(binding.get(self.func_name.value)[2], {})\n parameters = self.call_args[0].eval(func_binding)[0]\n if func_binding.contains(parameters[0]):\n args = self.call_args[1].eval(func_binding)\n else:\n args = self.call_args[1].eval(binding)\n for i in range(len(parameters)):\n func_binding.add(parameters[i], args[i])\n code = func_binding.get(self.func_name.value)[1]\n return code.eval(func_binding)\n\n\nclass FunctionDefinition:\n\n def __init__(self, param_list, code_block):\n self.param_list = param_list\n self.code_block = code_block\n\n def eval(self, binding):\n func_binding = Binding(binding, {})\n return self.param_list, self.code_block, func_binding\n\n\nclass CallArguments:\n\n def __init__(self, arguments):\n self.arguments = arguments\n\n def eval(self, binding):\n arg_list = []\n for arg in self.arguments:\n arg_list.append(arg.eval(binding))\n return arg_list\n\n\nclass VariableDecl:\n\n def __init__(self, declarations):\n self.declarations = declarations\n\n def eval(self, binding):\n for decl in self.declarations:\n temp = decl.eval(binding)\n return temp\n\n\nclass Decl:\n\n def __init__(self, var_name, val):\n self.var_name = var_name\n self.val = val\n\n def eval(self, binding):\n var_val = self.val.eval(binding)\n binding.add(self.var_name, var_val)\n return var_val\n\n\nclass Assignment:\n\n def __init__(self, var_name, val):\n self.var_name = var_name\n self.val = val\n\n def eval(self, binding):\n var_val = self.val.eval(binding)\n 
binding.add(self.var_name, var_val)\n return var_val\n\n\nclass VarReference:\n\n def __init__(self, var_name):\n self.var_name = var_name\n\n def eval(self, binding):\n return binding.get(self.var_name)\n\n\nclass EqualTo(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) == self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass NotEqualTo(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) != self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass LessThan(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) < self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass LessThanOrEqualTo(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) <= self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass GreaterThan(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) > self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass GreaterThanOrEqualTo(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) >= self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass IfExpression:\n\n def __init__(self, bool_expr, if_block):\n self.bool_expr = bool_expr\n self.if_block = if_block\n\n def eval(self, binding):\n bool_result = self.bool_expr.eval(binding)\n if type(bool_result) is not int:\n bool_result = bool_result[0]\n if bool_result == 1:\n return self.if_block.eval(binding)\n else:\n return 0\n\n\nclass IfElseExpression:\n\n def __init__(self, bool_expr, if_block, else_block):\n self.bool_expr = bool_expr\n self.if_block = if_block\n self.else_block = else_block\n\n def eval(self, binding):\n bool_result = self.bool_expr.eval(binding)\n if type(bool_result) is not int:\n bool_result = bool_result[0]\n if bool_result == 1:\n return self.if_block.eval(binding)\n else:\n return self.else_block.eval(binding)\n",
"step-2": "<mask token>\n\n\nclass Mult(BinaryOp):\n <mask token>\n\n\nclass Div(BinaryOp):\n\n def eval(self, binding):\n return int(self.left.eval(binding) / self.right.eval(binding))\n\n\nclass BuiltInFunction:\n\n def __init__(self, func_name, call_args):\n self.func_name = func_name\n self.call_args = call_args\n\n def eval(self, binding):\n args = self.call_args.eval(binding)\n if self.func_name == 'print':\n if type(args) == int:\n print(args)\n else:\n print('Print: ')\n count = 0\n for arg in args:\n out = str(arg)\n if count != len(args) - 1:\n print(out, end='|')\n else:\n print(out)\n count += 1\n return\n else:\n return args[0]\n\n\nclass Binding:\n\n def __init__(self, parent, binding):\n self.parent = parent\n self.binding = binding\n\n def get(self, name):\n if name in self.binding:\n return self.binding[name]\n return self.parent.get(name)\n\n def add(self, var_name, value):\n self.binding[var_name] = value\n\n def contains(self, name):\n for i in self.binding:\n if i == name:\n return True\n if self.parent:\n return self.parent.contains(name)\n return False\n\n\nclass FunctionCall:\n\n def __init__(self, func_name, call_args):\n self.func_name = func_name\n self.call_args = call_args\n\n def eval(self, binding):\n if type(self.call_args) == VarReference:\n args = self.call_args.eval(binding)\n return args[1].eval(binding)\n func_binding = Binding(binding.get(self.func_name.value)[2], {})\n parameters = self.call_args[0].eval(func_binding)[0]\n if func_binding.contains(parameters[0]):\n args = self.call_args[1].eval(func_binding)\n else:\n args = self.call_args[1].eval(binding)\n for i in range(len(parameters)):\n func_binding.add(parameters[i], args[i])\n code = func_binding.get(self.func_name.value)[1]\n return code.eval(func_binding)\n\n\nclass FunctionDefinition:\n\n def __init__(self, param_list, code_block):\n self.param_list = param_list\n self.code_block = code_block\n\n def eval(self, binding):\n func_binding = Binding(binding, {})\n 
return self.param_list, self.code_block, func_binding\n\n\nclass CallArguments:\n\n def __init__(self, arguments):\n self.arguments = arguments\n\n def eval(self, binding):\n arg_list = []\n for arg in self.arguments:\n arg_list.append(arg.eval(binding))\n return arg_list\n\n\nclass VariableDecl:\n\n def __init__(self, declarations):\n self.declarations = declarations\n\n def eval(self, binding):\n for decl in self.declarations:\n temp = decl.eval(binding)\n return temp\n\n\nclass Decl:\n\n def __init__(self, var_name, val):\n self.var_name = var_name\n self.val = val\n\n def eval(self, binding):\n var_val = self.val.eval(binding)\n binding.add(self.var_name, var_val)\n return var_val\n\n\nclass Assignment:\n\n def __init__(self, var_name, val):\n self.var_name = var_name\n self.val = val\n\n def eval(self, binding):\n var_val = self.val.eval(binding)\n binding.add(self.var_name, var_val)\n return var_val\n\n\nclass VarReference:\n\n def __init__(self, var_name):\n self.var_name = var_name\n\n def eval(self, binding):\n return binding.get(self.var_name)\n\n\nclass EqualTo(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) == self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass NotEqualTo(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) != self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass LessThan(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) < self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass LessThanOrEqualTo(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) <= self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass GreaterThan(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) > self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass GreaterThanOrEqualTo(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) >= self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass 
IfExpression:\n\n def __init__(self, bool_expr, if_block):\n self.bool_expr = bool_expr\n self.if_block = if_block\n\n def eval(self, binding):\n bool_result = self.bool_expr.eval(binding)\n if type(bool_result) is not int:\n bool_result = bool_result[0]\n if bool_result == 1:\n return self.if_block.eval(binding)\n else:\n return 0\n\n\nclass IfElseExpression:\n\n def __init__(self, bool_expr, if_block, else_block):\n self.bool_expr = bool_expr\n self.if_block = if_block\n self.else_block = else_block\n\n def eval(self, binding):\n bool_result = self.bool_expr.eval(binding)\n if type(bool_result) is not int:\n bool_result = bool_result[0]\n if bool_result == 1:\n return self.if_block.eval(binding)\n else:\n return self.else_block.eval(binding)\n",
"step-3": "<mask token>\n\n\nclass BinaryOp:\n <mask token>\n\n\nclass Sum(BinaryOp):\n\n def eval(self, binding):\n return self.left.eval(binding) + self.right.eval(binding)\n\n\nclass Sub(BinaryOp):\n\n def eval(self, binding):\n return self.left.eval(binding) - self.right.eval(binding)\n\n\nclass Mult(BinaryOp):\n\n def eval(self, binding):\n return self.left.eval(binding) * self.right.eval(binding)\n\n\nclass Div(BinaryOp):\n\n def eval(self, binding):\n return int(self.left.eval(binding) / self.right.eval(binding))\n\n\nclass BuiltInFunction:\n\n def __init__(self, func_name, call_args):\n self.func_name = func_name\n self.call_args = call_args\n\n def eval(self, binding):\n args = self.call_args.eval(binding)\n if self.func_name == 'print':\n if type(args) == int:\n print(args)\n else:\n print('Print: ')\n count = 0\n for arg in args:\n out = str(arg)\n if count != len(args) - 1:\n print(out, end='|')\n else:\n print(out)\n count += 1\n return\n else:\n return args[0]\n\n\nclass Binding:\n\n def __init__(self, parent, binding):\n self.parent = parent\n self.binding = binding\n\n def get(self, name):\n if name in self.binding:\n return self.binding[name]\n return self.parent.get(name)\n\n def add(self, var_name, value):\n self.binding[var_name] = value\n\n def contains(self, name):\n for i in self.binding:\n if i == name:\n return True\n if self.parent:\n return self.parent.contains(name)\n return False\n\n\nclass FunctionCall:\n\n def __init__(self, func_name, call_args):\n self.func_name = func_name\n self.call_args = call_args\n\n def eval(self, binding):\n if type(self.call_args) == VarReference:\n args = self.call_args.eval(binding)\n return args[1].eval(binding)\n func_binding = Binding(binding.get(self.func_name.value)[2], {})\n parameters = self.call_args[0].eval(func_binding)[0]\n if func_binding.contains(parameters[0]):\n args = self.call_args[1].eval(func_binding)\n else:\n args = self.call_args[1].eval(binding)\n for i in range(len(parameters)):\n 
func_binding.add(parameters[i], args[i])\n code = func_binding.get(self.func_name.value)[1]\n return code.eval(func_binding)\n\n\nclass FunctionDefinition:\n\n def __init__(self, param_list, code_block):\n self.param_list = param_list\n self.code_block = code_block\n\n def eval(self, binding):\n func_binding = Binding(binding, {})\n return self.param_list, self.code_block, func_binding\n\n\nclass CallArguments:\n\n def __init__(self, arguments):\n self.arguments = arguments\n\n def eval(self, binding):\n arg_list = []\n for arg in self.arguments:\n arg_list.append(arg.eval(binding))\n return arg_list\n\n\nclass VariableDecl:\n\n def __init__(self, declarations):\n self.declarations = declarations\n\n def eval(self, binding):\n for decl in self.declarations:\n temp = decl.eval(binding)\n return temp\n\n\nclass Decl:\n\n def __init__(self, var_name, val):\n self.var_name = var_name\n self.val = val\n\n def eval(self, binding):\n var_val = self.val.eval(binding)\n binding.add(self.var_name, var_val)\n return var_val\n\n\nclass Assignment:\n\n def __init__(self, var_name, val):\n self.var_name = var_name\n self.val = val\n\n def eval(self, binding):\n var_val = self.val.eval(binding)\n binding.add(self.var_name, var_val)\n return var_val\n\n\nclass VarReference:\n\n def __init__(self, var_name):\n self.var_name = var_name\n\n def eval(self, binding):\n return binding.get(self.var_name)\n\n\nclass EqualTo(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) == self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass NotEqualTo(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) != self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass LessThan(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) < self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass LessThanOrEqualTo(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) <= self.right.eval(binding):\n return 1\n else:\n 
return 0\n\n\nclass GreaterThan(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) > self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass GreaterThanOrEqualTo(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) >= self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass IfExpression:\n\n def __init__(self, bool_expr, if_block):\n self.bool_expr = bool_expr\n self.if_block = if_block\n\n def eval(self, binding):\n bool_result = self.bool_expr.eval(binding)\n if type(bool_result) is not int:\n bool_result = bool_result[0]\n if bool_result == 1:\n return self.if_block.eval(binding)\n else:\n return 0\n\n\nclass IfElseExpression:\n\n def __init__(self, bool_expr, if_block, else_block):\n self.bool_expr = bool_expr\n self.if_block = if_block\n self.else_block = else_block\n\n def eval(self, binding):\n bool_result = self.bool_expr.eval(binding)\n if type(bool_result) is not int:\n bool_result = bool_result[0]\n if bool_result == 1:\n return self.if_block.eval(binding)\n else:\n return self.else_block.eval(binding)\n",
"step-4": "<mask token>\n\n\nclass Code:\n\n def __init__(self, statements):\n self.statements = statements\n <mask token>\n\n\nclass Statement:\n\n def __init__(self, statement):\n self.statement = statement\n\n def eval(self, binding):\n return self.statement.eval(binding)\n\n\nclass Expr:\n\n def __init__(self, expression):\n self.expression = expression\n\n def eval(self, binding):\n return self.expression.eval(binding)\n\n\nclass Integer:\n\n def __init__(self, value):\n self.value = value\n\n def eval(self, binding):\n return int(self.value)\n\n\nclass BinaryOp:\n\n def __init__(self, left, right):\n self.left = left\n self.right = right\n\n\nclass Sum(BinaryOp):\n\n def eval(self, binding):\n return self.left.eval(binding) + self.right.eval(binding)\n\n\nclass Sub(BinaryOp):\n\n def eval(self, binding):\n return self.left.eval(binding) - self.right.eval(binding)\n\n\nclass Mult(BinaryOp):\n\n def eval(self, binding):\n return self.left.eval(binding) * self.right.eval(binding)\n\n\nclass Div(BinaryOp):\n\n def eval(self, binding):\n return int(self.left.eval(binding) / self.right.eval(binding))\n\n\nclass BuiltInFunction:\n\n def __init__(self, func_name, call_args):\n self.func_name = func_name\n self.call_args = call_args\n\n def eval(self, binding):\n args = self.call_args.eval(binding)\n if self.func_name == 'print':\n if type(args) == int:\n print(args)\n else:\n print('Print: ')\n count = 0\n for arg in args:\n out = str(arg)\n if count != len(args) - 1:\n print(out, end='|')\n else:\n print(out)\n count += 1\n return\n else:\n return args[0]\n\n\nclass Binding:\n\n def __init__(self, parent, binding):\n self.parent = parent\n self.binding = binding\n\n def get(self, name):\n if name in self.binding:\n return self.binding[name]\n return self.parent.get(name)\n\n def add(self, var_name, value):\n self.binding[var_name] = value\n\n def contains(self, name):\n for i in self.binding:\n if i == name:\n return True\n if self.parent:\n return 
self.parent.contains(name)\n return False\n\n\nclass FunctionCall:\n\n def __init__(self, func_name, call_args):\n self.func_name = func_name\n self.call_args = call_args\n\n def eval(self, binding):\n if type(self.call_args) == VarReference:\n args = self.call_args.eval(binding)\n return args[1].eval(binding)\n func_binding = Binding(binding.get(self.func_name.value)[2], {})\n parameters = self.call_args[0].eval(func_binding)[0]\n if func_binding.contains(parameters[0]):\n args = self.call_args[1].eval(func_binding)\n else:\n args = self.call_args[1].eval(binding)\n for i in range(len(parameters)):\n func_binding.add(parameters[i], args[i])\n code = func_binding.get(self.func_name.value)[1]\n return code.eval(func_binding)\n\n\nclass FunctionDefinition:\n\n def __init__(self, param_list, code_block):\n self.param_list = param_list\n self.code_block = code_block\n\n def eval(self, binding):\n func_binding = Binding(binding, {})\n return self.param_list, self.code_block, func_binding\n\n\nclass CallArguments:\n\n def __init__(self, arguments):\n self.arguments = arguments\n\n def eval(self, binding):\n arg_list = []\n for arg in self.arguments:\n arg_list.append(arg.eval(binding))\n return arg_list\n\n\nclass VariableDecl:\n\n def __init__(self, declarations):\n self.declarations = declarations\n\n def eval(self, binding):\n for decl in self.declarations:\n temp = decl.eval(binding)\n return temp\n\n\nclass Decl:\n\n def __init__(self, var_name, val):\n self.var_name = var_name\n self.val = val\n\n def eval(self, binding):\n var_val = self.val.eval(binding)\n binding.add(self.var_name, var_val)\n return var_val\n\n\nclass Assignment:\n\n def __init__(self, var_name, val):\n self.var_name = var_name\n self.val = val\n\n def eval(self, binding):\n var_val = self.val.eval(binding)\n binding.add(self.var_name, var_val)\n return var_val\n\n\nclass VarReference:\n\n def __init__(self, var_name):\n self.var_name = var_name\n\n def eval(self, binding):\n return 
binding.get(self.var_name)\n\n\nclass EqualTo(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) == self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass NotEqualTo(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) != self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass LessThan(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) < self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass LessThanOrEqualTo(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) <= self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass GreaterThan(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) > self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass GreaterThanOrEqualTo(BinaryOp):\n\n def eval(self, binding):\n if self.left.eval(binding) >= self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass IfExpression:\n\n def __init__(self, bool_expr, if_block):\n self.bool_expr = bool_expr\n self.if_block = if_block\n\n def eval(self, binding):\n bool_result = self.bool_expr.eval(binding)\n if type(bool_result) is not int:\n bool_result = bool_result[0]\n if bool_result == 1:\n return self.if_block.eval(binding)\n else:\n return 0\n\n\nclass IfElseExpression:\n\n def __init__(self, bool_expr, if_block, else_block):\n self.bool_expr = bool_expr\n self.if_block = if_block\n self.else_block = else_block\n\n def eval(self, binding):\n bool_result = self.bool_expr.eval(binding)\n if type(bool_result) is not int:\n bool_result = bool_result[0]\n if bool_result == 1:\n return self.if_block.eval(binding)\n else:\n return self.else_block.eval(binding)\n",
"step-5": "# Interprets the AST\n\n\nclass Program:\n def __init__(self, code):\n self.code = code\n\n def eval(self, binding):\n return self.code.eval(binding)\n\n\nclass Code:\n def __init__(self, statements):\n self.statements = statements\n\n def eval(self, binding):\n val = 0\n for statement in self.statements:\n val = statement.eval(binding)\n return val\n\n\nclass Statement:\n def __init__(self, statement):\n self.statement = statement\n\n def eval(self, binding):\n return self.statement.eval(binding)\n\n\nclass Expr:\n def __init__(self, expression):\n self.expression = expression\n\n def eval(self, binding):\n return self.expression.eval(binding)\n\n\nclass Integer:\n def __init__(self, value):\n self.value = value\n\n def eval(self, binding):\n return int(self.value)\n\n\nclass BinaryOp:\n def __init__(self, left, right):\n self.left = left\n self.right = right\n\n\nclass Sum(BinaryOp):\n def eval(self, binding):\n return self.left.eval(binding) + self.right.eval(binding)\n\n\nclass Sub(BinaryOp):\n def eval(self, binding):\n return self.left.eval(binding) - self.right.eval(binding)\n\n\nclass Mult(BinaryOp):\n def eval(self, binding):\n return self.left.eval(binding) * self.right.eval(binding)\n\n\nclass Div(BinaryOp):\n def eval(self, binding):\n return int(self.left.eval(binding) / self.right.eval(binding))\n\n\nclass BuiltInFunction:\n # built-in functions are print, and ()\n def __init__(self, func_name, call_args):\n self.func_name = func_name\n self.call_args = call_args\n\n def eval(self, binding):\n args = self.call_args.eval(binding)\n if self.func_name == \"print\":\n if type(args) == int:\n print(args)\n else:\n print(\"Print: \")\n count = 0\n for arg in args:\n out = str(arg)\n if count != len(args) - 1:\n print(out, end=\"|\")\n else:\n print(out)\n count += 1\n return\n else:\n return args[0]\n\n\n# binding class used to store variables and functions\nclass Binding:\n def __init__(self, parent, binding):\n self.parent = parent\n 
self.binding = binding\n\n def get(self, name):\n if name in self.binding:\n return self.binding[name]\n return self.parent.get(name)\n\n def add(self, var_name, value):\n self.binding[var_name] = value\n\n def contains(self, name):\n for i in self.binding:\n if i == name:\n return True\n if self.parent:\n return self.parent.contains(name)\n return False\n\n\nclass FunctionCall:\n def __init__(self, func_name, call_args):\n self.func_name = func_name\n self.call_args = call_args\n\n def eval(self, binding):\n # if function has no parameters\n if type(self.call_args) == VarReference:\n args = self.call_args.eval(binding)\n return args[1].eval(binding)\n\n # else\n # creates a new function binding that is a child of the binding when the function was created\n func_binding = Binding(binding.get(self.func_name.value)[2], {})\n # sets parameters and arguments and adds them to the function binding\n parameters = self.call_args[0].eval(func_binding)[0]\n # checks to see if the arg values for param_list are in the function binding. 
This is for recursion.\n if func_binding.contains(parameters[0]):\n args = self.call_args[1].eval(func_binding)\n # if not, checks if the arg values are in the global binding\n else:\n args = self.call_args[1].eval(binding)\n\n # assigns the arg values to the parameters and adds it to the function binding\n for i in range(len(parameters)):\n func_binding.add(parameters[i], args[i])\n\n # returns the evaluated code using the function binding\n code = func_binding.get(self.func_name.value)[1]\n return code.eval(func_binding)\n\n\nclass FunctionDefinition:\n def __init__(self, param_list, code_block):\n self.param_list = param_list\n self.code_block = code_block\n\n def eval(self, binding):\n # creates a new binding\n func_binding = Binding(binding, {})\n # used to store a function's parameters, code, and function binding in global binding\n return self.param_list, self.code_block, func_binding\n\n\nclass CallArguments:\n def __init__(self, arguments):\n self.arguments = arguments\n\n def eval(self, binding):\n arg_list = []\n for arg in self.arguments:\n arg_list.append(arg.eval(binding))\n return arg_list\n\n\nclass VariableDecl:\n def __init__(self, declarations):\n self.declarations = declarations\n\n def eval(self, binding):\n for decl in self.declarations:\n temp = decl.eval(binding)\n return temp\n\n\nclass Decl:\n def __init__(self, var_name, val):\n self.var_name = var_name\n self.val = val\n\n def eval(self, binding):\n var_val = self.val.eval(binding)\n binding.add(self.var_name, var_val)\n return var_val\n\n\nclass Assignment:\n def __init__(self, var_name, val):\n self.var_name = var_name\n self.val = val\n\n def eval(self, binding):\n var_val = self.val.eval(binding)\n binding.add(self.var_name, var_val)\n return var_val\n\n\nclass VarReference:\n def __init__(self, var_name):\n self.var_name = var_name\n\n def eval(self, binding):\n return binding.get(self.var_name)\n\n\nclass EqualTo(BinaryOp):\n def eval(self, binding):\n if self.left.eval(binding) == 
self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass NotEqualTo(BinaryOp):\n def eval(self, binding):\n if self.left.eval(binding) != self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass LessThan(BinaryOp):\n def eval(self, binding):\n if self.left.eval(binding) < self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass LessThanOrEqualTo(BinaryOp):\n def eval(self, binding):\n if self.left.eval(binding) <= self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass GreaterThan(BinaryOp):\n def eval(self, binding):\n if self.left.eval(binding) > self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass GreaterThanOrEqualTo(BinaryOp):\n def eval(self, binding):\n if self.left.eval(binding) >= self.right.eval(binding):\n return 1\n else:\n return 0\n\n\nclass IfExpression:\n def __init__(self, bool_expr, if_block):\n self.bool_expr = bool_expr\n self.if_block = if_block\n\n def eval(self, binding):\n bool_result = self.bool_expr.eval(binding)\n if type(bool_result) is not int:\n bool_result = bool_result[0]\n if bool_result == 1:\n return self.if_block.eval(binding)\n else:\n return 0\n\n\nclass IfElseExpression:\n def __init__(self, bool_expr, if_block, else_block):\n self.bool_expr = bool_expr\n self.if_block = if_block\n self.else_block = else_block\n\n def eval(self, binding):\n bool_result = self.bool_expr.eval(binding)\n if type(bool_result) is not int:\n bool_result = bool_result[0]\n if bool_result == 1:\n return self.if_block.eval(binding)\n else:\n return self.else_block.eval(binding)\n\n",
"step-ids": [
42,
50,
56,
68,
73
]
}
|
[
42,
50,
56,
68,
73
] |
'''
Created on Nov 16, 2013
@author: mo
'''
import unittest
from Board import TicTacToe_Board
from ComputerPlayer import ComputerPlayer
from utils import debug_print as d_pr
from main import StartNewGame
class Test(unittest.TestCase):
    def setUp(self):
        # Runs before each test: start from a fresh board so no state
        # leaks between test cases.
        self.the_board = TicTacToe_Board()
def tearDown(self):
pass
#these may be impossible boards, but still it tests the win detector
def test_these_should_win_for_x(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['x', 'x', 'x'],
['o', 'x', 'o'],
['o', 'x', 'o']]), 'x', "should return x")
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['x', 'o', 'o'],
['o', 'x', 'o'],
['x', 'o', 'x']
]) , 'x', 'should return x')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['o','x', 'o'],
['x', 'x', 'x'],
['-', '-', '-']
]), 'x', 'should return x'
)
def test_these_should_win_for_o(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['o', 'x', 'o'],
['o', 'x', 'x'],
['o', 'o', 'x']]), 'o', "should return o")
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['x', 'o', '-'],
['o', 'o', 'o'],
['o', 'x', 'x']
]) , 'o', 'should return o')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['o','x', 'o'],
['x', 'o', 'x'],
['-', '-', 'o']
]), 'o', 'should return o'
)
def test_these_should_win_for_nobody(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['x', 'x', '-'],
['o', '-', 'o'],
['o', '-', 'o']]), None, "should return None")
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['-', '-', '-'],
['-', '-', '-'],
['x', 'o', 'x']
]) , None, 'should return None')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['o','x', 'o'],
['-', '-', 'x'],
['-', 'o', 'o']
]), None, 'should return None'
)
def test_make_move(self):
self.the_board.board_array=[ ['x', '-', 'x'],
['o', '-', 'o'],
['o', 'x', '-']
]
self.the_board.whose_turn='o'
self.the_board.MakeMove([1,1])
self.assertEqual(self.the_board.board_array[1][1], 'o', "should be an o")
self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')
def test_computer_player_get_outcome(self):
comp_player = ComputerPlayer('x', self.the_board)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [ ['-', '-', 'x'],
['-', 'o', '-'],
['-', '-', '-']
]
self.the_board.whose_turn = 'x'
move_seq_1 = [ {'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [2,1]}, {'player': 'x', 'move': [0,0]} ]
out=self.the_board.GetOutcomeOfMoveSequence(move_seq_1)
self.assertEqual(out, 'x', 'x should win: outcome should be x')
move_seq_2 = [{'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [2,1]}]
out = self.the_board.GetOutcomeOfMoveSequence(move_seq_2)
self.assertEqual(out, None, 'no one should win: outcome will be None')
move_seq_3 = [ {'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [0,0] }, {'player': 'x', 'move' : [2,1]},
{'player': 'o', 'move' : [2,2] }
]
out = self.the_board.GetOutcomeOfMoveSequence(move_seq_3)
self.assertEqual(out, 'o', 'o should win')
def test_get_winning_moves_for_opponent(self):
comp_player = ComputerPlayer('x', self.the_board)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [ ['x', '-', 'x'],
['-', 'o', '-'],
['o', 'o', '-']
]
self.the_board.whose_turn = 'x'
winning_moves=self.the_board.GetWinningMovesFor( 'human')
d_pr(winning_moves)
self.assertIn([0,1], winning_moves)
self.assertIn([2,2], winning_moves)
comp_player = ComputerPlayer('o', self.the_board)
self.the_board.human_player_x_or_o = 'x'
self.the_board.c_player_x_or_o = 'o'
self.the_board.board_array = [ ['x', '-', 'x'],
['-', 'o', '-'],
['o', 'o', '-']
]
self.the_board.whose_turn = 'o'
winning_moves=self.the_board.GetWinningMovesFor( 'human')
d_pr(winning_moves)
self.assertIn([0,1], winning_moves)
def test_get_threatening_moves(self):
comp_player = ComputerPlayer('x', self.the_board)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [ ['-', '-', 'x'],
['-', 'o', '-'],
['o', '-', '-']
]
self.the_board.whose_turn = 'x'
threatening_moves=comp_player.GetThreateningMovesWithoutTraps(self.the_board.GetEmptySquares())
self.assertIn([0,0], threatening_moves)
self.assertIn([2,2], threatening_moves)
d_pr('threats without traps: ' + str(threatening_moves))
self.assertEqual(len(threatening_moves), 2)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [ ['-', '-', 'o'],
['-', 'x', '-'],
['o', '-', '-']
]
self.the_board.whose_turn = 'x'
threatening_moves=comp_player.GetThreateningMovesWithoutTraps(self.the_board.GetEmptySquares())
self.assertIn([0,1], threatening_moves)
self.assertIn([2,1], threatening_moves)
self.assertIn([1,0], threatening_moves)
self.assertIn([1,2], threatening_moves)
d_pr('threats without traps: ' + str(threatening_moves))
self.assertEqual(len(threatening_moves), 4)
def test_algorithm_by_playing_large_num_of_random_games(self):
NUM_GAMES = 10
#NUM_GAMES=100000 # this works but takes a long time
NUM_GAMES=10
for i in range(0, NUM_GAMES + 1):
win_result = StartNewGame(UseRandom=True)
self.assertTrue(win_result == 'Computer' or win_result == 'Tie')
def test_print(self):
self.the_board.board_array = [ ['-', '-', 'x'],
['-', 'o', '-'],
['x', 'o', '-']]
self.the_board.PrintBoardToConsole()
def test_empty_squares(self):
pass
if __name__ == "__main__":
    # Run the whole test suite when this file is executed directly.
    # (Removed commented-out dead code that patched sys.argv to select a
    # single test by name.)
    unittest.main()
|
normal
|
{
"blob_id": "1968923cd923e68dc5ff2148802f18e40a5e6c33",
"index": 939,
"step-1": "<mask token>\n\n\nclass Test(unittest.TestCase):\n <mask token>\n\n def tearDown(self):\n pass\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n\n def test_these_should_win_for_nobody(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n '-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',\n '-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,\n 'should return None')\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n <mask token>\n <mask token>\n <mask token>\n\n def test_algorithm_by_playing_large_num_of_random_games(self):\n NUM_GAMES = 10\n NUM_GAMES = 10\n for i in range(0, 
NUM_GAMES + 1):\n win_result = StartNewGame(UseRandom=True)\n self.assertTrue(win_result == 'Computer' or win_result == 'Tie')\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'x', 'o', '-']]\n self.the_board.PrintBoardToConsole()\n\n def test_empty_squares(self):\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.the_board = TicTacToe_Board()\n\n def tearDown(self):\n pass\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n\n def test_these_should_win_for_nobody(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n '-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',\n '-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,\n 'should return None')\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n <mask token>\n\n def test_get_winning_moves_for_opponent(self):\n comp_player = ComputerPlayer('x', self.the_board)\n 
self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'x'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n self.assertIn([2, 2], winning_moves)\n comp_player = ComputerPlayer('o', self.the_board)\n self.the_board.human_player_x_or_o = 'x'\n self.the_board.c_player_x_or_o = 'o'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'o'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n\n def test_get_threatening_moves(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 0], threatening_moves)\n self.assertIn([2, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 2)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 1], threatening_moves)\n self.assertIn([2, 1], threatening_moves)\n self.assertIn([1, 0], threatening_moves)\n self.assertIn([1, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 4)\n\n def test_algorithm_by_playing_large_num_of_random_games(self):\n NUM_GAMES = 10\n 
NUM_GAMES = 10\n for i in range(0, NUM_GAMES + 1):\n win_result = StartNewGame(UseRandom=True)\n self.assertTrue(win_result == 'Computer' or win_result == 'Tie')\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'x', 'o', '-']]\n self.the_board.PrintBoardToConsole()\n\n def test_empty_squares(self):\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.the_board = TicTacToe_Board()\n\n def tearDown(self):\n pass\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n\n def test_these_should_win_for_nobody(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n '-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',\n '-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,\n 'should return None')\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n\n def test_computer_player_get_outcome(self):\n comp_player = ComputerPlayer('x', self.the_board)\n 
self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n '-', '-', '-']]\n self.the_board.whose_turn = 'x'\n move_seq_1 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [2, 1]}, {'player': 'x', 'move': [0, 0]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_1)\n self.assertEqual(out, 'x', 'x should win: outcome should be x')\n move_seq_2 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [2, 1]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_2)\n self.assertEqual(out, None, 'no one should win: outcome will be None')\n move_seq_3 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [0, 0]}, {'player': 'x', 'move': [2, 1]}, {'player':\n 'o', 'move': [2, 2]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_3)\n self.assertEqual(out, 'o', 'o should win')\n\n def test_get_winning_moves_for_opponent(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'x'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n self.assertIn([2, 2], winning_moves)\n comp_player = ComputerPlayer('o', self.the_board)\n self.the_board.human_player_x_or_o = 'x'\n self.the_board.c_player_x_or_o = 'o'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'o'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n\n def test_get_threatening_moves(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', 
'-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 0], threatening_moves)\n self.assertIn([2, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 2)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 1], threatening_moves)\n self.assertIn([2, 1], threatening_moves)\n self.assertIn([1, 0], threatening_moves)\n self.assertIn([1, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 4)\n\n def test_algorithm_by_playing_large_num_of_random_games(self):\n NUM_GAMES = 10\n NUM_GAMES = 10\n for i in range(0, NUM_GAMES + 1):\n win_result = StartNewGame(UseRandom=True)\n self.assertTrue(win_result == 'Computer' or win_result == 'Tie')\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'x', 'o', '-']]\n self.the_board.PrintBoardToConsole()\n\n def test_empty_squares(self):\n pass\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.the_board = TicTacToe_Board()\n\n def tearDown(self):\n pass\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n\n def test_these_should_win_for_nobody(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n '-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',\n '-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,\n 'should return None')\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n\n def test_computer_player_get_outcome(self):\n comp_player = ComputerPlayer('x', self.the_board)\n 
self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n '-', '-', '-']]\n self.the_board.whose_turn = 'x'\n move_seq_1 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [2, 1]}, {'player': 'x', 'move': [0, 0]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_1)\n self.assertEqual(out, 'x', 'x should win: outcome should be x')\n move_seq_2 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [2, 1]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_2)\n self.assertEqual(out, None, 'no one should win: outcome will be None')\n move_seq_3 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [0, 0]}, {'player': 'x', 'move': [2, 1]}, {'player':\n 'o', 'move': [2, 2]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_3)\n self.assertEqual(out, 'o', 'o should win')\n\n def test_get_winning_moves_for_opponent(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'x'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n self.assertIn([2, 2], winning_moves)\n comp_player = ComputerPlayer('o', self.the_board)\n self.the_board.human_player_x_or_o = 'x'\n self.the_board.c_player_x_or_o = 'o'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'o'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n\n def test_get_threatening_moves(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', 
'-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 0], threatening_moves)\n self.assertIn([2, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 2)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 1], threatening_moves)\n self.assertIn([2, 1], threatening_moves)\n self.assertIn([1, 0], threatening_moves)\n self.assertIn([1, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 4)\n\n def test_algorithm_by_playing_large_num_of_random_games(self):\n NUM_GAMES = 10\n NUM_GAMES = 10\n for i in range(0, NUM_GAMES + 1):\n win_result = StartNewGame(UseRandom=True)\n self.assertTrue(win_result == 'Computer' or win_result == 'Tie')\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'x', 'o', '-']]\n self.the_board.PrintBoardToConsole()\n\n def test_empty_squares(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "'''\nCreated on Nov 16, 2013\n\n@author: mo\n'''\nimport unittest\nfrom Board import TicTacToe_Board\nfrom ComputerPlayer import ComputerPlayer\nfrom utils import debug_print as d_pr\n\nfrom main import StartNewGame\n\nclass Test(unittest.TestCase):\n\n\n def setUp(self):\n self.the_board = TicTacToe_Board()\n\n \n def tearDown(self):\n pass\n\n #these may be impossible boards, but still it tests the win detector\n \n def test_these_should_win_for_x(self):\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['x', 'x', 'x'], \n ['o', 'x', 'o'], \n ['o', 'x', 'o']]), 'x', \"should return x\")\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['x', 'o', 'o'],\n ['o', 'x', 'o'],\n ['x', 'o', 'x']\n \n \n ]) , 'x', 'should return x')\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['o','x', 'o'],\n ['x', 'x', 'x'],\n ['-', '-', '-']\n ]), 'x', 'should return x'\n )\n \n \n \n def test_these_should_win_for_o(self):\n \n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['o', 'x', 'o'], \n ['o', 'x', 'x'], \n ['o', 'o', 'x']]), 'o', \"should return o\")\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['x', 'o', '-'],\n ['o', 'o', 'o'],\n ['o', 'x', 'x']\n \n \n ]) , 'o', 'should return o')\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['o','x', 'o'],\n ['x', 'o', 'x'],\n ['-', '-', 'o']\n ]), 'o', 'should return o'\n )\n \n\n\n def test_these_should_win_for_nobody(self):\n \n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['x', 'x', '-'], \n ['o', '-', 'o'], \n ['o', '-', 'o']]), None, \"should return None\")\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['-', '-', '-'],\n ['-', '-', '-'],\n ['x', 'o', 'x']\n \n \n ]) , None, 'should return None')\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['o','x', 'o'],\n ['-', '-', 'x'],\n ['-', 'o', 'o']\n ]), None, 'should return None'\n )\n \n def test_make_move(self):\n \n 
self.the_board.board_array=[ ['x', '-', 'x'],\n ['o', '-', 'o'],\n ['o', 'x', '-']\n ]\n \n self.the_board.whose_turn='o'\n \n self.the_board.MakeMove([1,1])\n \n self.assertEqual(self.the_board.board_array[1][1], 'o', \"should be an o\")\n \n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n \n \n\n def test_computer_player_get_outcome(self):\n \n comp_player = ComputerPlayer('x', self.the_board)\n \n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n \n \n self.the_board.board_array = [ ['-', '-', 'x'],\n ['-', 'o', '-'],\n ['-', '-', '-']\n ]\n self.the_board.whose_turn = 'x'\n \n move_seq_1 = [ {'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [2,1]}, {'player': 'x', 'move': [0,0]} ]\n \n out=self.the_board.GetOutcomeOfMoveSequence(move_seq_1)\n \n self.assertEqual(out, 'x', 'x should win: outcome should be x')\n \n \n move_seq_2 = [{'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [2,1]}]\n \n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_2)\n self.assertEqual(out, None, 'no one should win: outcome will be None')\n\n move_seq_3 = [ {'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [0,0] }, {'player': 'x', 'move' : [2,1]},\n {'player': 'o', 'move' : [2,2] }\n ]\n \n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_3)\n \n self.assertEqual(out, 'o', 'o should win')\n \n \n def test_get_winning_moves_for_opponent(self):\n \n comp_player = ComputerPlayer('x', self.the_board)\n \n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n \n \n self.the_board.board_array = [ ['x', '-', 'x'],\n ['-', 'o', '-'],\n ['o', 'o', '-']\n ]\n self.the_board.whose_turn = 'x'\n \n winning_moves=self.the_board.GetWinningMovesFor( 'human')\n \n d_pr(winning_moves)\n self.assertIn([0,1], winning_moves)\n self.assertIn([2,2], winning_moves)\n \n comp_player = ComputerPlayer('o', self.the_board)\n \n self.the_board.human_player_x_or_o = 'x'\n 
self.the_board.c_player_x_or_o = 'o'\n \n \n self.the_board.board_array = [ ['x', '-', 'x'],\n ['-', 'o', '-'],\n ['o', 'o', '-']\n ]\n self.the_board.whose_turn = 'o'\n \n winning_moves=self.the_board.GetWinningMovesFor( 'human')\n \n d_pr(winning_moves)\n self.assertIn([0,1], winning_moves)\n \n \n \n def test_get_threatening_moves(self):\n \n comp_player = ComputerPlayer('x', self.the_board)\n \n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n \n \n self.the_board.board_array = [ ['-', '-', 'x'],\n ['-', 'o', '-'],\n ['o', '-', '-']\n ]\n self.the_board.whose_turn = 'x'\n \n threatening_moves=comp_player.GetThreateningMovesWithoutTraps(self.the_board.GetEmptySquares())\n \n \n self.assertIn([0,0], threatening_moves)\n self.assertIn([2,2], threatening_moves)\n \n d_pr('threats without traps: ' + str(threatening_moves))\n \n self.assertEqual(len(threatening_moves), 2)\n \n \n \n \n \n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n \n \n self.the_board.board_array = [ ['-', '-', 'o'],\n ['-', 'x', '-'],\n ['o', '-', '-']\n ]\n self.the_board.whose_turn = 'x'\n \n threatening_moves=comp_player.GetThreateningMovesWithoutTraps(self.the_board.GetEmptySquares())\n \n \n self.assertIn([0,1], threatening_moves)\n self.assertIn([2,1], threatening_moves)\n self.assertIn([1,0], threatening_moves)\n self.assertIn([1,2], threatening_moves)\n \n \n \n d_pr('threats without traps: ' + str(threatening_moves))\n \n self.assertEqual(len(threatening_moves), 4)\n \n \n \n \n def test_algorithm_by_playing_large_num_of_random_games(self):\n \n NUM_GAMES = 10\n #NUM_GAMES=100000 # this works but takes a long time\n NUM_GAMES=10\n \n for i in range(0, NUM_GAMES + 1):\n win_result = StartNewGame(UseRandom=True)\n \n self.assertTrue(win_result == 'Computer' or win_result == 'Tie')\n \n \n def test_print(self):\n \n \n self.the_board.board_array = [ ['-', '-', 'x'],\n ['-', 'o', '-'],\n ['x', 'o', '-']]\n \n 
self.the_board.PrintBoardToConsole()\n \n \n def test_empty_squares(self):\n pass\n \n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()\n",
"step-ids": [
9,
12,
13,
14,
16
]
}
|
[
9,
12,
13,
14,
16
] |
import matplotlib.pyplot as plt
import numpy as np

# Grouped bar chart: MAE (seconds) of three models (RF, ANNs, AdaBoost)
# versus training-set size (%).

plt.rcParams['savefig.dpi'] = 300  # DPI for saved images
plt.rcParams['figure.dpi'] = 300   # on-screen figure DPI
# SimHei font so the Chinese axis labels render; keep minus signs ASCII.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Training-set sizes (%) and each model's MAE at that size.
x_axis = [20, 40, 60, 80, 100]
rf = [184, 174, 166, 159, 157.5]
anns = [186, 179, 170, 164, 161]
adaboost = [187.5, 176, 172, 163, 162]

x = np.arange(len(x_axis))  # group centers on the x axis
width = 0.2                 # width of each bar within a group

fig, ax = plt.subplots()
p_rf = ax.bar(x - width, rf, width, alpha=0.9)
p_anns = ax.bar(x, anns, width, alpha=0.9, color='red')
p_adaboost = ax.bar(x + width, adaboost, width, alpha=0.9, color='green')

# BUG FIX: with default align='center' the three bars of a group sit at
# x-width, x and x+width, so the group is centered on x.  The original
# set_xticks(x + width/2) shifted every label half a bar to the right.
ax.set_xticks(x)
ax.set_xticklabels(x_axis)

plt.legend((p_rf[0], p_anns[0], p_adaboost[0]), ('RF', 'ANNs', 'AdaBoost'),
           loc='best', fontsize=20)
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
plt.ylim(150, 200)  # fix the visible y range
plt.xlabel('训练集大小(%)', fontsize=20)
plt.ylabel('MAE(s)', fontsize=20)
plt.show()
# Alternative: save to disk instead of showing interactively.
# plt.savefig('MAE.png', dpi=3600)
|
normal
|
{
"blob_id": "13342922022f0a0e8928c81c1c4716125af0b2c4",
"index": 418,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nax.set_xticks(x + width / 2)\nax.set_xticklabels(x_axis)\nplt.legend((p_rf[0], p_anns[0], p_adaboost[0]), ('RF', 'ANNs', 'AdaBoost'),\n loc='best', fontsize=20)\nplt.xticks(fontsize=18)\nplt.yticks(fontsize=18)\nplt.ylim(150, 200)\nplt.xlabel('训练集大小(%)', fontsize=20)\nplt.ylabel('MAE(s)', fontsize=20)\nplt.show()\n",
"step-3": "<mask token>\nplt.rcParams['savefig.dpi'] = 300\nplt.rcParams['figure.dpi'] = 300\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\nx_axis = [20, 40, 60, 80, 100]\nrf = [184, 174, 166, 159, 157.5]\nanns = [186, 179, 170, 164, 161]\nadaboost = [187.5, 176, 172, 163, 162]\nx = np.arange(len(x_axis))\nwidth = 0.2\nfig, ax = plt.subplots()\np_rf = ax.bar(x - width, rf, width, alpha=0.9)\np_anns = ax.bar(x, anns, width, alpha=0.9, color='red')\np_adaboost = ax.bar(x + width, adaboost, width, alpha=0.9, color='green')\nax.set_xticks(x + width / 2)\nax.set_xticklabels(x_axis)\nplt.legend((p_rf[0], p_anns[0], p_adaboost[0]), ('RF', 'ANNs', 'AdaBoost'),\n loc='best', fontsize=20)\nplt.xticks(fontsize=18)\nplt.yticks(fontsize=18)\nplt.ylim(150, 200)\nplt.xlabel('训练集大小(%)', fontsize=20)\nplt.ylabel('MAE(s)', fontsize=20)\nplt.show()\n",
"step-4": "import matplotlib.pyplot as plt\nimport numpy as np\nplt.rcParams['savefig.dpi'] = 300\nplt.rcParams['figure.dpi'] = 300\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\nx_axis = [20, 40, 60, 80, 100]\nrf = [184, 174, 166, 159, 157.5]\nanns = [186, 179, 170, 164, 161]\nadaboost = [187.5, 176, 172, 163, 162]\nx = np.arange(len(x_axis))\nwidth = 0.2\nfig, ax = plt.subplots()\np_rf = ax.bar(x - width, rf, width, alpha=0.9)\np_anns = ax.bar(x, anns, width, alpha=0.9, color='red')\np_adaboost = ax.bar(x + width, adaboost, width, alpha=0.9, color='green')\nax.set_xticks(x + width / 2)\nax.set_xticklabels(x_axis)\nplt.legend((p_rf[0], p_anns[0], p_adaboost[0]), ('RF', 'ANNs', 'AdaBoost'),\n loc='best', fontsize=20)\nplt.xticks(fontsize=18)\nplt.yticks(fontsize=18)\nplt.ylim(150, 200)\nplt.xlabel('训练集大小(%)', fontsize=20)\nplt.ylabel('MAE(s)', fontsize=20)\nplt.show()\n",
"step-5": "import matplotlib.pyplot as plt\nimport numpy as np\nplt.rcParams['savefig.dpi'] = 300 #图片像素\nplt.rcParams['figure.dpi'] = 300 #分辨率\nplt.rcParams['font.sans-serif']=['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\nx_axis = [20,40,60,80,100]\n\nrf = [184,174,166,159,157.5]\nanns = [186,179,170,164,161]\nadaboost = [187.5,176,172,163,162]\n\n\nx = np.arange(len(x_axis)) #首先用第一个的长度作为横坐标\nwidth = 0.2 #设置柱与柱之间的宽度\nfig,ax = plt.subplots()\np_rf = ax.bar(x-width,rf,width,alpha = 0.9,)\np_anns = ax.bar(x,anns,width,alpha = 0.9,color= 'red')\np_adaboost = ax.bar(x+width,adaboost,width,alpha = 0.9,color= 'green')\nax.set_xticks(x +width/2)#将坐标设置在指定位置\nax.set_xticklabels(x_axis)#将横坐标替换成\nplt.legend((p_rf[0],p_anns[0],p_adaboost[0]),('RF','ANNs','AdaBoost'),loc='best',fontsize=20)\nplt.xticks(fontsize=18)\nplt.yticks(fontsize=18)\nplt.ylim(150,200) # 指定Y轴的高度\nplt.xlabel('训练集大小(%)',fontsize=20)\nplt.ylabel('MAE(s)',fontsize=20)\nplt.show()\n#plt.savefig('MAE.png', dpi=3600)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Create an empty stack (backed by a plain Python list) and show its
# initial, empty contents.
from typing import Dict, List

pilha = []
print(pilha)
|
normal
|
{
"blob_id": "f3f3bbb715f16dc84221f3349aa5f26e9a6dc7c8",
"index": 2726,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(pilha)\n",
"step-3": "<mask token>\npilha = list()\nprint(pilha)\n",
"step-4": "from typing import Dict, List\npilha = list()\nprint(pilha)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def getData():
power_file = './data/power_20210129_20210429_preprocess_1hour'
power_df = read_csv(power_file + '.csv', encoding='CP949', converters={
'date': int})
print(power_df.shape)
sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'
sensor_df = read_csv(sensor_file + '.csv', encoding='CP949', converters
={'date': int})
sensor_df = sensor_df.sort_values('date')
print(sensor_df.shape)
power_df.drop(['date'], axis=1, inplace=True)
pow_scaler = MinMaxScaler(feature_range=(0, 1))
scaled_pow = pow_scaler.fit_transform(power_df.values)
power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns,
index=list(power_df.index.values))
weather_df = sensor_df.copy()
weather_df.drop(['date'], axis=1, inplace=True)
weather_scaler = MinMaxScaler(feature_range=(0, 1))
scaled_weather = weather_scaler.fit_transform(weather_df.values)
weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.
columns, index=list(weather_df.index.values))
df = weather_scaleddf.copy()
df.insert(0, 'pow', power_scaleddf.values, True)
return pow_scaler, df
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
np.set_printoptions(suppress=True)
<|reserved_special_token_0|>
def getData():
power_file = './data/power_20210129_20210429_preprocess_1hour'
power_df = read_csv(power_file + '.csv', encoding='CP949', converters={
'date': int})
print(power_df.shape)
sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'
sensor_df = read_csv(sensor_file + '.csv', encoding='CP949', converters
={'date': int})
sensor_df = sensor_df.sort_values('date')
print(sensor_df.shape)
power_df.drop(['date'], axis=1, inplace=True)
pow_scaler = MinMaxScaler(feature_range=(0, 1))
scaled_pow = pow_scaler.fit_transform(power_df.values)
power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns,
index=list(power_df.index.values))
weather_df = sensor_df.copy()
weather_df.drop(['date'], axis=1, inplace=True)
weather_scaler = MinMaxScaler(feature_range=(0, 1))
scaled_weather = weather_scaler.fit_transform(weather_df.values)
weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.
columns, index=list(weather_df.index.values))
df = weather_scaleddf.copy()
df.insert(0, 'pow', power_scaleddf.values, True)
return pow_scaler, df
<|reserved_special_token_0|>
print(data_x.shape)
print(data_y.shape)
<|reserved_special_token_0|>
clf.fit(x=data_x, y=data_y, validation_data=(data_x_val, data_y_val),
batch_size=128, epochs=10)
<|reserved_special_token_0|>
print(predictions.shape)
print(clf.evaluate(data_x_val, data_y_val))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
np.set_printoptions(suppress=True)
EPOCHS = 10
BATCH_SIZE = 128
SHIFT_DAYS = 3
PRED_STEPS = 24 * 6
TIME_STEPS = SHIFT_DAYS * PRED_STEPS
DIMENSION = 15
MODEL_NUM = 10
CAPACITY = 89.7
TRAIN_RATIO = 0.6
VAL_RATIO = 0.2
START_DATE = '2021012899'
END_DATE = '2021042924'
SAVE_PATH = './data/'
SAVE_NAME = 'autoML_Test'
def getData():
power_file = './data/power_20210129_20210429_preprocess_1hour'
power_df = read_csv(power_file + '.csv', encoding='CP949', converters={
'date': int})
print(power_df.shape)
sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'
sensor_df = read_csv(sensor_file + '.csv', encoding='CP949', converters
={'date': int})
sensor_df = sensor_df.sort_values('date')
print(sensor_df.shape)
power_df.drop(['date'], axis=1, inplace=True)
pow_scaler = MinMaxScaler(feature_range=(0, 1))
scaled_pow = pow_scaler.fit_transform(power_df.values)
power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns,
index=list(power_df.index.values))
weather_df = sensor_df.copy()
weather_df.drop(['date'], axis=1, inplace=True)
weather_scaler = MinMaxScaler(feature_range=(0, 1))
scaled_weather = weather_scaler.fit_transform(weather_df.values)
weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.
columns, index=list(weather_df.index.values))
df = weather_scaleddf.copy()
df.insert(0, 'pow', power_scaleddf.values, True)
return pow_scaler, df
pow_scaler, df = getData()
dataset = df
val_split = int(len(dataset) * 0.7)
data_train = dataset[:val_split]
validation_data = dataset[val_split:]
data_x = data_train[['pow', 'temp', 'humidity', 'windspeed', 'windgust',
'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',
'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',
'feelslike', 'dewpoint', 'outside_status']].astype('float64')
data_x_val = validation_data[['pow', 'temp', 'humidity', 'windspeed',
'windgust', 'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',
'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',
'feelslike', 'dewpoint', 'outside_status']].astype('float64')
data_x_test = dataset[['pow', 'temp', 'humidity', 'windspeed', 'windgust',
'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',
'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',
'feelslike', 'dewpoint', 'outside_status']].astype('float64')
data_y = data_train['pow'].astype('float64')
data_y_val = validation_data['pow'].astype('float64')
print(data_x.shape)
print(data_y.shape)
predict_from = 1
predict_until = 10
lookback = 3
clf = ak.TimeseriesForecaster(lookback=lookback, predict_from=predict_from,
objective='val_loss')
clf.fit(x=data_x, y=data_y, validation_data=(data_x_val, data_y_val),
batch_size=128, epochs=10)
predictions = clf.predict(data_x_test)
print(predictions.shape)
print(clf.evaluate(data_x_val, data_y_val))
<|reserved_special_token_1|>
import pandas as pd
import tensorflow as tf
import autokeras as ak
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
from numpy import concatenate
from pandas import read_csv, DataFrame, concat
from sklearn.preprocessing import MinMaxScaler
np.set_printoptions(suppress=True)
EPOCHS = 10
BATCH_SIZE = 128
SHIFT_DAYS = 3
PRED_STEPS = 24 * 6
TIME_STEPS = SHIFT_DAYS * PRED_STEPS
DIMENSION = 15
MODEL_NUM = 10
CAPACITY = 89.7
TRAIN_RATIO = 0.6
VAL_RATIO = 0.2
START_DATE = '2021012899'
END_DATE = '2021042924'
SAVE_PATH = './data/'
SAVE_NAME = 'autoML_Test'
def getData():
power_file = './data/power_20210129_20210429_preprocess_1hour'
power_df = read_csv(power_file + '.csv', encoding='CP949', converters={
'date': int})
print(power_df.shape)
sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'
sensor_df = read_csv(sensor_file + '.csv', encoding='CP949', converters
={'date': int})
sensor_df = sensor_df.sort_values('date')
print(sensor_df.shape)
power_df.drop(['date'], axis=1, inplace=True)
pow_scaler = MinMaxScaler(feature_range=(0, 1))
scaled_pow = pow_scaler.fit_transform(power_df.values)
power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns,
index=list(power_df.index.values))
weather_df = sensor_df.copy()
weather_df.drop(['date'], axis=1, inplace=True)
weather_scaler = MinMaxScaler(feature_range=(0, 1))
scaled_weather = weather_scaler.fit_transform(weather_df.values)
weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.
columns, index=list(weather_df.index.values))
df = weather_scaleddf.copy()
df.insert(0, 'pow', power_scaleddf.values, True)
return pow_scaler, df
pow_scaler, df = getData()
dataset = df
val_split = int(len(dataset) * 0.7)
data_train = dataset[:val_split]
validation_data = dataset[val_split:]
data_x = data_train[['pow', 'temp', 'humidity', 'windspeed', 'windgust',
'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',
'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',
'feelslike', 'dewpoint', 'outside_status']].astype('float64')
data_x_val = validation_data[['pow', 'temp', 'humidity', 'windspeed',
'windgust', 'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',
'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',
'feelslike', 'dewpoint', 'outside_status']].astype('float64')
data_x_test = dataset[['pow', 'temp', 'humidity', 'windspeed', 'windgust',
'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',
'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',
'feelslike', 'dewpoint', 'outside_status']].astype('float64')
data_y = data_train['pow'].astype('float64')
data_y_val = validation_data['pow'].astype('float64')
print(data_x.shape)
print(data_y.shape)
predict_from = 1
predict_until = 10
lookback = 3
clf = ak.TimeseriesForecaster(lookback=lookback, predict_from=predict_from,
objective='val_loss')
clf.fit(x=data_x, y=data_y, validation_data=(data_x_val, data_y_val),
batch_size=128, epochs=10)
predictions = clf.predict(data_x_test)
print(predictions.shape)
print(clf.evaluate(data_x_val, data_y_val))
<|reserved_special_token_1|>
import pandas as pd
import tensorflow as tf
import autokeras as ak
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
from numpy import concatenate
from pandas import read_csv, DataFrame, concat
from sklearn.preprocessing import MinMaxScaler
np.set_printoptions(suppress=True)
# --- Experiment configuration -------------------------------------------
# NOTE(review): several of these constants are defined but never read by the
# active code below (clf.fit passes the literals 128 / 10 directly, and the
# train/validation split hard-codes 0.7) — confirm which are still intended.
EPOCHS = 10  # unused below: clf.fit passes epochs=10 literally
BATCH_SIZE = 128  # unused below: clf.fit passes batch_size=128 literally
SHIFT_DAYS = 3  # look-back horizon in days
PRED_STEPS = 24*6  # original note said "48 hr in 10-min steps", but 24*6 = 144 (24 hr at 10-min) — TODO confirm
TIME_STEPS = SHIFT_DAYS*PRED_STEPS  # total window length in steps (days * steps/day)
DIMENSION = 15  # NOTE(review): unused; the frames below select 17 columns — confirm
MODEL_NUM = 10  # unused in this script
CAPACITY = 89.7  # presumably plant capacity — unused in this script; verify units
TRAIN_RATIO = 0.6  # unused; the split below uses the literal 0.7
VAL_RATIO = 0.2  # unused; see above
START_DATE = '2021012899'  # unused here; mirrors the date range in the CSV file names
END_DATE = '2021042924'  # unused here
SAVE_PATH = './data/'  # unused by the active code in this script
SAVE_NAME = 'autoML_Test'  # unused by the active code in this script
def getData():
    """Load the power and weather-sensor CSVs, min-max scale both, and join them.

    Reads two preprocessed hourly CSV files (CP949-encoded, with an integer
    ``date`` column), scales the power readings and the weather features to
    [0, 1] with separate ``MinMaxScaler`` instances, and inserts the scaled
    power as the first column ``'pow'`` of the scaled weather frame.

    Returns:
        tuple: ``(pow_scaler, df)`` — the fitted power scaler (presumably
        kept so predictions can be inverse-transformed later — confirm with
        callers) and the joined, fully scaled DataFrame.
    """
    # Hourly power readings; 'date' is parsed as int.
    power_file = './data/power_20210129_20210429_preprocess_1hour'
    power_df = read_csv(power_file+'.csv', encoding='CP949', converters={'date':int})
    print(power_df.shape)
    # Hourly weather-sensor readings, sorted chronologically by 'date'.
    sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'
    sensor_df = read_csv(sensor_file+'.csv', encoding='CP949', converters={'date':int})
    sensor_df = sensor_df.sort_values('date')
    print(sensor_df.shape)
    # Scale power to [0, 1]; the fitted scaler is part of the return value.
    power_df.drop(['date'], axis=1, inplace=True)
    pow_scaler = MinMaxScaler(feature_range = (0, 1))
    scaled_pow = pow_scaler.fit_transform(power_df.values)
    power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns, index=list(power_df.index.values))
    # Scale the weather features to [0, 1] with an independent scaler.
    weather_df = sensor_df.copy()
    weather_df.drop(['date'], axis=1, inplace=True)
    weather_scaler = MinMaxScaler(feature_range = (0, 1))
    scaled_weather = weather_scaler.fit_transform(weather_df.values)
    weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.columns, index=list(weather_df.index.values))
    # Join: scaled power becomes the first column of the weather frame.
    df = weather_scaleddf.copy()
    df.insert(0, 'pow', power_scaleddf.values, True)
    return pow_scaler, df
# Load the scaled, joined dataset; pow_scaler is kept in scope (presumably for
# later inverse transformation of scaled power values — currently unused here).
pow_scaler, df = getData()
dataset = df
# Chronological 70/30 split: first 70% of rows for training, rest for validation.
val_split = int(len(dataset) * 0.7)
data_train = dataset[:val_split]
validation_data = dataset[val_split:]
# Training feature matrix: scaled power plus all weather columns.
data_x = data_train[
    [
        'pow', 'temp', 'humidity', 'windspeed', 'windgust', 'maxdailygust',
        'winddir', 'hourlyrainin', 'dailyrainin', 'weeklyrainin',
        'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv', 'feelslike',
        'dewpoint', 'outside_status'
    ]
].astype("float64")
# Same feature set for the held-out validation slice.
data_x_val = validation_data[
    [
        'pow', 'temp', 'humidity', 'windspeed', 'windgust', 'maxdailygust',
        'winddir', 'hourlyrainin', 'dailyrainin', 'weeklyrainin',
        'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv', 'feelslike',
        'dewpoint', 'outside_status'
    ]
].astype("float64")
# Prediction input: the full dataset (training data plus the subsequent,
# unseen time steps).
data_x_test = dataset[
    [
        'pow', 'temp', 'humidity', 'windspeed', 'windgust', 'maxdailygust',
        'winddir', 'hourlyrainin', 'dailyrainin', 'weeklyrainin',
        'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv', 'feelslike',
        'dewpoint', 'outside_status'
    ]
].astype("float64")
# Target: the scaled power column.
data_y = data_train["pow"].astype("float64")
data_y_val = validation_data["pow"].astype("float64")
print(data_x.shape)
print(data_y.shape)
# Forecaster configuration — see AutoKeras TimeseriesForecaster docs for the
# exact semantics of lookback/predict_from.  predict_until is defined but its
# constructor argument is commented out below, so it is currently unused.
predict_from = 1
predict_until = 10
lookback = 3
clf = ak.TimeseriesForecaster(
    lookback=lookback,
    predict_from=predict_from,
    #predict_until=predict_until,
    #max_trials=1,
    objective="val_loss",
)
# Run the architecture search / training on the training slice, scoring
# candidate models on the held-out validation slice.
clf.fit(
    x=data_x,
    y=data_y,
    validation_data=(data_x_val, data_y_val),
    batch_size=128,
    epochs=10,
)
# Predict with the best found model (input includes the original training data).
predictions = clf.predict(data_x_test)
print(predictions.shape)
# Report the best model's loss/metrics on the validation data.
print(clf.evaluate(data_x_val, data_y_val))
|
flexible
|
{
"blob_id": "013189cd67cc44efd539c75ed235a0753d95f54e",
"index": 2165,
"step-1": "<mask token>\n\n\ndef getData():\n power_file = './data/power_20210129_20210429_preprocess_1hour'\n power_df = read_csv(power_file + '.csv', encoding='CP949', converters={\n 'date': int})\n print(power_df.shape)\n sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'\n sensor_df = read_csv(sensor_file + '.csv', encoding='CP949', converters\n ={'date': int})\n sensor_df = sensor_df.sort_values('date')\n print(sensor_df.shape)\n power_df.drop(['date'], axis=1, inplace=True)\n pow_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_pow = pow_scaler.fit_transform(power_df.values)\n power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns,\n index=list(power_df.index.values))\n weather_df = sensor_df.copy()\n weather_df.drop(['date'], axis=1, inplace=True)\n weather_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_weather = weather_scaler.fit_transform(weather_df.values)\n weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.\n columns, index=list(weather_df.index.values))\n df = weather_scaleddf.copy()\n df.insert(0, 'pow', power_scaleddf.values, True)\n return pow_scaler, df\n\n\n<mask token>\n",
"step-2": "<mask token>\nnp.set_printoptions(suppress=True)\n<mask token>\n\n\ndef getData():\n power_file = './data/power_20210129_20210429_preprocess_1hour'\n power_df = read_csv(power_file + '.csv', encoding='CP949', converters={\n 'date': int})\n print(power_df.shape)\n sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'\n sensor_df = read_csv(sensor_file + '.csv', encoding='CP949', converters\n ={'date': int})\n sensor_df = sensor_df.sort_values('date')\n print(sensor_df.shape)\n power_df.drop(['date'], axis=1, inplace=True)\n pow_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_pow = pow_scaler.fit_transform(power_df.values)\n power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns,\n index=list(power_df.index.values))\n weather_df = sensor_df.copy()\n weather_df.drop(['date'], axis=1, inplace=True)\n weather_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_weather = weather_scaler.fit_transform(weather_df.values)\n weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.\n columns, index=list(weather_df.index.values))\n df = weather_scaleddf.copy()\n df.insert(0, 'pow', power_scaleddf.values, True)\n return pow_scaler, df\n\n\n<mask token>\nprint(data_x.shape)\nprint(data_y.shape)\n<mask token>\nclf.fit(x=data_x, y=data_y, validation_data=(data_x_val, data_y_val),\n batch_size=128, epochs=10)\n<mask token>\nprint(predictions.shape)\nprint(clf.evaluate(data_x_val, data_y_val))\n",
"step-3": "<mask token>\nnp.set_printoptions(suppress=True)\nEPOCHS = 10\nBATCH_SIZE = 128\nSHIFT_DAYS = 3\nPRED_STEPS = 24 * 6\nTIME_STEPS = SHIFT_DAYS * PRED_STEPS\nDIMENSION = 15\nMODEL_NUM = 10\nCAPACITY = 89.7\nTRAIN_RATIO = 0.6\nVAL_RATIO = 0.2\nSTART_DATE = '2021012899'\nEND_DATE = '2021042924'\nSAVE_PATH = './data/'\nSAVE_NAME = 'autoML_Test'\n\n\ndef getData():\n power_file = './data/power_20210129_20210429_preprocess_1hour'\n power_df = read_csv(power_file + '.csv', encoding='CP949', converters={\n 'date': int})\n print(power_df.shape)\n sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'\n sensor_df = read_csv(sensor_file + '.csv', encoding='CP949', converters\n ={'date': int})\n sensor_df = sensor_df.sort_values('date')\n print(sensor_df.shape)\n power_df.drop(['date'], axis=1, inplace=True)\n pow_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_pow = pow_scaler.fit_transform(power_df.values)\n power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns,\n index=list(power_df.index.values))\n weather_df = sensor_df.copy()\n weather_df.drop(['date'], axis=1, inplace=True)\n weather_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_weather = weather_scaler.fit_transform(weather_df.values)\n weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.\n columns, index=list(weather_df.index.values))\n df = weather_scaleddf.copy()\n df.insert(0, 'pow', power_scaleddf.values, True)\n return pow_scaler, df\n\n\npow_scaler, df = getData()\ndataset = df\nval_split = int(len(dataset) * 0.7)\ndata_train = dataset[:val_split]\nvalidation_data = dataset[val_split:]\ndata_x = data_train[['pow', 'temp', 'humidity', 'windspeed', 'windgust',\n 'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',\n 'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',\n 'feelslike', 'dewpoint', 'outside_status']].astype('float64')\ndata_x_val = validation_data[['pow', 'temp', 'humidity', 'windspeed',\n 'windgust', 'maxdailygust', 
'winddir', 'hourlyrainin', 'dailyrainin',\n 'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',\n 'feelslike', 'dewpoint', 'outside_status']].astype('float64')\ndata_x_test = dataset[['pow', 'temp', 'humidity', 'windspeed', 'windgust',\n 'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',\n 'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',\n 'feelslike', 'dewpoint', 'outside_status']].astype('float64')\ndata_y = data_train['pow'].astype('float64')\ndata_y_val = validation_data['pow'].astype('float64')\nprint(data_x.shape)\nprint(data_y.shape)\npredict_from = 1\npredict_until = 10\nlookback = 3\nclf = ak.TimeseriesForecaster(lookback=lookback, predict_from=predict_from,\n objective='val_loss')\nclf.fit(x=data_x, y=data_y, validation_data=(data_x_val, data_y_val),\n batch_size=128, epochs=10)\npredictions = clf.predict(data_x_test)\nprint(predictions.shape)\nprint(clf.evaluate(data_x_val, data_y_val))\n",
"step-4": "import pandas as pd\nimport tensorflow as tf\nimport autokeras as ak\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport tensorflow as tf\nfrom numpy import concatenate\nfrom pandas import read_csv, DataFrame, concat\nfrom sklearn.preprocessing import MinMaxScaler\nnp.set_printoptions(suppress=True)\nEPOCHS = 10\nBATCH_SIZE = 128\nSHIFT_DAYS = 3\nPRED_STEPS = 24 * 6\nTIME_STEPS = SHIFT_DAYS * PRED_STEPS\nDIMENSION = 15\nMODEL_NUM = 10\nCAPACITY = 89.7\nTRAIN_RATIO = 0.6\nVAL_RATIO = 0.2\nSTART_DATE = '2021012899'\nEND_DATE = '2021042924'\nSAVE_PATH = './data/'\nSAVE_NAME = 'autoML_Test'\n\n\ndef getData():\n power_file = './data/power_20210129_20210429_preprocess_1hour'\n power_df = read_csv(power_file + '.csv', encoding='CP949', converters={\n 'date': int})\n print(power_df.shape)\n sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'\n sensor_df = read_csv(sensor_file + '.csv', encoding='CP949', converters\n ={'date': int})\n sensor_df = sensor_df.sort_values('date')\n print(sensor_df.shape)\n power_df.drop(['date'], axis=1, inplace=True)\n pow_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_pow = pow_scaler.fit_transform(power_df.values)\n power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns,\n index=list(power_df.index.values))\n weather_df = sensor_df.copy()\n weather_df.drop(['date'], axis=1, inplace=True)\n weather_scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_weather = weather_scaler.fit_transform(weather_df.values)\n weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.\n columns, index=list(weather_df.index.values))\n df = weather_scaleddf.copy()\n df.insert(0, 'pow', power_scaleddf.values, True)\n return pow_scaler, df\n\n\npow_scaler, df = getData()\ndataset = df\nval_split = int(len(dataset) * 0.7)\ndata_train = dataset[:val_split]\nvalidation_data = dataset[val_split:]\ndata_x = data_train[['pow', 'temp', 'humidity', 'windspeed', 'windgust',\n 'maxdailygust', 
'winddir', 'hourlyrainin', 'dailyrainin',\n 'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',\n 'feelslike', 'dewpoint', 'outside_status']].astype('float64')\ndata_x_val = validation_data[['pow', 'temp', 'humidity', 'windspeed',\n 'windgust', 'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',\n 'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',\n 'feelslike', 'dewpoint', 'outside_status']].astype('float64')\ndata_x_test = dataset[['pow', 'temp', 'humidity', 'windspeed', 'windgust',\n 'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',\n 'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',\n 'feelslike', 'dewpoint', 'outside_status']].astype('float64')\ndata_y = data_train['pow'].astype('float64')\ndata_y_val = validation_data['pow'].astype('float64')\nprint(data_x.shape)\nprint(data_y.shape)\npredict_from = 1\npredict_until = 10\nlookback = 3\nclf = ak.TimeseriesForecaster(lookback=lookback, predict_from=predict_from,\n objective='val_loss')\nclf.fit(x=data_x, y=data_y, validation_data=(data_x_val, data_y_val),\n batch_size=128, epochs=10)\npredictions = clf.predict(data_x_test)\nprint(predictions.shape)\nprint(clf.evaluate(data_x_val, data_y_val))\n",
"step-5": "import pandas as pd\r\nimport tensorflow as tf\r\nimport autokeras as ak\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport tensorflow as tf\r\n\r\nfrom numpy import concatenate\r\nfrom pandas import read_csv, DataFrame, concat\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\nnp.set_printoptions(suppress=True)\r\n\r\nEPOCHS = 10\r\nBATCH_SIZE = 128\r\n\r\nSHIFT_DAYS = 3\r\nPRED_STEPS = 24*6 #48hr * 10분단위 예측\r\nTIME_STEPS = SHIFT_DAYS*PRED_STEPS #hours step\r\nDIMENSION = 15\r\nMODEL_NUM = 10\r\nCAPACITY = 89.7\r\n\r\nTRAIN_RATIO = 0.6\r\nVAL_RATIO = 0.2\r\n\r\nSTART_DATE = '2021012899'\r\nEND_DATE = '2021042924'\r\n\r\nSAVE_PATH = './data/'\r\nSAVE_NAME = 'autoML_Test'\r\n\r\n\r\ndef getData():\r\n # power\r\n power_file = './data/power_20210129_20210429_preprocess_1hour'\r\n power_df = read_csv(power_file+'.csv', encoding='CP949', converters={'date':int})\r\n print(power_df.shape)\r\n \r\n # sensor \r\n sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'\r\n sensor_df = read_csv(sensor_file+'.csv', encoding='CP949', converters={'date':int})\r\n sensor_df = sensor_df.sort_values('date')\r\n print(sensor_df.shape)\r\n\r\n # scale\r\n power_df.drop(['date'], axis=1, inplace=True)\r\n pow_scaler = MinMaxScaler(feature_range = (0, 1))\r\n scaled_pow = pow_scaler.fit_transform(power_df.values)\r\n power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns, index=list(power_df.index.values))\r\n\r\n weather_df = sensor_df.copy()\r\n weather_df.drop(['date'], axis=1, inplace=True)\r\n weather_scaler = MinMaxScaler(feature_range = (0, 1))#scale\r\n scaled_weather = weather_scaler.fit_transform(weather_df.values)\r\n weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.columns, index=list(weather_df.index.values))\r\n\r\n # JOIN \r\n df = weather_scaleddf.copy()\r\n\r\n # pow + weather + powY\r\n df.insert(0, 'pow', power_scaleddf.values, True)\r\n #df = df.iloc[0:-TIME_STEPS, :]\r\n 
#df.insert(df.shape[1], 'pow_Y', power_scaleddf.iloc[TIME_STEPS:, :].values, True)\r\n #df.insert(df.shape[1], 'pow_Y', power_scaleddf.iloc[TIME_STEPS:, :].values, True)\r\n\r\n #df.to_csv(SAVE_PATH+\"total_scaled\"+SAVE_NAME+\".csv\",mode='w',index=False, encoding='CP949')\r\n #display(df) \r\n\r\n return pow_scaler, df\r\n\r\npow_scaler, df = getData()\r\n#display(df)\r\n\r\ndataset = df\r\nval_split = int(len(dataset) * 0.7)\r\ndata_train = dataset[:val_split]\r\nvalidation_data = dataset[val_split:]\r\n\r\ndata_x = data_train[\r\n [\r\n 'pow', 'temp', 'humidity', 'windspeed', 'windgust', 'maxdailygust',\r\n 'winddir', 'hourlyrainin', 'dailyrainin', 'weeklyrainin',\r\n 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv', 'feelslike',\r\n 'dewpoint', 'outside_status'\r\n ]\r\n].astype(\"float64\")\r\n\r\ndata_x_val = validation_data[\r\n [\r\n 'pow', 'temp', 'humidity', 'windspeed', 'windgust', 'maxdailygust',\r\n 'winddir', 'hourlyrainin', 'dailyrainin', 'weeklyrainin',\r\n 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv', 'feelslike',\r\n 'dewpoint', 'outside_status'\r\n ]\r\n].astype(\"float64\")\r\n\r\n# Data with train data and the unseen data from subsequent time steps.\r\ndata_x_test = dataset[\r\n [\r\n 'pow', 'temp', 'humidity', 'windspeed', 'windgust', 'maxdailygust',\r\n 'winddir', 'hourlyrainin', 'dailyrainin', 'weeklyrainin',\r\n 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv', 'feelslike',\r\n 'dewpoint', 'outside_status'\r\n ]\r\n].astype(\"float64\")\r\n\r\ndata_y = data_train[\"pow\"].astype(\"float64\")\r\n\r\ndata_y_val = validation_data[\"pow\"].astype(\"float64\")\r\n\r\nprint(data_x.shape) # (6549, 12)\r\nprint(data_y.shape) # (6549,)\r\n\r\npredict_from = 1\r\npredict_until = 10\r\nlookback = 3\r\nclf = ak.TimeseriesForecaster(\r\n lookback=lookback,\r\n predict_from=predict_from,\r\n #predict_until=predict_until,\r\n #max_trials=1,\r\n objective=\"val_loss\",\r\n)\r\n# Train the TimeSeriesForecaster with train 
data\r\nclf.fit(\r\n x=data_x,\r\n y=data_y,\r\n validation_data=(data_x_val, data_y_val),\r\n batch_size=128,\r\n epochs=10,\r\n)\r\n# Predict with the best model(includes original training data).\r\npredictions = clf.predict(data_x_test)\r\nprint(predictions.shape)\r\n# Evaluate the best model with testing data.\r\nprint(clf.evaluate(data_x_val, data_y_val))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""
Templating support library and renderer configuration.
"""
from restish import templating
class Templating(templating.Templating):
    """
    Application-specific templating implementation.

    Overriding "args" methods makes it trivial to push extra, application-wide
    data to the templates without any assistance from the resource.
    """
    def __init__(self, app_conf):
        # Build the concrete renderer from the app configuration and hand it
        # to the restish base class.  make_renderer reads app_conf['cache_dir'].
        renderer = make_renderer(app_conf)
        templating.Templating.__init__(self, renderer)
def make_renderer(app_conf):
    """
    Create and return a restish.templating "renderer" backed by Mako.

    Args:
        app_conf: application configuration mapping; ``app_conf['cache_dir']``
            is used as the base directory for Mako's compiled-template cache.
    """
    # Imports are kept local to this function (the original note suggested the
    # Mako support started as example code); this keeps the Mako dependency
    # confined to renderer creation.
    import pkg_resources
    import os.path
    from restish.contrib.makorenderer import MakoRenderer
    return MakoRenderer(
        # Template search path: this app's templates first, then the templates
        # bundled with the formish and adminish packages.
        directories=[
            pkg_resources.resource_filename('example', 'templates'),
            pkg_resources.resource_filename('formish', 'templates/mako'),
            pkg_resources.resource_filename('adminish', 'templates'),
        ],
        # On-disk cache for Mako's compiled template modules.
        module_directory=os.path.join(app_conf['cache_dir'], 'templates'),
        input_encoding='utf-8', output_encoding='utf-8',
        # Run every expression through unicode conversion and Mako's 'h'
        # (HTML-escape) filter by default.
        default_filters=['unicode', 'h']
    )
|
normal
|
{
"blob_id": "18391df9a3e52400fe4fc54d6381b9ce21e25f0b",
"index": 2296,
"step-1": "<mask token>\n\n\nclass Templating(templating.Templating):\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Templating(templating.Templating):\n \"\"\"\n Application-specific templating implementation.\n\n Overriding \"args\" methods makes it trivial to push extra, application-wide\n data to the templates without any assistance from the resource.\n \"\"\"\n\n def __init__(self, app_conf):\n renderer = make_renderer(app_conf)\n templating.Templating.__init__(self, renderer)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Templating(templating.Templating):\n \"\"\"\n Application-specific templating implementation.\n\n Overriding \"args\" methods makes it trivial to push extra, application-wide\n data to the templates without any assistance from the resource.\n \"\"\"\n\n def __init__(self, app_conf):\n renderer = make_renderer(app_conf)\n templating.Templating.__init__(self, renderer)\n\n\ndef make_renderer(app_conf):\n \"\"\"\n Create and return a restish.templating \"renderer\".\n \"\"\"\n import pkg_resources\n import os.path\n from restish.contrib.makorenderer import MakoRenderer\n return MakoRenderer(directories=[pkg_resources.resource_filename(\n 'example', 'templates'), pkg_resources.resource_filename('formish',\n 'templates/mako'), pkg_resources.resource_filename('adminish',\n 'templates')], module_directory=os.path.join(app_conf['cache_dir'],\n 'templates'), input_encoding='utf-8', output_encoding='utf-8',\n default_filters=['unicode', 'h'])\n",
"step-4": "<mask token>\nfrom restish import templating\n\n\nclass Templating(templating.Templating):\n \"\"\"\n Application-specific templating implementation.\n\n Overriding \"args\" methods makes it trivial to push extra, application-wide\n data to the templates without any assistance from the resource.\n \"\"\"\n\n def __init__(self, app_conf):\n renderer = make_renderer(app_conf)\n templating.Templating.__init__(self, renderer)\n\n\ndef make_renderer(app_conf):\n \"\"\"\n Create and return a restish.templating \"renderer\".\n \"\"\"\n import pkg_resources\n import os.path\n from restish.contrib.makorenderer import MakoRenderer\n return MakoRenderer(directories=[pkg_resources.resource_filename(\n 'example', 'templates'), pkg_resources.resource_filename('formish',\n 'templates/mako'), pkg_resources.resource_filename('adminish',\n 'templates')], module_directory=os.path.join(app_conf['cache_dir'],\n 'templates'), input_encoding='utf-8', output_encoding='utf-8',\n default_filters=['unicode', 'h'])\n",
"step-5": "\"\"\"\nTemplating support library and renderer configuration.\n\"\"\"\n\nfrom restish import templating\n\nclass Templating(templating.Templating):\n \"\"\"\n Application-specific templating implementation.\n\n Overriding \"args\" methods makes it trivial to push extra, application-wide\n data to the templates without any assistance from the resource.\n \"\"\"\n\n def __init__(self, app_conf):\n renderer = make_renderer(app_conf)\n templating.Templating.__init__(self, renderer)\n\n\ndef make_renderer(app_conf):\n \"\"\"\n Create and return a restish.templating \"renderer\".\n \"\"\"\n\n # Uncomment for an example of Mako templating support.\n import pkg_resources\n import os.path\n from restish.contrib.makorenderer import MakoRenderer\n return MakoRenderer(\n directories=[\n pkg_resources.resource_filename('example', 'templates'),\n pkg_resources.resource_filename('formish', 'templates/mako'),\n pkg_resources.resource_filename('adminish', 'templates'),\n ],\n module_directory=os.path.join(app_conf['cache_dir'], 'templates'),\n input_encoding='utf-8', output_encoding='utf-8',\n default_filters=['unicode', 'h']\n )\n\n\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
class Day8MemoryManeuver:
def __init__(self, use_reference_count=False):
"""
Args:
use_reference_count (bool):
True: If an entry has child nodes, the meta data are referring to the results of
the child node
False: Sum all meta data up
"""
self._use_child_references = use_reference_count
def solve(self, license_input):
_, result = self._solve(license_input.split(" "), 0)
return result
def _solve(self, structure, pos):
if pos >= len(structure):
return pos, 0
child_node_count = int(structure[pos])
pos += 1
meta_count = int(structure[pos])
result = 0
child_results = []
for i in range(child_node_count):
pos += 1
pos, tmp = self._solve(structure, pos)
if not self._use_child_references:
result += tmp
child_results.append(tmp)
if meta_count > 0:
for i in range(pos, pos + meta_count):
current = int(structure[i + 1])
if self._use_child_references and child_node_count > 0:
if current <= len(child_results):
result += child_results[current - 1]
else:
result += current
pos += 1
return pos, result
|
normal
|
{
"blob_id": "84d096a51fa052ee210e975ab61c0cbbf05bc5ae",
"index": 8358,
"step-1": "class Day8MemoryManeuver:\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "class Day8MemoryManeuver:\n <mask token>\n <mask token>\n\n def _solve(self, structure, pos):\n if pos >= len(structure):\n return pos, 0\n child_node_count = int(structure[pos])\n pos += 1\n meta_count = int(structure[pos])\n result = 0\n child_results = []\n for i in range(child_node_count):\n pos += 1\n pos, tmp = self._solve(structure, pos)\n if not self._use_child_references:\n result += tmp\n child_results.append(tmp)\n if meta_count > 0:\n for i in range(pos, pos + meta_count):\n current = int(structure[i + 1])\n if self._use_child_references and child_node_count > 0:\n if current <= len(child_results):\n result += child_results[current - 1]\n else:\n result += current\n pos += 1\n return pos, result\n",
"step-3": "class Day8MemoryManeuver:\n <mask token>\n\n def solve(self, license_input):\n _, result = self._solve(license_input.split(' '), 0)\n return result\n\n def _solve(self, structure, pos):\n if pos >= len(structure):\n return pos, 0\n child_node_count = int(structure[pos])\n pos += 1\n meta_count = int(structure[pos])\n result = 0\n child_results = []\n for i in range(child_node_count):\n pos += 1\n pos, tmp = self._solve(structure, pos)\n if not self._use_child_references:\n result += tmp\n child_results.append(tmp)\n if meta_count > 0:\n for i in range(pos, pos + meta_count):\n current = int(structure[i + 1])\n if self._use_child_references and child_node_count > 0:\n if current <= len(child_results):\n result += child_results[current - 1]\n else:\n result += current\n pos += 1\n return pos, result\n",
"step-4": "class Day8MemoryManeuver:\n\n def __init__(self, use_reference_count=False):\n \"\"\"\n Args:\n use_reference_count (bool):\n True: If an entry has child nodes, the meta data are referring to the results of\n the child node\n False: Sum all meta data up\n \"\"\"\n self._use_child_references = use_reference_count\n\n def solve(self, license_input):\n _, result = self._solve(license_input.split(' '), 0)\n return result\n\n def _solve(self, structure, pos):\n if pos >= len(structure):\n return pos, 0\n child_node_count = int(structure[pos])\n pos += 1\n meta_count = int(structure[pos])\n result = 0\n child_results = []\n for i in range(child_node_count):\n pos += 1\n pos, tmp = self._solve(structure, pos)\n if not self._use_child_references:\n result += tmp\n child_results.append(tmp)\n if meta_count > 0:\n for i in range(pos, pos + meta_count):\n current = int(structure[i + 1])\n if self._use_child_references and child_node_count > 0:\n if current <= len(child_results):\n result += child_results[current - 1]\n else:\n result += current\n pos += 1\n return pos, result\n",
"step-5": "class Day8MemoryManeuver:\n def __init__(self, use_reference_count=False):\n \"\"\"\n Args:\n use_reference_count (bool):\n True: If an entry has child nodes, the meta data are referring to the results of\n the child node\n False: Sum all meta data up\n \"\"\"\n self._use_child_references = use_reference_count\n\n def solve(self, license_input):\n _, result = self._solve(license_input.split(\" \"), 0)\n return result\n\n def _solve(self, structure, pos):\n if pos >= len(structure):\n return pos, 0\n child_node_count = int(structure[pos])\n pos += 1\n meta_count = int(structure[pos])\n result = 0\n child_results = []\n for i in range(child_node_count):\n pos += 1\n pos, tmp = self._solve(structure, pos)\n if not self._use_child_references:\n result += tmp\n child_results.append(tmp)\n if meta_count > 0:\n for i in range(pos, pos + meta_count):\n current = int(structure[i + 1])\n if self._use_child_references and child_node_count > 0:\n if current <= len(child_results):\n result += child_results[current - 1]\n else:\n result += current\n pos += 1\n return pos, result\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def print_theta(theta, name='theta'):
theta_pd = pd.DataFrame(theta.reshape(1, -1), index=[name], columns=[
'mean', 'variance', 'max_range', 'min_range', 'anisotropy',
'head_west'])
print(theta_pd)
<|reserved_special_token_0|>
def visualize_one_m(m, vmin=-4, vmax=0, cmap='viridis', title=
'True spatial field, m'):
fig, ax = plt.subplots(figsize=[6, 6])
m_show = ax.imshow(m.T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax)
ax.set_xticks([])
ax.set_yticks([])
if title:
ax.set_title(title, fontsize=13)
well_location = [49, 49]
direct_data_loc = [30, 70]
ax.scatter(well_location[0], well_location[1], s=100, color='black',
label='indirect pumping well')
ax.scatter(direct_data_loc[0], direct_data_loc[1], s=100, color='red',
label='direct logK')
ax.legend()
fig.colorbar(m_show, ax=ax, shrink=0.6)
<|reserved_special_token_0|>
def visualize_multiple_m(m, head=4, vmin=-4, vmax=0, cmap='viridis', theta=None
):
plt.figure(figsize=[20, 8])
for i in np.arange(head):
ax = plt.subplot(1, 4, i + 1)
ax.imshow(m[i, :, :].T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax
)
ax.set_xticks([])
ax.set_yticks([])
well_location = [49, 49]
direct_data_loc = [30, 70]
ax.scatter(well_location[0], well_location[1], s=50, color='black',
label='pumping well')
ax.scatter(direct_data_loc[0], direct_data_loc[1], s=50, color=
'red', label='direct logK')
if theta is not None:
ax.set_title('θ = ' + str(tuple(np.round(theta[i, :], 1))))
<|reserved_special_token_0|>
def visualize_multiple_d(d, head=4):
plt.figure(figsize=[25, 3])
for i in np.arange(head):
ax = plt.subplot(1, 4, i + 1)
ax.plot(np.arange(70) / 10, d[:, i].reshape(70, 1)[:, 0], label=
'pumping well')
ax.set_xlabel('Days')
ax.set_ylabel('Head')
def colors_from_values(values, palette_name):
normalized = (values - min(values)) / (max(values) - min(values))
indices = np.round(normalized * (len(values) - 1)).astype(np.int32)
palette = sns.color_palette(palette_name, len(values))
return np.array(palette).take(indices, axis=0)
def visualize_mean_var(mu, covariance, vmin=20, vmax=40, cmap='viridis'):
var = np.diag(covariance)
plt.figure(figsize=[18, 4])
ax = plt.subplot(2, 4, 1)
ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=
vmin, vmax=vmax)
rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,
linewidth=2, linestyle='dashed', edgecolor='black', facecolor=
'None', label='pilot area')
ax.add_patch(rect)
rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),
num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=
'black', facecolor='None', label='pilot area')
ax.add_patch(rect)
ax.set_xticks([])
ax.set_yticks([])
ax = plt.subplot(2, 4, 2)
ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=
0, vmax=16)
rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,
linewidth=2, linestyle='dashed', edgecolor='black', facecolor=
'None', label='pilot area')
ax.add_patch(rect)
rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),
num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=
'black', facecolor='None', label='pilot area')
ax.add_patch(rect)
ax.set_xticks([])
ax.set_yticks([])
<|reserved_special_token_0|>
def visualize_ensemble_d(d, d_obs, ymin=None, ymax=11.5):
plt.plot(np.arange(70) / 10, d, color='C0')
plt.plot(np.arange(70) / 10, d_obs, color='C1', linewidth=2, label=
'observed data')
plt.xlabel('Days')
plt.ylabel('Head')
plt.legend()
plt.ylim(ymin, ymax)
def pos_pairplot(theta_pos, theta_name):
sns.pairplot(pd.DataFrame(theta_pos.T, columns=theta_name), kind='hist')
def prior_pos_theta(theta, theta_pos, theta_true, theta_name):
num_theta = theta.shape[1]
plt.figure(figsize=[25, 10])
for i in np.arange(num_theta):
ax = plt.subplot(2, 3, i + 1)
ax.hist(theta[:, i], density=True, bins=1, label='prior', alpha=0.7)
y_, _, _ = ax.hist(theta_pos[i, :], density=True, bins=20, label=
'posterior', alpha=0.7)
ax.vlines(x=theta_true[i], ymin=0, ymax=np.max(y_), linestyles='--',
label='true', color='black')
ax.legend()
ax.set_title(theta_name[i])
ax.set_ylabel('pdf')
def ML_dimension_reduction_vis(pred_train, y_train, pred_test, y_test,
S_d_obs, theta_name):
fig = plt.figure(figsize=[24, 10])
num_theta = len(theta_name)
for i in np.arange(num_theta):
ax = plt.subplot(2, 3, i + 1)
ax.plot(pred_train[:, i], y_train[:, i], '.', label='train')
ax.plot(pred_test[:, i], y_test[:, i], '.', label='test')
ax.vlines(x=S_d_obs[0, i], ymin=-1, ymax=1, linestyles='--', color=
'black', zorder=100)
ax.plot([-1.2, 1.2], [-1.2, 1.2])
ax.legend()
ax.set_xlabel('S(d_' + str(i + 1) + ')')
ax.set_ylabel(theta_name[i] + '_rescaled')
ax.set_xlim(-1.2, 1.2)
ax.set_ylim(-1.2, 1.2)
def history_plot(history):
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def print_theta(theta, name='theta'):
theta_pd = pd.DataFrame(theta.reshape(1, -1), index=[name], columns=[
'mean', 'variance', 'max_range', 'min_range', 'anisotropy',
'head_west'])
print(theta_pd)
def visualize_d_2D(d):
num_block = 3
d_vis = np.zeros(num_m)
d_vis[:] = np.nan
for i in range(num_block * num_block * 2):
d_vis[np.where(G[i, :] > 0)[0]] = d[i]
d_vis = d_vis.reshape(num_x, num_y)
return d_vis
<|reserved_special_token_0|>
def visualize_one_m(m, vmin=-4, vmax=0, cmap='viridis', title=
'True spatial field, m'):
fig, ax = plt.subplots(figsize=[6, 6])
m_show = ax.imshow(m.T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax)
ax.set_xticks([])
ax.set_yticks([])
if title:
ax.set_title(title, fontsize=13)
well_location = [49, 49]
direct_data_loc = [30, 70]
ax.scatter(well_location[0], well_location[1], s=100, color='black',
label='indirect pumping well')
ax.scatter(direct_data_loc[0], direct_data_loc[1], s=100, color='red',
label='direct logK')
ax.legend()
fig.colorbar(m_show, ax=ax, shrink=0.6)
<|reserved_special_token_0|>
def visualize_multiple_m(m, head=4, vmin=-4, vmax=0, cmap='viridis', theta=None
):
plt.figure(figsize=[20, 8])
for i in np.arange(head):
ax = plt.subplot(1, 4, i + 1)
ax.imshow(m[i, :, :].T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax
)
ax.set_xticks([])
ax.set_yticks([])
well_location = [49, 49]
direct_data_loc = [30, 70]
ax.scatter(well_location[0], well_location[1], s=50, color='black',
label='pumping well')
ax.scatter(direct_data_loc[0], direct_data_loc[1], s=50, color=
'red', label='direct logK')
if theta is not None:
ax.set_title('θ = ' + str(tuple(np.round(theta[i, :], 1))))
<|reserved_special_token_0|>
def visualize_multiple_d(d, head=4):
plt.figure(figsize=[25, 3])
for i in np.arange(head):
ax = plt.subplot(1, 4, i + 1)
ax.plot(np.arange(70) / 10, d[:, i].reshape(70, 1)[:, 0], label=
'pumping well')
ax.set_xlabel('Days')
ax.set_ylabel('Head')
def colors_from_values(values, palette_name):
normalized = (values - min(values)) / (max(values) - min(values))
indices = np.round(normalized * (len(values) - 1)).astype(np.int32)
palette = sns.color_palette(palette_name, len(values))
return np.array(palette).take(indices, axis=0)
def visualize_mean_var(mu, covariance, vmin=20, vmax=40, cmap='viridis'):
var = np.diag(covariance)
plt.figure(figsize=[18, 4])
ax = plt.subplot(2, 4, 1)
ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=
vmin, vmax=vmax)
rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,
linewidth=2, linestyle='dashed', edgecolor='black', facecolor=
'None', label='pilot area')
ax.add_patch(rect)
rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),
num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=
'black', facecolor='None', label='pilot area')
ax.add_patch(rect)
ax.set_xticks([])
ax.set_yticks([])
ax = plt.subplot(2, 4, 2)
ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=
0, vmax=16)
rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,
linewidth=2, linestyle='dashed', edgecolor='black', facecolor=
'None', label='pilot area')
ax.add_patch(rect)
rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),
num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=
'black', facecolor='None', label='pilot area')
ax.add_patch(rect)
ax.set_xticks([])
ax.set_yticks([])
def visualize_mean_var_MC(m, start_loc, num_grid, vmin=-3, vmax=1, vmin_var
=0, vmax_var=0.2, cmap='viridis', rect=False):
mu = np.mean(m, axis=0)
var = np.var(m, axis=0)
plt.figure(figsize=[10, 4])
ax = plt.subplot(1, 2, 1)
ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=
vmin, vmax=vmax)
if rect:
rect = patches.Rectangle((start_loc, start_loc), num_grid, num_grid,
linewidth=2, linestyle='dashed', edgecolor='black', facecolor=
'None', label='pilot area')
ax.add_patch(rect)
ax.set_xticks([])
ax.set_yticks([])
well_location = [49, 49]
ax.scatter(well_location[0], well_location[1], s=20, color='black',
label='pumping well')
direct_data_loc = [30, 70]
ax.scatter(direct_data_loc[0], direct_data_loc[1], s=50, color='red',
label='direct logK')
ax = plt.subplot(1, 2, 2)
ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap='magma',
vmin=vmin_var, vmax=vmax_var)
if rect:
rect = patches.Rectangle((start_loc, start_loc), num_grid, num_grid,
linewidth=2, linestyle='dashed', edgecolor='black', facecolor=
'None', label='pilot area')
ax.add_patch(rect)
ax.set_xticks([])
ax.set_yticks([])
def visualize_ensemble_d(d, d_obs, ymin=None, ymax=11.5):
plt.plot(np.arange(70) / 10, d, color='C0')
plt.plot(np.arange(70) / 10, d_obs, color='C1', linewidth=2, label=
'observed data')
plt.xlabel('Days')
plt.ylabel('Head')
plt.legend()
plt.ylim(ymin, ymax)
def pos_pairplot(theta_pos, theta_name):
sns.pairplot(pd.DataFrame(theta_pos.T, columns=theta_name), kind='hist')
def prior_pos_theta(theta, theta_pos, theta_true, theta_name):
num_theta = theta.shape[1]
plt.figure(figsize=[25, 10])
for i in np.arange(num_theta):
ax = plt.subplot(2, 3, i + 1)
ax.hist(theta[:, i], density=True, bins=1, label='prior', alpha=0.7)
y_, _, _ = ax.hist(theta_pos[i, :], density=True, bins=20, label=
'posterior', alpha=0.7)
ax.vlines(x=theta_true[i], ymin=0, ymax=np.max(y_), linestyles='--',
label='true', color='black')
ax.legend()
ax.set_title(theta_name[i])
ax.set_ylabel('pdf')
def ML_dimension_reduction_vis(pred_train, y_train, pred_test, y_test,
S_d_obs, theta_name):
fig = plt.figure(figsize=[24, 10])
num_theta = len(theta_name)
for i in np.arange(num_theta):
ax = plt.subplot(2, 3, i + 1)
ax.plot(pred_train[:, i], y_train[:, i], '.', label='train')
ax.plot(pred_test[:, i], y_test[:, i], '.', label='test')
ax.vlines(x=S_d_obs[0, i], ymin=-1, ymax=1, linestyles='--', color=
'black', zorder=100)
ax.plot([-1.2, 1.2], [-1.2, 1.2])
ax.legend()
ax.set_xlabel('S(d_' + str(i + 1) + ')')
ax.set_ylabel(theta_name[i] + '_rescaled')
ax.set_xlim(-1.2, 1.2)
ax.set_ylim(-1.2, 1.2)
def history_plot(history):
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plt.rcParams.update({'font.size': 15})
<|reserved_special_token_0|>
def print_theta(theta, name='theta'):
theta_pd = pd.DataFrame(theta.reshape(1, -1), index=[name], columns=[
'mean', 'variance', 'max_range', 'min_range', 'anisotropy',
'head_west'])
print(theta_pd)
def visualize_d_2D(d):
num_block = 3
d_vis = np.zeros(num_m)
d_vis[:] = np.nan
for i in range(num_block * num_block * 2):
d_vis[np.where(G[i, :] > 0)[0]] = d[i]
d_vis = d_vis.reshape(num_x, num_y)
return d_vis
def visualize_one_d(d):
plt.plot(np.arange(70) / 10, d.reshape(70, 1)[:, 0], label='pumping well')
plt.xlabel('Days')
plt.ylabel('Head')
plt.legend()
def visualize_one_m(m, vmin=-4, vmax=0, cmap='viridis', title=
'True spatial field, m'):
fig, ax = plt.subplots(figsize=[6, 6])
m_show = ax.imshow(m.T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax)
ax.set_xticks([])
ax.set_yticks([])
if title:
ax.set_title(title, fontsize=13)
well_location = [49, 49]
direct_data_loc = [30, 70]
ax.scatter(well_location[0], well_location[1], s=100, color='black',
label='indirect pumping well')
ax.scatter(direct_data_loc[0], direct_data_loc[1], s=100, color='red',
label='direct logK')
ax.legend()
fig.colorbar(m_show, ax=ax, shrink=0.6)
def print_theta_multiple(theta, name='theta', head=8):
theta_pd = pd.DataFrame(theta, index=[('theta_' + str(i)) for i in np.
arange(1, theta.shape[0] + 1)], columns=['mean', 'variance',
'max_range', 'min_range', 'anisotropy', 'head_west'])
print(theta_pd.head(head))
def visualize_multiple_m(m, head=4, vmin=-4, vmax=0, cmap='viridis', theta=None
):
plt.figure(figsize=[20, 8])
for i in np.arange(head):
ax = plt.subplot(1, 4, i + 1)
ax.imshow(m[i, :, :].T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax
)
ax.set_xticks([])
ax.set_yticks([])
well_location = [49, 49]
direct_data_loc = [30, 70]
ax.scatter(well_location[0], well_location[1], s=50, color='black',
label='pumping well')
ax.scatter(direct_data_loc[0], direct_data_loc[1], s=50, color=
'red', label='direct logK')
if theta is not None:
ax.set_title('θ = ' + str(tuple(np.round(theta[i, :], 1))))
def visualize_multiple_pc(m, PCA, head=8, vmin=-4, vmax=0, cmap='viridis',
rect=False):
plt.figure(figsize=[25, 10])
for i in np.arange(head):
ax = plt.subplot(1, 10, i + 1)
ax.imshow(m[i, :].reshape(num_x, num_y).T, origin='lower', cmap=
cmap, vmin=vmin, vmax=vmax)
if rect:
rect = patches.Rectangle((32, 32), 36, 36, linewidth=2,
linestyle='dashed', edgecolor='black', facecolor='None')
ax.add_patch(rect)
ax.set_xticks([])
ax.set_yticks([])
ax.set_title('PCA ' + str(i + 1) + ': ' + str(np.int(PCA[
'explained_variance'][i] * 100)) + '%')
def visualize_multiple_d(d, head=4):
plt.figure(figsize=[25, 3])
for i in np.arange(head):
ax = plt.subplot(1, 4, i + 1)
ax.plot(np.arange(70) / 10, d[:, i].reshape(70, 1)[:, 0], label=
'pumping well')
ax.set_xlabel('Days')
ax.set_ylabel('Head')
def colors_from_values(values, palette_name):
normalized = (values - min(values)) / (max(values) - min(values))
indices = np.round(normalized * (len(values) - 1)).astype(np.int32)
palette = sns.color_palette(palette_name, len(values))
return np.array(palette).take(indices, axis=0)
def visualize_mean_var(mu, covariance, vmin=20, vmax=40, cmap='viridis'):
var = np.diag(covariance)
plt.figure(figsize=[18, 4])
ax = plt.subplot(2, 4, 1)
ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=
vmin, vmax=vmax)
rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,
linewidth=2, linestyle='dashed', edgecolor='black', facecolor=
'None', label='pilot area')
ax.add_patch(rect)
rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),
num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=
'black', facecolor='None', label='pilot area')
ax.add_patch(rect)
ax.set_xticks([])
ax.set_yticks([])
ax = plt.subplot(2, 4, 2)
ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=
0, vmax=16)
rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,
linewidth=2, linestyle='dashed', edgecolor='black', facecolor=
'None', label='pilot area')
ax.add_patch(rect)
rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),
num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=
'black', facecolor='None', label='pilot area')
ax.add_patch(rect)
ax.set_xticks([])
ax.set_yticks([])
def visualize_mean_var_MC(m, start_loc, num_grid, vmin=-3, vmax=1, vmin_var
=0, vmax_var=0.2, cmap='viridis', rect=False):
mu = np.mean(m, axis=0)
var = np.var(m, axis=0)
plt.figure(figsize=[10, 4])
ax = plt.subplot(1, 2, 1)
ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=
vmin, vmax=vmax)
if rect:
rect = patches.Rectangle((start_loc, start_loc), num_grid, num_grid,
linewidth=2, linestyle='dashed', edgecolor='black', facecolor=
'None', label='pilot area')
ax.add_patch(rect)
ax.set_xticks([])
ax.set_yticks([])
well_location = [49, 49]
ax.scatter(well_location[0], well_location[1], s=20, color='black',
label='pumping well')
direct_data_loc = [30, 70]
ax.scatter(direct_data_loc[0], direct_data_loc[1], s=50, color='red',
label='direct logK')
ax = plt.subplot(1, 2, 2)
ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap='magma',
vmin=vmin_var, vmax=vmax_var)
if rect:
rect = patches.Rectangle((start_loc, start_loc), num_grid, num_grid,
linewidth=2, linestyle='dashed', edgecolor='black', facecolor=
'None', label='pilot area')
ax.add_patch(rect)
ax.set_xticks([])
ax.set_yticks([])
def visualize_ensemble_d(d, d_obs, ymin=None, ymax=11.5):
plt.plot(np.arange(70) / 10, d, color='C0')
plt.plot(np.arange(70) / 10, d_obs, color='C1', linewidth=2, label=
'observed data')
plt.xlabel('Days')
plt.ylabel('Head')
plt.legend()
plt.ylim(ymin, ymax)
def pos_pairplot(theta_pos, theta_name):
sns.pairplot(pd.DataFrame(theta_pos.T, columns=theta_name), kind='hist')
def prior_pos_theta(theta, theta_pos, theta_true, theta_name):
num_theta = theta.shape[1]
plt.figure(figsize=[25, 10])
for i in np.arange(num_theta):
ax = plt.subplot(2, 3, i + 1)
ax.hist(theta[:, i], density=True, bins=1, label='prior', alpha=0.7)
y_, _, _ = ax.hist(theta_pos[i, :], density=True, bins=20, label=
'posterior', alpha=0.7)
ax.vlines(x=theta_true[i], ymin=0, ymax=np.max(y_), linestyles='--',
label='true', color='black')
ax.legend()
ax.set_title(theta_name[i])
ax.set_ylabel('pdf')
def ML_dimension_reduction_vis(pred_train, y_train, pred_test, y_test,
S_d_obs, theta_name):
fig = plt.figure(figsize=[24, 10])
num_theta = len(theta_name)
for i in np.arange(num_theta):
ax = plt.subplot(2, 3, i + 1)
ax.plot(pred_train[:, i], y_train[:, i], '.', label='train')
ax.plot(pred_test[:, i], y_test[:, i], '.', label='test')
ax.vlines(x=S_d_obs[0, i], ymin=-1, ymax=1, linestyles='--', color=
'black', zorder=100)
ax.plot([-1.2, 1.2], [-1.2, 1.2])
ax.legend()
ax.set_xlabel('S(d_' + str(i + 1) + ')')
ax.set_ylabel(theta_name[i] + '_rescaled')
ax.set_xlim(-1.2, 1.2)
ax.set_ylim(-1.2, 1.2)
def history_plot(history):
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
import gstools as gs
import matplotlib.pyplot as plt
from matplotlib import patches
import seaborn as sns
plt.rcParams.update({'font.size': 15})
import os
path = os.path.dirname(os.getcwd())
subpath = '/examples/case2_nonlinear_forward_pumping_test/'
num_prior_sample = 5000
num_x = 100
num_y = 100
def print_theta(theta, name='theta'):
theta_pd = pd.DataFrame(theta.reshape(1, -1), index=[name], columns=[
'mean', 'variance', 'max_range', 'min_range', 'anisotropy',
'head_west'])
print(theta_pd)
def visualize_d_2D(d):
num_block = 3
d_vis = np.zeros(num_m)
d_vis[:] = np.nan
for i in range(num_block * num_block * 2):
d_vis[np.where(G[i, :] > 0)[0]] = d[i]
d_vis = d_vis.reshape(num_x, num_y)
return d_vis
def visualize_one_d(d):
plt.plot(np.arange(70) / 10, d.reshape(70, 1)[:, 0], label='pumping well')
plt.xlabel('Days')
plt.ylabel('Head')
plt.legend()
def visualize_one_m(m, vmin=-4, vmax=0, cmap='viridis', title=
'True spatial field, m'):
fig, ax = plt.subplots(figsize=[6, 6])
m_show = ax.imshow(m.T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax)
ax.set_xticks([])
ax.set_yticks([])
if title:
ax.set_title(title, fontsize=13)
well_location = [49, 49]
direct_data_loc = [30, 70]
ax.scatter(well_location[0], well_location[1], s=100, color='black',
label='indirect pumping well')
ax.scatter(direct_data_loc[0], direct_data_loc[1], s=100, color='red',
label='direct logK')
ax.legend()
fig.colorbar(m_show, ax=ax, shrink=0.6)
def print_theta_multiple(theta, name='theta', head=8):
theta_pd = pd.DataFrame(theta, index=[('theta_' + str(i)) for i in np.
arange(1, theta.shape[0] + 1)], columns=['mean', 'variance',
'max_range', 'min_range', 'anisotropy', 'head_west'])
print(theta_pd.head(head))
def visualize_multiple_m(m, head=4, vmin=-4, vmax=0, cmap='viridis', theta=None
):
plt.figure(figsize=[20, 8])
for i in np.arange(head):
ax = plt.subplot(1, 4, i + 1)
ax.imshow(m[i, :, :].T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax
)
ax.set_xticks([])
ax.set_yticks([])
well_location = [49, 49]
direct_data_loc = [30, 70]
ax.scatter(well_location[0], well_location[1], s=50, color='black',
label='pumping well')
ax.scatter(direct_data_loc[0], direct_data_loc[1], s=50, color=
'red', label='direct logK')
if theta is not None:
ax.set_title('θ = ' + str(tuple(np.round(theta[i, :], 1))))
def visualize_multiple_pc(m, PCA, head=8, vmin=-4, vmax=0, cmap='viridis',
rect=False):
plt.figure(figsize=[25, 10])
for i in np.arange(head):
ax = plt.subplot(1, 10, i + 1)
ax.imshow(m[i, :].reshape(num_x, num_y).T, origin='lower', cmap=
cmap, vmin=vmin, vmax=vmax)
if rect:
rect = patches.Rectangle((32, 32), 36, 36, linewidth=2,
linestyle='dashed', edgecolor='black', facecolor='None')
ax.add_patch(rect)
ax.set_xticks([])
ax.set_yticks([])
ax.set_title('PCA ' + str(i + 1) + ': ' + str(np.int(PCA[
'explained_variance'][i] * 100)) + '%')
def visualize_multiple_d(d, head=4):
plt.figure(figsize=[25, 3])
for i in np.arange(head):
ax = plt.subplot(1, 4, i + 1)
ax.plot(np.arange(70) / 10, d[:, i].reshape(70, 1)[:, 0], label=
'pumping well')
ax.set_xlabel('Days')
ax.set_ylabel('Head')
def colors_from_values(values, palette_name):
normalized = (values - min(values)) / (max(values) - min(values))
indices = np.round(normalized * (len(values) - 1)).astype(np.int32)
palette = sns.color_palette(palette_name, len(values))
return np.array(palette).take(indices, axis=0)
def visualize_mean_var(mu, covariance, vmin=20, vmax=40, cmap='viridis'):
var = np.diag(covariance)
plt.figure(figsize=[18, 4])
ax = plt.subplot(2, 4, 1)
ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=
vmin, vmax=vmax)
rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,
linewidth=2, linestyle='dashed', edgecolor='black', facecolor=
'None', label='pilot area')
ax.add_patch(rect)
rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),
num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=
'black', facecolor='None', label='pilot area')
ax.add_patch(rect)
ax.set_xticks([])
ax.set_yticks([])
ax = plt.subplot(2, 4, 2)
ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=
0, vmax=16)
rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,
linewidth=2, linestyle='dashed', edgecolor='black', facecolor=
'None', label='pilot area')
ax.add_patch(rect)
rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),
num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=
'black', facecolor='None', label='pilot area')
ax.add_patch(rect)
ax.set_xticks([])
ax.set_yticks([])
def visualize_mean_var_MC(m, start_loc, num_grid, vmin=-3, vmax=1, vmin_var
=0, vmax_var=0.2, cmap='viridis', rect=False):
mu = np.mean(m, axis=0)
var = np.var(m, axis=0)
plt.figure(figsize=[10, 4])
ax = plt.subplot(1, 2, 1)
ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=
vmin, vmax=vmax)
if rect:
rect = patches.Rectangle((start_loc, start_loc), num_grid, num_grid,
linewidth=2, linestyle='dashed', edgecolor='black', facecolor=
'None', label='pilot area')
ax.add_patch(rect)
ax.set_xticks([])
ax.set_yticks([])
well_location = [49, 49]
ax.scatter(well_location[0], well_location[1], s=20, color='black',
label='pumping well')
direct_data_loc = [30, 70]
ax.scatter(direct_data_loc[0], direct_data_loc[1], s=50, color='red',
label='direct logK')
ax = plt.subplot(1, 2, 2)
ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap='magma',
vmin=vmin_var, vmax=vmax_var)
if rect:
rect = patches.Rectangle((start_loc, start_loc), num_grid, num_grid,
linewidth=2, linestyle='dashed', edgecolor='black', facecolor=
'None', label='pilot area')
ax.add_patch(rect)
ax.set_xticks([])
ax.set_yticks([])
def visualize_ensemble_d(d, d_obs, ymin=None, ymax=11.5):
plt.plot(np.arange(70) / 10, d, color='C0')
plt.plot(np.arange(70) / 10, d_obs, color='C1', linewidth=2, label=
'observed data')
plt.xlabel('Days')
plt.ylabel('Head')
plt.legend()
plt.ylim(ymin, ymax)
def pos_pairplot(theta_pos, theta_name):
sns.pairplot(pd.DataFrame(theta_pos.T, columns=theta_name), kind='hist')
def prior_pos_theta(theta, theta_pos, theta_true, theta_name):
num_theta = theta.shape[1]
plt.figure(figsize=[25, 10])
for i in np.arange(num_theta):
ax = plt.subplot(2, 3, i + 1)
ax.hist(theta[:, i], density=True, bins=1, label='prior', alpha=0.7)
y_, _, _ = ax.hist(theta_pos[i, :], density=True, bins=20, label=
'posterior', alpha=0.7)
ax.vlines(x=theta_true[i], ymin=0, ymax=np.max(y_), linestyles='--',
label='true', color='black')
ax.legend()
ax.set_title(theta_name[i])
ax.set_ylabel('pdf')
def ML_dimension_reduction_vis(pred_train, y_train, pred_test, y_test,
S_d_obs, theta_name):
fig = plt.figure(figsize=[24, 10])
num_theta = len(theta_name)
for i in np.arange(num_theta):
ax = plt.subplot(2, 3, i + 1)
ax.plot(pred_train[:, i], y_train[:, i], '.', label='train')
ax.plot(pred_test[:, i], y_test[:, i], '.', label='test')
ax.vlines(x=S_d_obs[0, i], ymin=-1, ymax=1, linestyles='--', color=
'black', zorder=100)
ax.plot([-1.2, 1.2], [-1.2, 1.2])
ax.legend()
ax.set_xlabel('S(d_' + str(i + 1) + ')')
ax.set_ylabel(theta_name[i] + '_rescaled')
ax.set_xlim(-1.2, 1.2)
ax.set_ylim(-1.2, 1.2)
def history_plot(history):
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
<|reserved_special_token_1|>
# Author: Lijing Wang (lijing52@stanford.edu), 2021
import numpy as np
import pandas as pd
import gstools as gs
import matplotlib.pyplot as plt
from matplotlib import patches
import seaborn as sns
plt.rcParams.update({'font.size': 15})
import os
path = os.path.dirname(os.getcwd())
subpath = '/examples/case2_nonlinear_forward_pumping_test/'
num_prior_sample = 5000
num_x = 100
num_y = 100
def print_theta(theta, name = 'theta'):
theta_pd = pd.DataFrame(theta.reshape(1,-1), index = [name], columns = ['mean','variance','max_range','min_range','anisotropy','head_west'])
print(theta_pd)
def visualize_d_2D(d):
num_block = 3
d_vis = np.zeros(num_m)
d_vis[:] = np.nan
for i in range(num_block*num_block*2):
d_vis[np.where(G[i,:]>0)[0]] = d[i]
d_vis = d_vis.reshape(num_x,num_y)
return d_vis
def visualize_one_d(d):
plt.plot(np.arange(70)/10, d.reshape(70,1)[:,0],label = 'pumping well')
plt.xlabel('Days')
plt.ylabel('Head')
plt.legend()
def visualize_one_m(m, vmin=-4, vmax=0, cmap='viridis',
                    title='True spatial field, m'):
    """Show one spatial field with the measurement locations marked."""
    fig, ax = plt.subplots(figsize=[6, 6])
    image = ax.imshow(m.T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax)
    ax.set_xticks([])
    ax.set_yticks([])
    if title:
        ax.set_title(title, fontsize=13)
    # Fixed measurement locations used throughout this example.
    pump_xy = [49, 49]
    direct_xy = [30, 70]
    ax.scatter(pump_xy[0], pump_xy[1], s=100, color='black',
               label='indirect pumping well')
    ax.scatter(direct_xy[0], direct_xy[1], s=100, color='red',
               label='direct logK')
    ax.legend()
    fig.colorbar(image, ax=ax, shrink=0.6)
def print_theta_multiple(theta, name='theta', head=8):
    """Print the first ``head`` rows of a theta ensemble as a table.

    ``name`` is accepted for interface symmetry with ``print_theta`` but
    is not used here.
    """
    labels = ['theta_' + str(i) for i in np.arange(1, theta.shape[0] + 1)]
    columns = ['mean', 'variance', 'max_range', 'min_range',
               'anisotropy', 'head_west']
    print(pd.DataFrame(theta, index=labels, columns=columns).head(head))
def visualize_multiple_m(m, head=4, vmin=-4, vmax=0, cmap='viridis',
                         theta=None):
    """Show the first ``head`` realizations of m side by side."""
    plt.figure(figsize=[20, 8])
    pump_xy = [49, 49]
    direct_xy = [30, 70]
    for i in np.arange(head):
        ax = plt.subplot(1, 4, i + 1)
        ax.imshow(m[i, :, :].T, origin='lower', cmap=cmap,
                  vmin=vmin, vmax=vmax)
        ax.set_xticks([])
        ax.set_yticks([])
        ax.scatter(pump_xy[0], pump_xy[1], s=50, color='black',
                   label='pumping well')
        ax.scatter(direct_xy[0], direct_xy[1], s=50, color='red',
                   label='direct logK')
        if theta is not None:
            # Annotate each panel with its (rounded) parameter vector.
            ax.set_title('\u03B8 = ' + str(tuple(np.round(theta[i, :], 1))))
def visualize_multiple_pc(m, PCA, head=8, vmin=-4, vmax=0, cmap='viridis',
                          rect=False):
    """Show the first ``head`` principal components of m.

    Parameters
    ----------
    m : 2-D array
        One flattened component per row (num_x * num_y columns).
    PCA : mapping
        Must contain 'explained_variance' with one fraction per component.
    rect : bool
        If True, outline the central area on every panel.
    """
    plt.figure(figsize=[25, 10])
    for i in np.arange(head):
        ax = plt.subplot(1, 10, i + 1)
        ax.imshow(m[i, :].reshape(num_x, num_y).T, origin='lower',
                  cmap=cmap, vmin=vmin, vmax=vmax)
        if rect:
            # Keep the patch in its own variable; the original overwrote
            # the boolean ``rect`` flag with the Rectangle object.
            outline = patches.Rectangle((32, 32), 36, 36, linewidth=2,
                linestyle='dashed', edgecolor='black', facecolor='None')
            ax.add_patch(outline)
        ax.set_xticks([])
        ax.set_yticks([])
        # np.int was removed in NumPy 1.24; the builtin int truncates
        # identically here.
        pct = int(PCA['explained_variance'][i] * 100)
        ax.set_title('PCA ' + str(i + 1) + ': ' + str(pct) + '%')
def visualize_multiple_d(d, head=4):
    """Plot the first ``head`` simulated drawdown curves side by side.

    Only the pumping-well series (column 0 after reshaping) is drawn;
    plotting of the four observation wells was disabled in the original.
    """
    plt.figure(figsize=[25, 3])
    days = np.arange(70) / 10
    for i in np.arange(head):
        ax = plt.subplot(1, 4, i + 1)
        ax.plot(days, d[:, i].reshape(70, 1)[:, 0], label='pumping well')
        ax.set_xlabel('Days')
        ax.set_ylabel('Head')
def colors_from_values(values, palette_name):
    """Map each value to a color from ``palette_name`` by its position in
    the [min, max] range of ``values``.

    Returns an array with one palette RGB row per value.
    """
    values = np.asarray(values)
    span = values.max() - values.min()
    if span == 0:
        # All values equal: the original formula divided by zero here.
        # Fall back to the first palette color for every entry.
        indices = np.zeros(len(values), dtype=np.int32)
    else:
        # Normalize to [0, 1], then convert to palette indices.
        normalized = (values - values.min()) / span
        indices = np.round(normalized * (len(values) - 1)).astype(np.int32)
    palette = sns.color_palette(palette_name, len(values))
    return np.array(palette).take(indices, axis=0)
def visualize_mean_var(mu, covariance, vmin=20, vmax=40, cmap='viridis'):
    """Show the mean field and the (diagonal) variance field side by
    side, outlining both pilot areas on each panel.

    NOTE(review): relies on module-level ``start_loc_x``, ``start_loc_y``
    and ``num_grid`` which are not defined in this file — they must be
    set elsewhere before calling.
    """
    def add_pilot_rects(ax):
        # Both pilot areas are identical dashed rectangles, offset in x.
        for x0 in (start_loc_x, start_loc_x + num_grid * 2):
            area = patches.Rectangle((x0, start_loc_y), num_grid,
                num_grid, linewidth=2, linestyle='dashed',
                edgecolor='black', facecolor='None', label='pilot area')
            ax.add_patch(area)

    var = np.diag(covariance)
    plt.figure(figsize=[18, 4])
    # Left panel: mean field.
    ax = plt.subplot(2, 4, 1)
    ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap,
              vmin=vmin, vmax=vmax)
    add_pilot_rects(ax)
    ax.set_xticks([])
    ax.set_yticks([])
    # Right panel: variance field, fixed color range [0, 16].
    ax = plt.subplot(2, 4, 2)
    ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap=cmap,
              vmin=0, vmax=16)
    add_pilot_rects(ax)
    ax.set_xticks([])
    ax.set_yticks([])
def visualize_mean_var_MC(m, start_loc, num_grid, vmin=-3, vmax=1,
                          vmin_var=0, vmax_var=0.2, cmap='viridis',
                          rect=False):
    """Show the Monte Carlo mean and variance of an ensemble of fields.

    Parameters
    ----------
    m : array, shape (n_realizations, num_x * num_y)
        Ensemble of flattened fields.
    start_loc, num_grid : int
        Lower-left corner and side length of the pilot-area outline
        (drawn only when ``rect`` is True).
    """
    mu = np.mean(m, axis=0)
    var = np.var(m, axis=0)

    def add_pilot_rect(ax):
        # Keep the patch separate from the boolean ``rect`` flag, which
        # the original code overwrote with the Rectangle object.
        area = patches.Rectangle((start_loc, start_loc), num_grid,
            num_grid, linewidth=2, linestyle='dashed', edgecolor='black',
            facecolor='None', label='pilot area')
        ax.add_patch(area)

    plt.figure(figsize=[10, 4])
    # Left panel: ensemble mean, with the measurement locations marked.
    ax = plt.subplot(1, 2, 1)
    ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap,
              vmin=vmin, vmax=vmax)
    if rect:
        add_pilot_rect(ax)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.scatter(49, 49, s=20, color='black', label='pumping well')
    ax.scatter(30, 70, s=50, color='red', label='direct logK')
    # Right panel: ensemble variance on a magma colormap.
    ax = plt.subplot(1, 2, 2)
    ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap='magma',
              vmin=vmin_var, vmax=vmax_var)
    if rect:
        add_pilot_rect(ax)
    ax.set_xticks([])
    ax.set_yticks([])
def visualize_ensemble_d(d, d_obs, ymin=None, ymax=11.5):
    """Overlay an ensemble of simulated drawdown curves with the
    observed curve (highlighted in orange)."""
    days = np.arange(70) / 10
    plt.plot(days, d, color='C0')
    plt.plot(days, d_obs, color='C1', linewidth=2, label='observed data')
    plt.xlabel('Days')
    plt.ylabel('Head')
    plt.legend()
    plt.ylim(ymin, ymax)
# Visualization: updating theta
def pos_pairplot(theta_pos, theta_name):
    """Pairwise 2-D histograms of the posterior theta samples."""
    samples = pd.DataFrame(theta_pos.T, columns=theta_name)
    sns.pairplot(samples, kind='hist')
def prior_pos_theta(theta, theta_pos, theta_true, theta_name):
    """Compare prior and posterior marginals of theta, one panel per
    parameter, marking the true value with a dashed line."""
    plt.figure(figsize=[25, 10])
    for i in np.arange(theta.shape[1]):
        ax = plt.subplot(2, 3, i + 1)
        # Prior collapsed into a single bar; posterior binned into 20.
        ax.hist(theta[:, i], density=True, bins=1, label='prior', alpha=0.7)
        heights, _, _ = ax.hist(theta_pos[i, :], density=True, bins=20,
                                label='posterior', alpha=0.7)
        ax.vlines(x=theta_true[i], ymin=0, ymax=np.max(heights),
                  linestyles='--', label='true', color='black')
        ax.legend()
        ax.set_title(theta_name[i])
        ax.set_ylabel('pdf')
def ML_dimension_reduction_vis(pred_train, y_train, pred_test, y_test,
    S_d_obs, theta_name):
    """Plot learned summary statistics S(d) against the rescaled theta.

    One panel per parameter in ``theta_name``: train/test scatter, a 1:1
    reference line, and a dashed vertical line at the observed-data
    statistic S(d_obs).

    Parameters
    ----------
    pred_train, pred_test : arrays, shape (n_samples, n_theta)
        Network predictions (summary statistics) for train/test sets.
    y_train, y_test : arrays, shape (n_samples, n_theta)
        Rescaled true parameter values.
    S_d_obs : array, shape (1, n_theta)
        Summary statistic of the observed data.
    theta_name : sequence of str
        Parameter names used as axis labels.
    """
    # The original bound this figure to an unused local ``fig``.
    plt.figure(figsize=[24, 10])
    num_theta = len(theta_name)
    for i in np.arange(num_theta):
        ax = plt.subplot(2, 3, i + 1)
        ax.plot(pred_train[:, i], y_train[:, i], '.', label='train')
        ax.plot(pred_test[:, i], y_test[:, i], '.', label='test')
        # Observed-data summary statistic for this parameter.
        ax.vlines(x=S_d_obs[0, i], ymin=-1, ymax=1, linestyles='--',
            color='black', zorder=100)
        ax.plot([-1.2, 1.2], [-1.2, 1.2])  # 1:1 reference line
        ax.legend()
        ax.set_xlabel('S(d_' + str(i + 1) + ')')
        ax.set_ylabel(theta_name[i] + '_rescaled')
        ax.set_xlim(-1.2, 1.2)
        ax.set_ylim(-1.2, 1.2)
def history_plot(history):
    """Plot training vs. validation loss per epoch from a Keras History."""
    for series in ('loss', 'val_loss'):
        plt.plot(history.history[series])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()
|
flexible
|
{
"blob_id": "09fb99a15c2727da2ef96028aca5513337449f62",
"index": 3772,
"step-1": "<mask token>\n\n\ndef print_theta(theta, name='theta'):\n theta_pd = pd.DataFrame(theta.reshape(1, -1), index=[name], columns=[\n 'mean', 'variance', 'max_range', 'min_range', 'anisotropy',\n 'head_west'])\n print(theta_pd)\n\n\n<mask token>\n\n\ndef visualize_one_m(m, vmin=-4, vmax=0, cmap='viridis', title=\n 'True spatial field, m'):\n fig, ax = plt.subplots(figsize=[6, 6])\n m_show = ax.imshow(m.T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax)\n ax.set_xticks([])\n ax.set_yticks([])\n if title:\n ax.set_title(title, fontsize=13)\n well_location = [49, 49]\n direct_data_loc = [30, 70]\n ax.scatter(well_location[0], well_location[1], s=100, color='black',\n label='indirect pumping well')\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=100, color='red',\n label='direct logK')\n ax.legend()\n fig.colorbar(m_show, ax=ax, shrink=0.6)\n\n\n<mask token>\n\n\ndef visualize_multiple_m(m, head=4, vmin=-4, vmax=0, cmap='viridis', theta=None\n ):\n plt.figure(figsize=[20, 8])\n for i in np.arange(head):\n ax = plt.subplot(1, 4, i + 1)\n ax.imshow(m[i, :, :].T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax\n )\n ax.set_xticks([])\n ax.set_yticks([])\n well_location = [49, 49]\n direct_data_loc = [30, 70]\n ax.scatter(well_location[0], well_location[1], s=50, color='black',\n label='pumping well')\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=50, color=\n 'red', label='direct logK')\n if theta is not None:\n ax.set_title('θ = ' + str(tuple(np.round(theta[i, :], 1))))\n\n\n<mask token>\n\n\ndef visualize_multiple_d(d, head=4):\n plt.figure(figsize=[25, 3])\n for i in np.arange(head):\n ax = plt.subplot(1, 4, i + 1)\n ax.plot(np.arange(70) / 10, d[:, i].reshape(70, 1)[:, 0], label=\n 'pumping well')\n ax.set_xlabel('Days')\n ax.set_ylabel('Head')\n\n\ndef colors_from_values(values, palette_name):\n normalized = (values - min(values)) / (max(values) - min(values))\n indices = np.round(normalized * (len(values) - 1)).astype(np.int32)\n palette = 
sns.color_palette(palette_name, len(values))\n return np.array(palette).take(indices, axis=0)\n\n\ndef visualize_mean_var(mu, covariance, vmin=20, vmax=40, cmap='viridis'):\n var = np.diag(covariance)\n plt.figure(figsize=[18, 4])\n ax = plt.subplot(2, 4, 1)\n ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=\n vmin, vmax=vmax)\n rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),\n num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=\n 'black', facecolor='None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n ax = plt.subplot(2, 4, 2)\n ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=\n 0, vmax=16)\n rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),\n num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=\n 'black', facecolor='None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n\n\n<mask token>\n\n\ndef visualize_ensemble_d(d, d_obs, ymin=None, ymax=11.5):\n plt.plot(np.arange(70) / 10, d, color='C0')\n plt.plot(np.arange(70) / 10, d_obs, color='C1', linewidth=2, label=\n 'observed data')\n plt.xlabel('Days')\n plt.ylabel('Head')\n plt.legend()\n plt.ylim(ymin, ymax)\n\n\ndef pos_pairplot(theta_pos, theta_name):\n sns.pairplot(pd.DataFrame(theta_pos.T, columns=theta_name), kind='hist')\n\n\ndef prior_pos_theta(theta, theta_pos, theta_true, theta_name):\n num_theta = theta.shape[1]\n plt.figure(figsize=[25, 10])\n for i in np.arange(num_theta):\n ax = plt.subplot(2, 3, i + 1)\n ax.hist(theta[:, i], density=True, bins=1, 
label='prior', alpha=0.7)\n y_, _, _ = ax.hist(theta_pos[i, :], density=True, bins=20, label=\n 'posterior', alpha=0.7)\n ax.vlines(x=theta_true[i], ymin=0, ymax=np.max(y_), linestyles='--',\n label='true', color='black')\n ax.legend()\n ax.set_title(theta_name[i])\n ax.set_ylabel('pdf')\n\n\ndef ML_dimension_reduction_vis(pred_train, y_train, pred_test, y_test,\n S_d_obs, theta_name):\n fig = plt.figure(figsize=[24, 10])\n num_theta = len(theta_name)\n for i in np.arange(num_theta):\n ax = plt.subplot(2, 3, i + 1)\n ax.plot(pred_train[:, i], y_train[:, i], '.', label='train')\n ax.plot(pred_test[:, i], y_test[:, i], '.', label='test')\n ax.vlines(x=S_d_obs[0, i], ymin=-1, ymax=1, linestyles='--', color=\n 'black', zorder=100)\n ax.plot([-1.2, 1.2], [-1.2, 1.2])\n ax.legend()\n ax.set_xlabel('S(d_' + str(i + 1) + ')')\n ax.set_ylabel(theta_name[i] + '_rescaled')\n ax.set_xlim(-1.2, 1.2)\n ax.set_ylim(-1.2, 1.2)\n\n\ndef history_plot(history):\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'validation'], loc='upper left')\n plt.show()\n",
"step-2": "<mask token>\n\n\ndef print_theta(theta, name='theta'):\n theta_pd = pd.DataFrame(theta.reshape(1, -1), index=[name], columns=[\n 'mean', 'variance', 'max_range', 'min_range', 'anisotropy',\n 'head_west'])\n print(theta_pd)\n\n\ndef visualize_d_2D(d):\n num_block = 3\n d_vis = np.zeros(num_m)\n d_vis[:] = np.nan\n for i in range(num_block * num_block * 2):\n d_vis[np.where(G[i, :] > 0)[0]] = d[i]\n d_vis = d_vis.reshape(num_x, num_y)\n return d_vis\n\n\n<mask token>\n\n\ndef visualize_one_m(m, vmin=-4, vmax=0, cmap='viridis', title=\n 'True spatial field, m'):\n fig, ax = plt.subplots(figsize=[6, 6])\n m_show = ax.imshow(m.T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax)\n ax.set_xticks([])\n ax.set_yticks([])\n if title:\n ax.set_title(title, fontsize=13)\n well_location = [49, 49]\n direct_data_loc = [30, 70]\n ax.scatter(well_location[0], well_location[1], s=100, color='black',\n label='indirect pumping well')\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=100, color='red',\n label='direct logK')\n ax.legend()\n fig.colorbar(m_show, ax=ax, shrink=0.6)\n\n\n<mask token>\n\n\ndef visualize_multiple_m(m, head=4, vmin=-4, vmax=0, cmap='viridis', theta=None\n ):\n plt.figure(figsize=[20, 8])\n for i in np.arange(head):\n ax = plt.subplot(1, 4, i + 1)\n ax.imshow(m[i, :, :].T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax\n )\n ax.set_xticks([])\n ax.set_yticks([])\n well_location = [49, 49]\n direct_data_loc = [30, 70]\n ax.scatter(well_location[0], well_location[1], s=50, color='black',\n label='pumping well')\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=50, color=\n 'red', label='direct logK')\n if theta is not None:\n ax.set_title('θ = ' + str(tuple(np.round(theta[i, :], 1))))\n\n\n<mask token>\n\n\ndef visualize_multiple_d(d, head=4):\n plt.figure(figsize=[25, 3])\n for i in np.arange(head):\n ax = plt.subplot(1, 4, i + 1)\n ax.plot(np.arange(70) / 10, d[:, i].reshape(70, 1)[:, 0], label=\n 'pumping well')\n 
ax.set_xlabel('Days')\n ax.set_ylabel('Head')\n\n\ndef colors_from_values(values, palette_name):\n normalized = (values - min(values)) / (max(values) - min(values))\n indices = np.round(normalized * (len(values) - 1)).astype(np.int32)\n palette = sns.color_palette(palette_name, len(values))\n return np.array(palette).take(indices, axis=0)\n\n\ndef visualize_mean_var(mu, covariance, vmin=20, vmax=40, cmap='viridis'):\n var = np.diag(covariance)\n plt.figure(figsize=[18, 4])\n ax = plt.subplot(2, 4, 1)\n ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=\n vmin, vmax=vmax)\n rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),\n num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=\n 'black', facecolor='None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n ax = plt.subplot(2, 4, 2)\n ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=\n 0, vmax=16)\n rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),\n num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=\n 'black', facecolor='None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n\n\ndef visualize_mean_var_MC(m, start_loc, num_grid, vmin=-3, vmax=1, vmin_var\n =0, vmax_var=0.2, cmap='viridis', rect=False):\n mu = np.mean(m, axis=0)\n var = np.var(m, axis=0)\n plt.figure(figsize=[10, 4])\n ax = plt.subplot(1, 2, 1)\n ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=\n vmin, vmax=vmax)\n if rect:\n rect = patches.Rectangle((start_loc, start_loc), num_grid, 
num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n well_location = [49, 49]\n ax.scatter(well_location[0], well_location[1], s=20, color='black',\n label='pumping well')\n direct_data_loc = [30, 70]\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=50, color='red',\n label='direct logK')\n ax = plt.subplot(1, 2, 2)\n ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap='magma',\n vmin=vmin_var, vmax=vmax_var)\n if rect:\n rect = patches.Rectangle((start_loc, start_loc), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n\n\ndef visualize_ensemble_d(d, d_obs, ymin=None, ymax=11.5):\n plt.plot(np.arange(70) / 10, d, color='C0')\n plt.plot(np.arange(70) / 10, d_obs, color='C1', linewidth=2, label=\n 'observed data')\n plt.xlabel('Days')\n plt.ylabel('Head')\n plt.legend()\n plt.ylim(ymin, ymax)\n\n\ndef pos_pairplot(theta_pos, theta_name):\n sns.pairplot(pd.DataFrame(theta_pos.T, columns=theta_name), kind='hist')\n\n\ndef prior_pos_theta(theta, theta_pos, theta_true, theta_name):\n num_theta = theta.shape[1]\n plt.figure(figsize=[25, 10])\n for i in np.arange(num_theta):\n ax = plt.subplot(2, 3, i + 1)\n ax.hist(theta[:, i], density=True, bins=1, label='prior', alpha=0.7)\n y_, _, _ = ax.hist(theta_pos[i, :], density=True, bins=20, label=\n 'posterior', alpha=0.7)\n ax.vlines(x=theta_true[i], ymin=0, ymax=np.max(y_), linestyles='--',\n label='true', color='black')\n ax.legend()\n ax.set_title(theta_name[i])\n ax.set_ylabel('pdf')\n\n\ndef ML_dimension_reduction_vis(pred_train, y_train, pred_test, y_test,\n S_d_obs, theta_name):\n fig = plt.figure(figsize=[24, 10])\n num_theta = len(theta_name)\n for i in np.arange(num_theta):\n ax = plt.subplot(2, 3, i + 1)\n ax.plot(pred_train[:, i], y_train[:, i], '.', 
label='train')\n ax.plot(pred_test[:, i], y_test[:, i], '.', label='test')\n ax.vlines(x=S_d_obs[0, i], ymin=-1, ymax=1, linestyles='--', color=\n 'black', zorder=100)\n ax.plot([-1.2, 1.2], [-1.2, 1.2])\n ax.legend()\n ax.set_xlabel('S(d_' + str(i + 1) + ')')\n ax.set_ylabel(theta_name[i] + '_rescaled')\n ax.set_xlim(-1.2, 1.2)\n ax.set_ylim(-1.2, 1.2)\n\n\ndef history_plot(history):\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'validation'], loc='upper left')\n plt.show()\n",
"step-3": "<mask token>\nplt.rcParams.update({'font.size': 15})\n<mask token>\n\n\ndef print_theta(theta, name='theta'):\n theta_pd = pd.DataFrame(theta.reshape(1, -1), index=[name], columns=[\n 'mean', 'variance', 'max_range', 'min_range', 'anisotropy',\n 'head_west'])\n print(theta_pd)\n\n\ndef visualize_d_2D(d):\n num_block = 3\n d_vis = np.zeros(num_m)\n d_vis[:] = np.nan\n for i in range(num_block * num_block * 2):\n d_vis[np.where(G[i, :] > 0)[0]] = d[i]\n d_vis = d_vis.reshape(num_x, num_y)\n return d_vis\n\n\ndef visualize_one_d(d):\n plt.plot(np.arange(70) / 10, d.reshape(70, 1)[:, 0], label='pumping well')\n plt.xlabel('Days')\n plt.ylabel('Head')\n plt.legend()\n\n\ndef visualize_one_m(m, vmin=-4, vmax=0, cmap='viridis', title=\n 'True spatial field, m'):\n fig, ax = plt.subplots(figsize=[6, 6])\n m_show = ax.imshow(m.T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax)\n ax.set_xticks([])\n ax.set_yticks([])\n if title:\n ax.set_title(title, fontsize=13)\n well_location = [49, 49]\n direct_data_loc = [30, 70]\n ax.scatter(well_location[0], well_location[1], s=100, color='black',\n label='indirect pumping well')\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=100, color='red',\n label='direct logK')\n ax.legend()\n fig.colorbar(m_show, ax=ax, shrink=0.6)\n\n\ndef print_theta_multiple(theta, name='theta', head=8):\n theta_pd = pd.DataFrame(theta, index=[('theta_' + str(i)) for i in np.\n arange(1, theta.shape[0] + 1)], columns=['mean', 'variance',\n 'max_range', 'min_range', 'anisotropy', 'head_west'])\n print(theta_pd.head(head))\n\n\ndef visualize_multiple_m(m, head=4, vmin=-4, vmax=0, cmap='viridis', theta=None\n ):\n plt.figure(figsize=[20, 8])\n for i in np.arange(head):\n ax = plt.subplot(1, 4, i + 1)\n ax.imshow(m[i, :, :].T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax\n )\n ax.set_xticks([])\n ax.set_yticks([])\n well_location = [49, 49]\n direct_data_loc = [30, 70]\n ax.scatter(well_location[0], well_location[1], s=50, color='black',\n 
label='pumping well')\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=50, color=\n 'red', label='direct logK')\n if theta is not None:\n ax.set_title('θ = ' + str(tuple(np.round(theta[i, :], 1))))\n\n\ndef visualize_multiple_pc(m, PCA, head=8, vmin=-4, vmax=0, cmap='viridis',\n rect=False):\n plt.figure(figsize=[25, 10])\n for i in np.arange(head):\n ax = plt.subplot(1, 10, i + 1)\n ax.imshow(m[i, :].reshape(num_x, num_y).T, origin='lower', cmap=\n cmap, vmin=vmin, vmax=vmax)\n if rect:\n rect = patches.Rectangle((32, 32), 36, 36, linewidth=2,\n linestyle='dashed', edgecolor='black', facecolor='None')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_title('PCA ' + str(i + 1) + ': ' + str(np.int(PCA[\n 'explained_variance'][i] * 100)) + '%')\n\n\ndef visualize_multiple_d(d, head=4):\n plt.figure(figsize=[25, 3])\n for i in np.arange(head):\n ax = plt.subplot(1, 4, i + 1)\n ax.plot(np.arange(70) / 10, d[:, i].reshape(70, 1)[:, 0], label=\n 'pumping well')\n ax.set_xlabel('Days')\n ax.set_ylabel('Head')\n\n\ndef colors_from_values(values, palette_name):\n normalized = (values - min(values)) / (max(values) - min(values))\n indices = np.round(normalized * (len(values) - 1)).astype(np.int32)\n palette = sns.color_palette(palette_name, len(values))\n return np.array(palette).take(indices, axis=0)\n\n\ndef visualize_mean_var(mu, covariance, vmin=20, vmax=40, cmap='viridis'):\n var = np.diag(covariance)\n plt.figure(figsize=[18, 4])\n ax = plt.subplot(2, 4, 1)\n ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=\n vmin, vmax=vmax)\n rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),\n num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=\n 'black', facecolor='None', label='pilot area')\n ax.add_patch(rect)\n 
ax.set_xticks([])\n ax.set_yticks([])\n ax = plt.subplot(2, 4, 2)\n ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=\n 0, vmax=16)\n rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),\n num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=\n 'black', facecolor='None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n\n\ndef visualize_mean_var_MC(m, start_loc, num_grid, vmin=-3, vmax=1, vmin_var\n =0, vmax_var=0.2, cmap='viridis', rect=False):\n mu = np.mean(m, axis=0)\n var = np.var(m, axis=0)\n plt.figure(figsize=[10, 4])\n ax = plt.subplot(1, 2, 1)\n ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=\n vmin, vmax=vmax)\n if rect:\n rect = patches.Rectangle((start_loc, start_loc), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n well_location = [49, 49]\n ax.scatter(well_location[0], well_location[1], s=20, color='black',\n label='pumping well')\n direct_data_loc = [30, 70]\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=50, color='red',\n label='direct logK')\n ax = plt.subplot(1, 2, 2)\n ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap='magma',\n vmin=vmin_var, vmax=vmax_var)\n if rect:\n rect = patches.Rectangle((start_loc, start_loc), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n\n\ndef visualize_ensemble_d(d, d_obs, ymin=None, ymax=11.5):\n plt.plot(np.arange(70) / 10, d, color='C0')\n plt.plot(np.arange(70) / 10, d_obs, color='C1', linewidth=2, label=\n 'observed data')\n plt.xlabel('Days')\n 
plt.ylabel('Head')\n plt.legend()\n plt.ylim(ymin, ymax)\n\n\ndef pos_pairplot(theta_pos, theta_name):\n sns.pairplot(pd.DataFrame(theta_pos.T, columns=theta_name), kind='hist')\n\n\ndef prior_pos_theta(theta, theta_pos, theta_true, theta_name):\n num_theta = theta.shape[1]\n plt.figure(figsize=[25, 10])\n for i in np.arange(num_theta):\n ax = plt.subplot(2, 3, i + 1)\n ax.hist(theta[:, i], density=True, bins=1, label='prior', alpha=0.7)\n y_, _, _ = ax.hist(theta_pos[i, :], density=True, bins=20, label=\n 'posterior', alpha=0.7)\n ax.vlines(x=theta_true[i], ymin=0, ymax=np.max(y_), linestyles='--',\n label='true', color='black')\n ax.legend()\n ax.set_title(theta_name[i])\n ax.set_ylabel('pdf')\n\n\ndef ML_dimension_reduction_vis(pred_train, y_train, pred_test, y_test,\n S_d_obs, theta_name):\n fig = plt.figure(figsize=[24, 10])\n num_theta = len(theta_name)\n for i in np.arange(num_theta):\n ax = plt.subplot(2, 3, i + 1)\n ax.plot(pred_train[:, i], y_train[:, i], '.', label='train')\n ax.plot(pred_test[:, i], y_test[:, i], '.', label='test')\n ax.vlines(x=S_d_obs[0, i], ymin=-1, ymax=1, linestyles='--', color=\n 'black', zorder=100)\n ax.plot([-1.2, 1.2], [-1.2, 1.2])\n ax.legend()\n ax.set_xlabel('S(d_' + str(i + 1) + ')')\n ax.set_ylabel(theta_name[i] + '_rescaled')\n ax.set_xlim(-1.2, 1.2)\n ax.set_ylim(-1.2, 1.2)\n\n\ndef history_plot(history):\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'validation'], loc='upper left')\n plt.show()\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport gstools as gs\nimport matplotlib.pyplot as plt\nfrom matplotlib import patches\nimport seaborn as sns\nplt.rcParams.update({'font.size': 15})\nimport os\npath = os.path.dirname(os.getcwd())\nsubpath = '/examples/case2_nonlinear_forward_pumping_test/'\nnum_prior_sample = 5000\nnum_x = 100\nnum_y = 100\n\n\ndef print_theta(theta, name='theta'):\n theta_pd = pd.DataFrame(theta.reshape(1, -1), index=[name], columns=[\n 'mean', 'variance', 'max_range', 'min_range', 'anisotropy',\n 'head_west'])\n print(theta_pd)\n\n\ndef visualize_d_2D(d):\n num_block = 3\n d_vis = np.zeros(num_m)\n d_vis[:] = np.nan\n for i in range(num_block * num_block * 2):\n d_vis[np.where(G[i, :] > 0)[0]] = d[i]\n d_vis = d_vis.reshape(num_x, num_y)\n return d_vis\n\n\ndef visualize_one_d(d):\n plt.plot(np.arange(70) / 10, d.reshape(70, 1)[:, 0], label='pumping well')\n plt.xlabel('Days')\n plt.ylabel('Head')\n plt.legend()\n\n\ndef visualize_one_m(m, vmin=-4, vmax=0, cmap='viridis', title=\n 'True spatial field, m'):\n fig, ax = plt.subplots(figsize=[6, 6])\n m_show = ax.imshow(m.T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax)\n ax.set_xticks([])\n ax.set_yticks([])\n if title:\n ax.set_title(title, fontsize=13)\n well_location = [49, 49]\n direct_data_loc = [30, 70]\n ax.scatter(well_location[0], well_location[1], s=100, color='black',\n label='indirect pumping well')\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=100, color='red',\n label='direct logK')\n ax.legend()\n fig.colorbar(m_show, ax=ax, shrink=0.6)\n\n\ndef print_theta_multiple(theta, name='theta', head=8):\n theta_pd = pd.DataFrame(theta, index=[('theta_' + str(i)) for i in np.\n arange(1, theta.shape[0] + 1)], columns=['mean', 'variance',\n 'max_range', 'min_range', 'anisotropy', 'head_west'])\n print(theta_pd.head(head))\n\n\ndef visualize_multiple_m(m, head=4, vmin=-4, vmax=0, cmap='viridis', theta=None\n ):\n plt.figure(figsize=[20, 8])\n for i in 
np.arange(head):\n ax = plt.subplot(1, 4, i + 1)\n ax.imshow(m[i, :, :].T, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax\n )\n ax.set_xticks([])\n ax.set_yticks([])\n well_location = [49, 49]\n direct_data_loc = [30, 70]\n ax.scatter(well_location[0], well_location[1], s=50, color='black',\n label='pumping well')\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=50, color=\n 'red', label='direct logK')\n if theta is not None:\n ax.set_title('θ = ' + str(tuple(np.round(theta[i, :], 1))))\n\n\ndef visualize_multiple_pc(m, PCA, head=8, vmin=-4, vmax=0, cmap='viridis',\n rect=False):\n plt.figure(figsize=[25, 10])\n for i in np.arange(head):\n ax = plt.subplot(1, 10, i + 1)\n ax.imshow(m[i, :].reshape(num_x, num_y).T, origin='lower', cmap=\n cmap, vmin=vmin, vmax=vmax)\n if rect:\n rect = patches.Rectangle((32, 32), 36, 36, linewidth=2,\n linestyle='dashed', edgecolor='black', facecolor='None')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_title('PCA ' + str(i + 1) + ': ' + str(np.int(PCA[\n 'explained_variance'][i] * 100)) + '%')\n\n\ndef visualize_multiple_d(d, head=4):\n plt.figure(figsize=[25, 3])\n for i in np.arange(head):\n ax = plt.subplot(1, 4, i + 1)\n ax.plot(np.arange(70) / 10, d[:, i].reshape(70, 1)[:, 0], label=\n 'pumping well')\n ax.set_xlabel('Days')\n ax.set_ylabel('Head')\n\n\ndef colors_from_values(values, palette_name):\n normalized = (values - min(values)) / (max(values) - min(values))\n indices = np.round(normalized * (len(values) - 1)).astype(np.int32)\n palette = sns.color_palette(palette_name, len(values))\n return np.array(palette).take(indices, axis=0)\n\n\ndef visualize_mean_var(mu, covariance, vmin=20, vmax=40, cmap='viridis'):\n var = np.diag(covariance)\n plt.figure(figsize=[18, 4])\n ax = plt.subplot(2, 4, 1)\n ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=\n vmin, vmax=vmax)\n rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,\n linewidth=2, 
linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),\n num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=\n 'black', facecolor='None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n ax = plt.subplot(2, 4, 2)\n ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=\n 0, vmax=16)\n rect = patches.Rectangle((start_loc_x, start_loc_y), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n rect = patches.Rectangle((start_loc_x + num_grid * 2, start_loc_y),\n num_grid, num_grid, linewidth=2, linestyle='dashed', edgecolor=\n 'black', facecolor='None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n\n\ndef visualize_mean_var_MC(m, start_loc, num_grid, vmin=-3, vmax=1, vmin_var\n =0, vmax_var=0.2, cmap='viridis', rect=False):\n mu = np.mean(m, axis=0)\n var = np.var(m, axis=0)\n plt.figure(figsize=[10, 4])\n ax = plt.subplot(1, 2, 1)\n ax.imshow(mu.reshape(num_x, num_y).T, origin='lower', cmap=cmap, vmin=\n vmin, vmax=vmax)\n if rect:\n rect = patches.Rectangle((start_loc, start_loc), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 'None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n well_location = [49, 49]\n ax.scatter(well_location[0], well_location[1], s=20, color='black',\n label='pumping well')\n direct_data_loc = [30, 70]\n ax.scatter(direct_data_loc[0], direct_data_loc[1], s=50, color='red',\n label='direct logK')\n ax = plt.subplot(1, 2, 2)\n ax.imshow(var.reshape(num_x, num_y).T, origin='lower', cmap='magma',\n vmin=vmin_var, vmax=vmax_var)\n if rect:\n rect = patches.Rectangle((start_loc, start_loc), num_grid, num_grid,\n linewidth=2, linestyle='dashed', edgecolor='black', facecolor=\n 
'None', label='pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n\n\ndef visualize_ensemble_d(d, d_obs, ymin=None, ymax=11.5):\n plt.plot(np.arange(70) / 10, d, color='C0')\n plt.plot(np.arange(70) / 10, d_obs, color='C1', linewidth=2, label=\n 'observed data')\n plt.xlabel('Days')\n plt.ylabel('Head')\n plt.legend()\n plt.ylim(ymin, ymax)\n\n\ndef pos_pairplot(theta_pos, theta_name):\n sns.pairplot(pd.DataFrame(theta_pos.T, columns=theta_name), kind='hist')\n\n\ndef prior_pos_theta(theta, theta_pos, theta_true, theta_name):\n num_theta = theta.shape[1]\n plt.figure(figsize=[25, 10])\n for i in np.arange(num_theta):\n ax = plt.subplot(2, 3, i + 1)\n ax.hist(theta[:, i], density=True, bins=1, label='prior', alpha=0.7)\n y_, _, _ = ax.hist(theta_pos[i, :], density=True, bins=20, label=\n 'posterior', alpha=0.7)\n ax.vlines(x=theta_true[i], ymin=0, ymax=np.max(y_), linestyles='--',\n label='true', color='black')\n ax.legend()\n ax.set_title(theta_name[i])\n ax.set_ylabel('pdf')\n\n\ndef ML_dimension_reduction_vis(pred_train, y_train, pred_test, y_test,\n S_d_obs, theta_name):\n fig = plt.figure(figsize=[24, 10])\n num_theta = len(theta_name)\n for i in np.arange(num_theta):\n ax = plt.subplot(2, 3, i + 1)\n ax.plot(pred_train[:, i], y_train[:, i], '.', label='train')\n ax.plot(pred_test[:, i], y_test[:, i], '.', label='test')\n ax.vlines(x=S_d_obs[0, i], ymin=-1, ymax=1, linestyles='--', color=\n 'black', zorder=100)\n ax.plot([-1.2, 1.2], [-1.2, 1.2])\n ax.legend()\n ax.set_xlabel('S(d_' + str(i + 1) + ')')\n ax.set_ylabel(theta_name[i] + '_rescaled')\n ax.set_xlim(-1.2, 1.2)\n ax.set_ylim(-1.2, 1.2)\n\n\ndef history_plot(history):\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'validation'], loc='upper left')\n plt.show()\n",
"step-5": "# Author: Lijing Wang (lijing52@stanford.edu), 2021\n\nimport numpy as np\nimport pandas as pd\nimport gstools as gs\nimport matplotlib.pyplot as plt\nfrom matplotlib import patches\nimport seaborn as sns\nplt.rcParams.update({'font.size': 15})\n\nimport os\npath = os.path.dirname(os.getcwd()) \n\nsubpath = '/examples/case2_nonlinear_forward_pumping_test/'\n\nnum_prior_sample = 5000\nnum_x = 100\nnum_y = 100\n\ndef print_theta(theta, name = 'theta'):\n theta_pd = pd.DataFrame(theta.reshape(1,-1), index = [name], columns = ['mean','variance','max_range','min_range','anisotropy','head_west'])\n print(theta_pd)\n\n\ndef visualize_d_2D(d):\n num_block = 3\n d_vis = np.zeros(num_m)\n d_vis[:] = np.nan\n for i in range(num_block*num_block*2):\n d_vis[np.where(G[i,:]>0)[0]] = d[i]\n d_vis = d_vis.reshape(num_x,num_y)\n return d_vis\n\ndef visualize_one_d(d):\n plt.plot(np.arange(70)/10, d.reshape(70,1)[:,0],label = 'pumping well')\n plt.xlabel('Days')\n plt.ylabel('Head')\n plt.legend()\n\ndef visualize_one_m(m, vmin = -4, vmax = 0, cmap = 'viridis',title = 'True spatial field, m'):\n fig, ax = plt.subplots(figsize = [6,6])\n m_show = ax.imshow(m.T, origin = 'lower', cmap = cmap, vmin = vmin, vmax = vmax)\n ax.set_xticks([])\n ax.set_yticks([])\n if title:\n ax.set_title(title,fontsize = 13)\n \n well_location = [49,49]\n direct_data_loc = [30,70]\n ax.scatter(well_location[0],well_location[1],s = 100, color = 'black', label = 'indirect pumping well')\n ax.scatter(direct_data_loc[0],direct_data_loc[1],s = 100, color = 'red', label = 'direct logK')\n ax.legend()\n fig.colorbar(m_show, ax = ax, shrink = 0.6)\n\ndef print_theta_multiple(theta, name = 'theta',head = 8):\n theta_pd = pd.DataFrame(theta, index = ['theta_'+str(i) for i in np.arange(1,theta.shape[0]+1)], columns = ['mean','variance','max_range','min_range','anisotropy','head_west'])\n print(theta_pd.head(head))\n\ndef visualize_multiple_m(m, head = 4, vmin = -4, vmax = 0, cmap = 'viridis', theta = 
None):\n plt.figure(figsize = [20,8])\n for i in np.arange(head):\n ax = plt.subplot(1, 4, i+1)\n ax.imshow(m[i,:,:].T, origin = 'lower', cmap = cmap, vmin = vmin, vmax = vmax)\n ax.set_xticks([])\n ax.set_yticks([])\n well_location = [49,49]\n direct_data_loc = [30,70]\n ax.scatter(well_location[0],well_location[1],s = 50, color = 'black', label = 'pumping well')\n ax.scatter(direct_data_loc[0],direct_data_loc[1],s = 50, color = 'red', label = 'direct logK')\n if theta is not None: \n ax.set_title('\\u03B8 = '+str(tuple(np.round(theta[i,:],1))))\n\ndef visualize_multiple_pc(m, PCA, head = 8, vmin = -4, vmax = 0, cmap = 'viridis',rect = False):\n plt.figure(figsize = [25,10])\n for i in np.arange(head):\n ax = plt.subplot(1, 10, i+1)\n ax.imshow(m[i,:].reshape(num_x,num_y).T, origin = 'lower', cmap = cmap, vmin = vmin, vmax = vmax)\n if rect:\n rect = patches.Rectangle((32,32),36, 36, linewidth=2,linestyle = 'dashed', edgecolor='black',facecolor='None')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_title('PCA '+str(i+1)+': '+str(np.int(PCA['explained_variance'][i]*100))+'%')\n\ndef visualize_multiple_d(d, head = 4):\n plt.figure(figsize = [25,3])\n for i in np.arange(head):\n ax = plt.subplot(1, 4, i+1)\n ax.plot(np.arange(70)/10, d[:,i].reshape(70,1)[:,0],label = 'pumping well')\n #ax.plot(np.arange(70)/10, d[:,i].reshape(70,5)[:,1],label = 'obs well: SW')\n #ax.plot(np.arange(70)/10, d[:,i].reshape(70,5)[:,2],label = 'obs well: NE')\n ##ax.plot(np.arange(70)/10, d[:,i].reshape(70,5)[:,3],label = 'obs well: NW')\n #ax.plot(np.arange(70)/10, d[:,i].reshape(70,5)[:,4],label = 'obs well: SE')\n ax.set_xlabel('Days')\n ax.set_ylabel('Head')\n #ax.legend()\n\ndef colors_from_values(values, palette_name):\n # normalize the values to range [0, 1]\n normalized = (values - min(values)) / (max(values) - min(values))\n # convert to indices\n indices = np.round(normalized * (len(values) - 1)).astype(np.int32)\n # use the indices to get the colors\n 
palette = sns.color_palette(palette_name, len(values))\n return np.array(palette).take(indices, axis=0)\n\ndef visualize_mean_var(mu, covariance, vmin = 20, vmax = 40, cmap = 'viridis'):\n var = np.diag(covariance)\n plt.figure(figsize = [18,4])\n ax = plt.subplot(2, 4, 1)\n ax.imshow(mu.reshape(num_x,num_y).T, origin = 'lower', cmap = cmap, vmin = vmin, vmax = vmax)\n rect = patches.Rectangle((start_loc_x,start_loc_y),num_grid, num_grid, linewidth=2,linestyle = 'dashed', edgecolor='black',facecolor='None', label = 'pilot area')\n ax.add_patch(rect)\n rect = patches.Rectangle((start_loc_x+num_grid*2,start_loc_y),num_grid,num_grid, linewidth=2,linestyle = 'dashed', edgecolor='black',facecolor='None', label = 'pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n ax = plt.subplot(2, 4, 2)\n ax.imshow(var.reshape(num_x,num_y).T, origin = 'lower', cmap = cmap, vmin = 0, vmax = 16)\n rect = patches.Rectangle((start_loc_x,start_loc_y),num_grid, num_grid, linewidth=2,linestyle = 'dashed', edgecolor='black',facecolor='None', label = 'pilot area')\n ax.add_patch(rect)\n rect = patches.Rectangle((start_loc_x+num_grid*2,start_loc_y),num_grid,num_grid, linewidth=2,linestyle = 'dashed', edgecolor='black',facecolor='None', label = 'pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n\ndef visualize_mean_var_MC(m, start_loc, num_grid,vmin = -3, vmax = 1,vmin_var = 0, vmax_var = 0.2, cmap = 'viridis', rect = False):\n mu = np.mean(m,axis = 0)\n var = np.var(m,axis = 0)\n plt.figure(figsize = [10,4])\n ax = plt.subplot(1, 2, 1)\n ax.imshow(mu.reshape(num_x,num_y).T, origin = 'lower', cmap = cmap, vmin = vmin, vmax = vmax)\n if rect:\n rect = patches.Rectangle((start_loc,start_loc),num_grid, num_grid, linewidth=2,linestyle = 'dashed', edgecolor='black',facecolor='None', label = 'pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n well_location = [49,49]\n ax.scatter(well_location[0],well_location[1],s = 20, color = 
'black', label = 'pumping well')\n direct_data_loc = [30,70]\n ax.scatter(direct_data_loc[0],direct_data_loc[1],s = 50, color = 'red', label = 'direct logK')\n\n ax = plt.subplot(1, 2, 2)\n ax.imshow(var.reshape(num_x,num_y).T, origin = 'lower', cmap = 'magma', vmin = vmin_var, vmax = vmax_var)\n if rect:\n rect = patches.Rectangle((start_loc,start_loc),num_grid, num_grid, linewidth=2,linestyle = 'dashed', edgecolor='black',facecolor='None', label = 'pilot area')\n ax.add_patch(rect)\n ax.set_xticks([])\n ax.set_yticks([])\n\ndef visualize_ensemble_d(d,d_obs,ymin = None,ymax = 11.5):\n plt.plot(np.arange(70)/10, d,color = 'C0')\n plt.plot(np.arange(70)/10, d_obs,color = 'C1',linewidth = 2,label = 'observed data')\n plt.xlabel('Days')\n plt.ylabel('Head')\n plt.legend()\n plt.ylim(ymin,ymax)\n\n# Visualization: updating theta\ndef pos_pairplot(theta_pos, theta_name):\n sns.pairplot(pd.DataFrame(theta_pos.T,columns = theta_name),kind=\"hist\")\n\ndef prior_pos_theta(theta, theta_pos, theta_true, theta_name):\n num_theta = theta.shape[1]\n plt.figure(figsize=[25,10])\n for i in np.arange(num_theta): \n ax = plt.subplot(2, 3, i+1)\n ax.hist(theta[:,i],density=True, bins = 1,label = 'prior',alpha = 0.7)\n y_, _, _ = ax.hist(theta_pos[i,:],density=True, bins = 20,label = 'posterior',alpha = 0.7)\n ax.vlines(x = theta_true[i], ymin = 0, ymax = np.max(y_),linestyles='--',label = 'true',color = 'black')\n ax.legend()\n ax.set_title(theta_name[i])\n ax.set_ylabel('pdf')\n\ndef ML_dimension_reduction_vis(pred_train, y_train, pred_test, y_test, S_d_obs, theta_name):\n fig = plt.figure(figsize=[24,10])\n num_theta = len(theta_name)\n for i in np.arange(num_theta): \n ax = plt.subplot(2, 3, i+1)\n ax.plot(pred_train[:,i], y_train[:,i],'.',label = 'train')\n ax.plot(pred_test[:,i], y_test[:,i],'.',label = 'test')\n ax.vlines(x = S_d_obs[0,i],ymin = -1, ymax = 1, linestyles='--',color = 'black',zorder = 100)\n ax.plot([-1.2,1.2],[-1.2,1.2])\n ax.legend()\n 
ax.set_xlabel('S(d_'+str(i+1)+')')\n ax.set_ylabel(theta_name[i]+'_rescaled')\n ax.set_xlim(-1.2,1.2)\n ax.set_ylim(-1.2,1.2)\n\ndef history_plot(history):\n # summarize history for loss\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'validation'], loc='upper left')\n plt.show()\n",
"step-ids": [
11,
13,
17,
19,
20
]
}
|
[
11,
13,
17,
19,
20
] |
class Tool:
<|reserved_special_token_0|>
def __repr__(self):
return f'Tool({self.name!r},{self.weight})'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Tool:
def __init__(self, name, weight):
self.name = name
self.weight = weight
def __repr__(self):
return f'Tool({self.name!r},{self.weight})'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Tool:
def __init__(self, name, weight):
self.name = name
self.weight = weight
def __repr__(self):
return f'Tool({self.name!r},{self.weight})'
<|reserved_special_token_0|>
print(repr(tools))
tools.sort(reverse=True, key=lambda x: len(x.name))
print(tools)
<|reserved_special_token_1|>
class Tool:
def __init__(self, name, weight):
self.name = name
self.weight = weight
def __repr__(self):
return f'Tool({self.name!r},{self.weight})'
tools = [Tool('수준계', 3.5), Tool('해머', 1.25), Tool('스크류드라이버', 0.5), Tool('끌',
0.25)]
print(repr(tools))
tools.sort(reverse=True, key=lambda x: len(x.name))
print(tools)
<|reserved_special_token_1|>
class Tool:
def __init__(self, name, weight):
self.name = name
self.weight = weight
def __repr__(self):
return f'Tool({self.name!r},{self.weight})'
tools = [
Tool('수준계', 3.5),
Tool('해머', 1.25),
Tool('스크류드라이버', .5),
Tool('끌', .25)
]
print(repr(tools))
tools.sort(reverse=True, key=lambda x: len(x.name))
print(tools)
|
flexible
|
{
"blob_id": "173b8e66ead62e3aa70805e42e06ea05257d5ee2",
"index": 2965,
"step-1": "class Tool:\n <mask token>\n\n def __repr__(self):\n return f'Tool({self.name!r},{self.weight})'\n\n\n<mask token>\n",
"step-2": "class Tool:\n\n def __init__(self, name, weight):\n self.name = name\n self.weight = weight\n\n def __repr__(self):\n return f'Tool({self.name!r},{self.weight})'\n\n\n<mask token>\n",
"step-3": "class Tool:\n\n def __init__(self, name, weight):\n self.name = name\n self.weight = weight\n\n def __repr__(self):\n return f'Tool({self.name!r},{self.weight})'\n\n\n<mask token>\nprint(repr(tools))\ntools.sort(reverse=True, key=lambda x: len(x.name))\nprint(tools)\n",
"step-4": "class Tool:\n\n def __init__(self, name, weight):\n self.name = name\n self.weight = weight\n\n def __repr__(self):\n return f'Tool({self.name!r},{self.weight})'\n\n\ntools = [Tool('수준계', 3.5), Tool('해머', 1.25), Tool('스크류드라이버', 0.5), Tool('끌',\n 0.25)]\nprint(repr(tools))\ntools.sort(reverse=True, key=lambda x: len(x.name))\nprint(tools)\n",
"step-5": "class Tool:\n def __init__(self, name, weight):\n self.name = name\n self.weight = weight\n\n def __repr__(self):\n return f'Tool({self.name!r},{self.weight})'\n\n\ntools = [\n Tool('수준계', 3.5),\n Tool('해머', 1.25),\n Tool('스크류드라이버', .5),\n Tool('끌', .25)\n]\nprint(repr(tools))\ntools.sort(reverse=True, key=lambda x: len(x.name))\nprint(tools)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def open_dir(input_path, patterns):
"""
Opens the specified input path and returns any located excel file
:param patterns: the file extensions to glob over (eg xls, csv)
:param input_path: the starting path
:return: generator of all found files
"""
for ext in patterns:
for file in Path(input_path).glob('**/*.' + ext):
yield file
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def prep_file_name(path, file):
"""
append the original path and file name
* strips special chars
* remove spaces (replace with underscore)
* convert to lowercase
:param path: the path part of the new file name
:param file: the original file name
:return: sanitized name
"""
name = path.__str__() + '~' + file.__str__()
name = name.lower()
name = name.replace(' ', '_')
name = re.sub('[^a-z0-9\\-_!.~]+', '', name)
return name
def open_dir(input_path, patterns):
"""
Opens the specified input path and returns any located excel file
:param patterns: the file extensions to glob over (eg xls, csv)
:param input_path: the starting path
:return: generator of all found files
"""
for ext in patterns:
for file in Path(input_path).glob('**/*.' + ext):
yield file
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def prep_file_name(path, file):
"""
append the original path and file name
* strips special chars
* remove spaces (replace with underscore)
* convert to lowercase
:param path: the path part of the new file name
:param file: the original file name
:return: sanitized name
"""
name = path.__str__() + '~' + file.__str__()
name = name.lower()
name = name.replace(' ', '_')
name = re.sub('[^a-z0-9\\-_!.~]+', '', name)
return name
def open_dir(input_path, patterns):
"""
Opens the specified input path and returns any located excel file
:param patterns: the file extensions to glob over (eg xls, csv)
:param input_path: the starting path
:return: generator of all found files
"""
for ext in patterns:
for file in Path(input_path).glob('**/*.' + ext):
yield file
def shred_sheets(subdomain, audit_date, input_file, _format):
"""
Opens an excel workbook, and converts all sheets to a new file of the specified format
:param subdomain: appended to data frame
:param audit_date: appended to data fram
:param input_file: the path to the excel book
:param _format: the format to convert all sheets
:return:
"""
name = extract_dir_name(input_file)
fname = PurePath(input_file).name.__str__()
try:
os.makedirs(name)
except:
pass
wb = pd.ExcelFile(input_file)
for ws in wb.sheet_names:
data = pd.read_excel(input_file, sheet_name=ws)
data.index.names = ['ix']
data['subdomin'] = subdomain
data['audit_date'] = audit_date
cols = data.columns
renamed = []
for col in cols:
col = re.sub('[^a-zA-Z0-9]', '', col)
renamed.append(col)
data.columns = renamed
if _format == 'mongo':
client = MongoClient('mongodb://localhost:27017/')
db = client.Sitebulb
cl = db.August5
try:
cl.insert_many(data.to_dict('records'))
except Exception as e:
click.secho(f'\nERROR in [{input_file},{ws}] -- {e}', fg='red')
continue
if _format == 'json' or _format == 'all':
try:
new_file = os.path.join(name, fname + '~' + ws + '.json')
data.to_json(new_file, orient='records')
except Exception as e:
click.secho(f'\nERROR in [{input_file},{ws}] -- {e}', fg='red')
continue
if _format == 'csv' or _format == 'all':
try:
new_file = os.path.join(name, fname + '~' + ws + '.csv')
data.to_csv(new_file)
except Exception as e:
click.secho(f'\nERROR in [{input_file},{ws}] -- {e}', fg='red')
continue
<|reserved_special_token_1|>
import os
import re
import click
import pandas as pd
from pymongo import MongoClient
from pathlib import Path, PurePath
def extract_dir_name(input_file):
"""
creates a directory path based on the specified file name
:param input_file: file bane
:return: full path, minus extension
"""
fname = PurePath(input_file).__str__()
s = fname.split('.')
name = '.'.join(s[:-1])
return name
def prep_file_name(path, file):
"""
append the original path and file name
* strips special chars
* remove spaces (replace with underscore)
* convert to lowercase
:param path: the path part of the new file name
:param file: the original file name
:return: sanitized name
"""
name = path.__str__() + '~' + file.__str__()
name = name.lower()
name = name.replace(' ', '_')
name = re.sub('[^a-z0-9\\-_!.~]+', '', name)
return name
def open_dir(input_path, patterns):
"""
Opens the specified input path and returns any located excel file
:param patterns: the file extensions to glob over (eg xls, csv)
:param input_path: the starting path
:return: generator of all found files
"""
for ext in patterns:
for file in Path(input_path).glob('**/*.' + ext):
yield file
def shred_sheets(subdomain, audit_date, input_file, _format):
"""
Opens an excel workbook, and converts all sheets to a new file of the specified format
:param subdomain: appended to data frame
:param audit_date: appended to data fram
:param input_file: the path to the excel book
:param _format: the format to convert all sheets
:return:
"""
name = extract_dir_name(input_file)
fname = PurePath(input_file).name.__str__()
try:
os.makedirs(name)
except:
pass
wb = pd.ExcelFile(input_file)
for ws in wb.sheet_names:
data = pd.read_excel(input_file, sheet_name=ws)
data.index.names = ['ix']
data['subdomin'] = subdomain
data['audit_date'] = audit_date
cols = data.columns
renamed = []
for col in cols:
col = re.sub('[^a-zA-Z0-9]', '', col)
renamed.append(col)
data.columns = renamed
if _format == 'mongo':
client = MongoClient('mongodb://localhost:27017/')
db = client.Sitebulb
cl = db.August5
try:
cl.insert_many(data.to_dict('records'))
except Exception as e:
click.secho(f'\nERROR in [{input_file},{ws}] -- {e}', fg='red')
continue
if _format == 'json' or _format == 'all':
try:
new_file = os.path.join(name, fname + '~' + ws + '.json')
data.to_json(new_file, orient='records')
except Exception as e:
click.secho(f'\nERROR in [{input_file},{ws}] -- {e}', fg='red')
continue
if _format == 'csv' or _format == 'all':
try:
new_file = os.path.join(name, fname + '~' + ws + '.csv')
data.to_csv(new_file)
except Exception as e:
click.secho(f'\nERROR in [{input_file},{ws}] -- {e}', fg='red')
continue
<|reserved_special_token_1|>
import os
import re
import click
import pandas as pd
from pymongo import MongoClient
from pathlib import Path, PurePath
def extract_dir_name(input_file):
"""
creates a directory path based on the specified file name
:param input_file: file bane
:return: full path, minus extension
"""
fname = PurePath(input_file).__str__()
s = fname.split('.')
name = '.'.join(s[:-1])
return name
def prep_file_name(path, file):
"""
append the original path and file name
* strips special chars
* remove spaces (replace with underscore)
* convert to lowercase
:param path: the path part of the new file name
:param file: the original file name
:return: sanitized name
"""
name = path.__str__() + '~' + file.__str__()
name = name.lower()
name = name.replace(' ', '_')
name = re.sub('[^a-z0-9\-_!.~]+', '', name)
return name
def open_dir(input_path, patterns):
"""
Opens the specified input path and returns any located excel file
:param patterns: the file extensions to glob over (eg xls, csv)
:param input_path: the starting path
:return: generator of all found files
"""
for ext in patterns:
for file in Path(input_path).glob('**/*.' + ext):
yield file
def shred_sheets(subdomain, audit_date, input_file, _format):
"""
Opens an excel workbook, and converts all sheets to a new file of the specified format
:param subdomain: appended to data frame
:param audit_date: appended to data fram
:param input_file: the path to the excel book
:param _format: the format to convert all sheets
:return:
"""
name = extract_dir_name(input_file)
fname = PurePath(input_file).name.__str__()
try:
os.makedirs(name)
except:
pass
wb = pd.ExcelFile(input_file)
for ws in wb.sheet_names:
data = pd.read_excel(input_file, sheet_name=ws)
# add constants
data.index.names = ['ix']
data['subdomin'] = subdomain
data['audit_date'] = audit_date
# strip chars we don't want in colum names
cols = data.columns
renamed = []
for col in cols:
col = re.sub('[^a-zA-Z0-9]', '', col)
renamed.append(col)
data.columns = renamed
# build output formats
if _format == 'mongo':
client = MongoClient('mongodb://localhost:27017/')
db = client.Sitebulb
cl = db.August5
try:
cl.insert_many(data.to_dict('records'))
except Exception as e:
click.secho(f'\nERROR in [{input_file},{ws}] -- {e}', fg='red')
continue
if _format == 'json' or _format == 'all':
try:
new_file = os.path.join(name, fname + '~' + ws + '.json')
data.to_json(new_file, orient="records")
except Exception as e:
click.secho(f'\nERROR in [{input_file},{ws}] -- {e}', fg='red')
continue
if _format == 'csv' or _format == 'all':
try:
new_file = os.path.join(name, fname + '~' + ws + '.csv')
data.to_csv(new_file)
except Exception as e:
click.secho(f'\nERROR in [{input_file},{ws}] -- {e}', fg='red')
continue
|
flexible
|
{
"blob_id": "f831b77850dfe22232092f66705e36970828a75b",
"index": 4975,
"step-1": "<mask token>\n\n\ndef open_dir(input_path, patterns):\n \"\"\"\n Opens the specified input path and returns any located excel file\n :param patterns: the file extensions to glob over (eg xls, csv)\n :param input_path: the starting path\n :return: generator of all found files\n \"\"\"\n for ext in patterns:\n for file in Path(input_path).glob('**/*.' + ext):\n yield file\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef prep_file_name(path, file):\n \"\"\"\n append the original path and file name\n * strips special chars\n * remove spaces (replace with underscore)\n * convert to lowercase\n :param path: the path part of the new file name\n :param file: the original file name\n :return: sanitized name\n \"\"\"\n name = path.__str__() + '~' + file.__str__()\n name = name.lower()\n name = name.replace(' ', '_')\n name = re.sub('[^a-z0-9\\\\-_!.~]+', '', name)\n return name\n\n\ndef open_dir(input_path, patterns):\n \"\"\"\n Opens the specified input path and returns any located excel file\n :param patterns: the file extensions to glob over (eg xls, csv)\n :param input_path: the starting path\n :return: generator of all found files\n \"\"\"\n for ext in patterns:\n for file in Path(input_path).glob('**/*.' + ext):\n yield file\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef prep_file_name(path, file):\n \"\"\"\n append the original path and file name\n * strips special chars\n * remove spaces (replace with underscore)\n * convert to lowercase\n :param path: the path part of the new file name\n :param file: the original file name\n :return: sanitized name\n \"\"\"\n name = path.__str__() + '~' + file.__str__()\n name = name.lower()\n name = name.replace(' ', '_')\n name = re.sub('[^a-z0-9\\\\-_!.~]+', '', name)\n return name\n\n\ndef open_dir(input_path, patterns):\n \"\"\"\n Opens the specified input path and returns any located excel file\n :param patterns: the file extensions to glob over (eg xls, csv)\n :param input_path: the starting path\n :return: generator of all found files\n \"\"\"\n for ext in patterns:\n for file in Path(input_path).glob('**/*.' + ext):\n yield file\n\n\ndef shred_sheets(subdomain, audit_date, input_file, _format):\n \"\"\"\n Opens an excel workbook, and converts all sheets to a new file of the specified format\n :param subdomain: appended to data frame\n :param audit_date: appended to data fram\n :param input_file: the path to the excel book\n :param _format: the format to convert all sheets\n :return:\n \"\"\"\n name = extract_dir_name(input_file)\n fname = PurePath(input_file).name.__str__()\n try:\n os.makedirs(name)\n except:\n pass\n wb = pd.ExcelFile(input_file)\n for ws in wb.sheet_names:\n data = pd.read_excel(input_file, sheet_name=ws)\n data.index.names = ['ix']\n data['subdomin'] = subdomain\n data['audit_date'] = audit_date\n cols = data.columns\n renamed = []\n for col in cols:\n col = re.sub('[^a-zA-Z0-9]', '', col)\n renamed.append(col)\n data.columns = renamed\n if _format == 'mongo':\n client = MongoClient('mongodb://localhost:27017/')\n db = client.Sitebulb\n cl = db.August5\n try:\n cl.insert_many(data.to_dict('records'))\n except Exception as e:\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\n continue\n if _format == 'json' or 
_format == 'all':\n try:\n new_file = os.path.join(name, fname + '~' + ws + '.json')\n data.to_json(new_file, orient='records')\n except Exception as e:\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\n continue\n if _format == 'csv' or _format == 'all':\n try:\n new_file = os.path.join(name, fname + '~' + ws + '.csv')\n data.to_csv(new_file)\n except Exception as e:\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\n continue\n",
"step-4": "import os\nimport re\nimport click\nimport pandas as pd\nfrom pymongo import MongoClient\nfrom pathlib import Path, PurePath\n\n\ndef extract_dir_name(input_file):\n \"\"\"\n creates a directory path based on the specified file name\n :param input_file: file bane\n :return: full path, minus extension\n \"\"\"\n fname = PurePath(input_file).__str__()\n s = fname.split('.')\n name = '.'.join(s[:-1])\n return name\n\n\ndef prep_file_name(path, file):\n \"\"\"\n append the original path and file name\n * strips special chars\n * remove spaces (replace with underscore)\n * convert to lowercase\n :param path: the path part of the new file name\n :param file: the original file name\n :return: sanitized name\n \"\"\"\n name = path.__str__() + '~' + file.__str__()\n name = name.lower()\n name = name.replace(' ', '_')\n name = re.sub('[^a-z0-9\\\\-_!.~]+', '', name)\n return name\n\n\ndef open_dir(input_path, patterns):\n \"\"\"\n Opens the specified input path and returns any located excel file\n :param patterns: the file extensions to glob over (eg xls, csv)\n :param input_path: the starting path\n :return: generator of all found files\n \"\"\"\n for ext in patterns:\n for file in Path(input_path).glob('**/*.' 
+ ext):\n yield file\n\n\ndef shred_sheets(subdomain, audit_date, input_file, _format):\n \"\"\"\n Opens an excel workbook, and converts all sheets to a new file of the specified format\n :param subdomain: appended to data frame\n :param audit_date: appended to data fram\n :param input_file: the path to the excel book\n :param _format: the format to convert all sheets\n :return:\n \"\"\"\n name = extract_dir_name(input_file)\n fname = PurePath(input_file).name.__str__()\n try:\n os.makedirs(name)\n except:\n pass\n wb = pd.ExcelFile(input_file)\n for ws in wb.sheet_names:\n data = pd.read_excel(input_file, sheet_name=ws)\n data.index.names = ['ix']\n data['subdomin'] = subdomain\n data['audit_date'] = audit_date\n cols = data.columns\n renamed = []\n for col in cols:\n col = re.sub('[^a-zA-Z0-9]', '', col)\n renamed.append(col)\n data.columns = renamed\n if _format == 'mongo':\n client = MongoClient('mongodb://localhost:27017/')\n db = client.Sitebulb\n cl = db.August5\n try:\n cl.insert_many(data.to_dict('records'))\n except Exception as e:\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\n continue\n if _format == 'json' or _format == 'all':\n try:\n new_file = os.path.join(name, fname + '~' + ws + '.json')\n data.to_json(new_file, orient='records')\n except Exception as e:\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\n continue\n if _format == 'csv' or _format == 'all':\n try:\n new_file = os.path.join(name, fname + '~' + ws + '.csv')\n data.to_csv(new_file)\n except Exception as e:\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\n continue\n",
"step-5": "import os\r\nimport re\r\nimport click\r\nimport pandas as pd\r\nfrom pymongo import MongoClient\r\nfrom pathlib import Path, PurePath\r\n\r\n\r\ndef extract_dir_name(input_file):\r\n \"\"\"\r\n creates a directory path based on the specified file name\r\n :param input_file: file bane\r\n :return: full path, minus extension\r\n \"\"\"\r\n fname = PurePath(input_file).__str__()\r\n s = fname.split('.')\r\n name = '.'.join(s[:-1])\r\n return name\r\n\r\n\r\ndef prep_file_name(path, file):\r\n \"\"\"\r\n append the original path and file name\r\n * strips special chars\r\n * remove spaces (replace with underscore)\r\n * convert to lowercase\r\n :param path: the path part of the new file name\r\n :param file: the original file name\r\n :return: sanitized name\r\n \"\"\"\r\n name = path.__str__() + '~' + file.__str__()\r\n name = name.lower()\r\n name = name.replace(' ', '_')\r\n name = re.sub('[^a-z0-9\\-_!.~]+', '', name)\r\n return name\r\n\r\n\r\ndef open_dir(input_path, patterns):\r\n \"\"\"\r\n Opens the specified input path and returns any located excel file\r\n :param patterns: the file extensions to glob over (eg xls, csv)\r\n :param input_path: the starting path\r\n :return: generator of all found files\r\n \"\"\"\r\n for ext in patterns:\r\n for file in Path(input_path).glob('**/*.' 
+ ext):\r\n yield file\r\n\r\n\r\ndef shred_sheets(subdomain, audit_date, input_file, _format):\r\n \"\"\"\r\n Opens an excel workbook, and converts all sheets to a new file of the specified format\r\n :param subdomain: appended to data frame\r\n :param audit_date: appended to data fram\r\n :param input_file: the path to the excel book\r\n :param _format: the format to convert all sheets\r\n :return:\r\n \"\"\"\r\n name = extract_dir_name(input_file)\r\n fname = PurePath(input_file).name.__str__()\r\n try:\r\n os.makedirs(name)\r\n except:\r\n pass\r\n\r\n wb = pd.ExcelFile(input_file)\r\n for ws in wb.sheet_names:\r\n data = pd.read_excel(input_file, sheet_name=ws)\r\n # add constants\r\n data.index.names = ['ix']\r\n data['subdomin'] = subdomain\r\n data['audit_date'] = audit_date\r\n\r\n # strip chars we don't want in colum names\r\n cols = data.columns\r\n renamed = []\r\n for col in cols:\r\n col = re.sub('[^a-zA-Z0-9]', '', col)\r\n renamed.append(col)\r\n\r\n data.columns = renamed\r\n\r\n # build output formats\r\n if _format == 'mongo':\r\n client = MongoClient('mongodb://localhost:27017/')\r\n db = client.Sitebulb\r\n cl = db.August5\r\n\r\n try:\r\n cl.insert_many(data.to_dict('records'))\r\n except Exception as e:\r\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\r\n continue\r\n\r\n if _format == 'json' or _format == 'all':\r\n try:\r\n new_file = os.path.join(name, fname + '~' + ws + '.json')\r\n data.to_json(new_file, orient=\"records\")\r\n except Exception as e:\r\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\r\n continue\r\n\r\n if _format == 'csv' or _format == 'all':\r\n try:\r\n new_file = os.path.join(name, fname + '~' + ws + '.csv')\r\n data.to_csv(new_file)\r\n except Exception as e:\r\n click.secho(f'\\nERROR in [{input_file},{ws}] -- {e}', fg='red')\r\n continue\r\n",
"step-ids": [
1,
2,
3,
5,
6
]
}
|
[
1,
2,
3,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(kwadraty, slownik, sep='\n')
<|reserved_special_token_1|>
lista = [x for x in range(11)]
kwadraty = [(i ** 2) for i in lista]
kwadraty = [(i, i ** 2, i ** 3) for i in range(-10, 11)]
zbior_wyr = {'aa', '1233', '111111'}
slownik = {i: len(i) for i in zbior_wyr}
print(kwadraty, slownik, sep='\n')
<|reserved_special_token_1|>
lista = [x for x in range(11)] ##todo: wazne
kwadraty = [i**2 for i in lista]
kwadraty = [(i, i**2, i**3) for i in range(-10, 11)]
zbior_wyr = {'aa', '1233', '111111'}
slownik = {i : len(i)for i in zbior_wyr}
print(kwadraty, slownik, sep='\n')
|
flexible
|
{
"blob_id": "248b9b9d613f71e0130353f0792083b7d3f6ccd6",
"index": 7000,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(kwadraty, slownik, sep='\\n')\n",
"step-3": "lista = [x for x in range(11)]\nkwadraty = [(i ** 2) for i in lista]\nkwadraty = [(i, i ** 2, i ** 3) for i in range(-10, 11)]\nzbior_wyr = {'aa', '1233', '111111'}\nslownik = {i: len(i) for i in zbior_wyr}\nprint(kwadraty, slownik, sep='\\n')\n",
"step-4": "lista = [x for x in range(11)] ##todo: wazne\n\nkwadraty = [i**2 for i in lista]\nkwadraty = [(i, i**2, i**3) for i in range(-10, 11)]\nzbior_wyr = {'aa', '1233', '111111'}\nslownik = {i : len(i)for i in zbior_wyr}\n\nprint(kwadraty, slownik, sep='\\n')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Category(Enum):
ONES = 1
TWOS = 2
THREES = 3
FOURS = 4
FIVES = 5
SIXES = 6
YACHT = auto()
FULL_HOUSE = auto()
FOUR_OF_A_KIND = auto()
LITTLE_STRAIGHT = auto()
BIG_STRAIGHT = auto()
CHOICE = auto()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Category(Enum):
ONES = 1
TWOS = 2
THREES = 3
FOURS = 4
FIVES = 5
SIXES = 6
YACHT = auto()
FULL_HOUSE = auto()
FOUR_OF_A_KIND = auto()
LITTLE_STRAIGHT = auto()
BIG_STRAIGHT = auto()
CHOICE = auto()
def score(dice, category):
die_counts = Counter(dice)
if category.value in range(1, 7):
return sum(d for d in dice if d == category.value)
if category is Category.YACHT:
return 50 if len(die_counts) == 1 else 0
if category is Category.CHOICE:
return sum(dice)
if category is Category.BIG_STRAIGHT:
return 30 if 1 not in die_counts and len(die_counts) == 5 else 0
if category is Category.LITTLE_STRAIGHT:
return 30 if 6 not in die_counts and len(die_counts) == 5 else 0
if category is Category.FULL_HOUSE:
return sum(dice) if len(die_counts) == 2 and 3 in die_counts.values(
) else 0
if category is Category.FOUR_OF_A_KIND:
four_die = [d for d, c in die_counts.items() if c >= 4]
return four_die[0] * 4 if four_die else 0
<|reserved_special_token_1|>
from collections import Counter
from enum import auto, Enum
class Category(Enum):
ONES = 1
TWOS = 2
THREES = 3
FOURS = 4
FIVES = 5
SIXES = 6
YACHT = auto()
FULL_HOUSE = auto()
FOUR_OF_A_KIND = auto()
LITTLE_STRAIGHT = auto()
BIG_STRAIGHT = auto()
CHOICE = auto()
def score(dice, category):
die_counts = Counter(dice)
if category.value in range(1, 7):
return sum(d for d in dice if d == category.value)
if category is Category.YACHT:
return 50 if len(die_counts) == 1 else 0
if category is Category.CHOICE:
return sum(dice)
if category is Category.BIG_STRAIGHT:
return 30 if 1 not in die_counts and len(die_counts) == 5 else 0
if category is Category.LITTLE_STRAIGHT:
return 30 if 6 not in die_counts and len(die_counts) == 5 else 0
if category is Category.FULL_HOUSE:
return sum(dice) if len(die_counts) == 2 and 3 in die_counts.values(
) else 0
if category is Category.FOUR_OF_A_KIND:
four_die = [d for d, c in die_counts.items() if c >= 4]
return four_die[0] * 4 if four_die else 0
|
flexible
|
{
"blob_id": "40bc8122d98d407341a56251f9abfab019e0acd8",
"index": 625,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Category(Enum):\n ONES = 1\n TWOS = 2\n THREES = 3\n FOURS = 4\n FIVES = 5\n SIXES = 6\n YACHT = auto()\n FULL_HOUSE = auto()\n FOUR_OF_A_KIND = auto()\n LITTLE_STRAIGHT = auto()\n BIG_STRAIGHT = auto()\n CHOICE = auto()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Category(Enum):\n ONES = 1\n TWOS = 2\n THREES = 3\n FOURS = 4\n FIVES = 5\n SIXES = 6\n YACHT = auto()\n FULL_HOUSE = auto()\n FOUR_OF_A_KIND = auto()\n LITTLE_STRAIGHT = auto()\n BIG_STRAIGHT = auto()\n CHOICE = auto()\n\n\ndef score(dice, category):\n die_counts = Counter(dice)\n if category.value in range(1, 7):\n return sum(d for d in dice if d == category.value)\n if category is Category.YACHT:\n return 50 if len(die_counts) == 1 else 0\n if category is Category.CHOICE:\n return sum(dice)\n if category is Category.BIG_STRAIGHT:\n return 30 if 1 not in die_counts and len(die_counts) == 5 else 0\n if category is Category.LITTLE_STRAIGHT:\n return 30 if 6 not in die_counts and len(die_counts) == 5 else 0\n if category is Category.FULL_HOUSE:\n return sum(dice) if len(die_counts) == 2 and 3 in die_counts.values(\n ) else 0\n if category is Category.FOUR_OF_A_KIND:\n four_die = [d for d, c in die_counts.items() if c >= 4]\n return four_die[0] * 4 if four_die else 0\n",
"step-4": "from collections import Counter\nfrom enum import auto, Enum\n\n\nclass Category(Enum):\n ONES = 1\n TWOS = 2\n THREES = 3\n FOURS = 4\n FIVES = 5\n SIXES = 6\n YACHT = auto()\n FULL_HOUSE = auto()\n FOUR_OF_A_KIND = auto()\n LITTLE_STRAIGHT = auto()\n BIG_STRAIGHT = auto()\n CHOICE = auto()\n\n\ndef score(dice, category):\n die_counts = Counter(dice)\n if category.value in range(1, 7):\n return sum(d for d in dice if d == category.value)\n if category is Category.YACHT:\n return 50 if len(die_counts) == 1 else 0\n if category is Category.CHOICE:\n return sum(dice)\n if category is Category.BIG_STRAIGHT:\n return 30 if 1 not in die_counts and len(die_counts) == 5 else 0\n if category is Category.LITTLE_STRAIGHT:\n return 30 if 6 not in die_counts and len(die_counts) == 5 else 0\n if category is Category.FULL_HOUSE:\n return sum(dice) if len(die_counts) == 2 and 3 in die_counts.values(\n ) else 0\n if category is Category.FOUR_OF_A_KIND:\n four_die = [d for d, c in die_counts.items() if c >= 4]\n return four_die[0] * 4 if four_die else 0\n",
"step-5": null,
"step-ids": [
0,
2,
3,
4
]
}
|
[
0,
2,
3,
4
] |
__author__ = 'xcbtrader'
# -*- coding: utf-8 -*-
from bitcoin import *
def crear_addr_word(word):
priv = sha256(word)
pub = privtopub(priv)
addr = pubtoaddr(pub)
wif = encode_privkey(priv, 'wif')
return addr, priv, wif
word = input('Entra la palabra para crear direccion bitcoin:? ')
addr, priv, wif = crear_addr_word(word)
print('####################################################')
print('WORD: ' + word)
print('ADDR: ' + addr)
print('PRIV: ' + priv)
print('WIF: ' + wif)
print('####################################################')
|
normal
|
{
"blob_id": "cc7a44754dc1371733420fd3a1e51ab6b5e7c4d8",
"index": 6898,
"step-1": "<mask token>\n\n\ndef crear_addr_word(word):\n priv = sha256(word)\n pub = privtopub(priv)\n addr = pubtoaddr(pub)\n wif = encode_privkey(priv, 'wif')\n return addr, priv, wif\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef crear_addr_word(word):\n priv = sha256(word)\n pub = privtopub(priv)\n addr = pubtoaddr(pub)\n wif = encode_privkey(priv, 'wif')\n return addr, priv, wif\n\n\n<mask token>\nprint('####################################################')\nprint('WORD: ' + word)\nprint('ADDR: ' + addr)\nprint('PRIV: ' + priv)\nprint('WIF: ' + wif)\nprint('####################################################')\n",
"step-3": "__author__ = 'xcbtrader'\n<mask token>\n\n\ndef crear_addr_word(word):\n priv = sha256(word)\n pub = privtopub(priv)\n addr = pubtoaddr(pub)\n wif = encode_privkey(priv, 'wif')\n return addr, priv, wif\n\n\nword = input('Entra la palabra para crear direccion bitcoin:? ')\naddr, priv, wif = crear_addr_word(word)\nprint('####################################################')\nprint('WORD: ' + word)\nprint('ADDR: ' + addr)\nprint('PRIV: ' + priv)\nprint('WIF: ' + wif)\nprint('####################################################')\n",
"step-4": "__author__ = 'xcbtrader'\nfrom bitcoin import *\n\n\ndef crear_addr_word(word):\n priv = sha256(word)\n pub = privtopub(priv)\n addr = pubtoaddr(pub)\n wif = encode_privkey(priv, 'wif')\n return addr, priv, wif\n\n\nword = input('Entra la palabra para crear direccion bitcoin:? ')\naddr, priv, wif = crear_addr_word(word)\nprint('####################################################')\nprint('WORD: ' + word)\nprint('ADDR: ' + addr)\nprint('PRIV: ' + priv)\nprint('WIF: ' + wif)\nprint('####################################################')\n",
"step-5": "__author__ = 'xcbtrader'\n# -*- coding: utf-8 -*-\n\nfrom bitcoin import *\n\ndef crear_addr_word(word):\n\tpriv = sha256(word)\n\tpub = privtopub(priv)\n\taddr = pubtoaddr(pub)\n\twif = encode_privkey(priv, 'wif')\n\treturn addr, priv, wif\n\nword = input('Entra la palabra para crear direccion bitcoin:? ')\naddr, priv, wif = crear_addr_word(word)\nprint('####################################################')\nprint('WORD: ' + word)\nprint('ADDR: ' + addr)\nprint('PRIV: ' + priv)\nprint('WIF: ' + wif)\nprint('####################################################')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def test_line():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line.start == p1
assert line.end == p2
<|reserved_special_token_0|>
def test___getitem__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line[0] == p1
assert line[1] == p2
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_line():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line.start == p1
assert line.end == p2
def test_equality():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert (p1, p2) == line
assert line == Line(p1, p2)
assert line != (p2, p1)
assert line != 1
<|reserved_special_token_0|>
def test___getitem__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line[0] == p1
assert line[1] == p2
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_line():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line.start == p1
assert line.end == p2
def test_equality():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert (p1, p2) == line
assert line == Line(p1, p2)
assert line != (p2, p1)
assert line != 1
def test___repr__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line == eval(repr(line))
def test___getitem__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line[0] == p1
assert line[1] == p2
<|reserved_special_token_1|>
from compas.geometry import Line
from compas.geometry import Point
def test_line():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line.start == p1
assert line.end == p2
def test_equality():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert (p1, p2) == line
assert line == Line(p1, p2)
assert line != (p2, p1)
assert line != 1
def test___repr__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line == eval(repr(line))
def test___getitem__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line[0] == p1
assert line[1] == p2
<|reserved_special_token_1|>
from compas.geometry import Line
# This import is use to test __repr__.
from compas.geometry import Point # noqa: F401
def test_line():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line.start == p1
assert line.end == p2
def test_equality():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert (p1, p2) == line
assert line == Line(p1, p2)
assert line != (p2, p1)
assert line != 1
def test___repr__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line == eval(repr(line))
def test___getitem__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line[0] == p1
assert line[1] == p2
|
flexible
|
{
"blob_id": "03629e62b11e66eeb0e111fee551c75c8463cbb8",
"index": 1059,
"step-1": "<mask token>\n\n\ndef test_line():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line.start == p1\n assert line.end == p2\n\n\n<mask token>\n\n\ndef test___getitem__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line[0] == p1\n assert line[1] == p2\n",
"step-2": "<mask token>\n\n\ndef test_line():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line.start == p1\n assert line.end == p2\n\n\ndef test_equality():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert (p1, p2) == line\n assert line == Line(p1, p2)\n assert line != (p2, p1)\n assert line != 1\n\n\n<mask token>\n\n\ndef test___getitem__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line[0] == p1\n assert line[1] == p2\n",
"step-3": "<mask token>\n\n\ndef test_line():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line.start == p1\n assert line.end == p2\n\n\ndef test_equality():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert (p1, p2) == line\n assert line == Line(p1, p2)\n assert line != (p2, p1)\n assert line != 1\n\n\ndef test___repr__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line == eval(repr(line))\n\n\ndef test___getitem__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line[0] == p1\n assert line[1] == p2\n",
"step-4": "from compas.geometry import Line\nfrom compas.geometry import Point\n\n\ndef test_line():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line.start == p1\n assert line.end == p2\n\n\ndef test_equality():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert (p1, p2) == line\n assert line == Line(p1, p2)\n assert line != (p2, p1)\n assert line != 1\n\n\ndef test___repr__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line == eval(repr(line))\n\n\ndef test___getitem__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line[0] == p1\n assert line[1] == p2\n",
"step-5": "from compas.geometry import Line\n\n# This import is use to test __repr__.\nfrom compas.geometry import Point # noqa: F401\n\n\ndef test_line():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line.start == p1\n assert line.end == p2\n\n\ndef test_equality():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert (p1, p2) == line\n assert line == Line(p1, p2)\n assert line != (p2, p1)\n assert line != 1\n\n\ndef test___repr__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line == eval(repr(line))\n\n\ndef test___getitem__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line[0] == p1\n assert line[1] == p2\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def find_lt(a, x):
"""
Find rightmost value less than x in list a
Input: list a and value x
Output: rightmost value less than x in a
"""
i = bisect_left(a, x)
if i:
return a[i - 1]
raise ValueError
def find_ge(a, x):
"""
Find leftmost item greater than or equal to x in list a
Input: list a and value x
Output: leftmost value less than or equal to x in a
"""
i = bisect_left(a, x)
if i != len(a):
return a[i]
raise ValueError
def get_altL(fn):
"""
Make a list of alternate allele frequencies and number of reads
Input: tsv file with reference freq in first column and alterate freq in second column
Output: a list of tuples with number of reads and alternate allele frequency
"""
f = open(fn, 'r')
linesL = [x.strip().split('\t') for x in f.readlines()]
f.close()
if linesL[0][0][0] == '#':
linesL = linesL[1:]
for i in range(len(linesL)):
if linesL[i][4] == '0':
linesL[i][4] = '1'
return zip([(int(x[4]) + int(x[5])) for x in linesL], [(float(x[5]) / (
float(x[4]) + float(x[5]))) for x in linesL])
def generate_possible_freqL(pL, sL, er):
"""
Generate list of possible allele frequencies
Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate
Output: list of possible allele frequences
"""
h = sum(pL)
L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]
M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]
p_freqL = []
for i in range(len(pL)):
p_freqL += [sL[i] / pL[i]] * pL[i]
p_freqA = np.array(p_freqL)
sA = np.array(sL)
aL = []
for g in M:
aL.append(sum(np.array([int(x) for x in list(g)]) * p_freqL))
return sorted(list(set(aL + [er, 1 - er])))
def freq_to_genotype(pL, sL, er):
"""
Creates dict of expected alternate allele frequencies and consistent genotypes
Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate
Output: dict of expected alternate allele frequencies and consistent genotypes. Genotypes represented as binary strings in the order of the ploidy list
"""
h = sum(pL)
L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]
M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]
p_freqL = []
for i in range(len(pL)):
p_freqL += [sL[i] / pL[i]] * pL[i]
p_freqA = np.array(p_freqL)
sA = np.array(sL)
aD = {}
for g in M:
alt_freq = sum(np.array([int(x) for x in list(g)]) * p_freqL)
if aD.has_key(alt_freq):
aD[alt_freq].append(g)
else:
aD[alt_freq] = [g]
aD[er] = ['0' * (len(L[-1]) - 1) + bin(0)[2:]]
aD[1 - er] = [bin(2 ** h - 1)[2:]]
return aD
<|reserved_special_token_0|>
def grid_search_parameters(step):
"""
Make a list of parameters to try
Input: step size
Output: subpopulation frequencies to try
"""
f1 = list(np.arange(step, 1, step))
f2 = list(np.arange(step, 1, step))
f2.reverse()
return zip(f1, f2)
def estimate_genotype(alt_freq, exp_freqL):
"""
Maximum likelihood estimator of alt_freq given possibilities in exp_freqL
Input: observed alternate frequency and list of expected alternate frequencies
Output: ML estimator of true alternate allele frequency
"""
try:
i = find_lt(exp_freqL, alt_freq)
except ValueError:
i = float('-inf')
try:
j = find_ge(exp_freqL, alt_freq)
except ValueError:
j = float('inf')
if alt_freq - i < j - alt_freq:
return i
else:
return j
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def find_lt(a, x):
"""
Find rightmost value less than x in list a
Input: list a and value x
Output: rightmost value less than x in a
"""
i = bisect_left(a, x)
if i:
return a[i - 1]
raise ValueError
def find_ge(a, x):
"""
Find leftmost item greater than or equal to x in list a
Input: list a and value x
Output: leftmost value less than or equal to x in a
"""
i = bisect_left(a, x)
if i != len(a):
return a[i]
raise ValueError
def get_altL(fn):
"""
Make a list of alternate allele frequencies and number of reads
Input: tsv file with reference freq in first column and alterate freq in second column
Output: a list of tuples with number of reads and alternate allele frequency
"""
f = open(fn, 'r')
linesL = [x.strip().split('\t') for x in f.readlines()]
f.close()
if linesL[0][0][0] == '#':
linesL = linesL[1:]
for i in range(len(linesL)):
if linesL[i][4] == '0':
linesL[i][4] = '1'
return zip([(int(x[4]) + int(x[5])) for x in linesL], [(float(x[5]) / (
float(x[4]) + float(x[5]))) for x in linesL])
def generate_possible_freqL(pL, sL, er):
"""
Generate list of possible allele frequencies
Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate
Output: list of possible allele frequences
"""
h = sum(pL)
L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]
M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]
p_freqL = []
for i in range(len(pL)):
p_freqL += [sL[i] / pL[i]] * pL[i]
p_freqA = np.array(p_freqL)
sA = np.array(sL)
aL = []
for g in M:
aL.append(sum(np.array([int(x) for x in list(g)]) * p_freqL))
return sorted(list(set(aL + [er, 1 - er])))
def freq_to_genotype(pL, sL, er):
"""
Creates dict of expected alternate allele frequencies and consistent genotypes
Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate
Output: dict of expected alternate allele frequencies and consistent genotypes. Genotypes represented as binary strings in the order of the ploidy list
"""
h = sum(pL)
L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]
M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]
p_freqL = []
for i in range(len(pL)):
p_freqL += [sL[i] / pL[i]] * pL[i]
p_freqA = np.array(p_freqL)
sA = np.array(sL)
aD = {}
for g in M:
alt_freq = sum(np.array([int(x) for x in list(g)]) * p_freqL)
if aD.has_key(alt_freq):
aD[alt_freq].append(g)
else:
aD[alt_freq] = [g]
aD[er] = ['0' * (len(L[-1]) - 1) + bin(0)[2:]]
aD[1 - er] = [bin(2 ** h - 1)[2:]]
return aD
def collapse_genotypes(pL, gL):
"""
Reduces a list of genotypes to distinct genotypes given ploidy
Input: ploidy list pL and list of genotypes gL where each genotype is a binary string ordered according to ploidy list
Output: genotype list with non-redundant genotypes
"""
if len(gL) < 2:
return gL
else:
uniqueL = []
for g in gL:
s = ''
for i in xrange(len(pL)):
s += ''.join(sorted(g[0:pL[i]]))
g = g[pL[i]:]
if s not in uniqueL:
uniqueL.append(s)
return uniqueL
def grid_search_parameters(step):
"""
Make a list of parameters to try
Input: step size
Output: subpopulation frequencies to try
"""
f1 = list(np.arange(step, 1, step))
f2 = list(np.arange(step, 1, step))
f2.reverse()
return zip(f1, f2)
def estimate_genotype(alt_freq, exp_freqL):
"""
Maximum likelihood estimator of alt_freq given possibilities in exp_freqL
Input: observed alternate frequency and list of expected alternate frequencies
Output: ML estimator of true alternate allele frequency
"""
try:
i = find_lt(exp_freqL, alt_freq)
except ValueError:
i = float('-inf')
try:
j = find_ge(exp_freqL, alt_freq)
except ValueError:
j = float('inf')
if alt_freq - i < j - alt_freq:
return i
else:
return j
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def find_lt(a, x):
"""
Find rightmost value less than x in list a
Input: list a and value x
Output: rightmost value less than x in a
"""
i = bisect_left(a, x)
if i:
return a[i - 1]
raise ValueError
def find_ge(a, x):
"""
Find leftmost item greater than or equal to x in list a
Input: list a and value x
Output: leftmost value less than or equal to x in a
"""
i = bisect_left(a, x)
if i != len(a):
return a[i]
raise ValueError
def get_altL(fn):
"""
Make a list of alternate allele frequencies and number of reads
Input: tsv file with reference freq in first column and alterate freq in second column
Output: a list of tuples with number of reads and alternate allele frequency
"""
f = open(fn, 'r')
linesL = [x.strip().split('\t') for x in f.readlines()]
f.close()
if linesL[0][0][0] == '#':
linesL = linesL[1:]
for i in range(len(linesL)):
if linesL[i][4] == '0':
linesL[i][4] = '1'
return zip([(int(x[4]) + int(x[5])) for x in linesL], [(float(x[5]) / (
float(x[4]) + float(x[5]))) for x in linesL])
def generate_possible_freqL(pL, sL, er):
"""
Generate list of possible allele frequencies
Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate
Output: list of possible allele frequences
"""
h = sum(pL)
L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]
M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]
p_freqL = []
for i in range(len(pL)):
p_freqL += [sL[i] / pL[i]] * pL[i]
p_freqA = np.array(p_freqL)
sA = np.array(sL)
aL = []
for g in M:
aL.append(sum(np.array([int(x) for x in list(g)]) * p_freqL))
return sorted(list(set(aL + [er, 1 - er])))
def freq_to_genotype(pL, sL, er):
"""
Creates dict of expected alternate allele frequencies and consistent genotypes
Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate
Output: dict of expected alternate allele frequencies and consistent genotypes. Genotypes represented as binary strings in the order of the ploidy list
"""
h = sum(pL)
L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]
M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]
p_freqL = []
for i in range(len(pL)):
p_freqL += [sL[i] / pL[i]] * pL[i]
p_freqA = np.array(p_freqL)
sA = np.array(sL)
aD = {}
for g in M:
alt_freq = sum(np.array([int(x) for x in list(g)]) * p_freqL)
if aD.has_key(alt_freq):
aD[alt_freq].append(g)
else:
aD[alt_freq] = [g]
aD[er] = ['0' * (len(L[-1]) - 1) + bin(0)[2:]]
aD[1 - er] = [bin(2 ** h - 1)[2:]]
return aD
def collapse_genotypes(pL, gL):
"""
Reduces a list of genotypes to distinct genotypes given ploidy
Input: ploidy list pL and list of genotypes gL where each genotype is a binary string ordered according to ploidy list
Output: genotype list with non-redundant genotypes
"""
if len(gL) < 2:
return gL
else:
uniqueL = []
for g in gL:
s = ''
for i in xrange(len(pL)):
s += ''.join(sorted(g[0:pL[i]]))
g = g[pL[i]:]
if s not in uniqueL:
uniqueL.append(s)
return uniqueL
def grid_search_parameters(step):
"""
Make a list of parameters to try
Input: step size
Output: subpopulation frequencies to try
"""
f1 = list(np.arange(step, 1, step))
f2 = list(np.arange(step, 1, step))
f2.reverse()
return zip(f1, f2)
def estimate_genotype(alt_freq, exp_freqL):
"""
Maximum likelihood estimator of alt_freq given possibilities in exp_freqL
Input: observed alternate frequency and list of expected alternate frequencies
Output: ML estimator of true alternate allele frequency
"""
try:
i = find_lt(exp_freqL, alt_freq)
except ValueError:
i = float('-inf')
try:
j = find_ge(exp_freqL, alt_freq)
except ValueError:
j = float('inf')
if alt_freq - i < j - alt_freq:
return i
else:
return j
def main():
ploidyL = [2, 2]
error_rate = 0.001
cov_cutoff = 4
parser = argparse.ArgumentParser(description=
'This script determines the relative frequencies of different populations and estimates the genotypes.'
)
parser.add_argument('infile', help=
'Input tsv file. Columns should be: chrom, position, ref base, alt base, number of reads supporting reference, number of reads supporting alternate.'
)
parser.add_argument('-o', nargs='?', type=argparse.FileType('w'),
default=sys.stdout, help='Output file. Default: standard out')
parser.add_argument('-pL', default=ploidyL, type=int, nargs='+', help=
'A list of ploidies. Each entry in the list represents the anticipated ploidy of a subpopulation. For instance, if you expect two diploid subpopulations and one triploid subpopulation, enter 2 2 3. Default: {0}'
.format(' '.join([str(x) for x in ploidyL])))
parser.add_argument('-er', default=error_rate, type=float, help=
'Sequencing error rate. For instance, 0.01 means that 1/100 base calls will be incorrect. Default: {0}'
.format(error_rate))
parser.add_argument('-cc', default=cov_cutoff, type=int, help=
'Coverage cutoff. If the coverage of either the alternate or reference allele is less than or equal to this value, the site will not be considered as a variant site. Default: {0}'
.format(cov_cutoff))
parser.add_argument('-d', action='store_true', help=
'Enable python debugger.')
args = parser.parse_args()
inN = args.infile
outF = args.o
ploidyL = args.pL
error_rate = args.er
debug = args.d
inN = os.path.realpath(inN)
if len(ploidyL) > 2:
print >> sys.stderr, 'Sorry, only two subpopulations are currently supported.'
sys.exit(1)
altL = get_altL(inN)
tempL = []
for a in altL:
if a[0] * a[1] > cov_cutoff and a[0] * (1 - a[1]) > cov_cutoff and a[0
] > cov_cutoff:
tempL.append(a)
altL = tempL
parL = grid_search_parameters(0.01)
best_par = []
best_ll = float('-inf')
for par in parL:
exp_freqL = generate_possible_freqL(ploidyL, par, error_rate)
ll = 0
for alt in altL:
exp_freq = estimate_genotype(alt[1], exp_freqL)
ll += np.log(binom.pmf(round(alt[0] * alt[1]), alt[0], exp_freq))
if ll > best_ll:
best_ll = ll
best_par = par
altD = freq_to_genotype(ploidyL, best_par, error_rate)
for k in altD.keys():
altD[k] = collapse_genotypes(ploidyL, altD[k])
exp_freqL = sorted(altD.keys())
print >> outF, '#log-likelihood\t{0}\n#population frequencies\t{1}'.format(
best_ll, '\t'.join([str(x) for x in best_par]))
inF = open(inN, 'r')
linesL = inF.readlines()
inF.close()
if linesL[0][0] == '#':
linesL = linesL[1:]
for i in xrange(len(altL)):
alt = altL[i]
[chr, pos, refbase, altbase, refcov, altcov] = linesL[i].strip().split(
'\t')
genotypeL = altD[estimate_genotype(alt[1], exp_freqL)]
for g in genotypeL:
g = re.sub('0', refbase, g)
g = re.sub('1', altbase, g)
tempL = []
for i in xrange(len(ploidyL)):
tempL.append(g[0:ploidyL[i]])
g = g[ploidyL[i]:]
print >> outF, '\t'.join([chr, pos] + tempL)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import sys, argparse, pdb, glob, os, re
import numpy as np
from bisect import bisect_left
from scipy.stats import binom
def find_lt(a, x):
"""
Find rightmost value less than x in list a
Input: list a and value x
Output: rightmost value less than x in a
"""
i = bisect_left(a, x)
if i:
return a[i - 1]
raise ValueError
def find_ge(a, x):
"""
Find leftmost item greater than or equal to x in list a
Input: list a and value x
Output: leftmost value less than or equal to x in a
"""
i = bisect_left(a, x)
if i != len(a):
return a[i]
raise ValueError
def get_altL(fn):
"""
Make a list of alternate allele frequencies and number of reads
Input: tsv file with reference freq in first column and alterate freq in second column
Output: a list of tuples with number of reads and alternate allele frequency
"""
f = open(fn, 'r')
linesL = [x.strip().split('\t') for x in f.readlines()]
f.close()
if linesL[0][0][0] == '#':
linesL = linesL[1:]
for i in range(len(linesL)):
if linesL[i][4] == '0':
linesL[i][4] = '1'
return zip([(int(x[4]) + int(x[5])) for x in linesL], [(float(x[5]) / (
float(x[4]) + float(x[5]))) for x in linesL])
def generate_possible_freqL(pL, sL, er):
"""
Generate list of possible allele frequencies
Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate
Output: list of possible allele frequences
"""
h = sum(pL)
L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]
M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]
p_freqL = []
for i in range(len(pL)):
p_freqL += [sL[i] / pL[i]] * pL[i]
p_freqA = np.array(p_freqL)
sA = np.array(sL)
aL = []
for g in M:
aL.append(sum(np.array([int(x) for x in list(g)]) * p_freqL))
return sorted(list(set(aL + [er, 1 - er])))
def freq_to_genotype(pL, sL, er):
"""
Creates dict of expected alternate allele frequencies and consistent genotypes
Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate
Output: dict of expected alternate allele frequencies and consistent genotypes. Genotypes represented as binary strings in the order of the ploidy list
"""
h = sum(pL)
L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]
M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]
p_freqL = []
for i in range(len(pL)):
p_freqL += [sL[i] / pL[i]] * pL[i]
p_freqA = np.array(p_freqL)
sA = np.array(sL)
aD = {}
for g in M:
alt_freq = sum(np.array([int(x) for x in list(g)]) * p_freqL)
if aD.has_key(alt_freq):
aD[alt_freq].append(g)
else:
aD[alt_freq] = [g]
aD[er] = ['0' * (len(L[-1]) - 1) + bin(0)[2:]]
aD[1 - er] = [bin(2 ** h - 1)[2:]]
return aD
def collapse_genotypes(pL, gL):
"""
Reduces a list of genotypes to distinct genotypes given ploidy
Input: ploidy list pL and list of genotypes gL where each genotype is a binary string ordered according to ploidy list
Output: genotype list with non-redundant genotypes
"""
if len(gL) < 2:
return gL
else:
uniqueL = []
for g in gL:
s = ''
for i in xrange(len(pL)):
s += ''.join(sorted(g[0:pL[i]]))
g = g[pL[i]:]
if s not in uniqueL:
uniqueL.append(s)
return uniqueL
def grid_search_parameters(step):
"""
Make a list of parameters to try
Input: step size
Output: subpopulation frequencies to try
"""
f1 = list(np.arange(step, 1, step))
f2 = list(np.arange(step, 1, step))
f2.reverse()
return zip(f1, f2)
def estimate_genotype(alt_freq, exp_freqL):
"""
Maximum likelihood estimator of alt_freq given possibilities in exp_freqL
Input: observed alternate frequency and list of expected alternate frequencies
Output: ML estimator of true alternate allele frequency
"""
try:
i = find_lt(exp_freqL, alt_freq)
except ValueError:
i = float('-inf')
try:
j = find_ge(exp_freqL, alt_freq)
except ValueError:
j = float('inf')
if alt_freq - i < j - alt_freq:
return i
else:
return j
def main():
    """
    Command-line entry point.

    Reads a variant tsv, grid-searches subpopulation frequency pairs by
    binomial log-likelihood, then prints the best-fit frequencies and the
    genotypes consistent with each retained variant site.
    """
    # Defaults; all three can be overridden from the command line.
    ploidyL = [2, 2]
    error_rate = 0.001
    cov_cutoff = 4
    parser = argparse.ArgumentParser(description=
        'This script determines the relative frequencies of different populations and estimates the genotypes.'
        )
    parser.add_argument('infile', help=
        'Input tsv file. Columns should be: chrom, position, ref base, alt base, number of reads supporting reference, number of reads supporting alternate.'
        )
    parser.add_argument('-o', nargs='?', type=argparse.FileType('w'),
        default=sys.stdout, help='Output file. Default: standard out')
    parser.add_argument('-pL', default=ploidyL, type=int, nargs='+', help=
        'A list of ploidies. Each entry in the list represents the anticipated ploidy of a subpopulation. For instance, if you expect two diploid subpopulations and one triploid subpopulation, enter 2 2 3. Default: {0}'
        .format(' '.join([str(x) for x in ploidyL])))
    parser.add_argument('-er', default=error_rate, type=float, help=
        'Sequencing error rate. For instance, 0.01 means that 1/100 base calls will be incorrect. Default: {0}'
        .format(error_rate))
    parser.add_argument('-cc', default=cov_cutoff, type=int, help=
        'Coverage cutoff. If the coverage of either the alternate or reference allele is less than or equal to this value, the site will not be considered as a variant site. Default: {0}'
        .format(cov_cutoff))
    parser.add_argument('-d', action='store_true', help=
        'Enable python debugger.')
    args = parser.parse_args()
    inN = args.infile
    outF = args.o
    ploidyL = args.pL
    error_rate = args.er
    debug = args.d
    inN = os.path.realpath(inN)
    if len(ploidyL) > 2:
        print >> sys.stderr, 'Sorry, only two subpopulations are currently supported.'
        sys.exit(1)
    # altL holds (total reads, alt allele frequency) tuples; keep only sites
    # where ref reads, alt reads, and total coverage all exceed the cutoff.
    altL = get_altL(inN)
    tempL = []
    for a in altL:
        if a[0] * a[1] > cov_cutoff and a[0] * (1 - a[1]) > cov_cutoff and a[0
            ] > cov_cutoff:
            tempL.append(a)
    altL = tempL
    # Grid search over subpopulation frequency pairs; keep the pair with the
    # highest total binomial log-likelihood across all retained sites.
    parL = grid_search_parameters(0.01)
    best_par = []
    best_ll = float('-inf')
    for par in parL:
        exp_freqL = generate_possible_freqL(ploidyL, par, error_rate)
        ll = 0  # running log-likelihood for this parameter pair
        for alt in altL:
            exp_freq = estimate_genotype(alt[1], exp_freqL)
            # successes = alt-supporting reads, trials = total reads,
            # p = ML-estimated expected alternate frequency.
            ll += np.log(binom.pmf(round(alt[0] * alt[1]), alt[0], exp_freq))
        if ll > best_ll:
            best_ll = ll
            best_par = par
    # Genotype assignment under the best-fit population frequencies.
    altD = freq_to_genotype(ploidyL, best_par, error_rate)
    for k in altD.keys():
        altD[k] = collapse_genotypes(ploidyL, altD[k])
    exp_freqL = sorted(altD.keys())
    print >> outF, '#log-likelihood\t{0}\n#population frequencies\t{1}'.format(
        best_ll, '\t'.join([str(x) for x in best_par]))
    inF = open(inN, 'r')
    linesL = inF.readlines()
    inF.close()
    if linesL[0][0] == '#':
        linesL = linesL[1:]
    # NOTE(review): linesL is not filtered by the coverage cutoff, so
    # linesL[i] may not correspond to altL[i] once sites were dropped above
    # — verify the intended alignment.
    for i in xrange(len(altL)):
        alt = altL[i]
        [chr, pos, refbase, altbase, refcov, altcov] = linesL[i].strip().split(
            '\t')
        genotypeL = altD[estimate_genotype(alt[1], exp_freqL)]
        for g in genotypeL:
            # Render the binary genotype with the actual bases.
            g = re.sub('0', refbase, g)
            g = re.sub('1', altbase, g)
            tempL = []
            # NOTE(review): this inner loop reuses the name i, shadowing the
            # outer site index (loop progression is unaffected, but confusing).
            for i in xrange(len(ploidyL)):
                tempL.append(g[0:ploidyL[i]])
                g = g[ploidyL[i]:]
            print >> outF, '\t'.join([chr, pos] + tempL)
# Script entry point: run main() only when executed directly, not on import.
if __name__ == '__main__':
    main()
<|reserved_special_token_1|>
# Chris DeBoever
# cdeboeve@ucsd.edu
import sys, argparse, pdb, glob, os, re
import numpy as np
from bisect import bisect_left
from scipy.stats import binom
### helper functions ###
def find_lt(a, x):
    """
    Find the rightmost value in sorted list a that is strictly less than x.

    Input: sorted list a and value x
    Output: rightmost value less than x in a
    Raises: ValueError when no element of a is less than x
    """
    insertion_point = bisect_left(a, x)
    if insertion_point == 0:
        raise ValueError
    return a[insertion_point - 1]
def find_ge(a, x):
    """
    Find the leftmost value in sorted list a that is greater than or equal to x.

    Input: sorted list a and value x
    Output: leftmost value greater than or equal to x in a
    Raises: ValueError when every element of a is less than x
    """
    insertion_point = bisect_left(a, x)
    if insertion_point == len(a):
        raise ValueError
    return a[insertion_point]
def get_altL(fn):
    """
    Make a list of read counts and alternate allele frequencies.

    Input: tsv file whose columns are chrom, position, ref base, alt base,
    reads supporting reference (col 5), reads supporting alternate (col 6).
    An optional '#'-prefixed header line is skipped.
    Output: list of (total reads, alternate allele frequency) tuples
    """
    with open(fn, 'r') as f:
        linesL = [x.strip().split('\t') for x in f.readlines()]
    if linesL and linesL[0][0][0] == '#':
        linesL = linesL[1:]
    for i in range(len(linesL)):
        # If the reference-supporting count is 0, bump it to 1 to avoid
        # numeric issues downstream. NOTE(review): the original comment said
        # "alternate", but column index 4 is the reference count — verify
        # which column was intended.
        if linesL[i][4] == '0':
            linesL[i][4] = '1'
    return zip([int(x[4]) + int(x[5]) for x in linesL], [float(x[5]) / (
        float(x[4]) + float(x[5])) for x in linesL])
# def generate_cancer_possible_freqL(pL,sL,er):
# I want to make a function which generates the likely frequencies seen in a cancer sample. This would exclude double-hit mutations (i.e. a single site gains somatic mutations on both chromosomes). This simplification can only be made in the diploid case, however, because ploidy-variable populations might be weird...
def generate_possible_freqL(pL, sL, er):
    """
    Generate the sorted list of possible alternate allele frequencies.

    Input: ploidy list pL, subpopulation frequency list sL, and sequencing
    error rate er.
    Output: sorted list of distinct expected alternate allele frequencies,
    with er and 1 - er standing in for the 0% and 100% alternate cases.
    """
    h = sum(pL)  # total number of haplotypes
    # Mixed genotypes only: range excludes 0 and 2**h - 1 because we do not
    # want exact 0% or 100% allele frequency here.
    L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]
    M = ['0' * (len(L[-1]) - len(x)) + x for x in L]
    # Per-haplotype contribution to the alternate allele frequency.
    p_freqL = []
    for i in range(len(pL)):
        p_freqL += [sL[i] / pL[i]] * pL[i]
    aL = []
    for g in M:
        aL.append(sum(np.array([int(x) for x in list(g)]) * p_freqL))
    # Dead locals (p_freqA, sA) from the original were removed; sorted()
    # accepts the set directly.
    return sorted(set(aL + [er, 1 - er]))
def freq_to_genotype(pL, sL, er):
    """
    Create a dict of expected alternate allele frequencies and consistent genotypes.

    Input: ploidy list pL, subpopulation frequency list sL, and sequencing
    error rate er.
    Output: dict keyed by expected alternate allele frequency; each value is
    a list of genotypes represented as binary strings in the order of the
    ploidy list. The keys er and 1 - er stand in for the all-reference and
    all-alternate genotypes.
    """
    h = sum(pL)  # number of different haplotypes
    # Mixed genotypes only; the all-ref / all-alt genotypes are added below.
    L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]
    M = ['0' * (len(L[-1]) - len(x)) + x for x in L]
    # Per-haplotype contribution to the alternate allele frequency.
    p_freqL = []
    for i in range(len(pL)):
        p_freqL += [sL[i] / pL[i]] * pL[i]
    aD = {}
    for g in M:
        alt_freq = sum(np.array([int(x) for x in list(g)]) * p_freqL)
        # setdefault replaces the Python-2-only dict.has_key() membership test.
        aD.setdefault(alt_freq, []).append(g)
    aD[er] = ['0' * h]      # genotype for 0% alternate allele freq
    aD[1 - er] = ['1' * h]  # genotype for 100% alternate allele freq
    return aD
def collapse_genotypes(pL, gL):
    """
    Reduce a list of genotypes to distinct genotypes given ploidy.

    Allele order within one subpopulation carries no meaning, so each
    per-subpopulation slice is sorted into a canonical form first.

    Input: ploidy list pL and list of genotypes gL, where each genotype is a
    binary string ordered according to the ploidy list.
    Output: genotype list with non-redundant genotypes (first-seen order).
    """
    if len(gL) < 2:
        return gL
    uniqueL = []  # unique canonical genotypes relative to ploidy
    for g in gL:
        s = ''
        # Iterate ploidies directly instead of the Python-2-only xrange index.
        for ploidy in pL:
            s += ''.join(sorted(g[:ploidy]))
            g = g[ploidy:]
        if s not in uniqueL:
            uniqueL.append(s)
    return uniqueL
def grid_search_parameters(step):
    """
    Make the list of subpopulation-frequency parameter pairs to try.

    Input: step size
    Output: zipped pairs (f, 1 - f) spanning (0, 1) at the given resolution
    """
    ascending = list(np.arange(step, 1, step))
    descending = ascending[::-1]
    return zip(ascending, descending)
def estimate_genotype(alt_freq, exp_freqL):
    """
    Maximum likelihood estimator of alt_freq given possibilities in exp_freqL.

    Input: observed alternate frequency and a sorted list of expected
    alternate frequencies.
    Output: the expected frequency nearest to alt_freq; ties go to the
    larger neighbor.
    """
    # Neighbors on either side of alt_freq; infinities act as sentinels
    # when alt_freq falls beyond either end of the list.
    try:
        below = find_lt(exp_freqL, alt_freq)
    except ValueError:
        below = float('-inf')
    try:
        above = find_ge(exp_freqL, alt_freq)
    except ValueError:
        above = float('inf')
    return below if alt_freq - below < above - alt_freq else above
def main():
    """
    Command-line entry point.

    Reads a variant tsv, grid-searches subpopulation frequency pairs by
    binomial log-likelihood, then prints the best-fit frequencies and the
    genotypes consistent with each retained variant site.
    """
    ### magic variables ###
    # these variables can be set at the command line as well
    ploidyL = [2,2] # the entries in this list are the expected ploidy of each subpopulation. Default is two diploid subpopulations
    error_rate = 0.001 # sequencing error rate
    cov_cutoff = 4 # coverage cutoff for variant sites
    ### gather command line arguments ###
    parser = argparse.ArgumentParser(description='This script determines the relative frequencies of different populations and estimates the genotypes.')
    parser.add_argument('infile', help='Input tsv file. Columns should be: chrom, position, ref base, alt base, number of reads supporting reference, number of reads supporting alternate.')
    parser.add_argument('-o', nargs='?', type=argparse.FileType('w'),default=sys.stdout, help='Output file. Default: standard out')
    parser.add_argument('-pL', default=ploidyL, type=int, nargs='+', help='A list of ploidies. Each entry in the list represents the anticipated ploidy of a subpopulation. For instance, if you expect two diploid subpopulations and one triploid subpopulation, enter 2 2 3. Default: {0}'.format(' '.join([str(x) for x in ploidyL])))
    parser.add_argument('-er', default=error_rate, type=float, help='Sequencing error rate. For instance, 0.01 means that 1/100 base calls will be incorrect. Default: {0}'.format(error_rate))
    parser.add_argument('-cc', default=cov_cutoff, type=int, help='Coverage cutoff. If the coverage of either the alternate or reference allele is less than or equal to this value, the site will not be considered as a variant site. Default: {0}'.format(cov_cutoff))
    parser.add_argument('-d', action='store_true', help='Enable python debugger.')
    args = parser.parse_args()
    inN = args.infile
    outF = args.o
    ploidyL = args.pL
    error_rate = args.er
    debug = args.d
    inN = os.path.realpath(inN) # get the input file path
    if len(ploidyL) > 2:
        print >>sys.stderr, 'Sorry, only two subpopulations are currently supported.'
        sys.exit(1)
    altL = get_altL(inN) # a list of number of reads and alternate allele frequencies
    # Keep only sites where ref reads, alt reads, and total coverage all
    # exceed the cutoff.
    tempL = []
    for a in altL:
        if a[0]*a[1] > cov_cutoff and a[0]*(1-a[1]) > cov_cutoff and a[0] > cov_cutoff:
            tempL.append(a)
    altL = tempL
    ### find population frequencies ###
    parL = grid_search_parameters(0.01) # grid search
    best_par = []
    best_ll = float("-inf")
    for par in parL:
        exp_freqL = generate_possible_freqL(ploidyL,par,error_rate)
        ll = 0 # log-likelihood
        for alt in altL:
            exp_freq = estimate_genotype(alt[1],exp_freqL)
            ll += np.log(binom.pmf(round(alt[0]*alt[1]),alt[0],exp_freq))
            # round(alt[0]*alt[1]) is the number of reads we saw supporting alternate allele (i.e. the number of successes under the binomial test)
            # alt[0] is the total number of reads covering this site (i.e. the number of attempts in our binomial test)
            # exp_freq is our probability of success (i.e. observing a read supporting alternate) from our ML estimation (see estimate_genotype)
        if ll > best_ll:
            best_ll = ll
            best_par = par
    ### determine genotypes ###
    altD = freq_to_genotype(ploidyL,best_par,error_rate) # dict whose keys are alternate allele frequencies and whose values are lists of consistent genotypes
    for k in altD.keys():
        altD[k] = collapse_genotypes(ploidyL,altD[k])
    exp_freqL = sorted(altD.keys())
    print >>outF, '#log-likelihood\t{0}\n#population frequencies\t{1}'.format(best_ll,'\t'.join([ str(x) for x in best_par ]))
    inF = open(inN,'r')
    linesL = inF.readlines()
    inF.close()
    if linesL[0][0] == '#':
        linesL = linesL[1:]
    # NOTE(review): linesL is not filtered by the coverage cutoff, so
    # linesL[i] may not correspond to altL[i] when sites were dropped above
    # — verify the intended alignment.
    for i in xrange(len(altL)):
        alt = altL[i]
        [chr,pos,refbase,altbase,refcov,altcov] = linesL[i].strip().split('\t')
        genotypeL = altD[estimate_genotype(alt[1],exp_freqL)]
        for g in genotypeL:
            # Render the binary genotype with the actual bases.
            g = re.sub('0',refbase,g)
            g = re.sub('1',altbase,g)
            tempL = [] # each element of this list is the genotype of a population
            # NOTE(review): this inner loop reuses the name i, shadowing the
            # outer site index (loop progression is unaffected, but confusing).
            for i in xrange(len(ploidyL)):
                tempL.append(g[0:ploidyL[i]])
                g = g[ploidyL[i]:]
            print >>outF, '\t'.join([chr,pos] + tempL)
    # use best population frequency parameters and walk through sites, assign genotypes, p-values or scores maybe?
# Script entry point: run main() only when executed directly, not on import.
if __name__ == '__main__':
    main()
|
flexible
|
{
"blob_id": "da751e96c225ebc2d30f3cce01ba2f64d0a29257",
"index": 3763,
"step-1": "<mask token>\n\n\ndef find_lt(a, x):\n \"\"\"\n Find rightmost value less than x in list a\n Input: list a and value x\n Output: rightmost value less than x in a\n \"\"\"\n i = bisect_left(a, x)\n if i:\n return a[i - 1]\n raise ValueError\n\n\ndef find_ge(a, x):\n \"\"\"\n Find leftmost item greater than or equal to x in list a\n Input: list a and value x\n Output: leftmost value less than or equal to x in a\n \"\"\"\n i = bisect_left(a, x)\n if i != len(a):\n return a[i]\n raise ValueError\n\n\ndef get_altL(fn):\n \"\"\"\n Make a list of alternate allele frequencies and number of reads\n Input: tsv file with reference freq in first column and alterate freq in second column\n Output: a list of tuples with number of reads and alternate allele frequency\n \"\"\"\n f = open(fn, 'r')\n linesL = [x.strip().split('\\t') for x in f.readlines()]\n f.close()\n if linesL[0][0][0] == '#':\n linesL = linesL[1:]\n for i in range(len(linesL)):\n if linesL[i][4] == '0':\n linesL[i][4] = '1'\n return zip([(int(x[4]) + int(x[5])) for x in linesL], [(float(x[5]) / (\n float(x[4]) + float(x[5]))) for x in linesL])\n\n\ndef generate_possible_freqL(pL, sL, er):\n \"\"\"\n Generate list of possible allele frequencies\n Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate\n Output: list of possible allele frequences\n \"\"\"\n h = sum(pL)\n L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]\n M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]\n p_freqL = []\n for i in range(len(pL)):\n p_freqL += [sL[i] / pL[i]] * pL[i]\n p_freqA = np.array(p_freqL)\n sA = np.array(sL)\n aL = []\n for g in M:\n aL.append(sum(np.array([int(x) for x in list(g)]) * p_freqL))\n return sorted(list(set(aL + [er, 1 - er])))\n\n\ndef freq_to_genotype(pL, sL, er):\n \"\"\"\n Creates dict of expected alternate allele frequencies and consistent genotypes\n Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate\n Output: dict of expected 
alternate allele frequencies and consistent genotypes. Genotypes represented as binary strings in the order of the ploidy list\n \"\"\"\n h = sum(pL)\n L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]\n M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]\n p_freqL = []\n for i in range(len(pL)):\n p_freqL += [sL[i] / pL[i]] * pL[i]\n p_freqA = np.array(p_freqL)\n sA = np.array(sL)\n aD = {}\n for g in M:\n alt_freq = sum(np.array([int(x) for x in list(g)]) * p_freqL)\n if aD.has_key(alt_freq):\n aD[alt_freq].append(g)\n else:\n aD[alt_freq] = [g]\n aD[er] = ['0' * (len(L[-1]) - 1) + bin(0)[2:]]\n aD[1 - er] = [bin(2 ** h - 1)[2:]]\n return aD\n\n\n<mask token>\n\n\ndef grid_search_parameters(step):\n \"\"\"\n Make a list of parameters to try\n Input: step size\n Output: subpopulation frequencies to try\n \"\"\"\n f1 = list(np.arange(step, 1, step))\n f2 = list(np.arange(step, 1, step))\n f2.reverse()\n return zip(f1, f2)\n\n\ndef estimate_genotype(alt_freq, exp_freqL):\n \"\"\"\n Maximum likelihood estimator of alt_freq given possibilities in exp_freqL\n Input: observed alternate frequency and list of expected alternate frequencies\n Output: ML estimator of true alternate allele frequency\n \"\"\"\n try:\n i = find_lt(exp_freqL, alt_freq)\n except ValueError:\n i = float('-inf')\n try:\n j = find_ge(exp_freqL, alt_freq)\n except ValueError:\n j = float('inf')\n if alt_freq - i < j - alt_freq:\n return i\n else:\n return j\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_lt(a, x):\n \"\"\"\n Find rightmost value less than x in list a\n Input: list a and value x\n Output: rightmost value less than x in a\n \"\"\"\n i = bisect_left(a, x)\n if i:\n return a[i - 1]\n raise ValueError\n\n\ndef find_ge(a, x):\n \"\"\"\n Find leftmost item greater than or equal to x in list a\n Input: list a and value x\n Output: leftmost value less than or equal to x in a\n \"\"\"\n i = bisect_left(a, x)\n if i != len(a):\n return a[i]\n raise ValueError\n\n\ndef get_altL(fn):\n \"\"\"\n Make a list of alternate allele frequencies and number of reads\n Input: tsv file with reference freq in first column and alterate freq in second column\n Output: a list of tuples with number of reads and alternate allele frequency\n \"\"\"\n f = open(fn, 'r')\n linesL = [x.strip().split('\\t') for x in f.readlines()]\n f.close()\n if linesL[0][0][0] == '#':\n linesL = linesL[1:]\n for i in range(len(linesL)):\n if linesL[i][4] == '0':\n linesL[i][4] = '1'\n return zip([(int(x[4]) + int(x[5])) for x in linesL], [(float(x[5]) / (\n float(x[4]) + float(x[5]))) for x in linesL])\n\n\ndef generate_possible_freqL(pL, sL, er):\n \"\"\"\n Generate list of possible allele frequencies\n Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate\n Output: list of possible allele frequences\n \"\"\"\n h = sum(pL)\n L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]\n M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]\n p_freqL = []\n for i in range(len(pL)):\n p_freqL += [sL[i] / pL[i]] * pL[i]\n p_freqA = np.array(p_freqL)\n sA = np.array(sL)\n aL = []\n for g in M:\n aL.append(sum(np.array([int(x) for x in list(g)]) * p_freqL))\n return sorted(list(set(aL + [er, 1 - er])))\n\n\ndef freq_to_genotype(pL, sL, er):\n \"\"\"\n Creates dict of expected alternate allele frequencies and consistent genotypes\n Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate\n Output: dict of expected 
alternate allele frequencies and consistent genotypes. Genotypes represented as binary strings in the order of the ploidy list\n \"\"\"\n h = sum(pL)\n L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]\n M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]\n p_freqL = []\n for i in range(len(pL)):\n p_freqL += [sL[i] / pL[i]] * pL[i]\n p_freqA = np.array(p_freqL)\n sA = np.array(sL)\n aD = {}\n for g in M:\n alt_freq = sum(np.array([int(x) for x in list(g)]) * p_freqL)\n if aD.has_key(alt_freq):\n aD[alt_freq].append(g)\n else:\n aD[alt_freq] = [g]\n aD[er] = ['0' * (len(L[-1]) - 1) + bin(0)[2:]]\n aD[1 - er] = [bin(2 ** h - 1)[2:]]\n return aD\n\n\ndef collapse_genotypes(pL, gL):\n \"\"\"\n Reduces a list of genotypes to distinct genotypes given ploidy\n Input: ploidy list pL and list of genotypes gL where each genotype is a binary string ordered according to ploidy list\n Output: genotype list with non-redundant genotypes\n \"\"\"\n if len(gL) < 2:\n return gL\n else:\n uniqueL = []\n for g in gL:\n s = ''\n for i in xrange(len(pL)):\n s += ''.join(sorted(g[0:pL[i]]))\n g = g[pL[i]:]\n if s not in uniqueL:\n uniqueL.append(s)\n return uniqueL\n\n\ndef grid_search_parameters(step):\n \"\"\"\n Make a list of parameters to try\n Input: step size\n Output: subpopulation frequencies to try\n \"\"\"\n f1 = list(np.arange(step, 1, step))\n f2 = list(np.arange(step, 1, step))\n f2.reverse()\n return zip(f1, f2)\n\n\ndef estimate_genotype(alt_freq, exp_freqL):\n \"\"\"\n Maximum likelihood estimator of alt_freq given possibilities in exp_freqL\n Input: observed alternate frequency and list of expected alternate frequencies\n Output: ML estimator of true alternate allele frequency\n \"\"\"\n try:\n i = find_lt(exp_freqL, alt_freq)\n except ValueError:\n i = float('-inf')\n try:\n j = find_ge(exp_freqL, alt_freq)\n except ValueError:\n j = float('inf')\n if alt_freq - i < j - alt_freq:\n return i\n else:\n return j\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef find_lt(a, x):\n \"\"\"\n Find rightmost value less than x in list a\n Input: list a and value x\n Output: rightmost value less than x in a\n \"\"\"\n i = bisect_left(a, x)\n if i:\n return a[i - 1]\n raise ValueError\n\n\ndef find_ge(a, x):\n \"\"\"\n Find leftmost item greater than or equal to x in list a\n Input: list a and value x\n Output: leftmost value less than or equal to x in a\n \"\"\"\n i = bisect_left(a, x)\n if i != len(a):\n return a[i]\n raise ValueError\n\n\ndef get_altL(fn):\n \"\"\"\n Make a list of alternate allele frequencies and number of reads\n Input: tsv file with reference freq in first column and alterate freq in second column\n Output: a list of tuples with number of reads and alternate allele frequency\n \"\"\"\n f = open(fn, 'r')\n linesL = [x.strip().split('\\t') for x in f.readlines()]\n f.close()\n if linesL[0][0][0] == '#':\n linesL = linesL[1:]\n for i in range(len(linesL)):\n if linesL[i][4] == '0':\n linesL[i][4] = '1'\n return zip([(int(x[4]) + int(x[5])) for x in linesL], [(float(x[5]) / (\n float(x[4]) + float(x[5]))) for x in linesL])\n\n\ndef generate_possible_freqL(pL, sL, er):\n \"\"\"\n Generate list of possible allele frequencies\n Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate\n Output: list of possible allele frequences\n \"\"\"\n h = sum(pL)\n L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]\n M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]\n p_freqL = []\n for i in range(len(pL)):\n p_freqL += [sL[i] / pL[i]] * pL[i]\n p_freqA = np.array(p_freqL)\n sA = np.array(sL)\n aL = []\n for g in M:\n aL.append(sum(np.array([int(x) for x in list(g)]) * p_freqL))\n return sorted(list(set(aL + [er, 1 - er])))\n\n\ndef freq_to_genotype(pL, sL, er):\n \"\"\"\n Creates dict of expected alternate allele frequencies and consistent genotypes\n Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate\n Output: dict of expected 
alternate allele frequencies and consistent genotypes. Genotypes represented as binary strings in the order of the ploidy list\n \"\"\"\n h = sum(pL)\n L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]\n M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]\n p_freqL = []\n for i in range(len(pL)):\n p_freqL += [sL[i] / pL[i]] * pL[i]\n p_freqA = np.array(p_freqL)\n sA = np.array(sL)\n aD = {}\n for g in M:\n alt_freq = sum(np.array([int(x) for x in list(g)]) * p_freqL)\n if aD.has_key(alt_freq):\n aD[alt_freq].append(g)\n else:\n aD[alt_freq] = [g]\n aD[er] = ['0' * (len(L[-1]) - 1) + bin(0)[2:]]\n aD[1 - er] = [bin(2 ** h - 1)[2:]]\n return aD\n\n\ndef collapse_genotypes(pL, gL):\n \"\"\"\n Reduces a list of genotypes to distinct genotypes given ploidy\n Input: ploidy list pL and list of genotypes gL where each genotype is a binary string ordered according to ploidy list\n Output: genotype list with non-redundant genotypes\n \"\"\"\n if len(gL) < 2:\n return gL\n else:\n uniqueL = []\n for g in gL:\n s = ''\n for i in xrange(len(pL)):\n s += ''.join(sorted(g[0:pL[i]]))\n g = g[pL[i]:]\n if s not in uniqueL:\n uniqueL.append(s)\n return uniqueL\n\n\ndef grid_search_parameters(step):\n \"\"\"\n Make a list of parameters to try\n Input: step size\n Output: subpopulation frequencies to try\n \"\"\"\n f1 = list(np.arange(step, 1, step))\n f2 = list(np.arange(step, 1, step))\n f2.reverse()\n return zip(f1, f2)\n\n\ndef estimate_genotype(alt_freq, exp_freqL):\n \"\"\"\n Maximum likelihood estimator of alt_freq given possibilities in exp_freqL\n Input: observed alternate frequency and list of expected alternate frequencies\n Output: ML estimator of true alternate allele frequency\n \"\"\"\n try:\n i = find_lt(exp_freqL, alt_freq)\n except ValueError:\n i = float('-inf')\n try:\n j = find_ge(exp_freqL, alt_freq)\n except ValueError:\n j = float('inf')\n if alt_freq - i < j - alt_freq:\n return i\n else:\n return j\n\n\ndef main():\n ploidyL = [2, 2]\n error_rate = 0.001\n 
cov_cutoff = 4\n parser = argparse.ArgumentParser(description=\n 'This script determines the relative frequencies of different populations and estimates the genotypes.'\n )\n parser.add_argument('infile', help=\n 'Input tsv file. Columns should be: chrom, position, ref base, alt base, number of reads supporting reference, number of reads supporting alternate.'\n )\n parser.add_argument('-o', nargs='?', type=argparse.FileType('w'),\n default=sys.stdout, help='Output file. Default: standard out')\n parser.add_argument('-pL', default=ploidyL, type=int, nargs='+', help=\n 'A list of ploidies. Each entry in the list represents the anticipated ploidy of a subpopulation. For instance, if you expect two diploid subpopulations and one triploid subpopulation, enter 2 2 3. Default: {0}'\n .format(' '.join([str(x) for x in ploidyL])))\n parser.add_argument('-er', default=error_rate, type=float, help=\n 'Sequencing error rate. For instance, 0.01 means that 1/100 base calls will be incorrect. Default: {0}'\n .format(error_rate))\n parser.add_argument('-cc', default=cov_cutoff, type=int, help=\n 'Coverage cutoff. If the coverage of either the alternate or reference allele is less than or equal to this value, the site will not be considered as a variant site. 
Default: {0}'\n .format(cov_cutoff))\n parser.add_argument('-d', action='store_true', help=\n 'Enable python debugger.')\n args = parser.parse_args()\n inN = args.infile\n outF = args.o\n ploidyL = args.pL\n error_rate = args.er\n debug = args.d\n inN = os.path.realpath(inN)\n if len(ploidyL) > 2:\n print >> sys.stderr, 'Sorry, only two subpopulations are currently supported.'\n sys.exit(1)\n altL = get_altL(inN)\n tempL = []\n for a in altL:\n if a[0] * a[1] > cov_cutoff and a[0] * (1 - a[1]) > cov_cutoff and a[0\n ] > cov_cutoff:\n tempL.append(a)\n altL = tempL\n parL = grid_search_parameters(0.01)\n best_par = []\n best_ll = float('-inf')\n for par in parL:\n exp_freqL = generate_possible_freqL(ploidyL, par, error_rate)\n ll = 0\n for alt in altL:\n exp_freq = estimate_genotype(alt[1], exp_freqL)\n ll += np.log(binom.pmf(round(alt[0] * alt[1]), alt[0], exp_freq))\n if ll > best_ll:\n best_ll = ll\n best_par = par\n altD = freq_to_genotype(ploidyL, best_par, error_rate)\n for k in altD.keys():\n altD[k] = collapse_genotypes(ploidyL, altD[k])\n exp_freqL = sorted(altD.keys())\n print >> outF, '#log-likelihood\\t{0}\\n#population frequencies\\t{1}'.format(\n best_ll, '\\t'.join([str(x) for x in best_par]))\n inF = open(inN, 'r')\n linesL = inF.readlines()\n inF.close()\n if linesL[0][0] == '#':\n linesL = linesL[1:]\n for i in xrange(len(altL)):\n alt = altL[i]\n [chr, pos, refbase, altbase, refcov, altcov] = linesL[i].strip().split(\n '\\t')\n genotypeL = altD[estimate_genotype(alt[1], exp_freqL)]\n for g in genotypeL:\n g = re.sub('0', refbase, g)\n g = re.sub('1', altbase, g)\n tempL = []\n for i in xrange(len(ploidyL)):\n tempL.append(g[0:ploidyL[i]])\n g = g[ploidyL[i]:]\n print >> outF, '\\t'.join([chr, pos] + tempL)\n\n\n<mask token>\n",
"step-4": "import sys, argparse, pdb, glob, os, re\nimport numpy as np\nfrom bisect import bisect_left\nfrom scipy.stats import binom\n\n\ndef find_lt(a, x):\n \"\"\"\n Find rightmost value less than x in list a\n Input: list a and value x\n Output: rightmost value less than x in a\n \"\"\"\n i = bisect_left(a, x)\n if i:\n return a[i - 1]\n raise ValueError\n\n\ndef find_ge(a, x):\n \"\"\"\n Find leftmost item greater than or equal to x in list a\n Input: list a and value x\n Output: leftmost value less than or equal to x in a\n \"\"\"\n i = bisect_left(a, x)\n if i != len(a):\n return a[i]\n raise ValueError\n\n\ndef get_altL(fn):\n \"\"\"\n Make a list of alternate allele frequencies and number of reads\n Input: tsv file with reference freq in first column and alterate freq in second column\n Output: a list of tuples with number of reads and alternate allele frequency\n \"\"\"\n f = open(fn, 'r')\n linesL = [x.strip().split('\\t') for x in f.readlines()]\n f.close()\n if linesL[0][0][0] == '#':\n linesL = linesL[1:]\n for i in range(len(linesL)):\n if linesL[i][4] == '0':\n linesL[i][4] = '1'\n return zip([(int(x[4]) + int(x[5])) for x in linesL], [(float(x[5]) / (\n float(x[4]) + float(x[5]))) for x in linesL])\n\n\ndef generate_possible_freqL(pL, sL, er):\n \"\"\"\n Generate list of possible allele frequencies\n Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate\n Output: list of possible allele frequences\n \"\"\"\n h = sum(pL)\n L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]\n M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]\n p_freqL = []\n for i in range(len(pL)):\n p_freqL += [sL[i] / pL[i]] * pL[i]\n p_freqA = np.array(p_freqL)\n sA = np.array(sL)\n aL = []\n for g in M:\n aL.append(sum(np.array([int(x) for x in list(g)]) * p_freqL))\n return sorted(list(set(aL + [er, 1 - er])))\n\n\ndef freq_to_genotype(pL, sL, er):\n \"\"\"\n Creates dict of expected alternate allele frequencies and consistent genotypes\n Input: 
ploidy list, frequency (of each subpopulation) list, and sequencing error rate\n Output: dict of expected alternate allele frequencies and consistent genotypes. Genotypes represented as binary strings in the order of the ploidy list\n \"\"\"\n h = sum(pL)\n L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]\n M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]\n p_freqL = []\n for i in range(len(pL)):\n p_freqL += [sL[i] / pL[i]] * pL[i]\n p_freqA = np.array(p_freqL)\n sA = np.array(sL)\n aD = {}\n for g in M:\n alt_freq = sum(np.array([int(x) for x in list(g)]) * p_freqL)\n if aD.has_key(alt_freq):\n aD[alt_freq].append(g)\n else:\n aD[alt_freq] = [g]\n aD[er] = ['0' * (len(L[-1]) - 1) + bin(0)[2:]]\n aD[1 - er] = [bin(2 ** h - 1)[2:]]\n return aD\n\n\ndef collapse_genotypes(pL, gL):\n \"\"\"\n Reduces a list of genotypes to distinct genotypes given ploidy\n Input: ploidy list pL and list of genotypes gL where each genotype is a binary string ordered according to ploidy list\n Output: genotype list with non-redundant genotypes\n \"\"\"\n if len(gL) < 2:\n return gL\n else:\n uniqueL = []\n for g in gL:\n s = ''\n for i in xrange(len(pL)):\n s += ''.join(sorted(g[0:pL[i]]))\n g = g[pL[i]:]\n if s not in uniqueL:\n uniqueL.append(s)\n return uniqueL\n\n\ndef grid_search_parameters(step):\n \"\"\"\n Make a list of parameters to try\n Input: step size\n Output: subpopulation frequencies to try\n \"\"\"\n f1 = list(np.arange(step, 1, step))\n f2 = list(np.arange(step, 1, step))\n f2.reverse()\n return zip(f1, f2)\n\n\ndef estimate_genotype(alt_freq, exp_freqL):\n \"\"\"\n Maximum likelihood estimator of alt_freq given possibilities in exp_freqL\n Input: observed alternate frequency and list of expected alternate frequencies\n Output: ML estimator of true alternate allele frequency\n \"\"\"\n try:\n i = find_lt(exp_freqL, alt_freq)\n except ValueError:\n i = float('-inf')\n try:\n j = find_ge(exp_freqL, alt_freq)\n except ValueError:\n j = float('inf')\n if alt_freq - i < j 
- alt_freq:\n return i\n else:\n return j\n\n\ndef main():\n ploidyL = [2, 2]\n error_rate = 0.001\n cov_cutoff = 4\n parser = argparse.ArgumentParser(description=\n 'This script determines the relative frequencies of different populations and estimates the genotypes.'\n )\n parser.add_argument('infile', help=\n 'Input tsv file. Columns should be: chrom, position, ref base, alt base, number of reads supporting reference, number of reads supporting alternate.'\n )\n parser.add_argument('-o', nargs='?', type=argparse.FileType('w'),\n default=sys.stdout, help='Output file. Default: standard out')\n parser.add_argument('-pL', default=ploidyL, type=int, nargs='+', help=\n 'A list of ploidies. Each entry in the list represents the anticipated ploidy of a subpopulation. For instance, if you expect two diploid subpopulations and one triploid subpopulation, enter 2 2 3. Default: {0}'\n .format(' '.join([str(x) for x in ploidyL])))\n parser.add_argument('-er', default=error_rate, type=float, help=\n 'Sequencing error rate. For instance, 0.01 means that 1/100 base calls will be incorrect. Default: {0}'\n .format(error_rate))\n parser.add_argument('-cc', default=cov_cutoff, type=int, help=\n 'Coverage cutoff. If the coverage of either the alternate or reference allele is less than or equal to this value, the site will not be considered as a variant site. 
Default: {0}'\n .format(cov_cutoff))\n parser.add_argument('-d', action='store_true', help=\n 'Enable python debugger.')\n args = parser.parse_args()\n inN = args.infile\n outF = args.o\n ploidyL = args.pL\n error_rate = args.er\n debug = args.d\n inN = os.path.realpath(inN)\n if len(ploidyL) > 2:\n print >> sys.stderr, 'Sorry, only two subpopulations are currently supported.'\n sys.exit(1)\n altL = get_altL(inN)\n tempL = []\n for a in altL:\n if a[0] * a[1] > cov_cutoff and a[0] * (1 - a[1]) > cov_cutoff and a[0\n ] > cov_cutoff:\n tempL.append(a)\n altL = tempL\n parL = grid_search_parameters(0.01)\n best_par = []\n best_ll = float('-inf')\n for par in parL:\n exp_freqL = generate_possible_freqL(ploidyL, par, error_rate)\n ll = 0\n for alt in altL:\n exp_freq = estimate_genotype(alt[1], exp_freqL)\n ll += np.log(binom.pmf(round(alt[0] * alt[1]), alt[0], exp_freq))\n if ll > best_ll:\n best_ll = ll\n best_par = par\n altD = freq_to_genotype(ploidyL, best_par, error_rate)\n for k in altD.keys():\n altD[k] = collapse_genotypes(ploidyL, altD[k])\n exp_freqL = sorted(altD.keys())\n print >> outF, '#log-likelihood\\t{0}\\n#population frequencies\\t{1}'.format(\n best_ll, '\\t'.join([str(x) for x in best_par]))\n inF = open(inN, 'r')\n linesL = inF.readlines()\n inF.close()\n if linesL[0][0] == '#':\n linesL = linesL[1:]\n for i in xrange(len(altL)):\n alt = altL[i]\n [chr, pos, refbase, altbase, refcov, altcov] = linesL[i].strip().split(\n '\\t')\n genotypeL = altD[estimate_genotype(alt[1], exp_freqL)]\n for g in genotypeL:\n g = re.sub('0', refbase, g)\n g = re.sub('1', altbase, g)\n tempL = []\n for i in xrange(len(ploidyL)):\n tempL.append(g[0:ploidyL[i]])\n g = g[ploidyL[i]:]\n print >> outF, '\\t'.join([chr, pos] + tempL)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# Chris DeBoever\n# cdeboeve@ucsd.edu\n\nimport sys, argparse, pdb, glob, os, re\nimport numpy as np\nfrom bisect import bisect_left \nfrom scipy.stats import binom\n\n### helper functions ###\n\ndef find_lt(a,x):\n \"\"\"\n Find rightmost value less than x in list a\n Input: list a and value x\n Output: rightmost value less than x in a\n \"\"\"\n i = bisect_left(a,x)\n if i:\n return a[i-1]\n raise ValueError\n\ndef find_ge(a,x):\n \"\"\"\n Find leftmost item greater than or equal to x in list a\n Input: list a and value x\n Output: leftmost value less than or equal to x in a\n \"\"\"\n i = bisect_left(a,x)\n if i != len(a):\n return a[i]\n raise ValueError\n\ndef get_altL(fn):\n \"\"\"\n Make a list of alternate allele frequencies and number of reads\n Input: tsv file with reference freq in first column and alterate freq in second column\n Output: a list of tuples with number of reads and alternate allele frequency\n \"\"\"\n f = open(fn,'r')\n linesL = [ x.strip().split('\\t') for x in f.readlines() ]\n f.close()\n if linesL[0][0][0] == '#':\n linesL = linesL[1:]\n for i in range(len(linesL)):\n if linesL[i][4] == '0': # if the number of reads supporting alternate is 0, we'll switch to 1 so avoid numeric issues\n linesL[i][4] = '1'\n return zip([ int(x[4])+int(x[5]) for x in linesL ], [ float(x[5])/(float(x[4])+float(x[5])) for x in linesL ]) # each tuple is [freq,num_reads]\n\n# def generate_cancer_possible_freqL(pL,sL,er): \n# I want to make a function which generates the likely frequencies seen in a cancer sample. This would exclude double-hit mutations (i.e. a single site gains somatic mutations on both chromosomes). 
This simplifications can only be made in the diploid case, however, because ploidy-variable populations might be weird...\n\ndef generate_possible_freqL(pL,sL,er): \n \"\"\"\n Generate list of possible allele frequencies\n Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate\n Output: list of possible allele frequences\n \"\"\"\n h = sum(pL) # number of different haplotypes\n L = [ bin(x)[2:] for x in range(1,2**h-1) ] # range from 1 to 2**h-1 because we don't want 0% or 100% allele freq\n M = [ '0'*(len(L[-1])-len(x))+x for x in L ]\n p_freqL = []\n for i in range(len(pL)):\n p_freqL += [sL[i]/pL[i]]*pL[i]\n p_freqA = np.array(p_freqL)\n sA = np.array(sL)\n aL = []\n for g in M:\n aL.append(sum(np.array([ int(x) for x in list(g) ])*p_freqL))\n return sorted(list(set(aL+[er,1-er]))) \n\ndef freq_to_genotype(pL,sL,er): \n \"\"\"\n Creates dict of expected alternate allele frequencies and consistent genotypes\n Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate\n Output: dict of expected alternate allele frequencies and consistent genotypes. 
Genotypes represented as binary strings in the order of the ploidy list\n \"\"\"\n h = sum(pL) # number of different haplotypes\n L = [ bin(x)[2:] for x in range(1,2**h-1) ] # range from 1 to 2**h-1 because we don't want 0% or 100% allele freq\n M = [ '0'*(len(L[-1])-len(x))+x for x in L ]\n p_freqL = []\n for i in range(len(pL)):\n p_freqL += [sL[i]/pL[i]]*pL[i]\n p_freqA = np.array(p_freqL)\n sA = np.array(sL)\n aD = {} # dict where each key is an expected alternate allele frequency and each value is a list of genotypes consistent with this alternate allele frequency\n for g in M:\n alt_freq = sum(np.array([ int(x) for x in list(g) ])*p_freqL)\n if aD.has_key(alt_freq):\n aD[alt_freq].append(g)\n else:\n aD[alt_freq] = [g]\n aD[er] = ['0'*(len(L[-1])-1) + bin(0)[2:]] # add genotype for 0% alternate allele freq\n aD[1-er] = [bin(2**h-1)[2:]] # add genotype for 100% alternate allele freq\n return aD\n\ndef collapse_genotypes(pL,gL):\n \"\"\"\n Reduces a list of genotypes to distinct genotypes given ploidy\n Input: ploidy list pL and list of genotypes gL where each genotype is a binary string ordered according to ploidy list\n Output: genotype list with non-redundant genotypes\n \"\"\"\n if len(gL) < 2:\n return gL\n else:\n uniqueL = [] # list of unique genotypes relative to ploidy\n for g in gL:\n s = ''\n for i in xrange(len(pL)):\n s += ''.join(sorted(g[0:pL[i]]))\n g = g[pL[i]:]\n if s not in uniqueL:\n uniqueL.append(s)\n return uniqueL\n \ndef grid_search_parameters(step):\n \"\"\"\n Make a list of parameters to try\n Input: step size\n Output: subpopulation frequencies to try\n \"\"\"\n f1 = list(np.arange(step,1,step))\n f2 = list(np.arange(step,1,step))\n f2.reverse()\n return zip(f1,f2)\n\ndef estimate_genotype(alt_freq,exp_freqL):\n \"\"\"\n Maximum likelihood estimator of alt_freq given possibilities in exp_freqL\n Input: observed alternate frequency and list of expected alternate frequencies\n Output: ML estimator of true alternate allele frequency\n 
\"\"\"\n try:\n i = find_lt(exp_freqL,alt_freq) # Find rightmost value less than x\n except ValueError:\n i = float(\"-inf\")\n try:\n j = find_ge(exp_freqL,alt_freq) # Find leftmost item greater than or equal to x\n except ValueError:\n j = float(\"inf\")\n if alt_freq-i < j-alt_freq:\n return i\n else:\n return j\n\ndef main():\n ### magic variables ###\n # these variables can be set at the command line as well\n ploidyL = [2,2] # the entries in this list are the expected ploidy of each subpopulation. Default is two diploid subpopulations\n error_rate = 0.001 # sequencing error rate\n cov_cutoff = 4 # coverage cutoff for variant sites\n\n ### gather command line arguments ###\n parser = argparse.ArgumentParser(description='This script determines the relative frequencies of different populations and estimates the genotypes.')\n parser.add_argument('infile', help='Input tsv file. Columns should be: chrom, position, ref base, alt base, number of reads supporting reference, number of reads supporting alternate.')\n parser.add_argument('-o', nargs='?', type=argparse.FileType('w'),default=sys.stdout, help='Output file. Default: standard out')\n parser.add_argument('-pL', default=ploidyL, type=int, nargs='+', help='A list of ploidies. Each entry in the list represents the anticipated ploidy of a subpopulation. For instance, if you expect two diploid subpopulations and one triploid subpopulation, enter 2 2 3. Default: {0}'.format(' '.join([str(x) for x in ploidyL])))\n parser.add_argument('-er', default=error_rate, type=float, help='Sequencing error rate. For instance, 0.01 means that 1/100 base calls will be incorrect. Default: {0}'.format(error_rate))\n parser.add_argument('-cc', default=cov_cutoff, type=int, help='Coverage cutoff. If the coverage of either the alternate or reference allele is less than or equal to this value, the site will not be considered as a variant site. 
Default: {0}'.format(cov_cutoff))\n parser.add_argument('-d', action='store_true', help='Enable python debugger.')\n \n args = parser.parse_args()\n \n inN = args.infile\n outF = args.o\n ploidyL = args.pL\n error_rate = args.er\n debug = args.d\n\n inN = os.path.realpath(inN) # get the input file path\n\n if len(ploidyL) > 2:\n print >>sys.stderr, 'Sorry, only two subpopulations are currently supported.'\n sys.exit(1)\n\n altL = get_altL(inN) # a list of number of reads and alternate allele frequencies\n tempL = []\n for a in altL:\n if a[0]*a[1] > cov_cutoff and a[0]*(1-a[1]) > cov_cutoff and a[0] > cov_cutoff:\n tempL.append(a)\n altL = tempL\n\n ### find population frequencies ###\n\n parL = grid_search_parameters(0.01) # grid search\n best_par = []\n best_ll = float(\"-inf\")\n\n for par in parL:\n exp_freqL = generate_possible_freqL(ploidyL,par,error_rate)\n ll = 0 # log-likelihood\n\n for alt in altL:\n exp_freq = estimate_genotype(alt[1],exp_freqL)\n ll += np.log(binom.pmf(round(alt[0]*alt[1]),alt[0],exp_freq)) \n # round(alt[0]*alt[1]) is the number of reads we saw supporting alternate allele (i.e. the number of successes under the binomial test)\n # alt[0] is the total number of reads covering this site (i.e. the number of attempts in our binomial test)\n # exp_freq is our probability of success (i.e. 
observing a read supporting alternate) from our ML estimation (see estimate_genotype)\n \n if ll > best_ll:\n best_ll = ll\n best_par = par\n\n ### determine genotypes ###\n altD = freq_to_genotype(ploidyL,best_par,error_rate) # dict whose keys are alternate allele frequencies and whose values are lists of consistent genotypes\n for k in altD.keys():\n altD[k] = collapse_genotypes(ploidyL,altD[k])\n exp_freqL = sorted(altD.keys()) \n\n print >>outF, '#log-likelihood\\t{0}\\n#population frequencies\\t{1}'.format(best_ll,'\\t'.join([ str(x) for x in best_par ]))\n\n inF = open(inN,'r')\n linesL = inF.readlines()\n inF.close()\n if linesL[0][0] == '#':\n linesL = linesL[1:]\n for i in xrange(len(altL)):\n alt = altL[i]\n [chr,pos,refbase,altbase,refcov,altcov] = linesL[i].strip().split('\\t')\n genotypeL = altD[estimate_genotype(alt[1],exp_freqL)] \n for g in genotypeL:\n g = re.sub('0',refbase,g)\n g = re.sub('1',altbase,g)\n tempL = [] # each element of this list is the genotype of a population\n for i in xrange(len(ploidyL)):\n tempL.append(g[0:ploidyL[i]])\n g = g[ploidyL[i]:]\n print >>outF, '\\t'.join([chr,pos] + tempL)\n\n # use best population frequency parameters and walk through sites, assign genotypes, p-values or scores maybe?\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
7,
8,
9,
11,
12
]
}
|
[
7,
8,
9,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
if os.environ.get('DEBUG'):
import settings_local as settings
else:
import settings_prod as settings
except ImportError:
import settings
<|reserved_special_token_0|>
if redis_env:
redis = Redis.from_url(redis_env)
elif getattr(settings, 'REDIS_URL', None):
redis = Redis.from_url(settings.REDIS_URL)
else:
redis = Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=
settings.REDIS_DB, password=settings.REDIS_PASS)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
if os.environ.get('DEBUG'):
import settings_local as settings
else:
import settings_prod as settings
except ImportError:
import settings
redis_env = os.environ.get('REDISTOGO_URL')
if redis_env:
redis = Redis.from_url(redis_env)
elif getattr(settings, 'REDIS_URL', None):
redis = Redis.from_url(settings.REDIS_URL)
else:
redis = Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=
settings.REDIS_DB, password=settings.REDIS_PASS)
<|reserved_special_token_1|>
import os
from redis import Redis
try:
if os.environ.get('DEBUG'):
import settings_local as settings
else:
import settings_prod as settings
except ImportError:
import settings
redis_env = os.environ.get('REDISTOGO_URL')
if redis_env:
redis = Redis.from_url(redis_env)
elif getattr(settings, 'REDIS_URL', None):
redis = Redis.from_url(settings.REDIS_URL)
else:
redis = Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=
settings.REDIS_DB, password=settings.REDIS_PASS)
|
flexible
|
{
"blob_id": "4c3a27bf1f7e617f4b85dc2b59efa184751b69ac",
"index": 3868,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n if os.environ.get('DEBUG'):\n import settings_local as settings\n else:\n import settings_prod as settings\nexcept ImportError:\n import settings\n<mask token>\nif redis_env:\n redis = Redis.from_url(redis_env)\nelif getattr(settings, 'REDIS_URL', None):\n redis = Redis.from_url(settings.REDIS_URL)\nelse:\n redis = Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=\n settings.REDIS_DB, password=settings.REDIS_PASS)\n",
"step-3": "<mask token>\ntry:\n if os.environ.get('DEBUG'):\n import settings_local as settings\n else:\n import settings_prod as settings\nexcept ImportError:\n import settings\nredis_env = os.environ.get('REDISTOGO_URL')\nif redis_env:\n redis = Redis.from_url(redis_env)\nelif getattr(settings, 'REDIS_URL', None):\n redis = Redis.from_url(settings.REDIS_URL)\nelse:\n redis = Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=\n settings.REDIS_DB, password=settings.REDIS_PASS)\n",
"step-4": "import os\nfrom redis import Redis\ntry:\n if os.environ.get('DEBUG'):\n import settings_local as settings\n else:\n import settings_prod as settings\nexcept ImportError:\n import settings\nredis_env = os.environ.get('REDISTOGO_URL')\nif redis_env:\n redis = Redis.from_url(redis_env)\nelif getattr(settings, 'REDIS_URL', None):\n redis = Redis.from_url(settings.REDIS_URL)\nelse:\n redis = Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=\n settings.REDIS_DB, password=settings.REDIS_PASS)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('your name is:' + x)
print(p)
<|reserved_special_token_1|>
x = str(input('please input your name:'))
y = int(input('please input your age:'))
p = int(2017 - y + 100)
print('your name is:' + x)
print(p)
<|reserved_special_token_1|>
x = str(input("please input your name:"))
y = int(input("please input your age:"))
p = int(2017-y+100)
print("your name is:"+x)
print (p)
|
flexible
|
{
"blob_id": "929f580e8e559f8309e19f72208bf4ff0d537668",
"index": 4935,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('your name is:' + x)\nprint(p)\n",
"step-3": "x = str(input('please input your name:'))\ny = int(input('please input your age:'))\np = int(2017 - y + 100)\nprint('your name is:' + x)\nprint(p)\n",
"step-4": "x = str(input(\"please input your name:\"))\ny = int(input(\"please input your age:\"))\n\np = int(2017-y+100)\n\nprint(\"your name is:\"+x)\nprint (p)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
date = input()
if date == ("DEC 25") or date == ("OCT 31"):
print("yup")
else:
print("nope")
|
normal
|
{
"blob_id": "bc5b368a710b8dfc4492b996c42c46638e1f538c",
"index": 9811,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif date == 'DEC 25' or date == 'OCT 31':\n print('yup')\nelse:\n print('nope')\n",
"step-3": "date = input()\nif date == 'DEC 25' or date == 'OCT 31':\n print('yup')\nelse:\n print('nope')\n",
"step-4": "date = input()\nif date == (\"DEC 25\") or date == (\"OCT 31\"):\n print(\"yup\")\n\nelse:\n print(\"nope\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import math
def vol_shell(r1, r2):
a=abs((4/3)*math.pi*((r1**3)-(r2**3)))
return round(a,3)
print(vol_shell(3,3))
|
normal
|
{
"blob_id": "cd234911c1f990b8029dfa792d132847bf39a6aa",
"index": 445,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef vol_shell(r1, r2):\n a = abs(4 / 3 * math.pi * (r1 ** 3 - r2 ** 3))\n return round(a, 3)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef vol_shell(r1, r2):\n a = abs(4 / 3 * math.pi * (r1 ** 3 - r2 ** 3))\n return round(a, 3)\n\n\nprint(vol_shell(3, 3))\n",
"step-4": "import math\n\n\ndef vol_shell(r1, r2):\n a = abs(4 / 3 * math.pi * (r1 ** 3 - r2 ** 3))\n return round(a, 3)\n\n\nprint(vol_shell(3, 3))\n",
"step-5": "\nimport math\ndef vol_shell(r1, r2):\n a=abs((4/3)*math.pi*((r1**3)-(r2**3)))\n return round(a,3)\nprint(vol_shell(3,3))\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class AppData:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AppData:
def __init__(self, app, backend, moduleRefs, locations, modules,
version, checkout, silent):
"""Collects TF data according to specifications.
The specifications are passed as arguments when the object is initialized.
Parameters
----------
backend: string
`github` or `gitlab` or a GitLab instance such as `gitlab.huc.knaw.nl`.
app: obj
The high-level API object
moduleRefs: tuple
Each member consists of a module ref, which is a tuple of information
that defines a module.
locations: string|tuple
One or more directory paths. They will be combined with the `modules`
argument and used as locations to search for TF data files.
modules: string|tuple
One or more directory path segments. They will be appended to the
paths given by the `locations` argument to form search locations
for TF data files.
version: string
The version of TF data that should be retrievend. Version is a directory
level just below the search locations.
checkout: string
A specifier to use a specific release or commit of a data repository.
silent: string, optional tf.core.timestamp.SILENT_D
See `tf.core.timestamp.Timestamp`
"""
self.backend = backend
self.app = app
self.moduleRefs = [] if moduleRefs is None else moduleRefs.split(','
) if type(moduleRefs) is str else list(moduleRefs)
self.locationsArg = locations
self.modulesArg = modules
self.version = version
self.checkout = checkout
self.silent = silent
def getMain(self):
"""Get the main data of the corpus.
This is specified by the `org`, `repo` and `relative` settings under
`provenanceSpec` in `config.yaml`.
See Also
--------
tf.advanced.settings: options allowed in `config.yaml`
"""
app = self.app
checkout = self.checkout
aContext = app.context
org = aContext.org
repo = aContext.repo
relative = prefixSlash(aContext.relative)
appPath = aContext.appPath
appName = aContext.appName
if appName.startswith('app:'):
appParent = appPath.rsplit('/', 1)[0]
relative = f'{appParent}{relative}'
elif org is None or repo is None:
appPathRep = f'{appPath}/' if appPath else ''
relative = f'{appPathRep}{appName}'
self.checkout = 'local'
if not self.getModule(org, repo, prefixSlash(relative), checkout,
isBase=True):
self.good = False
<|reserved_special_token_0|>
def getRefs(self):
"""Get data from additional modules.
These are specified in the `moduleRefs` parameter of `AppData`.
We store the set of special modules in order to skip them
later when we are loading the standard modules.
"""
backend = self.backend
refs = self.moduleRefs
for ref in refs:
refPure = ref.rsplit(':', 1)[0]
if refPure in self.seen:
continue
parts = splitModRef(ref)
if not parts:
self.good = False
continue
parts[2] = prefixSlash(normpath(parts[2]))
theBackend = None if parts[-1] is None or parts[-1
] == backend else parts[-1]
if not self.getModule(*parts[0:-1], backend=theBackend):
self.good = False
def getModules(self):
"""Get data from additional local directories.
These are specified in the `locations` and `modules` parameters of `AppData`.
"""
self.provenance = []
provenance = self.provenance
self.mLocations = []
mLocations = self.mLocations
self.locations = None
self.modules = None
self.good = True
self.seen = set()
self.getMain()
self.getRefs()
self.getStandard()
version = self.version
good = self.good
app = self.app
if good:
app.mLocations = mLocations
app.provenance = provenance
else:
return
mModules = []
if mLocations:
mModules.append(version or '')
locations = self.locationsArg
modules = self.modulesArg
givenLocations = [] if locations is None else [expandDir(app, x.
strip()) for x in itemize(locations, '\n')] if type(locations
) is str else [str(x) for x in locations]
givenModules = [] if modules is None else [normpath(x.strip()) for
x in itemize(modules, '\n')] if type(modules) is str else [normpath
(str(x)) for x in modules]
self.locations = mLocations + givenLocations
self.modules = mModules + givenModules
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AppData:
def __init__(self, app, backend, moduleRefs, locations, modules,
version, checkout, silent):
"""Collects TF data according to specifications.
The specifications are passed as arguments when the object is initialized.
Parameters
----------
backend: string
`github` or `gitlab` or a GitLab instance such as `gitlab.huc.knaw.nl`.
app: obj
The high-level API object
moduleRefs: tuple
Each member consists of a module ref, which is a tuple of information
that defines a module.
locations: string|tuple
One or more directory paths. They will be combined with the `modules`
argument and used as locations to search for TF data files.
modules: string|tuple
One or more directory path segments. They will be appended to the
paths given by the `locations` argument to form search locations
for TF data files.
version: string
The version of TF data that should be retrievend. Version is a directory
level just below the search locations.
checkout: string
A specifier to use a specific release or commit of a data repository.
silent: string, optional tf.core.timestamp.SILENT_D
See `tf.core.timestamp.Timestamp`
"""
self.backend = backend
self.app = app
self.moduleRefs = [] if moduleRefs is None else moduleRefs.split(','
) if type(moduleRefs) is str else list(moduleRefs)
self.locationsArg = locations
self.modulesArg = modules
self.version = version
self.checkout = checkout
self.silent = silent
def getMain(self):
"""Get the main data of the corpus.
This is specified by the `org`, `repo` and `relative` settings under
`provenanceSpec` in `config.yaml`.
See Also
--------
tf.advanced.settings: options allowed in `config.yaml`
"""
app = self.app
checkout = self.checkout
aContext = app.context
org = aContext.org
repo = aContext.repo
relative = prefixSlash(aContext.relative)
appPath = aContext.appPath
appName = aContext.appName
if appName.startswith('app:'):
appParent = appPath.rsplit('/', 1)[0]
relative = f'{appParent}{relative}'
elif org is None or repo is None:
appPathRep = f'{appPath}/' if appPath else ''
relative = f'{appPathRep}{appName}'
self.checkout = 'local'
if not self.getModule(org, repo, prefixSlash(relative), checkout,
isBase=True):
self.good = False
def getStandard(self):
"""Get the data of the standard modules specified by the settings of the corpus.
These are specified in the `moduleSpecs` setting under
`provenanceSpecs` in `config.yaml`.
They will be loaded *after* the extra modules specified in the **mod**
parameter, and only in as far they have not been specifief in the
**mod** parameter. In this way you can pass overriding
checkout specifiers to the standard modules.
See Also
--------
tf.advanced.settings: options allowed in `config.yaml`
"""
app = self.app
loadData = app.loadData
if not loadData or loadData == 'core':
return
aContext = app.context
moduleSpecs = aContext.moduleSpecs
seen = self.seen
checkout = self.checkout
backend = self.backend
for m in (moduleSpecs or []):
org = m['org']
repo = m['repo']
relative = m['relative']
theCheckout = m.get('checkout', checkout)
theBackend = m.get('backend', backend)
bRep = backendRep(theBackend, 'spec', default=backend)
ref = f'{bRep}{org}/{repo}{relative}'
if ref in seen:
continue
if not self.getModule(org, repo, relative, theCheckout, backend
=theBackend, specs=m):
self.good = False
def getRefs(self):
"""Get data from additional modules.
These are specified in the `moduleRefs` parameter of `AppData`.
We store the set of special modules in order to skip them
later when we are loading the standard modules.
"""
backend = self.backend
refs = self.moduleRefs
for ref in refs:
refPure = ref.rsplit(':', 1)[0]
if refPure in self.seen:
continue
parts = splitModRef(ref)
if not parts:
self.good = False
continue
parts[2] = prefixSlash(normpath(parts[2]))
theBackend = None if parts[-1] is None or parts[-1
] == backend else parts[-1]
if not self.getModule(*parts[0:-1], backend=theBackend):
self.good = False
def getModules(self):
"""Get data from additional local directories.
These are specified in the `locations` and `modules` parameters of `AppData`.
"""
self.provenance = []
provenance = self.provenance
self.mLocations = []
mLocations = self.mLocations
self.locations = None
self.modules = None
self.good = True
self.seen = set()
self.getMain()
self.getRefs()
self.getStandard()
version = self.version
good = self.good
app = self.app
if good:
app.mLocations = mLocations
app.provenance = provenance
else:
return
mModules = []
if mLocations:
mModules.append(version or '')
locations = self.locationsArg
modules = self.modulesArg
givenLocations = [] if locations is None else [expandDir(app, x.
strip()) for x in itemize(locations, '\n')] if type(locations
) is str else [str(x) for x in locations]
givenModules = [] if modules is None else [normpath(x.strip()) for
x in itemize(modules, '\n')] if type(modules) is str else [normpath
(str(x)) for x in modules]
self.locations = mLocations + givenLocations
self.modules = mModules + givenModules
def getModule(self, org, repo, relative, checkout, backend=None, isBase
=False, specs=None):
"""Prepare to load a single module.
Eventually, all TF data will be downloaded from local directories, bases
on a list of location paths and module paths.
This function computes the contribution of a single module to both the
location paths and the module paths.
Parameters
----------
org: string
GitHub organization or GitLab group of the module
repo: string:
GitHub repository or GitLab project of the module
relative: string
Path within the repository of the module
checkout: string
A specifier to use a specific release or commit of a data repository.
backend: string
The backend if different from the backend of the main module
isBase: boolean, optional False
Whether this module is the main data of the corpus.
specs: dict, optional False
Additional informational attributes of the module, e.g. a DOI
"""
backend = self.backend if backend is None else backendRep(backend,
'norm')
bRep = backendRep(backend, 'spec', default=self.backend)
version = self.version
silent = self.silent
mLocations = self.mLocations
provenance = self.provenance
seen = self.seen
app = self.app
_browse = app._browse
aContext = app.context
branch = aContext.provenanceSpec['branch']
relative = prefixSlash(normpath(relative))
moduleRef = f'{bRep}{org}/{repo}{relative}'
if moduleRef in self.seen:
return True
if org is None or repo is None:
relativeBare = relative.removeprefix('/')
repoLocation = relativeBare
mLocations.append(relativeBare)
commit, local, release = None, None, None
else:
commit, release, local, localBase, localDir = checkoutRepo(backend,
_browse=_browse, org=org, repo=repo, folder=relative,
version=version, checkout=checkout, withPaths=False, keep=
False, silent=silent)
if not localBase:
return False
repoLocation = f'{localBase}/{org}/{repo}'
mLocations.append(f'{localBase}/{localDir}')
seen.add(moduleRef)
if isBase:
app.repoLocation = repoLocation
info = {}
for item in (('doi', None), ('corpus', f'{org}/{repo}{relative}')):
key, default = item
info[key] = getattr(aContext, key) if isBase else specs[key
] if specs and key in specs else default
provenance.append((('corpus', info['corpus']), ('version', version),
('commit', commit or '??'), ('release', release or 'none'), (
'live', provenanceLink(backend, org, repo, version, branch,
commit, local, release, relative)), ('doi', info['doi'])))
return True
def getModulesData(*args):
"""Retrieve all data for a corpus.
Parameters
----------
args: list
All parameters needed to retrieve all associated data.
They are the same as are needed to construct an `AppData` object.
"""
mData = AppData(*args)
mData.getModules()
if not mData.good or mData.locations is None:
return None
return mData.locations, mData.modules
<|reserved_special_token_1|>
from ..core.helpers import itemize
from ..core.files import backendRep, expandDir, prefixSlash, normpath
from .helpers import splitModRef
from .repo import checkoutRepo
from .links import provenanceLink
class AppData:
def __init__(self, app, backend, moduleRefs, locations, modules,
version, checkout, silent):
"""Collects TF data according to specifications.
The specifications are passed as arguments when the object is initialized.
Parameters
----------
backend: string
`github` or `gitlab` or a GitLab instance such as `gitlab.huc.knaw.nl`.
app: obj
The high-level API object
moduleRefs: tuple
Each member consists of a module ref, which is a tuple of information
that defines a module.
locations: string|tuple
One or more directory paths. They will be combined with the `modules`
argument and used as locations to search for TF data files.
modules: string|tuple
One or more directory path segments. They will be appended to the
paths given by the `locations` argument to form search locations
for TF data files.
version: string
The version of TF data that should be retrievend. Version is a directory
level just below the search locations.
checkout: string
A specifier to use a specific release or commit of a data repository.
silent: string, optional tf.core.timestamp.SILENT_D
See `tf.core.timestamp.Timestamp`
"""
self.backend = backend
self.app = app
self.moduleRefs = [] if moduleRefs is None else moduleRefs.split(','
) if type(moduleRefs) is str else list(moduleRefs)
self.locationsArg = locations
self.modulesArg = modules
self.version = version
self.checkout = checkout
self.silent = silent
def getMain(self):
"""Get the main data of the corpus.
This is specified by the `org`, `repo` and `relative` settings under
`provenanceSpec` in `config.yaml`.
See Also
--------
tf.advanced.settings: options allowed in `config.yaml`
"""
app = self.app
checkout = self.checkout
aContext = app.context
org = aContext.org
repo = aContext.repo
relative = prefixSlash(aContext.relative)
appPath = aContext.appPath
appName = aContext.appName
if appName.startswith('app:'):
appParent = appPath.rsplit('/', 1)[0]
relative = f'{appParent}{relative}'
elif org is None or repo is None:
appPathRep = f'{appPath}/' if appPath else ''
relative = f'{appPathRep}{appName}'
self.checkout = 'local'
if not self.getModule(org, repo, prefixSlash(relative), checkout,
isBase=True):
self.good = False
def getStandard(self):
"""Get the data of the standard modules specified by the settings of the corpus.
These are specified in the `moduleSpecs` setting under
`provenanceSpecs` in `config.yaml`.
They will be loaded *after* the extra modules specified in the **mod**
parameter, and only in as far they have not been specifief in the
**mod** parameter. In this way you can pass overriding
checkout specifiers to the standard modules.
See Also
--------
tf.advanced.settings: options allowed in `config.yaml`
"""
app = self.app
loadData = app.loadData
if not loadData or loadData == 'core':
return
aContext = app.context
moduleSpecs = aContext.moduleSpecs
seen = self.seen
checkout = self.checkout
backend = self.backend
for m in (moduleSpecs or []):
org = m['org']
repo = m['repo']
relative = m['relative']
theCheckout = m.get('checkout', checkout)
theBackend = m.get('backend', backend)
bRep = backendRep(theBackend, 'spec', default=backend)
ref = f'{bRep}{org}/{repo}{relative}'
if ref in seen:
continue
if not self.getModule(org, repo, relative, theCheckout, backend
=theBackend, specs=m):
self.good = False
def getRefs(self):
"""Get data from additional modules.
These are specified in the `moduleRefs` parameter of `AppData`.
We store the set of special modules in order to skip them
later when we are loading the standard modules.
"""
backend = self.backend
refs = self.moduleRefs
for ref in refs:
refPure = ref.rsplit(':', 1)[0]
if refPure in self.seen:
continue
parts = splitModRef(ref)
if not parts:
self.good = False
continue
parts[2] = prefixSlash(normpath(parts[2]))
theBackend = None if parts[-1] is None or parts[-1
] == backend else parts[-1]
if not self.getModule(*parts[0:-1], backend=theBackend):
self.good = False
def getModules(self):
"""Get data from additional local directories.
These are specified in the `locations` and `modules` parameters of `AppData`.
"""
self.provenance = []
provenance = self.provenance
self.mLocations = []
mLocations = self.mLocations
self.locations = None
self.modules = None
self.good = True
self.seen = set()
self.getMain()
self.getRefs()
self.getStandard()
version = self.version
good = self.good
app = self.app
if good:
app.mLocations = mLocations
app.provenance = provenance
else:
return
mModules = []
if mLocations:
mModules.append(version or '')
locations = self.locationsArg
modules = self.modulesArg
givenLocations = [] if locations is None else [expandDir(app, x.
strip()) for x in itemize(locations, '\n')] if type(locations
) is str else [str(x) for x in locations]
givenModules = [] if modules is None else [normpath(x.strip()) for
x in itemize(modules, '\n')] if type(modules) is str else [normpath
(str(x)) for x in modules]
self.locations = mLocations + givenLocations
self.modules = mModules + givenModules
def getModule(self, org, repo, relative, checkout, backend=None, isBase
=False, specs=None):
"""Prepare to load a single module.
Eventually, all TF data will be downloaded from local directories, bases
on a list of location paths and module paths.
This function computes the contribution of a single module to both the
location paths and the module paths.
Parameters
----------
org: string
GitHub organization or GitLab group of the module
repo: string:
GitHub repository or GitLab project of the module
relative: string
Path within the repository of the module
checkout: string
A specifier to use a specific release or commit of a data repository.
backend: string
The backend if different from the backend of the main module
isBase: boolean, optional False
Whether this module is the main data of the corpus.
specs: dict, optional False
Additional informational attributes of the module, e.g. a DOI
"""
backend = self.backend if backend is None else backendRep(backend,
'norm')
bRep = backendRep(backend, 'spec', default=self.backend)
version = self.version
silent = self.silent
mLocations = self.mLocations
provenance = self.provenance
seen = self.seen
app = self.app
_browse = app._browse
aContext = app.context
branch = aContext.provenanceSpec['branch']
relative = prefixSlash(normpath(relative))
moduleRef = f'{bRep}{org}/{repo}{relative}'
if moduleRef in self.seen:
return True
if org is None or repo is None:
relativeBare = relative.removeprefix('/')
repoLocation = relativeBare
mLocations.append(relativeBare)
commit, local, release = None, None, None
else:
commit, release, local, localBase, localDir = checkoutRepo(backend,
_browse=_browse, org=org, repo=repo, folder=relative,
version=version, checkout=checkout, withPaths=False, keep=
False, silent=silent)
if not localBase:
return False
repoLocation = f'{localBase}/{org}/{repo}'
mLocations.append(f'{localBase}/{localDir}')
seen.add(moduleRef)
if isBase:
app.repoLocation = repoLocation
info = {}
for item in (('doi', None), ('corpus', f'{org}/{repo}{relative}')):
key, default = item
info[key] = getattr(aContext, key) if isBase else specs[key
] if specs and key in specs else default
provenance.append((('corpus', info['corpus']), ('version', version),
('commit', commit or '??'), ('release', release or 'none'), (
'live', provenanceLink(backend, org, repo, version, branch,
commit, local, release, relative)), ('doi', info['doi'])))
return True
def getModulesData(*args):
    """Retrieve all data for a corpus.

    Parameters
    ----------
    args: list
        All parameters needed to retrieve all associated data.
        They are the same as are needed to construct an `AppData` object.
    """
    collector = AppData(*args)
    collector.getModules()
    if collector.good and collector.locations is not None:
        return (collector.locations, collector.modules)
    return None
<|reserved_special_token_1|>
from ..core.helpers import itemize
from ..core.files import backendRep, expandDir, prefixSlash, normpath
from .helpers import splitModRef
from .repo import checkoutRepo
from .links import provenanceLink
# GET DATA FOR MAIN SOURCE AND ALL MODULES
class AppData:
    """Gathers the locations and provenance of all TF data modules of a corpus."""

    def __init__(
        self, app, backend, moduleRefs, locations, modules, version, checkout, silent
    ):
        """Collects TF data according to specifications.

        The specifications are passed as arguments when the object is initialized.

        Parameters
        ----------
        backend: string
            `github` or `gitlab` or a GitLab instance such as `gitlab.huc.knaw.nl`.
        app: obj
            The high-level API object
        moduleRefs: tuple
            Each member consists of a module ref, which is a tuple of information
            that defines a module.
        locations: string|tuple
            One or more directory paths. They will be combined with the `modules`
            argument and used as locations to search for TF data files.
        modules: string|tuple
            One or more directory path segments. They will be appended to the
            paths given by the `locations` argument to form search locations
            for TF data files.
        version: string
            The version of TF data that should be retrieved. Version is a directory
            level just below the search locations.
        checkout: string
            A specifier to use a specific release or commit of a data repository.
        silent: string, optional tf.core.timestamp.SILENT_D
            See `tf.core.timestamp.Timestamp`
        """
        self.backend = backend
        self.app = app

        # normalize the module refs to a list, whatever form they were given in
        if moduleRefs is None:
            self.moduleRefs = []
        elif type(moduleRefs) is str:
            self.moduleRefs = moduleRefs.split(",")
        else:
            self.moduleRefs = list(moduleRefs)

        self.locationsArg = locations
        self.modulesArg = modules
        self.version = version
        self.checkout = checkout
        self.silent = silent

    def getMain(self):
        """Get the main data of the corpus.

        This is specified by the `org`, `repo` and `relative` settings under
        `provenanceSpec` in `config.yaml`.

        See Also
        --------
        tf.advanced.settings: options allowed in `config.yaml`
        """
        app = self.app
        checkout = self.checkout
        ctx = app.context
        org = ctx.org
        repo = ctx.repo
        relative = prefixSlash(ctx.relative)
        appPath = ctx.appPath
        appName = ctx.appName

        if appName.startswith("app:"):
            # an app loaded from a local path: the data sits next to the app code
            parentDir = appPath.rsplit("/", 1)[0]
            relative = f"{parentDir}{relative}"
        elif org is None or repo is None:
            # no repo coordinates: fall back to a purely local checkout
            prefix = f"{appPath}/" if appPath else ""
            relative = f"{prefix}{appName}"
            self.checkout = "local"

        if not self.getModule(
            org, repo, prefixSlash(relative), checkout, isBase=True
        ):
            self.good = False

    def getStandard(self):
        """Get the data of the standard modules specified by the settings of the corpus.

        These are specified in the `moduleSpecs` setting under
        `provenanceSpecs` in `config.yaml`.

        They will be loaded *after* the extra modules specified in the **mod**
        parameter, and only in as far they have not been specified in the
        **mod** parameter. In this way you can pass overriding
        checkout specifiers to the standard modules.

        See Also
        --------
        tf.advanced.settings: options allowed in `config.yaml`
        """
        app = self.app
        loadData = app.loadData
        if not loadData or loadData == "core":
            # corpus is configured not to load extra data at all
            return

        ctx = app.context
        checkout = self.checkout
        backend = self.backend

        for spec in ctx.moduleSpecs or []:
            org = spec["org"]
            repo = spec["repo"]
            relative = spec["relative"]
            thisCheckout = spec.get("checkout", checkout)
            thisBackend = spec.get("backend", backend)
            bRep = backendRep(thisBackend, "spec", default=backend)
            ref = f"{bRep}{org}/{repo}{relative}"
            if ref in self.seen:
                # already loaded through an explicit module ref: do not override
                continue
            ok = self.getModule(
                org, repo, relative, thisCheckout, backend=thisBackend, specs=spec
            )
            if not ok:
                self.good = False

    def getRefs(self):
        """Get data from additional modules.

        These are specified in the `moduleRefs` parameter of `AppData`.
        We store the set of special modules in order to skip them
        later when we are loading the standard modules.
        """
        backend = self.backend

        for ref in self.moduleRefs:
            # strip a trailing `:checkout` specifier before the seen-check
            bare = ref.rsplit(":", 1)[0]
            if bare in self.seen:
                continue

            parts = splitModRef(ref)
            if not parts:
                self.good = False
                continue

            # normalize the relative-path component of the ref
            parts[2] = prefixSlash(normpath(parts[2]))

            refBackend = parts[-1]
            if refBackend is None or refBackend == backend:
                # same backend as the main corpus: no override needed
                refBackend = None

            if not self.getModule(*parts[0:-1], backend=refBackend):
                self.good = False

    def getModules(self):
        """Get data from additional local directories.

        These are specified in the `locations` and `modules` parameters of `AppData`.
        """
        self.provenance = []
        self.mLocations = []
        self.locations = None
        self.modules = None
        self.good = True
        self.seen = set()

        # main corpus first, then explicit refs, then the standard modules
        self.getMain()
        self.getRefs()
        self.getStandard()

        app = self.app
        if not self.good:
            return
        app.mLocations = self.mLocations
        app.provenance = self.provenance

        # the version level applies only when module locations were collected
        mModules = [self.version or ""] if self.mLocations else []

        locations = self.locationsArg
        if locations is None:
            givenLocations = []
        elif type(locations) is str:
            givenLocations = [
                expandDir(app, x.strip()) for x in itemize(locations, "\n")
            ]
        else:
            givenLocations = [str(x) for x in locations]

        modules = self.modulesArg
        if modules is None:
            givenModules = []
        elif type(modules) is str:
            givenModules = [normpath(x.strip()) for x in itemize(modules, "\n")]
        else:
            givenModules = [normpath(str(x)) for x in modules]

        self.locations = self.mLocations + givenLocations
        self.modules = mModules + givenModules

    def getModule(
        self, org, repo, relative, checkout, backend=None, isBase=False, specs=None
    ):
        """Prepare to load a single module.

        Eventually, all TF data will be downloaded from local directories, based
        on a list of location paths and module paths.

        This function computes the contribution of a single module to both the
        location paths and the module paths.

        Parameters
        ----------
        org: string
            GitHub organization or GitLab group of the module
        repo: string:
            GitHub repository or GitLab project of the module
        relative: string
            Path within the repository of the module
        checkout: string
            A specifier to use a specific release or commit of a data repository.
        backend: string
            The backend if different from the backend of the main module
        isBase: boolean, optional False
            Whether this module is the main data of the corpus.
        specs: dict, optional False
            Additional informational attributes of the module, e.g. a DOI
        """
        backend = self.backend if backend is None else backendRep(backend, "norm")
        bRep = backendRep(backend, "spec", default=self.backend)
        version = self.version
        app = self.app
        ctx = app.context
        branch = ctx.provenanceSpec["branch"]

        relative = prefixSlash(normpath(relative))
        moduleRef = f"{bRep}{org}/{repo}{relative}"
        if moduleRef in self.seen:
            # this module has been handled before
            return True

        if org is None or repo is None:
            # a purely local module: use its path as is
            bare = relative.removeprefix("/")
            repoLocation = bare
            self.mLocations.append(bare)
            commit = local = release = None
        else:
            # fetch (or locate) the repository data through the checkout machinery
            (commit, release, local, localBase, localDir) = checkoutRepo(
                backend,
                _browse=app._browse,
                org=org,
                repo=repo,
                folder=relative,
                version=version,
                checkout=checkout,
                withPaths=False,
                keep=False,
                silent=self.silent,
            )
            if not localBase:
                return False
            repoLocation = f"{localBase}/{org}/{repo}"
            self.mLocations.append(f"{localBase}/{localDir}")

        self.seen.add(moduleRef)
        if isBase:
            app.repoLocation = repoLocation

        def pick(key, default):
            # informational attribute: from the app context for the base module,
            # else from the module specs, else the default
            if isBase:
                return getattr(ctx, key)
            if specs and key in specs:
                return specs[key]
            return default

        doi = pick("doi", None)
        corpus = pick("corpus", f"{org}/{repo}{relative}")

        self.provenance.append(
            (
                ("corpus", corpus),
                ("version", version),
                ("commit", commit or "??"),
                ("release", release or "none"),
                (
                    "live",
                    provenanceLink(
                        backend,
                        org,
                        repo,
                        version,
                        branch,
                        commit,
                        local,
                        release,
                        relative,
                    ),
                ),
                ("doi", doi),
            )
        )
        return True
def getModulesData(*args):
    """Retrieve all data for a corpus.

    Parameters
    ----------
    args: list
        All parameters needed to retrieve all associated data.
        They are the same as are needed to construct an `AppData` object.
    """
    collector = AppData(*args)
    collector.getModules()
    if collector.good and collector.locations is not None:
        return (collector.locations, collector.modules)
    return None
|
flexible
|
{
"blob_id": "7be54b2bd99680beed3e8e9cb14225756a71a4ea",
"index": 1135,
"step-1": "<mask token>\n\n\nclass AppData:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AppData:\n\n def __init__(self, app, backend, moduleRefs, locations, modules,\n version, checkout, silent):\n \"\"\"Collects TF data according to specifications.\n\n The specifications are passed as arguments when the object is initialized.\n\n Parameters\n ----------\n backend: string\n `github` or `gitlab` or a GitLab instance such as `gitlab.huc.knaw.nl`.\n app: obj\n The high-level API object\n moduleRefs: tuple\n Each member consists of a module ref, which is a tuple of information\n that defines a module.\n locations: string|tuple\n One or more directory paths. They will be combined with the `modules`\n argument and used as locations to search for TF data files.\n modules: string|tuple\n One or more directory path segments. They will be appended to the\n paths given by the `locations` argument to form search locations\n for TF data files.\n version: string\n The version of TF data that should be retrievend. Version is a directory\n level just below the search locations.\n checkout: string\n A specifier to use a specific release or commit of a data repository.\n silent: string, optional tf.core.timestamp.SILENT_D\n See `tf.core.timestamp.Timestamp`\n\n \"\"\"\n self.backend = backend\n self.app = app\n self.moduleRefs = [] if moduleRefs is None else moduleRefs.split(','\n ) if type(moduleRefs) is str else list(moduleRefs)\n self.locationsArg = locations\n self.modulesArg = modules\n self.version = version\n self.checkout = checkout\n self.silent = silent\n\n def getMain(self):\n \"\"\"Get the main data of the corpus.\n\n This is specified by the `org`, `repo` and `relative` settings under\n `provenanceSpec` in `config.yaml`.\n\n See Also\n --------\n tf.advanced.settings: options allowed in `config.yaml`\n \"\"\"\n app = self.app\n checkout = self.checkout\n aContext = app.context\n org = aContext.org\n repo = aContext.repo\n relative = prefixSlash(aContext.relative)\n appPath = aContext.appPath\n appName = aContext.appName\n 
if appName.startswith('app:'):\n appParent = appPath.rsplit('/', 1)[0]\n relative = f'{appParent}{relative}'\n elif org is None or repo is None:\n appPathRep = f'{appPath}/' if appPath else ''\n relative = f'{appPathRep}{appName}'\n self.checkout = 'local'\n if not self.getModule(org, repo, prefixSlash(relative), checkout,\n isBase=True):\n self.good = False\n <mask token>\n\n def getRefs(self):\n \"\"\"Get data from additional modules.\n\n These are specified in the `moduleRefs` parameter of `AppData`.\n We store the set of special modules in order to skip them\n later when we are loading the standard modules.\n \"\"\"\n backend = self.backend\n refs = self.moduleRefs\n for ref in refs:\n refPure = ref.rsplit(':', 1)[0]\n if refPure in self.seen:\n continue\n parts = splitModRef(ref)\n if not parts:\n self.good = False\n continue\n parts[2] = prefixSlash(normpath(parts[2]))\n theBackend = None if parts[-1] is None or parts[-1\n ] == backend else parts[-1]\n if not self.getModule(*parts[0:-1], backend=theBackend):\n self.good = False\n\n def getModules(self):\n \"\"\"Get data from additional local directories.\n\n These are specified in the `locations` and `modules` parameters of `AppData`.\n \"\"\"\n self.provenance = []\n provenance = self.provenance\n self.mLocations = []\n mLocations = self.mLocations\n self.locations = None\n self.modules = None\n self.good = True\n self.seen = set()\n self.getMain()\n self.getRefs()\n self.getStandard()\n version = self.version\n good = self.good\n app = self.app\n if good:\n app.mLocations = mLocations\n app.provenance = provenance\n else:\n return\n mModules = []\n if mLocations:\n mModules.append(version or '')\n locations = self.locationsArg\n modules = self.modulesArg\n givenLocations = [] if locations is None else [expandDir(app, x.\n strip()) for x in itemize(locations, '\\n')] if type(locations\n ) is str else [str(x) for x in locations]\n givenModules = [] if modules is None else [normpath(x.strip()) for\n x in 
itemize(modules, '\\n')] if type(modules) is str else [normpath\n (str(x)) for x in modules]\n self.locations = mLocations + givenLocations\n self.modules = mModules + givenModules\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass AppData:\n\n def __init__(self, app, backend, moduleRefs, locations, modules,\n version, checkout, silent):\n \"\"\"Collects TF data according to specifications.\n\n The specifications are passed as arguments when the object is initialized.\n\n Parameters\n ----------\n backend: string\n `github` or `gitlab` or a GitLab instance such as `gitlab.huc.knaw.nl`.\n app: obj\n The high-level API object\n moduleRefs: tuple\n Each member consists of a module ref, which is a tuple of information\n that defines a module.\n locations: string|tuple\n One or more directory paths. They will be combined with the `modules`\n argument and used as locations to search for TF data files.\n modules: string|tuple\n One or more directory path segments. They will be appended to the\n paths given by the `locations` argument to form search locations\n for TF data files.\n version: string\n The version of TF data that should be retrievend. Version is a directory\n level just below the search locations.\n checkout: string\n A specifier to use a specific release or commit of a data repository.\n silent: string, optional tf.core.timestamp.SILENT_D\n See `tf.core.timestamp.Timestamp`\n\n \"\"\"\n self.backend = backend\n self.app = app\n self.moduleRefs = [] if moduleRefs is None else moduleRefs.split(','\n ) if type(moduleRefs) is str else list(moduleRefs)\n self.locationsArg = locations\n self.modulesArg = modules\n self.version = version\n self.checkout = checkout\n self.silent = silent\n\n def getMain(self):\n \"\"\"Get the main data of the corpus.\n\n This is specified by the `org`, `repo` and `relative` settings under\n `provenanceSpec` in `config.yaml`.\n\n See Also\n --------\n tf.advanced.settings: options allowed in `config.yaml`\n \"\"\"\n app = self.app\n checkout = self.checkout\n aContext = app.context\n org = aContext.org\n repo = aContext.repo\n relative = prefixSlash(aContext.relative)\n appPath = aContext.appPath\n appName = aContext.appName\n 
if appName.startswith('app:'):\n appParent = appPath.rsplit('/', 1)[0]\n relative = f'{appParent}{relative}'\n elif org is None or repo is None:\n appPathRep = f'{appPath}/' if appPath else ''\n relative = f'{appPathRep}{appName}'\n self.checkout = 'local'\n if not self.getModule(org, repo, prefixSlash(relative), checkout,\n isBase=True):\n self.good = False\n\n def getStandard(self):\n \"\"\"Get the data of the standard modules specified by the settings of the corpus.\n\n These are specified in the `moduleSpecs` setting under\n `provenanceSpecs` in `config.yaml`.\n\n They will be loaded *after* the extra modules specified in the **mod**\n parameter, and only in as far they have not been specifief in the\n **mod** parameter. In this way you can pass overriding\n checkout specifiers to the standard modules.\n\n See Also\n --------\n tf.advanced.settings: options allowed in `config.yaml`\n \"\"\"\n app = self.app\n loadData = app.loadData\n if not loadData or loadData == 'core':\n return\n aContext = app.context\n moduleSpecs = aContext.moduleSpecs\n seen = self.seen\n checkout = self.checkout\n backend = self.backend\n for m in (moduleSpecs or []):\n org = m['org']\n repo = m['repo']\n relative = m['relative']\n theCheckout = m.get('checkout', checkout)\n theBackend = m.get('backend', backend)\n bRep = backendRep(theBackend, 'spec', default=backend)\n ref = f'{bRep}{org}/{repo}{relative}'\n if ref in seen:\n continue\n if not self.getModule(org, repo, relative, theCheckout, backend\n =theBackend, specs=m):\n self.good = False\n\n def getRefs(self):\n \"\"\"Get data from additional modules.\n\n These are specified in the `moduleRefs` parameter of `AppData`.\n We store the set of special modules in order to skip them\n later when we are loading the standard modules.\n \"\"\"\n backend = self.backend\n refs = self.moduleRefs\n for ref in refs:\n refPure = ref.rsplit(':', 1)[0]\n if refPure in self.seen:\n continue\n parts = splitModRef(ref)\n if not parts:\n self.good 
= False\n continue\n parts[2] = prefixSlash(normpath(parts[2]))\n theBackend = None if parts[-1] is None or parts[-1\n ] == backend else parts[-1]\n if not self.getModule(*parts[0:-1], backend=theBackend):\n self.good = False\n\n def getModules(self):\n \"\"\"Get data from additional local directories.\n\n These are specified in the `locations` and `modules` parameters of `AppData`.\n \"\"\"\n self.provenance = []\n provenance = self.provenance\n self.mLocations = []\n mLocations = self.mLocations\n self.locations = None\n self.modules = None\n self.good = True\n self.seen = set()\n self.getMain()\n self.getRefs()\n self.getStandard()\n version = self.version\n good = self.good\n app = self.app\n if good:\n app.mLocations = mLocations\n app.provenance = provenance\n else:\n return\n mModules = []\n if mLocations:\n mModules.append(version or '')\n locations = self.locationsArg\n modules = self.modulesArg\n givenLocations = [] if locations is None else [expandDir(app, x.\n strip()) for x in itemize(locations, '\\n')] if type(locations\n ) is str else [str(x) for x in locations]\n givenModules = [] if modules is None else [normpath(x.strip()) for\n x in itemize(modules, '\\n')] if type(modules) is str else [normpath\n (str(x)) for x in modules]\n self.locations = mLocations + givenLocations\n self.modules = mModules + givenModules\n\n def getModule(self, org, repo, relative, checkout, backend=None, isBase\n =False, specs=None):\n \"\"\"Prepare to load a single module.\n\n Eventually, all TF data will be downloaded from local directories, bases\n on a list of location paths and module paths.\n\n This function computes the contribution of a single module to both the\n location paths and the module paths.\n\n Parameters\n ----------\n org: string\n GitHub organization or GitLab group of the module\n repo: string:\n GitHub repository or GitLab project of the module\n relative: string\n Path within the repository of the module\n checkout: string\n A specifier to use a 
specific release or commit of a data repository.\n backend: string\n The backend if different from the backend of the main module\n isBase: boolean, optional False\n Whether this module is the main data of the corpus.\n specs: dict, optional False\n Additional informational attributes of the module, e.g. a DOI\n \"\"\"\n backend = self.backend if backend is None else backendRep(backend,\n 'norm')\n bRep = backendRep(backend, 'spec', default=self.backend)\n version = self.version\n silent = self.silent\n mLocations = self.mLocations\n provenance = self.provenance\n seen = self.seen\n app = self.app\n _browse = app._browse\n aContext = app.context\n branch = aContext.provenanceSpec['branch']\n relative = prefixSlash(normpath(relative))\n moduleRef = f'{bRep}{org}/{repo}{relative}'\n if moduleRef in self.seen:\n return True\n if org is None or repo is None:\n relativeBare = relative.removeprefix('/')\n repoLocation = relativeBare\n mLocations.append(relativeBare)\n commit, local, release = None, None, None\n else:\n commit, release, local, localBase, localDir = checkoutRepo(backend,\n _browse=_browse, org=org, repo=repo, folder=relative,\n version=version, checkout=checkout, withPaths=False, keep=\n False, silent=silent)\n if not localBase:\n return False\n repoLocation = f'{localBase}/{org}/{repo}'\n mLocations.append(f'{localBase}/{localDir}')\n seen.add(moduleRef)\n if isBase:\n app.repoLocation = repoLocation\n info = {}\n for item in (('doi', None), ('corpus', f'{org}/{repo}{relative}')):\n key, default = item\n info[key] = getattr(aContext, key) if isBase else specs[key\n ] if specs and key in specs else default\n provenance.append((('corpus', info['corpus']), ('version', version),\n ('commit', commit or '??'), ('release', release or 'none'), (\n 'live', provenanceLink(backend, org, repo, version, branch,\n commit, local, release, relative)), ('doi', info['doi'])))\n return True\n\n\ndef getModulesData(*args):\n \"\"\"Retrieve all data for a corpus.\n\n 
Parameters\n ----------\n args: list\n All parameters needed to retrieve all associated data.\n They are the same as are needed to construct an `AppData` object.\n \"\"\"\n mData = AppData(*args)\n mData.getModules()\n if not mData.good or mData.locations is None:\n return None\n return mData.locations, mData.modules\n",
"step-4": "from ..core.helpers import itemize\nfrom ..core.files import backendRep, expandDir, prefixSlash, normpath\nfrom .helpers import splitModRef\nfrom .repo import checkoutRepo\nfrom .links import provenanceLink\n\n\nclass AppData:\n\n def __init__(self, app, backend, moduleRefs, locations, modules,\n version, checkout, silent):\n \"\"\"Collects TF data according to specifications.\n\n The specifications are passed as arguments when the object is initialized.\n\n Parameters\n ----------\n backend: string\n `github` or `gitlab` or a GitLab instance such as `gitlab.huc.knaw.nl`.\n app: obj\n The high-level API object\n moduleRefs: tuple\n Each member consists of a module ref, which is a tuple of information\n that defines a module.\n locations: string|tuple\n One or more directory paths. They will be combined with the `modules`\n argument and used as locations to search for TF data files.\n modules: string|tuple\n One or more directory path segments. They will be appended to the\n paths given by the `locations` argument to form search locations\n for TF data files.\n version: string\n The version of TF data that should be retrievend. 
Version is a directory\n level just below the search locations.\n checkout: string\n A specifier to use a specific release or commit of a data repository.\n silent: string, optional tf.core.timestamp.SILENT_D\n See `tf.core.timestamp.Timestamp`\n\n \"\"\"\n self.backend = backend\n self.app = app\n self.moduleRefs = [] if moduleRefs is None else moduleRefs.split(','\n ) if type(moduleRefs) is str else list(moduleRefs)\n self.locationsArg = locations\n self.modulesArg = modules\n self.version = version\n self.checkout = checkout\n self.silent = silent\n\n def getMain(self):\n \"\"\"Get the main data of the corpus.\n\n This is specified by the `org`, `repo` and `relative` settings under\n `provenanceSpec` in `config.yaml`.\n\n See Also\n --------\n tf.advanced.settings: options allowed in `config.yaml`\n \"\"\"\n app = self.app\n checkout = self.checkout\n aContext = app.context\n org = aContext.org\n repo = aContext.repo\n relative = prefixSlash(aContext.relative)\n appPath = aContext.appPath\n appName = aContext.appName\n if appName.startswith('app:'):\n appParent = appPath.rsplit('/', 1)[0]\n relative = f'{appParent}{relative}'\n elif org is None or repo is None:\n appPathRep = f'{appPath}/' if appPath else ''\n relative = f'{appPathRep}{appName}'\n self.checkout = 'local'\n if not self.getModule(org, repo, prefixSlash(relative), checkout,\n isBase=True):\n self.good = False\n\n def getStandard(self):\n \"\"\"Get the data of the standard modules specified by the settings of the corpus.\n\n These are specified in the `moduleSpecs` setting under\n `provenanceSpecs` in `config.yaml`.\n\n They will be loaded *after* the extra modules specified in the **mod**\n parameter, and only in as far they have not been specifief in the\n **mod** parameter. 
In this way you can pass overriding\n checkout specifiers to the standard modules.\n\n See Also\n --------\n tf.advanced.settings: options allowed in `config.yaml`\n \"\"\"\n app = self.app\n loadData = app.loadData\n if not loadData or loadData == 'core':\n return\n aContext = app.context\n moduleSpecs = aContext.moduleSpecs\n seen = self.seen\n checkout = self.checkout\n backend = self.backend\n for m in (moduleSpecs or []):\n org = m['org']\n repo = m['repo']\n relative = m['relative']\n theCheckout = m.get('checkout', checkout)\n theBackend = m.get('backend', backend)\n bRep = backendRep(theBackend, 'spec', default=backend)\n ref = f'{bRep}{org}/{repo}{relative}'\n if ref in seen:\n continue\n if not self.getModule(org, repo, relative, theCheckout, backend\n =theBackend, specs=m):\n self.good = False\n\n def getRefs(self):\n \"\"\"Get data from additional modules.\n\n These are specified in the `moduleRefs` parameter of `AppData`.\n We store the set of special modules in order to skip them\n later when we are loading the standard modules.\n \"\"\"\n backend = self.backend\n refs = self.moduleRefs\n for ref in refs:\n refPure = ref.rsplit(':', 1)[0]\n if refPure in self.seen:\n continue\n parts = splitModRef(ref)\n if not parts:\n self.good = False\n continue\n parts[2] = prefixSlash(normpath(parts[2]))\n theBackend = None if parts[-1] is None or parts[-1\n ] == backend else parts[-1]\n if not self.getModule(*parts[0:-1], backend=theBackend):\n self.good = False\n\n def getModules(self):\n \"\"\"Get data from additional local directories.\n\n These are specified in the `locations` and `modules` parameters of `AppData`.\n \"\"\"\n self.provenance = []\n provenance = self.provenance\n self.mLocations = []\n mLocations = self.mLocations\n self.locations = None\n self.modules = None\n self.good = True\n self.seen = set()\n self.getMain()\n self.getRefs()\n self.getStandard()\n version = self.version\n good = self.good\n app = self.app\n if good:\n app.mLocations = 
mLocations\n app.provenance = provenance\n else:\n return\n mModules = []\n if mLocations:\n mModules.append(version or '')\n locations = self.locationsArg\n modules = self.modulesArg\n givenLocations = [] if locations is None else [expandDir(app, x.\n strip()) for x in itemize(locations, '\\n')] if type(locations\n ) is str else [str(x) for x in locations]\n givenModules = [] if modules is None else [normpath(x.strip()) for\n x in itemize(modules, '\\n')] if type(modules) is str else [normpath\n (str(x)) for x in modules]\n self.locations = mLocations + givenLocations\n self.modules = mModules + givenModules\n\n def getModule(self, org, repo, relative, checkout, backend=None, isBase\n =False, specs=None):\n \"\"\"Prepare to load a single module.\n\n Eventually, all TF data will be downloaded from local directories, bases\n on a list of location paths and module paths.\n\n This function computes the contribution of a single module to both the\n location paths and the module paths.\n\n Parameters\n ----------\n org: string\n GitHub organization or GitLab group of the module\n repo: string:\n GitHub repository or GitLab project of the module\n relative: string\n Path within the repository of the module\n checkout: string\n A specifier to use a specific release or commit of a data repository.\n backend: string\n The backend if different from the backend of the main module\n isBase: boolean, optional False\n Whether this module is the main data of the corpus.\n specs: dict, optional False\n Additional informational attributes of the module, e.g. 
a DOI\n \"\"\"\n backend = self.backend if backend is None else backendRep(backend,\n 'norm')\n bRep = backendRep(backend, 'spec', default=self.backend)\n version = self.version\n silent = self.silent\n mLocations = self.mLocations\n provenance = self.provenance\n seen = self.seen\n app = self.app\n _browse = app._browse\n aContext = app.context\n branch = aContext.provenanceSpec['branch']\n relative = prefixSlash(normpath(relative))\n moduleRef = f'{bRep}{org}/{repo}{relative}'\n if moduleRef in self.seen:\n return True\n if org is None or repo is None:\n relativeBare = relative.removeprefix('/')\n repoLocation = relativeBare\n mLocations.append(relativeBare)\n commit, local, release = None, None, None\n else:\n commit, release, local, localBase, localDir = checkoutRepo(backend,\n _browse=_browse, org=org, repo=repo, folder=relative,\n version=version, checkout=checkout, withPaths=False, keep=\n False, silent=silent)\n if not localBase:\n return False\n repoLocation = f'{localBase}/{org}/{repo}'\n mLocations.append(f'{localBase}/{localDir}')\n seen.add(moduleRef)\n if isBase:\n app.repoLocation = repoLocation\n info = {}\n for item in (('doi', None), ('corpus', f'{org}/{repo}{relative}')):\n key, default = item\n info[key] = getattr(aContext, key) if isBase else specs[key\n ] if specs and key in specs else default\n provenance.append((('corpus', info['corpus']), ('version', version),\n ('commit', commit or '??'), ('release', release or 'none'), (\n 'live', provenanceLink(backend, org, repo, version, branch,\n commit, local, release, relative)), ('doi', info['doi'])))\n return True\n\n\ndef getModulesData(*args):\n \"\"\"Retrieve all data for a corpus.\n\n Parameters\n ----------\n args: list\n All parameters needed to retrieve all associated data.\n They are the same as are needed to construct an `AppData` object.\n \"\"\"\n mData = AppData(*args)\n mData.getModules()\n if not mData.good or mData.locations is None:\n return None\n return mData.locations, 
mData.modules\n",
"step-5": "from ..core.helpers import itemize\nfrom ..core.files import backendRep, expandDir, prefixSlash, normpath\nfrom .helpers import splitModRef\nfrom .repo import checkoutRepo\nfrom .links import provenanceLink\n\n\n# GET DATA FOR MAIN SOURCE AND ALL MODULES\n\n\nclass AppData:\n def __init__(\n self, app, backend, moduleRefs, locations, modules, version, checkout, silent\n ):\n \"\"\"Collects TF data according to specifications.\n\n The specifications are passed as arguments when the object is initialized.\n\n Parameters\n ----------\n backend: string\n `github` or `gitlab` or a GitLab instance such as `gitlab.huc.knaw.nl`.\n app: obj\n The high-level API object\n moduleRefs: tuple\n Each member consists of a module ref, which is a tuple of information\n that defines a module.\n locations: string|tuple\n One or more directory paths. They will be combined with the `modules`\n argument and used as locations to search for TF data files.\n modules: string|tuple\n One or more directory path segments. They will be appended to the\n paths given by the `locations` argument to form search locations\n for TF data files.\n version: string\n The version of TF data that should be retrievend. 
Version is a directory\n level just below the search locations.\n checkout: string\n A specifier to use a specific release or commit of a data repository.\n silent: string, optional tf.core.timestamp.SILENT_D\n See `tf.core.timestamp.Timestamp`\n\n \"\"\"\n self.backend = backend\n self.app = app\n self.moduleRefs = (\n []\n if moduleRefs is None\n else moduleRefs.split(\",\")\n if type(moduleRefs) is str\n else list(moduleRefs)\n )\n self.locationsArg = locations\n self.modulesArg = modules\n self.version = version\n self.checkout = checkout\n self.silent = silent\n\n def getMain(self):\n \"\"\"Get the main data of the corpus.\n\n This is specified by the `org`, `repo` and `relative` settings under\n `provenanceSpec` in `config.yaml`.\n\n See Also\n --------\n tf.advanced.settings: options allowed in `config.yaml`\n \"\"\"\n\n app = self.app\n checkout = self.checkout\n aContext = app.context\n org = aContext.org\n repo = aContext.repo\n relative = prefixSlash(aContext.relative)\n appPath = aContext.appPath\n appName = aContext.appName\n\n if appName.startswith(\"app:\"):\n appParent = appPath.rsplit(\"/\", 1)[0]\n relative = f\"{appParent}{relative}\"\n elif org is None or repo is None:\n appPathRep = f\"{appPath}/\" if appPath else \"\"\n relative = f\"{appPathRep}{appName}\"\n self.checkout = \"local\"\n\n if not self.getModule(org, repo, prefixSlash(relative), checkout, isBase=True):\n self.good = False\n\n def getStandard(self):\n \"\"\"Get the data of the standard modules specified by the settings of the corpus.\n\n These are specified in the `moduleSpecs` setting under\n `provenanceSpecs` in `config.yaml`.\n\n They will be loaded *after* the extra modules specified in the **mod**\n parameter, and only in as far they have not been specifief in the\n **mod** parameter. 
In this way you can pass overriding\n checkout specifiers to the standard modules.\n\n See Also\n --------\n tf.advanced.settings: options allowed in `config.yaml`\n \"\"\"\n\n app = self.app\n loadData = app.loadData\n\n if not loadData or loadData == \"core\":\n return\n\n aContext = app.context\n moduleSpecs = aContext.moduleSpecs\n seen = self.seen\n checkout = self.checkout\n backend = self.backend\n\n for m in moduleSpecs or []:\n org = m[\"org\"]\n repo = m[\"repo\"]\n relative = m[\"relative\"]\n theCheckout = m.get(\"checkout\", checkout)\n theBackend = m.get(\"backend\", backend)\n bRep = backendRep(theBackend, \"spec\", default=backend)\n\n ref = f\"{bRep}{org}/{repo}{relative}\"\n if ref in seen:\n continue\n\n if not self.getModule(\n org,\n repo,\n relative,\n theCheckout,\n backend=theBackend,\n specs=m,\n ):\n self.good = False\n\n def getRefs(self):\n \"\"\"Get data from additional modules.\n\n These are specified in the `moduleRefs` parameter of `AppData`.\n We store the set of special modules in order to skip them\n later when we are loading the standard modules.\n \"\"\"\n\n backend = self.backend\n refs = self.moduleRefs\n for ref in refs:\n refPure = ref.rsplit(\":\", 1)[0]\n if refPure in self.seen:\n continue\n\n parts = splitModRef(ref)\n if not parts:\n self.good = False\n continue\n\n parts[2] = prefixSlash(normpath(parts[2])) # the relative bit\n theBackend = (\n None if parts[-1] is None or parts[-1] == backend else parts[-1]\n )\n\n if not self.getModule(*parts[0:-1], backend=theBackend):\n self.good = False\n\n def getModules(self):\n \"\"\"Get data from additional local directories.\n\n These are specified in the `locations` and `modules` parameters of `AppData`.\n \"\"\"\n\n self.provenance = []\n provenance = self.provenance\n self.mLocations = []\n mLocations = self.mLocations\n\n self.locations = None\n self.modules = None\n\n self.good = True\n self.seen = set()\n\n self.getMain()\n self.getRefs()\n self.getStandard()\n\n 
version = self.version\n good = self.good\n app = self.app\n\n if good:\n app.mLocations = mLocations\n app.provenance = provenance\n else:\n return\n\n mModules = []\n if mLocations:\n mModules.append(version or \"\")\n\n locations = self.locationsArg\n modules = self.modulesArg\n\n givenLocations = (\n []\n if locations is None\n else [expandDir(app, x.strip()) for x in itemize(locations, \"\\n\")]\n if type(locations) is str\n else [str(x) for x in locations]\n )\n givenModules = (\n []\n if modules is None\n else [normpath(x.strip()) for x in itemize(modules, \"\\n\")]\n if type(modules) is str\n else [normpath(str(x)) for x in modules]\n )\n\n self.locations = mLocations + givenLocations\n self.modules = mModules + givenModules\n\n def getModule(\n self, org, repo, relative, checkout, backend=None, isBase=False, specs=None\n ):\n \"\"\"Prepare to load a single module.\n\n Eventually, all TF data will be downloaded from local directories, bases\n on a list of location paths and module paths.\n\n This function computes the contribution of a single module to both the\n location paths and the module paths.\n\n Parameters\n ----------\n org: string\n GitHub organization or GitLab group of the module\n repo: string:\n GitHub repository or GitLab project of the module\n relative: string\n Path within the repository of the module\n checkout: string\n A specifier to use a specific release or commit of a data repository.\n backend: string\n The backend if different from the backend of the main module\n isBase: boolean, optional False\n Whether this module is the main data of the corpus.\n specs: dict, optional False\n Additional informational attributes of the module, e.g. 
a DOI\n \"\"\"\n\n backend = self.backend if backend is None else backendRep(backend, \"norm\")\n bRep = backendRep(backend, \"spec\", default=self.backend)\n version = self.version\n silent = self.silent\n mLocations = self.mLocations\n provenance = self.provenance\n seen = self.seen\n app = self.app\n _browse = app._browse\n aContext = app.context\n branch = aContext.provenanceSpec[\"branch\"]\n\n relative = prefixSlash(normpath(relative))\n\n moduleRef = f\"{bRep}{org}/{repo}{relative}\"\n if moduleRef in self.seen:\n return True\n\n if org is None or repo is None:\n relativeBare = relative.removeprefix(\"/\")\n repoLocation = relativeBare\n mLocations.append(relativeBare)\n (commit, local, release) = (None, None, None)\n else:\n (commit, release, local, localBase, localDir) = checkoutRepo(\n backend,\n _browse=_browse,\n org=org,\n repo=repo,\n folder=relative,\n version=version,\n checkout=checkout,\n withPaths=False,\n keep=False,\n silent=silent,\n )\n if not localBase:\n return False\n\n repoLocation = f\"{localBase}/{org}/{repo}\"\n mLocations.append(f\"{localBase}/{localDir}\")\n\n seen.add(moduleRef)\n if isBase:\n app.repoLocation = repoLocation\n\n info = {}\n for item in (\n (\"doi\", None),\n (\"corpus\", f\"{org}/{repo}{relative}\"),\n ):\n (key, default) = item\n info[key] = (\n getattr(aContext, key)\n if isBase\n else specs[key]\n if specs and key in specs\n else default\n )\n provenance.append(\n (\n (\"corpus\", info[\"corpus\"]),\n (\"version\", version),\n (\"commit\", commit or \"??\"),\n (\"release\", release or \"none\"),\n (\n \"live\",\n provenanceLink(\n backend, org, repo, version, branch, commit, local, release, relative\n ),\n ),\n (\"doi\", info[\"doi\"]),\n )\n )\n return True\n\n\ndef getModulesData(*args):\n \"\"\"Retrieve all data for a corpus.\n\n Parameters\n ----------\n args: list\n All parameters needed to retrieve all associated data.\n They are the same as are needed to construct an `AppData` object.\n \"\"\"\n\n mData = 
AppData(*args)\n mData.getModules()\n\n if not mData.good or mData.locations is None:\n return None\n\n return (mData.locations, mData.modules)\n",
"step-ids": [
1,
5,
8,
9,
10
]
}
|
[
1,
5,
8,
9,
10
] |
import random
from common.ast import *
from mutate.mutate_ctrl import *
def _check_parent_type(node, nodes, types):
par = node
while(nodes[par] != None):
par = nodes[par]
if type(par) in types:
return True
return False
def mutate_operator(root, nodes, path):
    """Replace one operator node with a different operator of the same family.

    Picks a random eligible operator node, swaps its class for another
    member of its operator group, saves the mutated AST to *path*, and
    returns the mutated line number (-1 when no candidate exists).
    """
    eligible = [n for n in nodes
                if type(n) in OP_TYPES
                and _check_parent_type(n, nodes, OP_PARENT_TYPES)]
    if not eligible:
        return -1

    target = random.choice(eligible)
    family = OP_MAP[OP_TYPES[type(target)]]
    # Choose any operator class from the same family except the current one.
    replacement = random.choice([cls for cls in family
                                 if cls is not type(target)])
    target.__class__ = replacement
    save_ast(root, path)
    return target.lineno
def mutate_signal(root, nodes, path):
    """Rename one identifier to a different signal of the same signal type.

    Picks a random eligible Identifier node, replaces its name with another
    signal of the same type, saves the mutated AST to *path*, and returns
    the mutated line number.  Returns -1 when no mutation could be made
    (no candidates, or 1000 retries exhausted).
    """
    candidates = [node
                  for node in nodes.keys()
                  if type(node) == Identifier
                  and _check_parent_type(node, nodes, SIG_PARENT_TYPES)]
    if len(candidates) == 0:
        return -1

    sigs = get_signals(root)
    for _trial in range(1000):
        mut_node = random.choice(candidates)
        name = mut_node.name
        if name not in sigs:
            # Bug fix: previously the AST was saved and a line number
            # returned even though nothing was mutated; retry instead.
            continue
        sig_type = sigs[name]
        # Alternative signals of the same type (excluding the current name).
        choices = [sig for sig in sigs[sig_type] if sig != name]
        if len(choices) == 0:
            continue
        mut_node.name = random.choice(choices)
        save_ast(root, path)
        return mut_node.lineno

    # Bug fix: previously fell through and implicitly returned None;
    # report failure with -1 like the other mutators.
    return -1
def mutate_constant(root, nodes, path):
    """Constant mutation is not implemented yet; always report failure (-1)."""
    return -1
def mutate_operand(root, nodes, path):
    """Operand mutation is not implemented yet; always report failure (-1)."""
    return -1
|
normal
|
{
"blob_id": "c0524301a79788aa34a039fc46799021fb45362c",
"index": 7141,
"step-1": "<mask token>\n\n\ndef mutate_operator(root, nodes, path):\n candidates = [node for node in nodes.keys() if type(node) in OP_TYPES.\n keys() and _check_parent_type(node, nodes, OP_PARENT_TYPES)]\n if len(candidates) == 0:\n return -1\n mut_node = random.choice(candidates)\n type_idx = OP_TYPES[type(mut_node)]\n new_node_type = random.choice([types for types in OP_MAP[type_idx] if \n types != type(mut_node)])\n mut_node.__class__ = new_node_type\n save_ast(root, path)\n return mut_node.lineno\n\n\n<mask token>\n\n\ndef mutate_constant(root, nodes, path):\n return -1\n\n\ndef mutate_operand(root, nodes, path):\n return -1\n",
"step-2": "<mask token>\n\n\ndef mutate_operator(root, nodes, path):\n candidates = [node for node in nodes.keys() if type(node) in OP_TYPES.\n keys() and _check_parent_type(node, nodes, OP_PARENT_TYPES)]\n if len(candidates) == 0:\n return -1\n mut_node = random.choice(candidates)\n type_idx = OP_TYPES[type(mut_node)]\n new_node_type = random.choice([types for types in OP_MAP[type_idx] if \n types != type(mut_node)])\n mut_node.__class__ = new_node_type\n save_ast(root, path)\n return mut_node.lineno\n\n\ndef mutate_signal(root, nodes, path):\n candidates = [node for node in nodes.keys() if type(node) == Identifier and\n _check_parent_type(node, nodes, SIG_PARENT_TYPES)]\n if len(candidates) == 0:\n return -1\n sigs = get_signals(root)\n trial = 0\n while trial < 1000:\n trial += 1\n mut_node = random.choice(candidates)\n name = mut_node.name\n if name in sigs.keys():\n sig_type = sigs[name]\n choices = [sig for sig in sigs[sig_type] if sig != name]\n if len(choices) == 0:\n continue\n new_name = random.choice(choices)\n mut_node.name = new_name\n save_ast(root, path)\n return mut_node.lineno\n\n\ndef mutate_constant(root, nodes, path):\n return -1\n\n\ndef mutate_operand(root, nodes, path):\n return -1\n",
"step-3": "<mask token>\n\n\ndef _check_parent_type(node, nodes, types):\n par = node\n while nodes[par] != None:\n par = nodes[par]\n if type(par) in types:\n return True\n return False\n\n\ndef mutate_operator(root, nodes, path):\n candidates = [node for node in nodes.keys() if type(node) in OP_TYPES.\n keys() and _check_parent_type(node, nodes, OP_PARENT_TYPES)]\n if len(candidates) == 0:\n return -1\n mut_node = random.choice(candidates)\n type_idx = OP_TYPES[type(mut_node)]\n new_node_type = random.choice([types for types in OP_MAP[type_idx] if \n types != type(mut_node)])\n mut_node.__class__ = new_node_type\n save_ast(root, path)\n return mut_node.lineno\n\n\ndef mutate_signal(root, nodes, path):\n candidates = [node for node in nodes.keys() if type(node) == Identifier and\n _check_parent_type(node, nodes, SIG_PARENT_TYPES)]\n if len(candidates) == 0:\n return -1\n sigs = get_signals(root)\n trial = 0\n while trial < 1000:\n trial += 1\n mut_node = random.choice(candidates)\n name = mut_node.name\n if name in sigs.keys():\n sig_type = sigs[name]\n choices = [sig for sig in sigs[sig_type] if sig != name]\n if len(choices) == 0:\n continue\n new_name = random.choice(choices)\n mut_node.name = new_name\n save_ast(root, path)\n return mut_node.lineno\n\n\ndef mutate_constant(root, nodes, path):\n return -1\n\n\ndef mutate_operand(root, nodes, path):\n return -1\n",
"step-4": "import random\nfrom common.ast import *\nfrom mutate.mutate_ctrl import *\n\n\ndef _check_parent_type(node, nodes, types):\n par = node\n while nodes[par] != None:\n par = nodes[par]\n if type(par) in types:\n return True\n return False\n\n\ndef mutate_operator(root, nodes, path):\n candidates = [node for node in nodes.keys() if type(node) in OP_TYPES.\n keys() and _check_parent_type(node, nodes, OP_PARENT_TYPES)]\n if len(candidates) == 0:\n return -1\n mut_node = random.choice(candidates)\n type_idx = OP_TYPES[type(mut_node)]\n new_node_type = random.choice([types for types in OP_MAP[type_idx] if \n types != type(mut_node)])\n mut_node.__class__ = new_node_type\n save_ast(root, path)\n return mut_node.lineno\n\n\ndef mutate_signal(root, nodes, path):\n candidates = [node for node in nodes.keys() if type(node) == Identifier and\n _check_parent_type(node, nodes, SIG_PARENT_TYPES)]\n if len(candidates) == 0:\n return -1\n sigs = get_signals(root)\n trial = 0\n while trial < 1000:\n trial += 1\n mut_node = random.choice(candidates)\n name = mut_node.name\n if name in sigs.keys():\n sig_type = sigs[name]\n choices = [sig for sig in sigs[sig_type] if sig != name]\n if len(choices) == 0:\n continue\n new_name = random.choice(choices)\n mut_node.name = new_name\n save_ast(root, path)\n return mut_node.lineno\n\n\ndef mutate_constant(root, nodes, path):\n return -1\n\n\ndef mutate_operand(root, nodes, path):\n return -1\n",
"step-5": "import random\n\nfrom common.ast import *\nfrom mutate.mutate_ctrl import *\n\ndef _check_parent_type(node, nodes, types):\n par = node\n while(nodes[par] != None):\n par = nodes[par]\n if type(par) in types:\n return True\n return False\n\ndef mutate_operator(root, nodes, path):\n candidates = [node \n for node in nodes.keys() \n if type(node) in OP_TYPES.keys()\n and _check_parent_type(node, nodes, OP_PARENT_TYPES)]\n \n if len(candidates) == 0:\n return -1\n\n mut_node = random.choice(candidates)\n type_idx = OP_TYPES[type(mut_node)]\n new_node_type = random.choice([types for types in OP_MAP[type_idx] if types != type(mut_node)])\n\n mut_node.__class__ = new_node_type\n\n save_ast(root, path)\n \n return mut_node.lineno\n\ndef mutate_signal(root, nodes, path):\n candidates = [node \n for node in nodes.keys()\n if type(node) == Identifier\n and _check_parent_type(node, nodes, SIG_PARENT_TYPES)]\n\n if len(candidates) == 0:\n return -1 \n\n sigs = get_signals(root)\n\n trial = 0\n while (trial < 1000):\n trial += 1\n\n mut_node = random.choice(candidates)\n name = mut_node.name\n if name in sigs.keys():\n sig_type = sigs[name]\n choices = [sig for sig in sigs[sig_type] if sig != name]\n if len(choices) == 0:\n continue\n new_name = random.choice(choices)\n mut_node.name = new_name\n\n save_ast(root, path)\n\n return mut_node.lineno\n\ndef mutate_constant(root, nodes, path):\n return -1\n\ndef mutate_operand(root, nodes, path):\n return -1 \n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 17:34:32 2019
@author: fanlizhou
Analyze codon usage of sequence from 'SP_gene_seq.txt' and 'LP_gene_seq.txt'
Plot heatmap of amino acid usage and codon usage
Plot codon usage in each gene for each amino acid. Genes were arranged so that
the gene expression of SP decrease from 0 to 50 (x-axis) and the gene expression
of LP increase from 51 to 100 (x-axis)
Usage: codon_usage.py [-h] [--label LABEL] sp_file lp_file
Options:
--label Define the label of out-put files. Default="top"
sp_file Path to the SP data files
lp_file Path to the LP data files
"""
import io, os, argparse, collections
from scipy import stats
import matplotlib.pyplot as plt
import numpy as np
def parse_args():
    """Parse the command line and verify that both input files exist.

    Returns the argparse namespace with `sp_file`, `lp_file` and `label`.
    Exits with an argparse error when an input file is missing.
    """
    parser = argparse.ArgumentParser(
        description='Analyze codon usage of SP and LP\n')
    parser.add_argument('sp_file', help='one input SP data file\n')
    parser.add_argument('lp_file', help='one input LP data file\n')
    parser.add_argument(
        '--label', '-l',
        type=str, required=False, default='top',
        help='Define the label of out-put files. Default="top"\n')
    args = parser.parse_args()

    # Fail fast on the first missing input file (SP checked before LP).
    missing = [p for p in (args.sp_file, args.lp_file)
               if not os.path.isfile(p)]
    if missing:
        parser.error('File "%s" cannot be found.' % (missing[0]))
    return args
# a Codon_Usage class to store codon usage information for each genotype
class Codon_Usage:
    """Codon-usage statistics for the genes of one genotype.

    Parses a FASTA-like sequence file on construction; `seq` holds the
    kept (triple-length) gene sequences and `gene_num` their count.
    """

    # dict key: codon -> three-letter amino-acid code ('STOP' for stops).
    # Hoisted to a class attribute so the table is built once, not per call.
    _CODON_MAP = {
        'TTT': 'Phe', 'TTC': 'Phe', 'TTA': 'Leu', 'TTG': 'Leu',
        'TCT': 'Ser', 'TCC': 'Ser', 'TCA': 'Ser', 'TCG': 'Ser',
        'TAT': 'Tyr', 'TAC': 'Tyr', 'TAA': 'STOP', 'TAG': 'STOP',
        'TGT': 'Cys', 'TGC': 'Cys', 'TGA': 'STOP', 'TGG': 'Trp',
        'CTT': 'Leu', 'CTC': 'Leu', 'CTA': 'Leu', 'CTG': 'Leu',
        'CCT': 'Pro', 'CCC': 'Pro', 'CCA': 'Pro', 'CCG': 'Pro',
        'CAT': 'His', 'CAC': 'His', 'CAA': 'Gln', 'CAG': 'Gln',
        'CGT': 'Arg', 'CGC': 'Arg', 'CGA': 'Arg', 'CGG': 'Arg',
        'ATT': 'Ile', 'ATC': 'Ile', 'ATA': 'Ile', 'ATG': 'Met',
        'ACT': 'Thr', 'ACC': 'Thr', 'ACA': 'Thr', 'ACG': 'Thr',
        'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys', 'AAG': 'Lys',
        'AGT': 'Ser', 'AGC': 'Ser', 'AGA': 'Arg', 'AGG': 'Arg',
        'GTT': 'Val', 'GTC': 'Val', 'GTA': 'Val', 'GTG': 'Val',
        'GCT': 'Ala', 'GCC': 'Ala', 'GCA': 'Ala', 'GCG': 'Ala',
        'GAT': 'Asp', 'GAC': 'Asp', 'GAA': 'Glu', 'GAG': 'Glu',
        'GGT': 'Gly', 'GGC': 'Gly', 'GGA': 'Gly', 'GGG': 'Gly'}

    # dict key: amino acid -> its synonymous codons.
    # Insertion order is preserved so plots keep a stable AA/codon order.
    _AA_MAP = {
        'Phe': ['TTT', 'TTC'],
        'Leu': ['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],
        'Ser': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'],
        'Tyr': ['TAT', 'TAC'],
        'STOP': ['TAA', 'TAG', 'TGA'],
        'Cys': ['TGT', 'TGC'],
        'Trp': ['TGG'],
        'Pro': ['CCT', 'CCC', 'CCA', 'CCG'],
        'His': ['CAT', 'CAC'],
        'Gln': ['CAA', 'CAG'],
        'Arg': ['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'],
        'Ile': ['ATT', 'ATC', 'ATA'],
        'Met': ['ATG'],
        'Thr': ['ACT', 'ACC', 'ACA', 'ACG'],
        'Asn': ['AAT', 'AAC'],
        'Lys': ['AAA', 'AAG'],
        'Val': ['GTT', 'GTC', 'GTA', 'GTG'],
        'Ala': ['GCT', 'GCC', 'GCA', 'GCG'],
        'Asp': ['GAT', 'GAC'],
        'Glu': ['GAA', 'GAG'],
        'Gly': ['GGT', 'GGC', 'GGA', 'GGG'],
    }

    def __init__(self, filename):
        """Read *filename* and keep the triple-length gene sequences."""
        self.seq, self.gene_num = self.get_seq(filename)

    def get_seq(self, filename):
        """Parse a FASTA-like file into a list of gene sequences.

        Genes whose length is not a multiple of three are discarded.

        Returns
        -------
        tuple
            (list of kept sequences, number of kept genes).
        """
        all_seq = []           # kept (triple-length) gene sequences
        gene_seq = ''          # sequence of the gene currently being read
        count_all = 0          # header lines seen (= genes in the file)
        count_non_triple = 0   # genes discarded for non-triple length

        def _flush():
            # Classify the finished gene and reset the accumulator.
            nonlocal gene_seq, count_non_triple
            if gene_seq != '':
                if len(gene_seq) % 3:
                    count_non_triple += 1
                else:
                    all_seq.append(gene_seq)
                gene_seq = ''

        # Context manager guarantees the file is closed even on error.
        with open(filename) as file:
            for line in file:
                if line[0] == '>':   # header line starts a new gene
                    count_all += 1
                    _flush()
                else:                # sequence line (a gene may span many)
                    gene_seq += line.strip()
        # Bug fix: the last gene has no trailing header line, so it was
        # previously never appended (and never counted as non-triple).
        _flush()

        print('%s:\n%d genes added\n%d are non-triple\n' %
              (filename[:2], count_all, count_non_triple))

        return (all_seq, count_all - count_non_triple)

    def get_AA(self, codon):
        """Return the three-letter amino-acid code for *codon* ('' if unknown)."""
        return self._CODON_MAP.get(codon, '')

    def get_usage_dict(self, seq):
        """Count codon usage within one gene sequence.

        Returns a nested defaultdict mapping
            AA -> [ {codon -> [count, count / AA_count]}, AA_count ]
        so that looking up an absent AA or codon yields zero entries.
        """
        usage_dict = collections.defaultdict(
            lambda: [collections.defaultdict(lambda: [0, 0]), 0])

        # Tally amino-acid and codon occurrences, codon by codon.
        for index in range(0, len(seq), 3):
            codon = seq[index:index + 3]
            AA = self.get_AA(codon)
            if AA:
                usage_dict[AA][1] += 1            # times the AA appears
                usage_dict[AA][0][codon][0] += 1  # times this codon is used

        # Convert raw codon counts into fractions of the AA total.
        for AA in usage_dict:
            for codon in usage_dict[AA][0]:
                usage_dict[AA][0][codon][1] = \
                    usage_dict[AA][0][codon][0] / usage_dict[AA][1]

        return usage_dict

    def get_AA_dict(self):
        """Collect per-gene codon-usage percentages for the whole genotype.

        Returns a nested defaultdict mapping
            AA -> codon -> list of usage fractions, one entry per gene
        (genes in the same order as `self.seq`; 0 when the amino acid is
        absent from a gene).
        """
        AA_dict = collections.defaultdict(
            lambda: collections.defaultdict(list))

        # Codon-usage statistics for every kept gene.
        usage_dict_list = [self.get_usage_dict(seq) for seq in self.seq]

        for AA in list(self._AA_MAP.keys()):
            for codon in self._AA_MAP[AA]:
                for usage_dict in usage_dict_list:
                    # Absent codons read back as 0 thanks to the defaultdicts.
                    AA_dict[AA][codon].append(usage_dict[AA][0][codon][1])

        return AA_dict
def heatmap_SP_LP(sp_AA_dict, lp_AA_dict, label):
    """Compare SP and LP codon usage statistically and plot heatmaps.

    For every codon a Welch t-test compares its per-gene usage between SP
    and LP; for every amino acid a chi-square test compares the mean codon
    profiles.  Both p-value matrices are drawn as annotated heatmaps and
    the amino acids with significantly different codon usage are returned
    (as selected by choose_codons).
    """
    AA_chisquare = []   # chi-square p-values, six AAs per row
    AA_text = []        # matching heatmap annotations
    codon_ttest = []    # t-test p-values, eight codons per row
    codon_text = []     # matching heatmap annotations
    i = 0               # codons placed so far
    j = 0               # amino acids placed so far
    count_all = 0       # codons with a valid t-test
    count_sig = 0       # codons with p < 0.5

    for AA in list(sp_AA_dict.keys()):
        # Mean codon-usage profile of this AA for each group.
        sp_codon_mean = []
        lp_codon_mean = []
        for codon in sp_AA_dict[AA]:
            # Welch t-test (unequal variances) on per-gene usage fractions.
            p_val = stats.ttest_ind(sp_AA_dict[AA][codon],
                                    lp_AA_dict[AA][codon],
                                    equal_var=False)[1]
            # Start a new heatmap row every eight codons.
            if not i % 8:
                codon_ttest.append([])
                codon_text.append([])
            i += 1
            if np.isnan(p_val):
                codon_ttest[-1].append(0)
                codon_text[-1].append(codon + '\n NA')
            else:
                codon_ttest[-1].append(p_val)
                codon_text[-1].append(codon + '\n' + str(round(p_val, 2)))
                count_all += 1
                if p_val < 0.5:
                    count_sig += 1
            sp_codon_mean.append(np.mean(sp_AA_dict[AA][codon]))
            lp_codon_mean.append(np.mean(lp_AA_dict[AA][codon]))

        # Chi-square over the flattened SP/LP mean profiles of this AA.
        p_val = stats.chisquare(np.array([sp_codon_mean, lp_codon_mean]),
                                axis=None)[1]
        # Start a new heatmap row every six amino acids.
        if not j % 6:
            AA_chisquare.append([])
            AA_text.append([])
        j += 1
        if np.isnan(p_val):
            AA_chisquare[-1].append(0)
            AA_text[-1].append(AA + '\n NA')
        else:
            AA_chisquare[-1].append(p_val)
            AA_text[-1].append(AA + '\n' + str(round(p_val, 2)))

    # Pad only a genuinely partial last AA row.  Bug fix: when the AA count
    # was an exact multiple of six, the old code appended six extra cells to
    # an already-full row, producing a ragged matrix.
    if j % 6:
        for n in range(j % 6, 6):
            AA_chisquare[-1].append(0)
            AA_text[-1].append('')

    # Amino acids with significantly different codon usage (p < 0.01).
    AAs = choose_codons(codon_ttest, codon_text)

    AA_chisquare = np.array(AA_chisquare)
    codon_ttest = np.array(codon_ttest)
    AA_text = np.array(AA_text)
    codon_text = np.array(codon_text)

    print('%d out of %d codon show significant usage difference between SP and LP genes (p_value < 0.5)\n' %
          (count_sig, count_all))

    plot_heatmap(AA_chisquare, AA_text, 'AAs_ChiSquare', label)
    plot_heatmap(codon_ttest, codon_text, 'Codons_ttest', label)

    return AAs
def plot_heatmap(data, text, cbarlabel, label):
    """Draw one annotated heatmap and save it under ../results/.

    *data* and *text* are same-shaped matrices of values and cell labels;
    *cbarlabel* names the colorbar and the output file; *label* tags the
    file name.
    """
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5))
    im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)
    annotate_heatmap(im, text)
    fig.tight_layout()
    # Save before showing so the figure is written even if an interactive
    # backend takes over the figure on show.
    plt.savefig(f'../results/{cbarlabel}_{label}.png')
    # Bug fix: 'plt.show' without parentheses was a no-op attribute access.
    plt.show()
def heatmap(data, ax, cmap, cbarlabel):
    """Draw *data* as an image on *ax* with colormap *cmap*.

    Attaches a colorbar labelled *cbarlabel* and returns (image, colorbar)
    so the caller can annotate the cells afterwards.
    """
    # Fall back to the current axes when none was supplied.
    if not ax:
        ax = plt.gca()
    im = ax.imshow(data, cmap)
    cbar = ax.figure.colorbar(im, ax=ax)
    # One tick per row/column, labelled with its index.
    ax.set_xticks(np.arange(data.shape[1]))
    ax.set_yticks(np.arange(data.shape[0]))
    ax.set_xticklabels(range(data.shape[1]))
    ax.set_yticklabels(range(data.shape[0]))
    ax.tick_params(top=False, bottom=True,
                   labeltop=False, labelbottom=True)
    # draw white space between squares
    for edge, spine in ax.spines.items():
        spine.set_visible(False)
    # Minor ticks sit on the cell borders; the white minor grid separates
    # the cells visually.
    ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor = True)
    ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor = True)
    ax.grid(which = 'minor', color = 'w', linestyle = '-', linewidth = 3)
    ax.tick_params(which = 'minor', bottom = False, left = False)
    cbar.ax.set_ylabel(cbarlabel, va = 'top')
    return im, cbar
def annotate_heatmap(im, text_label):
    """Write text_label[i, j] into each cell of heatmap image *im*.

    Text color switches from black to white once the normalized cell value
    passes half of the color range, keeping the labels readable.
    """
    textcolors = ['black','white']
    data = im.get_array()
    # set threshold to decide color
    threshold = im.norm(data.max()) / 2
    kw = dict(horizontalalignment = 'center',
              verticalalignment = 'center')
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            # White text on dark (high-value) cells, black on light cells.
            kw.update(color = textcolors[im.norm(data[i,j]) > threshold])
            im.axes.text(j, i, text_label[i,j], **kw)
def choose_codons(ttest, text):
    """Select amino acids whose codon usage differs significantly.

    *ttest* and *text* are the parallel 2-D lists built by heatmap_SP_LP;
    each text cell starts with the three-letter codon.  A codon counts
    when its t-test p-value is below 0.01 and its amino acid has exactly
    two codon choices.  The selection is written to 'AAs_to_compare.txt'
    and the list of amino-acid names is returned.
    """
    # dict key: codon -> AA
    # only contains AAs with only two codon choices
    codon_map = {
        'TTT':'Phe', 'TTC':'Phe', 'TAT':'Tyr', 'TAC':'Tyr',
        'TGT':'Cys', 'TGC':'Cys', 'CAT':'His', 'CAC':'His',
        'CAA':'Gln', 'CAG':'Gln', 'AAT':'Asn', 'AAC':'Asn',
        'AAA':'Lys', 'AAG':'Lys', 'GAT':'Asp', 'GAC':'Asp',
        'GAA':'Glu', 'GAG':'Glu'}

    # AA -> list of its significant codons (in discovery order).
    codon_dict = collections.defaultdict(list)
    for i in range(len(ttest)):
        for j in range(len(ttest[i])):
            if ttest[i][j] < 0.01:
                # The annotation text begins with the codon itself.
                codon = text[i][j][:3]
                if codon in codon_map:
                    codon_dict[codon_map[codon]].append(codon)

    # AAs that have only two codon choices and show significant
    # codon usage difference between SP and LP groups.
    AAs = []
    # Context manager so the report file is always closed.
    with open('AAs_to_compare.txt', 'w') as file:
        file.write('Compare following AAs\n')
        for AA in codon_dict.keys():
            AAs.append(AA)
            if len(codon_dict[AA]) == 2:
                file.write('%s: %s, %s\n' %
                           (AA, codon_dict[AA][0], codon_dict[AA][1]))
            else:
                file.write('%s: %s\n' % (AA, codon_dict[AA][0]))
    return AAs
def plot_SP_LP(sp_AA_dict, lp_AA_dict):
    """Plot SP (x 0-50) and LP (x 50-100) codon-usage curves per amino acid.

    NOTE(review): the LP usage lists are reversed *in place* so that gene
    expression increases along the x-axis — lp_AA_dict is mutated.
    """
    for AA in list(sp_AA_dict.keys()):
        names = []       # codon names, one per curve pair
        series = []      # [sp_usage_list, lp_usage_list] per codon
        for codon in sp_AA_dict[AA]:
            # LP group data is displayed from lowest- to highest-expressed
            # genes, hence the in-place reversal.
            lp_AA_dict[AA][codon].reverse()
            names.append(codon)
            series.append([sp_AA_dict[AA][codon], lp_AA_dict[AA][codon]])
        codon_usage_plot(series, AA, names)
def codon_usage_plot(data, AA, codons):
    """Draw paired SP/LP usage curves for one amino acid.

    *data* holds one [sp_series, lp_series] pair per codon; SP occupies
    x 0-50 and LP x 50-100.
    """
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 5))
    for idx, (sp_series, lp_series) in enumerate(data):
        ax.plot(np.linspace(0, 50, len(sp_series)), sp_series,
                label='sp_' + codons[idx])
        ax.plot(np.linspace(50, 100, len(lp_series)), lp_series,
                label='lp_' + codons[idx])
    ax.legend(loc=1)
    ax.set_title(AA)
def plot_distribution(sp_dict, lp_dict, AA):
    """Plot the per-gene codon-usage series for *AA*: SP on top, LP below."""
    fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(40, 20))
    for codon in sp_dict[AA]:
        x = np.arange(len(sp_dict[AA][codon]))
        sp_y = np.array(sp_dict[AA][codon])
        lp_y = np.array(lp_dict[AA][codon])
        axes[0].plot(x, sp_y)
        axes[1].plot(x, lp_y)
    # Bug fix: 'plt.show' without parentheses was a no-op attribute access.
    plt.show()
def get_skellam_distribution(sp_dict, lp_dict, AA):
    """Plot Skellam distributions of the paired codon means for SP and LP.

    NOTE(review): assumes *AA* maps to exactly two codons in both
    dictionaries (the selection made by choose_codons guarantees this).
    """
    sp_mu = {}
    lp_mu = {}
    codons = []
    # Mean usage per codon for each group.
    for codon in sp_dict[AA]:
        codons.append(codon)
        sp_mu[codon] = np.mean(sp_dict[AA][codon])
        lp_mu[codon] = np.mean(lp_dict[AA][codon])
    first, second = codons[0], codons[1]
    skellam_plot(sp_mu[first], sp_mu[second], 'SP-' + AA)
    skellam_plot(lp_mu[first], lp_mu[second], 'LP-' + AA)
def skellam_plot(mu1, mu2, name):
    """Plot the Skellam PMF for the difference of two Poisson variables.

    Args:
        mu1:  mean of the first Poisson variable (first codon's usage).
        mu2:  mean of the second Poisson variable (second codon's usage).
        name: legend label for the curve (e.g. 'SP-Phe').
    """
    print(mu1, ' ', mu2, ' ', mu1 - mu2, ' ', name)
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(5, 5))
    # Support grid covering the central 98% of the probability mass.
    x = np.arange(stats.skellam.ppf(0.01, mu1, mu2),
                  stats.skellam.ppf(0.99, mu1, mu2))
    ax.plot(x, stats.skellam.pmf(x, mu1, mu2), marker='o', label=name)
    ax.legend(loc=1)
    # Fix: the original had bare `plt.show` (attribute access, a no-op);
    # it must be called for the figure to render on non-interactive backends.
    plt.show()
# main flow
args = parse_args()
sp_codon_usage = Codon_Usage(args.sp_file)
lp_codon_usage = Codon_Usage(args.lp_file)
sp_AA_dict = sp_codon_usage.get_AA_dict()
lp_AA_dict = lp_codon_usage.get_AA_dict()
print("Analyzing SP and LP %s group data\n" % (args.label))
AAs = heatmap_SP_LP(sp_AA_dict, lp_AA_dict, args.label)
plot_SP_LP(sp_AA_dict, lp_AA_dict)
# optional
# get Skellam distributions of AAs that have only two codon choices
# and show distictive usage between SP and LP
'''
sp_all_codon_usage = Codon_Usage('SP_all_gene_seq.txt')
lp_all_codon_usage = Codon_Usage('LP_all_gene_seq.txt')
sp_all_AA_dict = sp_all_codon_usage.get_AA_dict()
lp_all_AA_dict = lp_all_codon_usage.get_AA_dict()
for AA in AAs:
plot_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)
get_skellam_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)
'''
|
normal
|
{
"blob_id": "ae7a2de8742e353818d4f5a28feb9bce04d787bb",
"index": 8382,
"step-1": "<mask token>\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\n 'Analyze codon usage of SP and LP\\n')\n parser.add_argument('sp_file', help='one input SP data file\\n')\n parser.add_argument('lp_file', help='one input LP data file\\n')\n parser.add_argument('--label', '-l', type=str, required=False, default=\n 'top', help=\"\"\"Define the label of out-put files. Default=\"top\\\"\n\"\"\")\n args = parser.parse_args()\n for path in [args.sp_file, args.lp_file]:\n if not os.path.isfile(path):\n parser.error('File \"%s\" cannot be found.' % path)\n return args\n\n\nclass Codon_Usage:\n\n def __init__(self, filename):\n self.seq, self.gene_num = self.get_seq(filename)\n\n def get_seq(self, filename):\n file = io.open(filename)\n all_seq = []\n gene_seq = ''\n count_all = 0\n count_non_triple = 0\n for line in file:\n if line[0] == '>':\n count_all += 1\n if gene_seq != '':\n if len(gene_seq) % 3:\n count_non_triple += 1\n else:\n all_seq.append(gene_seq)\n gene_seq = ''\n else:\n gene_seq += line.strip()\n file.close()\n print('%s:\\n%d genes added\\n%d are non-triple\\n' % (filename[:2],\n count_all, count_non_triple))\n return all_seq, count_all - count_non_triple\n\n def get_AA(self, codon):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TTA': 'Leu', 'TTG': 'Leu',\n 'TCT': 'Ser', 'TCC': 'Ser', 'TCA': 'Ser', 'TCG': 'Ser', 'TAT':\n 'Tyr', 'TAC': 'Tyr', 'TAA': 'STOP', 'TAG': 'STOP', 'TGT': 'Cys',\n 'TGC': 'Cys', 'TGA': 'STOP', 'TGG': 'Trp', 'CTT': 'Leu', 'CTC':\n 'Leu', 'CTA': 'Leu', 'CTG': 'Leu', 'CCT': 'Pro', 'CCC': 'Pro',\n 'CCA': 'Pro', 'CCG': 'Pro', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'CGT': 'Arg', 'CGC': 'Arg', 'CGA': 'Arg',\n 'CGG': 'Arg', 'ATT': 'Ile', 'ATC': 'Ile', 'ATA': 'Ile', 'ATG':\n 'Met', 'ACT': 'Thr', 'ACC': 'Thr', 'ACA': 'Thr', 'ACG': 'Thr',\n 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys', 'AAG': 'Lys', 'AGT':\n 'Ser', 'AGC': 'Ser', 'AGA': 'Arg', 'AGG': 'Arg', 'GTT': 'Val',\n 'GTC': 'Val', 'GTA': 'Val', 
'GTG': 'Val', 'GCT': 'Ala', 'GCC':\n 'Ala', 'GCA': 'Ala', 'GCG': 'Ala', 'GAT': 'Asp', 'GAC': 'Asp',\n 'GAA': 'Glu', 'GAG': 'Glu', 'GGT': 'Gly', 'GGC': 'Gly', 'GGA':\n 'Gly', 'GGG': 'Gly'}\n if codon in codon_map:\n return codon_map[codon]\n else:\n return ''\n\n def get_usage_dict(self, seq):\n usage_dict = collections.defaultdict(lambda : [collections.\n defaultdict(lambda : [0, 0]), 0])\n for index in range(0, len(seq), 3):\n codon = seq[index:index + 3]\n AA = self.get_AA(codon)\n if AA:\n usage_dict[AA][1] += 1\n usage_dict[AA][0][codon][0] += 1\n for AA in usage_dict:\n for codon in usage_dict[AA][0]:\n usage_dict[AA][0][codon][1] = usage_dict[AA][0][codon][0\n ] / usage_dict[AA][1]\n return usage_dict\n\n def get_AA_dict(self):\n AA_dict = collections.defaultdict(lambda : collections.defaultdict(\n list))\n AA_map = {'Phe': ['TTT', 'TTC'], 'Leu': ['TTA', 'TTG', 'CTT', 'CTC',\n 'CTA', 'CTG'], 'Ser': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'\n ], 'Tyr': ['TAT', 'TAC'], 'STOP': ['TAA', 'TAG', 'TGA'], 'Cys':\n ['TGT', 'TGC'], 'Trp': ['TGG'], 'Pro': ['CCT', 'CCC', 'CCA',\n 'CCG'], 'His': ['CAT', 'CAC'], 'Gln': ['CAA', 'CAG'], 'Arg': [\n 'CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], 'Ile': ['ATT', 'ATC',\n 'ATA'], 'Met': ['ATG'], 'Thr': ['ACT', 'ACC', 'ACA', 'ACG'],\n 'Asn': ['AAT', 'AAC'], 'Lys': ['AAA', 'AAG'], 'Val': ['GTT',\n 'GTC', 'GTA', 'GTG'], 'Ala': ['GCT', 'GCC', 'GCA', 'GCG'],\n 'Asp': ['GAT', 'GAC'], 'Glu': ['GAA', 'GAG'], 'Gly': ['GGT',\n 'GGC', 'GGA', 'GGG']}\n usage_dict_list = []\n for seq in self.seq:\n usage_dict_list.append(self.get_usage_dict(seq))\n for AA in list(AA_map.keys()):\n for codon in AA_map[AA]:\n for usage_dict in usage_dict_list:\n AA_dict[AA][codon].append(usage_dict[AA][0][codon][1])\n return AA_dict\n\n\n<mask token>\n\n\ndef annotate_heatmap(im, text_label):\n textcolors = ['black', 'white']\n data = im.get_array()\n threshold = im.norm(data.max()) / 2\n kw = dict(horizontalalignment='center', verticalalignment='center')\n for i 
in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color=textcolors[im.norm(data[i, j]) > threshold])\n im.axes.text(j, i, text_label[i, j], **kw)\n\n\ndef choose_codons(ttest, text):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TAT': 'Tyr', 'TAC': 'Tyr',\n 'TGT': 'Cys', 'TGC': 'Cys', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys',\n 'AAG': 'Lys', 'GAT': 'Asp', 'GAC': 'Asp', 'GAA': 'Glu', 'GAG': 'Glu'}\n codon_dict = collections.defaultdict(list)\n for i in range(len(ttest)):\n for j in range(len(ttest[i])):\n if ttest[i][j] < 0.01:\n codon = text[i][j][:3]\n if codon in codon_map:\n codon_dict[codon_map[codon]].append(codon)\n file = io.open('AAs_to_compare.txt', 'w')\n file.write('Compare following AAs\\n')\n AAs = []\n for AA in codon_dict.keys():\n AAs.append(AA)\n if len(codon_dict[AA]) == 2:\n file.write('%s: %s, %s\\n' % (AA, codon_dict[AA][0], codon_dict[\n AA][1]))\n else:\n file.write('%s: %s\\n' % (AA, codon_dict[AA][0]))\n file.close()\n return AAs\n\n\n<mask token>\n\n\ndef plot_distribution(sp_dict, lp_dict, AA):\n fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(40, 20))\n for codon in sp_dict[AA]:\n x = np.arange(len(sp_dict[AA][codon]))\n sp_y = np.array(sp_dict[AA][codon])\n lp_y = np.array(lp_dict[AA][codon])\n axes[0].plot(x, sp_y)\n axes[1].plot(x, lp_y)\n plt.show\n\n\ndef get_skellam_distribution(sp_dict, lp_dict, AA):\n sp_mu = {}\n lp_mu = {}\n codons = []\n for codon in sp_dict[AA]:\n codons.append(codon)\n sp_mu[codon] = np.mean(sp_dict[AA][codon])\n lp_mu[codon] = np.mean(lp_dict[AA][codon])\n skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)\n skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\n 'Analyze codon usage of SP and LP\\n')\n parser.add_argument('sp_file', help='one input SP data file\\n')\n parser.add_argument('lp_file', help='one input LP data file\\n')\n parser.add_argument('--label', '-l', type=str, required=False, default=\n 'top', help=\"\"\"Define the label of out-put files. Default=\"top\\\"\n\"\"\")\n args = parser.parse_args()\n for path in [args.sp_file, args.lp_file]:\n if not os.path.isfile(path):\n parser.error('File \"%s\" cannot be found.' % path)\n return args\n\n\nclass Codon_Usage:\n\n def __init__(self, filename):\n self.seq, self.gene_num = self.get_seq(filename)\n\n def get_seq(self, filename):\n file = io.open(filename)\n all_seq = []\n gene_seq = ''\n count_all = 0\n count_non_triple = 0\n for line in file:\n if line[0] == '>':\n count_all += 1\n if gene_seq != '':\n if len(gene_seq) % 3:\n count_non_triple += 1\n else:\n all_seq.append(gene_seq)\n gene_seq = ''\n else:\n gene_seq += line.strip()\n file.close()\n print('%s:\\n%d genes added\\n%d are non-triple\\n' % (filename[:2],\n count_all, count_non_triple))\n return all_seq, count_all - count_non_triple\n\n def get_AA(self, codon):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TTA': 'Leu', 'TTG': 'Leu',\n 'TCT': 'Ser', 'TCC': 'Ser', 'TCA': 'Ser', 'TCG': 'Ser', 'TAT':\n 'Tyr', 'TAC': 'Tyr', 'TAA': 'STOP', 'TAG': 'STOP', 'TGT': 'Cys',\n 'TGC': 'Cys', 'TGA': 'STOP', 'TGG': 'Trp', 'CTT': 'Leu', 'CTC':\n 'Leu', 'CTA': 'Leu', 'CTG': 'Leu', 'CCT': 'Pro', 'CCC': 'Pro',\n 'CCA': 'Pro', 'CCG': 'Pro', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'CGT': 'Arg', 'CGC': 'Arg', 'CGA': 'Arg',\n 'CGG': 'Arg', 'ATT': 'Ile', 'ATC': 'Ile', 'ATA': 'Ile', 'ATG':\n 'Met', 'ACT': 'Thr', 'ACC': 'Thr', 'ACA': 'Thr', 'ACG': 'Thr',\n 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys', 'AAG': 'Lys', 'AGT':\n 'Ser', 'AGC': 'Ser', 'AGA': 'Arg', 'AGG': 'Arg', 'GTT': 'Val',\n 'GTC': 'Val', 'GTA': 'Val', 
'GTG': 'Val', 'GCT': 'Ala', 'GCC':\n 'Ala', 'GCA': 'Ala', 'GCG': 'Ala', 'GAT': 'Asp', 'GAC': 'Asp',\n 'GAA': 'Glu', 'GAG': 'Glu', 'GGT': 'Gly', 'GGC': 'Gly', 'GGA':\n 'Gly', 'GGG': 'Gly'}\n if codon in codon_map:\n return codon_map[codon]\n else:\n return ''\n\n def get_usage_dict(self, seq):\n usage_dict = collections.defaultdict(lambda : [collections.\n defaultdict(lambda : [0, 0]), 0])\n for index in range(0, len(seq), 3):\n codon = seq[index:index + 3]\n AA = self.get_AA(codon)\n if AA:\n usage_dict[AA][1] += 1\n usage_dict[AA][0][codon][0] += 1\n for AA in usage_dict:\n for codon in usage_dict[AA][0]:\n usage_dict[AA][0][codon][1] = usage_dict[AA][0][codon][0\n ] / usage_dict[AA][1]\n return usage_dict\n\n def get_AA_dict(self):\n AA_dict = collections.defaultdict(lambda : collections.defaultdict(\n list))\n AA_map = {'Phe': ['TTT', 'TTC'], 'Leu': ['TTA', 'TTG', 'CTT', 'CTC',\n 'CTA', 'CTG'], 'Ser': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'\n ], 'Tyr': ['TAT', 'TAC'], 'STOP': ['TAA', 'TAG', 'TGA'], 'Cys':\n ['TGT', 'TGC'], 'Trp': ['TGG'], 'Pro': ['CCT', 'CCC', 'CCA',\n 'CCG'], 'His': ['CAT', 'CAC'], 'Gln': ['CAA', 'CAG'], 'Arg': [\n 'CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], 'Ile': ['ATT', 'ATC',\n 'ATA'], 'Met': ['ATG'], 'Thr': ['ACT', 'ACC', 'ACA', 'ACG'],\n 'Asn': ['AAT', 'AAC'], 'Lys': ['AAA', 'AAG'], 'Val': ['GTT',\n 'GTC', 'GTA', 'GTG'], 'Ala': ['GCT', 'GCC', 'GCA', 'GCG'],\n 'Asp': ['GAT', 'GAC'], 'Glu': ['GAA', 'GAG'], 'Gly': ['GGT',\n 'GGC', 'GGA', 'GGG']}\n usage_dict_list = []\n for seq in self.seq:\n usage_dict_list.append(self.get_usage_dict(seq))\n for AA in list(AA_map.keys()):\n for codon in AA_map[AA]:\n for usage_dict in usage_dict_list:\n AA_dict[AA][codon].append(usage_dict[AA][0][codon][1])\n return AA_dict\n\n\n<mask token>\n\n\ndef plot_heatmap(data, text, cbarlabel, label):\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5))\n im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)\n annotate_heatmap(im, text)\n fig.tight_layout()\n 
plt.show\n plt.savefig(f'../results/{cbarlabel}_{label}.png')\n\n\n<mask token>\n\n\ndef annotate_heatmap(im, text_label):\n textcolors = ['black', 'white']\n data = im.get_array()\n threshold = im.norm(data.max()) / 2\n kw = dict(horizontalalignment='center', verticalalignment='center')\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color=textcolors[im.norm(data[i, j]) > threshold])\n im.axes.text(j, i, text_label[i, j], **kw)\n\n\ndef choose_codons(ttest, text):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TAT': 'Tyr', 'TAC': 'Tyr',\n 'TGT': 'Cys', 'TGC': 'Cys', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys',\n 'AAG': 'Lys', 'GAT': 'Asp', 'GAC': 'Asp', 'GAA': 'Glu', 'GAG': 'Glu'}\n codon_dict = collections.defaultdict(list)\n for i in range(len(ttest)):\n for j in range(len(ttest[i])):\n if ttest[i][j] < 0.01:\n codon = text[i][j][:3]\n if codon in codon_map:\n codon_dict[codon_map[codon]].append(codon)\n file = io.open('AAs_to_compare.txt', 'w')\n file.write('Compare following AAs\\n')\n AAs = []\n for AA in codon_dict.keys():\n AAs.append(AA)\n if len(codon_dict[AA]) == 2:\n file.write('%s: %s, %s\\n' % (AA, codon_dict[AA][0], codon_dict[\n AA][1]))\n else:\n file.write('%s: %s\\n' % (AA, codon_dict[AA][0]))\n file.close()\n return AAs\n\n\ndef plot_SP_LP(sp_AA_dict, lp_AA_dict):\n for AA in list(sp_AA_dict.keys()):\n codon_data = []\n codons = []\n for codon in sp_AA_dict[AA]:\n lp_AA_dict[AA][codon].reverse()\n codons.append(codon)\n codon_data.append([])\n codon_data[-1].append(sp_AA_dict[AA][codon])\n codon_data[-1].append(lp_AA_dict[AA][codon])\n codon_usage_plot(codon_data, AA, codons)\n\n\n<mask token>\n\n\ndef plot_distribution(sp_dict, lp_dict, AA):\n fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(40, 20))\n for codon in sp_dict[AA]:\n x = np.arange(len(sp_dict[AA][codon]))\n sp_y = np.array(sp_dict[AA][codon])\n lp_y = np.array(lp_dict[AA][codon])\n axes[0].plot(x, 
sp_y)\n axes[1].plot(x, lp_y)\n plt.show\n\n\ndef get_skellam_distribution(sp_dict, lp_dict, AA):\n sp_mu = {}\n lp_mu = {}\n codons = []\n for codon in sp_dict[AA]:\n codons.append(codon)\n sp_mu[codon] = np.mean(sp_dict[AA][codon])\n lp_mu[codon] = np.mean(lp_dict[AA][codon])\n skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)\n skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\n 'Analyze codon usage of SP and LP\\n')\n parser.add_argument('sp_file', help='one input SP data file\\n')\n parser.add_argument('lp_file', help='one input LP data file\\n')\n parser.add_argument('--label', '-l', type=str, required=False, default=\n 'top', help=\"\"\"Define the label of out-put files. Default=\"top\\\"\n\"\"\")\n args = parser.parse_args()\n for path in [args.sp_file, args.lp_file]:\n if not os.path.isfile(path):\n parser.error('File \"%s\" cannot be found.' % path)\n return args\n\n\nclass Codon_Usage:\n\n def __init__(self, filename):\n self.seq, self.gene_num = self.get_seq(filename)\n\n def get_seq(self, filename):\n file = io.open(filename)\n all_seq = []\n gene_seq = ''\n count_all = 0\n count_non_triple = 0\n for line in file:\n if line[0] == '>':\n count_all += 1\n if gene_seq != '':\n if len(gene_seq) % 3:\n count_non_triple += 1\n else:\n all_seq.append(gene_seq)\n gene_seq = ''\n else:\n gene_seq += line.strip()\n file.close()\n print('%s:\\n%d genes added\\n%d are non-triple\\n' % (filename[:2],\n count_all, count_non_triple))\n return all_seq, count_all - count_non_triple\n\n def get_AA(self, codon):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TTA': 'Leu', 'TTG': 'Leu',\n 'TCT': 'Ser', 'TCC': 'Ser', 'TCA': 'Ser', 'TCG': 'Ser', 'TAT':\n 'Tyr', 'TAC': 'Tyr', 'TAA': 'STOP', 'TAG': 'STOP', 'TGT': 'Cys',\n 'TGC': 'Cys', 'TGA': 'STOP', 'TGG': 'Trp', 'CTT': 'Leu', 'CTC':\n 'Leu', 'CTA': 'Leu', 'CTG': 'Leu', 'CCT': 'Pro', 'CCC': 'Pro',\n 'CCA': 'Pro', 'CCG': 'Pro', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'CGT': 'Arg', 'CGC': 'Arg', 'CGA': 'Arg',\n 'CGG': 'Arg', 'ATT': 'Ile', 'ATC': 'Ile', 'ATA': 'Ile', 'ATG':\n 'Met', 'ACT': 'Thr', 'ACC': 'Thr', 'ACA': 'Thr', 'ACG': 'Thr',\n 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys', 'AAG': 'Lys', 'AGT':\n 'Ser', 'AGC': 'Ser', 'AGA': 'Arg', 'AGG': 'Arg', 'GTT': 'Val',\n 'GTC': 'Val', 'GTA': 'Val', 
'GTG': 'Val', 'GCT': 'Ala', 'GCC':\n 'Ala', 'GCA': 'Ala', 'GCG': 'Ala', 'GAT': 'Asp', 'GAC': 'Asp',\n 'GAA': 'Glu', 'GAG': 'Glu', 'GGT': 'Gly', 'GGC': 'Gly', 'GGA':\n 'Gly', 'GGG': 'Gly'}\n if codon in codon_map:\n return codon_map[codon]\n else:\n return ''\n\n def get_usage_dict(self, seq):\n usage_dict = collections.defaultdict(lambda : [collections.\n defaultdict(lambda : [0, 0]), 0])\n for index in range(0, len(seq), 3):\n codon = seq[index:index + 3]\n AA = self.get_AA(codon)\n if AA:\n usage_dict[AA][1] += 1\n usage_dict[AA][0][codon][0] += 1\n for AA in usage_dict:\n for codon in usage_dict[AA][0]:\n usage_dict[AA][0][codon][1] = usage_dict[AA][0][codon][0\n ] / usage_dict[AA][1]\n return usage_dict\n\n def get_AA_dict(self):\n AA_dict = collections.defaultdict(lambda : collections.defaultdict(\n list))\n AA_map = {'Phe': ['TTT', 'TTC'], 'Leu': ['TTA', 'TTG', 'CTT', 'CTC',\n 'CTA', 'CTG'], 'Ser': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'\n ], 'Tyr': ['TAT', 'TAC'], 'STOP': ['TAA', 'TAG', 'TGA'], 'Cys':\n ['TGT', 'TGC'], 'Trp': ['TGG'], 'Pro': ['CCT', 'CCC', 'CCA',\n 'CCG'], 'His': ['CAT', 'CAC'], 'Gln': ['CAA', 'CAG'], 'Arg': [\n 'CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], 'Ile': ['ATT', 'ATC',\n 'ATA'], 'Met': ['ATG'], 'Thr': ['ACT', 'ACC', 'ACA', 'ACG'],\n 'Asn': ['AAT', 'AAC'], 'Lys': ['AAA', 'AAG'], 'Val': ['GTT',\n 'GTC', 'GTA', 'GTG'], 'Ala': ['GCT', 'GCC', 'GCA', 'GCG'],\n 'Asp': ['GAT', 'GAC'], 'Glu': ['GAA', 'GAG'], 'Gly': ['GGT',\n 'GGC', 'GGA', 'GGG']}\n usage_dict_list = []\n for seq in self.seq:\n usage_dict_list.append(self.get_usage_dict(seq))\n for AA in list(AA_map.keys()):\n for codon in AA_map[AA]:\n for usage_dict in usage_dict_list:\n AA_dict[AA][codon].append(usage_dict[AA][0][codon][1])\n return AA_dict\n\n\n<mask token>\n\n\ndef plot_heatmap(data, text, cbarlabel, label):\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5))\n im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)\n annotate_heatmap(im, text)\n fig.tight_layout()\n 
plt.show\n plt.savefig(f'../results/{cbarlabel}_{label}.png')\n\n\ndef heatmap(data, ax, cmap, cbarlabel):\n if not ax:\n ax = plt.gca()\n im = ax.imshow(data, cmap)\n cbar = ax.figure.colorbar(im, ax=ax)\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n ax.set_xticklabels(range(data.shape[1]))\n ax.set_yticklabels(range(data.shape[0]))\n ax.tick_params(top=False, bottom=True, labeltop=False, labelbottom=True)\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor=True)\n ax.grid(which='minor', color='w', linestyle='-', linewidth=3)\n ax.tick_params(which='minor', bottom=False, left=False)\n cbar.ax.set_ylabel(cbarlabel, va='top')\n return im, cbar\n\n\ndef annotate_heatmap(im, text_label):\n textcolors = ['black', 'white']\n data = im.get_array()\n threshold = im.norm(data.max()) / 2\n kw = dict(horizontalalignment='center', verticalalignment='center')\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color=textcolors[im.norm(data[i, j]) > threshold])\n im.axes.text(j, i, text_label[i, j], **kw)\n\n\ndef choose_codons(ttest, text):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TAT': 'Tyr', 'TAC': 'Tyr',\n 'TGT': 'Cys', 'TGC': 'Cys', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys',\n 'AAG': 'Lys', 'GAT': 'Asp', 'GAC': 'Asp', 'GAA': 'Glu', 'GAG': 'Glu'}\n codon_dict = collections.defaultdict(list)\n for i in range(len(ttest)):\n for j in range(len(ttest[i])):\n if ttest[i][j] < 0.01:\n codon = text[i][j][:3]\n if codon in codon_map:\n codon_dict[codon_map[codon]].append(codon)\n file = io.open('AAs_to_compare.txt', 'w')\n file.write('Compare following AAs\\n')\n AAs = []\n for AA in codon_dict.keys():\n AAs.append(AA)\n if len(codon_dict[AA]) == 2:\n file.write('%s: %s, %s\\n' % (AA, codon_dict[AA][0], codon_dict[\n 
AA][1]))\n else:\n file.write('%s: %s\\n' % (AA, codon_dict[AA][0]))\n file.close()\n return AAs\n\n\ndef plot_SP_LP(sp_AA_dict, lp_AA_dict):\n for AA in list(sp_AA_dict.keys()):\n codon_data = []\n codons = []\n for codon in sp_AA_dict[AA]:\n lp_AA_dict[AA][codon].reverse()\n codons.append(codon)\n codon_data.append([])\n codon_data[-1].append(sp_AA_dict[AA][codon])\n codon_data[-1].append(lp_AA_dict[AA][codon])\n codon_usage_plot(codon_data, AA, codons)\n\n\ndef codon_usage_plot(data, AA, codons):\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 5))\n for i in range(len(data)):\n x_sp = np.linspace(0, 50, len(data[i][0]))\n x_lp = np.linspace(50, 100, len(data[i][1]))\n ax.plot(x_sp, data[i][0], label='sp_' + codons[i])\n ax.plot(x_lp, data[i][1], label='lp_' + codons[i])\n ax.legend(loc=1)\n ax.set_title(AA)\n\n\ndef plot_distribution(sp_dict, lp_dict, AA):\n fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(40, 20))\n for codon in sp_dict[AA]:\n x = np.arange(len(sp_dict[AA][codon]))\n sp_y = np.array(sp_dict[AA][codon])\n lp_y = np.array(lp_dict[AA][codon])\n axes[0].plot(x, sp_y)\n axes[1].plot(x, lp_y)\n plt.show\n\n\ndef get_skellam_distribution(sp_dict, lp_dict, AA):\n sp_mu = {}\n lp_mu = {}\n codons = []\n for codon in sp_dict[AA]:\n codons.append(codon)\n sp_mu[codon] = np.mean(sp_dict[AA][codon])\n lp_mu[codon] = np.mean(lp_dict[AA][codon])\n skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)\n skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)\n\n\ndef skellam_plot(mu1, mu2, name):\n print(mu1, ' ', mu2, ' ', mu1 - mu2, ' ', name)\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(5, 5))\n x = np.arange(stats.skellam.ppf(0.01, mu1, mu2), stats.skellam.ppf(0.99,\n mu1, mu2))\n ax.plot(x, stats.skellam.pmf(x, mu1, mu2), marker='o', label=name)\n ax.legend(loc=1)\n plt.show\n\n\n<mask token>\n",
"step-4": "<mask token>\nimport io, os, argparse, collections\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\n 'Analyze codon usage of SP and LP\\n')\n parser.add_argument('sp_file', help='one input SP data file\\n')\n parser.add_argument('lp_file', help='one input LP data file\\n')\n parser.add_argument('--label', '-l', type=str, required=False, default=\n 'top', help=\"\"\"Define the label of out-put files. Default=\"top\\\"\n\"\"\")\n args = parser.parse_args()\n for path in [args.sp_file, args.lp_file]:\n if not os.path.isfile(path):\n parser.error('File \"%s\" cannot be found.' % path)\n return args\n\n\nclass Codon_Usage:\n\n def __init__(self, filename):\n self.seq, self.gene_num = self.get_seq(filename)\n\n def get_seq(self, filename):\n file = io.open(filename)\n all_seq = []\n gene_seq = ''\n count_all = 0\n count_non_triple = 0\n for line in file:\n if line[0] == '>':\n count_all += 1\n if gene_seq != '':\n if len(gene_seq) % 3:\n count_non_triple += 1\n else:\n all_seq.append(gene_seq)\n gene_seq = ''\n else:\n gene_seq += line.strip()\n file.close()\n print('%s:\\n%d genes added\\n%d are non-triple\\n' % (filename[:2],\n count_all, count_non_triple))\n return all_seq, count_all - count_non_triple\n\n def get_AA(self, codon):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TTA': 'Leu', 'TTG': 'Leu',\n 'TCT': 'Ser', 'TCC': 'Ser', 'TCA': 'Ser', 'TCG': 'Ser', 'TAT':\n 'Tyr', 'TAC': 'Tyr', 'TAA': 'STOP', 'TAG': 'STOP', 'TGT': 'Cys',\n 'TGC': 'Cys', 'TGA': 'STOP', 'TGG': 'Trp', 'CTT': 'Leu', 'CTC':\n 'Leu', 'CTA': 'Leu', 'CTG': 'Leu', 'CCT': 'Pro', 'CCC': 'Pro',\n 'CCA': 'Pro', 'CCG': 'Pro', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'CGT': 'Arg', 'CGC': 'Arg', 'CGA': 'Arg',\n 'CGG': 'Arg', 'ATT': 'Ile', 'ATC': 'Ile', 'ATA': 'Ile', 'ATG':\n 'Met', 'ACT': 'Thr', 'ACC': 'Thr', 'ACA': 'Thr', 'ACG': 'Thr',\n 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys', 
'AAG': 'Lys', 'AGT':\n 'Ser', 'AGC': 'Ser', 'AGA': 'Arg', 'AGG': 'Arg', 'GTT': 'Val',\n 'GTC': 'Val', 'GTA': 'Val', 'GTG': 'Val', 'GCT': 'Ala', 'GCC':\n 'Ala', 'GCA': 'Ala', 'GCG': 'Ala', 'GAT': 'Asp', 'GAC': 'Asp',\n 'GAA': 'Glu', 'GAG': 'Glu', 'GGT': 'Gly', 'GGC': 'Gly', 'GGA':\n 'Gly', 'GGG': 'Gly'}\n if codon in codon_map:\n return codon_map[codon]\n else:\n return ''\n\n def get_usage_dict(self, seq):\n usage_dict = collections.defaultdict(lambda : [collections.\n defaultdict(lambda : [0, 0]), 0])\n for index in range(0, len(seq), 3):\n codon = seq[index:index + 3]\n AA = self.get_AA(codon)\n if AA:\n usage_dict[AA][1] += 1\n usage_dict[AA][0][codon][0] += 1\n for AA in usage_dict:\n for codon in usage_dict[AA][0]:\n usage_dict[AA][0][codon][1] = usage_dict[AA][0][codon][0\n ] / usage_dict[AA][1]\n return usage_dict\n\n def get_AA_dict(self):\n AA_dict = collections.defaultdict(lambda : collections.defaultdict(\n list))\n AA_map = {'Phe': ['TTT', 'TTC'], 'Leu': ['TTA', 'TTG', 'CTT', 'CTC',\n 'CTA', 'CTG'], 'Ser': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'\n ], 'Tyr': ['TAT', 'TAC'], 'STOP': ['TAA', 'TAG', 'TGA'], 'Cys':\n ['TGT', 'TGC'], 'Trp': ['TGG'], 'Pro': ['CCT', 'CCC', 'CCA',\n 'CCG'], 'His': ['CAT', 'CAC'], 'Gln': ['CAA', 'CAG'], 'Arg': [\n 'CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], 'Ile': ['ATT', 'ATC',\n 'ATA'], 'Met': ['ATG'], 'Thr': ['ACT', 'ACC', 'ACA', 'ACG'],\n 'Asn': ['AAT', 'AAC'], 'Lys': ['AAA', 'AAG'], 'Val': ['GTT',\n 'GTC', 'GTA', 'GTG'], 'Ala': ['GCT', 'GCC', 'GCA', 'GCG'],\n 'Asp': ['GAT', 'GAC'], 'Glu': ['GAA', 'GAG'], 'Gly': ['GGT',\n 'GGC', 'GGA', 'GGG']}\n usage_dict_list = []\n for seq in self.seq:\n usage_dict_list.append(self.get_usage_dict(seq))\n for AA in list(AA_map.keys()):\n for codon in AA_map[AA]:\n for usage_dict in usage_dict_list:\n AA_dict[AA][codon].append(usage_dict[AA][0][codon][1])\n return AA_dict\n\n\ndef heatmap_SP_LP(sp_AA_dict, lp_AA_dict, label):\n AA_chisquare = []\n AA_text = []\n codon_ttest = []\n 
codon_text = []\n i = 0\n j = 0\n count_all = 0\n count_sig = 0\n for AA in list(sp_AA_dict.keys()):\n sp_codon_mean = []\n lp_codon_mean = []\n for codon in sp_AA_dict[AA]:\n p_val = stats.ttest_ind(sp_AA_dict[AA][codon], lp_AA_dict[AA][\n codon], equal_var=False)[1]\n if not i % 8:\n codon_ttest.append([])\n codon_text.append([])\n i += 1\n if np.isnan(p_val):\n codon_ttest[-1].append(0)\n codon_text[-1].append(codon + '\\n NA')\n else:\n codon_ttest[-1].append(p_val)\n codon_text[-1].append(codon + '\\n' + str(round(p_val, 2)))\n count_all += 1\n if p_val < 0.5:\n count_sig += 1\n sp_codon_mean.append(np.mean(sp_AA_dict[AA][codon]))\n lp_codon_mean.append(np.mean(lp_AA_dict[AA][codon]))\n p_val = stats.chisquare(np.array([sp_codon_mean, lp_codon_mean]),\n axis=None)[1]\n if not j % 6:\n AA_chisquare.append([])\n AA_text.append([])\n j += 1\n if np.isnan(p_val):\n AA_chisquare[-1].append(0)\n AA_text[-1].append(AA + '\\n NA')\n else:\n AA_chisquare[-1].append(p_val)\n AA_text[-1].append(AA + '\\n' + str(round(p_val, 2)))\n for n in range(j % 6, 6):\n AA_chisquare[-1].append(0)\n AA_text[-1].append('')\n AAs = choose_codons(codon_ttest, codon_text)\n AA_chisquare = np.array(AA_chisquare)\n codon_ttest = np.array(codon_ttest)\n AA_text = np.array(AA_text)\n codon_text = np.array(codon_text)\n print(\n \"\"\"%d out of %d codon show significant usage difference between SP and LP genes (p_value < 0.5)\n\"\"\"\n % (count_sig, count_all))\n plot_heatmap(AA_chisquare, AA_text, 'AAs_ChiSquare', label)\n plot_heatmap(codon_ttest, codon_text, 'Codons_ttest', label)\n return AAs\n\n\ndef plot_heatmap(data, text, cbarlabel, label):\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5))\n im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)\n annotate_heatmap(im, text)\n fig.tight_layout()\n plt.show\n plt.savefig(f'../results/{cbarlabel}_{label}.png')\n\n\ndef heatmap(data, ax, cmap, cbarlabel):\n if not ax:\n ax = plt.gca()\n im = ax.imshow(data, cmap)\n cbar = 
ax.figure.colorbar(im, ax=ax)\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n ax.set_xticklabels(range(data.shape[1]))\n ax.set_yticklabels(range(data.shape[0]))\n ax.tick_params(top=False, bottom=True, labeltop=False, labelbottom=True)\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor=True)\n ax.grid(which='minor', color='w', linestyle='-', linewidth=3)\n ax.tick_params(which='minor', bottom=False, left=False)\n cbar.ax.set_ylabel(cbarlabel, va='top')\n return im, cbar\n\n\ndef annotate_heatmap(im, text_label):\n textcolors = ['black', 'white']\n data = im.get_array()\n threshold = im.norm(data.max()) / 2\n kw = dict(horizontalalignment='center', verticalalignment='center')\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color=textcolors[im.norm(data[i, j]) > threshold])\n im.axes.text(j, i, text_label[i, j], **kw)\n\n\ndef choose_codons(ttest, text):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TAT': 'Tyr', 'TAC': 'Tyr',\n 'TGT': 'Cys', 'TGC': 'Cys', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys',\n 'AAG': 'Lys', 'GAT': 'Asp', 'GAC': 'Asp', 'GAA': 'Glu', 'GAG': 'Glu'}\n codon_dict = collections.defaultdict(list)\n for i in range(len(ttest)):\n for j in range(len(ttest[i])):\n if ttest[i][j] < 0.01:\n codon = text[i][j][:3]\n if codon in codon_map:\n codon_dict[codon_map[codon]].append(codon)\n file = io.open('AAs_to_compare.txt', 'w')\n file.write('Compare following AAs\\n')\n AAs = []\n for AA in codon_dict.keys():\n AAs.append(AA)\n if len(codon_dict[AA]) == 2:\n file.write('%s: %s, %s\\n' % (AA, codon_dict[AA][0], codon_dict[\n AA][1]))\n else:\n file.write('%s: %s\\n' % (AA, codon_dict[AA][0]))\n file.close()\n return AAs\n\n\ndef plot_SP_LP(sp_AA_dict, lp_AA_dict):\n for AA in list(sp_AA_dict.keys()):\n 
codon_data = []\n codons = []\n for codon in sp_AA_dict[AA]:\n lp_AA_dict[AA][codon].reverse()\n codons.append(codon)\n codon_data.append([])\n codon_data[-1].append(sp_AA_dict[AA][codon])\n codon_data[-1].append(lp_AA_dict[AA][codon])\n codon_usage_plot(codon_data, AA, codons)\n\n\ndef codon_usage_plot(data, AA, codons):\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 5))\n for i in range(len(data)):\n x_sp = np.linspace(0, 50, len(data[i][0]))\n x_lp = np.linspace(50, 100, len(data[i][1]))\n ax.plot(x_sp, data[i][0], label='sp_' + codons[i])\n ax.plot(x_lp, data[i][1], label='lp_' + codons[i])\n ax.legend(loc=1)\n ax.set_title(AA)\n\n\ndef plot_distribution(sp_dict, lp_dict, AA):\n fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(40, 20))\n for codon in sp_dict[AA]:\n x = np.arange(len(sp_dict[AA][codon]))\n sp_y = np.array(sp_dict[AA][codon])\n lp_y = np.array(lp_dict[AA][codon])\n axes[0].plot(x, sp_y)\n axes[1].plot(x, lp_y)\n plt.show\n\n\ndef get_skellam_distribution(sp_dict, lp_dict, AA):\n sp_mu = {}\n lp_mu = {}\n codons = []\n for codon in sp_dict[AA]:\n codons.append(codon)\n sp_mu[codon] = np.mean(sp_dict[AA][codon])\n lp_mu[codon] = np.mean(lp_dict[AA][codon])\n skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)\n skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)\n\n\ndef skellam_plot(mu1, mu2, name):\n print(mu1, ' ', mu2, ' ', mu1 - mu2, ' ', name)\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(5, 5))\n x = np.arange(stats.skellam.ppf(0.01, mu1, mu2), stats.skellam.ppf(0.99,\n mu1, mu2))\n ax.plot(x, stats.skellam.pmf(x, mu1, mu2), marker='o', label=name)\n ax.legend(loc=1)\n plt.show\n\n\nargs = parse_args()\nsp_codon_usage = Codon_Usage(args.sp_file)\nlp_codon_usage = Codon_Usage(args.lp_file)\nsp_AA_dict = sp_codon_usage.get_AA_dict()\nlp_AA_dict = lp_codon_usage.get_AA_dict()\nprint('Analyzing SP and LP %s group data\\n' % args.label)\nAAs = heatmap_SP_LP(sp_AA_dict, lp_AA_dict, 
args.label)\nplot_SP_LP(sp_AA_dict, lp_AA_dict)\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on Wed Mar 13 17:34:32 2019\n\n@author: fanlizhou\n\nAnalyze codon usage of sequence from 'SP_gene_seq.txt' and 'LP_gene_seq.txt'\nPlot heatmap of amino acid usage and codon usage\nPlot codon usage in each gene for each amino acid. Genes were arranged so that\nthe gene expression of SP decrease from 0 to 50 (x-axis) and the gene expression\nof LP increase from 51 to 100 (x-axis)\n\nUsage: codon_usage.py [-h] [--label LABEL] sp_file lp_file \n\nOptions:\n--label Define the label of out-put files. Default=\"top\"\nsp_file Path to the SP data files\nlp_file Path to the LP data files\n\n\"\"\"\n\nimport io, os, argparse, collections\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\n 'Analyze codon usage of SP and LP\\n')\n parser.add_argument('sp_file', help = 'one input SP data file\\n')\n parser.add_argument('lp_file', help = 'one input LP data file\\n')\n parser.add_argument('--label', '-l', \n type = str, required = False, default = 'top', \n help = 'Define the label of out-put files. Default=\"top\"\\n')\n \n args = parser.parse_args()\n \n for path in [args.sp_file, args.lp_file]:\n if not os.path.isfile(path):\n parser.error('File \"%s\" cannot be found.' 
% (path))\n \n return args\n\n\n# a Codon_Usage class to store codon usage information for each genotype\nclass Codon_Usage:\n \n def __init__(self, filename): \n self.seq, self.gene_num = self.get_seq(filename)\n \n \n def get_seq(self, filename): \n file = io.open(filename)\n # list of selected gene sequences, excluded genes that are non-triple\n all_seq = []\n gene_seq = ''\n count_all = 0\n count_non_triple = 0\n \n for line in file:\n # read a gene information line\n if line[0]=='>':\n count_all += 1\n \n # if a gene has been read, then append it to all_seq if the\n # sequence is triple\n if gene_seq!='': \n if len(gene_seq)%3:\n count_non_triple += 1\n else:\n all_seq.append(gene_seq)\n \n gene_seq = ''\n \n # read a gene sequence line \n else:\n gene_seq += line.strip()\n \n \n file.close() \n print('%s:\\n%d genes added\\n%d are non-triple\\n'%\n (filename[:2],count_all, count_non_triple))\n \n return (all_seq, count_all - count_non_triple)\n \n\n def get_AA(self, codon):\n # dict key: codon -> AA\n codon_map = {\n 'TTT':'Phe', 'TTC':'Phe', 'TTA':'Leu', 'TTG':'Leu',\n 'TCT':'Ser', 'TCC':'Ser', 'TCA':'Ser', 'TCG':'Ser',\n 'TAT':'Tyr', 'TAC':'Tyr', 'TAA':'STOP', 'TAG':'STOP',\n 'TGT':'Cys', 'TGC':'Cys', 'TGA':'STOP', 'TGG':'Trp',\n 'CTT':'Leu', 'CTC':'Leu', 'CTA':'Leu', 'CTG':'Leu',\n 'CCT':'Pro', 'CCC':'Pro', 'CCA':'Pro', 'CCG':'Pro',\n 'CAT':'His', 'CAC':'His', 'CAA':'Gln', 'CAG':'Gln',\n 'CGT':'Arg', 'CGC':'Arg', 'CGA':'Arg', 'CGG':'Arg',\n 'ATT':'Ile', 'ATC':'Ile', 'ATA':'Ile', 'ATG':'Met',\n 'ACT':'Thr', 'ACC':'Thr', 'ACA':'Thr', 'ACG':'Thr',\n 'AAT':'Asn', 'AAC':'Asn', 'AAA':'Lys', 'AAG':'Lys',\n 'AGT':'Ser', 'AGC':'Ser', 'AGA':'Arg', 'AGG':'Arg',\n 'GTT':'Val', 'GTC':'Val', 'GTA':'Val', 'GTG':'Val',\n 'GCT':'Ala', 'GCC':'Ala', 'GCA':'Ala', 'GCG':'Ala',\n 'GAT':'Asp', 'GAC':'Asp', 'GAA':'Glu', 'GAG':'Glu',\n 'GGT':'Gly', 'GGC':'Gly', 'GGA':'Gly', 'GGG':'Gly'}\n\n if codon in codon_map:\n return codon_map[codon] \n else:\n return ''\n \n \n def 
get_usage_dict(self, seq):\n # usage_dict structure:\n # dict key: AA -> [\n # dict key: codon -> \n # [codon_count,\n # codon_count/AA_count]\n # AA_count\n # ] \n usage_dict = \\\n collections.defaultdict(lambda: \n [\n collections.defaultdict(\n lambda: [0, 0]), \n 0\n ])\n # save AAs usage information\n for index in range(0, len(seq), 3):\n codon = seq[index:index+3]\n AA = self.get_AA(codon)\n if AA:\n # count how many times the AA appears\n usage_dict[AA][1] += 1\n # count how many times the codon is used\n usage_dict[AA][0][codon][0] += 1\n \n # calculate the codon usage percentage for an AA\n for AA in usage_dict:\n for codon in usage_dict[AA][0]:\n usage_dict[AA][0][codon][1] = \\\n usage_dict[AA][0][codon][0]/usage_dict[AA][1]\n\n return usage_dict\n\n\n def get_AA_dict(self): \n # AA_dict structure:\n # 1st dict key: AA -> 2nd dict key: codon -> a list of codon usage \n # percentage of each gene \n AA_dict = \\\n collections.defaultdict(\n lambda:collections.defaultdict(list))\n \n # dict key: AA -> codon list\n AA_map = {\n 'Phe':['TTT', 'TTC'],\n 'Leu':['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],\n 'Ser':['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'], \n 'Tyr':['TAT', 'TAC'], \n 'STOP':['TAA', 'TAG', 'TGA'],\n 'Cys':['TGT', 'TGC'], \n 'Trp':['TGG'],\n 'Pro':['CCT', 'CCC', 'CCA', 'CCG'],\n 'His':['CAT', 'CAC'], \n 'Gln':['CAA', 'CAG'],\n 'Arg':['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'],\n 'Ile':['ATT', 'ATC', 'ATA'], \n 'Met':['ATG'],\n 'Thr':['ACT', 'ACC', 'ACA', 'ACG'],\n 'Asn':['AAT', 'AAC'], \n 'Lys':['AAA', 'AAG'],\n 'Val':['GTT', 'GTC', 'GTA', 'GTG'],\n 'Ala':['GCT', 'GCC', 'GCA', 'GCG'],\n 'Asp':['GAT', 'GAC'], \n 'Glu':['GAA', 'GAG'],\n 'Gly':['GGT', 'GGC', 'GGA', 'GGG']\n }\n \n # list of codon usage for each gene\n usage_dict_list = []\n \n # get codon usage information for each gene\n for seq in self.seq:\n usage_dict_list.append(self.get_usage_dict(seq))\n \n # get the list of codon usage percentage from each gene \n for AA in 
list(AA_map.keys()):\n for codon in AA_map[AA]:\n # get codon usage information from each gene\n for usage_dict in usage_dict_list:\n # append codon usage percentage in the gene\n AA_dict[AA][codon].append(\n usage_dict[AA][0][codon][1])\n \n return AA_dict \n \n\ndef heatmap_SP_LP(sp_AA_dict, lp_AA_dict, label): \n # list of Chi-Square test results\n AA_chisquare = []\n # AA plotting annotation information\n AA_text = []\n \n # list of student's t-test results\n codon_ttest = []\n # codon plotting annotaion information\n codon_text = []\n \n i = 0\n j = 0\n # number of genes analyzed\n count_all = 0\n # number of genes that show significant results\n count_sig = 0\n \n for AA in list(sp_AA_dict.keys()): \n # mean values of codon usage for each AA\n sp_codon_mean = []\n lp_codon_mean = [] \n \n for codon in sp_AA_dict[AA]:\n # calculate ttest results \n p_val = stats.ttest_ind(sp_AA_dict[AA][codon],\n lp_AA_dict[AA][codon],\n equal_var = False)[1]\n \n # display eight codons in a row\n if not i % 8:\n codon_ttest.append([])\n codon_text.append([])\n i += 1\n \n # handle NULL values\n if np.isnan(p_val):\n codon_ttest[-1].append(0)\n codon_text[-1].append(codon + '\\n NA')\n # save ttest p-values and annotation information \n else: \n codon_ttest[-1].append(p_val)\n codon_text[-1].append(codon + '\\n' + str(round(p_val, 2)))\n count_all += 1\n if p_val < 0.5:\n count_sig += 1\n \n sp_codon_mean.append(np.mean(sp_AA_dict[AA][codon]))\n lp_codon_mean.append(np.mean(lp_AA_dict[AA][codon])) \n \n # get Chi-Square test results of each AA\n p_val = stats.chisquare(np.array([sp_codon_mean, lp_codon_mean]), \n axis = None)[1]\n \n # display six AA in a row\n if not j % 6:\n AA_chisquare.append([])\n AA_text.append([])\n j += 1\n \n # handle Null values\n if np.isnan(p_val): \n AA_chisquare[-1].append(0)\n AA_text[-1].append(AA + '\\n NA')\n # save Chi-Square test p-values and annotation information\n else: \n AA_chisquare[-1].append(p_val)\n AA_text[-1].append(AA + '\\n' + 
str(round(p_val, 2)))\n \n # handle empty cells\n for n in range(j % 6, 6):\n AA_chisquare[-1].append(0)\n AA_text[-1].append('')\n \n # get list of AAs that show significant difference between SP and LP groups\n AAs = choose_codons(codon_ttest, codon_text) \n\n AA_chisquare = np.array(AA_chisquare)\n codon_ttest = np.array(codon_ttest)\n \n AA_text = np.array(AA_text)\n codon_text = np.array(codon_text)\n\n print('%d out of %d codon show significant usage difference \\\n between SP and LP genes (p_value < 0.5)\\n' % \n (count_sig, count_all))\n plot_heatmap(AA_chisquare, AA_text, 'AAs_ChiSquare', label)\n plot_heatmap(codon_ttest, codon_text, 'Codons_ttest', label)\n \n return AAs\n\n\ndef plot_heatmap(data, text, cbarlabel, label):\n \n fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (10, 5))\n\n im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)\n \n annotate_heatmap(im, text)\n\n fig.tight_layout()\n plt.show\n plt.savefig(f'../results/{cbarlabel}_{label}.png') \n \ndef heatmap(data, ax, cmap, cbarlabel):\n \n if not ax:\n ax = plt.gca()\n \n im = ax.imshow(data, cmap)\n \n cbar = ax.figure.colorbar(im, ax=ax)\n\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n ax.set_xticklabels(range(data.shape[1]))\n ax.set_yticklabels(range(data.shape[0]))\n\n ax.tick_params(top=False, bottom=True,\n labeltop=False, labelbottom=True)\n\n # draw white space between squares\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n \n ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor = True)\n ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor = True)\n ax.grid(which = 'minor', color = 'w', linestyle = '-', linewidth = 3)\n ax.tick_params(which = 'minor', bottom = False, left = False) \n cbar.ax.set_ylabel(cbarlabel, va = 'top')\n\n return im, cbar\n\n\ndef annotate_heatmap(im, text_label):\n textcolors = ['black','white']\n\n data = im.get_array()\n # set threshold to decide color\n threshold = im.norm(data.max()) 
/ 2\n \n kw = dict(horizontalalignment = 'center',\n verticalalignment = 'center')\n \n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color = textcolors[im.norm(data[i,j]) > threshold])\n im.axes.text(j, i, text_label[i,j], **kw)\n\n\ndef choose_codons(ttest, text): \n # dict key: AA -> codon\n # only contains AAs with only two codon choices \n codon_map = {\n 'TTT':'Phe', 'TTC':'Phe', 'TAT':'Tyr', 'TAC':'Tyr',\n 'TGT':'Cys', 'TGC':'Cys', 'CAT':'His', 'CAC':'His', \n 'CAA':'Gln', 'CAG':'Gln', 'AAT':'Asn', 'AAC':'Asn', \n 'AAA':'Lys', 'AAG':'Lys', 'GAT':'Asp', 'GAC':'Asp', \n 'GAA':'Glu', 'GAG':'Glu'} \n \n codon_dict = collections.defaultdict(list)\n for i in range(len(ttest)):\n for j in range(len(ttest[i])):\n if ttest[i][j] < 0.01:\n codon = text[i][j][:3]\n if codon in codon_map:\n codon_dict[codon_map[codon]].append(codon)\n \n file = io.open('AAs_to_compare.txt', 'w') \n file.write('Compare following AAs\\n')\n # AAs that have only two codon choices and show significant \n # codon usage difference between SP and LP groups\n AAs = []\n \n for AA in codon_dict.keys():\n AAs.append(AA) \n if len(codon_dict[AA]) == 2:\n file.write('%s: %s, %s\\n' % \n (AA, codon_dict[AA][0], codon_dict[AA][1]))\n else:\n file.write('%s: %s\\n' % (AA, codon_dict[AA][0]))\n \n file.close()\n \n return AAs\n \n\ndef plot_SP_LP(sp_AA_dict, lp_AA_dict):\n # plot each AA\n for AA in list(sp_AA_dict.keys()): \n # list of codon usage information\n codon_data = []\n # List of codon names\n codons = []\n \n for codon in sp_AA_dict[AA]: \n # LP group data is displayed from lowest expressed genes \n # to highest expressed genes\n lp_AA_dict[AA][codon].reverse()\n \n codons.append(codon) \n codon_data.append([])\n # display SP group data first and then LP group data\n codon_data[-1].append(sp_AA_dict[AA][codon]) \n codon_data[-1].append(lp_AA_dict[AA][codon])\n \n # plot usage curves \n codon_usage_plot(codon_data, AA, codons)\n\n \ndef codon_usage_plot(data, AA, 
codons):\n fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (15,5))\n \n for i in range(len(data)):\n # 0-50 shows SP group data\n x_sp = np.linspace(0, 50, len(data[i][0]))\n # 50-100 shows LP group data\n x_lp = np.linspace(50, 100, len(data[i][1]))\n \n ax.plot(x_sp, data[i][0], label = 'sp_' + codons[i])\n ax.plot(x_lp, data[i][1], label = 'lp_' + codons[i])\n ax.legend(loc = 1)\n ax.set_title(AA)\n\n \ndef plot_distribution(sp_dict, lp_dict, AA):\n fig, axes = plt.subplots(nrows = 2, ncols =1, figsize = (40, 20))\n\n for codon in sp_dict[AA]:\n x = np.arange(len(sp_dict[AA][codon]))\n sp_y = np.array(sp_dict[AA][codon])\n lp_y = np.array(lp_dict[AA][codon])\n \n axes[0].plot(x, sp_y)\n axes[1].plot(x, lp_y)\n \n plt.show\n\n\ndef get_skellam_distribution(sp_dict, lp_dict, AA): \n sp_mu = {}\n lp_mu = {}\n codons = []\n \n # get mean values\n for codon in sp_dict[AA]:\n codons.append(codon)\n sp_mu[codon] = np.mean(sp_dict[AA][codon])\n lp_mu[codon] = np.mean(lp_dict[AA][codon])\n \n skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)\n skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)\n\n\ndef skellam_plot(mu1, mu2, name): \n print(mu1,' ', mu2, ' ', mu1-mu2, ' ', name)\n\n fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (5, 5)) \n x = np.arange(stats.skellam.ppf(0.01, mu1, mu2), \n stats.skellam.ppf(0.99, mu1, mu2))\n ax.plot(x, stats.skellam.pmf(x, mu1, mu2), marker = 'o', label = name)\n ax.legend(loc = 1)\n \n plt.show\n \n \n# main flow\nargs = parse_args()\nsp_codon_usage = Codon_Usage(args.sp_file)\nlp_codon_usage = Codon_Usage(args.lp_file)\n\nsp_AA_dict = sp_codon_usage.get_AA_dict() \nlp_AA_dict = lp_codon_usage.get_AA_dict()\n\nprint(\"Analyzing SP and LP %s group data\\n\" % (args.label))\n \nAAs = heatmap_SP_LP(sp_AA_dict, lp_AA_dict, args.label)\nplot_SP_LP(sp_AA_dict, lp_AA_dict)\n\n# optional\n# get Skellam distributions of AAs that have only two codon choices \n# and show distictive usage between SP and 
LP\n'''\nsp_all_codon_usage = Codon_Usage('SP_all_gene_seq.txt')\nlp_all_codon_usage = Codon_Usage('LP_all_gene_seq.txt')\n\nsp_all_AA_dict = sp_all_codon_usage.get_AA_dict() \nlp_all_AA_dict = lp_all_codon_usage.get_AA_dict()\n\nfor AA in AAs:\n plot_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)\n get_skellam_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)\n'''",
"step-ids": [
11,
13,
16,
20,
21
]
}
|
[
11,
13,
16,
20,
21
] |
<|reserved_special_token_0|>
class KnowValues(unittest.TestCase):
def test_ls_contributing(self):
""" To test the list of contributing centers """
sv = nao(gto=mol)
pb = prod_basis()
pb.sv = sv
pb.sv.ao_log.sp2rcut[0] = 10.0
pb.prod_log = sv.ao_log
pb.prod_log.sp2rcut[0] = 10.0
pb.ac_rcut = max(sv.ao_log.sp2rcut)
pb.ac_npc_max = 10
lsc = pb.ls_contributing(0, 1)
self.assertEqual(len(lsc), 10)
lsref = [0, 1, 13, 7, 5, 43, 42, 39, 38, 10]
for i, ref in enumerate(lsref):
self.assertEqual(lsc[i], ref)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
fp.write(ag_s7l7_wonatoms)
fp.close()
<|reserved_special_token_0|>
class KnowValues(unittest.TestCase):
def test_ls_contributing(self):
""" To test the list of contributing centers """
sv = nao(gto=mol)
pb = prod_basis()
pb.sv = sv
pb.sv.ao_log.sp2rcut[0] = 10.0
pb.prod_log = sv.ao_log
pb.prod_log.sp2rcut[0] = 10.0
pb.ac_rcut = max(sv.ao_log.sp2rcut)
pb.ac_npc_max = 10
lsc = pb.ls_contributing(0, 1)
self.assertEqual(len(lsc), 10)
lsref = [0, 1, 13, 7, 5, 43, 42, 39, 38, 10]
for i, ref in enumerate(lsref):
self.assertEqual(lsc[i], ref)
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ag_s7l7_wonatoms = """
H 2.346340 -0.000093 -1.449987
H 2.346702 -0.000095 1.450132
H -2.345370 -0.000086 -1.449228
H -2.345734 -0.000089 1.449376
H -1.449887 2.346112 -0.000046
H 1.450134 2.346853 -0.000044
H -1.449224 -2.345222 -0.000041
H 1.449464 -2.345958 -0.000038
H -0.000112 -1.449738 2.345957
H -0.000111 1.449980 2.346608
H -0.000111 -1.449377 -2.345464
H -0.000107 1.449607 -2.346103
H 4.731536 -0.000009 -2.923633
H 4.794344 0.000006 0.000053
H 4.731450 -0.000009 2.923590
H -4.731847 -0.000004 -2.923807
H -4.794483 0.000008 0.000053
H -4.731757 -0.000006 2.923758
H -2.923688 4.731598 0.000002
H 0.000077 4.794367 0.000013
H 2.923553 4.731432 0.000002
H -2.923845 -4.731869 0.000004
H 0.000084 -4.794470 0.000009
H 2.923708 -4.731700 0.000004
H -0.000016 -2.923710 4.731655
H -0.000002 0.000081 4.794386
H -0.000017 2.923594 4.731497
H -0.000016 -2.923799 -4.731798
H -0.000002 0.000083 -4.794441
H -0.000018 2.923687 -4.731644
H 2.396856 -1.481019 3.878620
H 2.396773 1.481058 3.878614
H 2.396905 -1.481021 -3.878636
H 2.396824 1.481062 -3.878634
H -2.396782 -1.481108 3.878712
H -2.396699 1.481149 3.878709
H -2.396832 -1.481114 -3.878735
H -2.396748 1.481155 -3.878730
H 3.878596 2.396822 -1.481024
H 3.878589 2.396778 1.481062
H -3.878672 2.396916 -1.481031
H -3.878666 2.396868 1.481064
H 3.878682 -2.396737 -1.481107
H 3.878676 -2.396695 1.481146
H -3.878757 -2.396826 -1.481115
H -3.878754 -2.396779 1.481148
H -1.481047 3.878627 2.396831
H 1.481072 3.878617 2.396742
H -1.481055 -3.878680 2.396921
H 1.481078 -3.878674 2.396834
H -1.481096 3.878678 -2.396773
H 1.481119 3.878671 -2.396685
H -1.481102 -3.878731 -2.396860
H 1.481126 -3.878722 -2.396772
H 7.150331 0.000013 -4.418604
H 7.225782 0.000009 -1.477531
H 7.225777 0.000009 1.477551
H 7.150346 0.000010 4.418636
H -7.150239 0.000015 -4.418552
H -7.225701 0.000010 -1.477539
H -7.225697 0.000009 1.477559
H -7.150257 0.000015 4.418586
H -4.418596 7.150312 0.000012
H -1.477538 7.225777 0.000010
H 1.477536 7.225775 0.000011
H 4.418635 7.150362 0.000012
H -4.418553 -7.150222 0.000020
H -1.477554 -7.225705 0.000012
H 1.477559 -7.225701 0.000011
H 4.418598 -7.150270 0.000013
H 0.000008 -4.418580 7.150295
H 0.000006 -1.477536 7.225760
H 0.000007 1.477549 7.225757
H 0.000007 4.418626 7.150335
H 0.000008 -4.418561 -7.150247
H 0.000006 -1.477545 -7.225726
H 0.000006 1.477561 -7.225723
H 0.000007 4.418613 -7.150287
H 4.808303 -1.493555 5.388587
H 2.417464 -2.971656 6.301956
H 4.808308 1.493581 5.388605
H 2.431587 0.000014 6.366095
H 2.417478 2.971674 6.301966
H 4.808303 -1.493546 -5.388552
H 2.417452 -2.971655 -6.301934
H 4.808310 1.493573 -5.388572
H 2.431585 0.000016 -6.366071
H 2.417464 2.971677 -6.301941
H -4.808288 -1.493538 5.388559
H -2.417439 -2.971638 6.301924
H -4.808292 1.493569 5.388578
H -2.431572 0.000013 6.366082
H -2.417452 2.971656 6.301933
H -4.808287 -1.493528 -5.388525
H -2.417427 -2.971639 -6.301899
H -4.808292 1.493561 -5.388546
H -2.431572 0.000014 -6.366056
H -2.417439 2.971659 -6.301909
H 5.388603 4.808319 -1.493559
H 6.301970 2.417487 -2.971653
H 5.388608 4.808321 1.493584
H 6.366098 2.431602 0.000014
H 6.301967 2.417490 2.971675
H -5.388548 4.808294 -1.493543
H -6.301922 2.417455 -2.971644
H -5.388553 4.808296 1.493566
H -6.366058 2.431589 0.000013
H -6.301920 2.417459 2.971662
H 5.388578 -4.808301 -1.493544
H 6.301948 -2.417448 -2.971646
H 5.388584 -4.808302 1.493574
H 6.366092 -2.431572 0.000013
H 6.301945 -2.417454 2.971667
H -5.388520 -4.808272 -1.493529
H -6.301896 -2.417412 -2.971637
H -5.388529 -4.808274 1.493557
H -6.366050 -2.431556 0.000012
H -6.301896 -2.417417 2.971658
H -1.493562 5.388587 4.808302
H -2.971658 6.301956 2.417476
H 1.493576 5.388607 4.808320
H 0.000001 6.366100 2.431597
H 2.971666 6.301972 2.417493
H -1.493543 -5.388526 4.808289
H -2.971649 -6.301906 2.417444
H 1.493555 -5.388547 4.808306
H 0.000002 -6.366051 2.431589
H 2.971661 -6.301921 2.417458
H -1.493560 5.388586 -4.808287
H -2.971654 6.301946 -2.417449
H 1.493572 5.388604 -4.808304
H -0.000002 6.366099 -2.431566
H 2.971663 6.301961 -2.417463
H -1.493541 -5.388524 -4.808272
H -2.971647 -6.301895 -2.417411
H 1.493554 -5.388544 -4.808291
H 0.000005 -6.366052 -2.431562
H 2.971660 -6.301911 -2.417425
H 3.933950 -3.933932 3.933958
H 3.933959 3.933967 3.933963
H 3.933948 -3.933931 -3.933931
H 3.933957 3.933967 -3.933938
H -3.933922 -3.933912 3.933940
H -3.933929 3.933948 3.933945
H -3.933921 -3.933910 -3.933915
H -3.933929 3.933948 -3.933922
H 9.586490 0.000010 -5.924178
H 9.665986 0.000008 -2.972164
H 9.696371 0.000007 0.000010
H 9.665971 0.000008 2.972190
H 9.586467 0.000009 5.924179
H -9.586484 0.000013 -5.924188
H -9.665980 0.000009 -2.972165
H -9.696370 0.000008 0.000012
H -9.665971 0.000008 2.972185
H -9.586466 0.000011 5.924187
H -5.924179 9.586482 0.000006
H -2.972172 9.665973 0.000007
H 0.000003 9.696364 0.000008
H 2.972183 9.665974 0.000008
H 5.924181 9.586480 0.000005
H -5.924189 -9.586477 0.000008
H -2.972172 -9.665971 0.000010
H 0.000008 -9.696374 0.000008
H 2.972190 -9.665972 0.000008
H 5.924193 -9.586474 0.000007
H 0.000007 -5.924168 9.586474
H 0.000007 -2.972167 9.665967
H 0.000006 0.000007 9.696360
H 0.000008 2.972186 9.665963
H 0.000006 5.924181 9.586462
H 0.000007 -5.924186 -9.586489
H 0.000007 -2.972170 -9.665982
H 0.000006 0.000009 -9.696379
H 0.000007 2.972190 -9.665979
H 0.000006 5.924201 -9.586480
H 7.237307 -1.500148 6.901190
H 4.847669 -2.996238 7.843968
H 2.428125 -4.472804 8.738149
H 7.237301 1.500165 6.901187
H 4.862151 0.000009 7.909330
H 2.452082 -1.489596 8.829834
H 4.847661 2.996251 7.843964
H 2.452083 1.489612 8.829831
H 2.428122 4.472818 8.738145
H 7.237323 -1.500151 -6.901186
H 4.847685 -2.996244 -7.843971
H 2.428136 -4.472811 -8.738153
H 7.237318 1.500169 -6.901185
H 4.862162 0.000010 -7.909328
H 2.452091 -1.489597 -8.829834
H 4.847679 2.996257 -7.843969
H 2.452080 1.489614 -8.829849
H 2.428132 4.472826 -8.738153
H -7.237293 -1.500149 6.901191
H -4.847661 -2.996241 7.843973
H -2.428114 -4.472804 8.738152
H -7.237288 1.500171 6.901188
H -4.862140 0.000012 7.909329
H -2.452056 -1.489593 8.829846
H -4.847654 2.996255 7.843972
H -2.452058 1.489609 8.829843
H -2.428111 4.472819 8.738150
H -7.237306 -1.500153 -6.901191
H -4.847677 -2.996244 -7.843973
H -2.428122 -4.472813 -8.738157
H -7.237299 1.500176 -6.901188
H -4.862151 0.000012 -7.909331
H -2.452064 -1.489595 -8.829852
H -4.847670 2.996260 -7.843970
H -2.452064 1.489612 -8.829849
H -2.428117 4.472829 -8.738155
H 6.901190 7.237319 -1.500150
H 7.843970 4.847677 -2.996237
H 8.738156 2.428132 -4.472807
H 6.901187 7.237315 1.500164
H 7.909334 4.862159 0.000009
H 8.829843 2.452089 -1.489594
H 7.843962 4.847670 2.996250
H 8.829839 2.452089 1.489612
H 8.738147 2.428123 4.472820
H -6.901186 7.237313 -1.500153
H -7.843972 4.847681 -2.996242
H -8.738156 2.428143 -4.472808
H -6.901182 7.237309 1.500168
H -7.909325 4.862158 0.000009
H -8.829845 2.452079 -1.489595
H -7.843964 4.847675 2.996256
H -8.829842 2.452078 1.489612
H -8.738145 2.428137 4.472818
H 6.901200 -7.237306 -1.500153
H 7.843981 -4.847666 -2.996242
H 8.738162 -2.428114 -4.472812
H 6.901198 -7.237297 1.500171
H 7.909337 -4.862146 0.000011
H 8.829845 -2.452068 -1.489594
H 7.843972 -4.847660 2.996258
H 8.829841 -2.452068 1.489613
H 8.738152 -2.428107 4.472824
H -6.901190 -7.237297 -1.500154
H -7.843979 -4.847668 -2.996248
H -8.738159 -2.428121 -4.472812
H -6.901189 -7.237293 1.500171
H -7.909326 -4.862145 0.000009
H -8.829847 -2.452058 -1.489596
H -7.843972 -4.847664 2.996263
H -8.829843 -2.452057 1.489614
H -8.738148 -2.428115 4.472824
H -1.500153 6.901189 7.237302
H -2.996243 7.843968 4.847670
H -4.472809 8.738156 2.428132
H 1.500162 6.901186 7.237307
H 0.000005 7.909331 4.862151
H -1.489597 8.829847 2.452075
H 2.996247 7.843960 4.847669
H 1.489606 8.829848 2.452078
H 4.472817 8.738150 2.428124
H -1.500152 -6.901177 7.237309
H -2.996247 -7.843968 4.847679
H -4.472811 -8.738149 2.428139
H 1.500163 -6.901178 7.237314
H 0.000006 -7.909318 4.862160
H -1.489601 -8.829840 2.452076
H 2.996251 -7.843960 4.847678
H 1.489612 -8.829840 2.452078
H 4.472822 -8.738143 2.428130
H -1.500158 6.901205 -7.237297
H -2.996246 7.843981 -4.847659
H -4.472815 8.738162 -2.428115
H 1.500171 6.901202 -7.237297
H 0.000007 7.909340 -4.862141
H -1.489596 8.829841 -2.452068
H 2.996253 7.843975 -4.847660
H 1.489606 8.829841 -2.452069
H 4.472822 8.738158 -2.428111
H -1.500158 -6.901191 -7.237303
H -2.996251 -7.843978 -4.847665
H -4.472815 -8.738153 -2.428116
H 1.500171 -6.901191 -7.237303
H 0.000007 -7.909327 -4.862149
H -1.489602 -8.829834 -2.452067
H 2.996258 -7.843975 -4.847668
H 1.489613 -8.829834 -2.452069
H 4.472827 -8.738151 -2.428114
H 6.377354 -3.967092 5.457178
H 6.377349 3.967112 5.457173
H 5.457177 -6.377342 3.967111
H 5.457169 6.377356 3.967110
H 3.967106 -5.457157 6.377359
H 3.967106 5.457174 6.377352
H 6.377366 -3.967096 -5.457166
H 6.377363 3.967116 -5.457163
H 5.457183 -6.377353 -3.967095
H 5.457176 6.377363 -3.967098
H 3.967113 -5.457167 -6.377350
H 3.967114 5.457185 -6.377343
H -6.377340 -3.967091 5.457179
H -6.377336 3.967112 5.457174
H -5.457166 -6.377340 3.967111
H -5.457161 6.377353 3.967111
H -3.967095 -5.457156 6.377359
H -3.967095 5.457171 6.377352
H -6.377355 -3.967095 -5.457168
H -6.377350 3.967118 -5.457163
H -5.457171 -6.377349 -3.967099
H -5.457166 6.377363 -3.967100
H -3.967102 -5.457168 -6.377348
H -3.967101 5.457185 -6.377342
H 12.038641 0.000003 -7.440548
H 12.099988 0.000003 -4.483580
H 12.144240 0.000006 -1.497071
H 12.144233 0.000006 1.497083
H 12.099971 0.000003 4.483587
H 12.038623 0.000004 7.440541
H -12.038651 -0.000000 -7.440559
H -12.099994 0.000006 -4.483580
H -12.144253 0.000006 -1.497073
H -12.144248 0.000005 1.497083
H -12.099982 0.000005 4.483584
H -12.038629 0.000000 7.440551
H -7.440551 12.038630 0.000002
H -4.483583 12.099975 0.000003
H -1.497079 12.144221 0.000004
H 1.497075 12.144221 0.000005
H 4.483580 12.099968 0.000003
H 7.440535 12.038629 0.000002
H -7.440565 -12.038641 0.000002
H -4.483589 -12.099999 0.000005
H -1.497078 -12.144261 0.000007
H 1.497079 -12.144262 0.000005
H 4.483587 -12.099997 0.000004
H 7.440549 -12.038645 0.000001
H -0.000009 -7.440541 12.038631
H 0.000001 -4.483578 12.099976
H 0.000001 -1.497071 12.144229
H 0.000001 1.497078 12.144221
H 0.000002 4.483585 12.099964
H -0.000007 7.440539 12.038614
H -0.000008 -7.440559 -12.038653
H 0.000002 -4.483583 -12.100005
H 0.000001 -1.497073 -12.144265
H 0.000002 1.497082 -12.144259
H 0.000001 4.483590 -12.099993
H -0.000006 7.440555 -12.038641
H 9.677193 -1.496906 8.403454
H 7.283257 -3.003960 9.361664
H 4.860366 -4.500621 10.287633
H 2.422429 -5.980893 11.174337
H 9.677186 1.496910 8.403449
H 7.301204 0.000004 9.440827
H 4.899855 -1.498367 10.403244
H 2.464544 -2.988767 11.288143
H 7.283251 3.003967 9.361652
H 4.899853 1.498377 10.403239
H 2.475659 0.000006 11.329680
H 4.860357 4.500623 10.287616
H 2.464540 2.988773 11.288133
H 2.422424 5.980895 11.174321
H 9.677209 -1.496912 -8.403465
H 7.283277 -3.003970 -9.361682
H 4.860384 -4.500636 -10.287647
H 2.422443 -5.980905 -11.174358
H 9.677204 1.496921 -8.403460
H 7.301220 0.000005 -9.440842
H 4.899871 -1.498371 -10.403268
H 2.464557 -2.988775 -11.288169
H 7.283270 3.003976 -9.361672
H 4.899868 1.498380 -10.403263
H 2.475670 0.000005 -11.329709
H 4.860376 4.500637 -10.287640
H 2.464554 2.988779 -11.288159
H 2.422434 5.980909 -11.174343
H -9.677190 -1.496908 8.403467
H -7.283257 -3.003963 9.361672
H -4.860371 -4.500629 10.287635
H -2.422430 -5.980892 11.174337
H -9.677187 1.496917 8.403462
H -7.301202 0.000005 9.440837
H -4.899849 -1.498370 10.403248
H -2.464542 -2.988769 11.288143
H -7.283250 3.003968 9.361662
H -4.899846 1.498382 10.403245
H -2.475653 0.000007 11.329681
H -4.860362 4.500633 10.287620
H -2.464539 2.988776 11.288131
H -2.422424 5.980896 11.174322
H -9.677207 -1.496913 -8.403482
H -7.283279 -3.003972 -9.361692
H -4.860387 -4.500634 -10.287661
H -2.422442 -5.980906 -11.174359
H -9.677203 1.496924 -8.403476
H -7.301216 0.000005 -9.440853
H -4.899865 -1.498371 -10.403273
H -2.464552 -2.988774 -11.288171
H -7.283271 3.003977 -9.361683
H -4.899862 1.498381 -10.403269
H -2.475664 0.000006 -11.329711
H -4.860376 4.500638 -10.287646
H -2.464549 2.988781 -11.288160
H -2.422433 5.980910 -11.174344
H 8.403451 9.677198 -1.496905
H 9.361660 7.283263 -3.003959
H 10.287632 4.860371 -4.500620
H 11.174342 2.422434 -5.980899
H 8.403446 9.677194 1.496911
H 9.440827 7.301209 0.000005
H 10.403246 4.899862 -1.498367
H 11.288149 2.464551 -2.988769
H 9.361650 7.283254 3.003966
H 10.403241 4.899860 1.498377
H 11.329691 2.475665 0.000007
H 10.287617 4.860361 4.500626
H 11.288137 2.464548 2.988779
H 11.174326 2.422424 5.980898
H -8.403460 9.677196 -1.496910
H -9.361670 7.283266 -3.003964
H -10.287643 4.860386 -4.500636
H -11.174348 2.422445 -5.980904
H -8.403456 9.677194 1.496915
H -9.440835 7.301210 0.000006
H -10.403255 4.899863 -1.498369
H -11.288154 2.464562 -2.988776
H -9.361661 7.283260 3.003973
H -10.403249 4.899861 1.498381
H -11.329693 2.475669 0.000006
H -10.287628 4.860378 4.500640
H -11.288144 2.464559 2.988785
H -11.174330 2.422439 5.980903
H 8.403472 -9.677206 -1.496911
H 9.361686 -7.283270 -3.003967
H 10.287643 -4.860371 -4.500629
H 11.174350 -2.422427 -5.980904
H 8.403468 -9.677203 1.496918
H 9.440847 -7.301215 0.000005
H 10.403263 -4.899857 -1.498370
H 11.288155 -2.464540 -2.988768
H 9.361677 -7.283262 3.003974
H 10.403258 -4.899854 1.498381
H 11.329699 -2.475655 0.000006
H 10.287637 -4.860362 4.500633
H 11.288145 -2.464540 2.988780
H 11.174334 -2.422420 5.980905
H -8.403482 -9.677201 -1.496913
H -9.361695 -7.283272 -3.003971
H -10.287648 -4.860385 -4.500644
H -11.174357 -2.422436 -5.980909
H -8.403479 -9.677198 1.496923
H -9.440853 -7.301212 0.000006
H -10.403270 -4.899858 -1.498373
H -11.288161 -2.464554 -2.988778
H -9.361685 -7.283267 3.003981
H -10.403265 -4.899857 1.498385
H -11.329701 -2.475659 0.000006
H -10.287644 -4.860378 4.500647
H -11.288152 -2.464553 2.988786
H -11.174339 -2.422432 5.980907
H -1.496905 8.403449 9.677180
H -3.003957 9.361655 7.283252
H -4.500628 10.287621 4.860368
H -5.980895 11.174331 2.422431
H 1.496909 8.403447 9.677182
H 0.000004 9.440823 7.301201
H -1.498372 10.403237 4.899852
H -2.988773 11.288135 2.464550
H 3.003964 9.361649 7.283252
H 1.498374 10.403237 4.899852
H 0.000002 11.329673 2.475664
H 4.500620 10.287615 4.860360
H 2.988773 11.288132 2.464547
H 5.980894 11.174325 2.422427
H -1.496909 -8.403459 9.677196
H -3.003968 -9.361674 7.283271
H -4.500641 -10.287646 4.860388
H -5.980907 -11.174350 2.422445
H 1.496913 -8.403457 9.677200
H 0.000003 -9.440837 7.301216
H -1.498376 -10.403259 4.899869
H -2.988779 -11.288160 2.464559
H 3.003975 -9.361670 7.283272
H 1.498380 -10.403259 4.899869
H 0.000003 -11.329705 2.475670
H 4.500634 -10.287640 4.860379
H 2.988780 -11.288157 2.464554
H 5.980909 -11.174345 2.422438
H -1.496912 8.403471 -9.677194
H -3.003965 9.361677 -7.283259
H -4.500629 10.287638 -4.860363
H -5.980898 11.174338 -2.422428
H 1.496914 8.403469 -9.677196
H 0.000004 9.440843 -7.301203
H -1.498370 10.403254 -4.899850
H -2.988770 11.288143 -2.464539
H 3.003972 9.361671 -7.283257
H 1.498377 10.403253 -4.899848
H 0.000002 11.329681 -2.475654
H 4.500628 10.287631 -4.860359
H 2.988772 11.288140 -2.464537
H 5.980897 11.174332 -2.422423
H -1.496916 -8.403481 -9.677208
H -3.003978 -9.361696 -7.283276
H -4.500640 -10.287662 -4.860381
H -5.980909 -11.174356 -2.422438
H 1.496920 -8.403479 -9.677211
H 0.000003 -9.440857 -7.301218
H -1.498376 -10.403275 -4.899866
H -2.988776 -11.288169 -2.464544
H 3.003983 -9.361693 -7.283276
H 1.498382 -10.403275 -4.899865
H 0.000004 -11.329712 -2.475660
H 4.500643 -10.287646 -4.860375
H 2.988780 -11.288164 -2.464543
H 5.980912 -11.174352 -2.422432
H 8.823514 -3.987216 6.975976
H 8.823502 3.987220 6.975966
H 7.928324 -6.428771 5.503194
H 7.928307 6.428772 5.503181
H 6.975982 -8.823518 3.987231
H 6.975962 8.823505 3.987222
H 6.428780 -5.503180 7.928320
H 6.428768 5.503182 7.928308
H 5.503192 -7.928317 6.428783
H 5.503180 7.928307 6.428768
H 3.987227 -6.975968 8.823516
H 3.987219 6.975963 8.823502
H 8.823531 -3.987227 -6.975977
H 8.823524 3.987231 -6.975969
H 7.928335 -6.428786 -5.503192
H 7.928318 6.428785 -5.503181
H 6.975988 -8.823529 -3.987226
H 6.975969 8.823515 -3.987217
H 6.428797 -5.503193 -7.928331
H 6.428784 5.503196 -7.928316
H 5.503204 -7.928332 -6.428787
H 5.503193 7.928321 -6.428774
H 3.987241 -6.975984 -8.823526
H 3.987231 6.975978 -8.823511
H -8.823510 -3.987222 6.975980
H -8.823504 3.987228 6.975972
H -7.928324 -6.428779 5.503202
H -7.928304 6.428779 5.503191
H -6.975981 -8.823519 3.987237
H -6.975964 8.823508 3.987226
H -6.428779 -5.503186 7.928325
H -6.428768 5.503188 7.928311
H -5.503191 -7.928320 6.428792
H -5.503179 7.928309 6.428775
H -3.987221 -6.975969 8.823517
H -3.987213 6.975965 8.823503
H -8.823529 -3.987231 -6.975982
H -8.823521 3.987238 -6.975974
H -7.928336 -6.428791 -5.503200
H -7.928317 6.428791 -5.503190
H -6.975987 -8.823528 -3.987232
H -6.975972 8.823516 -3.987222
H -6.428795 -5.503199 -7.928335
H -6.428782 5.503201 -7.928321
H -5.503203 -7.928336 -6.428794
H -5.503191 7.928325 -6.428778
H -3.987236 -6.975985 -8.823528
H -3.987226 6.975980 -8.823514
"""
fname = 'ag_s7l7_wonatoms.xyz'
fp = open(fname, 'w')
fp.write(ag_s7l7_wonatoms)
fp.close()
mol = gto.M(verbose=1, atom=open(fname).read())
class KnowValues(unittest.TestCase):
def test_ls_contributing(self):
""" To test the list of contributing centers """
sv = nao(gto=mol)
pb = prod_basis()
pb.sv = sv
pb.sv.ao_log.sp2rcut[0] = 10.0
pb.prod_log = sv.ao_log
pb.prod_log.sp2rcut[0] = 10.0
pb.ac_rcut = max(sv.ao_log.sp2rcut)
pb.ac_npc_max = 10
lsc = pb.ls_contributing(0, 1)
self.assertEqual(len(lsc), 10)
lsref = [0, 1, 13, 7, 5, 43, 42, 39, 38, 10]
for i, ref in enumerate(lsref):
self.assertEqual(lsc[i], ref)
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
from __future__ import print_function, division
import unittest
from pyscf import gto
import os
from pyscf.nao import nao, prod_basis
ag_s7l7_wonatoms = """
H 2.346340 -0.000093 -1.449987
H 2.346702 -0.000095 1.450132
H -2.345370 -0.000086 -1.449228
H -2.345734 -0.000089 1.449376
H -1.449887 2.346112 -0.000046
H 1.450134 2.346853 -0.000044
H -1.449224 -2.345222 -0.000041
H 1.449464 -2.345958 -0.000038
H -0.000112 -1.449738 2.345957
H -0.000111 1.449980 2.346608
H -0.000111 -1.449377 -2.345464
H -0.000107 1.449607 -2.346103
H 4.731536 -0.000009 -2.923633
H 4.794344 0.000006 0.000053
H 4.731450 -0.000009 2.923590
H -4.731847 -0.000004 -2.923807
H -4.794483 0.000008 0.000053
H -4.731757 -0.000006 2.923758
H -2.923688 4.731598 0.000002
H 0.000077 4.794367 0.000013
H 2.923553 4.731432 0.000002
H -2.923845 -4.731869 0.000004
H 0.000084 -4.794470 0.000009
H 2.923708 -4.731700 0.000004
H -0.000016 -2.923710 4.731655
H -0.000002 0.000081 4.794386
H -0.000017 2.923594 4.731497
H -0.000016 -2.923799 -4.731798
H -0.000002 0.000083 -4.794441
H -0.000018 2.923687 -4.731644
H 2.396856 -1.481019 3.878620
H 2.396773 1.481058 3.878614
H 2.396905 -1.481021 -3.878636
H 2.396824 1.481062 -3.878634
H -2.396782 -1.481108 3.878712
H -2.396699 1.481149 3.878709
H -2.396832 -1.481114 -3.878735
H -2.396748 1.481155 -3.878730
H 3.878596 2.396822 -1.481024
H 3.878589 2.396778 1.481062
H -3.878672 2.396916 -1.481031
H -3.878666 2.396868 1.481064
H 3.878682 -2.396737 -1.481107
H 3.878676 -2.396695 1.481146
H -3.878757 -2.396826 -1.481115
H -3.878754 -2.396779 1.481148
H -1.481047 3.878627 2.396831
H 1.481072 3.878617 2.396742
H -1.481055 -3.878680 2.396921
H 1.481078 -3.878674 2.396834
H -1.481096 3.878678 -2.396773
H 1.481119 3.878671 -2.396685
H -1.481102 -3.878731 -2.396860
H 1.481126 -3.878722 -2.396772
H 7.150331 0.000013 -4.418604
H 7.225782 0.000009 -1.477531
H 7.225777 0.000009 1.477551
H 7.150346 0.000010 4.418636
H -7.150239 0.000015 -4.418552
H -7.225701 0.000010 -1.477539
H -7.225697 0.000009 1.477559
H -7.150257 0.000015 4.418586
H -4.418596 7.150312 0.000012
H -1.477538 7.225777 0.000010
H 1.477536 7.225775 0.000011
H 4.418635 7.150362 0.000012
H -4.418553 -7.150222 0.000020
H -1.477554 -7.225705 0.000012
H 1.477559 -7.225701 0.000011
H 4.418598 -7.150270 0.000013
H 0.000008 -4.418580 7.150295
H 0.000006 -1.477536 7.225760
H 0.000007 1.477549 7.225757
H 0.000007 4.418626 7.150335
H 0.000008 -4.418561 -7.150247
H 0.000006 -1.477545 -7.225726
H 0.000006 1.477561 -7.225723
H 0.000007 4.418613 -7.150287
H 4.808303 -1.493555 5.388587
H 2.417464 -2.971656 6.301956
H 4.808308 1.493581 5.388605
H 2.431587 0.000014 6.366095
H 2.417478 2.971674 6.301966
H 4.808303 -1.493546 -5.388552
H 2.417452 -2.971655 -6.301934
H 4.808310 1.493573 -5.388572
H 2.431585 0.000016 -6.366071
H 2.417464 2.971677 -6.301941
H -4.808288 -1.493538 5.388559
H -2.417439 -2.971638 6.301924
H -4.808292 1.493569 5.388578
H -2.431572 0.000013 6.366082
H -2.417452 2.971656 6.301933
H -4.808287 -1.493528 -5.388525
H -2.417427 -2.971639 -6.301899
H -4.808292 1.493561 -5.388546
H -2.431572 0.000014 -6.366056
H -2.417439 2.971659 -6.301909
H 5.388603 4.808319 -1.493559
H 6.301970 2.417487 -2.971653
H 5.388608 4.808321 1.493584
H 6.366098 2.431602 0.000014
H 6.301967 2.417490 2.971675
H -5.388548 4.808294 -1.493543
H -6.301922 2.417455 -2.971644
H -5.388553 4.808296 1.493566
H -6.366058 2.431589 0.000013
H -6.301920 2.417459 2.971662
H 5.388578 -4.808301 -1.493544
H 6.301948 -2.417448 -2.971646
H 5.388584 -4.808302 1.493574
H 6.366092 -2.431572 0.000013
H 6.301945 -2.417454 2.971667
H -5.388520 -4.808272 -1.493529
H -6.301896 -2.417412 -2.971637
H -5.388529 -4.808274 1.493557
H -6.366050 -2.431556 0.000012
H -6.301896 -2.417417 2.971658
H -1.493562 5.388587 4.808302
H -2.971658 6.301956 2.417476
H 1.493576 5.388607 4.808320
H 0.000001 6.366100 2.431597
H 2.971666 6.301972 2.417493
H -1.493543 -5.388526 4.808289
H -2.971649 -6.301906 2.417444
H 1.493555 -5.388547 4.808306
H 0.000002 -6.366051 2.431589
H 2.971661 -6.301921 2.417458
H -1.493560 5.388586 -4.808287
H -2.971654 6.301946 -2.417449
H 1.493572 5.388604 -4.808304
H -0.000002 6.366099 -2.431566
H 2.971663 6.301961 -2.417463
H -1.493541 -5.388524 -4.808272
H -2.971647 -6.301895 -2.417411
H 1.493554 -5.388544 -4.808291
H 0.000005 -6.366052 -2.431562
H 2.971660 -6.301911 -2.417425
H 3.933950 -3.933932 3.933958
H 3.933959 3.933967 3.933963
H 3.933948 -3.933931 -3.933931
H 3.933957 3.933967 -3.933938
H -3.933922 -3.933912 3.933940
H -3.933929 3.933948 3.933945
H -3.933921 -3.933910 -3.933915
H -3.933929 3.933948 -3.933922
H 9.586490 0.000010 -5.924178
H 9.665986 0.000008 -2.972164
H 9.696371 0.000007 0.000010
H 9.665971 0.000008 2.972190
H 9.586467 0.000009 5.924179
H -9.586484 0.000013 -5.924188
H -9.665980 0.000009 -2.972165
H -9.696370 0.000008 0.000012
H -9.665971 0.000008 2.972185
H -9.586466 0.000011 5.924187
H -5.924179 9.586482 0.000006
H -2.972172 9.665973 0.000007
H 0.000003 9.696364 0.000008
H 2.972183 9.665974 0.000008
H 5.924181 9.586480 0.000005
H -5.924189 -9.586477 0.000008
H -2.972172 -9.665971 0.000010
H 0.000008 -9.696374 0.000008
H 2.972190 -9.665972 0.000008
H 5.924193 -9.586474 0.000007
H 0.000007 -5.924168 9.586474
H 0.000007 -2.972167 9.665967
H 0.000006 0.000007 9.696360
H 0.000008 2.972186 9.665963
H 0.000006 5.924181 9.586462
H 0.000007 -5.924186 -9.586489
H 0.000007 -2.972170 -9.665982
H 0.000006 0.000009 -9.696379
H 0.000007 2.972190 -9.665979
H 0.000006 5.924201 -9.586480
H 7.237307 -1.500148 6.901190
H 4.847669 -2.996238 7.843968
H 2.428125 -4.472804 8.738149
H 7.237301 1.500165 6.901187
H 4.862151 0.000009 7.909330
H 2.452082 -1.489596 8.829834
H 4.847661 2.996251 7.843964
H 2.452083 1.489612 8.829831
H 2.428122 4.472818 8.738145
H 7.237323 -1.500151 -6.901186
H 4.847685 -2.996244 -7.843971
H 2.428136 -4.472811 -8.738153
H 7.237318 1.500169 -6.901185
H 4.862162 0.000010 -7.909328
H 2.452091 -1.489597 -8.829834
H 4.847679 2.996257 -7.843969
H 2.452080 1.489614 -8.829849
H 2.428132 4.472826 -8.738153
H -7.237293 -1.500149 6.901191
H -4.847661 -2.996241 7.843973
H -2.428114 -4.472804 8.738152
H -7.237288 1.500171 6.901188
H -4.862140 0.000012 7.909329
H -2.452056 -1.489593 8.829846
H -4.847654 2.996255 7.843972
H -2.452058 1.489609 8.829843
H -2.428111 4.472819 8.738150
H -7.237306 -1.500153 -6.901191
H -4.847677 -2.996244 -7.843973
H -2.428122 -4.472813 -8.738157
H -7.237299 1.500176 -6.901188
H -4.862151 0.000012 -7.909331
H -2.452064 -1.489595 -8.829852
H -4.847670 2.996260 -7.843970
H -2.452064 1.489612 -8.829849
H -2.428117 4.472829 -8.738155
H 6.901190 7.237319 -1.500150
H 7.843970 4.847677 -2.996237
H 8.738156 2.428132 -4.472807
H 6.901187 7.237315 1.500164
H 7.909334 4.862159 0.000009
H 8.829843 2.452089 -1.489594
H 7.843962 4.847670 2.996250
H 8.829839 2.452089 1.489612
H 8.738147 2.428123 4.472820
H -6.901186 7.237313 -1.500153
H -7.843972 4.847681 -2.996242
H -8.738156 2.428143 -4.472808
H -6.901182 7.237309 1.500168
H -7.909325 4.862158 0.000009
H -8.829845 2.452079 -1.489595
H -7.843964 4.847675 2.996256
H -8.829842 2.452078 1.489612
H -8.738145 2.428137 4.472818
H 6.901200 -7.237306 -1.500153
H 7.843981 -4.847666 -2.996242
H 8.738162 -2.428114 -4.472812
H 6.901198 -7.237297 1.500171
H 7.909337 -4.862146 0.000011
H 8.829845 -2.452068 -1.489594
H 7.843972 -4.847660 2.996258
H 8.829841 -2.452068 1.489613
H 8.738152 -2.428107 4.472824
H -6.901190 -7.237297 -1.500154
H -7.843979 -4.847668 -2.996248
H -8.738159 -2.428121 -4.472812
H -6.901189 -7.237293 1.500171
H -7.909326 -4.862145 0.000009
H -8.829847 -2.452058 -1.489596
H -7.843972 -4.847664 2.996263
H -8.829843 -2.452057 1.489614
H -8.738148 -2.428115 4.472824
H -1.500153 6.901189 7.237302
H -2.996243 7.843968 4.847670
H -4.472809 8.738156 2.428132
H 1.500162 6.901186 7.237307
H 0.000005 7.909331 4.862151
H -1.489597 8.829847 2.452075
H 2.996247 7.843960 4.847669
H 1.489606 8.829848 2.452078
H 4.472817 8.738150 2.428124
H -1.500152 -6.901177 7.237309
H -2.996247 -7.843968 4.847679
H -4.472811 -8.738149 2.428139
H 1.500163 -6.901178 7.237314
H 0.000006 -7.909318 4.862160
H -1.489601 -8.829840 2.452076
H 2.996251 -7.843960 4.847678
H 1.489612 -8.829840 2.452078
H 4.472822 -8.738143 2.428130
H -1.500158 6.901205 -7.237297
H -2.996246 7.843981 -4.847659
H -4.472815 8.738162 -2.428115
H 1.500171 6.901202 -7.237297
H 0.000007 7.909340 -4.862141
H -1.489596 8.829841 -2.452068
H 2.996253 7.843975 -4.847660
H 1.489606 8.829841 -2.452069
H 4.472822 8.738158 -2.428111
H -1.500158 -6.901191 -7.237303
H -2.996251 -7.843978 -4.847665
H -4.472815 -8.738153 -2.428116
H 1.500171 -6.901191 -7.237303
H 0.000007 -7.909327 -4.862149
H -1.489602 -8.829834 -2.452067
H 2.996258 -7.843975 -4.847668
H 1.489613 -8.829834 -2.452069
H 4.472827 -8.738151 -2.428114
H 6.377354 -3.967092 5.457178
H 6.377349 3.967112 5.457173
H 5.457177 -6.377342 3.967111
H 5.457169 6.377356 3.967110
H 3.967106 -5.457157 6.377359
H 3.967106 5.457174 6.377352
H 6.377366 -3.967096 -5.457166
H 6.377363 3.967116 -5.457163
H 5.457183 -6.377353 -3.967095
H 5.457176 6.377363 -3.967098
H 3.967113 -5.457167 -6.377350
H 3.967114 5.457185 -6.377343
H -6.377340 -3.967091 5.457179
H -6.377336 3.967112 5.457174
H -5.457166 -6.377340 3.967111
H -5.457161 6.377353 3.967111
H -3.967095 -5.457156 6.377359
H -3.967095 5.457171 6.377352
H -6.377355 -3.967095 -5.457168
H -6.377350 3.967118 -5.457163
H -5.457171 -6.377349 -3.967099
H -5.457166 6.377363 -3.967100
H -3.967102 -5.457168 -6.377348
H -3.967101 5.457185 -6.377342
H 12.038641 0.000003 -7.440548
H 12.099988 0.000003 -4.483580
H 12.144240 0.000006 -1.497071
H 12.144233 0.000006 1.497083
H 12.099971 0.000003 4.483587
H 12.038623 0.000004 7.440541
H -12.038651 -0.000000 -7.440559
H -12.099994 0.000006 -4.483580
H -12.144253 0.000006 -1.497073
H -12.144248 0.000005 1.497083
H -12.099982 0.000005 4.483584
H -12.038629 0.000000 7.440551
H -7.440551 12.038630 0.000002
H -4.483583 12.099975 0.000003
H -1.497079 12.144221 0.000004
H 1.497075 12.144221 0.000005
H 4.483580 12.099968 0.000003
H 7.440535 12.038629 0.000002
H -7.440565 -12.038641 0.000002
H -4.483589 -12.099999 0.000005
H -1.497078 -12.144261 0.000007
H 1.497079 -12.144262 0.000005
H 4.483587 -12.099997 0.000004
H 7.440549 -12.038645 0.000001
H -0.000009 -7.440541 12.038631
H 0.000001 -4.483578 12.099976
H 0.000001 -1.497071 12.144229
H 0.000001 1.497078 12.144221
H 0.000002 4.483585 12.099964
H -0.000007 7.440539 12.038614
H -0.000008 -7.440559 -12.038653
H 0.000002 -4.483583 -12.100005
H 0.000001 -1.497073 -12.144265
H 0.000002 1.497082 -12.144259
H 0.000001 4.483590 -12.099993
H -0.000006 7.440555 -12.038641
H 9.677193 -1.496906 8.403454
H 7.283257 -3.003960 9.361664
H 4.860366 -4.500621 10.287633
H 2.422429 -5.980893 11.174337
H 9.677186 1.496910 8.403449
H 7.301204 0.000004 9.440827
H 4.899855 -1.498367 10.403244
H 2.464544 -2.988767 11.288143
H 7.283251 3.003967 9.361652
H 4.899853 1.498377 10.403239
H 2.475659 0.000006 11.329680
H 4.860357 4.500623 10.287616
H 2.464540 2.988773 11.288133
H 2.422424 5.980895 11.174321
H 9.677209 -1.496912 -8.403465
H 7.283277 -3.003970 -9.361682
H 4.860384 -4.500636 -10.287647
H 2.422443 -5.980905 -11.174358
H 9.677204 1.496921 -8.403460
H 7.301220 0.000005 -9.440842
H 4.899871 -1.498371 -10.403268
H 2.464557 -2.988775 -11.288169
H 7.283270 3.003976 -9.361672
H 4.899868 1.498380 -10.403263
H 2.475670 0.000005 -11.329709
H 4.860376 4.500637 -10.287640
H 2.464554 2.988779 -11.288159
H 2.422434 5.980909 -11.174343
H -9.677190 -1.496908 8.403467
H -7.283257 -3.003963 9.361672
H -4.860371 -4.500629 10.287635
H -2.422430 -5.980892 11.174337
H -9.677187 1.496917 8.403462
H -7.301202 0.000005 9.440837
H -4.899849 -1.498370 10.403248
H -2.464542 -2.988769 11.288143
H -7.283250 3.003968 9.361662
H -4.899846 1.498382 10.403245
H -2.475653 0.000007 11.329681
H -4.860362 4.500633 10.287620
H -2.464539 2.988776 11.288131
H -2.422424 5.980896 11.174322
H -9.677207 -1.496913 -8.403482
H -7.283279 -3.003972 -9.361692
H -4.860387 -4.500634 -10.287661
H -2.422442 -5.980906 -11.174359
H -9.677203 1.496924 -8.403476
H -7.301216 0.000005 -9.440853
H -4.899865 -1.498371 -10.403273
H -2.464552 -2.988774 -11.288171
H -7.283271 3.003977 -9.361683
H -4.899862 1.498381 -10.403269
H -2.475664 0.000006 -11.329711
H -4.860376 4.500638 -10.287646
H -2.464549 2.988781 -11.288160
H -2.422433 5.980910 -11.174344
H 8.403451 9.677198 -1.496905
H 9.361660 7.283263 -3.003959
H 10.287632 4.860371 -4.500620
H 11.174342 2.422434 -5.980899
H 8.403446 9.677194 1.496911
H 9.440827 7.301209 0.000005
H 10.403246 4.899862 -1.498367
H 11.288149 2.464551 -2.988769
H 9.361650 7.283254 3.003966
H 10.403241 4.899860 1.498377
H 11.329691 2.475665 0.000007
H 10.287617 4.860361 4.500626
H 11.288137 2.464548 2.988779
H 11.174326 2.422424 5.980898
H -8.403460 9.677196 -1.496910
H -9.361670 7.283266 -3.003964
H -10.287643 4.860386 -4.500636
H -11.174348 2.422445 -5.980904
H -8.403456 9.677194 1.496915
H -9.440835 7.301210 0.000006
H -10.403255 4.899863 -1.498369
H -11.288154 2.464562 -2.988776
H -9.361661 7.283260 3.003973
H -10.403249 4.899861 1.498381
H -11.329693 2.475669 0.000006
H -10.287628 4.860378 4.500640
H -11.288144 2.464559 2.988785
H -11.174330 2.422439 5.980903
H 8.403472 -9.677206 -1.496911
H 9.361686 -7.283270 -3.003967
H 10.287643 -4.860371 -4.500629
H 11.174350 -2.422427 -5.980904
H 8.403468 -9.677203 1.496918
H 9.440847 -7.301215 0.000005
H 10.403263 -4.899857 -1.498370
H 11.288155 -2.464540 -2.988768
H 9.361677 -7.283262 3.003974
H 10.403258 -4.899854 1.498381
H 11.329699 -2.475655 0.000006
H 10.287637 -4.860362 4.500633
H 11.288145 -2.464540 2.988780
H 11.174334 -2.422420 5.980905
H -8.403482 -9.677201 -1.496913
H -9.361695 -7.283272 -3.003971
H -10.287648 -4.860385 -4.500644
H -11.174357 -2.422436 -5.980909
H -8.403479 -9.677198 1.496923
H -9.440853 -7.301212 0.000006
H -10.403270 -4.899858 -1.498373
H -11.288161 -2.464554 -2.988778
H -9.361685 -7.283267 3.003981
H -10.403265 -4.899857 1.498385
H -11.329701 -2.475659 0.000006
H -10.287644 -4.860378 4.500647
H -11.288152 -2.464553 2.988786
H -11.174339 -2.422432 5.980907
H -1.496905 8.403449 9.677180
H -3.003957 9.361655 7.283252
H -4.500628 10.287621 4.860368
H -5.980895 11.174331 2.422431
H 1.496909 8.403447 9.677182
H 0.000004 9.440823 7.301201
H -1.498372 10.403237 4.899852
H -2.988773 11.288135 2.464550
H 3.003964 9.361649 7.283252
H 1.498374 10.403237 4.899852
H 0.000002 11.329673 2.475664
H 4.500620 10.287615 4.860360
H 2.988773 11.288132 2.464547
H 5.980894 11.174325 2.422427
H -1.496909 -8.403459 9.677196
H -3.003968 -9.361674 7.283271
H -4.500641 -10.287646 4.860388
H -5.980907 -11.174350 2.422445
H 1.496913 -8.403457 9.677200
H 0.000003 -9.440837 7.301216
H -1.498376 -10.403259 4.899869
H -2.988779 -11.288160 2.464559
H 3.003975 -9.361670 7.283272
H 1.498380 -10.403259 4.899869
H 0.000003 -11.329705 2.475670
H 4.500634 -10.287640 4.860379
H 2.988780 -11.288157 2.464554
H 5.980909 -11.174345 2.422438
H -1.496912 8.403471 -9.677194
H -3.003965 9.361677 -7.283259
H -4.500629 10.287638 -4.860363
H -5.980898 11.174338 -2.422428
H 1.496914 8.403469 -9.677196
H 0.000004 9.440843 -7.301203
H -1.498370 10.403254 -4.899850
H -2.988770 11.288143 -2.464539
H 3.003972 9.361671 -7.283257
H 1.498377 10.403253 -4.899848
H 0.000002 11.329681 -2.475654
H 4.500628 10.287631 -4.860359
H 2.988772 11.288140 -2.464537
H 5.980897 11.174332 -2.422423
H -1.496916 -8.403481 -9.677208
H -3.003978 -9.361696 -7.283276
H -4.500640 -10.287662 -4.860381
H -5.980909 -11.174356 -2.422438
H 1.496920 -8.403479 -9.677211
H 0.000003 -9.440857 -7.301218
H -1.498376 -10.403275 -4.899866
H -2.988776 -11.288169 -2.464544
H 3.003983 -9.361693 -7.283276
H 1.498382 -10.403275 -4.899865
H 0.000004 -11.329712 -2.475660
H 4.500643 -10.287646 -4.860375
H 2.988780 -11.288164 -2.464543
H 5.980912 -11.174352 -2.422432
H 8.823514 -3.987216 6.975976
H 8.823502 3.987220 6.975966
H 7.928324 -6.428771 5.503194
H 7.928307 6.428772 5.503181
H 6.975982 -8.823518 3.987231
H 6.975962 8.823505 3.987222
H 6.428780 -5.503180 7.928320
H 6.428768 5.503182 7.928308
H 5.503192 -7.928317 6.428783
H 5.503180 7.928307 6.428768
H 3.987227 -6.975968 8.823516
H 3.987219 6.975963 8.823502
H 8.823531 -3.987227 -6.975977
H 8.823524 3.987231 -6.975969
H 7.928335 -6.428786 -5.503192
H 7.928318 6.428785 -5.503181
H 6.975988 -8.823529 -3.987226
H 6.975969 8.823515 -3.987217
H 6.428797 -5.503193 -7.928331
H 6.428784 5.503196 -7.928316
H 5.503204 -7.928332 -6.428787
H 5.503193 7.928321 -6.428774
H 3.987241 -6.975984 -8.823526
H 3.987231 6.975978 -8.823511
H -8.823510 -3.987222 6.975980
H -8.823504 3.987228 6.975972
H -7.928324 -6.428779 5.503202
H -7.928304 6.428779 5.503191
H -6.975981 -8.823519 3.987237
H -6.975964 8.823508 3.987226
H -6.428779 -5.503186 7.928325
H -6.428768 5.503188 7.928311
H -5.503191 -7.928320 6.428792
H -5.503179 7.928309 6.428775
H -3.987221 -6.975969 8.823517
H -3.987213 6.975965 8.823503
H -8.823529 -3.987231 -6.975982
H -8.823521 3.987238 -6.975974
H -7.928336 -6.428791 -5.503200
H -7.928317 6.428791 -5.503190
H -6.975987 -8.823528 -3.987232
H -6.975972 8.823516 -3.987222
H -6.428795 -5.503199 -7.928335
H -6.428782 5.503201 -7.928321
H -5.503203 -7.928336 -6.428794
H -5.503191 7.928325 -6.428778
H -3.987236 -6.975985 -8.823528
H -3.987226 6.975980 -8.823514
"""
# Write the geometry string to an .xyz file, then build the Mole object from it.
fname = 'ag_s7l7_wonatoms.xyz'
# Use context managers so the handles are closed deterministically; the original
# code closed the write handle manually and leaked the read handle from
# open(fname).read() entirely.
with open(fname, 'w') as fp:
  fp.write(ag_s7l7_wonatoms)
with open(fname) as fp:
  mol = gto.M(verbose=1, atom=fp.read())
class KnowValues(unittest.TestCase):

  def test_ls_contributing(self):
    """Check that prod_basis.ls_contributing(0, 1) returns the expected centers."""
    system = nao(gto=mol)
    basis = prod_basis()
    basis.sv = system
    # Widen the cutoff radius of species 0 so enough centers contribute.
    basis.sv.ao_log.sp2rcut[0] = 10.0
    basis.prod_log = system.ao_log
    basis.prod_log.sp2rcut[0] = 10.0
    basis.ac_rcut = max(system.ao_log.sp2rcut)
    basis.ac_npc_max = 10
    centers = basis.ls_contributing(0, 1)
    expected = [0, 1, 13, 7, 5, 43, 42, 39, 38, 10]
    self.assertEqual(len(centers), len(expected))
    for got, want in zip(centers, expected):
      self.assertEqual(got, want)
# Allow running this module directly as a standalone unittest script.
if __name__ == '__main__':
  unittest.main()
<|reserved_special_token_1|>
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import unittest
from pyscf import gto
import os
from pyscf.nao import nao, prod_basis
ag_s7l7_wonatoms = """
H 2.346340 -0.000093 -1.449987
H 2.346702 -0.000095 1.450132
H -2.345370 -0.000086 -1.449228
H -2.345734 -0.000089 1.449376
H -1.449887 2.346112 -0.000046
H 1.450134 2.346853 -0.000044
H -1.449224 -2.345222 -0.000041
H 1.449464 -2.345958 -0.000038
H -0.000112 -1.449738 2.345957
H -0.000111 1.449980 2.346608
H -0.000111 -1.449377 -2.345464
H -0.000107 1.449607 -2.346103
H 4.731536 -0.000009 -2.923633
H 4.794344 0.000006 0.000053
H 4.731450 -0.000009 2.923590
H -4.731847 -0.000004 -2.923807
H -4.794483 0.000008 0.000053
H -4.731757 -0.000006 2.923758
H -2.923688 4.731598 0.000002
H 0.000077 4.794367 0.000013
H 2.923553 4.731432 0.000002
H -2.923845 -4.731869 0.000004
H 0.000084 -4.794470 0.000009
H 2.923708 -4.731700 0.000004
H -0.000016 -2.923710 4.731655
H -0.000002 0.000081 4.794386
H -0.000017 2.923594 4.731497
H -0.000016 -2.923799 -4.731798
H -0.000002 0.000083 -4.794441
H -0.000018 2.923687 -4.731644
H 2.396856 -1.481019 3.878620
H 2.396773 1.481058 3.878614
H 2.396905 -1.481021 -3.878636
H 2.396824 1.481062 -3.878634
H -2.396782 -1.481108 3.878712
H -2.396699 1.481149 3.878709
H -2.396832 -1.481114 -3.878735
H -2.396748 1.481155 -3.878730
H 3.878596 2.396822 -1.481024
H 3.878589 2.396778 1.481062
H -3.878672 2.396916 -1.481031
H -3.878666 2.396868 1.481064
H 3.878682 -2.396737 -1.481107
H 3.878676 -2.396695 1.481146
H -3.878757 -2.396826 -1.481115
H -3.878754 -2.396779 1.481148
H -1.481047 3.878627 2.396831
H 1.481072 3.878617 2.396742
H -1.481055 -3.878680 2.396921
H 1.481078 -3.878674 2.396834
H -1.481096 3.878678 -2.396773
H 1.481119 3.878671 -2.396685
H -1.481102 -3.878731 -2.396860
H 1.481126 -3.878722 -2.396772
H 7.150331 0.000013 -4.418604
H 7.225782 0.000009 -1.477531
H 7.225777 0.000009 1.477551
H 7.150346 0.000010 4.418636
H -7.150239 0.000015 -4.418552
H -7.225701 0.000010 -1.477539
H -7.225697 0.000009 1.477559
H -7.150257 0.000015 4.418586
H -4.418596 7.150312 0.000012
H -1.477538 7.225777 0.000010
H 1.477536 7.225775 0.000011
H 4.418635 7.150362 0.000012
H -4.418553 -7.150222 0.000020
H -1.477554 -7.225705 0.000012
H 1.477559 -7.225701 0.000011
H 4.418598 -7.150270 0.000013
H 0.000008 -4.418580 7.150295
H 0.000006 -1.477536 7.225760
H 0.000007 1.477549 7.225757
H 0.000007 4.418626 7.150335
H 0.000008 -4.418561 -7.150247
H 0.000006 -1.477545 -7.225726
H 0.000006 1.477561 -7.225723
H 0.000007 4.418613 -7.150287
H 4.808303 -1.493555 5.388587
H 2.417464 -2.971656 6.301956
H 4.808308 1.493581 5.388605
H 2.431587 0.000014 6.366095
H 2.417478 2.971674 6.301966
H 4.808303 -1.493546 -5.388552
H 2.417452 -2.971655 -6.301934
H 4.808310 1.493573 -5.388572
H 2.431585 0.000016 -6.366071
H 2.417464 2.971677 -6.301941
H -4.808288 -1.493538 5.388559
H -2.417439 -2.971638 6.301924
H -4.808292 1.493569 5.388578
H -2.431572 0.000013 6.366082
H -2.417452 2.971656 6.301933
H -4.808287 -1.493528 -5.388525
H -2.417427 -2.971639 -6.301899
H -4.808292 1.493561 -5.388546
H -2.431572 0.000014 -6.366056
H -2.417439 2.971659 -6.301909
H 5.388603 4.808319 -1.493559
H 6.301970 2.417487 -2.971653
H 5.388608 4.808321 1.493584
H 6.366098 2.431602 0.000014
H 6.301967 2.417490 2.971675
H -5.388548 4.808294 -1.493543
H -6.301922 2.417455 -2.971644
H -5.388553 4.808296 1.493566
H -6.366058 2.431589 0.000013
H -6.301920 2.417459 2.971662
H 5.388578 -4.808301 -1.493544
H 6.301948 -2.417448 -2.971646
H 5.388584 -4.808302 1.493574
H 6.366092 -2.431572 0.000013
H 6.301945 -2.417454 2.971667
H -5.388520 -4.808272 -1.493529
H -6.301896 -2.417412 -2.971637
H -5.388529 -4.808274 1.493557
H -6.366050 -2.431556 0.000012
H -6.301896 -2.417417 2.971658
H -1.493562 5.388587 4.808302
H -2.971658 6.301956 2.417476
H 1.493576 5.388607 4.808320
H 0.000001 6.366100 2.431597
H 2.971666 6.301972 2.417493
H -1.493543 -5.388526 4.808289
H -2.971649 -6.301906 2.417444
H 1.493555 -5.388547 4.808306
H 0.000002 -6.366051 2.431589
H 2.971661 -6.301921 2.417458
H -1.493560 5.388586 -4.808287
H -2.971654 6.301946 -2.417449
H 1.493572 5.388604 -4.808304
H -0.000002 6.366099 -2.431566
H 2.971663 6.301961 -2.417463
H -1.493541 -5.388524 -4.808272
H -2.971647 -6.301895 -2.417411
H 1.493554 -5.388544 -4.808291
H 0.000005 -6.366052 -2.431562
H 2.971660 -6.301911 -2.417425
H 3.933950 -3.933932 3.933958
H 3.933959 3.933967 3.933963
H 3.933948 -3.933931 -3.933931
H 3.933957 3.933967 -3.933938
H -3.933922 -3.933912 3.933940
H -3.933929 3.933948 3.933945
H -3.933921 -3.933910 -3.933915
H -3.933929 3.933948 -3.933922
H 9.586490 0.000010 -5.924178
H 9.665986 0.000008 -2.972164
H 9.696371 0.000007 0.000010
H 9.665971 0.000008 2.972190
H 9.586467 0.000009 5.924179
H -9.586484 0.000013 -5.924188
H -9.665980 0.000009 -2.972165
H -9.696370 0.000008 0.000012
H -9.665971 0.000008 2.972185
H -9.586466 0.000011 5.924187
H -5.924179 9.586482 0.000006
H -2.972172 9.665973 0.000007
H 0.000003 9.696364 0.000008
H 2.972183 9.665974 0.000008
H 5.924181 9.586480 0.000005
H -5.924189 -9.586477 0.000008
H -2.972172 -9.665971 0.000010
H 0.000008 -9.696374 0.000008
H 2.972190 -9.665972 0.000008
H 5.924193 -9.586474 0.000007
H 0.000007 -5.924168 9.586474
H 0.000007 -2.972167 9.665967
H 0.000006 0.000007 9.696360
H 0.000008 2.972186 9.665963
H 0.000006 5.924181 9.586462
H 0.000007 -5.924186 -9.586489
H 0.000007 -2.972170 -9.665982
H 0.000006 0.000009 -9.696379
H 0.000007 2.972190 -9.665979
H 0.000006 5.924201 -9.586480
H 7.237307 -1.500148 6.901190
H 4.847669 -2.996238 7.843968
H 2.428125 -4.472804 8.738149
H 7.237301 1.500165 6.901187
H 4.862151 0.000009 7.909330
H 2.452082 -1.489596 8.829834
H 4.847661 2.996251 7.843964
H 2.452083 1.489612 8.829831
H 2.428122 4.472818 8.738145
H 7.237323 -1.500151 -6.901186
H 4.847685 -2.996244 -7.843971
H 2.428136 -4.472811 -8.738153
H 7.237318 1.500169 -6.901185
H 4.862162 0.000010 -7.909328
H 2.452091 -1.489597 -8.829834
H 4.847679 2.996257 -7.843969
H 2.452080 1.489614 -8.829849
H 2.428132 4.472826 -8.738153
H -7.237293 -1.500149 6.901191
H -4.847661 -2.996241 7.843973
H -2.428114 -4.472804 8.738152
H -7.237288 1.500171 6.901188
H -4.862140 0.000012 7.909329
H -2.452056 -1.489593 8.829846
H -4.847654 2.996255 7.843972
H -2.452058 1.489609 8.829843
H -2.428111 4.472819 8.738150
H -7.237306 -1.500153 -6.901191
H -4.847677 -2.996244 -7.843973
H -2.428122 -4.472813 -8.738157
H -7.237299 1.500176 -6.901188
H -4.862151 0.000012 -7.909331
H -2.452064 -1.489595 -8.829852
H -4.847670 2.996260 -7.843970
H -2.452064 1.489612 -8.829849
H -2.428117 4.472829 -8.738155
H 6.901190 7.237319 -1.500150
H 7.843970 4.847677 -2.996237
H 8.738156 2.428132 -4.472807
H 6.901187 7.237315 1.500164
H 7.909334 4.862159 0.000009
H 8.829843 2.452089 -1.489594
H 7.843962 4.847670 2.996250
H 8.829839 2.452089 1.489612
H 8.738147 2.428123 4.472820
H -6.901186 7.237313 -1.500153
H -7.843972 4.847681 -2.996242
H -8.738156 2.428143 -4.472808
H -6.901182 7.237309 1.500168
H -7.909325 4.862158 0.000009
H -8.829845 2.452079 -1.489595
H -7.843964 4.847675 2.996256
H -8.829842 2.452078 1.489612
H -8.738145 2.428137 4.472818
H 6.901200 -7.237306 -1.500153
H 7.843981 -4.847666 -2.996242
H 8.738162 -2.428114 -4.472812
H 6.901198 -7.237297 1.500171
H 7.909337 -4.862146 0.000011
H 8.829845 -2.452068 -1.489594
H 7.843972 -4.847660 2.996258
H 8.829841 -2.452068 1.489613
H 8.738152 -2.428107 4.472824
H -6.901190 -7.237297 -1.500154
H -7.843979 -4.847668 -2.996248
H -8.738159 -2.428121 -4.472812
H -6.901189 -7.237293 1.500171
H -7.909326 -4.862145 0.000009
H -8.829847 -2.452058 -1.489596
H -7.843972 -4.847664 2.996263
H -8.829843 -2.452057 1.489614
H -8.738148 -2.428115 4.472824
H -1.500153 6.901189 7.237302
H -2.996243 7.843968 4.847670
H -4.472809 8.738156 2.428132
H 1.500162 6.901186 7.237307
H 0.000005 7.909331 4.862151
H -1.489597 8.829847 2.452075
H 2.996247 7.843960 4.847669
H 1.489606 8.829848 2.452078
H 4.472817 8.738150 2.428124
H -1.500152 -6.901177 7.237309
H -2.996247 -7.843968 4.847679
H -4.472811 -8.738149 2.428139
H 1.500163 -6.901178 7.237314
H 0.000006 -7.909318 4.862160
H -1.489601 -8.829840 2.452076
H 2.996251 -7.843960 4.847678
H 1.489612 -8.829840 2.452078
H 4.472822 -8.738143 2.428130
H -1.500158 6.901205 -7.237297
H -2.996246 7.843981 -4.847659
H -4.472815 8.738162 -2.428115
H 1.500171 6.901202 -7.237297
H 0.000007 7.909340 -4.862141
H -1.489596 8.829841 -2.452068
H 2.996253 7.843975 -4.847660
H 1.489606 8.829841 -2.452069
H 4.472822 8.738158 -2.428111
H -1.500158 -6.901191 -7.237303
H -2.996251 -7.843978 -4.847665
H -4.472815 -8.738153 -2.428116
H 1.500171 -6.901191 -7.237303
H 0.000007 -7.909327 -4.862149
H -1.489602 -8.829834 -2.452067
H 2.996258 -7.843975 -4.847668
H 1.489613 -8.829834 -2.452069
H 4.472827 -8.738151 -2.428114
H 6.377354 -3.967092 5.457178
H 6.377349 3.967112 5.457173
H 5.457177 -6.377342 3.967111
H 5.457169 6.377356 3.967110
H 3.967106 -5.457157 6.377359
H 3.967106 5.457174 6.377352
H 6.377366 -3.967096 -5.457166
H 6.377363 3.967116 -5.457163
H 5.457183 -6.377353 -3.967095
H 5.457176 6.377363 -3.967098
H 3.967113 -5.457167 -6.377350
H 3.967114 5.457185 -6.377343
H -6.377340 -3.967091 5.457179
H -6.377336 3.967112 5.457174
H -5.457166 -6.377340 3.967111
H -5.457161 6.377353 3.967111
H -3.967095 -5.457156 6.377359
H -3.967095 5.457171 6.377352
H -6.377355 -3.967095 -5.457168
H -6.377350 3.967118 -5.457163
H -5.457171 -6.377349 -3.967099
H -5.457166 6.377363 -3.967100
H -3.967102 -5.457168 -6.377348
H -3.967101 5.457185 -6.377342
H 12.038641 0.000003 -7.440548
H 12.099988 0.000003 -4.483580
H 12.144240 0.000006 -1.497071
H 12.144233 0.000006 1.497083
H 12.099971 0.000003 4.483587
H 12.038623 0.000004 7.440541
H -12.038651 -0.000000 -7.440559
H -12.099994 0.000006 -4.483580
H -12.144253 0.000006 -1.497073
H -12.144248 0.000005 1.497083
H -12.099982 0.000005 4.483584
H -12.038629 0.000000 7.440551
H -7.440551 12.038630 0.000002
H -4.483583 12.099975 0.000003
H -1.497079 12.144221 0.000004
H 1.497075 12.144221 0.000005
H 4.483580 12.099968 0.000003
H 7.440535 12.038629 0.000002
H -7.440565 -12.038641 0.000002
H -4.483589 -12.099999 0.000005
H -1.497078 -12.144261 0.000007
H 1.497079 -12.144262 0.000005
H 4.483587 -12.099997 0.000004
H 7.440549 -12.038645 0.000001
H -0.000009 -7.440541 12.038631
H 0.000001 -4.483578 12.099976
H 0.000001 -1.497071 12.144229
H 0.000001 1.497078 12.144221
H 0.000002 4.483585 12.099964
H -0.000007 7.440539 12.038614
H -0.000008 -7.440559 -12.038653
H 0.000002 -4.483583 -12.100005
H 0.000001 -1.497073 -12.144265
H 0.000002 1.497082 -12.144259
H 0.000001 4.483590 -12.099993
H -0.000006 7.440555 -12.038641
H 9.677193 -1.496906 8.403454
H 7.283257 -3.003960 9.361664
H 4.860366 -4.500621 10.287633
H 2.422429 -5.980893 11.174337
H 9.677186 1.496910 8.403449
H 7.301204 0.000004 9.440827
H 4.899855 -1.498367 10.403244
H 2.464544 -2.988767 11.288143
H 7.283251 3.003967 9.361652
H 4.899853 1.498377 10.403239
H 2.475659 0.000006 11.329680
H 4.860357 4.500623 10.287616
H 2.464540 2.988773 11.288133
H 2.422424 5.980895 11.174321
H 9.677209 -1.496912 -8.403465
H 7.283277 -3.003970 -9.361682
H 4.860384 -4.500636 -10.287647
H 2.422443 -5.980905 -11.174358
H 9.677204 1.496921 -8.403460
H 7.301220 0.000005 -9.440842
H 4.899871 -1.498371 -10.403268
H 2.464557 -2.988775 -11.288169
H 7.283270 3.003976 -9.361672
H 4.899868 1.498380 -10.403263
H 2.475670 0.000005 -11.329709
H 4.860376 4.500637 -10.287640
H 2.464554 2.988779 -11.288159
H 2.422434 5.980909 -11.174343
H -9.677190 -1.496908 8.403467
H -7.283257 -3.003963 9.361672
H -4.860371 -4.500629 10.287635
H -2.422430 -5.980892 11.174337
H -9.677187 1.496917 8.403462
H -7.301202 0.000005 9.440837
H -4.899849 -1.498370 10.403248
H -2.464542 -2.988769 11.288143
H -7.283250 3.003968 9.361662
H -4.899846 1.498382 10.403245
H -2.475653 0.000007 11.329681
H -4.860362 4.500633 10.287620
H -2.464539 2.988776 11.288131
H -2.422424 5.980896 11.174322
H -9.677207 -1.496913 -8.403482
H -7.283279 -3.003972 -9.361692
H -4.860387 -4.500634 -10.287661
H -2.422442 -5.980906 -11.174359
H -9.677203 1.496924 -8.403476
H -7.301216 0.000005 -9.440853
H -4.899865 -1.498371 -10.403273
H -2.464552 -2.988774 -11.288171
H -7.283271 3.003977 -9.361683
H -4.899862 1.498381 -10.403269
H -2.475664 0.000006 -11.329711
H -4.860376 4.500638 -10.287646
H -2.464549 2.988781 -11.288160
H -2.422433 5.980910 -11.174344
H 8.403451 9.677198 -1.496905
H 9.361660 7.283263 -3.003959
H 10.287632 4.860371 -4.500620
H 11.174342 2.422434 -5.980899
H 8.403446 9.677194 1.496911
H 9.440827 7.301209 0.000005
H 10.403246 4.899862 -1.498367
H 11.288149 2.464551 -2.988769
H 9.361650 7.283254 3.003966
H 10.403241 4.899860 1.498377
H 11.329691 2.475665 0.000007
H 10.287617 4.860361 4.500626
H 11.288137 2.464548 2.988779
H 11.174326 2.422424 5.980898
H -8.403460 9.677196 -1.496910
H -9.361670 7.283266 -3.003964
H -10.287643 4.860386 -4.500636
H -11.174348 2.422445 -5.980904
H -8.403456 9.677194 1.496915
H -9.440835 7.301210 0.000006
H -10.403255 4.899863 -1.498369
H -11.288154 2.464562 -2.988776
H -9.361661 7.283260 3.003973
H -10.403249 4.899861 1.498381
H -11.329693 2.475669 0.000006
H -10.287628 4.860378 4.500640
H -11.288144 2.464559 2.988785
H -11.174330 2.422439 5.980903
H 8.403472 -9.677206 -1.496911
H 9.361686 -7.283270 -3.003967
H 10.287643 -4.860371 -4.500629
H 11.174350 -2.422427 -5.980904
H 8.403468 -9.677203 1.496918
H 9.440847 -7.301215 0.000005
H 10.403263 -4.899857 -1.498370
H 11.288155 -2.464540 -2.988768
H 9.361677 -7.283262 3.003974
H 10.403258 -4.899854 1.498381
H 11.329699 -2.475655 0.000006
H 10.287637 -4.860362 4.500633
H 11.288145 -2.464540 2.988780
H 11.174334 -2.422420 5.980905
H -8.403482 -9.677201 -1.496913
H -9.361695 -7.283272 -3.003971
H -10.287648 -4.860385 -4.500644
H -11.174357 -2.422436 -5.980909
H -8.403479 -9.677198 1.496923
H -9.440853 -7.301212 0.000006
H -10.403270 -4.899858 -1.498373
H -11.288161 -2.464554 -2.988778
H -9.361685 -7.283267 3.003981
H -10.403265 -4.899857 1.498385
H -11.329701 -2.475659 0.000006
H -10.287644 -4.860378 4.500647
H -11.288152 -2.464553 2.988786
H -11.174339 -2.422432 5.980907
H -1.496905 8.403449 9.677180
H -3.003957 9.361655 7.283252
H -4.500628 10.287621 4.860368
H -5.980895 11.174331 2.422431
H 1.496909 8.403447 9.677182
H 0.000004 9.440823 7.301201
H -1.498372 10.403237 4.899852
H -2.988773 11.288135 2.464550
H 3.003964 9.361649 7.283252
H 1.498374 10.403237 4.899852
H 0.000002 11.329673 2.475664
H 4.500620 10.287615 4.860360
H 2.988773 11.288132 2.464547
H 5.980894 11.174325 2.422427
H -1.496909 -8.403459 9.677196
H -3.003968 -9.361674 7.283271
H -4.500641 -10.287646 4.860388
H -5.980907 -11.174350 2.422445
H 1.496913 -8.403457 9.677200
H 0.000003 -9.440837 7.301216
H -1.498376 -10.403259 4.899869
H -2.988779 -11.288160 2.464559
H 3.003975 -9.361670 7.283272
H 1.498380 -10.403259 4.899869
H 0.000003 -11.329705 2.475670
H 4.500634 -10.287640 4.860379
H 2.988780 -11.288157 2.464554
H 5.980909 -11.174345 2.422438
H -1.496912 8.403471 -9.677194
H -3.003965 9.361677 -7.283259
H -4.500629 10.287638 -4.860363
H -5.980898 11.174338 -2.422428
H 1.496914 8.403469 -9.677196
H 0.000004 9.440843 -7.301203
H -1.498370 10.403254 -4.899850
H -2.988770 11.288143 -2.464539
H 3.003972 9.361671 -7.283257
H 1.498377 10.403253 -4.899848
H 0.000002 11.329681 -2.475654
H 4.500628 10.287631 -4.860359
H 2.988772 11.288140 -2.464537
H 5.980897 11.174332 -2.422423
H -1.496916 -8.403481 -9.677208
H -3.003978 -9.361696 -7.283276
H -4.500640 -10.287662 -4.860381
H -5.980909 -11.174356 -2.422438
H 1.496920 -8.403479 -9.677211
H 0.000003 -9.440857 -7.301218
H -1.498376 -10.403275 -4.899866
H -2.988776 -11.288169 -2.464544
H 3.003983 -9.361693 -7.283276
H 1.498382 -10.403275 -4.899865
H 0.000004 -11.329712 -2.475660
H 4.500643 -10.287646 -4.860375
H 2.988780 -11.288164 -2.464543
H 5.980912 -11.174352 -2.422432
H 8.823514 -3.987216 6.975976
H 8.823502 3.987220 6.975966
H 7.928324 -6.428771 5.503194
H 7.928307 6.428772 5.503181
H 6.975982 -8.823518 3.987231
H 6.975962 8.823505 3.987222
H 6.428780 -5.503180 7.928320
H 6.428768 5.503182 7.928308
H 5.503192 -7.928317 6.428783
H 5.503180 7.928307 6.428768
H 3.987227 -6.975968 8.823516
H 3.987219 6.975963 8.823502
H 8.823531 -3.987227 -6.975977
H 8.823524 3.987231 -6.975969
H 7.928335 -6.428786 -5.503192
H 7.928318 6.428785 -5.503181
H 6.975988 -8.823529 -3.987226
H 6.975969 8.823515 -3.987217
H 6.428797 -5.503193 -7.928331
H 6.428784 5.503196 -7.928316
H 5.503204 -7.928332 -6.428787
H 5.503193 7.928321 -6.428774
H 3.987241 -6.975984 -8.823526
H 3.987231 6.975978 -8.823511
H -8.823510 -3.987222 6.975980
H -8.823504 3.987228 6.975972
H -7.928324 -6.428779 5.503202
H -7.928304 6.428779 5.503191
H -6.975981 -8.823519 3.987237
H -6.975964 8.823508 3.987226
H -6.428779 -5.503186 7.928325
H -6.428768 5.503188 7.928311
H -5.503191 -7.928320 6.428792
H -5.503179 7.928309 6.428775
H -3.987221 -6.975969 8.823517
H -3.987213 6.975965 8.823503
H -8.823529 -3.987231 -6.975982
H -8.823521 3.987238 -6.975974
H -7.928336 -6.428791 -5.503200
H -7.928317 6.428791 -5.503190
H -6.975987 -8.823528 -3.987232
H -6.975972 8.823516 -3.987222
H -6.428795 -5.503199 -7.928335
H -6.428782 5.503201 -7.928321
H -5.503203 -7.928336 -6.428794
H -5.503191 7.928325 -6.428778
H -3.987236 -6.975985 -8.823528
H -3.987226 6.975980 -8.823514
"""
# Dump the Ag cluster geometry (defined in the big string above) to an
# .xyz file on disk so it can be re-read when building the Mole below.
fname = "ag_s7l7_wonatoms.xyz"
# Use a context manager so the handle is closed even if write() raises;
# the original manual open/write/close leaked the handle on error.
with open(fname, "w") as fp:
    fp.write(ag_s7l7_wonatoms)
# NOTE: __file__ is undefined in the interactive interpreter, so the
# usual directory lookup cannot be used here:
#d = os.path.dirname(os.path.abspath(__file__))
# Read the geometry back and build a PySCF Mole object from it.
# Read inside a context manager so the file handle is closed
# deterministically; the original `open(fname).read()` relied on the
# garbage collector to close it.
with open(fname) as geom_file:
    geometry = geom_file.read()

mol = gto.M(
    verbose=1,
    atom=geometry,
)
class KnowValues(unittest.TestCase):

    def test_ls_contributing(self):
        """Check the list of product-basis centers contributing to pair (0, 1)."""
        system = nao(gto=mol)
        basis = prod_basis()
        # Wire the system into the product basis by hand and widen the
        # cutoff radii so enough neighbours qualify as contributors.
        basis.sv = system
        basis.sv.ao_log.sp2rcut[0] = 10.0
        basis.prod_log = system.ao_log
        basis.prod_log.sp2rcut[0] = 10.0
        basis.ac_rcut = max(system.ao_log.sp2rcut)
        basis.ac_npc_max = 10
        contributing = basis.ls_contributing(0, 1)
        expected = [0, 1, 13, 7, 5, 43, 42, 39, 38, 10]
        self.assertEqual(len(contributing), 10)
        for got, ref in zip(contributing, expected):
            self.assertEqual(got, ref)
# Run the test suite when this file is executed as a script.
if __name__ == "__main__":
    unittest.main()
|
flexible
|
{
"blob_id": "f82ddc34fde76ddfbbe75116526af45b83c1b102",
"index": 7895,
"step-1": "<mask token>\n\n\nclass KnowValues(unittest.TestCase):\n\n def test_ls_contributing(self):\n \"\"\" To test the list of contributing centers \"\"\"\n sv = nao(gto=mol)\n pb = prod_basis()\n pb.sv = sv\n pb.sv.ao_log.sp2rcut[0] = 10.0\n pb.prod_log = sv.ao_log\n pb.prod_log.sp2rcut[0] = 10.0\n pb.ac_rcut = max(sv.ao_log.sp2rcut)\n pb.ac_npc_max = 10\n lsc = pb.ls_contributing(0, 1)\n self.assertEqual(len(lsc), 10)\n lsref = [0, 1, 13, 7, 5, 43, 42, 39, 38, 10]\n for i, ref in enumerate(lsref):\n self.assertEqual(lsc[i], ref)\n\n\n<mask token>\n",
"step-2": "<mask token>\nfp.write(ag_s7l7_wonatoms)\nfp.close()\n<mask token>\n\n\nclass KnowValues(unittest.TestCase):\n\n def test_ls_contributing(self):\n \"\"\" To test the list of contributing centers \"\"\"\n sv = nao(gto=mol)\n pb = prod_basis()\n pb.sv = sv\n pb.sv.ao_log.sp2rcut[0] = 10.0\n pb.prod_log = sv.ao_log\n pb.prod_log.sp2rcut[0] = 10.0\n pb.ac_rcut = max(sv.ao_log.sp2rcut)\n pb.ac_npc_max = 10\n lsc = pb.ls_contributing(0, 1)\n self.assertEqual(len(lsc), 10)\n lsref = [0, 1, 13, 7, 5, 43, 42, 39, 38, 10]\n for i, ref in enumerate(lsref):\n self.assertEqual(lsc[i], ref)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-3": "<mask token>\nag_s7l7_wonatoms = \"\"\"\nH 2.346340 -0.000093 -1.449987\nH 2.346702 -0.000095 1.450132\nH -2.345370 -0.000086 -1.449228\nH -2.345734 -0.000089 1.449376\nH -1.449887 2.346112 -0.000046\nH 1.450134 2.346853 -0.000044\nH -1.449224 -2.345222 -0.000041\nH 1.449464 -2.345958 -0.000038\nH -0.000112 -1.449738 2.345957\nH -0.000111 1.449980 2.346608\nH -0.000111 -1.449377 -2.345464\nH -0.000107 1.449607 -2.346103\nH 4.731536 -0.000009 -2.923633\nH 4.794344 0.000006 0.000053\nH 4.731450 -0.000009 2.923590\nH -4.731847 -0.000004 -2.923807\nH -4.794483 0.000008 0.000053\nH -4.731757 -0.000006 2.923758\nH -2.923688 4.731598 0.000002\nH 0.000077 4.794367 0.000013\nH 2.923553 4.731432 0.000002\nH -2.923845 -4.731869 0.000004\nH 0.000084 -4.794470 0.000009\nH 2.923708 -4.731700 0.000004\nH -0.000016 -2.923710 4.731655\nH -0.000002 0.000081 4.794386\nH -0.000017 2.923594 4.731497\nH -0.000016 -2.923799 -4.731798\nH -0.000002 0.000083 -4.794441\nH -0.000018 2.923687 -4.731644\nH 2.396856 -1.481019 3.878620\nH 2.396773 1.481058 3.878614\nH 2.396905 -1.481021 -3.878636\nH 2.396824 1.481062 -3.878634\nH -2.396782 -1.481108 3.878712\nH -2.396699 1.481149 3.878709\nH -2.396832 -1.481114 -3.878735\nH -2.396748 1.481155 -3.878730\nH 3.878596 2.396822 -1.481024\nH 3.878589 2.396778 1.481062\nH -3.878672 2.396916 -1.481031\nH -3.878666 2.396868 1.481064\nH 3.878682 -2.396737 -1.481107\nH 3.878676 -2.396695 1.481146\nH -3.878757 -2.396826 -1.481115\nH -3.878754 -2.396779 1.481148\nH -1.481047 3.878627 2.396831\nH 1.481072 3.878617 2.396742\nH -1.481055 -3.878680 2.396921\nH 1.481078 -3.878674 2.396834\nH -1.481096 3.878678 -2.396773\nH 1.481119 3.878671 -2.396685\nH -1.481102 -3.878731 -2.396860\nH 1.481126 -3.878722 -2.396772\nH 7.150331 0.000013 -4.418604\nH 7.225782 0.000009 -1.477531\nH 7.225777 0.000009 1.477551\nH 7.150346 0.000010 4.418636\nH -7.150239 0.000015 -4.418552\nH -7.225701 0.000010 -1.477539\nH -7.225697 0.000009 1.477559\nH -7.150257 0.000015 
4.418586\nH -4.418596 7.150312 0.000012\nH -1.477538 7.225777 0.000010\nH 1.477536 7.225775 0.000011\nH 4.418635 7.150362 0.000012\nH -4.418553 -7.150222 0.000020\nH -1.477554 -7.225705 0.000012\nH 1.477559 -7.225701 0.000011\nH 4.418598 -7.150270 0.000013\nH 0.000008 -4.418580 7.150295\nH 0.000006 -1.477536 7.225760\nH 0.000007 1.477549 7.225757\nH 0.000007 4.418626 7.150335\nH 0.000008 -4.418561 -7.150247\nH 0.000006 -1.477545 -7.225726\nH 0.000006 1.477561 -7.225723\nH 0.000007 4.418613 -7.150287\nH 4.808303 -1.493555 5.388587\nH 2.417464 -2.971656 6.301956\nH 4.808308 1.493581 5.388605\nH 2.431587 0.000014 6.366095\nH 2.417478 2.971674 6.301966\nH 4.808303 -1.493546 -5.388552\nH 2.417452 -2.971655 -6.301934\nH 4.808310 1.493573 -5.388572\nH 2.431585 0.000016 -6.366071\nH 2.417464 2.971677 -6.301941\nH -4.808288 -1.493538 5.388559\nH -2.417439 -2.971638 6.301924\nH -4.808292 1.493569 5.388578\nH -2.431572 0.000013 6.366082\nH -2.417452 2.971656 6.301933\nH -4.808287 -1.493528 -5.388525\nH -2.417427 -2.971639 -6.301899\nH -4.808292 1.493561 -5.388546\nH -2.431572 0.000014 -6.366056\nH -2.417439 2.971659 -6.301909\nH 5.388603 4.808319 -1.493559\nH 6.301970 2.417487 -2.971653\nH 5.388608 4.808321 1.493584\nH 6.366098 2.431602 0.000014\nH 6.301967 2.417490 2.971675\nH -5.388548 4.808294 -1.493543\nH -6.301922 2.417455 -2.971644\nH -5.388553 4.808296 1.493566\nH -6.366058 2.431589 0.000013\nH -6.301920 2.417459 2.971662\nH 5.388578 -4.808301 -1.493544\nH 6.301948 -2.417448 -2.971646\nH 5.388584 -4.808302 1.493574\nH 6.366092 -2.431572 0.000013\nH 6.301945 -2.417454 2.971667\nH -5.388520 -4.808272 -1.493529\nH -6.301896 -2.417412 -2.971637\nH -5.388529 -4.808274 1.493557\nH -6.366050 -2.431556 0.000012\nH -6.301896 -2.417417 2.971658\nH -1.493562 5.388587 4.808302\nH -2.971658 6.301956 2.417476\nH 1.493576 5.388607 4.808320\nH 0.000001 6.366100 2.431597\nH 2.971666 6.301972 2.417493\nH -1.493543 -5.388526 4.808289\nH -2.971649 -6.301906 2.417444\nH 1.493555 -5.388547 
4.808306\nH 0.000002 -6.366051 2.431589\nH 2.971661 -6.301921 2.417458\nH -1.493560 5.388586 -4.808287\nH -2.971654 6.301946 -2.417449\nH 1.493572 5.388604 -4.808304\nH -0.000002 6.366099 -2.431566\nH 2.971663 6.301961 -2.417463\nH -1.493541 -5.388524 -4.808272\nH -2.971647 -6.301895 -2.417411\nH 1.493554 -5.388544 -4.808291\nH 0.000005 -6.366052 -2.431562\nH 2.971660 -6.301911 -2.417425\nH 3.933950 -3.933932 3.933958\nH 3.933959 3.933967 3.933963\nH 3.933948 -3.933931 -3.933931\nH 3.933957 3.933967 -3.933938\nH -3.933922 -3.933912 3.933940\nH -3.933929 3.933948 3.933945\nH -3.933921 -3.933910 -3.933915\nH -3.933929 3.933948 -3.933922\nH 9.586490 0.000010 -5.924178\nH 9.665986 0.000008 -2.972164\nH 9.696371 0.000007 0.000010\nH 9.665971 0.000008 2.972190\nH 9.586467 0.000009 5.924179\nH -9.586484 0.000013 -5.924188\nH -9.665980 0.000009 -2.972165\nH -9.696370 0.000008 0.000012\nH -9.665971 0.000008 2.972185\nH -9.586466 0.000011 5.924187\nH -5.924179 9.586482 0.000006\nH -2.972172 9.665973 0.000007\nH 0.000003 9.696364 0.000008\nH 2.972183 9.665974 0.000008\nH 5.924181 9.586480 0.000005\nH -5.924189 -9.586477 0.000008\nH -2.972172 -9.665971 0.000010\nH 0.000008 -9.696374 0.000008\nH 2.972190 -9.665972 0.000008\nH 5.924193 -9.586474 0.000007\nH 0.000007 -5.924168 9.586474\nH 0.000007 -2.972167 9.665967\nH 0.000006 0.000007 9.696360\nH 0.000008 2.972186 9.665963\nH 0.000006 5.924181 9.586462\nH 0.000007 -5.924186 -9.586489\nH 0.000007 -2.972170 -9.665982\nH 0.000006 0.000009 -9.696379\nH 0.000007 2.972190 -9.665979\nH 0.000006 5.924201 -9.586480\nH 7.237307 -1.500148 6.901190\nH 4.847669 -2.996238 7.843968\nH 2.428125 -4.472804 8.738149\nH 7.237301 1.500165 6.901187\nH 4.862151 0.000009 7.909330\nH 2.452082 -1.489596 8.829834\nH 4.847661 2.996251 7.843964\nH 2.452083 1.489612 8.829831\nH 2.428122 4.472818 8.738145\nH 7.237323 -1.500151 -6.901186\nH 4.847685 -2.996244 -7.843971\nH 2.428136 -4.472811 -8.738153\nH 7.237318 1.500169 -6.901185\nH 4.862162 0.000010 
-7.909328\nH 2.452091 -1.489597 -8.829834\nH 4.847679 2.996257 -7.843969\nH 2.452080 1.489614 -8.829849\nH 2.428132 4.472826 -8.738153\nH -7.237293 -1.500149 6.901191\nH -4.847661 -2.996241 7.843973\nH -2.428114 -4.472804 8.738152\nH -7.237288 1.500171 6.901188\nH -4.862140 0.000012 7.909329\nH -2.452056 -1.489593 8.829846\nH -4.847654 2.996255 7.843972\nH -2.452058 1.489609 8.829843\nH -2.428111 4.472819 8.738150\nH -7.237306 -1.500153 -6.901191\nH -4.847677 -2.996244 -7.843973\nH -2.428122 -4.472813 -8.738157\nH -7.237299 1.500176 -6.901188\nH -4.862151 0.000012 -7.909331\nH -2.452064 -1.489595 -8.829852\nH -4.847670 2.996260 -7.843970\nH -2.452064 1.489612 -8.829849\nH -2.428117 4.472829 -8.738155\nH 6.901190 7.237319 -1.500150\nH 7.843970 4.847677 -2.996237\nH 8.738156 2.428132 -4.472807\nH 6.901187 7.237315 1.500164\nH 7.909334 4.862159 0.000009\nH 8.829843 2.452089 -1.489594\nH 7.843962 4.847670 2.996250\nH 8.829839 2.452089 1.489612\nH 8.738147 2.428123 4.472820\nH -6.901186 7.237313 -1.500153\nH -7.843972 4.847681 -2.996242\nH -8.738156 2.428143 -4.472808\nH -6.901182 7.237309 1.500168\nH -7.909325 4.862158 0.000009\nH -8.829845 2.452079 -1.489595\nH -7.843964 4.847675 2.996256\nH -8.829842 2.452078 1.489612\nH -8.738145 2.428137 4.472818\nH 6.901200 -7.237306 -1.500153\nH 7.843981 -4.847666 -2.996242\nH 8.738162 -2.428114 -4.472812\nH 6.901198 -7.237297 1.500171\nH 7.909337 -4.862146 0.000011\nH 8.829845 -2.452068 -1.489594\nH 7.843972 -4.847660 2.996258\nH 8.829841 -2.452068 1.489613\nH 8.738152 -2.428107 4.472824\nH -6.901190 -7.237297 -1.500154\nH -7.843979 -4.847668 -2.996248\nH -8.738159 -2.428121 -4.472812\nH -6.901189 -7.237293 1.500171\nH -7.909326 -4.862145 0.000009\nH -8.829847 -2.452058 -1.489596\nH -7.843972 -4.847664 2.996263\nH -8.829843 -2.452057 1.489614\nH -8.738148 -2.428115 4.472824\nH -1.500153 6.901189 7.237302\nH -2.996243 7.843968 4.847670\nH -4.472809 8.738156 2.428132\nH 1.500162 6.901186 7.237307\nH 0.000005 7.909331 4.862151\nH 
-1.489597 8.829847 2.452075\nH 2.996247 7.843960 4.847669\nH 1.489606 8.829848 2.452078\nH 4.472817 8.738150 2.428124\nH -1.500152 -6.901177 7.237309\nH -2.996247 -7.843968 4.847679\nH -4.472811 -8.738149 2.428139\nH 1.500163 -6.901178 7.237314\nH 0.000006 -7.909318 4.862160\nH -1.489601 -8.829840 2.452076\nH 2.996251 -7.843960 4.847678\nH 1.489612 -8.829840 2.452078\nH 4.472822 -8.738143 2.428130\nH -1.500158 6.901205 -7.237297\nH -2.996246 7.843981 -4.847659\nH -4.472815 8.738162 -2.428115\nH 1.500171 6.901202 -7.237297\nH 0.000007 7.909340 -4.862141\nH -1.489596 8.829841 -2.452068\nH 2.996253 7.843975 -4.847660\nH 1.489606 8.829841 -2.452069\nH 4.472822 8.738158 -2.428111\nH -1.500158 -6.901191 -7.237303\nH -2.996251 -7.843978 -4.847665\nH -4.472815 -8.738153 -2.428116\nH 1.500171 -6.901191 -7.237303\nH 0.000007 -7.909327 -4.862149\nH -1.489602 -8.829834 -2.452067\nH 2.996258 -7.843975 -4.847668\nH 1.489613 -8.829834 -2.452069\nH 4.472827 -8.738151 -2.428114\nH 6.377354 -3.967092 5.457178\nH 6.377349 3.967112 5.457173\nH 5.457177 -6.377342 3.967111\nH 5.457169 6.377356 3.967110\nH 3.967106 -5.457157 6.377359\nH 3.967106 5.457174 6.377352\nH 6.377366 -3.967096 -5.457166\nH 6.377363 3.967116 -5.457163\nH 5.457183 -6.377353 -3.967095\nH 5.457176 6.377363 -3.967098\nH 3.967113 -5.457167 -6.377350\nH 3.967114 5.457185 -6.377343\nH -6.377340 -3.967091 5.457179\nH -6.377336 3.967112 5.457174\nH -5.457166 -6.377340 3.967111\nH -5.457161 6.377353 3.967111\nH -3.967095 -5.457156 6.377359\nH -3.967095 5.457171 6.377352\nH -6.377355 -3.967095 -5.457168\nH -6.377350 3.967118 -5.457163\nH -5.457171 -6.377349 -3.967099\nH -5.457166 6.377363 -3.967100\nH -3.967102 -5.457168 -6.377348\nH -3.967101 5.457185 -6.377342\nH 12.038641 0.000003 -7.440548\nH 12.099988 0.000003 -4.483580\nH 12.144240 0.000006 -1.497071\nH 12.144233 0.000006 1.497083\nH 12.099971 0.000003 4.483587\nH 12.038623 0.000004 7.440541\nH -12.038651 -0.000000 -7.440559\nH -12.099994 0.000006 -4.483580\nH 
-12.144253 0.000006 -1.497073\nH -12.144248 0.000005 1.497083\nH -12.099982 0.000005 4.483584\nH -12.038629 0.000000 7.440551\nH -7.440551 12.038630 0.000002\nH -4.483583 12.099975 0.000003\nH -1.497079 12.144221 0.000004\nH 1.497075 12.144221 0.000005\nH 4.483580 12.099968 0.000003\nH 7.440535 12.038629 0.000002\nH -7.440565 -12.038641 0.000002\nH -4.483589 -12.099999 0.000005\nH -1.497078 -12.144261 0.000007\nH 1.497079 -12.144262 0.000005\nH 4.483587 -12.099997 0.000004\nH 7.440549 -12.038645 0.000001\nH -0.000009 -7.440541 12.038631\nH 0.000001 -4.483578 12.099976\nH 0.000001 -1.497071 12.144229\nH 0.000001 1.497078 12.144221\nH 0.000002 4.483585 12.099964\nH -0.000007 7.440539 12.038614\nH -0.000008 -7.440559 -12.038653\nH 0.000002 -4.483583 -12.100005\nH 0.000001 -1.497073 -12.144265\nH 0.000002 1.497082 -12.144259\nH 0.000001 4.483590 -12.099993\nH -0.000006 7.440555 -12.038641\nH 9.677193 -1.496906 8.403454\nH 7.283257 -3.003960 9.361664\nH 4.860366 -4.500621 10.287633\nH 2.422429 -5.980893 11.174337\nH 9.677186 1.496910 8.403449\nH 7.301204 0.000004 9.440827\nH 4.899855 -1.498367 10.403244\nH 2.464544 -2.988767 11.288143\nH 7.283251 3.003967 9.361652\nH 4.899853 1.498377 10.403239\nH 2.475659 0.000006 11.329680\nH 4.860357 4.500623 10.287616\nH 2.464540 2.988773 11.288133\nH 2.422424 5.980895 11.174321\nH 9.677209 -1.496912 -8.403465\nH 7.283277 -3.003970 -9.361682\nH 4.860384 -4.500636 -10.287647\nH 2.422443 -5.980905 -11.174358\nH 9.677204 1.496921 -8.403460\nH 7.301220 0.000005 -9.440842\nH 4.899871 -1.498371 -10.403268\nH 2.464557 -2.988775 -11.288169\nH 7.283270 3.003976 -9.361672\nH 4.899868 1.498380 -10.403263\nH 2.475670 0.000005 -11.329709\nH 4.860376 4.500637 -10.287640\nH 2.464554 2.988779 -11.288159\nH 2.422434 5.980909 -11.174343\nH -9.677190 -1.496908 8.403467\nH -7.283257 -3.003963 9.361672\nH -4.860371 -4.500629 10.287635\nH -2.422430 -5.980892 11.174337\nH -9.677187 1.496917 8.403462\nH -7.301202 0.000005 9.440837\nH -4.899849 -1.498370 
10.403248\nH -2.464542 -2.988769 11.288143\nH -7.283250 3.003968 9.361662\nH -4.899846 1.498382 10.403245\nH -2.475653 0.000007 11.329681\nH -4.860362 4.500633 10.287620\nH -2.464539 2.988776 11.288131\nH -2.422424 5.980896 11.174322\nH -9.677207 -1.496913 -8.403482\nH -7.283279 -3.003972 -9.361692\nH -4.860387 -4.500634 -10.287661\nH -2.422442 -5.980906 -11.174359\nH -9.677203 1.496924 -8.403476\nH -7.301216 0.000005 -9.440853\nH -4.899865 -1.498371 -10.403273\nH -2.464552 -2.988774 -11.288171\nH -7.283271 3.003977 -9.361683\nH -4.899862 1.498381 -10.403269\nH -2.475664 0.000006 -11.329711\nH -4.860376 4.500638 -10.287646\nH -2.464549 2.988781 -11.288160\nH -2.422433 5.980910 -11.174344\nH 8.403451 9.677198 -1.496905\nH 9.361660 7.283263 -3.003959\nH 10.287632 4.860371 -4.500620\nH 11.174342 2.422434 -5.980899\nH 8.403446 9.677194 1.496911\nH 9.440827 7.301209 0.000005\nH 10.403246 4.899862 -1.498367\nH 11.288149 2.464551 -2.988769\nH 9.361650 7.283254 3.003966\nH 10.403241 4.899860 1.498377\nH 11.329691 2.475665 0.000007\nH 10.287617 4.860361 4.500626\nH 11.288137 2.464548 2.988779\nH 11.174326 2.422424 5.980898\nH -8.403460 9.677196 -1.496910\nH -9.361670 7.283266 -3.003964\nH -10.287643 4.860386 -4.500636\nH -11.174348 2.422445 -5.980904\nH -8.403456 9.677194 1.496915\nH -9.440835 7.301210 0.000006\nH -10.403255 4.899863 -1.498369\nH -11.288154 2.464562 -2.988776\nH -9.361661 7.283260 3.003973\nH -10.403249 4.899861 1.498381\nH -11.329693 2.475669 0.000006\nH -10.287628 4.860378 4.500640\nH -11.288144 2.464559 2.988785\nH -11.174330 2.422439 5.980903\nH 8.403472 -9.677206 -1.496911\nH 9.361686 -7.283270 -3.003967\nH 10.287643 -4.860371 -4.500629\nH 11.174350 -2.422427 -5.980904\nH 8.403468 -9.677203 1.496918\nH 9.440847 -7.301215 0.000005\nH 10.403263 -4.899857 -1.498370\nH 11.288155 -2.464540 -2.988768\nH 9.361677 -7.283262 3.003974\nH 10.403258 -4.899854 1.498381\nH 11.329699 -2.475655 0.000006\nH 10.287637 -4.860362 4.500633\nH 11.288145 -2.464540 
2.988780\nH 11.174334 -2.422420 5.980905\nH -8.403482 -9.677201 -1.496913\nH -9.361695 -7.283272 -3.003971\nH -10.287648 -4.860385 -4.500644\nH -11.174357 -2.422436 -5.980909\nH -8.403479 -9.677198 1.496923\nH -9.440853 -7.301212 0.000006\nH -10.403270 -4.899858 -1.498373\nH -11.288161 -2.464554 -2.988778\nH -9.361685 -7.283267 3.003981\nH -10.403265 -4.899857 1.498385\nH -11.329701 -2.475659 0.000006\nH -10.287644 -4.860378 4.500647\nH -11.288152 -2.464553 2.988786\nH -11.174339 -2.422432 5.980907\nH -1.496905 8.403449 9.677180\nH -3.003957 9.361655 7.283252\nH -4.500628 10.287621 4.860368\nH -5.980895 11.174331 2.422431\nH 1.496909 8.403447 9.677182\nH 0.000004 9.440823 7.301201\nH -1.498372 10.403237 4.899852\nH -2.988773 11.288135 2.464550\nH 3.003964 9.361649 7.283252\nH 1.498374 10.403237 4.899852\nH 0.000002 11.329673 2.475664\nH 4.500620 10.287615 4.860360\nH 2.988773 11.288132 2.464547\nH 5.980894 11.174325 2.422427\nH -1.496909 -8.403459 9.677196\nH -3.003968 -9.361674 7.283271\nH -4.500641 -10.287646 4.860388\nH -5.980907 -11.174350 2.422445\nH 1.496913 -8.403457 9.677200\nH 0.000003 -9.440837 7.301216\nH -1.498376 -10.403259 4.899869\nH -2.988779 -11.288160 2.464559\nH 3.003975 -9.361670 7.283272\nH 1.498380 -10.403259 4.899869\nH 0.000003 -11.329705 2.475670\nH 4.500634 -10.287640 4.860379\nH 2.988780 -11.288157 2.464554\nH 5.980909 -11.174345 2.422438\nH -1.496912 8.403471 -9.677194\nH -3.003965 9.361677 -7.283259\nH -4.500629 10.287638 -4.860363\nH -5.980898 11.174338 -2.422428\nH 1.496914 8.403469 -9.677196\nH 0.000004 9.440843 -7.301203\nH -1.498370 10.403254 -4.899850\nH -2.988770 11.288143 -2.464539\nH 3.003972 9.361671 -7.283257\nH 1.498377 10.403253 -4.899848\nH 0.000002 11.329681 -2.475654\nH 4.500628 10.287631 -4.860359\nH 2.988772 11.288140 -2.464537\nH 5.980897 11.174332 -2.422423\nH -1.496916 -8.403481 -9.677208\nH -3.003978 -9.361696 -7.283276\nH -4.500640 -10.287662 -4.860381\nH -5.980909 -11.174356 -2.422438\nH 1.496920 -8.403479 
-9.677211\nH 0.000003 -9.440857 -7.301218\nH -1.498376 -10.403275 -4.899866\nH -2.988776 -11.288169 -2.464544\nH 3.003983 -9.361693 -7.283276\nH 1.498382 -10.403275 -4.899865\nH 0.000004 -11.329712 -2.475660\nH 4.500643 -10.287646 -4.860375\nH 2.988780 -11.288164 -2.464543\nH 5.980912 -11.174352 -2.422432\nH 8.823514 -3.987216 6.975976\nH 8.823502 3.987220 6.975966\nH 7.928324 -6.428771 5.503194\nH 7.928307 6.428772 5.503181\nH 6.975982 -8.823518 3.987231\nH 6.975962 8.823505 3.987222\nH 6.428780 -5.503180 7.928320\nH 6.428768 5.503182 7.928308\nH 5.503192 -7.928317 6.428783\nH 5.503180 7.928307 6.428768\nH 3.987227 -6.975968 8.823516\nH 3.987219 6.975963 8.823502\nH 8.823531 -3.987227 -6.975977\nH 8.823524 3.987231 -6.975969\nH 7.928335 -6.428786 -5.503192\nH 7.928318 6.428785 -5.503181\nH 6.975988 -8.823529 -3.987226\nH 6.975969 8.823515 -3.987217\nH 6.428797 -5.503193 -7.928331\nH 6.428784 5.503196 -7.928316\nH 5.503204 -7.928332 -6.428787\nH 5.503193 7.928321 -6.428774\nH 3.987241 -6.975984 -8.823526\nH 3.987231 6.975978 -8.823511\nH -8.823510 -3.987222 6.975980\nH -8.823504 3.987228 6.975972\nH -7.928324 -6.428779 5.503202\nH -7.928304 6.428779 5.503191\nH -6.975981 -8.823519 3.987237\nH -6.975964 8.823508 3.987226\nH -6.428779 -5.503186 7.928325\nH -6.428768 5.503188 7.928311\nH -5.503191 -7.928320 6.428792\nH -5.503179 7.928309 6.428775\nH -3.987221 -6.975969 8.823517\nH -3.987213 6.975965 8.823503\nH -8.823529 -3.987231 -6.975982\nH -8.823521 3.987238 -6.975974\nH -7.928336 -6.428791 -5.503200\nH -7.928317 6.428791 -5.503190\nH -6.975987 -8.823528 -3.987232\nH -6.975972 8.823516 -3.987222\nH -6.428795 -5.503199 -7.928335\nH -6.428782 5.503201 -7.928321\nH -5.503203 -7.928336 -6.428794\nH -5.503191 7.928325 -6.428778\nH -3.987236 -6.975985 -8.823528\nH -3.987226 6.975980 -8.823514\n\"\"\"\nfname = 'ag_s7l7_wonatoms.xyz'\nfp = open(fname, 'w')\nfp.write(ag_s7l7_wonatoms)\nfp.close()\nmol = gto.M(verbose=1, atom=open(fname).read())\n\n\nclass 
KnowValues(unittest.TestCase):\n\n def test_ls_contributing(self):\n \"\"\" To test the list of contributing centers \"\"\"\n sv = nao(gto=mol)\n pb = prod_basis()\n pb.sv = sv\n pb.sv.ao_log.sp2rcut[0] = 10.0\n pb.prod_log = sv.ao_log\n pb.prod_log.sp2rcut[0] = 10.0\n pb.ac_rcut = max(sv.ao_log.sp2rcut)\n pb.ac_npc_max = 10\n lsc = pb.ls_contributing(0, 1)\n self.assertEqual(len(lsc), 10)\n lsref = [0, 1, 13, 7, 5, 43, 42, 39, 38, 10]\n for i, ref in enumerate(lsref):\n self.assertEqual(lsc[i], ref)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "from __future__ import print_function, division\nimport unittest\nfrom pyscf import gto\nimport os\nfrom pyscf.nao import nao, prod_basis\nag_s7l7_wonatoms = \"\"\"\nH 2.346340 -0.000093 -1.449987\nH 2.346702 -0.000095 1.450132\nH -2.345370 -0.000086 -1.449228\nH -2.345734 -0.000089 1.449376\nH -1.449887 2.346112 -0.000046\nH 1.450134 2.346853 -0.000044\nH -1.449224 -2.345222 -0.000041\nH 1.449464 -2.345958 -0.000038\nH -0.000112 -1.449738 2.345957\nH -0.000111 1.449980 2.346608\nH -0.000111 -1.449377 -2.345464\nH -0.000107 1.449607 -2.346103\nH 4.731536 -0.000009 -2.923633\nH 4.794344 0.000006 0.000053\nH 4.731450 -0.000009 2.923590\nH -4.731847 -0.000004 -2.923807\nH -4.794483 0.000008 0.000053\nH -4.731757 -0.000006 2.923758\nH -2.923688 4.731598 0.000002\nH 0.000077 4.794367 0.000013\nH 2.923553 4.731432 0.000002\nH -2.923845 -4.731869 0.000004\nH 0.000084 -4.794470 0.000009\nH 2.923708 -4.731700 0.000004\nH -0.000016 -2.923710 4.731655\nH -0.000002 0.000081 4.794386\nH -0.000017 2.923594 4.731497\nH -0.000016 -2.923799 -4.731798\nH -0.000002 0.000083 -4.794441\nH -0.000018 2.923687 -4.731644\nH 2.396856 -1.481019 3.878620\nH 2.396773 1.481058 3.878614\nH 2.396905 -1.481021 -3.878636\nH 2.396824 1.481062 -3.878634\nH -2.396782 -1.481108 3.878712\nH -2.396699 1.481149 3.878709\nH -2.396832 -1.481114 -3.878735\nH -2.396748 1.481155 -3.878730\nH 3.878596 2.396822 -1.481024\nH 3.878589 2.396778 1.481062\nH -3.878672 2.396916 -1.481031\nH -3.878666 2.396868 1.481064\nH 3.878682 -2.396737 -1.481107\nH 3.878676 -2.396695 1.481146\nH -3.878757 -2.396826 -1.481115\nH -3.878754 -2.396779 1.481148\nH -1.481047 3.878627 2.396831\nH 1.481072 3.878617 2.396742\nH -1.481055 -3.878680 2.396921\nH 1.481078 -3.878674 2.396834\nH -1.481096 3.878678 -2.396773\nH 1.481119 3.878671 -2.396685\nH -1.481102 -3.878731 -2.396860\nH 1.481126 -3.878722 -2.396772\nH 7.150331 0.000013 -4.418604\nH 7.225782 0.000009 -1.477531\nH 7.225777 0.000009 1.477551\nH 7.150346 0.000010 
4.418636\nH -7.150239 0.000015 -4.418552\nH -7.225701 0.000010 -1.477539\nH -7.225697 0.000009 1.477559\nH -7.150257 0.000015 4.418586\nH -4.418596 7.150312 0.000012\nH -1.477538 7.225777 0.000010\nH 1.477536 7.225775 0.000011\nH 4.418635 7.150362 0.000012\nH -4.418553 -7.150222 0.000020\nH -1.477554 -7.225705 0.000012\nH 1.477559 -7.225701 0.000011\nH 4.418598 -7.150270 0.000013\nH 0.000008 -4.418580 7.150295\nH 0.000006 -1.477536 7.225760\nH 0.000007 1.477549 7.225757\nH 0.000007 4.418626 7.150335\nH 0.000008 -4.418561 -7.150247\nH 0.000006 -1.477545 -7.225726\nH 0.000006 1.477561 -7.225723\nH 0.000007 4.418613 -7.150287\nH 4.808303 -1.493555 5.388587\nH 2.417464 -2.971656 6.301956\nH 4.808308 1.493581 5.388605\nH 2.431587 0.000014 6.366095\nH 2.417478 2.971674 6.301966\nH 4.808303 -1.493546 -5.388552\nH 2.417452 -2.971655 -6.301934\nH 4.808310 1.493573 -5.388572\nH 2.431585 0.000016 -6.366071\nH 2.417464 2.971677 -6.301941\nH -4.808288 -1.493538 5.388559\nH -2.417439 -2.971638 6.301924\nH -4.808292 1.493569 5.388578\nH -2.431572 0.000013 6.366082\nH -2.417452 2.971656 6.301933\nH -4.808287 -1.493528 -5.388525\nH -2.417427 -2.971639 -6.301899\nH -4.808292 1.493561 -5.388546\nH -2.431572 0.000014 -6.366056\nH -2.417439 2.971659 -6.301909\nH 5.388603 4.808319 -1.493559\nH 6.301970 2.417487 -2.971653\nH 5.388608 4.808321 1.493584\nH 6.366098 2.431602 0.000014\nH 6.301967 2.417490 2.971675\nH -5.388548 4.808294 -1.493543\nH -6.301922 2.417455 -2.971644\nH -5.388553 4.808296 1.493566\nH -6.366058 2.431589 0.000013\nH -6.301920 2.417459 2.971662\nH 5.388578 -4.808301 -1.493544\nH 6.301948 -2.417448 -2.971646\nH 5.388584 -4.808302 1.493574\nH 6.366092 -2.431572 0.000013\nH 6.301945 -2.417454 2.971667\nH -5.388520 -4.808272 -1.493529\nH -6.301896 -2.417412 -2.971637\nH -5.388529 -4.808274 1.493557\nH -6.366050 -2.431556 0.000012\nH -6.301896 -2.417417 2.971658\nH -1.493562 5.388587 4.808302\nH -2.971658 6.301956 2.417476\nH 1.493576 5.388607 4.808320\nH 0.000001 6.366100 
2.431597\nH 2.971666 6.301972 2.417493\nH -1.493543 -5.388526 4.808289\nH -2.971649 -6.301906 2.417444\nH 1.493555 -5.388547 4.808306\nH 0.000002 -6.366051 2.431589\nH 2.971661 -6.301921 2.417458\nH -1.493560 5.388586 -4.808287\nH -2.971654 6.301946 -2.417449\nH 1.493572 5.388604 -4.808304\nH -0.000002 6.366099 -2.431566\nH 2.971663 6.301961 -2.417463\nH -1.493541 -5.388524 -4.808272\nH -2.971647 -6.301895 -2.417411\nH 1.493554 -5.388544 -4.808291\nH 0.000005 -6.366052 -2.431562\nH 2.971660 -6.301911 -2.417425\nH 3.933950 -3.933932 3.933958\nH 3.933959 3.933967 3.933963\nH 3.933948 -3.933931 -3.933931\nH 3.933957 3.933967 -3.933938\nH -3.933922 -3.933912 3.933940\nH -3.933929 3.933948 3.933945\nH -3.933921 -3.933910 -3.933915\nH -3.933929 3.933948 -3.933922\nH 9.586490 0.000010 -5.924178\nH 9.665986 0.000008 -2.972164\nH 9.696371 0.000007 0.000010\nH 9.665971 0.000008 2.972190\nH 9.586467 0.000009 5.924179\nH -9.586484 0.000013 -5.924188\nH -9.665980 0.000009 -2.972165\nH -9.696370 0.000008 0.000012\nH -9.665971 0.000008 2.972185\nH -9.586466 0.000011 5.924187\nH -5.924179 9.586482 0.000006\nH -2.972172 9.665973 0.000007\nH 0.000003 9.696364 0.000008\nH 2.972183 9.665974 0.000008\nH 5.924181 9.586480 0.000005\nH -5.924189 -9.586477 0.000008\nH -2.972172 -9.665971 0.000010\nH 0.000008 -9.696374 0.000008\nH 2.972190 -9.665972 0.000008\nH 5.924193 -9.586474 0.000007\nH 0.000007 -5.924168 9.586474\nH 0.000007 -2.972167 9.665967\nH 0.000006 0.000007 9.696360\nH 0.000008 2.972186 9.665963\nH 0.000006 5.924181 9.586462\nH 0.000007 -5.924186 -9.586489\nH 0.000007 -2.972170 -9.665982\nH 0.000006 0.000009 -9.696379\nH 0.000007 2.972190 -9.665979\nH 0.000006 5.924201 -9.586480\nH 7.237307 -1.500148 6.901190\nH 4.847669 -2.996238 7.843968\nH 2.428125 -4.472804 8.738149\nH 7.237301 1.500165 6.901187\nH 4.862151 0.000009 7.909330\nH 2.452082 -1.489596 8.829834\nH 4.847661 2.996251 7.843964\nH 2.452083 1.489612 8.829831\nH 2.428122 4.472818 8.738145\nH 7.237323 -1.500151 
-6.901186\nH 4.847685 -2.996244 -7.843971\nH 2.428136 -4.472811 -8.738153\nH 7.237318 1.500169 -6.901185\nH 4.862162 0.000010 -7.909328\nH 2.452091 -1.489597 -8.829834\nH 4.847679 2.996257 -7.843969\nH 2.452080 1.489614 -8.829849\nH 2.428132 4.472826 -8.738153\nH -7.237293 -1.500149 6.901191\nH -4.847661 -2.996241 7.843973\nH -2.428114 -4.472804 8.738152\nH -7.237288 1.500171 6.901188\nH -4.862140 0.000012 7.909329\nH -2.452056 -1.489593 8.829846\nH -4.847654 2.996255 7.843972\nH -2.452058 1.489609 8.829843\nH -2.428111 4.472819 8.738150\nH -7.237306 -1.500153 -6.901191\nH -4.847677 -2.996244 -7.843973\nH -2.428122 -4.472813 -8.738157\nH -7.237299 1.500176 -6.901188\nH -4.862151 0.000012 -7.909331\nH -2.452064 -1.489595 -8.829852\nH -4.847670 2.996260 -7.843970\nH -2.452064 1.489612 -8.829849\nH -2.428117 4.472829 -8.738155\nH 6.901190 7.237319 -1.500150\nH 7.843970 4.847677 -2.996237\nH 8.738156 2.428132 -4.472807\nH 6.901187 7.237315 1.500164\nH 7.909334 4.862159 0.000009\nH 8.829843 2.452089 -1.489594\nH 7.843962 4.847670 2.996250\nH 8.829839 2.452089 1.489612\nH 8.738147 2.428123 4.472820\nH -6.901186 7.237313 -1.500153\nH -7.843972 4.847681 -2.996242\nH -8.738156 2.428143 -4.472808\nH -6.901182 7.237309 1.500168\nH -7.909325 4.862158 0.000009\nH -8.829845 2.452079 -1.489595\nH -7.843964 4.847675 2.996256\nH -8.829842 2.452078 1.489612\nH -8.738145 2.428137 4.472818\nH 6.901200 -7.237306 -1.500153\nH 7.843981 -4.847666 -2.996242\nH 8.738162 -2.428114 -4.472812\nH 6.901198 -7.237297 1.500171\nH 7.909337 -4.862146 0.000011\nH 8.829845 -2.452068 -1.489594\nH 7.843972 -4.847660 2.996258\nH 8.829841 -2.452068 1.489613\nH 8.738152 -2.428107 4.472824\nH -6.901190 -7.237297 -1.500154\nH -7.843979 -4.847668 -2.996248\nH -8.738159 -2.428121 -4.472812\nH -6.901189 -7.237293 1.500171\nH -7.909326 -4.862145 0.000009\nH -8.829847 -2.452058 -1.489596\nH -7.843972 -4.847664 2.996263\nH -8.829843 -2.452057 1.489614\nH -8.738148 -2.428115 4.472824\nH -1.500153 6.901189 
7.237302\nH -2.996243 7.843968 4.847670\nH -4.472809 8.738156 2.428132\nH 1.500162 6.901186 7.237307\nH 0.000005 7.909331 4.862151\nH -1.489597 8.829847 2.452075\nH 2.996247 7.843960 4.847669\nH 1.489606 8.829848 2.452078\nH 4.472817 8.738150 2.428124\nH -1.500152 -6.901177 7.237309\nH -2.996247 -7.843968 4.847679\nH -4.472811 -8.738149 2.428139\nH 1.500163 -6.901178 7.237314\nH 0.000006 -7.909318 4.862160\nH -1.489601 -8.829840 2.452076\nH 2.996251 -7.843960 4.847678\nH 1.489612 -8.829840 2.452078\nH 4.472822 -8.738143 2.428130\nH -1.500158 6.901205 -7.237297\nH -2.996246 7.843981 -4.847659\nH -4.472815 8.738162 -2.428115\nH 1.500171 6.901202 -7.237297\nH 0.000007 7.909340 -4.862141\nH -1.489596 8.829841 -2.452068\nH 2.996253 7.843975 -4.847660\nH 1.489606 8.829841 -2.452069\nH 4.472822 8.738158 -2.428111\nH -1.500158 -6.901191 -7.237303\nH -2.996251 -7.843978 -4.847665\nH -4.472815 -8.738153 -2.428116\nH 1.500171 -6.901191 -7.237303\nH 0.000007 -7.909327 -4.862149\nH -1.489602 -8.829834 -2.452067\nH 2.996258 -7.843975 -4.847668\nH 1.489613 -8.829834 -2.452069\nH 4.472827 -8.738151 -2.428114\nH 6.377354 -3.967092 5.457178\nH 6.377349 3.967112 5.457173\nH 5.457177 -6.377342 3.967111\nH 5.457169 6.377356 3.967110\nH 3.967106 -5.457157 6.377359\nH 3.967106 5.457174 6.377352\nH 6.377366 -3.967096 -5.457166\nH 6.377363 3.967116 -5.457163\nH 5.457183 -6.377353 -3.967095\nH 5.457176 6.377363 -3.967098\nH 3.967113 -5.457167 -6.377350\nH 3.967114 5.457185 -6.377343\nH -6.377340 -3.967091 5.457179\nH -6.377336 3.967112 5.457174\nH -5.457166 -6.377340 3.967111\nH -5.457161 6.377353 3.967111\nH -3.967095 -5.457156 6.377359\nH -3.967095 5.457171 6.377352\nH -6.377355 -3.967095 -5.457168\nH -6.377350 3.967118 -5.457163\nH -5.457171 -6.377349 -3.967099\nH -5.457166 6.377363 -3.967100\nH -3.967102 -5.457168 -6.377348\nH -3.967101 5.457185 -6.377342\nH 12.038641 0.000003 -7.440548\nH 12.099988 0.000003 -4.483580\nH 12.144240 0.000006 -1.497071\nH 12.144233 0.000006 1.497083\nH 
12.099971 0.000003 4.483587\nH 12.038623 0.000004 7.440541\nH -12.038651 -0.000000 -7.440559\nH -12.099994 0.000006 -4.483580\nH -12.144253 0.000006 -1.497073\nH -12.144248 0.000005 1.497083\nH -12.099982 0.000005 4.483584\nH -12.038629 0.000000 7.440551\nH -7.440551 12.038630 0.000002\nH -4.483583 12.099975 0.000003\nH -1.497079 12.144221 0.000004\nH 1.497075 12.144221 0.000005\nH 4.483580 12.099968 0.000003\nH 7.440535 12.038629 0.000002\nH -7.440565 -12.038641 0.000002\nH -4.483589 -12.099999 0.000005\nH -1.497078 -12.144261 0.000007\nH 1.497079 -12.144262 0.000005\nH 4.483587 -12.099997 0.000004\nH 7.440549 -12.038645 0.000001\nH -0.000009 -7.440541 12.038631\nH 0.000001 -4.483578 12.099976\nH 0.000001 -1.497071 12.144229\nH 0.000001 1.497078 12.144221\nH 0.000002 4.483585 12.099964\nH -0.000007 7.440539 12.038614\nH -0.000008 -7.440559 -12.038653\nH 0.000002 -4.483583 -12.100005\nH 0.000001 -1.497073 -12.144265\nH 0.000002 1.497082 -12.144259\nH 0.000001 4.483590 -12.099993\nH -0.000006 7.440555 -12.038641\nH 9.677193 -1.496906 8.403454\nH 7.283257 -3.003960 9.361664\nH 4.860366 -4.500621 10.287633\nH 2.422429 -5.980893 11.174337\nH 9.677186 1.496910 8.403449\nH 7.301204 0.000004 9.440827\nH 4.899855 -1.498367 10.403244\nH 2.464544 -2.988767 11.288143\nH 7.283251 3.003967 9.361652\nH 4.899853 1.498377 10.403239\nH 2.475659 0.000006 11.329680\nH 4.860357 4.500623 10.287616\nH 2.464540 2.988773 11.288133\nH 2.422424 5.980895 11.174321\nH 9.677209 -1.496912 -8.403465\nH 7.283277 -3.003970 -9.361682\nH 4.860384 -4.500636 -10.287647\nH 2.422443 -5.980905 -11.174358\nH 9.677204 1.496921 -8.403460\nH 7.301220 0.000005 -9.440842\nH 4.899871 -1.498371 -10.403268\nH 2.464557 -2.988775 -11.288169\nH 7.283270 3.003976 -9.361672\nH 4.899868 1.498380 -10.403263\nH 2.475670 0.000005 -11.329709\nH 4.860376 4.500637 -10.287640\nH 2.464554 2.988779 -11.288159\nH 2.422434 5.980909 -11.174343\nH -9.677190 -1.496908 8.403467\nH -7.283257 -3.003963 9.361672\nH -4.860371 -4.500629 
10.287635\nH -2.422430 -5.980892 11.174337\nH -9.677187 1.496917 8.403462\nH -7.301202 0.000005 9.440837\nH -4.899849 -1.498370 10.403248\nH -2.464542 -2.988769 11.288143\nH -7.283250 3.003968 9.361662\nH -4.899846 1.498382 10.403245\nH -2.475653 0.000007 11.329681\nH -4.860362 4.500633 10.287620\nH -2.464539 2.988776 11.288131\nH -2.422424 5.980896 11.174322\nH -9.677207 -1.496913 -8.403482\nH -7.283279 -3.003972 -9.361692\nH -4.860387 -4.500634 -10.287661\nH -2.422442 -5.980906 -11.174359\nH -9.677203 1.496924 -8.403476\nH -7.301216 0.000005 -9.440853\nH -4.899865 -1.498371 -10.403273\nH -2.464552 -2.988774 -11.288171\nH -7.283271 3.003977 -9.361683\nH -4.899862 1.498381 -10.403269\nH -2.475664 0.000006 -11.329711\nH -4.860376 4.500638 -10.287646\nH -2.464549 2.988781 -11.288160\nH -2.422433 5.980910 -11.174344\nH 8.403451 9.677198 -1.496905\nH 9.361660 7.283263 -3.003959\nH 10.287632 4.860371 -4.500620\nH 11.174342 2.422434 -5.980899\nH 8.403446 9.677194 1.496911\nH 9.440827 7.301209 0.000005\nH 10.403246 4.899862 -1.498367\nH 11.288149 2.464551 -2.988769\nH 9.361650 7.283254 3.003966\nH 10.403241 4.899860 1.498377\nH 11.329691 2.475665 0.000007\nH 10.287617 4.860361 4.500626\nH 11.288137 2.464548 2.988779\nH 11.174326 2.422424 5.980898\nH -8.403460 9.677196 -1.496910\nH -9.361670 7.283266 -3.003964\nH -10.287643 4.860386 -4.500636\nH -11.174348 2.422445 -5.980904\nH -8.403456 9.677194 1.496915\nH -9.440835 7.301210 0.000006\nH -10.403255 4.899863 -1.498369\nH -11.288154 2.464562 -2.988776\nH -9.361661 7.283260 3.003973\nH -10.403249 4.899861 1.498381\nH -11.329693 2.475669 0.000006\nH -10.287628 4.860378 4.500640\nH -11.288144 2.464559 2.988785\nH -11.174330 2.422439 5.980903\nH 8.403472 -9.677206 -1.496911\nH 9.361686 -7.283270 -3.003967\nH 10.287643 -4.860371 -4.500629\nH 11.174350 -2.422427 -5.980904\nH 8.403468 -9.677203 1.496918\nH 9.440847 -7.301215 0.000005\nH 10.403263 -4.899857 -1.498370\nH 11.288155 -2.464540 -2.988768\nH 9.361677 -7.283262 
3.003974\nH 10.403258 -4.899854 1.498381\nH 11.329699 -2.475655 0.000006\nH 10.287637 -4.860362 4.500633\nH 11.288145 -2.464540 2.988780\nH 11.174334 -2.422420 5.980905\nH -8.403482 -9.677201 -1.496913\nH -9.361695 -7.283272 -3.003971\nH -10.287648 -4.860385 -4.500644\nH -11.174357 -2.422436 -5.980909\nH -8.403479 -9.677198 1.496923\nH -9.440853 -7.301212 0.000006\nH -10.403270 -4.899858 -1.498373\nH -11.288161 -2.464554 -2.988778\nH -9.361685 -7.283267 3.003981\nH -10.403265 -4.899857 1.498385\nH -11.329701 -2.475659 0.000006\nH -10.287644 -4.860378 4.500647\nH -11.288152 -2.464553 2.988786\nH -11.174339 -2.422432 5.980907\nH -1.496905 8.403449 9.677180\nH -3.003957 9.361655 7.283252\nH -4.500628 10.287621 4.860368\nH -5.980895 11.174331 2.422431\nH 1.496909 8.403447 9.677182\nH 0.000004 9.440823 7.301201\nH -1.498372 10.403237 4.899852\nH -2.988773 11.288135 2.464550\nH 3.003964 9.361649 7.283252\nH 1.498374 10.403237 4.899852\nH 0.000002 11.329673 2.475664\nH 4.500620 10.287615 4.860360\nH 2.988773 11.288132 2.464547\nH 5.980894 11.174325 2.422427\nH -1.496909 -8.403459 9.677196\nH -3.003968 -9.361674 7.283271\nH -4.500641 -10.287646 4.860388\nH -5.980907 -11.174350 2.422445\nH 1.496913 -8.403457 9.677200\nH 0.000003 -9.440837 7.301216\nH -1.498376 -10.403259 4.899869\nH -2.988779 -11.288160 2.464559\nH 3.003975 -9.361670 7.283272\nH 1.498380 -10.403259 4.899869\nH 0.000003 -11.329705 2.475670\nH 4.500634 -10.287640 4.860379\nH 2.988780 -11.288157 2.464554\nH 5.980909 -11.174345 2.422438\nH -1.496912 8.403471 -9.677194\nH -3.003965 9.361677 -7.283259\nH -4.500629 10.287638 -4.860363\nH -5.980898 11.174338 -2.422428\nH 1.496914 8.403469 -9.677196\nH 0.000004 9.440843 -7.301203\nH -1.498370 10.403254 -4.899850\nH -2.988770 11.288143 -2.464539\nH 3.003972 9.361671 -7.283257\nH 1.498377 10.403253 -4.899848\nH 0.000002 11.329681 -2.475654\nH 4.500628 10.287631 -4.860359\nH 2.988772 11.288140 -2.464537\nH 5.980897 11.174332 -2.422423\nH -1.496916 -8.403481 
-9.677208\nH -3.003978 -9.361696 -7.283276\nH -4.500640 -10.287662 -4.860381\nH -5.980909 -11.174356 -2.422438\nH 1.496920 -8.403479 -9.677211\nH 0.000003 -9.440857 -7.301218\nH -1.498376 -10.403275 -4.899866\nH -2.988776 -11.288169 -2.464544\nH 3.003983 -9.361693 -7.283276\nH 1.498382 -10.403275 -4.899865\nH 0.000004 -11.329712 -2.475660\nH 4.500643 -10.287646 -4.860375\nH 2.988780 -11.288164 -2.464543\nH 5.980912 -11.174352 -2.422432\nH 8.823514 -3.987216 6.975976\nH 8.823502 3.987220 6.975966\nH 7.928324 -6.428771 5.503194\nH 7.928307 6.428772 5.503181\nH 6.975982 -8.823518 3.987231\nH 6.975962 8.823505 3.987222\nH 6.428780 -5.503180 7.928320\nH 6.428768 5.503182 7.928308\nH 5.503192 -7.928317 6.428783\nH 5.503180 7.928307 6.428768\nH 3.987227 -6.975968 8.823516\nH 3.987219 6.975963 8.823502\nH 8.823531 -3.987227 -6.975977\nH 8.823524 3.987231 -6.975969\nH 7.928335 -6.428786 -5.503192\nH 7.928318 6.428785 -5.503181\nH 6.975988 -8.823529 -3.987226\nH 6.975969 8.823515 -3.987217\nH 6.428797 -5.503193 -7.928331\nH 6.428784 5.503196 -7.928316\nH 5.503204 -7.928332 -6.428787\nH 5.503193 7.928321 -6.428774\nH 3.987241 -6.975984 -8.823526\nH 3.987231 6.975978 -8.823511\nH -8.823510 -3.987222 6.975980\nH -8.823504 3.987228 6.975972\nH -7.928324 -6.428779 5.503202\nH -7.928304 6.428779 5.503191\nH -6.975981 -8.823519 3.987237\nH -6.975964 8.823508 3.987226\nH -6.428779 -5.503186 7.928325\nH -6.428768 5.503188 7.928311\nH -5.503191 -7.928320 6.428792\nH -5.503179 7.928309 6.428775\nH -3.987221 -6.975969 8.823517\nH -3.987213 6.975965 8.823503\nH -8.823529 -3.987231 -6.975982\nH -8.823521 3.987238 -6.975974\nH -7.928336 -6.428791 -5.503200\nH -7.928317 6.428791 -5.503190\nH -6.975987 -8.823528 -3.987232\nH -6.975972 8.823516 -3.987222\nH -6.428795 -5.503199 -7.928335\nH -6.428782 5.503201 -7.928321\nH -5.503203 -7.928336 -6.428794\nH -5.503191 7.928325 -6.428778\nH -3.987236 -6.975985 -8.823528\nH -3.987226 6.975980 -8.823514\n\"\"\"\nfname = 'ag_s7l7_wonatoms.xyz'\nfp = 
open(fname, 'w')\nfp.write(ag_s7l7_wonatoms)\nfp.close()\nmol = gto.M(verbose=1, atom=open(fname).read())\n\n\nclass KnowValues(unittest.TestCase):\n\n def test_ls_contributing(self):\n \"\"\" To test the list of contributing centers \"\"\"\n sv = nao(gto=mol)\n pb = prod_basis()\n pb.sv = sv\n pb.sv.ao_log.sp2rcut[0] = 10.0\n pb.prod_log = sv.ao_log\n pb.prod_log.sp2rcut[0] = 10.0\n pb.ac_rcut = max(sv.ao_log.sp2rcut)\n pb.ac_npc_max = 10\n lsc = pb.ls_contributing(0, 1)\n self.assertEqual(len(lsc), 10)\n lsref = [0, 1, 13, 7, 5, 43, 42, 39, 38, 10]\n for i, ref in enumerate(lsref):\n self.assertEqual(lsc[i], ref)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function, division\nimport unittest\nfrom pyscf import gto\nimport os\nfrom pyscf.nao import nao, prod_basis\n\n\nag_s7l7_wonatoms = \"\"\"\nH 2.346340 -0.000093 -1.449987\nH 2.346702 -0.000095 1.450132\nH -2.345370 -0.000086 -1.449228\nH -2.345734 -0.000089 1.449376\nH -1.449887 2.346112 -0.000046\nH 1.450134 2.346853 -0.000044\nH -1.449224 -2.345222 -0.000041\nH 1.449464 -2.345958 -0.000038\nH -0.000112 -1.449738 2.345957\nH -0.000111 1.449980 2.346608\nH -0.000111 -1.449377 -2.345464\nH -0.000107 1.449607 -2.346103\nH 4.731536 -0.000009 -2.923633\nH 4.794344 0.000006 0.000053\nH 4.731450 -0.000009 2.923590\nH -4.731847 -0.000004 -2.923807\nH -4.794483 0.000008 0.000053\nH -4.731757 -0.000006 2.923758\nH -2.923688 4.731598 0.000002\nH 0.000077 4.794367 0.000013\nH 2.923553 4.731432 0.000002\nH -2.923845 -4.731869 0.000004\nH 0.000084 -4.794470 0.000009\nH 2.923708 -4.731700 0.000004\nH -0.000016 -2.923710 4.731655\nH -0.000002 0.000081 4.794386\nH -0.000017 2.923594 4.731497\nH -0.000016 -2.923799 -4.731798\nH -0.000002 0.000083 -4.794441\nH -0.000018 2.923687 -4.731644\nH 2.396856 -1.481019 3.878620\nH 2.396773 1.481058 3.878614\nH 2.396905 -1.481021 -3.878636\nH 2.396824 1.481062 -3.878634\nH -2.396782 -1.481108 3.878712\nH -2.396699 1.481149 3.878709\nH -2.396832 -1.481114 -3.878735\nH -2.396748 1.481155 
-3.878730\nH 3.878596 2.396822 -1.481024\nH 3.878589 2.396778 1.481062\nH -3.878672 2.396916 -1.481031\nH -3.878666 2.396868 1.481064\nH 3.878682 -2.396737 -1.481107\nH 3.878676 -2.396695 1.481146\nH -3.878757 -2.396826 -1.481115\nH -3.878754 -2.396779 1.481148\nH -1.481047 3.878627 2.396831\nH 1.481072 3.878617 2.396742\nH -1.481055 -3.878680 2.396921\nH 1.481078 -3.878674 2.396834\nH -1.481096 3.878678 -2.396773\nH 1.481119 3.878671 -2.396685\nH -1.481102 -3.878731 -2.396860\nH 1.481126 -3.878722 -2.396772\nH 7.150331 0.000013 -4.418604\nH 7.225782 0.000009 -1.477531\nH 7.225777 0.000009 1.477551\nH 7.150346 0.000010 4.418636\nH -7.150239 0.000015 -4.418552\nH -7.225701 0.000010 -1.477539\nH -7.225697 0.000009 1.477559\nH -7.150257 0.000015 4.418586\nH -4.418596 7.150312 0.000012\nH -1.477538 7.225777 0.000010\nH 1.477536 7.225775 0.000011\nH 4.418635 7.150362 0.000012\nH -4.418553 -7.150222 0.000020\nH -1.477554 -7.225705 0.000012\nH 1.477559 -7.225701 0.000011\nH 4.418598 -7.150270 0.000013\nH 0.000008 -4.418580 7.150295\nH 0.000006 -1.477536 7.225760\nH 0.000007 1.477549 7.225757\nH 0.000007 4.418626 7.150335\nH 0.000008 -4.418561 -7.150247\nH 0.000006 -1.477545 -7.225726\nH 0.000006 1.477561 -7.225723\nH 0.000007 4.418613 -7.150287\nH 4.808303 -1.493555 5.388587\nH 2.417464 -2.971656 6.301956\nH 4.808308 1.493581 5.388605\nH 2.431587 0.000014 6.366095\nH 2.417478 2.971674 6.301966\nH 4.808303 -1.493546 -5.388552\nH 2.417452 -2.971655 -6.301934\nH 4.808310 1.493573 -5.388572\nH 2.431585 0.000016 -6.366071\nH 2.417464 2.971677 -6.301941\nH -4.808288 -1.493538 5.388559\nH -2.417439 -2.971638 6.301924\nH -4.808292 1.493569 5.388578\nH -2.431572 0.000013 6.366082\nH -2.417452 2.971656 6.301933\nH -4.808287 -1.493528 -5.388525\nH -2.417427 -2.971639 -6.301899\nH -4.808292 1.493561 -5.388546\nH -2.431572 0.000014 -6.366056\nH -2.417439 2.971659 -6.301909\nH 5.388603 4.808319 -1.493559\nH 6.301970 2.417487 -2.971653\nH 5.388608 4.808321 1.493584\nH 6.366098 2.431602 
0.000014\nH 6.301967 2.417490 2.971675\nH -5.388548 4.808294 -1.493543\nH -6.301922 2.417455 -2.971644\nH -5.388553 4.808296 1.493566\nH -6.366058 2.431589 0.000013\nH -6.301920 2.417459 2.971662\nH 5.388578 -4.808301 -1.493544\nH 6.301948 -2.417448 -2.971646\nH 5.388584 -4.808302 1.493574\nH 6.366092 -2.431572 0.000013\nH 6.301945 -2.417454 2.971667\nH -5.388520 -4.808272 -1.493529\nH -6.301896 -2.417412 -2.971637\nH -5.388529 -4.808274 1.493557\nH -6.366050 -2.431556 0.000012\nH -6.301896 -2.417417 2.971658\nH -1.493562 5.388587 4.808302\nH -2.971658 6.301956 2.417476\nH 1.493576 5.388607 4.808320\nH 0.000001 6.366100 2.431597\nH 2.971666 6.301972 2.417493\nH -1.493543 -5.388526 4.808289\nH -2.971649 -6.301906 2.417444\nH 1.493555 -5.388547 4.808306\nH 0.000002 -6.366051 2.431589\nH 2.971661 -6.301921 2.417458\nH -1.493560 5.388586 -4.808287\nH -2.971654 6.301946 -2.417449\nH 1.493572 5.388604 -4.808304\nH -0.000002 6.366099 -2.431566\nH 2.971663 6.301961 -2.417463\nH -1.493541 -5.388524 -4.808272\nH -2.971647 -6.301895 -2.417411\nH 1.493554 -5.388544 -4.808291\nH 0.000005 -6.366052 -2.431562\nH 2.971660 -6.301911 -2.417425\nH 3.933950 -3.933932 3.933958\nH 3.933959 3.933967 3.933963\nH 3.933948 -3.933931 -3.933931\nH 3.933957 3.933967 -3.933938\nH -3.933922 -3.933912 3.933940\nH -3.933929 3.933948 3.933945\nH -3.933921 -3.933910 -3.933915\nH -3.933929 3.933948 -3.933922\nH 9.586490 0.000010 -5.924178\nH 9.665986 0.000008 -2.972164\nH 9.696371 0.000007 0.000010\nH 9.665971 0.000008 2.972190\nH 9.586467 0.000009 5.924179\nH -9.586484 0.000013 -5.924188\nH -9.665980 0.000009 -2.972165\nH -9.696370 0.000008 0.000012\nH -9.665971 0.000008 2.972185\nH -9.586466 0.000011 5.924187\nH -5.924179 9.586482 0.000006\nH -2.972172 9.665973 0.000007\nH 0.000003 9.696364 0.000008\nH 2.972183 9.665974 0.000008\nH 5.924181 9.586480 0.000005\nH -5.924189 -9.586477 0.000008\nH -2.972172 -9.665971 0.000010\nH 0.000008 -9.696374 0.000008\nH 2.972190 -9.665972 0.000008\nH 5.924193 
-9.586474 0.000007\nH 0.000007 -5.924168 9.586474\nH 0.000007 -2.972167 9.665967\nH 0.000006 0.000007 9.696360\nH 0.000008 2.972186 9.665963\nH 0.000006 5.924181 9.586462\nH 0.000007 -5.924186 -9.586489\nH 0.000007 -2.972170 -9.665982\nH 0.000006 0.000009 -9.696379\nH 0.000007 2.972190 -9.665979\nH 0.000006 5.924201 -9.586480\nH 7.237307 -1.500148 6.901190\nH 4.847669 -2.996238 7.843968\nH 2.428125 -4.472804 8.738149\nH 7.237301 1.500165 6.901187\nH 4.862151 0.000009 7.909330\nH 2.452082 -1.489596 8.829834\nH 4.847661 2.996251 7.843964\nH 2.452083 1.489612 8.829831\nH 2.428122 4.472818 8.738145\nH 7.237323 -1.500151 -6.901186\nH 4.847685 -2.996244 -7.843971\nH 2.428136 -4.472811 -8.738153\nH 7.237318 1.500169 -6.901185\nH 4.862162 0.000010 -7.909328\nH 2.452091 -1.489597 -8.829834\nH 4.847679 2.996257 -7.843969\nH 2.452080 1.489614 -8.829849\nH 2.428132 4.472826 -8.738153\nH -7.237293 -1.500149 6.901191\nH -4.847661 -2.996241 7.843973\nH -2.428114 -4.472804 8.738152\nH -7.237288 1.500171 6.901188\nH -4.862140 0.000012 7.909329\nH -2.452056 -1.489593 8.829846\nH -4.847654 2.996255 7.843972\nH -2.452058 1.489609 8.829843\nH -2.428111 4.472819 8.738150\nH -7.237306 -1.500153 -6.901191\nH -4.847677 -2.996244 -7.843973\nH -2.428122 -4.472813 -8.738157\nH -7.237299 1.500176 -6.901188\nH -4.862151 0.000012 -7.909331\nH -2.452064 -1.489595 -8.829852\nH -4.847670 2.996260 -7.843970\nH -2.452064 1.489612 -8.829849\nH -2.428117 4.472829 -8.738155\nH 6.901190 7.237319 -1.500150\nH 7.843970 4.847677 -2.996237\nH 8.738156 2.428132 -4.472807\nH 6.901187 7.237315 1.500164\nH 7.909334 4.862159 0.000009\nH 8.829843 2.452089 -1.489594\nH 7.843962 4.847670 2.996250\nH 8.829839 2.452089 1.489612\nH 8.738147 2.428123 4.472820\nH -6.901186 7.237313 -1.500153\nH -7.843972 4.847681 -2.996242\nH -8.738156 2.428143 -4.472808\nH -6.901182 7.237309 1.500168\nH -7.909325 4.862158 0.000009\nH -8.829845 2.452079 -1.489595\nH -7.843964 4.847675 2.996256\nH -8.829842 2.452078 1.489612\nH -8.738145 
2.428137 4.472818\nH 6.901200 -7.237306 -1.500153\nH 7.843981 -4.847666 -2.996242\nH 8.738162 -2.428114 -4.472812\nH 6.901198 -7.237297 1.500171\nH 7.909337 -4.862146 0.000011\nH 8.829845 -2.452068 -1.489594\nH 7.843972 -4.847660 2.996258\nH 8.829841 -2.452068 1.489613\nH 8.738152 -2.428107 4.472824\nH -6.901190 -7.237297 -1.500154\nH -7.843979 -4.847668 -2.996248\nH -8.738159 -2.428121 -4.472812\nH -6.901189 -7.237293 1.500171\nH -7.909326 -4.862145 0.000009\nH -8.829847 -2.452058 -1.489596\nH -7.843972 -4.847664 2.996263\nH -8.829843 -2.452057 1.489614\nH -8.738148 -2.428115 4.472824\nH -1.500153 6.901189 7.237302\nH -2.996243 7.843968 4.847670\nH -4.472809 8.738156 2.428132\nH 1.500162 6.901186 7.237307\nH 0.000005 7.909331 4.862151\nH -1.489597 8.829847 2.452075\nH 2.996247 7.843960 4.847669\nH 1.489606 8.829848 2.452078\nH 4.472817 8.738150 2.428124\nH -1.500152 -6.901177 7.237309\nH -2.996247 -7.843968 4.847679\nH -4.472811 -8.738149 2.428139\nH 1.500163 -6.901178 7.237314\nH 0.000006 -7.909318 4.862160\nH -1.489601 -8.829840 2.452076\nH 2.996251 -7.843960 4.847678\nH 1.489612 -8.829840 2.452078\nH 4.472822 -8.738143 2.428130\nH -1.500158 6.901205 -7.237297\nH -2.996246 7.843981 -4.847659\nH -4.472815 8.738162 -2.428115\nH 1.500171 6.901202 -7.237297\nH 0.000007 7.909340 -4.862141\nH -1.489596 8.829841 -2.452068\nH 2.996253 7.843975 -4.847660\nH 1.489606 8.829841 -2.452069\nH 4.472822 8.738158 -2.428111\nH -1.500158 -6.901191 -7.237303\nH -2.996251 -7.843978 -4.847665\nH -4.472815 -8.738153 -2.428116\nH 1.500171 -6.901191 -7.237303\nH 0.000007 -7.909327 -4.862149\nH -1.489602 -8.829834 -2.452067\nH 2.996258 -7.843975 -4.847668\nH 1.489613 -8.829834 -2.452069\nH 4.472827 -8.738151 -2.428114\nH 6.377354 -3.967092 5.457178\nH 6.377349 3.967112 5.457173\nH 5.457177 -6.377342 3.967111\nH 5.457169 6.377356 3.967110\nH 3.967106 -5.457157 6.377359\nH 3.967106 5.457174 6.377352\nH 6.377366 -3.967096 -5.457166\nH 6.377363 3.967116 -5.457163\nH 5.457183 -6.377353 
-3.967095\nH 5.457176 6.377363 -3.967098\nH 3.967113 -5.457167 -6.377350\nH 3.967114 5.457185 -6.377343\nH -6.377340 -3.967091 5.457179\nH -6.377336 3.967112 5.457174\nH -5.457166 -6.377340 3.967111\nH -5.457161 6.377353 3.967111\nH -3.967095 -5.457156 6.377359\nH -3.967095 5.457171 6.377352\nH -6.377355 -3.967095 -5.457168\nH -6.377350 3.967118 -5.457163\nH -5.457171 -6.377349 -3.967099\nH -5.457166 6.377363 -3.967100\nH -3.967102 -5.457168 -6.377348\nH -3.967101 5.457185 -6.377342\nH 12.038641 0.000003 -7.440548\nH 12.099988 0.000003 -4.483580\nH 12.144240 0.000006 -1.497071\nH 12.144233 0.000006 1.497083\nH 12.099971 0.000003 4.483587\nH 12.038623 0.000004 7.440541\nH -12.038651 -0.000000 -7.440559\nH -12.099994 0.000006 -4.483580\nH -12.144253 0.000006 -1.497073\nH -12.144248 0.000005 1.497083\nH -12.099982 0.000005 4.483584\nH -12.038629 0.000000 7.440551\nH -7.440551 12.038630 0.000002\nH -4.483583 12.099975 0.000003\nH -1.497079 12.144221 0.000004\nH 1.497075 12.144221 0.000005\nH 4.483580 12.099968 0.000003\nH 7.440535 12.038629 0.000002\nH -7.440565 -12.038641 0.000002\nH -4.483589 -12.099999 0.000005\nH -1.497078 -12.144261 0.000007\nH 1.497079 -12.144262 0.000005\nH 4.483587 -12.099997 0.000004\nH 7.440549 -12.038645 0.000001\nH -0.000009 -7.440541 12.038631\nH 0.000001 -4.483578 12.099976\nH 0.000001 -1.497071 12.144229\nH 0.000001 1.497078 12.144221\nH 0.000002 4.483585 12.099964\nH -0.000007 7.440539 12.038614\nH -0.000008 -7.440559 -12.038653\nH 0.000002 -4.483583 -12.100005\nH 0.000001 -1.497073 -12.144265\nH 0.000002 1.497082 -12.144259\nH 0.000001 4.483590 -12.099993\nH -0.000006 7.440555 -12.038641\nH 9.677193 -1.496906 8.403454\nH 7.283257 -3.003960 9.361664\nH 4.860366 -4.500621 10.287633\nH 2.422429 -5.980893 11.174337\nH 9.677186 1.496910 8.403449\nH 7.301204 0.000004 9.440827\nH 4.899855 -1.498367 10.403244\nH 2.464544 -2.988767 11.288143\nH 7.283251 3.003967 9.361652\nH 4.899853 1.498377 10.403239\nH 2.475659 0.000006 11.329680\nH 4.860357 
4.500623 10.287616\nH 2.464540 2.988773 11.288133\nH 2.422424 5.980895 11.174321\nH 9.677209 -1.496912 -8.403465\nH 7.283277 -3.003970 -9.361682\nH 4.860384 -4.500636 -10.287647\nH 2.422443 -5.980905 -11.174358\nH 9.677204 1.496921 -8.403460\nH 7.301220 0.000005 -9.440842\nH 4.899871 -1.498371 -10.403268\nH 2.464557 -2.988775 -11.288169\nH 7.283270 3.003976 -9.361672\nH 4.899868 1.498380 -10.403263\nH 2.475670 0.000005 -11.329709\nH 4.860376 4.500637 -10.287640\nH 2.464554 2.988779 -11.288159\nH 2.422434 5.980909 -11.174343\nH -9.677190 -1.496908 8.403467\nH -7.283257 -3.003963 9.361672\nH -4.860371 -4.500629 10.287635\nH -2.422430 -5.980892 11.174337\nH -9.677187 1.496917 8.403462\nH -7.301202 0.000005 9.440837\nH -4.899849 -1.498370 10.403248\nH -2.464542 -2.988769 11.288143\nH -7.283250 3.003968 9.361662\nH -4.899846 1.498382 10.403245\nH -2.475653 0.000007 11.329681\nH -4.860362 4.500633 10.287620\nH -2.464539 2.988776 11.288131\nH -2.422424 5.980896 11.174322\nH -9.677207 -1.496913 -8.403482\nH -7.283279 -3.003972 -9.361692\nH -4.860387 -4.500634 -10.287661\nH -2.422442 -5.980906 -11.174359\nH -9.677203 1.496924 -8.403476\nH -7.301216 0.000005 -9.440853\nH -4.899865 -1.498371 -10.403273\nH -2.464552 -2.988774 -11.288171\nH -7.283271 3.003977 -9.361683\nH -4.899862 1.498381 -10.403269\nH -2.475664 0.000006 -11.329711\nH -4.860376 4.500638 -10.287646\nH -2.464549 2.988781 -11.288160\nH -2.422433 5.980910 -11.174344\nH 8.403451 9.677198 -1.496905\nH 9.361660 7.283263 -3.003959\nH 10.287632 4.860371 -4.500620\nH 11.174342 2.422434 -5.980899\nH 8.403446 9.677194 1.496911\nH 9.440827 7.301209 0.000005\nH 10.403246 4.899862 -1.498367\nH 11.288149 2.464551 -2.988769\nH 9.361650 7.283254 3.003966\nH 10.403241 4.899860 1.498377\nH 11.329691 2.475665 0.000007\nH 10.287617 4.860361 4.500626\nH 11.288137 2.464548 2.988779\nH 11.174326 2.422424 5.980898\nH -8.403460 9.677196 -1.496910\nH -9.361670 7.283266 -3.003964\nH -10.287643 4.860386 -4.500636\nH -11.174348 2.422445 
-5.980904\nH -8.403456 9.677194 1.496915\nH -9.440835 7.301210 0.000006\nH -10.403255 4.899863 -1.498369\nH -11.288154 2.464562 -2.988776\nH -9.361661 7.283260 3.003973\nH -10.403249 4.899861 1.498381\nH -11.329693 2.475669 0.000006\nH -10.287628 4.860378 4.500640\nH -11.288144 2.464559 2.988785\nH -11.174330 2.422439 5.980903\nH 8.403472 -9.677206 -1.496911\nH 9.361686 -7.283270 -3.003967\nH 10.287643 -4.860371 -4.500629\nH 11.174350 -2.422427 -5.980904\nH 8.403468 -9.677203 1.496918\nH 9.440847 -7.301215 0.000005\nH 10.403263 -4.899857 -1.498370\nH 11.288155 -2.464540 -2.988768\nH 9.361677 -7.283262 3.003974\nH 10.403258 -4.899854 1.498381\nH 11.329699 -2.475655 0.000006\nH 10.287637 -4.860362 4.500633\nH 11.288145 -2.464540 2.988780\nH 11.174334 -2.422420 5.980905\nH -8.403482 -9.677201 -1.496913\nH -9.361695 -7.283272 -3.003971\nH -10.287648 -4.860385 -4.500644\nH -11.174357 -2.422436 -5.980909\nH -8.403479 -9.677198 1.496923\nH -9.440853 -7.301212 0.000006\nH -10.403270 -4.899858 -1.498373\nH -11.288161 -2.464554 -2.988778\nH -9.361685 -7.283267 3.003981\nH -10.403265 -4.899857 1.498385\nH -11.329701 -2.475659 0.000006\nH -10.287644 -4.860378 4.500647\nH -11.288152 -2.464553 2.988786\nH -11.174339 -2.422432 5.980907\nH -1.496905 8.403449 9.677180\nH -3.003957 9.361655 7.283252\nH -4.500628 10.287621 4.860368\nH -5.980895 11.174331 2.422431\nH 1.496909 8.403447 9.677182\nH 0.000004 9.440823 7.301201\nH -1.498372 10.403237 4.899852\nH -2.988773 11.288135 2.464550\nH 3.003964 9.361649 7.283252\nH 1.498374 10.403237 4.899852\nH 0.000002 11.329673 2.475664\nH 4.500620 10.287615 4.860360\nH 2.988773 11.288132 2.464547\nH 5.980894 11.174325 2.422427\nH -1.496909 -8.403459 9.677196\nH -3.003968 -9.361674 7.283271\nH -4.500641 -10.287646 4.860388\nH -5.980907 -11.174350 2.422445\nH 1.496913 -8.403457 9.677200\nH 0.000003 -9.440837 7.301216\nH -1.498376 -10.403259 4.899869\nH -2.988779 -11.288160 2.464559\nH 3.003975 -9.361670 7.283272\nH 1.498380 -10.403259 4.899869\nH 
0.000003 -11.329705 2.475670\nH 4.500634 -10.287640 4.860379\nH 2.988780 -11.288157 2.464554\nH 5.980909 -11.174345 2.422438\nH -1.496912 8.403471 -9.677194\nH -3.003965 9.361677 -7.283259\nH -4.500629 10.287638 -4.860363\nH -5.980898 11.174338 -2.422428\nH 1.496914 8.403469 -9.677196\nH 0.000004 9.440843 -7.301203\nH -1.498370 10.403254 -4.899850\nH -2.988770 11.288143 -2.464539\nH 3.003972 9.361671 -7.283257\nH 1.498377 10.403253 -4.899848\nH 0.000002 11.329681 -2.475654\nH 4.500628 10.287631 -4.860359\nH 2.988772 11.288140 -2.464537\nH 5.980897 11.174332 -2.422423\nH -1.496916 -8.403481 -9.677208\nH -3.003978 -9.361696 -7.283276\nH -4.500640 -10.287662 -4.860381\nH -5.980909 -11.174356 -2.422438\nH 1.496920 -8.403479 -9.677211\nH 0.000003 -9.440857 -7.301218\nH -1.498376 -10.403275 -4.899866\nH -2.988776 -11.288169 -2.464544\nH 3.003983 -9.361693 -7.283276\nH 1.498382 -10.403275 -4.899865\nH 0.000004 -11.329712 -2.475660\nH 4.500643 -10.287646 -4.860375\nH 2.988780 -11.288164 -2.464543\nH 5.980912 -11.174352 -2.422432\nH 8.823514 -3.987216 6.975976\nH 8.823502 3.987220 6.975966\nH 7.928324 -6.428771 5.503194\nH 7.928307 6.428772 5.503181\nH 6.975982 -8.823518 3.987231\nH 6.975962 8.823505 3.987222\nH 6.428780 -5.503180 7.928320\nH 6.428768 5.503182 7.928308\nH 5.503192 -7.928317 6.428783\nH 5.503180 7.928307 6.428768\nH 3.987227 -6.975968 8.823516\nH 3.987219 6.975963 8.823502\nH 8.823531 -3.987227 -6.975977\nH 8.823524 3.987231 -6.975969\nH 7.928335 -6.428786 -5.503192\nH 7.928318 6.428785 -5.503181\nH 6.975988 -8.823529 -3.987226\nH 6.975969 8.823515 -3.987217\nH 6.428797 -5.503193 -7.928331\nH 6.428784 5.503196 -7.928316\nH 5.503204 -7.928332 -6.428787\nH 5.503193 7.928321 -6.428774\nH 3.987241 -6.975984 -8.823526\nH 3.987231 6.975978 -8.823511\nH -8.823510 -3.987222 6.975980\nH -8.823504 3.987228 6.975972\nH -7.928324 -6.428779 5.503202\nH -7.928304 6.428779 5.503191\nH -6.975981 -8.823519 3.987237\nH -6.975964 8.823508 3.987226\nH -6.428779 -5.503186 
7.928325\nH -6.428768 5.503188 7.928311\nH -5.503191 -7.928320 6.428792\nH -5.503179 7.928309 6.428775\nH -3.987221 -6.975969 8.823517\nH -3.987213 6.975965 8.823503\nH -8.823529 -3.987231 -6.975982\nH -8.823521 3.987238 -6.975974\nH -7.928336 -6.428791 -5.503200\nH -7.928317 6.428791 -5.503190\nH -6.975987 -8.823528 -3.987232\nH -6.975972 8.823516 -3.987222\nH -6.428795 -5.503199 -7.928335\nH -6.428782 5.503201 -7.928321\nH -5.503203 -7.928336 -6.428794\nH -5.503191 7.928325 -6.428778\nH -3.987236 -6.975985 -8.823528\nH -3.987226 6.975980 -8.823514\n\"\"\"\n\nfname = \"ag_s7l7_wonatoms.xyz\"\nfp = open(fname, \"w\")\nfp.write(ag_s7l7_wonatoms)\nfp.close()\n\n# this does not work from python interp\n#d = os.path.dirname(os.path.abspath(__file__))\nmol = gto.M(\n verbose = 1,\n atom = open(fname).read()\n)\n\nclass KnowValues(unittest.TestCase):\n\n def test_ls_contributing(self):\n \"\"\" To test the list of contributing centers \"\"\"\n sv = nao(gto=mol)\n pb = prod_basis()\n pb.sv = sv\n pb.sv.ao_log.sp2rcut[0] = 10.0\n pb.prod_log = sv.ao_log\n pb.prod_log.sp2rcut[0] = 10.0\n pb.ac_rcut = max(sv.ao_log.sp2rcut)\n pb.ac_npc_max = 10\n lsc = pb.ls_contributing(0,1)\n self.assertEqual(len(lsc),10)\n lsref = [ 0, 1, 13, 7, 5, 43, 42, 39, 38, 10]\n for i,ref in enumerate(lsref) : self.assertEqual(lsc[i],ref)\n \nif __name__ == \"__main__\": unittest.main()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
__author__ = 'mvoronin'
|
normal
|
{
"blob_id": "e5a7b0cbc82b57578f6dcbf676e8f589c6e9ac1b",
"index": 5663,
"step-1": "<mask token>\n",
"step-2": "__author__ = 'mvoronin'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from pyzabbix import ZabbixMetric, ZabbixSender, ZabbixAPI
from datetime import datetime
from re import findall
# current_time = datetime.now().strftime("%H:%M:%S %d.%m.%Y")
class ZabbixItem():
def __init__(self, user, password, ext_group, ext_template, zabbix_host):
self.user = user
self.password = password
self.zabbix_host = zabbix_host
self.zabbix_api = f"http://{zabbix_host}"
self.connection = self.connection_init()
self.template_id = self.get_template(ext_template)
self.group_id = self.get_group(ext_group)
# print(self.get_group(EXT_GROUP))
def connection_init(self):
'''
Zabbix connection init
:return: connection
'''
return ZabbixAPI(f"http://{self.zabbix_host}", user=self.user, password=self.password)
def get_template(self, template_name):
'''
Get template id by template name
:param template_name:
:return: template id as string
'''
ext_template = self.connection.do_request("template.get", {
"filter": {"host": [template_name]},
"output": "template_id"
}).get("result")
if ext_template:
result = ext_template[0].get("templateid")
else:
result = False
return result
def get_group(self, group_name):
"""
Get group Id
:param group_name:
:return: group ID
"""
group = self.connection.do_request("hostgroup.get", {
"filter": {"name": [group_name]},
"output": "extend"
}).get("result")
if group:
result = group[0].get("groupid")
else:
# print("create Group")
result = False
return result
def clear_ping(self, value):
"""
clear ping value from text
:param value: raw data, 50 ms as example
:return: integer value
"""
try:
result = int(value[:value.find(" ")])
except IndexError:
result = False
except ValueError:
# print(value)
result = False
return result
def host_create(self, data):
'''
Create host item
:param host_params:
:return: host id
'''
return self.connection.do_request('host.create', data)[0].get("result")
def assign_template_to_host(self, host_id):
"""
Assign template to host
:param host_id: host id
:return:
"""
return self.connection.do_request("template.update", teamplateid=self.template_id, hosts=[host_id])
def send_data(self, data):
"""
Send data to server
:param data: data dict
:return:
"""
# test_dict = {'ext': '1105', 'ip_address': '192.168.10.55', 'status': 'OK', 'ping': '5 ms', 'user': 'Secretary',
# 'user_agent': 'Cisco/SPA508G-7.4.9a'}
sender_data = []
host_id = data.get("ext")
# print(ZABBIX_HOST)
zbx_sender = ZabbixSender(self.zabbix_host)
extension_ip = ZabbixMetric(host_id, 'extPhoneIpAddress', data.get("ip_address"))
sender_data.append(extension_ip)
extension_ping = ZabbixMetric(host_id, "extPhonePing", self.clear_ping(data.get("ping", 10000)))
sender_data.append(extension_ping)
extension_status = ZabbixMetric(host_id, "extStatus", data.get("status", ""))
sender_data.append(extension_status)
extension_user = ZabbixMetric(host_id, "extUser", data.get("user", ""))
sender_data.append(extension_user)
extension_useragent = ZabbixMetric(host_id, "extUserAgent", data.get("user_agent", ""))
sender_data.append(extension_useragent)
zbx_sender.send(sender_data)
def worker(self, data):
"""
Check host. If extension exists - send new data, otherwise - create extension's host in zabbix and send data.
:param data: dict with data
:return: host id
"""
print(data)
host_raw = self.connection.do_request('host.get', {
'filter': {'host': data["ext"]},
'output': ['hostid']
}).get("result")
# print("host_raw", host_raw)
if host_raw:
host_id = host_raw[0].get("hostid")
else:
host_new = self.connection.do_request('host.create', {"host" : f"{data.get('ext')}",
"templates": [
{"templateid" : self.template_id}
],
"groups": [
{"groupid": self.group_id}
]
})
host_id = host_new.get("result").get("hostids")[0]
self.send_data(data)
|
normal
|
{
"blob_id": "14826b5b121ba2939519492c1e1d8700c32396d2",
"index": 8963,
"step-1": "<mask token>\n\n\nclass ZabbixItem:\n\n def __init__(self, user, password, ext_group, ext_template, zabbix_host):\n self.user = user\n self.password = password\n self.zabbix_host = zabbix_host\n self.zabbix_api = f'http://{zabbix_host}'\n self.connection = self.connection_init()\n self.template_id = self.get_template(ext_template)\n self.group_id = self.get_group(ext_group)\n <mask token>\n\n def get_template(self, template_name):\n \"\"\"\n Get template id by template name\n :param template_name:\n :return: template id as string\n \"\"\"\n ext_template = self.connection.do_request('template.get', {'filter':\n {'host': [template_name]}, 'output': 'template_id'}).get('result')\n if ext_template:\n result = ext_template[0].get('templateid')\n else:\n result = False\n return result\n <mask token>\n\n def clear_ping(self, value):\n \"\"\"\n clear ping value from text\n :param value: raw data, 50 ms as example\n :return: integer value\n \"\"\"\n try:\n result = int(value[:value.find(' ')])\n except IndexError:\n result = False\n except ValueError:\n result = False\n return result\n <mask token>\n <mask token>\n\n def send_data(self, data):\n \"\"\"\n Send data to server\n :param data: data dict\n :return:\n \"\"\"\n sender_data = []\n host_id = data.get('ext')\n zbx_sender = ZabbixSender(self.zabbix_host)\n extension_ip = ZabbixMetric(host_id, 'extPhoneIpAddress', data.get(\n 'ip_address'))\n sender_data.append(extension_ip)\n extension_ping = ZabbixMetric(host_id, 'extPhonePing', self.\n clear_ping(data.get('ping', 10000)))\n sender_data.append(extension_ping)\n extension_status = ZabbixMetric(host_id, 'extStatus', data.get(\n 'status', ''))\n sender_data.append(extension_status)\n extension_user = ZabbixMetric(host_id, 'extUser', data.get('user', ''))\n sender_data.append(extension_user)\n extension_useragent = ZabbixMetric(host_id, 'extUserAgent', data.\n get('user_agent', ''))\n sender_data.append(extension_useragent)\n zbx_sender.send(sender_data)\n\n 
def worker(self, data):\n \"\"\"\n Check host. If extension exists - send new data, otherwise - create extension's host in zabbix and send data.\n :param data: dict with data\n :return: host id\n \"\"\"\n print(data)\n host_raw = self.connection.do_request('host.get', {'filter': {\n 'host': data['ext']}, 'output': ['hostid']}).get('result')\n if host_raw:\n host_id = host_raw[0].get('hostid')\n else:\n host_new = self.connection.do_request('host.create', {'host':\n f\"{data.get('ext')}\", 'templates': [{'templateid': self.\n template_id}], 'groups': [{'groupid': self.group_id}]})\n host_id = host_new.get('result').get('hostids')[0]\n self.send_data(data)\n",
"step-2": "<mask token>\n\n\nclass ZabbixItem:\n\n def __init__(self, user, password, ext_group, ext_template, zabbix_host):\n self.user = user\n self.password = password\n self.zabbix_host = zabbix_host\n self.zabbix_api = f'http://{zabbix_host}'\n self.connection = self.connection_init()\n self.template_id = self.get_template(ext_template)\n self.group_id = self.get_group(ext_group)\n <mask token>\n\n def get_template(self, template_name):\n \"\"\"\n Get template id by template name\n :param template_name:\n :return: template id as string\n \"\"\"\n ext_template = self.connection.do_request('template.get', {'filter':\n {'host': [template_name]}, 'output': 'template_id'}).get('result')\n if ext_template:\n result = ext_template[0].get('templateid')\n else:\n result = False\n return result\n <mask token>\n\n def clear_ping(self, value):\n \"\"\"\n clear ping value from text\n :param value: raw data, 50 ms as example\n :return: integer value\n \"\"\"\n try:\n result = int(value[:value.find(' ')])\n except IndexError:\n result = False\n except ValueError:\n result = False\n return result\n\n def host_create(self, data):\n \"\"\"\n Create host item\n :param host_params:\n :return: host id\n \"\"\"\n return self.connection.do_request('host.create', data)[0].get('result')\n\n def assign_template_to_host(self, host_id):\n \"\"\"\n Assign template to host\n :param host_id: host id\n :return:\n \"\"\"\n return self.connection.do_request('template.update', teamplateid=\n self.template_id, hosts=[host_id])\n\n def send_data(self, data):\n \"\"\"\n Send data to server\n :param data: data dict\n :return:\n \"\"\"\n sender_data = []\n host_id = data.get('ext')\n zbx_sender = ZabbixSender(self.zabbix_host)\n extension_ip = ZabbixMetric(host_id, 'extPhoneIpAddress', data.get(\n 'ip_address'))\n sender_data.append(extension_ip)\n extension_ping = ZabbixMetric(host_id, 'extPhonePing', self.\n clear_ping(data.get('ping', 10000)))\n sender_data.append(extension_ping)\n 
extension_status = ZabbixMetric(host_id, 'extStatus', data.get(\n 'status', ''))\n sender_data.append(extension_status)\n extension_user = ZabbixMetric(host_id, 'extUser', data.get('user', ''))\n sender_data.append(extension_user)\n extension_useragent = ZabbixMetric(host_id, 'extUserAgent', data.\n get('user_agent', ''))\n sender_data.append(extension_useragent)\n zbx_sender.send(sender_data)\n\n def worker(self, data):\n \"\"\"\n Check host. If extension exists - send new data, otherwise - create extension's host in zabbix and send data.\n :param data: dict with data\n :return: host id\n \"\"\"\n print(data)\n host_raw = self.connection.do_request('host.get', {'filter': {\n 'host': data['ext']}, 'output': ['hostid']}).get('result')\n if host_raw:\n host_id = host_raw[0].get('hostid')\n else:\n host_new = self.connection.do_request('host.create', {'host':\n f\"{data.get('ext')}\", 'templates': [{'templateid': self.\n template_id}], 'groups': [{'groupid': self.group_id}]})\n host_id = host_new.get('result').get('hostids')[0]\n self.send_data(data)\n",
"step-3": "<mask token>\n\n\nclass ZabbixItem:\n\n def __init__(self, user, password, ext_group, ext_template, zabbix_host):\n self.user = user\n self.password = password\n self.zabbix_host = zabbix_host\n self.zabbix_api = f'http://{zabbix_host}'\n self.connection = self.connection_init()\n self.template_id = self.get_template(ext_template)\n self.group_id = self.get_group(ext_group)\n <mask token>\n\n def get_template(self, template_name):\n \"\"\"\n Get template id by template name\n :param template_name:\n :return: template id as string\n \"\"\"\n ext_template = self.connection.do_request('template.get', {'filter':\n {'host': [template_name]}, 'output': 'template_id'}).get('result')\n if ext_template:\n result = ext_template[0].get('templateid')\n else:\n result = False\n return result\n\n def get_group(self, group_name):\n \"\"\"\n Get group Id\n :param group_name:\n :return: group ID\n \"\"\"\n group = self.connection.do_request('hostgroup.get', {'filter': {\n 'name': [group_name]}, 'output': 'extend'}).get('result')\n if group:\n result = group[0].get('groupid')\n else:\n result = False\n return result\n\n def clear_ping(self, value):\n \"\"\"\n clear ping value from text\n :param value: raw data, 50 ms as example\n :return: integer value\n \"\"\"\n try:\n result = int(value[:value.find(' ')])\n except IndexError:\n result = False\n except ValueError:\n result = False\n return result\n\n def host_create(self, data):\n \"\"\"\n Create host item\n :param host_params:\n :return: host id\n \"\"\"\n return self.connection.do_request('host.create', data)[0].get('result')\n\n def assign_template_to_host(self, host_id):\n \"\"\"\n Assign template to host\n :param host_id: host id\n :return:\n \"\"\"\n return self.connection.do_request('template.update', teamplateid=\n self.template_id, hosts=[host_id])\n\n def send_data(self, data):\n \"\"\"\n Send data to server\n :param data: data dict\n :return:\n \"\"\"\n sender_data = []\n host_id = data.get('ext')\n zbx_sender 
= ZabbixSender(self.zabbix_host)\n extension_ip = ZabbixMetric(host_id, 'extPhoneIpAddress', data.get(\n 'ip_address'))\n sender_data.append(extension_ip)\n extension_ping = ZabbixMetric(host_id, 'extPhonePing', self.\n clear_ping(data.get('ping', 10000)))\n sender_data.append(extension_ping)\n extension_status = ZabbixMetric(host_id, 'extStatus', data.get(\n 'status', ''))\n sender_data.append(extension_status)\n extension_user = ZabbixMetric(host_id, 'extUser', data.get('user', ''))\n sender_data.append(extension_user)\n extension_useragent = ZabbixMetric(host_id, 'extUserAgent', data.\n get('user_agent', ''))\n sender_data.append(extension_useragent)\n zbx_sender.send(sender_data)\n\n def worker(self, data):\n \"\"\"\n Check host. If extension exists - send new data, otherwise - create extension's host in zabbix and send data.\n :param data: dict with data\n :return: host id\n \"\"\"\n print(data)\n host_raw = self.connection.do_request('host.get', {'filter': {\n 'host': data['ext']}, 'output': ['hostid']}).get('result')\n if host_raw:\n host_id = host_raw[0].get('hostid')\n else:\n host_new = self.connection.do_request('host.create', {'host':\n f\"{data.get('ext')}\", 'templates': [{'templateid': self.\n template_id}], 'groups': [{'groupid': self.group_id}]})\n host_id = host_new.get('result').get('hostids')[0]\n self.send_data(data)\n",
"step-4": "from pyzabbix import ZabbixMetric, ZabbixSender, ZabbixAPI\nfrom datetime import datetime\nfrom re import findall\n\n\nclass ZabbixItem:\n\n def __init__(self, user, password, ext_group, ext_template, zabbix_host):\n self.user = user\n self.password = password\n self.zabbix_host = zabbix_host\n self.zabbix_api = f'http://{zabbix_host}'\n self.connection = self.connection_init()\n self.template_id = self.get_template(ext_template)\n self.group_id = self.get_group(ext_group)\n\n def connection_init(self):\n \"\"\"\n Zabbix connection init\n :return: connection\n \"\"\"\n return ZabbixAPI(f'http://{self.zabbix_host}', user=self.user,\n password=self.password)\n\n def get_template(self, template_name):\n \"\"\"\n Get template id by template name\n :param template_name:\n :return: template id as string\n \"\"\"\n ext_template = self.connection.do_request('template.get', {'filter':\n {'host': [template_name]}, 'output': 'template_id'}).get('result')\n if ext_template:\n result = ext_template[0].get('templateid')\n else:\n result = False\n return result\n\n def get_group(self, group_name):\n \"\"\"\n Get group Id\n :param group_name:\n :return: group ID\n \"\"\"\n group = self.connection.do_request('hostgroup.get', {'filter': {\n 'name': [group_name]}, 'output': 'extend'}).get('result')\n if group:\n result = group[0].get('groupid')\n else:\n result = False\n return result\n\n def clear_ping(self, value):\n \"\"\"\n clear ping value from text\n :param value: raw data, 50 ms as example\n :return: integer value\n \"\"\"\n try:\n result = int(value[:value.find(' ')])\n except IndexError:\n result = False\n except ValueError:\n result = False\n return result\n\n def host_create(self, data):\n \"\"\"\n Create host item\n :param host_params:\n :return: host id\n \"\"\"\n return self.connection.do_request('host.create', data)[0].get('result')\n\n def assign_template_to_host(self, host_id):\n \"\"\"\n Assign template to host\n :param host_id: host id\n :return:\n 
\"\"\"\n return self.connection.do_request('template.update', teamplateid=\n self.template_id, hosts=[host_id])\n\n def send_data(self, data):\n \"\"\"\n Send data to server\n :param data: data dict\n :return:\n \"\"\"\n sender_data = []\n host_id = data.get('ext')\n zbx_sender = ZabbixSender(self.zabbix_host)\n extension_ip = ZabbixMetric(host_id, 'extPhoneIpAddress', data.get(\n 'ip_address'))\n sender_data.append(extension_ip)\n extension_ping = ZabbixMetric(host_id, 'extPhonePing', self.\n clear_ping(data.get('ping', 10000)))\n sender_data.append(extension_ping)\n extension_status = ZabbixMetric(host_id, 'extStatus', data.get(\n 'status', ''))\n sender_data.append(extension_status)\n extension_user = ZabbixMetric(host_id, 'extUser', data.get('user', ''))\n sender_data.append(extension_user)\n extension_useragent = ZabbixMetric(host_id, 'extUserAgent', data.\n get('user_agent', ''))\n sender_data.append(extension_useragent)\n zbx_sender.send(sender_data)\n\n def worker(self, data):\n \"\"\"\n Check host. If extension exists - send new data, otherwise - create extension's host in zabbix and send data.\n :param data: dict with data\n :return: host id\n \"\"\"\n print(data)\n host_raw = self.connection.do_request('host.get', {'filter': {\n 'host': data['ext']}, 'output': ['hostid']}).get('result')\n if host_raw:\n host_id = host_raw[0].get('hostid')\n else:\n host_new = self.connection.do_request('host.create', {'host':\n f\"{data.get('ext')}\", 'templates': [{'templateid': self.\n template_id}], 'groups': [{'groupid': self.group_id}]})\n host_id = host_new.get('result').get('hostids')[0]\n self.send_data(data)\n",
"step-5": "from pyzabbix import ZabbixMetric, ZabbixSender, ZabbixAPI\nfrom datetime import datetime\nfrom re import findall\n\n# current_time = datetime.now().strftime(\"%H:%M:%S %d.%m.%Y\")\n\nclass ZabbixItem():\n\n def __init__(self, user, password, ext_group, ext_template, zabbix_host):\n self.user = user\n self.password = password\n self.zabbix_host = zabbix_host\n self.zabbix_api = f\"http://{zabbix_host}\"\n\n self.connection = self.connection_init()\n self.template_id = self.get_template(ext_template)\n self.group_id = self.get_group(ext_group)\n # print(self.get_group(EXT_GROUP))\n\n def connection_init(self):\n '''\n Zabbix connection init\n :return: connection\n '''\n return ZabbixAPI(f\"http://{self.zabbix_host}\", user=self.user, password=self.password)\n\n def get_template(self, template_name):\n '''\n Get template id by template name\n :param template_name:\n :return: template id as string\n '''\n\n ext_template = self.connection.do_request(\"template.get\", {\n \"filter\": {\"host\": [template_name]},\n \"output\": \"template_id\"\n }).get(\"result\")\n\n if ext_template:\n result = ext_template[0].get(\"templateid\")\n else:\n result = False\n return result\n\n def get_group(self, group_name):\n \"\"\"\n Get group Id\n :param group_name:\n :return: group ID\n \"\"\"\n\n group = self.connection.do_request(\"hostgroup.get\", {\n \"filter\": {\"name\": [group_name]},\n \"output\": \"extend\"\n }).get(\"result\")\n\n if group:\n result = group[0].get(\"groupid\")\n else:\n # print(\"create Group\")\n result = False\n return result\n\n def clear_ping(self, value):\n \"\"\"\n clear ping value from text\n :param value: raw data, 50 ms as example\n :return: integer value\n \"\"\"\n\n try:\n result = int(value[:value.find(\" \")])\n except IndexError:\n result = False\n except ValueError:\n # print(value)\n result = False\n return result\n\n\n def host_create(self, data):\n '''\n Create host item\n :param host_params:\n :return: host id\n '''\n\n return 
self.connection.do_request('host.create', data)[0].get(\"result\")\n def assign_template_to_host(self, host_id):\n \"\"\"\n Assign template to host\n :param host_id: host id\n :return:\n \"\"\"\n\n return self.connection.do_request(\"template.update\", teamplateid=self.template_id, hosts=[host_id])\n\n def send_data(self, data):\n \"\"\"\n Send data to server\n :param data: data dict\n :return:\n \"\"\"\n # test_dict = {'ext': '1105', 'ip_address': '192.168.10.55', 'status': 'OK', 'ping': '5 ms', 'user': 'Secretary',\n # 'user_agent': 'Cisco/SPA508G-7.4.9a'}\n\n sender_data = []\n host_id = data.get(\"ext\")\n # print(ZABBIX_HOST)\n zbx_sender = ZabbixSender(self.zabbix_host)\n extension_ip = ZabbixMetric(host_id, 'extPhoneIpAddress', data.get(\"ip_address\"))\n sender_data.append(extension_ip)\n\n extension_ping = ZabbixMetric(host_id, \"extPhonePing\", self.clear_ping(data.get(\"ping\", 10000)))\n sender_data.append(extension_ping)\n\n extension_status = ZabbixMetric(host_id, \"extStatus\", data.get(\"status\", \"\"))\n sender_data.append(extension_status)\n\n extension_user = ZabbixMetric(host_id, \"extUser\", data.get(\"user\", \"\"))\n sender_data.append(extension_user)\n\n extension_useragent = ZabbixMetric(host_id, \"extUserAgent\", data.get(\"user_agent\", \"\"))\n sender_data.append(extension_useragent)\n zbx_sender.send(sender_data)\n\n def worker(self, data):\n \"\"\"\n Check host. 
If extension exists - send new data, otherwise - create extension's host in zabbix and send data.\n :param data: dict with data\n :return: host id\n \"\"\"\n print(data)\n host_raw = self.connection.do_request('host.get', {\n 'filter': {'host': data[\"ext\"]},\n 'output': ['hostid']\n }).get(\"result\")\n # print(\"host_raw\", host_raw)\n if host_raw:\n host_id = host_raw[0].get(\"hostid\")\n\n else:\n host_new = self.connection.do_request('host.create', {\"host\" : f\"{data.get('ext')}\",\n \"templates\": [\n {\"templateid\" : self.template_id}\n ],\n \"groups\": [\n {\"groupid\": self.group_id}\n ]\n\n })\n\n host_id = host_new.get(\"result\").get(\"hostids\")[0]\n self.send_data(data)\n",
"step-ids": [
6,
8,
9,
11,
12
]
}
|
[
6,
8,
9,
11,
12
] |
#!/usr/bin/env python3
"""
Python class to access Netonix® WISP Switch WebAPI
** NEITHER THIS CODE NOR THE AUTHOR IS ASSOCIATED WITH NETONIX® IN ANY WAY.**
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
"""
import requests
from requests.exceptions import Timeout
from copy import deepcopy
import time
import json
# Optional dependency: deepdiff is only needed by Netonix.getDiff().
# Catch ImportError specifically (the original bare `except:` would also
# have hidden unrelated failures such as KeyboardInterrupt).
try:
    from deepdiff import DeepDiff
    DIFF = True
except ImportError:
    DIFF = False
class Netonix():
    """Client for the Netonix WISP Switch HTTPS WebAPI.

    Usage: call :meth:`open` to authenticate, then use the config,
    status and maintenance helpers.  All state (session, config, MAC
    table, ...) is kept on the instance; methods raise ``Exception`` on
    HTTP-level failure rather than returning error codes.
    """

    def __init__(self):
        self.ip = None            # management IP, set by open()
        self.s = None             # authenticated requests session, set by open()
        # Short name -> WebAPI endpoint path.
        self.url = {}
        self.url["login"] = "/index.php"
        self.url["backup"] = "/api/v1/backup"
        self.url["config"] = "/api/v1/config"
        self.url["apply"] = "/api/v1/apply"
        self.url["confirm"] = "/api/v1/applystatus"
        self.url["reboot"] = "/api/v1/reboot"
        self.url["restore"] = "/api/v1/restore"
        self.url["mac"] = "/api/v1/mactable"
        self.url["status"] = "/api/v1/status/30sec"
        self.url["id"] = "/api/v1/bootid"
        self.url["update"] = "/api/v1/uploadfirmware"
        self.url["doupdate"] = "/api/v1/upgradefirmware"
        self.config = {}          # last configuration fetched by getConfig()
        self.orig_config = None   # snapshot taken by mergeConfig()/replaceConfig()
        self.mac = {}             # MAC table rows from getMAC()
        self.status = {}          # status blob from getStatus()
        self.id = ""              # boot id string from getID()

    def _get(self, url, params=None, timeout=15, **kwargs):
        """GET the endpoint named by ``url`` (a key of ``self.url``)."""
        full_url = "https://" + self.ip + self.url[url]
        return self.s.get(full_url, params=params, timeout=timeout, **kwargs)

    def _post(self, url, data=None, json=None, timeout=15, **kwargs):
        """POST to the endpoint named by ``url`` (a key of ``self.url``)."""
        full_url = "https://" + self.ip + self.url[url]
        return self.s.post(
            full_url,
            data=data,
            json=json,
            timeout=timeout,
            **kwargs
        )

    @staticmethod
    def _merge_by_key(old, new, key="Number", append=True):
        """Merge the list-of-dicts ``new`` into ``old`` in place.

        Each item of ``new`` is matched against the entries of ``old``
        on the value of ``key``; a match is updated with the new item's
        fields.  An unmatched item is appended to ``old`` when
        ``append`` is true, otherwise ``LookupError`` is raised.
        """
        for item in new:
            found = False
            for old_item in old:
                if key not in old_item:
                    continue
                if old_item[key] != item[key]:
                    continue
                old_item.update(item)
                found = True
                break
            if not found:
                if append:
                    # Bug fix: the original called old_item.append(new),
                    # which targeted a dict (no .append method, and
                    # unbound when ``old`` is empty) and would have
                    # appended the whole list rather than the one item.
                    old.append(item)
                else:
                    raise LookupError()

    def open(self, ip, user, password):
        """Log in to the switch at ``ip`` and keep the session."""
        self.ip = ip
        self.s = requests.session()
        # Netonix switches ship with a self-signed certificate.
        self.s.verify = False
        data = {}
        data["username"] = user
        data["password"] = password
        r = self._post("login", data)
        # The login page answers 200 even on failure; sniff the body.
        if "Invalid username or password" in r.text:
            raise Exception("Invalid username or password")

    def getConfig(self):
        """Fetch the running configuration into ``self.config``."""
        r = self._get("config")
        result = r.json()
        # Only accept a payload that looks like a real config document.
        if "Config_Version" in result:
            self.config = result

    def putConfig(self):
        """Upload ``self.config``, apply it, and confirm the apply.

        Applying may restart the switch's network stack, so the apply
        request is allowed to time out.  The management address may also
        change, so confirmation targets the IPv4 address from the
        config and is retried a few times.
        """
        r = self._post("config", json=self.config)
        try:
            r = self._post("apply")
        except Timeout:
            pass
        # Follow the switch to its (possibly new) management address.
        self.ip = self.config["IPv4_Address"]
        for a in range(5):
            try:
                r = self._post("confirm")
            except Timeout:
                continue
            break
        if r.status_code != requests.codes.ok:
            raise Exception("Config Confirm Request Failed")

    def backup(self, output):
        """Download a configuration backup to the file path ``output``."""
        r = self.s.get("https://" + self.ip + self.url["backup"] + "/" + self.ip)
        if r.status_code != requests.codes.ok:
            raise Exception("Backup Request Failed")
        newFile = open(output, "wb")
        newFile.write(r.content)
        newFile.close()

    def restore(self, i):
        """Restore a configuration backup from file ``i`` and reboot.

        NOTE(review): intentionally disabled -- this method was never
        verified against real hardware, hence the unconditional raise.
        """
        raise Exception("the restore method is still untested.")
        # Unreachable draft implementation, kept for reference.
        newFile = open(i, "rb")
        data = b""  # draft fix: the file is opened binary, accumulate bytes
        for a in newFile:
            data += a
        newFile.close()
        r = self._post("restore", data)
        print(r.json())
        if r.status_code != requests.codes.ok:
            raise Exception("Restore Request Failed")
        r = self._get("reboot")
        return r.json()

    def getMAC(self):
        """Fetch the switch MAC table into ``self.mac``.

        Uses a long timeout: building the table can be slow on a busy
        switch.
        """
        r = self._get("mac", timeout=60)
        if r.status_code != requests.codes.ok:
            raise Exception("Action failed")
        self.mac = r.json()["MACTable"]

    def getID(self):
        """Fetch the current boot id into ``self.id``.

        The timestamp query parameter is a cache buster.
        """
        r = self._get("id", params={"_": time.time()})
        if r.status_code != requests.codes.ok:
            raise Exception("Action failed")
        self.id = r.json()["BootID"]

    def getStatus(self):
        """Fetch the 30-second status blob into ``self.status``.

        The status endpoint expects the boot id as a bare query token,
        so the URL is built by hand instead of via ``params``.
        """
        if self.id == "":
            self.getID()
        r = self.s.get("https://" + self.ip + self.url["status"] + "?%s&_=%d" % (self.id, time.time()))
        if r.status_code != requests.codes.ok:
            raise Exception("Action failed")
        self.status = r.json()

    def update(self, i):
        """Upload the firmware image at path ``i`` and start the upgrade."""
        with open(i, mode='rb') as file:  # b is important -> binary
            data = file.read()
        r = self._post("update", data)
        if r.status_code != requests.codes.ok:
            raise Exception("Firmware Upload Failed")
        r = self._get("doupdate")
        if r.status_code != requests.codes.ok:
            raise Exception("Update Request Failed")

    def mergeConfig(self, config):
        """Merge ``config`` into ``self.config``, keeping a snapshot.

        Ports/LACP/VLANs entries are merged element-wise on their
        natural keys; other list values are concatenated; other dict
        values are skipped (no merge strategy defined); scalar values
        overwrite.  The pre-merge config is saved in ``orig_config``
        for :meth:`getDiff`.
        """
        self.orig_config = deepcopy(self.config)
        for k, v in config.items():
            if k == "Ports":
                self._merge_by_key(self.config[k], v, key="Number")
                continue
            if k == "LACP":
                self._merge_by_key(self.config[k], v, key="Port")
                continue
            if k == "VLANs":
                self._merge_by_key(self.config[k], v, key="ID")
                continue
            if type(v) is dict:
                # No merge strategy defined for nested dicts; skip them.
                continue
            if type(v) is list:
                self.config[k] += v
                continue
            self.config[k] = v

    def replaceConfig(self, config):
        """Overwrite top-level keys of ``self.config`` with ``config``.

        ``Config_Version`` is stripped from the incoming dict so the
        switch's own version stamp is preserved.  The previous config
        is saved in ``orig_config`` for :meth:`getDiff`.
        """
        self.orig_config = deepcopy(self.config)
        if "Config_Version" in config:
            del config["Config_Version"]
        self.config.update(config)

    def getDiff(self):
        """Return a DeepDiff of the config before and after merge/replace.

        Returns an empty dict when no snapshot exists; raises
        ImportError when the optional deepdiff dependency is missing.
        """
        if self.orig_config is None:
            return {}
        if DIFF is False:
            raise ImportError("Missing DeepDiff Module")
        return DeepDiff(
            self.orig_config,
            self.config,
            exclude_paths="root['Config_Version']"
        )
if __name__ == '__main__':
    # Interactive smoke test: prompt for credentials, then dump the
    # switch's MAC table as pretty-printed JSON.
    import getpass
    import urllib3
    # Netonix.open() creates the session with verify=False (self-signed
    # certificate), so silence the resulting TLS warning here.
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    ip = str(input("switch ip:"))
    user = str(input("user:"))
    pw = getpass.getpass("password:")
    n = Netonix()
    n.open(ip, user, pw)
    n.getMAC()
    print(json.dumps(n.mac, indent=4))
    # NOTE(review): the table is fetched and printed a second time,
    # presumably to observe churn between snapshots -- TODO confirm.
    n.getMAC()
    print(json.dumps(n.mac, indent=4))
|
normal
|
{
"blob_id": "743d261052e4532c1304647501719ad897224b4e",
"index": 8991,
"step-1": "<mask token>\n\n\nclass Netonix:\n <mask token>\n\n def _get(self, url, params=None, timeout=15, **kwargs):\n full_url = 'https://' + self.ip + self.url[url]\n return self.s.get(full_url, params=params, timeout=timeout, **kwargs)\n <mask token>\n\n @staticmethod\n def _merge_by_key(old, new, key='Number', append=True):\n for item in new:\n found = False\n for old_item in old:\n if key not in old_item:\n continue\n if old_item[key] != item[key]:\n continue\n old_item.update(item)\n found = True\n break\n if found is False:\n if append is True:\n old_item.append(new)\n else:\n raise LookupError()\n\n def open(self, ip, user, password):\n self.ip = ip\n self.s = requests.session()\n self.s.verify = False\n data = {}\n data['username'] = user\n data['password'] = password\n r = self._post('login', data)\n if 'Invalid username or password' in r.text:\n raise Exception('Invalid username or password')\n <mask token>\n <mask token>\n <mask token>\n\n def restore(self, i):\n raise Exception('the restore method is still untested.')\n newFile = open(i, 'rb')\n data = ''\n for a in newFile:\n data += a\n newFile.close()\n r = self._post('restore', data)\n print(r.json())\n if r.status_code != requests.codes.ok:\n raise Exception('Restore Request Failed')\n r = self._get('reboot')\n return r.json()\n <mask token>\n <mask token>\n\n def getStatus(self):\n if self.id == '':\n self.getID()\n r = self.s.get('https://' + self.ip + self.url['status'] + \n '?%s&_=%d' % (self.id, time.time()))\n if r.status_code != requests.codes.ok:\n raise Exception('Action failed')\n self.status = r.json()\n\n def update(self, i):\n data = ''\n with open(i, mode='rb') as file:\n data = file.read()\n r = self._post('update', data)\n if r.status_code != requests.codes.ok:\n raise Exception('Firmware Upload Failed')\n r = self._get('doupdate')\n if r.status_code != requests.codes.ok:\n raise Exception('Update Request Failed')\n\n def mergeConfig(self, config):\n self.orig_config = 
deepcopy(self.config)\n for k, v in config.items():\n if k == 'Ports':\n self._merge_by_key(self.config[k], v, key='Number')\n continue\n if k == 'LACP':\n self._merge_by_key(self.config[k], v, key='Port')\n continue\n if k == 'VLANs':\n self._merge_by_key(self.config[k], v, key='ID')\n continue\n if type(v) is dict:\n continue\n if type(v) is list:\n self.config[k] += v\n continue\n self.config[k] = v\n\n def replaceConfig(self, config):\n self.orig_config = deepcopy(self.config)\n if 'Config_Version' in config:\n del config['Config_Version']\n self.config.update(config)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Netonix:\n <mask token>\n\n def _get(self, url, params=None, timeout=15, **kwargs):\n full_url = 'https://' + self.ip + self.url[url]\n return self.s.get(full_url, params=params, timeout=timeout, **kwargs)\n <mask token>\n\n @staticmethod\n def _merge_by_key(old, new, key='Number', append=True):\n for item in new:\n found = False\n for old_item in old:\n if key not in old_item:\n continue\n if old_item[key] != item[key]:\n continue\n old_item.update(item)\n found = True\n break\n if found is False:\n if append is True:\n old_item.append(new)\n else:\n raise LookupError()\n\n def open(self, ip, user, password):\n self.ip = ip\n self.s = requests.session()\n self.s.verify = False\n data = {}\n data['username'] = user\n data['password'] = password\n r = self._post('login', data)\n if 'Invalid username or password' in r.text:\n raise Exception('Invalid username or password')\n\n def getConfig(self):\n r = self._get('config')\n result = r.json()\n if 'Config_Version' in result:\n self.config = result\n <mask token>\n\n def backup(self, output):\n r = self.s.get('https://' + self.ip + self.url['backup'] + '/' +\n self.ip)\n if r.status_code != requests.codes.ok:\n raise Exception('Backup Request Failed')\n newFile = open(output, 'wb')\n newFile.write(r.content)\n newFile.close()\n\n def restore(self, i):\n raise Exception('the restore method is still untested.')\n newFile = open(i, 'rb')\n data = ''\n for a in newFile:\n data += a\n newFile.close()\n r = self._post('restore', data)\n print(r.json())\n if r.status_code != requests.codes.ok:\n raise Exception('Restore Request Failed')\n r = self._get('reboot')\n return r.json()\n <mask token>\n <mask token>\n\n def getStatus(self):\n if self.id == '':\n self.getID()\n r = self.s.get('https://' + self.ip + self.url['status'] + \n '?%s&_=%d' % (self.id, time.time()))\n if r.status_code != requests.codes.ok:\n raise Exception('Action failed')\n self.status = r.json()\n\n def update(self, 
i):\n data = ''\n with open(i, mode='rb') as file:\n data = file.read()\n r = self._post('update', data)\n if r.status_code != requests.codes.ok:\n raise Exception('Firmware Upload Failed')\n r = self._get('doupdate')\n if r.status_code != requests.codes.ok:\n raise Exception('Update Request Failed')\n\n def mergeConfig(self, config):\n self.orig_config = deepcopy(self.config)\n for k, v in config.items():\n if k == 'Ports':\n self._merge_by_key(self.config[k], v, key='Number')\n continue\n if k == 'LACP':\n self._merge_by_key(self.config[k], v, key='Port')\n continue\n if k == 'VLANs':\n self._merge_by_key(self.config[k], v, key='ID')\n continue\n if type(v) is dict:\n continue\n if type(v) is list:\n self.config[k] += v\n continue\n self.config[k] = v\n\n def replaceConfig(self, config):\n self.orig_config = deepcopy(self.config)\n if 'Config_Version' in config:\n del config['Config_Version']\n self.config.update(config)\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Netonix:\n <mask token>\n\n def _get(self, url, params=None, timeout=15, **kwargs):\n full_url = 'https://' + self.ip + self.url[url]\n return self.s.get(full_url, params=params, timeout=timeout, **kwargs)\n <mask token>\n\n @staticmethod\n def _merge_by_key(old, new, key='Number', append=True):\n for item in new:\n found = False\n for old_item in old:\n if key not in old_item:\n continue\n if old_item[key] != item[key]:\n continue\n old_item.update(item)\n found = True\n break\n if found is False:\n if append is True:\n old_item.append(new)\n else:\n raise LookupError()\n\n def open(self, ip, user, password):\n self.ip = ip\n self.s = requests.session()\n self.s.verify = False\n data = {}\n data['username'] = user\n data['password'] = password\n r = self._post('login', data)\n if 'Invalid username or password' in r.text:\n raise Exception('Invalid username or password')\n\n def getConfig(self):\n r = self._get('config')\n result = r.json()\n if 'Config_Version' in result:\n self.config = result\n <mask token>\n\n def backup(self, output):\n r = self.s.get('https://' + self.ip + self.url['backup'] + '/' +\n self.ip)\n if r.status_code != requests.codes.ok:\n raise Exception('Backup Request Failed')\n newFile = open(output, 'wb')\n newFile.write(r.content)\n newFile.close()\n\n def restore(self, i):\n raise Exception('the restore method is still untested.')\n newFile = open(i, 'rb')\n data = ''\n for a in newFile:\n data += a\n newFile.close()\n r = self._post('restore', data)\n print(r.json())\n if r.status_code != requests.codes.ok:\n raise Exception('Restore Request Failed')\n r = self._get('reboot')\n return r.json()\n <mask token>\n\n def getID(self):\n r = self._get('id', params={'_': time.time()})\n if r.status_code != requests.codes.ok:\n raise Exception('Action failed')\n self.id = r.json()['BootID']\n\n def getStatus(self):\n if self.id == '':\n self.getID()\n r = self.s.get('https://' + self.ip + self.url['status'] + \n 
'?%s&_=%d' % (self.id, time.time()))\n if r.status_code != requests.codes.ok:\n raise Exception('Action failed')\n self.status = r.json()\n\n def update(self, i):\n data = ''\n with open(i, mode='rb') as file:\n data = file.read()\n r = self._post('update', data)\n if r.status_code != requests.codes.ok:\n raise Exception('Firmware Upload Failed')\n r = self._get('doupdate')\n if r.status_code != requests.codes.ok:\n raise Exception('Update Request Failed')\n\n def mergeConfig(self, config):\n self.orig_config = deepcopy(self.config)\n for k, v in config.items():\n if k == 'Ports':\n self._merge_by_key(self.config[k], v, key='Number')\n continue\n if k == 'LACP':\n self._merge_by_key(self.config[k], v, key='Port')\n continue\n if k == 'VLANs':\n self._merge_by_key(self.config[k], v, key='ID')\n continue\n if type(v) is dict:\n continue\n if type(v) is list:\n self.config[k] += v\n continue\n self.config[k] = v\n\n def replaceConfig(self, config):\n self.orig_config = deepcopy(self.config)\n if 'Config_Version' in config:\n del config['Config_Version']\n self.config.update(config)\n\n def getDiff(self):\n if self.orig_config is None:\n return {}\n if DIFF is False:\n raise ImportError('Missing DeepDiff Module')\n return DeepDiff(self.orig_config, self.config, exclude_paths=\n \"root['Config_Version']\")\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Netonix:\n\n def __init__(self):\n self.ip = None\n self.s = None\n self.url = {}\n self.url['login'] = '/index.php'\n self.url['backup'] = '/api/v1/backup'\n self.url['config'] = '/api/v1/config'\n self.url['apply'] = '/api/v1/apply'\n self.url['confirm'] = '/api/v1/applystatus'\n self.url['reboot'] = '/api/v1/reboot'\n self.url['restore'] = '/api/v1/restore'\n self.url['mac'] = '/api/v1/mactable'\n self.url['status'] = '/api/v1/status/30sec'\n self.url['id'] = '/api/v1/bootid'\n self.url['update'] = '/api/v1/uploadfirmware'\n self.url['doupdate'] = '/api/v1/upgradefirmware'\n self.config = {}\n self.orig_config = None\n self.mac = {}\n self.status = {}\n self.id = ''\n\n def _get(self, url, params=None, timeout=15, **kwargs):\n full_url = 'https://' + self.ip + self.url[url]\n return self.s.get(full_url, params=params, timeout=timeout, **kwargs)\n <mask token>\n\n @staticmethod\n def _merge_by_key(old, new, key='Number', append=True):\n for item in new:\n found = False\n for old_item in old:\n if key not in old_item:\n continue\n if old_item[key] != item[key]:\n continue\n old_item.update(item)\n found = True\n break\n if found is False:\n if append is True:\n old_item.append(new)\n else:\n raise LookupError()\n\n def open(self, ip, user, password):\n self.ip = ip\n self.s = requests.session()\n self.s.verify = False\n data = {}\n data['username'] = user\n data['password'] = password\n r = self._post('login', data)\n if 'Invalid username or password' in r.text:\n raise Exception('Invalid username or password')\n\n def getConfig(self):\n r = self._get('config')\n result = r.json()\n if 'Config_Version' in result:\n self.config = result\n <mask token>\n\n def backup(self, output):\n r = self.s.get('https://' + self.ip + self.url['backup'] + '/' +\n self.ip)\n if r.status_code != requests.codes.ok:\n raise Exception('Backup Request Failed')\n newFile = open(output, 'wb')\n newFile.write(r.content)\n newFile.close()\n\n def 
restore(self, i):\n raise Exception('the restore method is still untested.')\n newFile = open(i, 'rb')\n data = ''\n for a in newFile:\n data += a\n newFile.close()\n r = self._post('restore', data)\n print(r.json())\n if r.status_code != requests.codes.ok:\n raise Exception('Restore Request Failed')\n r = self._get('reboot')\n return r.json()\n <mask token>\n\n def getID(self):\n r = self._get('id', params={'_': time.time()})\n if r.status_code != requests.codes.ok:\n raise Exception('Action failed')\n self.id = r.json()['BootID']\n\n def getStatus(self):\n if self.id == '':\n self.getID()\n r = self.s.get('https://' + self.ip + self.url['status'] + \n '?%s&_=%d' % (self.id, time.time()))\n if r.status_code != requests.codes.ok:\n raise Exception('Action failed')\n self.status = r.json()\n\n def update(self, i):\n data = ''\n with open(i, mode='rb') as file:\n data = file.read()\n r = self._post('update', data)\n if r.status_code != requests.codes.ok:\n raise Exception('Firmware Upload Failed')\n r = self._get('doupdate')\n if r.status_code != requests.codes.ok:\n raise Exception('Update Request Failed')\n\n def mergeConfig(self, config):\n self.orig_config = deepcopy(self.config)\n for k, v in config.items():\n if k == 'Ports':\n self._merge_by_key(self.config[k], v, key='Number')\n continue\n if k == 'LACP':\n self._merge_by_key(self.config[k], v, key='Port')\n continue\n if k == 'VLANs':\n self._merge_by_key(self.config[k], v, key='ID')\n continue\n if type(v) is dict:\n continue\n if type(v) is list:\n self.config[k] += v\n continue\n self.config[k] = v\n\n def replaceConfig(self, config):\n self.orig_config = deepcopy(self.config)\n if 'Config_Version' in config:\n del config['Config_Version']\n self.config.update(config)\n\n def getDiff(self):\n if self.orig_config is None:\n return {}\n if DIFF is False:\n raise ImportError('Missing DeepDiff Module')\n return DeepDiff(self.orig_config, self.config, exclude_paths=\n \"root['Config_Version']\")\n\n\n<mask 
token>\n",
"step-5": "#!/usr/bin/env python3\n\"\"\"\nPython class to access Netonix® WISP Switch WebAPI\n\n** NEITHER THIS CODE NOR THE AUTHOR IS ASSOCIATED WITH NETONIX® IN ANY WAY.**\n\nThis is free and unencumbered software released into the public domain.\n\nAnyone is free to copy, modify, publish, use, compile, sell, or\ndistribute this software, either in source code form or as a compiled\nbinary, for any purpose, commercial or non-commercial, and by any\nmeans.\n\nIn jurisdictions that recognize copyright laws, the author or authors\nof this software dedicate any and all copyright interest in the\nsoftware to the public domain. We make this dedication for the benefit\nof the public at large and to the detriment of our heirs and\nsuccessors. We intend this dedication to be an overt act of\nrelinquishment in perpetuity of all present and future rights to this\nsoftware under copyright law.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR\nOTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\nARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\nOTHER DEALINGS IN THE SOFTWARE.\n\nFor more information, please refer to <http://unlicense.org/>\n\"\"\"\n\nimport requests\nfrom requests.exceptions import Timeout\nfrom copy import deepcopy\nimport time\nimport json\ntry:\n from deepdiff import DeepDiff\n DIFF = True\nexcept:\n DIFF = False\n\nclass Netonix():\n def __init__(self):\n self.ip = None\n self.s = None\n self.url = {}\n self.url[\"login\"] = \"/index.php\"\n self.url[\"backup\"] = \"/api/v1/backup\"\n self.url[\"config\"] = \"/api/v1/config\"\n self.url[\"apply\"] = \"/api/v1/apply\"\n self.url[\"confirm\"] = \"/api/v1/applystatus\"\n self.url[\"reboot\"] = \"/api/v1/reboot\"\n self.url[\"restore\"] = 
\"/api/v1/restore\"\n self.url[\"mac\"] = \"/api/v1/mactable\"\n self.url[\"status\"] = \"/api/v1/status/30sec\"\n self.url[\"id\"] = \"/api/v1/bootid\"\n self.url[\"update\"] = \"/api/v1/uploadfirmware\"\n self.url[\"doupdate\"] = \"/api/v1/upgradefirmware\"\n self.config = {}\n self.orig_config = None\n self.mac = {}\n self.status = {}\n self.id = \"\"\n\n def _get(self, url, params=None, timeout=15, **kwargs):\n full_url = \"https://\"+self.ip+self.url[url]\n return self.s.get(full_url, params=params, timeout=timeout, **kwargs)\n\n def _post(self, url, data=None, json=None, timeout=15, **kwargs):\n full_url = \"https://\"+self.ip+self.url[url]\n return self.s.post(\n full_url,\n data=data,\n json=json,\n timeout=timeout,\n **kwargs\n )\n\n @staticmethod\n def _merge_by_key(old, new, key=\"Number\", append=True):\n for item in new:\n found = False\n for old_item in old:\n if(key not in old_item):\n continue\n if(old_item[key] != item[key]):\n continue\n old_item.update(item)\n found = True\n break\n if(found is False):\n if(append is True):\n old_item.append(new)\n else:\n raise LookupError()\n\n def open(self, ip, user, password):\n self.ip = ip\n self.s = requests.session()\n self.s.verify = False\n data = {}\n data[\"username\"] = user\n data[\"password\"] = password\n r = self._post(\"login\", data)\n if(\"Invalid username or password\" in r.text):\n raise Exception(\"Invalid username or password\")\n\n def getConfig(self):\n r = self._get(\"config\")\n result = r.json()\n if(\"Config_Version\" in result):\n self.config = result\n\n def putConfig(self):\n r = self._post(\"config\", json=self.config)\n try:\n r = self._post(\"apply\")\n except Timeout:\n pass\n self.ip = self.config[\"IPv4_Address\"]\n for a in range(5):\n try:\n r = self._post(\"confirm\")\n except Timeout:\n continue\n break\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Config Confirm Request Failed\")\n # return r.json()\n\n def backup(self, output):\n r = 
self.s.get(\"https://\"+self.ip+self.url[\"backup\"]+\"/\"+self.ip)\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Backup Request Failed\")\n newFile = open(output, \"wb\")\n newFile.write(r.content)\n newFile.close()\n\n def restore(self, i):\n raise Exception(\"the restore method is still untested.\")\n newFile = open(i, \"rb\")\n data = \"\"\n for a in newFile:\n data += a\n newFile.close()\n r = self._post(\"restore\", data)\n print(r.json())\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Restore Request Failed\")\n r = self._get(\"reboot\")\n return r.json()\n\n def getMAC(self):\n r = self._get(\"mac\", timeout=60)\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Action failed\")\n self.mac = r.json()[\"MACTable\"]\n\n def getID(self):\n r = self._get(\"id\", params={\"_\": time.time()})\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Action failed\")\n self.id = r.json()[\"BootID\"]\n\n def getStatus(self):\n if(self.id == \"\"):\n self.getID()\n r = self.s.get(\"https://\"+self.ip+self.url[\"status\"]+\"?%s&_=%d\" % (self.id, time.time()))\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Action failed\")\n self.status = r.json()\n\n def update(self, i):\n data = \"\"\n with open(i, mode='rb') as file: # b is important -> binary\n data = file.read()\n r = self._post(\"update\", data)\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Firmware Upload Failed\")\n r = self._get(\"doupdate\")\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Update Request Failed\")\n\n def mergeConfig(self, config):\n self.orig_config = deepcopy(self.config)\n\n for k, v in config.items():\n if(k == \"Ports\"):\n self._merge_by_key(self.config[k], v, key=\"Number\")\n continue\n if(k == \"LACP\"):\n self._merge_by_key(self.config[k], v, key=\"Port\")\n continue\n if(k == \"VLANs\"):\n self._merge_by_key(self.config[k], v, key=\"ID\")\n continue\n if(type(v) is dict):\n continue\n 
if(type(v) is list):\n self.config[k] += v\n continue\n self.config[k] = v\n\n def replaceConfig(self, config):\n self.orig_config = deepcopy(self.config)\n\n if(\"Config_Version\" in config):\n del config[\"Config_Version\"]\n self.config.update(config)\n\n def getDiff(self):\n if(self.orig_config is None):\n return {}\n if(DIFF is False):\n raise ImportError(\"Missing DeepDiff Module\")\n return DeepDiff(\n self.orig_config,\n self.config,\n exclude_paths=\"root['Config_Version']\"\n )\n\n\nif __name__ == '__main__':\n import getpass\n import urllib3\n\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n ip = str(input(\"switch ip:\"))\n user = str(input(\"user:\"))\n pw = getpass.getpass(\"password:\")\n n = Netonix()\n n.open(ip, user, pw)\n n.getMAC()\n print(json.dumps(n.mac, indent=4))\n n.getMAC()\n print(json.dumps(n.mac, indent=4))\n",
"step-ids": [
9,
11,
13,
14,
20
]
}
|
[
9,
11,
13,
14,
20
] |
import tensorflow as tf
import numpy as np
import math
import sys
import os
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tf_util
# from transform_nets import input_transform_net, feature_transform_net
import tf_util_loss
class Network:
def placeholder_inputs(self,batch_size, num_point):
# with tf.variable_scope('inputs') as ip:
source_pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
return source_pointclouds_pl
def get_model(self, source_pointclouds_pl, feature_size, is_training, bn_decay=None):
""" Classification PointNet, input is BxNx3, output Bx40 """
# with tf.variable_scope('PointNet') as pn:
# Comment above two lines to have same points for loss and features and also change the variable names in the next line.
batch_size = source_pointclouds_pl.get_shape()[0].value
num_point = source_pointclouds_pl.get_shape()[1].value
end_points = {}
input_image = tf.expand_dims(source_pointclouds_pl, -1)
net = tf_util.conv2d(input_image, 128, [1,3],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 256, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv2', bn_decay=bn_decay, activation_fn=None)
# Symmetric function: max pooling
source_feature = tf_util.max_pool2d(net, [num_point, 1],
padding='VALID', scope='maxpool')
source_feature = tf.tile(source_feature, [1, num_point, 1, 1])
source_feature = tf.concat([net, source_feature], axis=3)
net = tf_util.conv2d(source_feature, 512, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv3', bn_decay=bn_decay)
net = tf_util.conv2d(net, 1024, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv4', bn_decay=bn_decay, activation_fn=None)
source_global_feature = tf_util.max_pool2d(net, [num_point, 1],
padding='VALID', scope='maxpool')
source_global_feature = tf.reshape(source_global_feature, [batch_size, -1])
return source_global_feature
def decode_data(self, source_global_feature, is_training, bn_decay=None):
batch_size = source_global_feature.get_shape()[0].value
net = tf_util.fully_connected(source_global_feature, 1024, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
net = tf_util.fully_connected(net, 1024, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
net = tf_util.fully_connected(net, 1024*3, activation_fn=None, scope='fc3')
predicted_pointclouds_pl = tf.reshape(net, [batch_size, 1024, 3])
return predicted_pointclouds_pl
def get_loss_b(self, predicted_pointclouds_pl, source_pointclouds_pl):
with tf.variable_scope('loss') as LossEvaluation:
# loss = tf.reduce_mean(tf.square(tf.subtract(predicted_pointclouds_pl, source_pointclouds_pl)))
loss = tf_util_loss.chamfer(predicted_pointclouds_pl, source_pointclouds_pl)
return loss
if __name__=='__main__':
with tf.Graph().as_default():
net = Network()
inputs = tf.zeros((32,1024,3))
outputs = net.get_model(inputs, 1024, tf.constant(True))
print(outputs)
|
normal
|
{
"blob_id": "e4a0f26afe8c78e4abbd85834c96ed5ba84e1f0b",
"index": 3894,
"step-1": "<mask token>\n\n\nclass Network:\n\n def placeholder_inputs(self, batch_size, num_point):\n source_pointclouds_pl = tf.placeholder(tf.float32, shape=(\n batch_size, num_point, 3))\n return source_pointclouds_pl\n\n def get_model(self, source_pointclouds_pl, feature_size, is_training,\n bn_decay=None):\n \"\"\" Classification PointNet, input is BxNx3, output Bx40 \"\"\"\n batch_size = source_pointclouds_pl.get_shape()[0].value\n num_point = source_pointclouds_pl.get_shape()[1].value\n end_points = {}\n input_image = tf.expand_dims(source_pointclouds_pl, -1)\n net = tf_util.conv2d(input_image, 128, [1, 3], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv1',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1, \n 1], bn=True, is_training=is_training, scope='conv2', bn_decay=\n bn_decay, activation_fn=None)\n source_feature = tf_util.max_pool2d(net, [num_point, 1], padding=\n 'VALID', scope='maxpool')\n source_feature = tf.tile(source_feature, [1, num_point, 1, 1])\n source_feature = tf.concat([net, source_feature], axis=3)\n net = tf_util.conv2d(source_feature, 512, [1, 1], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv3',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1,\n 1], bn=True, is_training=is_training, scope='conv4', bn_decay=\n bn_decay, activation_fn=None)\n source_global_feature = tf_util.max_pool2d(net, [num_point, 1],\n padding='VALID', scope='maxpool')\n source_global_feature = tf.reshape(source_global_feature, [\n batch_size, -1])\n return source_global_feature\n\n def decode_data(self, source_global_feature, is_training, bn_decay=None):\n batch_size = source_global_feature.get_shape()[0].value\n net = tf_util.fully_connected(source_global_feature, 1024, bn=True,\n is_training=is_training, scope='fc1', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024, bn=True, is_training=\n is_training, 
scope='fc2', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024 * 3, activation_fn=None,\n scope='fc3')\n predicted_pointclouds_pl = tf.reshape(net, [batch_size, 1024, 3])\n return predicted_pointclouds_pl\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, '../utils'))\n<mask token>\n\n\nclass Network:\n\n def placeholder_inputs(self, batch_size, num_point):\n source_pointclouds_pl = tf.placeholder(tf.float32, shape=(\n batch_size, num_point, 3))\n return source_pointclouds_pl\n\n def get_model(self, source_pointclouds_pl, feature_size, is_training,\n bn_decay=None):\n \"\"\" Classification PointNet, input is BxNx3, output Bx40 \"\"\"\n batch_size = source_pointclouds_pl.get_shape()[0].value\n num_point = source_pointclouds_pl.get_shape()[1].value\n end_points = {}\n input_image = tf.expand_dims(source_pointclouds_pl, -1)\n net = tf_util.conv2d(input_image, 128, [1, 3], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv1',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1, \n 1], bn=True, is_training=is_training, scope='conv2', bn_decay=\n bn_decay, activation_fn=None)\n source_feature = tf_util.max_pool2d(net, [num_point, 1], padding=\n 'VALID', scope='maxpool')\n source_feature = tf.tile(source_feature, [1, num_point, 1, 1])\n source_feature = tf.concat([net, source_feature], axis=3)\n net = tf_util.conv2d(source_feature, 512, [1, 1], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv3',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1,\n 1], bn=True, is_training=is_training, scope='conv4', bn_decay=\n bn_decay, activation_fn=None)\n source_global_feature = tf_util.max_pool2d(net, [num_point, 1],\n padding='VALID', scope='maxpool')\n source_global_feature = tf.reshape(source_global_feature, [\n batch_size, -1])\n return source_global_feature\n\n def decode_data(self, source_global_feature, is_training, bn_decay=None):\n batch_size = source_global_feature.get_shape()[0].value\n net = tf_util.fully_connected(source_global_feature, 1024, bn=True,\n is_training=is_training, scope='fc1', 
bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024, bn=True, is_training=\n is_training, scope='fc2', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024 * 3, activation_fn=None,\n scope='fc3')\n predicted_pointclouds_pl = tf.reshape(net, [batch_size, 1024, 3])\n return predicted_pointclouds_pl\n\n def get_loss_b(self, predicted_pointclouds_pl, source_pointclouds_pl):\n with tf.variable_scope('loss') as LossEvaluation:\n loss = tf_util_loss.chamfer(predicted_pointclouds_pl,\n source_pointclouds_pl)\n return loss\n\n\nif __name__ == '__main__':\n with tf.Graph().as_default():\n net = Network()\n inputs = tf.zeros((32, 1024, 3))\n outputs = net.get_model(inputs, 1024, tf.constant(True))\n print(outputs)\n",
"step-3": "<mask token>\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, '../utils'))\n<mask token>\n\n\nclass Network:\n\n def placeholder_inputs(self, batch_size, num_point):\n source_pointclouds_pl = tf.placeholder(tf.float32, shape=(\n batch_size, num_point, 3))\n return source_pointclouds_pl\n\n def get_model(self, source_pointclouds_pl, feature_size, is_training,\n bn_decay=None):\n \"\"\" Classification PointNet, input is BxNx3, output Bx40 \"\"\"\n batch_size = source_pointclouds_pl.get_shape()[0].value\n num_point = source_pointclouds_pl.get_shape()[1].value\n end_points = {}\n input_image = tf.expand_dims(source_pointclouds_pl, -1)\n net = tf_util.conv2d(input_image, 128, [1, 3], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv1',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1, \n 1], bn=True, is_training=is_training, scope='conv2', bn_decay=\n bn_decay, activation_fn=None)\n source_feature = tf_util.max_pool2d(net, [num_point, 1], padding=\n 'VALID', scope='maxpool')\n source_feature = tf.tile(source_feature, [1, num_point, 1, 1])\n source_feature = tf.concat([net, source_feature], axis=3)\n net = tf_util.conv2d(source_feature, 512, [1, 1], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv3',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1,\n 1], bn=True, is_training=is_training, scope='conv4', bn_decay=\n bn_decay, activation_fn=None)\n source_global_feature = tf_util.max_pool2d(net, [num_point, 1],\n padding='VALID', scope='maxpool')\n source_global_feature = tf.reshape(source_global_feature, [\n batch_size, -1])\n return source_global_feature\n\n def decode_data(self, source_global_feature, is_training, bn_decay=None):\n batch_size = source_global_feature.get_shape()[0].value\n net = tf_util.fully_connected(source_global_feature, 1024, 
bn=True,\n is_training=is_training, scope='fc1', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024, bn=True, is_training=\n is_training, scope='fc2', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024 * 3, activation_fn=None,\n scope='fc3')\n predicted_pointclouds_pl = tf.reshape(net, [batch_size, 1024, 3])\n return predicted_pointclouds_pl\n\n def get_loss_b(self, predicted_pointclouds_pl, source_pointclouds_pl):\n with tf.variable_scope('loss') as LossEvaluation:\n loss = tf_util_loss.chamfer(predicted_pointclouds_pl,\n source_pointclouds_pl)\n return loss\n\n\nif __name__ == '__main__':\n with tf.Graph().as_default():\n net = Network()\n inputs = tf.zeros((32, 1024, 3))\n outputs = net.get_model(inputs, 1024, tf.constant(True))\n print(outputs)\n",
"step-4": "import tensorflow as tf\nimport numpy as np\nimport math\nimport sys\nimport os\nimport numpy as np\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, '../utils'))\nimport tf_util\nimport tf_util_loss\n\n\nclass Network:\n\n def placeholder_inputs(self, batch_size, num_point):\n source_pointclouds_pl = tf.placeholder(tf.float32, shape=(\n batch_size, num_point, 3))\n return source_pointclouds_pl\n\n def get_model(self, source_pointclouds_pl, feature_size, is_training,\n bn_decay=None):\n \"\"\" Classification PointNet, input is BxNx3, output Bx40 \"\"\"\n batch_size = source_pointclouds_pl.get_shape()[0].value\n num_point = source_pointclouds_pl.get_shape()[1].value\n end_points = {}\n input_image = tf.expand_dims(source_pointclouds_pl, -1)\n net = tf_util.conv2d(input_image, 128, [1, 3], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv1',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1, \n 1], bn=True, is_training=is_training, scope='conv2', bn_decay=\n bn_decay, activation_fn=None)\n source_feature = tf_util.max_pool2d(net, [num_point, 1], padding=\n 'VALID', scope='maxpool')\n source_feature = tf.tile(source_feature, [1, num_point, 1, 1])\n source_feature = tf.concat([net, source_feature], axis=3)\n net = tf_util.conv2d(source_feature, 512, [1, 1], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv3',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1,\n 1], bn=True, is_training=is_training, scope='conv4', bn_decay=\n bn_decay, activation_fn=None)\n source_global_feature = tf_util.max_pool2d(net, [num_point, 1],\n padding='VALID', scope='maxpool')\n source_global_feature = tf.reshape(source_global_feature, [\n batch_size, -1])\n return source_global_feature\n\n def decode_data(self, source_global_feature, is_training, bn_decay=None):\n 
batch_size = source_global_feature.get_shape()[0].value\n net = tf_util.fully_connected(source_global_feature, 1024, bn=True,\n is_training=is_training, scope='fc1', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024, bn=True, is_training=\n is_training, scope='fc2', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024 * 3, activation_fn=None,\n scope='fc3')\n predicted_pointclouds_pl = tf.reshape(net, [batch_size, 1024, 3])\n return predicted_pointclouds_pl\n\n def get_loss_b(self, predicted_pointclouds_pl, source_pointclouds_pl):\n with tf.variable_scope('loss') as LossEvaluation:\n loss = tf_util_loss.chamfer(predicted_pointclouds_pl,\n source_pointclouds_pl)\n return loss\n\n\nif __name__ == '__main__':\n with tf.Graph().as_default():\n net = Network()\n inputs = tf.zeros((32, 1024, 3))\n outputs = net.get_model(inputs, 1024, tf.constant(True))\n print(outputs)\n",
"step-5": "import tensorflow as tf\nimport numpy as np\nimport math\nimport sys\nimport os\nimport numpy as np\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, '../utils'))\nimport tf_util\n# from transform_nets import input_transform_net, feature_transform_net\nimport tf_util_loss\n\nclass Network:\n\tdef placeholder_inputs(self,batch_size, num_point):\n\t\t# with tf.variable_scope('inputs') as ip:\n\t\tsource_pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))\n\t\treturn source_pointclouds_pl\n\n\tdef get_model(self, source_pointclouds_pl, feature_size, is_training, bn_decay=None):\n\t\t\"\"\" Classification PointNet, input is BxNx3, output Bx40 \"\"\"\n\t\t# with tf.variable_scope('PointNet') as pn:\n\n\t\t# Comment above two lines to have same points for loss and features and also change the variable names in the next line.\n\t\tbatch_size = source_pointclouds_pl.get_shape()[0].value\n\t\tnum_point = source_pointclouds_pl.get_shape()[1].value\n\t\tend_points = {}\n\n\t\tinput_image = tf.expand_dims(source_pointclouds_pl, -1)\n\n\t\tnet = tf_util.conv2d(input_image, 128, [1,3],\n\t\t\t\t\t\t\t padding='VALID', stride=[1,1],\n\t\t\t\t\t\t\t bn=True, is_training=is_training,\n\t\t\t\t\t\t\t scope='conv1', bn_decay=bn_decay)\n\n\t\tnet = tf_util.conv2d(net, 256, [1,1],\n\t\t\t\t\t\t\t padding='VALID', stride=[1,1],\n\t\t\t\t\t\t\t bn=True, is_training=is_training,\n\t\t\t\t\t\t\t scope='conv2', bn_decay=bn_decay, activation_fn=None)\n\n\t\t# Symmetric function: max pooling\n\t\tsource_feature = tf_util.max_pool2d(net, [num_point, 1],\n\t\t\t\t\t\t\t\t padding='VALID', scope='maxpool')\n\t\tsource_feature = tf.tile(source_feature, [1, num_point, 1, 1])\n\t\tsource_feature = tf.concat([net, source_feature], axis=3)\n\t\t\n\t\tnet = tf_util.conv2d(source_feature, 512, [1,1],\n\t\t \t\t\t\t\t padding='VALID', stride=[1,1],\n\t\t\t\t\t\t\t bn=True, 
is_training=is_training,\n\t\t\t\t\t\t\t scope='conv3', bn_decay=bn_decay)\n\n\t\tnet = tf_util.conv2d(net, 1024, [1,1],\n\t\t \t\t\t\t\t padding='VALID', stride=[1,1],\n\t\t \t\t\t\t\t bn=True, is_training=is_training,\n\t\t \t\t\t\t\t scope='conv4', bn_decay=bn_decay, activation_fn=None)\n\t\tsource_global_feature = tf_util.max_pool2d(net, [num_point, 1],\n\t\t \t\t\t\t\t\t padding='VALID', scope='maxpool')\n\t\tsource_global_feature = tf.reshape(source_global_feature, [batch_size, -1])\n\n\t\treturn source_global_feature\n\n\tdef decode_data(self, source_global_feature, is_training, bn_decay=None):\n\t\tbatch_size = source_global_feature.get_shape()[0].value\n\t\tnet = tf_util.fully_connected(source_global_feature, 1024, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)\n\t\tnet = tf_util.fully_connected(net, 1024, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)\t\t\n\t\tnet = tf_util.fully_connected(net, 1024*3, activation_fn=None, scope='fc3')\n\t\tpredicted_pointclouds_pl = tf.reshape(net, [batch_size, 1024, 3])\n\t\treturn predicted_pointclouds_pl\n\n\tdef get_loss_b(self, predicted_pointclouds_pl, source_pointclouds_pl):\n\t\twith tf.variable_scope('loss') as LossEvaluation:\n\t\t\t# loss = tf.reduce_mean(tf.square(tf.subtract(predicted_pointclouds_pl, source_pointclouds_pl)))\n\t\t\tloss = tf_util_loss.chamfer(predicted_pointclouds_pl, source_pointclouds_pl)\n\t\treturn loss\n\nif __name__=='__main__':\n\twith tf.Graph().as_default():\n\t\tnet = Network()\n\t\tinputs = tf.zeros((32,1024,3))\n\t\toutputs = net.get_model(inputs, 1024, tf.constant(True))\n\t\tprint(outputs)",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
"""
It's annoying that we have to do it here but for something like Ant, we're not going to be able to
specify it easily inside of the rbf_hyper_parameters file. Because, for something like Ant, we have
2 COM dimensions, and Bipedal we have 1.
So, we're going to do something similar to shaping_functions.
The way it'll work is, to make it modular, we'll take in a single string that we then separate out to
get the scaling. I like that. So, it'll be something like, for Ant:
{
uniform: func_get_ones()
special_loc_scaling: func_special_loc(com, rest_state, actions)
}
There's an argument for making these things know about the environment already. Only because we need
the state and action dimensions. So maybe you pass the environment into the constructor?
It's sort of annoying -- do we do the automatic scaling or not? I'd say leave the option, for something like Ant,
it's unavoidable to use it, even though it does make the problem non-stationary.
And it figures out the rest from there.
So, in the end this will just return an array.
"""
import numpy as np
def uniform_scaling(*args, **kwargs):
return 1.
def action_scaling(env, action_scaler):
"""
This is actually going to just be "action scaling". Because,
it's all about the ratio, and the ratio doesn't change!
"""
try:
state_dim = len(env.observation_space.low)
except AttributeError:
print("Using dm_control so need to get state_dim differently")
state_dim = len(env.observation_space['observations'].low)
action_dim = len(env.action_space.low)
# state_scaling = float(state_scaling)
action_scaler = float(action_scaler)
state_scaler_array = np.ones((state_dim,), dtype=np.float32)
action_scaler_array = np.ones((action_dim,), dtype=np.float32) * action_scaler
return np.concatenate([state_scaler_array, action_scaler_array], axis=0)
def per_dim_scaling(env, *args):
try:
state_dim = len(env.observation_space.low)
except AttributeError:
print("Using dm_control so need to get state_dim differently")
state_dim = len(env.observation_space['observations'].low)
action_dim = len(env.action_space.low)
assert len(args) == state_dim + action_dim
return np.array(args, dtype=np.float32)
def ant_maze_scaling(env, com_scaling, other_feature_scaling, action_scaling):
"""
Not sure how this is correct, but: I'm assuming that the COM is the first 2 states. Then,
the rest of the state is the pos/vel of everything.
"""
state_dim = len(env.observation_space.low)
action_dim = len(env.action_space.low)
num_com_features = 2
num_other_features = state_dim - num_com_features
com_scaler = np.ones((num_com_features,), dtype=np.float32) * float(com_scaling)
other_feature_scaler = np.ones((num_other_features,), dtype=np.float32) * float(other_feature_scaling)
action_scaler = np.ones((action_dim,), dtype=np.float32) * float(action_scaling)
return np.concatenate([com_scaler, other_feature_scaler, action_scaler], axis=0)
# assert
print("Just a note that you should PROBABLY be normalizing one way or another for this one.")
"""
This has an interesting interface -- scaling_string is a string where the arguments are double-underscore-separated.
That lets us pass stuff in through a CLI interface a bit easier.
"""
_SCALING_FUNCTIONS = {
'action_scaling': action_scaling,
'per_dim_scaling': per_dim_scaling,
'ant_maze_scaling': ant_maze_scaling,
}
def get_scaling_array(env, scaling_function_string):
scaling_string_parsed = scaling_function_string.split("__")
scaling_method, scaling_args = scaling_string_parsed[0], scaling_string_parsed[1:]
scaling_array = _SCALING_FUNCTIONS[scaling_method](env, *scaling_args)
return scaling_array
# class ScalingFunctions:
# """
# This has an interesting interface -- scaling_string is a string where the arguments are double-underscore-separated.
# That lets us pass stuff in through a CLI interface a bit easier.
# """
# SCALING_FUNCTIONS = {
# 'state_action_scaling': state_action_scaling,
# 'per_dim_scaling': per_dim_scaling
# }
# def __init__(self, env, scaling_string):
# scaling_string_parsed = scaling_string.split("__")
# scaling_method, scaling_args = scaling_string_parsed[0], scaling_string_parsed[1:]
# scaling_array = self.SCALING_FUNCTIONS[scaling_method](env, *scaling_args)
# # return scaling_array
|
normal
|
{
"blob_id": "5529813e10e4a30a60c28242be9d1a8822fb58af",
"index": 9685,
"step-1": "<mask token>\n\n\ndef action_scaling(env, action_scaler):\n \"\"\"\n This is actually going to just be \"action scaling\". Because,\n it's all about the ratio, and the ratio doesn't change!\n \"\"\"\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print('Using dm_control so need to get state_dim differently')\n state_dim = len(env.observation_space['observations'].low)\n action_dim = len(env.action_space.low)\n action_scaler = float(action_scaler)\n state_scaler_array = np.ones((state_dim,), dtype=np.float32)\n action_scaler_array = np.ones((action_dim,), dtype=np.float32\n ) * action_scaler\n return np.concatenate([state_scaler_array, action_scaler_array], axis=0)\n\n\ndef per_dim_scaling(env, *args):\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print('Using dm_control so need to get state_dim differently')\n state_dim = len(env.observation_space['observations'].low)\n action_dim = len(env.action_space.low)\n assert len(args) == state_dim + action_dim\n return np.array(args, dtype=np.float32)\n\n\n<mask token>\n\n\ndef get_scaling_array(env, scaling_function_string):\n scaling_string_parsed = scaling_function_string.split('__')\n scaling_method, scaling_args = scaling_string_parsed[0\n ], scaling_string_parsed[1:]\n scaling_array = _SCALING_FUNCTIONS[scaling_method](env, *scaling_args)\n return scaling_array\n",
"step-2": "<mask token>\n\n\ndef uniform_scaling(*args, **kwargs):\n return 1.0\n\n\ndef action_scaling(env, action_scaler):\n \"\"\"\n This is actually going to just be \"action scaling\". Because,\n it's all about the ratio, and the ratio doesn't change!\n \"\"\"\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print('Using dm_control so need to get state_dim differently')\n state_dim = len(env.observation_space['observations'].low)\n action_dim = len(env.action_space.low)\n action_scaler = float(action_scaler)\n state_scaler_array = np.ones((state_dim,), dtype=np.float32)\n action_scaler_array = np.ones((action_dim,), dtype=np.float32\n ) * action_scaler\n return np.concatenate([state_scaler_array, action_scaler_array], axis=0)\n\n\ndef per_dim_scaling(env, *args):\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print('Using dm_control so need to get state_dim differently')\n state_dim = len(env.observation_space['observations'].low)\n action_dim = len(env.action_space.low)\n assert len(args) == state_dim + action_dim\n return np.array(args, dtype=np.float32)\n\n\n<mask token>\n\n\ndef get_scaling_array(env, scaling_function_string):\n scaling_string_parsed = scaling_function_string.split('__')\n scaling_method, scaling_args = scaling_string_parsed[0\n ], scaling_string_parsed[1:]\n scaling_array = _SCALING_FUNCTIONS[scaling_method](env, *scaling_args)\n return scaling_array\n",
"step-3": "<mask token>\n\n\ndef uniform_scaling(*args, **kwargs):\n return 1.0\n\n\ndef action_scaling(env, action_scaler):\n \"\"\"\n This is actually going to just be \"action scaling\". Because,\n it's all about the ratio, and the ratio doesn't change!\n \"\"\"\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print('Using dm_control so need to get state_dim differently')\n state_dim = len(env.observation_space['observations'].low)\n action_dim = len(env.action_space.low)\n action_scaler = float(action_scaler)\n state_scaler_array = np.ones((state_dim,), dtype=np.float32)\n action_scaler_array = np.ones((action_dim,), dtype=np.float32\n ) * action_scaler\n return np.concatenate([state_scaler_array, action_scaler_array], axis=0)\n\n\ndef per_dim_scaling(env, *args):\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print('Using dm_control so need to get state_dim differently')\n state_dim = len(env.observation_space['observations'].low)\n action_dim = len(env.action_space.low)\n assert len(args) == state_dim + action_dim\n return np.array(args, dtype=np.float32)\n\n\ndef ant_maze_scaling(env, com_scaling, other_feature_scaling, action_scaling):\n \"\"\"\n Not sure how this is correct, but: I'm assuming that the COM is the first 2 states. 
Then,\n the rest of the state is the pos/vel of everything.\n \"\"\"\n state_dim = len(env.observation_space.low)\n action_dim = len(env.action_space.low)\n num_com_features = 2\n num_other_features = state_dim - num_com_features\n com_scaler = np.ones((num_com_features,), dtype=np.float32) * float(\n com_scaling)\n other_feature_scaler = np.ones((num_other_features,), dtype=np.float32\n ) * float(other_feature_scaling)\n action_scaler = np.ones((action_dim,), dtype=np.float32) * float(\n action_scaling)\n return np.concatenate([com_scaler, other_feature_scaler, action_scaler],\n axis=0)\n print(\n 'Just a note that you should PROBABLY be normalizing one way or another for this one.'\n )\n\n\n<mask token>\n_SCALING_FUNCTIONS = {'action_scaling': action_scaling, 'per_dim_scaling':\n per_dim_scaling, 'ant_maze_scaling': ant_maze_scaling}\n\n\ndef get_scaling_array(env, scaling_function_string):\n scaling_string_parsed = scaling_function_string.split('__')\n scaling_method, scaling_args = scaling_string_parsed[0\n ], scaling_string_parsed[1:]\n scaling_array = _SCALING_FUNCTIONS[scaling_method](env, *scaling_args)\n return scaling_array\n",
"step-4": "<mask token>\nimport numpy as np\n\n\ndef uniform_scaling(*args, **kwargs):\n return 1.0\n\n\ndef action_scaling(env, action_scaler):\n \"\"\"\n This is actually going to just be \"action scaling\". Because,\n it's all about the ratio, and the ratio doesn't change!\n \"\"\"\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print('Using dm_control so need to get state_dim differently')\n state_dim = len(env.observation_space['observations'].low)\n action_dim = len(env.action_space.low)\n action_scaler = float(action_scaler)\n state_scaler_array = np.ones((state_dim,), dtype=np.float32)\n action_scaler_array = np.ones((action_dim,), dtype=np.float32\n ) * action_scaler\n return np.concatenate([state_scaler_array, action_scaler_array], axis=0)\n\n\ndef per_dim_scaling(env, *args):\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print('Using dm_control so need to get state_dim differently')\n state_dim = len(env.observation_space['observations'].low)\n action_dim = len(env.action_space.low)\n assert len(args) == state_dim + action_dim\n return np.array(args, dtype=np.float32)\n\n\ndef ant_maze_scaling(env, com_scaling, other_feature_scaling, action_scaling):\n \"\"\"\n Not sure how this is correct, but: I'm assuming that the COM is the first 2 states. 
Then,\n the rest of the state is the pos/vel of everything.\n \"\"\"\n state_dim = len(env.observation_space.low)\n action_dim = len(env.action_space.low)\n num_com_features = 2\n num_other_features = state_dim - num_com_features\n com_scaler = np.ones((num_com_features,), dtype=np.float32) * float(\n com_scaling)\n other_feature_scaler = np.ones((num_other_features,), dtype=np.float32\n ) * float(other_feature_scaling)\n action_scaler = np.ones((action_dim,), dtype=np.float32) * float(\n action_scaling)\n return np.concatenate([com_scaler, other_feature_scaler, action_scaler],\n axis=0)\n print(\n 'Just a note that you should PROBABLY be normalizing one way or another for this one.'\n )\n\n\n<mask token>\n_SCALING_FUNCTIONS = {'action_scaling': action_scaling, 'per_dim_scaling':\n per_dim_scaling, 'ant_maze_scaling': ant_maze_scaling}\n\n\ndef get_scaling_array(env, scaling_function_string):\n scaling_string_parsed = scaling_function_string.split('__')\n scaling_method, scaling_args = scaling_string_parsed[0\n ], scaling_string_parsed[1:]\n scaling_array = _SCALING_FUNCTIONS[scaling_method](env, *scaling_args)\n return scaling_array\n",
"step-5": "\"\"\"\nIt's annoying that we have to do it here but for something like Ant, we're not going to be able to\nspecify it easily inside of the rbf_hyper_parameters file. Because, for something like Ant, we have\n2 COM dimensions, and Bipedal we have 1. \n\nSo, we're going to do something similar to shaping_functions.\n\nThe way it'll work is, to make it modular, we'll take in a single string that we then separate out to\nget the scaling. I like that. So, it'll be something like, for Ant:\n {\n uniform: func_get_ones()\n special_loc_scaling: func_special_loc(com, rest_state, actions)\n }\n\nThere's an argument for making these things know about the environment already. Only because we need\nthe state and action dimensions. So maybe you pass the environment into the constructor?\n\nIt's sort of annoying -- do we do the automatic scaling or not? I'd say leave the option, for something like Ant,\nit's unavoidable to use it, even though it does make the problem non-stationary.\n\nAnd it figures out the rest from there.\n\nSo, in the end this will just return an array. \n\"\"\"\n\nimport numpy as np\n\n\ndef uniform_scaling(*args, **kwargs):\n return 1.\n\n\ndef action_scaling(env, action_scaler):\n \"\"\"\n This is actually going to just be \"action scaling\". 
Because,\n it's all about the ratio, and the ratio doesn't change!\n \"\"\"\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print(\"Using dm_control so need to get state_dim differently\")\n state_dim = len(env.observation_space['observations'].low)\n\n action_dim = len(env.action_space.low)\n\n # state_scaling = float(state_scaling)\n action_scaler = float(action_scaler)\n\n state_scaler_array = np.ones((state_dim,), dtype=np.float32)\n action_scaler_array = np.ones((action_dim,), dtype=np.float32) * action_scaler\n\n return np.concatenate([state_scaler_array, action_scaler_array], axis=0)\n\ndef per_dim_scaling(env, *args):\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print(\"Using dm_control so need to get state_dim differently\")\n state_dim = len(env.observation_space['observations'].low)\n action_dim = len(env.action_space.low)\n assert len(args) == state_dim + action_dim\n return np.array(args, dtype=np.float32)\n\ndef ant_maze_scaling(env, com_scaling, other_feature_scaling, action_scaling):\n \"\"\"\n Not sure how this is correct, but: I'm assuming that the COM is the first 2 states. 
Then,\n the rest of the state is the pos/vel of everything.\n \"\"\"\n state_dim = len(env.observation_space.low)\n action_dim = len(env.action_space.low)\n\n num_com_features = 2\n num_other_features = state_dim - num_com_features\n\n com_scaler = np.ones((num_com_features,), dtype=np.float32) * float(com_scaling)\n other_feature_scaler = np.ones((num_other_features,), dtype=np.float32) * float(other_feature_scaling)\n action_scaler = np.ones((action_dim,), dtype=np.float32) * float(action_scaling)\n\n return np.concatenate([com_scaler, other_feature_scaler, action_scaler], axis=0)\n\n # assert \n\n print(\"Just a note that you should PROBABLY be normalizing one way or another for this one.\")\n\n\n\n\"\"\"\nThis has an interesting interface -- scaling_string is a string where the arguments are double-underscore-separated.\nThat lets us pass stuff in through a CLI interface a bit easier.\n\"\"\"\n_SCALING_FUNCTIONS = {\n 'action_scaling': action_scaling,\n 'per_dim_scaling': per_dim_scaling,\n 'ant_maze_scaling': ant_maze_scaling,\n}\n\ndef get_scaling_array(env, scaling_function_string):\n scaling_string_parsed = scaling_function_string.split(\"__\")\n scaling_method, scaling_args = scaling_string_parsed[0], scaling_string_parsed[1:]\n scaling_array = _SCALING_FUNCTIONS[scaling_method](env, *scaling_args)\n return scaling_array\n\n\n\n# class ScalingFunctions:\n# \"\"\"\n# This has an interesting interface -- scaling_string is a string where the arguments are double-underscore-separated.\n# That lets us pass stuff in through a CLI interface a bit easier.\n# \"\"\"\n# SCALING_FUNCTIONS = {\n# 'state_action_scaling': state_action_scaling,\n# 'per_dim_scaling': per_dim_scaling \n# }\n\n# def __init__(self, env, scaling_string):\n# scaling_string_parsed = scaling_string.split(\"__\")\n# scaling_method, scaling_args = scaling_string_parsed[0], scaling_string_parsed[1:]\n# scaling_array = self.SCALING_FUNCTIONS[scaling_method](env, *scaling_args)\n# # return 
scaling_array\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
import sys
import urllib
import urlparse
import xbmcgui
import xbmcplugin
import xbmcaddon
import shutil
from shutil import copyfile
base_url = sys.argv[0]
addon_handle = int(sys.argv[1])
args = urlparse.parse_qs(sys.argv[2][1:])
addon = xbmcaddon.Addon()
xbmcplugin.setContent(addon_handle, 'videos')
def build_url(query):
return base_url + '?' + urllib.urlencode(query)
xxxmenu = xbmcaddon.Addon('plugin.video.xxxmenu')
addon_icon = 'special://home/addons/plugin.video.xxxmenu/icon.png'
recycle_icon = 'special://home/addons/plugin.video.xxxmenu/recycle.png'
pandamovies_icon = 'special://home/addons/plugin.video.xxxmenu/pandamovie.png'
mangoporn_icon = 'special://home/addons/plugin.video.xxxmenu/mangoporn.png'
streamingporn_icon = 'special://home/addons/plugin.video.xxxmenu/streamingporn.png'
sexkino_icon = 'special://home/addons/plugin.video.xxxmenu/sexkino.png'
pornkino_icon = 'special://home/addons/plugin.video.xxxmenu/pornkino.png'
sexuria_icon = 'special://home/addons/plugin.video.xxxmenu/sexuria.png'
mode = args.get('mode', None)
if mode is None:
url = build_url({'mode': 'xxxmenu', 'foldername': '[COLOR blue][B]XXX MENU[/B][/COLOR]'})
li = xbmcgui.ListItem('[COLOR blue][B]XXX MENU[/B][/COLOR]', iconImage=addon_icon)
li.setInfo(type='video', infoLabels={'plot': '[COLOR blue][B]XXX MENU[/B][/COLOR]'})
li.setArt({'fanart': xxxmenu.getAddonInfo('fanart')})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=False)
url = build_url({'mode': 'mangoporn', 'foldername': 'Mangoporn'})
li = xbmcgui.ListItem('Mangoporn', iconImage=mangoporn_icon)
li.setInfo(type='video', infoLabels={'plot': 'www.mangoporn.net'})
li.setArt({'fanart': xxxmenu.getAddonInfo('fanart')})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=False)
url = build_url({'mode': 'pandamovies', 'foldername': 'Pandamovies'})
li = xbmcgui.ListItem('Pandamovies', iconImage=pandamovies_icon)
li.setInfo(type='video', infoLabels={'plot': 'www.pandamovies.pw'})
li.setArt({'fanart': xxxmenu.getAddonInfo('fanart')})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=False)
url = build_url({'mode': 'pornkino', 'foldername': 'Pornkino'})
li = xbmcgui.ListItem('Pornkino', iconImage=pornkino_icon)
li.setInfo(type='video', infoLabels={'plot': 'www.pornkino.to'})
li.setArt({'fanart': xxxmenu.getAddonInfo('fanart')})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=False)
url = build_url({'mode': 'sexkino', 'foldername': 'Sexkino'})
li = xbmcgui.ListItem('Sexkino', iconImage=sexkino_icon)
li.setInfo(type='video', infoLabels={'plot': 'www.sexkino.to'})
li.setArt({'fanart': xxxmenu.getAddonInfo('fanart')})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=False)
url = build_url({'mode': 'sexuria', 'foldername': 'Sexuria'})
li = xbmcgui.ListItem('Sexuria', iconImage=sexuria_icon)
li.setInfo(type='video', infoLabels={'plot': 'www.sexuria.com'})
li.setArt({'fanart': xxxmenu.getAddonInfo('fanart')})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=False)
url = build_url({'mode': 'streamingporn', 'foldername': 'Streamingporn'})
li = xbmcgui.ListItem('Streamingporn', iconImage=streamingporn_icon)
li.setInfo(type='video', infoLabels={'plot': 'www.streamingporn.xyz'})
li.setArt({'fanart': xxxmenu.getAddonInfo('fanart')})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=False)
url = build_url({'mode': 'clean', 'foldername': 'Clear [COLOR blue][B]XXX MENU[/B][/COLOR] cache'})
li = xbmcgui.ListItem('Clear [COLOR blue][B]XXX MENU[/B][/COLOR] cache', iconImage=recycle_icon)
li.setInfo(type='video', infoLabels={'plot': 'Clear [COLOR blue][B]XXX MENU[/B][/COLOR] cache'})
li.setArt({'fanart': xxxmenu.getAddonInfo('fanart')})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
listitem=li, isFolder=False)
xbmcplugin.endOfDirectory(addon_handle)
elif mode[0] == 'xxxmenu':
xbmc.executebuiltin('Container.Refresh')
elif mode[0] == 'pandamovies':
xbmc.executebuiltin('RunAddon(plugin.video.pandamovies.pw)')
elif mode[0] == 'mangoporn':
xbmc.executebuiltin('RunAddon(plugin.video.mangoporn.net)')
elif mode[0] == 'streamingporn':
xbmc.executebuiltin('RunAddon(plugin.video.streamingporn.xyz)')
elif mode[0] == 'sexkino':
xbmc.executebuiltin('RunAddon(plugin.video.sexkino.to)')
elif mode[0] == 'pornkino':
xbmc.executebuiltin('RunAddon(plugin.video.pornkino.to)')
elif mode[0] == 'sexuria':
xbmc.executebuiltin('RunAddon(plugin.video.sexuria.com)')
elif mode[0] == 'clean':
tmp_path = xbmc.translatePath("special://userdata/addon_data/plugin.video.xxxmenu/tmp/").decode("utf-8")
try:
shutil.rmtree(tmp_path, ignore_errors=True)
except:
pass
xbmc.executebuiltin('Notification([COLOR blue][B]XXX MENU[/B][/COLOR], Cache successfully cleared., 5000, %s)' % (addon_icon))
xbmc.executebuiltin('Container.Refresh')
|
normal
|
{
"blob_id": "15bcfd8859322034ec76a8c861d2151153ab54af",
"index": 5120,
"step-1": "import sys\r\nimport urllib\r\nimport urlparse\r\nimport xbmcgui\r\nimport xbmcplugin\r\nimport xbmcaddon\r\nimport shutil\r\nfrom shutil import copyfile\r\n\r\nbase_url = sys.argv[0]\r\naddon_handle = int(sys.argv[1])\r\nargs = urlparse.parse_qs(sys.argv[2][1:])\r\naddon = xbmcaddon.Addon()\r\n\r\nxbmcplugin.setContent(addon_handle, 'videos')\r\n\r\ndef build_url(query):\r\n return base_url + '?' + urllib.urlencode(query)\r\n\r\nxxxmenu = xbmcaddon.Addon('plugin.video.xxxmenu')\r\n\r\naddon_icon = 'special://home/addons/plugin.video.xxxmenu/icon.png'\r\nrecycle_icon = 'special://home/addons/plugin.video.xxxmenu/recycle.png'\r\n\r\npandamovies_icon = 'special://home/addons/plugin.video.xxxmenu/pandamovie.png'\r\nmangoporn_icon = 'special://home/addons/plugin.video.xxxmenu/mangoporn.png'\r\nstreamingporn_icon = 'special://home/addons/plugin.video.xxxmenu/streamingporn.png'\r\nsexkino_icon = 'special://home/addons/plugin.video.xxxmenu/sexkino.png'\r\npornkino_icon = 'special://home/addons/plugin.video.xxxmenu/pornkino.png'\r\nsexuria_icon = 'special://home/addons/plugin.video.xxxmenu/sexuria.png'\r\n\r\nmode = args.get('mode', None)\r\n\r\nif mode is None:\r\n\r\n url = build_url({'mode': 'xxxmenu', 'foldername': '[COLOR blue][B]XXX MENU[/B][/COLOR]'})\r\n li = xbmcgui.ListItem('[COLOR blue][B]XXX MENU[/B][/COLOR]', iconImage=addon_icon)\r\n li.setInfo(type='video', infoLabels={'plot': '[COLOR blue][B]XXX MENU[/B][/COLOR]'})\r\n li.setArt({'fanart': xxxmenu.getAddonInfo('fanart')}) \r\n xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,\r\n listitem=li, isFolder=False)\r\n\r\n url = build_url({'mode': 'mangoporn', 'foldername': 'Mangoporn'})\r\n li = xbmcgui.ListItem('Mangoporn', iconImage=mangoporn_icon)\r\n li.setInfo(type='video', infoLabels={'plot': 'www.mangoporn.net'})\r\n li.setArt({'fanart': xxxmenu.getAddonInfo('fanart')}) \r\n xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,\r\n listitem=li, isFolder=False)\r\n\r\n url = 
build_url({'mode': 'pandamovies', 'foldername': 'Pandamovies'})\r\n li = xbmcgui.ListItem('Pandamovies', iconImage=pandamovies_icon)\r\n li.setInfo(type='video', infoLabels={'plot': 'www.pandamovies.pw'})\r\n li.setArt({'fanart': xxxmenu.getAddonInfo('fanart')}) \r\n xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,\r\n listitem=li, isFolder=False)\r\n\r\n url = build_url({'mode': 'pornkino', 'foldername': 'Pornkino'})\r\n li = xbmcgui.ListItem('Pornkino', iconImage=pornkino_icon)\r\n li.setInfo(type='video', infoLabels={'plot': 'www.pornkino.to'})\r\n li.setArt({'fanart': xxxmenu.getAddonInfo('fanart')}) \r\n xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,\r\n listitem=li, isFolder=False)\r\n\r\n url = build_url({'mode': 'sexkino', 'foldername': 'Sexkino'})\r\n li = xbmcgui.ListItem('Sexkino', iconImage=sexkino_icon)\r\n li.setInfo(type='video', infoLabels={'plot': 'www.sexkino.to'})\r\n li.setArt({'fanart': xxxmenu.getAddonInfo('fanart')}) \r\n xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,\r\n listitem=li, isFolder=False)\r\n\r\n\r\n url = build_url({'mode': 'sexuria', 'foldername': 'Sexuria'})\r\n li = xbmcgui.ListItem('Sexuria', iconImage=sexuria_icon)\r\n li.setInfo(type='video', infoLabels={'plot': 'www.sexuria.com'})\r\n li.setArt({'fanart': xxxmenu.getAddonInfo('fanart')}) \r\n xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,\r\n listitem=li, isFolder=False)\r\n\r\n url = build_url({'mode': 'streamingporn', 'foldername': 'Streamingporn'})\r\n li = xbmcgui.ListItem('Streamingporn', iconImage=streamingporn_icon)\r\n li.setInfo(type='video', infoLabels={'plot': 'www.streamingporn.xyz'})\r\n li.setArt({'fanart': xxxmenu.getAddonInfo('fanart')}) \r\n xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,\r\n listitem=li, isFolder=False)\r\n\r\n url = build_url({'mode': 'clean', 'foldername': 'Clear [COLOR blue][B]XXX MENU[/B][/COLOR] cache'})\r\n li = xbmcgui.ListItem('Clear [COLOR blue][B]XXX MENU[/B][/COLOR] cache', 
iconImage=recycle_icon)\r\n li.setInfo(type='video', infoLabels={'plot': 'Clear [COLOR blue][B]XXX MENU[/B][/COLOR] cache'})\r\n li.setArt({'fanart': xxxmenu.getAddonInfo('fanart')}) \r\n xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,\r\n listitem=li, isFolder=False)\r\n\r\n xbmcplugin.endOfDirectory(addon_handle)\r\n\r\nelif mode[0] == 'xxxmenu':\r\n xbmc.executebuiltin('Container.Refresh')\r\n\r\nelif mode[0] == 'pandamovies':\r\n xbmc.executebuiltin('RunAddon(plugin.video.pandamovies.pw)')\r\n\r\nelif mode[0] == 'mangoporn':\r\n xbmc.executebuiltin('RunAddon(plugin.video.mangoporn.net)')\r\n\r\nelif mode[0] == 'streamingporn':\r\n xbmc.executebuiltin('RunAddon(plugin.video.streamingporn.xyz)')\r\n\r\nelif mode[0] == 'sexkino':\r\n xbmc.executebuiltin('RunAddon(plugin.video.sexkino.to)')\r\n\r\nelif mode[0] == 'pornkino':\r\n xbmc.executebuiltin('RunAddon(plugin.video.pornkino.to)')\r\n\r\nelif mode[0] == 'sexuria':\r\n xbmc.executebuiltin('RunAddon(plugin.video.sexuria.com)')\r\n\r\nelif mode[0] == 'clean':\r\n tmp_path = xbmc.translatePath(\"special://userdata/addon_data/plugin.video.xxxmenu/tmp/\").decode(\"utf-8\")\r\n try:\r\n shutil.rmtree(tmp_path, ignore_errors=True)\r\n except:\r\n pass\r\n xbmc.executebuiltin('Notification([COLOR blue][B]XXX MENU[/B][/COLOR], Cache successfully cleared., 5000, %s)' % (addon_icon))\r\n xbmc.executebuiltin('Container.Refresh')\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class Item:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, barcode):
self.barcode = barcode
self.marc = None
self.record = None
self.title = None
self.author = None
self.year = None
def _get_marc(self):
with urllib.request.urlopen(Item.webcat + self.barcode) as response:
html = response.read().decode('utf-8')
marc = html[html.find('>') + 1:html.rfind('<')].strip('\n\n ')
if 'Barcode not found' not in marc:
self.marc = marc
def _get_title(self):
if self.record['245']:
title = self.record['245']['a'].strip(' /:,.')
return title
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _get_year(self):
date = self.record.pubyear()
if date:
nums = '1234567890'
new_date = ''
for ch in date:
if ch in nums:
new_date += ch
if not new_date[0] == '1':
return None
if not len(new_date) == 4:
return None
return new_date
else:
return None
def get_marc_fields(self, len_title):
self._get_marc()
if self.marc:
with io.BytesIO(self.marc.encode('utf-8')) as fh:
reader = MARCReader(fh)
for record in reader:
self.record = record
self.title = self._get_title(
) if len_title == 'short' else self._get_long_title()
self.author = self._get_author()
self.year = self._get_year()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Item:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, barcode):
self.barcode = barcode
self.marc = None
self.record = None
self.title = None
self.author = None
self.year = None
def _get_marc(self):
with urllib.request.urlopen(Item.webcat + self.barcode) as response:
html = response.read().decode('utf-8')
marc = html[html.find('>') + 1:html.rfind('<')].strip('\n\n ')
if 'Barcode not found' not in marc:
self.marc = marc
def _get_title(self):
if self.record['245']:
title = self.record['245']['a'].strip(' /:,.')
return title
<|reserved_special_token_0|>
def _get_author(self):
if self.record['100']:
return self.record['100']['a']
elif self.record['110']:
return self.record['110']['a']
elif self.record['111']:
return self.record['111']['a']
else:
return None
def _get_year(self):
date = self.record.pubyear()
if date:
nums = '1234567890'
new_date = ''
for ch in date:
if ch in nums:
new_date += ch
if not new_date[0] == '1':
return None
if not len(new_date) == 4:
return None
return new_date
else:
return None
def get_marc_fields(self, len_title):
self._get_marc()
if self.marc:
with io.BytesIO(self.marc.encode('utf-8')) as fh:
reader = MARCReader(fh)
for record in reader:
self.record = record
self.title = self._get_title(
) if len_title == 'short' else self._get_long_title()
self.author = self._get_author()
self.year = self._get_year()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Item:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, barcode):
self.barcode = barcode
self.marc = None
self.record = None
self.title = None
self.author = None
self.year = None
def _get_marc(self):
with urllib.request.urlopen(Item.webcat + self.barcode) as response:
html = response.read().decode('utf-8')
marc = html[html.find('>') + 1:html.rfind('<')].strip('\n\n ')
if 'Barcode not found' not in marc:
self.marc = marc
def _get_title(self):
if self.record['245']:
title = self.record['245']['a'].strip(' /:,.')
return title
def _get_long_title(self):
title = self.record.title().strip(' /:,.')
return title
def _get_author(self):
if self.record['100']:
return self.record['100']['a']
elif self.record['110']:
return self.record['110']['a']
elif self.record['111']:
return self.record['111']['a']
else:
return None
def _get_year(self):
date = self.record.pubyear()
if date:
nums = '1234567890'
new_date = ''
for ch in date:
if ch in nums:
new_date += ch
if not new_date[0] == '1':
return None
if not len(new_date) == 4:
return None
return new_date
else:
return None
def get_marc_fields(self, len_title):
self._get_marc()
if self.marc:
with io.BytesIO(self.marc.encode('utf-8')) as fh:
reader = MARCReader(fh)
for record in reader:
self.record = record
self.title = self._get_title(
) if len_title == 'short' else self._get_long_title()
self.author = self._get_author()
self.year = self._get_year()
<|reserved_special_token_1|>
import io
import urllib.request
from pymarc import MARCReader
class Item:
"""
Represents an item from our
Library catalogue (https://www-lib.soton.ac.uk)
Usage:
#>>> import findbooks
#>>> item = findbooks.Item('12345678')
#>>> item.getMarcFields()
#>>> print(item.title)
"""
webcat = 'http://lms.soton.ac.uk/cgi-bin/goobi_marc.cgi?itemid='
def __init__(self, barcode):
self.barcode = barcode
self.marc = None
self.record = None
self.title = None
self.author = None
self.year = None
def _get_marc(self):
with urllib.request.urlopen(Item.webcat + self.barcode) as response:
html = response.read().decode('utf-8')
marc = html[html.find('>') + 1:html.rfind('<')].strip('\n\n ')
if 'Barcode not found' not in marc:
self.marc = marc
def _get_title(self):
if self.record['245']:
title = self.record['245']['a'].strip(' /:,.')
return title
def _get_long_title(self):
title = self.record.title().strip(' /:,.')
return title
def _get_author(self):
if self.record['100']:
return self.record['100']['a']
elif self.record['110']:
return self.record['110']['a']
elif self.record['111']:
return self.record['111']['a']
else:
return None
def _get_year(self):
date = self.record.pubyear()
if date:
nums = '1234567890'
new_date = ''
for ch in date:
if ch in nums:
new_date += ch
if not new_date[0] == '1':
return None
if not len(new_date) == 4:
return None
return new_date
else:
return None
def get_marc_fields(self, len_title):
self._get_marc()
if self.marc:
with io.BytesIO(self.marc.encode('utf-8')) as fh:
reader = MARCReader(fh)
for record in reader:
self.record = record
self.title = self._get_title(
) if len_title == 'short' else self._get_long_title()
self.author = self._get_author()
self.year = self._get_year()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import io
import urllib.request
from pymarc import MARCReader
class Item:
    """
    Represents an item from our
    Library catalogue (https://www-lib.soton.ac.uk)
    Usage:

    #>>> import findbooks
    #>>> item = findbooks.Item('12345678')
    #>>> item.getMarcFields()
    #>>> print(item.title)

    """
    # Endpoint that returns the raw MARC record for a given barcode.
    webcat = "http://lms.soton.ac.uk/cgi-bin/goobi_marc.cgi?itemid="

    def __init__(self, barcode):
        """Create an item for *barcode*; fields stay None until get_marc_fields()."""
        self.barcode = barcode
        self.marc = None      # raw MARC text fetched from webcat
        self.record = None    # parsed pymarc Record
        self.title = None
        self.author = None
        self.year = None

    def _get_marc(self):
        """Fetch the raw MARC record for this barcode and store it on self.marc."""
        with urllib.request.urlopen(Item.webcat + self.barcode) as response:
            html = response.read().decode("utf-8")
            # The MARC payload sits between the first '>' and the last '<'.
            marc = html[html.find(">") + 1:html.rfind("<")].strip()
            if "Barcode not found" not in marc:
                self.marc = marc

    def _get_title(self):
        """Return the short title (MARC field 245 $a), stripped of punctuation."""
        if self.record['245']:
            title = self.record['245']['a'].strip(' /:,.')
            return title

    def _get_long_title(self):
        """Return pymarc's full title, stripped of trailing punctuation."""
        title = self.record.title().strip(' /:,.')
        return title

    def _get_author(self):
        """Return the main entry name: personal (100), corporate (110) or meeting (111)."""
        for tag in ('100', '110', '111'):
            if self.record[tag]:
                return self.record[tag]['a']
        return None

    def _get_year(self):
        """Return a validated four-digit publication year string, or None.

        Fix: the original indexed ``new_date[0]`` without checking that any
        digits were extracted, raising IndexError for dates like 'n.d.'.
        """
        date = self.record.pubyear()
        if not date:
            return None
        # Dates should only contain digits.
        digits = ''.join(ch for ch in date if ch in '0123456789')
        # Dates should be 4 chars long and have '1' as the first char;
        # the length check also guards the empty string.
        if len(digits) == 4 and digits[0] == '1':
            return digits
        return None

    def get_marc_fields(self, len_title):
        """Fetch and parse the MARC record, populating title, author and year.

        len_title -- 'short' selects the 245$a title; any other value
        selects the full pymarc title.
        """
        self._get_marc()
        if self.marc:
            with io.BytesIO(self.marc.encode('utf-8')) as fh:
                reader = MARCReader(fh)
                for record in reader:
                    self.record = record
                    self.title = (self._get_title() if len_title == "short"
                                  else self._get_long_title())
                    self.author = self._get_author()
                    self.year = self._get_year()
# item = Item('59571478')
# item.get_marc_fields()
# print(item.title)
|
flexible
|
{
"blob_id": "abfff0901e5f825a473119c93f53cba206609428",
"index": 7482,
"step-1": "<mask token>\n\n\nclass Item:\n <mask token>\n <mask token>\n\n def __init__(self, barcode):\n self.barcode = barcode\n self.marc = None\n self.record = None\n self.title = None\n self.author = None\n self.year = None\n\n def _get_marc(self):\n with urllib.request.urlopen(Item.webcat + self.barcode) as response:\n html = response.read().decode('utf-8')\n marc = html[html.find('>') + 1:html.rfind('<')].strip('\\n\\n ')\n if 'Barcode not found' not in marc:\n self.marc = marc\n\n def _get_title(self):\n if self.record['245']:\n title = self.record['245']['a'].strip(' /:,.')\n return title\n <mask token>\n <mask token>\n\n def _get_year(self):\n date = self.record.pubyear()\n if date:\n nums = '1234567890'\n new_date = ''\n for ch in date:\n if ch in nums:\n new_date += ch\n if not new_date[0] == '1':\n return None\n if not len(new_date) == 4:\n return None\n return new_date\n else:\n return None\n\n def get_marc_fields(self, len_title):\n self._get_marc()\n if self.marc:\n with io.BytesIO(self.marc.encode('utf-8')) as fh:\n reader = MARCReader(fh)\n for record in reader:\n self.record = record\n self.title = self._get_title(\n ) if len_title == 'short' else self._get_long_title()\n self.author = self._get_author()\n self.year = self._get_year()\n",
"step-2": "<mask token>\n\n\nclass Item:\n <mask token>\n <mask token>\n\n def __init__(self, barcode):\n self.barcode = barcode\n self.marc = None\n self.record = None\n self.title = None\n self.author = None\n self.year = None\n\n def _get_marc(self):\n with urllib.request.urlopen(Item.webcat + self.barcode) as response:\n html = response.read().decode('utf-8')\n marc = html[html.find('>') + 1:html.rfind('<')].strip('\\n\\n ')\n if 'Barcode not found' not in marc:\n self.marc = marc\n\n def _get_title(self):\n if self.record['245']:\n title = self.record['245']['a'].strip(' /:,.')\n return title\n <mask token>\n\n def _get_author(self):\n if self.record['100']:\n return self.record['100']['a']\n elif self.record['110']:\n return self.record['110']['a']\n elif self.record['111']:\n return self.record['111']['a']\n else:\n return None\n\n def _get_year(self):\n date = self.record.pubyear()\n if date:\n nums = '1234567890'\n new_date = ''\n for ch in date:\n if ch in nums:\n new_date += ch\n if not new_date[0] == '1':\n return None\n if not len(new_date) == 4:\n return None\n return new_date\n else:\n return None\n\n def get_marc_fields(self, len_title):\n self._get_marc()\n if self.marc:\n with io.BytesIO(self.marc.encode('utf-8')) as fh:\n reader = MARCReader(fh)\n for record in reader:\n self.record = record\n self.title = self._get_title(\n ) if len_title == 'short' else self._get_long_title()\n self.author = self._get_author()\n self.year = self._get_year()\n",
"step-3": "<mask token>\n\n\nclass Item:\n <mask token>\n <mask token>\n\n def __init__(self, barcode):\n self.barcode = barcode\n self.marc = None\n self.record = None\n self.title = None\n self.author = None\n self.year = None\n\n def _get_marc(self):\n with urllib.request.urlopen(Item.webcat + self.barcode) as response:\n html = response.read().decode('utf-8')\n marc = html[html.find('>') + 1:html.rfind('<')].strip('\\n\\n ')\n if 'Barcode not found' not in marc:\n self.marc = marc\n\n def _get_title(self):\n if self.record['245']:\n title = self.record['245']['a'].strip(' /:,.')\n return title\n\n def _get_long_title(self):\n title = self.record.title().strip(' /:,.')\n return title\n\n def _get_author(self):\n if self.record['100']:\n return self.record['100']['a']\n elif self.record['110']:\n return self.record['110']['a']\n elif self.record['111']:\n return self.record['111']['a']\n else:\n return None\n\n def _get_year(self):\n date = self.record.pubyear()\n if date:\n nums = '1234567890'\n new_date = ''\n for ch in date:\n if ch in nums:\n new_date += ch\n if not new_date[0] == '1':\n return None\n if not len(new_date) == 4:\n return None\n return new_date\n else:\n return None\n\n def get_marc_fields(self, len_title):\n self._get_marc()\n if self.marc:\n with io.BytesIO(self.marc.encode('utf-8')) as fh:\n reader = MARCReader(fh)\n for record in reader:\n self.record = record\n self.title = self._get_title(\n ) if len_title == 'short' else self._get_long_title()\n self.author = self._get_author()\n self.year = self._get_year()\n",
"step-4": "import io\nimport urllib.request\nfrom pymarc import MARCReader\n\n\nclass Item:\n \"\"\"\n Represents an item from our\n Library catalogue (https://www-lib.soton.ac.uk)\n Usage:\n\n #>>> import findbooks\n #>>> item = findbooks.Item('12345678')\n #>>> item.getMarcFields()\n #>>> print(item.title)\n\n \"\"\"\n webcat = 'http://lms.soton.ac.uk/cgi-bin/goobi_marc.cgi?itemid='\n\n def __init__(self, barcode):\n self.barcode = barcode\n self.marc = None\n self.record = None\n self.title = None\n self.author = None\n self.year = None\n\n def _get_marc(self):\n with urllib.request.urlopen(Item.webcat + self.barcode) as response:\n html = response.read().decode('utf-8')\n marc = html[html.find('>') + 1:html.rfind('<')].strip('\\n\\n ')\n if 'Barcode not found' not in marc:\n self.marc = marc\n\n def _get_title(self):\n if self.record['245']:\n title = self.record['245']['a'].strip(' /:,.')\n return title\n\n def _get_long_title(self):\n title = self.record.title().strip(' /:,.')\n return title\n\n def _get_author(self):\n if self.record['100']:\n return self.record['100']['a']\n elif self.record['110']:\n return self.record['110']['a']\n elif self.record['111']:\n return self.record['111']['a']\n else:\n return None\n\n def _get_year(self):\n date = self.record.pubyear()\n if date:\n nums = '1234567890'\n new_date = ''\n for ch in date:\n if ch in nums:\n new_date += ch\n if not new_date[0] == '1':\n return None\n if not len(new_date) == 4:\n return None\n return new_date\n else:\n return None\n\n def get_marc_fields(self, len_title):\n self._get_marc()\n if self.marc:\n with io.BytesIO(self.marc.encode('utf-8')) as fh:\n reader = MARCReader(fh)\n for record in reader:\n self.record = record\n self.title = self._get_title(\n ) if len_title == 'short' else self._get_long_title()\n self.author = self._get_author()\n self.year = self._get_year()\n",
"step-5": "# -*- coding: utf-8 -*-\nimport io\nimport urllib.request\nfrom pymarc import MARCReader\n\n\nclass Item:\n \"\"\"\n Represents an item from our\n Library catalogue (https://www-lib.soton.ac.uk)\n Usage:\n\n #>>> import findbooks\n #>>> item = findbooks.Item('12345678')\n #>>> item.getMarcFields()\n #>>> print(item.title)\n\n \"\"\"\n webcat = \"http://lms.soton.ac.uk/cgi-bin/goobi_marc.cgi?itemid=\"\n\n def __init__(self, barcode):\n self.barcode = barcode\n self.marc = None\n self.record = None\n self.title = None\n self.author = None\n self.year = None\n\n def _get_marc(self):\n with urllib.request.urlopen(Item.webcat + self.barcode) as response:\n html = response.read().decode(\"utf-8\")\n marc = html[html.find(\">\")+1:html.rfind(\"<\")].strip('''\n\n ''')\n if \"Barcode not found\" not in marc:\n self.marc = marc\n\n def _get_title(self):\n if self.record['245']:\n title = self.record['245']['a'].strip(' /:,.')\n return title\n\n def _get_long_title(self):\n title = self.record.title().strip(' /:,.')\n return title\n\n def _get_author(self):\n if self.record['100']:\n return self.record['100']['a']\n elif self.record['110']:\n return self.record['110']['a']\n elif self.record['111']:\n return self.record['111']['a']\n else:\n return None\n\n def _get_year(self):\n date = self.record.pubyear()\n if date:\n # dates should only have numbers\n nums = '1234567890'\n new_date = ''\n for ch in date:\n if ch in nums:\n new_date += ch\n # dates should have '1' as the first char\n if not new_date[0] == \"1\":\n return None\n # dates should eb 4 chars long\n if not len(new_date) == 4:\n return None\n return new_date\n else:\n return None\n\n def get_marc_fields(self, len_title):\n self._get_marc()\n if self.marc:\n with io.BytesIO(self.marc.encode('utf-8')) as fh:\n reader = MARCReader(fh)\n for record in reader:\n self.record = record\n self.title = self._get_title() if len_title == \"short\" else self._get_long_title()\n self.author = self._get_author()\n 
self.year = self._get_year()\n\n# item = Item('59571478')\n# item.get_marc_fields()\n# print(item.title)\n",
"step-ids": [
6,
7,
8,
11,
12
]
}
|
[
6,
7,
8,
11,
12
] |
# Generated by Django 2.1.7 on 2019-03-23 17:14
from django.db import migrations, models
class Migration(migrations.Migration):
    # Applies on top of the previous currency_exchange schema migration.
    dependencies = [
        ('currency_exchange', '0007_auto_20190323_1751'),
    ]

    operations = [
        # Hours logged against a task; high-precision decimal, starts at 0.
        migrations.AddField(
            model_name='tasks',
            name='hours',
            field=models.DecimalField(decimal_places=12, default=0, max_digits=24),
        ),
        # Free-text task status; existing and new rows default to 'in progress'.
        migrations.AddField(
            model_name='tasks',
            name='status',
            field=models.CharField(default='in progress', max_length=100),
        ),
    ]
|
normal
|
{
"blob_id": "1f63ce2c791f0b8763aeae15df4875769f6de848",
"index": 4942,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('currency_exchange', '0007_auto_20190323_1751')]\n operations = [migrations.AddField(model_name='tasks', name='hours',\n field=models.DecimalField(decimal_places=12, default=0, max_digits=\n 24)), migrations.AddField(model_name='tasks', name='status', field=\n models.CharField(default='in progress', max_length=100))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('currency_exchange', '0007_auto_20190323_1751')]\n operations = [migrations.AddField(model_name='tasks', name='hours',\n field=models.DecimalField(decimal_places=12, default=0, max_digits=\n 24)), migrations.AddField(model_name='tasks', name='status', field=\n models.CharField(default='in progress', max_length=100))]\n",
"step-5": "# Generated by Django 2.1.7 on 2019-03-23 17:14\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('currency_exchange', '0007_auto_20190323_1751'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='tasks',\n name='hours',\n field=models.DecimalField(decimal_places=12, default=0, max_digits=24),\n ),\n migrations.AddField(\n model_name='tasks',\n name='status',\n field=models.CharField(default='in progress', max_length=100),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Copy new.csv to new1.csv, converting decimal commas to dots.
# Fixes from the original: the output filename had a comma typo
# ("new1,csv"), and close was referenced without being called
# (missing parentheses), leaking both file handles.  Context
# managers close the files even if an exception occurs.
with open("new.csv", "r") as old_file, open("new1.csv", "w") as new_file:
    for line in old_file:
        new_file.write(line.replace(',', '.'))
|
normal
|
{
"blob_id": "b3d26d01d45c073192d06c8e94c06f7eae267b14",
"index": 968,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in old_file.readlines():\n cleaned_line = line.replace(',', '.')\n new_file.write(cleaned_line)\nold_file.close\nnew_file.close\n",
"step-3": "old_file = open('new.csv', 'r')\nnew_file = open('new1,csv', 'w')\nfor line in old_file.readlines():\n cleaned_line = line.replace(',', '.')\n new_file.write(cleaned_line)\nold_file.close\nnew_file.close\n",
"step-4": "old_file = open(\"new.csv\", \"r\")\nnew_file = open(\"new1,csv\", \"w\")\nfor line in old_file.readlines():\n cleaned_line =line.replace(',','.')\n new_file.write(cleaned_line)\nold_file.close\nnew_file.close",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def bubbleSort(arr):
k = len(arr)
for i in range(k):
for j in range(0, k - i - 1):
if arr[j] > arr[j + 1]:
arr[j], arr[j + 1] = arr[j + 1], arr[j]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def bubbleSort(arr):
k = len(arr)
for i in range(k):
for j in range(0, k - i - 1):
if arr[j] > arr[j + 1]:
arr[j], arr[j + 1] = arr[j + 1], arr[j]
<|reserved_special_token_0|>
bubbleSort(intergerArr)
print('Sorted array: ' + str(intergerArr))
<|reserved_special_token_1|>
def bubbleSort(arr):
k = len(arr)
for i in range(k):
for j in range(0, k - i - 1):
if arr[j] > arr[j + 1]:
arr[j], arr[j + 1] = arr[j + 1], arr[j]
intergerArr = [20, 345, 215, 112, 2, 33, 29]
bubbleSort(intergerArr)
print('Sorted array: ' + str(intergerArr))
<|reserved_special_token_1|>
# Python implementation of Bubble Sort


def bubbleSort(arr):
    """Sort *arr* in place in ascending order using bubble sort.

    Improvement over the original: if a full pass makes no swaps the
    list is already sorted, so we stop early — best case drops from
    O(n^2) to O(n).  Results are identical.
    """
    k = len(arr)
    # Traverse through all elements
    for i in range(k):
        swapped = False
        # Last i elements are already in correct place
        for j in range(0, k - i - 1):
            # Swap if element is greater than next element
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        if not swapped:
            break


# example array
intergerArr = [20, 345, 215, 112, 2, 33, 29]

bubbleSort(intergerArr)

print("Sorted array: " + str(intergerArr))
|
flexible
|
{
"blob_id": "178f9dcd9cbea140abebd509b56979417b5d7503",
"index": 6785,
"step-1": "<mask token>\n",
"step-2": "def bubbleSort(arr):\n k = len(arr)\n for i in range(k):\n for j in range(0, k - i - 1):\n if arr[j] > arr[j + 1]:\n arr[j], arr[j + 1] = arr[j + 1], arr[j]\n\n\n<mask token>\n",
"step-3": "def bubbleSort(arr):\n k = len(arr)\n for i in range(k):\n for j in range(0, k - i - 1):\n if arr[j] > arr[j + 1]:\n arr[j], arr[j + 1] = arr[j + 1], arr[j]\n\n\n<mask token>\nbubbleSort(intergerArr)\nprint('Sorted array: ' + str(intergerArr))\n",
"step-4": "def bubbleSort(arr):\n k = len(arr)\n for i in range(k):\n for j in range(0, k - i - 1):\n if arr[j] > arr[j + 1]:\n arr[j], arr[j + 1] = arr[j + 1], arr[j]\n\n\nintergerArr = [20, 345, 215, 112, 2, 33, 29]\nbubbleSort(intergerArr)\nprint('Sorted array: ' + str(intergerArr))\n",
"step-5": "# Python implementation of Bubble Sort\n\n\ndef bubbleSort(arr):\n k = len(arr)\n # Traverse through all elements\n for i in range(k):\n # Last i elements are already in correct place\n for j in range(0, k - i - 1):\n # Swap if element is greater than next element\n if arr[j] > arr[j + 1]:\n arr[j], arr[j + 1] = arr[j + 1], arr[j]\n\n\n# example array\nintergerArr = [20, 345, 215, 112, 2, 33, 29]\n\nbubbleSort(intergerArr)\n\nprint(\"Sorted array: \" + str(intergerArr))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class User(AbstractUser):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Profile(models.Model):
"""Profile model"""
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.
CASCADE, primary_key=True)
description = models.TextField('user description', max_length=255)
picture = models.ImageField(upload_to='users/pictures', blank=True,
null=True)
is_authenticated = models.BooleanField('user is autheticated', default=
False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.user.username
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User(AbstractUser):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return self.username
class Profile(models.Model):
"""Profile model"""
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.
CASCADE, primary_key=True)
description = models.TextField('user description', max_length=255)
picture = models.ImageField(upload_to='users/pictures', blank=True,
null=True)
is_authenticated = models.BooleanField('user is autheticated', default=
False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.user.username
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User(AbstractUser):
"""User model"""
email = models.EmailField('email address', unique=True, error_messages=
{'unique': 'A user with that email already exists'})
phone_regex = RegexValidator(regex='\\+?1?\\d{9,15}$', message=
'Phone number must be entered in the right format')
phone_number = models.CharField(validators=[phone_regex], max_length=17)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username', 'first_name', 'last_name', 'phone_number']
def __str__(self):
return self.username
class Profile(models.Model):
"""Profile model"""
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.
CASCADE, primary_key=True)
description = models.TextField('user description', max_length=255)
picture = models.ImageField(upload_to='users/pictures', blank=True,
null=True)
is_authenticated = models.BooleanField('user is autheticated', default=
False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.user.username
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from django.conf import settings
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.core.validators import RegexValidator
class User(AbstractUser):
"""User model"""
email = models.EmailField('email address', unique=True, error_messages=
{'unique': 'A user with that email already exists'})
phone_regex = RegexValidator(regex='\\+?1?\\d{9,15}$', message=
'Phone number must be entered in the right format')
phone_number = models.CharField(validators=[phone_regex], max_length=17)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username', 'first_name', 'last_name', 'phone_number']
def __str__(self):
return self.username
class Profile(models.Model):
"""Profile model"""
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.
CASCADE, primary_key=True)
description = models.TextField('user description', max_length=255)
picture = models.ImageField(upload_to='users/pictures', blank=True,
null=True)
is_authenticated = models.BooleanField('user is autheticated', default=
False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.user.username
<|reserved_special_token_1|>
"""
Users model
"""
# Django
from django.conf import settings
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.core.validators import RegexValidator
class User(AbstractUser):
    """User model.

    Extends Django's AbstractUser: the email address becomes the login
    identifier and a validated phone number is added.
    """

    # Email doubles as the login identifier (see USERNAME_FIELD below),
    # so it must be unique across users.
    email = models.EmailField(
        'email address',
        unique=True,
        error_messages={
            'unique': 'A user with that email already exists'
        }
    )

    # E.164-style number: optional '+', optional leading '1', 9-15 digits.
    phone_regex = RegexValidator(
        regex=r'\+?1?\d{9,15}$',
        message='Phone number must be entered in the right format'
    )
    # max_length=17 leaves room for '+' plus up to 16 digits.
    phone_number = models.CharField(
        validators=[phone_regex],
        max_length=17
    )

    # Authenticate with email instead of username; the listed fields are
    # still prompted for by createsuperuser.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username', 'first_name', 'last_name', 'phone_number']

    def __str__(self):
        # Human-readable identifier used in the admin and in logs.
        return self.username
class Profile(models.Model):
    """Profile model.

    One-to-one extension of the user with description, picture and a
    verification flag, plus created/updated timestamps.
    """

    # One profile per user, sharing the user's primary key; deleting the
    # user cascades to the profile.
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, primary_key=True)
    description = models.TextField('user description', max_length=255)
    # Optional avatar; stored under MEDIA_ROOT/users/pictures.
    picture = models.ImageField(
        upload_to='users/pictures',
        blank=True,
        null=True
    )
    # NOTE(review): app-level verification flag, distinct from Django's
    # request.user.is_authenticated — presumably set once the account is
    # verified; confirm against the verification flow.  The verbose name
    # has a typo ('autheticated') but changing it would alter behavior.
    is_authenticated = models.BooleanField('user is autheticated', default=False)

    # created_at is set once on insert; updated_at refreshes on every save.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.user.username
|
flexible
|
{
"blob_id": "360813a573f672e3ec380da4237a6e131dbcb7e6",
"index": 2345,
"step-1": "<mask token>\n\n\nclass User(AbstractUser):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Profile(models.Model):\n \"\"\"Profile model\"\"\"\n user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.\n CASCADE, primary_key=True)\n description = models.TextField('user description', max_length=255)\n picture = models.ImageField(upload_to='users/pictures', blank=True,\n null=True)\n is_authenticated = models.BooleanField('user is autheticated', default=\n False)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.user.username\n",
"step-2": "<mask token>\n\n\nclass User(AbstractUser):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.username\n\n\nclass Profile(models.Model):\n \"\"\"Profile model\"\"\"\n user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.\n CASCADE, primary_key=True)\n description = models.TextField('user description', max_length=255)\n picture = models.ImageField(upload_to='users/pictures', blank=True,\n null=True)\n is_authenticated = models.BooleanField('user is autheticated', default=\n False)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.user.username\n",
"step-3": "<mask token>\n\n\nclass User(AbstractUser):\n \"\"\"User model\"\"\"\n email = models.EmailField('email address', unique=True, error_messages=\n {'unique': 'A user with that email already exists'})\n phone_regex = RegexValidator(regex='\\\\+?1?\\\\d{9,15}$', message=\n 'Phone number must be entered in the right format')\n phone_number = models.CharField(validators=[phone_regex], max_length=17)\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = ['username', 'first_name', 'last_name', 'phone_number']\n\n def __str__(self):\n return self.username\n\n\nclass Profile(models.Model):\n \"\"\"Profile model\"\"\"\n user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.\n CASCADE, primary_key=True)\n description = models.TextField('user description', max_length=255)\n picture = models.ImageField(upload_to='users/pictures', blank=True,\n null=True)\n is_authenticated = models.BooleanField('user is autheticated', default=\n False)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.user.username\n",
"step-4": "<mask token>\nfrom django.conf import settings\nfrom django.db import models\nfrom django.contrib.auth.models import AbstractUser\nfrom django.core.validators import RegexValidator\n\n\nclass User(AbstractUser):\n \"\"\"User model\"\"\"\n email = models.EmailField('email address', unique=True, error_messages=\n {'unique': 'A user with that email already exists'})\n phone_regex = RegexValidator(regex='\\\\+?1?\\\\d{9,15}$', message=\n 'Phone number must be entered in the right format')\n phone_number = models.CharField(validators=[phone_regex], max_length=17)\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = ['username', 'first_name', 'last_name', 'phone_number']\n\n def __str__(self):\n return self.username\n\n\nclass Profile(models.Model):\n \"\"\"Profile model\"\"\"\n user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.\n CASCADE, primary_key=True)\n description = models.TextField('user description', max_length=255)\n picture = models.ImageField(upload_to='users/pictures', blank=True,\n null=True)\n is_authenticated = models.BooleanField('user is autheticated', default=\n False)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.user.username\n",
"step-5": "\"\"\"\r\nUsers model\r\n\"\"\"\r\n\r\n# Django\r\nfrom django.conf import settings\r\nfrom django.db import models\r\nfrom django.contrib.auth.models import AbstractUser\r\nfrom django.core.validators import RegexValidator\r\n\r\nclass User(AbstractUser):\r\n \"\"\"User model\"\"\"\r\n\r\n email = models.EmailField(\r\n 'email address',\r\n unique=True,\r\n error_messages={\r\n 'unique': 'A user with that email already exists'\r\n }\r\n )\r\n\r\n phone_regex = RegexValidator(\r\n regex=r'\\+?1?\\d{9,15}$',\r\n message='Phone number must be entered in the right format'\r\n )\r\n phone_number = models.CharField(\r\n validators=[phone_regex],\r\n max_length=17\r\n )\r\n\r\n USERNAME_FIELD = 'email'\r\n REQUIRED_FIELDS = ['username', 'first_name', 'last_name', 'phone_number']\r\n\r\n def __str__(self):\r\n return self.username\r\n\r\nclass Profile(models.Model):\r\n \"\"\"Profile model\"\"\"\r\n\r\n user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, primary_key=True)\r\n description = models.TextField('user description', max_length=255)\r\n picture = models.ImageField(\r\n upload_to='users/pictures',\r\n blank=True,\r\n null=True\r\n )\r\n is_authenticated = models.BooleanField('user is autheticated', default=False)\r\n\r\n created_at = models.DateTimeField(auto_now_add=True)\r\n updated_at = models.DateTimeField(auto_now=True)\r\n\r\n def __str__(self):\r\n return self.user.username\r\n \r\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
from rest_framework import serializers
from .models import Good, Favorite, Comment
class GoodSerializer(serializers.ModelSerializer):
    """Serializes a Good ('like') as its user, article and creation time."""
    class Meta:
        model = Good
        fields = ('user', 'article', 'created_at')
class FavoriteSerializer(serializers.ModelSerializer):
    """Serializes a Favorite (bookmark) as its user, article and creation time."""
    class Meta:
        model = Favorite
        fields = ('user', 'article', 'created_at')
class CommentSerializer(serializers.ModelSerializer):
    """Serializes a Comment: its text, optional image, author, article and time."""
    class Meta:
        model = Comment
        fields = ('text', 'image', 'user', 'article', 'created_at')
|
normal
|
{
"blob_id": "fc8b9029955de6b11cbfe8e24107c687f49685c1",
"index": 9179,
"step-1": "<mask token>\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Comment\n fields = 'text', 'image', 'user', 'article', 'created_at'\n",
"step-2": "<mask token>\n\n\nclass FavoriteSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Favorite\n fields = 'user', 'article', 'created_at'\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Comment\n fields = 'text', 'image', 'user', 'article', 'created_at'\n",
"step-3": "<mask token>\n\n\nclass GoodSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Good\n fields = 'user', 'article', 'created_at'\n\n\nclass FavoriteSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Favorite\n fields = 'user', 'article', 'created_at'\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Comment\n fields = 'text', 'image', 'user', 'article', 'created_at'\n",
"step-4": "from rest_framework import serializers\nfrom .models import Good, Favorite, Comment\n\n\nclass GoodSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Good\n fields = 'user', 'article', 'created_at'\n\n\nclass FavoriteSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Favorite\n fields = 'user', 'article', 'created_at'\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Comment\n fields = 'text', 'image', 'user', 'article', 'created_at'\n",
"step-5": "from rest_framework import serializers\n\nfrom .models import Good, Favorite, Comment\n\n\nclass GoodSerializer(serializers.ModelSerializer):\n class Meta:\n model = Good\n fields = ('user', 'article', 'created_at')\n\n\nclass FavoriteSerializer(serializers.ModelSerializer):\n class Meta:\n model = Favorite\n fields = ('user', 'article', 'created_at')\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n class Meta:\n model = Comment\n fields = ('text', 'image', 'user', 'article', 'created_at')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Post(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return '{}'.format(self.title)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Post(models.Model):
<|reserved_special_token_0|>
author = models.ForeignKey('Provider', on_delete=models.CASCADE)
title = models.CharField(max_length=255, null=False)
content = models.TextField(null=False)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return '{}'.format(self.title)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Post(models.Model):
"""
The education post by provider database model
"""
author = models.ForeignKey('Provider', on_delete=models.CASCADE)
title = models.CharField(max_length=255, null=False)
content = models.TextField(null=False)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return '{}'.format(self.title)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from django.db import models
class Post(models.Model):
"""
The education post by provider database model
"""
author = models.ForeignKey('Provider', on_delete=models.CASCADE)
title = models.CharField(max_length=255, null=False)
content = models.TextField(null=False)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return '{}'.format(self.title)
<|reserved_special_token_1|>
'''
This file creates the model of Post, which maps to the post table in the mysql database.
The model Provider contains four attributes: author, title, content, and created time.
'''
from django.db import models
class Post(models.Model):
    '''
    The education post by provider database model
    '''
    # Provider who authored the post; deleting the provider deletes their posts.
    author = models.ForeignKey('Provider', on_delete=models.CASCADE)
    title = models.CharField(max_length=255, null=False)
    content = models.TextField(null=False)
    # Set once, when the row is first inserted.
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        # Display posts by their title in the admin and shell output.
        return "{}".format(self.title)
|
flexible
|
{
"blob_id": "4fa9c00a07c8263a6a3afd460b84f21637a771ec",
"index": 3081,
"step-1": "<mask token>\n\n\nclass Post(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return '{}'.format(self.title)\n",
"step-2": "<mask token>\n\n\nclass Post(models.Model):\n <mask token>\n author = models.ForeignKey('Provider', on_delete=models.CASCADE)\n title = models.CharField(max_length=255, null=False)\n content = models.TextField(null=False)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return '{}'.format(self.title)\n",
"step-3": "<mask token>\n\n\nclass Post(models.Model):\n \"\"\"\n The education post by provider database model\n \"\"\"\n author = models.ForeignKey('Provider', on_delete=models.CASCADE)\n title = models.CharField(max_length=255, null=False)\n content = models.TextField(null=False)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return '{}'.format(self.title)\n",
"step-4": "<mask token>\nfrom django.db import models\n\n\nclass Post(models.Model):\n \"\"\"\n The education post by provider database model\n \"\"\"\n author = models.ForeignKey('Provider', on_delete=models.CASCADE)\n title = models.CharField(max_length=255, null=False)\n content = models.TextField(null=False)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return '{}'.format(self.title)\n",
"step-5": "\n'''\nThis file creates the model of Post, which maps to the post table in the mysql database. \nThe model Provider contains four attributes: author, title, content, and created time. \n'''\nfrom django.db import models\n\nclass Post(models.Model):\n '''\n The education post by provider database model\n '''\n author = models.ForeignKey('Provider', on_delete=models.CASCADE)\n title = models.CharField(max_length=255, null=False)\n content = models.TextField(null=False)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return \"{}\".format(self.title)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from django.contrib import admin
from .models import StoreId
# Register your models here.
class StoreIdAdmin(admin.ModelAdmin):
    """Django admin options for the StoreId model.

    NOTE(review): judging by the field names, each StoreId row links a
    user to accounts on Korean online book stores (Aladin, Yes24,
    Ridibooks) with a validity period -- confirm against the model.
    """
    # Columns shown on the admin change-list page.
    list_display = ('userid', 'aladin_id', 'yes24_id', 'ridibooks_id', 'start_date', 'end_date')
    # Fields queried by the admin search box.
    search_fields = ['userid', 'aladin_id', 'yes24_id', 'ridibooks_id']
# Expose StoreId in the Django admin site using the options above.
admin.site.register(StoreId, StoreIdAdmin)
|
normal
|
{
"blob_id": "6475fd59ba2414ea9a174297a8d94e5a2e0a7d8f",
"index": 3783,
"step-1": "<mask token>\n\n\nclass StoreIdAdmin(admin.ModelAdmin):\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass StoreIdAdmin(admin.ModelAdmin):\n list_display = ('userid', 'aladin_id', 'yes24_id', 'ridibooks_id',\n 'start_date', 'end_date')\n search_fields = ['userid', 'aladin_id', 'yes24_id', 'ridibooks_id']\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass StoreIdAdmin(admin.ModelAdmin):\n list_display = ('userid', 'aladin_id', 'yes24_id', 'ridibooks_id',\n 'start_date', 'end_date')\n search_fields = ['userid', 'aladin_id', 'yes24_id', 'ridibooks_id']\n\n\nadmin.site.register(StoreId, StoreIdAdmin)\n",
"step-4": "from django.contrib import admin\nfrom .models import StoreId\n\n\nclass StoreIdAdmin(admin.ModelAdmin):\n list_display = ('userid', 'aladin_id', 'yes24_id', 'ridibooks_id',\n 'start_date', 'end_date')\n search_fields = ['userid', 'aladin_id', 'yes24_id', 'ridibooks_id']\n\n\nadmin.site.register(StoreId, StoreIdAdmin)\n",
"step-5": "from django.contrib import admin\nfrom .models import StoreId\n\n# Register your models here.\nclass StoreIdAdmin(admin.ModelAdmin):\n list_display = ('userid', 'aladin_id', 'yes24_id', 'ridibooks_id', 'start_date', 'end_date')\n search_fields = ['userid', 'aladin_id', 'yes24_id', 'ridibooks_id']\n\nadmin.site.register(StoreId, StoreIdAdmin)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import math
def sieve(limit):
    """Return a list of all primes strictly below ``limit``.

    Classic Sieve of Eratosthenes: O(limit log log limit) time,
    O(limit) memory.

    Fix: the original raised IndexError for ``limit < 2`` (it wrote
    ``a[0] = a[1] = 0`` into a list shorter than 2); there are no primes
    below 2, so an empty list is returned instead.
    """
    if limit < 2:
        return []
    primes = []
    flags = [1] * limit
    flags[0] = flags[1] = 0  # 0 and 1 are not prime
    for i in range(2, limit):
        if flags[i] == 0:
            continue
        primes.append(i)
        # Multiples below i*i were already crossed off by smaller primes.
        for j in range(i * i, limit, i):
            flags[j] = 0
    return primes
def is_square(x):
    # Integer square-root test; the 1e-9 epsilon guards against the
    # float sqrt landing just below the exact root.
    root = int(math.sqrt(x) + 1e-9)
    return root * root == x


N = 10 ** 6

primes = sieve(N)
prime_set = set(primes)

# Scan odd numbers for the first odd composite that cannot be written as
# a prime plus twice a square (Goldbach's other conjecture).
for candidate in range(9, N, 2):
    if candidate in prime_set:
        continue  # odd primes are not candidates
    decomposable = False
    for q in primes[1:]:  # primes[0] == 2 is skipped, as in the original scan
        if q > candidate:
            break
        if is_square((candidate - q) // 2):
            decomposable = True
            break
    if not decomposable:
        print(candidate)
        break
|
normal
|
{
"blob_id": "ff6dc347637a81c9f6a541775646b4901d719790",
"index": 9478,
"step-1": "<mask token>\n\n\ndef sieve(limit):\n ans = []\n a = [1] * limit\n a[0] = a[1] = 0\n for i in range(2, limit):\n if a[i] == 0:\n continue\n ans.append(i)\n for j in range(i * i, limit, i):\n a[j] = 0\n return ans\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef sieve(limit):\n ans = []\n a = [1] * limit\n a[0] = a[1] = 0\n for i in range(2, limit):\n if a[i] == 0:\n continue\n ans.append(i)\n for j in range(i * i, limit, i):\n a[j] = 0\n return ans\n\n\n<mask token>\nfor i in range(9, N, 2):\n if i in ps:\n continue\n found = False\n for j in p[1:]:\n if j > i:\n break\n q = (i - j) // 2\n if is_square(q):\n found = True\n break\n if not found:\n print(i)\n break\n",
"step-3": "<mask token>\n\n\ndef sieve(limit):\n ans = []\n a = [1] * limit\n a[0] = a[1] = 0\n for i in range(2, limit):\n if a[i] == 0:\n continue\n ans.append(i)\n for j in range(i * i, limit, i):\n a[j] = 0\n return ans\n\n\nis_square = lambda x: int(math.sqrt(x) + 1e-09) ** 2 == x\nN = 10 ** 6\np = sieve(N)\nps = set(p)\nfor i in range(9, N, 2):\n if i in ps:\n continue\n found = False\n for j in p[1:]:\n if j > i:\n break\n q = (i - j) // 2\n if is_square(q):\n found = True\n break\n if not found:\n print(i)\n break\n",
"step-4": "import math\n\n\ndef sieve(limit):\n ans = []\n a = [1] * limit\n a[0] = a[1] = 0\n for i in range(2, limit):\n if a[i] == 0:\n continue\n ans.append(i)\n for j in range(i * i, limit, i):\n a[j] = 0\n return ans\n\n\nis_square = lambda x: int(math.sqrt(x) + 1e-09) ** 2 == x\nN = 10 ** 6\np = sieve(N)\nps = set(p)\nfor i in range(9, N, 2):\n if i in ps:\n continue\n found = False\n for j in p[1:]:\n if j > i:\n break\n q = (i - j) // 2\n if is_square(q):\n found = True\n break\n if not found:\n print(i)\n break\n",
"step-5": "import math\n\ndef sieve(limit):\n ans = []\n a = [1] * limit\n a[0] = a[1] = 0\n for i in range(2, limit):\n if a[i] == 0:\n continue\n ans.append(i)\n for j in range(i*i, limit, i):\n a[j] = 0;\n return ans\n\nis_square = lambda x: int(math.sqrt(x) + 1e-9) ** 2 == x\n\nN = 10 ** 6\n\np = sieve(N)\nps = set(p)\nfor i in range(9, N, 2):\n if i in ps:\n continue\n found = False\n for j in p[1:]:\n if j > i:\n break\n q = (i - j) // 2\n if is_square(q):\n found = True\n break\n if not found:\n print(i)\n break\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class FormulaTemplate:
def __init__(self, vi, w, k, h, m, timeout=3000000):
self.k = k
self.h = h
self.m = m
self.w = w
self.vi = vi
n = len(vi)
self.n = n
self.aeij = [[Int('ae' + str(i) + str(j)) for j in range(n)] for i in
range(h)]
self.bi = [Int('b' + str(i)) for i in range(h)]
self.amij = [[Int('am' + str(i) + str(j)) for j in range(n)] for i in
range(m)]
self.ei = [Int('e' + str(i)) for i in range(m)]
self.ci = [Int('c' + str(i)) for i in range(m)]
self.heij = [[Bool('h_e' + str(j) + str(i)) for i in range(h)] for
j in range(k)]
self.hgeij = [[Bool('h_ge' + str(j) + str(i)) for i in range(h)] for
j in range(k)]
self.hleij = [[Bool('h_le' + str(j) + str(i)) for i in range(h)] for
j in range(k)]
self.tij = [[Bool('t' + str(j) + str(i)) for i in range(m)] for j in
range(k)]
self.ntij = [[Bool('nt' + str(j) + str(i)) for i in range(m)] for j in
range(k)]
self.s = Solver()
for i in range(h):
self.s.add(Or(*[(a > 0) for a in self.aeij[i]]))
for j in range(i + 1, h):
self.s.add(Or(*[(self.aeij[i][w] != self.aeij[j][w]) for w in
range(n)]))
for i in range(m):
self.s.add(Or(*[(am > 0) for am in self.amij[i]]))
self.s.add(*[And(0 <= am, am < self.ei[i]) for am in self.amij[i]])
self.s.add(*[And(self.ei[i] > self.ci[i], self.ci[i] >= 0) for i in
range(m)])
self.s.add(*[And(e <= 10 * m, e >= 2) for e in self.ei])
for i in range(k):
for j in range(i + 1, k):
all_true = [And(self.heij[i][w], self.hgeij[i][w], self.
hleij[i][w]) for w in range(h)]
all_true.extend([And(self.tij[i][w], self.ntij[i][w]) for w in
range(m)])
struct_const = [Or(self.heij[i][w] != self.heij[j][w], self
.hgeij[i][w] != self.hgeij[j][w], self.hleij[i][w] !=
self.hleij[j][w]) for w in range(h)]
struct_const.extend([Or(self.tij[i][w] != self.tij[j][w],
self.ntij[i][w] != self.ntij[j][w]) for w in range(m)])
self.s.add(Or(*struct_const, *all_true))
self.s.set('timeout', timeout)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def W_size(m):
return m + 2
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def formula_model(self, *val):
if len(val) == 0:
val = self.vi
formu = []
for k in range(self.k):
clause = []
for h in range(self.h):
Coe = combine(self.A[h][j] * val[j] for j in range(self.n))
status = self.He[k][h], self.Hge[k][h], self.Hle[k][h]
if status == (False, False, True):
clause.append(Coe <= self.B[h])
elif status == (False, True, False):
clause.append(Coe >= self.B[h])
elif status == (True, False, False):
clause.append(Coe != self.B[h])
elif status == (False, True, True):
clause.append(Coe == self.B[h])
elif status == (True, False, True):
clause.append(Coe < self.B[h])
elif status == (True, True, False):
clause.append(Coe > self.B[h])
elif status == (True, True, True):
clause.append(False)
for m in range(self.m):
status = self.T[k][m], self.Nt[k][m]
if status == (True, False):
clause.append(combine(self.M[m][j] * val[j] for j in
range(self.n)) % self.E[m] == self.C[m])
elif status == (False, True):
clause.append(combine(self.M[m][j] * val[j] for j in
range(self.n)) % self.E[m] != self.C[m])
elif status == (True, True):
clause.append(False)
formu.append(And(*clause))
return simplify(Or(*formu))
def refine_modu(self, coe, e, b, res, tmp, last=0):
if len(coe) == 1:
if coe[0] == 0:
if last % e == b:
tmp.append(0)
else:
return
for i in range(e):
if (i + last) % e == b:
tmp.append(i)
break
res.append(list(tmp))
tmp.pop()
elif coe[0] == 0:
tmp.append(0)
self.refine_modu(coe[1:], e, b, res, tmp, last)
tmp.pop()
else:
for i in range(e):
tmp.append(i)
self.refine_modu(coe[1:], e, b, res, tmp, last + i)
tmp.pop()
def build_formula(self, coe, V, e, C):
expr = And(*[(coe[i] * v % e == C[i]) for i, v in enumerate(V)])
return simplify(expr)
<|reserved_special_token_0|>
class EquTemplate:
def __init__(self, n):
self.vi = [Int('v' + str(i)) for i in range(n)]
self.b = Int('b')
self.s = Solver()
def add(self, vector):
vi, target = vector[:-1], vector[-1]
expr = combine(vi[i] * self.vi[i] for i in range(len(self.vi))
) + self.b == target
self.s.add(expr)
def check(self):
return self.s.check()
def solve_model(self):
model = self.s.model()
V = [(model[v].as_long() if model[v] is not None else 0) for v in
self.vi]
B = model[self.b].as_long() if model[self.b] is not None else 0
expr = combine(V[i] * self.vi[i] for i in range(len(self.vi))) + B
return simplify(expr)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FormulaTemplate:
def __init__(self, vi, w, k, h, m, timeout=3000000):
self.k = k
self.h = h
self.m = m
self.w = w
self.vi = vi
n = len(vi)
self.n = n
self.aeij = [[Int('ae' + str(i) + str(j)) for j in range(n)] for i in
range(h)]
self.bi = [Int('b' + str(i)) for i in range(h)]
self.amij = [[Int('am' + str(i) + str(j)) for j in range(n)] for i in
range(m)]
self.ei = [Int('e' + str(i)) for i in range(m)]
self.ci = [Int('c' + str(i)) for i in range(m)]
self.heij = [[Bool('h_e' + str(j) + str(i)) for i in range(h)] for
j in range(k)]
self.hgeij = [[Bool('h_ge' + str(j) + str(i)) for i in range(h)] for
j in range(k)]
self.hleij = [[Bool('h_le' + str(j) + str(i)) for i in range(h)] for
j in range(k)]
self.tij = [[Bool('t' + str(j) + str(i)) for i in range(m)] for j in
range(k)]
self.ntij = [[Bool('nt' + str(j) + str(i)) for i in range(m)] for j in
range(k)]
self.s = Solver()
for i in range(h):
self.s.add(Or(*[(a > 0) for a in self.aeij[i]]))
for j in range(i + 1, h):
self.s.add(Or(*[(self.aeij[i][w] != self.aeij[j][w]) for w in
range(n)]))
for i in range(m):
self.s.add(Or(*[(am > 0) for am in self.amij[i]]))
self.s.add(*[And(0 <= am, am < self.ei[i]) for am in self.amij[i]])
self.s.add(*[And(self.ei[i] > self.ci[i], self.ci[i] >= 0) for i in
range(m)])
self.s.add(*[And(e <= 10 * m, e >= 2) for e in self.ei])
for i in range(k):
for j in range(i + 1, k):
all_true = [And(self.heij[i][w], self.hgeij[i][w], self.
hleij[i][w]) for w in range(h)]
all_true.extend([And(self.tij[i][w], self.ntij[i][w]) for w in
range(m)])
struct_const = [Or(self.heij[i][w] != self.heij[j][w], self
.hgeij[i][w] != self.hgeij[j][w], self.hleij[i][w] !=
self.hleij[j][w]) for w in range(h)]
struct_const.extend([Or(self.tij[i][w] != self.tij[j][w],
self.ntij[i][w] != self.ntij[j][w]) for w in range(m)])
self.s.add(Or(*struct_const, *all_true))
self.s.set('timeout', timeout)
def add(self, example, label):
self.s.add(self.encoding(example, label))
def check(self):
check = self.s.check()
if check == sat:
self.solve_model()
return check
def W_size(m):
return m + 2
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def formula_model(self, *val):
if len(val) == 0:
val = self.vi
formu = []
for k in range(self.k):
clause = []
for h in range(self.h):
Coe = combine(self.A[h][j] * val[j] for j in range(self.n))
status = self.He[k][h], self.Hge[k][h], self.Hle[k][h]
if status == (False, False, True):
clause.append(Coe <= self.B[h])
elif status == (False, True, False):
clause.append(Coe >= self.B[h])
elif status == (True, False, False):
clause.append(Coe != self.B[h])
elif status == (False, True, True):
clause.append(Coe == self.B[h])
elif status == (True, False, True):
clause.append(Coe < self.B[h])
elif status == (True, True, False):
clause.append(Coe > self.B[h])
elif status == (True, True, True):
clause.append(False)
for m in range(self.m):
status = self.T[k][m], self.Nt[k][m]
if status == (True, False):
clause.append(combine(self.M[m][j] * val[j] for j in
range(self.n)) % self.E[m] == self.C[m])
elif status == (False, True):
clause.append(combine(self.M[m][j] * val[j] for j in
range(self.n)) % self.E[m] != self.C[m])
elif status == (True, True):
clause.append(False)
formu.append(And(*clause))
return simplify(Or(*formu))
def refine_modu(self, coe, e, b, res, tmp, last=0):
if len(coe) == 1:
if coe[0] == 0:
if last % e == b:
tmp.append(0)
else:
return
for i in range(e):
if (i + last) % e == b:
tmp.append(i)
break
res.append(list(tmp))
tmp.pop()
elif coe[0] == 0:
tmp.append(0)
self.refine_modu(coe[1:], e, b, res, tmp, last)
tmp.pop()
else:
for i in range(e):
tmp.append(i)
self.refine_modu(coe[1:], e, b, res, tmp, last + i)
tmp.pop()
def build_formula(self, coe, V, e, C):
expr = And(*[(coe[i] * v % e == C[i]) for i, v in enumerate(V)])
return simplify(expr)
def refine_model(self):
formu_arr = []
for k in range(self.k):
clause = []
for h in range(self.h):
Coe = combine(self.A[h][j] * self.vi[j] for j in range(self.n))
status = self.He[k][h], self.Hge[k][h], self.Hle[k][h]
if status == (False, False, True):
clause.append([Coe < self.B[h], Coe == self.B[h]])
elif status == (False, True, False):
clause.append([Coe > self.B[h], Coe == self.B[h]])
elif status == (True, False, False):
clause.append([Coe < self.B[h], Coe > self.B[h]])
elif status == (False, True, True):
clause.append([Coe == self.B[h]])
elif status == (True, False, True):
clause.append([Coe < self.B[h]])
elif status == (True, True, False):
clause.append([Coe > self.B[h]])
elif status == (True, True, True):
clause.append([False])
for m in range(self.m):
status = self.T[k][m], self.Nt[k][m]
if status == (True, False):
mod_res = []
self.refine_modu(self.M[m], self.E[m], self.C[m],
mod_res, [])
for C in mod_res:
clause.append([self.build_formula(self.M[m], self.
vi, self.E[m], C)])
elif status == (False, True):
mod_clause = []
for i in range(self.E[m]):
if i != self.C[m]:
mod_res = []
self.refine_modu(self.M[m], self.E[m], i,
mod_res, [])
for C in mod_res:
mod_clause.append(self.build_formula(self.M
[m], self.vi, self.E[m], C))
clause.append(mod_clause)
elif status == (True, True):
clause.append([False])
formu_arr.append(clause)
return formu_arr
class EquTemplate:
def __init__(self, n):
self.vi = [Int('v' + str(i)) for i in range(n)]
self.b = Int('b')
self.s = Solver()
def add(self, vector):
vi, target = vector[:-1], vector[-1]
expr = combine(vi[i] * self.vi[i] for i in range(len(self.vi))
) + self.b == target
self.s.add(expr)
def check(self):
return self.s.check()
def solve_model(self):
model = self.s.model()
V = [(model[v].as_long() if model[v] is not None else 0) for v in
self.vi]
B = model[self.b].as_long() if model[self.b] is not None else 0
expr = combine(V[i] * self.vi[i] for i in range(len(self.vi))) + B
return simplify(expr)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FormulaTemplate:
def __init__(self, vi, w, k, h, m, timeout=3000000):
self.k = k
self.h = h
self.m = m
self.w = w
self.vi = vi
n = len(vi)
self.n = n
self.aeij = [[Int('ae' + str(i) + str(j)) for j in range(n)] for i in
range(h)]
self.bi = [Int('b' + str(i)) for i in range(h)]
self.amij = [[Int('am' + str(i) + str(j)) for j in range(n)] for i in
range(m)]
self.ei = [Int('e' + str(i)) for i in range(m)]
self.ci = [Int('c' + str(i)) for i in range(m)]
self.heij = [[Bool('h_e' + str(j) + str(i)) for i in range(h)] for
j in range(k)]
self.hgeij = [[Bool('h_ge' + str(j) + str(i)) for i in range(h)] for
j in range(k)]
self.hleij = [[Bool('h_le' + str(j) + str(i)) for i in range(h)] for
j in range(k)]
self.tij = [[Bool('t' + str(j) + str(i)) for i in range(m)] for j in
range(k)]
self.ntij = [[Bool('nt' + str(j) + str(i)) for i in range(m)] for j in
range(k)]
self.s = Solver()
for i in range(h):
self.s.add(Or(*[(a > 0) for a in self.aeij[i]]))
for j in range(i + 1, h):
self.s.add(Or(*[(self.aeij[i][w] != self.aeij[j][w]) for w in
range(n)]))
for i in range(m):
self.s.add(Or(*[(am > 0) for am in self.amij[i]]))
self.s.add(*[And(0 <= am, am < self.ei[i]) for am in self.amij[i]])
self.s.add(*[And(self.ei[i] > self.ci[i], self.ci[i] >= 0) for i in
range(m)])
self.s.add(*[And(e <= 10 * m, e >= 2) for e in self.ei])
for i in range(k):
for j in range(i + 1, k):
all_true = [And(self.heij[i][w], self.hgeij[i][w], self.
hleij[i][w]) for w in range(h)]
all_true.extend([And(self.tij[i][w], self.ntij[i][w]) for w in
range(m)])
struct_const = [Or(self.heij[i][w] != self.heij[j][w], self
.hgeij[i][w] != self.hgeij[j][w], self.hleij[i][w] !=
self.hleij[j][w]) for w in range(h)]
struct_const.extend([Or(self.tij[i][w] != self.tij[j][w],
self.ntij[i][w] != self.ntij[j][w]) for w in range(m)])
self.s.add(Or(*struct_const, *all_true))
self.s.set('timeout', timeout)
def add(self, example, label):
self.s.add(self.encoding(example, label))
def check(self):
check = self.s.check()
if check == sat:
self.solve_model()
return check
def W_size(m):
return m + 2
def encoding(self, example, label):
Equ = [(combine(example[j] * self.aeij[i][j] for j in range(self.n)
) != self.bi[i]) for i in range(self.h)]
Ge = [(combine(example[j] * self.aeij[i][j] for j in range(self.n)) >=
self.bi[i]) for i in range(self.h)]
Le = [(combine(example[j] * self.aeij[i][j] for j in range(self.n)) <=
self.bi[i]) for i in range(self.h)]
Me = [(combine(example[j] * self.amij[i][j] for j in range(self.n)) %
self.ei[i] == self.ci[i]) for i in range(self.m)]
Tk = []
for k in range(self.k):
clause = []
clause.extend([Implies(self.heij[k][h], Equ[h]) for h in range(
self.h)])
clause.extend([Implies(self.hgeij[k][h], Ge[h]) for h in range(
self.h)])
clause.extend([Implies(self.hleij[k][h], Le[h]) for h in range(
self.h)])
clause.extend([Implies(self.tij[k][m], Me[m]) for m in range(
self.m)])
clause.extend([Implies(self.ntij[k][m], Not(Me[m])) for m in
range(self.m)])
Tk.append(And(*clause))
return Or(*Tk) == label
<|reserved_special_token_0|>
def formula_model(self, *val):
if len(val) == 0:
val = self.vi
formu = []
for k in range(self.k):
clause = []
for h in range(self.h):
Coe = combine(self.A[h][j] * val[j] for j in range(self.n))
status = self.He[k][h], self.Hge[k][h], self.Hle[k][h]
if status == (False, False, True):
clause.append(Coe <= self.B[h])
elif status == (False, True, False):
clause.append(Coe >= self.B[h])
elif status == (True, False, False):
clause.append(Coe != self.B[h])
elif status == (False, True, True):
clause.append(Coe == self.B[h])
elif status == (True, False, True):
clause.append(Coe < self.B[h])
elif status == (True, True, False):
clause.append(Coe > self.B[h])
elif status == (True, True, True):
clause.append(False)
for m in range(self.m):
status = self.T[k][m], self.Nt[k][m]
if status == (True, False):
clause.append(combine(self.M[m][j] * val[j] for j in
range(self.n)) % self.E[m] == self.C[m])
elif status == (False, True):
clause.append(combine(self.M[m][j] * val[j] for j in
range(self.n)) % self.E[m] != self.C[m])
elif status == (True, True):
clause.append(False)
formu.append(And(*clause))
return simplify(Or(*formu))
def refine_modu(self, coe, e, b, res, tmp, last=0):
if len(coe) == 1:
if coe[0] == 0:
if last % e == b:
tmp.append(0)
else:
return
for i in range(e):
if (i + last) % e == b:
tmp.append(i)
break
res.append(list(tmp))
tmp.pop()
elif coe[0] == 0:
tmp.append(0)
self.refine_modu(coe[1:], e, b, res, tmp, last)
tmp.pop()
else:
for i in range(e):
tmp.append(i)
self.refine_modu(coe[1:], e, b, res, tmp, last + i)
tmp.pop()
def build_formula(self, coe, V, e, C):
expr = And(*[(coe[i] * v % e == C[i]) for i, v in enumerate(V)])
return simplify(expr)
def refine_model(self):
formu_arr = []
for k in range(self.k):
clause = []
for h in range(self.h):
Coe = combine(self.A[h][j] * self.vi[j] for j in range(self.n))
status = self.He[k][h], self.Hge[k][h], self.Hle[k][h]
if status == (False, False, True):
clause.append([Coe < self.B[h], Coe == self.B[h]])
elif status == (False, True, False):
clause.append([Coe > self.B[h], Coe == self.B[h]])
elif status == (True, False, False):
clause.append([Coe < self.B[h], Coe > self.B[h]])
elif status == (False, True, True):
clause.append([Coe == self.B[h]])
elif status == (True, False, True):
clause.append([Coe < self.B[h]])
elif status == (True, True, False):
clause.append([Coe > self.B[h]])
elif status == (True, True, True):
clause.append([False])
for m in range(self.m):
status = self.T[k][m], self.Nt[k][m]
if status == (True, False):
mod_res = []
self.refine_modu(self.M[m], self.E[m], self.C[m],
mod_res, [])
for C in mod_res:
clause.append([self.build_formula(self.M[m], self.
vi, self.E[m], C)])
elif status == (False, True):
mod_clause = []
for i in range(self.E[m]):
if i != self.C[m]:
mod_res = []
self.refine_modu(self.M[m], self.E[m], i,
mod_res, [])
for C in mod_res:
mod_clause.append(self.build_formula(self.M
[m], self.vi, self.E[m], C))
clause.append(mod_clause)
elif status == (True, True):
clause.append([False])
formu_arr.append(clause)
return formu_arr
class EquTemplate:
def __init__(self, n):
self.vi = [Int('v' + str(i)) for i in range(n)]
self.b = Int('b')
self.s = Solver()
def add(self, vector):
vi, target = vector[:-1], vector[-1]
expr = combine(vi[i] * self.vi[i] for i in range(len(self.vi))
) + self.b == target
self.s.add(expr)
def check(self):
return self.s.check()
def solve_model(self):
model = self.s.model()
V = [(model[v].as_long() if model[v] is not None else 0) for v in
self.vi]
B = model[self.b].as_long() if model[self.b] is not None else 0
expr = combine(V[i] * self.vi[i] for i in range(len(self.vi))) + B
return simplify(expr)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FormulaTemplate:
def __init__(self, vi, w, k, h, m, timeout=3000000):
self.k = k
self.h = h
self.m = m
self.w = w
self.vi = vi
n = len(vi)
self.n = n
self.aeij = [[Int('ae' + str(i) + str(j)) for j in range(n)] for i in
range(h)]
self.bi = [Int('b' + str(i)) for i in range(h)]
self.amij = [[Int('am' + str(i) + str(j)) for j in range(n)] for i in
range(m)]
self.ei = [Int('e' + str(i)) for i in range(m)]
self.ci = [Int('c' + str(i)) for i in range(m)]
self.heij = [[Bool('h_e' + str(j) + str(i)) for i in range(h)] for
j in range(k)]
self.hgeij = [[Bool('h_ge' + str(j) + str(i)) for i in range(h)] for
j in range(k)]
self.hleij = [[Bool('h_le' + str(j) + str(i)) for i in range(h)] for
j in range(k)]
self.tij = [[Bool('t' + str(j) + str(i)) for i in range(m)] for j in
range(k)]
self.ntij = [[Bool('nt' + str(j) + str(i)) for i in range(m)] for j in
range(k)]
self.s = Solver()
for i in range(h):
self.s.add(Or(*[(a > 0) for a in self.aeij[i]]))
for j in range(i + 1, h):
self.s.add(Or(*[(self.aeij[i][w] != self.aeij[j][w]) for w in
range(n)]))
for i in range(m):
self.s.add(Or(*[(am > 0) for am in self.amij[i]]))
self.s.add(*[And(0 <= am, am < self.ei[i]) for am in self.amij[i]])
self.s.add(*[And(self.ei[i] > self.ci[i], self.ci[i] >= 0) for i in
range(m)])
self.s.add(*[And(e <= 10 * m, e >= 2) for e in self.ei])
for i in range(k):
for j in range(i + 1, k):
all_true = [And(self.heij[i][w], self.hgeij[i][w], self.
hleij[i][w]) for w in range(h)]
all_true.extend([And(self.tij[i][w], self.ntij[i][w]) for w in
range(m)])
struct_const = [Or(self.heij[i][w] != self.heij[j][w], self
.hgeij[i][w] != self.hgeij[j][w], self.hleij[i][w] !=
self.hleij[j][w]) for w in range(h)]
struct_const.extend([Or(self.tij[i][w] != self.tij[j][w],
self.ntij[i][w] != self.ntij[j][w]) for w in range(m)])
self.s.add(Or(*struct_const, *all_true))
self.s.set('timeout', timeout)
def add(self, example, label):
self.s.add(self.encoding(example, label))
def check(self):
check = self.s.check()
if check == sat:
self.solve_model()
return check
def W_size(m):
return m + 2
def encoding(self, example, label):
Equ = [(combine(example[j] * self.aeij[i][j] for j in range(self.n)
) != self.bi[i]) for i in range(self.h)]
Ge = [(combine(example[j] * self.aeij[i][j] for j in range(self.n)) >=
self.bi[i]) for i in range(self.h)]
Le = [(combine(example[j] * self.aeij[i][j] for j in range(self.n)) <=
self.bi[i]) for i in range(self.h)]
Me = [(combine(example[j] * self.amij[i][j] for j in range(self.n)) %
self.ei[i] == self.ci[i]) for i in range(self.m)]
Tk = []
for k in range(self.k):
clause = []
clause.extend([Implies(self.heij[k][h], Equ[h]) for h in range(
self.h)])
clause.extend([Implies(self.hgeij[k][h], Ge[h]) for h in range(
self.h)])
clause.extend([Implies(self.hleij[k][h], Le[h]) for h in range(
self.h)])
clause.extend([Implies(self.tij[k][m], Me[m]) for m in range(
self.m)])
clause.extend([Implies(self.ntij[k][m], Not(Me[m])) for m in
range(self.m)])
Tk.append(And(*clause))
return Or(*Tk) == label
def solve_model(self):
print('w', self.w)
model = self.s.model()
self.M = [[(model[self.amij[i][j]].as_long() if model[self.amij[i][
j]] is not None else 0) for j in range(self.n)] for i in range(
self.m)]
for i in range(self.m):
self.ei[i] = FormulaTemplate.W_size(self.w)
self.E = [self.ei[i] for i in range(self.m)]
print('E = \n', self.E)
self.C = [(model[self.ci[i]].as_long() if model[self.ci[i]] is not
None else 0) for i in range(self.m)]
self.A = [[(model[self.aeij[i][j]].as_long() if model[self.aeij[i][
j]] is not None else 0) for j in range(self.n)] for i in range(
self.h)]
self.B = [(model[self.bi[i]].as_long() if model[self.bi[i]] is not
None else 0) for i in range(self.h)]
self.He = [[(bool(model[self.heij[i][j]]) if model[self.heij[i][j]]
is not None else False) for j in range(self.h)] for i in range
(self.k)]
self.Hge = [[(bool(model[self.hgeij[i][j]]) if model[self.hgeij[i][
j]] is not None else False) for j in range(self.h)] for i in
range(self.k)]
self.Hle = [[(bool(model[self.hleij[i][j]]) if model[self.hleij[i][
j]] is not None else False) for j in range(self.h)] for i in
range(self.k)]
self.T = [[(bool(model[self.tij[i][j]]) if model[self.tij[i][j]] is not
None else False) for j in range(self.m)] for i in range(self.k)]
self.Nt = [[(bool(model[self.ntij[i][j]]) if model[self.ntij[i][j]]
is not None else False) for j in range(self.m)] for i in range
(self.k)]
for i in range(self.m):
flag = True
pix = -1
for am in self.M[i]:
if pix == -1:
if am != 0:
pix = am
elif am != 0 and am != pix:
flag = False
break
if flag:
if self.C[i] == 0:
if not co_prime(pix, self.E[i]):
self.E[i] /= gcd(pix, self.E[i])
for j in range(self.n):
self.M[i][j] = 1
else:
div = gcd(pix, self.E[i], self.C[i])
self.E[i] /= div
self.C[i] /= div
pix /= div
for j in range(self.n):
self.M[i][j] /= div
div = gcd(int(pix), int(self.C[i]))
for j in range(self.n):
self.M[i][j] /= div
self.C[i] /= div
for i in range(self.h):
divisior = gcd(*self.A[i], self.B[i])
self.B[i] /= divisior
for j in range(self.n):
self.A[i][j] /= divisior
for i in range(len(self.E)):
self.E[i] = int(self.E[i])
def formula_model(self, *val):
if len(val) == 0:
val = self.vi
formu = []
for k in range(self.k):
clause = []
for h in range(self.h):
Coe = combine(self.A[h][j] * val[j] for j in range(self.n))
status = self.He[k][h], self.Hge[k][h], self.Hle[k][h]
if status == (False, False, True):
clause.append(Coe <= self.B[h])
elif status == (False, True, False):
clause.append(Coe >= self.B[h])
elif status == (True, False, False):
clause.append(Coe != self.B[h])
elif status == (False, True, True):
clause.append(Coe == self.B[h])
elif status == (True, False, True):
clause.append(Coe < self.B[h])
elif status == (True, True, False):
clause.append(Coe > self.B[h])
elif status == (True, True, True):
clause.append(False)
for m in range(self.m):
status = self.T[k][m], self.Nt[k][m]
if status == (True, False):
clause.append(combine(self.M[m][j] * val[j] for j in
range(self.n)) % self.E[m] == self.C[m])
elif status == (False, True):
clause.append(combine(self.M[m][j] * val[j] for j in
range(self.n)) % self.E[m] != self.C[m])
elif status == (True, True):
clause.append(False)
formu.append(And(*clause))
return simplify(Or(*formu))
def refine_modu(self, coe, e, b, res, tmp, last=0):
if len(coe) == 1:
if coe[0] == 0:
if last % e == b:
tmp.append(0)
else:
return
for i in range(e):
if (i + last) % e == b:
tmp.append(i)
break
res.append(list(tmp))
tmp.pop()
elif coe[0] == 0:
tmp.append(0)
self.refine_modu(coe[1:], e, b, res, tmp, last)
tmp.pop()
else:
for i in range(e):
tmp.append(i)
self.refine_modu(coe[1:], e, b, res, tmp, last + i)
tmp.pop()
def build_formula(self, coe, V, e, C):
expr = And(*[(coe[i] * v % e == C[i]) for i, v in enumerate(V)])
return simplify(expr)
def refine_model(self):
formu_arr = []
for k in range(self.k):
clause = []
for h in range(self.h):
Coe = combine(self.A[h][j] * self.vi[j] for j in range(self.n))
status = self.He[k][h], self.Hge[k][h], self.Hle[k][h]
if status == (False, False, True):
clause.append([Coe < self.B[h], Coe == self.B[h]])
elif status == (False, True, False):
clause.append([Coe > self.B[h], Coe == self.B[h]])
elif status == (True, False, False):
clause.append([Coe < self.B[h], Coe > self.B[h]])
elif status == (False, True, True):
clause.append([Coe == self.B[h]])
elif status == (True, False, True):
clause.append([Coe < self.B[h]])
elif status == (True, True, False):
clause.append([Coe > self.B[h]])
elif status == (True, True, True):
clause.append([False])
for m in range(self.m):
status = self.T[k][m], self.Nt[k][m]
if status == (True, False):
mod_res = []
self.refine_modu(self.M[m], self.E[m], self.C[m],
mod_res, [])
for C in mod_res:
clause.append([self.build_formula(self.M[m], self.
vi, self.E[m], C)])
elif status == (False, True):
mod_clause = []
for i in range(self.E[m]):
if i != self.C[m]:
mod_res = []
self.refine_modu(self.M[m], self.E[m], i,
mod_res, [])
for C in mod_res:
mod_clause.append(self.build_formula(self.M
[m], self.vi, self.E[m], C))
clause.append(mod_clause)
elif status == (True, True):
clause.append([False])
formu_arr.append(clause)
return formu_arr
class EquTemplate:
def __init__(self, n):
self.vi = [Int('v' + str(i)) for i in range(n)]
self.b = Int('b')
self.s = Solver()
def add(self, vector):
vi, target = vector[:-1], vector[-1]
expr = combine(vi[i] * self.vi[i] for i in range(len(self.vi))
) + self.b == target
self.s.add(expr)
def check(self):
return self.s.check()
def solve_model(self):
model = self.s.model()
V = [(model[v].as_long() if model[v] is not None else 0) for v in
self.vi]
B = model[self.b].as_long() if model[self.b] is not None else 0
expr = combine(V[i] * self.vi[i] for i in range(len(self.vi))) + B
return simplify(expr)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import random
from z3 import *
def combine(iter):
    """Fold an iterable into one value with repeated ``+=``.

    Unlike ``sum`` there is no implicit ``0`` seed, so this works for any
    operand type supporting ``+`` (z3 expressions, strings, ...).
    Raises IndexError if the iterable is empty.
    """
    items = list(iter)
    acc = items[0]
    for item in items[1:]:
        acc += item
    return acc
def co_prime(num1, num2):
    """Return True when num1 and num2 share no common divisor >= 2.

    Behaviour is kept compatible with the original trial-division scan:
    when ``min(num1, num2) < 2`` the original loop range was empty and it
    returned True vacuously (this covers 0, 1 and negative inputs), so
    the same shortcut is preserved here.  For the remaining cases
    Euclid's algorithm replaces the O(min(num1, num2)) scan.
    """
    if min(num1, num2) < 2:
        return True
    from math import gcd as _gcd  # local alias; the module defines its own gcd()
    return _gcd(num1, num2) == 1
def gcd(*nums):
    """Greatest common divisor of ``nums``, with 0 entries acting as neutral.

    Equivalent to the original downward trial-division scan for its
    intended inputs (negative values contribute their absolute value and
    the result is never below 1), but computed with Euclid's algorithm in
    O(len(nums) * log(max)) instead of O(min(|nums|)).

    Fix: the original returned the uninitialised sentinel ``1 << 32``
    when called with no arguments or with all-zero arguments; those
    degenerate cases now return 1, which is a safe no-op for the callers
    that divide coefficients by this value.
    """
    from math import gcd as _gcd  # local alias so this def keeps its name
    result = 0  # gcd(0, x) == |x|, so 0 is the identity for the fold
    for num in nums:
        result = _gcd(result, num)
    return result if result >= 2 else 1
class FormulaTemplate:
    """SMT-based template for learning a DNF predicate over integer variables.

    The target shape is a disjunction of at most k clauses.  Each clause is a
    conjunction of atoms chosen from:
      * up to h linear atoms   sum_j(ae_ij * v_j)  {!=, >=, <=}  b_i
        (combinations of the three selectors also yield ==, <, >), and
      * up to m modular atoms  sum_j(am_ij * v_j) % e_i == c_i  (or negated).
    Labeled points are encoded with add()/encoding(); once check() is sat,
    solve_model() extracts concrete values into A/B (linear part), M/E/C
    (modular part) and the per-clause selector matrices He/Hge/Hle/T/Nt.
    """

    def __init__(self, vi ,w ,k, h, m ,timeout=3000000):  # NOTE: `w` fixes the moduli (see solve_model)
        """Build the symbolic template and its structural constraints.

        vi: list of z3 Int variables the learned formula ranges over.
        w:  size hint; every modulus is later fixed to W_size(w) == w + 2.
        k:  max clauses;  h: max linear atoms;  m: max modular atoms.
        timeout: solver timeout in milliseconds.
        """
        self.k = k  # upper bound on the number of DNF clauses
        self.h = h  # upper bound on the number of linear (in)equality atoms
        self.m = m  # upper bound on the number of modular atoms
        self.w = w
        self.vi = vi
        n = len(vi)
        self.n = n
        # Unknown linear coefficients / bounds:  sum_j ae_ij * v_j  ?  b_i
        self.aeij = [[Int('ae' + str(i) + str(j)) for j in range(n)] for i in range(h)]
        self.bi = [Int('b' + str(i)) for i in range(h)]
        # Unknown modular coefficients / modulus / remainder:  sum_j am_ij * v_j % e_i == c_i
        self.amij = [[Int('am' + str(i) + str(j)) for j in range(n)] for i in range(m)]
        self.ei = [Int('e' + str(i)) for i in range(m)]  # symbolic here; overwritten with the fixed W_size(w) in solve_model
        self.ci = [Int('c' + str(i)) for i in range(m)]
        # Per-clause atom selectors: h_e = "use !=", h_ge = "use >=", h_le = "use <=",
        # t = "use modular atom", nt = "use negated modular atom".
        self.heij = [[Bool('h_e' + str(j) + str(i)) for i in range(h)] for j in range(k)]
        self.hgeij = [[Bool('h_ge' + str(j) + str(i)) for i in range(h)] for j in range(k)]
        self.hleij = [[Bool('h_le' + str(j) + str(i)) for i in range(h)] for j in range(k)]
        self.tij = [[Bool('t' + str(j) + str(i)) for i in range(m)] for j in range(k)]
        self.ntij = [[Bool('nt' + str(j) + str(i)) for i in range(m)] for j in range(k)]
        self.s = Solver()
        for i in range(h):
            # Each linear coefficient row must contain a positive entry ...
            self.s.add(Or(*[a > 0 for a in self.aeij[i]]))
            for j in range(i + 1, h):
                # ... and no two rows may be identical (avoids duplicate atoms).
                self.s.add(Or(*[self.aeij[i][w] != self.aeij[j][w] for w in range(n)]))
        for i in range(m):
            # Modular coefficients: at least one positive, each within [0, e_i).
            self.s.add(Or(*[am > 0 for am in self.amij[i]]))
            self.s.add(*[And(0 <= am, am < self.ei[i]) for am in self.amij[i]])
        # Remainders must satisfy 0 <= c_i < e_i.
        self.s.add(*[And(self.ei[i] > self.ci[i], self.ci[i] >= 0) for i in range(m)])
        # Moduli restricted to [2, 10*m].
        self.s.add(*[And(e <= 10 * m, e >= 2) for e in self.ei])
        for i in range(k):
            # Any two clauses must differ structurally, unless one is vacuously
            # False (all polarity selectors of some atom simultaneously true).
            for j in range(i + 1, k):
                all_true = [And(self.heij[i][w], self.hgeij[i][w], self.hleij[i][w]) for w in range(h)]
                all_true.extend([And(self.tij[i][w], self.ntij[i][w]) for w in range(m)])
                struct_const = [Or(self.heij[i][w] != self.heij[j][w],
                                   self.hgeij[i][w] != self.hgeij[j][w],
                                   self.hleij[i][w] != self.hleij[j][w]) for w in range(h)]
                struct_const.extend([Or(self.tij[i][w] != self.tij[j][w],
                                        self.ntij[i][w] != self.ntij[j][w]) for w in range(m)])
                self.s.add(Or(*struct_const, *all_true))
        self.s.set("timeout", timeout)

    def add(self, example, label):
        """Constrain the template to classify `example` (a point) as boolean `label`."""
        self.s.add(self.encoding(example, label))

    def check(self):
        """Run the solver; on sat, eagerly extract concrete values via solve_model()."""
        check = self.s.check()
        if check == sat:
            self.solve_model()
        return check

    def W_size(m):
        # Size hint -> fixed modulus value.  Deliberately has no `self`: it is
        # used as a plain function via FormulaTemplate.W_size(...).
        return m+2

    def encoding(self, example, label):
        """Return a z3 constraint asserting template(example) == label.

        Equ[i] is - despite its name - the *disequality* atom (!= b_i);
        combined with the Ge/Le selectors this also yields ==, <, >.
        """
        Equ = [combine(example[j] * self.aeij[i][j] for j in range(self.n)) != self.bi[i] for i in range(self.h)]
        Ge = [combine(example[j] * self.aeij[i][j] for j in range(self.n)) >= self.bi[i] for i in range(self.h)]
        Le = [combine(example[j] * self.aeij[i][j] for j in range(self.n)) <= self.bi[i] for i in range(self.h)]
        Me = [combine(example[j] * self.amij[i][j] for j in range(self.n)) % self.ei[i] == self.ci[i] for i in
              range(self.m)]
        Tk = []
        for k in range(self.k):
            clause = []
            # A selector being true forces the corresponding atom into clause k.
            clause.extend([Implies(self.heij[k][h], Equ[h]) for h in range(self.h)])
            clause.extend([Implies(self.hgeij[k][h], Ge[h]) for h in range(self.h)])
            clause.extend([Implies(self.hleij[k][h], Le[h]) for h in range(self.h)])
            clause.extend([Implies(self.tij[k][m], Me[m]) for m in range(self.m)])
            clause.extend([Implies(self.ntij[k][m], Not(Me[m])) for m in range(self.m)])
            Tk.append(And(*clause))
        return Or(*Tk) == label

    def solve_model(self):
        """Extract concrete template values from the current z3 model.

        Fills M/E/C (modular atoms), A/B (linear atoms) and the boolean
        selector matrices He/Hge/Hle/T/Nt, then normalizes coefficients by
        dividing out common factors.  Missing model entries default to 0/False.
        Call only after check() returned sat.
        """
        print("w", self.w)  # debug output
        model = self.s.model()
        self.M = [[model[self.amij[i][j]].as_long() if model[self.amij[i][j]] is not None else 0
                   for j in range(self.n)]
                  for i in range(self.m)]
        # The moduli are NOT read from the model: each one is fixed to
        # W_size(w) == w + 2, overwriting the symbolic self.ei entries.
        for i in range(self.m):
            self.ei[i] = FormulaTemplate.W_size(self.w)
        self.E = [self.ei[i] for i in range(self.m)]
        print("E = \n",self.E)  # debug output
        self.C = [model[self.ci[i]].as_long() if model[self.ci[i]] is not None else 0 for i in range(self.m)]
        self.A = [[model[self.aeij[i][j]].as_long() if model[self.aeij[i][j]] is not None else 0
                   for j in range(self.n)]
                  for i in range(self.h)]
        self.B = [model[self.bi[i]].as_long() if model[self.bi[i]] is not None else 0 for i in range(self.h)]
        self.He = [
            [bool(model[self.heij[i][j]]) if model[self.heij[i][j]] is not None else False
             for j in range(self.h)]
            for i in range(self.k)
        ]
        self.Hge = [
            [bool(model[self.hgeij[i][j]]) if model[self.hgeij[i][j]] is not None else False
             for j in range(self.h)]
            for i in range(self.k)
        ]
        self.Hle = [
            [bool(model[self.hleij[i][j]]) if model[self.hleij[i][j]] is not None else False
             for j in range(self.h)]
            for i in range(self.k)
        ]
        self.T = [
            [bool(model[self.tij[i][j]]) if model[self.tij[i][j]] is not None else False
             for j in range(self.m)]
            for i in range(self.k)
        ]
        self.Nt = [
            [bool(model[self.ntij[i][j]]) if model[self.ntij[i][j]] is not None else False
             for j in range(self.m)]
            for i in range(self.k)
        ]
        for i in range(self.m):
            flag = True  # do all non-zero modular coefficients share one value `pix`?
            pix = -1
            for am in self.M[i]:
                if pix == -1:
                    if am != 0:
                        pix = am
                elif am != 0 and am != pix:
                    flag = False
                    break
            if flag:  # uniform coefficients: reduce them against modulus/remainder
                if self.C[i] == 0:
                    if not co_prime(pix, self.E[i]):
                        self.E[i] /= gcd(pix, self.E[i])
                    for j in range(self.n):
                        self.M[i][j] = 1
                else:
                    div = gcd(pix, self.E[i], self.C[i])
                    self.E[i] /= div
                    self.C[i] /= div
                    pix /= div
                    for j in range(self.n):
                        self.M[i][j] /= div
                    div = gcd(int(pix), int(self.C[i]))
                    for j in range(self.n):
                        self.M[i][j] /= div
                    self.C[i] /= div
        for i in range(self.h):
            # Normalize each linear atom by the gcd of its coefficients and bound.
            divisior = gcd(*self.A[i], self.B[i])
            self.B[i] /= divisior
            for j in range(self.n):
                self.A[i][j] /= divisior
        for i in range(len(self.E)):
            # The `/=` operations above produced floats; restore integer moduli.
            self.E[i] = int(self.E[i])

    def formula_model(self, *val):
        """Instantiate the learned formula.

        With no arguments, substitutes the template's own variables (returning
        a symbolic formula); with concrete values, evaluates the formula at
        that point.  Selector triples (He, Hge, Hle) map to (!=, >=, <=), and
        their conjunctions give ==, <, >; a (T, Nt) = (True, True) atom is
        unsatisfiable and collapses to False.
        """
        if len(val) == 0:
            val = self.vi
        formu = []
        for k in range(self.k):
            clause = []
            for h in range(self.h):
                Coe = combine(self.A[h][j] * val[j] for j in range(self.n))
                status = (self.He[k][h], self.Hge[k][h], self.Hle[k][h])
                if status == (False, False, True):
                    clause.append(Coe <= self.B[h])
                elif status == (False, True, False):
                    clause.append(Coe >= self.B[h])
                elif status == (True, False, False):
                    clause.append(Coe != self.B[h])
                elif status == (False, True, True):
                    clause.append(Coe == self.B[h])
                elif status == (True, False, True):
                    clause.append(Coe < self.B[h])
                elif status == (True, True, False):
                    clause.append(Coe > self.B[h])
                elif status == (True, True, True):
                    clause.append(False)
            for m in range(self.m):
                status = (self.T[k][m], self.Nt[k][m])
                if status == (True, False):
                    clause.append(combine(self.M[m][j] * val[j] for j in range(self.n)) % self.E[m] == self.C[m])
                elif status == (False, True):
                    clause.append(combine(self.M[m][j] * val[j] for j in range(self.n)) % self.E[m] != self.C[m])
                elif status == (True, True):
                    clause.append(False)
            formu.append(And(*clause))
        return simplify(Or(*formu))

    def refine_modu(self, coe, e, b, res, tmp, last=0):
        """Enumerate per-variable remainder vectors whose sum is b modulo e.

        Recursively extends `tmp` (one remainder per coefficient in `coe`,
        always 0 for zero coefficients) and appends each complete assignment
        to `res`; `last` carries the running remainder sum.
        """
        if len(coe) == 1:
            if coe[0] == 0:
                if last % e == b:
                    tmp.append(0)
                    # NOTE(review): execution falls through into the loop
                    # below, which appends a second 0 (i == 0 matches), yet
                    # only one pop() follows -- tmp appears to keep a stray
                    # element afterwards.  Looks like a bug; confirm intent.
                else:
                    return
            for i in range(e):
                if (i + last) % e == b:
                    tmp.append(i)
                    break
            res.append(list(tmp))
            tmp.pop()
        elif coe[0] == 0:
            tmp.append(0)
            self.refine_modu(coe[1:], e, b, res, tmp, last)
            tmp.pop()
        else:
            for i in range(e):
                tmp.append(i)
                self.refine_modu(coe[1:], e, b, res, tmp, last + i)
                tmp.pop()

    def build_formula(self, coe, V, e, C):
        """Conjoin per-variable modular constraints (coe[i] * V[i]) % e == C[i]."""
        expr = And(*[(coe[i] * v) % e == C[i] for i, v in enumerate(V)])
        return simplify(expr)

    def refine_model(self):
        """Decompose the learned formula into per-clause lists of atom alternatives.

        For each clause, every atom becomes a list of mutually exclusive
        refinements (e.g. `<=` splits into `<` and `==`; a negated modular atom
        expands into all other remainders via refine_modu).  Returns a list
        (clauses) of lists (atoms) of lists of z3 expressions.
        """
        formu_arr = []
        for k in range(self.k):
            clause = []
            for h in range(self.h):
                Coe = combine(self.A[h][j] * self.vi[j] for j in range(self.n))
                status = (self.He[k][h], self.Hge[k][h], self.Hle[k][h])
                if status == (False, False, True):
                    clause.append([Coe < self.B[h], Coe == self.B[h]])
                elif status == (False, True, False):
                    clause.append([Coe > self.B[h], Coe == self.B[h]])
                elif status == (True, False, False):
                    clause.append([Coe < self.B[h], Coe > self.B[h]])
                elif status == (False, True, True):
                    clause.append([Coe == self.B[h]])
                elif status == (True, False, True):
                    clause.append([Coe < self.B[h]])
                elif status == (True, True, False):
                    clause.append([Coe > self.B[h]])
                elif status == (True, True, True):
                    clause.append([False])
            for m in range(self.m):
                status = (self.T[k][m], self.Nt[k][m])
                if status == (True, False):
                    mod_res = []
                    self.refine_modu(self.M[m], self.E[m], self.C[m], mod_res, [])
                    for C in mod_res:
                        clause.append([self.build_formula(self.M[m], self.vi, self.E[m], C)])
                elif status == (False, True):
                    mod_clause = []
                    for i in range(self.E[m]):
                        if i != self.C[m]:
                            mod_res = []
                            self.refine_modu(self.M[m], self.E[m], i, mod_res, [])
                            for C in mod_res:
                                mod_clause.append(self.build_formula(self.M[m], self.vi, self.E[m], C))
                    clause.append(mod_clause)
                elif status == (True, True):
                    clause.append([False])
            formu_arr.append(clause)
        return formu_arr
class EquTemplate:
    """Linear-equation fitter: find integer coefficients V and intercept B
    such that sum_i(V[i] * v_i) + B == target for every sample added."""

    def __init__(self, n):
        # One symbolic coefficient per variable position plus one intercept.
        self.vi = [Int('v' + str(i)) for i in range(n)]
        self.b = Int('b')
        self.s = Solver()

    def add(self, vector):
        """Add one sample; vector[:-1] are variable values, vector[-1] the target."""
        vi, target = vector[:-1], vector[-1]
        expr = combine(vi[i] * self.vi[i] for i in range(len(self.vi))) + self.b == target
        self.s.add(expr)

    def check(self):
        """Return the solver verdict (sat/unsat/unknown) for the samples so far."""
        return self.s.check()

    def solve_model(self):
        """Extract the fitted equation as a simplified z3 expression.

        Unconstrained coefficients default to 0.  Call only after check()
        returned sat.
        """
        model = self.s.model()
        V = [model[v].as_long() if model[v] is not None else 0 for v in self.vi]
        B = model[self.b].as_long() if model[self.b] is not None else 0
        expr = combine(V[i] * self.vi[i] for i in range(len(self.vi))) + B
        return simplify(expr)
if __name__ == '__main__':
    # Smoke test: fit the linear template sum(c_i * v_i) + b to three samples.
    # Each sample is (v0, v1, target); the system has the unique solution
    # c0 = -1, c1 = 1, b = 0, so the printed fit is -v0 + v1 (z3-simplified).
    template = EquTemplate(2)
    for sample in ([0, 1, 1], [1, 2, 1], [3, 6, 3]):
        template.add(sample)
    if template.check() == sat:
        print(template.solve_model())
    else:
        print(unsat)
|
flexible
|
{
"blob_id": "81fce5314a7611de11648e412151112e29271871",
"index": 4626,
"step-1": "<mask token>\n\n\nclass FormulaTemplate:\n\n def __init__(self, vi, w, k, h, m, timeout=3000000):\n self.k = k\n self.h = h\n self.m = m\n self.w = w\n self.vi = vi\n n = len(vi)\n self.n = n\n self.aeij = [[Int('ae' + str(i) + str(j)) for j in range(n)] for i in\n range(h)]\n self.bi = [Int('b' + str(i)) for i in range(h)]\n self.amij = [[Int('am' + str(i) + str(j)) for j in range(n)] for i in\n range(m)]\n self.ei = [Int('e' + str(i)) for i in range(m)]\n self.ci = [Int('c' + str(i)) for i in range(m)]\n self.heij = [[Bool('h_e' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.hgeij = [[Bool('h_ge' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.hleij = [[Bool('h_le' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.tij = [[Bool('t' + str(j) + str(i)) for i in range(m)] for j in\n range(k)]\n self.ntij = [[Bool('nt' + str(j) + str(i)) for i in range(m)] for j in\n range(k)]\n self.s = Solver()\n for i in range(h):\n self.s.add(Or(*[(a > 0) for a in self.aeij[i]]))\n for j in range(i + 1, h):\n self.s.add(Or(*[(self.aeij[i][w] != self.aeij[j][w]) for w in\n range(n)]))\n for i in range(m):\n self.s.add(Or(*[(am > 0) for am in self.amij[i]]))\n self.s.add(*[And(0 <= am, am < self.ei[i]) for am in self.amij[i]])\n self.s.add(*[And(self.ei[i] > self.ci[i], self.ci[i] >= 0) for i in\n range(m)])\n self.s.add(*[And(e <= 10 * m, e >= 2) for e in self.ei])\n for i in range(k):\n for j in range(i + 1, k):\n all_true = [And(self.heij[i][w], self.hgeij[i][w], self.\n hleij[i][w]) for w in range(h)]\n all_true.extend([And(self.tij[i][w], self.ntij[i][w]) for w in\n range(m)])\n struct_const = [Or(self.heij[i][w] != self.heij[j][w], self\n .hgeij[i][w] != self.hgeij[j][w], self.hleij[i][w] !=\n self.hleij[j][w]) for w in range(h)]\n struct_const.extend([Or(self.tij[i][w] != self.tij[j][w], \n self.ntij[i][w] != self.ntij[j][w]) for w in range(m)])\n self.s.add(Or(*struct_const, *all_true))\n self.s.set('timeout', 
timeout)\n <mask token>\n <mask token>\n\n def W_size(m):\n return m + 2\n <mask token>\n <mask token>\n\n def formula_model(self, *val):\n if len(val) == 0:\n val = self.vi\n formu = []\n for k in range(self.k):\n clause = []\n for h in range(self.h):\n Coe = combine(self.A[h][j] * val[j] for j in range(self.n))\n status = self.He[k][h], self.Hge[k][h], self.Hle[k][h]\n if status == (False, False, True):\n clause.append(Coe <= self.B[h])\n elif status == (False, True, False):\n clause.append(Coe >= self.B[h])\n elif status == (True, False, False):\n clause.append(Coe != self.B[h])\n elif status == (False, True, True):\n clause.append(Coe == self.B[h])\n elif status == (True, False, True):\n clause.append(Coe < self.B[h])\n elif status == (True, True, False):\n clause.append(Coe > self.B[h])\n elif status == (True, True, True):\n clause.append(False)\n for m in range(self.m):\n status = self.T[k][m], self.Nt[k][m]\n if status == (True, False):\n clause.append(combine(self.M[m][j] * val[j] for j in\n range(self.n)) % self.E[m] == self.C[m])\n elif status == (False, True):\n clause.append(combine(self.M[m][j] * val[j] for j in\n range(self.n)) % self.E[m] != self.C[m])\n elif status == (True, True):\n clause.append(False)\n formu.append(And(*clause))\n return simplify(Or(*formu))\n\n def refine_modu(self, coe, e, b, res, tmp, last=0):\n if len(coe) == 1:\n if coe[0] == 0:\n if last % e == b:\n tmp.append(0)\n else:\n return\n for i in range(e):\n if (i + last) % e == b:\n tmp.append(i)\n break\n res.append(list(tmp))\n tmp.pop()\n elif coe[0] == 0:\n tmp.append(0)\n self.refine_modu(coe[1:], e, b, res, tmp, last)\n tmp.pop()\n else:\n for i in range(e):\n tmp.append(i)\n self.refine_modu(coe[1:], e, b, res, tmp, last + i)\n tmp.pop()\n\n def build_formula(self, coe, V, e, C):\n expr = And(*[(coe[i] * v % e == C[i]) for i, v in enumerate(V)])\n return simplify(expr)\n <mask token>\n\n\nclass EquTemplate:\n\n def __init__(self, n):\n self.vi = [Int('v' + str(i)) for i 
in range(n)]\n self.b = Int('b')\n self.s = Solver()\n\n def add(self, vector):\n vi, target = vector[:-1], vector[-1]\n expr = combine(vi[i] * self.vi[i] for i in range(len(self.vi))\n ) + self.b == target\n self.s.add(expr)\n\n def check(self):\n return self.s.check()\n\n def solve_model(self):\n model = self.s.model()\n V = [(model[v].as_long() if model[v] is not None else 0) for v in\n self.vi]\n B = model[self.b].as_long() if model[self.b] is not None else 0\n expr = combine(V[i] * self.vi[i] for i in range(len(self.vi))) + B\n return simplify(expr)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass FormulaTemplate:\n\n def __init__(self, vi, w, k, h, m, timeout=3000000):\n self.k = k\n self.h = h\n self.m = m\n self.w = w\n self.vi = vi\n n = len(vi)\n self.n = n\n self.aeij = [[Int('ae' + str(i) + str(j)) for j in range(n)] for i in\n range(h)]\n self.bi = [Int('b' + str(i)) for i in range(h)]\n self.amij = [[Int('am' + str(i) + str(j)) for j in range(n)] for i in\n range(m)]\n self.ei = [Int('e' + str(i)) for i in range(m)]\n self.ci = [Int('c' + str(i)) for i in range(m)]\n self.heij = [[Bool('h_e' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.hgeij = [[Bool('h_ge' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.hleij = [[Bool('h_le' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.tij = [[Bool('t' + str(j) + str(i)) for i in range(m)] for j in\n range(k)]\n self.ntij = [[Bool('nt' + str(j) + str(i)) for i in range(m)] for j in\n range(k)]\n self.s = Solver()\n for i in range(h):\n self.s.add(Or(*[(a > 0) for a in self.aeij[i]]))\n for j in range(i + 1, h):\n self.s.add(Or(*[(self.aeij[i][w] != self.aeij[j][w]) for w in\n range(n)]))\n for i in range(m):\n self.s.add(Or(*[(am > 0) for am in self.amij[i]]))\n self.s.add(*[And(0 <= am, am < self.ei[i]) for am in self.amij[i]])\n self.s.add(*[And(self.ei[i] > self.ci[i], self.ci[i] >= 0) for i in\n range(m)])\n self.s.add(*[And(e <= 10 * m, e >= 2) for e in self.ei])\n for i in range(k):\n for j in range(i + 1, k):\n all_true = [And(self.heij[i][w], self.hgeij[i][w], self.\n hleij[i][w]) for w in range(h)]\n all_true.extend([And(self.tij[i][w], self.ntij[i][w]) for w in\n range(m)])\n struct_const = [Or(self.heij[i][w] != self.heij[j][w], self\n .hgeij[i][w] != self.hgeij[j][w], self.hleij[i][w] !=\n self.hleij[j][w]) for w in range(h)]\n struct_const.extend([Or(self.tij[i][w] != self.tij[j][w], \n self.ntij[i][w] != self.ntij[j][w]) for w in range(m)])\n self.s.add(Or(*struct_const, *all_true))\n self.s.set('timeout', 
timeout)\n\n def add(self, example, label):\n self.s.add(self.encoding(example, label))\n\n def check(self):\n check = self.s.check()\n if check == sat:\n self.solve_model()\n return check\n\n def W_size(m):\n return m + 2\n <mask token>\n <mask token>\n\n def formula_model(self, *val):\n if len(val) == 0:\n val = self.vi\n formu = []\n for k in range(self.k):\n clause = []\n for h in range(self.h):\n Coe = combine(self.A[h][j] * val[j] for j in range(self.n))\n status = self.He[k][h], self.Hge[k][h], self.Hle[k][h]\n if status == (False, False, True):\n clause.append(Coe <= self.B[h])\n elif status == (False, True, False):\n clause.append(Coe >= self.B[h])\n elif status == (True, False, False):\n clause.append(Coe != self.B[h])\n elif status == (False, True, True):\n clause.append(Coe == self.B[h])\n elif status == (True, False, True):\n clause.append(Coe < self.B[h])\n elif status == (True, True, False):\n clause.append(Coe > self.B[h])\n elif status == (True, True, True):\n clause.append(False)\n for m in range(self.m):\n status = self.T[k][m], self.Nt[k][m]\n if status == (True, False):\n clause.append(combine(self.M[m][j] * val[j] for j in\n range(self.n)) % self.E[m] == self.C[m])\n elif status == (False, True):\n clause.append(combine(self.M[m][j] * val[j] for j in\n range(self.n)) % self.E[m] != self.C[m])\n elif status == (True, True):\n clause.append(False)\n formu.append(And(*clause))\n return simplify(Or(*formu))\n\n def refine_modu(self, coe, e, b, res, tmp, last=0):\n if len(coe) == 1:\n if coe[0] == 0:\n if last % e == b:\n tmp.append(0)\n else:\n return\n for i in range(e):\n if (i + last) % e == b:\n tmp.append(i)\n break\n res.append(list(tmp))\n tmp.pop()\n elif coe[0] == 0:\n tmp.append(0)\n self.refine_modu(coe[1:], e, b, res, tmp, last)\n tmp.pop()\n else:\n for i in range(e):\n tmp.append(i)\n self.refine_modu(coe[1:], e, b, res, tmp, last + i)\n tmp.pop()\n\n def build_formula(self, coe, V, e, C):\n expr = And(*[(coe[i] * v % e == C[i]) for 
i, v in enumerate(V)])\n return simplify(expr)\n\n def refine_model(self):\n formu_arr = []\n for k in range(self.k):\n clause = []\n for h in range(self.h):\n Coe = combine(self.A[h][j] * self.vi[j] for j in range(self.n))\n status = self.He[k][h], self.Hge[k][h], self.Hle[k][h]\n if status == (False, False, True):\n clause.append([Coe < self.B[h], Coe == self.B[h]])\n elif status == (False, True, False):\n clause.append([Coe > self.B[h], Coe == self.B[h]])\n elif status == (True, False, False):\n clause.append([Coe < self.B[h], Coe > self.B[h]])\n elif status == (False, True, True):\n clause.append([Coe == self.B[h]])\n elif status == (True, False, True):\n clause.append([Coe < self.B[h]])\n elif status == (True, True, False):\n clause.append([Coe > self.B[h]])\n elif status == (True, True, True):\n clause.append([False])\n for m in range(self.m):\n status = self.T[k][m], self.Nt[k][m]\n if status == (True, False):\n mod_res = []\n self.refine_modu(self.M[m], self.E[m], self.C[m],\n mod_res, [])\n for C in mod_res:\n clause.append([self.build_formula(self.M[m], self.\n vi, self.E[m], C)])\n elif status == (False, True):\n mod_clause = []\n for i in range(self.E[m]):\n if i != self.C[m]:\n mod_res = []\n self.refine_modu(self.M[m], self.E[m], i,\n mod_res, [])\n for C in mod_res:\n mod_clause.append(self.build_formula(self.M\n [m], self.vi, self.E[m], C))\n clause.append(mod_clause)\n elif status == (True, True):\n clause.append([False])\n formu_arr.append(clause)\n return formu_arr\n\n\nclass EquTemplate:\n\n def __init__(self, n):\n self.vi = [Int('v' + str(i)) for i in range(n)]\n self.b = Int('b')\n self.s = Solver()\n\n def add(self, vector):\n vi, target = vector[:-1], vector[-1]\n expr = combine(vi[i] * self.vi[i] for i in range(len(self.vi))\n ) + self.b == target\n self.s.add(expr)\n\n def check(self):\n return self.s.check()\n\n def solve_model(self):\n model = self.s.model()\n V = [(model[v].as_long() if model[v] is not None else 0) for v in\n 
self.vi]\n B = model[self.b].as_long() if model[self.b] is not None else 0\n expr = combine(V[i] * self.vi[i] for i in range(len(self.vi))) + B\n return simplify(expr)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass FormulaTemplate:\n\n def __init__(self, vi, w, k, h, m, timeout=3000000):\n self.k = k\n self.h = h\n self.m = m\n self.w = w\n self.vi = vi\n n = len(vi)\n self.n = n\n self.aeij = [[Int('ae' + str(i) + str(j)) for j in range(n)] for i in\n range(h)]\n self.bi = [Int('b' + str(i)) for i in range(h)]\n self.amij = [[Int('am' + str(i) + str(j)) for j in range(n)] for i in\n range(m)]\n self.ei = [Int('e' + str(i)) for i in range(m)]\n self.ci = [Int('c' + str(i)) for i in range(m)]\n self.heij = [[Bool('h_e' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.hgeij = [[Bool('h_ge' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.hleij = [[Bool('h_le' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.tij = [[Bool('t' + str(j) + str(i)) for i in range(m)] for j in\n range(k)]\n self.ntij = [[Bool('nt' + str(j) + str(i)) for i in range(m)] for j in\n range(k)]\n self.s = Solver()\n for i in range(h):\n self.s.add(Or(*[(a > 0) for a in self.aeij[i]]))\n for j in range(i + 1, h):\n self.s.add(Or(*[(self.aeij[i][w] != self.aeij[j][w]) for w in\n range(n)]))\n for i in range(m):\n self.s.add(Or(*[(am > 0) for am in self.amij[i]]))\n self.s.add(*[And(0 <= am, am < self.ei[i]) for am in self.amij[i]])\n self.s.add(*[And(self.ei[i] > self.ci[i], self.ci[i] >= 0) for i in\n range(m)])\n self.s.add(*[And(e <= 10 * m, e >= 2) for e in self.ei])\n for i in range(k):\n for j in range(i + 1, k):\n all_true = [And(self.heij[i][w], self.hgeij[i][w], self.\n hleij[i][w]) for w in range(h)]\n all_true.extend([And(self.tij[i][w], self.ntij[i][w]) for w in\n range(m)])\n struct_const = [Or(self.heij[i][w] != self.heij[j][w], self\n .hgeij[i][w] != self.hgeij[j][w], self.hleij[i][w] !=\n self.hleij[j][w]) for w in range(h)]\n struct_const.extend([Or(self.tij[i][w] != self.tij[j][w], \n self.ntij[i][w] != self.ntij[j][w]) for w in range(m)])\n self.s.add(Or(*struct_const, *all_true))\n self.s.set('timeout', 
timeout)\n\n def add(self, example, label):\n self.s.add(self.encoding(example, label))\n\n def check(self):\n check = self.s.check()\n if check == sat:\n self.solve_model()\n return check\n\n def W_size(m):\n return m + 2\n\n def encoding(self, example, label):\n Equ = [(combine(example[j] * self.aeij[i][j] for j in range(self.n)\n ) != self.bi[i]) for i in range(self.h)]\n Ge = [(combine(example[j] * self.aeij[i][j] for j in range(self.n)) >=\n self.bi[i]) for i in range(self.h)]\n Le = [(combine(example[j] * self.aeij[i][j] for j in range(self.n)) <=\n self.bi[i]) for i in range(self.h)]\n Me = [(combine(example[j] * self.amij[i][j] for j in range(self.n)) %\n self.ei[i] == self.ci[i]) for i in range(self.m)]\n Tk = []\n for k in range(self.k):\n clause = []\n clause.extend([Implies(self.heij[k][h], Equ[h]) for h in range(\n self.h)])\n clause.extend([Implies(self.hgeij[k][h], Ge[h]) for h in range(\n self.h)])\n clause.extend([Implies(self.hleij[k][h], Le[h]) for h in range(\n self.h)])\n clause.extend([Implies(self.tij[k][m], Me[m]) for m in range(\n self.m)])\n clause.extend([Implies(self.ntij[k][m], Not(Me[m])) for m in\n range(self.m)])\n Tk.append(And(*clause))\n return Or(*Tk) == label\n <mask token>\n\n def formula_model(self, *val):\n if len(val) == 0:\n val = self.vi\n formu = []\n for k in range(self.k):\n clause = []\n for h in range(self.h):\n Coe = combine(self.A[h][j] * val[j] for j in range(self.n))\n status = self.He[k][h], self.Hge[k][h], self.Hle[k][h]\n if status == (False, False, True):\n clause.append(Coe <= self.B[h])\n elif status == (False, True, False):\n clause.append(Coe >= self.B[h])\n elif status == (True, False, False):\n clause.append(Coe != self.B[h])\n elif status == (False, True, True):\n clause.append(Coe == self.B[h])\n elif status == (True, False, True):\n clause.append(Coe < self.B[h])\n elif status == (True, True, False):\n clause.append(Coe > self.B[h])\n elif status == (True, True, True):\n clause.append(False)\n for m 
in range(self.m):\n status = self.T[k][m], self.Nt[k][m]\n if status == (True, False):\n clause.append(combine(self.M[m][j] * val[j] for j in\n range(self.n)) % self.E[m] == self.C[m])\n elif status == (False, True):\n clause.append(combine(self.M[m][j] * val[j] for j in\n range(self.n)) % self.E[m] != self.C[m])\n elif status == (True, True):\n clause.append(False)\n formu.append(And(*clause))\n return simplify(Or(*formu))\n\n def refine_modu(self, coe, e, b, res, tmp, last=0):\n if len(coe) == 1:\n if coe[0] == 0:\n if last % e == b:\n tmp.append(0)\n else:\n return\n for i in range(e):\n if (i + last) % e == b:\n tmp.append(i)\n break\n res.append(list(tmp))\n tmp.pop()\n elif coe[0] == 0:\n tmp.append(0)\n self.refine_modu(coe[1:], e, b, res, tmp, last)\n tmp.pop()\n else:\n for i in range(e):\n tmp.append(i)\n self.refine_modu(coe[1:], e, b, res, tmp, last + i)\n tmp.pop()\n\n def build_formula(self, coe, V, e, C):\n expr = And(*[(coe[i] * v % e == C[i]) for i, v in enumerate(V)])\n return simplify(expr)\n\n def refine_model(self):\n formu_arr = []\n for k in range(self.k):\n clause = []\n for h in range(self.h):\n Coe = combine(self.A[h][j] * self.vi[j] for j in range(self.n))\n status = self.He[k][h], self.Hge[k][h], self.Hle[k][h]\n if status == (False, False, True):\n clause.append([Coe < self.B[h], Coe == self.B[h]])\n elif status == (False, True, False):\n clause.append([Coe > self.B[h], Coe == self.B[h]])\n elif status == (True, False, False):\n clause.append([Coe < self.B[h], Coe > self.B[h]])\n elif status == (False, True, True):\n clause.append([Coe == self.B[h]])\n elif status == (True, False, True):\n clause.append([Coe < self.B[h]])\n elif status == (True, True, False):\n clause.append([Coe > self.B[h]])\n elif status == (True, True, True):\n clause.append([False])\n for m in range(self.m):\n status = self.T[k][m], self.Nt[k][m]\n if status == (True, False):\n mod_res = []\n self.refine_modu(self.M[m], self.E[m], self.C[m],\n mod_res, [])\n for C 
in mod_res:\n clause.append([self.build_formula(self.M[m], self.\n vi, self.E[m], C)])\n elif status == (False, True):\n mod_clause = []\n for i in range(self.E[m]):\n if i != self.C[m]:\n mod_res = []\n self.refine_modu(self.M[m], self.E[m], i,\n mod_res, [])\n for C in mod_res:\n mod_clause.append(self.build_formula(self.M\n [m], self.vi, self.E[m], C))\n clause.append(mod_clause)\n elif status == (True, True):\n clause.append([False])\n formu_arr.append(clause)\n return formu_arr\n\n\nclass EquTemplate:\n\n def __init__(self, n):\n self.vi = [Int('v' + str(i)) for i in range(n)]\n self.b = Int('b')\n self.s = Solver()\n\n def add(self, vector):\n vi, target = vector[:-1], vector[-1]\n expr = combine(vi[i] * self.vi[i] for i in range(len(self.vi))\n ) + self.b == target\n self.s.add(expr)\n\n def check(self):\n return self.s.check()\n\n def solve_model(self):\n model = self.s.model()\n V = [(model[v].as_long() if model[v] is not None else 0) for v in\n self.vi]\n B = model[self.b].as_long() if model[self.b] is not None else 0\n expr = combine(V[i] * self.vi[i] for i in range(len(self.vi))) + B\n return simplify(expr)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass FormulaTemplate:\n\n def __init__(self, vi, w, k, h, m, timeout=3000000):\n self.k = k\n self.h = h\n self.m = m\n self.w = w\n self.vi = vi\n n = len(vi)\n self.n = n\n self.aeij = [[Int('ae' + str(i) + str(j)) for j in range(n)] for i in\n range(h)]\n self.bi = [Int('b' + str(i)) for i in range(h)]\n self.amij = [[Int('am' + str(i) + str(j)) for j in range(n)] for i in\n range(m)]\n self.ei = [Int('e' + str(i)) for i in range(m)]\n self.ci = [Int('c' + str(i)) for i in range(m)]\n self.heij = [[Bool('h_e' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.hgeij = [[Bool('h_ge' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.hleij = [[Bool('h_le' + str(j) + str(i)) for i in range(h)] for\n j in range(k)]\n self.tij = [[Bool('t' + str(j) + str(i)) for i in range(m)] for j in\n range(k)]\n self.ntij = [[Bool('nt' + str(j) + str(i)) for i in range(m)] for j in\n range(k)]\n self.s = Solver()\n for i in range(h):\n self.s.add(Or(*[(a > 0) for a in self.aeij[i]]))\n for j in range(i + 1, h):\n self.s.add(Or(*[(self.aeij[i][w] != self.aeij[j][w]) for w in\n range(n)]))\n for i in range(m):\n self.s.add(Or(*[(am > 0) for am in self.amij[i]]))\n self.s.add(*[And(0 <= am, am < self.ei[i]) for am in self.amij[i]])\n self.s.add(*[And(self.ei[i] > self.ci[i], self.ci[i] >= 0) for i in\n range(m)])\n self.s.add(*[And(e <= 10 * m, e >= 2) for e in self.ei])\n for i in range(k):\n for j in range(i + 1, k):\n all_true = [And(self.heij[i][w], self.hgeij[i][w], self.\n hleij[i][w]) for w in range(h)]\n all_true.extend([And(self.tij[i][w], self.ntij[i][w]) for w in\n range(m)])\n struct_const = [Or(self.heij[i][w] != self.heij[j][w], self\n .hgeij[i][w] != self.hgeij[j][w], self.hleij[i][w] !=\n self.hleij[j][w]) for w in range(h)]\n struct_const.extend([Or(self.tij[i][w] != self.tij[j][w], \n self.ntij[i][w] != self.ntij[j][w]) for w in range(m)])\n self.s.add(Or(*struct_const, *all_true))\n self.s.set('timeout', 
timeout)\n\n def add(self, example, label):\n self.s.add(self.encoding(example, label))\n\n def check(self):\n check = self.s.check()\n if check == sat:\n self.solve_model()\n return check\n\n def W_size(m):\n return m + 2\n\n def encoding(self, example, label):\n Equ = [(combine(example[j] * self.aeij[i][j] for j in range(self.n)\n ) != self.bi[i]) for i in range(self.h)]\n Ge = [(combine(example[j] * self.aeij[i][j] for j in range(self.n)) >=\n self.bi[i]) for i in range(self.h)]\n Le = [(combine(example[j] * self.aeij[i][j] for j in range(self.n)) <=\n self.bi[i]) for i in range(self.h)]\n Me = [(combine(example[j] * self.amij[i][j] for j in range(self.n)) %\n self.ei[i] == self.ci[i]) for i in range(self.m)]\n Tk = []\n for k in range(self.k):\n clause = []\n clause.extend([Implies(self.heij[k][h], Equ[h]) for h in range(\n self.h)])\n clause.extend([Implies(self.hgeij[k][h], Ge[h]) for h in range(\n self.h)])\n clause.extend([Implies(self.hleij[k][h], Le[h]) for h in range(\n self.h)])\n clause.extend([Implies(self.tij[k][m], Me[m]) for m in range(\n self.m)])\n clause.extend([Implies(self.ntij[k][m], Not(Me[m])) for m in\n range(self.m)])\n Tk.append(And(*clause))\n return Or(*Tk) == label\n\n def solve_model(self):\n print('w', self.w)\n model = self.s.model()\n self.M = [[(model[self.amij[i][j]].as_long() if model[self.amij[i][\n j]] is not None else 0) for j in range(self.n)] for i in range(\n self.m)]\n for i in range(self.m):\n self.ei[i] = FormulaTemplate.W_size(self.w)\n self.E = [self.ei[i] for i in range(self.m)]\n print('E = \\n', self.E)\n self.C = [(model[self.ci[i]].as_long() if model[self.ci[i]] is not\n None else 0) for i in range(self.m)]\n self.A = [[(model[self.aeij[i][j]].as_long() if model[self.aeij[i][\n j]] is not None else 0) for j in range(self.n)] for i in range(\n self.h)]\n self.B = [(model[self.bi[i]].as_long() if model[self.bi[i]] is not\n None else 0) for i in range(self.h)]\n self.He = [[(bool(model[self.heij[i][j]]) if 
model[self.heij[i][j]]\n is not None else False) for j in range(self.h)] for i in range\n (self.k)]\n self.Hge = [[(bool(model[self.hgeij[i][j]]) if model[self.hgeij[i][\n j]] is not None else False) for j in range(self.h)] for i in\n range(self.k)]\n self.Hle = [[(bool(model[self.hleij[i][j]]) if model[self.hleij[i][\n j]] is not None else False) for j in range(self.h)] for i in\n range(self.k)]\n self.T = [[(bool(model[self.tij[i][j]]) if model[self.tij[i][j]] is not\n None else False) for j in range(self.m)] for i in range(self.k)]\n self.Nt = [[(bool(model[self.ntij[i][j]]) if model[self.ntij[i][j]]\n is not None else False) for j in range(self.m)] for i in range\n (self.k)]\n for i in range(self.m):\n flag = True\n pix = -1\n for am in self.M[i]:\n if pix == -1:\n if am != 0:\n pix = am\n elif am != 0 and am != pix:\n flag = False\n break\n if flag:\n if self.C[i] == 0:\n if not co_prime(pix, self.E[i]):\n self.E[i] /= gcd(pix, self.E[i])\n for j in range(self.n):\n self.M[i][j] = 1\n else:\n div = gcd(pix, self.E[i], self.C[i])\n self.E[i] /= div\n self.C[i] /= div\n pix /= div\n for j in range(self.n):\n self.M[i][j] /= div\n div = gcd(int(pix), int(self.C[i]))\n for j in range(self.n):\n self.M[i][j] /= div\n self.C[i] /= div\n for i in range(self.h):\n divisior = gcd(*self.A[i], self.B[i])\n self.B[i] /= divisior\n for j in range(self.n):\n self.A[i][j] /= divisior\n for i in range(len(self.E)):\n self.E[i] = int(self.E[i])\n\n def formula_model(self, *val):\n if len(val) == 0:\n val = self.vi\n formu = []\n for k in range(self.k):\n clause = []\n for h in range(self.h):\n Coe = combine(self.A[h][j] * val[j] for j in range(self.n))\n status = self.He[k][h], self.Hge[k][h], self.Hle[k][h]\n if status == (False, False, True):\n clause.append(Coe <= self.B[h])\n elif status == (False, True, False):\n clause.append(Coe >= self.B[h])\n elif status == (True, False, False):\n clause.append(Coe != self.B[h])\n elif status == (False, True, True):\n 
clause.append(Coe == self.B[h])\n elif status == (True, False, True):\n clause.append(Coe < self.B[h])\n elif status == (True, True, False):\n clause.append(Coe > self.B[h])\n elif status == (True, True, True):\n clause.append(False)\n for m in range(self.m):\n status = self.T[k][m], self.Nt[k][m]\n if status == (True, False):\n clause.append(combine(self.M[m][j] * val[j] for j in\n range(self.n)) % self.E[m] == self.C[m])\n elif status == (False, True):\n clause.append(combine(self.M[m][j] * val[j] for j in\n range(self.n)) % self.E[m] != self.C[m])\n elif status == (True, True):\n clause.append(False)\n formu.append(And(*clause))\n return simplify(Or(*formu))\n\n def refine_modu(self, coe, e, b, res, tmp, last=0):\n if len(coe) == 1:\n if coe[0] == 0:\n if last % e == b:\n tmp.append(0)\n else:\n return\n for i in range(e):\n if (i + last) % e == b:\n tmp.append(i)\n break\n res.append(list(tmp))\n tmp.pop()\n elif coe[0] == 0:\n tmp.append(0)\n self.refine_modu(coe[1:], e, b, res, tmp, last)\n tmp.pop()\n else:\n for i in range(e):\n tmp.append(i)\n self.refine_modu(coe[1:], e, b, res, tmp, last + i)\n tmp.pop()\n\n def build_formula(self, coe, V, e, C):\n expr = And(*[(coe[i] * v % e == C[i]) for i, v in enumerate(V)])\n return simplify(expr)\n\n def refine_model(self):\n formu_arr = []\n for k in range(self.k):\n clause = []\n for h in range(self.h):\n Coe = combine(self.A[h][j] * self.vi[j] for j in range(self.n))\n status = self.He[k][h], self.Hge[k][h], self.Hle[k][h]\n if status == (False, False, True):\n clause.append([Coe < self.B[h], Coe == self.B[h]])\n elif status == (False, True, False):\n clause.append([Coe > self.B[h], Coe == self.B[h]])\n elif status == (True, False, False):\n clause.append([Coe < self.B[h], Coe > self.B[h]])\n elif status == (False, True, True):\n clause.append([Coe == self.B[h]])\n elif status == (True, False, True):\n clause.append([Coe < self.B[h]])\n elif status == (True, True, False):\n clause.append([Coe > self.B[h]])\n 
elif status == (True, True, True):\n clause.append([False])\n for m in range(self.m):\n status = self.T[k][m], self.Nt[k][m]\n if status == (True, False):\n mod_res = []\n self.refine_modu(self.M[m], self.E[m], self.C[m],\n mod_res, [])\n for C in mod_res:\n clause.append([self.build_formula(self.M[m], self.\n vi, self.E[m], C)])\n elif status == (False, True):\n mod_clause = []\n for i in range(self.E[m]):\n if i != self.C[m]:\n mod_res = []\n self.refine_modu(self.M[m], self.E[m], i,\n mod_res, [])\n for C in mod_res:\n mod_clause.append(self.build_formula(self.M\n [m], self.vi, self.E[m], C))\n clause.append(mod_clause)\n elif status == (True, True):\n clause.append([False])\n formu_arr.append(clause)\n return formu_arr\n\n\nclass EquTemplate:\n\n def __init__(self, n):\n self.vi = [Int('v' + str(i)) for i in range(n)]\n self.b = Int('b')\n self.s = Solver()\n\n def add(self, vector):\n vi, target = vector[:-1], vector[-1]\n expr = combine(vi[i] * self.vi[i] for i in range(len(self.vi))\n ) + self.b == target\n self.s.add(expr)\n\n def check(self):\n return self.s.check()\n\n def solve_model(self):\n model = self.s.model()\n V = [(model[v].as_long() if model[v] is not None else 0) for v in\n self.vi]\n B = model[self.b].as_long() if model[self.b] is not None else 0\n expr = combine(V[i] * self.vi[i] for i in range(len(self.vi))) + B\n return simplify(expr)\n\n\n<mask token>\n",
"step-5": "import random\n\nfrom z3 import *\n\n\ndef combine(iter):\n tmp_list = [i for i in iter]\n res = tmp_list[0]\n for i in tmp_list[1:]:\n res += i\n return res\n\n\ndef co_prime(num1, num2):\n for num in range(2, min(num1, num2) + 1):\n if num1 % num == 0 and num2 % num == 0:\n return False\n return True\n\n\ndef gcd(*nums):\n min_num = 1 << 32\n for num in nums:\n if num != 0:\n min_num = min(min_num, abs(num))\n for i in range(min_num, 1, -1):\n flag = True\n for num in nums:\n if num % i != 0:\n flag = False\n break\n if flag:\n return i\n return 1\n\n\nclass FormulaTemplate:\n def __init__(self, vi ,w ,k, h, m ,timeout=3000000): ####加了w\n self.k = k # amount of clause 多少个子句\n self.h = h # number of inequality 第一类不等式数量上限\n self.m = m # number of mode number 第二类不等式数量上限\n\n self.w = w\n\n self.vi = vi\n n = len(vi)\n self.n = n\n self.aeij = [[Int('ae' + str(i) + str(j)) for j in range(n)] for i in range(h)]\n self.bi = [Int('b' + str(i)) for i in range(h)]\n self.amij = [[Int('am' + str(i) + str(j)) for j in range(n)] for i in range(m)]\n self.ei = [Int('e' + str(i)) for i in range(m)] ##改成定值 , 写一个函数,从2开始一个个试????(还没实现)\n self.ci = [Int('c' + str(i)) for i in range(m)]\n self.heij = [[Bool('h_e' + str(j) + str(i)) for i in range(h)] for j in range(k)]\n self.hgeij = [[Bool('h_ge' + str(j) + str(i)) for i in range(h)] for j in range(k)]\n self.hleij = [[Bool('h_le' + str(j) + str(i)) for i in range(h)] for j in range(k)]\n self.tij = [[Bool('t' + str(j) + str(i)) for i in range(m)] for j in range(k)]\n self.ntij = [[Bool('nt' + str(j) + str(i)) for i in range(m)] for j in range(k)]\n self.s = Solver()\n\n\n\n\n for i in range(h):\n # 不等式系数ae_ij不能全部为0\n self.s.add(Or(*[a > 0 for a in self.aeij[i]]))\n for j in range(i + 1, h):\n self.s.add(Or(*[self.aeij[i][w] != self.aeij[j][w] for w in range(n)]))\n for i in range(m):\n # 模等式的系数am_ij不能全部小于等于0\n self.s.add(Or(*[am > 0 for am in self.amij[i]]))\n # 模等式的系数am_ij不能大于模e\n self.s.add(*[And(0 <= am, am < 
self.ei[i]) for am in self.amij[i]])\n # for j in range(i + 1, m):\n # self.s.add(Or(self.ei[i] != self.ei[j],\n # *[self.amij[i][w] != self.amij[j][w] for w in range(n)]))\n # 余数c_i必须小于模e\n self.s.add(*[And(self.ei[i] > self.ci[i], self.ci[i] >= 0) for i in range(m)])\n # 模必须大于等于2,并且小于一定范围\n self.s.add(*[And(e <= 10 * m, e >= 2) for e in self.ei])\n for i in range(k):\n # 判断条件一定有一个是False,避免逻辑出现False\n for j in range(i + 1, k):\n all_true = [And(self.heij[i][w], self.hgeij[i][w], self.hleij[i][w]) for w in range(h)]\n all_true.extend([And(self.tij[i][w], self.ntij[i][w]) for w in range(m)])\n struct_const = [Or(self.heij[i][w] != self.heij[j][w],\n self.hgeij[i][w] != self.hgeij[j][w],\n self.hleij[i][w] != self.hleij[j][w]) for w in range(h)]\n struct_const.extend([Or(self.tij[i][w] != self.tij[j][w],\n self.ntij[i][w] != self.ntij[j][w]) for w in range(m)])\n\n self.s.add(Or(*struct_const, *all_true))\n\n self.s.set(\"timeout\", timeout)\n\n def add(self, example, label):\n self.s.add(self.encoding(example, label))\n\n def check(self):\n check = self.s.check()\n if check == sat:\n self.solve_model()\n return check\n\n def W_size(m):\n return m+2\n\n\n\n def encoding(self, example, label):\n Equ = [combine(example[j] * self.aeij[i][j] for j in range(self.n)) != self.bi[i] for i in range(self.h)]\n Ge = [combine(example[j] * self.aeij[i][j] for j in range(self.n)) >= self.bi[i] for i in range(self.h)]\n Le = [combine(example[j] * self.aeij[i][j] for j in range(self.n)) <= self.bi[i] for i in range(self.h)]\n Me = [combine(example[j] * self.amij[i][j] for j in range(self.n)) % self.ei[i] == self.ci[i] for i in\n range(self.m)]\n Tk = []\n for k in range(self.k):\n clause = []\n clause.extend([Implies(self.heij[k][h], Equ[h]) for h in range(self.h)])\n clause.extend([Implies(self.hgeij[k][h], Ge[h]) for h in range(self.h)])\n clause.extend([Implies(self.hleij[k][h], Le[h]) for h in range(self.h)])\n clause.extend([Implies(self.tij[k][m], Me[m]) for m in 
range(self.m)])\n clause.extend([Implies(self.ntij[k][m], Not(Me[m])) for m in range(self.m)])\n Tk.append(And(*clause))\n # print(\"Or(*Tk) , label=\\n\",Or(*Tk),label)\n return Or(*Tk) == label\n\n def solve_model(self): #求出取值 ####加了w\n print(\"w\", self.w)\n #W_size = [2,3,4,5,6,7,8,9]\n model = self.s.model()\n self.M = [[model[self.amij[i][j]].as_long() if model[self.amij[i][j]] is not None else 0\n for j in range(self.n)]\n for i in range(self.m)]\n ##用z3求解e(此处要改)\n # self.E = [model[self.ei[i]].as_long() if model[self.ei[i]] is not None else 1 for i in range(self.m)]\n # print(\"E= \\n\",self.E)\n ####改动\n for i in range(self.m):\n self.ei[i] = FormulaTemplate.W_size(self.w)\n self.E = [self.ei[i] for i in range(self.m)]\n print(\"E = \\n\",self.E)\n ####\n self.C = [model[self.ci[i]].as_long() if model[self.ci[i]] is not None else 0 for i in range(self.m)]\n self.A = [[model[self.aeij[i][j]].as_long() if model[self.aeij[i][j]] is not None else 0\n for j in range(self.n)]\n for i in range(self.h)]\n self.B = [model[self.bi[i]].as_long() if model[self.bi[i]] is not None else 0 for i in range(self.h)]\n self.He = [\n [bool(model[self.heij[i][j]]) if model[self.heij[i][j]] is not None else False\n for j in range(self.h)]\n for i in range(self.k)\n ]\n self.Hge = [\n [bool(model[self.hgeij[i][j]]) if model[self.hgeij[i][j]] is not None else False\n for j in range(self.h)]\n for i in range(self.k)\n ]\n self.Hle = [\n [bool(model[self.hleij[i][j]]) if model[self.hleij[i][j]] is not None else False\n for j in range(self.h)]\n for i in range(self.k)\n ]\n self.T = [\n [bool(model[self.tij[i][j]]) if model[self.tij[i][j]] is not None else False\n for j in range(self.m)]\n for i in range(self.k)\n ]\n self.Nt = [\n [bool(model[self.ntij[i][j]]) if model[self.ntij[i][j]] is not None else False\n for j in range(self.m)]\n for i in range(self.k)\n ]\n for i in range(self.m):\n flag = True # 判断是否全部系数都相等\n pix = -1\n for am in self.M[i]:\n if pix == -1:\n if am != 0:\n 
pix = am\n elif am != 0 and am != pix:\n flag = False\n break\n if flag: # 系数全部相同\n if self.C[i] == 0:\n # if co_prime(pix, self.E[i]):\n # for j in range(self.n):\n # if self.M[i][j] != 0:\n # self.M[i][j] = 1\n # else:\n # div = gcd(pix, self.E[i])\n # self.E[i] /= div\n # for j in range(self.n):\n # self.M[i][j] /= div\n if not co_prime(pix, self.E[i]):\n self.E[i] /= gcd(pix, self.E[i])\n for j in range(self.n):\n self.M[i][j] = 1\n else:\n div = gcd(pix, self.E[i], self.C[i])\n self.E[i] /= div\n self.C[i] /= div\n pix /= div\n for j in range(self.n):\n self.M[i][j] /= div\n div = gcd(int(pix), int(self.C[i]))\n for j in range(self.n):\n self.M[i][j] /= div\n self.C[i] /= div\n for i in range(self.h):\n divisior = gcd(*self.A[i], self.B[i])\n self.B[i] /= divisior\n for j in range(self.n):\n self.A[i][j] /= divisior\n for i in range(len(self.E)):\n self.E[i] = int(self.E[i])\n\n def formula_model(self, *val): # 得到一个公式模型 kd:代入变量求得变量,代入数值就是求得一个值\n if len(val) == 0:\n val = self.vi\n formu = []\n for k in range(self.k):\n clause = []\n for h in range(self.h):\n Coe = combine(self.A[h][j] * val[j] for j in range(self.n))\n status = (self.He[k][h], self.Hge[k][h], self.Hle[k][h])\n if status == (False, False, True): #选择大于小于等于\n clause.append(Coe <= self.B[h])\n elif status == (False, True, False):\n clause.append(Coe >= self.B[h])\n elif status == (True, False, False):\n clause.append(Coe != self.B[h])\n elif status == (False, True, True):\n clause.append(Coe == self.B[h])\n elif status == (True, False, True):\n clause.append(Coe < self.B[h])\n elif status == (True, True, False):\n clause.append(Coe > self.B[h])\n elif status == (True, True, True):\n clause.append(False)\n for m in range(self.m):\n status = (self.T[k][m], self.Nt[k][m])\n if status == (True, False): #选择取模\n clause.append(combine(self.M[m][j] * val[j] for j in range(self.n)) % self.E[m] == self.C[m])\n elif status == (False, True):\n clause.append(combine(self.M[m][j] * val[j] for j in 
range(self.n)) % self.E[m] != self.C[m])\n elif status == (True, True):\n clause.append(False)\n formu.append(And(*clause))\n # print(\"simplify(Or(*formu))=\\n\",simplify(Or(*formu)))\n return simplify(Or(*formu))\n\n def refine_modu(self, coe, e, b, res, tmp, last=0):\n if len(coe) == 1:\n if coe[0] == 0:\n if last % e == b:\n tmp.append(0)\n else:\n return\n for i in range(e):\n if (i + last) % e == b:\n tmp.append(i)\n break\n res.append(list(tmp))\n tmp.pop()\n elif coe[0] == 0:\n tmp.append(0)\n self.refine_modu(coe[1:], e, b, res, tmp, last)\n tmp.pop()\n else:\n for i in range(e):\n tmp.append(i)\n self.refine_modu(coe[1:], e, b, res, tmp, last + i)\n tmp.pop()\n\n def build_formula(self, coe, V, e, C):\n expr = And(*[(coe[i] * v) % e == C[i] for i, v in enumerate(V)])\n return simplify(expr)\n\n def refine_model(self):\n formu_arr = []\n for k in range(self.k):\n clause = []\n for h in range(self.h):\n Coe = combine(self.A[h][j] * self.vi[j] for j in range(self.n))\n status = (self.He[k][h], self.Hge[k][h], self.Hle[k][h])\n if status == (False, False, True):\n clause.append([Coe < self.B[h], Coe == self.B[h]])\n elif status == (False, True, False):\n clause.append([Coe > self.B[h], Coe == self.B[h]])\n elif status == (True, False, False):\n clause.append([Coe < self.B[h], Coe > self.B[h]])\n elif status == (False, True, True):\n clause.append([Coe == self.B[h]])\n elif status == (True, False, True):\n clause.append([Coe < self.B[h]])\n elif status == (True, True, False):\n clause.append([Coe > self.B[h]])\n elif status == (True, True, True):\n clause.append([False])\n for m in range(self.m):\n status = (self.T[k][m], self.Nt[k][m])\n # Com = combine(self.M[m][j] * self.vi[j] for j in range(self.n))\n if status == (True, False):\n # clause.append([Com % self.E[m] == self.C[m]])\n mod_res = []\n self.refine_modu(self.M[m], self.E[m], self.C[m], mod_res, [])\n for C in mod_res:\n clause.append([self.build_formula(self.M[m], self.vi, self.E[m], C)])\n elif 
status == (False, True):\n mod_clause = []\n for i in range(self.E[m]):\n if i != self.C[m]:\n # mod_clause.append(Com % self.E[m] == i)\n mod_res = []\n self.refine_modu(self.M[m], self.E[m], i, mod_res, [])\n for C in mod_res:\n mod_clause.append(self.build_formula(self.M[m], self.vi, self.E[m], C))\n clause.append(mod_clause)\n elif status == (True, True):\n clause.append([False])\n formu_arr.append(clause)\n return formu_arr\n\n\nclass EquTemplate:\n def __init__(self, n):\n self.vi = [Int('v' + str(i)) for i in range(n)]\n self.b = Int('b')\n self.s = Solver()\n\n def add(self, vector):\n vi, target = vector[:-1], vector[-1]\n expr = combine(vi[i] * self.vi[i] for i in range(len(self.vi))) + self.b == target\n self.s.add(expr)\n\n def check(self):\n return self.s.check()\n\n def solve_model(self):\n model = self.s.model()\n V = [model[v].as_long() if model[v] is not None else 0 for v in self.vi]\n B = model[self.b].as_long() if model[self.b] is not None else 0\n expr = combine(V[i] * self.vi[i] for i in range(len(self.vi))) + B\n return simplify(expr)\n\n\nif __name__ == '__main__':\n # smt = FormulaTemplate([Int('v1'), Int('v2')], 4, 3, 2)\n # smt.add([1, 2], True)\n # smt.add([2, 3], False)\n # print(smt.s)\n # print(smt.check())\n #\n # arr = smt.refine_model()\n # for a in arr:\n # print(a)\n #\n # formu = smt.formula_model()\n # print(formu)\n # print('-' * 50)\n # print(simplify(formu))\n # print('-' * 50)\n\n smt = EquTemplate(2)\n smt.add([0, 1, 1])\n smt.add([1, 2, 1])\n smt.add([3, 6, 3])\n if smt.check() == sat:\n print(smt.solve_model()) # 1*v0 + 2*v1 + 1\n else:\n print(unsat)\n\n\n",
"step-ids": [
11,
14,
15,
16,
22
]
}
|
[
11,
14,
15,
16,
22
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def solution(a, b):
answer = 0
for i in range(0, len(a)):
answer += a[i] * b[i]
print(answer)
return answer
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def solution(a, b):
answer = 0
for i in range(0, len(a)):
answer += a[i] * b[i]
print(answer)
return answer
solution([1, 2, 3, 4], [-3, -1, 0, 2])
<|reserved_special_token_1|>
def solution(a, b):
answer = 0;
for i in range(0,len(a)):
answer+=a[i]*b[i];
print(answer);
return answer
solution([1,2,3,4],[-3,-1,0,2]);
|
flexible
|
{
"blob_id": "5fd34c698c2060d5399ba43f6746527961aa574b",
"index": 9239,
"step-1": "<mask token>\n",
"step-2": "def solution(a, b):\n answer = 0\n for i in range(0, len(a)):\n answer += a[i] * b[i]\n print(answer)\n return answer\n\n\n<mask token>\n",
"step-3": "def solution(a, b):\n answer = 0\n for i in range(0, len(a)):\n answer += a[i] * b[i]\n print(answer)\n return answer\n\n\nsolution([1, 2, 3, 4], [-3, -1, 0, 2])\n",
"step-4": "def solution(a, b):\n answer = 0;\n\n for i in range(0,len(a)):\n answer+=a[i]*b[i];\n\n print(answer); \n return answer\n\nsolution([1,2,3,4],[-3,-1,0,2]);",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#coding=utf-8
# ycat 2017-10-20 create
# AGV的控制
import sys,os
import json
import setup
if __name__ == '__main__':
setup.setCurPath(__file__)
import utility
import enhance
import threading
import time
import log
import re
import lock
import json_codec
import driver.agv.hdcAgvApi as api
g_threads =[]
g_carts = None
g_point = None
g_lock = threading.RLock()
locationEvent = enhance.event()
api.locationEvent.connect(locationEvent.emit)
@utility.init()
def init():
    """Module start-up hook: bring up the AGV driver and give it time to connect.

    Skipped entirely in test mode so unit tests need no hardware.
    """
    if utility.is_test():
        return
    api.init()
    time.sleep(3)  # give the driver a moment to establish its connection
def wait():
    """Join every tracked worker thread, then reset the thread registry."""
    global g_threads
    for worker in list(g_threads):
        worker.join()
    g_threads.clear()
@utility.fini()
def fini():
    """Module shutdown hook: stop the driver, then wait for worker threads.

    Skipped in test mode, mirroring init().
    """
    if utility.is_test():
        return
    api.fini()
    wait()
g_stockLock = {}
def getStockA(loc):
    """Map a stockA location name to its aisle-pair lock index.

    Locations look like "stockA_row<r>_col<c>[...]".  Rows are paired so that
    an even row shares a lock index with the odd row just below it
    (row 2 -> 1, row 4 -> 3, ...), i.e. one lock per physical aisle.
    The index is row*1000 + col.  Returns None for non-stockA locations or
    names that do not match the pattern.

    Fix: removed the unreachable `if row is None` branch (row is always an
    int after int()) and made the regex a raw string.
    """
    if not loc.startswith("stockA"):
        return None
    m = re.search(r"stockA_row(\d+)_col(\d+).*", loc)
    if m is None:
        return None
    row = int(m.group(1))
    col = int(m.group(2))
    # Normalize even rows down to the odd row of the same aisle pair.
    if row % 2 != 1:
        row -= 1
    return row * 1000 + col
@lock.lock(g_lock)
def checkTimeout(index,agvId,loc):
    """Force-release a stockA aisle lock held longer than 10 minutes.

    The holder is presumed dead/stuck, so the aisle is reclaimed to avoid a
    permanent deadlock.
    """
    global g_stockLock
    if index in g_stockLock:
        # g_stockLock stores the acquisition tick; 10*60*1000 ms = 10 minutes.
        if utility.ticks() - g_stockLock[index] > 10*60*1000:
            unlockStockA(agvId,loc)
            log.warning("delete timeout locked",index)
# Prevent two AGVs from meeting head-on inside the same stockA aisle.
def lockStockA(agvId,loc):
    """Acquire the aisle lock for a stockA location, waiting if it is held.

    Non-stockA locations need no lock (index is None).  If the aisle is
    already locked, first reap a possibly-stale holder via checkTimeout, then
    poll for up to 5 minutes for the lock to clear.  Finally record our own
    acquisition tick under g_lock.
    """
    global g_stockLock
    index = getStockA(loc)
    if index is None:
        return
    if index in g_stockLock:
        checkTimeout(index,agvId,loc)
        log.warning(agvId,loc,"is locked, wait for unlock")
        # Spin-wait up to 5 minutes (300 x 1s) for the current holder.
        for i in range(60*5):
            if index not in g_stockLock:
                break
            time.sleep(1)
        log.info(agvId,loc,"wait for unlock success")
    global g_lock
    log.debug(agvId,"lock",loc,index)
    g_lock.acquire()
    g_stockLock[index] = utility.ticks()
    g_lock.release()
@lock.lock(g_lock)
def unlockStockA(agvId,loc):
    """Release the aisle lock for a stockA location, if we hold one.

    Safe to call for non-stockA locations (index None is never a key).
    """
    global g_stockLock
    index = getStockA(loc)
    if index in g_stockLock:
        log.debug(agvId,"unlock",loc,index)
        del g_stockLock[index]
@lock.lock(g_lock)
def getPoint(originPoint):
    """Translate an original map point name to its alias from point.cfg.

    Falls back to the input unchanged when no alias is configured.

    Fix: the original indexed g_point[originPoint] directly, which raised
    KeyError for unmapped points and made the fallback return unreachable;
    use dict.get so unknown points pass through as intended.
    """
    global g_point
    loadPoint()
    alias = g_point.get(originPoint)
    if alias is not None:
        return alias
    return originPoint
@lock.lock(g_lock)
def getOriginPoint(point):
    """Reverse of getPoint: return the configured key whose alias is *point*.

    Falls back to *point* itself when no mapping matches.
    """
    global g_point
    loadPoint()
    for origin, alias in g_point.items():
        if alias == point:
            return origin
    return point
@lock.lock(g_lock)
def loadPoint():
    """(Re)load the point-alias table from point.cfg next to this module."""
    global g_point
    baseDir = os.path.dirname(__file__)
    cfgPath = "point.cfg" if not baseDir else baseDir + "/" + "point.cfg"
    g_point = json_codec.load_file(cfgPath)
@lock.lock(g_lock)
def checkCart(cartId,scanId):
    """Verify that the scanned shelf barcode matches the expected cart id.

    Known cart->barcode pairs are persisted in cart.cfg.  A first-seen cartId
    is learned and saved; a known cartId whose barcode differs raises (the
    AGV picked up the wrong shelf).
    """
    scanId = scanId.strip()
    def loadCart():
        # Load the cart.cfg mapping that sits next to this module.
        global g_carts
        p = os.path.dirname(__file__)
        pp = "cart.cfg"
        if p:
            pp = p+"/"+pp
        g_carts = json_codec.load_file(pp)
    def saveCart():
        # Persist the (possibly updated) cart mapping back to cart.cfg.
        global g_carts
        p = os.path.dirname(__file__)
        pp = "cart.cfg"
        if p:
            pp = p+"/"+pp
        json_codec.dump_file(pp,g_carts)
    def findCart(scanId):
        # Reverse lookup: which cart id owns this barcode (for the error text).
        global g_carts
        for c in g_carts:
            if g_carts[c] == scanId:
                return c
        return "unknown"
    global g_carts
    if g_carts is None:
        loadCart()  # lazy first-use load
    if cartId in g_carts:
        if scanId != g_carts[cartId]:
            # Message (Chinese): "wrong shelf id, expected <cartId>, actual <found>"
            log.error("货架ID不正确,期望货架:"+cartId+", 实际货架:"+findCart(scanId))
            raise Exception("货架ID不正确,期望货架:"+cartId+", 实际货架:"+findCart(scanId))
    else:
        # First time we see this cart: learn and persist its barcode.
        g_carts[cartId] = scanId
        saveCart()
# finishCallback signature: finishCallback(obj)
# obj automatically carries these three fields:
# obj["agv"] = agvId
# obj["result"] = 0
# obj["resultDesc"] = "success"
def _run(func,args,callback,obj):
    """Run func(*args) on a worker thread, then invoke callback(obj).

    On exception, obj is stamped with result=-1 and the error text, the AGV
    (if any) is sent home and released, and the callback is still invoked
    exactly once.  The thread is tracked in g_threads so fini()/wait() can
    join it.
    """
    def threadFunc(func,args,callback,obj):
        hasCallback = False
        try:
            func(*args)
            if utility.is_exited():
                return  # process is shutting down; skip the callback
            hasCallback = True
            callback(obj)
        except Exception as e:
            obj["result"] = -1
            obj["resultDesc"] = str(e)
            log.exception("agvCtrl:",e)
            if "agv" in obj:
                agvId= obj["agv"]
                # (Chinese) "AGV <id>: unhandled exception, returning home"
                log.debug("小车:"+agvId+",出现未经处理的异常,正在返航 ")
                restAgv(agvId)
                freeAgv(agvId)
            if not hasCallback:
                # callback itself may have raised after being called; only
                # report the failure if it never ran.
                callback(obj)
    t = threading.Thread(target=threadFunc,args=(func,args,callback,obj))
    global g_threads
    t.start()
    g_threads.append(t)
def _initObj(obj,agvId):
obj["agv"] = agvId
obj["result"] = 0
obj["resultDesc"] = "success"
def _call(agvId,locId):
    """Drive the AGV to a location; for cart slots, dock under the shelf.

    Cart locations use sub-point ".1" and mission 1 (rotate -> drive under
    the shelf -> scan -> report the shelf id); the aisle lock is released if
    the docking mission fails.
    """
    if api.isCartLoc(locId):
        api.move(agvId,locId+".1")
        lockStockA(agvId,locId)
        try:
            api.mission(agvId,1)  # rotate -> slide under shelf -> scan -> returns the shelf id
        except Exception as e:
            unlockStockA(agvId,locId)
            raise e
    else:
        api.move(agvId,locId)
def apply(locId):
    """Request an idle AGV for the given location's ".1" entry sub-point."""
    return api.apply(getOriginPoint(locId) + '.1')
def call(agvId,locId,finishCallback,obj):
    """Asynchronously send *agvId* to *locId*; finishCallback(obj) fires on completion.

    obj is stamped with the standard result fields first.  If the worker
    thread cannot even be started, the AGV is sent home and released before
    re-raising.  Returns agvId for convenience.
    """
    _initObj(obj,agvId)
    locId=getOriginPoint(locId)
    try:
        _run(func=_call,args=(agvId,locId),callback=finishCallback,obj=obj)
    except Exception as e:
        restAgv(agvId)
        freeAgv(agvId)
        raise e
    return agvId
def _moveCart(agvId,srcLoc,locId,cartId):
    """Carry a shelf from srcLoc to locId (worker-thread body of moveCart).

    Sequence: lift the shelf and verify its barcode, leave the source aisle,
    run the follow missions to orient the shelf, drop it at the destination,
    then return and release the AGV.  Assumes the AGV is already docked under
    the shelf at srcLoc (see _call) and that srcLoc's aisle lock is held.
    """
    try:
        c = api.mission(agvId,2)  # lift mission; also returns the shelf id barcode
        if c:
            checkCart(cartId,c)
        api.move(agvId,srcLoc+".2")
    except Exception as e:
        # Deliberately swallowed for now (best-effort exit from the aisle).
        #TODO:ycat api.move(agvId,srcLoc+".2")
        #TODO:ycat raise e
        pass
    finally:
        unlockStockA(agvId,srcLoc)
    loc,type = api.getMissionType("get","",srcLoc)
    api.mission(agvId,type)  # mission 3/4: shelf follows the AGV turning right/left
    loc,type = api.getMissionType("put",srcLoc,locId)
    api.move(agvId,loc+".3")
    api.mission(agvId,type)  # mission 3/4: shelf follows the AGV turning right/left
    lockStockA(agvId,locId)
    try:
        api.move(agvId,locId+".4")
        api.mission(agvId,5)  # set the shelf down
        api.move(agvId,locId+".5")  # head home
    finally:
        unlockStockA(agvId,locId)
        freeAgv(agvId)
# Transport WITH a shelf on board.
def moveCart(agvId,cartId,srcLoc,locId,finishCallback,obj):
    """Asynchronously move shelf *cartId* from srcLoc to locId with *agvId*.

    finishCallback(obj) fires when done; obj carries the standard result
    fields.  On failure to start the worker, the AGV is sent home and
    released before re-raising.
    """
    _initObj(obj,agvId)
    assert api.isCartLoc(cartId)
    # Before moving a shelf the AGV must already be in the locked state.
    #assert api.isLocked(agvId)
    srcLoc = getOriginPoint(srcLoc)
    locId = getOriginPoint(locId)
    try:
        _run(func=_moveCart,args=(agvId,srcLoc,locId,cartId),callback=finishCallback,obj=obj)
    except Exception as e:
        restAgv(agvId)
        freeAgv(agvId)
        raise e
# Transport WITHOUT a shelf.
def move(agvId,locId,finishCallback,obj):
    """Asynchronously move a bare AGV to locId; finishCallback(obj) on completion.

    On failure to start the worker, the AGV is released before re-raising.
    """
    _initObj(obj,agvId)
    # Before moving, the AGV must already be in the locked state.
    #assert api.isLocked(agvId)
    try:
        locId=getOriginPoint(locId)
        _run(func=api.move,args=(agvId,locId),callback=finishCallback,obj=obj)
    except Exception as e:
        freeAgv(agvId)
        raise e
# Release this controller's claim on an AGV.
def freeAgv(agvId):
    """Unlock *agvId*; failures are logged but never propagated."""
    try:
        api.unlock(agvId)
    except Exception as err:
        log.exception("freeAgv", err)
# Send the AGV back to its turntable/home position.
def restAgv(agvId):
    """Resolve the driver-level id for *agvId* and reset (home) it."""
    api.reset(api.getAgvId(agvId))
def Init():
    """Wire AGV location events to the dashboard and wait for startup.

    Import is local to avoid a module-level dependency cycle with the
    dashboard package.
    """
    import interface.dashboard.dashboardApi
    locationEvent.connect(interface.dashboard.dashboardApi.reportAgvLoc)
    time.sleep(3)
################# unit test #################
def testgetPoint():
    # point.cfg must map these two stockA slots to the begin_* aliases.
    assert getPoint("StockA_row7_col4") == "begin_1"
    assert getPoint("StockA_row8_col4") == "begin_2"
def testgetOrginPoint():
    # Reverse lookups, plus the pass-through case for an unknown alias.
    assert getOriginPoint("begin_1") == "StockA_row7_col4"
    assert getOriginPoint("begin_2") == "StockA_row8_col4"
    assert getOriginPoint("hhahahaa") == "hhahahaa"
def testgetStockA():
    # Aisle-pair indices: even rows collapse onto the odd row below them;
    # anything outside stockA maps to None.
    cases = [
        ("stockA_row10_col3", 9003),
        ("stockA_row10_col4", 9004),
        ("stockA_row1_col1", 1001),
        ("stockA_row2_col2", 1002),
        ("stockA_row3_col2", 3002),
        ("stockA_row4_col2", 3002),
        ("stockA_row4_col2.1", 3002),
        ("stockB_row4_col2.1", None),
        ("begin_1", None),
        ("seat_1", None),
    ]
    for loc, want in cases:
        assert getStockA(loc) == want
def testcheckCart():
    """Exercise checkCart: learning, persistence, and the mismatch error."""
    global g_carts
    g_carts = None  # force a fresh load from cart.cfg
    checkCart("CART9001","591")
    checkCart("CART9002","592")
    # Both pairs must have been persisted to cart.cfg.
    gg = json_codec.load_file("cart.cfg")
    assert "CART9001" in gg
    assert "CART9002" in gg
    assert gg["CART9001"] == "591"
    assert gg["CART9002"] == "592"
    # Re-checking known pairs must be a no-op.
    checkCart("CART9002","592")
    checkCart("CART9001","591")
    # A known cart with the wrong barcode must raise with a descriptive message.
    try:
        checkCart("CART9002","591")
        assert 0
    except Exception as e:
        s = str(e)
        assert s.find("货架ID不正确,期望货架:CART9002, 实际货架:CART9001") != -1
import counter
@counter.count
def move_cart(cartId,srcLoc,destLoc,agvId=None):
    """Synchronously move shelf *cartId* from srcLoc to destLoc.

    Applies for an AGV (unless one is supplied), sends it to the source via
    call(), then chains moveCart() from callback1; callback2 marks obj
    "finish", which the polling loop below waits on.  Any failed step exits
    the process.
    """
    print(cartId,srcLoc,destLoc)
    counter.setPrint(True)
    def callback1(obj):
        # AGV arrived at (and docked under) the source shelf.
        if obj["result"] == -1:
            print("error, system exit")
            obj["finish"] = True
            sys.exit(-1)
        else:
            log.warning(obj["agv"],"start move from",obj["loc1"],"to",obj["loc2"])
            moveCart(obj["agv"],obj["cart"],obj["loc1"],obj["loc2"],callback2,obj)
    def callback2(obj):
        # Shelf delivered (or the transport failed).
        if obj["result"] == -1:
            print("error, system exit")
            obj["finish"] = True
            sys.exit(-1)
        else:
            log.warning(obj["agv"],"arrived",obj["loc2"])
            obj["finish"] = True
    obj = {}
    obj["loc1"] = srcLoc
    obj["loc2"] = destLoc
    obj["cart"] = cartId
    print("call ",srcLoc)
    if agvId is None:
        agvId = apply(srcLoc)
    call(agvId,srcLoc,callback1,obj)
    # Block until the callback chain completes (or the process is exiting).
    while not utility.is_exited():
        if "finish" in obj:
            break
        time.sleep(0.2)
    print("------ move ",srcLoc," to ",destLoc," finish ------")
#def func1(start,stock1,stock2):
# print("-------------------- start thread ------------------------")
# time.sleep(1)
# cartId = "CART9009"
# move_cart(cartId,start,stock1)
# next = stock1
# for s in seats:
# move_cart(cartId,next,"seat"+str(s)+"_1")
# if next == stock1:
# next = stock2
# else:
# next = stock1
# move_cart(cartId,"seat"+str(s)+"_1",next)
# # move_cart(cartId, s, next)
# print("=======================================")
# print("finish func1")
# print("=======================================")
def func2(stock1, stock2):
    """Durability test: shuttle cart CART9009 between two slots 20 times."""
    print("-------------------- start thread ------------------------", stock1, stock2)
    time.sleep(1)
    cart = "CART9009"
    for loop in range(20):
        print("current loop is - ", str(loop))
        move_cart(cart, stock1, stock2)
        move_cart(cart, stock2, stock1)
        print("current loop end - ", str(loop))
    banner = "======================================="
    print(banner)
    print("finish func2")
    print(banner)
def func3(times,starts,seats):
    """Durability test: tour cart CART9009 from *starts* through each seat, repeatedly.

    NOTE(review): range(0, times-1) performs times-1 tours, not times —
    confirm whether the off-by-one is intentional before relying on *times*.
    """
    current=starts
    cartId = "CART9009"
    time.sleep(1)
    for loop in range(0,times-1):
        # current=starts
        tip1="currentLoop is "+loop.__str__()+" currentStart is "+current
        print(tip1)
        for i in range(0,len(seats)):
            next = str(seats[i])
            tip2= "currentLoop is "+loop.__str__()+"currentOrigin is "+ current + "currentNext is " + next +" seatIndex is "+i.__str__()
            print(tip2)
            print("excuting")
            move_cart(cartId,current,next)
            current = next  # next tour leg starts where this one ended
def testPageAgvControl(jsonstr):
    """Start one func3 durability thread per task described in *jsonstr*.

    jsonstr is a JSON array of objects with "start", "seat" (comma-separated
    list) and "loop" fields.  Returns True if at least one task was started,
    False for an empty task list.  Threads are not joined here.
    """
    jsonData = json.loads(jsonstr)
    result = False
    if len(jsonData)==0:
        result=False
    else:
        for currentJson in jsonData:
            start = currentJson["start"]
            seat = currentJson["seat"]
            loop=int(currentJson["loop"])
            seats = str.split(seat, ',')
            durabilityTestTask1 = threading.Thread(target=func3, args=[loop, start, seats])
            durabilityTestTask1.start()
        result=True
    return result
def testtestPageAgvControl(jsonstr):
jsonData = json.loads(jsonstr)
result = False
if len(jsonData) == 0:
result = False
else:
for currentJson in jsonData:
start = currentJson["start"]
print(start)
time.sleep(3)
seat = currentJson["seat"]
seats = str.split(seat, ',')
print(seat)
time.sleep(3)
for currentseat in seats:
print(currentseat)
time.sleep(3)
time.sleep(10)
result = True
return result
def testPageUnloockAll():
    """Maintenance helper: release every AGV lock held by this controller."""
    api.unlockAll()
def testProcess(jsonData):
    """Run the page-driven durability test inside a utility start/finish frame."""
    utility.start()
    testPageAgvControl(jsonData)
    utility.finish()
def test1():
    """Manual durability scenario: one active tour thread (others kept but disabled)."""
    Init()
    durabilityTestTask1= threading.Thread(target=func3,args=[20,"stockA_row1_col3",["stockA_row1_col2","stockA_row1_col4"]])
    durabilityTestTask1.start()
    durabilityTestTask2= threading.Thread(target=func3,args=[20,"stockA_row1_col2",["seat2_1","stockA_row4_col2"]])
    # durabilityTestTask2.start()
    durabilityTestTask3= threading.Thread(target=func3,args=[20,"stockA_row5_col3",["seat16_1","stockA_row5_col2"]])
    # durabilityTestTask3.start()
    durabilityTestTask4= threading.Thread(target=func3,args=[20,"stockA_row6_col3",["seat12_1","stockA_row6_col2"]])
    # durabilityTestTask4.start()
    durabilityTestTask1.join()
    #t1.join()
    print("===============ALL FINISH ========================")
if __name__ == '__main__':
    # Command-line entry point. The mode is chosen by the presence of a
    # keyword anywhere in sys.argv:
    #   process - replay the JSON task list from /agvscada/driver/args.txt
    #   unlock  - release every AGV lock
    #   test    - run the single-route durability test (test1)
    #   (none)  - run the point-mapping self test
    # The original `sys.argv is not None and len(sys.argv) > 0` guard was
    # redundant: sys.argv is always a non-empty list in a normal run.
    if "process" in sys.argv:
        log.info("run at testPage mode")
        with open('/agvscada/driver/args.txt', 'r', encoding='utf-8') as f:
            args = f.read()
        api.init()
        time.sleep(3)
        testPageAgvControl(args)
    elif "unlock" in sys.argv:
        testPageUnloockAll()
    elif "test" in sys.argv:
        utility.start()
        test1()
        utility.finish()
    else:
        utility.start()
        testgetPoint()
        utility.finish()
|
normal
|
{
"blob_id": "e2feb12b88babbbfa4cc8447c91e8a5b6c30f78b",
"index": 1466,
"step-1": "<mask token>\n\n\n@utility.init()\ndef init():\n if utility.is_test():\n return\n api.init()\n time.sleep(3)\n\n\ndef wait():\n global g_threads\n for t in g_threads:\n t.join()\n g_threads.clear()\n\n\n@utility.fini()\ndef fini():\n if utility.is_test():\n return\n api.fini()\n wait()\n\n\n<mask token>\n\n\ndef getStockA(loc):\n if loc[0:6] != 'stockA':\n return None\n m = re.search('stockA_row(\\\\d+)_col(\\\\d+).*', loc)\n if m is None:\n return None\n row = int(m.group(1))\n col = int(m.group(2))\n if row is None:\n return\n if row % 2 != 1:\n row -= 1\n return row * 1000 + col\n\n\n@lock.lock(g_lock)\ndef checkTimeout(index, agvId, loc):\n global g_stockLock\n if index in g_stockLock:\n if utility.ticks() - g_stockLock[index] > 10 * 60 * 1000:\n unlockStockA(agvId, loc)\n log.warning('delete timeout locked', index)\n\n\ndef lockStockA(agvId, loc):\n global g_stockLock\n index = getStockA(loc)\n if index is None:\n return\n if index in g_stockLock:\n checkTimeout(index, agvId, loc)\n log.warning(agvId, loc, 'is locked, wait for unlock')\n for i in range(60 * 5):\n if index not in g_stockLock:\n break\n time.sleep(1)\n log.info(agvId, loc, 'wait for unlock success')\n global g_lock\n log.debug(agvId, 'lock', loc, index)\n g_lock.acquire()\n g_stockLock[index] = utility.ticks()\n g_lock.release()\n\n\n@lock.lock(g_lock)\ndef unlockStockA(agvId, loc):\n global g_stockLock\n index = getStockA(loc)\n if index in g_stockLock:\n log.debug(agvId, 'unlock', loc, index)\n del g_stockLock[index]\n\n\n@lock.lock(g_lock)\ndef getPoint(originPoint):\n global g_point\n loadPoint()\n if g_point[originPoint] is not None:\n return g_point[originPoint]\n return originPoint\n\n\n@lock.lock(g_lock)\ndef getOriginPoint(point):\n global g_point\n loadPoint()\n for itemIndex in g_point:\n if g_point[itemIndex] == point:\n return itemIndex\n return point\n\n\n@lock.lock(g_lock)\ndef loadPoint():\n global g_point\n filePath = os.path.dirname(__file__)\n fileName = 
'point.cfg'\n if filePath:\n fileName = filePath + '/' + fileName\n g_point = json_codec.load_file(fileName)\n\n\n@lock.lock(g_lock)\ndef checkCart(cartId, scanId):\n scanId = scanId.strip()\n\n def loadCart():\n global g_carts\n p = os.path.dirname(__file__)\n pp = 'cart.cfg'\n if p:\n pp = p + '/' + pp\n g_carts = json_codec.load_file(pp)\n\n def saveCart():\n global g_carts\n p = os.path.dirname(__file__)\n pp = 'cart.cfg'\n if p:\n pp = p + '/' + pp\n json_codec.dump_file(pp, g_carts)\n\n def findCart(scanId):\n global g_carts\n for c in g_carts:\n if g_carts[c] == scanId:\n return c\n return 'unknown'\n global g_carts\n if g_carts is None:\n loadCart()\n if cartId in g_carts:\n if scanId != g_carts[cartId]:\n log.error('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart(scanId))\n raise Exception('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart\n (scanId))\n else:\n g_carts[cartId] = scanId\n saveCart()\n\n\ndef _run(func, args, callback, obj):\n\n def threadFunc(func, args, callback, obj):\n hasCallback = False\n try:\n func(*args)\n if utility.is_exited():\n return\n hasCallback = True\n callback(obj)\n except Exception as e:\n obj['result'] = -1\n obj['resultDesc'] = str(e)\n log.exception('agvCtrl:', e)\n if 'agv' in obj:\n agvId = obj['agv']\n log.debug('小车:' + agvId + ',出现未经处理的异常,正在返航 ')\n restAgv(agvId)\n freeAgv(agvId)\n if not hasCallback:\n callback(obj)\n t = threading.Thread(target=threadFunc, args=(func, args, callback, obj))\n global g_threads\n t.start()\n g_threads.append(t)\n\n\ndef _initObj(obj, agvId):\n obj['agv'] = agvId\n obj['result'] = 0\n obj['resultDesc'] = 'success'\n\n\n<mask token>\n\n\ndef apply(locId):\n locId = getOriginPoint(locId)\n return api.apply(locId + '.1')\n\n\ndef call(agvId, locId, finishCallback, obj):\n _initObj(obj, agvId)\n locId = getOriginPoint(locId)\n try:\n _run(func=_call, args=(agvId, locId), callback=finishCallback, obj=obj)\n except Exception as e:\n restAgv(agvId)\n freeAgv(agvId)\n raise e\n return 
agvId\n\n\n<mask token>\n\n\ndef moveCart(agvId, cartId, srcLoc, locId, finishCallback, obj):\n _initObj(obj, agvId)\n assert api.isCartLoc(cartId)\n srcLoc = getOriginPoint(srcLoc)\n locId = getOriginPoint(locId)\n try:\n _run(func=_moveCart, args=(agvId, srcLoc, locId, cartId), callback=\n finishCallback, obj=obj)\n except Exception as e:\n restAgv(agvId)\n freeAgv(agvId)\n raise e\n\n\n<mask token>\n\n\ndef freeAgv(agvId):\n try:\n api.unlock(agvId)\n except Exception as e:\n log.exception('freeAgv', e)\n\n\ndef restAgv(agvId):\n agvId2 = api.getAgvId(agvId)\n api.reset(agvId2)\n\n\n<mask token>\n\n\ndef testgetPoint():\n resulta = getPoint('StockA_row7_col4')\n assert resulta == 'begin_1'\n resultb = getPoint('StockA_row8_col4')\n assert resultb == 'begin_2'\n\n\ndef testgetOrginPoint():\n resulta = getOriginPoint('begin_1')\n assert resulta == 'StockA_row7_col4'\n resultb = getOriginPoint('begin_2')\n assert resultb == 'StockA_row8_col4'\n resultc = getOriginPoint('hhahahaa')\n assert resultc == 'hhahahaa'\n\n\n<mask token>\n\n\ndef testcheckCart():\n global g_carts\n g_carts = None\n checkCart('CART9001', '591')\n checkCart('CART9002', '592')\n gg = json_codec.load_file('cart.cfg')\n assert 'CART9001' in gg\n assert 'CART9002' in gg\n assert gg['CART9001'] == '591'\n assert gg['CART9002'] == '592'\n checkCart('CART9002', '592')\n checkCart('CART9001', '591')\n try:\n checkCart('CART9002', '591')\n assert 0\n except Exception as e:\n s = str(e)\n assert s.find('货架ID不正确,期望货架:CART9002, 实际货架:CART9001') != -1\n\n\n<mask token>\n\n\ndef func2(stock1, stock2):\n print('-------------------- start thread ------------------------',\n stock1, stock2)\n time.sleep(1)\n cartId = 'CART9009'\n for i in range(20):\n print('current loop is - ', i.__str__())\n move_cart(cartId, stock1, stock2)\n move_cart(cartId, stock2, stock1)\n print('current loop end - ', i.__str__())\n print('=======================================')\n print('finish func2')\n 
print('=======================================')\n\n\ndef func3(times, starts, seats):\n current = starts\n cartId = 'CART9009'\n time.sleep(1)\n for loop in range(0, times - 1):\n tip1 = 'currentLoop is ' + loop.__str__(\n ) + ' currentStart is ' + current\n print(tip1)\n for i in range(0, len(seats)):\n next = str(seats[i])\n tip2 = ('currentLoop is ' + loop.__str__() +\n 'currentOrigin is ' + current + 'currentNext is ' + next +\n ' seatIndex is ' + i.__str__())\n print(tip2)\n print('excuting')\n move_cart(cartId, current, next)\n current = next\n\n\ndef testPageAgvControl(jsonstr):\n jsonData = json.loads(jsonstr)\n result = False\n if len(jsonData) == 0:\n result = False\n else:\n for currentJson in jsonData:\n start = currentJson['start']\n seat = currentJson['seat']\n loop = int(currentJson['loop'])\n seats = str.split(seat, ',')\n durabilityTestTask1 = threading.Thread(target=func3, args=[loop,\n start, seats])\n durabilityTestTask1.start()\n result = True\n return result\n\n\n<mask token>\n\n\ndef testPageUnloockAll():\n api.unlockAll()\n\n\n<mask token>\n\n\ndef test1():\n Init()\n durabilityTestTask1 = threading.Thread(target=func3, args=[20,\n 'stockA_row1_col3', ['stockA_row1_col2', 'stockA_row1_col4']])\n durabilityTestTask1.start()\n durabilityTestTask2 = threading.Thread(target=func3, args=[20,\n 'stockA_row1_col2', ['seat2_1', 'stockA_row4_col2']])\n durabilityTestTask3 = threading.Thread(target=func3, args=[20,\n 'stockA_row5_col3', ['seat16_1', 'stockA_row5_col2']])\n durabilityTestTask4 = threading.Thread(target=func3, args=[20,\n 'stockA_row6_col3', ['seat12_1', 'stockA_row6_col2']])\n durabilityTestTask1.join()\n print('===============ALL FINISH ========================')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@utility.init()\ndef init():\n if utility.is_test():\n return\n api.init()\n time.sleep(3)\n\n\ndef wait():\n global g_threads\n for t in g_threads:\n t.join()\n g_threads.clear()\n\n\n@utility.fini()\ndef fini():\n if utility.is_test():\n return\n api.fini()\n wait()\n\n\n<mask token>\n\n\ndef getStockA(loc):\n if loc[0:6] != 'stockA':\n return None\n m = re.search('stockA_row(\\\\d+)_col(\\\\d+).*', loc)\n if m is None:\n return None\n row = int(m.group(1))\n col = int(m.group(2))\n if row is None:\n return\n if row % 2 != 1:\n row -= 1\n return row * 1000 + col\n\n\n@lock.lock(g_lock)\ndef checkTimeout(index, agvId, loc):\n global g_stockLock\n if index in g_stockLock:\n if utility.ticks() - g_stockLock[index] > 10 * 60 * 1000:\n unlockStockA(agvId, loc)\n log.warning('delete timeout locked', index)\n\n\ndef lockStockA(agvId, loc):\n global g_stockLock\n index = getStockA(loc)\n if index is None:\n return\n if index in g_stockLock:\n checkTimeout(index, agvId, loc)\n log.warning(agvId, loc, 'is locked, wait for unlock')\n for i in range(60 * 5):\n if index not in g_stockLock:\n break\n time.sleep(1)\n log.info(agvId, loc, 'wait for unlock success')\n global g_lock\n log.debug(agvId, 'lock', loc, index)\n g_lock.acquire()\n g_stockLock[index] = utility.ticks()\n g_lock.release()\n\n\n@lock.lock(g_lock)\ndef unlockStockA(agvId, loc):\n global g_stockLock\n index = getStockA(loc)\n if index in g_stockLock:\n log.debug(agvId, 'unlock', loc, index)\n del g_stockLock[index]\n\n\n@lock.lock(g_lock)\ndef getPoint(originPoint):\n global g_point\n loadPoint()\n if g_point[originPoint] is not None:\n return g_point[originPoint]\n return originPoint\n\n\n@lock.lock(g_lock)\ndef getOriginPoint(point):\n global g_point\n loadPoint()\n for itemIndex in g_point:\n if g_point[itemIndex] == point:\n return itemIndex\n return point\n\n\n@lock.lock(g_lock)\ndef loadPoint():\n global g_point\n filePath = os.path.dirname(__file__)\n fileName = 
'point.cfg'\n if filePath:\n fileName = filePath + '/' + fileName\n g_point = json_codec.load_file(fileName)\n\n\n@lock.lock(g_lock)\ndef checkCart(cartId, scanId):\n scanId = scanId.strip()\n\n def loadCart():\n global g_carts\n p = os.path.dirname(__file__)\n pp = 'cart.cfg'\n if p:\n pp = p + '/' + pp\n g_carts = json_codec.load_file(pp)\n\n def saveCart():\n global g_carts\n p = os.path.dirname(__file__)\n pp = 'cart.cfg'\n if p:\n pp = p + '/' + pp\n json_codec.dump_file(pp, g_carts)\n\n def findCart(scanId):\n global g_carts\n for c in g_carts:\n if g_carts[c] == scanId:\n return c\n return 'unknown'\n global g_carts\n if g_carts is None:\n loadCart()\n if cartId in g_carts:\n if scanId != g_carts[cartId]:\n log.error('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart(scanId))\n raise Exception('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart\n (scanId))\n else:\n g_carts[cartId] = scanId\n saveCart()\n\n\ndef _run(func, args, callback, obj):\n\n def threadFunc(func, args, callback, obj):\n hasCallback = False\n try:\n func(*args)\n if utility.is_exited():\n return\n hasCallback = True\n callback(obj)\n except Exception as e:\n obj['result'] = -1\n obj['resultDesc'] = str(e)\n log.exception('agvCtrl:', e)\n if 'agv' in obj:\n agvId = obj['agv']\n log.debug('小车:' + agvId + ',出现未经处理的异常,正在返航 ')\n restAgv(agvId)\n freeAgv(agvId)\n if not hasCallback:\n callback(obj)\n t = threading.Thread(target=threadFunc, args=(func, args, callback, obj))\n global g_threads\n t.start()\n g_threads.append(t)\n\n\ndef _initObj(obj, agvId):\n obj['agv'] = agvId\n obj['result'] = 0\n obj['resultDesc'] = 'success'\n\n\n<mask token>\n\n\ndef apply(locId):\n locId = getOriginPoint(locId)\n return api.apply(locId + '.1')\n\n\ndef call(agvId, locId, finishCallback, obj):\n _initObj(obj, agvId)\n locId = getOriginPoint(locId)\n try:\n _run(func=_call, args=(agvId, locId), callback=finishCallback, obj=obj)\n except Exception as e:\n restAgv(agvId)\n freeAgv(agvId)\n raise e\n return 
agvId\n\n\ndef _moveCart(agvId, srcLoc, locId, cartId):\n try:\n c = api.mission(agvId, 2)\n if c:\n checkCart(cartId, c)\n api.move(agvId, srcLoc + '.2')\n except Exception as e:\n pass\n finally:\n unlockStockA(agvId, srcLoc)\n loc, type = api.getMissionType('get', '', srcLoc)\n api.mission(agvId, type)\n loc, type = api.getMissionType('put', srcLoc, locId)\n api.move(agvId, loc + '.3')\n api.mission(agvId, type)\n lockStockA(agvId, locId)\n try:\n api.move(agvId, locId + '.4')\n api.mission(agvId, 5)\n api.move(agvId, locId + '.5')\n finally:\n unlockStockA(agvId, locId)\n freeAgv(agvId)\n\n\ndef moveCart(agvId, cartId, srcLoc, locId, finishCallback, obj):\n _initObj(obj, agvId)\n assert api.isCartLoc(cartId)\n srcLoc = getOriginPoint(srcLoc)\n locId = getOriginPoint(locId)\n try:\n _run(func=_moveCart, args=(agvId, srcLoc, locId, cartId), callback=\n finishCallback, obj=obj)\n except Exception as e:\n restAgv(agvId)\n freeAgv(agvId)\n raise e\n\n\ndef move(agvId, locId, finishCallback, obj):\n _initObj(obj, agvId)\n try:\n locId = getOriginPoint(locId)\n _run(func=api.move, args=(agvId, locId), callback=finishCallback,\n obj=obj)\n except Exception as e:\n freeAgv(agvId)\n raise e\n\n\ndef freeAgv(agvId):\n try:\n api.unlock(agvId)\n except Exception as e:\n log.exception('freeAgv', e)\n\n\ndef restAgv(agvId):\n agvId2 = api.getAgvId(agvId)\n api.reset(agvId2)\n\n\n<mask token>\n\n\ndef testgetPoint():\n resulta = getPoint('StockA_row7_col4')\n assert resulta == 'begin_1'\n resultb = getPoint('StockA_row8_col4')\n assert resultb == 'begin_2'\n\n\ndef testgetOrginPoint():\n resulta = getOriginPoint('begin_1')\n assert resulta == 'StockA_row7_col4'\n resultb = getOriginPoint('begin_2')\n assert resultb == 'StockA_row8_col4'\n resultc = getOriginPoint('hhahahaa')\n assert resultc == 'hhahahaa'\n\n\ndef testgetStockA():\n assert getStockA('stockA_row10_col3') == 9003\n assert getStockA('stockA_row10_col4') == 9004\n assert getStockA('stockA_row1_col1') == 1001\n 
assert getStockA('stockA_row2_col2') == 1002\n assert getStockA('stockA_row3_col2') == 3002\n assert getStockA('stockA_row4_col2') == 3002\n assert getStockA('stockA_row4_col2.1') == 3002\n assert getStockA('stockB_row4_col2.1') == None\n assert getStockA('begin_1') == None\n assert getStockA('seat_1') == None\n\n\ndef testcheckCart():\n global g_carts\n g_carts = None\n checkCart('CART9001', '591')\n checkCart('CART9002', '592')\n gg = json_codec.load_file('cart.cfg')\n assert 'CART9001' in gg\n assert 'CART9002' in gg\n assert gg['CART9001'] == '591'\n assert gg['CART9002'] == '592'\n checkCart('CART9002', '592')\n checkCart('CART9001', '591')\n try:\n checkCart('CART9002', '591')\n assert 0\n except Exception as e:\n s = str(e)\n assert s.find('货架ID不正确,期望货架:CART9002, 实际货架:CART9001') != -1\n\n\n<mask token>\n\n\ndef func2(stock1, stock2):\n print('-------------------- start thread ------------------------',\n stock1, stock2)\n time.sleep(1)\n cartId = 'CART9009'\n for i in range(20):\n print('current loop is - ', i.__str__())\n move_cart(cartId, stock1, stock2)\n move_cart(cartId, stock2, stock1)\n print('current loop end - ', i.__str__())\n print('=======================================')\n print('finish func2')\n print('=======================================')\n\n\ndef func3(times, starts, seats):\n current = starts\n cartId = 'CART9009'\n time.sleep(1)\n for loop in range(0, times - 1):\n tip1 = 'currentLoop is ' + loop.__str__(\n ) + ' currentStart is ' + current\n print(tip1)\n for i in range(0, len(seats)):\n next = str(seats[i])\n tip2 = ('currentLoop is ' + loop.__str__() +\n 'currentOrigin is ' + current + 'currentNext is ' + next +\n ' seatIndex is ' + i.__str__())\n print(tip2)\n print('excuting')\n move_cart(cartId, current, next)\n current = next\n\n\ndef testPageAgvControl(jsonstr):\n jsonData = json.loads(jsonstr)\n result = False\n if len(jsonData) == 0:\n result = False\n else:\n for currentJson in jsonData:\n start = currentJson['start']\n seat 
= currentJson['seat']\n loop = int(currentJson['loop'])\n seats = str.split(seat, ',')\n durabilityTestTask1 = threading.Thread(target=func3, args=[loop,\n start, seats])\n durabilityTestTask1.start()\n result = True\n return result\n\n\n<mask token>\n\n\ndef testPageUnloockAll():\n api.unlockAll()\n\n\n<mask token>\n\n\ndef test1():\n Init()\n durabilityTestTask1 = threading.Thread(target=func3, args=[20,\n 'stockA_row1_col3', ['stockA_row1_col2', 'stockA_row1_col4']])\n durabilityTestTask1.start()\n durabilityTestTask2 = threading.Thread(target=func3, args=[20,\n 'stockA_row1_col2', ['seat2_1', 'stockA_row4_col2']])\n durabilityTestTask3 = threading.Thread(target=func3, args=[20,\n 'stockA_row5_col3', ['seat16_1', 'stockA_row5_col2']])\n durabilityTestTask4 = threading.Thread(target=func3, args=[20,\n 'stockA_row6_col3', ['seat12_1', 'stockA_row6_col2']])\n durabilityTestTask1.join()\n print('===============ALL FINISH ========================')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@utility.init()\ndef init():\n if utility.is_test():\n return\n api.init()\n time.sleep(3)\n\n\ndef wait():\n global g_threads\n for t in g_threads:\n t.join()\n g_threads.clear()\n\n\n@utility.fini()\ndef fini():\n if utility.is_test():\n return\n api.fini()\n wait()\n\n\n<mask token>\n\n\ndef getStockA(loc):\n if loc[0:6] != 'stockA':\n return None\n m = re.search('stockA_row(\\\\d+)_col(\\\\d+).*', loc)\n if m is None:\n return None\n row = int(m.group(1))\n col = int(m.group(2))\n if row is None:\n return\n if row % 2 != 1:\n row -= 1\n return row * 1000 + col\n\n\n@lock.lock(g_lock)\ndef checkTimeout(index, agvId, loc):\n global g_stockLock\n if index in g_stockLock:\n if utility.ticks() - g_stockLock[index] > 10 * 60 * 1000:\n unlockStockA(agvId, loc)\n log.warning('delete timeout locked', index)\n\n\ndef lockStockA(agvId, loc):\n global g_stockLock\n index = getStockA(loc)\n if index is None:\n return\n if index in g_stockLock:\n checkTimeout(index, agvId, loc)\n log.warning(agvId, loc, 'is locked, wait for unlock')\n for i in range(60 * 5):\n if index not in g_stockLock:\n break\n time.sleep(1)\n log.info(agvId, loc, 'wait for unlock success')\n global g_lock\n log.debug(agvId, 'lock', loc, index)\n g_lock.acquire()\n g_stockLock[index] = utility.ticks()\n g_lock.release()\n\n\n@lock.lock(g_lock)\ndef unlockStockA(agvId, loc):\n global g_stockLock\n index = getStockA(loc)\n if index in g_stockLock:\n log.debug(agvId, 'unlock', loc, index)\n del g_stockLock[index]\n\n\n@lock.lock(g_lock)\ndef getPoint(originPoint):\n global g_point\n loadPoint()\n if g_point[originPoint] is not None:\n return g_point[originPoint]\n return originPoint\n\n\n@lock.lock(g_lock)\ndef getOriginPoint(point):\n global g_point\n loadPoint()\n for itemIndex in g_point:\n if g_point[itemIndex] == point:\n return itemIndex\n return point\n\n\n@lock.lock(g_lock)\ndef loadPoint():\n global g_point\n filePath = os.path.dirname(__file__)\n fileName = 
'point.cfg'\n if filePath:\n fileName = filePath + '/' + fileName\n g_point = json_codec.load_file(fileName)\n\n\n@lock.lock(g_lock)\ndef checkCart(cartId, scanId):\n scanId = scanId.strip()\n\n def loadCart():\n global g_carts\n p = os.path.dirname(__file__)\n pp = 'cart.cfg'\n if p:\n pp = p + '/' + pp\n g_carts = json_codec.load_file(pp)\n\n def saveCart():\n global g_carts\n p = os.path.dirname(__file__)\n pp = 'cart.cfg'\n if p:\n pp = p + '/' + pp\n json_codec.dump_file(pp, g_carts)\n\n def findCart(scanId):\n global g_carts\n for c in g_carts:\n if g_carts[c] == scanId:\n return c\n return 'unknown'\n global g_carts\n if g_carts is None:\n loadCart()\n if cartId in g_carts:\n if scanId != g_carts[cartId]:\n log.error('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart(scanId))\n raise Exception('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart\n (scanId))\n else:\n g_carts[cartId] = scanId\n saveCart()\n\n\ndef _run(func, args, callback, obj):\n\n def threadFunc(func, args, callback, obj):\n hasCallback = False\n try:\n func(*args)\n if utility.is_exited():\n return\n hasCallback = True\n callback(obj)\n except Exception as e:\n obj['result'] = -1\n obj['resultDesc'] = str(e)\n log.exception('agvCtrl:', e)\n if 'agv' in obj:\n agvId = obj['agv']\n log.debug('小车:' + agvId + ',出现未经处理的异常,正在返航 ')\n restAgv(agvId)\n freeAgv(agvId)\n if not hasCallback:\n callback(obj)\n t = threading.Thread(target=threadFunc, args=(func, args, callback, obj))\n global g_threads\n t.start()\n g_threads.append(t)\n\n\ndef _initObj(obj, agvId):\n obj['agv'] = agvId\n obj['result'] = 0\n obj['resultDesc'] = 'success'\n\n\ndef _call(agvId, locId):\n if api.isCartLoc(locId):\n api.move(agvId, locId + '.1')\n lockStockA(agvId, locId)\n try:\n api.mission(agvId, 1)\n except Exception as e:\n unlockStockA(agvId, locId)\n raise e\n else:\n api.move(agvId, locId)\n\n\ndef apply(locId):\n locId = getOriginPoint(locId)\n return api.apply(locId + '.1')\n\n\ndef call(agvId, locId, finishCallback, 
obj):\n _initObj(obj, agvId)\n locId = getOriginPoint(locId)\n try:\n _run(func=_call, args=(agvId, locId), callback=finishCallback, obj=obj)\n except Exception as e:\n restAgv(agvId)\n freeAgv(agvId)\n raise e\n return agvId\n\n\ndef _moveCart(agvId, srcLoc, locId, cartId):\n try:\n c = api.mission(agvId, 2)\n if c:\n checkCart(cartId, c)\n api.move(agvId, srcLoc + '.2')\n except Exception as e:\n pass\n finally:\n unlockStockA(agvId, srcLoc)\n loc, type = api.getMissionType('get', '', srcLoc)\n api.mission(agvId, type)\n loc, type = api.getMissionType('put', srcLoc, locId)\n api.move(agvId, loc + '.3')\n api.mission(agvId, type)\n lockStockA(agvId, locId)\n try:\n api.move(agvId, locId + '.4')\n api.mission(agvId, 5)\n api.move(agvId, locId + '.5')\n finally:\n unlockStockA(agvId, locId)\n freeAgv(agvId)\n\n\ndef moveCart(agvId, cartId, srcLoc, locId, finishCallback, obj):\n _initObj(obj, agvId)\n assert api.isCartLoc(cartId)\n srcLoc = getOriginPoint(srcLoc)\n locId = getOriginPoint(locId)\n try:\n _run(func=_moveCart, args=(agvId, srcLoc, locId, cartId), callback=\n finishCallback, obj=obj)\n except Exception as e:\n restAgv(agvId)\n freeAgv(agvId)\n raise e\n\n\ndef move(agvId, locId, finishCallback, obj):\n _initObj(obj, agvId)\n try:\n locId = getOriginPoint(locId)\n _run(func=api.move, args=(agvId, locId), callback=finishCallback,\n obj=obj)\n except Exception as e:\n freeAgv(agvId)\n raise e\n\n\ndef freeAgv(agvId):\n try:\n api.unlock(agvId)\n except Exception as e:\n log.exception('freeAgv', e)\n\n\ndef restAgv(agvId):\n agvId2 = api.getAgvId(agvId)\n api.reset(agvId2)\n\n\n<mask token>\n\n\ndef testgetPoint():\n resulta = getPoint('StockA_row7_col4')\n assert resulta == 'begin_1'\n resultb = getPoint('StockA_row8_col4')\n assert resultb == 'begin_2'\n\n\ndef testgetOrginPoint():\n resulta = getOriginPoint('begin_1')\n assert resulta == 'StockA_row7_col4'\n resultb = getOriginPoint('begin_2')\n assert resultb == 'StockA_row8_col4'\n resultc = 
getOriginPoint('hhahahaa')\n assert resultc == 'hhahahaa'\n\n\ndef testgetStockA():\n assert getStockA('stockA_row10_col3') == 9003\n assert getStockA('stockA_row10_col4') == 9004\n assert getStockA('stockA_row1_col1') == 1001\n assert getStockA('stockA_row2_col2') == 1002\n assert getStockA('stockA_row3_col2') == 3002\n assert getStockA('stockA_row4_col2') == 3002\n assert getStockA('stockA_row4_col2.1') == 3002\n assert getStockA('stockB_row4_col2.1') == None\n assert getStockA('begin_1') == None\n assert getStockA('seat_1') == None\n\n\ndef testcheckCart():\n global g_carts\n g_carts = None\n checkCart('CART9001', '591')\n checkCart('CART9002', '592')\n gg = json_codec.load_file('cart.cfg')\n assert 'CART9001' in gg\n assert 'CART9002' in gg\n assert gg['CART9001'] == '591'\n assert gg['CART9002'] == '592'\n checkCart('CART9002', '592')\n checkCart('CART9001', '591')\n try:\n checkCart('CART9002', '591')\n assert 0\n except Exception as e:\n s = str(e)\n assert s.find('货架ID不正确,期望货架:CART9002, 实际货架:CART9001') != -1\n\n\n<mask token>\n\n\ndef func2(stock1, stock2):\n print('-------------------- start thread ------------------------',\n stock1, stock2)\n time.sleep(1)\n cartId = 'CART9009'\n for i in range(20):\n print('current loop is - ', i.__str__())\n move_cart(cartId, stock1, stock2)\n move_cart(cartId, stock2, stock1)\n print('current loop end - ', i.__str__())\n print('=======================================')\n print('finish func2')\n print('=======================================')\n\n\ndef func3(times, starts, seats):\n current = starts\n cartId = 'CART9009'\n time.sleep(1)\n for loop in range(0, times - 1):\n tip1 = 'currentLoop is ' + loop.__str__(\n ) + ' currentStart is ' + current\n print(tip1)\n for i in range(0, len(seats)):\n next = str(seats[i])\n tip2 = ('currentLoop is ' + loop.__str__() +\n 'currentOrigin is ' + current + 'currentNext is ' + next +\n ' seatIndex is ' + i.__str__())\n print(tip2)\n print('excuting')\n move_cart(cartId, current, 
next)\n current = next\n\n\ndef testPageAgvControl(jsonstr):\n jsonData = json.loads(jsonstr)\n result = False\n if len(jsonData) == 0:\n result = False\n else:\n for currentJson in jsonData:\n start = currentJson['start']\n seat = currentJson['seat']\n loop = int(currentJson['loop'])\n seats = str.split(seat, ',')\n durabilityTestTask1 = threading.Thread(target=func3, args=[loop,\n start, seats])\n durabilityTestTask1.start()\n result = True\n return result\n\n\n<mask token>\n\n\ndef testPageUnloockAll():\n api.unlockAll()\n\n\n<mask token>\n\n\ndef test1():\n Init()\n durabilityTestTask1 = threading.Thread(target=func3, args=[20,\n 'stockA_row1_col3', ['stockA_row1_col2', 'stockA_row1_col4']])\n durabilityTestTask1.start()\n durabilityTestTask2 = threading.Thread(target=func3, args=[20,\n 'stockA_row1_col2', ['seat2_1', 'stockA_row4_col2']])\n durabilityTestTask3 = threading.Thread(target=func3, args=[20,\n 'stockA_row5_col3', ['seat16_1', 'stockA_row5_col2']])\n durabilityTestTask4 = threading.Thread(target=func3, args=[20,\n 'stockA_row6_col3', ['seat12_1', 'stockA_row6_col2']])\n durabilityTestTask1.join()\n print('===============ALL FINISH ========================')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\n@utility.init()\ndef init():\n if utility.is_test():\n return\n api.init()\n time.sleep(3)\n\n\ndef wait():\n global g_threads\n for t in g_threads:\n t.join()\n g_threads.clear()\n\n\n@utility.fini()\ndef fini():\n if utility.is_test():\n return\n api.fini()\n wait()\n\n\n<mask token>\n\n\ndef getStockA(loc):\n if loc[0:6] != 'stockA':\n return None\n m = re.search('stockA_row(\\\\d+)_col(\\\\d+).*', loc)\n if m is None:\n return None\n row = int(m.group(1))\n col = int(m.group(2))\n if row is None:\n return\n if row % 2 != 1:\n row -= 1\n return row * 1000 + col\n\n\n@lock.lock(g_lock)\ndef checkTimeout(index, agvId, loc):\n global g_stockLock\n if index in g_stockLock:\n if utility.ticks() - g_stockLock[index] > 10 * 60 * 1000:\n unlockStockA(agvId, loc)\n log.warning('delete timeout locked', index)\n\n\ndef lockStockA(agvId, loc):\n global g_stockLock\n index = getStockA(loc)\n if index is None:\n return\n if index in g_stockLock:\n checkTimeout(index, agvId, loc)\n log.warning(agvId, loc, 'is locked, wait for unlock')\n for i in range(60 * 5):\n if index not in g_stockLock:\n break\n time.sleep(1)\n log.info(agvId, loc, 'wait for unlock success')\n global g_lock\n log.debug(agvId, 'lock', loc, index)\n g_lock.acquire()\n g_stockLock[index] = utility.ticks()\n g_lock.release()\n\n\n@lock.lock(g_lock)\ndef unlockStockA(agvId, loc):\n global g_stockLock\n index = getStockA(loc)\n if index in g_stockLock:\n log.debug(agvId, 'unlock', loc, index)\n del g_stockLock[index]\n\n\n@lock.lock(g_lock)\ndef getPoint(originPoint):\n global g_point\n loadPoint()\n if g_point[originPoint] is not None:\n return g_point[originPoint]\n return originPoint\n\n\n@lock.lock(g_lock)\ndef getOriginPoint(point):\n global g_point\n loadPoint()\n for itemIndex in g_point:\n if g_point[itemIndex] == point:\n return itemIndex\n return point\n\n\n@lock.lock(g_lock)\ndef loadPoint():\n global g_point\n filePath = os.path.dirname(__file__)\n fileName = 
'point.cfg'\n if filePath:\n fileName = filePath + '/' + fileName\n g_point = json_codec.load_file(fileName)\n\n\n@lock.lock(g_lock)\ndef checkCart(cartId, scanId):\n scanId = scanId.strip()\n\n def loadCart():\n global g_carts\n p = os.path.dirname(__file__)\n pp = 'cart.cfg'\n if p:\n pp = p + '/' + pp\n g_carts = json_codec.load_file(pp)\n\n def saveCart():\n global g_carts\n p = os.path.dirname(__file__)\n pp = 'cart.cfg'\n if p:\n pp = p + '/' + pp\n json_codec.dump_file(pp, g_carts)\n\n def findCart(scanId):\n global g_carts\n for c in g_carts:\n if g_carts[c] == scanId:\n return c\n return 'unknown'\n global g_carts\n if g_carts is None:\n loadCart()\n if cartId in g_carts:\n if scanId != g_carts[cartId]:\n log.error('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart(scanId))\n raise Exception('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart\n (scanId))\n else:\n g_carts[cartId] = scanId\n saveCart()\n\n\ndef _run(func, args, callback, obj):\n\n def threadFunc(func, args, callback, obj):\n hasCallback = False\n try:\n func(*args)\n if utility.is_exited():\n return\n hasCallback = True\n callback(obj)\n except Exception as e:\n obj['result'] = -1\n obj['resultDesc'] = str(e)\n log.exception('agvCtrl:', e)\n if 'agv' in obj:\n agvId = obj['agv']\n log.debug('小车:' + agvId + ',出现未经处理的异常,正在返航 ')\n restAgv(agvId)\n freeAgv(agvId)\n if not hasCallback:\n callback(obj)\n t = threading.Thread(target=threadFunc, args=(func, args, callback, obj))\n global g_threads\n t.start()\n g_threads.append(t)\n\n\ndef _initObj(obj, agvId):\n obj['agv'] = agvId\n obj['result'] = 0\n obj['resultDesc'] = 'success'\n\n\ndef _call(agvId, locId):\n if api.isCartLoc(locId):\n api.move(agvId, locId + '.1')\n lockStockA(agvId, locId)\n try:\n api.mission(agvId, 1)\n except Exception as e:\n unlockStockA(agvId, locId)\n raise e\n else:\n api.move(agvId, locId)\n\n\ndef apply(locId):\n locId = getOriginPoint(locId)\n return api.apply(locId + '.1')\n\n\ndef call(agvId, locId, finishCallback, 
obj):\n _initObj(obj, agvId)\n locId = getOriginPoint(locId)\n try:\n _run(func=_call, args=(agvId, locId), callback=finishCallback, obj=obj)\n except Exception as e:\n restAgv(agvId)\n freeAgv(agvId)\n raise e\n return agvId\n\n\ndef _moveCart(agvId, srcLoc, locId, cartId):\n try:\n c = api.mission(agvId, 2)\n if c:\n checkCart(cartId, c)\n api.move(agvId, srcLoc + '.2')\n except Exception as e:\n pass\n finally:\n unlockStockA(agvId, srcLoc)\n loc, type = api.getMissionType('get', '', srcLoc)\n api.mission(agvId, type)\n loc, type = api.getMissionType('put', srcLoc, locId)\n api.move(agvId, loc + '.3')\n api.mission(agvId, type)\n lockStockA(agvId, locId)\n try:\n api.move(agvId, locId + '.4')\n api.mission(agvId, 5)\n api.move(agvId, locId + '.5')\n finally:\n unlockStockA(agvId, locId)\n freeAgv(agvId)\n\n\ndef moveCart(agvId, cartId, srcLoc, locId, finishCallback, obj):\n _initObj(obj, agvId)\n assert api.isCartLoc(cartId)\n srcLoc = getOriginPoint(srcLoc)\n locId = getOriginPoint(locId)\n try:\n _run(func=_moveCart, args=(agvId, srcLoc, locId, cartId), callback=\n finishCallback, obj=obj)\n except Exception as e:\n restAgv(agvId)\n freeAgv(agvId)\n raise e\n\n\ndef move(agvId, locId, finishCallback, obj):\n _initObj(obj, agvId)\n try:\n locId = getOriginPoint(locId)\n _run(func=api.move, args=(agvId, locId), callback=finishCallback,\n obj=obj)\n except Exception as e:\n freeAgv(agvId)\n raise e\n\n\ndef freeAgv(agvId):\n try:\n api.unlock(agvId)\n except Exception as e:\n log.exception('freeAgv', e)\n\n\ndef restAgv(agvId):\n agvId2 = api.getAgvId(agvId)\n api.reset(agvId2)\n\n\ndef Init():\n import interface.dashboard.dashboardApi\n locationEvent.connect(interface.dashboard.dashboardApi.reportAgvLoc)\n time.sleep(3)\n\n\ndef testgetPoint():\n resulta = getPoint('StockA_row7_col4')\n assert resulta == 'begin_1'\n resultb = getPoint('StockA_row8_col4')\n assert resultb == 'begin_2'\n\n\ndef testgetOrginPoint():\n resulta = getOriginPoint('begin_1')\n assert 
resulta == 'StockA_row7_col4'\n resultb = getOriginPoint('begin_2')\n assert resultb == 'StockA_row8_col4'\n resultc = getOriginPoint('hhahahaa')\n assert resultc == 'hhahahaa'\n\n\ndef testgetStockA():\n assert getStockA('stockA_row10_col3') == 9003\n assert getStockA('stockA_row10_col4') == 9004\n assert getStockA('stockA_row1_col1') == 1001\n assert getStockA('stockA_row2_col2') == 1002\n assert getStockA('stockA_row3_col2') == 3002\n assert getStockA('stockA_row4_col2') == 3002\n assert getStockA('stockA_row4_col2.1') == 3002\n assert getStockA('stockB_row4_col2.1') == None\n assert getStockA('begin_1') == None\n assert getStockA('seat_1') == None\n\n\ndef testcheckCart():\n global g_carts\n g_carts = None\n checkCart('CART9001', '591')\n checkCart('CART9002', '592')\n gg = json_codec.load_file('cart.cfg')\n assert 'CART9001' in gg\n assert 'CART9002' in gg\n assert gg['CART9001'] == '591'\n assert gg['CART9002'] == '592'\n checkCart('CART9002', '592')\n checkCart('CART9001', '591')\n try:\n checkCart('CART9002', '591')\n assert 0\n except Exception as e:\n s = str(e)\n assert s.find('货架ID不正确,期望货架:CART9002, 实际货架:CART9001') != -1\n\n\n<mask token>\n\n\n@counter.count\ndef move_cart(cartId, srcLoc, destLoc, agvId=None):\n print(cartId, srcLoc, destLoc)\n counter.setPrint(True)\n\n def callback1(obj):\n if obj['result'] == -1:\n print('error, system exit')\n obj['finish'] = True\n sys.exit(-1)\n else:\n log.warning(obj['agv'], 'start move from', obj['loc1'], 'to',\n obj['loc2'])\n moveCart(obj['agv'], obj['cart'], obj['loc1'], obj['loc2'],\n callback2, obj)\n\n def callback2(obj):\n if obj['result'] == -1:\n print('error, system exit')\n obj['finish'] = True\n sys.exit(-1)\n else:\n log.warning(obj['agv'], 'arrived', obj['loc2'])\n obj['finish'] = True\n obj = {}\n obj['loc1'] = srcLoc\n obj['loc2'] = destLoc\n obj['cart'] = cartId\n print('call ', srcLoc)\n if agvId is None:\n agvId = apply(srcLoc)\n call(agvId, srcLoc, callback1, obj)\n while not 
utility.is_exited():\n if 'finish' in obj:\n break\n time.sleep(0.2)\n print('------ move ', srcLoc, ' to ', destLoc, ' finish ------')\n\n\ndef func2(stock1, stock2):\n print('-------------------- start thread ------------------------',\n stock1, stock2)\n time.sleep(1)\n cartId = 'CART9009'\n for i in range(20):\n print('current loop is - ', i.__str__())\n move_cart(cartId, stock1, stock2)\n move_cart(cartId, stock2, stock1)\n print('current loop end - ', i.__str__())\n print('=======================================')\n print('finish func2')\n print('=======================================')\n\n\ndef func3(times, starts, seats):\n current = starts\n cartId = 'CART9009'\n time.sleep(1)\n for loop in range(0, times - 1):\n tip1 = 'currentLoop is ' + loop.__str__(\n ) + ' currentStart is ' + current\n print(tip1)\n for i in range(0, len(seats)):\n next = str(seats[i])\n tip2 = ('currentLoop is ' + loop.__str__() +\n 'currentOrigin is ' + current + 'currentNext is ' + next +\n ' seatIndex is ' + i.__str__())\n print(tip2)\n print('excuting')\n move_cart(cartId, current, next)\n current = next\n\n\ndef testPageAgvControl(jsonstr):\n jsonData = json.loads(jsonstr)\n result = False\n if len(jsonData) == 0:\n result = False\n else:\n for currentJson in jsonData:\n start = currentJson['start']\n seat = currentJson['seat']\n loop = int(currentJson['loop'])\n seats = str.split(seat, ',')\n durabilityTestTask1 = threading.Thread(target=func3, args=[loop,\n start, seats])\n durabilityTestTask1.start()\n result = True\n return result\n\n\ndef testtestPageAgvControl(jsonstr):\n jsonData = json.loads(jsonstr)\n result = False\n if len(jsonData) == 0:\n result = False\n else:\n for currentJson in jsonData:\n start = currentJson['start']\n print(start)\n time.sleep(3)\n seat = currentJson['seat']\n seats = str.split(seat, ',')\n print(seat)\n time.sleep(3)\n for currentseat in seats:\n print(currentseat)\n time.sleep(3)\n time.sleep(10)\n result = True\n return result\n\n\ndef 
testPageUnloockAll():\n api.unlockAll()\n\n\ndef testProcess(jsonData):\n utility.start()\n testPageAgvControl(jsonData)\n utility.finish()\n\n\ndef test1():\n Init()\n durabilityTestTask1 = threading.Thread(target=func3, args=[20,\n 'stockA_row1_col3', ['stockA_row1_col2', 'stockA_row1_col4']])\n durabilityTestTask1.start()\n durabilityTestTask2 = threading.Thread(target=func3, args=[20,\n 'stockA_row1_col2', ['seat2_1', 'stockA_row4_col2']])\n durabilityTestTask3 = threading.Thread(target=func3, args=[20,\n 'stockA_row5_col3', ['seat16_1', 'stockA_row5_col2']])\n durabilityTestTask4 = threading.Thread(target=func3, args=[20,\n 'stockA_row6_col3', ['seat12_1', 'stockA_row6_col2']])\n durabilityTestTask1.join()\n print('===============ALL FINISH ========================')\n\n\n<mask token>\n",
"step-5": "#coding=utf-8\n# ycat\t\t\t2017-10-20\t create\n# AGV的控制 \nimport sys,os \nimport json\nimport setup\nif __name__ == '__main__':\n\tsetup.setCurPath(__file__)\nimport utility\nimport enhance\t\nimport threading\nimport time\nimport log\nimport re\nimport lock\nimport json_codec\nimport driver.agv.hdcAgvApi as api\ng_threads =[]\ng_carts = None\ng_point = None\ng_lock = threading.RLock()\nlocationEvent = enhance.event()\napi.locationEvent.connect(locationEvent.emit)\n\n@utility.init()\ndef init():\n\tif utility.is_test():\n\t\treturn\n\tapi.init()\n\ttime.sleep(3)\n\ndef wait():\n\tglobal g_threads\n\tfor t in g_threads:\n\t\tt.join()\n\tg_threads.clear()\n\t\n@utility.fini()\ndef fini():\n\tif utility.is_test():\n\t\treturn\n\tapi.fini()\n\twait()\n\ng_stockLock = {}\n\ndef getStockA(loc):\n\tif loc[0:6] != \"stockA\":\n\t\t\treturn None\n\tm = re.search(\"stockA_row(\\d+)_col(\\d+).*\",loc)\n\tif m is None:\n\t\treturn None\n\trow = int(m.group(1))\n\tcol = int(m.group(2))\n\tif row is None:\n\t\treturn\n\tif row%2 != 1:\n\t\trow -= 1\n\treturn row*1000+col\n\t\n@lock.lock(g_lock)\t\ndef checkTimeout(index,agvId,loc):\n\tglobal g_stockLock\n\tif index in g_stockLock:\n\t\tif utility.ticks() - g_stockLock[index] > 10*60*1000:\n\t\t\tunlockStockA(agvId,loc)\n\t\t\tlog.warning(\"delete timeout locked\",index)\n\t\t\t\n\t\n#解决在StockA两个车头对撞的问题 \ndef lockStockA(agvId,loc):\n\tglobal g_stockLock\n\tindex = getStockA(loc)\n\tif index is None:\n\t\treturn\n\tif index in g_stockLock:\n\t\tcheckTimeout(index,agvId,loc)\n\t\tlog.warning(agvId,loc,\"is locked, wait for unlock\")\n\t\tfor i in range(60*5):\n\t\t\tif index not in g_stockLock:\n\t\t\t\tbreak\n\t\t\ttime.sleep(1)\n\t\tlog.info(agvId,loc,\"wait for unlock success\")\n\tglobal g_lock\n\tlog.debug(agvId,\"lock\",loc,index)\n\tg_lock.acquire()\n\tg_stockLock[index] = utility.ticks()\n\tg_lock.release()\n\n@lock.lock(g_lock)\t\ndef unlockStockA(agvId,loc):\n\tglobal g_stockLock\n\tindex = 
getStockA(loc)\n\tif index in g_stockLock:\n\t\tlog.debug(agvId,\"unlock\",loc,index)\n\t\tdel g_stockLock[index]\n\n@lock.lock(g_lock)\ndef getPoint(originPoint):\n\tglobal g_point\n\tloadPoint()\n\tif g_point[originPoint] is not None:\n\t\treturn g_point[originPoint]\n\n\treturn originPoint\n\n\n@lock.lock(g_lock)\ndef getOriginPoint(point):\n\tglobal g_point\n\tloadPoint()\n\tfor itemIndex in g_point:\n\t\tif g_point[itemIndex] == point:\n\t\t\treturn itemIndex\n\treturn point\n\n@lock.lock(g_lock)\ndef loadPoint():\n\tglobal g_point\n\tfilePath = os.path.dirname(__file__)\n\tfileName = \"point.cfg\"\n\tif filePath:\n\t\tfileName = filePath + \"/\" + fileName\n\tg_point = json_codec.load_file(fileName)\n\n\n@lock.lock(g_lock)\t\ndef checkCart(cartId,scanId):\n\tscanId = scanId.strip()\n\tdef loadCart():\n\t\tglobal g_carts\n\t\tp = os.path.dirname(__file__)\n\t\tpp = \"cart.cfg\"\n\t\tif p:\n\t\t\tpp = p+\"/\"+pp \n\t\tg_carts = json_codec.load_file(pp)\n\t\t\n\tdef saveCart():\n\t\tglobal g_carts\n\t\tp = os.path.dirname(__file__)\n\t\tpp = \"cart.cfg\"\n\t\tif p:\n\t\t\tpp = p+\"/\"+pp \n\t\tjson_codec.dump_file(pp,g_carts)\n\t\t\n\tdef findCart(scanId):\n\t\tglobal g_carts\n\t\tfor c in g_carts:\n\t\t\tif g_carts[c] == scanId:\n\t\t\t\treturn c\n\t\treturn \"unknown\"\n\t\t\n\tglobal g_carts\n\tif g_carts is None:\n\t\tloadCart()\n\tif cartId in g_carts:\n\t\tif scanId != g_carts[cartId]:\n\t\t\tlog.error(\"货架ID不正确,期望货架:\"+cartId+\", 实际货架:\"+findCart(scanId))\n\t\t\traise Exception(\"货架ID不正确,期望货架:\"+cartId+\", 实际货架:\"+findCart(scanId))\n\telse:\n\t\tg_carts[cartId] = scanId\n\t\tsaveCart()\n\t\n#finishCallback参数: finishCallback(obj)\n#obj会自动带上下面三个参数 \n#obj[\"agv\"] = agvId\n#obj[\"result\"] = 0\n#obj[\"resultDesc\"] = \"success\"\n\t\ndef _run(func,args,callback,obj):\n\tdef threadFunc(func,args,callback,obj):\n\t\thasCallback = False\n\t\ttry:\n\t\t\tfunc(*args)\n\t\t\tif utility.is_exited():\n\t\t\t\treturn\n\t\t\thasCallback = 
True\n\t\t\tcallback(obj)\n\t\texcept Exception as e:\n\t\t\tobj[\"result\"] = -1\n\t\t\tobj[\"resultDesc\"] = str(e)\n\t\t\tlog.exception(\"agvCtrl:\",e)\n\t\t\tif \"agv\" in obj:\n\t\t\t\tagvId= obj[\"agv\"]\n\t\t\t\tlog.debug(\"小车:\"+agvId+\",出现未经处理的异常,正在返航 \")\n\t\t\t\trestAgv(agvId)\n\t\t\t\tfreeAgv(agvId)\n\t\t\tif not hasCallback:\n\t\t\t\tcallback(obj)\n\tt = threading.Thread(target=threadFunc,args=(func,args,callback,obj))\n\tglobal g_threads\n\tt.start()\n\tg_threads.append(t)\n\t\ndef _initObj(obj,agvId):\n\tobj[\"agv\"] = agvId\n\tobj[\"result\"] = 0\n\tobj[\"resultDesc\"] = \"success\"\n\t\ndef _call(agvId,locId):\n\tif api.isCartLoc(locId):\n\t\tapi.move(agvId,locId+\".1\")\n\t\tlockStockA(agvId,locId)\n\t\ttry:\n\t\t\tapi.mission(agvId,1) #旋转——》钻入货架——》扫码——》返回货架id号码 \n\t\texcept Exception as e:\n\t\t\tunlockStockA(agvId,locId)\n\t\t\traise e\n\telse:\n\t\tapi.move(agvId,locId)\n\ndef apply(locId):\n\tlocId=getOriginPoint(locId)\n\n\treturn api.apply(locId+'.1')\n\t\ndef call(agvId,locId,finishCallback,obj):\n\t_initObj(obj,agvId)\n\tlocId=getOriginPoint(locId)\n\ttry:\n\n\t\t_run(func=_call,args=(agvId,locId),callback=finishCallback,obj=obj)\n\texcept Exception as e:\n\t\trestAgv(agvId)\n\t\tfreeAgv(agvId)\n\t\traise e\n\treturn agvId\n\t \ndef _moveCart(agvId,srcLoc,locId,cartId):\n\ttry:\n\t\tc = api.mission(agvId,2) #顶升任务,这个也会返回货架ID \n\t\tif c:\n\t\t\tcheckCart(cartId,c)\n\t\tapi.move(agvId,srcLoc+\".2\") \n\texcept Exception as e:\n\t\t#TODO:ycat api.move(agvId,srcLoc+\".2\")\n\t\t#TODO:ycat raise e\n\t\tpass\n\tfinally:\n\t\tunlockStockA(agvId,srcLoc)\n\t\n\tloc,type = api.getMissionType(\"get\",\"\",srcLoc)\n\tapi.mission(agvId,type) #3随动使小车和货架向右随动,4随动使小车和货架向左随动\n\t\n\tloc,type = api.getMissionType(\"put\",srcLoc,locId)\n\tapi.move(agvId,loc+\".3\")\n\tapi.mission(agvId,type) #3随动使小车和货架向右随动,4随动使小车和货架向左随动\n\tlockStockA(agvId,locId)\n\ttry:\n\t\tapi.move(agvId,locId+\".4\")\n\t\tapi.mission(agvId,5) #放下货架 \n\t\tapi.move(agvId,locId+\".5\") #返航 
\n\tfinally:\n\t\tunlockStockA(agvId,locId)\n\tfreeAgv(agvId)\n\t \n#带货架运输 \ndef moveCart(agvId,cartId,srcLoc,locId,finishCallback,obj):\t \n\t_initObj(obj,agvId)\n\tassert api.isCartLoc(cartId)\n\t#移动货架前,一定是locked状态 \n\t#assert api.isLocked(agvId)\n\tsrcLoc = getOriginPoint(srcLoc)\n\tlocId = getOriginPoint(locId)\n\ttry:\n\t\t_run(func=_moveCart,args=(agvId,srcLoc,locId,cartId),callback=finishCallback,obj=obj) \n\texcept Exception as e:\n\t\trestAgv(agvId)\n\t\tfreeAgv(agvId)\n\t\traise e\n\t\t\t \n\t\t\t\n#不带货架运输 \ndef move(agvId,locId,finishCallback,obj):\n\t_initObj(obj,agvId)\t\t\n\t#移动前,一定是locked状态 \n\t#assert api.isLocked(agvId)\n\ttry:\n\t\tlocId=getOriginPoint(locId)\n\t\t_run(func=api.move,args=(agvId,locId),callback=finishCallback,obj=obj) \n\texcept Exception as e:\n\t\tfreeAgv(agvId)\n\t\traise e\n\t\n#释放对agv的占用 \ndef freeAgv(agvId): \n\ttry:\n\t\tapi.unlock(agvId)\n\texcept Exception as e:\n\t\tlog.exception(\"freeAgv\",e)\n\t\n#回归转盘\ndef restAgv(agvId):\n\tagvId2 = api.getAgvId(agvId)\n\tapi.reset(agvId2)\n\n\ndef Init():\n\timport interface.dashboard.dashboardApi\n\tlocationEvent.connect(interface.dashboard.dashboardApi.reportAgvLoc)\n\ttime.sleep(3)\n################# unit test ################# \ndef testgetPoint():\n\tresulta= getPoint(\"StockA_row7_col4\")\n\tassert resulta== \"begin_1\"\n\tresultb= getPoint(\"StockA_row8_col4\")\n\tassert resultb == \"begin_2\"\n\n\ndef testgetOrginPoint():\n\tresulta= getOriginPoint(\"begin_1\")\n\tassert resulta== \"StockA_row7_col4\"\n\tresultb= getOriginPoint(\"begin_2\")\n\tassert \tresultb == \"StockA_row8_col4\"\n\tresultc = getOriginPoint(\"hhahahaa\")\n\n\tassert resultc == \"hhahahaa\"\n\n\ndef testgetStockA():\n\tassert getStockA(\"stockA_row10_col3\") == 9003\n\tassert getStockA(\"stockA_row10_col4\") == 9004\n\tassert getStockA(\"stockA_row1_col1\") == 1001\n\tassert getStockA(\"stockA_row2_col2\") == 1002\n\tassert getStockA(\"stockA_row3_col2\") == 3002\n\tassert getStockA(\"stockA_row4_col2\") 
== 3002\n\tassert getStockA(\"stockA_row4_col2.1\") == 3002\n\tassert getStockA(\"stockB_row4_col2.1\") == None\n\tassert getStockA(\"begin_1\") == None\n\tassert getStockA(\"seat_1\") == None\n\ndef testcheckCart():\n\tglobal g_carts\n\tg_carts = None\n\tcheckCart(\"CART9001\",\"591\")\n\tcheckCart(\"CART9002\",\"592\")\n\tgg = json_codec.load_file(\"cart.cfg\")\n\tassert \"CART9001\" in gg\n\tassert \"CART9002\" in gg\n\tassert gg[\"CART9001\"] == \"591\"\n\tassert gg[\"CART9002\"] == \"592\"\n\tcheckCart(\"CART9002\",\"592\")\n\tcheckCart(\"CART9001\",\"591\")\n\ttry:\n\t\tcheckCart(\"CART9002\",\"591\")\n\t\tassert 0\n\texcept Exception as e:\n\t\ts = str(e)\n\t\tassert s.find(\"货架ID不正确,期望货架:CART9002, 实际货架:CART9001\") != -1\n\t\t\nimport counter\n@counter.count\ndef move_cart(cartId,srcLoc,destLoc,agvId=None):\n\tprint(cartId,srcLoc,destLoc)\n\tcounter.setPrint(True)\n\tdef callback1(obj):\n\t\tif obj[\"result\"] == -1: \n\t\t\tprint(\"error, system exit\")\n\t\t\tobj[\"finish\"] = True\n\t\t\tsys.exit(-1) \n\t\telse:\n\t\t\tlog.warning(obj[\"agv\"],\"start move from\",obj[\"loc1\"],\"to\",obj[\"loc2\"]) \n\t\t\tmoveCart(obj[\"agv\"],obj[\"cart\"],obj[\"loc1\"],obj[\"loc2\"],callback2,obj)\n\t\n\tdef callback2(obj):\n\t\tif obj[\"result\"] == -1: \n\t\t\tprint(\"error, system exit\")\n\t\t\tobj[\"finish\"] = True\n\t\t\tsys.exit(-1) \n\t\telse:\n\t\t\tlog.warning(obj[\"agv\"],\"arrived\",obj[\"loc2\"])\n\t\tobj[\"finish\"] = True\n\t\t\t\n\tobj = {}\n\tobj[\"loc1\"] = srcLoc\n\tobj[\"loc2\"] = destLoc\n\tobj[\"cart\"] = cartId\n\tprint(\"call \",srcLoc)\n\tif agvId is None:\n\t\tagvId = apply(srcLoc)\n\n\tcall(agvId,srcLoc,callback1,obj)\n\twhile not utility.is_exited():\n\t\tif \"finish\" in obj:\n\t\t\tbreak\n\t\ttime.sleep(0.2)\n\tprint(\"------ move \",srcLoc,\" to \",destLoc,\" finish ------\")\n\t\n\t\n#def func1(start,stock1,stock2):\n#\tprint(\"-------------------- start thread ------------------------\")\n#\ttime.sleep(1) \n#\tcartId = 
\"CART9009\"\n#\tmove_cart(cartId,start,stock1)\n#\tnext = stock1\n#\tfor s in seats:\n#\t\tmove_cart(cartId,next,\"seat\"+str(s)+\"_1\")\n#\t\tif next == stock1:\n#\t\t\tnext = stock2\n#\t\telse:\n#\t\t\tnext = stock1\n#\t\tmove_cart(cartId,\"seat\"+str(s)+\"_1\",next)\n#\t\t# move_cart(cartId, s, next)\n#\tprint(\"=======================================\")\n#\tprint(\"finish func1\")\n#\tprint(\"=======================================\")\n\ndef func2(stock1,stock2):\n\tprint(\"-------------------- start thread ------------------------\",stock1,stock2)\n\ttime.sleep(1) \n\tcartId = \"CART9009\"\n\tfor i in range(20):\n\t\tprint(\"current loop is - \",i.__str__())\n\t\tmove_cart(cartId,stock1,stock2)\n\t\tmove_cart(cartId,stock2,stock1) \n\t\tprint(\"current loop end - \",i.__str__())\n\tprint(\"=======================================\")\n\tprint(\"finish func2\")\n\tprint(\"=======================================\")\t\n\ndef func3(times,starts,seats):\n\tcurrent=starts\n\tcartId = \"CART9009\"\n\ttime.sleep(1)\n\tfor loop in range(0,times-1):\n\t\t# current=starts\n\t\ttip1=\"currentLoop is \"+loop.__str__()+\" currentStart is \"+current\n\t\tprint(tip1)\n\t\tfor i in range(0,len(seats)):\n\t\t\tnext = str(seats[i])\n\t\t\ttip2= \"currentLoop is \"+loop.__str__()+\"currentOrigin is \"+ current\t+ \"currentNext is \" + next +\" seatIndex is \"+i.__str__()\n\t\t\tprint(tip2)\n\t\t\tprint(\"excuting\")\n\t\t\tmove_cart(cartId,current,next)\n\t\t\tcurrent = next\ndef testPageAgvControl(jsonstr):\n\tjsonData = json.loads(jsonstr)\n\tresult = False\n\tif len(jsonData)==0:\n\t\tresult=False\n\telse:\n\t\tfor currentJson in jsonData:\n\t\t\tstart = currentJson[\"start\"]\n\t\t\tseat = currentJson[\"seat\"]\n\t\t\tloop=int(currentJson[\"loop\"])\n\t\t\tseats = str.split(seat, ',')\n\t\t\tdurabilityTestTask1 = threading.Thread(target=func3, args=[loop, start, seats])\n\t\t\tdurabilityTestTask1.start()\n\t\tresult=True\n\n\treturn result\n\ndef 
testtestPageAgvControl(jsonstr):\n\tjsonData = json.loads(jsonstr)\n\tresult = False\n\tif len(jsonData) == 0:\n\t\tresult = False\n\telse:\n\t\tfor currentJson in jsonData:\n\t\t\tstart = currentJson[\"start\"]\n\t\t\tprint(start)\n\t\t\ttime.sleep(3)\n\t\t\tseat = currentJson[\"seat\"]\n\t\t\tseats = str.split(seat, ',')\n\t\t\tprint(seat)\n\t\t\ttime.sleep(3)\n\t\t\tfor\tcurrentseat in seats:\n\t\t\t\tprint(currentseat)\n\t\t\t\ttime.sleep(3)\n\t\t\ttime.sleep(10)\n\t\tresult = True\n\n\treturn result\n\ndef testPageUnloockAll():\n\tapi.unlockAll();\n\ndef testProcess(jsonData):\n\tutility.start()\n\ttestPageAgvControl(jsonData)\n\tutility.finish()\n\n\n\ndef test1():\n\tInit()\n\t\n\tdurabilityTestTask1= threading.Thread(target=func3,args=[20,\"stockA_row1_col3\",[\"stockA_row1_col2\",\"stockA_row1_col4\"]])\n\tdurabilityTestTask1.start()\n\n\tdurabilityTestTask2= threading.Thread(target=func3,args=[20,\"stockA_row1_col2\",[\"seat2_1\",\"stockA_row4_col2\"]])\n\t# durabilityTestTask2.start()\n\n\tdurabilityTestTask3= threading.Thread(target=func3,args=[20,\"stockA_row5_col3\",[\"seat16_1\",\"stockA_row5_col2\"]])\n\t# durabilityTestTask3.start()\n\n\tdurabilityTestTask4= threading.Thread(target=func3,args=[20,\"stockA_row6_col3\",[\"seat12_1\",\"stockA_row6_col2\"]])\n\t# durabilityTestTask4.start()\n\n\tdurabilityTestTask1.join()\n\n\t\n\t#t1.join()\t\n\tprint(\"===============ALL FINISH ========================\")\n\n\n\n\nif __name__ == '__main__':\n\t# utility.run_tests()\n\tif sys.argv is not None and len(sys.argv)>0:\n\t\tif \"process\" in sys.argv:\n\t\t\tlog.info(\"run at testPage mode\")\n\t\t\targs=\"\"\n\t\t\twith open('/agvscada/driver/args.txt', 'r', encoding='utf-8') as f:\n\t\t\t\targs=f.read()\n\t\t\tapi.init()\n\t\t\ttime.sleep(3)\n\t\t\ttestPageAgvControl(args)\n\t\telif \"unlock\" in sys.argv:\n\t\t\ttestPageUnloockAll()\n\t\telif \"test\" in 
sys.argv:\n\t\t\tutility.start()\n\t\t\ttest1()\n\t\t\tutility.finish()\n\n\n\n\telse:\n\t\tutility.start()\n\t\ttestgetPoint()\n\t\tutility.finish()\n\t# test3()\n\t\n\t\n\t\n\t\n",
"step-ids": [
26,
29,
30,
34,
38
]
}
|
[
26,
29,
30,
34,
38
] |
<|reserved_special_token_0|>
class DeathsByEthnicity(PowerBiQuerier):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DeathsByEthnicity(PowerBiQuerier):
<|reserved_special_token_0|>
def _parse_data(self, response_json: Dict[str, List]) ->Dict[str, int]:
results = super()._parse_data(response_json)
return {ethnicity.strip(): count for ethnicity, count in results}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DeathsByEthnicity(PowerBiQuerier):
def __init__(self) ->None:
self.source = 'd'
self.name = 'deaths by race'
self.property = 'race'
super().__init__()
def _parse_data(self, response_json: Dict[str, List]) ->Dict[str, int]:
results = super()._parse_data(response_json)
return {ethnicity.strip(): count for ethnicity, count in results}
<|reserved_special_token_1|>
from typing import Dict, List
from .power_bi_querier import PowerBiQuerier
class DeathsByEthnicity(PowerBiQuerier):
def __init__(self) ->None:
self.source = 'd'
self.name = 'deaths by race'
self.property = 'race'
super().__init__()
def _parse_data(self, response_json: Dict[str, List]) ->Dict[str, int]:
results = super()._parse_data(response_json)
return {ethnicity.strip(): count for ethnicity, count in results}
|
flexible
|
{
"blob_id": "d975b74370acc72101f808e70bef64cee39a5ab8",
"index": 6204,
"step-1": "<mask token>\n\n\nclass DeathsByEthnicity(PowerBiQuerier):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DeathsByEthnicity(PowerBiQuerier):\n <mask token>\n\n def _parse_data(self, response_json: Dict[str, List]) ->Dict[str, int]:\n results = super()._parse_data(response_json)\n return {ethnicity.strip(): count for ethnicity, count in results}\n",
"step-3": "<mask token>\n\n\nclass DeathsByEthnicity(PowerBiQuerier):\n\n def __init__(self) ->None:\n self.source = 'd'\n self.name = 'deaths by race'\n self.property = 'race'\n super().__init__()\n\n def _parse_data(self, response_json: Dict[str, List]) ->Dict[str, int]:\n results = super()._parse_data(response_json)\n return {ethnicity.strip(): count for ethnicity, count in results}\n",
"step-4": "from typing import Dict, List\nfrom .power_bi_querier import PowerBiQuerier\n\n\nclass DeathsByEthnicity(PowerBiQuerier):\n\n def __init__(self) ->None:\n self.source = 'd'\n self.name = 'deaths by race'\n self.property = 'race'\n super().__init__()\n\n def _parse_data(self, response_json: Dict[str, List]) ->Dict[str, int]:\n results = super()._parse_data(response_json)\n return {ethnicity.strip(): count for ethnicity, count in results}\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sum_series(n, x=0, y=1):
"""sum_series returns the nth number of the Fibonacci, the Lucas sequence
or the Foo sequence where the first position is indexed at 0. Arguments x and y as integers
are optional.
Argument n as an integer is required.
(n, 0, 1) returns the Fibinacci sequence at postion n.
(n, 2, 1) returns the Lucas sequence at postion n
(n, 3, 1)returns the Foo sequence at potions n.
Any other combo (including no optional parameters) returns the Fibonacci sequence at postion n."""
fib = [0, 1]
if n <= 1:
fibnum = fib[n]
else:
for i in range(n - 1):
nextnum = fib[0] + fib[1]
fib = [fib[1], nextnum]
fibnum = fib[1]
luke = [2, 1]
if n <= 1:
lukenum = luke[n]
else:
for i in range(n - 1):
nextnum = luke[0] + luke[1]
luke = [luke[1], nextnum]
lukenum = luke[1]
foo = [3, 2]
if n <= 1:
foonum = foo[n]
else:
for i in range(n - 1):
nextnum = foo[0] + foo[1]
foo = [foo[1], nextnum]
foonum = foo[1]
if x == 0 and y == 1:
return fibnum
if x == 2 and y == 1:
return lukenum
if x == 3 and y == 2:
return foonum
else:
return fibnum
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def lucas(n):
"""returns the nth number of the Lucas
sequence. where the first position is indexed at 0
n must be an iteger greater than or equal to 0"""
luke = [2, 1]
if n <= 1:
return luke[n]
else:
for i in range(n - 1):
nextnum = luke[0] + luke[1]
luke = [luke[1], nextnum]
return luke[1]
def sum_series(n, x=0, y=1):
"""sum_series returns the nth number of the Fibonacci, the Lucas sequence
or the Foo sequence where the first position is indexed at 0. Arguments x and y as integers
are optional.
Argument n as an integer is required.
(n, 0, 1) returns the Fibinacci sequence at postion n.
(n, 2, 1) returns the Lucas sequence at postion n
(n, 3, 1)returns the Foo sequence at potions n.
Any other combo (including no optional parameters) returns the Fibonacci sequence at postion n."""
fib = [0, 1]
if n <= 1:
fibnum = fib[n]
else:
for i in range(n - 1):
nextnum = fib[0] + fib[1]
fib = [fib[1], nextnum]
fibnum = fib[1]
luke = [2, 1]
if n <= 1:
lukenum = luke[n]
else:
for i in range(n - 1):
nextnum = luke[0] + luke[1]
luke = [luke[1], nextnum]
lukenum = luke[1]
foo = [3, 2]
if n <= 1:
foonum = foo[n]
else:
for i in range(n - 1):
nextnum = foo[0] + foo[1]
foo = [foo[1], nextnum]
foonum = foo[1]
if x == 0 and y == 1:
return fibnum
if x == 2 and y == 1:
return lukenum
if x == 3 and y == 2:
return foonum
else:
return fibnum
<|reserved_special_token_1|>
def fibonacci(n):
"""returns the nth number of the Fibonacci
sequence. where the first position is indexed at 0.
n must be an iteger greater than or equal to 0"""
fib = [0, 1]
if n <= 1:
return fib[n]
else:
for i in range(n - 1):
nextnum = fib[0] + fib[1]
fib = [fib[1], nextnum]
return fib[1]
def lucas(n):
"""returns the nth number of the Lucas
sequence. where the first position is indexed at 0
n must be an iteger greater than or equal to 0"""
luke = [2, 1]
if n <= 1:
return luke[n]
else:
for i in range(n - 1):
nextnum = luke[0] + luke[1]
luke = [luke[1], nextnum]
return luke[1]
def sum_series(n, x=0, y=1):
"""sum_series returns the nth number of the Fibonacci, the Lucas sequence
or the Foo sequence where the first position is indexed at 0. Arguments x and y as integers
are optional.
Argument n as an integer is required.
(n, 0, 1) returns the Fibinacci sequence at postion n.
(n, 2, 1) returns the Lucas sequence at postion n
(n, 3, 1)returns the Foo sequence at potions n.
Any other combo (including no optional parameters) returns the Fibonacci sequence at postion n."""
fib = [0, 1]
if n <= 1:
fibnum = fib[n]
else:
for i in range(n - 1):
nextnum = fib[0] + fib[1]
fib = [fib[1], nextnum]
fibnum = fib[1]
luke = [2, 1]
if n <= 1:
lukenum = luke[n]
else:
for i in range(n - 1):
nextnum = luke[0] + luke[1]
luke = [luke[1], nextnum]
lukenum = luke[1]
foo = [3, 2]
if n <= 1:
foonum = foo[n]
else:
for i in range(n - 1):
nextnum = foo[0] + foo[1]
foo = [foo[1], nextnum]
foonum = foo[1]
if x == 0 and y == 1:
return fibnum
if x == 2 and y == 1:
return lukenum
if x == 3 and y == 2:
return foonum
else:
return fibnum
<|reserved_special_token_1|>
def fibonacci(n):
'''returns the nth number of the Fibonacci
sequence. where the first position is indexed at 0.
n must be an iteger greater than or equal to 0'''
#these are the first two numbers in the sequence.
fib = [0,1]
#If the users enters a number less than 2 then just get that number from the list.
if n <= 1:
#return list item at n
return fib[n]
else:
#The first two position are already defined so only calculate to the sequence n-1 times to get that position.
for i in range(n-1):
#get the two list items and add them together...
nextnum = fib[0] + fib[1]
#shift all the numbers in the list one position to the left.
fib = [fib[1], nextnum]
#The last number in the list is the postion the user asked for so return it.
return fib[1]
def lucas(n):
'''returns the nth number of the Lucas
sequence. where the first position is indexed at 0
n must be an iteger greater than or equal to 0'''
#these are the first two numbers in the Lucas sequence.
luke = [2,1]
#If the users enters a number less that 2 then just get that number from the list.
if n <= 1:
#return list item at n
return luke[n]
else:
#The first two position are already defined so only calculate to the sequence n-1 times to get that position.
for i in range(n-1):
#get the two list items and add them together...
nextnum = luke[0] + luke[1]
#shift all the numbers in the list one position to the left.
luke = [luke[1], nextnum]
#The last number in the list is the postion the user asked for so return it.
return luke[1]
def sum_series(n, x = 0, y = 1):
'''sum_series returns the nth number of the Fibonacci, the Lucas sequence
or the Foo sequence where the first position is indexed at 0. Arguments x and y as integers
are optional.
Argument n as an integer is required.
(n, 0, 1) returns the Fibinacci sequence at postion n.
(n, 2, 1) returns the Lucas sequence at postion n
(n, 3, 1)returns the Foo sequence at potions n.
Any other combo (including no optional parameters) returns the Fibonacci sequence at postion n.'''
###Fibonacci sequence calculator....
#these are the first two numbers in the sequence.
fib = [0,1]
#If the users enters a number less that 2 then just get that number from the list.
if n <= 1:
#return list item at n
fibnum = fib[n]
else:
#The first two position are already defined so only calculate to the sequence n-1 times to get that position.
for i in range(n-1):
#get the two list items and add them together...
nextnum = fib[0] + fib[1]
#shift all the numbers in the list one position to the left.
fib = [fib[1], nextnum]
#The last number in the list is the postion the user asked for so return it.
fibnum = fib[1]
###Lucas sequence calculator...
#these are the first two numbers in the Lucas sequence.
luke = [2,1]
#If the users enters a number less that 2 then just get that number from the list.
if n <= 1:
#return list item at n
lukenum = luke[n]
else:
#The first two position are already defined so only calculate to the sequence n-1 times to get that position.
for i in range(n-1):
#get the two list items and add them together...
nextnum = luke[0] + luke[1]
#shift all the numbers in the list one position to the left.
luke = [luke[1], nextnum]
#The last number in the list is the postion the user asked for so return it.
lukenum = luke[1]
###Foo sequence
#these are the first two numbers in the foo sequence.
foo = [3,2]
#If the users enters a number less that 2 then just get that number from the list.
if n <= 1:
#return list item at n
foonum = foo[n]
else:
#The first two position are already defined so only calculate to the sequence n-1 times to get that position.
for i in range(n-1):
#get the two list items and add them together...
nextnum = foo[0] + foo[1]
#shift all the numbers in the list one position to the left.
foo = [foo[1], nextnum]
#The last number in the list is the postion the user asked for so return it.
foonum = foo[1]
if x == 0 and y == 1:
return fibnum
if x == 2 and y == 1:
return lukenum
if x==3 and y ==2:
return foonum
else:
return fibnum
|
flexible
|
{
"blob_id": "ca75e23d91eef8a5c5b78c0ea7c903b80640af25",
"index": 7957,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef sum_series(n, x=0, y=1):\n \"\"\"sum_series returns the nth number of the Fibonacci, the Lucas sequence\n or the Foo sequence where the first position is indexed at 0. Arguments x and y as integers\n are optional. \n Argument n as an integer is required. \n \n (n, 0, 1) returns the Fibinacci sequence at postion n.\n (n, 2, 1) returns the Lucas sequence at postion n\n (n, 3, 1)returns the Foo sequence at potions n.\n \n Any other combo (including no optional parameters) returns the Fibonacci sequence at postion n.\"\"\"\n fib = [0, 1]\n if n <= 1:\n fibnum = fib[n]\n else:\n for i in range(n - 1):\n nextnum = fib[0] + fib[1]\n fib = [fib[1], nextnum]\n fibnum = fib[1]\n luke = [2, 1]\n if n <= 1:\n lukenum = luke[n]\n else:\n for i in range(n - 1):\n nextnum = luke[0] + luke[1]\n luke = [luke[1], nextnum]\n lukenum = luke[1]\n foo = [3, 2]\n if n <= 1:\n foonum = foo[n]\n else:\n for i in range(n - 1):\n nextnum = foo[0] + foo[1]\n foo = [foo[1], nextnum]\n foonum = foo[1]\n if x == 0 and y == 1:\n return fibnum\n if x == 2 and y == 1:\n return lukenum\n if x == 3 and y == 2:\n return foonum\n else:\n return fibnum\n",
"step-3": "<mask token>\n\n\ndef lucas(n):\n \"\"\"returns the nth number of the Lucas\n sequence. where the first position is indexed at 0\n n must be an iteger greater than or equal to 0\"\"\"\n luke = [2, 1]\n if n <= 1:\n return luke[n]\n else:\n for i in range(n - 1):\n nextnum = luke[0] + luke[1]\n luke = [luke[1], nextnum]\n return luke[1]\n\n\ndef sum_series(n, x=0, y=1):\n \"\"\"sum_series returns the nth number of the Fibonacci, the Lucas sequence\n or the Foo sequence where the first position is indexed at 0. Arguments x and y as integers\n are optional. \n Argument n as an integer is required. \n \n (n, 0, 1) returns the Fibinacci sequence at postion n.\n (n, 2, 1) returns the Lucas sequence at postion n\n (n, 3, 1)returns the Foo sequence at potions n.\n \n Any other combo (including no optional parameters) returns the Fibonacci sequence at postion n.\"\"\"\n fib = [0, 1]\n if n <= 1:\n fibnum = fib[n]\n else:\n for i in range(n - 1):\n nextnum = fib[0] + fib[1]\n fib = [fib[1], nextnum]\n fibnum = fib[1]\n luke = [2, 1]\n if n <= 1:\n lukenum = luke[n]\n else:\n for i in range(n - 1):\n nextnum = luke[0] + luke[1]\n luke = [luke[1], nextnum]\n lukenum = luke[1]\n foo = [3, 2]\n if n <= 1:\n foonum = foo[n]\n else:\n for i in range(n - 1):\n nextnum = foo[0] + foo[1]\n foo = [foo[1], nextnum]\n foonum = foo[1]\n if x == 0 and y == 1:\n return fibnum\n if x == 2 and y == 1:\n return lukenum\n if x == 3 and y == 2:\n return foonum\n else:\n return fibnum\n",
"step-4": "def fibonacci(n):\n \"\"\"returns the nth number of the Fibonacci\n sequence. where the first position is indexed at 0.\n n must be an iteger greater than or equal to 0\"\"\"\n fib = [0, 1]\n if n <= 1:\n return fib[n]\n else:\n for i in range(n - 1):\n nextnum = fib[0] + fib[1]\n fib = [fib[1], nextnum]\n return fib[1]\n\n\ndef lucas(n):\n \"\"\"returns the nth number of the Lucas\n sequence. where the first position is indexed at 0\n n must be an iteger greater than or equal to 0\"\"\"\n luke = [2, 1]\n if n <= 1:\n return luke[n]\n else:\n for i in range(n - 1):\n nextnum = luke[0] + luke[1]\n luke = [luke[1], nextnum]\n return luke[1]\n\n\ndef sum_series(n, x=0, y=1):\n \"\"\"sum_series returns the nth number of the Fibonacci, the Lucas sequence\n or the Foo sequence where the first position is indexed at 0. Arguments x and y as integers\n are optional. \n Argument n as an integer is required. \n \n (n, 0, 1) returns the Fibinacci sequence at postion n.\n (n, 2, 1) returns the Lucas sequence at postion n\n (n, 3, 1)returns the Foo sequence at potions n.\n \n Any other combo (including no optional parameters) returns the Fibonacci sequence at postion n.\"\"\"\n fib = [0, 1]\n if n <= 1:\n fibnum = fib[n]\n else:\n for i in range(n - 1):\n nextnum = fib[0] + fib[1]\n fib = [fib[1], nextnum]\n fibnum = fib[1]\n luke = [2, 1]\n if n <= 1:\n lukenum = luke[n]\n else:\n for i in range(n - 1):\n nextnum = luke[0] + luke[1]\n luke = [luke[1], nextnum]\n lukenum = luke[1]\n foo = [3, 2]\n if n <= 1:\n foonum = foo[n]\n else:\n for i in range(n - 1):\n nextnum = foo[0] + foo[1]\n foo = [foo[1], nextnum]\n foonum = foo[1]\n if x == 0 and y == 1:\n return fibnum\n if x == 2 and y == 1:\n return lukenum\n if x == 3 and y == 2:\n return foonum\n else:\n return fibnum\n",
"step-5": "def fibonacci(n):\n '''returns the nth number of the Fibonacci\n sequence. where the first position is indexed at 0.\n n must be an iteger greater than or equal to 0'''\n #these are the first two numbers in the sequence.\n fib = [0,1]\n #If the users enters a number less than 2 then just get that number from the list.\n if n <= 1:\n #return list item at n\n return fib[n]\n else:\n #The first two position are already defined so only calculate to the sequence n-1 times to get that position.\n for i in range(n-1):\n #get the two list items and add them together...\n nextnum = fib[0] + fib[1]\n #shift all the numbers in the list one position to the left.\n fib = [fib[1], nextnum]\n #The last number in the list is the postion the user asked for so return it. \n return fib[1]\n \ndef lucas(n):\n '''returns the nth number of the Lucas\n sequence. where the first position is indexed at 0\n n must be an iteger greater than or equal to 0'''\n #these are the first two numbers in the Lucas sequence.\n luke = [2,1]\n #If the users enters a number less that 2 then just get that number from the list.\n if n <= 1:\n #return list item at n\n return luke[n]\n else:\n #The first two position are already defined so only calculate to the sequence n-1 times to get that position.\n for i in range(n-1):\n #get the two list items and add them together...\n nextnum = luke[0] + luke[1]\n #shift all the numbers in the list one position to the left.\n luke = [luke[1], nextnum]\n #The last number in the list is the postion the user asked for so return it. \n return luke[1]\n \n\n\ndef sum_series(n, x = 0, y = 1):\n\n '''sum_series returns the nth number of the Fibonacci, the Lucas sequence\n or the Foo sequence where the first position is indexed at 0. Arguments x and y as integers\n are optional. \n Argument n as an integer is required. 
\n \n (n, 0, 1) returns the Fibinacci sequence at postion n.\n (n, 2, 1) returns the Lucas sequence at postion n\n (n, 3, 1)returns the Foo sequence at potions n.\n \n Any other combo (including no optional parameters) returns the Fibonacci sequence at postion n.'''\n \n ###Fibonacci sequence calculator....\n #these are the first two numbers in the sequence.\n fib = [0,1]\n #If the users enters a number less that 2 then just get that number from the list.\n if n <= 1:\n #return list item at n\n fibnum = fib[n]\n else:\n #The first two position are already defined so only calculate to the sequence n-1 times to get that position.\n for i in range(n-1):\n #get the two list items and add them together...\n nextnum = fib[0] + fib[1]\n #shift all the numbers in the list one position to the left.\n fib = [fib[1], nextnum]\n #The last number in the list is the postion the user asked for so return it. \n fibnum = fib[1] \n ###Lucas sequence calculator...\n #these are the first two numbers in the Lucas sequence.\n luke = [2,1]\n #If the users enters a number less that 2 then just get that number from the list.\n if n <= 1:\n #return list item at n\n lukenum = luke[n]\n else:\n #The first two position are already defined so only calculate to the sequence n-1 times to get that position.\n for i in range(n-1):\n #get the two list items and add them together...\n nextnum = luke[0] + luke[1]\n #shift all the numbers in the list one position to the left.\n luke = [luke[1], nextnum]\n #The last number in the list is the postion the user asked for so return it. 
\n lukenum = luke[1] \n \n ###Foo sequence\n #these are the first two numbers in the foo sequence.\n foo = [3,2]\n #If the users enters a number less that 2 then just get that number from the list.\n if n <= 1:\n #return list item at n\n foonum = foo[n]\n else:\n #The first two position are already defined so only calculate to the sequence n-1 times to get that position.\n for i in range(n-1):\n #get the two list items and add them together...\n nextnum = foo[0] + foo[1]\n #shift all the numbers in the list one position to the left.\n foo = [foo[1], nextnum]\n #The last number in the list is the postion the user asked for so return it. \n foonum = foo[1] \n \n if x == 0 and y == 1:\n return fibnum\n if x == 2 and y == 1:\n return lukenum\n if x==3 and y ==2:\n return foonum\n else:\n return fibnum",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import csv
import os
import requests
from bs4 import BeautifulSoup
# Parse the saved HTML page and export every table row (<tr>) to a CSV file.
with open('/Users/neeraj.joshi/Downloads/index.html') as html_file:
    soup = BeautifulSoup(html_file, 'lxml')

filename = '/Users/neeraj.joshi/Downloads/test.csv'
# Bug fix: the original passed a bare open() to csv.writer, so the handle was
# never closed and buffered rows could be lost.  newline='' is required by the
# csv module to avoid blank lines on Windows.
with open(filename, 'w', newline='') as csv_file:
    csv_writer = csv.writer(csv_file)
    # Each <tr> becomes one CSV row; each <td> inside it is one cell.
    for tree in soup.find_all('tr'):
        data = [todd.text for todd in tree.find_all('td')]
        print(data)
        csv_writer.writerow(data)
|
normal
|
{
"blob_id": "47be41bd5838b828acdc90c3ef5abdeec9da1e85",
"index": 1579,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('/Users/neeraj.joshi/Downloads/index.html') as html_file:\n soup = BeautifulSoup(html_file, 'lxml')\n<mask token>\nfor tree in soup.find_all('tr'):\n data = []\n for todd in tree.find_all('td'):\n data.append(todd.text)\n print(data)\n csv_writer.writerow(data)\n",
"step-3": "<mask token>\nwith open('/Users/neeraj.joshi/Downloads/index.html') as html_file:\n soup = BeautifulSoup(html_file, 'lxml')\nfilename = '/Users/neeraj.joshi/Downloads/test.csv'\ncsv_writer = csv.writer(open(filename, 'w'))\nfor tree in soup.find_all('tr'):\n data = []\n for todd in tree.find_all('td'):\n data.append(todd.text)\n print(data)\n csv_writer.writerow(data)\n",
"step-4": "import csv\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\nwith open('/Users/neeraj.joshi/Downloads/index.html') as html_file:\n soup = BeautifulSoup(html_file, 'lxml')\nfilename = '/Users/neeraj.joshi/Downloads/test.csv'\ncsv_writer = csv.writer(open(filename, 'w'))\nfor tree in soup.find_all('tr'):\n data = []\n for todd in tree.find_all('td'):\n data.append(todd.text)\n print(data)\n csv_writer.writerow(data)\n",
"step-5": "import csv\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\n# open html file and parsing lxml \nwith open ('/Users/neeraj.joshi/Downloads/index.html') as html_file:\n soup = BeautifulSoup(html_file, 'lxml')\n #row = soup.find_all('tr')\n #column = row.find_all('td')\n #print(soup)\n# create a file by any name and in order to write it in write mode type w\nfilename = '/Users/neeraj.joshi/Downloads/test.csv'\ncsv_writer = csv.writer(open(filename, 'w'))\n# storing data in data variable\n\n#assume tr as a columns\nfor tree in soup.find_all('tr'):\n data = []\n #assume td as rows \n for todd in tree.find_all('td'): \n #print(todd.text) \"appending data of td into array data made up there \"\n \n data.append(todd.text) \n print(data)\n csv_writer.writerow(data) \n \n \n\n\n\n\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
from collections import defaultdict
from cluster.common import Cluster
from cluster.tools import print_table
def check_status(args):
    """Render a status table for every node in the cluster.

    Args:
      args: Parsed command-line options (argparse.Namespace) with
        ``filter_states`` and ``show_job_owners`` attributes.
    """
    cluster = Cluster(jobs_qstat=True, nodes=True, link=True)

    if args.filter_states:
        cluster.filter_node_states(set(args.filter_states.lower().split(',')))

    rows = []
    for node in cluster.nodes:
        # Core and memory columns fall back to 'N/A' when totals are unknown.
        if node.cpu_all:
            cores = "%3d/%3d (%3d%%)" % (
                node.cpu_res, node.cpu_all,
                1. * node.cpu_res / node.cpu_all * 100.)
        else:
            cores = 'N/A'
        if node.mem_all:
            memory = "%5.1f/%5.1fG (%3d%%)" % (
                node.mem_res, node.mem_all,
                node.mem_res / node.mem_all * 100.)
        else:
            memory = 'N/A'
        usage_bar = '*' * node.cpu_res + '-' * (node.cpu_all - node.cpu_res)
        rows.append([node.name, node.states, node.load, cores, memory, usage_bar])

        if args.show_job_owners:
            # Replace the usage bar with one job summary line per user.
            rows[-1][-1] = ''
            padding = [''] * 5

            jobs_by_user = defaultdict(list)
            for job in node.jobs_qstat:
                jobs_by_user[job.user].append(job)
            for orphan in node.orphans:
                jobs_by_user['ORPHANS'].append(orphan)

            for idx, (user, jobs) in enumerate(jobs_by_user.items()):
                summary = '%s: %s' % (user, ' '.join(str(j.job_id) for j in jobs))
                if idx:
                    rows.append(padding + [summary])
                else:
                    rows[-1][-1] = summary

    print_table(['Node', 'Status', 'Load', 'Used cores', 'Used memory', 'Jobs'], rows)
def main():
    """Parse command-line options and print the cluster node status table."""
    import argparse
    arg_parser = argparse.ArgumentParser(description='Check nodes status.')
    arg_parser.add_argument('-o', '--show-job-owners', action='store_true',
                            help='List jobs running on nodes')
    arg_parser.add_argument('-s', '--filter-states',
                            help='Display only nodes in FILTER_STATES (comma separated).')
    check_status(arg_parser.parse_args())
# Standard script entry point: run the CLI only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
|
normal
|
{
"blob_id": "381b59ab9fa85561932a9bfb9ab8cef635901a35",
"index": 7249,
"step-1": "<mask token>\n\n\ndef main():\n \"\"\" Execute main program\n \"\"\"\n import argparse\n parser = argparse.ArgumentParser(description='Check nodes status.')\n parser.add_argument('-o', '--show-job-owners', action='store_true',\n help='List jobs running on nodes')\n parser.add_argument('-s', '--filter-states', help=\n 'Display only nodes in FILTER_STATES (comma separated).')\n args = parser.parse_args()\n check_status(args)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef check_status(args):\n \"\"\" Print node details\n\n :param args: Arguments from argparse\n :type args: argparse.Namespace\n \"\"\"\n cluster = Cluster(jobs_qstat=True, nodes=True, link=True)\n nodes = []\n if args.filter_states:\n cluster.filter_node_states(set(args.filter_states.lower().split(',')))\n for node in cluster.nodes:\n nodes.append([node.name, node.states, node.load, '%3d/%3d (%3d%%)' %\n (node.cpu_res, node.cpu_all, 1.0 * node.cpu_res / node.cpu_all *\n 100.0) if node.cpu_all else 'N/A', '%5.1f/%5.1fG (%3d%%)' % (\n node.mem_res, node.mem_all, node.mem_res / node.mem_all * 100.0\n ) if node.mem_all else 'N/A', ''.join('*' * node.cpu_res + '-' *\n (node.cpu_all - node.cpu_res))])\n if args.show_job_owners:\n nodes[-1][-1] = ''\n empty = [''] * 5\n users = defaultdict(list)\n for job in node.jobs_qstat:\n users[job.user].append(job)\n for orphan in node.orphans:\n users['ORPHANS'].append(orphan)\n for idx, uitem in enumerate(users.items()):\n u, jobs = uitem\n column_data = '%s: %s' % (u, ' '.join([str(j.job_id) for j in\n jobs]))\n if idx:\n nodes.append(empty + [column_data])\n else:\n nodes[-1][-1] = column_data\n print_table(['Node', 'Status', 'Load', 'Used cores', 'Used memory',\n 'Jobs'], nodes)\n\n\ndef main():\n \"\"\" Execute main program\n \"\"\"\n import argparse\n parser = argparse.ArgumentParser(description='Check nodes status.')\n parser.add_argument('-o', '--show-job-owners', action='store_true',\n help='List jobs running on nodes')\n parser.add_argument('-s', '--filter-states', help=\n 'Display only nodes in FILTER_STATES (comma separated).')\n args = parser.parse_args()\n check_status(args)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef check_status(args):\n \"\"\" Print node details\n\n :param args: Arguments from argparse\n :type args: argparse.Namespace\n \"\"\"\n cluster = Cluster(jobs_qstat=True, nodes=True, link=True)\n nodes = []\n if args.filter_states:\n cluster.filter_node_states(set(args.filter_states.lower().split(',')))\n for node in cluster.nodes:\n nodes.append([node.name, node.states, node.load, '%3d/%3d (%3d%%)' %\n (node.cpu_res, node.cpu_all, 1.0 * node.cpu_res / node.cpu_all *\n 100.0) if node.cpu_all else 'N/A', '%5.1f/%5.1fG (%3d%%)' % (\n node.mem_res, node.mem_all, node.mem_res / node.mem_all * 100.0\n ) if node.mem_all else 'N/A', ''.join('*' * node.cpu_res + '-' *\n (node.cpu_all - node.cpu_res))])\n if args.show_job_owners:\n nodes[-1][-1] = ''\n empty = [''] * 5\n users = defaultdict(list)\n for job in node.jobs_qstat:\n users[job.user].append(job)\n for orphan in node.orphans:\n users['ORPHANS'].append(orphan)\n for idx, uitem in enumerate(users.items()):\n u, jobs = uitem\n column_data = '%s: %s' % (u, ' '.join([str(j.job_id) for j in\n jobs]))\n if idx:\n nodes.append(empty + [column_data])\n else:\n nodes[-1][-1] = column_data\n print_table(['Node', 'Status', 'Load', 'Used cores', 'Used memory',\n 'Jobs'], nodes)\n\n\ndef main():\n \"\"\" Execute main program\n \"\"\"\n import argparse\n parser = argparse.ArgumentParser(description='Check nodes status.')\n parser.add_argument('-o', '--show-job-owners', action='store_true',\n help='List jobs running on nodes')\n parser.add_argument('-s', '--filter-states', help=\n 'Display only nodes in FILTER_STATES (comma separated).')\n args = parser.parse_args()\n check_status(args)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from collections import defaultdict\nfrom cluster.common import Cluster\nfrom cluster.tools import print_table\n\n\ndef check_status(args):\n \"\"\" Print node details\n\n :param args: Arguments from argparse\n :type args: argparse.Namespace\n \"\"\"\n cluster = Cluster(jobs_qstat=True, nodes=True, link=True)\n nodes = []\n if args.filter_states:\n cluster.filter_node_states(set(args.filter_states.lower().split(',')))\n for node in cluster.nodes:\n nodes.append([node.name, node.states, node.load, '%3d/%3d (%3d%%)' %\n (node.cpu_res, node.cpu_all, 1.0 * node.cpu_res / node.cpu_all *\n 100.0) if node.cpu_all else 'N/A', '%5.1f/%5.1fG (%3d%%)' % (\n node.mem_res, node.mem_all, node.mem_res / node.mem_all * 100.0\n ) if node.mem_all else 'N/A', ''.join('*' * node.cpu_res + '-' *\n (node.cpu_all - node.cpu_res))])\n if args.show_job_owners:\n nodes[-1][-1] = ''\n empty = [''] * 5\n users = defaultdict(list)\n for job in node.jobs_qstat:\n users[job.user].append(job)\n for orphan in node.orphans:\n users['ORPHANS'].append(orphan)\n for idx, uitem in enumerate(users.items()):\n u, jobs = uitem\n column_data = '%s: %s' % (u, ' '.join([str(j.job_id) for j in\n jobs]))\n if idx:\n nodes.append(empty + [column_data])\n else:\n nodes[-1][-1] = column_data\n print_table(['Node', 'Status', 'Load', 'Used cores', 'Used memory',\n 'Jobs'], nodes)\n\n\ndef main():\n \"\"\" Execute main program\n \"\"\"\n import argparse\n parser = argparse.ArgumentParser(description='Check nodes status.')\n parser.add_argument('-o', '--show-job-owners', action='store_true',\n help='List jobs running on nodes')\n parser.add_argument('-s', '--filter-states', help=\n 'Display only nodes in FILTER_STATES (comma separated).')\n args = parser.parse_args()\n check_status(args)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\nfrom collections import defaultdict\n\nfrom cluster.common import Cluster\nfrom cluster.tools import print_table\n\n\ndef check_status(args):\n \"\"\" Print node details\n\n :param args: Arguments from argparse\n :type args: argparse.Namespace\n \"\"\"\n cluster = Cluster(jobs_qstat=True, nodes=True, link=True)\n nodes = []\n\n if args.filter_states:\n cluster.filter_node_states(set(args.filter_states.lower().split(',')))\n\n for node in cluster.nodes:\n nodes.append([\n node.name,\n node.states,\n node.load,\n \"%3d/%3d (%3d%%)\" % (\n node.cpu_res, node.cpu_all, 1. * node.cpu_res / node.cpu_all * 100.) if node.cpu_all else 'N/A', # Cores\n \"%5.1f/%5.1fG (%3d%%)\" % (\n node.mem_res, node.mem_all, node.mem_res / node.mem_all * 100.) if node.mem_all else 'N/A', # Memory\n ''.join(('*' * node.cpu_res) + ('-' * (node.cpu_all - node.cpu_res)))\n ])\n\n if args.show_job_owners:\n nodes[-1][-1] = ''\n empty = [''] * 5\n\n users = defaultdict(list)\n for job in node.jobs_qstat:\n users[job.user].append(job)\n for orphan in node.orphans:\n users['ORPHANS'].append(orphan)\n\n for idx, uitem in enumerate(users.items()):\n u, jobs = uitem\n column_data = '%s: %s' % (u, ' '.join([str(j.job_id) for j in jobs]))\n\n if idx:\n nodes.append(empty + [column_data])\n else:\n nodes[-1][-1] = column_data\n\n # Printing bits\n print_table(['Node', 'Status', 'Load', 'Used cores', 'Used memory', 'Jobs'], nodes)\n\n\ndef main():\n \"\"\" Execute main program\n \"\"\"\n # noinspection PyCompatibility\n import argparse\n parser = argparse.ArgumentParser(description='Check nodes status.')\n parser.add_argument('-o', '--show-job-owners', action='store_true', help='List jobs running on nodes')\n parser.add_argument('-s', '--filter-states', help='Display only nodes in FILTER_STATES (comma separated).')\n args = parser.parse_args()\n\n check_status(args)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/python
# coding=utf8
# author: Sun yang
import running
# Delegate to the project's runner module when executed as a script.
if __name__ == '__main__':
    running.go()
|
normal
|
{
"blob_id": "12442e4debc7fbf102ab88b42464f4ca8eb91351",
"index": 8454,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n running.go()\n",
"step-3": "import running\nif __name__ == '__main__':\n running.go()\n",
"step-4": "#!/usr/bin/python\r\n# coding=utf8\r\n# author: Sun yang\r\n\r\nimport running\r\n\r\n\r\nif __name__ == '__main__':\r\n running.go()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class FlatbuffersConversionData(object):
"""Holds data needed to convert a set of json files to flatbuffer binaries.
Attributes:
schema: The path to the flatbuffer schema file.
input_files: A list of input files to convert.
output_path: The path to the output directory where the converted files will
be placed.
"""
def __init__(self, schema, input_files, output_path):
"""Initializes this object's schema, input_files and output_path."""
self.schema = schema
self.input_files = input_files
self.output_path = output_path
<|reserved_special_token_0|>
def find_executable(name, paths):
"""Searches for a file with named `name` in the given paths and returns it."""
for path in paths:
full_path = os.path.join(path, name)
if os.path.isfile(full_path):
return full_path
return name
<|reserved_special_token_0|>
class BuildError(Exception):
"""Error indicating there was a problem building assets."""
def __init__(self, argv, error_code):
Exception.__init__(self)
self.argv = argv
self.error_code = error_code
def run_subprocess(argv):
process = subprocess.Popen(argv)
process.wait()
if process.returncode:
raise BuildError(argv, process.returncode)
def convert_json_to_flatbuffer_binary(json, schema, out_dir):
"""Run the flatbuffer compiler on the given json file and schema.
Args:
json: The path to the json file to convert to a flatbuffer binary.
schema: The path to the schema to use in the conversion process.
out_dir: The directory to write the flatbuffer binary.
Raises:
BuildError: Process return code was nonzero.
"""
command = [FLATC, '-o', out_dir, '-b', schema, json]
run_subprocess(command)
def convert_png_image_to_webp(png, out, quality=80):
"""Run the webp converter on the given png file.
Args:
png: The path to the png file to convert into a webp file.
out: The path of the webp to write to.
quality: The quality of the processed image, where quality is between 0
(poor) to 100 (very good). Typical value is around 80.
Raises:
BuildError: Process return code was nonzero.
"""
command = [CWEBP, '-q', str(quality), png, '-o', out]
run_subprocess(command)
def needs_rebuild(source, target):
"""Checks if the source file needs to be rebuilt.
Args:
source: The source file to be compared.
target: The target file which we may need to rebuild.
Returns:
True if the source file is newer than the target, or if the target file does
not exist.
"""
return not os.path.isfile(target) or os.path.getmtime(source
) > os.path.getmtime(target)
def processed_json_path(path):
"""Take the path to a raw json asset and convert it to target bin path."""
return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('.json', '.bin')
<|reserved_special_token_0|>
def generate_webp_textures():
"""Run the webp converter on off of the png files."""
input_files = PNG_TEXTURES['input_files']
output_files = PNG_TEXTURES['output_files']
if not os.path.exists(TEXTURE_PATH):
os.makedirs(TEXTURE_PATH)
for png, out in zip(input_files, output_files):
if needs_rebuild(png, out):
convert_png_image_to_webp(png, out, WEBP_QUALITY)
def clean_webp_textures():
"""Delete all the processed webp textures."""
for webp in PNG_TEXTURES['output_files']:
if os.path.isfile(webp):
os.remove(webp)
<|reserved_special_token_0|>
def handle_build_error(error):
"""Prints an error message to stderr for BuildErrors."""
sys.stderr.write('Error running command `%s`. Returned %s.\n' % (' '.
join(error.argv), str(error.error_code)))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def processed_json_dir(path):
"""Take the path to a raw json asset and convert it to target directory."""
return os.path.dirname(path.replace(RAW_ASSETS_PATH, ASSETS_PATH))
class FlatbuffersConversionData(object):
"""Holds data needed to convert a set of json files to flatbuffer binaries.
Attributes:
schema: The path to the flatbuffer schema file.
input_files: A list of input files to convert.
output_path: The path to the output directory where the converted files will
be placed.
"""
def __init__(self, schema, input_files, output_path):
"""Initializes this object's schema, input_files and output_path."""
self.schema = schema
self.input_files = input_files
self.output_path = output_path
<|reserved_special_token_0|>
def find_executable(name, paths):
"""Searches for a file with named `name` in the given paths and returns it."""
for path in paths:
full_path = os.path.join(path, name)
if os.path.isfile(full_path):
return full_path
return name
<|reserved_special_token_0|>
class BuildError(Exception):
"""Error indicating there was a problem building assets."""
def __init__(self, argv, error_code):
Exception.__init__(self)
self.argv = argv
self.error_code = error_code
def run_subprocess(argv):
process = subprocess.Popen(argv)
process.wait()
if process.returncode:
raise BuildError(argv, process.returncode)
def convert_json_to_flatbuffer_binary(json, schema, out_dir):
"""Run the flatbuffer compiler on the given json file and schema.
Args:
json: The path to the json file to convert to a flatbuffer binary.
schema: The path to the schema to use in the conversion process.
out_dir: The directory to write the flatbuffer binary.
Raises:
BuildError: Process return code was nonzero.
"""
command = [FLATC, '-o', out_dir, '-b', schema, json]
run_subprocess(command)
def convert_png_image_to_webp(png, out, quality=80):
"""Run the webp converter on the given png file.
Args:
png: The path to the png file to convert into a webp file.
out: The path of the webp to write to.
quality: The quality of the processed image, where quality is between 0
(poor) to 100 (very good). Typical value is around 80.
Raises:
BuildError: Process return code was nonzero.
"""
command = [CWEBP, '-q', str(quality), png, '-o', out]
run_subprocess(command)
def needs_rebuild(source, target):
"""Checks if the source file needs to be rebuilt.
Args:
source: The source file to be compared.
target: The target file which we may need to rebuild.
Returns:
True if the source file is newer than the target, or if the target file does
not exist.
"""
return not os.path.isfile(target) or os.path.getmtime(source
) > os.path.getmtime(target)
def processed_json_path(path):
"""Take the path to a raw json asset and convert it to target bin path."""
return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('.json', '.bin')
<|reserved_special_token_0|>
def generate_webp_textures():
    """Convert every registered png texture to webp, skipping up-to-date ones."""
    pngs = PNG_TEXTURES['input_files']
    webps = PNG_TEXTURES['output_files']
    if not os.path.exists(TEXTURE_PATH):
        os.makedirs(TEXTURE_PATH)
    for png_path, webp_path in zip(pngs, webps):
        if needs_rebuild(png_path, webp_path):
            convert_png_image_to_webp(png_path, webp_path, WEBP_QUALITY)
def clean_webp_textures():
    """Remove every generated webp texture that currently exists on disk."""
    for webp_path in PNG_TEXTURES['output_files']:
        if os.path.isfile(webp_path):
            os.remove(webp_path)
<|reserved_special_token_0|>
def clean():
    """Delete all the processed files."""
    # Remove both classes of generated assets: flatbuffer binaries first,
    # then the converted webp textures.
    clean_flatbuffer_binaries()
    clean_webp_textures()
def handle_build_error(error):
    """Report a BuildError on stderr, showing the failed command and exit code."""
    command = ' '.join(error.argv)
    sys.stderr.write('Error running command `%s`. Returned %s.\n'
                     % (command, str(error.error_code)))
def main(argv):
    """Builds or cleans the assets needed for the game.

    To build all assets, either call this script without any arguments, or
    call it with the argument 'all'. To just convert the flatbuffer json
    files, call it with 'flatbuffers'. Likewise to convert the png files to
    webp files, call it with 'webp'. To clean all converted files, call it
    with 'clean'.

    Args:
      argv: The command line arguments; argv[1] (if present) selects the target.

    Returns:
      0 on success, 1 on any failure (including an unknown target).
    """
    target = argv[1] if len(argv) >= 2 else 'all'
    if target not in ('all', 'flatbuffers', 'webp', 'clean'):
        sys.stderr.write('No rule to build target %s.\n' % target)
        # Bug fix: previously execution fell through and returned 0 (success)
        # for an unknown target; report failure to the caller instead.
        return 1

    if target in ('all', 'flatbuffers'):
        try:
            generate_flatbuffer_binaries()
        except BuildError as error:
            handle_build_error(error)
            return 1

    if target in ('all', 'webp'):
        try:
            generate_webp_textures()
        except BuildError as error:
            handle_build_error(error)
            return 1

    if target == 'clean':
        try:
            clean()
        except OSError as error:
            sys.stderr.write('Error cleaning: %s' % str(error))
            return 1

    return 0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def processed_json_dir(path):
    """Map a raw json asset path to the directory its binary will live in."""
    target_file = path.replace(RAW_ASSETS_PATH, ASSETS_PATH)
    return os.path.dirname(target_file)
class FlatbuffersConversionData(object):
    """Bundle of everything needed to turn json files into flatbuffer binaries.

    Attributes:
      schema: Path to the flatbuffer schema file.
      input_files: List of json input files to convert.
      output_path: Directory that receives the converted binaries.
    """

    def __init__(self, schema, input_files, output_path):
        """Store the schema path, the input file list and the output directory."""
        self.output_path = output_path
        self.input_files = input_files
        self.schema = schema
<|reserved_special_token_0|>
def processed_texture_path(path):
    """Map a raw png asset path to the webp path it will be converted into."""
    in_assets = path.replace(RAW_ASSETS_PATH, ASSETS_PATH)
    return in_assets.replace('png', 'webp')
<|reserved_special_token_0|>
def find_executable(name, paths):
    """Return the first existing file called `name` under `paths`, else `name`."""
    candidates = (os.path.join(directory, name) for directory in paths)
    for candidate in candidates:
        if os.path.isfile(candidate):
            return candidate
    # Fall back to the bare name so the OS PATH lookup can still resolve it.
    return name
<|reserved_special_token_0|>
class BuildError(Exception):
    """Raised when an asset-conversion subprocess exits with a nonzero status."""

    def __init__(self, argv, error_code):
        """Record the failing command line (`argv`) and its exit status."""
        super(BuildError, self).__init__()
        self.error_code = error_code
        self.argv = argv
def run_subprocess(argv):
    """Run `argv`, wait for completion, and raise BuildError on nonzero exit."""
    child = subprocess.Popen(argv)
    returncode = child.wait()
    if returncode:
        raise BuildError(argv, returncode)
def convert_json_to_flatbuffer_binary(json, schema, out_dir):
"""Run the flatbuffer compiler on the given json file and schema.
Args:
json: The path to the json file to convert to a flatbuffer binary.
schema: The path to the schema to use in the conversion process.
out_dir: The directory to write the flatbuffer binary.
Raises:
BuildError: Process return code was nonzero.
"""
command = [FLATC, '-o', out_dir, '-b', schema, json]
run_subprocess(command)
def convert_png_image_to_webp(png, out, quality=80):
"""Run the webp converter on the given png file.
Args:
png: The path to the png file to convert into a webp file.
out: The path of the webp to write to.
quality: The quality of the processed image, where quality is between 0
(poor) to 100 (very good). Typical value is around 80.
Raises:
BuildError: Process return code was nonzero.
"""
command = [CWEBP, '-q', str(quality), png, '-o', out]
run_subprocess(command)
def needs_rebuild(source, target):
"""Checks if the source file needs to be rebuilt.
Args:
source: The source file to be compared.
target: The target file which we may need to rebuild.
Returns:
True if the source file is newer than the target, or if the target file does
not exist.
"""
return not os.path.isfile(target) or os.path.getmtime(source
) > os.path.getmtime(target)
def processed_json_path(path):
"""Take the path to a raw json asset and convert it to target bin path."""
return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('.json', '.bin')
def generate_flatbuffer_binaries():
"""Run the flatbuffer compiler on the all of the flatbuffer json files."""
for element in FLATBUFFERS_CONVERSION_DATA:
schema = element.schema
output_path = element.output_path
if not os.path.exists(output_path):
os.makedirs(output_path)
for json in element.input_files:
target = processed_json_path(json)
if needs_rebuild(json, target) or needs_rebuild(schema, target):
convert_json_to_flatbuffer_binary(json, schema, output_path)
def generate_webp_textures():
"""Run the webp converter on off of the png files."""
input_files = PNG_TEXTURES['input_files']
output_files = PNG_TEXTURES['output_files']
if not os.path.exists(TEXTURE_PATH):
os.makedirs(TEXTURE_PATH)
for png, out in zip(input_files, output_files):
if needs_rebuild(png, out):
convert_png_image_to_webp(png, out, WEBP_QUALITY)
def clean_webp_textures():
"""Delete all the processed webp textures."""
for webp in PNG_TEXTURES['output_files']:
if os.path.isfile(webp):
os.remove(webp)
<|reserved_special_token_0|>
def clean():
"""Delete all the processed files."""
clean_flatbuffer_binaries()
clean_webp_textures()
def handle_build_error(error):
"""Prints an error message to stderr for BuildErrors."""
sys.stderr.write('Error running command `%s`. Returned %s.\n' % (' '.
join(error.argv), str(error.error_code)))
def main(argv):
"""Builds or cleans the assets needed for the game.
To build all assets, either call this script without any arguments. Or
alternatively, call it with the argument 'all'. To just convert the flatbuffer
json files, call it with 'flatbuffers'. Likewise to convert the png files to
webp files, call it with 'webp'. To clean all converted files, call it with
'clean'.
Args:
argv: The command line argument containing which command to run.
Returns:
Returns 0 on success.
"""
target = argv[1] if len(argv) >= 2 else 'all'
if target not in ('all', 'flatbuffers', 'webp', 'clean'):
sys.stderr.write('No rule to build target %s.\n' % target)
if target in ('all', 'flatbuffers'):
try:
generate_flatbuffer_binaries()
except BuildError as error:
handle_build_error(error)
return 1
if target in ('all', 'webp'):
try:
generate_webp_textures()
except BuildError as error:
handle_build_error(error)
return 1
if target == 'clean':
try:
clean()
except OSError as error:
sys.stderr.write('Error cleaning: %s' % str(error))
return 1
return 0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def processed_json_dir(path):
"""Take the path to a raw json asset and convert it to target directory."""
return os.path.dirname(path.replace(RAW_ASSETS_PATH, ASSETS_PATH))
class FlatbuffersConversionData(object):
"""Holds data needed to convert a set of json files to flatbuffer binaries.
Attributes:
schema: The path to the flatbuffer schema file.
input_files: A list of input files to convert.
output_path: The path to the output directory where the converted files will
be placed.
"""
def __init__(self, schema, input_files, output_path):
"""Initializes this object's schema, input_files and output_path."""
self.schema = schema
self.input_files = input_files
self.output_path = output_path
<|reserved_special_token_0|>
def processed_texture_path(path):
"""Take the path to a raw png asset and convert it to target webp path."""
return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('png', 'webp')
<|reserved_special_token_0|>
def find_executable(name, paths):
"""Searches for a file with named `name` in the given paths and returns it."""
for path in paths:
full_path = os.path.join(path, name)
if os.path.isfile(full_path):
return full_path
return name
<|reserved_special_token_0|>
class BuildError(Exception):
"""Error indicating there was a problem building assets."""
def __init__(self, argv, error_code):
Exception.__init__(self)
self.argv = argv
self.error_code = error_code
def run_subprocess(argv):
process = subprocess.Popen(argv)
process.wait()
if process.returncode:
raise BuildError(argv, process.returncode)
def convert_json_to_flatbuffer_binary(json, schema, out_dir):
"""Run the flatbuffer compiler on the given json file and schema.
Args:
json: The path to the json file to convert to a flatbuffer binary.
schema: The path to the schema to use in the conversion process.
out_dir: The directory to write the flatbuffer binary.
Raises:
BuildError: Process return code was nonzero.
"""
command = [FLATC, '-o', out_dir, '-b', schema, json]
run_subprocess(command)
def convert_png_image_to_webp(png, out, quality=80):
"""Run the webp converter on the given png file.
Args:
png: The path to the png file to convert into a webp file.
out: The path of the webp to write to.
quality: The quality of the processed image, where quality is between 0
(poor) to 100 (very good). Typical value is around 80.
Raises:
BuildError: Process return code was nonzero.
"""
command = [CWEBP, '-q', str(quality), png, '-o', out]
run_subprocess(command)
def needs_rebuild(source, target):
"""Checks if the source file needs to be rebuilt.
Args:
source: The source file to be compared.
target: The target file which we may need to rebuild.
Returns:
True if the source file is newer than the target, or if the target file does
not exist.
"""
return not os.path.isfile(target) or os.path.getmtime(source
) > os.path.getmtime(target)
def processed_json_path(path):
"""Take the path to a raw json asset and convert it to target bin path."""
return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('.json', '.bin')
def generate_flatbuffer_binaries():
"""Run the flatbuffer compiler on the all of the flatbuffer json files."""
for element in FLATBUFFERS_CONVERSION_DATA:
schema = element.schema
output_path = element.output_path
if not os.path.exists(output_path):
os.makedirs(output_path)
for json in element.input_files:
target = processed_json_path(json)
if needs_rebuild(json, target) or needs_rebuild(schema, target):
convert_json_to_flatbuffer_binary(json, schema, output_path)
def generate_webp_textures():
"""Run the webp converter on off of the png files."""
input_files = PNG_TEXTURES['input_files']
output_files = PNG_TEXTURES['output_files']
if not os.path.exists(TEXTURE_PATH):
os.makedirs(TEXTURE_PATH)
for png, out in zip(input_files, output_files):
if needs_rebuild(png, out):
convert_png_image_to_webp(png, out, WEBP_QUALITY)
def clean_webp_textures():
"""Delete all the processed webp textures."""
for webp in PNG_TEXTURES['output_files']:
if os.path.isfile(webp):
os.remove(webp)
def clean_flatbuffer_binaries():
"""Delete all the processed flatbuffer binaries."""
for element in FLATBUFFERS_CONVERSION_DATA:
for json in element.input_files:
path = processed_json_path(json)
if os.path.isfile(path):
os.remove(path)
def clean():
"""Delete all the processed files."""
clean_flatbuffer_binaries()
clean_webp_textures()
def handle_build_error(error):
"""Prints an error message to stderr for BuildErrors."""
sys.stderr.write('Error running command `%s`. Returned %s.\n' % (' '.
join(error.argv), str(error.error_code)))
def main(argv):
"""Builds or cleans the assets needed for the game.
To build all assets, either call this script without any arguments. Or
alternatively, call it with the argument 'all'. To just convert the flatbuffer
json files, call it with 'flatbuffers'. Likewise to convert the png files to
webp files, call it with 'webp'. To clean all converted files, call it with
'clean'.
Args:
argv: The command line argument containing which command to run.
Returns:
Returns 0 on success.
"""
target = argv[1] if len(argv) >= 2 else 'all'
if target not in ('all', 'flatbuffers', 'webp', 'clean'):
sys.stderr.write('No rule to build target %s.\n' % target)
if target in ('all', 'flatbuffers'):
try:
generate_flatbuffer_binaries()
except BuildError as error:
handle_build_error(error)
return 1
if target in ('all', 'webp'):
try:
generate_webp_textures()
except BuildError as error:
handle_build_error(error)
return 1
if target == 'clean':
try:
clean()
except OSError as error:
sys.stderr.write('Error cleaning: %s' % str(error))
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
<|reserved_special_token_1|>
#!/usr/bin/python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds all assets under src/rawassets/, writing the results to assets/.
Finds the flatbuffer compiler and cwebp tool and then uses them to convert the
JSON files to flatbuffer binary files and the png files to webp files so that
they can be loaded by the game. This script also includes various 'make' style
rules. If you just want to build the flatbuffer binaries you can pass
'flatbuffer' as an argument, or if you want to just build the webp files you can
pass 'cwebp' as an argument. Additionally, if you would like to clean all
generated files, you can call this script with the argument 'clean'.
"""
import distutils.spawn
import glob
import os
import platform
import subprocess
import sys
# The project root directory, which is one level up from this script's
# directory.
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                            os.path.pardir))

# Root of the shared 'prebuilts' tree, located four directory levels above
# the project root.
PREBUILTS_ROOT = os.path.abspath(os.path.join(os.path.join(PROJECT_ROOT),
                                              os.path.pardir, os.path.pardir,
                                              os.path.pardir, os.path.pardir,
                                              'prebuilts'))

# Directories that may contain the FlatBuffers compiler.
FLATBUFFERS_PATHS = [
    os.path.join(PROJECT_ROOT, 'bin'),
    os.path.join(PROJECT_ROOT, 'bin', 'Release'),
    os.path.join(PROJECT_ROOT, 'bin', 'Debug'),
]

# Directory that contains the cwebp tool.
# NOTE(review): distutils.spawn.find_executable returns None when cwebp is
# not on PATH, in which case the last CWEBP_PATHS entry degrades to ''.
CWEBP_BINARY_IN_PATH = distutils.spawn.find_executable('cwebp')
CWEBP_PATHS = [
    os.path.join(PROJECT_ROOT, 'bin'),
    os.path.join(PROJECT_ROOT, 'bin', 'Release'),
    os.path.join(PROJECT_ROOT, 'bin', 'Debug'),
    os.path.join(PREBUILTS_ROOT, 'libwebp',
                 '%s-x86' % platform.system().lower(),
                 'libwebp-0.4.1-%s-x86-32' % platform.system().lower(), 'bin'),
    os.path.dirname(CWEBP_BINARY_IN_PATH) if CWEBP_BINARY_IN_PATH else '',
]

# Directory to place processed assets.
ASSETS_PATH = os.path.join(PROJECT_ROOT, 'assets')

# Directory where unprocessed assets can be found.
RAW_ASSETS_PATH = os.path.join(PROJECT_ROOT, 'src', 'rawassets')

# Directory where processed sound flatbuffer data can be found.
SOUND_PATH = os.path.join(ASSETS_PATH, 'sounds')

# Directory where unprocessed sound flatbuffer data can be found.
RAW_SOUND_PATH = os.path.join(RAW_ASSETS_PATH, 'sounds')

# Directory where processed material flatbuffer data can be found.
MATERIAL_PATH = os.path.join(ASSETS_PATH, 'materials')

# Directory where unprocessed material flatbuffer data can be found.
RAW_MATERIAL_PATH = os.path.join(RAW_ASSETS_PATH, 'materials')

# Directory where processed textures can be found.
TEXTURE_PATH = os.path.join(ASSETS_PATH, 'textures')

# Directory where unprocessed textures can be found.
RAW_TEXTURE_PATH = os.path.join(RAW_ASSETS_PATH, 'textures')

# Directory where the flatbuffer schemas can be found.
SCHEMA_PATH = os.path.join(PROJECT_ROOT, 'src', 'flatbufferschemas')

# Windows uses the .exe extension on executables.
EXECUTABLE_EXTENSION = '.exe' if platform.system() == 'Windows' else ''

# Name of the flatbuffer executable.
FLATC_EXECUTABLE_NAME = 'flatc' + EXECUTABLE_EXTENSION

# Name of the cwebp executable.
CWEBP_EXECUTABLE_NAME = 'cwebp' + EXECUTABLE_EXTENSION

# What level of quality we want to apply to the webp files.
# Ranges from 0 to 100, where 100 is least lossy.
WEBP_QUALITY = 90
def processed_json_dir(path):
    """Return the target assets directory for a raw json asset path."""
    rebased_path = path.replace(RAW_ASSETS_PATH, ASSETS_PATH)
    return os.path.dirname(rebased_path)
class FlatbuffersConversionData(object):
    """Describes one batch of json files to compile to flatbuffer binaries.

    Attributes:
      schema: Path of the flatbuffer schema used for the conversion.
      input_files: List of json files to convert with that schema.
      output_path: Directory that receives the converted binaries.
    """

    def __init__(self, schema, input_files, output_path):
        """Stores the schema, the input file list and the output directory."""
        self.schema = schema
        self.input_files = input_files
        self.output_path = output_path
# A list of json files and their schemas that will be converted to binary files
# by the flatbuffer compiler.  Each FlatbuffersConversionData entry pairs one
# schema with the json files that conform to it and the directory where the
# generated binaries are written.
FLATBUFFERS_CONVERSION_DATA = [
    FlatbuffersConversionData(
        schema=os.path.join(SCHEMA_PATH, 'config.fbs'),
        input_files=[os.path.join(RAW_ASSETS_PATH, 'config.json')],
        output_path=ASSETS_PATH),
    FlatbuffersConversionData(
        schema=os.path.join(SCHEMA_PATH, 'buses.fbs'),
        input_files=[os.path.join(RAW_ASSETS_PATH, 'buses.json')],
        output_path=ASSETS_PATH),
    FlatbuffersConversionData(
        schema=os.path.join(SCHEMA_PATH, 'sound_assets.fbs'),
        input_files=[os.path.join(RAW_ASSETS_PATH, 'sound_assets.json')],
        output_path=ASSETS_PATH),
    FlatbuffersConversionData(
        schema=os.path.join(SCHEMA_PATH, 'character_state_machine_def.fbs'),
        input_files=[os.path.join(RAW_ASSETS_PATH,
                                  'character_state_machine_def.json')],
        output_path=ASSETS_PATH),
    # Every json under src/rawassets/sounds uses the sound collection schema.
    FlatbuffersConversionData(
        schema=os.path.join(SCHEMA_PATH, 'sound_collection_def.fbs'),
        input_files=glob.glob(os.path.join(RAW_SOUND_PATH, '*.json')),
        output_path=SOUND_PATH),
    # Every json under src/rawassets/materials uses the materials schema.
    FlatbuffersConversionData(
        schema=os.path.join(SCHEMA_PATH, 'materials.fbs'),
        input_files=glob.glob(os.path.join(RAW_MATERIAL_PATH, '*.json')),
        output_path=MATERIAL_PATH)
]
def processed_texture_path(path):
    """Take the path to a raw png asset and convert it to target webp path.

    Args:
      path: Path of a .png file under RAW_ASSETS_PATH.

    Returns:
      The corresponding .webp path under ASSETS_PATH.
    """
    # BUG FIX: replace only the '.png' extension.  The previous bare
    # 'png' -> 'webp' substitution also rewrote any directory or file name
    # that merely contained the substring 'png'.  This now mirrors
    # processed_json_path's '.json' -> '.bin' handling.
    return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('.png', '.webp')
# PNG files to convert to webp.  'input_files' holds the source .png paths
# and 'output_files' the parallel list of destination .webp paths.
PNG_TEXTURES = {
    'input_files': glob.glob(os.path.join(RAW_TEXTURE_PATH, '*.png')),
    'output_files': [processed_texture_path(png_path)
                     for png_path in glob.glob(os.path.join(RAW_TEXTURE_PATH,
                                                            '*.png'))]
}
def find_executable(name, paths):
    """Return the first existing file called `name` inside `paths`.

    Falls back to returning `name` unchanged when no candidate file exists,
    assuming it can then be resolved through the system PATH.
    """
    candidates = (os.path.join(directory, name) for directory in paths)
    for candidate in candidates:
        if os.path.isfile(candidate):
            return candidate
    # If not found, just assume it's in the PATH.
    return name
# Location of FlatBuffers compiler.  May be the bare executable name if no
# candidate file was found, in which case PATH resolution happens at run time.
FLATC = find_executable(FLATC_EXECUTABLE_NAME, FLATBUFFERS_PATHS)

# Location of webp compression tool.  Same bare-name fallback as FLATC.
CWEBP = find_executable(CWEBP_EXECUTABLE_NAME, CWEBP_PATHS)
class BuildError(Exception):
    """Raised when an asset conversion subprocess exits with an error.

    Attributes:
      argv: Command line (list of strings) of the failed subprocess.
      error_code: Exit status the subprocess returned.
    """

    def __init__(self, argv, error_code):
        super(BuildError, self).__init__()
        self.argv = argv
        self.error_code = error_code
def run_subprocess(argv):
    """Run `argv` as a child process; raise BuildError on a nonzero exit."""
    # subprocess.call is Popen + wait in one step.
    return_code = subprocess.call(argv)
    if return_code != 0:
        raise BuildError(argv, return_code)
def convert_json_to_flatbuffer_binary(json, schema, out_dir):
    """Compile one json file into a flatbuffer binary with flatc.

    Args:
      json: Path of the json file to compile.
      schema: Path of the flatbuffer schema describing the json.
      out_dir: Directory that receives the generated binary.

    Raises:
      BuildError: Process return code was nonzero.
    """
    run_subprocess([FLATC, '-o', out_dir, '-b', schema, json])
def convert_png_image_to_webp(png, out, quality=80):
    """Compress one png image to webp with the cwebp tool.

    Args:
      png: Path of the source png image.
      out: Path of the webp file to write.
      quality: Compression quality between 0 (poor) and 100 (very good);
        a typical value is around 80.

    Raises:
      BuildError: Process return code was nonzero.
    """
    run_subprocess([CWEBP, '-q', str(quality), png, '-o', out])
def needs_rebuild(source, target):
    """Report whether `target` must be regenerated from `source`.

    Args:
      source: The input file the target is built from.
      target: The generated file we may need to rebuild.

    Returns:
      True when `target` does not exist or is older than `source`.
    """
    if not os.path.isfile(target):
        return True
    return os.path.getmtime(source) > os.path.getmtime(target)
def processed_json_path(path):
    """Map a raw json asset path to its compiled .bin path under assets."""
    rebased_path = path.replace(RAW_ASSETS_PATH, ASSETS_PATH)
    return rebased_path.replace('.json', '.bin')
def generate_flatbuffer_binaries():
    """Compile every registered flatbuffer json file that is out of date."""
    for conversion in FLATBUFFERS_CONVERSION_DATA:
        # Make sure the destination directory exists before flatc writes.
        if not os.path.exists(conversion.output_path):
            os.makedirs(conversion.output_path)
        for json in conversion.input_files:
            target = processed_json_path(json)
            # Rebuild when either the data or its schema is newer.
            if (needs_rebuild(json, target) or
                    needs_rebuild(conversion.schema, target)):
                convert_json_to_flatbuffer_binary(
                    json, conversion.schema, conversion.output_path)
def generate_webp_textures():
    """Convert every stale png texture to its webp counterpart."""
    if not os.path.exists(TEXTURE_PATH):
        os.makedirs(TEXTURE_PATH)
    for png, webp in zip(PNG_TEXTURES['input_files'],
                         PNG_TEXTURES['output_files']):
        if needs_rebuild(png, webp):
            convert_png_image_to_webp(png, webp, WEBP_QUALITY)
def clean_webp_textures():
    """Delete every generated webp texture that exists on disk."""
    for webp_path in PNG_TEXTURES['output_files']:
        if os.path.isfile(webp_path):
            os.remove(webp_path)
def clean_flatbuffer_binaries():
    """Delete every generated flatbuffer binary that exists on disk."""
    for conversion in FLATBUFFERS_CONVERSION_DATA:
        for json in conversion.input_files:
            binary_path = processed_json_path(json)
            if os.path.isfile(binary_path):
                os.remove(binary_path)
def clean():
    """Delete every file produced by the asset pipeline."""
    for remove_outputs in (clean_flatbuffer_binaries, clean_webp_textures):
        remove_outputs()
def handle_build_error(error):
    """Write a diagnostic for a BuildError to stderr."""
    command = ' '.join(error.argv)
    message = 'Error running command `%s`. Returned %s.\n' % (
        command, str(error.error_code))
    sys.stderr.write(message)
def main(argv):
    """Builds or cleans the assets needed for the game.

    To build all assets, either call this script without any arguments. Or
    alternatively, call it with the argument 'all'. To just convert the
    flatbuffer json files, call it with 'flatbuffers'. Likewise to convert
    the png files to webp files, call it with 'webp'. To clean all converted
    files, call it with 'clean'.

    Args:
      argv: The command line argument containing which command to run.

    Returns:
      Returns 0 on success, 1 on failure.
    """
    target = argv[1] if len(argv) >= 2 else 'all'
    if target not in ('all', 'flatbuffers', 'webp', 'clean'):
        sys.stderr.write('No rule to build target %s.\n' % target)
        # BUG FIX: previously fell through after the message and returned 0,
        # reporting success for an unknown target.  Fail like make does.
        return 1

    if target in ('all', 'flatbuffers'):
        try:
            generate_flatbuffer_binaries()
        except BuildError as error:
            handle_build_error(error)
            return 1

    if target in ('all', 'webp'):
        try:
            generate_webp_textures()
        except BuildError as error:
            handle_build_error(error)
            return 1

    if target == 'clean':
        try:
            clean()
        except OSError as error:
            sys.stderr.write('Error cleaning: %s' % str(error))
            return 1

    return 0
# Script entry point: propagate main's status code through the process exit
# status so calling build systems can detect failures.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
flexible
|
{
"blob_id": "4989db28db0f823a54ff0942fbc40fc4640da38f",
"index": 3224,
"step-1": "<mask token>\n\n\nclass FlatbuffersConversionData(object):\n \"\"\"Holds data needed to convert a set of json files to flatbuffer binaries.\n\n Attributes:\n schema: The path to the flatbuffer schema file.\n input_files: A list of input files to convert.\n output_path: The path to the output directory where the converted files will\n be placed.\n \"\"\"\n\n def __init__(self, schema, input_files, output_path):\n \"\"\"Initializes this object's schema, input_files and output_path.\"\"\"\n self.schema = schema\n self.input_files = input_files\n self.output_path = output_path\n\n\n<mask token>\n\n\ndef find_executable(name, paths):\n \"\"\"Searches for a file with named `name` in the given paths and returns it.\"\"\"\n for path in paths:\n full_path = os.path.join(path, name)\n if os.path.isfile(full_path):\n return full_path\n return name\n\n\n<mask token>\n\n\nclass BuildError(Exception):\n \"\"\"Error indicating there was a problem building assets.\"\"\"\n\n def __init__(self, argv, error_code):\n Exception.__init__(self)\n self.argv = argv\n self.error_code = error_code\n\n\ndef run_subprocess(argv):\n process = subprocess.Popen(argv)\n process.wait()\n if process.returncode:\n raise BuildError(argv, process.returncode)\n\n\ndef convert_json_to_flatbuffer_binary(json, schema, out_dir):\n \"\"\"Run the flatbuffer compiler on the given json file and schema.\n\n Args:\n json: The path to the json file to convert to a flatbuffer binary.\n schema: The path to the schema to use in the conversion process.\n out_dir: The directory to write the flatbuffer binary.\n\n Raises:\n BuildError: Process return code was nonzero.\n \"\"\"\n command = [FLATC, '-o', out_dir, '-b', schema, json]\n run_subprocess(command)\n\n\ndef convert_png_image_to_webp(png, out, quality=80):\n \"\"\"Run the webp converter on the given png file.\n\n Args:\n png: The path to the png file to convert into a webp file.\n out: The path of the webp to write to.\n quality: The quality of the 
processed image, where quality is between 0\n (poor) to 100 (very good). Typical value is around 80.\n\n Raises:\n BuildError: Process return code was nonzero.\n \"\"\"\n command = [CWEBP, '-q', str(quality), png, '-o', out]\n run_subprocess(command)\n\n\ndef needs_rebuild(source, target):\n \"\"\"Checks if the source file needs to be rebuilt.\n\n Args:\n source: The source file to be compared.\n target: The target file which we may need to rebuild.\n\n Returns:\n True if the source file is newer than the target, or if the target file does\n not exist.\n \"\"\"\n return not os.path.isfile(target) or os.path.getmtime(source\n ) > os.path.getmtime(target)\n\n\ndef processed_json_path(path):\n \"\"\"Take the path to a raw json asset and convert it to target bin path.\"\"\"\n return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('.json', '.bin')\n\n\n<mask token>\n\n\ndef generate_webp_textures():\n \"\"\"Run the webp converter on off of the png files.\"\"\"\n input_files = PNG_TEXTURES['input_files']\n output_files = PNG_TEXTURES['output_files']\n if not os.path.exists(TEXTURE_PATH):\n os.makedirs(TEXTURE_PATH)\n for png, out in zip(input_files, output_files):\n if needs_rebuild(png, out):\n convert_png_image_to_webp(png, out, WEBP_QUALITY)\n\n\ndef clean_webp_textures():\n \"\"\"Delete all the processed webp textures.\"\"\"\n for webp in PNG_TEXTURES['output_files']:\n if os.path.isfile(webp):\n os.remove(webp)\n\n\n<mask token>\n\n\ndef handle_build_error(error):\n \"\"\"Prints an error message to stderr for BuildErrors.\"\"\"\n sys.stderr.write('Error running command `%s`. Returned %s.\\n' % (' '.\n join(error.argv), str(error.error_code)))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef processed_json_dir(path):\n \"\"\"Take the path to a raw json asset and convert it to target directory.\"\"\"\n return os.path.dirname(path.replace(RAW_ASSETS_PATH, ASSETS_PATH))\n\n\nclass FlatbuffersConversionData(object):\n \"\"\"Holds data needed to convert a set of json files to flatbuffer binaries.\n\n Attributes:\n schema: The path to the flatbuffer schema file.\n input_files: A list of input files to convert.\n output_path: The path to the output directory where the converted files will\n be placed.\n \"\"\"\n\n def __init__(self, schema, input_files, output_path):\n \"\"\"Initializes this object's schema, input_files and output_path.\"\"\"\n self.schema = schema\n self.input_files = input_files\n self.output_path = output_path\n\n\n<mask token>\n\n\ndef find_executable(name, paths):\n \"\"\"Searches for a file with named `name` in the given paths and returns it.\"\"\"\n for path in paths:\n full_path = os.path.join(path, name)\n if os.path.isfile(full_path):\n return full_path\n return name\n\n\n<mask token>\n\n\nclass BuildError(Exception):\n \"\"\"Error indicating there was a problem building assets.\"\"\"\n\n def __init__(self, argv, error_code):\n Exception.__init__(self)\n self.argv = argv\n self.error_code = error_code\n\n\ndef run_subprocess(argv):\n process = subprocess.Popen(argv)\n process.wait()\n if process.returncode:\n raise BuildError(argv, process.returncode)\n\n\ndef convert_json_to_flatbuffer_binary(json, schema, out_dir):\n \"\"\"Run the flatbuffer compiler on the given json file and schema.\n\n Args:\n json: The path to the json file to convert to a flatbuffer binary.\n schema: The path to the schema to use in the conversion process.\n out_dir: The directory to write the flatbuffer binary.\n\n Raises:\n BuildError: Process return code was nonzero.\n \"\"\"\n command = [FLATC, '-o', out_dir, '-b', schema, json]\n run_subprocess(command)\n\n\ndef convert_png_image_to_webp(png, out, quality=80):\n \"\"\"Run 
the webp converter on the given png file.\n\n Args:\n png: The path to the png file to convert into a webp file.\n out: The path of the webp to write to.\n quality: The quality of the processed image, where quality is between 0\n (poor) to 100 (very good). Typical value is around 80.\n\n Raises:\n BuildError: Process return code was nonzero.\n \"\"\"\n command = [CWEBP, '-q', str(quality), png, '-o', out]\n run_subprocess(command)\n\n\ndef needs_rebuild(source, target):\n \"\"\"Checks if the source file needs to be rebuilt.\n\n Args:\n source: The source file to be compared.\n target: The target file which we may need to rebuild.\n\n Returns:\n True if the source file is newer than the target, or if the target file does\n not exist.\n \"\"\"\n return not os.path.isfile(target) or os.path.getmtime(source\n ) > os.path.getmtime(target)\n\n\ndef processed_json_path(path):\n \"\"\"Take the path to a raw json asset and convert it to target bin path.\"\"\"\n return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('.json', '.bin')\n\n\n<mask token>\n\n\ndef generate_webp_textures():\n \"\"\"Run the webp converter on off of the png files.\"\"\"\n input_files = PNG_TEXTURES['input_files']\n output_files = PNG_TEXTURES['output_files']\n if not os.path.exists(TEXTURE_PATH):\n os.makedirs(TEXTURE_PATH)\n for png, out in zip(input_files, output_files):\n if needs_rebuild(png, out):\n convert_png_image_to_webp(png, out, WEBP_QUALITY)\n\n\ndef clean_webp_textures():\n \"\"\"Delete all the processed webp textures.\"\"\"\n for webp in PNG_TEXTURES['output_files']:\n if os.path.isfile(webp):\n os.remove(webp)\n\n\n<mask token>\n\n\ndef clean():\n \"\"\"Delete all the processed files.\"\"\"\n clean_flatbuffer_binaries()\n clean_webp_textures()\n\n\ndef handle_build_error(error):\n \"\"\"Prints an error message to stderr for BuildErrors.\"\"\"\n sys.stderr.write('Error running command `%s`. 
Returned %s.\\n' % (' '.\n join(error.argv), str(error.error_code)))\n\n\ndef main(argv):\n \"\"\"Builds or cleans the assets needed for the game.\n\n To build all assets, either call this script without any arguments. Or\n alternatively, call it with the argument 'all'. To just convert the flatbuffer\n json files, call it with 'flatbuffers'. Likewise to convert the png files to\n webp files, call it with 'webp'. To clean all converted files, call it with\n 'clean'.\n\n Args:\n argv: The command line argument containing which command to run.\n\n Returns:\n Returns 0 on success.\n \"\"\"\n target = argv[1] if len(argv) >= 2 else 'all'\n if target not in ('all', 'flatbuffers', 'webp', 'clean'):\n sys.stderr.write('No rule to build target %s.\\n' % target)\n if target in ('all', 'flatbuffers'):\n try:\n generate_flatbuffer_binaries()\n except BuildError as error:\n handle_build_error(error)\n return 1\n if target in ('all', 'webp'):\n try:\n generate_webp_textures()\n except BuildError as error:\n handle_build_error(error)\n return 1\n if target == 'clean':\n try:\n clean()\n except OSError as error:\n sys.stderr.write('Error cleaning: %s' % str(error))\n return 1\n return 0\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef processed_json_dir(path):\n \"\"\"Take the path to a raw json asset and convert it to target directory.\"\"\"\n return os.path.dirname(path.replace(RAW_ASSETS_PATH, ASSETS_PATH))\n\n\nclass FlatbuffersConversionData(object):\n \"\"\"Holds data needed to convert a set of json files to flatbuffer binaries.\n\n Attributes:\n schema: The path to the flatbuffer schema file.\n input_files: A list of input files to convert.\n output_path: The path to the output directory where the converted files will\n be placed.\n \"\"\"\n\n def __init__(self, schema, input_files, output_path):\n \"\"\"Initializes this object's schema, input_files and output_path.\"\"\"\n self.schema = schema\n self.input_files = input_files\n self.output_path = output_path\n\n\n<mask token>\n\n\ndef processed_texture_path(path):\n \"\"\"Take the path to a raw png asset and convert it to target webp path.\"\"\"\n return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('png', 'webp')\n\n\n<mask token>\n\n\ndef find_executable(name, paths):\n \"\"\"Searches for a file with named `name` in the given paths and returns it.\"\"\"\n for path in paths:\n full_path = os.path.join(path, name)\n if os.path.isfile(full_path):\n return full_path\n return name\n\n\n<mask token>\n\n\nclass BuildError(Exception):\n \"\"\"Error indicating there was a problem building assets.\"\"\"\n\n def __init__(self, argv, error_code):\n Exception.__init__(self)\n self.argv = argv\n self.error_code = error_code\n\n\ndef run_subprocess(argv):\n process = subprocess.Popen(argv)\n process.wait()\n if process.returncode:\n raise BuildError(argv, process.returncode)\n\n\ndef convert_json_to_flatbuffer_binary(json, schema, out_dir):\n \"\"\"Run the flatbuffer compiler on the given json file and schema.\n\n Args:\n json: The path to the json file to convert to a flatbuffer binary.\n schema: The path to the schema to use in the conversion process.\n out_dir: The directory to write the flatbuffer binary.\n\n 
Raises:\n BuildError: Process return code was nonzero.\n \"\"\"\n command = [FLATC, '-o', out_dir, '-b', schema, json]\n run_subprocess(command)\n\n\ndef convert_png_image_to_webp(png, out, quality=80):\n \"\"\"Run the webp converter on the given png file.\n\n Args:\n png: The path to the png file to convert into a webp file.\n out: The path of the webp to write to.\n quality: The quality of the processed image, where quality is between 0\n (poor) to 100 (very good). Typical value is around 80.\n\n Raises:\n BuildError: Process return code was nonzero.\n \"\"\"\n command = [CWEBP, '-q', str(quality), png, '-o', out]\n run_subprocess(command)\n\n\ndef needs_rebuild(source, target):\n \"\"\"Checks if the source file needs to be rebuilt.\n\n Args:\n source: The source file to be compared.\n target: The target file which we may need to rebuild.\n\n Returns:\n True if the source file is newer than the target, or if the target file does\n not exist.\n \"\"\"\n return not os.path.isfile(target) or os.path.getmtime(source\n ) > os.path.getmtime(target)\n\n\ndef processed_json_path(path):\n \"\"\"Take the path to a raw json asset and convert it to target bin path.\"\"\"\n return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('.json', '.bin')\n\n\ndef generate_flatbuffer_binaries():\n \"\"\"Run the flatbuffer compiler on the all of the flatbuffer json files.\"\"\"\n for element in FLATBUFFERS_CONVERSION_DATA:\n schema = element.schema\n output_path = element.output_path\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n for json in element.input_files:\n target = processed_json_path(json)\n if needs_rebuild(json, target) or needs_rebuild(schema, target):\n convert_json_to_flatbuffer_binary(json, schema, output_path)\n\n\ndef generate_webp_textures():\n \"\"\"Run the webp converter on off of the png files.\"\"\"\n input_files = PNG_TEXTURES['input_files']\n output_files = PNG_TEXTURES['output_files']\n if not os.path.exists(TEXTURE_PATH):\n 
os.makedirs(TEXTURE_PATH)\n for png, out in zip(input_files, output_files):\n if needs_rebuild(png, out):\n convert_png_image_to_webp(png, out, WEBP_QUALITY)\n\n\ndef clean_webp_textures():\n \"\"\"Delete all the processed webp textures.\"\"\"\n for webp in PNG_TEXTURES['output_files']:\n if os.path.isfile(webp):\n os.remove(webp)\n\n\n<mask token>\n\n\ndef clean():\n \"\"\"Delete all the processed files.\"\"\"\n clean_flatbuffer_binaries()\n clean_webp_textures()\n\n\ndef handle_build_error(error):\n \"\"\"Prints an error message to stderr for BuildErrors.\"\"\"\n sys.stderr.write('Error running command `%s`. Returned %s.\\n' % (' '.\n join(error.argv), str(error.error_code)))\n\n\ndef main(argv):\n \"\"\"Builds or cleans the assets needed for the game.\n\n To build all assets, either call this script without any arguments. Or\n alternatively, call it with the argument 'all'. To just convert the flatbuffer\n json files, call it with 'flatbuffers'. Likewise to convert the png files to\n webp files, call it with 'webp'. To clean all converted files, call it with\n 'clean'.\n\n Args:\n argv: The command line argument containing which command to run.\n\n Returns:\n Returns 0 on success.\n \"\"\"\n target = argv[1] if len(argv) >= 2 else 'all'\n if target not in ('all', 'flatbuffers', 'webp', 'clean'):\n sys.stderr.write('No rule to build target %s.\\n' % target)\n if target in ('all', 'flatbuffers'):\n try:\n generate_flatbuffer_binaries()\n except BuildError as error:\n handle_build_error(error)\n return 1\n if target in ('all', 'webp'):\n try:\n generate_webp_textures()\n except BuildError as error:\n handle_build_error(error)\n return 1\n if target == 'clean':\n try:\n clean()\n except OSError as error:\n sys.stderr.write('Error cleaning: %s' % str(error))\n return 1\n return 0\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef processed_json_dir(path):\n \"\"\"Take the path to a raw json asset and convert it to target directory.\"\"\"\n return os.path.dirname(path.replace(RAW_ASSETS_PATH, ASSETS_PATH))\n\n\nclass FlatbuffersConversionData(object):\n \"\"\"Holds data needed to convert a set of json files to flatbuffer binaries.\n\n Attributes:\n schema: The path to the flatbuffer schema file.\n input_files: A list of input files to convert.\n output_path: The path to the output directory where the converted files will\n be placed.\n \"\"\"\n\n def __init__(self, schema, input_files, output_path):\n \"\"\"Initializes this object's schema, input_files and output_path.\"\"\"\n self.schema = schema\n self.input_files = input_files\n self.output_path = output_path\n\n\n<mask token>\n\n\ndef processed_texture_path(path):\n \"\"\"Take the path to a raw png asset and convert it to target webp path.\"\"\"\n return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('png', 'webp')\n\n\n<mask token>\n\n\ndef find_executable(name, paths):\n \"\"\"Searches for a file with named `name` in the given paths and returns it.\"\"\"\n for path in paths:\n full_path = os.path.join(path, name)\n if os.path.isfile(full_path):\n return full_path\n return name\n\n\n<mask token>\n\n\nclass BuildError(Exception):\n \"\"\"Error indicating there was a problem building assets.\"\"\"\n\n def __init__(self, argv, error_code):\n Exception.__init__(self)\n self.argv = argv\n self.error_code = error_code\n\n\ndef run_subprocess(argv):\n process = subprocess.Popen(argv)\n process.wait()\n if process.returncode:\n raise BuildError(argv, process.returncode)\n\n\ndef convert_json_to_flatbuffer_binary(json, schema, out_dir):\n \"\"\"Run the flatbuffer compiler on the given json file and schema.\n\n Args:\n json: The path to the json file to convert to a flatbuffer binary.\n schema: The path to the schema to use in the conversion process.\n out_dir: The directory to write the flatbuffer binary.\n\n 
Raises:\n BuildError: Process return code was nonzero.\n \"\"\"\n command = [FLATC, '-o', out_dir, '-b', schema, json]\n run_subprocess(command)\n\n\ndef convert_png_image_to_webp(png, out, quality=80):\n \"\"\"Run the webp converter on the given png file.\n\n Args:\n png: The path to the png file to convert into a webp file.\n out: The path of the webp to write to.\n quality: The quality of the processed image, where quality is between 0\n (poor) to 100 (very good). Typical value is around 80.\n\n Raises:\n BuildError: Process return code was nonzero.\n \"\"\"\n command = [CWEBP, '-q', str(quality), png, '-o', out]\n run_subprocess(command)\n\n\ndef needs_rebuild(source, target):\n \"\"\"Checks if the source file needs to be rebuilt.\n\n Args:\n source: The source file to be compared.\n target: The target file which we may need to rebuild.\n\n Returns:\n True if the source file is newer than the target, or if the target file does\n not exist.\n \"\"\"\n return not os.path.isfile(target) or os.path.getmtime(source\n ) > os.path.getmtime(target)\n\n\ndef processed_json_path(path):\n \"\"\"Take the path to a raw json asset and convert it to target bin path.\"\"\"\n return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('.json', '.bin')\n\n\ndef generate_flatbuffer_binaries():\n \"\"\"Run the flatbuffer compiler on the all of the flatbuffer json files.\"\"\"\n for element in FLATBUFFERS_CONVERSION_DATA:\n schema = element.schema\n output_path = element.output_path\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n for json in element.input_files:\n target = processed_json_path(json)\n if needs_rebuild(json, target) or needs_rebuild(schema, target):\n convert_json_to_flatbuffer_binary(json, schema, output_path)\n\n\ndef generate_webp_textures():\n \"\"\"Run the webp converter on off of the png files.\"\"\"\n input_files = PNG_TEXTURES['input_files']\n output_files = PNG_TEXTURES['output_files']\n if not os.path.exists(TEXTURE_PATH):\n 
os.makedirs(TEXTURE_PATH)\n for png, out in zip(input_files, output_files):\n if needs_rebuild(png, out):\n convert_png_image_to_webp(png, out, WEBP_QUALITY)\n\n\ndef clean_webp_textures():\n \"\"\"Delete all the processed webp textures.\"\"\"\n for webp in PNG_TEXTURES['output_files']:\n if os.path.isfile(webp):\n os.remove(webp)\n\n\ndef clean_flatbuffer_binaries():\n \"\"\"Delete all the processed flatbuffer binaries.\"\"\"\n for element in FLATBUFFERS_CONVERSION_DATA:\n for json in element.input_files:\n path = processed_json_path(json)\n if os.path.isfile(path):\n os.remove(path)\n\n\ndef clean():\n \"\"\"Delete all the processed files.\"\"\"\n clean_flatbuffer_binaries()\n clean_webp_textures()\n\n\ndef handle_build_error(error):\n \"\"\"Prints an error message to stderr for BuildErrors.\"\"\"\n sys.stderr.write('Error running command `%s`. Returned %s.\\n' % (' '.\n join(error.argv), str(error.error_code)))\n\n\ndef main(argv):\n \"\"\"Builds or cleans the assets needed for the game.\n\n To build all assets, either call this script without any arguments. Or\n alternatively, call it with the argument 'all'. To just convert the flatbuffer\n json files, call it with 'flatbuffers'. Likewise to convert the png files to\n webp files, call it with 'webp'. 
To clean all converted files, call it with\n 'clean'.\n\n Args:\n argv: The command line argument containing which command to run.\n\n Returns:\n Returns 0 on success.\n \"\"\"\n target = argv[1] if len(argv) >= 2 else 'all'\n if target not in ('all', 'flatbuffers', 'webp', 'clean'):\n sys.stderr.write('No rule to build target %s.\\n' % target)\n if target in ('all', 'flatbuffers'):\n try:\n generate_flatbuffer_binaries()\n except BuildError as error:\n handle_build_error(error)\n return 1\n if target in ('all', 'webp'):\n try:\n generate_webp_textures()\n except BuildError as error:\n handle_build_error(error)\n return 1\n if target == 'clean':\n try:\n clean()\n except OSError as error:\n sys.stderr.write('Error cleaning: %s' % str(error))\n return 1\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv))\n",
"step-5": "#!/usr/bin/python\n# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Builds all assets under src/rawassets/, writing the results to assets/.\n\nFinds the flatbuffer compiler and cwebp tool and then uses them to convert the\nJSON files to flatbuffer binary files and the png files to webp files so that\nthey can be loaded by the game. This script also includes various 'make' style\nrules. If you just want to build the flatbuffer binaries you can pass\n'flatbuffer' as an argument, or if you want to just build the webp files you can\npass 'cwebp' as an argument. 
Additionally, if you would like to clean all\ngenerated files, you can call this script with the argument 'clean'.\n\"\"\"\n\nimport distutils.spawn\nimport glob\nimport os\nimport platform\nimport subprocess\nimport sys\n\n# The project root directory, which is one level up from this script's\n# directory.\nPROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),\n os.path.pardir))\n\nPREBUILTS_ROOT = os.path.abspath(os.path.join(os.path.join(PROJECT_ROOT),\n os.path.pardir, os.path.pardir,\n os.path.pardir, os.path.pardir,\n 'prebuilts'))\n\n# Directories that may contains the FlatBuffers compiler.\nFLATBUFFERS_PATHS = [\n os.path.join(PROJECT_ROOT, 'bin'),\n os.path.join(PROJECT_ROOT, 'bin', 'Release'),\n os.path.join(PROJECT_ROOT, 'bin', 'Debug'),\n]\n\n# Directory that contains the cwebp tool.\nCWEBP_BINARY_IN_PATH = distutils.spawn.find_executable('cwebp')\nCWEBP_PATHS = [\n os.path.join(PROJECT_ROOT, 'bin'),\n os.path.join(PROJECT_ROOT, 'bin', 'Release'),\n os.path.join(PROJECT_ROOT, 'bin', 'Debug'),\n os.path.join(PREBUILTS_ROOT, 'libwebp',\n '%s-x86' % platform.system().lower(),\n 'libwebp-0.4.1-%s-x86-32' % platform.system().lower(), 'bin'),\n os.path.dirname(CWEBP_BINARY_IN_PATH) if CWEBP_BINARY_IN_PATH else '',\n]\n\n# Directory to place processed assets.\nASSETS_PATH = os.path.join(PROJECT_ROOT, 'assets')\n\n# Directory where unprocessed assets can be found.\nRAW_ASSETS_PATH = os.path.join(PROJECT_ROOT, 'src', 'rawassets')\n\n# Directory where processed sound flatbuffer data can be found.\nSOUND_PATH = os.path.join(ASSETS_PATH, 'sounds')\n\n# Directory where unprocessed sound flatbuffer data can be found.\nRAW_SOUND_PATH = os.path.join(RAW_ASSETS_PATH, 'sounds')\n\n# Directory where processed material flatbuffer data can be found.\nMATERIAL_PATH = os.path.join(ASSETS_PATH, 'materials')\n\n# Directory where unprocessed material flatbuffer data can be found.\nRAW_MATERIAL_PATH = os.path.join(RAW_ASSETS_PATH, 'materials')\n\n# Directory 
where processed textures can be found.\nTEXTURE_PATH = os.path.join(ASSETS_PATH, 'textures')\n\n# Directory where unprocessed textures can be found.\nRAW_TEXTURE_PATH = os.path.join(RAW_ASSETS_PATH, 'textures')\n\n# Directory where unprocessed assets can be found.\nSCHEMA_PATH = os.path.join(PROJECT_ROOT, 'src', 'flatbufferschemas')\n\n# Windows uses the .exe extension on executables.\nEXECUTABLE_EXTENSION = '.exe' if platform.system() == 'Windows' else ''\n\n# Name of the flatbuffer executable.\nFLATC_EXECUTABLE_NAME = 'flatc' + EXECUTABLE_EXTENSION\n\n# Name of the cwebp executable.\nCWEBP_EXECUTABLE_NAME = 'cwebp' + EXECUTABLE_EXTENSION\n\n# What level of quality we want to apply to the webp files.\n# Ranges from 0 to 100.\nWEBP_QUALITY = 90\n\n\ndef processed_json_dir(path):\n \"\"\"Take the path to a raw json asset and convert it to target directory.\"\"\"\n return os.path.dirname(path.replace(RAW_ASSETS_PATH, ASSETS_PATH))\n\n\nclass FlatbuffersConversionData(object):\n \"\"\"Holds data needed to convert a set of json files to flatbuffer binaries.\n\n Attributes:\n schema: The path to the flatbuffer schema file.\n input_files: A list of input files to convert.\n output_path: The path to the output directory where the converted files will\n be placed.\n \"\"\"\n\n def __init__(self, schema, input_files, output_path):\n \"\"\"Initializes this object's schema, input_files and output_path.\"\"\"\n self.schema = schema\n self.input_files = input_files\n self.output_path = output_path\n\n\n# A list of json files and their schemas that will be converted to binary files\n# by the flatbuffer compiler.\nFLATBUFFERS_CONVERSION_DATA = [\n FlatbuffersConversionData(\n schema=os.path.join(SCHEMA_PATH, 'config.fbs'),\n input_files=[os.path.join(RAW_ASSETS_PATH, 'config.json')],\n output_path=ASSETS_PATH),\n FlatbuffersConversionData(\n schema=os.path.join(SCHEMA_PATH, 'buses.fbs'),\n input_files=[os.path.join(RAW_ASSETS_PATH, 'buses.json')],\n output_path=ASSETS_PATH),\n 
FlatbuffersConversionData(\n schema=os.path.join(SCHEMA_PATH, 'sound_assets.fbs'),\n input_files=[os.path.join(RAW_ASSETS_PATH, 'sound_assets.json')],\n output_path=ASSETS_PATH),\n FlatbuffersConversionData(\n schema=os.path.join(SCHEMA_PATH, 'character_state_machine_def.fbs'),\n input_files=[os.path.join(RAW_ASSETS_PATH,\n 'character_state_machine_def.json')],\n output_path=ASSETS_PATH),\n FlatbuffersConversionData(\n schema=os.path.join(SCHEMA_PATH, 'sound_collection_def.fbs'),\n input_files=glob.glob(os.path.join(RAW_SOUND_PATH, '*.json')),\n output_path=SOUND_PATH),\n FlatbuffersConversionData(\n schema=os.path.join(SCHEMA_PATH, 'materials.fbs'),\n input_files=glob.glob(os.path.join(RAW_MATERIAL_PATH, '*.json')),\n output_path=MATERIAL_PATH)\n]\n\n\ndef processed_texture_path(path):\n \"\"\"Take the path to a raw png asset and convert it to target webp path.\"\"\"\n return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('png', 'webp')\n\n\n# PNG files to convert to webp.\nPNG_TEXTURES = {\n 'input_files': glob.glob(os.path.join(RAW_TEXTURE_PATH, '*.png')),\n 'output_files': [processed_texture_path(png_path)\n for png_path in glob.glob(os.path.join(RAW_TEXTURE_PATH,\n '*.png'))]\n}\n\n\ndef find_executable(name, paths):\n \"\"\"Searches for a file with named `name` in the given paths and returns it.\"\"\"\n for path in paths:\n full_path = os.path.join(path, name)\n if os.path.isfile(full_path):\n return full_path\n # If not found, just assume it's in the PATH.\n return name\n\n\n# Location of FlatBuffers compiler.\nFLATC = find_executable(FLATC_EXECUTABLE_NAME, FLATBUFFERS_PATHS)\n\n# Location of webp compression tool.\nCWEBP = find_executable(CWEBP_EXECUTABLE_NAME, CWEBP_PATHS)\n\n\nclass BuildError(Exception):\n \"\"\"Error indicating there was a problem building assets.\"\"\"\n\n def __init__(self, argv, error_code):\n Exception.__init__(self)\n self.argv = argv\n self.error_code = error_code\n\n\ndef run_subprocess(argv):\n process = 
subprocess.Popen(argv)\n process.wait()\n if process.returncode:\n raise BuildError(argv, process.returncode)\n\n\ndef convert_json_to_flatbuffer_binary(json, schema, out_dir):\n \"\"\"Run the flatbuffer compiler on the given json file and schema.\n\n Args:\n json: The path to the json file to convert to a flatbuffer binary.\n schema: The path to the schema to use in the conversion process.\n out_dir: The directory to write the flatbuffer binary.\n\n Raises:\n BuildError: Process return code was nonzero.\n \"\"\"\n command = [FLATC, '-o', out_dir, '-b', schema, json]\n run_subprocess(command)\n\n\ndef convert_png_image_to_webp(png, out, quality=80):\n \"\"\"Run the webp converter on the given png file.\n\n Args:\n png: The path to the png file to convert into a webp file.\n out: The path of the webp to write to.\n quality: The quality of the processed image, where quality is between 0\n (poor) to 100 (very good). Typical value is around 80.\n\n Raises:\n BuildError: Process return code was nonzero.\n \"\"\"\n command = [CWEBP, '-q', str(quality), png, '-o', out]\n run_subprocess(command)\n\n\ndef needs_rebuild(source, target):\n \"\"\"Checks if the source file needs to be rebuilt.\n\n Args:\n source: The source file to be compared.\n target: The target file which we may need to rebuild.\n\n Returns:\n True if the source file is newer than the target, or if the target file does\n not exist.\n \"\"\"\n return not os.path.isfile(target) or (\n os.path.getmtime(source) > os.path.getmtime(target))\n\n\ndef processed_json_path(path):\n \"\"\"Take the path to a raw json asset and convert it to target bin path.\"\"\"\n return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('.json', '.bin')\n\n\ndef generate_flatbuffer_binaries():\n \"\"\"Run the flatbuffer compiler on the all of the flatbuffer json files.\"\"\"\n for element in FLATBUFFERS_CONVERSION_DATA:\n schema = element.schema\n output_path = element.output_path\n if not os.path.exists(output_path):\n 
os.makedirs(output_path)\n for json in element.input_files:\n target = processed_json_path(json)\n if needs_rebuild(json, target) or needs_rebuild(schema, target):\n convert_json_to_flatbuffer_binary(\n json, schema, output_path)\n\n\ndef generate_webp_textures():\n \"\"\"Run the webp converter on off of the png files.\"\"\"\n input_files = PNG_TEXTURES['input_files']\n output_files = PNG_TEXTURES['output_files']\n if not os.path.exists(TEXTURE_PATH):\n os.makedirs(TEXTURE_PATH)\n for png, out in zip(input_files, output_files):\n if needs_rebuild(png, out):\n convert_png_image_to_webp(png, out, WEBP_QUALITY)\n\n\ndef clean_webp_textures():\n \"\"\"Delete all the processed webp textures.\"\"\"\n for webp in PNG_TEXTURES['output_files']:\n if os.path.isfile(webp):\n os.remove(webp)\n\n\ndef clean_flatbuffer_binaries():\n \"\"\"Delete all the processed flatbuffer binaries.\"\"\"\n for element in FLATBUFFERS_CONVERSION_DATA:\n for json in element.input_files:\n path = processed_json_path(json)\n if os.path.isfile(path):\n os.remove(path)\n\n\ndef clean():\n \"\"\"Delete all the processed files.\"\"\"\n clean_flatbuffer_binaries()\n clean_webp_textures()\n\n\ndef handle_build_error(error):\n \"\"\"Prints an error message to stderr for BuildErrors.\"\"\"\n sys.stderr.write('Error running command `%s`. Returned %s.\\n' % (\n ' '.join(error.argv), str(error.error_code)))\n\n\ndef main(argv):\n \"\"\"Builds or cleans the assets needed for the game.\n\n To build all assets, either call this script without any arguments. Or\n alternatively, call it with the argument 'all'. To just convert the flatbuffer\n json files, call it with 'flatbuffers'. Likewise to convert the png files to\n webp files, call it with 'webp'. 
To clean all converted files, call it with\n 'clean'.\n\n Args:\n argv: The command line argument containing which command to run.\n\n Returns:\n Returns 0 on success.\n \"\"\"\n target = argv[1] if len(argv) >= 2 else 'all'\n if target not in ('all', 'flatbuffers', 'webp', 'clean'):\n sys.stderr.write('No rule to build target %s.\\n' % target)\n\n if target in ('all', 'flatbuffers'):\n try:\n generate_flatbuffer_binaries()\n except BuildError as error:\n handle_build_error(error)\n return 1\n if target in ('all', 'webp'):\n try:\n generate_webp_textures()\n except BuildError as error:\n handle_build_error(error)\n return 1\n if target == 'clean':\n try:\n clean()\n except OSError as error:\n sys.stderr.write('Error cleaning: %s' % str(error))\n return 1\n\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv))\n\n",
"step-ids": [
15,
18,
20,
22,
25
]
}
|
[
15,
18,
20,
22,
25
] |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics.pairwise import cosine_similarity
def get_df_4_model(user_id, n_recommendations=20000):
    """Build the latent feature matrices used by the recommendation model.

    Loads the preprocessed recipe and review CSVs, samples ``n_recommendations``
    recipes while always keeping every recipe the given user has rated (so the
    similarity lookups later can find them), then derives two latent spaces:

    * a *content* space — CountVectorizer bag-of-words over recipe metadata,
      reduced with truncated SVD, and
    * a *collaborative* space — the recipe/user rating matrix, reduced with
      truncated SVD.

    Both latent frames are also written to ``data/latents/`` as a side effect.

    Args:
        user_id: id of the user whose rated recipes must be retained.
        n_recommendations: number of non-user recipes to sample from the
            catalogue.

    Returns:
        Tuple ``(latent_content_df, latent_rating_df, user_rated_recipe_ids)``.
    """
    print('Generating dataframe for recommendation model')
    recipes_df_raw = pd.read_csv('data/preprocessed/recipe_pp_20201118_1206.csv')
    reviews_df_raw = pd.read_csv('data/preprocessed/review_pp_20201118_1206.csv')
    print(f'{len(recipes_df_raw.ingredients)} recipes are being considered for recommendation')
    # Sample the catalogue without the user's recipes, then add the user's
    # rated recipes back so they are guaranteed to be in the latent spaces.
    user_rates = list(reviews_df_raw[reviews_df_raw.user_id == user_id].recipe_id)
    sample_df_no_user = recipes_df_raw[~recipes_df_raw.recipe_id.isin(user_rates)].sample(n=n_recommendations, random_state=1)
    recipe_df_w_user = recipes_df_raw[recipes_df_raw.recipe_id.isin(user_rates)]
    recipes_df_user = pd.concat([sample_df_no_user, recipe_df_w_user], axis=0)
    merge_df = pd.merge(recipes_df_user[['recipe_id', 'metadata']], reviews_df_raw, on='recipe_id', how='right').dropna()
    recipes_df = merge_df[['recipe_id', 'metadata']].groupby(by='recipe_id').first().reset_index()
    reviews_df = merge_df.drop(['metadata'], axis='columns').reset_index()
    print(len(user_rates))
    print(sample_df_no_user.shape)
    # Content space: encode metadata as token counts, then reduce dimensionality.
    count = CountVectorizer(stop_words='english')
    count_matrix = count.fit_transform(recipes_df['metadata'])
    count_df = pd.DataFrame(count_matrix.toarray(), index=recipes_df.recipe_id.tolist())
    n_red = 250  # dimensionality of the content latent space
    svd = TruncatedSVD(n_components=n_red)
    latent_df = pd.DataFrame(svd.fit_transform(count_df), index=recipes_df.recipe_id.tolist())
    # Collaborative space: recipe x user rating matrix, missing ratings as 0,
    # reduced with SVD (item-item collaborative filtering).
    ratings1 = pd.merge(recipes_df[['recipe_id']], reviews_df, on='recipe_id', how='right')
    ratings = ratings1.pivot(index='recipe_id', columns='user_id', values='rating').fillna(0)
    svd = TruncatedSVD(n_components=800)
    latent_df_2 = svd.fit_transform(ratings)
    index_list = reviews_df.groupby(by='recipe_id').mean().index.tolist()
    latent_df_2 = pd.DataFrame(latent_df_2, index=index_list)
    latent_df.to_csv('data/latents/latent_content.csv', index=True)
    latent_df_2.to_csv('data/latents/latent_rating.csv', index=True)
    return latent_df, latent_df_2, user_rates
def get_one_recommendation(recipe_id, latent_1, latent_2, n_recommendations):
    """Score all recipes against one anchor recipe in both latent spaces.

    Computes the cosine similarity of the anchor recipe's latent vectors with
    every recipe's vectors in the content space (``latent_1``) and the
    collaborative space (``latent_2``), and averages the two into a hybrid
    score.

    Args:
        recipe_id: index label of the anchor recipe in both latent frames.
        latent_1: content latent features, one row per recipe_id.
        latent_2: collaborative latent features, one row per recipe_id.
        n_recommendations: number of top-scored rows to return.

    Returns:
        DataFrame with columns ``recipe_id``, ``content``, ``collaborative``
        and ``hybrid``, sorted by ``hybrid`` descending.
    """
    sim1 = _cosine_similarity_to_vector(latent_1.to_numpy(dtype=float),
                                        np.asarray(latent_1.loc[recipe_id], dtype=float))
    sim2 = _cosine_similarity_to_vector(latent_2.to_numpy(dtype=float),
                                        np.asarray(latent_2.loc[recipe_id], dtype=float))
    hybrid = (sim1 + sim2) / 2.0
    recommendation_df = pd.DataFrame(
        {'content': sim1, 'collaborative': sim2, 'hybrid': hybrid},
        index=latent_1.index)
    recommendation_df.sort_values('hybrid', ascending=False, inplace=True)
    return recommendation_df.head(n_recommendations).reset_index().rename(columns={"index": "recipe_id"})


def _cosine_similarity_to_vector(matrix, vector):
    """Cosine similarity of each row of ``matrix`` with ``vector``.

    Zero-norm rows (or a zero anchor vector) get similarity 0, matching
    sklearn's ``cosine_similarity`` behaviour for zero vectors.
    """
    denom = np.linalg.norm(matrix, axis=1) * np.linalg.norm(vector)
    dots = matrix @ vector
    return np.divide(dots, denom, out=np.zeros_like(dots, dtype=float), where=denom != 0)
def get_user_recommendations(user_id, n_recommendations=500):
    """Recommend recipes for a user by blending per-recipe similarity scores.

    Builds the latent spaces with ``get_df_4_model``, scores the catalogue
    against every recipe the user has rated, then sums the per-anchor scores
    so recipes similar to many rated dishes rank highest.

    NOTE(review): all of the user's rated recipes are used as anchors without
    weighting by rating, so dislikes currently count the same as likes.

    Args:
        user_id: id of the user to recommend for.
        n_recommendations: number of candidate recipes kept per rated recipe.

    Returns:
        DataFrame indexed by recipe_id with summed ``content``,
        ``collaborative`` and ``hybrid`` scores, sorted by ``hybrid``
        descending.
    """
    latent_1, latent_2, recipe_list = get_df_4_model(user_id)
    recommendations = [get_one_recommendation(recipe, latent_1, latent_2, n_recommendations)
                       for recipe in recipe_list]
    recommendations_df = pd.concat(recommendations)
    # Sum scores across anchors so recipes similar to many rated dishes win.
    return recommendations_df.groupby(by="recipe_id").sum().sort_values(by="hybrid", ascending=False)
def get_superuser_recommendation(n_recommendations=100, user_id=424680,
                                 max_anchor_recipes=10, top_k=30):
    """Demo recommendation run for a single (by default "super") user.

    Args:
        n_recommendations: sample size passed to ``get_df_4_model`` and number
            of candidates kept per anchor recipe.
        user_id: user whose ratings seed the recommendation; defaults to the
            demo super-user id used previously.
        max_anchor_recipes: cap on how many of the user's rated recipes are
            used as similarity anchors.
        top_k: number of top-scored recipes returned.

    Returns:
        DataFrame with the ``top_k`` highest summed hybrid scores, indexed by
        recipe_id.
    """
    latent_1, latent_2, recipe_list = get_df_4_model(user_id, n_recommendations)
    recipe_list = recipe_list[:max_anchor_recipes]
    recommendations = [get_one_recommendation(recipe, latent_1, latent_2, n_recommendations)
                       for recipe in recipe_list]
    recommendations_df = pd.concat(recommendations)
    grouped_recommendations = recommendations_df.groupby(by="recipe_id").sum().sort_values(by="hybrid", ascending=False)
    print(f'The recommendation results are based on {len(recipe_list)} recipes the user liked or disliked')
    return grouped_recommendations[0:top_k]
if __name__ == "__main__":
    # Demo entry point: print top recommendations for the default super user.
    result = get_superuser_recommendation(n_recommendations=4000)
    print('Here are the top results for the user:')
    print(result)
|
normal
|
{
"blob_id": "5c8de06176d06c5a2cf78ac138a5cb35e168d617",
"index": 5122,
"step-1": "<mask token>\n\n\ndef get_df_4_model(user_id, n_recommendations=20000):\n \"\"\"this function generates the latent dataframes used for the prediction model\"\"\"\n print('Generating dataframe for recommendation model')\n recipes_df_raw = pd.read_csv(\n 'data/preprocessed/recipe_pp_20201118_1206.csv')\n reviews_df_raw = pd.read_csv(\n 'data/preprocessed/review_pp_20201118_1206.csv')\n print(\n f'{len(recipes_df_raw.ingredients)} recipes are being considered for recommendation'\n )\n user_rates = list(reviews_df_raw[reviews_df_raw.user_id == user_id].\n recipe_id)\n sample_df_no_user = recipes_df_raw[~recipes_df_raw.recipe_id.isin(\n user_rates)].sample(n=n_recommendations, random_state=1)\n recipe_df_w_user = recipes_df_raw[recipes_df_raw.recipe_id.isin(user_rates)\n ]\n recipes_df_user = pd.concat([sample_df_no_user, recipe_df_w_user], axis=0)\n merge_df = pd.merge(recipes_df_user[['recipe_id', 'metadata']],\n reviews_df_raw, on='recipe_id', how='right').dropna()\n recipes_df = merge_df[['recipe_id', 'metadata']].groupby(by='recipe_id'\n ).first().reset_index()\n reviews_df = merge_df.drop(['metadata'], axis='columns').reset_index()\n print(len(user_rates))\n print(sample_df_no_user.shape)\n count = CountVectorizer(stop_words='english')\n count_matrix = count.fit_transform(recipes_df['metadata'])\n count_df = pd.DataFrame(count_matrix.toarray(), index=recipes_df.\n recipe_id.tolist())\n n_red = 250\n svd = TruncatedSVD(n_components=n_red)\n latent_df = svd.fit_transform(count_df)\n n = n_red\n latent_df = pd.DataFrame(latent_df[:, 0:n], index=recipes_df.recipe_id.\n tolist())\n latent_df\n ratings1 = pd.merge(recipes_df[['recipe_id']], reviews_df, on=\n 'recipe_id', how='right')\n ratings = ratings1.pivot(index='recipe_id', columns='user_id', values=\n 'rating').fillna(0)\n svd = TruncatedSVD(n_components=800)\n latent_df_2 = svd.fit_transform(ratings)\n index_list = reviews_df.groupby(by='recipe_id').mean().index.tolist()\n latent_df_2 = 
pd.DataFrame(latent_df_2, index=index_list)\n latent_df.to_csv(f'data/latents/latent_content.csv', index=True)\n latent_df_2.to_csv(f'data/latents/latent_rating.csv', index=True)\n return latent_df, latent_df_2, user_rates\n\n\ndef get_one_recommendation(recipe_id, latent_1, latent_2, n_recommendations):\n v1 = np.array(latent_1.loc[recipe_id]).reshape(1, -1)\n v2 = np.array(latent_2.loc[recipe_id]).reshape(1, -1)\n sim1 = cosine_similarity(latent_1, v1).reshape(-1)\n sim2 = cosine_similarity(latent_2, v2).reshape(-1)\n hybrid = (sim1 + sim2) / 2.0\n dictDf = {'content': sim1, 'collaborative': sim2, 'hybrid': hybrid}\n recommendation_df = pd.DataFrame(dictDf, index=latent_1.index)\n recommendation_df.sort_values('hybrid', ascending=False, inplace=True)\n recommendation_df.head(10)\n return recommendation_df.head(n_recommendations).reset_index().rename(\n columns={'index': 'recipe_id'})\n\n\ndef get_user_recommendations(user_id, n_recommendations=500):\n \"\"\"thi function gets the recommendations fo one user by taking all of its liked and disliked dishes,\n getting the recommendation based on each recipe and then summing the scores\"\"\"\n latent_1, latent_2, recipe_list = get_df_4_model(user_id)\n recommendations = [get_one_recommendation(i, latent_1, latent_2,\n n_recommendations) for i in recipe_list]\n recommendations_df = pd.concat(recommendations)\n grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(\n ).sort_values(by='hybrid', ascending=False)\n return grouped_recommendations\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_df_4_model(user_id, n_recommendations=20000):\n \"\"\"this function generates the latent dataframes used for the prediction model\"\"\"\n print('Generating dataframe for recommendation model')\n recipes_df_raw = pd.read_csv(\n 'data/preprocessed/recipe_pp_20201118_1206.csv')\n reviews_df_raw = pd.read_csv(\n 'data/preprocessed/review_pp_20201118_1206.csv')\n print(\n f'{len(recipes_df_raw.ingredients)} recipes are being considered for recommendation'\n )\n user_rates = list(reviews_df_raw[reviews_df_raw.user_id == user_id].\n recipe_id)\n sample_df_no_user = recipes_df_raw[~recipes_df_raw.recipe_id.isin(\n user_rates)].sample(n=n_recommendations, random_state=1)\n recipe_df_w_user = recipes_df_raw[recipes_df_raw.recipe_id.isin(user_rates)\n ]\n recipes_df_user = pd.concat([sample_df_no_user, recipe_df_w_user], axis=0)\n merge_df = pd.merge(recipes_df_user[['recipe_id', 'metadata']],\n reviews_df_raw, on='recipe_id', how='right').dropna()\n recipes_df = merge_df[['recipe_id', 'metadata']].groupby(by='recipe_id'\n ).first().reset_index()\n reviews_df = merge_df.drop(['metadata'], axis='columns').reset_index()\n print(len(user_rates))\n print(sample_df_no_user.shape)\n count = CountVectorizer(stop_words='english')\n count_matrix = count.fit_transform(recipes_df['metadata'])\n count_df = pd.DataFrame(count_matrix.toarray(), index=recipes_df.\n recipe_id.tolist())\n n_red = 250\n svd = TruncatedSVD(n_components=n_red)\n latent_df = svd.fit_transform(count_df)\n n = n_red\n latent_df = pd.DataFrame(latent_df[:, 0:n], index=recipes_df.recipe_id.\n tolist())\n latent_df\n ratings1 = pd.merge(recipes_df[['recipe_id']], reviews_df, on=\n 'recipe_id', how='right')\n ratings = ratings1.pivot(index='recipe_id', columns='user_id', values=\n 'rating').fillna(0)\n svd = TruncatedSVD(n_components=800)\n latent_df_2 = svd.fit_transform(ratings)\n index_list = reviews_df.groupby(by='recipe_id').mean().index.tolist()\n latent_df_2 = 
pd.DataFrame(latent_df_2, index=index_list)\n latent_df.to_csv(f'data/latents/latent_content.csv', index=True)\n latent_df_2.to_csv(f'data/latents/latent_rating.csv', index=True)\n return latent_df, latent_df_2, user_rates\n\n\ndef get_one_recommendation(recipe_id, latent_1, latent_2, n_recommendations):\n v1 = np.array(latent_1.loc[recipe_id]).reshape(1, -1)\n v2 = np.array(latent_2.loc[recipe_id]).reshape(1, -1)\n sim1 = cosine_similarity(latent_1, v1).reshape(-1)\n sim2 = cosine_similarity(latent_2, v2).reshape(-1)\n hybrid = (sim1 + sim2) / 2.0\n dictDf = {'content': sim1, 'collaborative': sim2, 'hybrid': hybrid}\n recommendation_df = pd.DataFrame(dictDf, index=latent_1.index)\n recommendation_df.sort_values('hybrid', ascending=False, inplace=True)\n recommendation_df.head(10)\n return recommendation_df.head(n_recommendations).reset_index().rename(\n columns={'index': 'recipe_id'})\n\n\ndef get_user_recommendations(user_id, n_recommendations=500):\n \"\"\"thi function gets the recommendations fo one user by taking all of its liked and disliked dishes,\n getting the recommendation based on each recipe and then summing the scores\"\"\"\n latent_1, latent_2, recipe_list = get_df_4_model(user_id)\n recommendations = [get_one_recommendation(i, latent_1, latent_2,\n n_recommendations) for i in recipe_list]\n recommendations_df = pd.concat(recommendations)\n grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(\n ).sort_values(by='hybrid', ascending=False)\n return grouped_recommendations\n\n\ndef get_superuser_recommendation(n_recommendations=100):\n user_id = 424680\n latent_1, latent_2, recipe_list = get_df_4_model(user_id, n_recommendations\n )\n recipe_list = recipe_list[0:10]\n recommendations = [get_one_recommendation(i, latent_1, latent_2,\n n_recommendations) for i in recipe_list]\n recommendations_df = pd.concat(recommendations)\n grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(\n ).sort_values(by='hybrid', 
ascending=False)\n print(\n f'The recommendation results are based on {len(recipe_list)} recipes the user liked or disliked'\n )\n return grouped_recommendations[0:30]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_df_4_model(user_id, n_recommendations=20000):\n \"\"\"this function generates the latent dataframes used for the prediction model\"\"\"\n print('Generating dataframe for recommendation model')\n recipes_df_raw = pd.read_csv(\n 'data/preprocessed/recipe_pp_20201118_1206.csv')\n reviews_df_raw = pd.read_csv(\n 'data/preprocessed/review_pp_20201118_1206.csv')\n print(\n f'{len(recipes_df_raw.ingredients)} recipes are being considered for recommendation'\n )\n user_rates = list(reviews_df_raw[reviews_df_raw.user_id == user_id].\n recipe_id)\n sample_df_no_user = recipes_df_raw[~recipes_df_raw.recipe_id.isin(\n user_rates)].sample(n=n_recommendations, random_state=1)\n recipe_df_w_user = recipes_df_raw[recipes_df_raw.recipe_id.isin(user_rates)\n ]\n recipes_df_user = pd.concat([sample_df_no_user, recipe_df_w_user], axis=0)\n merge_df = pd.merge(recipes_df_user[['recipe_id', 'metadata']],\n reviews_df_raw, on='recipe_id', how='right').dropna()\n recipes_df = merge_df[['recipe_id', 'metadata']].groupby(by='recipe_id'\n ).first().reset_index()\n reviews_df = merge_df.drop(['metadata'], axis='columns').reset_index()\n print(len(user_rates))\n print(sample_df_no_user.shape)\n count = CountVectorizer(stop_words='english')\n count_matrix = count.fit_transform(recipes_df['metadata'])\n count_df = pd.DataFrame(count_matrix.toarray(), index=recipes_df.\n recipe_id.tolist())\n n_red = 250\n svd = TruncatedSVD(n_components=n_red)\n latent_df = svd.fit_transform(count_df)\n n = n_red\n latent_df = pd.DataFrame(latent_df[:, 0:n], index=recipes_df.recipe_id.\n tolist())\n latent_df\n ratings1 = pd.merge(recipes_df[['recipe_id']], reviews_df, on=\n 'recipe_id', how='right')\n ratings = ratings1.pivot(index='recipe_id', columns='user_id', values=\n 'rating').fillna(0)\n svd = TruncatedSVD(n_components=800)\n latent_df_2 = svd.fit_transform(ratings)\n index_list = reviews_df.groupby(by='recipe_id').mean().index.tolist()\n latent_df_2 = 
pd.DataFrame(latent_df_2, index=index_list)\n latent_df.to_csv(f'data/latents/latent_content.csv', index=True)\n latent_df_2.to_csv(f'data/latents/latent_rating.csv', index=True)\n return latent_df, latent_df_2, user_rates\n\n\ndef get_one_recommendation(recipe_id, latent_1, latent_2, n_recommendations):\n v1 = np.array(latent_1.loc[recipe_id]).reshape(1, -1)\n v2 = np.array(latent_2.loc[recipe_id]).reshape(1, -1)\n sim1 = cosine_similarity(latent_1, v1).reshape(-1)\n sim2 = cosine_similarity(latent_2, v2).reshape(-1)\n hybrid = (sim1 + sim2) / 2.0\n dictDf = {'content': sim1, 'collaborative': sim2, 'hybrid': hybrid}\n recommendation_df = pd.DataFrame(dictDf, index=latent_1.index)\n recommendation_df.sort_values('hybrid', ascending=False, inplace=True)\n recommendation_df.head(10)\n return recommendation_df.head(n_recommendations).reset_index().rename(\n columns={'index': 'recipe_id'})\n\n\ndef get_user_recommendations(user_id, n_recommendations=500):\n \"\"\"thi function gets the recommendations fo one user by taking all of its liked and disliked dishes,\n getting the recommendation based on each recipe and then summing the scores\"\"\"\n latent_1, latent_2, recipe_list = get_df_4_model(user_id)\n recommendations = [get_one_recommendation(i, latent_1, latent_2,\n n_recommendations) for i in recipe_list]\n recommendations_df = pd.concat(recommendations)\n grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(\n ).sort_values(by='hybrid', ascending=False)\n return grouped_recommendations\n\n\ndef get_superuser_recommendation(n_recommendations=100):\n user_id = 424680\n latent_1, latent_2, recipe_list = get_df_4_model(user_id, n_recommendations\n )\n recipe_list = recipe_list[0:10]\n recommendations = [get_one_recommendation(i, latent_1, latent_2,\n n_recommendations) for i in recipe_list]\n recommendations_df = pd.concat(recommendations)\n grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(\n ).sort_values(by='hybrid', 
ascending=False)\n print(\n f'The recommendation results are based on {len(recipe_list)} recipes the user liked or disliked'\n )\n return grouped_recommendations[0:30]\n\n\nif __name__ == '__main__':\n result = get_superuser_recommendation(n_recommendations=4000)\n print('Here are the top results for the user:')\n print(result)\n",
"step-4": "import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\ndef get_df_4_model(user_id, n_recommendations=20000):\n \"\"\"this function generates the latent dataframes used for the prediction model\"\"\"\n print('Generating dataframe for recommendation model')\n recipes_df_raw = pd.read_csv(\n 'data/preprocessed/recipe_pp_20201118_1206.csv')\n reviews_df_raw = pd.read_csv(\n 'data/preprocessed/review_pp_20201118_1206.csv')\n print(\n f'{len(recipes_df_raw.ingredients)} recipes are being considered for recommendation'\n )\n user_rates = list(reviews_df_raw[reviews_df_raw.user_id == user_id].\n recipe_id)\n sample_df_no_user = recipes_df_raw[~recipes_df_raw.recipe_id.isin(\n user_rates)].sample(n=n_recommendations, random_state=1)\n recipe_df_w_user = recipes_df_raw[recipes_df_raw.recipe_id.isin(user_rates)\n ]\n recipes_df_user = pd.concat([sample_df_no_user, recipe_df_w_user], axis=0)\n merge_df = pd.merge(recipes_df_user[['recipe_id', 'metadata']],\n reviews_df_raw, on='recipe_id', how='right').dropna()\n recipes_df = merge_df[['recipe_id', 'metadata']].groupby(by='recipe_id'\n ).first().reset_index()\n reviews_df = merge_df.drop(['metadata'], axis='columns').reset_index()\n print(len(user_rates))\n print(sample_df_no_user.shape)\n count = CountVectorizer(stop_words='english')\n count_matrix = count.fit_transform(recipes_df['metadata'])\n count_df = pd.DataFrame(count_matrix.toarray(), index=recipes_df.\n recipe_id.tolist())\n n_red = 250\n svd = TruncatedSVD(n_components=n_red)\n latent_df = svd.fit_transform(count_df)\n n = n_red\n latent_df = pd.DataFrame(latent_df[:, 0:n], index=recipes_df.recipe_id.\n tolist())\n latent_df\n ratings1 = pd.merge(recipes_df[['recipe_id']], reviews_df, on=\n 'recipe_id', how='right')\n ratings = 
ratings1.pivot(index='recipe_id', columns='user_id', values=\n 'rating').fillna(0)\n svd = TruncatedSVD(n_components=800)\n latent_df_2 = svd.fit_transform(ratings)\n index_list = reviews_df.groupby(by='recipe_id').mean().index.tolist()\n latent_df_2 = pd.DataFrame(latent_df_2, index=index_list)\n latent_df.to_csv(f'data/latents/latent_content.csv', index=True)\n latent_df_2.to_csv(f'data/latents/latent_rating.csv', index=True)\n return latent_df, latent_df_2, user_rates\n\n\ndef get_one_recommendation(recipe_id, latent_1, latent_2, n_recommendations):\n v1 = np.array(latent_1.loc[recipe_id]).reshape(1, -1)\n v2 = np.array(latent_2.loc[recipe_id]).reshape(1, -1)\n sim1 = cosine_similarity(latent_1, v1).reshape(-1)\n sim2 = cosine_similarity(latent_2, v2).reshape(-1)\n hybrid = (sim1 + sim2) / 2.0\n dictDf = {'content': sim1, 'collaborative': sim2, 'hybrid': hybrid}\n recommendation_df = pd.DataFrame(dictDf, index=latent_1.index)\n recommendation_df.sort_values('hybrid', ascending=False, inplace=True)\n recommendation_df.head(10)\n return recommendation_df.head(n_recommendations).reset_index().rename(\n columns={'index': 'recipe_id'})\n\n\ndef get_user_recommendations(user_id, n_recommendations=500):\n \"\"\"thi function gets the recommendations fo one user by taking all of its liked and disliked dishes,\n getting the recommendation based on each recipe and then summing the scores\"\"\"\n latent_1, latent_2, recipe_list = get_df_4_model(user_id)\n recommendations = [get_one_recommendation(i, latent_1, latent_2,\n n_recommendations) for i in recipe_list]\n recommendations_df = pd.concat(recommendations)\n grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(\n ).sort_values(by='hybrid', ascending=False)\n return grouped_recommendations\n\n\ndef get_superuser_recommendation(n_recommendations=100):\n user_id = 424680\n latent_1, latent_2, recipe_list = get_df_4_model(user_id, n_recommendations\n )\n recipe_list = recipe_list[0:10]\n recommendations 
= [get_one_recommendation(i, latent_1, latent_2,\n n_recommendations) for i in recipe_list]\n recommendations_df = pd.concat(recommendations)\n grouped_recommendations = recommendations_df.groupby(by='recipe_id').sum(\n ).sort_values(by='hybrid', ascending=False)\n print(\n f'The recommendation results are based on {len(recipe_list)} recipes the user liked or disliked'\n )\n return grouped_recommendations[0:30]\n\n\nif __name__ == '__main__':\n result = get_superuser_recommendation(n_recommendations=4000)\n print('Here are the top results for the user:')\n print(result)\n",
"step-5": "import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\n\n\n\ndef get_df_4_model(user_id, n_recommendations = 20000):\n '''this function generates the latent dataframes used for the prediction model'''\n # First the data needs to be loaded\n print('Generating dataframe for recommendation model')\n recipes_df_raw = pd.read_csv(\"data/preprocessed/recipe_pp_20201118_1206.csv\")#.sample(n=n_recommendations, random_state=1)\n reviews_df_raw = pd.read_csv(\"data/preprocessed/review_pp_20201118_1206.csv\")\n print(f'{len(recipes_df_raw.ingredients)} recipes are being considered for recommendation')\n # !! currently the df is way to big, so we need to take a sample, but ensure that the recipes the user likes are used for finding similarities later\n # For this I will create a sample df without user recipes and concatenate the a df with only user liked recipes\n\n user_rates =list(reviews_df_raw[reviews_df_raw.user_id == user_id].recipe_id) # generate a list of user rated recipes\n\n sample_df_no_user = recipes_df_raw[~recipes_df_raw.recipe_id.isin(user_rates)].sample(n=n_recommendations, random_state=1)\n recipe_df_w_user = recipes_df_raw[recipes_df_raw.recipe_id.isin(user_rates)]\n\n recipes_df_user = pd.concat([sample_df_no_user, recipe_df_w_user], axis=0)\n merge_df = pd.merge(recipes_df_user[['recipe_id', 'metadata']], reviews_df_raw, on=\"recipe_id\", how=\"right\").dropna()\n recipes_df = merge_df[['recipe_id', 'metadata']].groupby(by=\"recipe_id\").first().reset_index()\n reviews_df = merge_df.drop(['metadata'], axis=\"columns\").reset_index()\n print(len(user_rates))\n print(sample_df_no_user.shape)\n #Using CountVectorizer to encode metadata into column\n count = CountVectorizer(stop_words='english')\n count_matrix = 
count.fit_transform(recipes_df['metadata'])\n #Create a new dataframe count_df with the vectors you get from this count transformation.\n count_df = pd.DataFrame(count_matrix.toarray(), index=recipes_df.recipe_id.tolist())\n #reduce dimensionality\n n_red = 250 # reduction factor\n svd = TruncatedSVD(n_components=n_red)\n latent_df = svd.fit_transform(count_df)\n\n n = n_red\n latent_df = pd.DataFrame(latent_df[:,0:n], index=recipes_df.recipe_id.tolist())\n latent_df\n\n # start recommendin similar recipes on the basis of user ratings (item-item collaborative filtering\n #### -> old: ratings = reviews_df.pivot(index = 'recipe_id', columns ='user_id', values = 'rating').fillna(0)\n #\n ratings1 = pd.merge(recipes_df[['recipe_id']], reviews_df, on=\"recipe_id\", how=\"right\")\n\n ratings = ratings1.pivot(index = 'recipe_id', columns ='user_id', values = 'rating').fillna(0)\n\n svd = TruncatedSVD(n_components=800)\n latent_df_2 = svd.fit_transform(ratings)\n\n index_list = reviews_df.groupby(by=\"recipe_id\").mean().index.tolist()\n latent_df_2 = pd.DataFrame(latent_df_2, index=index_list)\n\n latent_df.to_csv(f'data/latents/latent_content.csv', index=True)\n latent_df_2.to_csv(f'data/latents/latent_rating.csv', index=True)\n\n\n return latent_df, latent_df_2, user_rates\n\ndef get_one_recommendation(recipe_id, latent_1, latent_2, n_recommendations):\n # applying Cosine similarity\n # Get the latent vectors for recipe_id:\"45119\" from content and collaborative matrices\n v1 = np.array(latent_1.loc[recipe_id]).reshape(1, -1)\n v2 = np.array(latent_2.loc[recipe_id]).reshape(1, -1)\n\n# Compute the cosine similartity of this movie with the others in the list\n sim1 = cosine_similarity(latent_1, v1).reshape(-1)\n sim2 = cosine_similarity(latent_2, v2).reshape(-1)\n\n hybrid = ((sim1 + sim2)/2.0)\n\n dictDf = {'content': sim1 , 'collaborative': sim2, 'hybrid': hybrid}\n recommendation_df = pd.DataFrame(dictDf, index = latent_1.index)\n\n 
recommendation_df.sort_values('hybrid', ascending=False, inplace=True)\n recommendation_df.head(10)\n\n return recommendation_df.head(n_recommendations).reset_index().rename(columns={\"index\":\"recipe_id\"})\n\ndef get_user_recommendations(user_id, n_recommendations = 500):\n '''thi function gets the recommendations fo one user by taking all of its liked and disliked dishes,\n getting the recommendation based on each recipe and then summing the scores'''\n\n # !!!!!!!!!! this function still assumes the user ONLY liked recipes\n # !!!!!!!!!! No dislikes are considered so far!\n latent_1, latent_2, recipe_list = get_df_4_model(user_id)#, n_recommendations)\n\n recommendations = [get_one_recommendation(i, latent_1, latent_2, n_recommendations) for i in recipe_list]# actual_list]\n #concetenate the list to a big df\n recommendations_df=pd.concat(recommendations)\n # sum the scores using groupby\n grouped_recommendations= recommendations_df.groupby(by=\"recipe_id\").sum().sort_values(by=\"hybrid\", ascending=False)\n return grouped_recommendations\n #return recipe_list\n\ndef get_superuser_recommendation(n_recommendations=100):\n user_id = 424680\n\n latent_1, latent_2, recipe_list = get_df_4_model(user_id, n_recommendations)\n\n recipe_list = recipe_list[0:10]\n\n recommendations = [get_one_recommendation(i, latent_1, latent_2, n_recommendations) for i in recipe_list]# actual_list]\n #concetenate the list to a big df\n recommendations_df=pd.concat(recommendations)\n # sum the scores using groupby\n grouped_recommendations= recommendations_df.groupby(by=\"recipe_id\").sum().sort_values(by=\"hybrid\", ascending=False)\n\n print(f'The recommendation results are based on {len(recipe_list)} recipes the user liked or disliked')\n\n return grouped_recommendations[0:30]\n\n\nif __name__ == \"__main__\":\n\n result = get_superuser_recommendation(n_recommendations=4000)\n\n print('Here are the top results for the user:')\n print(result)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# A company pays its employees based on hours worked during the week.
# Compute the weekly wage of each of the N workers and the total the
# company paid for all N employees.
base = int(input("Dinero por hora trabajada: "))
emp = int(input("Dime el nº de empleados: "))
total = 0
# One pass per employee: read hours, compute and report the weekly wage.
for i in range(emp):
    horas = int(input(f"Horas trabajadas por el empleado {i + 1}: "))
    sueldo = horas * base
    total += sueldo
    print(f"Sueldo semanal del empleado {i + 1}: {sueldo}")
print(f"Total pagado por la empresa: {total}")
|
normal
|
{
"blob_id": "963e736fd4a942fb1c51e1e0a357ad6be48aed9a",
"index": 5985,
"step-1": "\r\n#Una empresa les paga a sus empleados con base en las horas trabajadas en la semana.\r\n#Realice un algoritmo para determinar el sueldo semanal de N trabajadores\r\n#y, además, calcule cuánto pagó la empresa por los N empleados.\r\n\r\nbase = int(input(\"Dinero por hora trabajada: \"))\r\nemp = int(input(\"Dime el nº de empleados: \"))\r\n\r\nfor i in range(0,8):\r\n \r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class StrComparison(MethodResource, Resource):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StrComparison(MethodResource, Resource):
def get(self, domain):
domain_found = ''
similar = False
for row in df_dict:
result = jellyfish.jaro_winkler_similarity(str(row['domain']),
str(domain))
if result > 0.97:
similar = True
break
detail = 'Found near domain by distance string comparison: ' + str(
result) if similar else 'Not similar domain found.'
domain_found = str(row['domain']) if similar else ''
return jsonify({'feature': 'strcomparison', 'domain': domain,
'result': domain_found, 'detail': detail})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
df = pd.read_csv('data/trancotop1m.csv')
df_dict = df.to_dict('records')
class StrComparison(MethodResource, Resource):
def get(self, domain):
domain_found = ''
similar = False
for row in df_dict:
result = jellyfish.jaro_winkler_similarity(str(row['domain']),
str(domain))
if result > 0.97:
similar = True
break
detail = 'Found near domain by distance string comparison: ' + str(
result) if similar else 'Not similar domain found.'
domain_found = str(row['domain']) if similar else ''
return jsonify({'feature': 'strcomparison', 'domain': domain,
'result': domain_found, 'detail': detail})
<|reserved_special_token_1|>
from flask import jsonify
from flask_restful import Resource
from flask_apispec.views import MethodResource
import pandas as pd
import jellyfish
df = pd.read_csv('data/trancotop1m.csv')
df_dict = df.to_dict('records')
class StrComparison(MethodResource, Resource):
def get(self, domain):
domain_found = ''
similar = False
for row in df_dict:
result = jellyfish.jaro_winkler_similarity(str(row['domain']),
str(domain))
if result > 0.97:
similar = True
break
detail = 'Found near domain by distance string comparison: ' + str(
result) if similar else 'Not similar domain found.'
domain_found = str(row['domain']) if similar else ''
return jsonify({'feature': 'strcomparison', 'domain': domain,
'result': domain_found, 'detail': detail})
<|reserved_special_token_1|>
from flask import jsonify
from flask_restful import Resource
from flask_apispec.views import MethodResource
import pandas as pd
import jellyfish
df = pd.read_csv('data/trancotop1m.csv')
df_dict = df.to_dict('records')
class StrComparison(MethodResource,Resource):
    """REST resource that checks a domain against the Tranco top-1M list.

    A query is flagged when a listed domain is within Jaro-Winkler
    similarity 0.97 of the queried domain (near-duplicate / typosquat
    detection).
    """

    # @requires_auth
    def get(self, domain):
        """Return a JSON verdict for ``domain``.

        Response fields: feature, domain, result (matched domain or ""),
        detail (human-readable explanation).
        """
        # Defaults also cover an empty df_dict, which previously raised
        # NameError (`row`/`result` unbound after the loop).
        domain_found = ""
        detail = "Not similar domain found."
        for row in df_dict:
            result = jellyfish.jaro_winkler_similarity(str(row['domain']), str(domain))
            if result > 0.97:  # near-duplicate threshold
                domain_found = str(row['domain'])
                detail = "Found near domain by distance string comparison: " + str(result)
                break
        return jsonify({"feature": "strcomparison", "domain": domain, "result": domain_found, "detail": detail})
|
flexible
|
{
"blob_id": "6d974580ff546bda17caa1e61e2621b4bc705f3f",
"index": 2952,
"step-1": "<mask token>\n\n\nclass StrComparison(MethodResource, Resource):\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass StrComparison(MethodResource, Resource):\n\n def get(self, domain):\n domain_found = ''\n similar = False\n for row in df_dict:\n result = jellyfish.jaro_winkler_similarity(str(row['domain']),\n str(domain))\n if result > 0.97:\n similar = True\n break\n detail = 'Found near domain by distance string comparison: ' + str(\n result) if similar else 'Not similar domain found.'\n domain_found = str(row['domain']) if similar else ''\n return jsonify({'feature': 'strcomparison', 'domain': domain,\n 'result': domain_found, 'detail': detail})\n",
"step-3": "<mask token>\ndf = pd.read_csv('data/trancotop1m.csv')\ndf_dict = df.to_dict('records')\n\n\nclass StrComparison(MethodResource, Resource):\n\n def get(self, domain):\n domain_found = ''\n similar = False\n for row in df_dict:\n result = jellyfish.jaro_winkler_similarity(str(row['domain']),\n str(domain))\n if result > 0.97:\n similar = True\n break\n detail = 'Found near domain by distance string comparison: ' + str(\n result) if similar else 'Not similar domain found.'\n domain_found = str(row['domain']) if similar else ''\n return jsonify({'feature': 'strcomparison', 'domain': domain,\n 'result': domain_found, 'detail': detail})\n",
"step-4": "from flask import jsonify\nfrom flask_restful import Resource\nfrom flask_apispec.views import MethodResource\nimport pandas as pd\nimport jellyfish\ndf = pd.read_csv('data/trancotop1m.csv')\ndf_dict = df.to_dict('records')\n\n\nclass StrComparison(MethodResource, Resource):\n\n def get(self, domain):\n domain_found = ''\n similar = False\n for row in df_dict:\n result = jellyfish.jaro_winkler_similarity(str(row['domain']),\n str(domain))\n if result > 0.97:\n similar = True\n break\n detail = 'Found near domain by distance string comparison: ' + str(\n result) if similar else 'Not similar domain found.'\n domain_found = str(row['domain']) if similar else ''\n return jsonify({'feature': 'strcomparison', 'domain': domain,\n 'result': domain_found, 'detail': detail})\n",
"step-5": "from flask import jsonify\nfrom flask_restful import Resource\nfrom flask_apispec.views import MethodResource\nimport pandas as pd\nimport jellyfish\n\ndf = pd.read_csv('data/trancotop1m.csv')\ndf_dict = df.to_dict('records')\n\nclass StrComparison(MethodResource,Resource):\n # @requires_auth\n def get(self, domain):\n domain_found = \"\"\n similar = False\n for row in df_dict:\n result = jellyfish.jaro_winkler_similarity(str(row['domain']), str(domain))\n if result > 0.97:\n similar = True\n break\n detail = \"Found near domain by distance string comparison: \" + str(result) if similar else \"Not similar domain found.\"\n domain_found = str(row['domain']) if similar else \"\"\n return jsonify({\"feature\": \"strcomparison\", \"domain\": domain, \"result\": domain_found, \"detail\": detail})",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class NeuralNetwork:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def withSeed(self, seed):
self.seed = seed
return self
<|reserved_special_token_0|>
def withMinErrorPercentage(self, min_error_percentage):
self.min_error_percentage = min_error_percentage
return self
def verbose(self, show_operations):
self.show_operations = show_operations
return self
<|reserved_special_token_0|>
def randomize(self):
random.seed(self.seed)
neural_network = [[[random.randint(-1, 0) for _ in range(self.
input_size + 1)] for _ in range(self.hidden_num)], [[random.
randint(-1, 0) for _ in range(self.hidden_num + 1)] for _ in
range(self.output_size)]]
return neural_network
def sigmoid(self, x):
return 1 / (1 + math.exp(-x))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    def test(self, input_):
        """Run the trained network on ``input_`` and print the prediction.

        Uses the weights stored in self.output_data by training; the last
        element of the ffnn result is the output-layer activation vector.
        """
        result = self.ffnn(self.output_data, input_)[-1]
        print('Output: ', result)
        print('Your number probably is: ', self.guessWith(result))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NeuralNetwork:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    def withSeed(self, seed):
        """Set the RNG seed used by randomize(); returns self for chaining."""
        self.seed = seed
        return self
<|reserved_special_token_0|>
    def withMinErrorPercentage(self, min_error_percentage):
        """Set the mean-squared-error threshold that stops training; returns self."""
        self.min_error_percentage = min_error_percentage
        return self
    def verbose(self, show_operations):
        """Enable/disable per-step progress printing; returns self for chaining."""
        self.show_operations = show_operations
        return self
<|reserved_special_token_0|>
    def randomize(self):
        """Build the initial [hidden, output] weight structure.

        Each neuron gets one weight per incoming connection plus a bias
        weight (the `+ 1`).
        NOTE(review): random.randint(-1, 0) yields only -1 or 0, so the
        initial weights are not continuous random values — confirm this is
        intentional (random.uniform(-1, 0) would be conventional).
        """
        random.seed(self.seed)
        neural_network = [[[random.randint(-1, 0) for _ in range(self.
            input_size + 1)] for _ in range(self.hidden_num)], [[random.
            randint(-1, 0) for _ in range(self.hidden_num + 1)] for _ in
            range(self.output_size)]]
        return neural_network
def sigmoid(self, x):
return 1 / (1 + math.exp(-x))
def product(self, v, w):
return sum([(a * b) for a, b in zip(v, w)])
def neuron_output(self, weights, inputs):
return self.sigmoid(self.product(weights, inputs))
<|reserved_special_token_0|>
def back_propagation(self, digit, inputs, target):
hidden_output, output = self.ffnn(digit, inputs)
new_output = []
new_hidden = []
error = sum((output - target) * (output - target) for output,
target in zip(output, target)) * 0.5
delta_output = [(output * (1 - output) * (output - target)) for
output, target in zip(output, target)]
for i, output_neuron in enumerate(digit[-1]):
for j, hidden_output_current in enumerate(hidden_output + [1]):
output_neuron[j] -= delta_output[i
] * hidden_output_current * self.alpha
new_output.append(output_neuron)
if self.show_operations:
print('Neuron weights: ', i, output_neuron)
hidden_delta = [(hidden_output_current * (1 - hidden_output_current
) * self.product(delta_output, [n[i] for n in digit[-1]])) for
i, hidden_output_current in enumerate(hidden_output)]
for i, hidden_neuron in enumerate(digit[0]):
for j, input_ in enumerate(inputs + [1]):
hidden_neuron[j] -= hidden_delta[i] * input_ * self.alpha
new_hidden.append(hidden_neuron)
if self.show_operations:
print('Hidden neuron weights: ', i, hidden_neuron)
return new_hidden, new_output, error
def randomTraining(self):
print('Starting training...')
start = time.time()
output = self.randomize()
sq_error = 1
iterations = 1
print('Initial random network: ', output)
while sq_error > self.min_error_percentage:
sq_error = 0
for i in range(len(self.digits)):
hidden, output, error = self.back_propagation(output, self.
digits[i], self.base_output[i])
output = [hidden, output]
sq_error += error
sq_error = sq_error / len(self.digits)
if self.show_operations:
print('Iterations: ', iterations, ', error percentage: ',
sq_error)
iterations += 1
self.output_data = output
end = time.time()
elapsed = end - start
print('Trained finished in: ', elapsed, ' seconds')
print('Total iterations: ', iterations)
print('Error percentage: ', sq_error)
print('Output result: ', self.output_data)
def guessWith(self, output):
index = 0
closest_dif = abs(output[0] - 1)
for i, value in enumerate(output):
current_dif = abs(value - 1)
if current_dif < closest_dif:
closest_dif = current_dif
index = i
return index
def test(self, input_):
result = self.ffnn(self.output_data, input_)[-1]
print('Output: ', result)
print('Your number probably is: ', self.guessWith(result))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NeuralNetwork:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def withSeed(self, seed):
self.seed = seed
return self
<|reserved_special_token_0|>
def withMinErrorPercentage(self, min_error_percentage):
self.min_error_percentage = min_error_percentage
return self
def verbose(self, show_operations):
self.show_operations = show_operations
return self
<|reserved_special_token_0|>
def randomize(self):
random.seed(self.seed)
neural_network = [[[random.randint(-1, 0) for _ in range(self.
input_size + 1)] for _ in range(self.hidden_num)], [[random.
randint(-1, 0) for _ in range(self.hidden_num + 1)] for _ in
range(self.output_size)]]
return neural_network
def sigmoid(self, x):
return 1 / (1 + math.exp(-x))
def product(self, v, w):
return sum([(a * b) for a, b in zip(v, w)])
def neuron_output(self, weights, inputs):
return self.sigmoid(self.product(weights, inputs))
def ffnn(self, neural_network, inputs):
outputs = []
for label in neural_network:
inputs = inputs + [1]
output = [self.neuron_output(neuron, inputs) for neuron in label]
outputs.append(output)
inputs = output
return outputs
def back_propagation(self, digit, inputs, target):
hidden_output, output = self.ffnn(digit, inputs)
new_output = []
new_hidden = []
error = sum((output - target) * (output - target) for output,
target in zip(output, target)) * 0.5
delta_output = [(output * (1 - output) * (output - target)) for
output, target in zip(output, target)]
for i, output_neuron in enumerate(digit[-1]):
for j, hidden_output_current in enumerate(hidden_output + [1]):
output_neuron[j] -= delta_output[i
] * hidden_output_current * self.alpha
new_output.append(output_neuron)
if self.show_operations:
print('Neuron weights: ', i, output_neuron)
hidden_delta = [(hidden_output_current * (1 - hidden_output_current
) * self.product(delta_output, [n[i] for n in digit[-1]])) for
i, hidden_output_current in enumerate(hidden_output)]
for i, hidden_neuron in enumerate(digit[0]):
for j, input_ in enumerate(inputs + [1]):
hidden_neuron[j] -= hidden_delta[i] * input_ * self.alpha
new_hidden.append(hidden_neuron)
if self.show_operations:
print('Hidden neuron weights: ', i, hidden_neuron)
return new_hidden, new_output, error
def randomTraining(self):
print('Starting training...')
start = time.time()
output = self.randomize()
sq_error = 1
iterations = 1
print('Initial random network: ', output)
while sq_error > self.min_error_percentage:
sq_error = 0
for i in range(len(self.digits)):
hidden, output, error = self.back_propagation(output, self.
digits[i], self.base_output[i])
output = [hidden, output]
sq_error += error
sq_error = sq_error / len(self.digits)
if self.show_operations:
print('Iterations: ', iterations, ', error percentage: ',
sq_error)
iterations += 1
self.output_data = output
end = time.time()
elapsed = end - start
print('Trained finished in: ', elapsed, ' seconds')
print('Total iterations: ', iterations)
print('Error percentage: ', sq_error)
print('Output result: ', self.output_data)
def guessWith(self, output):
index = 0
closest_dif = abs(output[0] - 1)
for i, value in enumerate(output):
current_dif = abs(value - 1)
if current_dif < closest_dif:
closest_dif = current_dif
index = i
return index
def test(self, input_):
result = self.ffnn(self.output_data, input_)[-1]
print('Output: ', result)
print('Your number probably is: ', self.guessWith(result))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NeuralNetwork:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, seed=5, alpha=0.1, min_error_percentage=0.0005,
input_size=25, output_size=10, hidden_num=5):
self.seed = seed
self.alpha = alpha
self.min_error_percentage = min_error_percentage
self.input_size = input_size
self.output_size = output_size
self.hidden_num = hidden_num
def withSeed(self, seed):
self.seed = seed
return self
<|reserved_special_token_0|>
def withMinErrorPercentage(self, min_error_percentage):
self.min_error_percentage = min_error_percentage
return self
def verbose(self, show_operations):
self.show_operations = show_operations
return self
<|reserved_special_token_0|>
def randomize(self):
random.seed(self.seed)
neural_network = [[[random.randint(-1, 0) for _ in range(self.
input_size + 1)] for _ in range(self.hidden_num)], [[random.
randint(-1, 0) for _ in range(self.hidden_num + 1)] for _ in
range(self.output_size)]]
return neural_network
def sigmoid(self, x):
return 1 / (1 + math.exp(-x))
def product(self, v, w):
return sum([(a * b) for a, b in zip(v, w)])
def neuron_output(self, weights, inputs):
return self.sigmoid(self.product(weights, inputs))
def ffnn(self, neural_network, inputs):
outputs = []
for label in neural_network:
inputs = inputs + [1]
output = [self.neuron_output(neuron, inputs) for neuron in label]
outputs.append(output)
inputs = output
return outputs
def back_propagation(self, digit, inputs, target):
hidden_output, output = self.ffnn(digit, inputs)
new_output = []
new_hidden = []
error = sum((output - target) * (output - target) for output,
target in zip(output, target)) * 0.5
delta_output = [(output * (1 - output) * (output - target)) for
output, target in zip(output, target)]
for i, output_neuron in enumerate(digit[-1]):
for j, hidden_output_current in enumerate(hidden_output + [1]):
output_neuron[j] -= delta_output[i
] * hidden_output_current * self.alpha
new_output.append(output_neuron)
if self.show_operations:
print('Neuron weights: ', i, output_neuron)
hidden_delta = [(hidden_output_current * (1 - hidden_output_current
) * self.product(delta_output, [n[i] for n in digit[-1]])) for
i, hidden_output_current in enumerate(hidden_output)]
for i, hidden_neuron in enumerate(digit[0]):
for j, input_ in enumerate(inputs + [1]):
hidden_neuron[j] -= hidden_delta[i] * input_ * self.alpha
new_hidden.append(hidden_neuron)
if self.show_operations:
print('Hidden neuron weights: ', i, hidden_neuron)
return new_hidden, new_output, error
def randomTraining(self):
print('Starting training...')
start = time.time()
output = self.randomize()
sq_error = 1
iterations = 1
print('Initial random network: ', output)
while sq_error > self.min_error_percentage:
sq_error = 0
for i in range(len(self.digits)):
hidden, output, error = self.back_propagation(output, self.
digits[i], self.base_output[i])
output = [hidden, output]
sq_error += error
sq_error = sq_error / len(self.digits)
if self.show_operations:
print('Iterations: ', iterations, ', error percentage: ',
sq_error)
iterations += 1
self.output_data = output
end = time.time()
elapsed = end - start
print('Trained finished in: ', elapsed, ' seconds')
print('Total iterations: ', iterations)
print('Error percentage: ', sq_error)
print('Output result: ', self.output_data)
def guessWith(self, output):
index = 0
closest_dif = abs(output[0] - 1)
for i, value in enumerate(output):
current_dif = abs(value - 1)
if current_dif < closest_dif:
closest_dif = current_dif
index = i
return index
def test(self, input_):
result = self.ffnn(self.output_data, input_)[-1]
print('Output: ', result)
print('Your number probably is: ', self.guessWith(result))
<|reserved_special_token_1|>
import math
import random
import time
import numpy as np
class NeuralNetwork:
digits = [
[
1,1,1,1,1,
1,0,0,0,1,
1,0,0,0,1,
1,0,0,0,1,
1,1,1,1,1
],
[
0,0,1,0,0,
0,0,1,0,0,
0,0,1,0,0,
0,0,1,0,0,
0,0,1,0,0
],
[
1,1,1,1,1,
0,0,0,0,1,
1,1,1,1,1,
1,0,0,0,0,
1,1,1,1,1
],
[
1,1,1,1,1,
0,0,0,0,1,
1,1,1,1,1,
0,0,0,0,1,
1,1,1,1,1
],
[
1,0,0,0,1,
1,0,0,0,1,
1,1,1,1,1,
0,0,0,0,1,
0,0,0,0,1
],
[
1,1,1,1,1,
1,0,0,0,0,
1,1,1,1,1,
0,0,0,0,1,
1,1,1,1,1
],
[
1,1,1,1,1,
1,0,0,0,0,
1,1,1,1,1,
1,0,0,0,1,
1,1,1,1,1
],
[
1,1,1,1,1,
0,0,0,1,0,
0,0,1,0,0,
0,1,0,0,0,
1,0,0,0,0
],
[
1,1,1,1,1,
1,0,0,0,1,
1,1,1,1,1,
1,0,0,0,1,
1,1,1,1,1
],
[
1,1,1,1,1,
1,0,0,0,1,
1,1,1,1,1,
0,0,0,0,1,
0,0,0,0,1
]
]
base_output = [
[1,0,0,0,0,0,0,0,0,0],
[0,1,0,0,0,0,0,0,0,0],
[0,0,1,0,0,0,0,0,0,0],
[0,0,0,1,0,0,0,0,0,0],
[0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,1,0,0,0,0],
[0,0,0,0,0,0,1,0,0,0],
[0,0,0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,0,0,1,0],
[0,0,0,0,0,0,0,0,0,1]
]
show_operations = False
def __init__(self, seed = 5, alpha = 0.1, min_error_percentage = 0.0005, input_size = 25, output_size = 10, hidden_num = 5):
self.seed = seed
self.alpha = alpha
self.min_error_percentage = min_error_percentage
self.input_size = input_size
self.output_size = output_size
self.hidden_num = hidden_num
def withSeed(self, seed):
self.seed = seed
return self
def withAlpha(self, alpha):
self.alpha = alpha
return self
def withMinErrorPercentage(self, min_error_percentage):
self.min_error_percentage = min_error_percentage
return self
def verbose(self, show_operations):
self.show_operations = show_operations
return self
def withHiddenLabels(self, hidden_num):
self.hidden_num = hidden_num
return self
def randomize(self):
random.seed(self.seed)
neural_network = [
[
[random.randint(-1, 0) for _ in range(self.input_size + 1)] for _ in range(self.hidden_num)
],
[
[random.randint(-1, 0) for _ in range(self.hidden_num + 1)] for _ in range(self.output_size)
]
]
return neural_network
def sigmoid(self, x):
return 1 / (1 + math.exp(-x))
def product(self, v, w):
return sum([a * b for a, b in zip(v, w)])
def neuron_output(self, weights, inputs):
return self.sigmoid(self.product(weights, inputs))
def ffnn(self, neural_network, inputs):
outputs = []
for label in neural_network:
inputs = inputs + [1]
output = [self.neuron_output(neuron, inputs) for neuron in label]
outputs.append(output)
inputs = output
return outputs
def back_propagation(self, digit, inputs, target):
hidden_output, output = self.ffnn(digit, inputs)
new_output = []
new_hidden = []
error = sum((output - target) * (output - target) for output, target in zip(output, target)) * 0.5
delta_output = [output * (1 - output) * (output - target) for output, target in zip(output, target)]
for i, output_neuron in enumerate(digit[-1]):
for j, hidden_output_current in enumerate(hidden_output + [1]):
output_neuron[j] -= delta_output[i] * hidden_output_current * self.alpha
new_output.append(output_neuron)
if (self.show_operations):
print("Neuron weights: ", i, output_neuron)
hidden_delta = [hidden_output_current * (1 - hidden_output_current) * self.product(delta_output, [n[i] for n in digit[-1]]) for i, hidden_output_current in enumerate(hidden_output)]
for i, hidden_neuron in enumerate(digit[0]):
for j, input_ in enumerate(inputs + [1]):
hidden_neuron[j] -= hidden_delta[i] * input_ * self.alpha
new_hidden.append(hidden_neuron)
if (self.show_operations):
print("Hidden neuron weights: ", i, hidden_neuron)
return new_hidden, new_output, error
def randomTraining(self):
print("Starting training...")
start = time.time()
output = self.randomize()
sq_error = 1
iterations = 1
print("Initial random network: ", output)
while sq_error > self.min_error_percentage:
sq_error = 0
for i in range(len(self.digits)):
hidden, output, error = self.back_propagation(output, self.digits[i], self.base_output[i])
output = [hidden, output]
sq_error += error
sq_error = sq_error / len(self.digits)
if (self.show_operations):
print("Iterations: ", iterations, ", error percentage: ", sq_error)
iterations += 1
self.output_data = output
end = time.time()
elapsed = end - start
print("Trained finished in: ", elapsed, " seconds")
print("Total iterations: ", iterations)
print("Error percentage: ", sq_error)
print("Output result: ", self.output_data)
def guessWith(self, output):
index = 0
closest_dif = abs(output[0] - 1)
for i, value in enumerate(output):
current_dif = abs(value - 1)
if (current_dif < closest_dif):
closest_dif = current_dif
index = i
return index
def test(self, input_):
result = self.ffnn(self.output_data, input_)[-1]
print("Output: ", result)
print("Your number probably is: ", self.guessWith(result))
|
flexible
|
{
"blob_id": "0af45914c8c111a42b0b9684f5f0ee19ef5eeb70",
"index": 7548,
"step-1": "<mask token>\n\n\nclass NeuralNetwork:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def withSeed(self, seed):\n self.seed = seed\n return self\n <mask token>\n\n def withMinErrorPercentage(self, min_error_percentage):\n self.min_error_percentage = min_error_percentage\n return self\n\n def verbose(self, show_operations):\n self.show_operations = show_operations\n return self\n <mask token>\n\n def randomize(self):\n random.seed(self.seed)\n neural_network = [[[random.randint(-1, 0) for _ in range(self.\n input_size + 1)] for _ in range(self.hidden_num)], [[random.\n randint(-1, 0) for _ in range(self.hidden_num + 1)] for _ in\n range(self.output_size)]]\n return neural_network\n\n def sigmoid(self, x):\n return 1 / (1 + math.exp(-x))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test(self, input_):\n result = self.ffnn(self.output_data, input_)[-1]\n print('Output: ', result)\n print('Your number probably is: ', self.guessWith(result))\n",
"step-2": "<mask token>\n\n\nclass NeuralNetwork:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def withSeed(self, seed):\n self.seed = seed\n return self\n <mask token>\n\n def withMinErrorPercentage(self, min_error_percentage):\n self.min_error_percentage = min_error_percentage\n return self\n\n def verbose(self, show_operations):\n self.show_operations = show_operations\n return self\n <mask token>\n\n def randomize(self):\n random.seed(self.seed)\n neural_network = [[[random.randint(-1, 0) for _ in range(self.\n input_size + 1)] for _ in range(self.hidden_num)], [[random.\n randint(-1, 0) for _ in range(self.hidden_num + 1)] for _ in\n range(self.output_size)]]\n return neural_network\n\n def sigmoid(self, x):\n return 1 / (1 + math.exp(-x))\n\n def product(self, v, w):\n return sum([(a * b) for a, b in zip(v, w)])\n\n def neuron_output(self, weights, inputs):\n return self.sigmoid(self.product(weights, inputs))\n <mask token>\n\n def back_propagation(self, digit, inputs, target):\n hidden_output, output = self.ffnn(digit, inputs)\n new_output = []\n new_hidden = []\n error = sum((output - target) * (output - target) for output,\n target in zip(output, target)) * 0.5\n delta_output = [(output * (1 - output) * (output - target)) for \n output, target in zip(output, target)]\n for i, output_neuron in enumerate(digit[-1]):\n for j, hidden_output_current in enumerate(hidden_output + [1]):\n output_neuron[j] -= delta_output[i\n ] * hidden_output_current * self.alpha\n new_output.append(output_neuron)\n if self.show_operations:\n print('Neuron weights: ', i, output_neuron)\n hidden_delta = [(hidden_output_current * (1 - hidden_output_current\n ) * self.product(delta_output, [n[i] for n in digit[-1]])) for \n i, hidden_output_current in enumerate(hidden_output)]\n for i, hidden_neuron in enumerate(digit[0]):\n for j, input_ in enumerate(inputs + [1]):\n hidden_neuron[j] -= hidden_delta[i] * input_ * self.alpha\n new_hidden.append(hidden_neuron)\n if 
self.show_operations:\n print('Hidden neuron weights: ', i, hidden_neuron)\n return new_hidden, new_output, error\n\n def randomTraining(self):\n print('Starting training...')\n start = time.time()\n output = self.randomize()\n sq_error = 1\n iterations = 1\n print('Initial random network: ', output)\n while sq_error > self.min_error_percentage:\n sq_error = 0\n for i in range(len(self.digits)):\n hidden, output, error = self.back_propagation(output, self.\n digits[i], self.base_output[i])\n output = [hidden, output]\n sq_error += error\n sq_error = sq_error / len(self.digits)\n if self.show_operations:\n print('Iterations: ', iterations, ', error percentage: ',\n sq_error)\n iterations += 1\n self.output_data = output\n end = time.time()\n elapsed = end - start\n print('Trained finished in: ', elapsed, ' seconds')\n print('Total iterations: ', iterations)\n print('Error percentage: ', sq_error)\n print('Output result: ', self.output_data)\n\n def guessWith(self, output):\n index = 0\n closest_dif = abs(output[0] - 1)\n for i, value in enumerate(output):\n current_dif = abs(value - 1)\n if current_dif < closest_dif:\n closest_dif = current_dif\n index = i\n return index\n\n def test(self, input_):\n result = self.ffnn(self.output_data, input_)[-1]\n print('Output: ', result)\n print('Your number probably is: ', self.guessWith(result))\n",
"step-3": "<mask token>\n\n\nclass NeuralNetwork:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def withSeed(self, seed):\n self.seed = seed\n return self\n <mask token>\n\n def withMinErrorPercentage(self, min_error_percentage):\n self.min_error_percentage = min_error_percentage\n return self\n\n def verbose(self, show_operations):\n self.show_operations = show_operations\n return self\n <mask token>\n\n def randomize(self):\n random.seed(self.seed)\n neural_network = [[[random.randint(-1, 0) for _ in range(self.\n input_size + 1)] for _ in range(self.hidden_num)], [[random.\n randint(-1, 0) for _ in range(self.hidden_num + 1)] for _ in\n range(self.output_size)]]\n return neural_network\n\n def sigmoid(self, x):\n return 1 / (1 + math.exp(-x))\n\n def product(self, v, w):\n return sum([(a * b) for a, b in zip(v, w)])\n\n def neuron_output(self, weights, inputs):\n return self.sigmoid(self.product(weights, inputs))\n\n def ffnn(self, neural_network, inputs):\n outputs = []\n for label in neural_network:\n inputs = inputs + [1]\n output = [self.neuron_output(neuron, inputs) for neuron in label]\n outputs.append(output)\n inputs = output\n return outputs\n\n def back_propagation(self, digit, inputs, target):\n hidden_output, output = self.ffnn(digit, inputs)\n new_output = []\n new_hidden = []\n error = sum((output - target) * (output - target) for output,\n target in zip(output, target)) * 0.5\n delta_output = [(output * (1 - output) * (output - target)) for \n output, target in zip(output, target)]\n for i, output_neuron in enumerate(digit[-1]):\n for j, hidden_output_current in enumerate(hidden_output + [1]):\n output_neuron[j] -= delta_output[i\n ] * hidden_output_current * self.alpha\n new_output.append(output_neuron)\n if self.show_operations:\n print('Neuron weights: ', i, output_neuron)\n hidden_delta = [(hidden_output_current * (1 - hidden_output_current\n ) * self.product(delta_output, [n[i] for n in digit[-1]])) for \n i, 
hidden_output_current in enumerate(hidden_output)]\n for i, hidden_neuron in enumerate(digit[0]):\n for j, input_ in enumerate(inputs + [1]):\n hidden_neuron[j] -= hidden_delta[i] * input_ * self.alpha\n new_hidden.append(hidden_neuron)\n if self.show_operations:\n print('Hidden neuron weights: ', i, hidden_neuron)\n return new_hidden, new_output, error\n\n def randomTraining(self):\n print('Starting training...')\n start = time.time()\n output = self.randomize()\n sq_error = 1\n iterations = 1\n print('Initial random network: ', output)\n while sq_error > self.min_error_percentage:\n sq_error = 0\n for i in range(len(self.digits)):\n hidden, output, error = self.back_propagation(output, self.\n digits[i], self.base_output[i])\n output = [hidden, output]\n sq_error += error\n sq_error = sq_error / len(self.digits)\n if self.show_operations:\n print('Iterations: ', iterations, ', error percentage: ',\n sq_error)\n iterations += 1\n self.output_data = output\n end = time.time()\n elapsed = end - start\n print('Trained finished in: ', elapsed, ' seconds')\n print('Total iterations: ', iterations)\n print('Error percentage: ', sq_error)\n print('Output result: ', self.output_data)\n\n def guessWith(self, output):\n index = 0\n closest_dif = abs(output[0] - 1)\n for i, value in enumerate(output):\n current_dif = abs(value - 1)\n if current_dif < closest_dif:\n closest_dif = current_dif\n index = i\n return index\n\n def test(self, input_):\n result = self.ffnn(self.output_data, input_)[-1]\n print('Output: ', result)\n print('Your number probably is: ', self.guessWith(result))\n",
"step-4": "<mask token>\n\n\nclass NeuralNetwork:\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, seed=5, alpha=0.1, min_error_percentage=0.0005,\n input_size=25, output_size=10, hidden_num=5):\n self.seed = seed\n self.alpha = alpha\n self.min_error_percentage = min_error_percentage\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_num = hidden_num\n\n def withSeed(self, seed):\n self.seed = seed\n return self\n <mask token>\n\n def withMinErrorPercentage(self, min_error_percentage):\n self.min_error_percentage = min_error_percentage\n return self\n\n def verbose(self, show_operations):\n self.show_operations = show_operations\n return self\n <mask token>\n\n def randomize(self):\n random.seed(self.seed)\n neural_network = [[[random.randint(-1, 0) for _ in range(self.\n input_size + 1)] for _ in range(self.hidden_num)], [[random.\n randint(-1, 0) for _ in range(self.hidden_num + 1)] for _ in\n range(self.output_size)]]\n return neural_network\n\n def sigmoid(self, x):\n return 1 / (1 + math.exp(-x))\n\n def product(self, v, w):\n return sum([(a * b) for a, b in zip(v, w)])\n\n def neuron_output(self, weights, inputs):\n return self.sigmoid(self.product(weights, inputs))\n\n def ffnn(self, neural_network, inputs):\n outputs = []\n for label in neural_network:\n inputs = inputs + [1]\n output = [self.neuron_output(neuron, inputs) for neuron in label]\n outputs.append(output)\n inputs = output\n return outputs\n\n def back_propagation(self, digit, inputs, target):\n hidden_output, output = self.ffnn(digit, inputs)\n new_output = []\n new_hidden = []\n error = sum((output - target) * (output - target) for output,\n target in zip(output, target)) * 0.5\n delta_output = [(output * (1 - output) * (output - target)) for \n output, target in zip(output, target)]\n for i, output_neuron in enumerate(digit[-1]):\n for j, hidden_output_current in enumerate(hidden_output + [1]):\n output_neuron[j] -= delta_output[i\n ] * 
hidden_output_current * self.alpha\n new_output.append(output_neuron)\n if self.show_operations:\n print('Neuron weights: ', i, output_neuron)\n hidden_delta = [(hidden_output_current * (1 - hidden_output_current\n ) * self.product(delta_output, [n[i] for n in digit[-1]])) for \n i, hidden_output_current in enumerate(hidden_output)]\n for i, hidden_neuron in enumerate(digit[0]):\n for j, input_ in enumerate(inputs + [1]):\n hidden_neuron[j] -= hidden_delta[i] * input_ * self.alpha\n new_hidden.append(hidden_neuron)\n if self.show_operations:\n print('Hidden neuron weights: ', i, hidden_neuron)\n return new_hidden, new_output, error\n\n def randomTraining(self):\n print('Starting training...')\n start = time.time()\n output = self.randomize()\n sq_error = 1\n iterations = 1\n print('Initial random network: ', output)\n while sq_error > self.min_error_percentage:\n sq_error = 0\n for i in range(len(self.digits)):\n hidden, output, error = self.back_propagation(output, self.\n digits[i], self.base_output[i])\n output = [hidden, output]\n sq_error += error\n sq_error = sq_error / len(self.digits)\n if self.show_operations:\n print('Iterations: ', iterations, ', error percentage: ',\n sq_error)\n iterations += 1\n self.output_data = output\n end = time.time()\n elapsed = end - start\n print('Trained finished in: ', elapsed, ' seconds')\n print('Total iterations: ', iterations)\n print('Error percentage: ', sq_error)\n print('Output result: ', self.output_data)\n\n def guessWith(self, output):\n index = 0\n closest_dif = abs(output[0] - 1)\n for i, value in enumerate(output):\n current_dif = abs(value - 1)\n if current_dif < closest_dif:\n closest_dif = current_dif\n index = i\n return index\n\n def test(self, input_):\n result = self.ffnn(self.output_data, input_)[-1]\n print('Output: ', result)\n print('Your number probably is: ', self.guessWith(result))\n",
"step-5": "import math\nimport random\nimport time\nimport numpy as np\n\nclass NeuralNetwork:\n\n digits = [\n [\n 1,1,1,1,1,\n 1,0,0,0,1,\n 1,0,0,0,1,\n 1,0,0,0,1,\n 1,1,1,1,1\n ],\n [\n 0,0,1,0,0,\n 0,0,1,0,0,\n 0,0,1,0,0,\n 0,0,1,0,0,\n 0,0,1,0,0\n ],\n [\n 1,1,1,1,1,\n 0,0,0,0,1,\n 1,1,1,1,1,\n 1,0,0,0,0,\n 1,1,1,1,1\n ],\n [\n 1,1,1,1,1,\n 0,0,0,0,1,\n 1,1,1,1,1,\n 0,0,0,0,1,\n 1,1,1,1,1\n ],\n [\n 1,0,0,0,1,\n 1,0,0,0,1,\n 1,1,1,1,1,\n 0,0,0,0,1,\n 0,0,0,0,1\n ],\n [\n 1,1,1,1,1,\n 1,0,0,0,0,\n 1,1,1,1,1,\n 0,0,0,0,1,\n 1,1,1,1,1\n ],\n [\n 1,1,1,1,1,\n 1,0,0,0,0,\n 1,1,1,1,1,\n 1,0,0,0,1,\n 1,1,1,1,1\n ],\n [\n 1,1,1,1,1,\n 0,0,0,1,0,\n 0,0,1,0,0,\n 0,1,0,0,0,\n 1,0,0,0,0\n ],\n [\n 1,1,1,1,1,\n 1,0,0,0,1,\n 1,1,1,1,1,\n 1,0,0,0,1,\n 1,1,1,1,1\n ],\n [\n 1,1,1,1,1,\n 1,0,0,0,1,\n 1,1,1,1,1,\n 0,0,0,0,1,\n 0,0,0,0,1\n ]\n ]\n\n base_output = [\n [1,0,0,0,0,0,0,0,0,0],\n [0,1,0,0,0,0,0,0,0,0],\n [0,0,1,0,0,0,0,0,0,0],\n [0,0,0,1,0,0,0,0,0,0],\n [0,0,0,0,1,0,0,0,0,0],\n [0,0,0,0,0,1,0,0,0,0],\n [0,0,0,0,0,0,1,0,0,0],\n [0,0,0,0,0,0,0,1,0,0],\n [0,0,0,0,0,0,0,0,1,0],\n [0,0,0,0,0,0,0,0,0,1]\n ]\n\n show_operations = False\n\n def __init__(self, seed = 5, alpha = 0.1, min_error_percentage = 0.0005, input_size = 25, output_size = 10, hidden_num = 5):\n self.seed = seed\n self.alpha = alpha\n self.min_error_percentage = min_error_percentage\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_num = hidden_num\n \n def withSeed(self, seed):\n self.seed = seed\n return self\n\n def withAlpha(self, alpha):\n self.alpha = alpha\n return self\n\n def withMinErrorPercentage(self, min_error_percentage):\n self.min_error_percentage = min_error_percentage\n return self\n\n def verbose(self, show_operations):\n self.show_operations = show_operations\n return self\n\n def withHiddenLabels(self, hidden_num):\n self.hidden_num = hidden_num\n return self\n\n def randomize(self):\n random.seed(self.seed)\n neural_network = [\n [\n [random.randint(-1, 0) 
for _ in range(self.input_size + 1)] for _ in range(self.hidden_num)\n ],\n [\n [random.randint(-1, 0) for _ in range(self.hidden_num + 1)] for _ in range(self.output_size)\n ]\n ]\n return neural_network\n \n def sigmoid(self, x):\n return 1 / (1 + math.exp(-x))\n\n def product(self, v, w):\n return sum([a * b for a, b in zip(v, w)])\n\n def neuron_output(self, weights, inputs):\n return self.sigmoid(self.product(weights, inputs))\n\n def ffnn(self, neural_network, inputs):\n outputs = []\n for label in neural_network:\n inputs = inputs + [1]\n output = [self.neuron_output(neuron, inputs) for neuron in label]\n outputs.append(output)\n inputs = output\n return outputs\n\n def back_propagation(self, digit, inputs, target):\n hidden_output, output = self.ffnn(digit, inputs)\n new_output = []\n new_hidden = []\n \n error = sum((output - target) * (output - target) for output, target in zip(output, target)) * 0.5\n delta_output = [output * (1 - output) * (output - target) for output, target in zip(output, target)]\n \n for i, output_neuron in enumerate(digit[-1]):\n for j, hidden_output_current in enumerate(hidden_output + [1]):\n output_neuron[j] -= delta_output[i] * hidden_output_current * self.alpha\n new_output.append(output_neuron)\n if (self.show_operations):\n print(\"Neuron weights: \", i, output_neuron)\n \n hidden_delta = [hidden_output_current * (1 - hidden_output_current) * self.product(delta_output, [n[i] for n in digit[-1]]) for i, hidden_output_current in enumerate(hidden_output)]\n \n for i, hidden_neuron in enumerate(digit[0]):\n for j, input_ in enumerate(inputs + [1]):\n hidden_neuron[j] -= hidden_delta[i] * input_ * self.alpha\n new_hidden.append(hidden_neuron)\n if (self.show_operations):\n print(\"Hidden neuron weights: \", i, hidden_neuron)\n\n return new_hidden, new_output, error \n \n def randomTraining(self):\n print(\"Starting training...\")\n start = time.time()\n output = self.randomize()\n sq_error = 1\n iterations = 1\n\n print(\"Initial 
random network: \", output)\n\n while sq_error > self.min_error_percentage:\n sq_error = 0\n for i in range(len(self.digits)):\n hidden, output, error = self.back_propagation(output, self.digits[i], self.base_output[i])\n output = [hidden, output]\n sq_error += error\n sq_error = sq_error / len(self.digits)\n if (self.show_operations):\n print(\"Iterations: \", iterations, \", error percentage: \", sq_error)\n iterations += 1\n \n self.output_data = output\n end = time.time()\n elapsed = end - start\n print(\"Trained finished in: \", elapsed, \" seconds\")\n print(\"Total iterations: \", iterations)\n print(\"Error percentage: \", sq_error)\n print(\"Output result: \", self.output_data)\n\n def guessWith(self, output):\n index = 0\n closest_dif = abs(output[0] - 1)\n for i, value in enumerate(output):\n current_dif = abs(value - 1)\n if (current_dif < closest_dif):\n closest_dif = current_dif\n index = i\n return index\n\n def test(self, input_):\n result = self.ffnn(self.output_data, input_)[-1]\n print(\"Output: \", result)\n print(\"Your number probably is: \", self.guessWith(result))\n",
"step-ids": [
7,
12,
13,
14,
19
]
}
|
[
7,
12,
13,
14,
19
] |
<|reserved_special_token_0|>
def mapfn(k, v):
for w in v.split():
yield w, 1
def reducefn(k, vs):
result = 0
for v in vs:
result += v
return result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def mapfn(k, v):
for w in v.split():
yield w, 1
def reducefn(k, vs):
result = 0
for v in vs:
result += v
return result
<|reserved_special_token_0|>
s.dump_results()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def mapfn(k, v):
for w in v.split():
yield w, 1
def reducefn(k, vs):
result = 0
for v in vs:
result += v
return result
s = mincemeat.Server()
s.map_input = FileShardsMapInput('./wordcount_shard*.json', JsonFileMapInput)
s.mapfn = mapfn
s.reducefn = reducefn
s.reduce_output_format = 'json'
s.reduce_shard_pattern = 'wordcount_output_%s.json'
results = s.run_server(password='')
s.dump_results()
<|reserved_special_token_1|>
import mincemeat
import sys
from mapinput import FileShardsMapInput
from mapinput import JsonFileMapInput
def mapfn(k, v):
for w in v.split():
yield w, 1
def reducefn(k, vs):
result = 0
for v in vs:
result += v
return result
s = mincemeat.Server()
s.map_input = FileShardsMapInput('./wordcount_shard*.json', JsonFileMapInput)
s.mapfn = mapfn
s.reducefn = reducefn
s.reduce_output_format = 'json'
s.reduce_shard_pattern = 'wordcount_output_%s.json'
results = s.run_server(password='')
s.dump_results()
<|reserved_special_token_1|>
#!/usr/bin/env python
import mincemeat
import sys
from mapinput import FileShardsMapInput
from mapinput import JsonFileMapInput
def mapfn(k, v):
    """Map phase: emit a (word, 1) pair for every whitespace-separated
    token in the shard text.  The shard key ``k`` is ignored.
    """
    for token in v.split():
        yield (token, 1)
def reducefn(k, vs):
    """Reduce phase: total the per-word counts emitted by the mappers.

    ``k`` is the word itself (unused); ``vs`` is the iterable of counts
    (all 1s from mapfn) collected for that word.  The built-in sum()
    replaces the original hand-rolled accumulator loop and returns 0 for
    an empty iterable, matching the old behaviour.
    """
    return sum(vs)
# Configure a mincemeat map-reduce server for the word-count job.
s = mincemeat.Server()
# Input: every JSON shard file matching the glob, each parsed by JsonFileMapInput.
s.map_input = FileShardsMapInput("./wordcount_shard*.json", JsonFileMapInput)
s.mapfn = mapfn
s.reducefn = reducefn
# Reduce output is written as JSON shards named wordcount_output_<shard>.json.
s.reduce_output_format = "json"
s.reduce_shard_pattern = "wordcount_output_%s.json"
# Run the job; workers authenticate with an empty password.
results = s.run_server(password="")
# Emit the collected results (project helper; exact sink not shown here).
s.dump_results()
|
flexible
|
{
"blob_id": "09c6dd0f32b8d71dacdd8b10d995ea1575f91f6f",
"index": 2887,
"step-1": "<mask token>\n\n\ndef mapfn(k, v):\n for w in v.split():\n yield w, 1\n\n\ndef reducefn(k, vs):\n result = 0\n for v in vs:\n result += v\n return result\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef mapfn(k, v):\n for w in v.split():\n yield w, 1\n\n\ndef reducefn(k, vs):\n result = 0\n for v in vs:\n result += v\n return result\n\n\n<mask token>\ns.dump_results()\n",
"step-3": "<mask token>\n\n\ndef mapfn(k, v):\n for w in v.split():\n yield w, 1\n\n\ndef reducefn(k, vs):\n result = 0\n for v in vs:\n result += v\n return result\n\n\ns = mincemeat.Server()\ns.map_input = FileShardsMapInput('./wordcount_shard*.json', JsonFileMapInput)\ns.mapfn = mapfn\ns.reducefn = reducefn\ns.reduce_output_format = 'json'\ns.reduce_shard_pattern = 'wordcount_output_%s.json'\nresults = s.run_server(password='')\ns.dump_results()\n",
"step-4": "import mincemeat\nimport sys\nfrom mapinput import FileShardsMapInput\nfrom mapinput import JsonFileMapInput\n\n\ndef mapfn(k, v):\n for w in v.split():\n yield w, 1\n\n\ndef reducefn(k, vs):\n result = 0\n for v in vs:\n result += v\n return result\n\n\ns = mincemeat.Server()\ns.map_input = FileShardsMapInput('./wordcount_shard*.json', JsonFileMapInput)\ns.mapfn = mapfn\ns.reducefn = reducefn\ns.reduce_output_format = 'json'\ns.reduce_shard_pattern = 'wordcount_output_%s.json'\nresults = s.run_server(password='')\ns.dump_results()\n",
"step-5": "#!/usr/bin/env python\nimport mincemeat\nimport sys\n\nfrom mapinput import FileShardsMapInput\nfrom mapinput import JsonFileMapInput\n\ndef mapfn(k, v):\n for w in v.split():\n yield w, 1\n\ndef reducefn(k, vs):\n result = 0\n for v in vs:\n result += v\n return result\n\ns = mincemeat.Server()\n\ns.map_input = FileShardsMapInput(\"./wordcount_shard*.json\", JsonFileMapInput)\ns.mapfn = mapfn\ns.reducefn = reducefn\ns.reduce_output_format = \"json\"\ns.reduce_shard_pattern = \"wordcount_output_%s.json\"\nresults = s.run_server(password=\"\")\ns.dump_results()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import time
inputStr = """crruafyzloguvxwctqmphenbkd
srcjafyzlcguvrwctqmphenbkd
srijafyzlogbpxwctgmphenbkd
zrijafyzloguvxrctqmphendkd
srijabyzloguvowcqqmphenbkd
srijafyzsoguvxwctbmpienbkd
srirtfyzlognvxwctqmphenbkd
srijafyzloguvxwctgmphenbmq
senjafyzloguvxectqmphenbkd
srijafyeloguvxwwtqmphembkd
srijafyzlogurxtctqmpkenbkd
srijafyzlkguvxictqhphenbkd
srijafgzlogunxwctqophenbkd
shijabyzloguvxwctqmqhenbkd
srjoafyzloguvxwctqmphenbwd
srijafyhloguvxwmtqmphenkkd
srijadyzlogwvxwctqmphenbed
brijafyzloguvmwctqmphenhkd
smijafyzlhguvxwctqmphjnbkd
sriqafvzloguvxwctqmpheebkd
srijafyzloguvxwisqmpuenbkd
mrijakyuloguvxwctqmphenbkd
srnfafyzloguvxwctqmphgnbkd
srijadyzloguvxwhfqmphenbkd
srijafhzloguvxwctdmlhenbkd
srijafyzloguvxwcsqmphykbkd
srijafyzlogwvxwatqmphhnbkd
srijafyzlozqvxwctqmphenbku
srijafyzloguvxwcbamphenbgd
srijafyzlfguvxwctqmphzybkd
srijafyzloguqxwetqmphenkkd
srijafyylogubxwttqmphenbkd
srijafyzloguvxzctadphenbkd
srijafyzloguoxwhtqmchenbkd
srijafyzloguvxwcvqmzhenbko
srijnfyzloguvxwctqmchenjkd
srijaryzloggvxwctqzphenbkd
srijafhzleguvxwcxqmphenbkd
ssijafyzllguvxfctqmphenbkd
srijafyzloguvxdctqmfhenbcd
srijafyzloguvxfctqmplynbkd
srijaftzlogavxwcrqmphenbkd
sriwaoyzloguvxwctqmphenbtd
srijahyzlogunxwctqmphenbvd
srjjafyzloguzxwctumphenbkd
nrijafyzlxguvxwctqmphanbkd
srijafezlqguyxwctqmphenbkd
srijafygloguvxwjtqcphenbkd
erijafyzloguvxoctqmnhenbkd
ssijafyzllguvxwbtqmphenbkd
sriaafyzloguvxwctqqphenbkv
frijafyzloguvswctwmphenbkd
srijafyzyogkvxwctqmprenbkd
syijafyzuoguvxwctqmkhenbkd
srijafyzloganxwctqmphenbkf
srijafyzloguvxwftqmxhenbkq
srijafyflogxvxwctqmghenbkd
srijafyzsoguvxwctqmpjenwkd
srujafylloguvxwctqmphenckd
srijafyzlpzuvxwctqmphenbud
srijafyzlogfvxwctqmhhenbwd
srijafjzlogusxwctqmphepbkd
srijlfyzloguvxwctqfphenzkd
srijafyzlogwvxwctqyphenbqd
srijafyzloluvxwctqtphenukd
srizafyzlowuvxwctqmphqnbkd
sritafkzlkguvxwctqmphenbkd
sbijafdzloguvxgctqmphenbkd
crijafyeloguvxwctqmpsenbkd
srijafyvlogulxwctqmphenbkk
srijafyologuvxwctqmehegbkd
siijafyzloguvxwctjmphenbmd
srijafyzlupuvxwctqmpheabkd
srijafyzlogumxwctqqphanbkd
srijxfyzlogujxwcqqmphenbkd
irijafizeoguvxwctqmphenbkd
sgijafyzloguvtwctqmpfenbkd
srijzfyzloguvmwctnmphenbkd
srijafyzwohuvxwctqmthenbkd
srijafyzlhguvxoctqwphenbkd
srgjafyplogxvxwctqmphenbkd
srijafyqlogovxwctqzphenbkd
srijafjzloguvlnvtqmphenbkd
srijafyzooguvxwctqmphenvud
srijafyzgoguvxwctumphgnbkd
srijaffzloguvxwdqqmphenbkd
srijafyzlogugxwctqxphenbkr
srijafyzlogutxwctqmmcenbkd
srifafyzlhguwxwctqmphenbkd
mrimajyzloguvxwctqmphenbkd
sriyafyzloguvxwcthmphejbkd
srieakyzlokuvxwctqmphenbkd
srisafyzloguhxwctqmphecbkd
srijanyzloguvxcctqmxhenbkd
srijafyzypguvxwctqmqhenbkd
sryjtfyzlvguvxwctqmphenbkd
srijafyzlsguvxwctqmqfenbkd
srijafyzlogudxwbtqwphenbkd
srijysyzloguvxwctqmpvenbkd
srijafyzloggvxwjtqmphegbkd
srijgfyzloguvxwctqmbhdnbkd
ssijufyzloguvawctqmphenbkd
skojafyzloguvxwctqmphenbnd
srijafylloguvxwcqqmpienbkd
trioafyzloguvqwctqmphenbkd
srijafydloguvxwctqmpzjnbkd
saijafvzloguvxwcqqmphenbkd
srhjapyzloguvxwctqmbhenbkd
srijafyzlfguvxwcsqmpwenbkd
shijafyzboguvxwctqmphenbmd
srizafysloguvxwrtqmphenbkd
srijafyzloguvxwciqmwhenbkj
qrijafyzloduvxwctqmphenbko
srijefyuloguvxwctqmphenbed
srijafyzlobuvxwctqmphenhbd
srijafyzloxuvxwctqmpheabkq
srijafyzloguvrwctqmghenkkd
sfisafywloguvxwctqmphenbkd
srgjafyzlogurxwctqmphenbkp
srijafhzloguvxwcjqmphenhkd
srijafyylogufxwrtqmphenbkd
srijafyzvoguvxwzkqmphenbkd
sqijafyzloguvxwctqmpheqbxd
srijafyvloguvxwctqzpherbkd
srijufyzloguvxlcsqmphenbkd
srijafykloguvxlccqmphenbkd
srijafyzloguexwcrqmphenzkd
sridifyzloguyxwctqmphenbkd
srijafyzlogfvxwctqlphenbkl
srijafyzlodqdxwctqmphenbkd
srijafyzloruvxactqmphenekd
grijafyzloguvxpctmmphenbkd
srsjakyzloguvxwctqmphvnbkd
srikafyvloguvxwrtqmphenbkd
srijafyzloguvxwctqjpserbkd
jrijafyzloguvxwctqmpgesbkd
swijafyzluguvxwctqmfhenbkd
srijanynlogovxwctqmphenbkd
jrijafyzloguvxwctymphrnbkd
srinafyzloguvewctqmphenbzd
srijakyzloguvxwctqmphcnbka
srijafyhlobuvxwctqmphenbka
srijafyzcogusxwctqmphwnbkd
srijavyzlosuvxwctqmphjnbkd
orijafyzxoguvxwcnqmphenbkd
srijafyzlogcvxwvtqmthenbkd
srijapyzloauvxwctqmphenvkd
srijaflzloguhxwctqmphenbwd
smijafyzlonuvxwctqmphenbkw
jrijafyzloguvxwclqmnhenbkd
srijaqyzloguvqwctqmphenskd
srijasyzloguvxwctqmvhenbku
crijtfyzloguvxwctqmthenbkd
srrkafyzvoguvxwctqmphenbkd
srijatyzloguvewctqmphenbld
srfjafyyloguvnwctqmphenbkd
srijafyzloguvxwctqjpbenbkt
hrijafyzooguvxwctqmphenbld
srijafbzlogscxwctqmphenbkd
srinafyzlogxvxwctqqphenbkd
slijafyzloglvxwctqmphenbdd
srijafyzlogjvxwcsqmphenbld
sryjcfyzloguvewctqmphenbkd
srijafyzloguexwctqmohknbkd
jaijafyzlogevxwctqmphenbkd
srijafbzlogavxwctqmphenbki
srijafozlogpvxwctqmphgnbkd
srijdfyzloguvxwczqmphenbkm
srijafyzlobuvxwctqmphxndkd
mrijifyzlhguvxwctqmphenbkd
srijafyzloguvxbctumphjnbkd
srijafyzloyuvxwptqmphlnbkd
arijafyzloguvxwcsqmohenbkd
srijaftzioguvxwttqmphenbkd
srijafyzlqsuvxwctqmphxnbkd
srijafyzioguvxwctqnphetbkd
prijafbzloguvxdctqmphenbkd
srijaeyzlnguvxwmtqmphenbkd
srijofyzloguvqwctqmphonbkd
srixaryzpoguvxwctqmphenbkd
srijafyzlowuvxwcwhmphenbkd
srijafydloguvxwctqmptenikd
srijqfyzlogtvfwctqmphenbkd
srijafyzloguvxlctqmpvenbgd
srijafyzlbguvxwjtqgphenbkd
srijafyzlohuqxwctqmphenbka
srijafyzroguvxictqmphynbkd
srijafyzloguvxdctjmphenjkd
srijaoczloguvxwctqmphenbjd
srajafhzloguvxwctqmphenbke
srijofyzloduvxwctqmphanbkd
srijafytloguvxwmtnmphenbkd
srijafyzuoguvxwceqmpgenbkd
rrijafyzloyuvxwctqmphlnbkd
srljafyzloguvxictqmohenbkd
srijafyzlogulxwcrqrphenbkd
srajafyzloguvxwctqmphanbke
srijafyzlhguvxwxtqmpheabkd
sxijafyzloggwxwctqmphenbkd
srijafyultguvxwctqmphinbkd
srijafyzloguvtwctqmfhvnbkd
srijafwzloruvxwctquphenbkd
srbjafyzxoguuxwctqmphenbkd
erijafyzlxguvxbctqmphenbkd
srijagyzlojubxwctqmphenbkd
srijafyzloguvxwdtqmchenakd
srijafkzlogukxwctqiphenbkd
mridafyzloguvxwctqmphenrkd
szqjafyzloguvxwctqmpheibkd
srijahyzloguvxwctcmphenekd
srijafyzloguvxwczpuphenbkd
srijafyzcoguvfwctqmphenbkq
qriiafyzloguvxwctqmpheebkd
srijpfyzloguvxlctqmphenokd
srijzfyzlotuvxwcjqmphenbkd
srinafyqloguvxwctfmphenbkd
srijafyzlogjvxpltqmphenbkd
srijafyzlotuvxwutqmphenbtd
sridafyzloguvxwctqmpyenokd
srxjafyzqogyvxwctqmphenbkd
ssijafyzzoguvxwctqmphenbad
srijafrzloguvxwctqmphekpkd
srijafyzlfgrvxactqmphenbkd
srijafyzroguvxwttqmphekbkd
srijefyzloguvxwctqmpqenbrd
srijefycloguvxwctqmchenbkd
srzjafyzloguvxwcqqmphanbkd
srijauyzlhguvxwctqmphenbgd
srijafyzloguvmwvnqmphenbkd
srihafyzloguvlwotqmphenbkd
srigafyzloguvxwctqmphennsd
sriuafzzloguvxwcuqmphenbkd
srijavuzllguvxwctqmphenbkd
srijafjzloguvlnctqmphenbkd
lrirafyzloguvxwctqmphenbld
soijarxzloguvxwctqmphenbkd
srijapyzlnguvxwctqmdhenbkd
srijafyzkogujxmctqmphenbkd
srijafuzloguvxwcsqvphenbkd
srijagyzzoguvxwctqmpvenbkd
srijafyzlovuvxwctqmrhenbxd
srijafyzqoguvxwctwmpienbkd
sxijafyzloguvxwutqmphenlkd
srijafyzlhgzvxwctqmphqnbkd
srijajyzloguvxwcbwmphenbkd
srijazyzloguvxwhtqmphenbkx
srgjafyzloguvvwctqmphdnbkd
rrivafyzloguvxjctqmphenbkd
srijifyzdoguvxwctqmphenbka
hrijafyzloguvxectqmpheybkd"""
# Advent-of-Code style box-ID checksum: count the lines containing any
# character exactly twice, and the lines containing any character exactly
# three times, then multiply the two tallies.
startTime = time.time()
inputList = list(map(str, inputStr.splitlines()))

doubleDupes = 0
tripleDupes = 0

for line in inputList:
    # Build the character frequencies in a single O(n) pass instead of
    # calling line.count(ch) for every character (O(n^2) per line), and
    # drop the hasDoubleDupes/hasTripleDupes flag bookkeeping entirely.
    freq = {}
    for ch in line:
        freq[ch] = freq.get(ch, 0) + 1
    counts = set(freq.values())
    if 2 in counts:
        doubleDupes += 1
    if 3 in counts:
        tripleDupes += 1
    # Running tallies printed per line, exactly as the original did.
    print(doubleDupes)
    print(tripleDupes)

checkSum = doubleDupes * tripleDupes
print('Checksum: ' + str(checkSum))
print("%s seconds" % (time.time() - startTime))
|
normal
|
{
"blob_id": "9620479e9ac27c1c7833c9a31b9cb18408b8d361",
"index": 4019,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor string in inputList:\n hasDoubleDupes = False\n hasTripleDupes = False\n for char in string:\n numRepeatsChar = string.count(char)\n if numRepeatsChar == 2 and not hasDoubleDupes:\n doubleDupes += 1\n hasDoubleDupes = True\n elif numRepeatsChar == 3 and not hasTripleDupes:\n tripleDupes += 1\n hasTripleDupes = True\n elif hasDoubleDupes and hasTripleDupes:\n break\n print(doubleDupes)\n print(tripleDupes)\n<mask token>\nprint('Checksum: ' + str(checkSum))\nprint('%s seconds' % (time.time() - startTime))\n",
"step-3": "<mask token>\ninputStr = \"\"\"crruafyzloguvxwctqmphenbkd\nsrcjafyzlcguvrwctqmphenbkd\nsrijafyzlogbpxwctgmphenbkd\nzrijafyzloguvxrctqmphendkd\nsrijabyzloguvowcqqmphenbkd\nsrijafyzsoguvxwctbmpienbkd\nsrirtfyzlognvxwctqmphenbkd\nsrijafyzloguvxwctgmphenbmq\nsenjafyzloguvxectqmphenbkd\nsrijafyeloguvxwwtqmphembkd\nsrijafyzlogurxtctqmpkenbkd\nsrijafyzlkguvxictqhphenbkd\nsrijafgzlogunxwctqophenbkd\nshijabyzloguvxwctqmqhenbkd\nsrjoafyzloguvxwctqmphenbwd\nsrijafyhloguvxwmtqmphenkkd\nsrijadyzlogwvxwctqmphenbed\nbrijafyzloguvmwctqmphenhkd\nsmijafyzlhguvxwctqmphjnbkd\nsriqafvzloguvxwctqmpheebkd\nsrijafyzloguvxwisqmpuenbkd\nmrijakyuloguvxwctqmphenbkd\nsrnfafyzloguvxwctqmphgnbkd\nsrijadyzloguvxwhfqmphenbkd\nsrijafhzloguvxwctdmlhenbkd\nsrijafyzloguvxwcsqmphykbkd\nsrijafyzlogwvxwatqmphhnbkd\nsrijafyzlozqvxwctqmphenbku\nsrijafyzloguvxwcbamphenbgd\nsrijafyzlfguvxwctqmphzybkd\nsrijafyzloguqxwetqmphenkkd\nsrijafyylogubxwttqmphenbkd\nsrijafyzloguvxzctadphenbkd\nsrijafyzloguoxwhtqmchenbkd\nsrijafyzloguvxwcvqmzhenbko\nsrijnfyzloguvxwctqmchenjkd\nsrijaryzloggvxwctqzphenbkd\nsrijafhzleguvxwcxqmphenbkd\nssijafyzllguvxfctqmphenbkd\nsrijafyzloguvxdctqmfhenbcd\nsrijafyzloguvxfctqmplynbkd\nsrijaftzlogavxwcrqmphenbkd\nsriwaoyzloguvxwctqmphenbtd\nsrijahyzlogunxwctqmphenbvd\nsrjjafyzloguzxwctumphenbkd\nnrijafyzlxguvxwctqmphanbkd\nsrijafezlqguyxwctqmphenbkd\nsrijafygloguvxwjtqcphenbkd\nerijafyzloguvxoctqmnhenbkd\nssijafyzllguvxwbtqmphenbkd\nsriaafyzloguvxwctqqphenbkv\nfrijafyzloguvswctwmphenbkd\nsrijafyzyogkvxwctqmprenbkd\nsyijafyzuoguvxwctqmkhenbkd\nsrijafyzloganxwctqmphenbkf\nsrijafyzloguvxwftqmxhenbkq\nsrijafyflogxvxwctqmghenbkd\nsrijafyzsoguvxwctqmpjenwkd\nsrujafylloguvxwctqmphenckd\nsrijafyzlpzuvxwctqmphenbud\nsrijafyzlogfvxwctqmhhenbwd\nsrijafjzlogusxwctqmphepbkd\nsrijlfyzloguvxwctqfphenzkd\nsrijafyzlogwvxwctqyphenbqd\nsrijafyzloluvxwctqtphenukd\nsrizafyzlowuvxwctqmphqnbkd\nsritafkzlkguvxwctqmphenbkd\nsbijafdzloguvxgctqmphenbkd\ncrijafyeloguvxwctqmpsenbkd\nsrijafyvlogulxwctqmphenbkk
\nsrijafyologuvxwctqmehegbkd\nsiijafyzloguvxwctjmphenbmd\nsrijafyzlupuvxwctqmpheabkd\nsrijafyzlogumxwctqqphanbkd\nsrijxfyzlogujxwcqqmphenbkd\nirijafizeoguvxwctqmphenbkd\nsgijafyzloguvtwctqmpfenbkd\nsrijzfyzloguvmwctnmphenbkd\nsrijafyzwohuvxwctqmthenbkd\nsrijafyzlhguvxoctqwphenbkd\nsrgjafyplogxvxwctqmphenbkd\nsrijafyqlogovxwctqzphenbkd\nsrijafjzloguvlnvtqmphenbkd\nsrijafyzooguvxwctqmphenvud\nsrijafyzgoguvxwctumphgnbkd\nsrijaffzloguvxwdqqmphenbkd\nsrijafyzlogugxwctqxphenbkr\nsrijafyzlogutxwctqmmcenbkd\nsrifafyzlhguwxwctqmphenbkd\nmrimajyzloguvxwctqmphenbkd\nsriyafyzloguvxwcthmphejbkd\nsrieakyzlokuvxwctqmphenbkd\nsrisafyzloguhxwctqmphecbkd\nsrijanyzloguvxcctqmxhenbkd\nsrijafyzypguvxwctqmqhenbkd\nsryjtfyzlvguvxwctqmphenbkd\nsrijafyzlsguvxwctqmqfenbkd\nsrijafyzlogudxwbtqwphenbkd\nsrijysyzloguvxwctqmpvenbkd\nsrijafyzloggvxwjtqmphegbkd\nsrijgfyzloguvxwctqmbhdnbkd\nssijufyzloguvawctqmphenbkd\nskojafyzloguvxwctqmphenbnd\nsrijafylloguvxwcqqmpienbkd\ntrioafyzloguvqwctqmphenbkd\nsrijafydloguvxwctqmpzjnbkd\nsaijafvzloguvxwcqqmphenbkd\nsrhjapyzloguvxwctqmbhenbkd\nsrijafyzlfguvxwcsqmpwenbkd\nshijafyzboguvxwctqmphenbmd\nsrizafysloguvxwrtqmphenbkd\nsrijafyzloguvxwciqmwhenbkj\nqrijafyzloduvxwctqmphenbko\nsrijefyuloguvxwctqmphenbed\nsrijafyzlobuvxwctqmphenhbd\nsrijafyzloxuvxwctqmpheabkq\nsrijafyzloguvrwctqmghenkkd\nsfisafywloguvxwctqmphenbkd\nsrgjafyzlogurxwctqmphenbkp\nsrijafhzloguvxwcjqmphenhkd\nsrijafyylogufxwrtqmphenbkd\nsrijafyzvoguvxwzkqmphenbkd\nsqijafyzloguvxwctqmpheqbxd\nsrijafyvloguvxwctqzpherbkd\nsrijufyzloguvxlcsqmphenbkd\nsrijafykloguvxlccqmphenbkd\nsrijafyzloguexwcrqmphenzkd\nsridifyzloguyxwctqmphenbkd\nsrijafyzlogfvxwctqlphenbkl\nsrijafyzlodqdxwctqmphenbkd\nsrijafyzloruvxactqmphenekd\ngrijafyzloguvxpctmmphenbkd\nsrsjakyzloguvxwctqmphvnbkd\nsrikafyvloguvxwrtqmphenbkd\nsrijafyzloguvxwctqjpserbkd\njrijafyzloguvxwctqmpgesbkd\nswijafyzluguvxwctqmfhenbkd\nsrijanynlogovxwctqmphenbkd\njrijafyzloguvxwctymphrnbkd\nsrinafyzloguvewctqmphenbzd\nsrijakyzloguvxwctqmphcnbka\nsrijafyhlo
buvxwctqmphenbka\nsrijafyzcogusxwctqmphwnbkd\nsrijavyzlosuvxwctqmphjnbkd\norijafyzxoguvxwcnqmphenbkd\nsrijafyzlogcvxwvtqmthenbkd\nsrijapyzloauvxwctqmphenvkd\nsrijaflzloguhxwctqmphenbwd\nsmijafyzlonuvxwctqmphenbkw\njrijafyzloguvxwclqmnhenbkd\nsrijaqyzloguvqwctqmphenskd\nsrijasyzloguvxwctqmvhenbku\ncrijtfyzloguvxwctqmthenbkd\nsrrkafyzvoguvxwctqmphenbkd\nsrijatyzloguvewctqmphenbld\nsrfjafyyloguvnwctqmphenbkd\nsrijafyzloguvxwctqjpbenbkt\nhrijafyzooguvxwctqmphenbld\nsrijafbzlogscxwctqmphenbkd\nsrinafyzlogxvxwctqqphenbkd\nslijafyzloglvxwctqmphenbdd\nsrijafyzlogjvxwcsqmphenbld\nsryjcfyzloguvewctqmphenbkd\nsrijafyzloguexwctqmohknbkd\njaijafyzlogevxwctqmphenbkd\nsrijafbzlogavxwctqmphenbki\nsrijafozlogpvxwctqmphgnbkd\nsrijdfyzloguvxwczqmphenbkm\nsrijafyzlobuvxwctqmphxndkd\nmrijifyzlhguvxwctqmphenbkd\nsrijafyzloguvxbctumphjnbkd\nsrijafyzloyuvxwptqmphlnbkd\narijafyzloguvxwcsqmohenbkd\nsrijaftzioguvxwttqmphenbkd\nsrijafyzlqsuvxwctqmphxnbkd\nsrijafyzioguvxwctqnphetbkd\nprijafbzloguvxdctqmphenbkd\nsrijaeyzlnguvxwmtqmphenbkd\nsrijofyzloguvqwctqmphonbkd\nsrixaryzpoguvxwctqmphenbkd\nsrijafyzlowuvxwcwhmphenbkd\nsrijafydloguvxwctqmptenikd\nsrijqfyzlogtvfwctqmphenbkd\nsrijafyzloguvxlctqmpvenbgd\nsrijafyzlbguvxwjtqgphenbkd\nsrijafyzlohuqxwctqmphenbka\nsrijafyzroguvxictqmphynbkd\nsrijafyzloguvxdctjmphenjkd\nsrijaoczloguvxwctqmphenbjd\nsrajafhzloguvxwctqmphenbke\nsrijofyzloduvxwctqmphanbkd\nsrijafytloguvxwmtnmphenbkd\nsrijafyzuoguvxwceqmpgenbkd\nrrijafyzloyuvxwctqmphlnbkd\nsrljafyzloguvxictqmohenbkd\nsrijafyzlogulxwcrqrphenbkd\nsrajafyzloguvxwctqmphanbke\nsrijafyzlhguvxwxtqmpheabkd\nsxijafyzloggwxwctqmphenbkd\nsrijafyultguvxwctqmphinbkd\nsrijafyzloguvtwctqmfhvnbkd\nsrijafwzloruvxwctquphenbkd\nsrbjafyzxoguuxwctqmphenbkd\nerijafyzlxguvxbctqmphenbkd\nsrijagyzlojubxwctqmphenbkd\nsrijafyzloguvxwdtqmchenakd\nsrijafkzlogukxwctqiphenbkd\nmridafyzloguvxwctqmphenrkd\nszqjafyzloguvxwctqmpheibkd\nsrijahyzloguvxwctcmphenekd\nsrijafyzloguvxwczpuphenbkd\nsrijafyzcoguvfwctqmphenbkq\nqriiafyzloguvxwctqmphe
ebkd\nsrijpfyzloguvxlctqmphenokd\nsrijzfyzlotuvxwcjqmphenbkd\nsrinafyqloguvxwctfmphenbkd\nsrijafyzlogjvxpltqmphenbkd\nsrijafyzlotuvxwutqmphenbtd\nsridafyzloguvxwctqmpyenokd\nsrxjafyzqogyvxwctqmphenbkd\nssijafyzzoguvxwctqmphenbad\nsrijafrzloguvxwctqmphekpkd\nsrijafyzlfgrvxactqmphenbkd\nsrijafyzroguvxwttqmphekbkd\nsrijefyzloguvxwctqmpqenbrd\nsrijefycloguvxwctqmchenbkd\nsrzjafyzloguvxwcqqmphanbkd\nsrijauyzlhguvxwctqmphenbgd\nsrijafyzloguvmwvnqmphenbkd\nsrihafyzloguvlwotqmphenbkd\nsrigafyzloguvxwctqmphennsd\nsriuafzzloguvxwcuqmphenbkd\nsrijavuzllguvxwctqmphenbkd\nsrijafjzloguvlnctqmphenbkd\nlrirafyzloguvxwctqmphenbld\nsoijarxzloguvxwctqmphenbkd\nsrijapyzlnguvxwctqmdhenbkd\nsrijafyzkogujxmctqmphenbkd\nsrijafuzloguvxwcsqvphenbkd\nsrijagyzzoguvxwctqmpvenbkd\nsrijafyzlovuvxwctqmrhenbxd\nsrijafyzqoguvxwctwmpienbkd\nsxijafyzloguvxwutqmphenlkd\nsrijafyzlhgzvxwctqmphqnbkd\nsrijajyzloguvxwcbwmphenbkd\nsrijazyzloguvxwhtqmphenbkx\nsrgjafyzloguvvwctqmphdnbkd\nrrivafyzloguvxjctqmphenbkd\nsrijifyzdoguvxwctqmphenbka\nhrijafyzloguvxectqmpheybkd\"\"\"\nstartTime = time.time()\ninputList = list(map(str, inputStr.splitlines()))\nnumRepeatsChar = 0\ndoubleDupes = 0\ntripleDupes = 0\nfor string in inputList:\n hasDoubleDupes = False\n hasTripleDupes = False\n for char in string:\n numRepeatsChar = string.count(char)\n if numRepeatsChar == 2 and not hasDoubleDupes:\n doubleDupes += 1\n hasDoubleDupes = True\n elif numRepeatsChar == 3 and not hasTripleDupes:\n tripleDupes += 1\n hasTripleDupes = True\n elif hasDoubleDupes and hasTripleDupes:\n break\n print(doubleDupes)\n print(tripleDupes)\ncheckSum = doubleDupes * tripleDupes\nprint('Checksum: ' + str(checkSum))\nprint('%s seconds' % (time.time() - startTime))\n",
"step-4": "import time\ninputStr = \"\"\"crruafyzloguvxwctqmphenbkd\nsrcjafyzlcguvrwctqmphenbkd\nsrijafyzlogbpxwctgmphenbkd\nzrijafyzloguvxrctqmphendkd\nsrijabyzloguvowcqqmphenbkd\nsrijafyzsoguvxwctbmpienbkd\nsrirtfyzlognvxwctqmphenbkd\nsrijafyzloguvxwctgmphenbmq\nsenjafyzloguvxectqmphenbkd\nsrijafyeloguvxwwtqmphembkd\nsrijafyzlogurxtctqmpkenbkd\nsrijafyzlkguvxictqhphenbkd\nsrijafgzlogunxwctqophenbkd\nshijabyzloguvxwctqmqhenbkd\nsrjoafyzloguvxwctqmphenbwd\nsrijafyhloguvxwmtqmphenkkd\nsrijadyzlogwvxwctqmphenbed\nbrijafyzloguvmwctqmphenhkd\nsmijafyzlhguvxwctqmphjnbkd\nsriqafvzloguvxwctqmpheebkd\nsrijafyzloguvxwisqmpuenbkd\nmrijakyuloguvxwctqmphenbkd\nsrnfafyzloguvxwctqmphgnbkd\nsrijadyzloguvxwhfqmphenbkd\nsrijafhzloguvxwctdmlhenbkd\nsrijafyzloguvxwcsqmphykbkd\nsrijafyzlogwvxwatqmphhnbkd\nsrijafyzlozqvxwctqmphenbku\nsrijafyzloguvxwcbamphenbgd\nsrijafyzlfguvxwctqmphzybkd\nsrijafyzloguqxwetqmphenkkd\nsrijafyylogubxwttqmphenbkd\nsrijafyzloguvxzctadphenbkd\nsrijafyzloguoxwhtqmchenbkd\nsrijafyzloguvxwcvqmzhenbko\nsrijnfyzloguvxwctqmchenjkd\nsrijaryzloggvxwctqzphenbkd\nsrijafhzleguvxwcxqmphenbkd\nssijafyzllguvxfctqmphenbkd\nsrijafyzloguvxdctqmfhenbcd\nsrijafyzloguvxfctqmplynbkd\nsrijaftzlogavxwcrqmphenbkd\nsriwaoyzloguvxwctqmphenbtd\nsrijahyzlogunxwctqmphenbvd\nsrjjafyzloguzxwctumphenbkd\nnrijafyzlxguvxwctqmphanbkd\nsrijafezlqguyxwctqmphenbkd\nsrijafygloguvxwjtqcphenbkd\nerijafyzloguvxoctqmnhenbkd\nssijafyzllguvxwbtqmphenbkd\nsriaafyzloguvxwctqqphenbkv\nfrijafyzloguvswctwmphenbkd\nsrijafyzyogkvxwctqmprenbkd\nsyijafyzuoguvxwctqmkhenbkd\nsrijafyzloganxwctqmphenbkf\nsrijafyzloguvxwftqmxhenbkq\nsrijafyflogxvxwctqmghenbkd\nsrijafyzsoguvxwctqmpjenwkd\nsrujafylloguvxwctqmphenckd\nsrijafyzlpzuvxwctqmphenbud\nsrijafyzlogfvxwctqmhhenbwd\nsrijafjzlogusxwctqmphepbkd\nsrijlfyzloguvxwctqfphenzkd\nsrijafyzlogwvxwctqyphenbqd\nsrijafyzloluvxwctqtphenukd\nsrizafyzlowuvxwctqmphqnbkd\nsritafkzlkguvxwctqmphenbkd\nsbijafdzloguvxgctqmphenbkd\ncrijafyeloguvxwctqmpsenbkd\nsrijafyvlogulxwctqmphenbkk\
nsrijafyologuvxwctqmehegbkd\nsiijafyzloguvxwctjmphenbmd\nsrijafyzlupuvxwctqmpheabkd\nsrijafyzlogumxwctqqphanbkd\nsrijxfyzlogujxwcqqmphenbkd\nirijafizeoguvxwctqmphenbkd\nsgijafyzloguvtwctqmpfenbkd\nsrijzfyzloguvmwctnmphenbkd\nsrijafyzwohuvxwctqmthenbkd\nsrijafyzlhguvxoctqwphenbkd\nsrgjafyplogxvxwctqmphenbkd\nsrijafyqlogovxwctqzphenbkd\nsrijafjzloguvlnvtqmphenbkd\nsrijafyzooguvxwctqmphenvud\nsrijafyzgoguvxwctumphgnbkd\nsrijaffzloguvxwdqqmphenbkd\nsrijafyzlogugxwctqxphenbkr\nsrijafyzlogutxwctqmmcenbkd\nsrifafyzlhguwxwctqmphenbkd\nmrimajyzloguvxwctqmphenbkd\nsriyafyzloguvxwcthmphejbkd\nsrieakyzlokuvxwctqmphenbkd\nsrisafyzloguhxwctqmphecbkd\nsrijanyzloguvxcctqmxhenbkd\nsrijafyzypguvxwctqmqhenbkd\nsryjtfyzlvguvxwctqmphenbkd\nsrijafyzlsguvxwctqmqfenbkd\nsrijafyzlogudxwbtqwphenbkd\nsrijysyzloguvxwctqmpvenbkd\nsrijafyzloggvxwjtqmphegbkd\nsrijgfyzloguvxwctqmbhdnbkd\nssijufyzloguvawctqmphenbkd\nskojafyzloguvxwctqmphenbnd\nsrijafylloguvxwcqqmpienbkd\ntrioafyzloguvqwctqmphenbkd\nsrijafydloguvxwctqmpzjnbkd\nsaijafvzloguvxwcqqmphenbkd\nsrhjapyzloguvxwctqmbhenbkd\nsrijafyzlfguvxwcsqmpwenbkd\nshijafyzboguvxwctqmphenbmd\nsrizafysloguvxwrtqmphenbkd\nsrijafyzloguvxwciqmwhenbkj\nqrijafyzloduvxwctqmphenbko\nsrijefyuloguvxwctqmphenbed\nsrijafyzlobuvxwctqmphenhbd\nsrijafyzloxuvxwctqmpheabkq\nsrijafyzloguvrwctqmghenkkd\nsfisafywloguvxwctqmphenbkd\nsrgjafyzlogurxwctqmphenbkp\nsrijafhzloguvxwcjqmphenhkd\nsrijafyylogufxwrtqmphenbkd\nsrijafyzvoguvxwzkqmphenbkd\nsqijafyzloguvxwctqmpheqbxd\nsrijafyvloguvxwctqzpherbkd\nsrijufyzloguvxlcsqmphenbkd\nsrijafykloguvxlccqmphenbkd\nsrijafyzloguexwcrqmphenzkd\nsridifyzloguyxwctqmphenbkd\nsrijafyzlogfvxwctqlphenbkl\nsrijafyzlodqdxwctqmphenbkd\nsrijafyzloruvxactqmphenekd\ngrijafyzloguvxpctmmphenbkd\nsrsjakyzloguvxwctqmphvnbkd\nsrikafyvloguvxwrtqmphenbkd\nsrijafyzloguvxwctqjpserbkd\njrijafyzloguvxwctqmpgesbkd\nswijafyzluguvxwctqmfhenbkd\nsrijanynlogovxwctqmphenbkd\njrijafyzloguvxwctymphrnbkd\nsrinafyzloguvewctqmphenbzd\nsrijakyzloguvxwctqmphcnbka\nsrijafyhlob
uvxwctqmphenbka\nsrijafyzcogusxwctqmphwnbkd\nsrijavyzlosuvxwctqmphjnbkd\norijafyzxoguvxwcnqmphenbkd\nsrijafyzlogcvxwvtqmthenbkd\nsrijapyzloauvxwctqmphenvkd\nsrijaflzloguhxwctqmphenbwd\nsmijafyzlonuvxwctqmphenbkw\njrijafyzloguvxwclqmnhenbkd\nsrijaqyzloguvqwctqmphenskd\nsrijasyzloguvxwctqmvhenbku\ncrijtfyzloguvxwctqmthenbkd\nsrrkafyzvoguvxwctqmphenbkd\nsrijatyzloguvewctqmphenbld\nsrfjafyyloguvnwctqmphenbkd\nsrijafyzloguvxwctqjpbenbkt\nhrijafyzooguvxwctqmphenbld\nsrijafbzlogscxwctqmphenbkd\nsrinafyzlogxvxwctqqphenbkd\nslijafyzloglvxwctqmphenbdd\nsrijafyzlogjvxwcsqmphenbld\nsryjcfyzloguvewctqmphenbkd\nsrijafyzloguexwctqmohknbkd\njaijafyzlogevxwctqmphenbkd\nsrijafbzlogavxwctqmphenbki\nsrijafozlogpvxwctqmphgnbkd\nsrijdfyzloguvxwczqmphenbkm\nsrijafyzlobuvxwctqmphxndkd\nmrijifyzlhguvxwctqmphenbkd\nsrijafyzloguvxbctumphjnbkd\nsrijafyzloyuvxwptqmphlnbkd\narijafyzloguvxwcsqmohenbkd\nsrijaftzioguvxwttqmphenbkd\nsrijafyzlqsuvxwctqmphxnbkd\nsrijafyzioguvxwctqnphetbkd\nprijafbzloguvxdctqmphenbkd\nsrijaeyzlnguvxwmtqmphenbkd\nsrijofyzloguvqwctqmphonbkd\nsrixaryzpoguvxwctqmphenbkd\nsrijafyzlowuvxwcwhmphenbkd\nsrijafydloguvxwctqmptenikd\nsrijqfyzlogtvfwctqmphenbkd\nsrijafyzloguvxlctqmpvenbgd\nsrijafyzlbguvxwjtqgphenbkd\nsrijafyzlohuqxwctqmphenbka\nsrijafyzroguvxictqmphynbkd\nsrijafyzloguvxdctjmphenjkd\nsrijaoczloguvxwctqmphenbjd\nsrajafhzloguvxwctqmphenbke\nsrijofyzloduvxwctqmphanbkd\nsrijafytloguvxwmtnmphenbkd\nsrijafyzuoguvxwceqmpgenbkd\nrrijafyzloyuvxwctqmphlnbkd\nsrljafyzloguvxictqmohenbkd\nsrijafyzlogulxwcrqrphenbkd\nsrajafyzloguvxwctqmphanbke\nsrijafyzlhguvxwxtqmpheabkd\nsxijafyzloggwxwctqmphenbkd\nsrijafyultguvxwctqmphinbkd\nsrijafyzloguvtwctqmfhvnbkd\nsrijafwzloruvxwctquphenbkd\nsrbjafyzxoguuxwctqmphenbkd\nerijafyzlxguvxbctqmphenbkd\nsrijagyzlojubxwctqmphenbkd\nsrijafyzloguvxwdtqmchenakd\nsrijafkzlogukxwctqiphenbkd\nmridafyzloguvxwctqmphenrkd\nszqjafyzloguvxwctqmpheibkd\nsrijahyzloguvxwctcmphenekd\nsrijafyzloguvxwczpuphenbkd\nsrijafyzcoguvfwctqmphenbkq\nqriiafyzloguvxwctqmphee
bkd\nsrijpfyzloguvxlctqmphenokd\nsrijzfyzlotuvxwcjqmphenbkd\nsrinafyqloguvxwctfmphenbkd\nsrijafyzlogjvxpltqmphenbkd\nsrijafyzlotuvxwutqmphenbtd\nsridafyzloguvxwctqmpyenokd\nsrxjafyzqogyvxwctqmphenbkd\nssijafyzzoguvxwctqmphenbad\nsrijafrzloguvxwctqmphekpkd\nsrijafyzlfgrvxactqmphenbkd\nsrijafyzroguvxwttqmphekbkd\nsrijefyzloguvxwctqmpqenbrd\nsrijefycloguvxwctqmchenbkd\nsrzjafyzloguvxwcqqmphanbkd\nsrijauyzlhguvxwctqmphenbgd\nsrijafyzloguvmwvnqmphenbkd\nsrihafyzloguvlwotqmphenbkd\nsrigafyzloguvxwctqmphennsd\nsriuafzzloguvxwcuqmphenbkd\nsrijavuzllguvxwctqmphenbkd\nsrijafjzloguvlnctqmphenbkd\nlrirafyzloguvxwctqmphenbld\nsoijarxzloguvxwctqmphenbkd\nsrijapyzlnguvxwctqmdhenbkd\nsrijafyzkogujxmctqmphenbkd\nsrijafuzloguvxwcsqvphenbkd\nsrijagyzzoguvxwctqmpvenbkd\nsrijafyzlovuvxwctqmrhenbxd\nsrijafyzqoguvxwctwmpienbkd\nsxijafyzloguvxwutqmphenlkd\nsrijafyzlhgzvxwctqmphqnbkd\nsrijajyzloguvxwcbwmphenbkd\nsrijazyzloguvxwhtqmphenbkx\nsrgjafyzloguvvwctqmphdnbkd\nrrivafyzloguvxjctqmphenbkd\nsrijifyzdoguvxwctqmphenbka\nhrijafyzloguvxectqmpheybkd\"\"\"\nstartTime = time.time()\ninputList = list(map(str, inputStr.splitlines()))\nnumRepeatsChar = 0\ndoubleDupes = 0\ntripleDupes = 0\nfor string in inputList:\n hasDoubleDupes = False\n hasTripleDupes = False\n for char in string:\n numRepeatsChar = string.count(char)\n if numRepeatsChar == 2 and not hasDoubleDupes:\n doubleDupes += 1\n hasDoubleDupes = True\n elif numRepeatsChar == 3 and not hasTripleDupes:\n tripleDupes += 1\n hasTripleDupes = True\n elif hasDoubleDupes and hasTripleDupes:\n break\n print(doubleDupes)\n print(tripleDupes)\ncheckSum = doubleDupes * tripleDupes\nprint('Checksum: ' + str(checkSum))\nprint('%s seconds' % (time.time() - startTime))\n",
"step-5": "import time\n\ninputStr = \"\"\"crruafyzloguvxwctqmphenbkd\nsrcjafyzlcguvrwctqmphenbkd\nsrijafyzlogbpxwctgmphenbkd\nzrijafyzloguvxrctqmphendkd\nsrijabyzloguvowcqqmphenbkd\nsrijafyzsoguvxwctbmpienbkd\nsrirtfyzlognvxwctqmphenbkd\nsrijafyzloguvxwctgmphenbmq\nsenjafyzloguvxectqmphenbkd\nsrijafyeloguvxwwtqmphembkd\nsrijafyzlogurxtctqmpkenbkd\nsrijafyzlkguvxictqhphenbkd\nsrijafgzlogunxwctqophenbkd\nshijabyzloguvxwctqmqhenbkd\nsrjoafyzloguvxwctqmphenbwd\nsrijafyhloguvxwmtqmphenkkd\nsrijadyzlogwvxwctqmphenbed\nbrijafyzloguvmwctqmphenhkd\nsmijafyzlhguvxwctqmphjnbkd\nsriqafvzloguvxwctqmpheebkd\nsrijafyzloguvxwisqmpuenbkd\nmrijakyuloguvxwctqmphenbkd\nsrnfafyzloguvxwctqmphgnbkd\nsrijadyzloguvxwhfqmphenbkd\nsrijafhzloguvxwctdmlhenbkd\nsrijafyzloguvxwcsqmphykbkd\nsrijafyzlogwvxwatqmphhnbkd\nsrijafyzlozqvxwctqmphenbku\nsrijafyzloguvxwcbamphenbgd\nsrijafyzlfguvxwctqmphzybkd\nsrijafyzloguqxwetqmphenkkd\nsrijafyylogubxwttqmphenbkd\nsrijafyzloguvxzctadphenbkd\nsrijafyzloguoxwhtqmchenbkd\nsrijafyzloguvxwcvqmzhenbko\nsrijnfyzloguvxwctqmchenjkd\nsrijaryzloggvxwctqzphenbkd\nsrijafhzleguvxwcxqmphenbkd\nssijafyzllguvxfctqmphenbkd\nsrijafyzloguvxdctqmfhenbcd\nsrijafyzloguvxfctqmplynbkd\nsrijaftzlogavxwcrqmphenbkd\nsriwaoyzloguvxwctqmphenbtd\nsrijahyzlogunxwctqmphenbvd\nsrjjafyzloguzxwctumphenbkd\nnrijafyzlxguvxwctqmphanbkd\nsrijafezlqguyxwctqmphenbkd\nsrijafygloguvxwjtqcphenbkd\nerijafyzloguvxoctqmnhenbkd\nssijafyzllguvxwbtqmphenbkd\nsriaafyzloguvxwctqqphenbkv\nfrijafyzloguvswctwmphenbkd\nsrijafyzyogkvxwctqmprenbkd\nsyijafyzuoguvxwctqmkhenbkd\nsrijafyzloganxwctqmphenbkf\nsrijafyzloguvxwftqmxhenbkq\nsrijafyflogxvxwctqmghenbkd\nsrijafyzsoguvxwctqmpjenwkd\nsrujafylloguvxwctqmphenckd\nsrijafyzlpzuvxwctqmphenbud\nsrijafyzlogfvxwctqmhhenbwd\nsrijafjzlogusxwctqmphepbkd\nsrijlfyzloguvxwctqfphenzkd\nsrijafyzlogwvxwctqyphenbqd\nsrijafyzloluvxwctqtphenukd\nsrizafyzlowuvxwctqmphqnbkd\nsritafkzlkguvxwctqmphenbkd\nsbijafdzloguvxgctqmphenbkd\ncrijafyeloguvxwctqmpsenbkd\nsrijafyvlogulxwctqmphenbk
k\nsrijafyologuvxwctqmehegbkd\nsiijafyzloguvxwctjmphenbmd\nsrijafyzlupuvxwctqmpheabkd\nsrijafyzlogumxwctqqphanbkd\nsrijxfyzlogujxwcqqmphenbkd\nirijafizeoguvxwctqmphenbkd\nsgijafyzloguvtwctqmpfenbkd\nsrijzfyzloguvmwctnmphenbkd\nsrijafyzwohuvxwctqmthenbkd\nsrijafyzlhguvxoctqwphenbkd\nsrgjafyplogxvxwctqmphenbkd\nsrijafyqlogovxwctqzphenbkd\nsrijafjzloguvlnvtqmphenbkd\nsrijafyzooguvxwctqmphenvud\nsrijafyzgoguvxwctumphgnbkd\nsrijaffzloguvxwdqqmphenbkd\nsrijafyzlogugxwctqxphenbkr\nsrijafyzlogutxwctqmmcenbkd\nsrifafyzlhguwxwctqmphenbkd\nmrimajyzloguvxwctqmphenbkd\nsriyafyzloguvxwcthmphejbkd\nsrieakyzlokuvxwctqmphenbkd\nsrisafyzloguhxwctqmphecbkd\nsrijanyzloguvxcctqmxhenbkd\nsrijafyzypguvxwctqmqhenbkd\nsryjtfyzlvguvxwctqmphenbkd\nsrijafyzlsguvxwctqmqfenbkd\nsrijafyzlogudxwbtqwphenbkd\nsrijysyzloguvxwctqmpvenbkd\nsrijafyzloggvxwjtqmphegbkd\nsrijgfyzloguvxwctqmbhdnbkd\nssijufyzloguvawctqmphenbkd\nskojafyzloguvxwctqmphenbnd\nsrijafylloguvxwcqqmpienbkd\ntrioafyzloguvqwctqmphenbkd\nsrijafydloguvxwctqmpzjnbkd\nsaijafvzloguvxwcqqmphenbkd\nsrhjapyzloguvxwctqmbhenbkd\nsrijafyzlfguvxwcsqmpwenbkd\nshijafyzboguvxwctqmphenbmd\nsrizafysloguvxwrtqmphenbkd\nsrijafyzloguvxwciqmwhenbkj\nqrijafyzloduvxwctqmphenbko\nsrijefyuloguvxwctqmphenbed\nsrijafyzlobuvxwctqmphenhbd\nsrijafyzloxuvxwctqmpheabkq\nsrijafyzloguvrwctqmghenkkd\nsfisafywloguvxwctqmphenbkd\nsrgjafyzlogurxwctqmphenbkp\nsrijafhzloguvxwcjqmphenhkd\nsrijafyylogufxwrtqmphenbkd\nsrijafyzvoguvxwzkqmphenbkd\nsqijafyzloguvxwctqmpheqbxd\nsrijafyvloguvxwctqzpherbkd\nsrijufyzloguvxlcsqmphenbkd\nsrijafykloguvxlccqmphenbkd\nsrijafyzloguexwcrqmphenzkd\nsridifyzloguyxwctqmphenbkd\nsrijafyzlogfvxwctqlphenbkl\nsrijafyzlodqdxwctqmphenbkd\nsrijafyzloruvxactqmphenekd\ngrijafyzloguvxpctmmphenbkd\nsrsjakyzloguvxwctqmphvnbkd\nsrikafyvloguvxwrtqmphenbkd\nsrijafyzloguvxwctqjpserbkd\njrijafyzloguvxwctqmpgesbkd\nswijafyzluguvxwctqmfhenbkd\nsrijanynlogovxwctqmphenbkd\njrijafyzloguvxwctymphrnbkd\nsrinafyzloguvewctqmphenbzd\nsrijakyzloguvxwctqmphcnbka\nsrijafyhl
obuvxwctqmphenbka\nsrijafyzcogusxwctqmphwnbkd\nsrijavyzlosuvxwctqmphjnbkd\norijafyzxoguvxwcnqmphenbkd\nsrijafyzlogcvxwvtqmthenbkd\nsrijapyzloauvxwctqmphenvkd\nsrijaflzloguhxwctqmphenbwd\nsmijafyzlonuvxwctqmphenbkw\njrijafyzloguvxwclqmnhenbkd\nsrijaqyzloguvqwctqmphenskd\nsrijasyzloguvxwctqmvhenbku\ncrijtfyzloguvxwctqmthenbkd\nsrrkafyzvoguvxwctqmphenbkd\nsrijatyzloguvewctqmphenbld\nsrfjafyyloguvnwctqmphenbkd\nsrijafyzloguvxwctqjpbenbkt\nhrijafyzooguvxwctqmphenbld\nsrijafbzlogscxwctqmphenbkd\nsrinafyzlogxvxwctqqphenbkd\nslijafyzloglvxwctqmphenbdd\nsrijafyzlogjvxwcsqmphenbld\nsryjcfyzloguvewctqmphenbkd\nsrijafyzloguexwctqmohknbkd\njaijafyzlogevxwctqmphenbkd\nsrijafbzlogavxwctqmphenbki\nsrijafozlogpvxwctqmphgnbkd\nsrijdfyzloguvxwczqmphenbkm\nsrijafyzlobuvxwctqmphxndkd\nmrijifyzlhguvxwctqmphenbkd\nsrijafyzloguvxbctumphjnbkd\nsrijafyzloyuvxwptqmphlnbkd\narijafyzloguvxwcsqmohenbkd\nsrijaftzioguvxwttqmphenbkd\nsrijafyzlqsuvxwctqmphxnbkd\nsrijafyzioguvxwctqnphetbkd\nprijafbzloguvxdctqmphenbkd\nsrijaeyzlnguvxwmtqmphenbkd\nsrijofyzloguvqwctqmphonbkd\nsrixaryzpoguvxwctqmphenbkd\nsrijafyzlowuvxwcwhmphenbkd\nsrijafydloguvxwctqmptenikd\nsrijqfyzlogtvfwctqmphenbkd\nsrijafyzloguvxlctqmpvenbgd\nsrijafyzlbguvxwjtqgphenbkd\nsrijafyzlohuqxwctqmphenbka\nsrijafyzroguvxictqmphynbkd\nsrijafyzloguvxdctjmphenjkd\nsrijaoczloguvxwctqmphenbjd\nsrajafhzloguvxwctqmphenbke\nsrijofyzloduvxwctqmphanbkd\nsrijafytloguvxwmtnmphenbkd\nsrijafyzuoguvxwceqmpgenbkd\nrrijafyzloyuvxwctqmphlnbkd\nsrljafyzloguvxictqmohenbkd\nsrijafyzlogulxwcrqrphenbkd\nsrajafyzloguvxwctqmphanbke\nsrijafyzlhguvxwxtqmpheabkd\nsxijafyzloggwxwctqmphenbkd\nsrijafyultguvxwctqmphinbkd\nsrijafyzloguvtwctqmfhvnbkd\nsrijafwzloruvxwctquphenbkd\nsrbjafyzxoguuxwctqmphenbkd\nerijafyzlxguvxbctqmphenbkd\nsrijagyzlojubxwctqmphenbkd\nsrijafyzloguvxwdtqmchenakd\nsrijafkzlogukxwctqiphenbkd\nmridafyzloguvxwctqmphenrkd\nszqjafyzloguvxwctqmpheibkd\nsrijahyzloguvxwctcmphenekd\nsrijafyzloguvxwczpuphenbkd\nsrijafyzcoguvfwctqmphenbkq\nqriiafyzloguvxwctqmph
eebkd\nsrijpfyzloguvxlctqmphenokd\nsrijzfyzlotuvxwcjqmphenbkd\nsrinafyqloguvxwctfmphenbkd\nsrijafyzlogjvxpltqmphenbkd\nsrijafyzlotuvxwutqmphenbtd\nsridafyzloguvxwctqmpyenokd\nsrxjafyzqogyvxwctqmphenbkd\nssijafyzzoguvxwctqmphenbad\nsrijafrzloguvxwctqmphekpkd\nsrijafyzlfgrvxactqmphenbkd\nsrijafyzroguvxwttqmphekbkd\nsrijefyzloguvxwctqmpqenbrd\nsrijefycloguvxwctqmchenbkd\nsrzjafyzloguvxwcqqmphanbkd\nsrijauyzlhguvxwctqmphenbgd\nsrijafyzloguvmwvnqmphenbkd\nsrihafyzloguvlwotqmphenbkd\nsrigafyzloguvxwctqmphennsd\nsriuafzzloguvxwcuqmphenbkd\nsrijavuzllguvxwctqmphenbkd\nsrijafjzloguvlnctqmphenbkd\nlrirafyzloguvxwctqmphenbld\nsoijarxzloguvxwctqmphenbkd\nsrijapyzlnguvxwctqmdhenbkd\nsrijafyzkogujxmctqmphenbkd\nsrijafuzloguvxwcsqvphenbkd\nsrijagyzzoguvxwctqmpvenbkd\nsrijafyzlovuvxwctqmrhenbxd\nsrijafyzqoguvxwctwmpienbkd\nsxijafyzloguvxwutqmphenlkd\nsrijafyzlhgzvxwctqmphqnbkd\nsrijajyzloguvxwcbwmphenbkd\nsrijazyzloguvxwhtqmphenbkx\nsrgjafyzloguvvwctqmphdnbkd\nrrivafyzloguvxjctqmphenbkd\nsrijifyzdoguvxwctqmphenbka\nhrijafyzloguvxectqmpheybkd\"\"\"\n\nstartTime = time.time()\ninputList = list(map(str, inputStr.splitlines()))\n\nnumRepeatsChar = 0\ndoubleDupes = 0\ntripleDupes = 0\n\nfor string in inputList:\n hasDoubleDupes = False\n hasTripleDupes = False\n for char in string:\n numRepeatsChar = string.count(char)\n if numRepeatsChar == 2 and not hasDoubleDupes:\n doubleDupes += 1\n hasDoubleDupes = True\n \n elif numRepeatsChar == 3 and not hasTripleDupes:\n tripleDupes += 1\n hasTripleDupes = True\n \n elif hasDoubleDupes and hasTripleDupes:\n break\n\n print(doubleDupes)\n print(tripleDupes)\n\ncheckSum = doubleDupes * tripleDupes\nprint('Checksum: ' + str(checkSum))\n\nprint(\"%s seconds\" % (time.time() - startTime))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Default UDP ports each end binds for listening (see NetCommon.__init__,
# which binds the given port on all interfaces).
DEFAULT_SERVER_LISTEN_PORT = 2011
DEFAULT_CLIENT_LISTEN_PORT = 2012
import pickle
import socket
from player import Player
from averageddata import *
import zlib
import g
import pygame
from collections import defaultdict
from periodic import Periodic
import random
from projectile import Projectile
TICKTIME = 0.05
class NetCommon:
	"""Shared UDP networking base for the game's client and server.

	Responsibilities visible here: a polling (10ms timeout) UDP socket,
	per-peer packet statistics, a reliable in-order "ensured" delivery
	layer (sequence ids + receipts + periodic resends), optional simulated
	latency/packet loss for local testing, and dispatch of incoming packet
	dicts to process_<type>() handler methods by their "type" field.
	"""
	# Maps a packet "type" string to the entity class it represents.
	# NOTE(review): not referenced inside this class -- presumably used by
	# subclasses; confirm before removing.
	netEntities = { "player": Player, "projectile":Projectile }
	def __init__(self, listenPort):
		#Make a UDP socket
		self.sock = socket.socket( socket.AF_INET, socket.SOCK_DGRAM )
		self.sock.bind( ("0.0.0.0", listenPort) )
		# 10ms timeout: update() polls the socket once per frame instead of
		# blocking the game loop.
		self.sock.settimeout(0.01)
		self.packetSize = 0  # bytes received over the last 1s window (see update())
		self.t = 0  # current time in seconds, refreshed from pygame ticks
		self.buf = ""
		self.packetTimestamps = []
		self.packetsPerSecond = 0
		# Artificial network conditions for local testing (0 = disabled).
		self.simulatedLatency = 0
		self.simulatedRandomLatencyVal = 0
		self.simulatedPacketloss = 0
		self.simulatedRandomLatency = 0
		self.simulatedPackets = []  # held-back (packet, delay_left, info) tuples
		# Per-peer ("addr:port") sequence counters used to estimate loss.
		self.packet_outbound_last_id = defaultdict(lambda:0)
		self.packet_inbound_last_id = defaultdict(lambda:0)
		self.packetloss = defaultdict(lambda:0)
		# Reliable ("ensured") delivery state, keyed by "addr:port".
		self.ensured_send_packet_ids = defaultdict(lambda:0)  # next outbound ensured id
		self.ensured_sent_packets = defaultdict(dict)  # unreceipted packets, by ensured id
		self.ensured_recv_packet_ids = defaultdict(lambda:-1)  # last inbound ensured id accepted
		self.ensured_packets_received_early = defaultdict(list)  # out-of-order arrivals awaiting a gap fill
		self.resend_unconfirmed_timer = 0
		self.averagedData = AveragedData()
		self.netinfotimer = 1.0
		self.debug_lines = []
		# Retry unreceipted ensured packets every 0.5s (see resendUnconfirmed).
		self.periodic = Periodic()
		self.periodic.add(self.resendUnconfirmed, 0.5)
	def readPacket(self, info, data):
		"""Decompress/unpickle one datagram; return the list of packets to process.

		For ensured packets this enforces in-order delivery: the expected id
		is accepted and receipted, duplicates are re-receipted and dropped,
		and packets arriving ahead of a gap are queued until the gap fills.
		NOTE(review): pickle.loads on network data is unsafe against
		untrusted peers.
		"""
		self.averagedData.add(self.t, "packets")
		self.averagedData.add(self.t, "packetsize", len(data))
		unpacked = pickle.loads(zlib.decompress(data))
		addr, port = info
		addrportstr = addr + ":" + str(port)
		if "ensured_id" in unpacked:
			if unpacked["ensured_id"] == self.ensured_recv_packet_ids[addrportstr]+1:
				# Exactly the id we were waiting for: accept and confirm it.
				print "recv " + str(unpacked["ensured_id"])
				self.ensured_recv_packet_ids[addrportstr] += 1
				self.sendReceipt(addr, port, unpacked["ensured_id"])
			elif unpacked["ensured_id"] < self.ensured_recv_packet_ids[addrportstr]+1:
				# Duplicate (our earlier receipt was probably lost): re-confirm, drop.
				print unpacked
				print "got ensured packet twice; resending receipt for " + str(unpacked["ensured_id"])
				self.sendReceipt(addr, port, unpacked["ensured_id"])
				return []
			else:
				# Arrived ahead of a missing packet: queue until the gap fills.
				print "got packet " + str(unpacked["ensured_id"]) + " before " + str(self.ensured_recv_packet_ids[addrportstr]+1)
				self.ensured_packets_received_early[addrportstr].append(unpacked)
				return []
		# Drain any queued early packets that are now in sequence, in id order.
		allPackets = []
		to_remove = []
		self.ensured_packets_received_early[addrportstr].sort(lambda a,b:cmp(a["ensured_id"], b["ensured_id"]))
		for p in self.ensured_packets_received_early[addrportstr]:
			print "resolving old " + str(p["ensured_id"])
			if p["ensured_id"] <= self.ensured_recv_packet_ids[addrportstr]+1:
				self.ensured_recv_packet_ids[addrportstr] += 1
				self.sendReceipt(addr, port, p["ensured_id"])
				allPackets.extend(self.readUnpackedPacket(p, addrportstr))
				to_remove.append(p)
		for p in to_remove:
			self.ensured_packets_received_early[addrportstr].remove(p)
		allPackets.extend(self.readUnpackedPacket(unpacked, addrportstr))
		return allPackets
	def sendReceipt(self, addr, port, q):
		"""Acknowledge ensured packet id `q` back to the peer."""
		self.sendPacket({"type":"confirmReceipt","other_ensured_id":q}, addr, port)
	def readUnpackedPacket(self, unpacked, addrportstr):
		"""Record per-peer loss statistics for one packet; returns [unpacked]."""
		pid = unpacked["packet_id"]
		lid = self.packet_inbound_last_id[addrportstr]
		if pid > lid + 1:
			# A gap in the sequence numbers means a packet was lost (or
			# reordered) on the way here.
			self.packetloss[addrportstr] += 1
		self.packet_inbound_last_id[addrportstr] = pid
		if self.packet_inbound_last_id[addrportstr] > 0:
			# Loss ratio = dropped / highest id seen so far.
			packetloss = self.packetloss[addrportstr] / float(self.packet_inbound_last_id[addrportstr])
			self.averagedData.add(self.t, "packetloss_" + addrportstr, packetloss)
		return [unpacked]
	def sendPacket(self, data, addr, port):
		"""Send `data` (a dict with a "type" key) unreliably to addr:port.

		Tags the dict with a per-peer sequence id, then pickles and
		zlib-compresses it before transmission.
		"""
		print "packet: " + data["type"]
		addrportstr = addr + ":" + str(port)
		data["packet_id"] = self.packet_outbound_last_id[addrportstr]
		self.packet_outbound_last_id[addrportstr] += 1
		self.sock.sendto(zlib.compress(pickle.dumps(data, 2)), (addr, port))
	def sendEnsuredPacket(self, data, addr, port):
		"""Send `data` reliably: assign an ensured id and keep a copy for resends.

		The compressed bytes are stored in ensured_sent_packets until the
		peer's receipt (process_confirmReceipt) removes them;
		resendUnconfirmed retries the stored bytes after 1.5 seconds.
		"""
		addrportstr = addr + ":" + str(port)
		ensured_id = self.ensured_send_packet_ids[addrportstr]
		print "packet: " + data["type"] + " (ensured id: " + str(ensured_id) + ")"
		data["packet_id"] = self.packet_outbound_last_id[addrportstr]
		self.packet_outbound_last_id[addrportstr] += 1
		data["ensured_id"] = ensured_id
		cdata = zlib.compress(pickle.dumps(data, 2))
		# Remember the compressed bytes so a resend is byte-identical.
		sent = {
			"id":ensured_id,
			"data":cdata,
			"time":self.t,
			"info":(addr,port)
		}
		self.ensured_sent_packets[addrportstr][ensured_id] = sent
		self.sock.sendto(cdata, (addr, port))
		self.ensured_send_packet_ids[addrportstr] = ensured_id + 1
	def process_confirmReceipt(self, data, game, info):
		"""Handle a peer's receipt: stop resending the confirmed ensured packet."""
		(addr, port) = info
		addrportstr = addr + ":" + str(port)
		pending_packets = self.ensured_sent_packets[addrportstr]
		pid = data["other_ensured_id"]
		print "got receipt for " + str(pid)
		if pid in pending_packets:
			del pending_packets[pid]
		else:
			# NOTE(review): this compares an int against the defaultdict object
			# itself -- looks like it was meant to be
			# self.ensured_send_packet_ids[addrportstr]; verify intent.
			if pid > self.ensured_send_packet_ids:
				print "got receipt for packet i haven't sent yet!!"
	def update(self, game, dt):
		"""Per-frame pump: poll the socket once and dispatch received packets.

		When simulated latency is enabled, new packets are queued and only
		processed after their artificial delay has counted down.
		"""
		self.game = game
		self.t = pygame.time.get_ticks() / 1000.0
		self.periodic.update()
		# Refresh the 1-second rolling traffic statistics.
		self.packetsPerSecond = self.averagedData.get_ct(self.t, "packets", 1.0)
		self.packetSize = self.averagedData.get_sum(self.t, "packetsize", 1.0)
		allPackets = []
		try:
			(data, info) = self.sock.recvfrom(4096)
			#self.packetSize = len(data)
			if self.simulatedPacketloss > 0 and random.random() < self.simulatedPacketloss:
				# Pretend this datagram never arrived.
				pass
			else:
				allPackets = self.readPacket(info, data)
		except socket.timeout:
			# Nothing waiting on the socket this frame.
			pass
		except socket.error as err:
			#print err
			pass
		#print self.simulatedPackets
		if self.simulatedLatency == 0:
			for d in allPackets:
				self.process(d, game, info)
		else:
			# Hold new packets back by a fixed + random artificial delay...
			off = self.simulatedLatency + self.simulatedRandomLatency * random.random()
			self.simulatedPackets.extend( [(d, off, info) for d in allPackets] )
			# ...process the ones whose delay has run out, keep the rest,
			# and count everything still queued down by this frame's dt.
			thisFramePackets = [ s for s in self.simulatedPackets if s[1] <= 0]
			self.simulatedPackets = [ s for s in self.simulatedPackets if s[1] > 0 ]
			for (p, t, info) in thisFramePackets:
				self.process(p, game, info)
			self.simulatedPackets = [ (s[0], s[1] - dt, s[2]) for s in self.simulatedPackets ]
	def resendUnconfirmed(self):
		"""Periodic callback: re-send ensured packets unreceipted for >1.5s."""
		for k,packets in self.ensured_sent_packets.items():
			for i,packet in packets.items():
				if self.t > packet["time"] + 1.5:
					print "resending unreceipted packet: " + str(packet["id"])
					self.sock.sendto(packet["data"], packet["info"])
	def process(self, data, game, info):
		"""Dispatch a packet dict to self.process_<type>(data, game, info)."""
		if(hasattr(self, "process_" + data["type"])):
			f = getattr(self, "process_" + data["type"])
			f(data, game, info)
		else:
			print("Got packet of type '" + data["type"] + "' but there is no process_" + data["type"] + " method to handle it." )
|
normal
|
{
"blob_id": "b7be9fd366d03068a5d6c3cee703d579b9866fd3",
"index": 7992,
"step-1": "DEFAULT_SERVER_LISTEN_PORT = 2011\nDEFAULT_CLIENT_LISTEN_PORT = 2012\n\nimport pickle\nimport socket\nfrom player import Player\nfrom averageddata import *\nimport zlib\nimport g\nimport pygame\nfrom collections import defaultdict\nfrom periodic import Periodic\nimport random\nfrom projectile import Projectile\n\nTICKTIME = 0.05\n\nclass NetCommon:\n\tnetEntities = { \"player\": Player, \"projectile\":Projectile }\n\tdef __init__(self, listenPort):\n\t\t#Make a UDP socket\n\t\tself.sock = socket.socket( socket.AF_INET, socket.SOCK_DGRAM )\n\t\tself.sock.bind( (\"0.0.0.0\", listenPort) )\n\t\tself.sock.settimeout(0.01)\n\t\tself.packetSize = 0\n\t\tself.t = 0\n\t\t\n\t\tself.buf = \"\"\n\n\t\tself.packetTimestamps = []\n\t\tself.packetsPerSecond = 0\n\t\t\n\t\tself.simulatedLatency = 0\n\t\tself.simulatedRandomLatencyVal = 0\n\t\tself.simulatedPacketloss = 0\n\n\t\tself.simulatedRandomLatency = 0\n\t\tself.simulatedPackets = []\n\n\t\tself.packet_outbound_last_id = defaultdict(lambda:0)\n\t\tself.packet_inbound_last_id = defaultdict(lambda:0)\n\t\tself.packetloss = defaultdict(lambda:0)\n\n\t\tself.ensured_send_packet_ids = defaultdict(lambda:0)\n\t\tself.ensured_sent_packets = defaultdict(dict)\n\t\tself.ensured_recv_packet_ids = defaultdict(lambda:-1)\n\t\tself.ensured_packets_received_early = defaultdict(list)\n\t\tself.resend_unconfirmed_timer = 0\n\n\t\tself.averagedData = AveragedData()\n\n\t\tself.netinfotimer = 1.0\n\n\t\tself.debug_lines = []\n\t\tself.periodic = Periodic()\n\t\tself.periodic.add(self.resendUnconfirmed, 0.5)\n\n\n\tdef readPacket(self, info, data):\n\t\tself.averagedData.add(self.t, \"packets\")\n\t\tself.averagedData.add(self.t, \"packetsize\", len(data))\n\t\tunpacked = pickle.loads(zlib.decompress(data))\n\n\t\taddr, port = info\n\t\taddrportstr = addr + \":\" + str(port)\n\n\t\tif \"ensured_id\" in unpacked:\n\t\t\tif unpacked[\"ensured_id\"] == self.ensured_recv_packet_ids[addrportstr]+1:\n\t\t\t\tprint \"recv \" + 
str(unpacked[\"ensured_id\"])\n\t\t\t\tself.ensured_recv_packet_ids[addrportstr] += 1\n\t\t\t\tself.sendReceipt(addr, port, unpacked[\"ensured_id\"])\n\t\t\telif unpacked[\"ensured_id\"] < self.ensured_recv_packet_ids[addrportstr]+1:\n\t\t\t\tprint unpacked\n\t\t\t\tprint \"got ensured packet twice; resending receipt for \" + str(unpacked[\"ensured_id\"])\n\t\t\t\tself.sendReceipt(addr, port, unpacked[\"ensured_id\"])\n\t\t\t\treturn []\n\t\t\telse:\n\t\t\t\tprint \"got packet \" + str(unpacked[\"ensured_id\"]) + \" before \" + str(self.ensured_recv_packet_ids[addrportstr]+1)\n\t\t\t\tself.ensured_packets_received_early[addrportstr].append(unpacked)\n\t\t\t\treturn []\n\n\t\tallPackets = []\n\t\tto_remove = []\n\t\tself.ensured_packets_received_early[addrportstr].sort(lambda a,b:cmp(a[\"ensured_id\"], b[\"ensured_id\"]))\n\t\tfor p in self.ensured_packets_received_early[addrportstr]:\n\t\t\tprint \"resolving old \" + str(p[\"ensured_id\"])\n\t\t\tif p[\"ensured_id\"] <= self.ensured_recv_packet_ids[addrportstr]+1:\n\t\t\t\tself.ensured_recv_packet_ids[addrportstr] += 1\n\t\t\t\tself.sendReceipt(addr, port, p[\"ensured_id\"])\n\t\t\t\tallPackets.extend(self.readUnpackedPacket(p, addrportstr))\n\t\t\t\tto_remove.append(p)\n\t\tfor p in to_remove:\n\t\t\tself.ensured_packets_received_early[addrportstr].remove(p)\n\n\t\tallPackets.extend(self.readUnpackedPacket(unpacked, addrportstr))\n\t\treturn allPackets\n\n\tdef sendReceipt(self, addr, port, q):\n\t\tself.sendPacket({\"type\":\"confirmReceipt\",\"other_ensured_id\":q}, addr, port)\n\n\tdef readUnpackedPacket(self, unpacked, addrportstr):\n\t\tpid = unpacked[\"packet_id\"]\n\t\tlid = self.packet_inbound_last_id[addrportstr]\n\t\tif pid > lid + 1:\n\t\t\tself.packetloss[addrportstr] += 1\n\t\tself.packet_inbound_last_id[addrportstr] = pid\n\n\t\tif self.packet_inbound_last_id[addrportstr] > 0:\n\t\t\tpacketloss = self.packetloss[addrportstr] / 
float(self.packet_inbound_last_id[addrportstr])\n\t\t\tself.averagedData.add(self.t, \"packetloss_\" + addrportstr, packetloss)\n\n\t\treturn [unpacked]\n\n\tdef sendPacket(self, data, addr, port):\n\t\tprint \"packet: \" + data[\"type\"]\n\t\taddrportstr = addr + \":\" + str(port)\n\t\tdata[\"packet_id\"] = self.packet_outbound_last_id[addrportstr]\n\t\tself.packet_outbound_last_id[addrportstr] += 1\n\t\tself.sock.sendto(zlib.compress(pickle.dumps(data, 2)), (addr, port))\n\n\tdef sendEnsuredPacket(self, data, addr, port):\n\t\taddrportstr = addr + \":\" + str(port)\t\t\n\t\tensured_id = self.ensured_send_packet_ids[addrportstr]\n\t\tprint \"packet: \" + data[\"type\"] + \" (ensured id: \" + str(ensured_id) + \")\"\n\t\tdata[\"packet_id\"] = self.packet_outbound_last_id[addrportstr]\n\t\tself.packet_outbound_last_id[addrportstr] += 1\t\t\n\t\tdata[\"ensured_id\"] = ensured_id\n\t\tcdata = zlib.compress(pickle.dumps(data, 2))\n\t\tsent = {\n\t\t\t\"id\":ensured_id,\n\t\t\t\"data\":cdata,\n\t\t\t\"time\":self.t,\n\t\t\t\"info\":(addr,port)\n\t\t}\n\t\tself.ensured_sent_packets[addrportstr][ensured_id] = sent\n\t\tself.sock.sendto(cdata, (addr, port))\n\t\tself.ensured_send_packet_ids[addrportstr] = ensured_id + 1\n\n\tdef process_confirmReceipt(self, data, game, info):\n\t\t(addr, port) = info\n\t\taddrportstr = addr + \":\" + str(port)\n\t\tpending_packets = self.ensured_sent_packets[addrportstr]\n\t\tpid = data[\"other_ensured_id\"]\n\t\tprint \"got receipt for \" + str(pid)\n\t\tif pid in pending_packets:\n\t\t\tdel pending_packets[pid]\n\t\telse:\n\t\t\tif pid > self.ensured_send_packet_ids:\n\t\t\t\tprint \"got receipt for packet i haven't sent yet!!\"\n\n\tdef update(self, game, dt):\n\t\tself.game = game\n\t\t\n\t\tself.t = pygame.time.get_ticks() / 1000.0\n\t\tself.periodic.update()\n\n\t\tself.packetsPerSecond = self.averagedData.get_ct(self.t, \"packets\", 1.0)\n\t\tself.packetSize = self.averagedData.get_sum(self.t, \"packetsize\", 1.0)\n\n\t\tallPackets 
= []\n\t\ttry:\n\t\t\t(data, info) = self.sock.recvfrom(4096)\n\t\t\t#self.packetSize = len(data)\n\t\t\tif self.simulatedPacketloss > 0 and random.random() < self.simulatedPacketloss:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tallPackets = self.readPacket(info, data)\n\t\texcept socket.timeout:\n\t\t\tpass\n\t\texcept socket.error as err:\n\t\t\t#print err\n\t\t\tpass\n\n\t\t#print self.simulatedPackets\n\t\tif self.simulatedLatency == 0:\n\t\t\tfor d in allPackets:\n\t\t\t\tself.process(d, game, info)\n\t\telse:\n\t\t\toff = self.simulatedLatency + self.simulatedRandomLatency * random.random()\n\t\t\tself.simulatedPackets.extend( [(d, off, info) for d in allPackets] )\n\t\t\tthisFramePackets = [ s for s in self.simulatedPackets if s[1] <= 0]\n\t\t\tself.simulatedPackets = [ s for s in self.simulatedPackets if s[1] > 0 ]\n\t\t\tfor (p, t, info) in thisFramePackets:\n\t\t\t\tself.process(p, game, info)\n\t\t\tself.simulatedPackets = [ (s[0], s[1] - dt, s[2]) for s in self.simulatedPackets ]\n\n\n\tdef resendUnconfirmed(self):\n\t\tfor k,packets in self.ensured_sent_packets.items():\n\t\t\tfor i,packet in packets.items():\n\t\t\t\tif self.t > packet[\"time\"] + 1.5:\n\t\t\t\t\tprint \"resending unreceipted packet: \" + str(packet[\"id\"])\n\t\t\t\t\tself.sock.sendto(packet[\"data\"], packet[\"info\"])\t\t\n\n\tdef process(self, data, game, info):\n\t\tif(hasattr(self, \"process_\" + data[\"type\"])):\n\t\t\tf = getattr(self, \"process_\" + data[\"type\"])\n\t\t\tf(data, game, info)\n\t\telse:\n\t\t\tprint(\"Got packet of type '\" + data[\"type\"] + \"' but there is no process_\" + data[\"type\"] + \" method to handle it.\" )\n\t\t\t",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from db import do_command, do_command_no_return, do_insert
def get_grocery(upc):
    """Look up a grocery by UPC.

    Returns a dict with a 'success' bool; when a row is found it also
    carries the first matching row under 'grocery'.
    """
    rows = do_command("SELECT name FROM grocery WHERE upc = ?", [upc])
    if not rows:
        return {'success': False}
    return {'success': True, 'grocery': rows[0]}
def grocery_input(upc, name):
    """Insert a grocery row for (name, upc); returns the insert result."""
    return do_insert("INSERT INTO grocery (name, upc) VALUES (?, ?)", [name, upc])
def get_grocery_id(upc):
    """Return the id of the grocery with this UPC, or -1 when absent."""
    rows = do_command("SELECT id FROM grocery WHERE upc = ?", [upc])
    return rows[0]['id'] if rows else -1
def get_grocery_name(upc):
    """Return the name row of the grocery with the given UPC.

    Args:
        upc: UPC barcode identifying the grocery.

    Returns:
        The first row of the query result.

    Raises:
        IndexError: if no grocery with this UPC exists.
    """
    cmd = "SELECT name FROM grocery WHERE upc = ?"
    # Bug fix: this previously called do_command((cmd, [upc])), passing a
    # single tuple instead of the query string and the parameter list as
    # separate arguments like every other helper in this module.
    rtVal = do_command(cmd, [upc])
    return rtVal[0]
def grocery_exists(upc):
    """Return True when a grocery row with this UPC is present."""
    rows = do_command("SELECT id FROM grocery WHERE upc = ?", [upc])
    return len(rows) > 0
def remove_grocery(upc):
    """Delete the grocery with the given UPC plus all rows referencing it.

    Dependent inventory and change-log rows are removed before the grocery
    row itself.  Does nothing when no grocery with this UPC exists.
    """
    # Renamed from `id` so the builtin isn't shadowed.
    grocery_id = get_grocery_id(upc)
    if grocery_id != -1:
        cmd = "DELETE FROM inventory WHERE grocery_id = ?"
        do_command_no_return(cmd, [grocery_id])
        cmd = "DELETE FROM changes where grocery_id = ?"
        do_command_no_return(cmd, [grocery_id])
        cmd = "DELETE FROM grocery where id = ?"
        do_command_no_return(cmd, [grocery_id])
def produce_input(plu, name):
    """Insert a produce row for (name, plu); returns the insert result."""
    return do_insert("INSERT INTO produce (name, plu) VALUES (?, ?)", [name, plu])
def get_produce(plu):
    """Look up a produce item by PLU code.

    Returns a dict with a 'success' bool; when a row is found it also
    carries the first matching row under 'grocery' (key name kept for
    compatibility with get_grocery's shape).
    """
    rows = do_command("SELECT name FROM produce WHERE plu = ?", [plu])
    if not rows:
        return {'success': False}
    return {'success': True, 'grocery': rows[0]}
|
normal
|
{
"blob_id": "92b24fe82929ed4590e5350188673c2245136d03",
"index": 5554,
"step-1": "<mask token>\n\n\ndef get_grocery_id(upc):\n cmd = 'SELECT id FROM grocery WHERE upc = ?'\n rtVal = do_command(cmd, [upc])\n if len(rtVal) > 0:\n return rtVal[0]['id']\n else:\n return -1\n\n\n<mask token>\n\n\ndef remove_grocery(upc):\n id = get_grocery_id(upc)\n if id != -1:\n cmd = 'DELETE FROM inventory WHERE grocery_id = ?'\n do_command_no_return(cmd, [id])\n cmd = 'DELETE FROM changes where grocery_id = ?'\n do_command_no_return(cmd, [id])\n cmd = 'DELETE FROM grocery where id = ?'\n do_command_no_return(cmd, [id])\n\n\ndef produce_input(plu, name):\n cmd = 'INSERT INTO produce (name, plu) VALUES (?, ?)'\n rtVal = do_insert(cmd, [name, plu])\n return rtVal\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_grocery(upc):\n cmd = 'SELECT name FROM grocery WHERE upc = ?'\n rtVal = do_command(cmd, [upc])\n length = len(rtVal)\n if length > 0:\n return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}\n return {'success': bool(len(rtVal))}\n\n\n<mask token>\n\n\ndef get_grocery_id(upc):\n cmd = 'SELECT id FROM grocery WHERE upc = ?'\n rtVal = do_command(cmd, [upc])\n if len(rtVal) > 0:\n return rtVal[0]['id']\n else:\n return -1\n\n\n<mask token>\n\n\ndef remove_grocery(upc):\n id = get_grocery_id(upc)\n if id != -1:\n cmd = 'DELETE FROM inventory WHERE grocery_id = ?'\n do_command_no_return(cmd, [id])\n cmd = 'DELETE FROM changes where grocery_id = ?'\n do_command_no_return(cmd, [id])\n cmd = 'DELETE FROM grocery where id = ?'\n do_command_no_return(cmd, [id])\n\n\ndef produce_input(plu, name):\n cmd = 'INSERT INTO produce (name, plu) VALUES (?, ?)'\n rtVal = do_insert(cmd, [name, plu])\n return rtVal\n\n\ndef get_produce(plu):\n cmd = 'SELECT name FROM produce WHERE plu = ?'\n rtVal = do_command(cmd, [plu])\n length = len(rtVal)\n if length > 0:\n return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}\n return {'success': bool(len(rtVal))}\n",
"step-3": "<mask token>\n\n\ndef get_grocery(upc):\n cmd = 'SELECT name FROM grocery WHERE upc = ?'\n rtVal = do_command(cmd, [upc])\n length = len(rtVal)\n if length > 0:\n return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}\n return {'success': bool(len(rtVal))}\n\n\n<mask token>\n\n\ndef get_grocery_id(upc):\n cmd = 'SELECT id FROM grocery WHERE upc = ?'\n rtVal = do_command(cmd, [upc])\n if len(rtVal) > 0:\n return rtVal[0]['id']\n else:\n return -1\n\n\ndef get_grocery_name(upc):\n cmd = 'SELECT name FROM grocery WHERE upc = ?'\n rtVal = do_command((cmd, [upc]))\n return rtVal[0]\n\n\n<mask token>\n\n\ndef remove_grocery(upc):\n id = get_grocery_id(upc)\n if id != -1:\n cmd = 'DELETE FROM inventory WHERE grocery_id = ?'\n do_command_no_return(cmd, [id])\n cmd = 'DELETE FROM changes where grocery_id = ?'\n do_command_no_return(cmd, [id])\n cmd = 'DELETE FROM grocery where id = ?'\n do_command_no_return(cmd, [id])\n\n\ndef produce_input(plu, name):\n cmd = 'INSERT INTO produce (name, plu) VALUES (?, ?)'\n rtVal = do_insert(cmd, [name, plu])\n return rtVal\n\n\ndef get_produce(plu):\n cmd = 'SELECT name FROM produce WHERE plu = ?'\n rtVal = do_command(cmd, [plu])\n length = len(rtVal)\n if length > 0:\n return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}\n return {'success': bool(len(rtVal))}\n",
"step-4": "from db import do_command, do_command_no_return, do_insert\n\n\ndef get_grocery(upc):\n cmd = 'SELECT name FROM grocery WHERE upc = ?'\n rtVal = do_command(cmd, [upc])\n length = len(rtVal)\n if length > 0:\n return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}\n return {'success': bool(len(rtVal))}\n\n\ndef grocery_input(upc, name):\n cmd = 'INSERT INTO grocery (name, upc) VALUES (?, ?)'\n rtVal = do_insert(cmd, [name, upc])\n return rtVal\n\n\ndef get_grocery_id(upc):\n cmd = 'SELECT id FROM grocery WHERE upc = ?'\n rtVal = do_command(cmd, [upc])\n if len(rtVal) > 0:\n return rtVal[0]['id']\n else:\n return -1\n\n\ndef get_grocery_name(upc):\n cmd = 'SELECT name FROM grocery WHERE upc = ?'\n rtVal = do_command((cmd, [upc]))\n return rtVal[0]\n\n\ndef grocery_exists(upc):\n cmd = 'SELECT id FROM grocery WHERE upc = ?'\n rtVal = do_command(cmd, [upc])\n return bool(len(rtVal))\n\n\ndef remove_grocery(upc):\n id = get_grocery_id(upc)\n if id != -1:\n cmd = 'DELETE FROM inventory WHERE grocery_id = ?'\n do_command_no_return(cmd, [id])\n cmd = 'DELETE FROM changes where grocery_id = ?'\n do_command_no_return(cmd, [id])\n cmd = 'DELETE FROM grocery where id = ?'\n do_command_no_return(cmd, [id])\n\n\ndef produce_input(plu, name):\n cmd = 'INSERT INTO produce (name, plu) VALUES (?, ?)'\n rtVal = do_insert(cmd, [name, plu])\n return rtVal\n\n\ndef get_produce(plu):\n cmd = 'SELECT name FROM produce WHERE plu = ?'\n rtVal = do_command(cmd, [plu])\n length = len(rtVal)\n if length > 0:\n return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}\n return {'success': bool(len(rtVal))}\n",
"step-5": "from db import do_command, do_command_no_return, do_insert\n\n\ndef get_grocery(upc):\n cmd = \"SELECT name FROM grocery WHERE upc = ?\"\n rtVal = do_command(cmd, [upc])\n\n length = len(rtVal)\n\n if length > 0:\n return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}\n\n return {'success': bool(len(rtVal))}\n\n\ndef grocery_input(upc, name):\n cmd = \"INSERT INTO grocery (name, upc) VALUES (?, ?)\"\n rtVal = do_insert(cmd, [name, upc])\n\n return rtVal\n\n\ndef get_grocery_id(upc):\n cmd = \"SELECT id FROM grocery WHERE upc = ?\"\n rtVal = do_command(cmd, [upc])\n\n if len(rtVal) > 0:\n return rtVal[0]['id']\n else:\n return -1\n\n\ndef get_grocery_name(upc):\n cmd = \"SELECT name FROM grocery WHERE upc = ?\"\n rtVal = do_command((cmd, [upc]))\n return rtVal[0]\n\n\ndef grocery_exists(upc):\n cmd = \"SELECT id FROM grocery WHERE upc = ?\"\n rtVal = do_command(cmd, [upc])\n\n return bool(len(rtVal))\n\n\ndef remove_grocery(upc):\n id = get_grocery_id(upc)\n\n if id != -1:\n cmd = \"DELETE FROM inventory WHERE grocery_id = ?\"\n do_command_no_return(cmd, [id])\n cmd = \"DELETE FROM changes where grocery_id = ?\"\n do_command_no_return(cmd, [id])\n cmd = \"DELETE FROM grocery where id = ?\"\n do_command_no_return(cmd, [id])\n\n\ndef produce_input(plu, name):\n cmd = \"INSERT INTO produce (name, plu) VALUES (?, ?)\"\n rtVal = do_insert(cmd, [name, plu])\n\n return rtVal\n\ndef get_produce(plu):\n cmd = \"SELECT name FROM produce WHERE plu = ?\"\n rtVal = do_command(cmd, [plu])\n\n length = len(rtVal)\n\n if length > 0:\n return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}\n\n return {'success': bool(len(rtVal))}",
"step-ids": [
3,
5,
6,
9,
10
]
}
|
[
3,
5,
6,
9,
10
] |
# Generated from /home/mridul/PycharmProjects/BTP_2k18-19/PlSql.g4 by ANTLR 4.7.2
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\u020e")
buf.write("\u14d7\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\t")
buf.write("C\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\t")
buf.write("L\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT\4U\t")
buf.write("U\4V\tV\4W\tW\4X\tX\4Y\tY\4Z\tZ\4[\t[\4\\\t\\\4]\t]\4")
buf.write("^\t^\4_\t_\4`\t`\4a\ta\4b\tb\4c\tc\4d\td\4e\te\4f\tf\4")
buf.write("g\tg\4h\th\4i\ti\4j\tj\4k\tk\4l\tl\4m\tm\4n\tn\4o\to\4")
buf.write("p\tp\4q\tq\4r\tr\4s\ts\4t\tt\4u\tu\4v\tv\4w\tw\4x\tx\4")
buf.write("y\ty\4z\tz\4{\t{\4|\t|\4}\t}\4~\t~\4\177\t\177\4\u0080")
buf.write("\t\u0080\4\u0081\t\u0081\4\u0082\t\u0082\4\u0083\t\u0083")
buf.write("\4\u0084\t\u0084\4\u0085\t\u0085\4\u0086\t\u0086\4\u0087")
buf.write("\t\u0087\4\u0088\t\u0088\4\u0089\t\u0089\4\u008a\t\u008a")
buf.write("\4\u008b\t\u008b\4\u008c\t\u008c\4\u008d\t\u008d\4\u008e")
buf.write("\t\u008e\4\u008f\t\u008f\4\u0090\t\u0090\4\u0091\t\u0091")
buf.write("\4\u0092\t\u0092\4\u0093\t\u0093\4\u0094\t\u0094\4\u0095")
buf.write("\t\u0095\4\u0096\t\u0096\4\u0097\t\u0097\4\u0098\t\u0098")
buf.write("\4\u0099\t\u0099\4\u009a\t\u009a\4\u009b\t\u009b\4\u009c")
buf.write("\t\u009c\4\u009d\t\u009d\4\u009e\t\u009e\4\u009f\t\u009f")
buf.write("\4\u00a0\t\u00a0\4\u00a1\t\u00a1\4\u00a2\t\u00a2\4\u00a3")
buf.write("\t\u00a3\4\u00a4\t\u00a4\4\u00a5\t\u00a5\4\u00a6\t\u00a6")
buf.write("\4\u00a7\t\u00a7\4\u00a8\t\u00a8\4\u00a9\t\u00a9\4\u00aa")
buf.write("\t\u00aa\4\u00ab\t\u00ab\4\u00ac\t\u00ac\4\u00ad\t\u00ad")
buf.write("\4\u00ae\t\u00ae\4\u00af\t\u00af\4\u00b0\t\u00b0\4\u00b1")
buf.write("\t\u00b1\4\u00b2\t\u00b2\4\u00b3\t\u00b3\4\u00b4\t\u00b4")
buf.write("\4\u00b5\t\u00b5\4\u00b6\t\u00b6\4\u00b7\t\u00b7\4\u00b8")
buf.write("\t\u00b8\4\u00b9\t\u00b9\4\u00ba\t\u00ba\4\u00bb\t\u00bb")
buf.write("\4\u00bc\t\u00bc\4\u00bd\t\u00bd\4\u00be\t\u00be\4\u00bf")
buf.write("\t\u00bf\4\u00c0\t\u00c0\4\u00c1\t\u00c1\4\u00c2\t\u00c2")
buf.write("\4\u00c3\t\u00c3\4\u00c4\t\u00c4\4\u00c5\t\u00c5\4\u00c6")
buf.write("\t\u00c6\4\u00c7\t\u00c7\4\u00c8\t\u00c8\4\u00c9\t\u00c9")
buf.write("\4\u00ca\t\u00ca\4\u00cb\t\u00cb\4\u00cc\t\u00cc\4\u00cd")
buf.write("\t\u00cd\4\u00ce\t\u00ce\4\u00cf\t\u00cf\4\u00d0\t\u00d0")
buf.write("\4\u00d1\t\u00d1\4\u00d2\t\u00d2\4\u00d3\t\u00d3\4\u00d4")
buf.write("\t\u00d4\4\u00d5\t\u00d5\4\u00d6\t\u00d6\4\u00d7\t\u00d7")
buf.write("\4\u00d8\t\u00d8\4\u00d9\t\u00d9\4\u00da\t\u00da\4\u00db")
buf.write("\t\u00db\4\u00dc\t\u00dc\4\u00dd\t\u00dd\4\u00de\t\u00de")
buf.write("\4\u00df\t\u00df\4\u00e0\t\u00e0\4\u00e1\t\u00e1\4\u00e2")
buf.write("\t\u00e2\4\u00e3\t\u00e3\4\u00e4\t\u00e4\4\u00e5\t\u00e5")
buf.write("\4\u00e6\t\u00e6\4\u00e7\t\u00e7\4\u00e8\t\u00e8\4\u00e9")
buf.write("\t\u00e9\4\u00ea\t\u00ea\4\u00eb\t\u00eb\4\u00ec\t\u00ec")
buf.write("\4\u00ed\t\u00ed\4\u00ee\t\u00ee\4\u00ef\t\u00ef\4\u00f0")
buf.write("\t\u00f0\4\u00f1\t\u00f1\4\u00f2\t\u00f2\4\u00f3\t\u00f3")
buf.write("\4\u00f4\t\u00f4\4\u00f5\t\u00f5\4\u00f6\t\u00f6\4\u00f7")
buf.write("\t\u00f7\4\u00f8\t\u00f8\4\u00f9\t\u00f9\4\u00fa\t\u00fa")
buf.write("\4\u00fb\t\u00fb\4\u00fc\t\u00fc\4\u00fd\t\u00fd\4\u00fe")
buf.write("\t\u00fe\4\u00ff\t\u00ff\4\u0100\t\u0100\4\u0101\t\u0101")
buf.write("\4\u0102\t\u0102\4\u0103\t\u0103\4\u0104\t\u0104\4\u0105")
buf.write("\t\u0105\4\u0106\t\u0106\4\u0107\t\u0107\4\u0108\t\u0108")
buf.write("\4\u0109\t\u0109\4\u010a\t\u010a\4\u010b\t\u010b\4\u010c")
buf.write("\t\u010c\4\u010d\t\u010d\4\u010e\t\u010e\4\u010f\t\u010f")
buf.write("\4\u0110\t\u0110\4\u0111\t\u0111\4\u0112\t\u0112\4\u0113")
buf.write("\t\u0113\4\u0114\t\u0114\4\u0115\t\u0115\4\u0116\t\u0116")
buf.write("\4\u0117\t\u0117\4\u0118\t\u0118\4\u0119\t\u0119\4\u011a")
buf.write("\t\u011a\4\u011b\t\u011b\4\u011c\t\u011c\4\u011d\t\u011d")
buf.write("\4\u011e\t\u011e\4\u011f\t\u011f\4\u0120\t\u0120\4\u0121")
buf.write("\t\u0121\4\u0122\t\u0122\4\u0123\t\u0123\4\u0124\t\u0124")
buf.write("\4\u0125\t\u0125\4\u0126\t\u0126\4\u0127\t\u0127\4\u0128")
buf.write("\t\u0128\4\u0129\t\u0129\4\u012a\t\u012a\4\u012b\t\u012b")
buf.write("\4\u012c\t\u012c\4\u012d\t\u012d\4\u012e\t\u012e\4\u012f")
buf.write("\t\u012f\4\u0130\t\u0130\4\u0131\t\u0131\4\u0132\t\u0132")
buf.write("\4\u0133\t\u0133\4\u0134\t\u0134\4\u0135\t\u0135\4\u0136")
buf.write("\t\u0136\4\u0137\t\u0137\4\u0138\t\u0138\4\u0139\t\u0139")
buf.write("\4\u013a\t\u013a\4\u013b\t\u013b\4\u013c\t\u013c\4\u013d")
buf.write("\t\u013d\4\u013e\t\u013e\4\u013f\t\u013f\4\u0140\t\u0140")
buf.write("\4\u0141\t\u0141\4\u0142\t\u0142\4\u0143\t\u0143\4\u0144")
buf.write("\t\u0144\4\u0145\t\u0145\4\u0146\t\u0146\4\u0147\t\u0147")
buf.write("\4\u0148\t\u0148\4\u0149\t\u0149\4\u014a\t\u014a\4\u014b")
buf.write("\t\u014b\4\u014c\t\u014c\4\u014d\t\u014d\4\u014e\t\u014e")
buf.write("\4\u014f\t\u014f\4\u0150\t\u0150\4\u0151\t\u0151\4\u0152")
buf.write("\t\u0152\4\u0153\t\u0153\4\u0154\t\u0154\4\u0155\t\u0155")
buf.write("\4\u0156\t\u0156\4\u0157\t\u0157\4\u0158\t\u0158\4\u0159")
buf.write("\t\u0159\4\u015a\t\u015a\4\u015b\t\u015b\4\u015c\t\u015c")
buf.write("\4\u015d\t\u015d\4\u015e\t\u015e\4\u015f\t\u015f\4\u0160")
buf.write("\t\u0160\4\u0161\t\u0161\4\u0162\t\u0162\4\u0163\t\u0163")
buf.write("\4\u0164\t\u0164\4\u0165\t\u0165\4\u0166\t\u0166\4\u0167")
buf.write("\t\u0167\4\u0168\t\u0168\4\u0169\t\u0169\4\u016a\t\u016a")
buf.write("\4\u016b\t\u016b\4\u016c\t\u016c\4\u016d\t\u016d\4\u016e")
buf.write("\t\u016e\4\u016f\t\u016f\4\u0170\t\u0170\4\u0171\t\u0171")
buf.write("\4\u0172\t\u0172\4\u0173\t\u0173\4\u0174\t\u0174\4\u0175")
buf.write("\t\u0175\4\u0176\t\u0176\4\u0177\t\u0177\4\u0178\t\u0178")
buf.write("\4\u0179\t\u0179\4\u017a\t\u017a\4\u017b\t\u017b\4\u017c")
buf.write("\t\u017c\4\u017d\t\u017d\4\u017e\t\u017e\4\u017f\t\u017f")
buf.write("\4\u0180\t\u0180\4\u0181\t\u0181\4\u0182\t\u0182\4\u0183")
buf.write("\t\u0183\4\u0184\t\u0184\4\u0185\t\u0185\4\u0186\t\u0186")
buf.write("\4\u0187\t\u0187\4\u0188\t\u0188\4\u0189\t\u0189\4\u018a")
buf.write("\t\u018a\4\u018b\t\u018b\4\u018c\t\u018c\4\u018d\t\u018d")
buf.write("\4\u018e\t\u018e\4\u018f\t\u018f\4\u0190\t\u0190\4\u0191")
buf.write("\t\u0191\4\u0192\t\u0192\4\u0193\t\u0193\4\u0194\t\u0194")
buf.write("\4\u0195\t\u0195\4\u0196\t\u0196\4\u0197\t\u0197\4\u0198")
buf.write("\t\u0198\4\u0199\t\u0199\4\u019a\t\u019a\4\u019b\t\u019b")
buf.write("\4\u019c\t\u019c\4\u019d\t\u019d\4\u019e\t\u019e\4\u019f")
buf.write("\t\u019f\4\u01a0\t\u01a0\4\u01a1\t\u01a1\4\u01a2\t\u01a2")
buf.write("\4\u01a3\t\u01a3\4\u01a4\t\u01a4\4\u01a5\t\u01a5\4\u01a6")
buf.write("\t\u01a6\4\u01a7\t\u01a7\4\u01a8\t\u01a8\4\u01a9\t\u01a9")
buf.write("\4\u01aa\t\u01aa\4\u01ab\t\u01ab\4\u01ac\t\u01ac\4\u01ad")
buf.write("\t\u01ad\4\u01ae\t\u01ae\4\u01af\t\u01af\4\u01b0\t\u01b0")
buf.write("\4\u01b1\t\u01b1\4\u01b2\t\u01b2\4\u01b3\t\u01b3\4\u01b4")
buf.write("\t\u01b4\4\u01b5\t\u01b5\4\u01b6\t\u01b6\4\u01b7\t\u01b7")
buf.write("\4\u01b8\t\u01b8\4\u01b9\t\u01b9\4\u01ba\t\u01ba\4\u01bb")
buf.write("\t\u01bb\4\u01bc\t\u01bc\4\u01bd\t\u01bd\4\u01be\t\u01be")
buf.write("\4\u01bf\t\u01bf\4\u01c0\t\u01c0\4\u01c1\t\u01c1\4\u01c2")
buf.write("\t\u01c2\4\u01c3\t\u01c3\4\u01c4\t\u01c4\4\u01c5\t\u01c5")
buf.write("\4\u01c6\t\u01c6\4\u01c7\t\u01c7\4\u01c8\t\u01c8\4\u01c9")
buf.write("\t\u01c9\4\u01ca\t\u01ca\4\u01cb\t\u01cb\4\u01cc\t\u01cc")
buf.write("\4\u01cd\t\u01cd\4\u01ce\t\u01ce\4\u01cf\t\u01cf\4\u01d0")
buf.write("\t\u01d0\4\u01d1\t\u01d1\4\u01d2\t\u01d2\4\u01d3\t\u01d3")
buf.write("\4\u01d4\t\u01d4\4\u01d5\t\u01d5\4\u01d6\t\u01d6\4\u01d7")
buf.write("\t\u01d7\4\u01d8\t\u01d8\4\u01d9\t\u01d9\4\u01da\t\u01da")
buf.write("\4\u01db\t\u01db\4\u01dc\t\u01dc\4\u01dd\t\u01dd\4\u01de")
buf.write("\t\u01de\4\u01df\t\u01df\4\u01e0\t\u01e0\4\u01e1\t\u01e1")
buf.write("\4\u01e2\t\u01e2\4\u01e3\t\u01e3\4\u01e4\t\u01e4\4\u01e5")
buf.write("\t\u01e5\4\u01e6\t\u01e6\4\u01e7\t\u01e7\4\u01e8\t\u01e8")
buf.write("\4\u01e9\t\u01e9\4\u01ea\t\u01ea\4\u01eb\t\u01eb\4\u01ec")
buf.write("\t\u01ec\4\u01ed\t\u01ed\4\u01ee\t\u01ee\4\u01ef\t\u01ef")
buf.write("\4\u01f0\t\u01f0\4\u01f1\t\u01f1\4\u01f2\t\u01f2\4\u01f3")
buf.write("\t\u01f3\4\u01f4\t\u01f4\4\u01f5\t\u01f5\4\u01f6\t\u01f6")
buf.write("\4\u01f7\t\u01f7\4\u01f8\t\u01f8\4\u01f9\t\u01f9\4\u01fa")
buf.write("\t\u01fa\4\u01fb\t\u01fb\4\u01fc\t\u01fc\4\u01fd\t\u01fd")
buf.write("\4\u01fe\t\u01fe\4\u01ff\t\u01ff\4\u0200\t\u0200\4\u0201")
buf.write("\t\u0201\4\u0202\t\u0202\4\u0203\t\u0203\4\u0204\t\u0204")
buf.write("\4\u0205\t\u0205\4\u0206\t\u0206\4\u0207\t\u0207\4\u0208")
buf.write("\t\u0208\4\u0209\t\u0209\4\u020a\t\u020a\4\u020b\t\u020b")
buf.write("\4\u020c\t\u020c\4\u020d\t\u020d\4\u020e\t\u020e\4\u020f")
buf.write("\t\u020f\4\u0210\t\u0210\4\u0211\t\u0211\4\u0212\t\u0212")
buf.write("\4\u0213\t\u0213\4\u0214\t\u0214\4\u0215\t\u0215\4\u0216")
buf.write("\t\u0216\4\u0217\t\u0217\4\u0218\t\u0218\4\u0219\t\u0219")
buf.write("\4\u021a\t\u021a\4\u021b\t\u021b\4\u021c\t\u021c\4\u021d")
buf.write("\t\u021d\4\u021e\t\u021e\4\u021f\t\u021f\4\u0220\t\u0220")
buf.write("\4\u0221\t\u0221\4\u0222\t\u0222\4\u0223\t\u0223\4\u0224")
buf.write("\t\u0224\4\u0225\t\u0225\4\u0226\t\u0226\4\u0227\t\u0227")
buf.write("\4\u0228\t\u0228\4\u0229\t\u0229\4\u022a\t\u022a\4\u022b")
buf.write("\t\u022b\4\u022c\t\u022c\4\u022d\t\u022d\4\u022e\t\u022e")
buf.write("\4\u022f\t\u022f\4\u0230\t\u0230\4\u0231\t\u0231\4\u0232")
buf.write("\t\u0232\4\u0233\t\u0233\4\u0234\t\u0234\3\2\3\2\3\2\3")
buf.write("\3\3\3\3\4\3\4\3\4\3\4\3\5\3\5\3\5\3\5\3\5\3\5\3\6\3\6")
buf.write("\3\6\3\6\3\6\3\6\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3")
buf.write("\7\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3\t\3\t\3\n\3\n\3\n")
buf.write("\3\n\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\13\3\f\3\f\3\f\3")
buf.write("\f\3\r\3\r\3\r\3\r\3\r\3\r\3\16\3\16\3\16\3\17\3\17\3")
buf.write("\17\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3\20")
buf.write("\3\20\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22\3\22")
buf.write("\3\22\3\22\3\22\3\22\3\23\3\23\3\23\3\24\3\24\3\24\3\24")
buf.write("\3\24\3\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25")
buf.write("\3\25\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\27\3\27\3\27")
buf.write("\3\27\3\27\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30")
buf.write("\3\30\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31")
buf.write("\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31")
buf.write("\3\31\3\31\3\32\3\32\3\32\3\32\3\32\3\32\3\33\3\33\3\33")
buf.write("\3\33\3\33\3\33\3\33\3\34\3\34\3\34\3\34\3\34\3\34\3\35")
buf.write("\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\36\3\36\3\36\3\36")
buf.write("\3\36\3\36\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37")
buf.write("\3\37\3\37\3\37\3\37\3\37\3 \3 \3 \3 \3 \3 \3 \3 \3 \3")
buf.write(" \3 \3 \3 \3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3")
buf.write("!\3\"\3\"\3\"\3\"\3\"\3#\3#\3#\3#\3#\3#\3$\3$\3$\3$\3")
buf.write("$\3%\3%\3%\3%\3%\3%\3%\3%\3&\3&\3&\3&\3&\3\'\3\'\3\'\3")
buf.write("\'\3\'\3\'\3\'\3\'\3(\3(\3(\3(\3(\3)\3)\3)\3*\3*\3*\3")
buf.write("*\3*\3+\3+\3,\3,\3,\3,\3,\3,\3-\3-\3-\3-\3-\3.\3.\3.\3")
buf.write(".\3.\3.\3.\3.\3.\3.\3/\3/\3/\3/\3/\3/\3/\3/\3\60\3\60")
buf.write("\3\60\3\60\3\60\3\61\3\61\3\61\3\61\3\61\3\62\3\62\3\62")
buf.write("\3\62\3\62\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3\64")
buf.write("\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\65\3\65")
buf.write("\3\65\3\65\3\65\3\65\3\66\3\66\3\66\3\66\3\67\3\67\3\67")
buf.write("\3\67\3\67\38\38\38\38\38\38\39\39\39\39\39\39\39\39\3")
buf.write(":\3:\3:\3:\3:\3:\3:\3:\3;\3;\3;\3;\3;\3;\3;\3;\3<\3<\3")
buf.write("<\3<\3<\3<\3<\3<\3=\3=\3=\3=\3=\3=\3=\3>\3>\3>\3>\3>\3")
buf.write(">\3>\3>\3>\3>\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3?\3")
buf.write("?\3@\3@\3@\3@\3@\3@\3@\3@\3A\3A\3A\3A\3A\3A\3A\3A\3A\3")
buf.write("B\3B\3B\3B\3B\3B\3B\3B\3C\3C\3C\3C\3C\3C\3C\3C\3C\3C\3")
buf.write("C\3C\3C\3C\3C\3C\3D\3D\3D\3D\3D\3D\3D\3D\3D\3E\3E\3E\3")
buf.write("E\3E\3E\3E\3E\3E\3E\3E\3F\3F\3F\3F\3F\3F\3F\3F\3F\3F\3")
buf.write("F\3F\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3H\3H\3H\3H\3")
buf.write("H\3H\3H\3H\3I\3I\3I\3I\3I\3I\3I\3I\3J\3J\3J\3J\3J\3J\3")
buf.write("J\3J\3J\3K\3K\3K\3K\3K\3K\3K\3K\3L\3L\3L\3L\3L\3L\3L\3")
buf.write("L\3L\3L\3L\3L\3M\3M\3M\3M\3M\3M\3M\3M\3M\3M\3M\3M\3M\3")
buf.write("M\3M\3M\3N\3N\3N\3N\3N\3O\3O\3O\3O\3O\3O\3P\3P\3P\3P\3")
buf.write("P\3P\3P\3Q\3Q\3Q\3Q\3Q\3Q\3R\3R\3R\3R\3R\3S\3S\3S\3S\3")
buf.write("S\3S\3S\3S\3T\3T\3T\3T\3T\3T\3T\3T\3T\3T\3T\3T\3T\3U\3")
buf.write("U\3U\3U\3U\3U\3U\3V\3V\3V\3V\3V\3V\3V\3V\3V\3V\3V\3V\3")
buf.write("W\3W\3W\3W\3W\3W\3X\3X\3X\3X\3X\3Y\3Y\3Y\3Y\3Y\3Y\3Y\3")
buf.write("Y\3Y\3Z\3Z\3Z\3Z\3Z\3[\3[\3[\3[\3\\\3\\\3\\\3\\\3\\\3")
buf.write("\\\3\\\3\\\3\\\3\\\3\\\3\\\3\\\3\\\3\\\3]\3]\3]\3]\3]")
buf.write("\3]\3]\3]\3]\3]\3]\3^\3^\3^\3^\3_\3_\3_\3_\3_\3_\3`\3")
buf.write("`\3`\3`\3a\3a\3a\3a\3a\3a\3a\3a\3b\3b\3b\3b\3b\3b\3b\3")
buf.write("b\3c\3c\3c\3c\3c\3c\3c\3c\3c\3c\3d\3d\3d\3d\3d\3d\3d\3")
buf.write("d\3d\3d\3e\3e\3e\3e\3e\3e\3e\3e\3f\3f\3f\3f\3f\3f\3f\3")
buf.write("f\3f\3g\3g\3g\3g\3g\3g\3g\3g\3g\3h\3h\3h\3h\3h\3h\3h\3")
buf.write("h\3i\3i\3i\3i\3i\3i\3i\3j\3j\3j\3j\3j\3j\3k\3k\3k\3k\3")
buf.write("k\3l\3l\3l\3l\3l\3l\3l\3l\3l\3l\3l\3l\3l\3l\3m\3m\3m\3")
buf.write("m\3m\3m\3m\3m\3m\3m\3n\3n\3n\3n\3n\3n\3n\3n\3o\3o\3o\3")
buf.write("o\3o\3o\3o\3o\3o\3o\3o\3o\3o\3p\3p\3p\3p\3p\3p\3p\3p\3")
buf.write("p\3q\3q\3q\3q\3q\3q\3q\3q\3q\3r\3r\3r\3r\3r\3r\3r\3s\3")
buf.write("s\3s\3s\3s\3t\3t\3t\3t\3t\3t\3t\3t\3t\3t\3t\3t\3t\3t\3")
buf.write("t\3t\3t\3t\3t\3t\3t\3t\3t\3t\3t\3u\3u\3u\3u\3u\3v\3v\3")
buf.write("v\3v\3v\3v\3v\3v\3w\3w\3w\3w\3w\3x\3x\3x\3x\3x\3x\3y\3")
buf.write("y\3y\3y\3y\3y\3z\3z\3z\3z\3z\3z\3z\3{\3{\3{\3{\3{\3{\3")
buf.write("{\3{\3{\3|\3|\3|\3|\3}\3}\3}\3}\3}\3}\3}\3}\3}\3}\3}\3")
buf.write("}\3}\3}\3}\3~\3~\3~\3~\3\177\3\177\3\177\3\177\3\177\3")
buf.write("\177\3\177\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080\3\u0080")
buf.write("\3\u0080\3\u0081\3\u0081\3\u0081\3\u0081\3\u0081\3\u0081")
buf.write("\3\u0081\3\u0081\3\u0081\3\u0082\3\u0082\3\u0082\3\u0082")
buf.write("\3\u0082\3\u0082\3\u0082\3\u0083\3\u0083\3\u0083\3\u0083")
buf.write("\3\u0083\3\u0083\3\u0083\3\u0083\3\u0083\3\u0083\3\u0084")
buf.write("\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084")
buf.write("\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084")
buf.write("\3\u0085\3\u0085\3\u0085\3\u0085\3\u0085\3\u0085\3\u0085")
buf.write("\3\u0085\3\u0085\3\u0085\3\u0085\3\u0086\3\u0086\3\u0086")
buf.write("\3\u0086\3\u0086\3\u0086\3\u0086\3\u0086\3\u0087\3\u0087")
buf.write("\3\u0087\3\u0087\3\u0087\3\u0087\3\u0087\3\u0087\3\u0087")
buf.write("\3\u0087\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write("\3\u0088\3\u0088\3\u0089\3\u0089\3\u0089\3\u0089\3\u0089")
buf.write("\3\u0089\3\u0089\3\u008a\3\u008a\3\u008a\3\u008a\3\u008a")
buf.write("\3\u008b\3\u008b\3\u008b\3\u008b\3\u008b\3\u008b\3\u008b")
buf.write("\3\u008b\3\u008c\3\u008c\3\u008c\3\u008c\3\u008c\3\u008c")
buf.write("\3\u008c\3\u008c\3\u008c\3\u008d\3\u008d\3\u008d\3\u008d")
buf.write("\3\u008d\3\u008d\3\u008d\3\u008d\3\u008e\3\u008e\3\u008e")
buf.write("\3\u008e\3\u008e\3\u008e\3\u008e\3\u008e\3\u008f\3\u008f")
buf.write("\3\u008f\3\u008f\3\u008f\3\u008f\3\u0090\3\u0090\3\u0090")
buf.write("\3\u0090\3\u0090\3\u0090\3\u0091\3\u0091\3\u0091\3\u0091")
buf.write("\3\u0091\3\u0091\3\u0092\3\u0092\3\u0092\3\u0092\3\u0092")
buf.write("\3\u0092\3\u0093\3\u0093\3\u0093\3\u0093\3\u0093\3\u0093")
buf.write("\3\u0093\3\u0093\3\u0093\3\u0093\3\u0093\3\u0093\3\u0094")
buf.write("\3\u0094\3\u0094\3\u0094\3\u0094\3\u0094\3\u0095\3\u0095")
buf.write("\3\u0095\3\u0095\3\u0095\3\u0095\3\u0095\3\u0095\3\u0095")
buf.write("\3\u0095\3\u0096\3\u0096\3\u0096\3\u0096\3\u0096\3\u0096")
buf.write("\3\u0096\3\u0096\3\u0097\3\u0097\3\u0097\3\u0097\3\u0098")
buf.write("\3\u0098\3\u0098\3\u0098\3\u0098\3\u0098\3\u0098\3\u0099")
buf.write("\3\u0099\3\u0099\3\u0099\3\u0099\3\u0099\3\u009a\3\u009a")
buf.write("\3\u009a\3\u009a\3\u009a\3\u009b\3\u009b\3\u009b\3\u009b")
buf.write("\3\u009b\3\u009c\3\u009c\3\u009c\3\u009c\3\u009c\3\u009c")
buf.write("\3\u009c\3\u009c\3\u009c\3\u009d\3\u009d\3\u009d\3\u009d")
buf.write("\3\u009d\3\u009e\3\u009e\3\u009e\3\u009e\3\u009e\3\u009e")
buf.write("\3\u009f\3\u009f\3\u009f\3\u009f\3\u009f\3\u009f\3\u00a0")
buf.write("\3\u00a0\3\u00a0\3\u00a0\3\u00a0\3\u00a0\3\u00a0\3\u00a0")
buf.write("\3\u00a0\3\u00a1\3\u00a1\3\u00a1\3\u00a1\3\u00a1\3\u00a2")
buf.write("\3\u00a2\3\u00a2\3\u00a2\3\u00a2\3\u00a2\3\u00a2\3\u00a3")
buf.write("\3\u00a3\3\u00a3\3\u00a3\3\u00a3\3\u00a4\3\u00a4\3\u00a4")
buf.write("\3\u00a4\3\u00a4\3\u00a5\3\u00a5\3\u00a5\3\u00a6\3\u00a6")
buf.write("\3\u00a6\3\u00a6\3\u00a6\3\u00a6\3\u00a6\3\u00a7\3\u00a7")
buf.write("\3\u00a7\3\u00a7\3\u00a7\3\u00a7\3\u00a7\3\u00a7\3\u00a7")
buf.write("\3\u00a7\3\u00a8\3\u00a8\3\u00a8\3\u00a9\3\u00a9\3\u00a9")
buf.write("\3\u00a9\3\u00a9\3\u00a9\3\u00a9\3\u00a9\3\u00aa\3\u00aa")
buf.write("\3\u00aa\3\u00aa\3\u00aa\3\u00aa\3\u00aa\3\u00aa\3\u00aa")
buf.write("\3\u00aa\3\u00ab\3\u00ab\3\u00ab\3\u00ab\3\u00ab\3\u00ab")
buf.write("\3\u00ab\3\u00ab\3\u00ab\3\u00ab\3\u00ac\3\u00ac\3\u00ac")
buf.write("\3\u00ac\3\u00ac\3\u00ac\3\u00ac\3\u00ad\3\u00ad\3\u00ad")
buf.write("\3\u00ad\3\u00ad\3\u00ad\3\u00ae\3\u00ae\3\u00ae\3\u00ae")
buf.write("\3\u00ae\3\u00ae\3\u00ae\3\u00ae\3\u00af\3\u00af\3\u00af")
buf.write("\3\u00af\3\u00af\3\u00af\3\u00af\3\u00af\3\u00af\3\u00af")
buf.write("\3\u00b0\3\u00b0\3\u00b0\3\u00b0\3\u00b0\3\u00b0\3\u00b0")
buf.write("\3\u00b0\3\u00b1\3\u00b1\3\u00b1\3\u00b1\3\u00b1\3\u00b1")
buf.write("\3\u00b1\3\u00b1\3\u00b1\3\u00b2\3\u00b2\3\u00b2\3\u00b2")
buf.write("\3\u00b2\3\u00b2\3\u00b2\3\u00b3\3\u00b3\3\u00b3\3\u00b3")
buf.write("\3\u00b3\3\u00b3\3\u00b4\3\u00b4\3\u00b4\3\u00b4\3\u00b4")
buf.write("\3\u00b4\3\u00b5\3\u00b5\3\u00b5\3\u00b5\3\u00b5\3\u00b5")
buf.write("\3\u00b5\3\u00b6\3\u00b6\3\u00b6\3\u00b6\3\u00b6\3\u00b6")
buf.write("\3\u00b6\3\u00b6\3\u00b6\3\u00b6\3\u00b6\3\u00b6\3\u00b6")
buf.write("\3\u00b7\3\u00b7\3\u00b7\3\u00b7\3\u00b7\3\u00b7\3\u00b7")
buf.write("\3\u00b7\3\u00b8\3\u00b8\3\u00b8\3\u00b8\3\u00b9\3\u00b9")
buf.write("\3\u00b9\3\u00b9\3\u00b9\3\u00b9\3\u00b9\3\u00b9\3\u00ba")
buf.write("\3\u00ba\3\u00ba\3\u00ba\3\u00ba\3\u00ba\3\u00ba\3\u00ba")
buf.write("\3\u00ba\3\u00ba\3\u00bb\3\u00bb\3\u00bb\3\u00bb\3\u00bb")
buf.write("\3\u00bb\3\u00bb\3\u00bb\3\u00bb\3\u00bc\3\u00bc\3\u00bc")
buf.write("\3\u00bc\3\u00bc\3\u00bd\3\u00bd\3\u00bd\3\u00bd\3\u00bd")
buf.write("\3\u00bd\3\u00bd\3\u00bd\3\u00bd\3\u00bd\3\u00bd\3\u00be")
buf.write("\3\u00be\3\u00be\3\u00bf\3\u00bf\3\u00bf\3\u00bf\3\u00bf")
buf.write("\3\u00bf\3\u00bf\3\u00bf\3\u00bf\3\u00bf\3\u00c0\3\u00c0")
buf.write("\3\u00c0\3\u00c0\3\u00c0\3\u00c0\3\u00c0\3\u00c0\3\u00c1")
buf.write("\3\u00c1\3\u00c1\3\u00c1\3\u00c1\3\u00c2\3\u00c2\3\u00c2")
buf.write("\3\u00c2\3\u00c2\3\u00c3\3\u00c3\3\u00c3\3\u00c3\3\u00c3")
buf.write("\3\u00c4\3\u00c4\3\u00c4\3\u00c4\3\u00c4\3\u00c4\3\u00c4")
buf.write("\3\u00c4\3\u00c4\3\u00c5\3\u00c5\3\u00c5\3\u00c5\3\u00c5")
buf.write("\3\u00c6\3\u00c6\3\u00c6\3\u00c6\3\u00c6\3\u00c6\3\u00c6")
buf.write("\3\u00c6\3\u00c6\3\u00c6\3\u00c6\3\u00c7\3\u00c7\3\u00c7")
buf.write("\3\u00c7\3\u00c7\3\u00c7\3\u00c7\3\u00c7\3\u00c8\3\u00c8")
buf.write("\3\u00c8\3\u00c8\3\u00c8\3\u00c9\3\u00c9\3\u00c9\3\u00c9")
buf.write("\3\u00c9\3\u00c9\3\u00ca\3\u00ca\3\u00ca\3\u00ca\3\u00ca")
buf.write("\3\u00ca\3\u00ca\3\u00ca\3\u00cb\3\u00cb\3\u00cb\3\u00cb")
buf.write("\3\u00cb\3\u00cc\3\u00cc\3\u00cc\3\u00cc\3\u00cc\3\u00cc")
buf.write("\3\u00cd\3\u00cd\3\u00cd\3\u00cd\3\u00cd\3\u00cd\3\u00ce")
buf.write("\3\u00ce\3\u00ce\3\u00ce\3\u00ce\3\u00ce\3\u00cf\3\u00cf")
buf.write("\3\u00cf\3\u00cf\3\u00cf\3\u00cf\3\u00d0\3\u00d0\3\u00d0")
buf.write("\3\u00d0\3\u00d0\3\u00d0\3\u00d1\3\u00d1\3\u00d1\3\u00d1")
buf.write("\3\u00d1\3\u00d2\3\u00d2\3\u00d2\3\u00d2\3\u00d2\3\u00d2")
buf.write("\3\u00d2\3\u00d3\3\u00d3\3\u00d3\3\u00d3\3\u00d4\3\u00d4")
buf.write("\3\u00d4\3\u00d4\3\u00d4\3\u00d4\3\u00d4\3\u00d5\3\u00d5")
buf.write("\3\u00d5\3\u00d5\3\u00d5\3\u00d5\3\u00d6\3\u00d6\3\u00d6")
buf.write("\3\u00d6\3\u00d6\3\u00d7\3\u00d7\3\u00d7\3\u00d7\3\u00d7")
buf.write("\3\u00d8\3\u00d8\3\u00d8\3\u00d8\3\u00d8\3\u00d9\3\u00d9")
buf.write("\3\u00d9\3\u00d9\3\u00da\3\u00da\3\u00da\3\u00da\3\u00da")
buf.write("\3\u00da\3\u00da\3\u00da\3\u00db\3\u00db\3\u00db\3\u00db")
buf.write("\3\u00db\3\u00db\3\u00db\3\u00db\3\u00db\3\u00dc\3\u00dc")
buf.write("\3\u00dc\3\u00dc\3\u00dc\3\u00dc\3\u00dc\3\u00dc\3\u00dc")
buf.write("\3\u00dd\3\u00dd\3\u00dd\3\u00dd\3\u00dd\3\u00dd\3\u00dd")
buf.write("\3\u00de\3\u00de\3\u00de\3\u00de\3\u00de\3\u00de\3\u00df")
buf.write("\3\u00df\3\u00df\3\u00df\3\u00df\3\u00df\3\u00e0\3\u00e0")
buf.write("\3\u00e0\3\u00e0\3\u00e0\3\u00e0\3\u00e0\3\u00e1\3\u00e1")
buf.write("\3\u00e1\3\u00e1\3\u00e1\3\u00e1\3\u00e1\3\u00e1\3\u00e1")
buf.write("\3\u00e2\3\u00e2\3\u00e2\3\u00e2\3\u00e2\3\u00e2\3\u00e2")
buf.write("\3\u00e2\3\u00e2\3\u00e3\3\u00e3\3\u00e3\3\u00e3\3\u00e3")
buf.write("\3\u00e4\3\u00e4\3\u00e4\3\u00e4\3\u00e4\3\u00e4\3\u00e5")
buf.write("\3\u00e5\3\u00e5\3\u00e5\3\u00e5\3\u00e5\3\u00e5\3\u00e6")
buf.write("\3\u00e6\3\u00e6\3\u00e6\3\u00e6\3\u00e6\3\u00e7\3\u00e7")
buf.write("\3\u00e7\3\u00e7\3\u00e7\3\u00e7\3\u00e7\3\u00e7\3\u00e7")
buf.write("\3\u00e8\3\u00e8\3\u00e8\3\u00e8\3\u00e8\3\u00e9\3\u00e9")
buf.write("\3\u00e9\3\u00e9\3\u00ea\3\u00ea\3\u00ea\3\u00ea\3\u00ea")
buf.write("\3\u00ea\3\u00ea\3\u00ea\3\u00eb\3\u00eb\3\u00eb\3\u00eb")
buf.write("\3\u00eb\3\u00eb\3\u00eb\3\u00eb\3\u00eb\3\u00ec\3\u00ec")
buf.write("\3\u00ec\3\u00ec\3\u00ed\3\u00ed\3\u00ed\3\u00ed\3\u00ed")
buf.write("\3\u00ed\3\u00ee\3\u00ee\3\u00ee\3\u00ee\3\u00ee\3\u00ee")
buf.write("\3\u00ee\3\u00ee\3\u00ee\3\u00ef\3\u00ef\3\u00ef\3\u00ef")
buf.write("\3\u00ef\3\u00ef\3\u00f0\3\u00f0\3\u00f0\3\u00f0\3\u00f0")
buf.write("\3\u00f0\3\u00f0\3\u00f1\3\u00f1\3\u00f1\3\u00f1\3\u00f2")
buf.write("\3\u00f2\3\u00f2\3\u00f3\3\u00f3\3\u00f3\3\u00f3\3\u00f3")
buf.write("\3\u00f3\3\u00f3\3\u00f3\3\u00f4\3\u00f4\3\u00f4\3\u00f4")
buf.write("\3\u00f4\3\u00f4\3\u00f4\3\u00f4\3\u00f5\3\u00f5\3\u00f5")
buf.write("\3\u00f5\3\u00f5\3\u00f5\3\u00f5\3\u00f6\3\u00f6\3\u00f6")
buf.write("\3\u00f6\3\u00f6\3\u00f6\3\u00f6\3\u00f6\3\u00f7\3\u00f7")
buf.write("\3\u00f7\3\u00f7\3\u00f7\3\u00f7\3\u00f7\3\u00f7\3\u00f7")
buf.write("\3\u00f7\3\u00f7\3\u00f7\3\u00f7\3\u00f7\3\u00f7\3\u00f7")
buf.write("\3\u00f7\3\u00f8\3\u00f8\3\u00f8\3\u00f8\3\u00f8\3\u00f8")
buf.write("\3\u00f8\3\u00f8\3\u00f8\3\u00f8\3\u00f8\3\u00f9\3\u00f9")
buf.write("\3\u00f9\3\u00f9\3\u00f9\3\u00f9\3\u00f9\3\u00f9\3\u00f9")
buf.write("\3\u00f9\3\u00f9\3\u00fa\3\u00fa\3\u00fa\3\u00fa\3\u00fa")
buf.write("\3\u00fb\3\u00fb\3\u00fb\3\u00fb\3\u00fb\3\u00fb\3\u00fb")
buf.write("\3\u00fb\3\u00fc\3\u00fc\3\u00fc\3\u00fc\3\u00fc\3\u00fc")
buf.write("\3\u00fc\3\u00fc\3\u00fc\3\u00fc\3\u00fc\3\u00fc\3\u00fc")
buf.write("\3\u00fc\3\u00fd\3\u00fd\3\u00fd\3\u00fd\3\u00fe\3\u00fe")
buf.write("\3\u00fe\3\u00fe\3\u00fe\3\u00fe\3\u00fe\3\u00ff\3\u00ff")
buf.write("\3\u00ff\3\u00ff\3\u00ff\3\u0100\3\u0100\3\u0100\3\u0100")
buf.write("\3\u0100\3\u0100\3\u0101\3\u0101\3\u0101\3\u0101\3\u0101")
buf.write("\3\u0101\3\u0101\3\u0102\3\u0102\3\u0102\3\u0102\3\u0102")
buf.write("\3\u0102\3\u0102\3\u0102\3\u0103\3\u0103\3\u0103\3\u0103")
buf.write("\3\u0103\3\u0103\3\u0103\3\u0103\3\u0103\3\u0103\3\u0104")
buf.write("\3\u0104\3\u0104\3\u0104\3\u0104\3\u0104\3\u0104\3\u0105")
buf.write("\3\u0105\3\u0105\3\u0106\3\u0106\3\u0106\3\u0106\3\u0107")
buf.write("\3\u0107\3\u0107\3\u0107\3\u0108\3\u0108\3\u0108\3\u0108")
buf.write("\3\u0109\3\u0109\3\u0109\3\u010a\3\u010a\3\u010a\3\u010a")
buf.write("\3\u010a\3\u010b\3\u010b\3\u010b\3\u010b\3\u010b\3\u010c")
buf.write("\3\u010c\3\u010c\3\u010c\3\u010c\3\u010c\3\u010c\3\u010d")
buf.write("\3\u010d\3\u010d\3\u010e\3\u010e\3\u010e\3\u010e\3\u010e")
buf.write("\3\u010e\3\u010e\3\u010e\3\u010f\3\u010f\3\u010f\3\u010f")
buf.write("\3\u010f\3\u010f\3\u0110\3\u0110\3\u0110\3\u0110\3\u0110")
buf.write("\3\u0110\3\u0110\3\u0110\3\u0110\3\u0110\3\u0110\3\u0111")
buf.write("\3\u0111\3\u0111\3\u0111\3\u0111\3\u0111\3\u0111\3\u0111")
buf.write("\3\u0112\3\u0112\3\u0112\3\u0112\3\u0113\3\u0113\3\u0113")
buf.write("\3\u0113\3\u0113\3\u0113\3\u0114\3\u0114\3\u0114\3\u0114")
buf.write("\3\u0114\3\u0115\3\u0115\3\u0115\3\u0115\3\u0115\3\u0115")
buf.write("\3\u0115\3\u0115\3\u0115\3\u0115\3\u0115\3\u0116\3\u0116")
buf.write("\3\u0116\3\u0116\3\u0116\3\u0116\3\u0116\3\u0116\3\u0117")
buf.write("\3\u0117\3\u0117\3\u0117\3\u0117\3\u0117\3\u0117\3\u0117")
buf.write("\3\u0117\3\u0117\3\u0117\3\u0117\3\u0117\3\u0117\3\u0117")
buf.write("\3\u0117\3\u0118\3\u0118\3\u0118\3\u0118\3\u0118\3\u0118")
buf.write("\3\u0118\3\u0118\3\u0118\3\u0118\3\u0118\3\u0119\3\u0119")
buf.write("\3\u0119\3\u0119\3\u0119\3\u0119\3\u0119\3\u011a\3\u011a")
buf.write("\3\u011a\3\u011a\3\u011a\3\u011a\3\u011a\3\u011a\3\u011a")
buf.write("\3\u011a\3\u011b\3\u011b\3\u011b\3\u011b\3\u011b\3\u011b")
buf.write("\3\u011b\3\u011b\3\u011c\3\u011c\3\u011c\3\u011c\3\u011c")
buf.write("\3\u011d\3\u011d\3\u011d\3\u011d\3\u011d\3\u011d\3\u011d")
buf.write("\3\u011d\3\u011d\3\u011e\3\u011e\3\u011e\3\u011e\3\u011e")
buf.write("\3\u011e\3\u011f\3\u011f\3\u011f\3\u011f\3\u011f\3\u011f")
buf.write("\3\u011f\3\u011f\3\u011f\3\u011f\3\u0120\3\u0120\3\u0120")
buf.write("\3\u0120\3\u0120\3\u0120\3\u0121\3\u0121\3\u0121\3\u0121")
buf.write("\3\u0121\3\u0122\3\u0122\3\u0122\3\u0122\3\u0122\3\u0122")
buf.write("\3\u0122\3\u0122\3\u0122\3\u0122\3\u0122\3\u0122\3\u0123")
buf.write("\3\u0123\3\u0123\3\u0123\3\u0123\3\u0123\3\u0123\3\u0123")
buf.write("\3\u0123\3\u0124\3\u0124\3\u0124\3\u0124\3\u0124\3\u0124")
buf.write("\3\u0124\3\u0124\3\u0124\3\u0124\3\u0125\3\u0125\3\u0125")
buf.write("\3\u0125\3\u0125\3\u0125\3\u0125\3\u0126\3\u0126\3\u0126")
buf.write("\3\u0126\3\u0126\3\u0126\3\u0126\3\u0126\3\u0126\3\u0126")
buf.write("\3\u0127\3\u0127\3\u0127\3\u0127\3\u0127\3\u0127\3\u0127")
buf.write("\3\u0127\3\u0127\3\u0127\3\u0128\3\u0128\3\u0128\3\u0128")
buf.write("\3\u0128\3\u0128\3\u0128\3\u0128\3\u0129\3\u0129\3\u0129")
buf.write("\3\u0129\3\u0129\3\u0129\3\u012a\3\u012a\3\u012a\3\u012a")
buf.write("\3\u012a\3\u012a\3\u012a\3\u012a\3\u012a\3\u012a\3\u012b")
buf.write("\3\u012b\3\u012b\3\u012b\3\u012b\3\u012b\3\u012c\3\u012c")
buf.write("\3\u012c\3\u012c\3\u012c\3\u012c\3\u012d\3\u012d\3\u012d")
buf.write("\3\u012d\3\u012e\3\u012e\3\u012e\3\u012e\3\u012e\3\u012f")
buf.write("\3\u012f\3\u012f\3\u012f\3\u012f\3\u0130\3\u0130\3\u0130")
buf.write("\3\u0130\3\u0130\3\u0130\3\u0130\3\u0131\3\u0131\3\u0131")
buf.write("\3\u0131\3\u0132\3\u0132\3\u0132\3\u0132\3\u0132\3\u0132")
buf.write("\3\u0132\3\u0132\3\u0132\3\u0132\3\u0133\3\u0133\3\u0133")
buf.write("\3\u0133\3\u0133\3\u0133\3\u0133\3\u0133\3\u0133\3\u0133")
buf.write("\3\u0133\3\u0133\3\u0134\3\u0134\3\u0134\3\u0134\3\u0134")
buf.write("\3\u0134\3\u0134\3\u0135\3\u0135\3\u0135\3\u0135\3\u0135")
buf.write("\3\u0135\3\u0135\3\u0135\3\u0135\3\u0135\3\u0136\3\u0136")
buf.write("\3\u0136\3\u0136\3\u0136\3\u0136\3\u0136\3\u0137\3\u0137")
buf.write("\3\u0137\3\u0137\3\u0137\3\u0137\3\u0137\3\u0137\3\u0138")
buf.write("\3\u0138\3\u0138\3\u0138\3\u0138\3\u0138\3\u0138\3\u0138")
buf.write("\3\u0139\3\u0139\3\u0139\3\u0139\3\u0139\3\u0139\3\u0139")
buf.write("\3\u0139\3\u0139\3\u0139\3\u0139\3\u0139\3\u0139\3\u0139")
buf.write("\3\u0139\3\u0139\3\u0139\3\u0139\3\u0139\3\u0139\3\u013a")
buf.write("\3\u013a\3\u013a\3\u013a\3\u013a\3\u013a\3\u013a\3\u013b")
buf.write("\3\u013b\3\u013b\3\u013b\3\u013b\3\u013b\3\u013b\3\u013b")
buf.write("\3\u013b\3\u013b\3\u013b\3\u013b\3\u013b\3\u013c\3\u013c")
buf.write("\3\u013c\3\u013c\3\u013c\3\u013c\3\u013c\3\u013d\3\u013d")
buf.write("\3\u013d\3\u013d\3\u013d\3\u013d\3\u013d\3\u013d\3\u013d")
buf.write("\3\u013d\3\u013e\3\u013e\3\u013e\3\u013e\3\u013e\3\u013e")
buf.write("\3\u013f\3\u013f\3\u013f\3\u013f\3\u013f\3\u013f\3\u013f")
buf.write("\3\u013f\3\u0140\3\u0140\3\u0140\3\u0140\3\u0140\3\u0140")
buf.write("\3\u0140\3\u0141\3\u0141\3\u0141\3\u0141\3\u0141\3\u0141")
buf.write("\3\u0142\3\u0142\3\u0142\3\u0142\3\u0142\3\u0142\3\u0142")
buf.write("\3\u0142\3\u0142\3\u0143\3\u0143\3\u0143\3\u0143\3\u0143")
buf.write("\3\u0143\3\u0143\3\u0144\3\u0144\3\u0144\3\u0144\3\u0145")
buf.write("\3\u0145\3\u0145\3\u0145\3\u0145\3\u0145\3\u0146\3\u0146")
buf.write("\3\u0146\3\u0146\3\u0146\3\u0147\3\u0147\3\u0147\3\u0147")
buf.write("\3\u0147\3\u0147\3\u0148\3\u0148\3\u0148\3\u0148\3\u0148")
buf.write("\3\u0148\3\u0148\3\u0149\3\u0149\3\u0149\3\u0149\3\u0149")
buf.write("\3\u014a\3\u014a\3\u014a\3\u014a\3\u014a\3\u014a\3\u014a")
buf.write("\3\u014a\3\u014a\3\u014a\3\u014b\3\u014b\3\u014b\3\u014b")
buf.write("\3\u014b\3\u014b\3\u014b\3\u014c\3\u014c\3\u014c\3\u014c")
buf.write("\3\u014c\3\u014c\3\u014c\3\u014c\3\u014c\3\u014c\3\u014c")
buf.write("\3\u014c\3\u014d\3\u014d\3\u014d\3\u014d\3\u014e\3\u014e")
buf.write("\3\u014e\3\u014e\3\u014e\3\u014e\3\u014e\3\u014f\3\u014f")
buf.write("\3\u014f\3\u014f\3\u014f\3\u014f\3\u014f\3\u0150\3\u0150")
buf.write("\3\u0150\3\u0150\3\u0150\3\u0151\3\u0151\3\u0151\3\u0151")
buf.write("\3\u0151\3\u0151\3\u0151\3\u0151\3\u0152\3\u0152\3\u0152")
buf.write("\3\u0152\3\u0152\3\u0152\3\u0152\3\u0153\3\u0153\3\u0153")
buf.write("\3\u0153\3\u0153\3\u0154\3\u0154\3\u0154\3\u0154\3\u0154")
buf.write("\3\u0154\3\u0154\3\u0154\3\u0154\3\u0155\3\u0155\3\u0155")
buf.write("\3\u0155\3\u0155\3\u0155\3\u0155\3\u0155\3\u0155\3\u0155")
buf.write("\3\u0155\3\u0156\3\u0156\3\u0156\3\u0156\3\u0156\3\u0156")
buf.write("\3\u0156\3\u0156\3\u0156\3\u0156\3\u0156\3\u0156\3\u0156")
buf.write("\3\u0157\3\u0157\3\u0157\3\u0157\3\u0157\3\u0157\3\u0157")
buf.write("\3\u0157\3\u0157\3\u0157\3\u0157\3\u0157\3\u0157\3\u0157")
buf.write("\3\u0157\3\u0157\3\u0157\3\u0157\3\u0158\3\u0158\3\u0158")
buf.write("\3\u0158\3\u0158\3\u0158\3\u0158\3\u0158\3\u0158\3\u0158")
buf.write("\3\u0158\3\u0158\3\u0159\3\u0159\3\u0159\3\u0159\3\u0159")
buf.write("\3\u0159\3\u0159\3\u0159\3\u0159\3\u0159\3\u0159\3\u0159")
buf.write("\3\u0159\3\u0159\3\u0159\3\u0159\3\u015a\3\u015a\3\u015a")
buf.write("\3\u015a\3\u015b\3\u015b\3\u015b\3\u015b\3\u015b\3\u015c")
buf.write("\3\u015c\3\u015c\3\u015c\3\u015c\3\u015c\3\u015c\3\u015c")
buf.write("\3\u015c\3\u015d\3\u015d\3\u015d\3\u015d\3\u015d\3\u015d")
buf.write("\3\u015e\3\u015e\3\u015e\3\u015e\3\u015e\3\u015f\3\u015f")
buf.write("\3\u015f\3\u015f\3\u015f\3\u015f\3\u015f\3\u015f\3\u015f")
buf.write("\3\u0160\3\u0160\3\u0160\3\u0160\3\u0160\3\u0160\3\u0160")
buf.write("\3\u0160\3\u0160\3\u0161\3\u0161\3\u0161\3\u0161\3\u0161")
buf.write("\3\u0161\3\u0161\3\u0161\3\u0161\3\u0162\3\u0162\3\u0162")
buf.write("\3\u0162\3\u0162\3\u0162\3\u0162\3\u0162\3\u0162\3\u0162")
buf.write("\3\u0162\3\u0162\3\u0162\3\u0162\3\u0162\3\u0163\3\u0163")
buf.write("\3\u0163\3\u0163\3\u0163\3\u0163\3\u0163\3\u0164\3\u0164")
buf.write("\3\u0164\3\u0164\3\u0164\3\u0165\3\u0165\3\u0165\3\u0165")
buf.write("\3\u0165\3\u0166\3\u0166\3\u0166\3\u0166\3\u0166\3\u0166")
buf.write("\3\u0166\3\u0166\3\u0166\3\u0167\3\u0167\3\u0167\3\u0167")
buf.write("\3\u0167\3\u0167\3\u0167\3\u0167\3\u0167\3\u0168\3\u0168")
buf.write("\3\u0168\3\u0168\3\u0168\3\u0169\3\u0169\3\u0169\3\u0169")
buf.write("\3\u0169\3\u0169\3\u0169\3\u0169\3\u0169\3\u0169\3\u0169")
buf.write("\3\u0169\3\u0169\3\u0169\3\u016a\3\u016a\3\u016a\3\u016a")
buf.write("\3\u016a\3\u016a\3\u016a\3\u016a\3\u016b\3\u016b\3\u016b")
buf.write("\3\u016b\3\u016b\3\u016b\3\u016b\3\u016b\3\u016b\3\u016c")
buf.write("\3\u016c\3\u016c\3\u016c\3\u016c\3\u016c\3\u016c\3\u016c")
buf.write("\3\u016c\3\u016c\3\u016c\3\u016d\3\u016d\3\u016d\3\u016d")
buf.write("\3\u016d\3\u016d\3\u016e\3\u016e\3\u016e\3\u016e\3\u016e")
buf.write("\3\u016e\3\u016e\3\u016e\3\u016f\3\u016f\3\u016f\3\u016f")
buf.write("\3\u016f\3\u016f\3\u016f\3\u016f\3\u016f\3\u016f\3\u0170")
buf.write("\3\u0170\3\u0170\3\u0170\3\u0170\3\u0170\3\u0170\3\u0170")
buf.write("\3\u0170\3\u0170\3\u0170\3\u0170\3\u0170\3\u0171\3\u0171")
buf.write("\3\u0171\3\u0171\3\u0171\3\u0171\3\u0171\3\u0172\3\u0172")
buf.write("\3\u0172\3\u0172\3\u0172\3\u0172\3\u0172\3\u0172\3\u0172")
buf.write("\3\u0172\3\u0172\3\u0173\3\u0173\3\u0173\3\u0173\3\u0173")
buf.write("\3\u0173\3\u0173\3\u0174\3\u0174\3\u0174\3\u0174\3\u0174")
buf.write("\3\u0174\3\u0174\3\u0174\3\u0174\3\u0174\3\u0174\3\u0174")
buf.write("\3\u0175\3\u0175\3\u0175\3\u0175\3\u0175\3\u0175\3\u0175")
buf.write("\3\u0175\3\u0175\3\u0175\3\u0175\3\u0175\3\u0175\3\u0176")
buf.write("\3\u0176\3\u0176\3\u0176\3\u0176\3\u0176\3\u0176\3\u0176")
buf.write("\3\u0176\3\u0176\3\u0176\3\u0176\3\u0176\3\u0176\3\u0177")
buf.write("\3\u0177\3\u0177\3\u0177\3\u0177\3\u0177\3\u0177\3\u0177")
buf.write("\3\u0178\3\u0178\3\u0178\3\u0178\3\u0178\3\u0178\3\u0178")
buf.write("\3\u0178\3\u0179\3\u0179\3\u0179\3\u0179\3\u0179\3\u0179")
buf.write("\3\u0179\3\u0179\3\u017a\3\u017a\3\u017a\3\u017a\3\u017a")
buf.write("\3\u017a\3\u017b\3\u017b\3\u017b\3\u017b\3\u017c\3\u017c")
buf.write("\3\u017c\3\u017c\3\u017c\3\u017d\3\u017d\3\u017d\3\u017d")
buf.write("\3\u017d\3\u017e\3\u017e\3\u017e\3\u017e\3\u017e\3\u017e")
buf.write("\3\u017e\3\u017e\3\u017e\3\u017e\3\u017f\3\u017f\3\u017f")
buf.write("\3\u017f\3\u017f\3\u017f\3\u017f\3\u017f\3\u017f\3\u017f")
buf.write("\3\u017f\3\u017f\3\u017f\3\u017f\3\u017f\3\u017f\3\u017f")
buf.write("\3\u017f\3\u017f\3\u017f\3\u017f\3\u017f\3\u017f\3\u017f")
buf.write("\3\u017f\3\u017f\3\u017f\3\u017f\3\u0180\3\u0180\3\u0180")
buf.write("\3\u0180\3\u0180\3\u0180\3\u0180\3\u0180\3\u0180\3\u0180")
buf.write("\3\u0180\3\u0180\3\u0180\3\u0180\3\u0180\3\u0180\3\u0180")
buf.write("\3\u0180\3\u0180\3\u0180\3\u0180\3\u0180\3\u0180\3\u0180")
buf.write("\3\u0180\3\u0180\3\u0180\3\u0181\3\u0181\3\u0181\3\u0181")
buf.write("\3\u0181\3\u0181\3\u0181\3\u0181\3\u0181\3\u0181\3\u0181")
buf.write("\3\u0181\3\u0181\3\u0181\3\u0181\3\u0181\3\u0181\3\u0181")
buf.write("\3\u0181\3\u0181\3\u0181\3\u0181\3\u0181\3\u0181\3\u0182")
buf.write("\3\u0182\3\u0182\3\u0182\3\u0182\3\u0182\3\u0182\3\u0182")
buf.write("\3\u0182\3\u0182\3\u0182\3\u0182\3\u0182\3\u0182\3\u0183")
buf.write("\3\u0183\3\u0183\3\u0183\3\u0183\3\u0183\3\u0183\3\u0183")
buf.write("\3\u0183\3\u0183\3\u0183\3\u0183\3\u0183\3\u0183\3\u0184")
buf.write("\3\u0184\3\u0184\3\u0184\3\u0184\3\u0184\3\u0184\3\u0184")
buf.write("\3\u0184\3\u0184\3\u0184\3\u0184\3\u0184\3\u0184\3\u0184")
buf.write("\3\u0184\3\u0185\3\u0185\3\u0185\3\u0185\3\u0185\3\u0185")
buf.write("\3\u0185\3\u0185\3\u0185\3\u0185\3\u0185\3\u0185\3\u0185")
buf.write("\3\u0185\3\u0185\3\u0185\3\u0186\3\u0186\3\u0186\3\u0187")
buf.write("\3\u0187\3\u0187\3\u0187\3\u0187\3\u0187\3\u0187\3\u0187")
buf.write("\3\u0187\3\u0188\3\u0188\3\u0188\3\u0188\3\u0188\3\u0188")
buf.write("\3\u0188\3\u0188\3\u0188\3\u0188\3\u0188\3\u0188\3\u0189")
buf.write("\3\u0189\3\u0189\3\u0189\3\u0189\3\u0189\3\u0189\3\u0189")
buf.write("\3\u0189\3\u0189\3\u018a\3\u018a\3\u018a\3\u018a\3\u018a")
buf.write("\3\u018a\3\u018b\3\u018b\3\u018b\3\u018b\3\u018b\3\u018b")
buf.write("\3\u018b\3\u018b\3\u018c\3\u018c\3\u018c\3\u018c\3\u018c")
buf.write("\3\u018d\3\u018d\3\u018d\3\u018d\3\u018d\3\u018e\3\u018e")
buf.write("\3\u018e\3\u018e\3\u018e\3\u018e\3\u018e\3\u018e\3\u018e")
buf.write("\3\u018f\3\u018f\3\u018f\3\u018f\3\u018f\3\u0190\3\u0190")
buf.write("\3\u0190\3\u0190\3\u0190\3\u0190\3\u0190\3\u0190\3\u0190")
buf.write("\3\u0190\3\u0191\3\u0191\3\u0191\3\u0191\3\u0191\3\u0191")
buf.write("\3\u0192\3\u0192\3\u0192\3\u0192\3\u0192\3\u0192\3\u0193")
buf.write("\3\u0193\3\u0193\3\u0193\3\u0193\3\u0193\3\u0193\3\u0194")
buf.write("\3\u0194\3\u0194\3\u0194\3\u0194\3\u0194\3\u0194\3\u0194")
buf.write("\3\u0194\3\u0194\3\u0195\3\u0195\3\u0195\3\u0195\3\u0195")
buf.write("\3\u0195\3\u0195\3\u0195\3\u0196\3\u0196\3\u0196\3\u0196")
buf.write("\3\u0196\3\u0196\3\u0197\3\u0197\3\u0197\3\u0197\3\u0197")
buf.write("\3\u0197\3\u0197\3\u0198\3\u0198\3\u0198\3\u0198\3\u0198")
buf.write("\3\u0198\3\u0198\3\u0198\3\u0199\3\u0199\3\u0199\3\u0199")
buf.write("\3\u0199\3\u0199\3\u0199\3\u019a\3\u019a\3\u019a\3\u019a")
buf.write("\3\u019a\3\u019a\3\u019a\3\u019b\3\u019b\3\u019b\3\u019b")
buf.write("\3\u019c\3\u019c\3\u019c\3\u019c\3\u019c\3\u019c\3\u019d")
buf.write("\3\u019d\3\u019d\3\u019d\3\u019d\3\u019d\3\u019d\3\u019d")
buf.write("\3\u019d\3\u019e\3\u019e\3\u019e\3\u019e\3\u019e\3\u019e")
buf.write("\3\u019f\3\u019f\3\u019f\3\u019f\3\u019f\3\u019f\3\u019f")
buf.write("\3\u01a0\3\u01a0\3\u01a0\3\u01a0\3\u01a0\3\u01a0\3\u01a0")
buf.write("\3\u01a0\3\u01a1\3\u01a1\3\u01a1\3\u01a1\3\u01a1\3\u01a1")
buf.write("\3\u01a1\3\u01a1\3\u01a1\3\u01a2\3\u01a2\3\u01a2\3\u01a2")
buf.write("\3\u01a2\3\u01a2\3\u01a2\3\u01a2\3\u01a2\3\u01a3\3\u01a3")
buf.write("\3\u01a3\3\u01a3\3\u01a3\3\u01a3\3\u01a3\3\u01a4\3\u01a4")
buf.write("\3\u01a4\3\u01a4\3\u01a4\3\u01a4\3\u01a4\3\u01a4\3\u01a5")
buf.write("\3\u01a5\3\u01a5\3\u01a5\3\u01a5\3\u01a5\3\u01a5\3\u01a5")
buf.write("\3\u01a6\3\u01a6\3\u01a6\3\u01a6\3\u01a6\3\u01a6\3\u01a6")
buf.write("\3\u01a6\3\u01a6\3\u01a7\3\u01a7\3\u01a7\3\u01a7\3\u01a7")
buf.write("\3\u01a8\3\u01a8\3\u01a8\3\u01a8\3\u01a8\3\u01a8\3\u01a8")
buf.write("\3\u01a8\3\u01a9\3\u01a9\3\u01a9\3\u01a9\3\u01a9\3\u01a9")
buf.write("\3\u01a9\3\u01a9\3\u01a9\3\u01a9\3\u01a9\3\u01aa\3\u01aa")
buf.write("\3\u01aa\3\u01aa\3\u01aa\3\u01ab\3\u01ab\3\u01ab\3\u01ab")
buf.write("\3\u01ab\3\u01ab\3\u01ab\3\u01ab\3\u01ab\3\u01ac\3\u01ac")
buf.write("\3\u01ac\3\u01ac\3\u01ac\3\u01ac\3\u01ad\3\u01ad\3\u01ad")
buf.write("\3\u01ad\3\u01ad\3\u01ad\3\u01ae\3\u01ae\3\u01ae\3\u01ae")
buf.write("\3\u01ae\3\u01af\3\u01af\3\u01af\3\u01af\3\u01af\3\u01af")
buf.write("\3\u01af\3\u01b0\3\u01b0\3\u01b0\3\u01b0\3\u01b0\3\u01b1")
buf.write("\3\u01b1\3\u01b1\3\u01b1\3\u01b1\3\u01b1\3\u01b2\3\u01b2")
buf.write("\3\u01b2\3\u01b2\3\u01b3\3\u01b3\3\u01b3\3\u01b3\3\u01b3")
buf.write("\3\u01b3\3\u01b3\3\u01b4\3\u01b4\3\u01b4\3\u01b4\3\u01b4")
buf.write("\3\u01b4\3\u01b4\3\u01b4\3\u01b4\3\u01b4\3\u01b4\3\u01b4")
buf.write("\3\u01b4\3\u01b4\3\u01b5\3\u01b5\3\u01b5\3\u01b5\3\u01b5")
buf.write("\3\u01b5\3\u01b5\3\u01b5\3\u01b6\3\u01b6\3\u01b6\3\u01b6")
buf.write("\3\u01b6\3\u01b6\3\u01b6\3\u01b6\3\u01b6\3\u01b6\3\u01b6")
buf.write("\3\u01b6\3\u01b6\3\u01b7\3\u01b7\3\u01b7\3\u01b7\3\u01b7")
buf.write("\3\u01b7\3\u01b7\3\u01b7\3\u01b7\3\u01b7\3\u01b7\3\u01b8")
buf.write("\3\u01b8\3\u01b8\3\u01b8\3\u01b8\3\u01b8\3\u01b8\3\u01b8")
buf.write("\3\u01b8\3\u01b8\3\u01b9\3\u01b9\3\u01b9\3\u01b9\3\u01b9")
buf.write("\3\u01b9\3\u01b9\3\u01b9\3\u01b9\3\u01b9\3\u01ba\3\u01ba")
buf.write("\3\u01ba\3\u01ba\3\u01ba\3\u01ba\3\u01ba\3\u01ba\3\u01ba")
buf.write("\3\u01ba\3\u01ba\3\u01ba\3\u01ba\3\u01ba\3\u01bb\3\u01bb")
buf.write("\3\u01bb\3\u01bb\3\u01bb\3\u01bb\3\u01bb\3\u01bb\3\u01bb")
buf.write("\3\u01bc\3\u01bc\3\u01bc\3\u01bc\3\u01bc\3\u01bc\3\u01bd")
buf.write("\3\u01bd\3\u01bd\3\u01bd\3\u01bd\3\u01bd\3\u01bd\3\u01bd")
buf.write("\3\u01bd\3\u01be\3\u01be\3\u01be\3\u01be\3\u01be\3\u01be")
buf.write("\3\u01be\3\u01be\3\u01bf\3\u01bf\3\u01bf\3\u01bf\3\u01bf")
buf.write("\3\u01bf\3\u01bf\3\u01bf\3\u01bf\3\u01bf\3\u01bf\3\u01bf")
buf.write("\3\u01bf\3\u01c0\3\u01c0\3\u01c0\3\u01c0\3\u01c0\3\u01c0")
buf.write("\3\u01c0\3\u01c0\3\u01c0\3\u01c1\3\u01c1\3\u01c1\3\u01c1")
buf.write("\3\u01c1\3\u01c2\3\u01c2\3\u01c2\3\u01c2\3\u01c3\3\u01c3")
buf.write("\3\u01c3\3\u01c3\3\u01c3\3\u01c3\3\u01c3\3\u01c3\3\u01c3")
buf.write("\3\u01c3\3\u01c3\3\u01c3\3\u01c3\3\u01c3\3\u01c3\3\u01c3")
buf.write("\3\u01c3\3\u01c3\3\u01c3\3\u01c3\3\u01c3\3\u01c3\3\u01c3")
buf.write("\3\u01c3\3\u01c3\3\u01c4\3\u01c4\3\u01c4\3\u01c4\3\u01c4")
buf.write("\3\u01c5\3\u01c5\3\u01c5\3\u01c5\3\u01c5\3\u01c5\3\u01c5")
buf.write("\3\u01c5\3\u01c5\3\u01c5\3\u01c5\3\u01c6\3\u01c6\3\u01c6")
buf.write("\3\u01c6\3\u01c6\3\u01c6\3\u01c6\3\u01c6\3\u01c6\3\u01c6")
buf.write("\3\u01c6\3\u01c6\3\u01c6\3\u01c6\3\u01c6\3\u01c6\3\u01c6")
buf.write("\3\u01c6\3\u01c7\3\u01c7\3\u01c7\3\u01c7\3\u01c7\3\u01c7")
buf.write("\3\u01c7\3\u01c7\3\u01c7\3\u01c7\3\u01c7\3\u01c7\3\u01c7")
buf.write("\3\u01c7\3\u01c7\3\u01c7\3\u01c8\3\u01c8\3\u01c8\3\u01c8")
buf.write("\3\u01c8\3\u01c8\3\u01c8\3\u01c8\3\u01c8\3\u01c8\3\u01c8")
buf.write("\3\u01c8\3\u01c8\3\u01c8\3\u01c8\3\u01c8\3\u01c8\3\u01c8")
buf.write("\3\u01c8\3\u01c9\3\u01c9\3\u01c9\3\u01c9\3\u01c9\3\u01c9")
buf.write("\3\u01c9\3\u01c9\3\u01c9\3\u01c9\3\u01c9\3\u01c9\3\u01c9")
buf.write("\3\u01c9\3\u01c9\3\u01c9\3\u01c9\3\u01c9\3\u01c9\3\u01c9")
buf.write("\3\u01c9\3\u01c9\3\u01c9\3\u01ca\3\u01ca\3\u01ca\3\u01ca")
buf.write("\3\u01ca\3\u01ca\3\u01ca\3\u01ca\3\u01ca\3\u01ca\3\u01ca")
buf.write("\3\u01ca\3\u01ca\3\u01ca\3\u01ca\3\u01cb\3\u01cb\3\u01cb")
buf.write("\3\u01cb\3\u01cb\3\u01cb\3\u01cb\3\u01cb\3\u01cb\3\u01cb")
buf.write("\3\u01cc\3\u01cc\3\u01cc\3\u01cc\3\u01cc\3\u01cc\3\u01cc")
buf.write("\3\u01cc\3\u01cc\3\u01cc\3\u01cc\3\u01cd\3\u01cd\3\u01cd")
buf.write("\3\u01cd\3\u01cd\3\u01cd\3\u01cd\3\u01cd\3\u01ce\3\u01ce")
buf.write("\3\u01ce\3\u01ce\3\u01ce\3\u01ce\3\u01ce\3\u01ce\3\u01ce")
buf.write("\3\u01ce\3\u01ce\3\u01ce\3\u01ce\3\u01cf\3\u01cf\3\u01cf")
buf.write("\3\u01cf\3\u01cf\3\u01cf\3\u01cf\3\u01cf\3\u01cf\3\u01cf")
buf.write("\3\u01cf\3\u01cf\3\u01cf\3\u01cf\3\u01cf\3\u01cf\3\u01d0")
buf.write("\3\u01d0\3\u01d0\3\u01d0\3\u01d0\3\u01d0\3\u01d0\3\u01d0")
buf.write("\3\u01d0\3\u01d0\3\u01d0\3\u01d0\3\u01d0\3\u01d0\3\u01d0")
buf.write("\3\u01d0\3\u01d1\3\u01d1\3\u01d1\3\u01d1\3\u01d1\3\u01d2")
buf.write("\3\u01d2\3\u01d2\3\u01d2\3\u01d3\3\u01d3\3\u01d3\3\u01d3")
buf.write("\3\u01d3\3\u01d4\3\u01d4\3\u01d4\3\u01d4\3\u01d5\3\u01d5")
buf.write("\3\u01d5\3\u01d5\3\u01d5\3\u01d6\3\u01d6\3\u01d6\3\u01d6")
buf.write("\3\u01d7\3\u01d7\3\u01d7\3\u01d7\3\u01d7\3\u01d7\3\u01d7")
buf.write("\3\u01d8\3\u01d8\3\u01d8\3\u01d8\3\u01d9\3\u01d9\3\u01d9")
buf.write("\3\u01d9\3\u01d9\3\u01d9\3\u01da\3\u01da\3\u01da\3\u01da")
buf.write("\3\u01da\3\u01da\3\u01da\3\u01da\3\u01da\3\u01da\3\u01da")
buf.write("\3\u01da\3\u01da\3\u01da\3\u01da\3\u01da\3\u01db\3\u01db")
buf.write("\3\u01db\3\u01db\3\u01db\3\u01db\3\u01db\3\u01db\3\u01db")
buf.write("\3\u01db\3\u01db\3\u01dc\3\u01dc\3\u01dc\3\u01dc\3\u01dd")
buf.write("\3\u01dd\3\u01dd\3\u01dd\3\u01dd\3\u01dd\3\u01dd\3\u01dd")
buf.write("\3\u01dd\3\u01de\3\u01de\3\u01de\3\u01de\3\u01de\3\u01de")
buf.write("\3\u01df\3\u01df\3\u01df\3\u01df\3\u01df\3\u01df\3\u01df")
buf.write("\3\u01e0\3\u01e0\3\u01e0\3\u01e0\3\u01e0\3\u01e1\3\u01e1")
buf.write("\3\u01e1\3\u01e1\3\u01e1\3\u01e1\3\u01e1\3\u01e2\3\u01e2")
buf.write("\3\u01e2\3\u01e2\3\u01e2\3\u01e2\7\u01e2\u1368\n\u01e2")
buf.write("\f\u01e2\16\u01e2\u136b\13\u01e2\3\u01e2\3\u01e2\3\u01e3")
buf.write("\3\u01e3\3\u01e3\7\u01e3\u1372\n\u01e3\f\u01e3\16\u01e3")
buf.write("\u1375\13\u01e3\3\u01e3\6\u01e3\u1378\n\u01e3\r\u01e3")
buf.write("\16\u01e3\u1379\3\u01e4\3\u01e4\3\u01e4\7\u01e4\u137f")
buf.write("\n\u01e4\f\u01e4\16\u01e4\u1382\13\u01e4\3\u01e4\6\u01e4")
buf.write("\u1385\n\u01e4\r\u01e4\16\u01e4\u1386\3\u01e5\3\u01e5")
buf.write("\3\u01e5\3\u01e6\3\u01e6\3\u01e7\3\u01e7\3\u01e8\3\u01e8")
buf.write("\3\u01e8\5\u01e8\u1393\n\u01e8\3\u01e8\3\u01e8\5\u01e8")
buf.write("\u1397\n\u01e8\5\u01e8\u1399\n\u01e8\3\u01e8\3\u01e8\5")
buf.write("\u01e8\u139d\n\u01e8\3\u01e9\3\u01e9\3\u01e9\3\u01e9\3")
buf.write("\u01e9\7\u01e9\u13a4\n\u01e9\f\u01e9\16\u01e9\u13a7\13")
buf.write("\u01e9\3\u01e9\3\u01e9\3\u01ea\3\u01ea\3\u01ea\3\u01ea")
buf.write("\3\u01ea\5\u01ea\u13b0\n\u01ea\3\u01ea\3\u01ea\3\u01eb")
buf.write("\3\u01eb\3\u01ec\3\u01ec\3\u01ec\7\u01ec\u13b9\n\u01ec")
buf.write("\f\u01ec\16\u01ec\u13bc\13\u01ec\3\u01ec\3\u01ec\3\u01ec")
buf.write("\3\u01ed\3\u01ed\3\u01ed\7\u01ed\u13c4\n\u01ed\f\u01ed")
buf.write("\16\u01ed\u13c7\13\u01ed\3\u01ed\3\u01ed\3\u01ed\3\u01ee")
buf.write("\3\u01ee\3\u01ee\7\u01ee\u13cf\n\u01ee\f\u01ee\16\u01ee")
buf.write("\u13d2\13\u01ee\3\u01ee\3\u01ee\3\u01ee\3\u01ef\3\u01ef")
buf.write("\3\u01ef\7\u01ef\u13da\n\u01ef\f\u01ef\16\u01ef\u13dd")
buf.write("\13\u01ef\3\u01ef\3\u01ef\3\u01ef\3\u01f0\3\u01f0\3\u01f1")
buf.write("\3\u01f1\3\u01f1\3\u01f1\6\u01f1\u13e8\n\u01f1\r\u01f1")
buf.write("\16\u01f1\u13e9\3\u01f1\3\u01f1\3\u01f2\3\u01f2\3\u01f3")
buf.write("\3\u01f3\3\u01f4\3\u01f4\3\u01f5\3\u01f5\3\u01f6\3\u01f6")
buf.write("\3\u01f6\3\u01f7\3\u01f7\3\u01f8\3\u01f8\3\u01f9\3\u01f9")
buf.write("\3\u01fa\3\u01fa\3\u01fb\3\u01fb\3\u01fc\3\u01fc\3\u01fd")
buf.write("\3\u01fd\3\u01fd\3\u01fe\3\u01fe\3\u01fe\3\u01fe\7\u01fe")
buf.write("\u140c\n\u01fe\f\u01fe\16\u01fe\u140f\13\u01fe\3\u01fe")
buf.write("\3\u01fe\3\u01fe\3\u01fe\3\u01fe\5\u01fe\u1416\n\u01fe")
buf.write("\3\u01ff\3\u01ff\3\u0200\3\u0200\3\u0201\3\u0201\3\u0201")
buf.write("\3\u0202\3\u0202\3\u0203\3\u0203\3\u0203\3\u0204\3\u0204")
buf.write("\3\u0204\3\u0204\3\u0204\3\u0204\3\u0204\3\u0204\5\u0204")
buf.write("\u142c\n\u0204\3\u0205\3\u0205\3\u0206\3\u0206\3\u0207")
buf.write("\3\u0207\3\u0208\3\u0208\3\u0209\3\u0209\3\u020a\3\u020a")
buf.write("\3\u020a\3\u020b\3\u020b\3\u020c\3\u020c\3\u020d\3\u020d")
buf.write("\3\u020e\3\u020e\3\u020f\3\u020f\3\u0210\6\u0210\u1446")
buf.write("\n\u0210\r\u0210\16\u0210\u1447\3\u0210\3\u0210\3\u0211")
buf.write("\3\u0211\3\u0212\6\u0212\u144f\n\u0212\r\u0212\16\u0212")
buf.write("\u1450\3\u0213\7\u0213\u1454\n\u0213\f\u0213\16\u0213")
buf.write("\u1457\13\u0213\3\u0213\5\u0213\u145a\n\u0213\3\u0213")
buf.write("\6\u0213\u145d\n\u0213\r\u0213\16\u0213\u145e\3\u0214")
buf.write("\3\u0214\3\u0214\3\u0214\7\u0214\u1465\n\u0214\f\u0214")
buf.write("\16\u0214\u1468\13\u0214\3\u0214\3\u0214\5\u0214\u146c")
buf.write("\n\u0214\3\u0214\3\u0214\3\u0215\3\u0215\3\u0215\3\u0215")
buf.write("\7\u0215\u1474\n\u0215\f\u0215\16\u0215\u1477\13\u0215")
buf.write("\3\u0215\3\u0215\3\u0215\3\u0215\3\u0215\3\u0216\3\u0216")
buf.write("\3\u0216\3\u0216\3\u0216\3\u0216\3\u0216\3\u0216\3\u0216")
buf.write("\7\u0216\u1487\n\u0216\f\u0216\16\u0216\u148a\13\u0216")
buf.write("\3\u0216\3\u0216\5\u0216\u148e\n\u0216\3\u0217\5\u0217")
buf.write("\u1491\n\u0217\3\u0217\3\u0217\3\u0218\3\u0218\3\u0219")
buf.write("\3\u0219\3\u0219\7\u0219\u149a\n\u0219\f\u0219\16\u0219")
buf.write("\u149d\13\u0219\3\u021a\3\u021a\3\u021a\3\u021a\3\u021a")
buf.write("\3\u021b\3\u021b\3\u021c\3\u021c\3\u021d\3\u021d\3\u021e")
buf.write("\3\u021e\3\u021f\3\u021f\3\u0220\3\u0220\3\u0221\3\u0221")
buf.write("\3\u0222\3\u0222\3\u0223\3\u0223\3\u0224\3\u0224\3\u0225")
buf.write("\3\u0225\3\u0226\3\u0226\3\u0227\3\u0227\3\u0228\3\u0228")
buf.write("\3\u0229\3\u0229\3\u022a\3\u022a\3\u022b\3\u022b\3\u022c")
buf.write("\3\u022c\3\u022d\3\u022d\3\u022e\3\u022e\3\u022f\3\u022f")
buf.write("\3\u0230\3\u0230\3\u0231\3\u0231\3\u0232\3\u0232\3\u0233")
buf.write("\3\u0233\3\u0234\3\u0234\7\u13ba\u13c5\u13d0\u13db\u1475")
buf.write("\2\u0235\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f")
buf.write("\27\r\31\16\33\17\35\20\37\21!\22#\23%\24\'\25)\26+\27")
buf.write("-\30/\31\61\32\63\33\65\34\67\359\36;\37= ?!A\"C#E$G%")
buf.write("I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64g\65i\66k\67")
buf.write("m8o9q:s;u<w=y>{?}@\177A\u0081B\u0083C\u0085D\u0087E\u0089")
buf.write("F\u008bG\u008dH\u008fI\u0091J\u0093K\u0095L\u0097M\u0099")
buf.write("N\u009bO\u009dP\u009fQ\u00a1R\u00a3S\u00a5T\u00a7U\u00a9")
buf.write("V\u00abW\u00adX\u00afY\u00b1Z\u00b3[\u00b5\\\u00b7]\u00b9")
buf.write("^\u00bb_\u00bd`\u00bfa\u00c1b\u00c3c\u00c5d\u00c7e\u00c9")
buf.write("f\u00cbg\u00cdh\u00cfi\u00d1j\u00d3k\u00d5l\u00d7m\u00d9")
buf.write("n\u00dbo\u00ddp\u00dfq\u00e1r\u00e3s\u00e5t\u00e7u\u00e9")
buf.write("v\u00ebw\u00edx\u00efy\u00f1z\u00f3{\u00f5|\u00f7}\u00f9")
buf.write("~\u00fb\177\u00fd\u0080\u00ff\u0081\u0101\u0082\u0103")
buf.write("\u0083\u0105\u0084\u0107\u0085\u0109\u0086\u010b\u0087")
buf.write("\u010d\u0088\u010f\u0089\u0111\u008a\u0113\u008b\u0115")
buf.write("\u008c\u0117\u008d\u0119\u008e\u011b\u008f\u011d\u0090")
buf.write("\u011f\u0091\u0121\u0092\u0123\u0093\u0125\u0094\u0127")
buf.write("\u0095\u0129\u0096\u012b\u0097\u012d\u0098\u012f\u0099")
buf.write("\u0131\u009a\u0133\u009b\u0135\u009c\u0137\u009d\u0139")
buf.write("\u009e\u013b\u009f\u013d\u00a0\u013f\u00a1\u0141\u00a2")
buf.write("\u0143\u00a3\u0145\u00a4\u0147\u00a5\u0149\u00a6\u014b")
buf.write("\u00a7\u014d\u00a8\u014f\u00a9\u0151\u00aa\u0153\u00ab")
buf.write("\u0155\u00ac\u0157\u00ad\u0159\u00ae\u015b\u00af\u015d")
buf.write("\u00b0\u015f\u00b1\u0161\u00b2\u0163\u00b3\u0165\u00b4")
buf.write("\u0167\u00b5\u0169\u00b6\u016b\u00b7\u016d\u00b8\u016f")
buf.write("\u00b9\u0171\u00ba\u0173\u00bb\u0175\u00bc\u0177\u00bd")
buf.write("\u0179\u00be\u017b\u00bf\u017d\u00c0\u017f\u00c1\u0181")
buf.write("\u00c2\u0183\u00c3\u0185\u00c4\u0187\u00c5\u0189\u00c6")
buf.write("\u018b\u00c7\u018d\u00c8\u018f\u00c9\u0191\u00ca\u0193")
buf.write("\u00cb\u0195\u00cc\u0197\u00cd\u0199\u00ce\u019b\u00cf")
buf.write("\u019d\u00d0\u019f\u00d1\u01a1\u00d2\u01a3\u00d3\u01a5")
buf.write("\u00d4\u01a7\u00d5\u01a9\u00d6\u01ab\u00d7\u01ad\u00d8")
buf.write("\u01af\u00d9\u01b1\u00da\u01b3\u00db\u01b5\u00dc\u01b7")
buf.write("\u00dd\u01b9\u00de\u01bb\u00df\u01bd\u00e0\u01bf\u00e1")
buf.write("\u01c1\u00e2\u01c3\u00e3\u01c5\u00e4\u01c7\u00e5\u01c9")
buf.write("\u00e6\u01cb\u00e7\u01cd\u00e8\u01cf\u00e9\u01d1\u00ea")
buf.write("\u01d3\u00eb\u01d5\u00ec\u01d7\u00ed\u01d9\u00ee\u01db")
buf.write("\u00ef\u01dd\u00f0\u01df\u00f1\u01e1\u00f2\u01e3\u00f3")
buf.write("\u01e5\u00f4\u01e7\u00f5\u01e9\u00f6\u01eb\u00f7\u01ed")
buf.write("\u00f8\u01ef\u00f9\u01f1\u00fa\u01f3\u00fb\u01f5\u00fc")
buf.write("\u01f7\u00fd\u01f9\u00fe\u01fb\u00ff\u01fd\u0100\u01ff")
buf.write("\u0101\u0201\u0102\u0203\u0103\u0205\u0104\u0207\u0105")
buf.write("\u0209\u0106\u020b\u0107\u020d\u0108\u020f\u0109\u0211")
buf.write("\u010a\u0213\u010b\u0215\u010c\u0217\u010d\u0219\u010e")
buf.write("\u021b\u010f\u021d\u0110\u021f\u0111\u0221\u0112\u0223")
buf.write("\u0113\u0225\u0114\u0227\u0115\u0229\u0116\u022b\u0117")
buf.write("\u022d\u0118\u022f\u0119\u0231\u011a\u0233\u011b\u0235")
buf.write("\u011c\u0237\u011d\u0239\u011e\u023b\u011f\u023d\u0120")
buf.write("\u023f\u0121\u0241\u0122\u0243\u0123\u0245\u0124\u0247")
buf.write("\u0125\u0249\u0126\u024b\u0127\u024d\u0128\u024f\u0129")
buf.write("\u0251\u012a\u0253\u012b\u0255\u012c\u0257\u012d\u0259")
buf.write("\u012e\u025b\u012f\u025d\u0130\u025f\u0131\u0261\u0132")
buf.write("\u0263\u0133\u0265\u0134\u0267\u0135\u0269\u0136\u026b")
buf.write("\u0137\u026d\u0138\u026f\u0139\u0271\u013a\u0273\u013b")
buf.write("\u0275\u013c\u0277\u013d\u0279\u013e\u027b\u013f\u027d")
buf.write("\u0140\u027f\u0141\u0281\u0142\u0283\u0143\u0285\u0144")
buf.write("\u0287\u0145\u0289\u0146\u028b\u0147\u028d\u0148\u028f")
buf.write("\u0149\u0291\u014a\u0293\u014b\u0295\u014c\u0297\u014d")
buf.write("\u0299\u014e\u029b\u014f\u029d\u0150\u029f\u0151\u02a1")
buf.write("\u0152\u02a3\u0153\u02a5\u0154\u02a7\u0155\u02a9\u0156")
buf.write("\u02ab\u0157\u02ad\u0158\u02af\u0159\u02b1\u015a\u02b3")
buf.write("\u015b\u02b5\u015c\u02b7\u015d\u02b9\u015e\u02bb\u015f")
buf.write("\u02bd\u0160\u02bf\u0161\u02c1\u0162\u02c3\u0163\u02c5")
buf.write("\u0164\u02c7\u0165\u02c9\u0166\u02cb\u0167\u02cd\u0168")
buf.write("\u02cf\u0169\u02d1\u016a\u02d3\u016b\u02d5\u016c\u02d7")
buf.write("\u016d\u02d9\u016e\u02db\u016f\u02dd\u0170\u02df\u0171")
buf.write("\u02e1\u0172\u02e3\u0173\u02e5\u0174\u02e7\u0175\u02e9")
buf.write("\u0176\u02eb\u0177\u02ed\u0178\u02ef\u0179\u02f1\u017a")
buf.write("\u02f3\u017b\u02f5\u017c\u02f7\u017d\u02f9\u017e\u02fb")
buf.write("\u017f\u02fd\u0180\u02ff\u0181\u0301\u0182\u0303\u0183")
buf.write("\u0305\u0184\u0307\u0185\u0309\u0186\u030b\u0187\u030d")
buf.write("\u0188\u030f\u0189\u0311\u018a\u0313\u018b\u0315\u018c")
buf.write("\u0317\u018d\u0319\u018e\u031b\u018f\u031d\u0190\u031f")
buf.write("\u0191\u0321\u0192\u0323\u0193\u0325\u0194\u0327\u0195")
buf.write("\u0329\u0196\u032b\u0197\u032d\u0198\u032f\u0199\u0331")
buf.write("\u019a\u0333\u019b\u0335\u019c\u0337\u019d\u0339\u019e")
buf.write("\u033b\u019f\u033d\u01a0\u033f\u01a1\u0341\u01a2\u0343")
buf.write("\u01a3\u0345\u01a4\u0347\u01a5\u0349\u01a6\u034b\u01a7")
buf.write("\u034d\u01a8\u034f\u01a9\u0351\u01aa\u0353\u01ab\u0355")
buf.write("\u01ac\u0357\u01ad\u0359\u01ae\u035b\u01af\u035d\u01b0")
buf.write("\u035f\u01b1\u0361\u01b2\u0363\u01b3\u0365\u01b4\u0367")
buf.write("\u01b5\u0369\u01b6\u036b\u01b7\u036d\u01b8\u036f\u01b9")
buf.write("\u0371\u01ba\u0373\u01bb\u0375\u01bc\u0377\u01bd\u0379")
buf.write("\u01be\u037b\u01bf\u037d\u01c0\u037f\u01c1\u0381\u01c2")
buf.write("\u0383\u01c3\u0385\u01c4\u0387\u01c5\u0389\u01c6\u038b")
buf.write("\u01c7\u038d\u01c8\u038f\u01c9\u0391\u01ca\u0393\u01cb")
buf.write("\u0395\u01cc\u0397\u01cd\u0399\u01ce\u039b\u01cf\u039d")
buf.write("\u01d0\u039f\u01d1\u03a1\u01d2\u03a3\u01d3\u03a5\u01d4")
buf.write("\u03a7\u01d5\u03a9\u01d6\u03ab\u01d7\u03ad\u01d8\u03af")
buf.write("\u01d9\u03b1\u01da\u03b3\u01db\u03b5\u01dc\u03b7\u01dd")
buf.write("\u03b9\u01de\u03bb\u01df\u03bd\u01e0\u03bf\u01e1\u03c1")
buf.write("\u01e2\u03c3\u01e3\u03c5\u01e4\u03c7\u01e5\u03c9\u01e6")
buf.write("\u03cb\u01e7\u03cd\u01e8\u03cf\u01e9\u03d1\u01ea\u03d3")
buf.write("\2\u03d5\2\u03d7\2\u03d9\2\u03db\2\u03dd\2\u03df\2\u03e1")
buf.write("\u01eb\u03e3\u01ec\u03e5\u01ed\u03e7\u01ee\u03e9\u01ef")
buf.write("\u03eb\u01f0\u03ed\u01f1\u03ef\u01f2\u03f1\u01f3\u03f3")
buf.write("\u01f4\u03f5\u01f5\u03f7\u01f6\u03f9\u01f7\u03fb\u01f8")
buf.write("\u03fd\u01f9\u03ff\u01fa\u0401\u01fb\u0403\u01fc\u0405")
buf.write("\u01fd\u0407\u01fe\u0409\u01ff\u040b\u0200\u040d\u0201")
buf.write("\u040f\u0202\u0411\2\u0413\u0203\u0415\u0204\u0417\u0205")
buf.write("\u0419\u0206\u041b\u0207\u041d\u0208\u041f\u0209\u0421")
buf.write("\2\u0423\2\u0425\2\u0427\u020a\u0429\u020b\u042b\u020c")
buf.write("\u042d\2\u042f\2\u0431\u020d\u0433\u020e\u0435\2\u0437")
buf.write("\2\u0439\2\u043b\2\u043d\2\u043f\2\u0441\2\u0443\2\u0445")
buf.write("\2\u0447\2\u0449\2\u044b\2\u044d\2\u044f\2\u0451\2\u0453")
buf.write("\2\u0455\2\u0457\2\u0459\2\u045b\2\u045d\2\u045f\2\u0461")
buf.write("\2\u0463\2\u0465\2\u0467\2\3\2\'\5\2\f\f\17\17))\5\2\62")
buf.write(";CHch\4\2GGgg\4\2--//\t\2\13\f\17\17\"\"**>>]]}}\5\2\f")
buf.write("\f\17\17$$\4\2\62;aa\5\2\13\f\17\17\"\"\4\2C\\c|\4\2\f")
buf.write("\f\17\17\4\2\13\13\"\"\5\2%&\62;aa\4\2CCcc\4\2DDdd\4\2")
buf.write("EEee\4\2FFff\4\2HHhh\4\2IIii\4\2JJjj\4\2KKkk\4\2LLll\4")
buf.write("\2MMmm\4\2NNnn\4\2OOoo\4\2PPpp\4\2QQqq\4\2RRrr\4\2SSs")
buf.write("s\4\2TTtt\4\2UUuu\4\2VVvv\4\2WWww\4\2XXxx\4\2YYyy\4\2")
buf.write("ZZzz\4\2[[{{\4\2\\\\||\2\u14dd\2\3\3\2\2\2\2\5\3\2\2\2")
buf.write("\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17")
buf.write("\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3")
buf.write("\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2")
buf.write("\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3")
buf.write("\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2")
buf.write("\63\3\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3")
buf.write("\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E")
buf.write("\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2")
buf.write("O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2")
buf.write("\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2\2\2\2_\3\2\2\2\2a\3\2\2")
buf.write("\2\2c\3\2\2\2\2e\3\2\2\2\2g\3\2\2\2\2i\3\2\2\2\2k\3\2")
buf.write("\2\2\2m\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2\2s\3\2\2\2\2u\3")
buf.write("\2\2\2\2w\3\2\2\2\2y\3\2\2\2\2{\3\2\2\2\2}\3\2\2\2\2\177")
buf.write("\3\2\2\2\2\u0081\3\2\2\2\2\u0083\3\2\2\2\2\u0085\3\2\2")
buf.write("\2\2\u0087\3\2\2\2\2\u0089\3\2\2\2\2\u008b\3\2\2\2\2\u008d")
buf.write("\3\2\2\2\2\u008f\3\2\2\2\2\u0091\3\2\2\2\2\u0093\3\2\2")
buf.write("\2\2\u0095\3\2\2\2\2\u0097\3\2\2\2\2\u0099\3\2\2\2\2\u009b")
buf.write("\3\2\2\2\2\u009d\3\2\2\2\2\u009f\3\2\2\2\2\u00a1\3\2\2")
buf.write("\2\2\u00a3\3\2\2\2\2\u00a5\3\2\2\2\2\u00a7\3\2\2\2\2\u00a9")
buf.write("\3\2\2\2\2\u00ab\3\2\2\2\2\u00ad\3\2\2\2\2\u00af\3\2\2")
buf.write("\2\2\u00b1\3\2\2\2\2\u00b3\3\2\2\2\2\u00b5\3\2\2\2\2\u00b7")
buf.write("\3\2\2\2\2\u00b9\3\2\2\2\2\u00bb\3\2\2\2\2\u00bd\3\2\2")
buf.write("\2\2\u00bf\3\2\2\2\2\u00c1\3\2\2\2\2\u00c3\3\2\2\2\2\u00c5")
buf.write("\3\2\2\2\2\u00c7\3\2\2\2\2\u00c9\3\2\2\2\2\u00cb\3\2\2")
buf.write("\2\2\u00cd\3\2\2\2\2\u00cf\3\2\2\2\2\u00d1\3\2\2\2\2\u00d3")
buf.write("\3\2\2\2\2\u00d5\3\2\2\2\2\u00d7\3\2\2\2\2\u00d9\3\2\2")
buf.write("\2\2\u00db\3\2\2\2\2\u00dd\3\2\2\2\2\u00df\3\2\2\2\2\u00e1")
buf.write("\3\2\2\2\2\u00e3\3\2\2\2\2\u00e5\3\2\2\2\2\u00e7\3\2\2")
buf.write("\2\2\u00e9\3\2\2\2\2\u00eb\3\2\2\2\2\u00ed\3\2\2\2\2\u00ef")
buf.write("\3\2\2\2\2\u00f1\3\2\2\2\2\u00f3\3\2\2\2\2\u00f5\3\2\2")
buf.write("\2\2\u00f7\3\2\2\2\2\u00f9\3\2\2\2\2\u00fb\3\2\2\2\2\u00fd")
buf.write("\3\2\2\2\2\u00ff\3\2\2\2\2\u0101\3\2\2\2\2\u0103\3\2\2")
buf.write("\2\2\u0105\3\2\2\2\2\u0107\3\2\2\2\2\u0109\3\2\2\2\2\u010b")
buf.write("\3\2\2\2\2\u010d\3\2\2\2\2\u010f\3\2\2\2\2\u0111\3\2\2")
buf.write("\2\2\u0113\3\2\2\2\2\u0115\3\2\2\2\2\u0117\3\2\2\2\2\u0119")
buf.write("\3\2\2\2\2\u011b\3\2\2\2\2\u011d\3\2\2\2\2\u011f\3\2\2")
buf.write("\2\2\u0121\3\2\2\2\2\u0123\3\2\2\2\2\u0125\3\2\2\2\2\u0127")
buf.write("\3\2\2\2\2\u0129\3\2\2\2\2\u012b\3\2\2\2\2\u012d\3\2\2")
buf.write("\2\2\u012f\3\2\2\2\2\u0131\3\2\2\2\2\u0133\3\2\2\2\2\u0135")
buf.write("\3\2\2\2\2\u0137\3\2\2\2\2\u0139\3\2\2\2\2\u013b\3\2\2")
buf.write("\2\2\u013d\3\2\2\2\2\u013f\3\2\2\2\2\u0141\3\2\2\2\2\u0143")
buf.write("\3\2\2\2\2\u0145\3\2\2\2\2\u0147\3\2\2\2\2\u0149\3\2\2")
buf.write("\2\2\u014b\3\2\2\2\2\u014d\3\2\2\2\2\u014f\3\2\2\2\2\u0151")
buf.write("\3\2\2\2\2\u0153\3\2\2\2\2\u0155\3\2\2\2\2\u0157\3\2\2")
buf.write("\2\2\u0159\3\2\2\2\2\u015b\3\2\2\2\2\u015d\3\2\2\2\2\u015f")
buf.write("\3\2\2\2\2\u0161\3\2\2\2\2\u0163\3\2\2\2\2\u0165\3\2\2")
buf.write("\2\2\u0167\3\2\2\2\2\u0169\3\2\2\2\2\u016b\3\2\2\2\2\u016d")
buf.write("\3\2\2\2\2\u016f\3\2\2\2\2\u0171\3\2\2\2\2\u0173\3\2\2")
buf.write("\2\2\u0175\3\2\2\2\2\u0177\3\2\2\2\2\u0179\3\2\2\2\2\u017b")
buf.write("\3\2\2\2\2\u017d\3\2\2\2\2\u017f\3\2\2\2\2\u0181\3\2\2")
buf.write("\2\2\u0183\3\2\2\2\2\u0185\3\2\2\2\2\u0187\3\2\2\2\2\u0189")
buf.write("\3\2\2\2\2\u018b\3\2\2\2\2\u018d\3\2\2\2\2\u018f\3\2\2")
buf.write("\2\2\u0191\3\2\2\2\2\u0193\3\2\2\2\2\u0195\3\2\2\2\2\u0197")
buf.write("\3\2\2\2\2\u0199\3\2\2\2\2\u019b\3\2\2\2\2\u019d\3\2\2")
buf.write("\2\2\u019f\3\2\2\2\2\u01a1\3\2\2\2\2\u01a3\3\2\2\2\2\u01a5")
buf.write("\3\2\2\2\2\u01a7\3\2\2\2\2\u01a9\3\2\2\2\2\u01ab\3\2\2")
buf.write("\2\2\u01ad\3\2\2\2\2\u01af\3\2\2\2\2\u01b1\3\2\2\2\2\u01b3")
buf.write("\3\2\2\2\2\u01b5\3\2\2\2\2\u01b7\3\2\2\2\2\u01b9\3\2\2")
buf.write("\2\2\u01bb\3\2\2\2\2\u01bd\3\2\2\2\2\u01bf\3\2\2\2\2\u01c1")
buf.write("\3\2\2\2\2\u01c3\3\2\2\2\2\u01c5\3\2\2\2\2\u01c7\3\2\2")
buf.write("\2\2\u01c9\3\2\2\2\2\u01cb\3\2\2\2\2\u01cd\3\2\2\2\2\u01cf")
buf.write("\3\2\2\2\2\u01d1\3\2\2\2\2\u01d3\3\2\2\2\2\u01d5\3\2\2")
buf.write("\2\2\u01d7\3\2\2\2\2\u01d9\3\2\2\2\2\u01db\3\2\2\2\2\u01dd")
buf.write("\3\2\2\2\2\u01df\3\2\2\2\2\u01e1\3\2\2\2\2\u01e3\3\2\2")
buf.write("\2\2\u01e5\3\2\2\2\2\u01e7\3\2\2\2\2\u01e9\3\2\2\2\2\u01eb")
buf.write("\3\2\2\2\2\u01ed\3\2\2\2\2\u01ef\3\2\2\2\2\u01f1\3\2\2")
buf.write("\2\2\u01f3\3\2\2\2\2\u01f5\3\2\2\2\2\u01f7\3\2\2\2\2\u01f9")
buf.write("\3\2\2\2\2\u01fb\3\2\2\2\2\u01fd\3\2\2\2\2\u01ff\3\2\2")
buf.write("\2\2\u0201\3\2\2\2\2\u0203\3\2\2\2\2\u0205\3\2\2\2\2\u0207")
buf.write("\3\2\2\2\2\u0209\3\2\2\2\2\u020b\3\2\2\2\2\u020d\3\2\2")
buf.write("\2\2\u020f\3\2\2\2\2\u0211\3\2\2\2\2\u0213\3\2\2\2\2\u0215")
buf.write("\3\2\2\2\2\u0217\3\2\2\2\2\u0219\3\2\2\2\2\u021b\3\2\2")
buf.write("\2\2\u021d\3\2\2\2\2\u021f\3\2\2\2\2\u0221\3\2\2\2\2\u0223")
buf.write("\3\2\2\2\2\u0225\3\2\2\2\2\u0227\3\2\2\2\2\u0229\3\2\2")
buf.write("\2\2\u022b\3\2\2\2\2\u022d\3\2\2\2\2\u022f\3\2\2\2\2\u0231")
buf.write("\3\2\2\2\2\u0233\3\2\2\2\2\u0235\3\2\2\2\2\u0237\3\2\2")
buf.write("\2\2\u0239\3\2\2\2\2\u023b\3\2\2\2\2\u023d\3\2\2\2\2\u023f")
buf.write("\3\2\2\2\2\u0241\3\2\2\2\2\u0243\3\2\2\2\2\u0245\3\2\2")
buf.write("\2\2\u0247\3\2\2\2\2\u0249\3\2\2\2\2\u024b\3\2\2\2\2\u024d")
buf.write("\3\2\2\2\2\u024f\3\2\2\2\2\u0251\3\2\2\2\2\u0253\3\2\2")
buf.write("\2\2\u0255\3\2\2\2\2\u0257\3\2\2\2\2\u0259\3\2\2\2\2\u025b")
buf.write("\3\2\2\2\2\u025d\3\2\2\2\2\u025f\3\2\2\2\2\u0261\3\2\2")
buf.write("\2\2\u0263\3\2\2\2\2\u0265\3\2\2\2\2\u0267\3\2\2\2\2\u0269")
buf.write("\3\2\2\2\2\u026b\3\2\2\2\2\u026d\3\2\2\2\2\u026f\3\2\2")
buf.write("\2\2\u0271\3\2\2\2\2\u0273\3\2\2\2\2\u0275\3\2\2\2\2\u0277")
buf.write("\3\2\2\2\2\u0279\3\2\2\2\2\u027b\3\2\2\2\2\u027d\3\2\2")
buf.write("\2\2\u027f\3\2\2\2\2\u0281\3\2\2\2\2\u0283\3\2\2\2\2\u0285")
buf.write("\3\2\2\2\2\u0287\3\2\2\2\2\u0289\3\2\2\2\2\u028b\3\2\2")
buf.write("\2\2\u028d\3\2\2\2\2\u028f\3\2\2\2\2\u0291\3\2\2\2\2\u0293")
buf.write("\3\2\2\2\2\u0295\3\2\2\2\2\u0297\3\2\2\2\2\u0299\3\2\2")
buf.write("\2\2\u029b\3\2\2\2\2\u029d\3\2\2\2\2\u029f\3\2\2\2\2\u02a1")
buf.write("\3\2\2\2\2\u02a3\3\2\2\2\2\u02a5\3\2\2\2\2\u02a7\3\2\2")
buf.write("\2\2\u02a9\3\2\2\2\2\u02ab\3\2\2\2\2\u02ad\3\2\2\2\2\u02af")
buf.write("\3\2\2\2\2\u02b1\3\2\2\2\2\u02b3\3\2\2\2\2\u02b5\3\2\2")
buf.write("\2\2\u02b7\3\2\2\2\2\u02b9\3\2\2\2\2\u02bb\3\2\2\2\2\u02bd")
buf.write("\3\2\2\2\2\u02bf\3\2\2\2\2\u02c1\3\2\2\2\2\u02c3\3\2\2")
buf.write("\2\2\u02c5\3\2\2\2\2\u02c7\3\2\2\2\2\u02c9\3\2\2\2\2\u02cb")
buf.write("\3\2\2\2\2\u02cd\3\2\2\2\2\u02cf\3\2\2\2\2\u02d1\3\2\2")
buf.write("\2\2\u02d3\3\2\2\2\2\u02d5\3\2\2\2\2\u02d7\3\2\2\2\2\u02d9")
buf.write("\3\2\2\2\2\u02db\3\2\2\2\2\u02dd\3\2\2\2\2\u02df\3\2\2")
buf.write("\2\2\u02e1\3\2\2\2\2\u02e3\3\2\2\2\2\u02e5\3\2\2\2\2\u02e7")
buf.write("\3\2\2\2\2\u02e9\3\2\2\2\2\u02eb\3\2\2\2\2\u02ed\3\2\2")
buf.write("\2\2\u02ef\3\2\2\2\2\u02f1\3\2\2\2\2\u02f3\3\2\2\2\2\u02f5")
buf.write("\3\2\2\2\2\u02f7\3\2\2\2\2\u02f9\3\2\2\2\2\u02fb\3\2\2")
buf.write("\2\2\u02fd\3\2\2\2\2\u02ff\3\2\2\2\2\u0301\3\2\2\2\2\u0303")
buf.write("\3\2\2\2\2\u0305\3\2\2\2\2\u0307\3\2\2\2\2\u0309\3\2\2")
buf.write("\2\2\u030b\3\2\2\2\2\u030d\3\2\2\2\2\u030f\3\2\2\2\2\u0311")
buf.write("\3\2\2\2\2\u0313\3\2\2\2\2\u0315\3\2\2\2\2\u0317\3\2\2")
buf.write("\2\2\u0319\3\2\2\2\2\u031b\3\2\2\2\2\u031d\3\2\2\2\2\u031f")
buf.write("\3\2\2\2\2\u0321\3\2\2\2\2\u0323\3\2\2\2\2\u0325\3\2\2")
buf.write("\2\2\u0327\3\2\2\2\2\u0329\3\2\2\2\2\u032b\3\2\2\2\2\u032d")
buf.write("\3\2\2\2\2\u032f\3\2\2\2\2\u0331\3\2\2\2\2\u0333\3\2\2")
buf.write("\2\2\u0335\3\2\2\2\2\u0337\3\2\2\2\2\u0339\3\2\2\2\2\u033b")
buf.write("\3\2\2\2\2\u033d\3\2\2\2\2\u033f\3\2\2\2\2\u0341\3\2\2")
buf.write("\2\2\u0343\3\2\2\2\2\u0345\3\2\2\2\2\u0347\3\2\2\2\2\u0349")
buf.write("\3\2\2\2\2\u034b\3\2\2\2\2\u034d\3\2\2\2\2\u034f\3\2\2")
buf.write("\2\2\u0351\3\2\2\2\2\u0353\3\2\2\2\2\u0355\3\2\2\2\2\u0357")
buf.write("\3\2\2\2\2\u0359\3\2\2\2\2\u035b\3\2\2\2\2\u035d\3\2\2")
buf.write("\2\2\u035f\3\2\2\2\2\u0361\3\2\2\2\2\u0363\3\2\2\2\2\u0365")
buf.write("\3\2\2\2\2\u0367\3\2\2\2\2\u0369\3\2\2\2\2\u036b\3\2\2")
buf.write("\2\2\u036d\3\2\2\2\2\u036f\3\2\2\2\2\u0371\3\2\2\2\2\u0373")
buf.write("\3\2\2\2\2\u0375\3\2\2\2\2\u0377\3\2\2\2\2\u0379\3\2\2")
buf.write("\2\2\u037b\3\2\2\2\2\u037d\3\2\2\2\2\u037f\3\2\2\2\2\u0381")
buf.write("\3\2\2\2\2\u0383\3\2\2\2\2\u0385\3\2\2\2\2\u0387\3\2\2")
buf.write("\2\2\u0389\3\2\2\2\2\u038b\3\2\2\2\2\u038d\3\2\2\2\2\u038f")
buf.write("\3\2\2\2\2\u0391\3\2\2\2\2\u0393\3\2\2\2\2\u0395\3\2\2")
buf.write("\2\2\u0397\3\2\2\2\2\u0399\3\2\2\2\2\u039b\3\2\2\2\2\u039d")
buf.write("\3\2\2\2\2\u039f\3\2\2\2\2\u03a1\3\2\2\2\2\u03a3\3\2\2")
buf.write("\2\2\u03a5\3\2\2\2\2\u03a7\3\2\2\2\2\u03a9\3\2\2\2\2\u03ab")
buf.write("\3\2\2\2\2\u03ad\3\2\2\2\2\u03af\3\2\2\2\2\u03b1\3\2\2")
buf.write("\2\2\u03b3\3\2\2\2\2\u03b5\3\2\2\2\2\u03b7\3\2\2\2\2\u03b9")
buf.write("\3\2\2\2\2\u03bb\3\2\2\2\2\u03bd\3\2\2\2\2\u03bf\3\2\2")
buf.write("\2\2\u03c1\3\2\2\2\2\u03c3\3\2\2\2\2\u03c5\3\2\2\2\2\u03c7")
buf.write("\3\2\2\2\2\u03c9\3\2\2\2\2\u03cb\3\2\2\2\2\u03cd\3\2\2")
buf.write("\2\2\u03cf\3\2\2\2\2\u03d1\3\2\2\2\2\u03d3\3\2\2\2\2\u03e1")
buf.write("\3\2\2\2\2\u03e3\3\2\2\2\2\u03e5\3\2\2\2\2\u03e7\3\2\2")
buf.write("\2\2\u03e9\3\2\2\2\2\u03eb\3\2\2\2\2\u03ed\3\2\2\2\2\u03ef")
buf.write("\3\2\2\2\2\u03f1\3\2\2\2\2\u03f3\3\2\2\2\2\u03f5\3\2\2")
buf.write("\2\2\u03f7\3\2\2\2\2\u03f9\3\2\2\2\2\u03fb\3\2\2\2\2\u03fd")
buf.write("\3\2\2\2\2\u03ff\3\2\2\2\2\u0401\3\2\2\2\2\u0403\3\2\2")
buf.write("\2\2\u0405\3\2\2\2\2\u0407\3\2\2\2\2\u0409\3\2\2\2\2\u040b")
buf.write("\3\2\2\2\2\u040d\3\2\2\2\2\u040f\3\2\2\2\2\u0413\3\2\2")
buf.write("\2\2\u0415\3\2\2\2\2\u0417\3\2\2\2\2\u0419\3\2\2\2\2\u041b")
buf.write("\3\2\2\2\2\u041d\3\2\2\2\2\u041f\3\2\2\2\2\u0427\3\2\2")
buf.write("\2\2\u0429\3\2\2\2\2\u042b\3\2\2\2\2\u0431\3\2\2\2\2\u0433")
buf.write("\3\2\2\2\3\u0469\3\2\2\2\5\u046c\3\2\2\2\7\u046e\3\2\2")
buf.write("\2\t\u0472\3\2\2\2\13\u0478\3\2\2\2\r\u047e\3\2\2\2\17")
buf.write("\u0488\3\2\2\2\21\u048c\3\2\2\2\23\u0492\3\2\2\2\25\u049a")
buf.write("\3\2\2\2\27\u049e\3\2\2\2\31\u04a2\3\2\2\2\33\u04a8\3")
buf.write("\2\2\2\35\u04ab\3\2\2\2\37\u04b2\3\2\2\2!\u04b9\3\2\2")
buf.write("\2#\u04bd\3\2\2\2%\u04c7\3\2\2\2\'\u04ca\3\2\2\2)\u04d4")
buf.write("\3\2\2\2+\u04da\3\2\2\2-\u04e1\3\2\2\2/\u04e6\3\2\2\2")
buf.write("\61\u04f0\3\2\2\2\63\u0507\3\2\2\2\65\u050d\3\2\2\2\67")
buf.write("\u0514\3\2\2\29\u051a\3\2\2\2;\u0522\3\2\2\2=\u0528\3")
buf.write("\2\2\2?\u0536\3\2\2\2A\u0543\3\2\2\2C\u0552\3\2\2\2E\u0557")
buf.write("\3\2\2\2G\u055d\3\2\2\2I\u0562\3\2\2\2K\u056a\3\2\2\2")
buf.write("M\u056f\3\2\2\2O\u0577\3\2\2\2Q\u057c\3\2\2\2S\u057f\3")
buf.write("\2\2\2U\u0584\3\2\2\2W\u0586\3\2\2\2Y\u058c\3\2\2\2[\u0591")
buf.write("\3\2\2\2]\u059b\3\2\2\2_\u05a3\3\2\2\2a\u05a8\3\2\2\2")
buf.write("c\u05ad\3\2\2\2e\u05b2\3\2\2\2g\u05ba\3\2\2\2i\u05c4\3")
buf.write("\2\2\2k\u05ca\3\2\2\2m\u05ce\3\2\2\2o\u05d3\3\2\2\2q\u05d9")
buf.write("\3\2\2\2s\u05e1\3\2\2\2u\u05e9\3\2\2\2w\u05f1\3\2\2\2")
buf.write("y\u05f9\3\2\2\2{\u0600\3\2\2\2}\u060a\3\2\2\2\177\u0618")
buf.write("\3\2\2\2\u0081\u0620\3\2\2\2\u0083\u0629\3\2\2\2\u0085")
buf.write("\u0631\3\2\2\2\u0087\u0641\3\2\2\2\u0089\u064a\3\2\2\2")
buf.write("\u008b\u0655\3\2\2\2\u008d\u0661\3\2\2\2\u008f\u066d\3")
buf.write("\2\2\2\u0091\u0675\3\2\2\2\u0093\u067d\3\2\2\2\u0095\u0686")
buf.write("\3\2\2\2\u0097\u068e\3\2\2\2\u0099\u069a\3\2\2\2\u009b")
buf.write("\u06aa\3\2\2\2\u009d\u06af\3\2\2\2\u009f\u06b5\3\2\2\2")
buf.write("\u00a1\u06bc\3\2\2\2\u00a3\u06c2\3\2\2\2\u00a5\u06c7\3")
buf.write("\2\2\2\u00a7\u06cf\3\2\2\2\u00a9\u06dc\3\2\2\2\u00ab\u06e3")
buf.write("\3\2\2\2\u00ad\u06ef\3\2\2\2\u00af\u06f5\3\2\2\2\u00b1")
buf.write("\u06fa\3\2\2\2\u00b3\u0703\3\2\2\2\u00b5\u0708\3\2\2\2")
buf.write("\u00b7\u070c\3\2\2\2\u00b9\u071b\3\2\2\2\u00bb\u0726\3")
buf.write("\2\2\2\u00bd\u072a\3\2\2\2\u00bf\u0730\3\2\2\2\u00c1\u0734")
buf.write("\3\2\2\2\u00c3\u073c\3\2\2\2\u00c5\u0744\3\2\2\2\u00c7")
buf.write("\u074e\3\2\2\2\u00c9\u0758\3\2\2\2\u00cb\u0760\3\2\2\2")
buf.write("\u00cd\u0769\3\2\2\2\u00cf\u0772\3\2\2\2\u00d1\u077a\3")
buf.write("\2\2\2\u00d3\u0781\3\2\2\2\u00d5\u0787\3\2\2\2\u00d7\u078c")
buf.write("\3\2\2\2\u00d9\u079a\3\2\2\2\u00db\u07a4\3\2\2\2\u00dd")
buf.write("\u07ac\3\2\2\2\u00df\u07b9\3\2\2\2\u00e1\u07c2\3\2\2\2")
buf.write("\u00e3\u07cb\3\2\2\2\u00e5\u07d2\3\2\2\2\u00e7\u07d7\3")
buf.write("\2\2\2\u00e9\u07f0\3\2\2\2\u00eb\u07f5\3\2\2\2\u00ed\u07fd")
buf.write("\3\2\2\2\u00ef\u0802\3\2\2\2\u00f1\u0808\3\2\2\2\u00f3")
buf.write("\u080e\3\2\2\2\u00f5\u0815\3\2\2\2\u00f7\u081e\3\2\2\2")
buf.write("\u00f9\u0822\3\2\2\2\u00fb\u0831\3\2\2\2\u00fd\u0835\3")
buf.write("\2\2\2\u00ff\u083c\3\2\2\2\u0101\u0843\3\2\2\2\u0103\u084c")
buf.write("\3\2\2\2\u0105\u0853\3\2\2\2\u0107\u085d\3\2\2\2\u0109")
buf.write("\u086c\3\2\2\2\u010b\u0877\3\2\2\2\u010d\u087f\3\2\2\2")
buf.write("\u010f\u0889\3\2\2\2\u0111\u0891\3\2\2\2\u0113\u0898\3")
buf.write("\2\2\2\u0115\u089d\3\2\2\2\u0117\u08a5\3\2\2\2\u0119\u08ae")
buf.write("\3\2\2\2\u011b\u08b6\3\2\2\2\u011d\u08be\3\2\2\2\u011f")
buf.write("\u08c4\3\2\2\2\u0121\u08ca\3\2\2\2\u0123\u08d0\3\2\2\2")
buf.write("\u0125\u08d6\3\2\2\2\u0127\u08e2\3\2\2\2\u0129\u08e8\3")
buf.write("\2\2\2\u012b\u08f2\3\2\2\2\u012d\u08fa\3\2\2\2\u012f\u08fe")
buf.write("\3\2\2\2\u0131\u0905\3\2\2\2\u0133\u090b\3\2\2\2\u0135")
buf.write("\u0910\3\2\2\2\u0137\u0915\3\2\2\2\u0139\u091e\3\2\2\2")
buf.write("\u013b\u0923\3\2\2\2\u013d\u0929\3\2\2\2\u013f\u092f\3")
buf.write("\2\2\2\u0141\u0938\3\2\2\2\u0143\u093d\3\2\2\2\u0145\u0944")
buf.write("\3\2\2\2\u0147\u0949\3\2\2\2\u0149\u094e\3\2\2\2\u014b")
buf.write("\u0951\3\2\2\2\u014d\u0958\3\2\2\2\u014f\u0962\3\2\2\2")
buf.write("\u0151\u0965\3\2\2\2\u0153\u096d\3\2\2\2\u0155\u0977\3")
buf.write("\2\2\2\u0157\u0981\3\2\2\2\u0159\u0988\3\2\2\2\u015b\u098e")
buf.write("\3\2\2\2\u015d\u0996\3\2\2\2\u015f\u09a0\3\2\2\2\u0161")
buf.write("\u09a8\3\2\2\2\u0163\u09b1\3\2\2\2\u0165\u09b8\3\2\2\2")
buf.write("\u0167\u09be\3\2\2\2\u0169\u09c4\3\2\2\2\u016b\u09cb\3")
buf.write("\2\2\2\u016d\u09d8\3\2\2\2\u016f\u09e0\3\2\2\2\u0171\u09e4")
buf.write("\3\2\2\2\u0173\u09ec\3\2\2\2\u0175\u09f6\3\2\2\2\u0177")
buf.write("\u09ff\3\2\2\2\u0179\u0a04\3\2\2\2\u017b\u0a0f\3\2\2\2")
buf.write("\u017d\u0a12\3\2\2\2\u017f\u0a1c\3\2\2\2\u0181\u0a24\3")
buf.write("\2\2\2\u0183\u0a29\3\2\2\2\u0185\u0a2e\3\2\2\2\u0187\u0a33")
buf.write("\3\2\2\2\u0189\u0a3c\3\2\2\2\u018b\u0a41\3\2\2\2\u018d")
buf.write("\u0a4c\3\2\2\2\u018f\u0a54\3\2\2\2\u0191\u0a59\3\2\2\2")
buf.write("\u0193\u0a5f\3\2\2\2\u0195\u0a67\3\2\2\2\u0197\u0a6c\3")
buf.write("\2\2\2\u0199\u0a72\3\2\2\2\u019b\u0a78\3\2\2\2\u019d\u0a7e")
buf.write("\3\2\2\2\u019f\u0a84\3\2\2\2\u01a1\u0a8a\3\2\2\2\u01a3")
buf.write("\u0a8f\3\2\2\2\u01a5\u0a96\3\2\2\2\u01a7\u0a9a\3\2\2\2")
buf.write("\u01a9\u0aa1\3\2\2\2\u01ab\u0aa7\3\2\2\2\u01ad\u0aac\3")
buf.write("\2\2\2\u01af\u0ab1\3\2\2\2\u01b1\u0ab6\3\2\2\2\u01b3\u0aba")
buf.write("\3\2\2\2\u01b5\u0ac2\3\2\2\2\u01b7\u0acb\3\2\2\2\u01b9")
buf.write("\u0ad4\3\2\2\2\u01bb\u0adb\3\2\2\2\u01bd\u0ae1\3\2\2\2")
buf.write("\u01bf\u0ae7\3\2\2\2\u01c1\u0aee\3\2\2\2\u01c3\u0af7\3")
buf.write("\2\2\2\u01c5\u0b00\3\2\2\2\u01c7\u0b05\3\2\2\2\u01c9\u0b0b")
buf.write("\3\2\2\2\u01cb\u0b12\3\2\2\2\u01cd\u0b18\3\2\2\2\u01cf")
buf.write("\u0b21\3\2\2\2\u01d1\u0b26\3\2\2\2\u01d3\u0b2a\3\2\2\2")
buf.write("\u01d5\u0b32\3\2\2\2\u01d7\u0b3b\3\2\2\2\u01d9\u0b3f\3")
buf.write("\2\2\2\u01db\u0b45\3\2\2\2\u01dd\u0b4e\3\2\2\2\u01df\u0b54")
buf.write("\3\2\2\2\u01e1\u0b5b\3\2\2\2\u01e3\u0b5f\3\2\2\2\u01e5")
buf.write("\u0b62\3\2\2\2\u01e7\u0b6a\3\2\2\2\u01e9\u0b72\3\2\2\2")
buf.write("\u01eb\u0b79\3\2\2\2\u01ed\u0b81\3\2\2\2\u01ef\u0b92\3")
buf.write("\2\2\2\u01f1\u0b9d\3\2\2\2\u01f3\u0ba8\3\2\2\2\u01f5\u0bad")
buf.write("\3\2\2\2\u01f7\u0bb5\3\2\2\2\u01f9\u0bc3\3\2\2\2\u01fb")
buf.write("\u0bc7\3\2\2\2\u01fd\u0bce\3\2\2\2\u01ff\u0bd3\3\2\2\2")
buf.write("\u0201\u0bd9\3\2\2\2\u0203\u0be0\3\2\2\2\u0205\u0be8\3")
buf.write("\2\2\2\u0207\u0bf2\3\2\2\2\u0209\u0bf9\3\2\2\2\u020b\u0bfc")
buf.write("\3\2\2\2\u020d\u0c00\3\2\2\2\u020f\u0c04\3\2\2\2\u0211")
buf.write("\u0c08\3\2\2\2\u0213\u0c0b\3\2\2\2\u0215\u0c10\3\2\2\2")
buf.write("\u0217\u0c15\3\2\2\2\u0219\u0c1c\3\2\2\2\u021b\u0c1f\3")
buf.write("\2\2\2\u021d\u0c27\3\2\2\2\u021f\u0c2d\3\2\2\2\u0221\u0c38")
buf.write("\3\2\2\2\u0223\u0c40\3\2\2\2\u0225\u0c44\3\2\2\2\u0227")
buf.write("\u0c4a\3\2\2\2\u0229\u0c4f\3\2\2\2\u022b\u0c5a\3\2\2\2")
buf.write("\u022d\u0c62\3\2\2\2\u022f\u0c72\3\2\2\2\u0231\u0c7d\3")
buf.write("\2\2\2\u0233\u0c84\3\2\2\2\u0235\u0c8e\3\2\2\2\u0237\u0c96")
buf.write("\3\2\2\2\u0239\u0c9b\3\2\2\2\u023b\u0ca4\3\2\2\2\u023d")
buf.write("\u0caa\3\2\2\2\u023f\u0cb4\3\2\2\2\u0241\u0cba\3\2\2\2")
buf.write("\u0243\u0cbf\3\2\2\2\u0245\u0ccb\3\2\2\2\u0247\u0cd4\3")
buf.write("\2\2\2\u0249\u0cde\3\2\2\2\u024b\u0ce5\3\2\2\2\u024d\u0cef")
buf.write("\3\2\2\2\u024f\u0cf9\3\2\2\2\u0251\u0d01\3\2\2\2\u0253")
buf.write("\u0d07\3\2\2\2\u0255\u0d11\3\2\2\2\u0257\u0d17\3\2\2\2")
buf.write("\u0259\u0d1d\3\2\2\2\u025b\u0d21\3\2\2\2\u025d\u0d26\3")
buf.write("\2\2\2\u025f\u0d2b\3\2\2\2\u0261\u0d32\3\2\2\2\u0263\u0d36")
buf.write("\3\2\2\2\u0265\u0d40\3\2\2\2\u0267\u0d4c\3\2\2\2\u0269")
buf.write("\u0d53\3\2\2\2\u026b\u0d5d\3\2\2\2\u026d\u0d64\3\2\2\2")
buf.write("\u026f\u0d6c\3\2\2\2\u0271\u0d74\3\2\2\2\u0273\u0d88\3")
buf.write("\2\2\2\u0275\u0d8f\3\2\2\2\u0277\u0d9c\3\2\2\2\u0279\u0da3")
buf.write("\3\2\2\2\u027b\u0dad\3\2\2\2\u027d\u0db3\3\2\2\2\u027f")
buf.write("\u0dbb\3\2\2\2\u0281\u0dc2\3\2\2\2\u0283\u0dc8\3\2\2\2")
buf.write("\u0285\u0dd1\3\2\2\2\u0287\u0dd8\3\2\2\2\u0289\u0ddc\3")
buf.write("\2\2\2\u028b\u0de2\3\2\2\2\u028d\u0de7\3\2\2\2\u028f\u0ded")
buf.write("\3\2\2\2\u0291\u0df4\3\2\2\2\u0293\u0df9\3\2\2\2\u0295")
buf.write("\u0e03\3\2\2\2\u0297\u0e0a\3\2\2\2\u0299\u0e16\3\2\2\2")
buf.write("\u029b\u0e1a\3\2\2\2\u029d\u0e21\3\2\2\2\u029f\u0e28\3")
buf.write("\2\2\2\u02a1\u0e2d\3\2\2\2\u02a3\u0e35\3\2\2\2\u02a5\u0e3c")
buf.write("\3\2\2\2\u02a7\u0e41\3\2\2\2\u02a9\u0e4a\3\2\2\2\u02ab")
buf.write("\u0e55\3\2\2\2\u02ad\u0e62\3\2\2\2\u02af\u0e74\3\2\2\2")
buf.write("\u02b1\u0e80\3\2\2\2\u02b3\u0e90\3\2\2\2\u02b5\u0e94\3")
buf.write("\2\2\2\u02b7\u0e99\3\2\2\2\u02b9\u0ea2\3\2\2\2\u02bb\u0ea8")
buf.write("\3\2\2\2\u02bd\u0ead\3\2\2\2\u02bf\u0eb6\3\2\2\2\u02c1")
buf.write("\u0ebf\3\2\2\2\u02c3\u0ec8\3\2\2\2\u02c5\u0ed7\3\2\2\2")
buf.write("\u02c7\u0ede\3\2\2\2\u02c9\u0ee3\3\2\2\2\u02cb\u0ee8\3")
buf.write("\2\2\2\u02cd\u0ef1\3\2\2\2\u02cf\u0efa\3\2\2\2\u02d1\u0eff")
buf.write("\3\2\2\2\u02d3\u0f0d\3\2\2\2\u02d5\u0f15\3\2\2\2\u02d7")
buf.write("\u0f1e\3\2\2\2\u02d9\u0f29\3\2\2\2\u02db\u0f2f\3\2\2\2")
buf.write("\u02dd\u0f37\3\2\2\2\u02df\u0f41\3\2\2\2\u02e1\u0f4e\3")
buf.write("\2\2\2\u02e3\u0f55\3\2\2\2\u02e5\u0f60\3\2\2\2\u02e7\u0f67")
buf.write("\3\2\2\2\u02e9\u0f73\3\2\2\2\u02eb\u0f80\3\2\2\2\u02ed")
buf.write("\u0f8e\3\2\2\2\u02ef\u0f96\3\2\2\2\u02f1\u0f9e\3\2\2\2")
buf.write("\u02f3\u0fa6\3\2\2\2\u02f5\u0fac\3\2\2\2\u02f7\u0fb0\3")
buf.write("\2\2\2\u02f9\u0fb5\3\2\2\2\u02fb\u0fba\3\2\2\2\u02fd\u0fc4")
buf.write("\3\2\2\2\u02ff\u0fe0\3\2\2\2\u0301\u0ffb\3\2\2\2\u0303")
buf.write("\u1013\3\2\2\2\u0305\u1021\3\2\2\2\u0307\u102f\3\2\2\2")
buf.write("\u0309\u103f\3\2\2\2\u030b\u104f\3\2\2\2\u030d\u1052\3")
buf.write("\2\2\2\u030f\u105b\3\2\2\2\u0311\u1067\3\2\2\2\u0313\u1071")
buf.write("\3\2\2\2\u0315\u1077\3\2\2\2\u0317\u107f\3\2\2\2\u0319")
buf.write("\u1084\3\2\2\2\u031b\u1089\3\2\2\2\u031d\u1092\3\2\2\2")
buf.write("\u031f\u1097\3\2\2\2\u0321\u10a1\3\2\2\2\u0323\u10a7\3")
buf.write("\2\2\2\u0325\u10ad\3\2\2\2\u0327\u10b4\3\2\2\2\u0329\u10be")
buf.write("\3\2\2\2\u032b\u10c6\3\2\2\2\u032d\u10cc\3\2\2\2\u032f")
buf.write("\u10d3\3\2\2\2\u0331\u10db\3\2\2\2\u0333\u10e2\3\2\2\2")
buf.write("\u0335\u10e9\3\2\2\2\u0337\u10ed\3\2\2\2\u0339\u10f3\3")
buf.write("\2\2\2\u033b\u10fc\3\2\2\2\u033d\u1102\3\2\2\2\u033f\u1109")
buf.write("\3\2\2\2\u0341\u1111\3\2\2\2\u0343\u111a\3\2\2\2\u0345")
buf.write("\u1123\3\2\2\2\u0347\u112a\3\2\2\2\u0349\u1132\3\2\2\2")
buf.write("\u034b\u113a\3\2\2\2\u034d\u1143\3\2\2\2\u034f\u1148\3")
buf.write("\2\2\2\u0351\u1150\3\2\2\2\u0353\u115b\3\2\2\2\u0355\u1160")
buf.write("\3\2\2\2\u0357\u1169\3\2\2\2\u0359\u116f\3\2\2\2\u035b")
buf.write("\u1175\3\2\2\2\u035d\u117a\3\2\2\2\u035f\u1181\3\2\2\2")
buf.write("\u0361\u1186\3\2\2\2\u0363\u118c\3\2\2\2\u0365\u1190\3")
buf.write("\2\2\2\u0367\u1197\3\2\2\2\u0369\u11a5\3\2\2\2\u036b\u11ad")
buf.write("\3\2\2\2\u036d\u11ba\3\2\2\2\u036f\u11c5\3\2\2\2\u0371")
buf.write("\u11cf\3\2\2\2\u0373\u11d9\3\2\2\2\u0375\u11e7\3\2\2\2")
buf.write("\u0377\u11f0\3\2\2\2\u0379\u11f6\3\2\2\2\u037b\u11ff\3")
buf.write("\2\2\2\u037d\u1207\3\2\2\2\u037f\u1214\3\2\2\2\u0381\u121d")
buf.write("\3\2\2\2\u0383\u1222\3\2\2\2\u0385\u1226\3\2\2\2\u0387")
buf.write("\u123f\3\2\2\2\u0389\u1244\3\2\2\2\u038b\u124f\3\2\2\2")
buf.write("\u038d\u1261\3\2\2\2\u038f\u1271\3\2\2\2\u0391\u1284\3")
buf.write("\2\2\2\u0393\u129b\3\2\2\2\u0395\u12aa\3\2\2\2\u0397\u12b4")
buf.write("\3\2\2\2\u0399\u12bf\3\2\2\2\u039b\u12c7\3\2\2\2\u039d")
buf.write("\u12d4\3\2\2\2\u039f\u12e4\3\2\2\2\u03a1\u12f4\3\2\2\2")
buf.write("\u03a3\u12f9\3\2\2\2\u03a5\u12fd\3\2\2\2\u03a7\u1302\3")
buf.write("\2\2\2\u03a9\u1306\3\2\2\2\u03ab\u130b\3\2\2\2\u03ad\u130f")
buf.write("\3\2\2\2\u03af\u1316\3\2\2\2\u03b1\u131a\3\2\2\2\u03b3")
buf.write("\u1320\3\2\2\2\u03b5\u1330\3\2\2\2\u03b7\u133b\3\2\2\2")
buf.write("\u03b9\u133f\3\2\2\2\u03bb\u1348\3\2\2\2\u03bd\u134e\3")
buf.write("\2\2\2\u03bf\u1355\3\2\2\2\u03c1\u135a\3\2\2\2\u03c3\u1361")
buf.write("\3\2\2\2\u03c5\u136e\3\2\2\2\u03c7\u137b\3\2\2\2\u03c9")
buf.write("\u1388\3\2\2\2\u03cb\u138b\3\2\2\2\u03cd\u138d\3\2\2\2")
buf.write("\u03cf\u138f\3\2\2\2\u03d1\u139e\3\2\2\2\u03d3\u13aa\3")
buf.write("\2\2\2\u03d5\u13b3\3\2\2\2\u03d7\u13b5\3\2\2\2\u03d9\u13c0")
buf.write("\3\2\2\2\u03db\u13cb\3\2\2\2\u03dd\u13d6\3\2\2\2\u03df")
buf.write("\u13e1\3\2\2\2\u03e1\u13e3\3\2\2\2\u03e3\u13ed\3\2\2\2")
buf.write("\u03e5\u13ef\3\2\2\2\u03e7\u13f1\3\2\2\2\u03e9\u13f3\3")
buf.write("\2\2\2\u03eb\u13f5\3\2\2\2\u03ed\u13f8\3\2\2\2\u03ef\u13fa")
buf.write("\3\2\2\2\u03f1\u13fc\3\2\2\2\u03f3\u13fe\3\2\2\2\u03f5")
buf.write("\u1400\3\2\2\2\u03f7\u1402\3\2\2\2\u03f9\u1404\3\2\2\2")
buf.write("\u03fb\u1415\3\2\2\2\u03fd\u1417\3\2\2\2\u03ff\u1419\3")
buf.write("\2\2\2\u0401\u141b\3\2\2\2\u0403\u141e\3\2\2\2\u0405\u1420")
buf.write("\3\2\2\2\u0407\u142b\3\2\2\2\u0409\u142d\3\2\2\2\u040b")
buf.write("\u142f\3\2\2\2\u040d\u1431\3\2\2\2\u040f\u1433\3\2\2\2")
buf.write("\u0411\u1435\3\2\2\2\u0413\u1437\3\2\2\2\u0415\u143a\3")
buf.write("\2\2\2\u0417\u143c\3\2\2\2\u0419\u143e\3\2\2\2\u041b\u1440")
buf.write("\3\2\2\2\u041d\u1442\3\2\2\2\u041f\u1445\3\2\2\2\u0421")
buf.write("\u144b\3\2\2\2\u0423\u144e\3\2\2\2\u0425\u1455\3\2\2\2")
buf.write("\u0427\u1460\3\2\2\2\u0429\u146f\3\2\2\2\u042b\u147d\3")
buf.write("\2\2\2\u042d\u1490\3\2\2\2\u042f\u1494\3\2\2\2\u0431\u1496")
buf.write("\3\2\2\2\u0433\u149e\3\2\2\2\u0435\u14a3\3\2\2\2\u0437")
buf.write("\u14a5\3\2\2\2\u0439\u14a7\3\2\2\2\u043b\u14a9\3\2\2\2")
buf.write("\u043d\u14ab\3\2\2\2\u043f\u14ad\3\2\2\2\u0441\u14af\3")
buf.write("\2\2\2\u0443\u14b1\3\2\2\2\u0445\u14b3\3\2\2\2\u0447\u14b5")
buf.write("\3\2\2\2\u0449\u14b7\3\2\2\2\u044b\u14b9\3\2\2\2\u044d")
buf.write("\u14bb\3\2\2\2\u044f\u14bd\3\2\2\2\u0451\u14bf\3\2\2\2")
buf.write("\u0453\u14c1\3\2\2\2\u0455\u14c3\3\2\2\2\u0457\u14c5\3")
buf.write("\2\2\2\u0459\u14c7\3\2\2\2\u045b\u14c9\3\2\2\2\u045d\u14cb")
buf.write("\3\2\2\2\u045f\u14cd\3\2\2\2\u0461\u14cf\3\2\2\2\u0463")
buf.write("\u14d1\3\2\2\2\u0465\u14d3\3\2\2\2\u0467\u14d5\3\2\2\2")
buf.write("\u0469\u046a\7\60\2\2\u046a\u046b\7\60\2\2\u046b\4\3\2")
buf.write("\2\2\u046c\u046d\5\u0435\u021b\2\u046d\6\3\2\2\2\u046e")
buf.write("\u046f\5\u0435\u021b\2\u046f\u0470\5\u043b\u021e\2\u0470")
buf.write("\u0471\5\u043b\u021e\2\u0471\b\3\2\2\2\u0472\u0473\5\u0435")
buf.write("\u021b\2\u0473\u0474\5\u043f\u0220\2\u0474\u0475\5\u045b")
buf.write("\u022e\2\u0475\u0476\5\u043d\u021f\2\u0476\u0477\5\u0457")
buf.write("\u022c\2\u0477\n\3\2\2\2\u0478\u0479\5\u0435\u021b\2\u0479")
buf.write("\u047a\5\u0441\u0221\2\u047a\u047b\5\u043d\u021f\2\u047b")
buf.write("\u047c\5\u044f\u0228\2\u047c\u047d\5\u045b\u022e\2\u047d")
buf.write("\f\3\2\2\2\u047e\u047f\5\u0435\u021b\2\u047f\u0480\5\u0441")
buf.write("\u0221\2\u0480\u0481\5\u0441\u0221\2\u0481\u0482\5\u0457")
buf.write("\u022c\2\u0482\u0483\5\u043d\u021f\2\u0483\u0484\5\u0441")
buf.write("\u0221\2\u0484\u0485\5\u0435\u021b\2\u0485\u0486\5\u045b")
buf.write("\u022e\2\u0486\u0487\5\u043d\u021f\2\u0487\16\3\2\2\2")
buf.write("\u0488\u0489\5\u0435\u021b\2\u0489\u048a\5\u044b\u0226")
buf.write("\2\u048a\u048b\5\u044b\u0226\2\u048b\20\3\2\2\2\u048c")
buf.write("\u048d\5\u0435\u021b\2\u048d\u048e\5\u044b\u0226\2\u048e")
buf.write("\u048f\5\u045b\u022e\2\u048f\u0490\5\u043d\u021f\2\u0490")
buf.write("\u0491\5\u0457\u022c\2\u0491\22\3\2\2\2\u0492\u0493\5")
buf.write("\u0435\u021b\2\u0493\u0494\5\u044f\u0228\2\u0494\u0495")
buf.write("\5\u0435\u021b\2\u0495\u0496\5\u044b\u0226\2\u0496\u0497")
buf.write("\5\u0465\u0233\2\u0497\u0498\5\u0467\u0234\2\u0498\u0499")
buf.write("\5\u043d\u021f\2\u0499\24\3\2\2\2\u049a\u049b\5\u0435")
buf.write("\u021b\2\u049b\u049c\5\u044f\u0228\2\u049c\u049d\5\u043b")
buf.write("\u021e\2\u049d\26\3\2\2\2\u049e\u049f\5\u0435\u021b\2")
buf.write("\u049f\u04a0\5\u044f\u0228\2\u04a0\u04a1\5\u0465\u0233")
buf.write("\2\u04a1\30\3\2\2\2\u04a2\u04a3\5\u0435\u021b\2\u04a3")
buf.write("\u04a4\5\u0457\u022c\2\u04a4\u04a5\5\u0457\u022c\2\u04a5")
buf.write("\u04a6\5\u0435\u021b\2\u04a6\u04a7\5\u0465\u0233\2\u04a7")
buf.write("\32\3\2\2\2\u04a8\u04a9\5\u0435\u021b\2\u04a9\u04aa\5")
buf.write("\u0459\u022d\2\u04aa\34\3\2\2\2\u04ab\u04ac\5\u0435\u021b")
buf.write("\2\u04ac\u04ad\5\u0459\u022d\2\u04ad\u04ae\5\u0459\u022d")
buf.write("\2\u04ae\u04af\5\u045d\u022f\2\u04af\u04b0\5\u044d\u0227")
buf.write("\2\u04b0\u04b1\5\u043d\u021f\2\u04b1\36\3\2\2\2\u04b2")
buf.write("\u04b3\5\u0435\u021b\2\u04b3\u04b4\5\u0459\u022d\2\u04b4")
buf.write("\u04b5\5\u0459\u022d\2\u04b5\u04b6\5\u043d\u021f\2\u04b6")
buf.write("\u04b7\5\u0457\u022c\2\u04b7\u04b8\5\u045b\u022e\2\u04b8")
buf.write(" \3\2\2\2\u04b9\u04ba\5\u0435\u021b\2\u04ba\u04bb\5\u0459")
buf.write("\u022d\2\u04bb\u04bc\5\u0439\u021d\2\u04bc\"\3\2\2\2\u04bd")
buf.write("\u04be\5\u0435\u021b\2\u04be\u04bf\5\u0459\u022d\2\u04bf")
buf.write("\u04c0\5\u0459\u022d\2\u04c0\u04c1\5\u0451\u0229\2\u04c1")
buf.write("\u04c2\5\u0439\u021d\2\u04c2\u04c3\5\u0445\u0223\2\u04c3")
buf.write("\u04c4\5\u0435\u021b\2\u04c4\u04c5\5\u045b\u022e\2\u04c5")
buf.write("\u04c6\5\u043d\u021f\2\u04c6$\3\2\2\2\u04c7\u04c8\5\u0435")
buf.write("\u021b\2\u04c8\u04c9\5\u045b\u022e\2\u04c9&\3\2\2\2\u04ca")
buf.write("\u04cb\5\u0435\u021b\2\u04cb\u04cc\5\u045b\u022e\2\u04cc")
buf.write("\u04cd\5\u045b\u022e\2\u04cd\u04ce\5\u0457\u022c\2\u04ce")
buf.write("\u04cf\5\u0445\u0223\2\u04cf\u04d0\5\u0437\u021c\2\u04d0")
buf.write("\u04d1\5\u045d\u022f\2\u04d1\u04d2\5\u045b\u022e\2\u04d2")
buf.write("\u04d3\5\u043d\u021f\2\u04d3(\3\2\2\2\u04d4\u04d5\5\u0435")
buf.write("\u021b\2\u04d5\u04d6\5\u045d\u022f\2\u04d6\u04d7\5\u043b")
buf.write("\u021e\2\u04d7\u04d8\5\u0445\u0223\2\u04d8\u04d9\5\u045b")
buf.write("\u022e\2\u04d9*\3\2\2\2\u04da\u04db\5\u0435\u021b\2\u04db")
buf.write("\u04dc\5\u045d\u022f\2\u04dc\u04dd\5\u045b\u022e\2\u04dd")
buf.write("\u04de\5\u0443\u0222\2\u04de\u04df\5\u0445\u0223\2\u04df")
buf.write("\u04e0\5\u043b\u021e\2\u04e0,\3\2\2\2\u04e1\u04e2\5\u0435")
buf.write("\u021b\2\u04e2\u04e3\5\u045d\u022f\2\u04e3\u04e4\5\u045b")
buf.write("\u022e\2\u04e4\u04e5\5\u0451\u0229\2\u04e5.\3\2\2\2\u04e6")
buf.write("\u04e7\5\u0435\u021b\2\u04e7\u04e8\5\u045d\u022f\2\u04e8")
buf.write("\u04e9\5\u045b\u022e\2\u04e9\u04ea\5\u0451\u0229\2\u04ea")
buf.write("\u04eb\5\u044d\u0227\2\u04eb\u04ec\5\u0435\u021b\2\u04ec")
buf.write("\u04ed\5\u045b\u022e\2\u04ed\u04ee\5\u0445\u0223\2\u04ee")
buf.write("\u04ef\5\u0439\u021d\2\u04ef\60\3\2\2\2\u04f0\u04f1\5")
buf.write("\u0435\u021b\2\u04f1\u04f2\5\u045d\u022f\2\u04f2\u04f3")
buf.write("\5\u045b\u022e\2\u04f3\u04f4\5\u0451\u0229\2\u04f4\u04f5")
buf.write("\5\u044f\u0228\2\u04f5\u04f6\5\u0451\u0229\2\u04f6\u04f7")
buf.write("\5\u044d\u0227\2\u04f7\u04f8\5\u0451\u0229\2\u04f8\u04f9")
buf.write("\5\u045d\u022f\2\u04f9\u04fa\5\u0459\u022d\2\u04fa\u04fb")
buf.write("\7a\2\2\u04fb\u04fc\5\u045b\u022e\2\u04fc\u04fd\5\u0457")
buf.write("\u022c\2\u04fd\u04fe\5\u0435\u021b\2\u04fe\u04ff\5\u044f")
buf.write("\u0228\2\u04ff\u0500\5\u0459\u022d\2\u0500\u0501\5\u0435")
buf.write("\u021b\2\u0501\u0502\5\u0439\u021d\2\u0502\u0503\5\u045b")
buf.write("\u022e\2\u0503\u0504\5\u0445\u0223\2\u0504\u0505\5\u0451")
buf.write("\u0229\2\u0505\u0506\5\u044f\u0228\2\u0506\62\3\2\2\2")
buf.write("\u0507\u0508\5\u0437\u021c\2\u0508\u0509\5\u0435\u021b")
buf.write("\2\u0509\u050a\5\u045b\u022e\2\u050a\u050b\5\u0439\u021d")
buf.write("\2\u050b\u050c\5\u0443\u0222\2\u050c\64\3\2\2\2\u050d")
buf.write("\u050e\5\u0437\u021c\2\u050e\u050f\5\u043d\u021f\2\u050f")
buf.write("\u0510\5\u043f\u0220\2\u0510\u0511\5\u0451\u0229\2\u0511")
buf.write("\u0512\5\u0457\u022c\2\u0512\u0513\5\u043d\u021f\2\u0513")
buf.write("\66\3\2\2\2\u0514\u0515\5\u0437\u021c\2\u0515\u0516\5")
buf.write("\u043d\u021f\2\u0516\u0517\5\u0441\u0221\2\u0517\u0518")
buf.write("\5\u0445\u0223\2\u0518\u0519\5\u044f\u0228\2\u05198\3")
buf.write("\2\2\2\u051a\u051b\5\u0437\u021c\2\u051b\u051c\5\u043d")
buf.write("\u021f\2\u051c\u051d\5\u045b\u022e\2\u051d\u051e\5\u0461")
buf.write("\u0231\2\u051e\u051f\5\u043d\u021f\2\u051f\u0520\5\u043d")
buf.write("\u021f\2\u0520\u0521\5\u044f\u0228\2\u0521:\3\2\2\2\u0522")
buf.write("\u0523\5\u0437\u021c\2\u0523\u0524\5\u043f\u0220\2\u0524")
buf.write("\u0525\5\u0445\u0223\2\u0525\u0526\5\u044b\u0226\2\u0526")
buf.write("\u0527\5\u043d\u021f\2\u0527<\3\2\2\2\u0528\u0529\5\u0437")
buf.write("\u021c\2\u0529\u052a\5\u0445\u0223\2\u052a\u052b\5\u044f")
buf.write("\u0228\2\u052b\u052c\5\u0435\u021b\2\u052c\u052d\5\u0457")
buf.write("\u022c\2\u052d\u052e\5\u0465\u0233\2\u052e\u052f\7a\2")
buf.write("\2\u052f\u0530\5\u043b\u021e\2\u0530\u0531\5\u0451\u0229")
buf.write("\2\u0531\u0532\5\u045d\u022f\2\u0532\u0533\5\u0437\u021c")
buf.write("\2\u0533\u0534\5\u044b\u0226\2\u0534\u0535\5\u043d\u021f")
buf.write("\2\u0535>\3\2\2\2\u0536\u0537\5\u0437\u021c\2\u0537\u0538")
buf.write("\5\u0445\u0223\2\u0538\u0539\5\u044f\u0228\2\u0539\u053a")
buf.write("\5\u0435\u021b\2\u053a\u053b\5\u0457\u022c\2\u053b\u053c")
buf.write("\5\u0465\u0233\2\u053c\u053d\7a\2\2\u053d\u053e\5\u043f")
buf.write("\u0220\2\u053e\u053f\5\u044b\u0226\2\u053f\u0540\5\u0451")
buf.write("\u0229\2\u0540\u0541\5\u0435\u021b\2\u0541\u0542\5\u045b")
buf.write("\u022e\2\u0542@\3\2\2\2\u0543\u0544\5\u0437\u021c\2\u0544")
buf.write("\u0545\5\u0445\u0223\2\u0545\u0546\5\u044f\u0228\2\u0546")
buf.write("\u0547\5\u0435\u021b\2\u0547\u0548\5\u0457\u022c\2\u0548")
buf.write("\u0549\5\u0465\u0233\2\u0549\u054a\7a\2\2\u054a\u054b")
buf.write("\5\u0445\u0223\2\u054b\u054c\5\u044f\u0228\2\u054c\u054d")
buf.write("\5\u045b\u022e\2\u054d\u054e\5\u043d\u021f\2\u054e\u054f")
buf.write("\5\u0441\u0221\2\u054f\u0550\5\u043d\u021f\2\u0550\u0551")
buf.write("\5\u0457\u022c\2\u0551B\3\2\2\2\u0552\u0553\5\u0437\u021c")
buf.write("\2\u0553\u0554\5\u044b\u0226\2\u0554\u0555\5\u0451\u0229")
buf.write("\2\u0555\u0556\5\u0437\u021c\2\u0556D\3\2\2\2\u0557\u0558")
buf.write("\5\u0437\u021c\2\u0558\u0559\5\u044b\u0226\2\u0559\u055a")
buf.write("\5\u0451\u0229\2\u055a\u055b\5\u0439\u021d\2\u055b\u055c")
buf.write("\5\u0449\u0225\2\u055cF\3\2\2\2\u055d\u055e\5\u0437\u021c")
buf.write("\2\u055e\u055f\5\u0451\u0229\2\u055f\u0560\5\u043b\u021e")
buf.write("\2\u0560\u0561\5\u0465\u0233\2\u0561H\3\2\2\2\u0562\u0563")
buf.write("\5\u0437\u021c\2\u0563\u0564\5\u0451\u0229\2\u0564\u0565")
buf.write("\5\u0451\u0229\2\u0565\u0566\5\u044b\u0226\2\u0566\u0567")
buf.write("\5\u043d\u021f\2\u0567\u0568\5\u0435\u021b\2\u0568\u0569")
buf.write("\5\u044f\u0228\2\u0569J\3\2\2\2\u056a\u056b\5\u0437\u021c")
buf.write("\2\u056b\u056c\5\u0451\u0229\2\u056c\u056d\5\u045b\u022e")
buf.write("\2\u056d\u056e\5\u0443\u0222\2\u056eL\3\2\2\2\u056f\u0570")
buf.write("\5\u0437\u021c\2\u0570\u0571\5\u0457\u022c\2\u0571\u0572")
buf.write("\5\u043d\u021f\2\u0572\u0573\5\u0435\u021b\2\u0573\u0574")
buf.write("\5\u043b\u021e\2\u0574\u0575\5\u045b\u022e\2\u0575\u0576")
buf.write("\5\u0443\u0222\2\u0576N\3\2\2\2\u0577\u0578\5\u0437\u021c")
buf.write("\2\u0578\u0579\5\u045d\u022f\2\u0579\u057a\5\u044b\u0226")
buf.write("\2\u057a\u057b\5\u0449\u0225\2\u057bP\3\2\2\2\u057c\u057d")
buf.write("\5\u0437\u021c\2\u057d\u057e\5\u0465\u0233\2\u057eR\3")
buf.write("\2\2\2\u057f\u0580\5\u0437\u021c\2\u0580\u0581\5\u0465")
buf.write("\u0233\2\u0581\u0582\5\u045b\u022e\2\u0582\u0583\5\u043d")
buf.write("\u021f\2\u0583T\3\2\2\2\u0584\u0585\5\u0439\u021d\2\u0585")
buf.write("V\3\2\2\2\u0586\u0587\5\u0439\u021d\2\u0587\u0588\5\u0435")
buf.write("\u021b\2\u0588\u0589\5\u0439\u021d\2\u0589\u058a\5\u0443")
buf.write("\u0222\2\u058a\u058b\5\u043d\u021f\2\u058bX\3\2\2\2\u058c")
buf.write("\u058d\5\u0439\u021d\2\u058d\u058e\5\u0435\u021b\2\u058e")
buf.write("\u058f\5\u044b\u0226\2\u058f\u0590\5\u044b\u0226\2\u0590")
buf.write("Z\3\2\2\2\u0591\u0592\5\u0439\u021d\2\u0592\u0593\5\u0435")
buf.write("\u021b\2\u0593\u0594\5\u044f\u0228\2\u0594\u0595\5\u0451")
buf.write("\u0229\2\u0595\u0596\5\u044f\u0228\2\u0596\u0597\5\u0445")
buf.write("\u0223\2\u0597\u0598\5\u0439\u021d\2\u0598\u0599\5\u0435")
buf.write("\u021b\2\u0599\u059a\5\u044b\u0226\2\u059a\\\3\2\2\2\u059b")
buf.write("\u059c\5\u0439\u021d\2\u059c\u059d\5\u0435\u021b\2\u059d")
buf.write("\u059e\5\u0459\u022d\2\u059e\u059f\5\u0439\u021d\2\u059f")
buf.write("\u05a0\5\u0435\u021b\2\u05a0\u05a1\5\u043b\u021e\2\u05a1")
buf.write("\u05a2\5\u043d\u021f\2\u05a2^\3\2\2\2\u05a3\u05a4\5\u0439")
buf.write("\u021d\2\u05a4\u05a5\5\u0435\u021b\2\u05a5\u05a6\5\u0459")
buf.write("\u022d\2\u05a6\u05a7\5\u043d\u021f\2\u05a7`\3\2\2\2\u05a8")
buf.write("\u05a9\5\u0439\u021d\2\u05a9\u05aa\5\u0435\u021b\2\u05aa")
buf.write("\u05ab\5\u0459\u022d\2\u05ab\u05ac\5\u045b\u022e\2\u05ac")
buf.write("b\3\2\2\2\u05ad\u05ae\5\u0439\u021d\2\u05ae\u05af\5\u0443")
buf.write("\u0222\2\u05af\u05b0\5\u0435\u021b\2\u05b0\u05b1\5\u0457")
buf.write("\u022c\2\u05b1d\3\2\2\2\u05b2\u05b3\5\u0439\u021d\2\u05b3")
buf.write("\u05b4\5\u0443\u0222\2\u05b4\u05b5\5\u0435\u021b\2\u05b5")
buf.write("\u05b6\5\u0457\u022c\2\u05b6\u05b7\7a\2\2\u05b7\u05b8")
buf.write("\5\u0439\u021d\2\u05b8\u05b9\5\u0459\u022d\2\u05b9f\3")
buf.write("\2\2\2\u05ba\u05bb\5\u0439\u021d\2\u05bb\u05bc\5\u0443")
buf.write("\u0222\2\u05bc\u05bd\5\u0435\u021b\2\u05bd\u05be\5\u0457")
buf.write("\u022c\2\u05be\u05bf\5\u0435\u021b\2\u05bf\u05c0\5\u0439")
buf.write("\u021d\2\u05c0\u05c1\5\u045b\u022e\2\u05c1\u05c2\5\u043d")
buf.write("\u021f\2\u05c2\u05c3\5\u0457\u022c\2\u05c3h\3\2\2\2\u05c4")
buf.write("\u05c5\5\u0439\u021d\2\u05c5\u05c6\5\u0443\u0222\2\u05c6")
buf.write("\u05c7\5\u043d\u021f\2\u05c7\u05c8\5\u0439\u021d\2\u05c8")
buf.write("\u05c9\5\u0449\u0225\2\u05c9j\3\2\2\2\u05ca\u05cb\5\u0439")
buf.write("\u021d\2\u05cb\u05cc\5\u0443\u0222\2\u05cc\u05cd\5\u0457")
buf.write("\u022c\2\u05cdl\3\2\2\2\u05ce\u05cf\5\u0439\u021d\2\u05cf")
buf.write("\u05d0\5\u044b\u0226\2\u05d0\u05d1\5\u0451\u0229\2\u05d1")
buf.write("\u05d2\5\u0437\u021c\2\u05d2n\3\2\2\2\u05d3\u05d4\5\u0439")
buf.write("\u021d\2\u05d4\u05d5\5\u044b\u0226\2\u05d5\u05d6\5\u0451")
buf.write("\u0229\2\u05d6\u05d7\5\u0459\u022d\2\u05d7\u05d8\5\u043d")
buf.write("\u021f\2\u05d8p\3\2\2\2\u05d9\u05da\5\u0439\u021d\2\u05da")
buf.write("\u05db\5\u044b\u0226\2\u05db\u05dc\5\u045d\u022f\2\u05dc")
buf.write("\u05dd\5\u0459\u022d\2\u05dd\u05de\5\u045b\u022e\2\u05de")
buf.write("\u05df\5\u043d\u021f\2\u05df\u05e0\5\u0457\u022c\2\u05e0")
buf.write("r\3\2\2\2\u05e1\u05e2\5\u0439\u021d\2\u05e2\u05e3\5\u0451")
buf.write("\u0229\2\u05e3\u05e4\5\u044b\u0226\2\u05e4\u05e5\5\u044b")
buf.write("\u0226\2\u05e5\u05e6\5\u043d\u021f\2\u05e6\u05e7\5\u0439")
buf.write("\u021d\2\u05e7\u05e8\5\u045b\u022e\2\u05e8t\3\2\2\2\u05e9")
buf.write("\u05ea\5\u0439\u021d\2\u05ea\u05eb\5\u0451\u0229\2\u05eb")
buf.write("\u05ec\5\u044b\u0226\2\u05ec\u05ed\5\u045d\u022f\2\u05ed")
buf.write("\u05ee\5\u044d\u0227\2\u05ee\u05ef\5\u044f\u0228\2\u05ef")
buf.write("\u05f0\5\u0459\u022d\2\u05f0v\3\2\2\2\u05f1\u05f2\5\u0439")
buf.write("\u021d\2\u05f2\u05f3\5\u0451\u0229\2\u05f3\u05f4\5\u044d")
buf.write("\u0227\2\u05f4\u05f5\5\u044d\u0227\2\u05f5\u05f6\5\u043d")
buf.write("\u021f\2\u05f6\u05f7\5\u044f\u0228\2\u05f7\u05f8\5\u045b")
buf.write("\u022e\2\u05f8x\3\2\2\2\u05f9\u05fa\5\u0439\u021d\2\u05fa")
buf.write("\u05fb\5\u0451\u0229\2\u05fb\u05fc\5\u044d\u0227\2\u05fc")
buf.write("\u05fd\5\u044d\u0227\2\u05fd\u05fe\5\u0445\u0223\2\u05fe")
buf.write("\u05ff\5\u045b\u022e\2\u05ffz\3\2\2\2\u0600\u0601\5\u0439")
buf.write("\u021d\2\u0601\u0602\5\u0451\u0229\2\u0602\u0603\5\u044d")
buf.write("\u0227\2\u0603\u0604\5\u044d\u0227\2\u0604\u0605\5\u0445")
buf.write("\u0223\2\u0605\u0606\5\u045b\u022e\2\u0606\u0607\5\u045b")
buf.write("\u022e\2\u0607\u0608\5\u043d\u021f\2\u0608\u0609\5\u043b")
buf.write("\u021e\2\u0609|\3\2\2\2\u060a\u060b\5\u0439\u021d\2\u060b")
buf.write("\u060c\5\u0451\u0229\2\u060c\u060d\5\u044d\u0227\2\u060d")
buf.write("\u060e\5\u0453\u022a\2\u060e\u060f\5\u0435\u021b\2\u060f")
buf.write("\u0610\5\u045b\u022e\2\u0610\u0611\5\u0445\u0223\2\u0611")
buf.write("\u0612\5\u0437\u021c\2\u0612\u0613\5\u0445\u0223\2\u0613")
buf.write("\u0614\5\u044b\u0226\2\u0614\u0615\5\u0445\u0223\2\u0615")
buf.write("\u0616\5\u045b\u022e\2\u0616\u0617\5\u0465\u0233\2\u0617")
buf.write("~\3\2\2\2\u0618\u0619\5\u0439\u021d\2\u0619\u061a\5\u0451")
buf.write("\u0229\2\u061a\u061b\5\u044d\u0227\2\u061b\u061c\5\u0453")
buf.write("\u022a\2\u061c\u061d\5\u0445\u0223\2\u061d\u061e\5\u044b")
buf.write("\u0226\2\u061e\u061f\5\u043d\u021f\2\u061f\u0080\3\2\2")
buf.write("\2\u0620\u0621\5\u0439\u021d\2\u0621\u0622\5\u0451\u0229")
buf.write("\2\u0622\u0623\5\u044d\u0227\2\u0623\u0624\5\u0453\u022a")
buf.write("\2\u0624\u0625\5\u0451\u0229\2\u0625\u0626\5\u045d\u022f")
buf.write("\2\u0626\u0627\5\u044f\u0228\2\u0627\u0628\5\u043b\u021e")
buf.write("\2\u0628\u0082\3\2\2\2\u0629\u062a\5\u0439\u021d\2\u062a")
buf.write("\u062b\5\u0451\u0229\2\u062b\u062c\5\u044f\u0228\2\u062c")
buf.write("\u062d\5\u044f\u0228\2\u062d\u062e\5\u043d\u021f\2\u062e")
buf.write("\u062f\5\u0439\u021d\2\u062f\u0630\5\u045b\u022e\2\u0630")
buf.write("\u0084\3\2\2\2\u0631\u0632\5\u0439\u021d\2\u0632\u0633")
buf.write("\5\u0451\u0229\2\u0633\u0634\5\u044f\u0228\2\u0634\u0635")
buf.write("\5\u044f\u0228\2\u0635\u0636\5\u043d\u021f\2\u0636\u0637")
buf.write("\5\u0439\u021d\2\u0637\u0638\5\u045b\u022e\2\u0638\u0639")
buf.write("\7a\2\2\u0639\u063a\5\u0437\u021c\2\u063a\u063b\5\u0465")
buf.write("\u0233\2\u063b\u063c\7a\2\2\u063c\u063d\5\u0457\u022c")
buf.write("\2\u063d\u063e\5\u0451\u0229\2\u063e\u063f\5\u0451\u0229")
buf.write("\2\u063f\u0640\5\u045b\u022e\2\u0640\u0086\3\2\2\2\u0641")
buf.write("\u0642\5\u0439\u021d\2\u0642\u0643\5\u0451\u0229\2\u0643")
buf.write("\u0644\5\u044f\u0228\2\u0644\u0645\5\u0459\u022d\2\u0645")
buf.write("\u0646\5\u045b\u022e\2\u0646\u0647\5\u0435\u021b\2\u0647")
buf.write("\u0648\5\u044f\u0228\2\u0648\u0649\5\u045b\u022e\2\u0649")
buf.write("\u0088\3\2\2\2\u064a\u064b\5\u0439\u021d\2\u064b\u064c")
buf.write("\5\u0451\u0229\2\u064c\u064d\5\u044f\u0228\2\u064d\u064e")
buf.write("\5\u0459\u022d\2\u064e\u064f\5\u045b\u022e\2\u064f\u0650")
buf.write("\5\u0457\u022c\2\u0650\u0651\5\u0435\u021b\2\u0651\u0652")
buf.write("\5\u0445\u0223\2\u0652\u0653\5\u044f\u0228\2\u0653\u0654")
buf.write("\5\u045b\u022e\2\u0654\u008a\3\2\2\2\u0655\u0656\5\u0439")
buf.write("\u021d\2\u0656\u0657\5\u0451\u0229\2\u0657\u0658\5\u044f")
buf.write("\u0228\2\u0658\u0659\5\u0459\u022d\2\u0659\u065a\5\u045b")
buf.write("\u022e\2\u065a\u065b\5\u0457\u022c\2\u065b\u065c\5\u0435")
buf.write("\u021b\2\u065c\u065d\5\u0445\u0223\2\u065d\u065e\5\u044f")
buf.write("\u0228\2\u065e\u065f\5\u045b\u022e\2\u065f\u0660\5\u0459")
buf.write("\u022d\2\u0660\u008c\3\2\2\2\u0661\u0662\5\u0439\u021d")
buf.write("\2\u0662\u0663\5\u0451\u0229\2\u0663\u0664\5\u044f\u0228")
buf.write("\2\u0664\u0665\5\u0459\u022d\2\u0665\u0666\5\u045b\u022e")
buf.write("\2\u0666\u0667\5\u0457\u022c\2\u0667\u0668\5\u045d\u022f")
buf.write("\2\u0668\u0669\5\u0439\u021d\2\u0669\u066a\5\u045b\u022e")
buf.write("\2\u066a\u066b\5\u0451\u0229\2\u066b\u066c\5\u0457\u022c")
buf.write("\2\u066c\u008e\3\2\2\2\u066d\u066e\5\u0439\u021d\2\u066e")
buf.write("\u066f\5\u0451\u0229\2\u066f\u0670\5\u044f\u0228\2\u0670")
buf.write("\u0671\5\u045b\u022e\2\u0671\u0672\5\u043d\u021f\2\u0672")
buf.write("\u0673\5\u044f\u0228\2\u0673\u0674\5\u045b\u022e\2\u0674")
buf.write("\u0090\3\2\2\2\u0675\u0676\5\u0439\u021d\2\u0676\u0677")
buf.write("\5\u0451\u0229\2\u0677\u0678\5\u044f\u0228\2\u0678\u0679")
buf.write("\5\u045b\u022e\2\u0679\u067a\5\u043d\u021f\2\u067a\u067b")
buf.write("\5\u0463\u0232\2\u067b\u067c\5\u045b\u022e\2\u067c\u0092")
buf.write("\3\2\2\2\u067d\u067e\5\u0439\u021d\2\u067e\u067f\5\u0451")
buf.write("\u0229\2\u067f\u0680\5\u044f\u0228\2\u0680\u0681\5\u045b")
buf.write("\u022e\2\u0681\u0682\5\u0445\u0223\2\u0682\u0683\5\u044f")
buf.write("\u0228\2\u0683\u0684\5\u045d\u022f\2\u0684\u0685\5\u043d")
buf.write("\u021f\2\u0685\u0094\3\2\2\2\u0686\u0687\5\u0439\u021d")
buf.write("\2\u0687\u0688\5\u0451\u0229\2\u0688\u0689\5\u044f\u0228")
buf.write("\2\u0689\u068a\5\u045f\u0230\2\u068a\u068b\5\u043d\u021f")
buf.write("\2\u068b\u068c\5\u0457\u022c\2\u068c\u068d\5\u045b\u022e")
buf.write("\2\u068d\u0096\3\2\2\2\u068e\u068f\5\u0439\u021d\2\u068f")
buf.write("\u0690\5\u0451\u0229\2\u0690\u0691\5\u0457\u022c\2\u0691")
buf.write("\u0692\5\u0457\u022c\2\u0692\u0693\5\u045d\u022f\2\u0693")
buf.write("\u0694\5\u0453\u022a\2\u0694\u0695\5\u045b\u022e\2\u0695")
buf.write("\u0696\7a\2\2\u0696\u0697\5\u0463\u0232\2\u0697\u0698")
buf.write("\5\u0445\u0223\2\u0698\u0699\5\u043b\u021e\2\u0699\u0098")
buf.write("\3\2\2\2\u069a\u069b\5\u0439\u021d\2\u069b\u069c\5\u0451")
buf.write("\u0229\2\u069c\u069d\5\u0457\u022c\2\u069d\u069e\5\u0457")
buf.write("\u022c\2\u069e\u069f\5\u045d\u022f\2\u069f\u06a0\5\u0453")
buf.write("\u022a\2\u06a0\u06a1\5\u045b\u022e\2\u06a1\u06a2\7a\2")
buf.write("\2\u06a2\u06a3\5\u0463\u0232\2\u06a3\u06a4\5\u0445\u0223")
buf.write("\2\u06a4\u06a5\5\u043b\u021e\2\u06a5\u06a6\7a\2\2\u06a6")
buf.write("\u06a7\5\u0435\u021b\2\u06a7\u06a8\5\u044b\u0226\2\u06a8")
buf.write("\u06a9\5\u044b\u0226\2\u06a9\u009a\3\2\2\2\u06aa\u06ab")
buf.write("\5\u0439\u021d\2\u06ab\u06ac\5\u0451\u0229\2\u06ac\u06ad")
buf.write("\5\u0459\u022d\2\u06ad\u06ae\5\u045b\u022e\2\u06ae\u009c")
buf.write("\3\2\2\2\u06af\u06b0\5\u0439\u021d\2\u06b0\u06b1\5\u0451")
buf.write("\u0229\2\u06b1\u06b2\5\u045d\u022f\2\u06b2\u06b3\5\u044f")
buf.write("\u0228\2\u06b3\u06b4\5\u045b\u022e\2\u06b4\u009e\3\2\2")
buf.write("\2\u06b5\u06b6\5\u0439\u021d\2\u06b6\u06b7\5\u0457\u022c")
buf.write("\2\u06b7\u06b8\5\u043d\u021f\2\u06b8\u06b9\5\u0435\u021b")
buf.write("\2\u06b9\u06ba\5\u045b\u022e\2\u06ba\u06bb\5\u043d\u021f")
buf.write("\2\u06bb\u00a0\3\2\2\2\u06bc\u06bd\5\u0439\u021d\2\u06bd")
buf.write("\u06be\5\u0457\u022c\2\u06be\u06bf\5\u0451\u0229\2\u06bf")
buf.write("\u06c0\5\u0459\u022d\2\u06c0\u06c1\5\u0459\u022d\2\u06c1")
buf.write("\u00a2\3\2\2\2\u06c2\u06c3\5\u0439\u021d\2\u06c3\u06c4")
buf.write("\5\u045d\u022f\2\u06c4\u06c5\5\u0437\u021c\2\u06c5\u06c6")
buf.write("\5\u043d\u021f\2\u06c6\u00a4\3\2\2\2\u06c7\u06c8\5\u0439")
buf.write("\u021d\2\u06c8\u06c9\5\u045d\u022f\2\u06c9\u06ca\5\u0457")
buf.write("\u022c\2\u06ca\u06cb\5\u0457\u022c\2\u06cb\u06cc\5\u043d")
buf.write("\u021f\2\u06cc\u06cd\5\u044f\u0228\2\u06cd\u06ce\5\u045b")
buf.write("\u022e\2\u06ce\u00a6\3\2\2\2\u06cf\u06d0\5\u0439\u021d")
buf.write("\2\u06d0\u06d1\5\u045d\u022f\2\u06d1\u06d2\5\u0457\u022c")
buf.write("\2\u06d2\u06d3\5\u0457\u022c\2\u06d3\u06d4\5\u043d\u021f")
buf.write("\2\u06d4\u06d5\5\u044f\u0228\2\u06d5\u06d6\5\u045b\u022e")
buf.write("\2\u06d6\u06d7\7a\2\2\u06d7\u06d8\5\u045d\u022f\2\u06d8")
buf.write("\u06d9\5\u0459\u022d\2\u06d9\u06da\5\u043d\u021f\2\u06da")
buf.write("\u06db\5\u0457\u022c\2\u06db\u00a8\3\2\2\2\u06dc\u06dd")
buf.write("\5\u0439\u021d\2\u06dd\u06de\5\u045d\u022f\2\u06de\u06df")
buf.write("\5\u0457\u022c\2\u06df\u06e0\5\u0459\u022d\2\u06e0\u06e1")
buf.write("\5\u0451\u0229\2\u06e1\u06e2\5\u0457\u022c\2\u06e2\u00aa")
buf.write("\3\2\2\2\u06e3\u06e4\5\u0439\u021d\2\u06e4\u06e5\5\u045d")
buf.write("\u022f\2\u06e5\u06e6\5\u0459\u022d\2\u06e6\u06e7\5\u045b")
buf.write("\u022e\2\u06e7\u06e8\5\u0451\u0229\2\u06e8\u06e9\5\u044d")
buf.write("\u0227\2\u06e9\u06ea\5\u043b\u021e\2\u06ea\u06eb\5\u0435")
buf.write("\u021b\2\u06eb\u06ec\5\u045b\u022e\2\u06ec\u06ed\5\u045d")
buf.write("\u022f\2\u06ed\u06ee\5\u044d\u0227\2\u06ee\u00ac\3\2\2")
buf.write("\2\u06ef\u06f0\5\u0439\u021d\2\u06f0\u06f1\5\u0465\u0233")
buf.write("\2\u06f1\u06f2\5\u0439\u021d\2\u06f2\u06f3\5\u044b\u0226")
buf.write("\2\u06f3\u06f4\5\u043d\u021f\2\u06f4\u00ae\3\2\2\2\u06f5")
buf.write("\u06f6\5\u043b\u021e\2\u06f6\u06f7\5\u0435\u021b\2\u06f7")
buf.write("\u06f8\5\u045b\u022e\2\u06f8\u06f9\5\u0435\u021b\2\u06f9")
buf.write("\u00b0\3\2\2\2\u06fa\u06fb\5\u043b\u021e\2\u06fb\u06fc")
buf.write("\5\u0435\u021b\2\u06fc\u06fd\5\u045b\u022e\2\u06fd\u06fe")
buf.write("\5\u0435\u021b\2\u06fe\u06ff\5\u0437\u021c\2\u06ff\u0700")
buf.write("\5\u0435\u021b\2\u0700\u0701\5\u0459\u022d\2\u0701\u0702")
buf.write("\5\u043d\u021f\2\u0702\u00b2\3\2\2\2\u0703\u0704\5\u043b")
buf.write("\u021e\2\u0704\u0705\5\u0435\u021b\2\u0705\u0706\5\u045b")
buf.write("\u022e\2\u0706\u0707\5\u043d\u021f\2\u0707\u00b4\3\2\2")
buf.write("\2\u0708\u0709\5\u043b\u021e\2\u0709\u070a\5\u0435\u021b")
buf.write("\2\u070a\u070b\5\u0465\u0233\2\u070b\u00b6\3\2\2\2\u070c")
buf.write("\u070d\5\u043b\u021e\2\u070d\u070e\5\u0437\u021c\2\u070e")
buf.write("\u070f\7a\2\2\u070f\u0710\5\u0457\u022c\2\u0710\u0711")
buf.write("\5\u0451\u0229\2\u0711\u0712\5\u044b\u0226\2\u0712\u0713")
buf.write("\5\u043d\u021f\2\u0713\u0714\7a\2\2\u0714\u0715\5\u0439")
buf.write("\u021d\2\u0715\u0716\5\u0443\u0222\2\u0716\u0717\5\u0435")
buf.write("\u021b\2\u0717\u0718\5\u044f\u0228\2\u0718\u0719\5\u0441")
buf.write("\u0221\2\u0719\u071a\5\u043d\u021f\2\u071a\u00b8\3\2\2")
buf.write("\2\u071b\u071c\5\u043b\u021e\2\u071c\u071d\5\u0437\u021c")
buf.write("\2\u071d\u071e\5\u045b\u022e\2\u071e\u071f\5\u0445\u0223")
buf.write("\2\u071f\u0720\5\u044d\u0227\2\u0720\u0721\5\u043d\u021f")
buf.write("\2\u0721\u0722\5\u0467\u0234\2\u0722\u0723\5\u0451\u0229")
buf.write("\2\u0723\u0724\5\u044f\u0228\2\u0724\u0725\5\u043d\u021f")
buf.write("\2\u0725\u00ba\3\2\2\2\u0726\u0727\5\u043b\u021e\2\u0727")
buf.write("\u0728\5\u043b\u021e\2\u0728\u0729\5\u044b\u0226\2\u0729")
buf.write("\u00bc\3\2\2\2\u072a\u072b\5\u043b\u021e\2\u072b\u072c")
buf.write("\5\u043d\u021f\2\u072c\u072d\5\u0437\u021c\2\u072d\u072e")
buf.write("\5\u045d\u022f\2\u072e\u072f\5\u0441\u0221\2\u072f\u00be")
buf.write("\3\2\2\2\u0730\u0731\5\u043b\u021e\2\u0731\u0732\5\u043d")
buf.write("\u021f\2\u0732\u0733\5\u0439\u021d\2\u0733\u00c0\3\2\2")
buf.write("\2\u0734\u0735\5\u043b\u021e\2\u0735\u0736\5\u043d\u021f")
buf.write("\2\u0736\u0737\5\u0439\u021d\2\u0737\u0738\5\u0445\u0223")
buf.write("\2\u0738\u0739\5\u044d\u0227\2\u0739\u073a\5\u0435\u021b")
buf.write("\2\u073a\u073b\5\u044b\u0226\2\u073b\u00c2\3\2\2\2\u073c")
buf.write("\u073d\5\u043b\u021e\2\u073d\u073e\5\u043d\u021f\2\u073e")
buf.write("\u073f\5\u0439\u021d\2\u073f\u0740\5\u044b\u0226\2\u0740")
buf.write("\u0741\5\u0435\u021b\2\u0741\u0742\5\u0457\u022c\2\u0742")
buf.write("\u0743\5\u043d\u021f\2\u0743\u00c4\3\2\2\2\u0744\u0745")
buf.write("\5\u043b\u021e\2\u0745\u0746\5\u043d\u021f\2\u0746\u0747")
buf.write("\5\u0439\u021d\2\u0747\u0748\5\u0451\u0229\2\u0748\u0749")
buf.write("\5\u044d\u0227\2\u0749\u074a\5\u0453\u022a\2\u074a\u074b")
buf.write("\5\u0451\u0229\2\u074b\u074c\5\u0459\u022d\2\u074c\u074d")
buf.write("\5\u043d\u021f\2\u074d\u00c6\3\2\2\2\u074e\u074f\5\u043b")
buf.write("\u021e\2\u074f\u0750\5\u043d\u021f\2\u0750\u0751\5\u0439")
buf.write("\u021d\2\u0751\u0752\5\u0457\u022c\2\u0752\u0753\5\u043d")
buf.write("\u021f\2\u0753\u0754\5\u044d\u0227\2\u0754\u0755\5\u043d")
buf.write("\u021f\2\u0755\u0756\5\u044f\u0228\2\u0756\u0757\5\u045b")
buf.write("\u022e\2\u0757\u00c8\3\2\2\2\u0758\u0759\5\u043b\u021e")
buf.write("\2\u0759\u075a\5\u043d\u021f\2\u075a\u075b\5\u043f\u0220")
buf.write("\2\u075b\u075c\5\u0435\u021b\2\u075c\u075d\5\u045d\u022f")
buf.write("\2\u075d\u075e\5\u044b\u0226\2\u075e\u075f\5\u045b\u022e")
buf.write("\2\u075f\u00ca\3\2\2\2\u0760\u0761\5\u043b\u021e\2\u0761")
buf.write("\u0762\5\u043d\u021f\2\u0762\u0763\5\u043f\u0220\2\u0763")
buf.write("\u0764\5\u0435\u021b\2\u0764\u0765\5\u045d\u022f\2\u0765")
buf.write("\u0766\5\u044b\u0226\2\u0766\u0767\5\u045b\u022e\2\u0767")
buf.write("\u0768\5\u0459\u022d\2\u0768\u00cc\3\2\2\2\u0769\u076a")
buf.write("\5\u043b\u021e\2\u076a\u076b\5\u043d\u021f\2\u076b\u076c")
buf.write("\5\u043f\u0220\2\u076c\u076d\5\u043d\u021f\2\u076d\u076e")
buf.write("\5\u0457\u022c\2\u076e\u076f\5\u0457\u022c\2\u076f\u0770")
buf.write("\5\u043d\u021f\2\u0770\u0771\5\u043b\u021e\2\u0771\u00ce")
buf.write("\3\2\2\2\u0772\u0773\5\u043b\u021e\2\u0773\u0774\5\u043d")
buf.write("\u021f\2\u0774\u0775\5\u043f\u0220\2\u0775\u0776\5\u0445")
buf.write("\u0223\2\u0776\u0777\5\u044f\u0228\2\u0777\u0778\5\u043d")
buf.write("\u021f\2\u0778\u0779\5\u0457\u022c\2\u0779\u00d0\3\2\2")
buf.write("\2\u077a\u077b\5\u043b\u021e\2\u077b\u077c\5\u043d\u021f")
buf.write("\2\u077c\u077d\5\u044b\u0226\2\u077d\u077e\5\u043d\u021f")
buf.write("\2\u077e\u077f\5\u045b\u022e\2\u077f\u0780\5\u043d\u021f")
buf.write("\2\u0780\u00d2\3\2\2\2\u0781\u0782\5\u043b\u021e\2\u0782")
buf.write("\u0783\5\u043d\u021f\2\u0783\u0784\5\u0453\u022a\2\u0784")
buf.write("\u0785\5\u045b\u022e\2\u0785\u0786\5\u0443\u0222\2\u0786")
buf.write("\u00d4\3\2\2\2\u0787\u0788\5\u043b\u021e\2\u0788\u0789")
buf.write("\5\u043d\u021f\2\u0789\u078a\5\u0459\u022d\2\u078a\u078b")
buf.write("\5\u0439\u021d\2\u078b\u00d6\3\2\2\2\u078c\u078d\5\u043b")
buf.write("\u021e\2\u078d\u078e\5\u043d\u021f\2\u078e\u078f\5\u045b")
buf.write("\u022e\2\u078f\u0790\5\u043d\u021f\2\u0790\u0791\5\u0457")
buf.write("\u022c\2\u0791\u0792\5\u044d\u0227\2\u0792\u0793\5\u0445")
buf.write("\u0223\2\u0793\u0794\5\u044f\u0228\2\u0794\u0795\5\u0445")
buf.write("\u0223\2\u0795\u0796\5\u0459\u022d\2\u0796\u0797\5\u045b")
buf.write("\u022e\2\u0797\u0798\5\u0445\u0223\2\u0798\u0799\5\u0439")
buf.write("\u021d\2\u0799\u00d8\3\2\2\2\u079a\u079b\5\u043b\u021e")
buf.write("\2\u079b\u079c\5\u0445\u0223\2\u079c\u079d\5\u044d\u0227")
buf.write("\2\u079d\u079e\5\u043d\u021f\2\u079e\u079f\5\u044f\u0228")
buf.write("\2\u079f\u07a0\5\u0459\u022d\2\u07a0\u07a1\5\u0445\u0223")
buf.write("\2\u07a1\u07a2\5\u0451\u0229\2\u07a2\u07a3\5\u044f\u0228")
buf.write("\2\u07a3\u00da\3\2\2\2\u07a4\u07a5\5\u043b\u021e\2\u07a5")
buf.write("\u07a6\5\u0445\u0223\2\u07a6\u07a7\5\u0459\u022d\2\u07a7")
buf.write("\u07a8\5\u0435\u021b\2\u07a8\u07a9\5\u0437\u021c\2\u07a9")
buf.write("\u07aa\5\u044b\u0226\2\u07aa\u07ab\5\u043d\u021f\2\u07ab")
buf.write("\u00dc\3\2\2\2\u07ac\u07ad\5\u043b\u021e\2\u07ad\u07ae")
buf.write("\5\u0445\u0223\2\u07ae\u07af\5\u0459\u022d\2\u07af\u07b0")
buf.write("\5\u0435\u021b\2\u07b0\u07b1\5\u0459\u022d\2\u07b1\u07b2")
buf.write("\5\u0459\u022d\2\u07b2\u07b3\5\u0451\u0229\2\u07b3\u07b4")
buf.write("\5\u0439\u021d\2\u07b4\u07b5\5\u0445\u0223\2\u07b5\u07b6")
buf.write("\5\u0435\u021b\2\u07b6\u07b7\5\u045b\u022e\2\u07b7\u07b8")
buf.write("\5\u043d\u021f\2\u07b8\u00de\3\2\2\2\u07b9\u07ba\5\u043b")
buf.write("\u021e\2\u07ba\u07bb\5\u0445\u0223\2\u07bb\u07bc\5\u0459")
buf.write("\u022d\2\u07bc\u07bd\5\u045b\u022e\2\u07bd\u07be\5\u0445")
buf.write("\u0223\2\u07be\u07bf\5\u044f\u0228\2\u07bf\u07c0\5\u0439")
buf.write("\u021d\2\u07c0\u07c1\5\u045b\u022e\2\u07c1\u00e0\3\2\2")
buf.write("\2\u07c2\u07c3\5\u043b\u021e\2\u07c3\u07c4\5\u0451\u0229")
buf.write("\2\u07c4\u07c5\5\u0439\u021d\2\u07c5\u07c6\5\u045d\u022f")
buf.write("\2\u07c6\u07c7\5\u044d\u0227\2\u07c7\u07c8\5\u043d\u021f")
buf.write("\2\u07c8\u07c9\5\u044f\u0228\2\u07c9\u07ca\5\u045b\u022e")
buf.write("\2\u07ca\u00e2\3\2\2\2\u07cb\u07cc\5\u043b\u021e\2\u07cc")
buf.write("\u07cd\5\u0451\u0229\2\u07cd\u07ce\5\u045d\u022f\2\u07ce")
buf.write("\u07cf\5\u0437\u021c\2\u07cf\u07d0\5\u044b\u0226\2\u07d0")
buf.write("\u07d1\5\u043d\u021f\2\u07d1\u00e4\3\2\2\2\u07d2\u07d3")
buf.write("\5\u043b\u021e\2\u07d3\u07d4\5\u0457\u022c\2\u07d4\u07d5")
buf.write("\5\u0451\u0229\2\u07d5\u07d6\5\u0453\u022a\2\u07d6\u00e6")
buf.write("\3\2\2\2\u07d7\u07d8\5\u043b\u021e\2\u07d8\u07d9\5\u0459")
buf.write("\u022d\2\u07d9\u07da\5\u0445\u0223\2\u07da\u07db\5\u044f")
buf.write("\u0228\2\u07db\u07dc\5\u045b\u022e\2\u07dc\u07dd\5\u043d")
buf.write("\u021f\2\u07dd\u07de\5\u0457\u022c\2\u07de\u07df\5\u045f")
buf.write("\u0230\2\u07df\u07e0\5\u0435\u021b\2\u07e0\u07e1\5\u044b")
buf.write("\u0226\2\u07e1\u07e2\7a\2\2\u07e2\u07e3\5\u045d\u022f")
buf.write("\2\u07e3\u07e4\5\u044f\u0228\2\u07e4\u07e5\5\u0439\u021d")
buf.write("\2\u07e5\u07e6\5\u0451\u0229\2\u07e6\u07e7\5\u044f\u0228")
buf.write("\2\u07e7\u07e8\5\u0459\u022d\2\u07e8\u07e9\5\u045b\u022e")
buf.write("\2\u07e9\u07ea\5\u0457\u022c\2\u07ea\u07eb\5\u0435\u021b")
buf.write("\2\u07eb\u07ec\5\u0445\u0223\2\u07ec\u07ed\5\u044f\u0228")
buf.write("\2\u07ed\u07ee\5\u043d\u021f\2\u07ee\u07ef\5\u043b\u021e")
buf.write("\2\u07ef\u00e8\3\2\2\2\u07f0\u07f1\5\u043d\u021f\2\u07f1")
buf.write("\u07f2\5\u0435\u021b\2\u07f2\u07f3\5\u0439\u021d\2\u07f3")
buf.write("\u07f4\5\u0443\u0222\2\u07f4\u00ea\3\2\2\2\u07f5\u07f6")
buf.write("\5\u043d\u021f\2\u07f6\u07f7\5\u044b\u0226\2\u07f7\u07f8")
buf.write("\5\u043d\u021f\2\u07f8\u07f9\5\u044d\u0227\2\u07f9\u07fa")
buf.write("\5\u043d\u021f\2\u07fa\u07fb\5\u044f\u0228\2\u07fb\u07fc")
buf.write("\5\u045b\u022e\2\u07fc\u00ec\3\2\2\2\u07fd\u07fe\5\u043d")
buf.write("\u021f\2\u07fe\u07ff\5\u044b\u0226\2\u07ff\u0800\5\u0459")
buf.write("\u022d\2\u0800\u0801\5\u043d\u021f\2\u0801\u00ee\3\2\2")
buf.write("\2\u0802\u0803\5\u043d\u021f\2\u0803\u0804\5\u044b\u0226")
buf.write("\2\u0804\u0805\5\u0459\u022d\2\u0805\u0806\5\u0445\u0223")
buf.write("\2\u0806\u0807\5\u043f\u0220\2\u0807\u00f0\3\2\2\2\u0808")
buf.write("\u0809\5\u043d\u021f\2\u0809\u080a\5\u044d\u0227\2\u080a")
buf.write("\u080b\5\u0453\u022a\2\u080b\u080c\5\u045b\u022e\2\u080c")
buf.write("\u080d\5\u0465\u0233\2\u080d\u00f2\3\2\2\2\u080e\u080f")
buf.write("\5\u043d\u021f\2\u080f\u0810\5\u044f\u0228\2\u0810\u0811")
buf.write("\5\u0435\u021b\2\u0811\u0812\5\u0437\u021c\2\u0812\u0813")
buf.write("\5\u044b\u0226\2\u0813\u0814\5\u043d\u021f\2\u0814\u00f4")
buf.write("\3\2\2\2\u0815\u0816\5\u043d\u021f\2\u0816\u0817\5\u044f")
buf.write("\u0228\2\u0817\u0818\5\u0439\u021d\2\u0818\u0819\5\u0451")
buf.write("\u0229\2\u0819\u081a\5\u043b\u021e\2\u081a\u081b\5\u0445")
buf.write("\u0223\2\u081b\u081c\5\u044f\u0228\2\u081c\u081d\5\u0441")
buf.write("\u0221\2\u081d\u00f6\3\2\2\2\u081e\u081f\5\u043d\u021f")
buf.write("\2\u081f\u0820\5\u044f\u0228\2\u0820\u0821\5\u043b\u021e")
buf.write("\2\u0821\u00f8\3\2\2\2\u0822\u0823\5\u043d\u021f\2\u0823")
buf.write("\u0824\5\u044f\u0228\2\u0824\u0825\5\u045b\u022e\2\u0825")
buf.write("\u0826\5\u0445\u0223\2\u0826\u0827\5\u045b\u022e\2\u0827")
buf.write("\u0828\5\u0465\u0233\2\u0828\u0829\5\u043d\u021f\2\u0829")
buf.write("\u082a\5\u0459\u022d\2\u082a\u082b\5\u0439\u021d\2\u082b")
buf.write("\u082c\5\u0435\u021b\2\u082c\u082d\5\u0453\u022a\2\u082d")
buf.write("\u082e\5\u0445\u0223\2\u082e\u082f\5\u044f\u0228\2\u082f")
buf.write("\u0830\5\u0441\u0221\2\u0830\u00fa\3\2\2\2\u0831\u0832")
buf.write("\5\u043d\u021f\2\u0832\u0833\5\u0457\u022c\2\u0833\u0834")
buf.write("\5\u0457\u022c\2\u0834\u00fc\3\2\2\2\u0835\u0836\5\u043d")
buf.write("\u021f\2\u0836\u0837\5\u0457\u022c\2\u0837\u0838\5\u0457")
buf.write("\u022c\2\u0838\u0839\5\u0451\u0229\2\u0839\u083a\5\u0457")
buf.write("\u022c\2\u083a\u083b\5\u0459\u022d\2\u083b\u00fe\3\2\2")
buf.write("\2\u083c\u083d\5\u043d\u021f\2\u083d\u083e\5\u0459\u022d")
buf.write("\2\u083e\u083f\5\u0439\u021d\2\u083f\u0840\5\u0435\u021b")
buf.write("\2\u0840\u0841\5\u0453\u022a\2\u0841\u0842\5\u043d\u021f")
buf.write("\2\u0842\u0100\3\2\2\2\u0843\u0844\5\u043d\u021f\2\u0844")
buf.write("\u0845\5\u045f\u0230\2\u0845\u0846\5\u0435\u021b\2\u0846")
buf.write("\u0847\5\u044b\u0226\2\u0847\u0848\5\u044f\u0228\2\u0848")
buf.write("\u0849\5\u0435\u021b\2\u0849\u084a\5\u044d\u0227\2\u084a")
buf.write("\u084b\5\u043d\u021f\2\u084b\u0102\3\2\2\2\u084c\u084d")
buf.write("\5\u043d\u021f\2\u084d\u084e\5\u0463\u0232\2\u084e\u084f")
buf.write("\5\u0439\u021d\2\u084f\u0850\5\u043d\u021f\2\u0850\u0851")
buf.write("\5\u0453\u022a\2\u0851\u0852\5\u045b\u022e\2\u0852\u0104")
buf.write("\3\2\2\2\u0853\u0854\5\u043d\u021f\2\u0854\u0855\5\u0463")
buf.write("\u0232\2\u0855\u0856\5\u0439\u021d\2\u0856\u0857\5\u043d")
buf.write("\u021f\2\u0857\u0858\5\u0453\u022a\2\u0858\u0859\5\u045b")
buf.write("\u022e\2\u0859\u085a\5\u0445\u0223\2\u085a\u085b\5\u0451")
buf.write("\u0229\2\u085b\u085c\5\u044f\u0228\2\u085c\u0106\3\2\2")
buf.write("\2\u085d\u085e\5\u043d\u021f\2\u085e\u085f\5\u0463\u0232")
buf.write("\2\u085f\u0860\5\u0439\u021d\2\u0860\u0861\5\u043d\u021f")
buf.write("\2\u0861\u0862\5\u0453\u022a\2\u0862\u0863\5\u045b\u022e")
buf.write("\2\u0863\u0864\5\u0445\u0223\2\u0864\u0865\5\u0451\u0229")
buf.write("\2\u0865\u0866\5\u044f\u0228\2\u0866\u0867\7a\2\2\u0867")
buf.write("\u0868\5\u0445\u0223\2\u0868\u0869\5\u044f\u0228\2\u0869")
buf.write("\u086a\5\u0445\u0223\2\u086a\u086b\5\u045b\u022e\2\u086b")
buf.write("\u0108\3\2\2\2\u086c\u086d\5\u043d\u021f\2\u086d\u086e")
buf.write("\5\u0463\u0232\2\u086e\u086f\5\u0439\u021d\2\u086f\u0870")
buf.write("\5\u043d\u021f\2\u0870\u0871\5\u0453\u022a\2\u0871\u0872")
buf.write("\5\u045b\u022e\2\u0872\u0873\5\u0445\u0223\2\u0873\u0874")
buf.write("\5\u0451\u0229\2\u0874\u0875\5\u044f\u0228\2\u0875\u0876")
buf.write("\5\u0459\u022d\2\u0876\u010a\3\2\2\2\u0877\u0878\5\u043d")
buf.write("\u021f\2\u0878\u0879\5\u0463\u0232\2\u0879\u087a\5\u0439")
buf.write("\u021d\2\u087a\u087b\5\u044b\u0226\2\u087b\u087c\5\u045d")
buf.write("\u022f\2\u087c\u087d\5\u043b\u021e\2\u087d\u087e\5\u043d")
buf.write("\u021f\2\u087e\u010c\3\2\2\2\u087f\u0880\5\u043d\u021f")
buf.write("\2\u0880\u0881\5\u0463\u0232\2\u0881\u0882\5\u0439\u021d")
buf.write("\2\u0882\u0883\5\u044b\u0226\2\u0883\u0884\5\u045d\u022f")
buf.write("\2\u0884\u0885\5\u0459\u022d\2\u0885\u0886\5\u0445\u0223")
buf.write("\2\u0886\u0887\5\u045f\u0230\2\u0887\u0888\5\u043d\u021f")
buf.write("\2\u0888\u010e\3\2\2\2\u0889\u088a\5\u043d\u021f\2\u088a")
buf.write("\u088b\5\u0463\u0232\2\u088b\u088c\5\u043d\u021f\2\u088c")
buf.write("\u088d\5\u0439\u021d\2\u088d\u088e\5\u045d\u022f\2\u088e")
buf.write("\u088f\5\u045b\u022e\2\u088f\u0890\5\u043d\u021f\2\u0890")
buf.write("\u0110\3\2\2\2\u0891\u0892\5\u043d\u021f\2\u0892\u0893")
buf.write("\5\u0463\u0232\2\u0893\u0894\5\u0445\u0223\2\u0894\u0895")
buf.write("\5\u0459\u022d\2\u0895\u0896\5\u045b\u022e\2\u0896\u0897")
buf.write("\5\u0459\u022d\2\u0897\u0112\3\2\2\2\u0898\u0899\5\u043d")
buf.write("\u021f\2\u0899\u089a\5\u0463\u0232\2\u089a\u089b\5\u0445")
buf.write("\u0223\2\u089b\u089c\5\u045b\u022e\2\u089c\u0114\3\2\2")
buf.write("\2\u089d\u089e\5\u043d\u021f\2\u089e\u089f\5\u0463\u0232")
buf.write("\2\u089f\u08a0\5\u0453\u022a\2\u08a0\u08a1\5\u044b\u0226")
buf.write("\2\u08a1\u08a2\5\u0435\u021b\2\u08a2\u08a3\5\u0445\u0223")
buf.write("\2\u08a3\u08a4\5\u044f\u0228\2\u08a4\u0116\3\2\2\2\u08a5")
buf.write("\u08a6\5\u043d\u021f\2\u08a6\u08a7\5\u0463\u0232\2\u08a7")
buf.write("\u08a8\5\u045b\u022e\2\u08a8\u08a9\5\u043d\u021f\2\u08a9")
buf.write("\u08aa\5\u0457\u022c\2\u08aa\u08ab\5\u044f\u0228\2\u08ab")
buf.write("\u08ac\5\u0435\u021b\2\u08ac\u08ad\5\u044b\u0226\2\u08ad")
buf.write("\u0118\3\2\2\2\u08ae\u08af\5\u043d\u021f\2\u08af\u08b0")
buf.write("\5\u0463\u0232\2\u08b0\u08b1\5\u045b\u022e\2\u08b1\u08b2")
buf.write("\5\u0457\u022c\2\u08b2\u08b3\5\u0435\u021b\2\u08b3\u08b4")
buf.write("\5\u0439\u021d\2\u08b4\u08b5\5\u045b\u022e\2\u08b5\u011a")
buf.write("\3\2\2\2\u08b6\u08b7\5\u043f\u0220\2\u08b7\u08b8\5\u0435")
buf.write("\u021b\2\u08b8\u08b9\5\u0445\u0223\2\u08b9\u08ba\5\u044b")
buf.write("\u0226\2\u08ba\u08bb\5\u045d\u022f\2\u08bb\u08bc\5\u0457")
buf.write("\u022c\2\u08bc\u08bd\5\u043d\u021f\2\u08bd\u011c\3\2\2")
buf.write("\2\u08be\u08bf\5\u043f\u0220\2\u08bf\u08c0\5\u0435\u021b")
buf.write("\2\u08c0\u08c1\5\u044b\u0226\2\u08c1\u08c2\5\u0459\u022d")
buf.write("\2\u08c2\u08c3\5\u043d\u021f\2\u08c3\u011e\3\2\2\2\u08c4")
buf.write("\u08c5\5\u043f\u0220\2\u08c5\u08c6\5\u043d\u021f\2\u08c6")
buf.write("\u08c7\5\u045b\u022e\2\u08c7\u08c8\5\u0439\u021d\2\u08c8")
buf.write("\u08c9\5\u0443\u0222\2\u08c9\u0120\3\2\2\2\u08ca\u08cb")
buf.write("\5\u043f\u0220\2\u08cb\u08cc\5\u0445\u0223\2\u08cc\u08cd")
buf.write("\5\u044f\u0228\2\u08cd\u08ce\5\u0435\u021b\2\u08ce\u08cf")
buf.write("\5\u044b\u0226\2\u08cf\u0122\3\2\2\2\u08d0\u08d1\5\u043f")
buf.write("\u0220\2\u08d1\u08d2\5\u0445\u0223\2\u08d2\u08d3\5\u0457")
buf.write("\u022c\2\u08d3\u08d4\5\u0459\u022d\2\u08d4\u08d5\5\u045b")
buf.write("\u022e\2\u08d5\u0124\3\2\2\2\u08d6\u08d7\5\u043f\u0220")
buf.write("\2\u08d7\u08d8\5\u0445\u0223\2\u08d8\u08d9\5\u0457\u022c")
buf.write("\2\u08d9\u08da\5\u0459\u022d\2\u08da\u08db\5\u045b\u022e")
buf.write("\2\u08db\u08dc\7a\2\2\u08dc\u08dd\5\u045f\u0230\2\u08dd")
buf.write("\u08de\5\u0435\u021b\2\u08de\u08df\5\u044b\u0226\2\u08df")
buf.write("\u08e0\5\u045d\u022f\2\u08e0\u08e1\5\u043d\u021f\2\u08e1")
buf.write("\u0126\3\2\2\2\u08e2\u08e3\5\u043f\u0220\2\u08e3\u08e4")
buf.write("\5\u044b\u0226\2\u08e4\u08e5\5\u0451\u0229\2\u08e5\u08e6")
buf.write("\5\u0435\u021b\2\u08e6\u08e7\5\u045b\u022e\2\u08e7\u0128")
buf.write("\3\2\2\2\u08e8\u08e9\5\u043f\u0220\2\u08e9\u08ea\5\u0451")
buf.write("\u0229\2\u08ea\u08eb\5\u044b\u0226\2\u08eb\u08ec\5\u044b")
buf.write("\u0226\2\u08ec\u08ed\5\u0451\u0229\2\u08ed\u08ee\5\u0461")
buf.write("\u0231\2\u08ee\u08ef\5\u0445\u0223\2\u08ef\u08f0\5\u044f")
buf.write("\u0228\2\u08f0\u08f1\5\u0441\u0221\2\u08f1\u012a\3\2\2")
buf.write("\2\u08f2\u08f3\5\u043f\u0220\2\u08f3\u08f4\5\u0451\u0229")
buf.write("\2\u08f4\u08f5\5\u044b\u0226\2\u08f5\u08f6\5\u044b\u0226")
buf.write("\2\u08f6\u08f7\5\u0451\u0229\2\u08f7\u08f8\5\u0461\u0231")
buf.write("\2\u08f8\u08f9\5\u0459\u022d\2\u08f9\u012c\3\2\2\2\u08fa")
buf.write("\u08fb\5\u043f\u0220\2\u08fb\u08fc\5\u0451\u0229\2\u08fc")
buf.write("\u08fd\5\u0457\u022c\2\u08fd\u012e\3\2\2\2\u08fe\u08ff")
buf.write("\5\u043f\u0220\2\u08ff\u0900\5\u0451\u0229\2\u0900\u0901")
buf.write("\5\u0457\u022c\2\u0901\u0902\5\u0435\u021b\2\u0902\u0903")
buf.write("\5\u044b\u0226\2\u0903\u0904\5\u044b\u0226\2\u0904\u0130")
buf.write("\3\2\2\2\u0905\u0906\5\u043f\u0220\2\u0906\u0907\5\u0451")
buf.write("\u0229\2\u0907\u0908\5\u0457\u022c\2\u0908\u0909\5\u0439")
buf.write("\u021d\2\u0909\u090a\5\u043d\u021f\2\u090a\u0132\3\2\2")
buf.write("\2\u090b\u090c\5\u043f\u0220\2\u090c\u090d\5\u0457\u022c")
buf.write("\2\u090d\u090e\5\u0451\u0229\2\u090e\u090f\5\u044d\u0227")
buf.write("\2\u090f\u0134\3\2\2\2\u0910\u0911\5\u043f\u0220\2\u0911")
buf.write("\u0912\5\u045d\u022f\2\u0912\u0913\5\u044b\u0226\2\u0913")
buf.write("\u0914\5\u044b\u0226\2\u0914\u0136\3\2\2\2\u0915\u0916")
buf.write("\5\u043f\u0220\2\u0916\u0917\5\u045d\u022f\2\u0917\u0918")
buf.write("\5\u044f\u0228\2\u0918\u0919\5\u0439\u021d\2\u0919\u091a")
buf.write("\5\u045b\u022e\2\u091a\u091b\5\u0445\u0223\2\u091b\u091c")
buf.write("\5\u0451\u0229\2\u091c\u091d\5\u044f\u0228\2\u091d\u0138")
buf.write("\3\2\2\2\u091e\u091f\5\u0441\u0221\2\u091f\u0920\5\u0451")
buf.write("\u0229\2\u0920\u0921\5\u045b\u022e\2\u0921\u0922\5\u0451")
buf.write("\u0229\2\u0922\u013a\3\2\2\2\u0923\u0924\5\u0441\u0221")
buf.write("\2\u0924\u0925\5\u0457\u022c\2\u0925\u0926\5\u0435\u021b")
buf.write("\2\u0926\u0927\5\u044f\u0228\2\u0927\u0928\5\u045b\u022e")
buf.write("\2\u0928\u013c\3\2\2\2\u0929\u092a\5\u0441\u0221\2\u092a")
buf.write("\u092b\5\u0457\u022c\2\u092b\u092c\5\u0451\u0229\2\u092c")
buf.write("\u092d\5\u045d\u022f\2\u092d\u092e\5\u0453\u022a\2\u092e")
buf.write("\u013e\3\2\2\2\u092f\u0930\5\u0441\u0221\2\u0930\u0931")
buf.write("\5\u0457\u022c\2\u0931\u0932\5\u0451\u0229\2\u0932\u0933")
buf.write("\5\u045d\u022f\2\u0933\u0934\5\u0453\u022a\2\u0934\u0935")
buf.write("\5\u0445\u0223\2\u0935\u0936\5\u044f\u0228\2\u0936\u0937")
buf.write("\5\u0441\u0221\2\u0937\u0140\3\2\2\2\u0938\u0939\5\u0443")
buf.write("\u0222\2\u0939\u093a\5\u0435\u021b\2\u093a\u093b\5\u0459")
buf.write("\u022d\2\u093b\u093c\5\u0443\u0222\2\u093c\u0142\3\2\2")
buf.write("\2\u093d\u093e\5\u0443\u0222\2\u093e\u093f\5\u0435\u021b")
buf.write("\2\u093f\u0940\5\u045f\u0230\2\u0940\u0941\5\u0445\u0223")
buf.write("\2\u0941\u0942\5\u044f\u0228\2\u0942\u0943\5\u0441\u0221")
buf.write("\2\u0943\u0144\3\2\2\2\u0944\u0945\5\u0443\u0222\2\u0945")
buf.write("\u0946\5\u0445\u0223\2\u0946\u0947\5\u043b\u021e\2\u0947")
buf.write("\u0948\5\u043d\u021f\2\u0948\u0146\3\2\2\2\u0949\u094a")
buf.write("\5\u0443\u0222\2\u094a\u094b\5\u0451\u0229\2\u094b\u094c")
buf.write("\5\u045d\u022f\2\u094c\u094d\5\u0457\u022c\2\u094d\u0148")
buf.write("\3\2\2\2\u094e\u094f\5\u0445\u0223\2\u094f\u0950\5\u043f")
buf.write("\u0220\2\u0950\u014a\3\2\2\2\u0951\u0952\5\u0445\u0223")
buf.write("\2\u0952\u0953\5\u0441\u0221\2\u0953\u0954\5\u044f\u0228")
buf.write("\2\u0954\u0955\5\u0451\u0229\2\u0955\u0956\5\u0457\u022c")
buf.write("\2\u0956\u0957\5\u043d\u021f\2\u0957\u014c\3\2\2\2\u0958")
buf.write("\u0959\5\u0445\u0223\2\u0959\u095a\5\u044d\u0227\2\u095a")
buf.write("\u095b\5\u044d\u0227\2\u095b\u095c\5\u043d\u021f\2\u095c")
buf.write("\u095d\5\u043b\u021e\2\u095d\u095e\5\u0445\u0223\2\u095e")
buf.write("\u095f\5\u0435\u021b\2\u095f\u0960\5\u045b\u022e\2\u0960")
buf.write("\u0961\5\u043d\u021f\2\u0961\u014e\3\2\2\2\u0962\u0963")
buf.write("\5\u0445\u0223\2\u0963\u0964\5\u044f\u0228\2\u0964\u0150")
buf.write("\3\2\2\2\u0965\u0966\5\u0445\u0223\2\u0966\u0967\5\u044f")
buf.write("\u0228\2\u0967\u0968\5\u0439\u021d\2\u0968\u0969\5\u044b")
buf.write("\u0226\2\u0969\u096a\5\u045d\u022f\2\u096a\u096b\5\u043b")
buf.write("\u021e\2\u096b\u096c\5\u043d\u021f\2\u096c\u0152\3\2\2")
buf.write("\2\u096d\u096e\5\u0445\u0223\2\u096e\u096f\5\u044f\u0228")
buf.write("\2\u096f\u0970\5\u0439\u021d\2\u0970\u0971\5\u044b\u0226")
buf.write("\2\u0971\u0972\5\u045d\u022f\2\u0972\u0973\5\u043b\u021e")
buf.write("\2\u0973\u0974\5\u0445\u0223\2\u0974\u0975\5\u044f\u0228")
buf.write("\2\u0975\u0976\5\u0441\u0221\2\u0976\u0154\3\2\2\2\u0977")
buf.write("\u0978\5\u0445\u0223\2\u0978\u0979\5\u044f\u0228\2\u0979")
buf.write("\u097a\5\u0439\u021d\2\u097a\u097b\5\u0457\u022c\2\u097b")
buf.write("\u097c\5\u043d\u021f\2\u097c\u097d\5\u044d\u0227\2\u097d")
buf.write("\u097e\5\u043d\u021f\2\u097e\u097f\5\u044f\u0228\2\u097f")
buf.write("\u0980\5\u045b\u022e\2\u0980\u0156\3\2\2\2\u0981\u0982")
buf.write("\5\u0445\u0223\2\u0982\u0983\5\u044f\u0228\2\u0983\u0984")
buf.write("\5\u043b\u021e\2\u0984\u0985\5\u043d\u021f\2\u0985\u0986")
buf.write("\5\u044f\u0228\2\u0986\u0987\5\u045b\u022e\2\u0987\u0158")
buf.write("\3\2\2\2\u0988\u0989\5\u0445\u0223\2\u0989\u098a\5\u044f")
buf.write("\u0228\2\u098a\u098b\5\u043b\u021e\2\u098b\u098c\5\u043d")
buf.write("\u021f\2\u098c\u098d\5\u0463\u0232\2\u098d\u015a\3\2\2")
buf.write("\2\u098e\u098f\5\u0445\u0223\2\u098f\u0990\5\u044f\u0228")
buf.write("\2\u0990\u0991\5\u043b\u021e\2\u0991\u0992\5\u043d\u021f")
buf.write("\2\u0992\u0993\5\u0463\u0232\2\u0993\u0994\5\u043d\u021f")
buf.write("\2\u0994\u0995\5\u043b\u021e\2\u0995\u015c\3\2\2\2\u0996")
buf.write("\u0997\5\u0445\u0223\2\u0997\u0998\5\u044f\u0228\2\u0998")
buf.write("\u0999\5\u043b\u021e\2\u0999\u099a\5\u0445\u0223\2\u099a")
buf.write("\u099b\5\u0439\u021d\2\u099b\u099c\5\u0435\u021b\2\u099c")
buf.write("\u099d\5\u045b\u022e\2\u099d\u099e\5\u0451\u0229\2\u099e")
buf.write("\u099f\5\u0457\u022c\2\u099f\u015e\3\2\2\2\u09a0\u09a1")
buf.write("\5\u0445\u0223\2\u09a1\u09a2\5\u044f\u0228\2\u09a2\u09a3")
buf.write("\5\u043b\u021e\2\u09a3\u09a4\5\u0445\u0223\2\u09a4\u09a5")
buf.write("\5\u0439\u021d\2\u09a5\u09a6\5\u043d\u021f\2\u09a6\u09a7")
buf.write("\5\u0459\u022d\2\u09a7\u0160\3\2\2\2\u09a8\u09a9\5\u0445")
buf.write("\u0223\2\u09a9\u09aa\5\u044f\u0228\2\u09aa\u09ab\5\u043f")
buf.write("\u0220\2\u09ab\u09ac\5\u0445\u0223\2\u09ac\u09ad\5\u044f")
buf.write("\u0228\2\u09ad\u09ae\5\u0445\u0223\2\u09ae\u09af\5\u045b")
buf.write("\u022e\2\u09af\u09b0\5\u043d\u021f\2\u09b0\u0162\3\2\2")
buf.write("\2\u09b1\u09b2\5\u0445\u0223\2\u09b2\u09b3\5\u044f\u0228")
buf.write("\2\u09b3\u09b4\5\u044b\u0226\2\u09b4\u09b5\5\u0445\u0223")
buf.write("\2\u09b5\u09b6\5\u044f\u0228\2\u09b6\u09b7\5\u043d\u021f")
buf.write("\2\u09b7\u0164\3\2\2\2\u09b8\u09b9\5\u0445\u0223\2\u09b9")
buf.write("\u09ba\5\u044f\u0228\2\u09ba\u09bb\5\u044f\u0228\2\u09bb")
buf.write("\u09bc\5\u043d\u021f\2\u09bc\u09bd\5\u0457\u022c\2\u09bd")
buf.write("\u0166\3\2\2\2\u09be\u09bf\5\u0445\u0223\2\u09bf\u09c0")
buf.write("\5\u044f\u0228\2\u09c0\u09c1\5\u0451\u0229\2\u09c1\u09c2")
buf.write("\5\u045d\u022f\2\u09c2\u09c3\5\u045b\u022e\2\u09c3\u0168")
buf.write("\3\2\2\2\u09c4\u09c5\5\u0445\u0223\2\u09c5\u09c6\5\u044f")
buf.write("\u0228\2\u09c6\u09c7\5\u0459\u022d\2\u09c7\u09c8\5\u043d")
buf.write("\u021f\2\u09c8\u09c9\5\u0457\u022c\2\u09c9\u09ca\5\u045b")
buf.write("\u022e\2\u09ca\u016a\3\2\2\2\u09cb\u09cc\5\u0445\u0223")
buf.write("\2\u09cc\u09cd\5\u044f\u0228\2\u09cd\u09ce\5\u0459\u022d")
buf.write("\2\u09ce\u09cf\5\u045b\u022e\2\u09cf\u09d0\5\u0435\u021b")
buf.write("\2\u09d0\u09d1\5\u044f\u0228\2\u09d1\u09d2\5\u045b\u022e")
buf.write("\2\u09d2\u09d3\5\u0445\u0223\2\u09d3\u09d4\5\u0435\u021b")
buf.write("\2\u09d4\u09d5\5\u0437\u021c\2\u09d5\u09d6\5\u044b\u0226")
buf.write("\2\u09d6\u09d7\5\u043d\u021f\2\u09d7\u016c\3\2\2\2\u09d8")
buf.write("\u09d9\5\u0445\u0223\2\u09d9\u09da\5\u044f\u0228\2\u09da")
buf.write("\u09db\5\u0459\u022d\2\u09db\u09dc\5\u045b\u022e\2\u09dc")
buf.write("\u09dd\5\u043d\u021f\2\u09dd\u09de\5\u0435\u021b\2\u09de")
buf.write("\u09df\5\u043b\u021e\2\u09df\u016e\3\2\2\2\u09e0\u09e1")
buf.write("\5\u0445\u0223\2\u09e1\u09e2\5\u044f\u0228\2\u09e2\u09e3")
buf.write("\5\u045b\u022e\2\u09e3\u0170\3\2\2\2\u09e4\u09e5\5\u0445")
buf.write("\u0223\2\u09e5\u09e6\5\u044f\u0228\2\u09e6\u09e7\5\u045b")
buf.write("\u022e\2\u09e7\u09e8\5\u043d\u021f\2\u09e8\u09e9\5\u0441")
buf.write("\u0221\2\u09e9\u09ea\5\u043d\u021f\2\u09ea\u09eb\5\u0457")
buf.write("\u022c\2\u09eb\u0172\3\2\2\2\u09ec\u09ed\5\u0445\u0223")
buf.write("\2\u09ed\u09ee\5\u044f\u0228\2\u09ee\u09ef\5\u045b\u022e")
buf.write("\2\u09ef\u09f0\5\u043d\u021f\2\u09f0\u09f1\5\u0457\u022c")
buf.write("\2\u09f1\u09f2\5\u0459\u022d\2\u09f2\u09f3\5\u043d\u021f")
buf.write("\2\u09f3\u09f4\5\u0439\u021d\2\u09f4\u09f5\5\u045b\u022e")
buf.write("\2\u09f5\u0174\3\2\2\2\u09f6\u09f7\5\u0445\u0223\2\u09f7")
buf.write("\u09f8\5\u044f\u0228\2\u09f8\u09f9\5\u045b\u022e\2\u09f9")
buf.write("\u09fa\5\u043d\u021f\2\u09fa\u09fb\5\u0457\u022c\2\u09fb")
buf.write("\u09fc\5\u045f\u0230\2\u09fc\u09fd\5\u0435\u021b\2\u09fd")
buf.write("\u09fe\5\u044b\u0226\2\u09fe\u0176\3\2\2\2\u09ff\u0a00")
buf.write("\5\u0445\u0223\2\u0a00\u0a01\5\u044f\u0228\2\u0a01\u0a02")
buf.write("\5\u045b\u022e\2\u0a02\u0a03\5\u0451\u0229\2\u0a03\u0178")
buf.write("\3\2\2\2\u0a04\u0a05\5\u0445\u0223\2\u0a05\u0a06\5\u044f")
buf.write("\u0228\2\u0a06\u0a07\5\u045f\u0230\2\u0a07\u0a08\5\u0435")
buf.write("\u021b\2\u0a08\u0a09\5\u044b\u0226\2\u0a09\u0a0a\5\u0445")
buf.write("\u0223\2\u0a0a\u0a0b\5\u043b\u021e\2\u0a0b\u0a0c\5\u0435")
buf.write("\u021b\2\u0a0c\u0a0d\5\u045b\u022e\2\u0a0d\u0a0e\5\u043d")
buf.write("\u021f\2\u0a0e\u017a\3\2\2\2\u0a0f\u0a10\5\u0445\u0223")
buf.write("\2\u0a10\u0a11\5\u0459\u022d\2\u0a11\u017c\3\2\2\2\u0a12")
buf.write("\u0a13\5\u0445\u0223\2\u0a13\u0a14\5\u0459\u022d\2\u0a14")
buf.write("\u0a15\5\u0451\u0229\2\u0a15\u0a16\5\u044b\u0226\2\u0a16")
buf.write("\u0a17\5\u0435\u021b\2\u0a17\u0a18\5\u045b\u022e\2\u0a18")
buf.write("\u0a19\5\u0445\u0223\2\u0a19\u0a1a\5\u0451\u0229\2\u0a1a")
buf.write("\u0a1b\5\u044f\u0228\2\u0a1b\u017e\3\2\2\2\u0a1c\u0a1d")
buf.write("\5\u0445\u0223\2\u0a1d\u0a1e\5\u045b\u022e\2\u0a1e\u0a1f")
buf.write("\5\u043d\u021f\2\u0a1f\u0a20\5\u0457\u022c\2\u0a20\u0a21")
buf.write("\5\u0435\u021b\2\u0a21\u0a22\5\u045b\u022e\2\u0a22\u0a23")
buf.write("\5\u043d\u021f\2\u0a23\u0180\3\2\2\2\u0a24\u0a25\5\u0447")
buf.write("\u0224\2\u0a25\u0a26\5\u0435\u021b\2\u0a26\u0a27\5\u045f")
buf.write("\u0230\2\u0a27\u0a28\5\u0435\u021b\2\u0a28\u0182\3\2\2")
buf.write("\2\u0a29\u0a2a\5\u0447\u0224\2\u0a2a\u0a2b\5\u0451\u0229")
buf.write("\2\u0a2b\u0a2c\5\u0445\u0223\2\u0a2c\u0a2d\5\u044f\u0228")
buf.write("\2\u0a2d\u0184\3\2\2\2\u0a2e\u0a2f\5\u0449\u0225\2\u0a2f")
buf.write("\u0a30\5\u043d\u021f\2\u0a30\u0a31\5\u043d\u021f\2\u0a31")
buf.write("\u0a32\5\u0453\u022a\2\u0a32\u0186\3\2\2\2\u0a33\u0a34")
buf.write("\5\u044b\u0226\2\u0a34\u0a35\5\u0435\u021b\2\u0a35\u0a36")
buf.write("\5\u044f\u0228\2\u0a36\u0a37\5\u0441\u0221\2\u0a37\u0a38")
buf.write("\5\u045d\u022f\2\u0a38\u0a39\5\u0435\u021b\2\u0a39\u0a3a")
buf.write("\5\u0441\u0221\2\u0a3a\u0a3b\5\u043d\u021f\2\u0a3b\u0188")
buf.write("\3\2\2\2\u0a3c\u0a3d\5\u044b\u0226\2\u0a3d\u0a3e\5\u0435")
buf.write("\u021b\2\u0a3e\u0a3f\5\u0459\u022d\2\u0a3f\u0a40\5\u045b")
buf.write("\u022e\2\u0a40\u018a\3\2\2\2\u0a41\u0a42\5\u044b\u0226")
buf.write("\2\u0a42\u0a43\5\u0435\u021b\2\u0a43\u0a44\5\u0459\u022d")
buf.write("\2\u0a44\u0a45\5\u045b\u022e\2\u0a45\u0a46\7a\2\2\u0a46")
buf.write("\u0a47\5\u045f\u0230\2\u0a47\u0a48\5\u0435\u021b\2\u0a48")
buf.write("\u0a49\5\u044b\u0226\2\u0a49\u0a4a\5\u045d\u022f\2\u0a4a")
buf.write("\u0a4b\5\u043d\u021f\2\u0a4b\u018c\3\2\2\2\u0a4c\u0a4d")
buf.write("\5\u044b\u0226\2\u0a4d\u0a4e\5\u043d\u021f\2\u0a4e\u0a4f")
buf.write("\5\u0435\u021b\2\u0a4f\u0a50\5\u043b\u021e\2\u0a50\u0a51")
buf.write("\5\u0445\u0223\2\u0a51\u0a52\5\u044f\u0228\2\u0a52\u0a53")
buf.write("\5\u0441\u0221\2\u0a53\u018e\3\2\2\2\u0a54\u0a55\5\u044b")
buf.write("\u0226\2\u0a55\u0a56\5\u043d\u021f\2\u0a56\u0a57\5\u043f")
buf.write("\u0220\2\u0a57\u0a58\5\u045b\u022e\2\u0a58\u0190\3\2\2")
buf.write("\2\u0a59\u0a5a\5\u044b\u0226\2\u0a5a\u0a5b\5\u043d\u021f")
buf.write("\2\u0a5b\u0a5c\5\u045f\u0230\2\u0a5c\u0a5d\5\u043d\u021f")
buf.write("\2\u0a5d\u0a5e\5\u044b\u0226\2\u0a5e\u0192\3\2\2\2\u0a5f")
buf.write("\u0a60\5\u044b\u0226\2\u0a60\u0a61\5\u0445\u0223\2\u0a61")
buf.write("\u0a62\5\u0437\u021c\2\u0a62\u0a63\5\u0457\u022c\2\u0a63")
buf.write("\u0a64\5\u0435\u021b\2\u0a64\u0a65\5\u0457\u022c\2\u0a65")
buf.write("\u0a66\5\u0465\u0233\2\u0a66\u0194\3\2\2\2\u0a67\u0a68")
buf.write("\5\u044b\u0226\2\u0a68\u0a69\5\u0445\u0223\2\u0a69\u0a6a")
buf.write("\5\u0449\u0225\2\u0a6a\u0a6b\5\u043d\u021f\2\u0a6b\u0196")
buf.write("\3\2\2\2\u0a6c\u0a6d\5\u044b\u0226\2\u0a6d\u0a6e\5\u0445")
buf.write("\u0223\2\u0a6e\u0a6f\5\u0449\u0225\2\u0a6f\u0a70\5\u043d")
buf.write("\u021f\2\u0a70\u0a71\7\64\2\2\u0a71\u0198\3\2\2\2\u0a72")
buf.write("\u0a73\5\u044b\u0226\2\u0a73\u0a74\5\u0445\u0223\2\u0a74")
buf.write("\u0a75\5\u0449\u0225\2\u0a75\u0a76\5\u043d\u021f\2\u0a76")
buf.write("\u0a77\7\66\2\2\u0a77\u019a\3\2\2\2\u0a78\u0a79\5\u044b")
buf.write("\u0226\2\u0a79\u0a7a\5\u0445\u0223\2\u0a7a\u0a7b\5\u0449")
buf.write("\u0225\2\u0a7b\u0a7c\5\u043d\u021f\2\u0a7c\u0a7d\5\u0439")
buf.write("\u021d\2\u0a7d\u019c\3\2\2\2\u0a7e\u0a7f\5\u044b\u0226")
buf.write("\2\u0a7f\u0a80\5\u0445\u0223\2\u0a80\u0a81\5\u044d\u0227")
buf.write("\2\u0a81\u0a82\5\u0445\u0223\2\u0a82\u0a83\5\u045b\u022e")
buf.write("\2\u0a83\u019e\3\2\2\2\u0a84\u0a85\5\u044b\u0226\2\u0a85")
buf.write("\u0a86\5\u0451\u0229\2\u0a86\u0a87\5\u0439\u021d\2\u0a87")
buf.write("\u0a88\5\u0435\u021b\2\u0a88\u0a89\5\u044b\u0226\2\u0a89")
buf.write("\u01a0\3\2\2\2\u0a8a\u0a8b\5\u044b\u0226\2\u0a8b\u0a8c")
buf.write("\5\u0451\u0229\2\u0a8c\u0a8d\5\u0439\u021d\2\u0a8d\u0a8e")
buf.write("\5\u0449\u0225\2\u0a8e\u01a2\3\2\2\2\u0a8f\u0a90\5\u044b")
buf.write("\u0226\2\u0a90\u0a91\5\u0451\u0229\2\u0a91\u0a92\5\u0439")
buf.write("\u021d\2\u0a92\u0a93\5\u0449\u0225\2\u0a93\u0a94\5\u043d")
buf.write("\u021f\2\u0a94\u0a95\5\u043b\u021e\2\u0a95\u01a4\3\2\2")
buf.write("\2\u0a96\u0a97\5\u044b\u0226\2\u0a97\u0a98\5\u0451\u0229")
buf.write("\2\u0a98\u0a99\5\u0441\u0221\2\u0a99\u01a6\3\2\2\2\u0a9a")
buf.write("\u0a9b\5\u044b\u0226\2\u0a9b\u0a9c\5\u0451\u0229\2\u0a9c")
buf.write("\u0a9d\5\u0441\u0221\2\u0a9d\u0a9e\5\u0451\u0229\2\u0a9e")
buf.write("\u0a9f\5\u043f\u0220\2\u0a9f\u0aa0\5\u043f\u0220\2\u0aa0")
buf.write("\u01a8\3\2\2\2\u0aa1\u0aa2\5\u044b\u0226\2\u0aa2\u0aa3")
buf.write("\5\u0451\u0229\2\u0aa3\u0aa4\5\u0441\u0221\2\u0aa4\u0aa5")
buf.write("\5\u0451\u0229\2\u0aa5\u0aa6\5\u044f\u0228\2\u0aa6\u01aa")
buf.write("\3\2\2\2\u0aa7\u0aa8\5\u044b\u0226\2\u0aa8\u0aa9\5\u0451")
buf.write("\u0229\2\u0aa9\u0aaa\5\u044f\u0228\2\u0aaa\u0aab\5\u0441")
buf.write("\u0221\2\u0aab\u01ac\3\2\2\2\u0aac\u0aad\5\u044b\u0226")
buf.write("\2\u0aad\u0aae\5\u0451\u0229\2\u0aae\u0aaf\5\u0451\u0229")
buf.write("\2\u0aaf\u0ab0\5\u0453\u022a\2\u0ab0\u01ae\3\2\2\2\u0ab1")
buf.write("\u0ab2\5\u044d\u0227\2\u0ab2\u0ab3\5\u0435\u021b\2\u0ab3")
buf.write("\u0ab4\5\u0445\u0223\2\u0ab4\u0ab5\5\u044f\u0228\2\u0ab5")
buf.write("\u01b0\3\2\2\2\u0ab6\u0ab7\5\u044d\u0227\2\u0ab7\u0ab8")
buf.write("\5\u0435\u021b\2\u0ab8\u0ab9\5\u0453\u022a\2\u0ab9\u01b2")
buf.write("\3\2\2\2\u0aba\u0abb\5\u044d\u0227\2\u0abb\u0abc\5\u0435")
buf.write("\u021b\2\u0abc\u0abd\5\u045b\u022e\2\u0abd\u0abe\5\u0439")
buf.write("\u021d\2\u0abe\u0abf\5\u0443\u0222\2\u0abf\u0ac0\5\u043d")
buf.write("\u021f\2\u0ac0\u0ac1\5\u043b\u021e\2\u0ac1\u01b4\3\2\2")
buf.write("\2\u0ac2\u0ac3\5\u044d\u0227\2\u0ac3\u0ac4\5\u0435\u021b")
buf.write("\2\u0ac4\u0ac5\5\u0463\u0232\2\u0ac5\u0ac6\5\u045f\u0230")
buf.write("\2\u0ac6\u0ac7\5\u0435\u021b\2\u0ac7\u0ac8\5\u044b\u0226")
buf.write("\2\u0ac8\u0ac9\5\u045d\u022f\2\u0ac9\u0aca\5\u043d\u021f")
buf.write("\2\u0aca\u01b6\3\2\2\2\u0acb\u0acc\5\u044d\u0227\2\u0acc")
buf.write("\u0acd\5\u043d\u021f\2\u0acd\u0ace\5\u0435\u021b\2\u0ace")
buf.write("\u0acf\5\u0459\u022d\2\u0acf\u0ad0\5\u045d\u022f\2\u0ad0")
buf.write("\u0ad1\5\u0457\u022c\2\u0ad1\u0ad2\5\u043d\u021f\2\u0ad2")
buf.write("\u0ad3\5\u0459\u022d\2\u0ad3\u01b8\3\2\2\2\u0ad4\u0ad5")
buf.write("\5\u044d\u0227\2\u0ad5\u0ad6\5\u043d\u021f\2\u0ad6\u0ad7")
buf.write("\5\u044d\u0227\2\u0ad7\u0ad8\5\u0437\u021c\2\u0ad8\u0ad9")
buf.write("\5\u043d\u021f\2\u0ad9\u0ada\5\u0457\u022c\2\u0ada\u01ba")
buf.write("\3\2\2\2\u0adb\u0adc\5\u044d\u0227\2\u0adc\u0add\5\u043d")
buf.write("\u021f\2\u0add\u0ade\5\u0457\u022c\2\u0ade\u0adf\5\u0441")
buf.write("\u0221\2\u0adf\u0ae0\5\u043d\u021f\2\u0ae0\u01bc\3\2\2")
buf.write("\2\u0ae1\u0ae2\5\u044d\u0227\2\u0ae2\u0ae3\5\u0445\u0223")
buf.write("\2\u0ae3\u0ae4\5\u044f\u0228\2\u0ae4\u0ae5\5\u045d\u022f")
buf.write("\2\u0ae5\u0ae6\5\u0459\u022d\2\u0ae6\u01be\3\2\2\2\u0ae7")
buf.write("\u0ae8\5\u044d\u0227\2\u0ae8\u0ae9\5\u0445\u0223\2\u0ae9")
buf.write("\u0aea\5\u044f\u0228\2\u0aea\u0aeb\5\u045d\u022f\2\u0aeb")
buf.write("\u0aec\5\u045b\u022e\2\u0aec\u0aed\5\u043d\u021f\2\u0aed")
buf.write("\u01c0\3\2\2\2\u0aee\u0aef\5\u044d\u0227\2\u0aef\u0af0")
buf.write("\5\u0445\u0223\2\u0af0\u0af1\5\u044f\u0228\2\u0af1\u0af2")
buf.write("\5\u045f\u0230\2\u0af2\u0af3\5\u0435\u021b\2\u0af3\u0af4")
buf.write("\5\u044b\u0226\2\u0af4\u0af5\5\u045d\u022f\2\u0af5\u0af6")
buf.write("\5\u043d\u021f\2\u0af6\u01c2\3\2\2\2\u0af7\u0af8\5\u044d")
buf.write("\u0227\2\u0af8\u0af9\5\u044b\u0226\2\u0af9\u0afa\5\u0459")
buf.write("\u022d\2\u0afa\u0afb\5\u044b\u0226\2\u0afb\u0afc\5\u0435")
buf.write("\u021b\2\u0afc\u0afd\5\u0437\u021c\2\u0afd\u0afe\5\u043d")
buf.write("\u021f\2\u0afe\u0aff\5\u044b\u0226\2\u0aff\u01c4\3\2\2")
buf.write("\2\u0b00\u0b01\5\u044d\u0227\2\u0b01\u0b02\5\u0451\u0229")
buf.write("\2\u0b02\u0b03\5\u043b\u021e\2\u0b03\u0b04\5\u043d\u021f")
buf.write("\2\u0b04\u01c6\3\2\2\2\u0b05\u0b06\5\u044d\u0227\2\u0b06")
buf.write("\u0b07\5\u0451\u0229\2\u0b07\u0b08\5\u043b\u021e\2\u0b08")
buf.write("\u0b09\5\u043d\u021f\2\u0b09\u0b0a\5\u044b\u0226\2\u0b0a")
buf.write("\u01c8\3\2\2\2\u0b0b\u0b0c\5\u044d\u0227\2\u0b0c\u0b0d")
buf.write("\5\u0451\u0229\2\u0b0d\u0b0e\5\u043b\u021e\2\u0b0e\u0b0f")
buf.write("\5\u0445\u0223\2\u0b0f\u0b10\5\u043f\u0220\2\u0b10\u0b11")
buf.write("\5\u0465\u0233\2\u0b11\u01ca\3\2\2\2\u0b12\u0b13\5\u044d")
buf.write("\u0227\2\u0b13\u0b14\5\u0451\u0229\2\u0b14\u0b15\5\u044f")
buf.write("\u0228\2\u0b15\u0b16\5\u045b\u022e\2\u0b16\u0b17\5\u0443")
buf.write("\u0222\2\u0b17\u01cc\3\2\2\2\u0b18\u0b19\5\u044d\u0227")
buf.write("\2\u0b19\u0b1a\5\u045d\u022f\2\u0b1a\u0b1b\5\u044b\u0226")
buf.write("\2\u0b1b\u0b1c\5\u045b\u022e\2\u0b1c\u0b1d\5\u0445\u0223")
buf.write("\2\u0b1d\u0b1e\5\u0459\u022d\2\u0b1e\u0b1f\5\u043d\u021f")
buf.write("\2\u0b1f\u0b20\5\u045b\u022e\2\u0b20\u01ce\3\2\2\2\u0b21")
buf.write("\u0b22\5\u044f\u0228\2\u0b22\u0b23\5\u0435\u021b\2\u0b23")
buf.write("\u0b24\5\u044d\u0227\2\u0b24\u0b25\5\u043d\u021f\2\u0b25")
buf.write("\u01d0\3\2\2\2\u0b26\u0b27\5\u044f\u0228\2\u0b27\u0b28")
buf.write("\5\u0435\u021b\2\u0b28\u0b29\5\u044f\u0228\2\u0b29\u01d2")
buf.write("\3\2\2\2\u0b2a\u0b2b\5\u044f\u0228\2\u0b2b\u0b2c\5\u0435")
buf.write("\u021b\2\u0b2c\u0b2d\5\u045b\u022e\2\u0b2d\u0b2e\5\u045d")
buf.write("\u022f\2\u0b2e\u0b2f\5\u0457\u022c\2\u0b2f\u0b30\5\u0435")
buf.write("\u021b\2\u0b30\u0b31\5\u044b\u0226\2\u0b31\u01d4\3\2\2")
buf.write("\2\u0b32\u0b33\5\u044f\u0228\2\u0b33\u0b34\5\u0435\u021b")
buf.write("\2\u0b34\u0b35\5\u045b\u022e\2\u0b35\u0b36\5\u045d\u022f")
buf.write("\2\u0b36\u0b37\5\u0457\u022c\2\u0b37\u0b38\5\u0435\u021b")
buf.write("\2\u0b38\u0b39\5\u044b\u0226\2\u0b39\u0b3a\5\u044f\u0228")
buf.write("\2\u0b3a\u01d6\3\2\2\2\u0b3b\u0b3c\5\u044f\u0228\2\u0b3c")
buf.write("\u0b3d\5\u0435\u021b\2\u0b3d\u0b3e\5\u045f\u0230\2\u0b3e")
buf.write("\u01d8\3\2\2\2\u0b3f\u0b40\5\u044f\u0228\2\u0b40\u0b41")
buf.write("\5\u0439\u021d\2\u0b41\u0b42\5\u0443\u0222\2\u0b42\u0b43")
buf.write("\5\u0435\u021b\2\u0b43\u0b44\5\u0457\u022c\2\u0b44\u01da")
buf.write("\3\2\2\2\u0b45\u0b46\5\u044f\u0228\2\u0b46\u0b47\5\u0439")
buf.write("\u021d\2\u0b47\u0b48\5\u0443\u0222\2\u0b48\u0b49\5\u0435")
buf.write("\u021b\2\u0b49\u0b4a\5\u0457\u022c\2\u0b4a\u0b4b\7a\2")
buf.write("\2\u0b4b\u0b4c\5\u0439\u021d\2\u0b4c\u0b4d\5\u0459\u022d")
buf.write("\2\u0b4d\u01dc\3\2\2\2\u0b4e\u0b4f\5\u044f\u0228\2\u0b4f")
buf.write("\u0b50\5\u0439\u021d\2\u0b50\u0b51\5\u044b\u0226\2\u0b51")
buf.write("\u0b52\5\u0451\u0229\2\u0b52\u0b53\5\u0437\u021c\2\u0b53")
buf.write("\u01de\3\2\2\2\u0b54\u0b55\5\u044f\u0228\2\u0b55\u0b56")
buf.write("\5\u043d\u021f\2\u0b56\u0b57\5\u0459\u022d\2\u0b57\u0b58")
buf.write("\5\u045b\u022e\2\u0b58\u0b59\5\u043d\u021f\2\u0b59\u0b5a")
buf.write("\5\u043b\u021e\2\u0b5a\u01e0\3\2\2\2\u0b5b\u0b5c\5\u044f")
buf.write("\u0228\2\u0b5c\u0b5d\5\u043d\u021f\2\u0b5d\u0b5e\5\u0461")
buf.write("\u0231\2\u0b5e\u01e2\3\2\2\2\u0b5f\u0b60\5\u044f\u0228")
buf.write("\2\u0b60\u0b61\5\u0451\u0229\2\u0b61\u01e4\3\2\2\2\u0b62")
buf.write("\u0b63\5\u044f\u0228\2\u0b63\u0b64\5\u0451\u0229\2\u0b64")
buf.write("\u0b65\5\u0435\u021b\2\u0b65\u0b66\5\u045d\u022f\2\u0b66")
buf.write("\u0b67\5\u043b\u021e\2\u0b67\u0b68\5\u0445\u0223\2\u0b68")
buf.write("\u0b69\5\u045b\u022e\2\u0b69\u01e6\3\2\2\2\u0b6a\u0b6b")
buf.write("\5\u044f\u0228\2\u0b6b\u0b6c\5\u0451\u0229\2\u0b6c\u0b6d")
buf.write("\5\u0439\u021d\2\u0b6d\u0b6e\5\u0435\u021b\2\u0b6e\u0b6f")
buf.write("\5\u0439\u021d\2\u0b6f\u0b70\5\u0443\u0222\2\u0b70\u0b71")
buf.write("\5\u043d\u021f\2\u0b71\u01e8\3\2\2\2\u0b72\u0b73\5\u044f")
buf.write("\u0228\2\u0b73\u0b74\5\u0451\u0229\2\u0b74\u0b75\5\u0439")
buf.write("\u021d\2\u0b75\u0b76\5\u0451\u0229\2\u0b76\u0b77\5\u0453")
buf.write("\u022a\2\u0b77\u0b78\5\u0465\u0233\2\u0b78\u01ea\3\2\2")
buf.write("\2\u0b79\u0b7a\5\u044f\u0228\2\u0b7a\u0b7b\5\u0451\u0229")
buf.write("\2\u0b7b\u0b7c\5\u0439\u021d\2\u0b7c\u0b7d\5\u0465\u0233")
buf.write("\2\u0b7d\u0b7e\5\u0439\u021d\2\u0b7e\u0b7f\5\u044b\u0226")
buf.write("\2\u0b7f\u0b80\5\u043d\u021f\2\u0b80\u01ec\3\2\2\2\u0b81")
buf.write("\u0b82\5\u044f\u0228\2\u0b82\u0b83\5\u0451\u0229\2\u0b83")
buf.write("\u0b84\5\u043d\u021f\2\u0b84\u0b85\5\u044f\u0228\2\u0b85")
buf.write("\u0b86\5\u045b\u022e\2\u0b86\u0b87\5\u0445\u0223\2\u0b87")
buf.write("\u0b88\5\u045b\u022e\2\u0b88\u0b89\5\u0465\u0233\2\u0b89")
buf.write("\u0b8a\5\u043d\u021f\2\u0b8a\u0b8b\5\u0459\u022d\2\u0b8b")
buf.write("\u0b8c\5\u0439\u021d\2\u0b8c\u0b8d\5\u0435\u021b\2\u0b8d")
buf.write("\u0b8e\5\u0453\u022a\2\u0b8e\u0b8f\5\u0445\u0223\2\u0b8f")
buf.write("\u0b90\5\u044f\u0228\2\u0b90\u0b91\5\u0441\u0221\2\u0b91")
buf.write("\u01ee\3\2\2\2\u0b92\u0b93\5\u044f\u0228\2\u0b93\u0b94")
buf.write("\5\u0451\u0229\2\u0b94\u0b95\5\u044d\u0227\2\u0b95\u0b96")
buf.write("\5\u0435\u021b\2\u0b96\u0b97\5\u0463\u0232\2\u0b97\u0b98")
buf.write("\5\u045f\u0230\2\u0b98\u0b99\5\u0435\u021b\2\u0b99\u0b9a")
buf.write("\5\u044b\u0226\2\u0b9a\u0b9b\5\u045d\u022f\2\u0b9b\u0b9c")
buf.write("\5\u043d\u021f\2\u0b9c\u01f0\3\2\2\2\u0b9d\u0b9e\5\u044f")
buf.write("\u0228\2\u0b9e\u0b9f\5\u0451\u0229\2\u0b9f\u0ba0\5\u044d")
buf.write("\u0227\2\u0ba0\u0ba1\5\u0445\u0223\2\u0ba1\u0ba2\5\u044f")
buf.write("\u0228\2\u0ba2\u0ba3\5\u045f\u0230\2\u0ba3\u0ba4\5\u0435")
buf.write("\u021b\2\u0ba4\u0ba5\5\u044b\u0226\2\u0ba5\u0ba6\5\u045d")
buf.write("\u022f\2\u0ba6\u0ba7\5\u043d\u021f\2\u0ba7\u01f2\3\2\2")
buf.write("\2\u0ba8\u0ba9\5\u044f\u0228\2\u0ba9\u0baa\5\u0451\u0229")
buf.write("\2\u0baa\u0bab\5\u044f\u0228\2\u0bab\u0bac\5\u043d\u021f")
buf.write("\2\u0bac\u01f4\3\2\2\2\u0bad\u0bae\5\u044f\u0228\2\u0bae")
buf.write("\u0baf\5\u0451\u0229\2\u0baf\u0bb0\5\u0451\u0229\2\u0bb0")
buf.write("\u0bb1\5\u0457\u022c\2\u0bb1\u0bb2\5\u043b\u021e\2\u0bb2")
buf.write("\u0bb3\5\u043d\u021f\2\u0bb3\u0bb4\5\u0457\u022c\2\u0bb4")
buf.write("\u01f6\3\2\2\2\u0bb5\u0bb6\5\u044f\u0228\2\u0bb6\u0bb7")
buf.write("\5\u0451\u0229\2\u0bb7\u0bb8\5\u0459\u022d\2\u0bb8\u0bb9")
buf.write("\5\u0439\u021d\2\u0bb9\u0bba\5\u0443\u0222\2\u0bba\u0bbb")
buf.write("\5\u043d\u021f\2\u0bbb\u0bbc\5\u044d\u0227\2\u0bbc\u0bbd")
buf.write("\5\u0435\u021b\2\u0bbd\u0bbe\5\u0439\u021d\2\u0bbe\u0bbf")
buf.write("\5\u0443\u0222\2\u0bbf\u0bc0\5\u043d\u021f\2\u0bc0\u0bc1")
buf.write("\5\u0439\u021d\2\u0bc1\u0bc2\5\u0449\u0225\2\u0bc2\u01f8")
buf.write("\3\2\2\2\u0bc3\u0bc4\5\u044f\u0228\2\u0bc4\u0bc5\5\u0451")
buf.write("\u0229\2\u0bc5\u0bc6\5\u045b\u022e\2\u0bc6\u01fa\3\2\2")
buf.write("\2\u0bc7\u0bc8\5\u044f\u0228\2\u0bc8\u0bc9\5\u0451\u0229")
buf.write("\2\u0bc9\u0bca\5\u0461\u0231\2\u0bca\u0bcb\5\u0435\u021b")
buf.write("\2\u0bcb\u0bcc\5\u0445\u0223\2\u0bcc\u0bcd\5\u045b\u022e")
buf.write("\2\u0bcd\u01fc\3\2\2\2\u0bce\u0bcf\5\u044f\u0228\2\u0bcf")
buf.write("\u0bd0\5\u045d\u022f\2\u0bd0\u0bd1\5\u044b\u0226\2\u0bd1")
buf.write("\u0bd2\5\u044b\u0226\2\u0bd2\u01fe\3\2\2\2\u0bd3\u0bd4")
buf.write("\5\u044f\u0228\2\u0bd4\u0bd5\5\u045d\u022f\2\u0bd5\u0bd6")
buf.write("\5\u044b\u0226\2\u0bd6\u0bd7\5\u044b\u0226\2\u0bd7\u0bd8")
buf.write("\5\u0459\u022d\2\u0bd8\u0200\3\2\2\2\u0bd9\u0bda\5\u044f")
buf.write("\u0228\2\u0bda\u0bdb\5\u045d\u022f\2\u0bdb\u0bdc\5\u044d")
buf.write("\u0227\2\u0bdc\u0bdd\5\u0437\u021c\2\u0bdd\u0bde\5\u043d")
buf.write("\u021f\2\u0bde\u0bdf\5\u0457\u022c\2\u0bdf\u0202\3\2\2")
buf.write("\2\u0be0\u0be1\5\u044f\u0228\2\u0be1\u0be2\5\u045d\u022f")
buf.write("\2\u0be2\u0be3\5\u044d\u0227\2\u0be3\u0be4\5\u043d\u021f")
buf.write("\2\u0be4\u0be5\5\u0457\u022c\2\u0be5\u0be6\5\u0445\u0223")
buf.write("\2\u0be6\u0be7\5\u0439\u021d\2\u0be7\u0204\3\2\2\2\u0be8")
buf.write("\u0be9\5\u044f\u0228\2\u0be9\u0bea\5\u045f\u0230\2\u0bea")
buf.write("\u0beb\5\u0435\u021b\2\u0beb\u0bec\5\u0457\u022c\2\u0bec")
buf.write("\u0bed\5\u0439\u021d\2\u0bed\u0bee\5\u0443\u0222\2\u0bee")
buf.write("\u0bef\5\u0435\u021b\2\u0bef\u0bf0\5\u0457\u022c\2\u0bf0")
buf.write("\u0bf1\7\64\2\2\u0bf1\u0206\3\2\2\2\u0bf2\u0bf3\5\u0451")
buf.write("\u0229\2\u0bf3\u0bf4\5\u0437\u021c\2\u0bf4\u0bf5\5\u0447")
buf.write("\u0224\2\u0bf5\u0bf6\5\u043d\u021f\2\u0bf6\u0bf7\5\u0439")
buf.write("\u021d\2\u0bf7\u0bf8\5\u045b\u022e\2\u0bf8\u0208\3\2\2")
buf.write("\2\u0bf9\u0bfa\5\u0451\u0229\2\u0bfa\u0bfb\5\u043f\u0220")
buf.write("\2\u0bfb\u020a\3\2\2\2\u0bfc\u0bfd\5\u0451\u0229\2\u0bfd")
buf.write("\u0bfe\5\u043f\u0220\2\u0bfe\u0bff\5\u043f\u0220\2\u0bff")
buf.write("\u020c\3\2\2\2\u0c00\u0c01\5\u0451\u0229\2\u0c01\u0c02")
buf.write("\5\u0445\u0223\2\u0c02\u0c03\5\u043b\u021e\2\u0c03\u020e")
buf.write("\3\2\2\2\u0c04\u0c05\5\u0451\u0229\2\u0c05\u0c06\5\u044b")
buf.write("\u0226\2\u0c06\u0c07\5\u043b\u021e\2\u0c07\u0210\3\2\2")
buf.write("\2\u0c08\u0c09\5\u0451\u0229\2\u0c09\u0c0a\5\u044f\u0228")
buf.write("\2\u0c0a\u0212\3\2\2\2\u0c0b\u0c0c\5\u0451\u0229\2\u0c0c")
buf.write("\u0c0d\5\u044f\u0228\2\u0c0d\u0c0e\5\u044b\u0226\2\u0c0e")
buf.write("\u0c0f\5\u0465\u0233\2\u0c0f\u0214\3\2\2\2\u0c10\u0c11")
buf.write("\5\u0451\u0229\2\u0c11\u0c12\5\u0453\u022a\2\u0c12\u0c13")
buf.write("\5\u043d\u021f\2\u0c13\u0c14\5\u044f\u0228\2\u0c14\u0216")
buf.write("\3\2\2\2\u0c15\u0c16\5\u0451\u0229\2\u0c16\u0c17\5\u0453")
buf.write("\u022a\2\u0c17\u0c18\5\u045b\u022e\2\u0c18\u0c19\5\u0445")
buf.write("\u0223\2\u0c19\u0c1a\5\u0451\u0229\2\u0c1a\u0c1b\5\u044f")
buf.write("\u0228\2\u0c1b\u0218\3\2\2\2\u0c1c\u0c1d\5\u0451\u0229")
buf.write("\2\u0c1d\u0c1e\5\u0457\u022c\2\u0c1e\u021a\3\2\2\2\u0c1f")
buf.write("\u0c20\5\u0451\u0229\2\u0c20\u0c21\5\u0457\u022c\2\u0c21")
buf.write("\u0c22\5\u0435\u021b\2\u0c22\u0c23\5\u043b\u021e\2\u0c23")
buf.write("\u0c24\5\u0435\u021b\2\u0c24\u0c25\5\u045b\u022e\2\u0c25")
buf.write("\u0c26\5\u0435\u021b\2\u0c26\u021c\3\2\2\2\u0c27\u0c28")
buf.write("\5\u0451\u0229\2\u0c28\u0c29\5\u0457\u022c\2\u0c29\u0c2a")
buf.write("\5\u043b\u021e\2\u0c2a\u0c2b\5\u043d\u021f\2\u0c2b\u0c2c")
buf.write("\5\u0457\u022c\2\u0c2c\u021e\3\2\2\2\u0c2d\u0c2e\5\u0451")
buf.write("\u0229\2\u0c2e\u0c2f\5\u0457\u022c\2\u0c2f\u0c30\5\u043b")
buf.write("\u021e\2\u0c30\u0c31\5\u0445\u0223\2\u0c31\u0c32\5\u044f")
buf.write("\u0228\2\u0c32\u0c33\5\u0435\u021b\2\u0c33\u0c34\5\u044b")
buf.write("\u0226\2\u0c34\u0c35\5\u0445\u0223\2\u0c35\u0c36\5\u045b")
buf.write("\u022e\2\u0c36\u0c37\5\u0465\u0233\2\u0c37\u0220\3\2\2")
buf.write("\2\u0c38\u0c39\5\u0451\u0229\2\u0c39\u0c3a\5\u0459\u022d")
buf.write("\2\u0c3a\u0c3b\5\u043d\u021f\2\u0c3b\u0c3c\5\u0457\u022c")
buf.write("\2\u0c3c\u0c3d\5\u0457\u022c\2\u0c3d\u0c3e\5\u0451\u0229")
buf.write("\2\u0c3e\u0c3f\5\u0457\u022c\2\u0c3f\u0222\3\2\2\2\u0c40")
buf.write("\u0c41\5\u0451\u0229\2\u0c41\u0c42\5\u045d\u022f\2\u0c42")
buf.write("\u0c43\5\u045b\u022e\2\u0c43\u0224\3\2\2\2\u0c44\u0c45")
buf.write("\5\u0451\u0229\2\u0c45\u0c46\5\u045d\u022f\2\u0c46\u0c47")
buf.write("\5\u045b\u022e\2\u0c47\u0c48\5\u043d\u021f\2\u0c48\u0c49")
buf.write("\5\u0457\u022c\2\u0c49\u0226\3\2\2\2\u0c4a\u0c4b\5\u0451")
buf.write("\u0229\2\u0c4b\u0c4c\5\u045f\u0230\2\u0c4c\u0c4d\5\u043d")
buf.write("\u021f\2\u0c4d\u0c4e\5\u0457\u022c\2\u0c4e\u0228\3\2\2")
buf.write("\2\u0c4f\u0c50\5\u0451\u0229\2\u0c50\u0c51\5\u045f\u0230")
buf.write("\2\u0c51\u0c52\5\u043d\u021f\2\u0c52\u0c53\5\u0457\u022c")
buf.write("\2\u0c53\u0c54\5\u0457\u022c\2\u0c54\u0c55\5\u0445\u0223")
buf.write("\2\u0c55\u0c56\5\u043b\u021e\2\u0c56\u0c57\5\u0445\u0223")
buf.write("\2\u0c57\u0c58\5\u044f\u0228\2\u0c58\u0c59\5\u0441\u0221")
buf.write("\2\u0c59\u022a\3\2\2\2\u0c5a\u0c5b\5\u0453\u022a\2\u0c5b")
buf.write("\u0c5c\5\u0435\u021b\2\u0c5c\u0c5d\5\u0439\u021d\2\u0c5d")
buf.write("\u0c5e\5\u0449\u0225\2\u0c5e\u0c5f\5\u0435\u021b\2\u0c5f")
buf.write("\u0c60\5\u0441\u0221\2\u0c60\u0c61\5\u043d\u021f\2\u0c61")
buf.write("\u022c\3\2\2\2\u0c62\u0c63\5\u0453\u022a\2\u0c63\u0c64")
buf.write("\5\u0435\u021b\2\u0c64\u0c65\5\u0457\u022c\2\u0c65\u0c66")
buf.write("\5\u0435\u021b\2\u0c66\u0c67\5\u044b\u0226\2\u0c67\u0c68")
buf.write("\5\u044b\u0226\2\u0c68\u0c69\5\u043d\u021f\2\u0c69\u0c6a")
buf.write("\5\u044b\u0226\2\u0c6a\u0c6b\7a\2\2\u0c6b\u0c6c\5\u043d")
buf.write("\u021f\2\u0c6c\u0c6d\5\u044f\u0228\2\u0c6d\u0c6e\5\u0435")
buf.write("\u021b\2\u0c6e\u0c6f\5\u0437\u021c\2\u0c6f\u0c70\5\u044b")
buf.write("\u0226\2\u0c70\u0c71\5\u043d\u021f\2\u0c71\u022e\3\2\2")
buf.write("\2\u0c72\u0c73\5\u0453\u022a\2\u0c73\u0c74\5\u0435\u021b")
buf.write("\2\u0c74\u0c75\5\u0457\u022c\2\u0c75\u0c76\5\u0435\u021b")
buf.write("\2\u0c76\u0c77\5\u044d\u0227\2\u0c77\u0c78\5\u043d\u021f")
buf.write("\2\u0c78\u0c79\5\u045b\u022e\2\u0c79\u0c7a\5\u043d\u021f")
buf.write("\2\u0c7a\u0c7b\5\u0457\u022c\2\u0c7b\u0c7c\5\u0459\u022d")
buf.write("\2\u0c7c\u0230\3\2\2\2\u0c7d\u0c7e\5\u0453\u022a\2\u0c7e")
buf.write("\u0c7f\5\u0435\u021b\2\u0c7f\u0c80\5\u0457\u022c\2\u0c80")
buf.write("\u0c81\5\u043d\u021f\2\u0c81\u0c82\5\u044f\u0228\2\u0c82")
buf.write("\u0c83\5\u045b\u022e\2\u0c83\u0232\3\2\2\2\u0c84\u0c85")
buf.write("\5\u0453\u022a\2\u0c85\u0c86\5\u0435\u021b\2\u0c86\u0c87")
buf.write("\5\u0457\u022c\2\u0c87\u0c88\5\u045b\u022e\2\u0c88\u0c89")
buf.write("\5\u0445\u0223\2\u0c89\u0c8a\5\u045b\u022e\2\u0c8a\u0c8b")
buf.write("\5\u0445\u0223\2\u0c8b\u0c8c\5\u0451\u0229\2\u0c8c\u0c8d")
buf.write("\5\u044f\u0228\2\u0c8d\u0234\3\2\2\2\u0c8e\u0c8f\5\u0453")
buf.write("\u022a\2\u0c8f\u0c90\5\u0435\u021b\2\u0c90\u0c91\5\u0459")
buf.write("\u022d\2\u0c91\u0c92\5\u0459\u022d\2\u0c92\u0c93\5\u0445")
buf.write("\u0223\2\u0c93\u0c94\5\u044f\u0228\2\u0c94\u0c95\5\u0441")
buf.write("\u0221\2\u0c95\u0236\3\2\2\2\u0c96\u0c97\5\u0453\u022a")
buf.write("\2\u0c97\u0c98\5\u0435\u021b\2\u0c98\u0c99\5\u045b\u022e")
buf.write("\2\u0c99\u0c9a\5\u0443\u0222\2\u0c9a\u0238\3\2\2\2\u0c9b")
buf.write("\u0c9c\7\'\2\2\u0c9c\u0c9d\5\u0457\u022c\2\u0c9d\u0c9e")
buf.write("\5\u0451\u0229\2\u0c9e\u0c9f\5\u0461\u0231\2\u0c9f\u0ca0")
buf.write("\5\u045b\u022e\2\u0ca0\u0ca1\5\u0465\u0233\2\u0ca1\u0ca2")
buf.write("\5\u0453\u022a\2\u0ca2\u0ca3\5\u043d\u021f\2\u0ca3\u023a")
buf.write("\3\2\2\2\u0ca4\u0ca5\7\'\2\2\u0ca5\u0ca6\5\u045b\u022e")
buf.write("\2\u0ca6\u0ca7\5\u0465\u0233\2\u0ca7\u0ca8\5\u0453\u022a")
buf.write("\2\u0ca8\u0ca9\5\u043d\u021f\2\u0ca9\u023c\3\2\2\2\u0caa")
buf.write("\u0cab\5\u0453\u022a\2\u0cab\u0cac\5\u0445\u0223\2\u0cac")
buf.write("\u0cad\5\u0453\u022a\2\u0cad\u0cae\5\u043d\u021f\2\u0cae")
buf.write("\u0caf\5\u044b\u0226\2\u0caf\u0cb0\5\u0445\u0223\2\u0cb0")
buf.write("\u0cb1\5\u044f\u0228\2\u0cb1\u0cb2\5\u043d\u021f\2\u0cb2")
buf.write("\u0cb3\5\u043b\u021e\2\u0cb3\u023e\3\2\2\2\u0cb4\u0cb5")
buf.write("\5\u0453\u022a\2\u0cb5\u0cb6\5\u0445\u0223\2\u0cb6\u0cb7")
buf.write("\5\u045f\u0230\2\u0cb7\u0cb8\5\u0451\u0229\2\u0cb8\u0cb9")
buf.write("\5\u045b\u022e\2\u0cb9\u0240\3\2\2\2\u0cba\u0cbb\5\u0453")
buf.write("\u022a\2\u0cbb\u0cbc\5\u044b\u0226\2\u0cbc\u0cbd\5\u0435")
buf.write("\u021b\2\u0cbd\u0cbe\5\u044f\u0228\2\u0cbe\u0242\3\2\2")
buf.write("\2\u0cbf\u0cc0\5\u0453\u022a\2\u0cc0\u0cc1\5\u044b\u0226")
buf.write("\2\u0cc1\u0cc2\5\u0459\u022d\2\u0cc2\u0cc3\7a\2\2\u0cc3")
buf.write("\u0cc4\5\u0445\u0223\2\u0cc4\u0cc5\5\u044f\u0228\2\u0cc5")
buf.write("\u0cc6\5\u045b\u022e\2\u0cc6\u0cc7\5\u043d\u021f\2\u0cc7")
buf.write("\u0cc8\5\u0441\u0221\2\u0cc8\u0cc9\5\u043d\u021f\2\u0cc9")
buf.write("\u0cca\5\u0457\u022c\2\u0cca\u0244\3\2\2\2\u0ccb\u0ccc")
buf.write("\5\u0453\u022a\2\u0ccc\u0ccd\5\u0451\u0229\2\u0ccd\u0cce")
buf.write("\5\u0459\u022d\2\u0cce\u0ccf\5\u0445\u0223\2\u0ccf\u0cd0")
buf.write("\5\u045b\u022e\2\u0cd0\u0cd1\5\u0445\u0223\2\u0cd1\u0cd2")
buf.write("\5\u045f\u0230\2\u0cd2\u0cd3\5\u043d\u021f\2\u0cd3\u0246")
buf.write("\3\2\2\2\u0cd4\u0cd5\5\u0453\u022a\2\u0cd5\u0cd6\5\u0451")
buf.write("\u0229\2\u0cd6\u0cd7\5\u0459\u022d\2\u0cd7\u0cd8\5\u0445")
buf.write("\u0223\2\u0cd8\u0cd9\5\u045b\u022e\2\u0cd9\u0cda\5\u0445")
buf.write("\u0223\2\u0cda\u0cdb\5\u045f\u0230\2\u0cdb\u0cdc\5\u043d")
buf.write("\u021f\2\u0cdc\u0cdd\5\u044f\u0228\2\u0cdd\u0248\3\2\2")
buf.write("\2\u0cde\u0cdf\5\u0453\u022a\2\u0cdf\u0ce0\5\u0457\u022c")
buf.write("\2\u0ce0\u0ce1\5\u0435\u021b\2\u0ce1\u0ce2\5\u0441\u0221")
buf.write("\2\u0ce2\u0ce3\5\u044d\u0227\2\u0ce3\u0ce4\5\u0435\u021b")
buf.write("\2\u0ce4\u024a\3\2\2\2\u0ce5\u0ce6\5\u0453\u022a\2\u0ce6")
buf.write("\u0ce7\5\u0457\u022c\2\u0ce7\u0ce8\5\u043d\u021f\2\u0ce8")
buf.write("\u0ce9\5\u0439\u021d\2\u0ce9\u0cea\5\u043d\u021f\2\u0cea")
buf.write("\u0ceb\5\u043b\u021e\2\u0ceb\u0cec\5\u0445\u0223\2\u0cec")
buf.write("\u0ced\5\u044f\u0228\2\u0ced\u0cee\5\u0441\u0221\2\u0cee")
buf.write("\u024c\3\2\2\2\u0cef\u0cf0\5\u0453\u022a\2\u0cf0\u0cf1")
buf.write("\5\u0457\u022c\2\u0cf1\u0cf2\5\u043d\u021f\2\u0cf2\u0cf3")
buf.write("\5\u0439\u021d\2\u0cf3\u0cf4\5\u0445\u0223\2\u0cf4\u0cf5")
buf.write("\5\u0459\u022d\2\u0cf5\u0cf6\5\u0445\u0223\2\u0cf6\u0cf7")
buf.write("\5\u0451\u0229\2\u0cf7\u0cf8\5\u044f\u0228\2\u0cf8\u024e")
buf.write("\3\2\2\2\u0cf9\u0cfa\5\u0453\u022a\2\u0cfa\u0cfb\5\u0457")
buf.write("\u022c\2\u0cfb\u0cfc\5\u043d\u021f\2\u0cfc\u0cfd\5\u0459")
buf.write("\u022d\2\u0cfd\u0cfe\5\u043d\u021f\2\u0cfe\u0cff\5\u044f")
buf.write("\u0228\2\u0cff\u0d00\5\u045b\u022e\2\u0d00\u0250\3\2\2")
buf.write("\2\u0d01\u0d02\5\u0453\u022a\2\u0d02\u0d03\5\u0457\u022c")
buf.write("\2\u0d03\u0d04\5\u0445\u0223\2\u0d04\u0d05\5\u0451\u0229")
buf.write("\2\u0d05\u0d06\5\u0457\u022c\2\u0d06\u0252\3\2\2\2\u0d07")
buf.write("\u0d08\5\u0453\u022a\2\u0d08\u0d09\5\u0457\u022c\2\u0d09")
buf.write("\u0d0a\5\u0451\u0229\2\u0d0a\u0d0b\5\u0439\u021d\2\u0d0b")
buf.write("\u0d0c\5\u043d\u021f\2\u0d0c\u0d0d\5\u043b\u021e\2\u0d0d")
buf.write("\u0d0e\5\u045d\u022f\2\u0d0e\u0d0f\5\u0457\u022c\2\u0d0f")
buf.write("\u0d10\5\u043d\u021f\2\u0d10\u0254\3\2\2\2\u0d11\u0d12")
buf.write("\5\u0457\u022c\2\u0d12\u0d13\5\u0435\u021b\2\u0d13\u0d14")
buf.write("\5\u0445\u0223\2\u0d14\u0d15\5\u0459\u022d\2\u0d15\u0d16")
buf.write("\5\u043d\u021f\2\u0d16\u0256\3\2\2\2\u0d17\u0d18\5\u0457")
buf.write("\u022c\2\u0d18\u0d19\5\u0435\u021b\2\u0d19\u0d1a\5\u044f")
buf.write("\u0228\2\u0d1a\u0d1b\5\u0441\u0221\2\u0d1b\u0d1c\5\u043d")
buf.write("\u021f\2\u0d1c\u0258\3\2\2\2\u0d1d\u0d1e\5\u0457\u022c")
buf.write("\2\u0d1e\u0d1f\5\u0435\u021b\2\u0d1f\u0d20\5\u0461\u0231")
buf.write("\2\u0d20\u025a\3\2\2\2\u0d21\u0d22\5\u0457\u022c\2\u0d22")
buf.write("\u0d23\5\u043d\u021f\2\u0d23\u0d24\5\u0435\u021b\2\u0d24")
buf.write("\u0d25\5\u043b\u021e\2\u0d25\u025c\3\2\2\2\u0d26\u0d27")
buf.write("\5\u0457\u022c\2\u0d27\u0d28\5\u043d\u021f\2\u0d28\u0d29")
buf.write("\5\u0435\u021b\2\u0d29\u0d2a\5\u044b\u0226\2\u0d2a\u025e")
buf.write("\3\2\2\2\u0d2b\u0d2c\5\u0457\u022c\2\u0d2c\u0d2d\5\u043d")
buf.write("\u021f\2\u0d2d\u0d2e\5\u0439\u021d\2\u0d2e\u0d2f\5\u0451")
buf.write("\u0229\2\u0d2f\u0d30\5\u0457\u022c\2\u0d30\u0d31\5\u043b")
buf.write("\u021e\2\u0d31\u0260\3\2\2\2\u0d32\u0d33\5\u0457\u022c")
buf.write("\2\u0d33\u0d34\5\u043d\u021f\2\u0d34\u0d35\5\u043f\u0220")
buf.write("\2\u0d35\u0262\3\2\2\2\u0d36\u0d37\5\u0457\u022c\2\u0d37")
buf.write("\u0d38\5\u043d\u021f\2\u0d38\u0d39\5\u043f\u0220\2\u0d39")
buf.write("\u0d3a\5\u043d\u021f\2\u0d3a\u0d3b\5\u0457\u022c\2\u0d3b")
buf.write("\u0d3c\5\u043d\u021f\2\u0d3c\u0d3d\5\u044f\u0228\2\u0d3d")
buf.write("\u0d3e\5\u0439\u021d\2\u0d3e\u0d3f\5\u043d\u021f\2\u0d3f")
buf.write("\u0264\3\2\2\2\u0d40\u0d41\5\u0457\u022c\2\u0d41\u0d42")
buf.write("\5\u043d\u021f\2\u0d42\u0d43\5\u043f\u0220\2\u0d43\u0d44")
buf.write("\5\u043d\u021f\2\u0d44\u0d45\5\u0457\u022c\2\u0d45\u0d46")
buf.write("\5\u043d\u021f\2\u0d46\u0d47\5\u044f\u0228\2\u0d47\u0d48")
buf.write("\5\u0439\u021d\2\u0d48\u0d49\5\u0445\u0223\2\u0d49\u0d4a")
buf.write("\5\u044f\u0228\2\u0d4a\u0d4b\5\u0441\u0221\2\u0d4b\u0266")
buf.write("\3\2\2\2\u0d4c\u0d4d\5\u0457\u022c\2\u0d4d\u0d4e\5\u043d")
buf.write("\u021f\2\u0d4e\u0d4f\5\u0447\u0224\2\u0d4f\u0d50\5\u043d")
buf.write("\u021f\2\u0d50\u0d51\5\u0439\u021d\2\u0d51\u0d52\5\u045b")
buf.write("\u022e\2\u0d52\u0268\3\2\2\2\u0d53\u0d54\5\u0457\u022c")
buf.write("\2\u0d54\u0d55\5\u043d\u021f\2\u0d55\u0d56\5\u044b\u0226")
buf.write("\2\u0d56\u0d57\5\u0445\u0223\2\u0d57\u0d58\5\u043d\u021f")
buf.write("\2\u0d58\u0d59\5\u0459\u022d\2\u0d59\u0d5a\7a\2\2\u0d5a")
buf.write("\u0d5b\5\u0451\u0229\2\u0d5b\u0d5c\5\u044f\u0228\2\u0d5c")
buf.write("\u026a\3\2\2\2\u0d5d\u0d5e\5\u0457\u022c\2\u0d5e\u0d5f")
buf.write("\5\u043d\u021f\2\u0d5f\u0d60\5\u044f\u0228\2\u0d60\u0d61")
buf.write("\5\u0435\u021b\2\u0d61\u0d62\5\u044d\u0227\2\u0d62\u0d63")
buf.write("\5\u043d\u021f\2\u0d63\u026c\3\2\2\2\u0d64\u0d65\5\u0457")
buf.write("\u022c\2\u0d65\u0d66\5\u043d\u021f\2\u0d66\u0d67\5\u0453")
buf.write("\u022a\2\u0d67\u0d68\5\u044b\u0226\2\u0d68\u0d69\5\u0435")
buf.write("\u021b\2\u0d69\u0d6a\5\u0439\u021d\2\u0d6a\u0d6b\5\u043d")
buf.write("\u021f\2\u0d6b\u026e\3\2\2\2\u0d6c\u0d6d\5\u0457\u022c")
buf.write("\2\u0d6d\u0d6e\5\u043d\u021f\2\u0d6e\u0d6f\5\u0459\u022d")
buf.write("\2\u0d6f\u0d70\5\u0453\u022a\2\u0d70\u0d71\5\u043d\u021f")
buf.write("\2\u0d71\u0d72\5\u0439\u021d\2\u0d72\u0d73\5\u045b\u022e")
buf.write("\2\u0d73\u0270\3\2\2\2\u0d74\u0d75\5\u0457\u022c\2\u0d75")
buf.write("\u0d76\5\u043d\u021f\2\u0d76\u0d77\5\u0459\u022d\2\u0d77")
buf.write("\u0d78\5\u045b\u022e\2\u0d78\u0d79\5\u0457\u022c\2\u0d79")
buf.write("\u0d7a\5\u0445\u0223\2\u0d7a\u0d7b\5\u0439\u021d\2\u0d7b")
buf.write("\u0d7c\5\u045b\u022e\2\u0d7c\u0d7d\7a\2\2\u0d7d\u0d7e")
buf.write("\5\u0457\u022c\2\u0d7e\u0d7f\5\u043d\u021f\2\u0d7f\u0d80")
buf.write("\5\u043f\u0220\2\u0d80\u0d81\5\u043d\u021f\2\u0d81\u0d82")
buf.write("\5\u0457\u022c\2\u0d82\u0d83\5\u043d\u021f\2\u0d83\u0d84")
buf.write("\5\u044f\u0228\2\u0d84\u0d85\5\u0439\u021d\2\u0d85\u0d86")
buf.write("\5\u043d\u021f\2\u0d86\u0d87\5\u0459\u022d\2\u0d87\u0272")
buf.write("\3\2\2\2\u0d88\u0d89\5\u0457\u022c\2\u0d89\u0d8a\5\u043d")
buf.write("\u021f\2\u0d8a\u0d8b\5\u0459\u022d\2\u0d8b\u0d8c\5\u045d")
buf.write("\u022f\2\u0d8c\u0d8d\5\u044b\u0226\2\u0d8d\u0d8e\5\u045b")
buf.write("\u022e\2\u0d8e\u0274\3\2\2\2\u0d8f\u0d90\5\u0457\u022c")
buf.write("\2\u0d90\u0d91\5\u043d\u021f\2\u0d91\u0d92\5\u0459\u022d")
buf.write("\2\u0d92\u0d93\5\u045d\u022f\2\u0d93\u0d94\5\u044b\u0226")
buf.write("\2\u0d94\u0d95\5\u045b\u022e\2\u0d95\u0d96\7a\2\2\u0d96")
buf.write("\u0d97\5\u0439\u021d\2\u0d97\u0d98\5\u0435\u021b\2\u0d98")
buf.write("\u0d99\5\u0439\u021d\2\u0d99\u0d9a\5\u0443\u0222\2\u0d9a")
buf.write("\u0d9b\5\u043d\u021f\2\u0d9b\u0276\3\2\2\2\u0d9c\u0d9d")
buf.write("\5\u0457\u022c\2\u0d9d\u0d9e\5\u043d\u021f\2\u0d9e\u0d9f")
buf.write("\5\u045b\u022e\2\u0d9f\u0da0\5\u045d\u022f\2\u0da0\u0da1")
buf.write("\5\u0457\u022c\2\u0da1\u0da2\5\u044f\u0228\2\u0da2\u0278")
buf.write("\3\2\2\2\u0da3\u0da4\5\u0457\u022c\2\u0da4\u0da5\5\u043d")
buf.write("\u021f\2\u0da5\u0da6\5\u045b\u022e\2\u0da6\u0da7\5\u045d")
buf.write("\u022f\2\u0da7\u0da8\5\u0457\u022c\2\u0da8\u0da9\5\u044f")
buf.write("\u0228\2\u0da9\u0daa\5\u0445\u0223\2\u0daa\u0dab\5\u044f")
buf.write("\u0228\2\u0dab\u0dac\5\u0441\u0221\2\u0dac\u027a\3\2\2")
buf.write("\2\u0dad\u0dae\5\u0457\u022c\2\u0dae\u0daf\5\u043d\u021f")
buf.write("\2\u0daf\u0db0\5\u045d\u022f\2\u0db0\u0db1\5\u0459\u022d")
buf.write("\2\u0db1\u0db2\5\u043d\u021f\2\u0db2\u027c\3\2\2\2\u0db3")
buf.write("\u0db4\5\u0457\u022c\2\u0db4\u0db5\5\u043d\u021f\2\u0db5")
buf.write("\u0db6\5\u045f\u0230\2\u0db6\u0db7\5\u043d\u021f\2\u0db7")
buf.write("\u0db8\5\u0457\u022c\2\u0db8\u0db9\5\u0459\u022d\2\u0db9")
buf.write("\u0dba\5\u043d\u021f\2\u0dba\u027e\3\2\2\2\u0dbb\u0dbc")
buf.write("\5\u0457\u022c\2\u0dbc\u0dbd\5\u043d\u021f\2\u0dbd\u0dbe")
buf.write("\5\u045f\u0230\2\u0dbe\u0dbf\5\u0451\u0229\2\u0dbf\u0dc0")
buf.write("\5\u0449\u0225\2\u0dc0\u0dc1\5\u043d\u021f\2\u0dc1\u0280")
buf.write("\3\2\2\2\u0dc2\u0dc3\5\u0457\u022c\2\u0dc3\u0dc4\5\u0445")
buf.write("\u0223\2\u0dc4\u0dc5\5\u0441\u0221\2\u0dc5\u0dc6\5\u0443")
buf.write("\u0222\2\u0dc6\u0dc7\5\u045b\u022e\2\u0dc7\u0282\3\2\2")
buf.write("\2\u0dc8\u0dc9\5\u0457\u022c\2\u0dc9\u0dca\5\u0451\u0229")
buf.write("\2\u0dca\u0dcb\5\u044b\u0226\2\u0dcb\u0dcc\5\u044b\u0226")
buf.write("\2\u0dcc\u0dcd\5\u0437\u021c\2\u0dcd\u0dce\5\u0435\u021b")
buf.write("\2\u0dce\u0dcf\5\u0439\u021d\2\u0dcf\u0dd0\5\u0449\u0225")
buf.write("\2\u0dd0\u0284\3\2\2\2\u0dd1\u0dd2\5\u0457\u022c\2\u0dd2")
buf.write("\u0dd3\5\u0451\u0229\2\u0dd3\u0dd4\5\u044b\u0226\2\u0dd4")
buf.write("\u0dd5\5\u044b\u0226\2\u0dd5\u0dd6\5\u045d\u022f\2\u0dd6")
buf.write("\u0dd7\5\u0453\u022a\2\u0dd7\u0286\3\2\2\2\u0dd8\u0dd9")
buf.write("\5\u0457\u022c\2\u0dd9\u0dda\5\u0451\u0229\2\u0dda\u0ddb")
buf.write("\5\u0461\u0231\2\u0ddb\u0288\3\2\2\2\u0ddc\u0ddd\5\u0457")
buf.write("\u022c\2\u0ddd\u0dde\5\u0451\u0229\2\u0dde\u0ddf\5\u0461")
buf.write("\u0231\2\u0ddf\u0de0\5\u0445\u0223\2\u0de0\u0de1\5\u043b")
buf.write("\u021e\2\u0de1\u028a\3\2\2\2\u0de2\u0de3\5\u0457\u022c")
buf.write("\2\u0de3\u0de4\5\u0451\u0229\2\u0de4\u0de5\5\u0461\u0231")
buf.write("\2\u0de5\u0de6\5\u0459\u022d\2\u0de6\u028c\3\2\2\2\u0de7")
buf.write("\u0de8\5\u0457\u022c\2\u0de8\u0de9\5\u045d\u022f\2\u0de9")
buf.write("\u0dea\5\u044b\u0226\2\u0dea\u0deb\5\u043d\u021f\2\u0deb")
buf.write("\u0dec\5\u0459\u022d\2\u0dec\u028e\3\2\2\2\u0ded\u0dee")
buf.write("\5\u0459\u022d\2\u0dee\u0def\5\u0435\u021b\2\u0def\u0df0")
buf.write("\5\u044d\u0227\2\u0df0\u0df1\5\u0453\u022a\2\u0df1\u0df2")
buf.write("\5\u044b\u0226\2\u0df2\u0df3\5\u043d\u021f\2\u0df3\u0290")
buf.write("\3\2\2\2\u0df4\u0df5\5\u0459\u022d\2\u0df5\u0df6\5\u0435")
buf.write("\u021b\2\u0df6\u0df7\5\u045f\u0230\2\u0df7\u0df8\5\u043d")
buf.write("\u021f\2\u0df8\u0292\3\2\2\2\u0df9\u0dfa\5\u0459\u022d")
buf.write("\2\u0dfa\u0dfb\5\u0435\u021b\2\u0dfb\u0dfc\5\u045f\u0230")
buf.write("\2\u0dfc\u0dfd\5\u043d\u021f\2\u0dfd\u0dfe\5\u0453\u022a")
buf.write("\2\u0dfe\u0dff\5\u0451\u0229\2\u0dff\u0e00\5\u0445\u0223")
buf.write("\2\u0e00\u0e01\5\u044f\u0228\2\u0e01\u0e02\5\u045b\u022e")
buf.write("\2\u0e02\u0294\3\2\2\2\u0e03\u0e04\5\u0459\u022d\2\u0e04")
buf.write("\u0e05\5\u0439\u021d\2\u0e05\u0e06\5\u0443\u0222\2\u0e06")
buf.write("\u0e07\5\u043d\u021f\2\u0e07\u0e08\5\u044d\u0227\2\u0e08")
buf.write("\u0e09\5\u0435\u021b\2\u0e09\u0296\3\2\2\2\u0e0a\u0e0b")
buf.write("\5\u0459\u022d\2\u0e0b\u0e0c\5\u0439\u021d\2\u0e0c\u0e0d")
buf.write("\5\u0443\u0222\2\u0e0d\u0e0e\5\u043d\u021f\2\u0e0e\u0e0f")
buf.write("\5\u044d\u0227\2\u0e0f\u0e10\5\u0435\u021b\2\u0e10\u0e11")
buf.write("\5\u0439\u021d\2\u0e11\u0e12\5\u0443\u0222\2\u0e12\u0e13")
buf.write("\5\u043d\u021f\2\u0e13\u0e14\5\u0439\u021d\2\u0e14\u0e15")
buf.write("\5\u0449\u0225\2\u0e15\u0298\3\2\2\2\u0e16\u0e17\5\u0459")
buf.write("\u022d\2\u0e17\u0e18\5\u0439\u021d\2\u0e18\u0e19\5\u044f")
buf.write("\u0228\2\u0e19\u029a\3\2\2\2\u0e1a\u0e1b\5\u0459\u022d")
buf.write("\2\u0e1b\u0e1c\5\u043d\u021f\2\u0e1c\u0e1d\5\u0435\u021b")
buf.write("\2\u0e1d\u0e1e\5\u0457\u022c\2\u0e1e\u0e1f\5\u0439\u021d")
buf.write("\2\u0e1f\u0e20\5\u0443\u0222\2\u0e20\u029c\3\2\2\2\u0e21")
buf.write("\u0e22\5\u0459\u022d\2\u0e22\u0e23\5\u043d\u021f\2\u0e23")
buf.write("\u0e24\5\u0439\u021d\2\u0e24\u0e25\5\u0451\u0229\2\u0e25")
buf.write("\u0e26\5\u044f\u0228\2\u0e26\u0e27\5\u043b\u021e\2\u0e27")
buf.write("\u029e\3\2\2\2\u0e28\u0e29\5\u0459\u022d\2\u0e29\u0e2a")
buf.write("\5\u043d\u021f\2\u0e2a\u0e2b\5\u043d\u021f\2\u0e2b\u0e2c")
buf.write("\5\u043b\u021e\2\u0e2c\u02a0\3\2\2\2\u0e2d\u0e2e\5\u0459")
buf.write("\u022d\2\u0e2e\u0e2f\5\u043d\u021f\2\u0e2f\u0e30\5\u0441")
buf.write("\u0221\2\u0e30\u0e31\5\u044d\u0227\2\u0e31\u0e32\5\u043d")
buf.write("\u021f\2\u0e32\u0e33\5\u044f\u0228\2\u0e33\u0e34\5\u045b")
buf.write("\u022e\2\u0e34\u02a2\3\2\2\2\u0e35\u0e36\5\u0459\u022d")
buf.write("\2\u0e36\u0e37\5\u043d\u021f\2\u0e37\u0e38\5\u044b\u0226")
buf.write("\2\u0e38\u0e39\5\u043d\u021f\2\u0e39\u0e3a\5\u0439\u021d")
buf.write("\2\u0e3a\u0e3b\5\u045b\u022e\2\u0e3b\u02a4\3\2\2\2\u0e3c")
buf.write("\u0e3d\5\u0459\u022d\2\u0e3d\u0e3e\5\u043d\u021f\2\u0e3e")
buf.write("\u0e3f\5\u044b\u0226\2\u0e3f\u0e40\5\u043f\u0220\2\u0e40")
buf.write("\u02a6\3\2\2\2\u0e41\u0e42\5\u0459\u022d\2\u0e42\u0e43")
buf.write("\5\u043d\u021f\2\u0e43\u0e44\5\u0455\u022b\2\u0e44\u0e45")
buf.write("\5\u045d\u022f\2\u0e45\u0e46\5\u043d\u021f\2\u0e46\u0e47")
buf.write("\5\u044f\u0228\2\u0e47\u0e48\5\u0439\u021d\2\u0e48\u0e49")
buf.write("\5\u043d\u021f\2\u0e49\u02a8\3\2\2\2\u0e4a\u0e4b\5\u0459")
buf.write("\u022d\2\u0e4b\u0e4c\5\u043d\u021f\2\u0e4c\u0e4d\5\u0455")
buf.write("\u022b\2\u0e4d\u0e4e\5\u045d\u022f\2\u0e4e\u0e4f\5\u043d")
buf.write("\u021f\2\u0e4f\u0e50\5\u044f\u0228\2\u0e50\u0e51\5\u045b")
buf.write("\u022e\2\u0e51\u0e52\5\u0445\u0223\2\u0e52\u0e53\5\u0435")
buf.write("\u021b\2\u0e53\u0e54\5\u044b\u0226\2\u0e54\u02aa\3\2\2")
buf.write("\2\u0e55\u0e56\5\u0459\u022d\2\u0e56\u0e57\5\u043d\u021f")
buf.write("\2\u0e57\u0e58\5\u0457\u022c\2\u0e58\u0e59\5\u0445\u0223")
buf.write("\2\u0e59\u0e5a\5\u0435\u021b\2\u0e5a\u0e5b\5\u044b\u0226")
buf.write("\2\u0e5b\u0e5c\5\u0445\u0223\2\u0e5c\u0e5d\5\u0467\u0234")
buf.write("\2\u0e5d\u0e5e\5\u0435\u021b\2\u0e5e\u0e5f\5\u0437\u021c")
buf.write("\2\u0e5f\u0e60\5\u044b\u0226\2\u0e60\u0e61\5\u043d\u021f")
buf.write("\2\u0e61\u02ac\3\2\2\2\u0e62\u0e63\5\u0459\u022d\2\u0e63")
buf.write("\u0e64\5\u043d\u021f\2\u0e64\u0e65\5\u0457\u022c\2\u0e65")
buf.write("\u0e66\5\u0445\u0223\2\u0e66\u0e67\5\u0435\u021b\2\u0e67")
buf.write("\u0e68\5\u044b\u0226\2\u0e68\u0e69\5\u044b\u0226\2\u0e69")
buf.write("\u0e6a\5\u0465\u0233\2\u0e6a\u0e6b\7a\2\2\u0e6b\u0e6c")
buf.write("\5\u0457\u022c\2\u0e6c\u0e6d\5\u043d\u021f\2\u0e6d\u0e6e")
buf.write("\5\u045d\u022f\2\u0e6e\u0e6f\5\u0459\u022d\2\u0e6f\u0e70")
buf.write("\5\u0435\u021b\2\u0e70\u0e71\5\u0437\u021c\2\u0e71\u0e72")
buf.write("\5\u044b\u0226\2\u0e72\u0e73\5\u043d\u021f\2\u0e73\u02ae")
buf.write("\3\2\2\2\u0e74\u0e75\5\u0459\u022d\2\u0e75\u0e76\5\u043d")
buf.write("\u021f\2\u0e76\u0e77\5\u0457\u022c\2\u0e77\u0e78\5\u045f")
buf.write("\u0230\2\u0e78\u0e79\5\u043d\u021f\2\u0e79\u0e7a\5\u0457")
buf.write("\u022c\2\u0e7a\u0e7b\5\u043d\u021f\2\u0e7b\u0e7c\5\u0457")
buf.write("\u022c\2\u0e7c\u0e7d\5\u0457\u022c\2\u0e7d\u0e7e\5\u0451")
buf.write("\u0229\2\u0e7e\u0e7f\5\u0457\u022c\2\u0e7f\u02b0\3\2\2")
buf.write("\2\u0e80\u0e81\5\u0459\u022d\2\u0e81\u0e82\5\u043d\u021f")
buf.write("\2\u0e82\u0e83\5\u0459\u022d\2\u0e83\u0e84\5\u0459\u022d")
buf.write("\2\u0e84\u0e85\5\u0445\u0223\2\u0e85\u0e86\5\u0451\u0229")
buf.write("\2\u0e86\u0e87\5\u044f\u0228\2\u0e87\u0e88\5\u045b\u022e")
buf.write("\2\u0e88\u0e89\5\u0445\u0223\2\u0e89\u0e8a\5\u044d\u0227")
buf.write("\2\u0e8a\u0e8b\5\u043d\u021f\2\u0e8b\u0e8c\5\u0467\u0234")
buf.write("\2\u0e8c\u0e8d\5\u0451\u0229\2\u0e8d\u0e8e\5\u044f\u0228")
buf.write("\2\u0e8e\u0e8f\5\u043d\u021f\2\u0e8f\u02b2\3\2\2\2\u0e90")
buf.write("\u0e91\5\u0459\u022d\2\u0e91\u0e92\5\u043d\u021f\2\u0e92")
buf.write("\u0e93\5\u045b\u022e\2\u0e93\u02b4\3\2\2\2\u0e94\u0e95")
buf.write("\5\u0459\u022d\2\u0e95\u0e96\5\u043d\u021f\2\u0e96\u0e97")
buf.write("\5\u045b\u022e\2\u0e97\u0e98\5\u0459\u022d\2\u0e98\u02b6")
buf.write("\3\2\2\2\u0e99\u0e9a\5\u0459\u022d\2\u0e9a\u0e9b\5\u043d")
buf.write("\u021f\2\u0e9b\u0e9c\5\u045b\u022e\2\u0e9c\u0e9d\5\u045b")
buf.write("\u022e\2\u0e9d\u0e9e\5\u0445\u0223\2\u0e9e\u0e9f\5\u044f")
buf.write("\u0228\2\u0e9f\u0ea0\5\u0441\u0221\2\u0ea0\u0ea1\5\u0459")
buf.write("\u022d\2\u0ea1\u02b8\3\2\2\2\u0ea2\u0ea3\5\u0459\u022d")
buf.write("\2\u0ea3\u0ea4\5\u0443\u0222\2\u0ea4\u0ea5\5\u0435\u021b")
buf.write("\2\u0ea5\u0ea6\5\u0457\u022c\2\u0ea6\u0ea7\5\u043d\u021f")
buf.write("\2\u0ea7\u02ba\3\2\2\2\u0ea8\u0ea9\5\u0459\u022d\2\u0ea9")
buf.write("\u0eaa\5\u0443\u0222\2\u0eaa\u0eab\5\u0451\u0229\2\u0eab")
buf.write("\u0eac\5\u0461\u0231\2\u0eac\u02bc\3\2\2\2\u0ead\u0eae")
buf.write("\5\u0459\u022d\2\u0eae\u0eaf\5\u0443\u0222\2\u0eaf\u0eb0")
buf.write("\5\u045d\u022f\2\u0eb0\u0eb1\5\u045b\u022e\2\u0eb1\u0eb2")
buf.write("\5\u043b\u021e\2\u0eb2\u0eb3\5\u0451\u0229\2\u0eb3\u0eb4")
buf.write("\5\u0461\u0231\2\u0eb4\u0eb5\5\u044f\u0228\2\u0eb5\u02be")
buf.write("\3\2\2\2\u0eb6\u0eb7\5\u0459\u022d\2\u0eb7\u0eb8\5\u0445")
buf.write("\u0223\2\u0eb8\u0eb9\5\u0437\u021c\2\u0eb9\u0eba\5\u044b")
buf.write("\u0226\2\u0eba\u0ebb\5\u0445\u0223\2\u0ebb\u0ebc\5\u044f")
buf.write("\u0228\2\u0ebc\u0ebd\5\u0441\u0221\2\u0ebd\u0ebe\5\u0459")
buf.write("\u022d\2\u0ebe\u02c0\3\2\2\2\u0ebf\u0ec0\5\u0459\u022d")
buf.write("\2\u0ec0\u0ec1\5\u0445\u0223\2\u0ec1\u0ec2\5\u0441\u0221")
buf.write("\2\u0ec2\u0ec3\5\u044f\u0228\2\u0ec3\u0ec4\5\u045b\u022e")
buf.write("\2\u0ec4\u0ec5\5\u0465\u0233\2\u0ec5\u0ec6\5\u0453\u022a")
buf.write("\2\u0ec6\u0ec7\5\u043d\u021f\2\u0ec7\u02c2\3\2\2\2\u0ec8")
buf.write("\u0ec9\5\u0459\u022d\2\u0ec9\u0eca\5\u0445\u0223\2\u0eca")
buf.write("\u0ecb\5\u044d\u0227\2\u0ecb\u0ecc\5\u0453\u022a\2\u0ecc")
buf.write("\u0ecd\5\u044b\u0226\2\u0ecd\u0ece\5\u043d\u021f\2\u0ece")
buf.write("\u0ecf\7a\2\2\u0ecf\u0ed0\5\u0445\u0223\2\u0ed0\u0ed1")
buf.write("\5\u044f\u0228\2\u0ed1\u0ed2\5\u045b\u022e\2\u0ed2\u0ed3")
buf.write("\5\u043d\u021f\2\u0ed3\u0ed4\5\u0441\u0221\2\u0ed4\u0ed5")
buf.write("\5\u043d\u021f\2\u0ed5\u0ed6\5\u0457\u022c\2\u0ed6\u02c4")
buf.write("\3\2\2\2\u0ed7\u0ed8\5\u0459\u022d\2\u0ed8\u0ed9\5\u0445")
buf.write("\u0223\2\u0ed9\u0eda\5\u044f\u0228\2\u0eda\u0edb\5\u0441")
buf.write("\u0221\2\u0edb\u0edc\5\u044b\u0226\2\u0edc\u0edd\5\u043d")
buf.write("\u021f\2\u0edd\u02c6\3\2\2\2\u0ede\u0edf\5\u0459\u022d")
buf.write("\2\u0edf\u0ee0\5\u0445\u0223\2\u0ee0\u0ee1\5\u0467\u0234")
buf.write("\2\u0ee1\u0ee2\5\u043d\u021f\2\u0ee2\u02c8\3\2\2\2\u0ee3")
buf.write("\u0ee4\5\u0459\u022d\2\u0ee4\u0ee5\5\u0449\u0225\2\u0ee5")
buf.write("\u0ee6\5\u0445\u0223\2\u0ee6\u0ee7\5\u0453\u022a\2\u0ee7")
buf.write("\u02ca\3\2\2\2\u0ee8\u0ee9\5\u0459\u022d\2\u0ee9\u0eea")
buf.write("\5\u044d\u0227\2\u0eea\u0eeb\5\u0435\u021b\2\u0eeb\u0eec")
buf.write("\5\u044b\u0226\2\u0eec\u0eed\5\u044b\u0226\2\u0eed\u0eee")
buf.write("\5\u0445\u0223\2\u0eee\u0eef\5\u044f\u0228\2\u0eef\u0ef0")
buf.write("\5\u045b\u022e\2\u0ef0\u02cc\3\2\2\2\u0ef1\u0ef2\5\u0459")
buf.write("\u022d\2\u0ef2\u0ef3\5\u044f\u0228\2\u0ef3\u0ef4\5\u0435")
buf.write("\u021b\2\u0ef4\u0ef5\5\u0453\u022a\2\u0ef5\u0ef6\5\u0459")
buf.write("\u022d\2\u0ef6\u0ef7\5\u0443\u0222\2\u0ef7\u0ef8\5\u0451")
buf.write("\u0229\2\u0ef8\u0ef9\5\u045b\u022e\2\u0ef9\u02ce\3\2\2")
buf.write("\2\u0efa\u0efb\5\u0459\u022d\2\u0efb\u0efc\5\u0451\u0229")
buf.write("\2\u0efc\u0efd\5\u044d\u0227\2\u0efd\u0efe\5\u043d\u021f")
buf.write("\2\u0efe\u02d0\3\2\2\2\u0eff\u0f00\5\u0459\u022d\2\u0f00")
buf.write("\u0f01\5\u0453\u022a\2\u0f01\u0f02\5\u043d\u021f\2\u0f02")
buf.write("\u0f03\5\u0439\u021d\2\u0f03\u0f04\5\u0445\u0223\2\u0f04")
buf.write("\u0f05\5\u043f\u0220\2\u0f05\u0f06\5\u0445\u0223\2\u0f06")
buf.write("\u0f07\5\u0439\u021d\2\u0f07\u0f08\5\u0435\u021b\2\u0f08")
buf.write("\u0f09\5\u045b\u022e\2\u0f09\u0f0a\5\u0445\u0223\2\u0f0a")
buf.write("\u0f0b\5\u0451\u0229\2\u0f0b\u0f0c\5\u044f\u0228\2\u0f0c")
buf.write("\u02d2\3\2\2\2\u0f0d\u0f0e\5\u0459\u022d\2\u0f0e\u0f0f")
buf.write("\5\u0455\u022b\2\u0f0f\u0f10\5\u044b\u0226\2\u0f10\u0f11")
buf.write("\5\u043b\u021e\2\u0f11\u0f12\5\u0435\u021b\2\u0f12\u0f13")
buf.write("\5\u045b\u022e\2\u0f13\u0f14\5\u0435\u021b\2\u0f14\u02d4")
buf.write("\3\2\2\2\u0f15\u0f16\5\u0459\u022d\2\u0f16\u0f17\5\u0455")
buf.write("\u022b\2\u0f17\u0f18\5\u044b\u0226\2\u0f18\u0f19\5\u043d")
buf.write("\u021f\2\u0f19\u0f1a\5\u0457\u022c\2\u0f1a\u0f1b\5\u0457")
buf.write("\u022c\2\u0f1b\u0f1c\5\u0451\u0229\2\u0f1c\u0f1d\5\u0457")
buf.write("\u022c\2\u0f1d\u02d6\3\2\2\2\u0f1e\u0f1f\5\u0459\u022d")
buf.write("\2\u0f1f\u0f20\5\u045b\u022e\2\u0f20\u0f21\5\u0435\u021b")
buf.write("\2\u0f21\u0f22\5\u044f\u0228\2\u0f22\u0f23\5\u043b\u021e")
buf.write("\2\u0f23\u0f24\5\u0435\u021b\2\u0f24\u0f25\5\u044b\u0226")
buf.write("\2\u0f25\u0f26\5\u0451\u0229\2\u0f26\u0f27\5\u044f\u0228")
buf.write("\2\u0f27\u0f28\5\u043d\u021f\2\u0f28\u02d8\3\2\2\2\u0f29")
buf.write("\u0f2a\5\u0459\u022d\2\u0f2a\u0f2b\5\u045b\u022e\2\u0f2b")
buf.write("\u0f2c\5\u0435\u021b\2\u0f2c\u0f2d\5\u0457\u022c\2\u0f2d")
buf.write("\u0f2e\5\u045b\u022e\2\u0f2e\u02da\3\2\2\2\u0f2f\u0f30")
buf.write("\5\u0459\u022d\2\u0f30\u0f31\5\u045b\u022e\2\u0f31\u0f32")
buf.write("\5\u0435\u021b\2\u0f32\u0f33\5\u0457\u022c\2\u0f33\u0f34")
buf.write("\5\u045b\u022e\2\u0f34\u0f35\5\u045d\u022f\2\u0f35\u0f36")
buf.write("\5\u0453\u022a\2\u0f36\u02dc\3\2\2\2\u0f37\u0f38\5\u0459")
buf.write("\u022d\2\u0f38\u0f39\5\u045b\u022e\2\u0f39\u0f3a\5\u0435")
buf.write("\u021b\2\u0f3a\u0f3b\5\u045b\u022e\2\u0f3b\u0f3c\5\u043d")
buf.write("\u021f\2\u0f3c\u0f3d\5\u044d\u0227\2\u0f3d\u0f3e\5\u043d")
buf.write("\u021f\2\u0f3e\u0f3f\5\u044f\u0228\2\u0f3f\u0f40\5\u045b")
buf.write("\u022e\2\u0f40\u02de\3\2\2\2\u0f41\u0f42\5\u0459\u022d")
buf.write("\2\u0f42\u0f43\5\u045b\u022e\2\u0f43\u0f44\5\u0435\u021b")
buf.write("\2\u0f44\u0f45\5\u045b\u022e\2\u0f45\u0f46\5\u043d\u021f")
buf.write("\2\u0f46\u0f47\5\u044d\u0227\2\u0f47\u0f48\5\u043d\u021f")
buf.write("\2\u0f48\u0f49\5\u044f\u0228\2\u0f49\u0f4a\5\u045b\u022e")
buf.write("\2\u0f4a\u0f4b\7a\2\2\u0f4b\u0f4c\5\u0445\u0223\2\u0f4c")
buf.write("\u0f4d\5\u043b\u021e\2\u0f4d\u02e0\3\2\2\2\u0f4e\u0f4f")
buf.write("\5\u0459\u022d\2\u0f4f\u0f50\5\u045b\u022e\2\u0f50\u0f51")
buf.write("\5\u0435\u021b\2\u0f51\u0f52\5\u045b\u022e\2\u0f52\u0f53")
buf.write("\5\u0445\u0223\2\u0f53\u0f54\5\u0439\u021d\2\u0f54\u02e2")
buf.write("\3\2\2\2\u0f55\u0f56\5\u0459\u022d\2\u0f56\u0f57\5\u045b")
buf.write("\u022e\2\u0f57\u0f58\5\u0435\u021b\2\u0f58\u0f59\5\u045b")
buf.write("\u022e\2\u0f59\u0f5a\5\u0445\u0223\2\u0f5a\u0f5b\5\u0459")
buf.write("\u022d\2\u0f5b\u0f5c\5\u045b\u022e\2\u0f5c\u0f5d\5\u0445")
buf.write("\u0223\2\u0f5d\u0f5e\5\u0439\u021d\2\u0f5e\u0f5f\5\u0459")
buf.write("\u022d\2\u0f5f\u02e4\3\2\2\2\u0f60\u0f61\5\u0459\u022d")
buf.write("\2\u0f61\u0f62\5\u045b\u022e\2\u0f62\u0f63\5\u0457\u022c")
buf.write("\2\u0f63\u0f64\5\u0445\u0223\2\u0f64\u0f65\5\u044f\u0228")
buf.write("\2\u0f65\u0f66\5\u0441\u0221\2\u0f66\u02e6\3\2\2\2\u0f67")
buf.write("\u0f68\5\u0459\u022d\2\u0f68\u0f69\5\u045d\u022f\2\u0f69")
buf.write("\u0f6a\5\u0437\u021c\2\u0f6a\u0f6b\5\u044d\u0227\2\u0f6b")
buf.write("\u0f6c\5\u045d\u022f\2\u0f6c\u0f6d\5\u044b\u0226\2\u0f6d")
buf.write("\u0f6e\5\u045b\u022e\2\u0f6e\u0f6f\5\u0445\u0223\2\u0f6f")
buf.write("\u0f70\5\u0459\u022d\2\u0f70\u0f71\5\u043d\u021f\2\u0f71")
buf.write("\u0f72\5\u045b\u022e\2\u0f72\u02e8\3\2\2\2\u0f73\u0f74")
buf.write("\5\u0459\u022d\2\u0f74\u0f75\5\u045d\u022f\2\u0f75\u0f76")
buf.write("\5\u0437\u021c\2\u0f76\u0f77\5\u0453\u022a\2\u0f77\u0f78")
buf.write("\5\u0435\u021b\2\u0f78\u0f79\5\u0457\u022c\2\u0f79\u0f7a")
buf.write("\5\u045b\u022e\2\u0f7a\u0f7b\5\u0445\u0223\2\u0f7b\u0f7c")
buf.write("\5\u045b\u022e\2\u0f7c\u0f7d\5\u0445\u0223\2\u0f7d\u0f7e")
buf.write("\5\u0451\u0229\2\u0f7e\u0f7f\5\u044f\u0228\2\u0f7f\u02ea")
buf.write("\3\2\2\2\u0f80\u0f81\5\u0459\u022d\2\u0f81\u0f82\5\u045d")
buf.write("\u022f\2\u0f82\u0f83\5\u0437\u021c\2\u0f83\u0f84\5\u0459")
buf.write("\u022d\2\u0f84\u0f85\5\u045b\u022e\2\u0f85\u0f86\5\u0445")
buf.write("\u0223\2\u0f86\u0f87\5\u045b\u022e\2\u0f87\u0f88\5\u045d")
buf.write("\u022f\2\u0f88\u0f89\5\u045b\u022e\2\u0f89\u0f8a\5\u0435")
buf.write("\u021b\2\u0f8a\u0f8b\5\u0437\u021c\2\u0f8b\u0f8c\5\u044b")
buf.write("\u0226\2\u0f8c\u0f8d\5\u043d\u021f\2\u0f8d\u02ec\3\2\2")
buf.write("\2\u0f8e\u0f8f\5\u0459\u022d\2\u0f8f\u0f90\5\u045d\u022f")
buf.write("\2\u0f90\u0f91\5\u0437\u021c\2\u0f91\u0f92\5\u045b\u022e")
buf.write("\2\u0f92\u0f93\5\u0465\u0233\2\u0f93\u0f94\5\u0453\u022a")
buf.write("\2\u0f94\u0f95\5\u043d\u021f\2\u0f95\u02ee\3\2\2\2\u0f96")
buf.write("\u0f97\5\u0459\u022d\2\u0f97\u0f98\5\u045d\u022f\2\u0f98")
buf.write("\u0f99\5\u0439\u021d\2\u0f99\u0f9a\5\u0439\u021d\2\u0f9a")
buf.write("\u0f9b\5\u043d\u021f\2\u0f9b\u0f9c\5\u0459\u022d\2\u0f9c")
buf.write("\u0f9d\5\u0459\u022d\2\u0f9d\u02f0\3\2\2\2\u0f9e\u0f9f")
buf.write("\5\u0459\u022d\2\u0f9f\u0fa0\5\u045d\u022f\2\u0fa0\u0fa1")
buf.write("\5\u0459\u022d\2\u0fa1\u0fa2\5\u0453\u022a\2\u0fa2\u0fa3")
buf.write("\5\u043d\u021f\2\u0fa3\u0fa4\5\u044f\u0228\2\u0fa4\u0fa5")
buf.write("\5\u043b\u021e\2\u0fa5\u02f2\3\2\2\2\u0fa6\u0fa7\5\u045b")
buf.write("\u022e\2\u0fa7\u0fa8\5\u0435\u021b\2\u0fa8\u0fa9\5\u0437")
buf.write("\u021c\2\u0fa9\u0faa\5\u044b\u0226\2\u0faa\u0fab\5\u043d")
buf.write("\u021f\2\u0fab\u02f4\3\2\2\2\u0fac\u0fad\5\u045b\u022e")
buf.write("\2\u0fad\u0fae\5\u0443\u0222\2\u0fae\u0faf\5\u043d\u021f")
buf.write("\2\u0faf\u02f6\3\2\2\2\u0fb0\u0fb1\5\u045b\u022e\2\u0fb1")
buf.write("\u0fb2\5\u0443\u0222\2\u0fb2\u0fb3\5\u043d\u021f\2\u0fb3")
buf.write("\u0fb4\5\u044f\u0228\2\u0fb4\u02f8\3\2\2\2\u0fb5\u0fb6")
buf.write("\5\u045b\u022e\2\u0fb6\u0fb7\5\u0445\u0223\2\u0fb7\u0fb8")
buf.write("\5\u044d\u0227\2\u0fb8\u0fb9\5\u043d\u021f\2\u0fb9\u02fa")
buf.write("\3\2\2\2\u0fba\u0fbb\5\u045b\u022e\2\u0fbb\u0fbc\5\u0445")
buf.write("\u0223\2\u0fbc\u0fbd\5\u044d\u0227\2\u0fbd\u0fbe\5\u043d")
buf.write("\u021f\2\u0fbe\u0fbf\5\u0459\u022d\2\u0fbf\u0fc0\5\u045b")
buf.write("\u022e\2\u0fc0\u0fc1\5\u0435\u021b\2\u0fc1\u0fc2\5\u044d")
buf.write("\u0227\2\u0fc2\u0fc3\5\u0453\u022a\2\u0fc3\u02fc\3\2\2")
buf.write("\2\u0fc4\u0fc5\5\u045b\u022e\2\u0fc5\u0fc6\5\u0445\u0223")
buf.write("\2\u0fc6\u0fc7\5\u044d\u0227\2\u0fc7\u0fc8\5\u043d\u021f")
buf.write("\2\u0fc8\u0fc9\5\u0459\u022d\2\u0fc9\u0fca\5\u045b\u022e")
buf.write("\2\u0fca\u0fcb\5\u0435\u021b\2\u0fcb\u0fcc\5\u044d\u0227")
buf.write("\2\u0fcc\u0fcd\5\u0453\u022a\2\u0fcd\u0fce\7a\2\2\u0fce")
buf.write("\u0fcf\5\u044b\u0226\2\u0fcf\u0fd0\5\u045b\u022e\2\u0fd0")
buf.write("\u0fd1\5\u0467\u0234\2\u0fd1\u0fd2\7a\2\2\u0fd2\u0fd3")
buf.write("\5\u045d\u022f\2\u0fd3\u0fd4\5\u044f\u0228\2\u0fd4\u0fd5")
buf.write("\5\u0439\u021d\2\u0fd5\u0fd6\5\u0451\u0229\2\u0fd6\u0fd7")
buf.write("\5\u044f\u0228\2\u0fd7\u0fd8\5\u0459\u022d\2\u0fd8\u0fd9")
buf.write("\5\u045b\u022e\2\u0fd9\u0fda\5\u0457\u022c\2\u0fda\u0fdb")
buf.write("\5\u0435\u021b\2\u0fdb\u0fdc\5\u0445\u0223\2\u0fdc\u0fdd")
buf.write("\5\u044f\u0228\2\u0fdd\u0fde\5\u043d\u021f\2\u0fde\u0fdf")
buf.write("\5\u043b\u021e\2\u0fdf\u02fe\3\2\2\2\u0fe0\u0fe1\5\u045b")
buf.write("\u022e\2\u0fe1\u0fe2\5\u0445\u0223\2\u0fe2\u0fe3\5\u044d")
buf.write("\u0227\2\u0fe3\u0fe4\5\u043d\u021f\2\u0fe4\u0fe5\5\u0459")
buf.write("\u022d\2\u0fe5\u0fe6\5\u045b\u022e\2\u0fe6\u0fe7\5\u0435")
buf.write("\u021b\2\u0fe7\u0fe8\5\u044d\u0227\2\u0fe8\u0fe9\5\u0453")
buf.write("\u022a\2\u0fe9\u0fea\7a\2\2\u0fea\u0feb\5\u045b\u022e")
buf.write("\2\u0feb\u0fec\5\u0467\u0234\2\u0fec\u0fed\7a\2\2\u0fed")
buf.write("\u0fee\5\u045d\u022f\2\u0fee\u0fef\5\u044f\u0228\2\u0fef")
buf.write("\u0ff0\5\u0439\u021d\2\u0ff0\u0ff1\5\u0451\u0229\2\u0ff1")
buf.write("\u0ff2\5\u044f\u0228\2\u0ff2\u0ff3\5\u0459\u022d\2\u0ff3")
buf.write("\u0ff4\5\u045b\u022e\2\u0ff4\u0ff5\5\u0457\u022c\2\u0ff5")
buf.write("\u0ff6\5\u0435\u021b\2\u0ff6\u0ff7\5\u0445\u0223\2\u0ff7")
buf.write("\u0ff8\5\u044f\u0228\2\u0ff8\u0ff9\5\u043d\u021f\2\u0ff9")
buf.write("\u0ffa\5\u043b\u021e\2\u0ffa\u0300\3\2\2\2\u0ffb\u0ffc")
buf.write("\5\u045b\u022e\2\u0ffc\u0ffd\5\u0445\u0223\2\u0ffd\u0ffe")
buf.write("\5\u044d\u0227\2\u0ffe\u0fff\5\u043d\u021f\2\u0fff\u1000")
buf.write("\5\u0459\u022d\2\u1000\u1001\5\u045b\u022e\2\u1001\u1002")
buf.write("\5\u0435\u021b\2\u1002\u1003\5\u044d\u0227\2\u1003\u1004")
buf.write("\5\u0453\u022a\2\u1004\u1005\7a\2\2\u1005\u1006\5\u045d")
buf.write("\u022f\2\u1006\u1007\5\u044f\u0228\2\u1007\u1008\5\u0439")
buf.write("\u021d\2\u1008\u1009\5\u0451\u0229\2\u1009\u100a\5\u044f")
buf.write("\u0228\2\u100a\u100b\5\u0459\u022d\2\u100b\u100c\5\u045b")
buf.write("\u022e\2\u100c\u100d\5\u0457\u022c\2\u100d\u100e\5\u0435")
buf.write("\u021b\2\u100e\u100f\5\u0445\u0223\2\u100f\u1010\5\u044f")
buf.write("\u0228\2\u1010\u1011\5\u043d\u021f\2\u1011\u1012\5\u043b")
buf.write("\u021e\2\u1012\u0302\3\2\2\2\u1013\u1014\5\u045b\u022e")
buf.write("\2\u1014\u1015\5\u0445\u0223\2\u1015\u1016\5\u044d\u0227")
buf.write("\2\u1016\u1017\5\u043d\u021f\2\u1017\u1018\5\u0467\u0234")
buf.write("\2\u1018\u1019\5\u0451\u0229\2\u1019\u101a\5\u044f\u0228")
buf.write("\2\u101a\u101b\5\u043d\u021f\2\u101b\u101c\7a\2\2\u101c")
buf.write("\u101d\5\u0435\u021b\2\u101d\u101e\5\u0437\u021c\2\u101e")
buf.write("\u101f\5\u0437\u021c\2\u101f\u1020\5\u0457\u022c\2\u1020")
buf.write("\u0304\3\2\2\2\u1021\u1022\5\u045b\u022e\2\u1022\u1023")
buf.write("\5\u0445\u0223\2\u1023\u1024\5\u044d\u0227\2\u1024\u1025")
buf.write("\5\u043d\u021f\2\u1025\u1026\5\u0467\u0234\2\u1026\u1027")
buf.write("\5\u0451\u0229\2\u1027\u1028\5\u044f\u0228\2\u1028\u1029")
buf.write("\5\u043d\u021f\2\u1029\u102a\7a\2\2\u102a\u102b\5\u0443")
buf.write("\u0222\2\u102b\u102c\5\u0451\u0229\2\u102c\u102d\5\u045d")
buf.write("\u022f\2\u102d\u102e\5\u0457\u022c\2\u102e\u0306\3\2\2")
buf.write("\2\u102f\u1030\5\u045b\u022e\2\u1030\u1031\5\u0445\u0223")
buf.write("\2\u1031\u1032\5\u044d\u0227\2\u1032\u1033\5\u043d\u021f")
buf.write("\2\u1033\u1034\5\u0467\u0234\2\u1034\u1035\5\u0451\u0229")
buf.write("\2\u1035\u1036\5\u044f\u0228\2\u1036\u1037\5\u043d\u021f")
buf.write("\2\u1037\u1038\7a\2\2\u1038\u1039\5\u044d\u0227\2\u1039")
buf.write("\u103a\5\u0445\u0223\2\u103a\u103b\5\u044f\u0228\2\u103b")
buf.write("\u103c\5\u045d\u022f\2\u103c\u103d\5\u045b\u022e\2\u103d")
buf.write("\u103e\5\u043d\u021f\2\u103e\u0308\3\2\2\2\u103f\u1040")
buf.write("\5\u045b\u022e\2\u1040\u1041\5\u0445\u0223\2\u1041\u1042")
buf.write("\5\u044d\u0227\2\u1042\u1043\5\u043d\u021f\2\u1043\u1044")
buf.write("\5\u0467\u0234\2\u1044\u1045\5\u0451\u0229\2\u1045\u1046")
buf.write("\5\u044f\u0228\2\u1046\u1047\5\u043d\u021f\2\u1047\u1048")
buf.write("\7a\2\2\u1048\u1049\5\u0457\u022c\2\u1049\u104a\5\u043d")
buf.write("\u021f\2\u104a\u104b\5\u0441\u0221\2\u104b\u104c\5\u0445")
buf.write("\u0223\2\u104c\u104d\5\u0451\u0229\2\u104d\u104e\5\u044f")
buf.write("\u0228\2\u104e\u030a\3\2\2\2\u104f\u1050\5\u045b\u022e")
buf.write("\2\u1050\u1051\5\u0451\u0229\2\u1051\u030c\3\2\2\2\u1052")
buf.write("\u1053\5\u045b\u022e\2\u1053\u1054\5\u0457\u022c\2\u1054")
buf.write("\u1055\5\u0435\u021b\2\u1055\u1056\5\u0445\u0223\2\u1056")
buf.write("\u1057\5\u044b\u0226\2\u1057\u1058\5\u0445\u0223\2\u1058")
buf.write("\u1059\5\u044f\u0228\2\u1059\u105a\5\u0441\u0221\2\u105a")
buf.write("\u030e\3\2\2\2\u105b\u105c\5\u045b\u022e\2\u105c\u105d")
buf.write("\5\u0457\u022c\2\u105d\u105e\5\u0435\u021b\2\u105e\u105f")
buf.write("\5\u044f\u0228\2\u105f\u1060\5\u0459\u022d\2\u1060\u1061")
buf.write("\5\u0435\u021b\2\u1061\u1062\5\u0439\u021d\2\u1062\u1063")
buf.write("\5\u045b\u022e\2\u1063\u1064\5\u0445\u0223\2\u1064\u1065")
buf.write("\5\u0451\u0229\2\u1065\u1066\5\u044f\u0228\2\u1066\u0310")
buf.write("\3\2\2\2\u1067\u1068\5\u045b\u022e\2\u1068\u1069\5\u0457")
buf.write("\u022c\2\u1069\u106a\5\u0435\u021b\2\u106a\u106b\5\u044f")
buf.write("\u0228\2\u106b\u106c\5\u0459\u022d\2\u106c\u106d\5\u044b")
buf.write("\u0226\2\u106d\u106e\5\u0435\u021b\2\u106e\u106f\5\u045b")
buf.write("\u022e\2\u106f\u1070\5\u043d\u021f\2\u1070\u0312\3\2\2")
buf.write("\2\u1071\u1072\5\u045b\u022e\2\u1072\u1073\5\u0457\u022c")
buf.write("\2\u1073\u1074\5\u043d\u021f\2\u1074\u1075\5\u0435\u021b")
buf.write("\2\u1075\u1076\5\u045b\u022e\2\u1076\u0314\3\2\2\2\u1077")
buf.write("\u1078\5\u045b\u022e\2\u1078\u1079\5\u0457\u022c\2\u1079")
buf.write("\u107a\5\u0445\u0223\2\u107a\u107b\5\u0441\u0221\2\u107b")
buf.write("\u107c\5\u0441\u0221\2\u107c\u107d\5\u043d\u021f\2\u107d")
buf.write("\u107e\5\u0457\u022c\2\u107e\u0316\3\2\2\2\u107f\u1080")
buf.write("\5\u045b\u022e\2\u1080\u1081\5\u0457\u022c\2\u1081\u1082")
buf.write("\5\u0445\u0223\2\u1082\u1083\5\u044d\u0227\2\u1083\u0318")
buf.write("\3\2\2\2\u1084\u1085\5\u045b\u022e\2\u1085\u1086\5\u0457")
buf.write("\u022c\2\u1086\u1087\5\u045d\u022f\2\u1087\u1088\5\u043d")
buf.write("\u021f\2\u1088\u031a\3\2\2\2\u1089\u108a\5\u045b\u022e")
buf.write("\2\u108a\u108b\5\u0457\u022c\2\u108b\u108c\5\u045d\u022f")
buf.write("\2\u108c\u108d\5\u044f\u0228\2\u108d\u108e\5\u0439\u021d")
buf.write("\2\u108e\u108f\5\u0435\u021b\2\u108f\u1090\5\u045b\u022e")
buf.write("\2\u1090\u1091\5\u043d\u021f\2\u1091\u031c\3\2\2\2\u1092")
buf.write("\u1093\5\u045b\u022e\2\u1093\u1094\5\u0465\u0233\2\u1094")
buf.write("\u1095\5\u0453\u022a\2\u1095\u1096\5\u043d\u021f\2\u1096")
buf.write("\u031e\3\2\2\2\u1097\u1098\5\u045d\u022f\2\u1098\u1099")
buf.write("\5\u044f\u0228\2\u1099\u109a\5\u0437\u021c\2\u109a\u109b")
buf.write("\5\u0451\u0229\2\u109b\u109c\5\u045d\u022f\2\u109c\u109d")
buf.write("\5\u044f\u0228\2\u109d\u109e\5\u043b\u021e\2\u109e\u109f")
buf.write("\5\u043d\u021f\2\u109f\u10a0\5\u043b\u021e\2\u10a0\u0320")
buf.write("\3\2\2\2\u10a1\u10a2\5\u045d\u022f\2\u10a2\u10a3\5\u044f")
buf.write("\u0228\2\u10a3\u10a4\5\u043b\u021e\2\u10a4\u10a5\5\u043d")
buf.write("\u021f\2\u10a5\u10a6\5\u0457\u022c\2\u10a6\u0322\3\2\2")
buf.write("\2\u10a7\u10a8\5\u045d\u022f\2\u10a8\u10a9\5\u044f\u0228")
buf.write("\2\u10a9\u10aa\5\u0445\u0223\2\u10aa\u10ab\5\u0451\u0229")
buf.write("\2\u10ab\u10ac\5\u044f\u0228\2\u10ac\u0324\3\2\2\2\u10ad")
buf.write("\u10ae\5\u045d\u022f\2\u10ae\u10af\5\u044f\u0228\2\u10af")
buf.write("\u10b0\5\u0445\u0223\2\u10b0\u10b1\5\u0455\u022b\2\u10b1")
buf.write("\u10b2\5\u045d\u022f\2\u10b2\u10b3\5\u043d\u021f\2\u10b3")
buf.write("\u0326\3\2\2\2\u10b4\u10b5\5\u045d\u022f\2\u10b5\u10b6")
buf.write("\5\u044f\u0228\2\u10b6\u10b7\5\u044b\u0226\2\u10b7\u10b8")
buf.write("\5\u0445\u0223\2\u10b8\u10b9\5\u044d\u0227\2\u10b9\u10ba")
buf.write("\5\u0445\u0223\2\u10ba\u10bb\5\u045b\u022e\2\u10bb\u10bc")
buf.write("\5\u043d\u021f\2\u10bc\u10bd\5\u043b\u021e\2\u10bd\u0328")
buf.write("\3\2\2\2\u10be\u10bf\5\u045d\u022f\2\u10bf\u10c0\5\u044f")
buf.write("\u0228\2\u10c0\u10c1\5\u0453\u022a\2\u10c1\u10c2\5\u0445")
buf.write("\u0223\2\u10c2\u10c3\5\u045f\u0230\2\u10c3\u10c4\5\u0451")
buf.write("\u0229\2\u10c4\u10c5\5\u045b\u022e\2\u10c5\u032a\3\2\2")
buf.write("\2\u10c6\u10c7\5\u045d\u022f\2\u10c7\u10c8\5\u044f\u0228")
buf.write("\2\u10c8\u10c9\5\u045b\u022e\2\u10c9\u10ca\5\u0445\u0223")
buf.write("\2\u10ca\u10cb\5\u044b\u0226\2\u10cb\u032c\3\2\2\2\u10cc")
buf.write("\u10cd\5\u045d\u022f\2\u10cd\u10ce\5\u0453\u022a\2\u10ce")
buf.write("\u10cf\5\u043b\u021e\2\u10cf\u10d0\5\u0435\u021b\2\u10d0")
buf.write("\u10d1\5\u045b\u022e\2\u10d1\u10d2\5\u043d\u021f\2\u10d2")
buf.write("\u032e\3\2\2\2\u10d3\u10d4\5\u045d\u022f\2\u10d4\u10d5")
buf.write("\5\u0453\u022a\2\u10d5\u10d6\5\u043b\u021e\2\u10d6\u10d7")
buf.write("\5\u0435\u021b\2\u10d7\u10d8\5\u045b\u022e\2\u10d8\u10d9")
buf.write("\5\u043d\u021f\2\u10d9\u10da\5\u043b\u021e\2\u10da\u0330")
buf.write("\3\2\2\2\u10db\u10dc\5\u045d\u022f\2\u10dc\u10dd\5\u0453")
buf.write("\u022a\2\u10dd\u10de\5\u0459\u022d\2\u10de\u10df\5\u043d")
buf.write("\u021f\2\u10df\u10e0\5\u0457\u022c\2\u10e0\u10e1\5\u045b")
buf.write("\u022e\2\u10e1\u0332\3\2\2\2\u10e2\u10e3\5\u045d\u022f")
buf.write("\2\u10e3\u10e4\5\u0457\u022c\2\u10e4\u10e5\5\u0451\u0229")
buf.write("\2\u10e5\u10e6\5\u0461\u0231\2\u10e6\u10e7\5\u0445\u0223")
buf.write("\2\u10e7\u10e8\5\u043b\u021e\2\u10e8\u0334\3\2\2\2\u10e9")
buf.write("\u10ea\5\u045d\u022f\2\u10ea\u10eb\5\u0459\u022d\2\u10eb")
buf.write("\u10ec\5\u043d\u021f\2\u10ec\u0336\3\2\2\2\u10ed\u10ee")
buf.write("\5\u045d\u022f\2\u10ee\u10ef\5\u0459\u022d\2\u10ef\u10f0")
buf.write("\5\u0445\u0223\2\u10f0\u10f1\5\u044f\u0228\2\u10f1\u10f2")
buf.write("\5\u0441\u0221\2\u10f2\u0338\3\2\2\2\u10f3\u10f4\5\u045f")
buf.write("\u0230\2\u10f4\u10f5\5\u0435\u021b\2\u10f5\u10f6\5\u044b")
buf.write("\u0226\2\u10f6\u10f7\5\u0445\u0223\2\u10f7\u10f8\5\u043b")
buf.write("\u021e\2\u10f8\u10f9\5\u0435\u021b\2\u10f9\u10fa\5\u045b")
buf.write("\u022e\2\u10fa\u10fb\5\u043d\u021f\2\u10fb\u033a\3\2\2")
buf.write("\2\u10fc\u10fd\5\u045f\u0230\2\u10fd\u10fe\5\u0435\u021b")
buf.write("\2\u10fe\u10ff\5\u044b\u0226\2\u10ff\u1100\5\u045d\u022f")
buf.write("\2\u1100\u1101\5\u043d\u021f\2\u1101\u033c\3\2\2\2\u1102")
buf.write("\u1103\5\u045f\u0230\2\u1103\u1104\5\u0435\u021b\2\u1104")
buf.write("\u1105\5\u044b\u0226\2\u1105\u1106\5\u045d\u022f\2\u1106")
buf.write("\u1107\5\u043d\u021f\2\u1107\u1108\5\u0459\u022d\2\u1108")
buf.write("\u033e\3\2\2\2\u1109\u110a\5\u045f\u0230\2\u110a\u110b")
buf.write("\5\u0435\u021b\2\u110b\u110c\5\u0457\u022c\2\u110c\u110d")
buf.write("\5\u0439\u021d\2\u110d\u110e\5\u0443\u0222\2\u110e\u110f")
buf.write("\5\u0435\u021b\2\u110f\u1110\5\u0457\u022c\2\u1110\u0340")
buf.write("\3\2\2\2\u1111\u1112\5\u045f\u0230\2\u1112\u1113\5\u0435")
buf.write("\u021b\2\u1113\u1114\5\u0457\u022c\2\u1114\u1115\5\u0439")
buf.write("\u021d\2\u1115\u1116\5\u0443\u0222\2\u1116\u1117\5\u0435")
buf.write("\u021b\2\u1117\u1118\5\u0457\u022c\2\u1118\u1119\7\64")
buf.write("\2\2\u1119\u0342\3\2\2\2\u111a\u111b\5\u045f\u0230\2\u111b")
buf.write("\u111c\5\u0435\u021b\2\u111c\u111d\5\u0457\u022c\2\u111d")
buf.write("\u111e\5\u0445\u0223\2\u111e\u111f\5\u0435\u021b\2\u111f")
buf.write("\u1120\5\u0437\u021c\2\u1120\u1121\5\u044b\u0226\2\u1121")
buf.write("\u1122\5\u043d\u021f\2\u1122\u0344\3\2\2\2\u1123\u1124")
buf.write("\5\u045f\u0230\2\u1124\u1125\5\u0435\u021b\2\u1125\u1126")
buf.write("\5\u0457\u022c\2\u1126\u1127\5\u0457\u022c\2\u1127\u1128")
buf.write("\5\u0435\u021b\2\u1128\u1129\5\u0465\u0233\2\u1129\u0346")
buf.write("\3\2\2\2\u112a\u112b\5\u045f\u0230\2\u112b\u112c\5\u0435")
buf.write("\u021b\2\u112c\u112d\5\u0457\u022c\2\u112d\u112e\5\u0465")
buf.write("\u0233\2\u112e\u112f\5\u0445\u0223\2\u112f\u1130\5\u044f")
buf.write("\u0228\2\u1130\u1131\5\u0441\u0221\2\u1131\u0348\3\2\2")
buf.write("\2\u1132\u1133\5\u045f\u0230\2\u1133\u1134\5\u043d\u021f")
buf.write("\2\u1134\u1135\5\u0457\u022c\2\u1135\u1136\5\u0459\u022d")
buf.write("\2\u1136\u1137\5\u0445\u0223\2\u1137\u1138\5\u0451\u0229")
buf.write("\2\u1138\u1139\5\u044f\u0228\2\u1139\u034a\3\2\2\2\u113a")
buf.write("\u113b\5\u045f\u0230\2\u113b\u113c\5\u043d\u021f\2\u113c")
buf.write("\u113d\5\u0457\u022c\2\u113d\u113e\5\u0459\u022d\2\u113e")
buf.write("\u113f\5\u0445\u0223\2\u113f\u1140\5\u0451\u0229\2\u1140")
buf.write("\u1141\5\u044f\u0228\2\u1141\u1142\5\u0459\u022d\2\u1142")
buf.write("\u034c\3\2\2\2\u1143\u1144\5\u0461\u0231\2\u1144\u1145")
buf.write("\5\u0435\u021b\2\u1145\u1146\5\u0445\u0223\2\u1146\u1147")
buf.write("\5\u045b\u022e\2\u1147\u034e\3\2\2\2\u1148\u1149\5\u0461")
buf.write("\u0231\2\u1149\u114a\5\u0435\u021b\2\u114a\u114b\5\u0457")
buf.write("\u022c\2\u114b\u114c\5\u044f\u0228\2\u114c\u114d\5\u0445")
buf.write("\u0223\2\u114d\u114e\5\u044f\u0228\2\u114e\u114f\5\u0441")
buf.write("\u0221\2\u114f\u0350\3\2\2\2\u1150\u1151\5\u0461\u0231")
buf.write("\2\u1151\u1152\5\u043d\u021f\2\u1152\u1153\5\u044b\u0226")
buf.write("\2\u1153\u1154\5\u044b\u0226\2\u1154\u1155\5\u043f\u0220")
buf.write("\2\u1155\u1156\5\u0451\u0229\2\u1156\u1157\5\u0457\u022c")
buf.write("\2\u1157\u1158\5\u044d\u0227\2\u1158\u1159\5\u043d\u021f")
buf.write("\2\u1159\u115a\5\u043b\u021e\2\u115a\u0352\3\2\2\2\u115b")
buf.write("\u115c\5\u0461\u0231\2\u115c\u115d\5\u0443\u0222\2\u115d")
buf.write("\u115e\5\u043d\u021f\2\u115e\u115f\5\u044f\u0228\2\u115f")
buf.write("\u0354\3\2\2\2\u1160\u1161\5\u0461\u0231\2\u1161\u1162")
buf.write("\5\u0443\u0222\2\u1162\u1163\5\u043d\u021f\2\u1163\u1164")
buf.write("\5\u044f\u0228\2\u1164\u1165\5\u043d\u021f\2\u1165\u1166")
buf.write("\5\u045f\u0230\2\u1166\u1167\5\u043d\u021f\2\u1167\u1168")
buf.write("\5\u0457\u022c\2\u1168\u0356\3\2\2\2\u1169\u116a\5\u0461")
buf.write("\u0231\2\u116a\u116b\5\u0443\u0222\2\u116b\u116c\5\u043d")
buf.write("\u021f\2\u116c\u116d\5\u0457\u022c\2\u116d\u116e\5\u043d")
buf.write("\u021f\2\u116e\u0358\3\2\2\2\u116f\u1170\5\u0461\u0231")
buf.write("\2\u1170\u1171\5\u0443\u0222\2\u1171\u1172\5\u0445\u0223")
buf.write("\2\u1172\u1173\5\u044b\u0226\2\u1173\u1174\5\u043d\u021f")
buf.write("\2\u1174\u035a\3\2\2\2\u1175\u1176\5\u0461\u0231\2\u1176")
buf.write("\u1177\5\u0445\u0223\2\u1177\u1178\5\u045b\u022e\2\u1178")
buf.write("\u1179\5\u0443\u0222\2\u1179\u035c\3\2\2\2\u117a\u117b")
buf.write("\5\u0461\u0231\2\u117b\u117c\5\u0445\u0223\2\u117c\u117d")
buf.write("\5\u045b\u022e\2\u117d\u117e\5\u0443\u0222\2\u117e\u117f")
buf.write("\5\u0445\u0223\2\u117f\u1180\5\u044f\u0228\2\u1180\u035e")
buf.write("\3\2\2\2\u1181\u1182\5\u0461\u0231\2\u1182\u1183\5\u0451")
buf.write("\u0229\2\u1183\u1184\5\u0457\u022c\2\u1184\u1185\5\u0449")
buf.write("\u0225\2\u1185\u0360\3\2\2\2\u1186\u1187\5\u0461\u0231")
buf.write("\2\u1187\u1188\5\u0457\u022c\2\u1188\u1189\5\u0445\u0223")
buf.write("\2\u1189\u118a\5\u045b\u022e\2\u118a\u118b\5\u043d\u021f")
buf.write("\2\u118b\u0362\3\2\2\2\u118c\u118d\5\u0463\u0232\2\u118d")
buf.write("\u118e\5\u044d\u0227\2\u118e\u118f\5\u044b\u0226\2\u118f")
buf.write("\u0364\3\2\2\2\u1190\u1191\5\u0463\u0232\2\u1191\u1192")
buf.write("\5\u044d\u0227\2\u1192\u1193\5\u044b\u0226\2\u1193\u1194")
buf.write("\5\u0435\u021b\2\u1194\u1195\5\u0441\u0221\2\u1195\u1196")
buf.write("\5\u0441\u0221\2\u1196\u0366\3\2\2\2\u1197\u1198\5\u0463")
buf.write("\u0232\2\u1198\u1199\5\u044d\u0227\2\u1199\u119a\5\u044b")
buf.write("\u0226\2\u119a\u119b\5\u0435\u021b\2\u119b\u119c\5\u045b")
buf.write("\u022e\2\u119c\u119d\5\u045b\u022e\2\u119d\u119e\5\u0457")
buf.write("\u022c\2\u119e\u119f\5\u0445\u0223\2\u119f\u11a0\5\u0437")
buf.write("\u021c\2\u11a0\u11a1\5\u045d\u022f\2\u11a1\u11a2\5\u045b")
buf.write("\u022e\2\u11a2\u11a3\5\u043d\u021f\2\u11a3\u11a4\5\u0459")
buf.write("\u022d\2\u11a4\u0368\3\2\2\2\u11a5\u11a6\5\u0463\u0232")
buf.write("\2\u11a6\u11a7\5\u044d\u0227\2\u11a7\u11a8\5\u044b\u0226")
buf.write("\2\u11a8\u11a9\5\u0439\u021d\2\u11a9\u11aa\5\u0435\u021b")
buf.write("\2\u11aa\u11ab\5\u0459\u022d\2\u11ab\u11ac\5\u045b\u022e")
buf.write("\2\u11ac\u036a\3\2\2\2\u11ad\u11ae\5\u0463\u0232\2\u11ae")
buf.write("\u11af\5\u044d\u0227\2\u11af\u11b0\5\u044b\u0226\2\u11b0")
buf.write("\u11b1\5\u0439\u021d\2\u11b1\u11b2\5\u0451\u0229\2\u11b2")
buf.write("\u11b3\5\u044b\u0226\2\u11b3\u11b4\5\u0435\u021b\2\u11b4")
buf.write("\u11b5\5\u045b\u022e\2\u11b5\u11b6\5\u045b\u022e\2\u11b6")
buf.write("\u11b7\5\u045f\u0230\2\u11b7\u11b8\5\u0435\u021b\2\u11b8")
buf.write("\u11b9\5\u044b\u0226\2\u11b9\u036c\3\2\2\2\u11ba\u11bb")
buf.write("\5\u0463\u0232\2\u11bb\u11bc\5\u044d\u0227\2\u11bc\u11bd")
buf.write("\5\u044b\u0226\2\u11bd\u11be\5\u043d\u021f\2\u11be\u11bf")
buf.write("\5\u044b\u0226\2\u11bf\u11c0\5\u043d\u021f\2\u11c0\u11c1")
buf.write("\5\u044d\u0227\2\u11c1\u11c2\5\u043d\u021f\2\u11c2\u11c3")
buf.write("\5\u044f\u0228\2\u11c3\u11c4\5\u045b\u022e\2\u11c4\u036e")
buf.write("\3\2\2\2\u11c5\u11c6\5\u0463\u0232\2\u11c6\u11c7\5\u044d")
buf.write("\u0227\2\u11c7\u11c8\5\u044b\u0226\2\u11c8\u11c9\5\u043d")
buf.write("\u021f\2\u11c9\u11ca\5\u0463\u0232\2\u11ca\u11cb\5\u0445")
buf.write("\u0223\2\u11cb\u11cc\5\u0459\u022d\2\u11cc\u11cd\5\u045b")
buf.write("\u022e\2\u11cd\u11ce\5\u0459\u022d\2\u11ce\u0370\3\2\2")
buf.write("\2\u11cf\u11d0\5\u0463\u0232\2\u11d0\u11d1\5\u044d\u0227")
buf.write("\2\u11d1\u11d2\5\u044b\u0226\2\u11d2\u11d3\5\u043f\u0220")
buf.write("\2\u11d3\u11d4\5\u0451\u0229\2\u11d4\u11d5\5\u0457\u022c")
buf.write("\2\u11d5\u11d6\5\u043d\u021f\2\u11d6\u11d7\5\u0459\u022d")
buf.write("\2\u11d7\u11d8\5\u045b\u022e\2\u11d8\u0372\3\2\2\2\u11d9")
buf.write("\u11da\5\u0463\u0232\2\u11da\u11db\5\u044d\u0227\2\u11db")
buf.write("\u11dc\5\u044b\u0226\2\u11dc\u11dd\5\u044f\u0228\2\u11dd")
buf.write("\u11de\5\u0435\u021b\2\u11de\u11df\5\u044d\u0227\2\u11df")
buf.write("\u11e0\5\u043d\u021f\2\u11e0\u11e1\5\u0459\u022d\2\u11e1")
buf.write("\u11e2\5\u0453\u022a\2\u11e2\u11e3\5\u0435\u021b\2\u11e3")
buf.write("\u11e4\5\u0439\u021d\2\u11e4\u11e5\5\u043d\u021f\2\u11e5")
buf.write("\u11e6\5\u0459\u022d\2\u11e6\u0374\3\2\2\2\u11e7\u11e8")
buf.write("\5\u0463\u0232\2\u11e8\u11e9\5\u044d\u0227\2\u11e9\u11ea")
buf.write("\5\u044b\u0226\2\u11ea\u11eb\5\u0453\u022a\2\u11eb\u11ec")
buf.write("\5\u0435\u021b\2\u11ec\u11ed\5\u0457\u022c\2\u11ed\u11ee")
buf.write("\5\u0459\u022d\2\u11ee\u11ef\5\u043d\u021f\2\u11ef\u0376")
buf.write("\3\2\2\2\u11f0\u11f1\5\u0463\u0232\2\u11f1\u11f2\5\u044d")
buf.write("\u0227\2\u11f2\u11f3\5\u044b\u0226\2\u11f3\u11f4\5\u0453")
buf.write("\u022a\2\u11f4\u11f5\5\u0445\u0223\2\u11f5\u0378\3\2\2")
buf.write("\2\u11f6\u11f7\5\u0463\u0232\2\u11f7\u11f8\5\u044d\u0227")
buf.write("\2\u11f8\u11f9\5\u044b\u0226\2\u11f9\u11fa\5\u0455\u022b")
buf.write("\2\u11fa\u11fb\5\u045d\u022f\2\u11fb\u11fc\5\u043d\u021f")
buf.write("\2\u11fc\u11fd\5\u0457\u022c\2\u11fd\u11fe\5\u0465\u0233")
buf.write("\2\u11fe\u037a\3\2\2\2\u11ff\u1200\5\u0463\u0232\2\u1200")
buf.write("\u1201\5\u044d\u0227\2\u1201\u1202\5\u044b\u0226\2\u1202")
buf.write("\u1203\5\u0457\u022c\2\u1203\u1204\5\u0451\u0229\2\u1204")
buf.write("\u1205\5\u0451\u0229\2\u1205\u1206\5\u045b\u022e\2\u1206")
buf.write("\u037c\3\2\2\2\u1207\u1208\5\u0463\u0232\2\u1208\u1209")
buf.write("\5\u044d\u0227\2\u1209\u120a\5\u044b\u0226\2\u120a\u120b")
buf.write("\5\u0459\u022d\2\u120b\u120c\5\u043d\u021f\2\u120c\u120d")
buf.write("\5\u0457\u022c\2\u120d\u120e\5\u0445\u0223\2\u120e\u120f")
buf.write("\5\u0435\u021b\2\u120f\u1210\5\u044b\u0226\2\u1210\u1211")
buf.write("\5\u0445\u0223\2\u1211\u1212\5\u0467\u0234\2\u1212\u1213")
buf.write("\5\u043d\u021f\2\u1213\u037e\3\2\2\2\u1214\u1215\5\u0463")
buf.write("\u0232\2\u1215\u1216\5\u044d\u0227\2\u1216\u1217\5\u044b")
buf.write("\u0226\2\u1217\u1218\5\u045b\u022e\2\u1218\u1219\5\u0435")
buf.write("\u021b\2\u1219\u121a\5\u0437\u021c\2\u121a\u121b\5\u044b")
buf.write("\u0226\2\u121b\u121c\5\u043d\u021f\2\u121c\u0380\3\2\2")
buf.write("\2\u121d\u121e\5\u0465\u0233\2\u121e\u121f\5\u043d\u021f")
buf.write("\2\u121f\u1220\5\u0435\u021b\2\u1220\u1221\5\u0457\u022c")
buf.write("\2\u1221\u0382\3\2\2\2\u1222\u1223\5\u0465\u0233\2\u1223")
buf.write("\u1224\5\u043d\u021f\2\u1224\u1225\5\u0459\u022d\2\u1225")
buf.write("\u0384\3\2\2\2\u1226\u1227\5\u0465\u0233\2\u1227\u1228")
buf.write("\5\u044d\u0227\2\u1228\u1229\5\u0445\u0223\2\u1229\u122a")
buf.write("\5\u044f\u0228\2\u122a\u122b\5\u045b\u022e\2\u122b\u122c")
buf.write("\5\u043d\u021f\2\u122c\u122d\5\u0457\u022c\2\u122d\u122e")
buf.write("\5\u045f\u0230\2\u122e\u122f\5\u0435\u021b\2\u122f\u1230")
buf.write("\5\u044b\u0226\2\u1230\u1231\7a\2\2\u1231\u1232\5\u045d")
buf.write("\u022f\2\u1232\u1233\5\u044f\u0228\2\u1233\u1234\5\u0439")
buf.write("\u021d\2\u1234\u1235\5\u0451\u0229\2\u1235\u1236\5\u044f")
buf.write("\u0228\2\u1236\u1237\5\u0459\u022d\2\u1237\u1238\5\u045b")
buf.write("\u022e\2\u1238\u1239\5\u0457\u022c\2\u1239\u123a\5\u0435")
buf.write("\u021b\2\u123a\u123b\5\u0445\u0223\2\u123b\u123c\5\u044f")
buf.write("\u0228\2\u123c\u123d\5\u043d\u021f\2\u123d\u123e\5\u043b")
buf.write("\u021e\2\u123e\u0386\3\2\2\2\u123f\u1240\5\u0467\u0234")
buf.write("\2\u1240\u1241\5\u0451\u0229\2\u1241\u1242\5\u044f\u0228")
buf.write("\2\u1242\u1243\5\u043d\u021f\2\u1243\u0388\3\2\2\2\u1244")
buf.write("\u1245\5\u0453\u022a\2\u1245\u1246\5\u0457\u022c\2\u1246")
buf.write("\u1247\5\u043d\u021f\2\u1247\u1248\5\u043b\u021e\2\u1248")
buf.write("\u1249\5\u0445\u0223\2\u1249\u124a\5\u0439\u021d\2\u124a")
buf.write("\u124b\5\u045b\u022e\2\u124b\u124c\5\u0445\u0223\2\u124c")
buf.write("\u124d\5\u0451\u0229\2\u124d\u124e\5\u044f\u0228\2\u124e")
buf.write("\u038a\3\2\2\2\u124f\u1250\5\u0453\u022a\2\u1250\u1251")
buf.write("\5\u0457\u022c\2\u1251\u1252\5\u043d\u021f\2\u1252\u1253")
buf.write("\5\u043b\u021e\2\u1253\u1254\5\u0445\u0223\2\u1254\u1255")
buf.write("\5\u0439\u021d\2\u1255\u1256\5\u045b\u022e\2\u1256\u1257")
buf.write("\5\u0445\u0223\2\u1257\u1258\5\u0451\u0229\2\u1258\u1259")
buf.write("\5\u044f\u0228\2\u1259\u125a\7a\2\2\u125a\u125b\5\u0437")
buf.write("\u021c\2\u125b\u125c\5\u0451\u0229\2\u125c\u125d\5\u045d")
buf.write("\u022f\2\u125d\u125e\5\u044f\u0228\2\u125e\u125f\5\u043b")
buf.write("\u021e\2\u125f\u1260\5\u0459\u022d\2\u1260\u038c\3\2\2")
buf.write("\2\u1261\u1262\5\u0453\u022a\2\u1262\u1263\5\u0457\u022c")
buf.write("\2\u1263\u1264\5\u043d\u021f\2\u1264\u1265\5\u043b\u021e")
buf.write("\2\u1265\u1266\5\u0445\u0223\2\u1266\u1267\5\u0439\u021d")
buf.write("\2\u1267\u1268\5\u045b\u022e\2\u1268\u1269\5\u0445\u0223")
buf.write("\2\u1269\u126a\5\u0451\u0229\2\u126a\u126b\5\u044f\u0228")
buf.write("\2\u126b\u126c\7a\2\2\u126c\u126d\5\u0439\u021d\2\u126d")
buf.write("\u126e\5\u0451\u0229\2\u126e\u126f\5\u0459\u022d\2\u126f")
buf.write("\u1270\5\u045b\u022e\2\u1270\u038e\3\2\2\2\u1271\u1272")
buf.write("\5\u0453\u022a\2\u1272\u1273\5\u0457\u022c\2\u1273\u1274")
buf.write("\5\u043d\u021f\2\u1274\u1275\5\u043b\u021e\2\u1275\u1276")
buf.write("\5\u0445\u0223\2\u1276\u1277\5\u0439\u021d\2\u1277\u1278")
buf.write("\5\u045b\u022e\2\u1278\u1279\5\u0445\u0223\2\u1279\u127a")
buf.write("\5\u0451\u0229\2\u127a\u127b\5\u044f\u0228\2\u127b\u127c")
buf.write("\7a\2\2\u127c\u127d\5\u043b\u021e\2\u127d\u127e\5\u043d")
buf.write("\u021f\2\u127e\u127f\5\u045b\u022e\2\u127f\u1280\5\u0435")
buf.write("\u021b\2\u1280\u1281\5\u0445\u0223\2\u1281\u1282\5\u044b")
buf.write("\u0226\2\u1282\u1283\5\u0459\u022d\2\u1283\u0390\3\2\2")
buf.write("\2\u1284\u1285\5\u0453\u022a\2\u1285\u1286\5\u0457\u022c")
buf.write("\2\u1286\u1287\5\u043d\u021f\2\u1287\u1288\5\u043b\u021e")
buf.write("\2\u1288\u1289\5\u0445\u0223\2\u1289\u128a\5\u0439\u021d")
buf.write("\2\u128a\u128b\5\u045b\u022e\2\u128b\u128c\5\u0445\u0223")
buf.write("\2\u128c\u128d\5\u0451\u0229\2\u128d\u128e\5\u044f\u0228")
buf.write("\2\u128e\u128f\7a\2\2\u128f\u1290\5\u0453\u022a\2\u1290")
buf.write("\u1291\5\u0457\u022c\2\u1291\u1292\5\u0451\u0229\2\u1292")
buf.write("\u1293\5\u0437\u021c\2\u1293\u1294\5\u0435\u021b\2\u1294")
buf.write("\u1295\5\u0437\u021c\2\u1295\u1296\5\u0445\u0223\2\u1296")
buf.write("\u1297\5\u044b\u0226\2\u1297\u1298\5\u0445\u0223\2\u1298")
buf.write("\u1299\5\u045b\u022e\2\u1299\u129a\5\u0465\u0233\2\u129a")
buf.write("\u0392\3\2\2\2\u129b\u129c\5\u0453\u022a\2\u129c\u129d")
buf.write("\5\u0457\u022c\2\u129d\u129e\5\u043d\u021f\2\u129e\u129f")
buf.write("\5\u043b\u021e\2\u129f\u12a0\5\u0445\u0223\2\u12a0\u12a1")
buf.write("\5\u0439\u021d\2\u12a1\u12a2\5\u045b\u022e\2\u12a2\u12a3")
buf.write("\5\u0445\u0223\2\u12a3\u12a4\5\u0451\u0229\2\u12a4\u12a5")
buf.write("\5\u044f\u0228\2\u12a5\u12a6\7a\2\2\u12a6\u12a7\5\u0459")
buf.write("\u022d\2\u12a7\u12a8\5\u043d\u021f\2\u12a8\u12a9\5\u045b")
buf.write("\u022e\2\u12a9\u0394\3\2\2\2\u12aa\u12ab\5\u0439\u021d")
buf.write("\2\u12ab\u12ac\5\u045d\u022f\2\u12ac\u12ad\5\u044d\u0227")
buf.write("\2\u12ad\u12ae\5\u043d\u021f\2\u12ae\u12af\7a\2\2\u12af")
buf.write("\u12b0\5\u043b\u021e\2\u12b0\u12b1\5\u0445\u0223\2\u12b1")
buf.write("\u12b2\5\u0459\u022d\2\u12b2\u12b3\5\u045b\u022e\2\u12b3")
buf.write("\u0396\3\2\2\2\u12b4\u12b5\5\u043b\u021e\2\u12b5\u12b6")
buf.write("\5\u043d\u021f\2\u12b6\u12b7\5\u044f\u0228\2\u12b7\u12b8")
buf.write("\5\u0459\u022d\2\u12b8\u12b9\5\u043d\u021f\2\u12b9\u12ba")
buf.write("\7a\2\2\u12ba\u12bb\5\u0457\u022c\2\u12bb\u12bc\5\u0435")
buf.write("\u021b\2\u12bc\u12bd\5\u044f\u0228\2\u12bd\u12be\5\u0449")
buf.write("\u0225\2\u12be\u0398\3\2\2\2\u12bf\u12c0\5\u044b\u0226")
buf.write("\2\u12c0\u12c1\5\u0445\u0223\2\u12c1\u12c2\5\u0459\u022d")
buf.write("\2\u12c2\u12c3\5\u045b\u022e\2\u12c3\u12c4\5\u0435\u021b")
buf.write("\2\u12c4\u12c5\5\u0441\u0221\2\u12c5\u12c6\5\u0441\u0221")
buf.write("\2\u12c6\u039a\3\2\2\2\u12c7\u12c8\5\u0453\u022a\2\u12c8")
buf.write("\u12c9\5\u043d\u021f\2\u12c9\u12ca\5\u0457\u022c\2\u12ca")
buf.write("\u12cb\5\u0439\u021d\2\u12cb\u12cc\5\u043d\u021f\2\u12cc")
buf.write("\u12cd\5\u044f\u0228\2\u12cd\u12ce\5\u045b\u022e\2\u12ce")
buf.write("\u12cf\7a\2\2\u12cf\u12d0\5\u0457\u022c\2\u12d0\u12d1")
buf.write("\5\u0435\u021b\2\u12d1\u12d2\5\u044f\u0228\2\u12d2\u12d3")
buf.write("\5\u0449\u0225\2\u12d3\u039c\3\2\2\2\u12d4\u12d5\5\u0453")
buf.write("\u022a\2\u12d5\u12d6\5\u043d\u021f\2\u12d6\u12d7\5\u0457")
buf.write("\u022c\2\u12d7\u12d8\5\u0439\u021d\2\u12d8\u12d9\5\u043d")
buf.write("\u021f\2\u12d9\u12da\5\u044f\u0228\2\u12da\u12db\5\u045b")
buf.write("\u022e\2\u12db\u12dc\5\u0445\u0223\2\u12dc\u12dd\5\u044b")
buf.write("\u0226\2\u12dd\u12de\5\u043d\u021f\2\u12de\u12df\7a\2")
buf.write("\2\u12df\u12e0\5\u0439\u021d\2\u12e0\u12e1\5\u0451\u0229")
buf.write("\2\u12e1\u12e2\5\u044f\u0228\2\u12e2\u12e3\5\u045b\u022e")
buf.write("\2\u12e3\u039e\3\2\2\2\u12e4\u12e5\5\u0453\u022a\2\u12e5")
buf.write("\u12e6\5\u043d\u021f\2\u12e6\u12e7\5\u0457\u022c\2\u12e7")
buf.write("\u12e8\5\u0439\u021d\2\u12e8\u12e9\5\u043d\u021f\2\u12e9")
buf.write("\u12ea\5\u044f\u0228\2\u12ea\u12eb\5\u045b\u022e\2\u12eb")
buf.write("\u12ec\5\u0445\u0223\2\u12ec\u12ed\5\u044b\u0226\2\u12ed")
buf.write("\u12ee\5\u043d\u021f\2\u12ee\u12ef\7a\2\2\u12ef\u12f0")
buf.write("\5\u043b\u021e\2\u12f0\u12f1\5\u0445\u0223\2\u12f1\u12f2")
buf.write("\5\u0459\u022d\2\u12f2\u12f3\5\u0439\u021d\2\u12f3\u03a0")
buf.write("\3\2\2\2\u12f4\u12f5\5\u0457\u022c\2\u12f5\u12f6\5\u0435")
buf.write("\u021b\2\u12f6\u12f7\5\u044f\u0228\2\u12f7\u12f8\5\u0449")
buf.write("\u0225\2\u12f8\u03a2\3\2\2\2\u12f9\u12fa\5\u0435\u021b")
buf.write("\2\u12fa\u12fb\5\u045f\u0230\2\u12fb\u12fc\5\u0441\u0221")
buf.write("\2\u12fc\u03a4\3\2\2\2\u12fd\u12fe\5\u0439\u021d\2\u12fe")
buf.write("\u12ff\5\u0451\u0229\2\u12ff\u1300\5\u0457\u022c\2\u1300")
buf.write("\u1301\5\u0457\u022c\2\u1301\u03a6\3\2\2\2\u1302\u1303")
buf.write("\5\u044b\u0226\2\u1303\u1304\5\u0435\u021b\2\u1304\u1305")
buf.write("\5\u0441\u0221\2\u1305\u03a8\3\2\2\2\u1306\u1307\5\u044b")
buf.write("\u0226\2\u1307\u1308\5\u043d\u021f\2\u1308\u1309\5\u0435")
buf.write("\u021b\2\u1309\u130a\5\u043b\u021e\2\u130a\u03aa\3\2\2")
buf.write("\2\u130b\u130c\5\u044d\u0227\2\u130c\u130d\5\u0435\u021b")
buf.write("\2\u130d\u130e\5\u0463\u0232\2\u130e\u03ac\3\2\2\2\u130f")
buf.write("\u1310\5\u044d\u0227\2\u1310\u1311\5\u043d\u021f\2\u1311")
buf.write("\u1312\5\u043b\u021e\2\u1312\u1313\5\u0445\u0223\2\u1313")
buf.write("\u1314\5\u0435\u021b\2\u1314\u1315\5\u044f\u0228\2\u1315")
buf.write("\u03ae\3\2\2\2\u1316\u1317\5\u044d\u0227\2\u1317\u1318")
buf.write("\5\u0445\u0223\2\u1318\u1319\5\u044f\u0228\2\u1319\u03b0")
buf.write("\3\2\2\2\u131a\u131b\5\u044f\u0228\2\u131b\u131c\5\u045b")
buf.write("\u022e\2\u131c\u131d\5\u0445\u0223\2\u131d\u131e\5\u044b")
buf.write("\u0226\2\u131e\u131f\5\u043d\u021f\2\u131f\u03b2\3\2\2")
buf.write("\2\u1320\u1321\5\u0457\u022c\2\u1321\u1322\5\u0435\u021b")
buf.write("\2\u1322\u1323\5\u045b\u022e\2\u1323\u1324\5\u0445\u0223")
buf.write("\2\u1324\u1325\5\u0451\u0229\2\u1325\u1326\7a\2\2\u1326")
buf.write("\u1327\5\u045b\u022e\2\u1327\u1328\5\u0451\u0229\2\u1328")
buf.write("\u1329\7a\2\2\u1329\u132a\5\u0457\u022c\2\u132a\u132b")
buf.write("\5\u043d\u021f\2\u132b\u132c\5\u0453\u022a\2\u132c\u132d")
buf.write("\5\u0451\u0229\2\u132d\u132e\5\u0457\u022c\2\u132e\u132f")
buf.write("\5\u045b\u022e\2\u132f\u03b4\3\2\2\2\u1330\u1331\5\u0457")
buf.write("\u022c\2\u1331\u1332\5\u0451\u0229\2\u1332\u1333\5\u0461")
buf.write("\u0231\2\u1333\u1334\7a\2\2\u1334\u1335\5\u044f\u0228")
buf.write("\2\u1335\u1336\5\u045d\u022f\2\u1336\u1337\5\u044d\u0227")
buf.write("\2\u1337\u1338\5\u0437\u021c\2\u1338\u1339\5\u043d\u021f")
buf.write("\2\u1339\u133a\5\u0457\u022c\2\u133a\u03b6\3\2\2\2\u133b")
buf.write("\u133c\5\u0459\u022d\2\u133c\u133d\5\u045d\u022f\2\u133d")
buf.write("\u133e\5\u044d\u0227\2\u133e\u03b8\3\2\2\2\u133f\u1340")
buf.write("\5\u045f\u0230\2\u1340\u1341\5\u0435\u021b\2\u1341\u1342")
buf.write("\5\u0457\u022c\2\u1342\u1343\5\u0445\u0223\2\u1343\u1344")
buf.write("\5\u0435\u021b\2\u1344\u1345\5\u044f\u0228\2\u1345\u1346")
buf.write("\5\u0439\u021d\2\u1346\u1347\5\u043d\u021f\2\u1347\u03ba")
buf.write("\3\2\2\2\u1348\u1349\5\u0457\u022c\2\u1349\u134a\5\u043d")
buf.write("\u021f\2\u134a\u134b\5\u0441\u0221\2\u134b\u134c\5\u0457")
buf.write("\u022c\2\u134c\u134d\7a\2\2\u134d\u03bc\3\2\2\2\u134e")
buf.write("\u134f\5\u0459\u022d\2\u134f\u1350\5\u045b\u022e\2\u1350")
buf.write("\u1351\5\u043b\u021e\2\u1351\u1352\5\u043b\u021e\2\u1352")
buf.write("\u1353\5\u043d\u021f\2\u1353\u1354\5\u045f\u0230\2\u1354")
buf.write("\u03be\3\2\2\2\u1355\u1356\5\u045f\u0230\2\u1356\u1357")
buf.write("\5\u0435\u021b\2\u1357\u1358\5\u0457\u022c\2\u1358\u1359")
buf.write("\7a\2\2\u1359\u03c0\3\2\2\2\u135a\u135b\5\u0439\u021d")
buf.write("\2\u135b\u135c\5\u0451\u0229\2\u135c\u135d\5\u045f\u0230")
buf.write("\2\u135d\u135e\5\u0435\u021b\2\u135e\u135f\5\u0457\u022c")
buf.write("\2\u135f\u1360\7a\2\2\u1360\u03c2\3\2\2\2\u1361\u1362")
buf.write("\5\u044f\u0228\2\u1362\u1369\7)\2\2\u1363\u1368\n\2\2")
buf.write("\2\u1364\u1365\7)\2\2\u1365\u1368\7)\2\2\u1366\u1368\5")
buf.write("\u042d\u0217\2\u1367\u1363\3\2\2\2\u1367\u1364\3\2\2\2")
buf.write("\u1367\u1366\3\2\2\2\u1368\u136b\3\2\2\2\u1369\u1367\3")
buf.write("\2\2\2\u1369\u136a\3\2\2\2\u136a\u136c\3\2\2\2\u136b\u1369")
buf.write("\3\2\2\2\u136c\u136d\7)\2\2\u136d\u03c4\3\2\2\2\u136e")
buf.write("\u1377\5\u0437\u021c\2\u136f\u1373\7)\2\2\u1370\u1372")
buf.write("\4\62\63\2\u1371\u1370\3\2\2\2\u1372\u1375\3\2\2\2\u1373")
buf.write("\u1371\3\2\2\2\u1373\u1374\3\2\2\2\u1374\u1376\3\2\2\2")
buf.write("\u1375\u1373\3\2\2\2\u1376\u1378\7)\2\2\u1377\u136f\3")
buf.write("\2\2\2\u1378\u1379\3\2\2\2\u1379\u1377\3\2\2\2\u1379\u137a")
buf.write("\3\2\2\2\u137a\u03c6\3\2\2\2\u137b\u1384\5\u0463\u0232")
buf.write("\2\u137c\u1380\7)\2\2\u137d\u137f\t\3\2\2\u137e\u137d")
buf.write("\3\2\2\2\u137f\u1382\3\2\2\2\u1380\u137e\3\2\2\2\u1380")
buf.write("\u1381\3\2\2\2\u1381\u1383\3\2\2\2\u1382\u1380\3\2\2\2")
buf.write("\u1383\u1385\7)\2\2\u1384\u137c\3\2\2\2\u1385\u1386\3")
buf.write("\2\2\2\u1386\u1384\3\2\2\2\u1386\u1387\3\2\2\2\u1387\u03c8")
buf.write("\3\2\2\2\u1388\u1389\7\60\2\2\u1389\u138a\7\60\2\2\u138a")
buf.write("\u03ca\3\2\2\2\u138b\u138c\7\60\2\2\u138c\u03cc\3\2\2")
buf.write("\2\u138d\u138e\5\u0423\u0212\2\u138e\u03ce\3\2\2\2\u138f")
buf.write("\u1398\5\u0425\u0213\2\u1390\u1392\t\4\2\2\u1391\u1393")
buf.write("\t\5\2\2\u1392\u1391\3\2\2\2\u1392\u1393\3\2\2\2\u1393")
buf.write("\u1396\3\2\2\2\u1394\u1397\5\u0425\u0213\2\u1395\u1397")
buf.write("\5\u0423\u0212\2\u1396\u1394\3\2\2\2\u1396\u1395\3\2\2")
buf.write("\2\u1397\u1399\3\2\2\2\u1398\u1390\3\2\2\2\u1398\u1399")
buf.write("\3\2\2\2\u1399\u139c\3\2\2\2\u139a\u139d\5\u043b\u021e")
buf.write("\2\u139b\u139d\5\u043f\u0220\2\u139c\u139a\3\2\2\2\u139c")
buf.write("\u139b\3\2\2\2\u139c\u139d\3\2\2\2\u139d\u03d0\3\2\2\2")
buf.write("\u139e\u13a5\7)\2\2\u139f\u13a4\n\2\2\2\u13a0\u13a1\7")
buf.write(")\2\2\u13a1\u13a4\7)\2\2\u13a2\u13a4\5\u042d\u0217\2\u13a3")
buf.write("\u139f\3\2\2\2\u13a3\u13a0\3\2\2\2\u13a3\u13a2\3\2\2\2")
buf.write("\u13a4\u13a7\3\2\2\2\u13a5\u13a3\3\2\2\2\u13a5\u13a6\3")
buf.write("\2\2\2\u13a6\u13a8\3\2\2\2\u13a7\u13a5\3\2\2\2\u13a8\u13a9")
buf.write("\7)\2\2\u13a9\u03d2\3\2\2\2\u13aa\u13af\5\u0455\u022b")
buf.write("\2\u13ab\u13b0\5\u03d7\u01ec\2\u13ac\u13b0\5\u03d9\u01ed")
buf.write("\2\u13ad\u13b0\5\u03db\u01ee\2\u13ae\u13b0\5\u03dd\u01ef")
buf.write("\2\u13af\u13ab\3\2\2\2\u13af\u13ac\3\2\2\2\u13af\u13ad")
buf.write("\3\2\2\2\u13af\u13ae\3\2\2\2\u13b0\u13b1\3\2\2\2\u13b1")
buf.write("\u13b2\b\u01ea\2\2\u13b2\u03d4\3\2\2\2\u13b3\u13b4\7)")
buf.write("\2\2\u13b4\u03d6\3\2\2\2\u13b5\u13b6\5\u03d5\u01eb\2\u13b6")
buf.write("\u13ba\7>\2\2\u13b7\u13b9\13\2\2\2\u13b8\u13b7\3\2\2\2")
buf.write("\u13b9\u13bc\3\2\2\2\u13ba\u13bb\3\2\2\2\u13ba\u13b8\3")
buf.write("\2\2\2\u13bb\u13bd\3\2\2\2\u13bc\u13ba\3\2\2\2\u13bd\u13be")
buf.write("\7@\2\2\u13be\u13bf\5\u03d5\u01eb\2\u13bf\u03d8\3\2\2")
buf.write("\2\u13c0\u13c1\5\u03d5\u01eb\2\u13c1\u13c5\7}\2\2\u13c2")
buf.write("\u13c4\13\2\2\2\u13c3\u13c2\3\2\2\2\u13c4\u13c7\3\2\2")
buf.write("\2\u13c5\u13c6\3\2\2\2\u13c5\u13c3\3\2\2\2\u13c6\u13c8")
buf.write("\3\2\2\2\u13c7\u13c5\3\2\2\2\u13c8\u13c9\7\177\2\2\u13c9")
buf.write("\u13ca\5\u03d5\u01eb\2\u13ca\u03da\3\2\2\2\u13cb\u13cc")
buf.write("\5\u03d5\u01eb\2\u13cc\u13d0\7]\2\2\u13cd\u13cf\13\2\2")
buf.write("\2\u13ce\u13cd\3\2\2\2\u13cf\u13d2\3\2\2\2\u13d0\u13d1")
buf.write("\3\2\2\2\u13d0\u13ce\3\2\2\2\u13d1\u13d3\3\2\2\2\u13d2")
buf.write("\u13d0\3\2\2\2\u13d3\u13d4\7_\2\2\u13d4\u13d5\5\u03d5")
buf.write("\u01eb\2\u13d5\u03dc\3\2\2\2\u13d6\u13d7\5\u03d5\u01eb")
buf.write("\2\u13d7\u13db\7*\2\2\u13d8\u13da\13\2\2\2\u13d9\u13d8")
buf.write("\3\2\2\2\u13da\u13dd\3\2\2\2\u13db\u13dc\3\2\2\2\u13db")
buf.write("\u13d9\3\2\2\2\u13dc\u13de\3\2\2\2\u13dd\u13db\3\2\2\2")
buf.write("\u13de\u13df\7+\2\2\u13df\u13e0\5\u03d5\u01eb\2\u13e0")
buf.write("\u03de\3\2\2\2\u13e1\u13e2\n\6\2\2\u13e2\u03e0\3\2\2\2")
buf.write("\u13e3\u13e7\7$\2\2\u13e4\u13e8\n\7\2\2\u13e5\u13e6\7")
buf.write("$\2\2\u13e6\u13e8\7$\2\2\u13e7\u13e4\3\2\2\2\u13e7\u13e5")
buf.write("\3\2\2\2\u13e8\u13e9\3\2\2\2\u13e9\u13e7\3\2\2\2\u13e9")
buf.write("\u13ea\3\2\2\2\u13ea\u13eb\3\2\2\2\u13eb\u13ec\7$\2\2")
buf.write("\u13ec\u03e2\3\2\2\2\u13ed\u13ee\7\'\2\2\u13ee\u03e4\3")
buf.write("\2\2\2\u13ef\u13f0\7(\2\2\u13f0\u03e6\3\2\2\2\u13f1\u13f2")
buf.write("\7*\2\2\u13f2\u03e8\3\2\2\2\u13f3\u13f4\7+\2\2\u13f4\u03ea")
buf.write("\3\2\2\2\u13f5\u13f6\7,\2\2\u13f6\u13f7\7,\2\2\u13f7\u03ec")
buf.write("\3\2\2\2\u13f8\u13f9\7,\2\2\u13f9\u03ee\3\2\2\2\u13fa")
buf.write("\u13fb\7-\2\2\u13fb\u03f0\3\2\2\2\u13fc\u13fd\7/\2\2\u13fd")
buf.write("\u03f2\3\2\2\2\u13fe\u13ff\7.\2\2\u13ff\u03f4\3\2\2\2")
buf.write("\u1400\u1401\7\61\2\2\u1401\u03f6\3\2\2\2\u1402\u1403")
buf.write("\7B\2\2\u1403\u03f8\3\2\2\2\u1404\u1405\7<\2\2\u1405\u1406")
buf.write("\7?\2\2\u1406\u03fa\3\2\2\2\u1407\u1408\7<\2\2\u1408\u140d")
buf.write("\5\u0421\u0211\2\u1409\u140c\5\u0421\u0211\2\u140a\u140c")
buf.write("\t\b\2\2\u140b\u1409\3\2\2\2\u140b\u140a\3\2\2\2\u140c")
buf.write("\u140f\3\2\2\2\u140d\u140b\3\2\2\2\u140d\u140e\3\2\2\2")
buf.write("\u140e\u1416\3\2\2\2\u140f\u140d\3\2\2\2\u1410\u1411\7")
buf.write("<\2\2\u1411\u1416\5\u03e1\u01f1\2\u1412\u1413\7<\2\2\u1413")
buf.write("\u1416\5\u03cd\u01e7\2\u1414\u1416\5\u0411\u0209\2\u1415")
buf.write("\u1407\3\2\2\2\u1415\u1410\3\2\2\2\u1415\u1412\3\2\2\2")
buf.write("\u1415\u1414\3\2\2\2\u1416\u03fc\3\2\2\2\u1417\u1418\7")
buf.write("<\2\2\u1418\u03fe\3\2\2\2\u1419\u141a\7=\2\2\u141a\u0400")
buf.write("\3\2\2\2\u141b\u141c\7>\2\2\u141c\u141d\7?\2\2\u141d\u0402")
buf.write("\3\2\2\2\u141e\u141f\7>\2\2\u141f\u0404\3\2\2\2\u1420")
buf.write("\u1421\7@\2\2\u1421\u1422\7?\2\2\u1422\u0406\3\2\2\2\u1423")
buf.write("\u1424\7#\2\2\u1424\u142c\7?\2\2\u1425\u1426\7>\2\2\u1426")
buf.write("\u142c\7@\2\2\u1427\u1428\7`\2\2\u1428\u142c\7?\2\2\u1429")
buf.write("\u142a\7\u0080\2\2\u142a\u142c\7?\2\2\u142b\u1423\3\2")
buf.write("\2\2\u142b\u1425\3\2\2\2\u142b\u1427\3\2\2\2\u142b\u1429")
buf.write("\3\2\2\2\u142c\u0408\3\2\2\2\u142d\u142e\7`\2\2\u142e")
buf.write("\u040a\3\2\2\2\u142f\u1430\7\u0080\2\2\u1430\u040c\3\2")
buf.write("\2\2\u1431\u1432\7#\2\2\u1432\u040e\3\2\2\2\u1433\u1434")
buf.write("\7@\2\2\u1434\u0410\3\2\2\2\u1435\u1436\7A\2\2\u1436\u0412")
buf.write("\3\2\2\2\u1437\u1438\7~\2\2\u1438\u1439\7~\2\2\u1439\u0414")
buf.write("\3\2\2\2\u143a\u143b\7~\2\2\u143b\u0416\3\2\2\2\u143c")
buf.write("\u143d\7?\2\2\u143d\u0418\3\2\2\2\u143e\u143f\7]\2\2\u143f")
buf.write("\u041a\3\2\2\2\u1440\u1441\7_\2\2\u1441\u041c\3\2\2\2")
buf.write("\u1442\u1443\7a\2\2\u1443\u041e\3\2\2\2\u1444\u1446\t")
buf.write("\t\2\2\u1445\u1444\3\2\2\2\u1446\u1447\3\2\2\2\u1447\u1445")
buf.write("\3\2\2\2\u1447\u1448\3\2\2\2\u1448\u1449\3\2\2\2\u1449")
buf.write("\u144a\b\u0210\3\2\u144a\u0420\3\2\2\2\u144b\u144c\t\n")
buf.write("\2\2\u144c\u0422\3\2\2\2\u144d\u144f\4\62;\2\u144e\u144d")
buf.write("\3\2\2\2\u144f\u1450\3\2\2\2\u1450\u144e\3\2\2\2\u1450")
buf.write("\u1451\3\2\2\2\u1451\u0424\3\2\2\2\u1452\u1454\5\u03cd")
buf.write("\u01e7\2\u1453\u1452\3\2\2\2\u1454\u1457\3\2\2\2\u1455")
buf.write("\u1453\3\2\2\2\u1455\u1456\3\2\2\2\u1456\u1459\3\2\2\2")
buf.write("\u1457\u1455\3\2\2\2\u1458\u145a\7\60\2\2\u1459\u1458")
buf.write("\3\2\2\2\u1459\u145a\3\2\2\2\u145a\u145c\3\2\2\2\u145b")
buf.write("\u145d\5\u03cd\u01e7\2\u145c\u145b\3\2\2\2\u145d\u145e")
buf.write("\3\2\2\2\u145e\u145c\3\2\2\2\u145e\u145f\3\2\2\2\u145f")
buf.write("\u0426\3\2\2\2\u1460\u1461\7/\2\2\u1461\u1462\7/\2\2\u1462")
buf.write("\u1466\3\2\2\2\u1463\u1465\n\13\2\2\u1464\u1463\3\2\2")
buf.write("\2\u1465\u1468\3\2\2\2\u1466\u1464\3\2\2\2\u1466\u1467")
buf.write("\3\2\2\2\u1467\u146b\3\2\2\2\u1468\u1466\3\2\2\2\u1469")
buf.write("\u146c\5\u042d\u0217\2\u146a\u146c\7\2\2\3\u146b\u1469")
buf.write("\3\2\2\2\u146b\u146a\3\2\2\2\u146c\u146d\3\2\2\2\u146d")
buf.write("\u146e\b\u0214\4\2\u146e\u0428\3\2\2\2\u146f\u1470\7\61")
buf.write("\2\2\u1470\u1471\7,\2\2\u1471\u1475\3\2\2\2\u1472\u1474")
buf.write("\13\2\2\2\u1473\u1472\3\2\2\2\u1474\u1477\3\2\2\2\u1475")
buf.write("\u1476\3\2\2\2\u1475\u1473\3\2\2\2\u1476\u1478\3\2\2\2")
buf.write("\u1477\u1475\3\2\2\2\u1478\u1479\7,\2\2\u1479\u147a\7")
buf.write("\61\2\2\u147a\u147b\3\2\2\2\u147b\u147c\b\u0215\4\2\u147c")
buf.write("\u042a\3\2\2\2\u147d\u147e\7r\2\2\u147e\u147f\7t\2\2\u147f")
buf.write("\u1480\7q\2\2\u1480\u1481\7o\2\2\u1481\u1482\7r\2\2\u1482")
buf.write("\u1483\7v\2\2\u1483\u1484\3\2\2\2\u1484\u1488\5\u042f")
buf.write("\u0218\2\u1485\u1487\n\13\2\2\u1486\u1485\3\2\2\2\u1487")
buf.write("\u148a\3\2\2\2\u1488\u1486\3\2\2\2\u1488\u1489\3\2\2\2")
buf.write("\u1489\u148d\3\2\2\2\u148a\u1488\3\2\2\2\u148b\u148e\5")
buf.write("\u042d\u0217\2\u148c\u148e\7\2\2\3\u148d\u148b\3\2\2\2")
buf.write("\u148d\u148c\3\2\2\2\u148e\u042c\3\2\2\2\u148f\u1491\7")
buf.write("\17\2\2\u1490\u148f\3\2\2\2\u1490\u1491\3\2\2\2\u1491")
buf.write("\u1492\3\2\2\2\u1492\u1493\7\f\2\2\u1493\u042e\3\2\2\2")
buf.write("\u1494\u1495\t\f\2\2\u1495\u0430\3\2\2\2\u1496\u149b\5")
buf.write("\u0421\u0211\2\u1497\u149a\5\u0421\u0211\2\u1498\u149a")
buf.write("\t\r\2\2\u1499\u1497\3\2\2\2\u1499\u1498\3\2\2\2\u149a")
buf.write("\u149d\3\2\2\2\u149b\u1499\3\2\2\2\u149b\u149c\3\2\2\2")
buf.write("\u149c\u0432\3\2\2\2\u149d\u149b\3\2\2\2\u149e\u149f\7")
buf.write("B\2\2\u149f\u14a0\7#\2\2\u14a0\u14a1\3\2\2\2\u14a1\u14a2")
buf.write("\b\u021a\4\2\u14a2\u0434\3\2\2\2\u14a3\u14a4\t\16\2\2")
buf.write("\u14a4\u0436\3\2\2\2\u14a5\u14a6\t\17\2\2\u14a6\u0438")
buf.write("\3\2\2\2\u14a7\u14a8\t\20\2\2\u14a8\u043a\3\2\2\2\u14a9")
buf.write("\u14aa\t\21\2\2\u14aa\u043c\3\2\2\2\u14ab\u14ac\t\4\2")
buf.write("\2\u14ac\u043e\3\2\2\2\u14ad\u14ae\t\22\2\2\u14ae\u0440")
buf.write("\3\2\2\2\u14af\u14b0\t\23\2\2\u14b0\u0442\3\2\2\2\u14b1")
buf.write("\u14b2\t\24\2\2\u14b2\u0444\3\2\2\2\u14b3\u14b4\t\25\2")
buf.write("\2\u14b4\u0446\3\2\2\2\u14b5\u14b6\t\26\2\2\u14b6\u0448")
buf.write("\3\2\2\2\u14b7\u14b8\t\27\2\2\u14b8\u044a\3\2\2\2\u14b9")
buf.write("\u14ba\t\30\2\2\u14ba\u044c\3\2\2\2\u14bb\u14bc\t\31\2")
buf.write("\2\u14bc\u044e\3\2\2\2\u14bd\u14be\t\32\2\2\u14be\u0450")
buf.write("\3\2\2\2\u14bf\u14c0\t\33\2\2\u14c0\u0452\3\2\2\2\u14c1")
buf.write("\u14c2\t\34\2\2\u14c2\u0454\3\2\2\2\u14c3\u14c4\t\35\2")
buf.write("\2\u14c4\u0456\3\2\2\2\u14c5\u14c6\t\36\2\2\u14c6\u0458")
buf.write("\3\2\2\2\u14c7\u14c8\t\37\2\2\u14c8\u045a\3\2\2\2\u14c9")
buf.write("\u14ca\t \2\2\u14ca\u045c\3\2\2\2\u14cb\u14cc\t!\2\2\u14cc")
buf.write("\u045e\3\2\2\2\u14cd\u14ce\t\"\2\2\u14ce\u0460\3\2\2\2")
buf.write("\u14cf\u14d0\t#\2\2\u14d0\u0462\3\2\2\2\u14d1\u14d2\t")
buf.write("$\2\2\u14d2\u0464\3\2\2\2\u14d3\u14d4\t%\2\2\u14d4\u0466")
buf.write("\3\2\2\2\u14d5\u14d6\t&\2\2\u14d6\u0468\3\2\2\2\'\2\u1367")
buf.write("\u1369\u1373\u1379\u1380\u1386\u1392\u1396\u1398\u139c")
buf.write("\u13a3\u13a5\u13af\u13ba\u13c5\u13d0\u13db\u13e7\u13e9")
buf.write("\u140b\u140d\u1415\u142b\u1447\u1450\u1455\u1459\u145e")
buf.write("\u1466\u146b\u1475\u1488\u148d\u1490\u1499\u149b\5\t\u01ea")
buf.write("\2\b\2\2\2\3\2")
return buf.getvalue()
class PlSqlLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
A_LETTER = 2
ADD = 3
AFTER = 4
AGENT = 5
AGGREGATE = 6
ALL = 7
ALTER = 8
ANALYZE = 9
AND = 10
ANY = 11
ARRAY = 12
AS = 13
ASSUME = 14
ASSERT = 15
ASC = 16
ASSOCIATE = 17
AT = 18
ATTRIBUTE = 19
AUDIT = 20
AUTHID = 21
AUTO = 22
AUTOMATIC = 23
AUTONOMOUS_TRANSACTION = 24
BATCH = 25
BEFORE = 26
BEGIN = 27
BETWEEN = 28
BFILE = 29
BINARY_DOUBLE = 30
BINARY_FLOAT = 31
BINARY_INTEGER = 32
BLOB = 33
BLOCK = 34
BODY = 35
BOOLEAN = 36
BOTH = 37
BREADTH = 38
BULK = 39
BY = 40
BYTE = 41
C_LETTER = 42
CACHE = 43
CALL = 44
CANONICAL = 45
CASCADE = 46
CASE = 47
CAST = 48
CHAR = 49
CHAR_CS = 50
CHARACTER = 51
CHECK = 52
CHR = 53
CLOB = 54
CLOSE = 55
CLUSTER = 56
COLLECT = 57
COLUMNS = 58
COMMENT = 59
COMMIT = 60
COMMITTED = 61
COMPATIBILITY = 62
COMPILE = 63
COMPOUND = 64
CONNECT = 65
CONNECT_BY_ROOT = 66
CONSTANT = 67
CONSTRAINT = 68
CONSTRAINTS = 69
CONSTRUCTOR = 70
CONTENT = 71
CONTEXT = 72
CONTINUE = 73
CONVERT = 74
CORRUPT_XID = 75
CORRUPT_XID_ALL = 76
COST = 77
COUNT = 78
CREATE = 79
CROSS = 80
CUBE = 81
CURRENT = 82
CURRENT_USER = 83
CURSOR = 84
CUSTOMDATUM = 85
CYCLE = 86
DATA = 87
DATABASE = 88
DATE = 89
DAY = 90
DB_ROLE_CHANGE = 91
DBTIMEZONE = 92
DDL = 93
DEBUG = 94
DEC = 95
DECIMAL = 96
DECLARE = 97
DECOMPOSE = 98
DECREMENT = 99
DEFAULT = 100
DEFAULTS = 101
DEFERRED = 102
DEFINER = 103
DELETE = 104
DEPTH = 105
DESC = 106
DETERMINISTIC = 107
DIMENSION = 108
DISABLE = 109
DISASSOCIATE = 110
DISTINCT = 111
DOCUMENT = 112
DOUBLE = 113
DROP = 114
DSINTERVAL_UNCONSTRAINED = 115
EACH = 116
ELEMENT = 117
ELSE = 118
ELSIF = 119
EMPTY = 120
ENABLE = 121
ENCODING = 122
END = 123
ENTITYESCAPING = 124
ERR = 125
ERRORS = 126
ESCAPE = 127
EVALNAME = 128
EXCEPT = 129
EXCEPTION = 130
EXCEPTION_INIT = 131
EXCEPTIONS = 132
EXCLUDE = 133
EXCLUSIVE = 134
EXECUTE = 135
EXISTS = 136
EXIT = 137
EXPLAIN = 138
EXTERNAL = 139
EXTRACT = 140
FAILURE = 141
FALSE = 142
FETCH = 143
FINAL = 144
FIRST = 145
FIRST_VALUE = 146
FLOAT = 147
FOLLOWING = 148
FOLLOWS = 149
FOR = 150
FORALL = 151
FORCE = 152
FROM = 153
FULL = 154
FUNCTION = 155
GOTO = 156
GRANT = 157
GROUP = 158
GROUPING = 159
HASH = 160
HAVING = 161
HIDE = 162
HOUR = 163
IF = 164
IGNORE = 165
IMMEDIATE = 166
IN = 167
INCLUDE = 168
INCLUDING = 169
INCREMENT = 170
INDENT = 171
INDEX = 172
INDEXED = 173
INDICATOR = 174
INDICES = 175
INFINITE = 176
INLINE = 177
INNER = 178
INOUT = 179
INSERT = 180
INSTANTIABLE = 181
INSTEAD = 182
INT = 183
INTEGER = 184
INTERSECT = 185
INTERVAL = 186
INTO = 187
INVALIDATE = 188
IS = 189
ISOLATION = 190
ITERATE = 191
JAVA = 192
JOIN = 193
KEEP = 194
LANGUAGE = 195
LAST = 196
LAST_VALUE = 197
LEADING = 198
LEFT = 199
LEVEL = 200
LIBRARY = 201
LIKE = 202
LIKE2 = 203
LIKE4 = 204
LIKEC = 205
LIMIT = 206
LOCAL = 207
LOCK = 208
LOCKED = 209
LOG = 210
LOGOFF = 211
LOGON = 212
LONG = 213
LOOP = 214
MAIN = 215
MAP = 216
MATCHED = 217
MAXVALUE = 218
MEASURES = 219
MEMBER = 220
MERGE = 221
MINUS = 222
MINUTE = 223
MINVALUE = 224
MLSLABEL = 225
MODE = 226
MODEL = 227
MODIFY = 228
MONTH = 229
MULTISET = 230
NAME = 231
NAN = 232
NATURAL = 233
NATURALN = 234
NAV = 235
NCHAR = 236
NCHAR_CS = 237
NCLOB = 238
NESTED = 239
NEW = 240
NO = 241
NOAUDIT = 242
NOCACHE = 243
NOCOPY = 244
NOCYCLE = 245
NOENTITYESCAPING = 246
NOMAXVALUE = 247
NOMINVALUE = 248
NONE = 249
NOORDER = 250
NOSCHEMACHECK = 251
NOT = 252
NOWAIT = 253
NULL = 254
NULLS = 255
NUMBER = 256
NUMERIC = 257
NVARCHAR2 = 258
OBJECT = 259
OF = 260
OFF = 261
OID = 262
OLD = 263
ON = 264
ONLY = 265
OPEN = 266
OPTION = 267
OR = 268
ORADATA = 269
ORDER = 270
ORDINALITY = 271
OSERROR = 272
OUT = 273
OUTER = 274
OVER = 275
OVERRIDING = 276
PACKAGE = 277
PARALLEL_ENABLE = 278
PARAMETERS = 279
PARENT = 280
PARTITION = 281
PASSING = 282
PATH = 283
PERCENT_ROWTYPE = 284
PERCENT_TYPE = 285
PIPELINED = 286
PIVOT = 287
PLAN = 288
PLS_INTEGER = 289
POSITIVE = 290
POSITIVEN = 291
PRAGMA = 292
PRECEDING = 293
PRECISION = 294
PRESENT = 295
PRIOR = 296
PROCEDURE = 297
RAISE = 298
RANGE = 299
RAW = 300
READ = 301
REAL = 302
RECORD = 303
REF = 304
REFERENCE = 305
REFERENCING = 306
REJECT = 307
RELIES_ON = 308
RENAME = 309
REPLACE = 310
RESPECT = 311
RESTRICT_REFERENCES = 312
RESULT = 313
RESULT_CACHE = 314
RETURN = 315
RETURNING = 316
REUSE = 317
REVERSE = 318
REVOKE = 319
RIGHT = 320
ROLLBACK = 321
ROLLUP = 322
ROW = 323
ROWID = 324
ROWS = 325
RULES = 326
SAMPLE = 327
SAVE = 328
SAVEPOINT = 329
SCHEMA = 330
SCHEMACHECK = 331
SCN = 332
SEARCH = 333
SECOND = 334
SEED = 335
SEGMENT = 336
SELECT = 337
SELF = 338
SEQUENCE = 339
SEQUENTIAL = 340
SERIALIZABLE = 341
SERIALLY_REUSABLE = 342
SERVERERROR = 343
SESSIONTIMEZONE = 344
SET = 345
SETS = 346
SETTINGS = 347
SHARE = 348
SHOW = 349
SHUTDOWN = 350
SIBLINGS = 351
SIGNTYPE = 352
SIMPLE_INTEGER = 353
SINGLE = 354
SIZE = 355
SKIP_ = 356
SMALLINT = 357
SNAPSHOT = 358
SOME = 359
SPECIFICATION = 360
SQLDATA = 361
SQLERROR = 362
STANDALONE = 363
START = 364
STARTUP = 365
STATEMENT = 366
STATEMENT_ID = 367
STATIC = 368
STATISTICS = 369
STRING = 370
SUBMULTISET = 371
SUBPARTITION = 372
SUBSTITUTABLE = 373
SUBTYPE = 374
SUCCESS = 375
SUSPEND = 376
TABLE = 377
THE = 378
THEN = 379
TIME = 380
TIMESTAMP = 381
TIMESTAMP_LTZ_UNCONSTRAINED = 382
TIMESTAMP_TZ_UNCONSTRAINED = 383
TIMESTAMP_UNCONSTRAINED = 384
TIMEZONE_ABBR = 385
TIMEZONE_HOUR = 386
TIMEZONE_MINUTE = 387
TIMEZONE_REGION = 388
TO = 389
TRAILING = 390
TRANSACTION = 391
TRANSLATE = 392
TREAT = 393
TRIGGER = 394
TRIM = 395
TRUE = 396
TRUNCATE = 397
TYPE = 398
UNBOUNDED = 399
UNDER = 400
UNION = 401
UNIQUE = 402
UNLIMITED = 403
UNPIVOT = 404
UNTIL = 405
UPDATE = 406
UPDATED = 407
UPSERT = 408
UROWID = 409
USE = 410
USING = 411
VALIDATE = 412
VALUE = 413
VALUES = 414
VARCHAR = 415
VARCHAR2 = 416
VARIABLE = 417
VARRAY = 418
VARYING = 419
VERSION = 420
VERSIONS = 421
WAIT = 422
WARNING = 423
WELLFORMED = 424
WHEN = 425
WHENEVER = 426
WHERE = 427
WHILE = 428
WITH = 429
WITHIN = 430
WORK = 431
WRITE = 432
XML = 433
XMLAGG = 434
XMLATTRIBUTES = 435
XMLCAST = 436
XMLCOLATTVAL = 437
XMLELEMENT = 438
XMLEXISTS = 439
XMLFOREST = 440
XMLNAMESPACES = 441
XMLPARSE = 442
XMLPI = 443
XMLQUERY = 444
XMLROOT = 445
XMLSERIALIZE = 446
XMLTABLE = 447
YEAR = 448
YES = 449
YMINTERVAL_UNCONSTRAINED = 450
ZONE = 451
PREDICTION = 452
PREDICTION_BOUNDS = 453
PREDICTION_COST = 454
PREDICTION_DETAILS = 455
PREDICTION_PROBABILITY = 456
PREDICTION_SET = 457
CUME_DIST = 458
DENSE_RANK = 459
LISTAGG = 460
PERCENT_RANK = 461
PERCENTILE_CONT = 462
PERCENTILE_DISC = 463
RANK = 464
AVG = 465
CORR = 466
LAG = 467
LEAD = 468
MAX = 469
MEDIAN = 470
MIN = 471
NTILE = 472
RATIO_TO_REPORT = 473
ROW_NUMBER = 474
SUM = 475
VARIANCE = 476
REGR_ = 477
STDDEV = 478
VAR_ = 479
COVAR_ = 480
NATIONAL_CHAR_STRING_LIT = 481
BIT_STRING_LIT = 482
HEX_STRING_LIT = 483
DOUBLE_PERIOD = 484
PERIOD = 485
UNSIGNED_INTEGER = 486
APPROXIMATE_NUM_LIT = 487
CHAR_STRING = 488
DELIMITED_ID = 489
PERCENT = 490
AMPERSAND = 491
LEFT_PAREN = 492
RIGHT_PAREN = 493
DOUBLE_ASTERISK = 494
ASTERISK = 495
PLUS_SIGN = 496
MINUS_SIGN = 497
COMMA = 498
SOLIDUS = 499
AT_SIGN = 500
ASSIGN_OP = 501
BINDVAR = 502
COLON = 503
SEMICOLON = 504
LESS_THAN_OR_EQUALS_OP = 505
LESS_THAN_OP = 506
GREATER_THAN_OR_EQUALS_OP = 507
NOT_EQUAL_OP = 508
CARRET_OPERATOR_PART = 509
TILDE_OPERATOR_PART = 510
EXCLAMATION_OPERATOR_PART = 511
GREATER_THAN_OP = 512
CONCATENATION_OP = 513
VERTICAL_BAR = 514
EQUALS_OP = 515
LEFT_BRACKET = 516
RIGHT_BRACKET = 517
INTRODUCER = 518
SPACES = 519
SINGLE_LINE_COMMENT = 520
MULTI_LINE_COMMENT = 521
PROMPT = 522
REGULAR_ID = 523
ZV = 524
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'..'", "'.'", "'%'", "'&'", "'('", "')'", "'**'", "'*'", "'+'",
"'-'", "','", "'/'", "'@'", "':='", "':'", "';'", "'<='", "'<'",
"'>='", "'^'", "'~'", "'!'", "'>'", "'||'", "'|'", "'='", "'['",
"']'", "'_'", "'@!'" ]
symbolicNames = [ "<INVALID>",
"A_LETTER", "ADD", "AFTER", "AGENT", "AGGREGATE", "ALL", "ALTER",
"ANALYZE", "AND", "ANY", "ARRAY", "AS", "ASSUME", "ASSERT",
"ASC", "ASSOCIATE", "AT", "ATTRIBUTE", "AUDIT", "AUTHID", "AUTO",
"AUTOMATIC", "AUTONOMOUS_TRANSACTION", "BATCH", "BEFORE", "BEGIN",
"BETWEEN", "BFILE", "BINARY_DOUBLE", "BINARY_FLOAT", "BINARY_INTEGER",
"BLOB", "BLOCK", "BODY", "BOOLEAN", "BOTH", "BREADTH", "BULK",
"BY", "BYTE", "C_LETTER", "CACHE", "CALL", "CANONICAL", "CASCADE",
"CASE", "CAST", "CHAR", "CHAR_CS", "CHARACTER", "CHECK", "CHR",
"CLOB", "CLOSE", "CLUSTER", "COLLECT", "COLUMNS", "COMMENT",
"COMMIT", "COMMITTED", "COMPATIBILITY", "COMPILE", "COMPOUND",
"CONNECT", "CONNECT_BY_ROOT", "CONSTANT", "CONSTRAINT", "CONSTRAINTS",
"CONSTRUCTOR", "CONTENT", "CONTEXT", "CONTINUE", "CONVERT",
"CORRUPT_XID", "CORRUPT_XID_ALL", "COST", "COUNT", "CREATE",
"CROSS", "CUBE", "CURRENT", "CURRENT_USER", "CURSOR", "CUSTOMDATUM",
"CYCLE", "DATA", "DATABASE", "DATE", "DAY", "DB_ROLE_CHANGE",
"DBTIMEZONE", "DDL", "DEBUG", "DEC", "DECIMAL", "DECLARE", "DECOMPOSE",
"DECREMENT", "DEFAULT", "DEFAULTS", "DEFERRED", "DEFINER", "DELETE",
"DEPTH", "DESC", "DETERMINISTIC", "DIMENSION", "DISABLE", "DISASSOCIATE",
"DISTINCT", "DOCUMENT", "DOUBLE", "DROP", "DSINTERVAL_UNCONSTRAINED",
"EACH", "ELEMENT", "ELSE", "ELSIF", "EMPTY", "ENABLE", "ENCODING",
"END", "ENTITYESCAPING", "ERR", "ERRORS", "ESCAPE", "EVALNAME",
"EXCEPT", "EXCEPTION", "EXCEPTION_INIT", "EXCEPTIONS", "EXCLUDE",
"EXCLUSIVE", "EXECUTE", "EXISTS", "EXIT", "EXPLAIN", "EXTERNAL",
"EXTRACT", "FAILURE", "FALSE", "FETCH", "FINAL", "FIRST", "FIRST_VALUE",
"FLOAT", "FOLLOWING", "FOLLOWS", "FOR", "FORALL", "FORCE", "FROM",
"FULL", "FUNCTION", "GOTO", "GRANT", "GROUP", "GROUPING", "HASH",
"HAVING", "HIDE", "HOUR", "IF", "IGNORE", "IMMEDIATE", "IN",
"INCLUDE", "INCLUDING", "INCREMENT", "INDENT", "INDEX", "INDEXED",
"INDICATOR", "INDICES", "INFINITE", "INLINE", "INNER", "INOUT",
"INSERT", "INSTANTIABLE", "INSTEAD", "INT", "INTEGER", "INTERSECT",
"INTERVAL", "INTO", "INVALIDATE", "IS", "ISOLATION", "ITERATE",
"JAVA", "JOIN", "KEEP", "LANGUAGE", "LAST", "LAST_VALUE", "LEADING",
"LEFT", "LEVEL", "LIBRARY", "LIKE", "LIKE2", "LIKE4", "LIKEC",
"LIMIT", "LOCAL", "LOCK", "LOCKED", "LOG", "LOGOFF", "LOGON",
"LONG", "LOOP", "MAIN", "MAP", "MATCHED", "MAXVALUE", "MEASURES",
"MEMBER", "MERGE", "MINUS", "MINUTE", "MINVALUE", "MLSLABEL",
"MODE", "MODEL", "MODIFY", "MONTH", "MULTISET", "NAME", "NAN",
"NATURAL", "NATURALN", "NAV", "NCHAR", "NCHAR_CS", "NCLOB",
"NESTED", "NEW", "NO", "NOAUDIT", "NOCACHE", "NOCOPY", "NOCYCLE",
"NOENTITYESCAPING", "NOMAXVALUE", "NOMINVALUE", "NONE", "NOORDER",
"NOSCHEMACHECK", "NOT", "NOWAIT", "NULL", "NULLS", "NUMBER",
"NUMERIC", "NVARCHAR2", "OBJECT", "OF", "OFF", "OID", "OLD",
"ON", "ONLY", "OPEN", "OPTION", "OR", "ORADATA", "ORDER", "ORDINALITY",
"OSERROR", "OUT", "OUTER", "OVER", "OVERRIDING", "PACKAGE",
"PARALLEL_ENABLE", "PARAMETERS", "PARENT", "PARTITION", "PASSING",
"PATH", "PERCENT_ROWTYPE", "PERCENT_TYPE", "PIPELINED", "PIVOT",
"PLAN", "PLS_INTEGER", "POSITIVE", "POSITIVEN", "PRAGMA", "PRECEDING",
"PRECISION", "PRESENT", "PRIOR", "PROCEDURE", "RAISE", "RANGE",
"RAW", "READ", "REAL", "RECORD", "REF", "REFERENCE", "REFERENCING",
"REJECT", "RELIES_ON", "RENAME", "REPLACE", "RESPECT", "RESTRICT_REFERENCES",
"RESULT", "RESULT_CACHE", "RETURN", "RETURNING", "REUSE", "REVERSE",
"REVOKE", "RIGHT", "ROLLBACK", "ROLLUP", "ROW", "ROWID", "ROWS",
"RULES", "SAMPLE", "SAVE", "SAVEPOINT", "SCHEMA", "SCHEMACHECK",
"SCN", "SEARCH", "SECOND", "SEED", "SEGMENT", "SELECT", "SELF",
"SEQUENCE", "SEQUENTIAL", "SERIALIZABLE", "SERIALLY_REUSABLE",
"SERVERERROR", "SESSIONTIMEZONE", "SET", "SETS", "SETTINGS",
"SHARE", "SHOW", "SHUTDOWN", "SIBLINGS", "SIGNTYPE", "SIMPLE_INTEGER",
"SINGLE", "SIZE", "SKIP_", "SMALLINT", "SNAPSHOT", "SOME", "SPECIFICATION",
"SQLDATA", "SQLERROR", "STANDALONE", "START", "STARTUP", "STATEMENT",
"STATEMENT_ID", "STATIC", "STATISTICS", "STRING", "SUBMULTISET",
"SUBPARTITION", "SUBSTITUTABLE", "SUBTYPE", "SUCCESS", "SUSPEND",
"TABLE", "THE", "THEN", "TIME", "TIMESTAMP", "TIMESTAMP_LTZ_UNCONSTRAINED",
"TIMESTAMP_TZ_UNCONSTRAINED", "TIMESTAMP_UNCONSTRAINED", "TIMEZONE_ABBR",
"TIMEZONE_HOUR", "TIMEZONE_MINUTE", "TIMEZONE_REGION", "TO",
"TRAILING", "TRANSACTION", "TRANSLATE", "TREAT", "TRIGGER",
"TRIM", "TRUE", "TRUNCATE", "TYPE", "UNBOUNDED", "UNDER", "UNION",
"UNIQUE", "UNLIMITED", "UNPIVOT", "UNTIL", "UPDATE", "UPDATED",
"UPSERT", "UROWID", "USE", "USING", "VALIDATE", "VALUE", "VALUES",
"VARCHAR", "VARCHAR2", "VARIABLE", "VARRAY", "VARYING", "VERSION",
"VERSIONS", "WAIT", "WARNING", "WELLFORMED", "WHEN", "WHENEVER",
"WHERE", "WHILE", "WITH", "WITHIN", "WORK", "WRITE", "XML",
"XMLAGG", "XMLATTRIBUTES", "XMLCAST", "XMLCOLATTVAL", "XMLELEMENT",
"XMLEXISTS", "XMLFOREST", "XMLNAMESPACES", "XMLPARSE", "XMLPI",
"XMLQUERY", "XMLROOT", "XMLSERIALIZE", "XMLTABLE", "YEAR", "YES",
"YMINTERVAL_UNCONSTRAINED", "ZONE", "PREDICTION", "PREDICTION_BOUNDS",
"PREDICTION_COST", "PREDICTION_DETAILS", "PREDICTION_PROBABILITY",
"PREDICTION_SET", "CUME_DIST", "DENSE_RANK", "LISTAGG", "PERCENT_RANK",
"PERCENTILE_CONT", "PERCENTILE_DISC", "RANK", "AVG", "CORR",
"LAG", "LEAD", "MAX", "MEDIAN", "MIN", "NTILE", "RATIO_TO_REPORT",
"ROW_NUMBER", "SUM", "VARIANCE", "REGR_", "STDDEV", "VAR_",
"COVAR_", "NATIONAL_CHAR_STRING_LIT", "BIT_STRING_LIT", "HEX_STRING_LIT",
"DOUBLE_PERIOD", "PERIOD", "UNSIGNED_INTEGER", "APPROXIMATE_NUM_LIT",
"CHAR_STRING", "DELIMITED_ID", "PERCENT", "AMPERSAND", "LEFT_PAREN",
"RIGHT_PAREN", "DOUBLE_ASTERISK", "ASTERISK", "PLUS_SIGN", "MINUS_SIGN",
"COMMA", "SOLIDUS", "AT_SIGN", "ASSIGN_OP", "BINDVAR", "COLON",
"SEMICOLON", "LESS_THAN_OR_EQUALS_OP", "LESS_THAN_OP", "GREATER_THAN_OR_EQUALS_OP",
"NOT_EQUAL_OP", "CARRET_OPERATOR_PART", "TILDE_OPERATOR_PART",
"EXCLAMATION_OPERATOR_PART", "GREATER_THAN_OP", "CONCATENATION_OP",
"VERTICAL_BAR", "EQUALS_OP", "LEFT_BRACKET", "RIGHT_BRACKET",
"INTRODUCER", "SPACES", "SINGLE_LINE_COMMENT", "MULTI_LINE_COMMENT",
"PROMPT", "REGULAR_ID", "ZV" ]
ruleNames = [ "T__0", "A_LETTER", "ADD", "AFTER", "AGENT", "AGGREGATE",
"ALL", "ALTER", "ANALYZE", "AND", "ANY", "ARRAY", "AS",
"ASSUME", "ASSERT", "ASC", "ASSOCIATE", "AT", "ATTRIBUTE",
"AUDIT", "AUTHID", "AUTO", "AUTOMATIC", "AUTONOMOUS_TRANSACTION",
"BATCH", "BEFORE", "BEGIN", "BETWEEN", "BFILE", "BINARY_DOUBLE",
"BINARY_FLOAT", "BINARY_INTEGER", "BLOB", "BLOCK", "BODY",
"BOOLEAN", "BOTH", "BREADTH", "BULK", "BY", "BYTE", "C_LETTER",
"CACHE", "CALL", "CANONICAL", "CASCADE", "CASE", "CAST",
"CHAR", "CHAR_CS", "CHARACTER", "CHECK", "CHR", "CLOB",
"CLOSE", "CLUSTER", "COLLECT", "COLUMNS", "COMMENT", "COMMIT",
"COMMITTED", "COMPATIBILITY", "COMPILE", "COMPOUND", "CONNECT",
"CONNECT_BY_ROOT", "CONSTANT", "CONSTRAINT", "CONSTRAINTS",
"CONSTRUCTOR", "CONTENT", "CONTEXT", "CONTINUE", "CONVERT",
"CORRUPT_XID", "CORRUPT_XID_ALL", "COST", "COUNT", "CREATE",
"CROSS", "CUBE", "CURRENT", "CURRENT_USER", "CURSOR",
"CUSTOMDATUM", "CYCLE", "DATA", "DATABASE", "DATE", "DAY",
"DB_ROLE_CHANGE", "DBTIMEZONE", "DDL", "DEBUG", "DEC",
"DECIMAL", "DECLARE", "DECOMPOSE", "DECREMENT", "DEFAULT",
"DEFAULTS", "DEFERRED", "DEFINER", "DELETE", "DEPTH",
"DESC", "DETERMINISTIC", "DIMENSION", "DISABLE", "DISASSOCIATE",
"DISTINCT", "DOCUMENT", "DOUBLE", "DROP", "DSINTERVAL_UNCONSTRAINED",
"EACH", "ELEMENT", "ELSE", "ELSIF", "EMPTY", "ENABLE",
"ENCODING", "END", "ENTITYESCAPING", "ERR", "ERRORS",
"ESCAPE", "EVALNAME", "EXCEPT", "EXCEPTION", "EXCEPTION_INIT",
"EXCEPTIONS", "EXCLUDE", "EXCLUSIVE", "EXECUTE", "EXISTS",
"EXIT", "EXPLAIN", "EXTERNAL", "EXTRACT", "FAILURE", "FALSE",
"FETCH", "FINAL", "FIRST", "FIRST_VALUE", "FLOAT", "FOLLOWING",
"FOLLOWS", "FOR", "FORALL", "FORCE", "FROM", "FULL", "FUNCTION",
"GOTO", "GRANT", "GROUP", "GROUPING", "HASH", "HAVING",
"HIDE", "HOUR", "IF", "IGNORE", "IMMEDIATE", "IN", "INCLUDE",
"INCLUDING", "INCREMENT", "INDENT", "INDEX", "INDEXED",
"INDICATOR", "INDICES", "INFINITE", "INLINE", "INNER",
"INOUT", "INSERT", "INSTANTIABLE", "INSTEAD", "INT", "INTEGER",
"INTERSECT", "INTERVAL", "INTO", "INVALIDATE", "IS", "ISOLATION",
"ITERATE", "JAVA", "JOIN", "KEEP", "LANGUAGE", "LAST",
"LAST_VALUE", "LEADING", "LEFT", "LEVEL", "LIBRARY", "LIKE",
"LIKE2", "LIKE4", "LIKEC", "LIMIT", "LOCAL", "LOCK", "LOCKED",
"LOG", "LOGOFF", "LOGON", "LONG", "LOOP", "MAIN", "MAP",
"MATCHED", "MAXVALUE", "MEASURES", "MEMBER", "MERGE",
"MINUS", "MINUTE", "MINVALUE", "MLSLABEL", "MODE", "MODEL",
"MODIFY", "MONTH", "MULTISET", "NAME", "NAN", "NATURAL",
"NATURALN", "NAV", "NCHAR", "NCHAR_CS", "NCLOB", "NESTED",
"NEW", "NO", "NOAUDIT", "NOCACHE", "NOCOPY", "NOCYCLE",
"NOENTITYESCAPING", "NOMAXVALUE", "NOMINVALUE", "NONE",
"NOORDER", "NOSCHEMACHECK", "NOT", "NOWAIT", "NULL", "NULLS",
"NUMBER", "NUMERIC", "NVARCHAR2", "OBJECT", "OF", "OFF",
"OID", "OLD", "ON", "ONLY", "OPEN", "OPTION", "OR", "ORADATA",
"ORDER", "ORDINALITY", "OSERROR", "OUT", "OUTER", "OVER",
"OVERRIDING", "PACKAGE", "PARALLEL_ENABLE", "PARAMETERS",
"PARENT", "PARTITION", "PASSING", "PATH", "PERCENT_ROWTYPE",
"PERCENT_TYPE", "PIPELINED", "PIVOT", "PLAN", "PLS_INTEGER",
"POSITIVE", "POSITIVEN", "PRAGMA", "PRECEDING", "PRECISION",
"PRESENT", "PRIOR", "PROCEDURE", "RAISE", "RANGE", "RAW",
"READ", "REAL", "RECORD", "REF", "REFERENCE", "REFERENCING",
"REJECT", "RELIES_ON", "RENAME", "REPLACE", "RESPECT",
"RESTRICT_REFERENCES", "RESULT", "RESULT_CACHE", "RETURN",
"RETURNING", "REUSE", "REVERSE", "REVOKE", "RIGHT", "ROLLBACK",
"ROLLUP", "ROW", "ROWID", "ROWS", "RULES", "SAMPLE", "SAVE",
"SAVEPOINT", "SCHEMA", "SCHEMACHECK", "SCN", "SEARCH",
"SECOND", "SEED", "SEGMENT", "SELECT", "SELF", "SEQUENCE",
"SEQUENTIAL", "SERIALIZABLE", "SERIALLY_REUSABLE", "SERVERERROR",
"SESSIONTIMEZONE", "SET", "SETS", "SETTINGS", "SHARE",
"SHOW", "SHUTDOWN", "SIBLINGS", "SIGNTYPE", "SIMPLE_INTEGER",
"SINGLE", "SIZE", "SKIP_", "SMALLINT", "SNAPSHOT", "SOME",
"SPECIFICATION", "SQLDATA", "SQLERROR", "STANDALONE",
"START", "STARTUP", "STATEMENT", "STATEMENT_ID", "STATIC",
"STATISTICS", "STRING", "SUBMULTISET", "SUBPARTITION",
"SUBSTITUTABLE", "SUBTYPE", "SUCCESS", "SUSPEND", "TABLE",
"THE", "THEN", "TIME", "TIMESTAMP", "TIMESTAMP_LTZ_UNCONSTRAINED",
"TIMESTAMP_TZ_UNCONSTRAINED", "TIMESTAMP_UNCONSTRAINED",
"TIMEZONE_ABBR", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", "TIMEZONE_REGION",
"TO", "TRAILING", "TRANSACTION", "TRANSLATE", "TREAT",
"TRIGGER", "TRIM", "TRUE", "TRUNCATE", "TYPE", "UNBOUNDED",
"UNDER", "UNION", "UNIQUE", "UNLIMITED", "UNPIVOT", "UNTIL",
"UPDATE", "UPDATED", "UPSERT", "UROWID", "USE", "USING",
"VALIDATE", "VALUE", "VALUES", "VARCHAR", "VARCHAR2",
"VARIABLE", "VARRAY", "VARYING", "VERSION", "VERSIONS",
"WAIT", "WARNING", "WELLFORMED", "WHEN", "WHENEVER", "WHERE",
"WHILE", "WITH", "WITHIN", "WORK", "WRITE", "XML", "XMLAGG",
"XMLATTRIBUTES", "XMLCAST", "XMLCOLATTVAL", "XMLELEMENT",
"XMLEXISTS", "XMLFOREST", "XMLNAMESPACES", "XMLPARSE",
"XMLPI", "XMLQUERY", "XMLROOT", "XMLSERIALIZE", "XMLTABLE",
"YEAR", "YES", "YMINTERVAL_UNCONSTRAINED", "ZONE", "PREDICTION",
"PREDICTION_BOUNDS", "PREDICTION_COST", "PREDICTION_DETAILS",
"PREDICTION_PROBABILITY", "PREDICTION_SET", "CUME_DIST",
"DENSE_RANK", "LISTAGG", "PERCENT_RANK", "PERCENTILE_CONT",
"PERCENTILE_DISC", "RANK", "AVG", "CORR", "LAG", "LEAD",
"MAX", "MEDIAN", "MIN", "NTILE", "RATIO_TO_REPORT", "ROW_NUMBER",
"SUM", "VARIANCE", "REGR_", "STDDEV", "VAR_", "COVAR_",
"NATIONAL_CHAR_STRING_LIT", "BIT_STRING_LIT", "HEX_STRING_LIT",
"DOUBLE_PERIOD", "PERIOD", "UNSIGNED_INTEGER", "APPROXIMATE_NUM_LIT",
"CHAR_STRING", "CHAR_STRING_PERL", "QUOTE", "QS_ANGLE",
"QS_BRACE", "QS_BRACK", "QS_PAREN", "QS_OTHER_CH", "DELIMITED_ID",
"PERCENT", "AMPERSAND", "LEFT_PAREN", "RIGHT_PAREN", "DOUBLE_ASTERISK",
"ASTERISK", "PLUS_SIGN", "MINUS_SIGN", "COMMA", "SOLIDUS",
"AT_SIGN", "ASSIGN_OP", "BINDVAR", "COLON", "SEMICOLON",
"LESS_THAN_OR_EQUALS_OP", "LESS_THAN_OP", "GREATER_THAN_OR_EQUALS_OP",
"NOT_EQUAL_OP", "CARRET_OPERATOR_PART", "TILDE_OPERATOR_PART",
"EXCLAMATION_OPERATOR_PART", "GREATER_THAN_OP", "QUESTION_MARK",
"CONCATENATION_OP", "VERTICAL_BAR", "EQUALS_OP", "LEFT_BRACKET",
"RIGHT_BRACKET", "INTRODUCER", "SPACES", "SIMPLE_LETTER",
"UNSIGNED_INTEGER_FRAGMENT", "FLOAT_FRAGMENT", "SINGLE_LINE_COMMENT",
"MULTI_LINE_COMMENT", "PROMPT", "NEWLINE", "SPACE", "REGULAR_ID",
"ZV", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J",
"K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U",
"V", "W", "X", "Y", "Z" ]
    # Name of the ANTLR grammar file this lexer was generated from.
    # NOTE(review): this whole class is ANTLR-generated output — regenerate
    # from PlSql.g4 rather than editing by hand.
    grammarFileName = "PlSql.g4"
    def __init__(self, input=None, output:TextIO = sys.stdout):
        """Create the lexer and wire up its ATN-based token interpreter.

        Args:
            input: Optional input stream to tokenize (``None`` allowed; a
                stream can be attached later by the ANTLR runtime).
            output: Stream used by the runtime for error/diagnostic output
                (defaults to ``sys.stdout``).
        """
        super().__init__(input, output)
        # Fail fast if the installed antlr4 runtime does not match the
        # tool version (4.7.2) that generated this file.
        self.checkVersion("4.7.2")
        # The ATN simulator performs tokenization; DFA caches in the
        # class-level decisionsToDFA are shared across lexer instances.
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        # Action/predicate dispatch tables; left as None here — presumably
        # populated lazily by the generated action()/sempred() hooks when
        # the grammar defines any (TODO confirm against full generated file).
        self._actions = None
        self._predicates = None
|
normal
|
{
"blob_id": "b6dbed95b321ac93c712c4735d601a00650b8dc4",
"index": 1552,
"step-1": "<mask token>\n\n\nclass PlSqlLexer(Lexer):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask 
token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask 
token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask 
token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask 
token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass PlSqlLexer(Lexer):\n atn = ATNDeserializer().deserialize(serializedATN())\n decisionsToDFA = [DFA(ds, i) for i, ds in enumerate(atn.decisionToState)]\n T__0 = 1\n A_LETTER = 2\n ADD = 3\n AFTER = 4\n AGENT = 5\n AGGREGATE = 6\n ALL = 7\n ALTER = 8\n ANALYZE = 9\n AND = 10\n ANY = 11\n ARRAY = 12\n AS = 13\n ASSUME = 14\n ASSERT = 15\n ASC = 16\n ASSOCIATE = 17\n AT = 18\n ATTRIBUTE = 19\n AUDIT = 20\n AUTHID = 21\n AUTO = 22\n AUTOMATIC = 23\n AUTONOMOUS_TRANSACTION = 24\n BATCH = 25\n BEFORE = 26\n BEGIN = 27\n BETWEEN = 28\n BFILE = 29\n BINARY_DOUBLE = 30\n BINARY_FLOAT = 31\n BINARY_INTEGER = 32\n BLOB = 33\n BLOCK = 34\n BODY = 35\n BOOLEAN = 36\n BOTH = 37\n BREADTH = 38\n BULK = 39\n BY = 40\n BYTE = 41\n C_LETTER = 42\n CACHE = 43\n CALL = 44\n CANONICAL = 45\n CASCADE = 46\n CASE = 47\n CAST = 48\n CHAR = 49\n CHAR_CS = 50\n CHARACTER = 51\n CHECK = 52\n CHR = 53\n CLOB = 54\n CLOSE = 55\n CLUSTER = 56\n COLLECT = 57\n COLUMNS = 58\n COMMENT = 59\n COMMIT = 60\n COMMITTED = 61\n COMPATIBILITY = 62\n COMPILE = 63\n COMPOUND = 64\n CONNECT = 65\n CONNECT_BY_ROOT = 66\n CONSTANT = 67\n CONSTRAINT = 68\n CONSTRAINTS = 69\n CONSTRUCTOR = 70\n CONTENT = 71\n CONTEXT = 72\n CONTINUE = 73\n CONVERT = 74\n CORRUPT_XID = 75\n CORRUPT_XID_ALL = 76\n COST = 77\n COUNT = 78\n CREATE = 79\n CROSS = 80\n CUBE = 81\n CURRENT = 82\n CURRENT_USER = 83\n CURSOR = 84\n CUSTOMDATUM = 85\n CYCLE = 86\n DATA = 87\n DATABASE = 88\n DATE = 89\n DAY = 90\n DB_ROLE_CHANGE = 91\n DBTIMEZONE = 92\n DDL = 93\n DEBUG = 94\n DEC = 95\n DECIMAL = 96\n DECLARE = 97\n DECOMPOSE = 98\n DECREMENT = 99\n DEFAULT = 100\n DEFAULTS = 101\n DEFERRED = 102\n DEFINER = 103\n DELETE = 104\n DEPTH = 105\n DESC = 106\n DETERMINISTIC = 107\n DIMENSION = 108\n DISABLE = 109\n DISASSOCIATE = 110\n DISTINCT = 111\n DOCUMENT = 112\n DOUBLE = 113\n DROP = 114\n DSINTERVAL_UNCONSTRAINED = 115\n EACH = 116\n ELEMENT = 117\n ELSE = 118\n ELSIF = 119\n EMPTY = 120\n ENABLE = 
121\n ENCODING = 122\n END = 123\n ENTITYESCAPING = 124\n ERR = 125\n ERRORS = 126\n ESCAPE = 127\n EVALNAME = 128\n EXCEPT = 129\n EXCEPTION = 130\n EXCEPTION_INIT = 131\n EXCEPTIONS = 132\n EXCLUDE = 133\n EXCLUSIVE = 134\n EXECUTE = 135\n EXISTS = 136\n EXIT = 137\n EXPLAIN = 138\n EXTERNAL = 139\n EXTRACT = 140\n FAILURE = 141\n FALSE = 142\n FETCH = 143\n FINAL = 144\n FIRST = 145\n FIRST_VALUE = 146\n FLOAT = 147\n FOLLOWING = 148\n FOLLOWS = 149\n FOR = 150\n FORALL = 151\n FORCE = 152\n FROM = 153\n FULL = 154\n FUNCTION = 155\n GOTO = 156\n GRANT = 157\n GROUP = 158\n GROUPING = 159\n HASH = 160\n HAVING = 161\n HIDE = 162\n HOUR = 163\n IF = 164\n IGNORE = 165\n IMMEDIATE = 166\n IN = 167\n INCLUDE = 168\n INCLUDING = 169\n INCREMENT = 170\n INDENT = 171\n INDEX = 172\n INDEXED = 173\n INDICATOR = 174\n INDICES = 175\n INFINITE = 176\n INLINE = 177\n INNER = 178\n INOUT = 179\n INSERT = 180\n INSTANTIABLE = 181\n INSTEAD = 182\n INT = 183\n INTEGER = 184\n INTERSECT = 185\n INTERVAL = 186\n INTO = 187\n INVALIDATE = 188\n IS = 189\n ISOLATION = 190\n ITERATE = 191\n JAVA = 192\n JOIN = 193\n KEEP = 194\n LANGUAGE = 195\n LAST = 196\n LAST_VALUE = 197\n LEADING = 198\n LEFT = 199\n LEVEL = 200\n LIBRARY = 201\n LIKE = 202\n LIKE2 = 203\n LIKE4 = 204\n LIKEC = 205\n LIMIT = 206\n LOCAL = 207\n LOCK = 208\n LOCKED = 209\n LOG = 210\n LOGOFF = 211\n LOGON = 212\n LONG = 213\n LOOP = 214\n MAIN = 215\n MAP = 216\n MATCHED = 217\n MAXVALUE = 218\n MEASURES = 219\n MEMBER = 220\n MERGE = 221\n MINUS = 222\n MINUTE = 223\n MINVALUE = 224\n MLSLABEL = 225\n MODE = 226\n MODEL = 227\n MODIFY = 228\n MONTH = 229\n MULTISET = 230\n NAME = 231\n NAN = 232\n NATURAL = 233\n NATURALN = 234\n NAV = 235\n NCHAR = 236\n NCHAR_CS = 237\n NCLOB = 238\n NESTED = 239\n NEW = 240\n NO = 241\n NOAUDIT = 242\n NOCACHE = 243\n NOCOPY = 244\n NOCYCLE = 245\n NOENTITYESCAPING = 246\n NOMAXVALUE = 247\n NOMINVALUE = 248\n NONE = 249\n NOORDER = 250\n NOSCHEMACHECK = 251\n NOT = 252\n 
NOWAIT = 253\n NULL = 254\n NULLS = 255\n NUMBER = 256\n NUMERIC = 257\n NVARCHAR2 = 258\n OBJECT = 259\n OF = 260\n OFF = 261\n OID = 262\n OLD = 263\n ON = 264\n ONLY = 265\n OPEN = 266\n OPTION = 267\n OR = 268\n ORADATA = 269\n ORDER = 270\n ORDINALITY = 271\n OSERROR = 272\n OUT = 273\n OUTER = 274\n OVER = 275\n OVERRIDING = 276\n PACKAGE = 277\n PARALLEL_ENABLE = 278\n PARAMETERS = 279\n PARENT = 280\n PARTITION = 281\n PASSING = 282\n PATH = 283\n PERCENT_ROWTYPE = 284\n PERCENT_TYPE = 285\n PIPELINED = 286\n PIVOT = 287\n PLAN = 288\n PLS_INTEGER = 289\n POSITIVE = 290\n POSITIVEN = 291\n PRAGMA = 292\n PRECEDING = 293\n PRECISION = 294\n PRESENT = 295\n PRIOR = 296\n PROCEDURE = 297\n RAISE = 298\n RANGE = 299\n RAW = 300\n READ = 301\n REAL = 302\n RECORD = 303\n REF = 304\n REFERENCE = 305\n REFERENCING = 306\n REJECT = 307\n RELIES_ON = 308\n RENAME = 309\n REPLACE = 310\n RESPECT = 311\n RESTRICT_REFERENCES = 312\n RESULT = 313\n RESULT_CACHE = 314\n RETURN = 315\n RETURNING = 316\n REUSE = 317\n REVERSE = 318\n REVOKE = 319\n RIGHT = 320\n ROLLBACK = 321\n ROLLUP = 322\n ROW = 323\n ROWID = 324\n ROWS = 325\n RULES = 326\n SAMPLE = 327\n SAVE = 328\n SAVEPOINT = 329\n SCHEMA = 330\n SCHEMACHECK = 331\n SCN = 332\n SEARCH = 333\n SECOND = 334\n SEED = 335\n SEGMENT = 336\n SELECT = 337\n SELF = 338\n SEQUENCE = 339\n SEQUENTIAL = 340\n SERIALIZABLE = 341\n SERIALLY_REUSABLE = 342\n SERVERERROR = 343\n SESSIONTIMEZONE = 344\n SET = 345\n SETS = 346\n SETTINGS = 347\n SHARE = 348\n SHOW = 349\n SHUTDOWN = 350\n SIBLINGS = 351\n SIGNTYPE = 352\n SIMPLE_INTEGER = 353\n SINGLE = 354\n SIZE = 355\n SKIP_ = 356\n SMALLINT = 357\n SNAPSHOT = 358\n SOME = 359\n SPECIFICATION = 360\n SQLDATA = 361\n SQLERROR = 362\n STANDALONE = 363\n START = 364\n STARTUP = 365\n STATEMENT = 366\n STATEMENT_ID = 367\n STATIC = 368\n STATISTICS = 369\n STRING = 370\n SUBMULTISET = 371\n SUBPARTITION = 372\n SUBSTITUTABLE = 373\n SUBTYPE = 374\n SUCCESS = 375\n SUSPEND = 376\n 
TABLE = 377\n THE = 378\n THEN = 379\n TIME = 380\n TIMESTAMP = 381\n TIMESTAMP_LTZ_UNCONSTRAINED = 382\n TIMESTAMP_TZ_UNCONSTRAINED = 383\n TIMESTAMP_UNCONSTRAINED = 384\n TIMEZONE_ABBR = 385\n TIMEZONE_HOUR = 386\n TIMEZONE_MINUTE = 387\n TIMEZONE_REGION = 388\n TO = 389\n TRAILING = 390\n TRANSACTION = 391\n TRANSLATE = 392\n TREAT = 393\n TRIGGER = 394\n TRIM = 395\n TRUE = 396\n TRUNCATE = 397\n TYPE = 398\n UNBOUNDED = 399\n UNDER = 400\n UNION = 401\n UNIQUE = 402\n UNLIMITED = 403\n UNPIVOT = 404\n UNTIL = 405\n UPDATE = 406\n UPDATED = 407\n UPSERT = 408\n UROWID = 409\n USE = 410\n USING = 411\n VALIDATE = 412\n VALUE = 413\n VALUES = 414\n VARCHAR = 415\n VARCHAR2 = 416\n VARIABLE = 417\n VARRAY = 418\n VARYING = 419\n VERSION = 420\n VERSIONS = 421\n WAIT = 422\n WARNING = 423\n WELLFORMED = 424\n WHEN = 425\n WHENEVER = 426\n WHERE = 427\n WHILE = 428\n WITH = 429\n WITHIN = 430\n WORK = 431\n WRITE = 432\n XML = 433\n XMLAGG = 434\n XMLATTRIBUTES = 435\n XMLCAST = 436\n XMLCOLATTVAL = 437\n XMLELEMENT = 438\n XMLEXISTS = 439\n XMLFOREST = 440\n XMLNAMESPACES = 441\n XMLPARSE = 442\n XMLPI = 443\n XMLQUERY = 444\n XMLROOT = 445\n XMLSERIALIZE = 446\n XMLTABLE = 447\n YEAR = 448\n YES = 449\n YMINTERVAL_UNCONSTRAINED = 450\n ZONE = 451\n PREDICTION = 452\n PREDICTION_BOUNDS = 453\n PREDICTION_COST = 454\n PREDICTION_DETAILS = 455\n PREDICTION_PROBABILITY = 456\n PREDICTION_SET = 457\n CUME_DIST = 458\n DENSE_RANK = 459\n LISTAGG = 460\n PERCENT_RANK = 461\n PERCENTILE_CONT = 462\n PERCENTILE_DISC = 463\n RANK = 464\n AVG = 465\n CORR = 466\n LAG = 467\n LEAD = 468\n MAX = 469\n MEDIAN = 470\n MIN = 471\n NTILE = 472\n RATIO_TO_REPORT = 473\n ROW_NUMBER = 474\n SUM = 475\n VARIANCE = 476\n REGR_ = 477\n STDDEV = 478\n VAR_ = 479\n COVAR_ = 480\n NATIONAL_CHAR_STRING_LIT = 481\n BIT_STRING_LIT = 482\n HEX_STRING_LIT = 483\n DOUBLE_PERIOD = 484\n PERIOD = 485\n UNSIGNED_INTEGER = 486\n APPROXIMATE_NUM_LIT = 487\n CHAR_STRING = 488\n DELIMITED_ID = 489\n 
PERCENT = 490\n AMPERSAND = 491\n LEFT_PAREN = 492\n RIGHT_PAREN = 493\n DOUBLE_ASTERISK = 494\n ASTERISK = 495\n PLUS_SIGN = 496\n MINUS_SIGN = 497\n COMMA = 498\n SOLIDUS = 499\n AT_SIGN = 500\n ASSIGN_OP = 501\n BINDVAR = 502\n COLON = 503\n SEMICOLON = 504\n LESS_THAN_OR_EQUALS_OP = 505\n LESS_THAN_OP = 506\n GREATER_THAN_OR_EQUALS_OP = 507\n NOT_EQUAL_OP = 508\n CARRET_OPERATOR_PART = 509\n TILDE_OPERATOR_PART = 510\n EXCLAMATION_OPERATOR_PART = 511\n GREATER_THAN_OP = 512\n CONCATENATION_OP = 513\n VERTICAL_BAR = 514\n EQUALS_OP = 515\n LEFT_BRACKET = 516\n RIGHT_BRACKET = 517\n INTRODUCER = 518\n SPACES = 519\n SINGLE_LINE_COMMENT = 520\n MULTI_LINE_COMMENT = 521\n PROMPT = 522\n REGULAR_ID = 523\n ZV = 524\n channelNames = [u'DEFAULT_TOKEN_CHANNEL', u'HIDDEN']\n modeNames = ['DEFAULT_MODE']\n literalNames = ['<INVALID>', \"'..'\", \"'.'\", \"'%'\", \"'&'\", \"'('\", \"')'\",\n \"'**'\", \"'*'\", \"'+'\", \"'-'\", \"','\", \"'/'\", \"'@'\", \"':='\", \"':'\",\n \"';'\", \"'<='\", \"'<'\", \"'>='\", \"'^'\", \"'~'\", \"'!'\", \"'>'\", \"'||'\",\n \"'|'\", \"'='\", \"'['\", \"']'\", \"'_'\", \"'@!'\"]\n symbolicNames = ['<INVALID>', 'A_LETTER', 'ADD', 'AFTER', 'AGENT',\n 'AGGREGATE', 'ALL', 'ALTER', 'ANALYZE', 'AND', 'ANY', 'ARRAY', 'AS',\n 'ASSUME', 'ASSERT', 'ASC', 'ASSOCIATE', 'AT', 'ATTRIBUTE', 'AUDIT',\n 'AUTHID', 'AUTO', 'AUTOMATIC', 'AUTONOMOUS_TRANSACTION', 'BATCH',\n 'BEFORE', 'BEGIN', 'BETWEEN', 'BFILE', 'BINARY_DOUBLE',\n 'BINARY_FLOAT', 'BINARY_INTEGER', 'BLOB', 'BLOCK', 'BODY',\n 'BOOLEAN', 'BOTH', 'BREADTH', 'BULK', 'BY', 'BYTE', 'C_LETTER',\n 'CACHE', 'CALL', 'CANONICAL', 'CASCADE', 'CASE', 'CAST', 'CHAR',\n 'CHAR_CS', 'CHARACTER', 'CHECK', 'CHR', 'CLOB', 'CLOSE', 'CLUSTER',\n 'COLLECT', 'COLUMNS', 'COMMENT', 'COMMIT', 'COMMITTED',\n 'COMPATIBILITY', 'COMPILE', 'COMPOUND', 'CONNECT',\n 'CONNECT_BY_ROOT', 'CONSTANT', 'CONSTRAINT', 'CONSTRAINTS',\n 'CONSTRUCTOR', 'CONTENT', 'CONTEXT', 'CONTINUE', 'CONVERT',\n 'CORRUPT_XID', 'CORRUPT_XID_ALL', 
'COST', 'COUNT', 'CREATE',\n 'CROSS', 'CUBE', 'CURRENT', 'CURRENT_USER', 'CURSOR', 'CUSTOMDATUM',\n 'CYCLE', 'DATA', 'DATABASE', 'DATE', 'DAY', 'DB_ROLE_CHANGE',\n 'DBTIMEZONE', 'DDL', 'DEBUG', 'DEC', 'DECIMAL', 'DECLARE',\n 'DECOMPOSE', 'DECREMENT', 'DEFAULT', 'DEFAULTS', 'DEFERRED',\n 'DEFINER', 'DELETE', 'DEPTH', 'DESC', 'DETERMINISTIC', 'DIMENSION',\n 'DISABLE', 'DISASSOCIATE', 'DISTINCT', 'DOCUMENT', 'DOUBLE', 'DROP',\n 'DSINTERVAL_UNCONSTRAINED', 'EACH', 'ELEMENT', 'ELSE', 'ELSIF',\n 'EMPTY', 'ENABLE', 'ENCODING', 'END', 'ENTITYESCAPING', 'ERR',\n 'ERRORS', 'ESCAPE', 'EVALNAME', 'EXCEPT', 'EXCEPTION',\n 'EXCEPTION_INIT', 'EXCEPTIONS', 'EXCLUDE', 'EXCLUSIVE', 'EXECUTE',\n 'EXISTS', 'EXIT', 'EXPLAIN', 'EXTERNAL', 'EXTRACT', 'FAILURE',\n 'FALSE', 'FETCH', 'FINAL', 'FIRST', 'FIRST_VALUE', 'FLOAT',\n 'FOLLOWING', 'FOLLOWS', 'FOR', 'FORALL', 'FORCE', 'FROM', 'FULL',\n 'FUNCTION', 'GOTO', 'GRANT', 'GROUP', 'GROUPING', 'HASH', 'HAVING',\n 'HIDE', 'HOUR', 'IF', 'IGNORE', 'IMMEDIATE', 'IN', 'INCLUDE',\n 'INCLUDING', 'INCREMENT', 'INDENT', 'INDEX', 'INDEXED', 'INDICATOR',\n 'INDICES', 'INFINITE', 'INLINE', 'INNER', 'INOUT', 'INSERT',\n 'INSTANTIABLE', 'INSTEAD', 'INT', 'INTEGER', 'INTERSECT',\n 'INTERVAL', 'INTO', 'INVALIDATE', 'IS', 'ISOLATION', 'ITERATE',\n 'JAVA', 'JOIN', 'KEEP', 'LANGUAGE', 'LAST', 'LAST_VALUE', 'LEADING',\n 'LEFT', 'LEVEL', 'LIBRARY', 'LIKE', 'LIKE2', 'LIKE4', 'LIKEC',\n 'LIMIT', 'LOCAL', 'LOCK', 'LOCKED', 'LOG', 'LOGOFF', 'LOGON',\n 'LONG', 'LOOP', 'MAIN', 'MAP', 'MATCHED', 'MAXVALUE', 'MEASURES',\n 'MEMBER', 'MERGE', 'MINUS', 'MINUTE', 'MINVALUE', 'MLSLABEL',\n 'MODE', 'MODEL', 'MODIFY', 'MONTH', 'MULTISET', 'NAME', 'NAN',\n 'NATURAL', 'NATURALN', 'NAV', 'NCHAR', 'NCHAR_CS', 'NCLOB',\n 'NESTED', 'NEW', 'NO', 'NOAUDIT', 'NOCACHE', 'NOCOPY', 'NOCYCLE',\n 'NOENTITYESCAPING', 'NOMAXVALUE', 'NOMINVALUE', 'NONE', 'NOORDER',\n 'NOSCHEMACHECK', 'NOT', 'NOWAIT', 'NULL', 'NULLS', 'NUMBER',\n 'NUMERIC', 'NVARCHAR2', 'OBJECT', 'OF', 'OFF', 'OID', 'OLD', 
'ON',\n 'ONLY', 'OPEN', 'OPTION', 'OR', 'ORADATA', 'ORDER', 'ORDINALITY',\n 'OSERROR', 'OUT', 'OUTER', 'OVER', 'OVERRIDING', 'PACKAGE',\n 'PARALLEL_ENABLE', 'PARAMETERS', 'PARENT', 'PARTITION', 'PASSING',\n 'PATH', 'PERCENT_ROWTYPE', 'PERCENT_TYPE', 'PIPELINED', 'PIVOT',\n 'PLAN', 'PLS_INTEGER', 'POSITIVE', 'POSITIVEN', 'PRAGMA',\n 'PRECEDING', 'PRECISION', 'PRESENT', 'PRIOR', 'PROCEDURE', 'RAISE',\n 'RANGE', 'RAW', 'READ', 'REAL', 'RECORD', 'REF', 'REFERENCE',\n 'REFERENCING', 'REJECT', 'RELIES_ON', 'RENAME', 'REPLACE',\n 'RESPECT', 'RESTRICT_REFERENCES', 'RESULT', 'RESULT_CACHE',\n 'RETURN', 'RETURNING', 'REUSE', 'REVERSE', 'REVOKE', 'RIGHT',\n 'ROLLBACK', 'ROLLUP', 'ROW', 'ROWID', 'ROWS', 'RULES', 'SAMPLE',\n 'SAVE', 'SAVEPOINT', 'SCHEMA', 'SCHEMACHECK', 'SCN', 'SEARCH',\n 'SECOND', 'SEED', 'SEGMENT', 'SELECT', 'SELF', 'SEQUENCE',\n 'SEQUENTIAL', 'SERIALIZABLE', 'SERIALLY_REUSABLE', 'SERVERERROR',\n 'SESSIONTIMEZONE', 'SET', 'SETS', 'SETTINGS', 'SHARE', 'SHOW',\n 'SHUTDOWN', 'SIBLINGS', 'SIGNTYPE', 'SIMPLE_INTEGER', 'SINGLE',\n 'SIZE', 'SKIP_', 'SMALLINT', 'SNAPSHOT', 'SOME', 'SPECIFICATION',\n 'SQLDATA', 'SQLERROR', 'STANDALONE', 'START', 'STARTUP',\n 'STATEMENT', 'STATEMENT_ID', 'STATIC', 'STATISTICS', 'STRING',\n 'SUBMULTISET', 'SUBPARTITION', 'SUBSTITUTABLE', 'SUBTYPE',\n 'SUCCESS', 'SUSPEND', 'TABLE', 'THE', 'THEN', 'TIME', 'TIMESTAMP',\n 'TIMESTAMP_LTZ_UNCONSTRAINED', 'TIMESTAMP_TZ_UNCONSTRAINED',\n 'TIMESTAMP_UNCONSTRAINED', 'TIMEZONE_ABBR', 'TIMEZONE_HOUR',\n 'TIMEZONE_MINUTE', 'TIMEZONE_REGION', 'TO', 'TRAILING',\n 'TRANSACTION', 'TRANSLATE', 'TREAT', 'TRIGGER', 'TRIM', 'TRUE',\n 'TRUNCATE', 'TYPE', 'UNBOUNDED', 'UNDER', 'UNION', 'UNIQUE',\n 'UNLIMITED', 'UNPIVOT', 'UNTIL', 'UPDATE', 'UPDATED', 'UPSERT',\n 'UROWID', 'USE', 'USING', 'VALIDATE', 'VALUE', 'VALUES', 'VARCHAR',\n 'VARCHAR2', 'VARIABLE', 'VARRAY', 'VARYING', 'VERSION', 'VERSIONS',\n 'WAIT', 'WARNING', 'WELLFORMED', 'WHEN', 'WHENEVER', 'WHERE',\n 'WHILE', 'WITH', 'WITHIN', 'WORK', 'WRITE', 
'XML', 'XMLAGG',\n 'XMLATTRIBUTES', 'XMLCAST', 'XMLCOLATTVAL', 'XMLELEMENT',\n 'XMLEXISTS', 'XMLFOREST', 'XMLNAMESPACES', 'XMLPARSE', 'XMLPI',\n 'XMLQUERY', 'XMLROOT', 'XMLSERIALIZE', 'XMLTABLE', 'YEAR', 'YES',\n 'YMINTERVAL_UNCONSTRAINED', 'ZONE', 'PREDICTION',\n 'PREDICTION_BOUNDS', 'PREDICTION_COST', 'PREDICTION_DETAILS',\n 'PREDICTION_PROBABILITY', 'PREDICTION_SET', 'CUME_DIST',\n 'DENSE_RANK', 'LISTAGG', 'PERCENT_RANK', 'PERCENTILE_CONT',\n 'PERCENTILE_DISC', 'RANK', 'AVG', 'CORR', 'LAG', 'LEAD', 'MAX',\n 'MEDIAN', 'MIN', 'NTILE', 'RATIO_TO_REPORT', 'ROW_NUMBER', 'SUM',\n 'VARIANCE', 'REGR_', 'STDDEV', 'VAR_', 'COVAR_',\n 'NATIONAL_CHAR_STRING_LIT', 'BIT_STRING_LIT', 'HEX_STRING_LIT',\n 'DOUBLE_PERIOD', 'PERIOD', 'UNSIGNED_INTEGER',\n 'APPROXIMATE_NUM_LIT', 'CHAR_STRING', 'DELIMITED_ID', 'PERCENT',\n 'AMPERSAND', 'LEFT_PAREN', 'RIGHT_PAREN', 'DOUBLE_ASTERISK',\n 'ASTERISK', 'PLUS_SIGN', 'MINUS_SIGN', 'COMMA', 'SOLIDUS',\n 'AT_SIGN', 'ASSIGN_OP', 'BINDVAR', 'COLON', 'SEMICOLON',\n 'LESS_THAN_OR_EQUALS_OP', 'LESS_THAN_OP',\n 'GREATER_THAN_OR_EQUALS_OP', 'NOT_EQUAL_OP', 'CARRET_OPERATOR_PART',\n 'TILDE_OPERATOR_PART', 'EXCLAMATION_OPERATOR_PART',\n 'GREATER_THAN_OP', 'CONCATENATION_OP', 'VERTICAL_BAR', 'EQUALS_OP',\n 'LEFT_BRACKET', 'RIGHT_BRACKET', 'INTRODUCER', 'SPACES',\n 'SINGLE_LINE_COMMENT', 'MULTI_LINE_COMMENT', 'PROMPT', 'REGULAR_ID',\n 'ZV']\n ruleNames = ['T__0', 'A_LETTER', 'ADD', 'AFTER', 'AGENT', 'AGGREGATE',\n 'ALL', 'ALTER', 'ANALYZE', 'AND', 'ANY', 'ARRAY', 'AS', 'ASSUME',\n 'ASSERT', 'ASC', 'ASSOCIATE', 'AT', 'ATTRIBUTE', 'AUDIT', 'AUTHID',\n 'AUTO', 'AUTOMATIC', 'AUTONOMOUS_TRANSACTION', 'BATCH', 'BEFORE',\n 'BEGIN', 'BETWEEN', 'BFILE', 'BINARY_DOUBLE', 'BINARY_FLOAT',\n 'BINARY_INTEGER', 'BLOB', 'BLOCK', 'BODY', 'BOOLEAN', 'BOTH',\n 'BREADTH', 'BULK', 'BY', 'BYTE', 'C_LETTER', 'CACHE', 'CALL',\n 'CANONICAL', 'CASCADE', 'CASE', 'CAST', 'CHAR', 'CHAR_CS',\n 'CHARACTER', 'CHECK', 'CHR', 'CLOB', 'CLOSE', 'CLUSTER', 'COLLECT',\n 'COLUMNS', 
'COMMENT', 'COMMIT', 'COMMITTED', 'COMPATIBILITY',\n 'COMPILE', 'COMPOUND', 'CONNECT', 'CONNECT_BY_ROOT', 'CONSTANT',\n 'CONSTRAINT', 'CONSTRAINTS', 'CONSTRUCTOR', 'CONTENT', 'CONTEXT',\n 'CONTINUE', 'CONVERT', 'CORRUPT_XID', 'CORRUPT_XID_ALL', 'COST',\n 'COUNT', 'CREATE', 'CROSS', 'CUBE', 'CURRENT', 'CURRENT_USER',\n 'CURSOR', 'CUSTOMDATUM', 'CYCLE', 'DATA', 'DATABASE', 'DATE', 'DAY',\n 'DB_ROLE_CHANGE', 'DBTIMEZONE', 'DDL', 'DEBUG', 'DEC', 'DECIMAL',\n 'DECLARE', 'DECOMPOSE', 'DECREMENT', 'DEFAULT', 'DEFAULTS',\n 'DEFERRED', 'DEFINER', 'DELETE', 'DEPTH', 'DESC', 'DETERMINISTIC',\n 'DIMENSION', 'DISABLE', 'DISASSOCIATE', 'DISTINCT', 'DOCUMENT',\n 'DOUBLE', 'DROP', 'DSINTERVAL_UNCONSTRAINED', 'EACH', 'ELEMENT',\n 'ELSE', 'ELSIF', 'EMPTY', 'ENABLE', 'ENCODING', 'END',\n 'ENTITYESCAPING', 'ERR', 'ERRORS', 'ESCAPE', 'EVALNAME', 'EXCEPT',\n 'EXCEPTION', 'EXCEPTION_INIT', 'EXCEPTIONS', 'EXCLUDE', 'EXCLUSIVE',\n 'EXECUTE', 'EXISTS', 'EXIT', 'EXPLAIN', 'EXTERNAL', 'EXTRACT',\n 'FAILURE', 'FALSE', 'FETCH', 'FINAL', 'FIRST', 'FIRST_VALUE',\n 'FLOAT', 'FOLLOWING', 'FOLLOWS', 'FOR', 'FORALL', 'FORCE', 'FROM',\n 'FULL', 'FUNCTION', 'GOTO', 'GRANT', 'GROUP', 'GROUPING', 'HASH',\n 'HAVING', 'HIDE', 'HOUR', 'IF', 'IGNORE', 'IMMEDIATE', 'IN',\n 'INCLUDE', 'INCLUDING', 'INCREMENT', 'INDENT', 'INDEX', 'INDEXED',\n 'INDICATOR', 'INDICES', 'INFINITE', 'INLINE', 'INNER', 'INOUT',\n 'INSERT', 'INSTANTIABLE', 'INSTEAD', 'INT', 'INTEGER', 'INTERSECT',\n 'INTERVAL', 'INTO', 'INVALIDATE', 'IS', 'ISOLATION', 'ITERATE',\n 'JAVA', 'JOIN', 'KEEP', 'LANGUAGE', 'LAST', 'LAST_VALUE', 'LEADING',\n 'LEFT', 'LEVEL', 'LIBRARY', 'LIKE', 'LIKE2', 'LIKE4', 'LIKEC',\n 'LIMIT', 'LOCAL', 'LOCK', 'LOCKED', 'LOG', 'LOGOFF', 'LOGON',\n 'LONG', 'LOOP', 'MAIN', 'MAP', 'MATCHED', 'MAXVALUE', 'MEASURES',\n 'MEMBER', 'MERGE', 'MINUS', 'MINUTE', 'MINVALUE', 'MLSLABEL',\n 'MODE', 'MODEL', 'MODIFY', 'MONTH', 'MULTISET', 'NAME', 'NAN',\n 'NATURAL', 'NATURALN', 'NAV', 'NCHAR', 'NCHAR_CS', 'NCLOB',\n 'NESTED', 'NEW', 
'NO', 'NOAUDIT', 'NOCACHE', 'NOCOPY', 'NOCYCLE',\n 'NOENTITYESCAPING', 'NOMAXVALUE', 'NOMINVALUE', 'NONE', 'NOORDER',\n 'NOSCHEMACHECK', 'NOT', 'NOWAIT', 'NULL', 'NULLS', 'NUMBER',\n 'NUMERIC', 'NVARCHAR2', 'OBJECT', 'OF', 'OFF', 'OID', 'OLD', 'ON',\n 'ONLY', 'OPEN', 'OPTION', 'OR', 'ORADATA', 'ORDER', 'ORDINALITY',\n 'OSERROR', 'OUT', 'OUTER', 'OVER', 'OVERRIDING', 'PACKAGE',\n 'PARALLEL_ENABLE', 'PARAMETERS', 'PARENT', 'PARTITION', 'PASSING',\n 'PATH', 'PERCENT_ROWTYPE', 'PERCENT_TYPE', 'PIPELINED', 'PIVOT',\n 'PLAN', 'PLS_INTEGER', 'POSITIVE', 'POSITIVEN', 'PRAGMA',\n 'PRECEDING', 'PRECISION', 'PRESENT', 'PRIOR', 'PROCEDURE', 'RAISE',\n 'RANGE', 'RAW', 'READ', 'REAL', 'RECORD', 'REF', 'REFERENCE',\n 'REFERENCING', 'REJECT', 'RELIES_ON', 'RENAME', 'REPLACE',\n 'RESPECT', 'RESTRICT_REFERENCES', 'RESULT', 'RESULT_CACHE',\n 'RETURN', 'RETURNING', 'REUSE', 'REVERSE', 'REVOKE', 'RIGHT',\n 'ROLLBACK', 'ROLLUP', 'ROW', 'ROWID', 'ROWS', 'RULES', 'SAMPLE',\n 'SAVE', 'SAVEPOINT', 'SCHEMA', 'SCHEMACHECK', 'SCN', 'SEARCH',\n 'SECOND', 'SEED', 'SEGMENT', 'SELECT', 'SELF', 'SEQUENCE',\n 'SEQUENTIAL', 'SERIALIZABLE', 'SERIALLY_REUSABLE', 'SERVERERROR',\n 'SESSIONTIMEZONE', 'SET', 'SETS', 'SETTINGS', 'SHARE', 'SHOW',\n 'SHUTDOWN', 'SIBLINGS', 'SIGNTYPE', 'SIMPLE_INTEGER', 'SINGLE',\n 'SIZE', 'SKIP_', 'SMALLINT', 'SNAPSHOT', 'SOME', 'SPECIFICATION',\n 'SQLDATA', 'SQLERROR', 'STANDALONE', 'START', 'STARTUP',\n 'STATEMENT', 'STATEMENT_ID', 'STATIC', 'STATISTICS', 'STRING',\n 'SUBMULTISET', 'SUBPARTITION', 'SUBSTITUTABLE', 'SUBTYPE',\n 'SUCCESS', 'SUSPEND', 'TABLE', 'THE', 'THEN', 'TIME', 'TIMESTAMP',\n 'TIMESTAMP_LTZ_UNCONSTRAINED', 'TIMESTAMP_TZ_UNCONSTRAINED',\n 'TIMESTAMP_UNCONSTRAINED', 'TIMEZONE_ABBR', 'TIMEZONE_HOUR',\n 'TIMEZONE_MINUTE', 'TIMEZONE_REGION', 'TO', 'TRAILING',\n 'TRANSACTION', 'TRANSLATE', 'TREAT', 'TRIGGER', 'TRIM', 'TRUE',\n 'TRUNCATE', 'TYPE', 'UNBOUNDED', 'UNDER', 'UNION', 'UNIQUE',\n 'UNLIMITED', 'UNPIVOT', 'UNTIL', 'UPDATE', 'UPDATED', 'UPSERT',\n 
'UROWID', 'USE', 'USING', 'VALIDATE', 'VALUE', 'VALUES', 'VARCHAR',\n 'VARCHAR2', 'VARIABLE', 'VARRAY', 'VARYING', 'VERSION', 'VERSIONS',\n 'WAIT', 'WARNING', 'WELLFORMED', 'WHEN', 'WHENEVER', 'WHERE',\n 'WHILE', 'WITH', 'WITHIN', 'WORK', 'WRITE', 'XML', 'XMLAGG',\n 'XMLATTRIBUTES', 'XMLCAST', 'XMLCOLATTVAL', 'XMLELEMENT',\n 'XMLEXISTS', 'XMLFOREST', 'XMLNAMESPACES', 'XMLPARSE', 'XMLPI',\n 'XMLQUERY', 'XMLROOT', 'XMLSERIALIZE', 'XMLTABLE', 'YEAR', 'YES',\n 'YMINTERVAL_UNCONSTRAINED', 'ZONE', 'PREDICTION',\n 'PREDICTION_BOUNDS', 'PREDICTION_COST', 'PREDICTION_DETAILS',\n 'PREDICTION_PROBABILITY', 'PREDICTION_SET', 'CUME_DIST',\n 'DENSE_RANK', 'LISTAGG', 'PERCENT_RANK', 'PERCENTILE_CONT',\n 'PERCENTILE_DISC', 'RANK', 'AVG', 'CORR', 'LAG', 'LEAD', 'MAX',\n 'MEDIAN', 'MIN', 'NTILE', 'RATIO_TO_REPORT', 'ROW_NUMBER', 'SUM',\n 'VARIANCE', 'REGR_', 'STDDEV', 'VAR_', 'COVAR_',\n 'NATIONAL_CHAR_STRING_LIT', 'BIT_STRING_LIT', 'HEX_STRING_LIT',\n 'DOUBLE_PERIOD', 'PERIOD', 'UNSIGNED_INTEGER',\n 'APPROXIMATE_NUM_LIT', 'CHAR_STRING', 'CHAR_STRING_PERL', 'QUOTE',\n 'QS_ANGLE', 'QS_BRACE', 'QS_BRACK', 'QS_PAREN', 'QS_OTHER_CH',\n 'DELIMITED_ID', 'PERCENT', 'AMPERSAND', 'LEFT_PAREN', 'RIGHT_PAREN',\n 'DOUBLE_ASTERISK', 'ASTERISK', 'PLUS_SIGN', 'MINUS_SIGN', 'COMMA',\n 'SOLIDUS', 'AT_SIGN', 'ASSIGN_OP', 'BINDVAR', 'COLON', 'SEMICOLON',\n 'LESS_THAN_OR_EQUALS_OP', 'LESS_THAN_OP',\n 'GREATER_THAN_OR_EQUALS_OP', 'NOT_EQUAL_OP', 'CARRET_OPERATOR_PART',\n 'TILDE_OPERATOR_PART', 'EXCLAMATION_OPERATOR_PART',\n 'GREATER_THAN_OP', 'QUESTION_MARK', 'CONCATENATION_OP',\n 'VERTICAL_BAR', 'EQUALS_OP', 'LEFT_BRACKET', 'RIGHT_BRACKET',\n 'INTRODUCER', 'SPACES', 'SIMPLE_LETTER',\n 'UNSIGNED_INTEGER_FRAGMENT', 'FLOAT_FRAGMENT',\n 'SINGLE_LINE_COMMENT', 'MULTI_LINE_COMMENT', 'PROMPT', 'NEWLINE',\n 'SPACE', 'REGULAR_ID', 'ZV', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',\n 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',\n 'V', 'W', 'X', 'Y', 'Z']\n grammarFileName = 'PlSql.g4'\n\n def 
__init__(self, input=None, output: TextIO=sys.stdout):\n super().__init__(input, output)\n self.checkVersion('4.7.2')\n self._interp = LexerATNSimulator(self, self.atn, self.\n decisionsToDFA, PredictionContextCache())\n self._actions = None\n self._predicates = None\n",
"step-3": "<mask token>\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write('\\x03悋Ꜫ脳맭䅼㯧瞆奤\\x02Ȏ')\n buf.write(\n 'ᓗ\\x08\\x01\\x04\\x02\\t\\x02\\x04\\x03\\t\\x03\\x04\\x04\\t\\x04\\x04\\x05\\t\\x05\\x04\\x06\\t\\x06\\x04\\x07'\n )\n buf.write(\n '\\t\\x07\\x04\\x08\\t\\x08\\x04\\t\\t\\t\\x04\\n\\t\\n\\x04\\x0b\\t\\x0b\\x04\\x0c\\t\\x0c\\x04\\r\\t\\r'\n )\n buf.write(\n '\\x04\\x0e\\t\\x0e\\x04\\x0f\\t\\x0f\\x04\\x10\\t\\x10\\x04\\x11\\t\\x11\\x04\\x12\\t\\x12\\x04\\x13'\n )\n buf.write(\n '\\t\\x13\\x04\\x14\\t\\x14\\x04\\x15\\t\\x15\\x04\\x16\\t\\x16\\x04\\x17\\t\\x17\\x04\\x18\\t\\x18'\n )\n buf.write(\n '\\x04\\x19\\t\\x19\\x04\\x1a\\t\\x1a\\x04\\x1b\\t\\x1b\\x04\\x1c\\t\\x1c\\x04\\x1d\\t\\x1d\\x04\\x1e'\n )\n buf.write(\n '\\t\\x1e\\x04\\x1f\\t\\x1f\\x04 \\t \\x04!\\t!\\x04\"\\t\"\\x04#\\t#\\x04$\\t$\\x04%\\t%'\n )\n buf.write(\n \"\\x04&\\t&\\x04'\\t'\\x04(\\t(\\x04)\\t)\\x04*\\t*\\x04+\\t+\\x04,\\t,\\x04-\\t-\\x04.\"\n )\n buf.write('\\t.\\x04/\\t/\\x040\\t0\\x041\\t1\\x042\\t2\\x043\\t3\\x044')\n buf.write('\\t4\\x045\\t5\\x046\\t6\\x047\\t7\\x048\\t8\\x049\\t9\\x04:\\t:')\n buf.write(\n '\\x04;\\t;\\x04<\\t<\\x04=\\t=\\x04>\\t>\\x04?\\t?\\x04@\\t@\\x04A\\tA\\x04B\\tB\\x04C\\t'\n )\n buf.write(\n 'C\\x04D\\tD\\x04E\\tE\\x04F\\tF\\x04G\\tG\\x04H\\tH\\x04I\\tI\\x04J\\tJ\\x04K\\tK\\x04L\\t'\n )\n buf.write(\n 'L\\x04M\\tM\\x04N\\tN\\x04O\\tO\\x04P\\tP\\x04Q\\tQ\\x04R\\tR\\x04S\\tS\\x04T\\tT\\x04U\\t'\n )\n buf.write(\n 'U\\x04V\\tV\\x04W\\tW\\x04X\\tX\\x04Y\\tY\\x04Z\\tZ\\x04[\\t[\\x04\\\\\\t\\\\\\x04]\\t]\\x04'\n )\n buf.write(\n '^\\t^\\x04_\\t_\\x04`\\t`\\x04a\\ta\\x04b\\tb\\x04c\\tc\\x04d\\td\\x04e\\te\\x04f\\tf\\x04'\n )\n buf.write(\n 'g\\tg\\x04h\\th\\x04i\\ti\\x04j\\tj\\x04k\\tk\\x04l\\tl\\x04m\\tm\\x04n\\tn\\x04o\\to\\x04'\n )\n buf.write(\n 'p\\tp\\x04q\\tq\\x04r\\tr\\x04s\\ts\\x04t\\tt\\x04u\\tu\\x04v\\tv\\x04w\\tw\\x04x\\tx\\x04'\n )\n buf.write(\n 'y\\ty\\x04z\\tz\\x04{\\t{\\x04|\\t|\\x04}\\t}\\x04~\\t~\\x04\\x7f\\t\\x7f\\x04\\x80'\n 
)\n buf.write('\\t\\x80\\x04\\x81\\t\\x81\\x04\\x82\\t\\x82\\x04\\x83\\t\\x83')\n buf.write('\\x04\\x84\\t\\x84\\x04\\x85\\t\\x85\\x04\\x86\\t\\x86\\x04\\x87')\n buf.write('\\t\\x87\\x04\\x88\\t\\x88\\x04\\x89\\t\\x89\\x04\\x8a\\t\\x8a')\n buf.write('\\x04\\x8b\\t\\x8b\\x04\\x8c\\t\\x8c\\x04\\x8d\\t\\x8d\\x04\\x8e')\n buf.write('\\t\\x8e\\x04\\x8f\\t\\x8f\\x04\\x90\\t\\x90\\x04\\x91\\t\\x91')\n buf.write('\\x04\\x92\\t\\x92\\x04\\x93\\t\\x93\\x04\\x94\\t\\x94\\x04\\x95')\n buf.write('\\t\\x95\\x04\\x96\\t\\x96\\x04\\x97\\t\\x97\\x04\\x98\\t\\x98')\n buf.write('\\x04\\x99\\t\\x99\\x04\\x9a\\t\\x9a\\x04\\x9b\\t\\x9b\\x04\\x9c')\n buf.write('\\t\\x9c\\x04\\x9d\\t\\x9d\\x04\\x9e\\t\\x9e\\x04\\x9f\\t\\x9f')\n buf.write('\\x04\\xa0\\t\\xa0\\x04¡\\t¡\\x04¢\\t¢\\x04£')\n buf.write('\\t£\\x04¤\\t¤\\x04¥\\t¥\\x04¦\\t¦')\n buf.write('\\x04§\\t§\\x04¨\\t¨\\x04©\\t©\\x04ª')\n buf.write('\\tª\\x04«\\t«\\x04¬\\t¬\\x04\\xad\\t\\xad')\n buf.write('\\x04®\\t®\\x04¯\\t¯\\x04°\\t°\\x04±')\n buf.write('\\t±\\x04²\\t²\\x04³\\t³\\x04´\\t´')\n buf.write('\\x04µ\\tµ\\x04¶\\t¶\\x04·\\t·\\x04¸')\n buf.write('\\t¸\\x04¹\\t¹\\x04º\\tº\\x04»\\t»')\n buf.write('\\x04¼\\t¼\\x04½\\t½\\x04¾\\t¾\\x04¿')\n buf.write('\\t¿\\x04À\\tÀ\\x04Á\\tÁ\\x04Â\\tÂ')\n buf.write('\\x04Ã\\tÃ\\x04Ä\\tÄ\\x04Å\\tÅ\\x04Æ')\n buf.write('\\tÆ\\x04Ç\\tÇ\\x04È\\tÈ\\x04É\\tÉ')\n buf.write('\\x04Ê\\tÊ\\x04Ë\\tË\\x04Ì\\tÌ\\x04Í')\n buf.write('\\tÍ\\x04Î\\tÎ\\x04Ï\\tÏ\\x04Ð\\tÐ')\n buf.write('\\x04Ñ\\tÑ\\x04Ò\\tÒ\\x04Ó\\tÓ\\x04Ô')\n buf.write('\\tÔ\\x04Õ\\tÕ\\x04Ö\\tÖ\\x04×\\t×')\n buf.write('\\x04Ø\\tØ\\x04Ù\\tÙ\\x04Ú\\tÚ\\x04Û')\n buf.write('\\tÛ\\x04Ü\\tÜ\\x04Ý\\tÝ\\x04Þ\\tÞ')\n buf.write('\\x04ß\\tß\\x04à\\tà\\x04á\\tá\\x04â')\n buf.write('\\tâ\\x04ã\\tã\\x04ä\\tä\\x04å\\tå')\n buf.write('\\x04æ\\tæ\\x04ç\\tç\\x04è\\tè\\x04é')\n buf.write('\\té\\x04ê\\tê\\x04ë\\të\\x04ì\\tì')\n buf.write('\\x04í\\tí\\x04î\\tî\\x04ï\\tï\\x04ð')\n buf.write('\\tð\\x04ñ\\tñ\\x04ò\\tò\\x04ó\\tó')\n 
buf.write('\\x04ô\\tô\\x04õ\\tõ\\x04ö\\tö\\x04÷')\n buf.write('\\t÷\\x04ø\\tø\\x04ù\\tù\\x04ú\\tú')\n buf.write('\\x04û\\tû\\x04ü\\tü\\x04ý\\tý\\x04þ')\n buf.write('\\tþ\\x04ÿ\\tÿ\\x04Ā\\tĀ\\x04ā\\tā')\n buf.write('\\x04Ă\\tĂ\\x04ă\\tă\\x04Ą\\tĄ\\x04ą')\n buf.write('\\tą\\x04Ć\\tĆ\\x04ć\\tć\\x04Ĉ\\tĈ')\n buf.write('\\x04ĉ\\tĉ\\x04Ċ\\tĊ\\x04ċ\\tċ\\x04Č')\n buf.write('\\tČ\\x04č\\tč\\x04Ď\\tĎ\\x04ď\\tď')\n buf.write('\\x04Đ\\tĐ\\x04đ\\tđ\\x04Ē\\tĒ\\x04ē')\n buf.write('\\tē\\x04Ĕ\\tĔ\\x04ĕ\\tĕ\\x04Ė\\tĖ')\n buf.write('\\x04ė\\tė\\x04Ę\\tĘ\\x04ę\\tę\\x04Ě')\n buf.write('\\tĚ\\x04ě\\tě\\x04Ĝ\\tĜ\\x04ĝ\\tĝ')\n buf.write('\\x04Ğ\\tĞ\\x04ğ\\tğ\\x04Ġ\\tĠ\\x04ġ')\n buf.write('\\tġ\\x04Ģ\\tĢ\\x04ģ\\tģ\\x04Ĥ\\tĤ')\n buf.write('\\x04ĥ\\tĥ\\x04Ħ\\tĦ\\x04ħ\\tħ\\x04Ĩ')\n buf.write('\\tĨ\\x04ĩ\\tĩ\\x04Ī\\tĪ\\x04ī\\tī')\n buf.write('\\x04Ĭ\\tĬ\\x04ĭ\\tĭ\\x04Į\\tĮ\\x04į')\n buf.write('\\tį\\x04İ\\tİ\\x04ı\\tı\\x04IJ\\tIJ')\n buf.write('\\x04ij\\tij\\x04Ĵ\\tĴ\\x04ĵ\\tĵ\\x04Ķ')\n buf.write('\\tĶ\\x04ķ\\tķ\\x04ĸ\\tĸ\\x04Ĺ\\tĹ')\n buf.write('\\x04ĺ\\tĺ\\x04Ļ\\tĻ\\x04ļ\\tļ\\x04Ľ')\n buf.write('\\tĽ\\x04ľ\\tľ\\x04Ŀ\\tĿ\\x04ŀ\\tŀ')\n buf.write('\\x04Ł\\tŁ\\x04ł\\tł\\x04Ń\\tŃ\\x04ń')\n buf.write('\\tń\\x04Ņ\\tŅ\\x04ņ\\tņ\\x04Ň\\tŇ')\n buf.write('\\x04ň\\tň\\x04ʼn\\tʼn\\x04Ŋ\\tŊ\\x04ŋ')\n buf.write('\\tŋ\\x04Ō\\tŌ\\x04ō\\tō\\x04Ŏ\\tŎ')\n buf.write('\\x04ŏ\\tŏ\\x04Ő\\tŐ\\x04ő\\tő\\x04Œ')\n buf.write('\\tŒ\\x04œ\\tœ\\x04Ŕ\\tŔ\\x04ŕ\\tŕ')\n buf.write('\\x04Ŗ\\tŖ\\x04ŗ\\tŗ\\x04Ř\\tŘ\\x04ř')\n buf.write('\\tř\\x04Ś\\tŚ\\x04ś\\tś\\x04Ŝ\\tŜ')\n buf.write('\\x04ŝ\\tŝ\\x04Ş\\tŞ\\x04ş\\tş\\x04Š')\n buf.write('\\tŠ\\x04š\\tš\\x04Ţ\\tŢ\\x04ţ\\tţ')\n buf.write('\\x04Ť\\tŤ\\x04ť\\tť\\x04Ŧ\\tŦ\\x04ŧ')\n buf.write('\\tŧ\\x04Ũ\\tŨ\\x04ũ\\tũ\\x04Ū\\tŪ')\n buf.write('\\x04ū\\tū\\x04Ŭ\\tŬ\\x04ŭ\\tŭ\\x04Ů')\n buf.write('\\tŮ\\x04ů\\tů\\x04Ű\\tŰ\\x04ű\\tű')\n buf.write('\\x04Ų\\tŲ\\x04ų\\tų\\x04Ŵ\\tŴ\\x04ŵ')\n buf.write('\\tŵ\\x04Ŷ\\tŶ\\x04ŷ\\tŷ\\x04Ÿ\\tŸ')\n buf.write('\\x04Ź\\tŹ\\x04ź\\tź\\x04Ż\\tŻ\\x04ż')\n 
buf.write('\\tż\\x04Ž\\tŽ\\x04ž\\tž\\x04ſ\\tſ')\n buf.write('\\x04ƀ\\tƀ\\x04Ɓ\\tƁ\\x04Ƃ\\tƂ\\x04ƃ')\n buf.write('\\tƃ\\x04Ƅ\\tƄ\\x04ƅ\\tƅ\\x04Ɔ\\tƆ')\n buf.write('\\x04Ƈ\\tƇ\\x04ƈ\\tƈ\\x04Ɖ\\tƉ\\x04Ɗ')\n buf.write('\\tƊ\\x04Ƌ\\tƋ\\x04ƌ\\tƌ\\x04ƍ\\tƍ')\n buf.write('\\x04Ǝ\\tƎ\\x04Ə\\tƏ\\x04Ɛ\\tƐ\\x04Ƒ')\n buf.write('\\tƑ\\x04ƒ\\tƒ\\x04Ɠ\\tƓ\\x04Ɣ\\tƔ')\n buf.write('\\x04ƕ\\tƕ\\x04Ɩ\\tƖ\\x04Ɨ\\tƗ\\x04Ƙ')\n buf.write('\\tƘ\\x04ƙ\\tƙ\\x04ƚ\\tƚ\\x04ƛ\\tƛ')\n buf.write('\\x04Ɯ\\tƜ\\x04Ɲ\\tƝ\\x04ƞ\\tƞ\\x04Ɵ')\n buf.write('\\tƟ\\x04Ơ\\tƠ\\x04ơ\\tơ\\x04Ƣ\\tƢ')\n buf.write('\\x04ƣ\\tƣ\\x04Ƥ\\tƤ\\x04ƥ\\tƥ\\x04Ʀ')\n buf.write('\\tƦ\\x04Ƨ\\tƧ\\x04ƨ\\tƨ\\x04Ʃ\\tƩ')\n buf.write('\\x04ƪ\\tƪ\\x04ƫ\\tƫ\\x04Ƭ\\tƬ\\x04ƭ')\n buf.write('\\tƭ\\x04Ʈ\\tƮ\\x04Ư\\tƯ\\x04ư\\tư')\n buf.write('\\x04Ʊ\\tƱ\\x04Ʋ\\tƲ\\x04Ƴ\\tƳ\\x04ƴ')\n buf.write('\\tƴ\\x04Ƶ\\tƵ\\x04ƶ\\tƶ\\x04Ʒ\\tƷ')\n buf.write('\\x04Ƹ\\tƸ\\x04ƹ\\tƹ\\x04ƺ\\tƺ\\x04ƻ')\n buf.write('\\tƻ\\x04Ƽ\\tƼ\\x04ƽ\\tƽ\\x04ƾ\\tƾ')\n buf.write('\\x04ƿ\\tƿ\\x04ǀ\\tǀ\\x04ǁ\\tǁ\\x04ǂ')\n buf.write('\\tǂ\\x04ǃ\\tǃ\\x04DŽ\\tDŽ\\x04Dž\\tDž')\n buf.write('\\x04dž\\tdž\\x04LJ\\tLJ\\x04Lj\\tLj\\x04lj')\n buf.write('\\tlj\\x04NJ\\tNJ\\x04Nj\\tNj\\x04nj\\tnj')\n buf.write('\\x04Ǎ\\tǍ\\x04ǎ\\tǎ\\x04Ǐ\\tǏ\\x04ǐ')\n buf.write('\\tǐ\\x04Ǒ\\tǑ\\x04ǒ\\tǒ\\x04Ǔ\\tǓ')\n buf.write('\\x04ǔ\\tǔ\\x04Ǖ\\tǕ\\x04ǖ\\tǖ\\x04Ǘ')\n buf.write('\\tǗ\\x04ǘ\\tǘ\\x04Ǚ\\tǙ\\x04ǚ\\tǚ')\n buf.write('\\x04Ǜ\\tǛ\\x04ǜ\\tǜ\\x04ǝ\\tǝ\\x04Ǟ')\n buf.write('\\tǞ\\x04ǟ\\tǟ\\x04Ǡ\\tǠ\\x04ǡ\\tǡ')\n buf.write('\\x04Ǣ\\tǢ\\x04ǣ\\tǣ\\x04Ǥ\\tǤ\\x04ǥ')\n buf.write('\\tǥ\\x04Ǧ\\tǦ\\x04ǧ\\tǧ\\x04Ǩ\\tǨ')\n buf.write('\\x04ǩ\\tǩ\\x04Ǫ\\tǪ\\x04ǫ\\tǫ\\x04Ǭ')\n buf.write('\\tǬ\\x04ǭ\\tǭ\\x04Ǯ\\tǮ\\x04ǯ\\tǯ')\n buf.write('\\x04ǰ\\tǰ\\x04DZ\\tDZ\\x04Dz\\tDz\\x04dz')\n buf.write('\\tdz\\x04Ǵ\\tǴ\\x04ǵ\\tǵ\\x04Ƕ\\tǶ')\n buf.write('\\x04Ƿ\\tǷ\\x04Ǹ\\tǸ\\x04ǹ\\tǹ\\x04Ǻ')\n buf.write('\\tǺ\\x04ǻ\\tǻ\\x04Ǽ\\tǼ\\x04ǽ\\tǽ')\n buf.write('\\x04Ǿ\\tǾ\\x04ǿ\\tǿ\\x04Ȁ\\tȀ\\x04ȁ')\n 
buf.write('\\tȁ\\x04Ȃ\\tȂ\\x04ȃ\\tȃ\\x04Ȅ\\tȄ')\n buf.write('\\x04ȅ\\tȅ\\x04Ȇ\\tȆ\\x04ȇ\\tȇ\\x04Ȉ')\n buf.write('\\tȈ\\x04ȉ\\tȉ\\x04Ȋ\\tȊ\\x04ȋ\\tȋ')\n buf.write('\\x04Ȍ\\tȌ\\x04ȍ\\tȍ\\x04Ȏ\\tȎ\\x04ȏ')\n buf.write('\\tȏ\\x04Ȑ\\tȐ\\x04ȑ\\tȑ\\x04Ȓ\\tȒ')\n buf.write('\\x04ȓ\\tȓ\\x04Ȕ\\tȔ\\x04ȕ\\tȕ\\x04Ȗ')\n buf.write('\\tȖ\\x04ȗ\\tȗ\\x04Ș\\tȘ\\x04ș\\tș')\n buf.write('\\x04Ț\\tȚ\\x04ț\\tț\\x04Ȝ\\tȜ\\x04ȝ')\n buf.write('\\tȝ\\x04Ȟ\\tȞ\\x04ȟ\\tȟ\\x04Ƞ\\tȠ')\n buf.write('\\x04ȡ\\tȡ\\x04Ȣ\\tȢ\\x04ȣ\\tȣ\\x04Ȥ')\n buf.write('\\tȤ\\x04ȥ\\tȥ\\x04Ȧ\\tȦ\\x04ȧ\\tȧ')\n buf.write('\\x04Ȩ\\tȨ\\x04ȩ\\tȩ\\x04Ȫ\\tȪ\\x04ȫ')\n buf.write('\\tȫ\\x04Ȭ\\tȬ\\x04ȭ\\tȭ\\x04Ȯ\\tȮ')\n buf.write('\\x04ȯ\\tȯ\\x04Ȱ\\tȰ\\x04ȱ\\tȱ\\x04Ȳ')\n buf.write('\\tȲ\\x04ȳ\\tȳ\\x04ȴ\\tȴ\\x03\\x02\\x03\\x02\\x03\\x02\\x03')\n buf.write(\n '\\x03\\x03\\x03\\x03\\x04\\x03\\x04\\x03\\x04\\x03\\x04\\x03\\x05\\x03\\x05\\x03\\x05\\x03\\x05\\x03\\x05\\x03\\x05\\x03\\x06\\x03\\x06'\n )\n buf.write(\n '\\x03\\x06\\x03\\x06\\x03\\x06\\x03\\x06\\x03\\x07\\x03\\x07\\x03\\x07\\x03\\x07\\x03\\x07\\x03\\x07\\x03\\x07\\x03\\x07\\x03\\x07\\x03'\n )\n buf.write(\"\"\"\u0007\u0003\b\u0003\b\u0003\b\u0003\b\u0003\t\u0003\t\u0003\t\u0003\t\u0003\t\u0003\t\u0003\n\u0003\n\u0003\n\"\"\")\n buf.write(\"\"\"\u0003\n\u0003\n\u0003\n\u0003\n\u0003\n\u0003\u000b\u0003\u000b\u0003\u000b\u0003\u000b\u0003\f\u0003\f\u0003\f\u0003\"\"\")\n buf.write(\n '\\x0c\\x03\\r\\x03\\r\\x03\\r\\x03\\r\\x03\\r\\x03\\r\\x03\\x0e\\x03\\x0e\\x03\\x0e\\x03\\x0f\\x03\\x0f\\x03'\n )\n buf.write(\n '\\x0f\\x03\\x0f\\x03\\x0f\\x03\\x0f\\x03\\x0f\\x03\\x10\\x03\\x10\\x03\\x10\\x03\\x10\\x03\\x10\\x03\\x10'\n )\n buf.write(\n '\\x03\\x10\\x03\\x11\\x03\\x11\\x03\\x11\\x03\\x11\\x03\\x12\\x03\\x12\\x03\\x12\\x03\\x12\\x03\\x12\\x03\\x12'\n )\n buf.write(\n '\\x03\\x12\\x03\\x12\\x03\\x12\\x03\\x12\\x03\\x13\\x03\\x13\\x03\\x13\\x03\\x14\\x03\\x14\\x03\\x14\\x03\\x14'\n )\n buf.write(\n 
'\\x03\\x14\\x03\\x14\\x03\\x14\\x03\\x14\\x03\\x14\\x03\\x14\\x03\\x15\\x03\\x15\\x03\\x15\\x03\\x15\\x03\\x15'\n )\n buf.write(\n '\\x03\\x15\\x03\\x16\\x03\\x16\\x03\\x16\\x03\\x16\\x03\\x16\\x03\\x16\\x03\\x16\\x03\\x17\\x03\\x17\\x03\\x17'\n )\n buf.write(\n '\\x03\\x17\\x03\\x17\\x03\\x18\\x03\\x18\\x03\\x18\\x03\\x18\\x03\\x18\\x03\\x18\\x03\\x18\\x03\\x18\\x03\\x18'\n )\n buf.write(\n '\\x03\\x18\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19'\n )\n buf.write(\n '\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19'\n )\n buf.write(\n '\\x03\\x19\\x03\\x19\\x03\\x1a\\x03\\x1a\\x03\\x1a\\x03\\x1a\\x03\\x1a\\x03\\x1a\\x03\\x1b\\x03\\x1b\\x03\\x1b'\n )\n buf.write(\n '\\x03\\x1b\\x03\\x1b\\x03\\x1b\\x03\\x1b\\x03\\x1c\\x03\\x1c\\x03\\x1c\\x03\\x1c\\x03\\x1c\\x03\\x1c\\x03\\x1d'\n )\n buf.write(\n '\\x03\\x1d\\x03\\x1d\\x03\\x1d\\x03\\x1d\\x03\\x1d\\x03\\x1d\\x03\\x1d\\x03\\x1e\\x03\\x1e\\x03\\x1e\\x03\\x1e'\n )\n buf.write(\n '\\x03\\x1e\\x03\\x1e\\x03\\x1f\\x03\\x1f\\x03\\x1f\\x03\\x1f\\x03\\x1f\\x03\\x1f\\x03\\x1f\\x03\\x1f\\x03\\x1f'\n )\n buf.write(\n '\\x03\\x1f\\x03\\x1f\\x03\\x1f\\x03\\x1f\\x03\\x1f\\x03 \\x03 \\x03 \\x03 \\x03 \\x03 \\x03 \\x03 \\x03 \\x03'\n )\n buf.write(\n ' \\x03 \\x03 \\x03 \\x03!\\x03!\\x03!\\x03!\\x03!\\x03!\\x03!\\x03!\\x03!\\x03!\\x03!\\x03!\\x03!\\x03!\\x03'\n )\n buf.write(\n '!\\x03\"\\x03\"\\x03\"\\x03\"\\x03\"\\x03#\\x03#\\x03#\\x03#\\x03#\\x03#\\x03$\\x03$\\x03$\\x03$\\x03'\n )\n buf.write(\n \"$\\x03%\\x03%\\x03%\\x03%\\x03%\\x03%\\x03%\\x03%\\x03&\\x03&\\x03&\\x03&\\x03&\\x03'\\x03'\\x03'\\x03\"\n )\n buf.write(\n \"'\\x03'\\x03'\\x03'\\x03'\\x03(\\x03(\\x03(\\x03(\\x03(\\x03)\\x03)\\x03)\\x03*\\x03*\\x03*\\x03\"\n )\n buf.write(\n '*\\x03*\\x03+\\x03+\\x03,\\x03,\\x03,\\x03,\\x03,\\x03,\\x03-\\x03-\\x03-\\x03-\\x03-\\x03.\\x03.\\x03.\\x03'\n )\n buf.write(\n 
'.\\x03.\\x03.\\x03.\\x03.\\x03.\\x03.\\x03/\\x03/\\x03/\\x03/\\x03/\\x03/\\x03/\\x03/\\x030\\x030'\n )\n buf.write('\\x030\\x030\\x030\\x031\\x031\\x031\\x031\\x031\\x032\\x032\\x032')\n buf.write('\\x032\\x032\\x033\\x033\\x033\\x033\\x033\\x033\\x033\\x033\\x034')\n buf.write('\\x034\\x034\\x034\\x034\\x034\\x034\\x034\\x034\\x034\\x035\\x035')\n buf.write('\\x035\\x035\\x035\\x035\\x036\\x036\\x036\\x036\\x037\\x037\\x037')\n buf.write(\n '\\x037\\x037\\x038\\x038\\x038\\x038\\x038\\x038\\x039\\x039\\x039\\x039\\x039\\x039\\x039\\x039\\x03'\n )\n buf.write(\n ':\\x03:\\x03:\\x03:\\x03:\\x03:\\x03:\\x03:\\x03;\\x03;\\x03;\\x03;\\x03;\\x03;\\x03;\\x03;\\x03<\\x03<\\x03'\n )\n buf.write(\n '<\\x03<\\x03<\\x03<\\x03<\\x03<\\x03=\\x03=\\x03=\\x03=\\x03=\\x03=\\x03=\\x03>\\x03>\\x03>\\x03>\\x03>\\x03'\n )\n buf.write(\n '>\\x03>\\x03>\\x03>\\x03>\\x03?\\x03?\\x03?\\x03?\\x03?\\x03?\\x03?\\x03?\\x03?\\x03?\\x03?\\x03?\\x03?\\x03'\n )\n buf.write(\n '?\\x03@\\x03@\\x03@\\x03@\\x03@\\x03@\\x03@\\x03@\\x03A\\x03A\\x03A\\x03A\\x03A\\x03A\\x03A\\x03A\\x03A\\x03'\n )\n buf.write(\n 'B\\x03B\\x03B\\x03B\\x03B\\x03B\\x03B\\x03B\\x03C\\x03C\\x03C\\x03C\\x03C\\x03C\\x03C\\x03C\\x03C\\x03C\\x03'\n )\n buf.write(\n 'C\\x03C\\x03C\\x03C\\x03C\\x03C\\x03D\\x03D\\x03D\\x03D\\x03D\\x03D\\x03D\\x03D\\x03D\\x03E\\x03E\\x03E\\x03'\n )\n buf.write(\n 'E\\x03E\\x03E\\x03E\\x03E\\x03E\\x03E\\x03E\\x03F\\x03F\\x03F\\x03F\\x03F\\x03F\\x03F\\x03F\\x03F\\x03F\\x03'\n )\n buf.write(\n 'F\\x03F\\x03G\\x03G\\x03G\\x03G\\x03G\\x03G\\x03G\\x03G\\x03G\\x03G\\x03G\\x03G\\x03H\\x03H\\x03H\\x03H\\x03'\n )\n buf.write(\n 'H\\x03H\\x03H\\x03H\\x03I\\x03I\\x03I\\x03I\\x03I\\x03I\\x03I\\x03I\\x03J\\x03J\\x03J\\x03J\\x03J\\x03J\\x03'\n )\n buf.write(\n 'J\\x03J\\x03J\\x03K\\x03K\\x03K\\x03K\\x03K\\x03K\\x03K\\x03K\\x03L\\x03L\\x03L\\x03L\\x03L\\x03L\\x03L\\x03'\n )\n buf.write(\n 'L\\x03L\\x03L\\x03L\\x03L\\x03M\\x03M\\x03M\\x03M\\x03M\\x03M\\x03M\\x03M\\x03M\\x03M\\x03M\\x03M\\x03M\\x03'\n )\n buf.write(\n 
'M\\x03M\\x03M\\x03N\\x03N\\x03N\\x03N\\x03N\\x03O\\x03O\\x03O\\x03O\\x03O\\x03O\\x03P\\x03P\\x03P\\x03P\\x03'\n )\n buf.write(\n 'P\\x03P\\x03P\\x03Q\\x03Q\\x03Q\\x03Q\\x03Q\\x03Q\\x03R\\x03R\\x03R\\x03R\\x03R\\x03S\\x03S\\x03S\\x03S\\x03'\n )\n buf.write(\n 'S\\x03S\\x03S\\x03S\\x03T\\x03T\\x03T\\x03T\\x03T\\x03T\\x03T\\x03T\\x03T\\x03T\\x03T\\x03T\\x03T\\x03U\\x03'\n )\n buf.write(\n 'U\\x03U\\x03U\\x03U\\x03U\\x03U\\x03V\\x03V\\x03V\\x03V\\x03V\\x03V\\x03V\\x03V\\x03V\\x03V\\x03V\\x03V\\x03'\n )\n buf.write(\n 'W\\x03W\\x03W\\x03W\\x03W\\x03W\\x03X\\x03X\\x03X\\x03X\\x03X\\x03Y\\x03Y\\x03Y\\x03Y\\x03Y\\x03Y\\x03Y\\x03'\n )\n buf.write(\n 'Y\\x03Y\\x03Z\\x03Z\\x03Z\\x03Z\\x03Z\\x03[\\x03[\\x03[\\x03[\\x03\\\\\\x03\\\\\\x03\\\\\\x03\\\\\\x03\\\\\\x03'\n )\n buf.write(\n '\\\\\\x03\\\\\\x03\\\\\\x03\\\\\\x03\\\\\\x03\\\\\\x03\\\\\\x03\\\\\\x03\\\\\\x03\\\\\\x03]\\x03]\\x03]\\x03]\\x03]'\n )\n buf.write(\n '\\x03]\\x03]\\x03]\\x03]\\x03]\\x03]\\x03^\\x03^\\x03^\\x03^\\x03_\\x03_\\x03_\\x03_\\x03_\\x03_\\x03`\\x03'\n )\n buf.write(\n '`\\x03`\\x03`\\x03a\\x03a\\x03a\\x03a\\x03a\\x03a\\x03a\\x03a\\x03b\\x03b\\x03b\\x03b\\x03b\\x03b\\x03b\\x03'\n )\n buf.write(\n 'b\\x03c\\x03c\\x03c\\x03c\\x03c\\x03c\\x03c\\x03c\\x03c\\x03c\\x03d\\x03d\\x03d\\x03d\\x03d\\x03d\\x03d\\x03'\n )\n buf.write(\n 'd\\x03d\\x03d\\x03e\\x03e\\x03e\\x03e\\x03e\\x03e\\x03e\\x03e\\x03f\\x03f\\x03f\\x03f\\x03f\\x03f\\x03f\\x03'\n )\n buf.write(\n 'f\\x03f\\x03g\\x03g\\x03g\\x03g\\x03g\\x03g\\x03g\\x03g\\x03g\\x03h\\x03h\\x03h\\x03h\\x03h\\x03h\\x03h\\x03'\n )\n buf.write(\n 'h\\x03i\\x03i\\x03i\\x03i\\x03i\\x03i\\x03i\\x03j\\x03j\\x03j\\x03j\\x03j\\x03j\\x03k\\x03k\\x03k\\x03k\\x03'\n )\n buf.write(\n 'k\\x03l\\x03l\\x03l\\x03l\\x03l\\x03l\\x03l\\x03l\\x03l\\x03l\\x03l\\x03l\\x03l\\x03l\\x03m\\x03m\\x03m\\x03'\n )\n buf.write(\n 'm\\x03m\\x03m\\x03m\\x03m\\x03m\\x03m\\x03n\\x03n\\x03n\\x03n\\x03n\\x03n\\x03n\\x03n\\x03o\\x03o\\x03o\\x03'\n )\n buf.write(\n 
'o\\x03o\\x03o\\x03o\\x03o\\x03o\\x03o\\x03o\\x03o\\x03o\\x03p\\x03p\\x03p\\x03p\\x03p\\x03p\\x03p\\x03p\\x03'\n )\n buf.write(\n 'p\\x03q\\x03q\\x03q\\x03q\\x03q\\x03q\\x03q\\x03q\\x03q\\x03r\\x03r\\x03r\\x03r\\x03r\\x03r\\x03r\\x03s\\x03'\n )\n buf.write(\n 's\\x03s\\x03s\\x03s\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03'\n )\n buf.write(\n 't\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03u\\x03u\\x03u\\x03u\\x03u\\x03v\\x03v\\x03'\n )\n buf.write(\n 'v\\x03v\\x03v\\x03v\\x03v\\x03v\\x03w\\x03w\\x03w\\x03w\\x03w\\x03x\\x03x\\x03x\\x03x\\x03x\\x03x\\x03y\\x03'\n )\n buf.write(\n 'y\\x03y\\x03y\\x03y\\x03y\\x03z\\x03z\\x03z\\x03z\\x03z\\x03z\\x03z\\x03{\\x03{\\x03{\\x03{\\x03{\\x03{\\x03'\n )\n buf.write(\n '{\\x03{\\x03{\\x03|\\x03|\\x03|\\x03|\\x03}\\x03}\\x03}\\x03}\\x03}\\x03}\\x03}\\x03}\\x03}\\x03}\\x03}\\x03'\n )\n buf.write(\n '}\\x03}\\x03}\\x03}\\x03~\\x03~\\x03~\\x03~\\x03\\x7f\\x03\\x7f\\x03\\x7f\\x03\\x7f\\x03\\x7f\\x03'\n )\n buf.write(\n '\\x7f\\x03\\x7f\\x03\\x80\\x03\\x80\\x03\\x80\\x03\\x80\\x03\\x80\\x03\\x80')\n buf.write('\\x03\\x80\\x03\\x81\\x03\\x81\\x03\\x81\\x03\\x81\\x03\\x81\\x03\\x81')\n buf.write('\\x03\\x81\\x03\\x81\\x03\\x81\\x03\\x82\\x03\\x82\\x03\\x82\\x03\\x82')\n buf.write('\\x03\\x82\\x03\\x82\\x03\\x82\\x03\\x83\\x03\\x83\\x03\\x83\\x03\\x83')\n buf.write('\\x03\\x83\\x03\\x83\\x03\\x83\\x03\\x83\\x03\\x83\\x03\\x83\\x03\\x84')\n buf.write('\\x03\\x84\\x03\\x84\\x03\\x84\\x03\\x84\\x03\\x84\\x03\\x84\\x03\\x84')\n buf.write('\\x03\\x84\\x03\\x84\\x03\\x84\\x03\\x84\\x03\\x84\\x03\\x84\\x03\\x84')\n buf.write('\\x03\\x85\\x03\\x85\\x03\\x85\\x03\\x85\\x03\\x85\\x03\\x85\\x03\\x85')\n buf.write('\\x03\\x85\\x03\\x85\\x03\\x85\\x03\\x85\\x03\\x86\\x03\\x86\\x03\\x86')\n buf.write('\\x03\\x86\\x03\\x86\\x03\\x86\\x03\\x86\\x03\\x86\\x03\\x87\\x03\\x87')\n buf.write('\\x03\\x87\\x03\\x87\\x03\\x87\\x03\\x87\\x03\\x87\\x03\\x87\\x03\\x87')\n 
buf.write('\\x03\\x87\\x03\\x88\\x03\\x88\\x03\\x88\\x03\\x88\\x03\\x88\\x03\\x88')\n buf.write('\\x03\\x88\\x03\\x88\\x03\\x89\\x03\\x89\\x03\\x89\\x03\\x89\\x03\\x89')\n buf.write('\\x03\\x89\\x03\\x89\\x03\\x8a\\x03\\x8a\\x03\\x8a\\x03\\x8a\\x03\\x8a')\n buf.write('\\x03\\x8b\\x03\\x8b\\x03\\x8b\\x03\\x8b\\x03\\x8b\\x03\\x8b\\x03\\x8b')\n buf.write('\\x03\\x8b\\x03\\x8c\\x03\\x8c\\x03\\x8c\\x03\\x8c\\x03\\x8c\\x03\\x8c')\n buf.write('\\x03\\x8c\\x03\\x8c\\x03\\x8c\\x03\\x8d\\x03\\x8d\\x03\\x8d\\x03\\x8d')\n buf.write('\\x03\\x8d\\x03\\x8d\\x03\\x8d\\x03\\x8d\\x03\\x8e\\x03\\x8e\\x03\\x8e')\n buf.write('\\x03\\x8e\\x03\\x8e\\x03\\x8e\\x03\\x8e\\x03\\x8e\\x03\\x8f\\x03\\x8f')\n buf.write('\\x03\\x8f\\x03\\x8f\\x03\\x8f\\x03\\x8f\\x03\\x90\\x03\\x90\\x03\\x90')\n buf.write('\\x03\\x90\\x03\\x90\\x03\\x90\\x03\\x91\\x03\\x91\\x03\\x91\\x03\\x91')\n buf.write('\\x03\\x91\\x03\\x91\\x03\\x92\\x03\\x92\\x03\\x92\\x03\\x92\\x03\\x92')\n buf.write('\\x03\\x92\\x03\\x93\\x03\\x93\\x03\\x93\\x03\\x93\\x03\\x93\\x03\\x93')\n buf.write('\\x03\\x93\\x03\\x93\\x03\\x93\\x03\\x93\\x03\\x93\\x03\\x93\\x03\\x94')\n buf.write('\\x03\\x94\\x03\\x94\\x03\\x94\\x03\\x94\\x03\\x94\\x03\\x95\\x03\\x95')\n buf.write('\\x03\\x95\\x03\\x95\\x03\\x95\\x03\\x95\\x03\\x95\\x03\\x95\\x03\\x95')\n buf.write('\\x03\\x95\\x03\\x96\\x03\\x96\\x03\\x96\\x03\\x96\\x03\\x96\\x03\\x96')\n buf.write('\\x03\\x96\\x03\\x96\\x03\\x97\\x03\\x97\\x03\\x97\\x03\\x97\\x03\\x98')\n buf.write('\\x03\\x98\\x03\\x98\\x03\\x98\\x03\\x98\\x03\\x98\\x03\\x98\\x03\\x99')\n buf.write('\\x03\\x99\\x03\\x99\\x03\\x99\\x03\\x99\\x03\\x99\\x03\\x9a\\x03\\x9a')\n buf.write('\\x03\\x9a\\x03\\x9a\\x03\\x9a\\x03\\x9b\\x03\\x9b\\x03\\x9b\\x03\\x9b')\n buf.write('\\x03\\x9b\\x03\\x9c\\x03\\x9c\\x03\\x9c\\x03\\x9c\\x03\\x9c\\x03\\x9c')\n buf.write('\\x03\\x9c\\x03\\x9c\\x03\\x9c\\x03\\x9d\\x03\\x9d\\x03\\x9d\\x03\\x9d')\n buf.write('\\x03\\x9d\\x03\\x9e\\x03\\x9e\\x03\\x9e\\x03\\x9e\\x03\\x9e\\x03\\x9e')\n 
buf.write('\\x03\\x9f\\x03\\x9f\\x03\\x9f\\x03\\x9f\\x03\\x9f\\x03\\x9f\\x03\\xa0')\n buf.write('\\x03\\xa0\\x03\\xa0\\x03\\xa0\\x03\\xa0\\x03\\xa0\\x03\\xa0\\x03\\xa0')\n buf.write('\\x03\\xa0\\x03¡\\x03¡\\x03¡\\x03¡\\x03¡\\x03¢')\n buf.write('\\x03¢\\x03¢\\x03¢\\x03¢\\x03¢\\x03¢\\x03£')\n buf.write('\\x03£\\x03£\\x03£\\x03£\\x03¤\\x03¤\\x03¤')\n buf.write('\\x03¤\\x03¤\\x03¥\\x03¥\\x03¥\\x03¦\\x03¦')\n buf.write('\\x03¦\\x03¦\\x03¦\\x03¦\\x03¦\\x03§\\x03§')\n buf.write('\\x03§\\x03§\\x03§\\x03§\\x03§\\x03§\\x03§')\n buf.write('\\x03§\\x03¨\\x03¨\\x03¨\\x03©\\x03©\\x03©')\n buf.write('\\x03©\\x03©\\x03©\\x03©\\x03©\\x03ª\\x03ª')\n buf.write('\\x03ª\\x03ª\\x03ª\\x03ª\\x03ª\\x03ª\\x03ª')\n buf.write('\\x03ª\\x03«\\x03«\\x03«\\x03«\\x03«\\x03«')\n buf.write('\\x03«\\x03«\\x03«\\x03«\\x03¬\\x03¬\\x03¬')\n buf.write('\\x03¬\\x03¬\\x03¬\\x03¬\\x03\\xad\\x03\\xad\\x03\\xad')\n buf.write('\\x03\\xad\\x03\\xad\\x03\\xad\\x03®\\x03®\\x03®\\x03®')\n buf.write('\\x03®\\x03®\\x03®\\x03®\\x03¯\\x03¯\\x03¯')\n buf.write('\\x03¯\\x03¯\\x03¯\\x03¯\\x03¯\\x03¯\\x03¯')\n buf.write('\\x03°\\x03°\\x03°\\x03°\\x03°\\x03°\\x03°')\n buf.write('\\x03°\\x03±\\x03±\\x03±\\x03±\\x03±\\x03±')\n buf.write('\\x03±\\x03±\\x03±\\x03²\\x03²\\x03²\\x03²')\n buf.write('\\x03²\\x03²\\x03²\\x03³\\x03³\\x03³\\x03³')\n buf.write('\\x03³\\x03³\\x03´\\x03´\\x03´\\x03´\\x03´')\n buf.write('\\x03´\\x03µ\\x03µ\\x03µ\\x03µ\\x03µ\\x03µ')\n buf.write('\\x03µ\\x03¶\\x03¶\\x03¶\\x03¶\\x03¶\\x03¶')\n buf.write('\\x03¶\\x03¶\\x03¶\\x03¶\\x03¶\\x03¶\\x03¶')\n buf.write('\\x03·\\x03·\\x03·\\x03·\\x03·\\x03·\\x03·')\n buf.write('\\x03·\\x03¸\\x03¸\\x03¸\\x03¸\\x03¹\\x03¹')\n buf.write('\\x03¹\\x03¹\\x03¹\\x03¹\\x03¹\\x03¹\\x03º')\n buf.write('\\x03º\\x03º\\x03º\\x03º\\x03º\\x03º\\x03º')\n buf.write('\\x03º\\x03º\\x03»\\x03»\\x03»\\x03»\\x03»')\n buf.write('\\x03»\\x03»\\x03»\\x03»\\x03¼\\x03¼\\x03¼')\n buf.write('\\x03¼\\x03¼\\x03½\\x03½\\x03½\\x03½\\x03½')\n buf.write('\\x03½\\x03½\\x03½\\x03½\\x03½\\x03½\\x03¾')\n 
buf.write('\\x03¾\\x03¾\\x03¿\\x03¿\\x03¿\\x03¿\\x03¿')\n buf.write('\\x03¿\\x03¿\\x03¿\\x03¿\\x03¿\\x03À\\x03À')\n buf.write('\\x03À\\x03À\\x03À\\x03À\\x03À\\x03À\\x03Á')\n buf.write('\\x03Á\\x03Á\\x03Á\\x03Á\\x03Â\\x03Â\\x03Â')\n buf.write('\\x03Â\\x03Â\\x03Ã\\x03Ã\\x03Ã\\x03Ã\\x03Ã')\n buf.write('\\x03Ä\\x03Ä\\x03Ä\\x03Ä\\x03Ä\\x03Ä\\x03Ä')\n buf.write('\\x03Ä\\x03Ä\\x03Å\\x03Å\\x03Å\\x03Å\\x03Å')\n buf.write('\\x03Æ\\x03Æ\\x03Æ\\x03Æ\\x03Æ\\x03Æ\\x03Æ')\n buf.write('\\x03Æ\\x03Æ\\x03Æ\\x03Æ\\x03Ç\\x03Ç\\x03Ç')\n buf.write('\\x03Ç\\x03Ç\\x03Ç\\x03Ç\\x03Ç\\x03È\\x03È')\n buf.write('\\x03È\\x03È\\x03È\\x03É\\x03É\\x03É\\x03É')\n buf.write('\\x03É\\x03É\\x03Ê\\x03Ê\\x03Ê\\x03Ê\\x03Ê')\n buf.write('\\x03Ê\\x03Ê\\x03Ê\\x03Ë\\x03Ë\\x03Ë\\x03Ë')\n buf.write('\\x03Ë\\x03Ì\\x03Ì\\x03Ì\\x03Ì\\x03Ì\\x03Ì')\n buf.write('\\x03Í\\x03Í\\x03Í\\x03Í\\x03Í\\x03Í\\x03Î')\n buf.write('\\x03Î\\x03Î\\x03Î\\x03Î\\x03Î\\x03Ï\\x03Ï')\n buf.write('\\x03Ï\\x03Ï\\x03Ï\\x03Ï\\x03Ð\\x03Ð\\x03Ð')\n buf.write('\\x03Ð\\x03Ð\\x03Ð\\x03Ñ\\x03Ñ\\x03Ñ\\x03Ñ')\n buf.write('\\x03Ñ\\x03Ò\\x03Ò\\x03Ò\\x03Ò\\x03Ò\\x03Ò')\n buf.write('\\x03Ò\\x03Ó\\x03Ó\\x03Ó\\x03Ó\\x03Ô\\x03Ô')\n buf.write('\\x03Ô\\x03Ô\\x03Ô\\x03Ô\\x03Ô\\x03Õ\\x03Õ')\n buf.write('\\x03Õ\\x03Õ\\x03Õ\\x03Õ\\x03Ö\\x03Ö\\x03Ö')\n buf.write('\\x03Ö\\x03Ö\\x03×\\x03×\\x03×\\x03×\\x03×')\n buf.write('\\x03Ø\\x03Ø\\x03Ø\\x03Ø\\x03Ø\\x03Ù\\x03Ù')\n buf.write('\\x03Ù\\x03Ù\\x03Ú\\x03Ú\\x03Ú\\x03Ú\\x03Ú')\n buf.write('\\x03Ú\\x03Ú\\x03Ú\\x03Û\\x03Û\\x03Û\\x03Û')\n buf.write('\\x03Û\\x03Û\\x03Û\\x03Û\\x03Û\\x03Ü\\x03Ü')\n buf.write('\\x03Ü\\x03Ü\\x03Ü\\x03Ü\\x03Ü\\x03Ü\\x03Ü')\n buf.write('\\x03Ý\\x03Ý\\x03Ý\\x03Ý\\x03Ý\\x03Ý\\x03Ý')\n buf.write('\\x03Þ\\x03Þ\\x03Þ\\x03Þ\\x03Þ\\x03Þ\\x03ß')\n buf.write('\\x03ß\\x03ß\\x03ß\\x03ß\\x03ß\\x03à\\x03à')\n buf.write('\\x03à\\x03à\\x03à\\x03à\\x03à\\x03á\\x03á')\n buf.write('\\x03á\\x03á\\x03á\\x03á\\x03á\\x03á\\x03á')\n buf.write('\\x03â\\x03â\\x03â\\x03â\\x03â\\x03â\\x03â')\n 
buf.write('\\x03â\\x03â\\x03ã\\x03ã\\x03ã\\x03ã\\x03ã')\n buf.write('\\x03ä\\x03ä\\x03ä\\x03ä\\x03ä\\x03ä\\x03å')\n buf.write('\\x03å\\x03å\\x03å\\x03å\\x03å\\x03å\\x03æ')\n buf.write('\\x03æ\\x03æ\\x03æ\\x03æ\\x03æ\\x03ç\\x03ç')\n buf.write('\\x03ç\\x03ç\\x03ç\\x03ç\\x03ç\\x03ç\\x03ç')\n buf.write('\\x03è\\x03è\\x03è\\x03è\\x03è\\x03é\\x03é')\n buf.write('\\x03é\\x03é\\x03ê\\x03ê\\x03ê\\x03ê\\x03ê')\n buf.write('\\x03ê\\x03ê\\x03ê\\x03ë\\x03ë\\x03ë\\x03ë')\n buf.write('\\x03ë\\x03ë\\x03ë\\x03ë\\x03ë\\x03ì\\x03ì')\n buf.write('\\x03ì\\x03ì\\x03í\\x03í\\x03í\\x03í\\x03í')\n buf.write('\\x03í\\x03î\\x03î\\x03î\\x03î\\x03î\\x03î')\n buf.write('\\x03î\\x03î\\x03î\\x03ï\\x03ï\\x03ï\\x03ï')\n buf.write('\\x03ï\\x03ï\\x03ð\\x03ð\\x03ð\\x03ð\\x03ð')\n buf.write('\\x03ð\\x03ð\\x03ñ\\x03ñ\\x03ñ\\x03ñ\\x03ò')\n buf.write('\\x03ò\\x03ò\\x03ó\\x03ó\\x03ó\\x03ó\\x03ó')\n buf.write('\\x03ó\\x03ó\\x03ó\\x03ô\\x03ô\\x03ô\\x03ô')\n buf.write('\\x03ô\\x03ô\\x03ô\\x03ô\\x03õ\\x03õ\\x03õ')\n buf.write('\\x03õ\\x03õ\\x03õ\\x03õ\\x03ö\\x03ö\\x03ö')\n buf.write('\\x03ö\\x03ö\\x03ö\\x03ö\\x03ö\\x03÷\\x03÷')\n buf.write('\\x03÷\\x03÷\\x03÷\\x03÷\\x03÷\\x03÷\\x03÷')\n buf.write('\\x03÷\\x03÷\\x03÷\\x03÷\\x03÷\\x03÷\\x03÷')\n buf.write('\\x03÷\\x03ø\\x03ø\\x03ø\\x03ø\\x03ø\\x03ø')\n buf.write('\\x03ø\\x03ø\\x03ø\\x03ø\\x03ø\\x03ù\\x03ù')\n buf.write('\\x03ù\\x03ù\\x03ù\\x03ù\\x03ù\\x03ù\\x03ù')\n buf.write('\\x03ù\\x03ù\\x03ú\\x03ú\\x03ú\\x03ú\\x03ú')\n buf.write('\\x03û\\x03û\\x03û\\x03û\\x03û\\x03û\\x03û')\n buf.write('\\x03û\\x03ü\\x03ü\\x03ü\\x03ü\\x03ü\\x03ü')\n buf.write('\\x03ü\\x03ü\\x03ü\\x03ü\\x03ü\\x03ü\\x03ü')\n buf.write('\\x03ü\\x03ý\\x03ý\\x03ý\\x03ý\\x03þ\\x03þ')\n buf.write('\\x03þ\\x03þ\\x03þ\\x03þ\\x03þ\\x03ÿ\\x03ÿ')\n buf.write('\\x03ÿ\\x03ÿ\\x03ÿ\\x03Ā\\x03Ā\\x03Ā\\x03Ā')\n buf.write('\\x03Ā\\x03Ā\\x03ā\\x03ā\\x03ā\\x03ā\\x03ā')\n buf.write('\\x03ā\\x03ā\\x03Ă\\x03Ă\\x03Ă\\x03Ă\\x03Ă')\n buf.write('\\x03Ă\\x03Ă\\x03Ă\\x03ă\\x03ă\\x03ă\\x03ă')\n 
buf.write('\\x03ă\\x03ă\\x03ă\\x03ă\\x03ă\\x03ă\\x03Ą')\n buf.write('\\x03Ą\\x03Ą\\x03Ą\\x03Ą\\x03Ą\\x03Ą\\x03ą')\n buf.write('\\x03ą\\x03ą\\x03Ć\\x03Ć\\x03Ć\\x03Ć\\x03ć')\n buf.write('\\x03ć\\x03ć\\x03ć\\x03Ĉ\\x03Ĉ\\x03Ĉ\\x03Ĉ')\n buf.write('\\x03ĉ\\x03ĉ\\x03ĉ\\x03Ċ\\x03Ċ\\x03Ċ\\x03Ċ')\n buf.write('\\x03Ċ\\x03ċ\\x03ċ\\x03ċ\\x03ċ\\x03ċ\\x03Č')\n buf.write('\\x03Č\\x03Č\\x03Č\\x03Č\\x03Č\\x03Č\\x03č')\n buf.write('\\x03č\\x03č\\x03Ď\\x03Ď\\x03Ď\\x03Ď\\x03Ď')\n buf.write('\\x03Ď\\x03Ď\\x03Ď\\x03ď\\x03ď\\x03ď\\x03ď')\n buf.write('\\x03ď\\x03ď\\x03Đ\\x03Đ\\x03Đ\\x03Đ\\x03Đ')\n buf.write('\\x03Đ\\x03Đ\\x03Đ\\x03Đ\\x03Đ\\x03Đ\\x03đ')\n buf.write('\\x03đ\\x03đ\\x03đ\\x03đ\\x03đ\\x03đ\\x03đ')\n buf.write('\\x03Ē\\x03Ē\\x03Ē\\x03Ē\\x03ē\\x03ē\\x03ē')\n buf.write('\\x03ē\\x03ē\\x03ē\\x03Ĕ\\x03Ĕ\\x03Ĕ\\x03Ĕ')\n buf.write('\\x03Ĕ\\x03ĕ\\x03ĕ\\x03ĕ\\x03ĕ\\x03ĕ\\x03ĕ')\n buf.write('\\x03ĕ\\x03ĕ\\x03ĕ\\x03ĕ\\x03ĕ\\x03Ė\\x03Ė')\n buf.write('\\x03Ė\\x03Ė\\x03Ė\\x03Ė\\x03Ė\\x03Ė\\x03ė')\n buf.write('\\x03ė\\x03ė\\x03ė\\x03ė\\x03ė\\x03ė\\x03ė')\n buf.write('\\x03ė\\x03ė\\x03ė\\x03ė\\x03ė\\x03ė\\x03ė')\n buf.write('\\x03ė\\x03Ę\\x03Ę\\x03Ę\\x03Ę\\x03Ę\\x03Ę')\n buf.write('\\x03Ę\\x03Ę\\x03Ę\\x03Ę\\x03Ę\\x03ę\\x03ę')\n buf.write('\\x03ę\\x03ę\\x03ę\\x03ę\\x03ę\\x03Ě\\x03Ě')\n buf.write('\\x03Ě\\x03Ě\\x03Ě\\x03Ě\\x03Ě\\x03Ě\\x03Ě')\n buf.write('\\x03Ě\\x03ě\\x03ě\\x03ě\\x03ě\\x03ě\\x03ě')\n buf.write('\\x03ě\\x03ě\\x03Ĝ\\x03Ĝ\\x03Ĝ\\x03Ĝ\\x03Ĝ')\n buf.write('\\x03ĝ\\x03ĝ\\x03ĝ\\x03ĝ\\x03ĝ\\x03ĝ\\x03ĝ')\n buf.write('\\x03ĝ\\x03ĝ\\x03Ğ\\x03Ğ\\x03Ğ\\x03Ğ\\x03Ğ')\n buf.write('\\x03Ğ\\x03ğ\\x03ğ\\x03ğ\\x03ğ\\x03ğ\\x03ğ')\n buf.write('\\x03ğ\\x03ğ\\x03ğ\\x03ğ\\x03Ġ\\x03Ġ\\x03Ġ')\n buf.write('\\x03Ġ\\x03Ġ\\x03Ġ\\x03ġ\\x03ġ\\x03ġ\\x03ġ')\n buf.write('\\x03ġ\\x03Ģ\\x03Ģ\\x03Ģ\\x03Ģ\\x03Ģ\\x03Ģ')\n buf.write('\\x03Ģ\\x03Ģ\\x03Ģ\\x03Ģ\\x03Ģ\\x03Ģ\\x03ģ')\n buf.write('\\x03ģ\\x03ģ\\x03ģ\\x03ģ\\x03ģ\\x03ģ\\x03ģ')\n buf.write('\\x03ģ\\x03Ĥ\\x03Ĥ\\x03Ĥ\\x03Ĥ\\x03Ĥ\\x03Ĥ')\n 
buf.write('\\x03Ĥ\\x03Ĥ\\x03Ĥ\\x03Ĥ\\x03ĥ\\x03ĥ\\x03ĥ')\n buf.write('\\x03ĥ\\x03ĥ\\x03ĥ\\x03ĥ\\x03Ħ\\x03Ħ\\x03Ħ')\n buf.write('\\x03Ħ\\x03Ħ\\x03Ħ\\x03Ħ\\x03Ħ\\x03Ħ\\x03Ħ')\n buf.write('\\x03ħ\\x03ħ\\x03ħ\\x03ħ\\x03ħ\\x03ħ\\x03ħ')\n buf.write('\\x03ħ\\x03ħ\\x03ħ\\x03Ĩ\\x03Ĩ\\x03Ĩ\\x03Ĩ')\n buf.write('\\x03Ĩ\\x03Ĩ\\x03Ĩ\\x03Ĩ\\x03ĩ\\x03ĩ\\x03ĩ')\n buf.write('\\x03ĩ\\x03ĩ\\x03ĩ\\x03Ī\\x03Ī\\x03Ī\\x03Ī')\n buf.write('\\x03Ī\\x03Ī\\x03Ī\\x03Ī\\x03Ī\\x03Ī\\x03ī')\n buf.write('\\x03ī\\x03ī\\x03ī\\x03ī\\x03ī\\x03Ĭ\\x03Ĭ')\n buf.write('\\x03Ĭ\\x03Ĭ\\x03Ĭ\\x03Ĭ\\x03ĭ\\x03ĭ\\x03ĭ')\n buf.write('\\x03ĭ\\x03Į\\x03Į\\x03Į\\x03Į\\x03Į\\x03į')\n buf.write('\\x03į\\x03į\\x03į\\x03į\\x03İ\\x03İ\\x03İ')\n buf.write('\\x03İ\\x03İ\\x03İ\\x03İ\\x03ı\\x03ı\\x03ı')\n buf.write('\\x03ı\\x03IJ\\x03IJ\\x03IJ\\x03IJ\\x03IJ\\x03IJ')\n buf.write('\\x03IJ\\x03IJ\\x03IJ\\x03IJ\\x03ij\\x03ij\\x03ij')\n buf.write('\\x03ij\\x03ij\\x03ij\\x03ij\\x03ij\\x03ij\\x03ij')\n buf.write('\\x03ij\\x03ij\\x03Ĵ\\x03Ĵ\\x03Ĵ\\x03Ĵ\\x03Ĵ')\n buf.write('\\x03Ĵ\\x03Ĵ\\x03ĵ\\x03ĵ\\x03ĵ\\x03ĵ\\x03ĵ')\n buf.write('\\x03ĵ\\x03ĵ\\x03ĵ\\x03ĵ\\x03ĵ\\x03Ķ\\x03Ķ')\n buf.write('\\x03Ķ\\x03Ķ\\x03Ķ\\x03Ķ\\x03Ķ\\x03ķ\\x03ķ')\n buf.write('\\x03ķ\\x03ķ\\x03ķ\\x03ķ\\x03ķ\\x03ķ\\x03ĸ')\n buf.write('\\x03ĸ\\x03ĸ\\x03ĸ\\x03ĸ\\x03ĸ\\x03ĸ\\x03ĸ')\n buf.write('\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ')\n buf.write('\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ')\n buf.write('\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03ĺ')\n buf.write('\\x03ĺ\\x03ĺ\\x03ĺ\\x03ĺ\\x03ĺ\\x03ĺ\\x03Ļ')\n buf.write('\\x03Ļ\\x03Ļ\\x03Ļ\\x03Ļ\\x03Ļ\\x03Ļ\\x03Ļ')\n buf.write('\\x03Ļ\\x03Ļ\\x03Ļ\\x03Ļ\\x03Ļ\\x03ļ\\x03ļ')\n buf.write('\\x03ļ\\x03ļ\\x03ļ\\x03ļ\\x03ļ\\x03Ľ\\x03Ľ')\n buf.write('\\x03Ľ\\x03Ľ\\x03Ľ\\x03Ľ\\x03Ľ\\x03Ľ\\x03Ľ')\n buf.write('\\x03Ľ\\x03ľ\\x03ľ\\x03ľ\\x03ľ\\x03ľ\\x03ľ')\n buf.write('\\x03Ŀ\\x03Ŀ\\x03Ŀ\\x03Ŀ\\x03Ŀ\\x03Ŀ\\x03Ŀ')\n buf.write('\\x03Ŀ\\x03ŀ\\x03ŀ\\x03ŀ\\x03ŀ\\x03ŀ\\x03ŀ')\n buf.write('\\x03ŀ\\x03Ł\\x03Ł\\x03Ł\\x03Ł\\x03Ł\\x03Ł')\n 
buf.write('\\x03ł\\x03ł\\x03ł\\x03ł\\x03ł\\x03ł\\x03ł')\n buf.write('\\x03ł\\x03ł\\x03Ń\\x03Ń\\x03Ń\\x03Ń\\x03Ń')\n buf.write('\\x03Ń\\x03Ń\\x03ń\\x03ń\\x03ń\\x03ń\\x03Ņ')\n buf.write('\\x03Ņ\\x03Ņ\\x03Ņ\\x03Ņ\\x03Ņ\\x03ņ\\x03ņ')\n buf.write('\\x03ņ\\x03ņ\\x03ņ\\x03Ň\\x03Ň\\x03Ň\\x03Ň')\n buf.write('\\x03Ň\\x03Ň\\x03ň\\x03ň\\x03ň\\x03ň\\x03ň')\n buf.write('\\x03ň\\x03ň\\x03ʼn\\x03ʼn\\x03ʼn\\x03ʼn\\x03ʼn')\n buf.write('\\x03Ŋ\\x03Ŋ\\x03Ŋ\\x03Ŋ\\x03Ŋ\\x03Ŋ\\x03Ŋ')\n buf.write('\\x03Ŋ\\x03Ŋ\\x03Ŋ\\x03ŋ\\x03ŋ\\x03ŋ\\x03ŋ')\n buf.write('\\x03ŋ\\x03ŋ\\x03ŋ\\x03Ō\\x03Ō\\x03Ō\\x03Ō')\n buf.write('\\x03Ō\\x03Ō\\x03Ō\\x03Ō\\x03Ō\\x03Ō\\x03Ō')\n buf.write('\\x03Ō\\x03ō\\x03ō\\x03ō\\x03ō\\x03Ŏ\\x03Ŏ')\n buf.write('\\x03Ŏ\\x03Ŏ\\x03Ŏ\\x03Ŏ\\x03Ŏ\\x03ŏ\\x03ŏ')\n buf.write('\\x03ŏ\\x03ŏ\\x03ŏ\\x03ŏ\\x03ŏ\\x03Ő\\x03Ő')\n buf.write('\\x03Ő\\x03Ő\\x03Ő\\x03ő\\x03ő\\x03ő\\x03ő')\n buf.write('\\x03ő\\x03ő\\x03ő\\x03ő\\x03Œ\\x03Œ\\x03Œ')\n buf.write('\\x03Œ\\x03Œ\\x03Œ\\x03Œ\\x03œ\\x03œ\\x03œ')\n buf.write('\\x03œ\\x03œ\\x03Ŕ\\x03Ŕ\\x03Ŕ\\x03Ŕ\\x03Ŕ')\n buf.write('\\x03Ŕ\\x03Ŕ\\x03Ŕ\\x03Ŕ\\x03ŕ\\x03ŕ\\x03ŕ')\n buf.write('\\x03ŕ\\x03ŕ\\x03ŕ\\x03ŕ\\x03ŕ\\x03ŕ\\x03ŕ')\n buf.write('\\x03ŕ\\x03Ŗ\\x03Ŗ\\x03Ŗ\\x03Ŗ\\x03Ŗ\\x03Ŗ')\n buf.write('\\x03Ŗ\\x03Ŗ\\x03Ŗ\\x03Ŗ\\x03Ŗ\\x03Ŗ\\x03Ŗ')\n buf.write('\\x03ŗ\\x03ŗ\\x03ŗ\\x03ŗ\\x03ŗ\\x03ŗ\\x03ŗ')\n buf.write('\\x03ŗ\\x03ŗ\\x03ŗ\\x03ŗ\\x03ŗ\\x03ŗ\\x03ŗ')\n buf.write('\\x03ŗ\\x03ŗ\\x03ŗ\\x03ŗ\\x03Ř\\x03Ř\\x03Ř')\n buf.write('\\x03Ř\\x03Ř\\x03Ř\\x03Ř\\x03Ř\\x03Ř\\x03Ř')\n buf.write('\\x03Ř\\x03Ř\\x03ř\\x03ř\\x03ř\\x03ř\\x03ř')\n buf.write('\\x03ř\\x03ř\\x03ř\\x03ř\\x03ř\\x03ř\\x03ř')\n buf.write('\\x03ř\\x03ř\\x03ř\\x03ř\\x03Ś\\x03Ś\\x03Ś')\n buf.write('\\x03Ś\\x03ś\\x03ś\\x03ś\\x03ś\\x03ś\\x03Ŝ')\n buf.write('\\x03Ŝ\\x03Ŝ\\x03Ŝ\\x03Ŝ\\x03Ŝ\\x03Ŝ\\x03Ŝ')\n buf.write('\\x03Ŝ\\x03ŝ\\x03ŝ\\x03ŝ\\x03ŝ\\x03ŝ\\x03ŝ')\n buf.write('\\x03Ş\\x03Ş\\x03Ş\\x03Ş\\x03Ş\\x03ş\\x03ş')\n buf.write('\\x03ş\\x03ş\\x03ş\\x03ş\\x03ş\\x03ş\\x03ş')\n 
buf.write('\\x03Š\\x03Š\\x03Š\\x03Š\\x03Š\\x03Š\\x03Š')\n buf.write('\\x03Š\\x03Š\\x03š\\x03š\\x03š\\x03š\\x03š')\n buf.write('\\x03š\\x03š\\x03š\\x03š\\x03Ţ\\x03Ţ\\x03Ţ')\n buf.write('\\x03Ţ\\x03Ţ\\x03Ţ\\x03Ţ\\x03Ţ\\x03Ţ\\x03Ţ')\n buf.write('\\x03Ţ\\x03Ţ\\x03Ţ\\x03Ţ\\x03Ţ\\x03ţ\\x03ţ')\n buf.write('\\x03ţ\\x03ţ\\x03ţ\\x03ţ\\x03ţ\\x03Ť\\x03Ť')\n buf.write('\\x03Ť\\x03Ť\\x03Ť\\x03ť\\x03ť\\x03ť\\x03ť')\n buf.write('\\x03ť\\x03Ŧ\\x03Ŧ\\x03Ŧ\\x03Ŧ\\x03Ŧ\\x03Ŧ')\n buf.write('\\x03Ŧ\\x03Ŧ\\x03Ŧ\\x03ŧ\\x03ŧ\\x03ŧ\\x03ŧ')\n buf.write('\\x03ŧ\\x03ŧ\\x03ŧ\\x03ŧ\\x03ŧ\\x03Ũ\\x03Ũ')\n buf.write('\\x03Ũ\\x03Ũ\\x03Ũ\\x03ũ\\x03ũ\\x03ũ\\x03ũ')\n buf.write('\\x03ũ\\x03ũ\\x03ũ\\x03ũ\\x03ũ\\x03ũ\\x03ũ')\n buf.write('\\x03ũ\\x03ũ\\x03ũ\\x03Ū\\x03Ū\\x03Ū\\x03Ū')\n buf.write('\\x03Ū\\x03Ū\\x03Ū\\x03Ū\\x03ū\\x03ū\\x03ū')\n buf.write('\\x03ū\\x03ū\\x03ū\\x03ū\\x03ū\\x03ū\\x03Ŭ')\n buf.write('\\x03Ŭ\\x03Ŭ\\x03Ŭ\\x03Ŭ\\x03Ŭ\\x03Ŭ\\x03Ŭ')\n buf.write('\\x03Ŭ\\x03Ŭ\\x03Ŭ\\x03ŭ\\x03ŭ\\x03ŭ\\x03ŭ')\n buf.write('\\x03ŭ\\x03ŭ\\x03Ů\\x03Ů\\x03Ů\\x03Ů\\x03Ů')\n buf.write('\\x03Ů\\x03Ů\\x03Ů\\x03ů\\x03ů\\x03ů\\x03ů')\n buf.write('\\x03ů\\x03ů\\x03ů\\x03ů\\x03ů\\x03ů\\x03Ű')\n buf.write('\\x03Ű\\x03Ű\\x03Ű\\x03Ű\\x03Ű\\x03Ű\\x03Ű')\n buf.write('\\x03Ű\\x03Ű\\x03Ű\\x03Ű\\x03Ű\\x03ű\\x03ű')\n buf.write('\\x03ű\\x03ű\\x03ű\\x03ű\\x03ű\\x03Ų\\x03Ų')\n buf.write('\\x03Ų\\x03Ų\\x03Ų\\x03Ų\\x03Ų\\x03Ų\\x03Ų')\n buf.write('\\x03Ų\\x03Ų\\x03ų\\x03ų\\x03ų\\x03ų\\x03ų')\n buf.write('\\x03ų\\x03ų\\x03Ŵ\\x03Ŵ\\x03Ŵ\\x03Ŵ\\x03Ŵ')\n buf.write('\\x03Ŵ\\x03Ŵ\\x03Ŵ\\x03Ŵ\\x03Ŵ\\x03Ŵ\\x03Ŵ')\n buf.write('\\x03ŵ\\x03ŵ\\x03ŵ\\x03ŵ\\x03ŵ\\x03ŵ\\x03ŵ')\n buf.write('\\x03ŵ\\x03ŵ\\x03ŵ\\x03ŵ\\x03ŵ\\x03ŵ\\x03Ŷ')\n buf.write('\\x03Ŷ\\x03Ŷ\\x03Ŷ\\x03Ŷ\\x03Ŷ\\x03Ŷ\\x03Ŷ')\n buf.write('\\x03Ŷ\\x03Ŷ\\x03Ŷ\\x03Ŷ\\x03Ŷ\\x03Ŷ\\x03ŷ')\n buf.write('\\x03ŷ\\x03ŷ\\x03ŷ\\x03ŷ\\x03ŷ\\x03ŷ\\x03ŷ')\n buf.write('\\x03Ÿ\\x03Ÿ\\x03Ÿ\\x03Ÿ\\x03Ÿ\\x03Ÿ\\x03Ÿ')\n buf.write('\\x03Ÿ\\x03Ź\\x03Ź\\x03Ź\\x03Ź\\x03Ź\\x03Ź')\n 
buf.write('\\x03Ź\\x03Ź\\x03ź\\x03ź\\x03ź\\x03ź\\x03ź')\n buf.write('\\x03ź\\x03Ż\\x03Ż\\x03Ż\\x03Ż\\x03ż\\x03ż')\n buf.write('\\x03ż\\x03ż\\x03ż\\x03Ž\\x03Ž\\x03Ž\\x03Ž')\n buf.write('\\x03Ž\\x03ž\\x03ž\\x03ž\\x03ž\\x03ž\\x03ž')\n buf.write('\\x03ž\\x03ž\\x03ž\\x03ž\\x03ſ\\x03ſ\\x03ſ')\n buf.write('\\x03ſ\\x03ſ\\x03ſ\\x03ſ\\x03ſ\\x03ſ\\x03ſ')\n buf.write('\\x03ſ\\x03ſ\\x03ſ\\x03ſ\\x03ſ\\x03ſ\\x03ſ')\n buf.write('\\x03ſ\\x03ſ\\x03ſ\\x03ſ\\x03ſ\\x03ſ\\x03ſ')\n buf.write('\\x03ſ\\x03ſ\\x03ſ\\x03ſ\\x03ƀ\\x03ƀ\\x03ƀ')\n buf.write('\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ')\n buf.write('\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ')\n buf.write('\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ')\n buf.write('\\x03ƀ\\x03ƀ\\x03ƀ\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ')\n buf.write('\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ')\n buf.write('\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ')\n buf.write('\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ƃ')\n buf.write('\\x03Ƃ\\x03Ƃ\\x03Ƃ\\x03Ƃ\\x03Ƃ\\x03Ƃ\\x03Ƃ')\n buf.write('\\x03Ƃ\\x03Ƃ\\x03Ƃ\\x03Ƃ\\x03Ƃ\\x03Ƃ\\x03ƃ')\n buf.write('\\x03ƃ\\x03ƃ\\x03ƃ\\x03ƃ\\x03ƃ\\x03ƃ\\x03ƃ')\n buf.write('\\x03ƃ\\x03ƃ\\x03ƃ\\x03ƃ\\x03ƃ\\x03ƃ\\x03Ƅ')\n buf.write('\\x03Ƅ\\x03Ƅ\\x03Ƅ\\x03Ƅ\\x03Ƅ\\x03Ƅ\\x03Ƅ')\n buf.write('\\x03Ƅ\\x03Ƅ\\x03Ƅ\\x03Ƅ\\x03Ƅ\\x03Ƅ\\x03Ƅ')\n buf.write('\\x03Ƅ\\x03ƅ\\x03ƅ\\x03ƅ\\x03ƅ\\x03ƅ\\x03ƅ')\n buf.write('\\x03ƅ\\x03ƅ\\x03ƅ\\x03ƅ\\x03ƅ\\x03ƅ\\x03ƅ')\n buf.write('\\x03ƅ\\x03ƅ\\x03ƅ\\x03Ɔ\\x03Ɔ\\x03Ɔ\\x03Ƈ')\n buf.write('\\x03Ƈ\\x03Ƈ\\x03Ƈ\\x03Ƈ\\x03Ƈ\\x03Ƈ\\x03Ƈ')\n buf.write('\\x03Ƈ\\x03ƈ\\x03ƈ\\x03ƈ\\x03ƈ\\x03ƈ\\x03ƈ')\n buf.write('\\x03ƈ\\x03ƈ\\x03ƈ\\x03ƈ\\x03ƈ\\x03ƈ\\x03Ɖ')\n buf.write('\\x03Ɖ\\x03Ɖ\\x03Ɖ\\x03Ɖ\\x03Ɖ\\x03Ɖ\\x03Ɖ')\n buf.write('\\x03Ɖ\\x03Ɖ\\x03Ɗ\\x03Ɗ\\x03Ɗ\\x03Ɗ\\x03Ɗ')\n buf.write('\\x03Ɗ\\x03Ƌ\\x03Ƌ\\x03Ƌ\\x03Ƌ\\x03Ƌ\\x03Ƌ')\n buf.write('\\x03Ƌ\\x03Ƌ\\x03ƌ\\x03ƌ\\x03ƌ\\x03ƌ\\x03ƌ')\n buf.write('\\x03ƍ\\x03ƍ\\x03ƍ\\x03ƍ\\x03ƍ\\x03Ǝ\\x03Ǝ')\n buf.write('\\x03Ǝ\\x03Ǝ\\x03Ǝ\\x03Ǝ\\x03Ǝ\\x03Ǝ\\x03Ǝ')\n 
buf.write('\\x03Ə\\x03Ə\\x03Ə\\x03Ə\\x03Ə\\x03Ɛ\\x03Ɛ')\n buf.write('\\x03Ɛ\\x03Ɛ\\x03Ɛ\\x03Ɛ\\x03Ɛ\\x03Ɛ\\x03Ɛ')\n buf.write('\\x03Ɛ\\x03Ƒ\\x03Ƒ\\x03Ƒ\\x03Ƒ\\x03Ƒ\\x03Ƒ')\n buf.write('\\x03ƒ\\x03ƒ\\x03ƒ\\x03ƒ\\x03ƒ\\x03ƒ\\x03Ɠ')\n buf.write('\\x03Ɠ\\x03Ɠ\\x03Ɠ\\x03Ɠ\\x03Ɠ\\x03Ɠ\\x03Ɣ')\n buf.write('\\x03Ɣ\\x03Ɣ\\x03Ɣ\\x03Ɣ\\x03Ɣ\\x03Ɣ\\x03Ɣ')\n buf.write('\\x03Ɣ\\x03Ɣ\\x03ƕ\\x03ƕ\\x03ƕ\\x03ƕ\\x03ƕ')\n buf.write('\\x03ƕ\\x03ƕ\\x03ƕ\\x03Ɩ\\x03Ɩ\\x03Ɩ\\x03Ɩ')\n buf.write('\\x03Ɩ\\x03Ɩ\\x03Ɨ\\x03Ɨ\\x03Ɨ\\x03Ɨ\\x03Ɨ')\n buf.write('\\x03Ɨ\\x03Ɨ\\x03Ƙ\\x03Ƙ\\x03Ƙ\\x03Ƙ\\x03Ƙ')\n buf.write('\\x03Ƙ\\x03Ƙ\\x03Ƙ\\x03ƙ\\x03ƙ\\x03ƙ\\x03ƙ')\n buf.write('\\x03ƙ\\x03ƙ\\x03ƙ\\x03ƚ\\x03ƚ\\x03ƚ\\x03ƚ')\n buf.write('\\x03ƚ\\x03ƚ\\x03ƚ\\x03ƛ\\x03ƛ\\x03ƛ\\x03ƛ')\n buf.write('\\x03Ɯ\\x03Ɯ\\x03Ɯ\\x03Ɯ\\x03Ɯ\\x03Ɯ\\x03Ɲ')\n buf.write('\\x03Ɲ\\x03Ɲ\\x03Ɲ\\x03Ɲ\\x03Ɲ\\x03Ɲ\\x03Ɲ')\n buf.write('\\x03Ɲ\\x03ƞ\\x03ƞ\\x03ƞ\\x03ƞ\\x03ƞ\\x03ƞ')\n buf.write('\\x03Ɵ\\x03Ɵ\\x03Ɵ\\x03Ɵ\\x03Ɵ\\x03Ɵ\\x03Ɵ')\n buf.write('\\x03Ơ\\x03Ơ\\x03Ơ\\x03Ơ\\x03Ơ\\x03Ơ\\x03Ơ')\n buf.write('\\x03Ơ\\x03ơ\\x03ơ\\x03ơ\\x03ơ\\x03ơ\\x03ơ')\n buf.write('\\x03ơ\\x03ơ\\x03ơ\\x03Ƣ\\x03Ƣ\\x03Ƣ\\x03Ƣ')\n buf.write('\\x03Ƣ\\x03Ƣ\\x03Ƣ\\x03Ƣ\\x03Ƣ\\x03ƣ\\x03ƣ')\n buf.write('\\x03ƣ\\x03ƣ\\x03ƣ\\x03ƣ\\x03ƣ\\x03Ƥ\\x03Ƥ')\n buf.write('\\x03Ƥ\\x03Ƥ\\x03Ƥ\\x03Ƥ\\x03Ƥ\\x03Ƥ\\x03ƥ')\n buf.write('\\x03ƥ\\x03ƥ\\x03ƥ\\x03ƥ\\x03ƥ\\x03ƥ\\x03ƥ')\n buf.write('\\x03Ʀ\\x03Ʀ\\x03Ʀ\\x03Ʀ\\x03Ʀ\\x03Ʀ\\x03Ʀ')\n buf.write('\\x03Ʀ\\x03Ʀ\\x03Ƨ\\x03Ƨ\\x03Ƨ\\x03Ƨ\\x03Ƨ')\n buf.write('\\x03ƨ\\x03ƨ\\x03ƨ\\x03ƨ\\x03ƨ\\x03ƨ\\x03ƨ')\n buf.write('\\x03ƨ\\x03Ʃ\\x03Ʃ\\x03Ʃ\\x03Ʃ\\x03Ʃ\\x03Ʃ')\n buf.write('\\x03Ʃ\\x03Ʃ\\x03Ʃ\\x03Ʃ\\x03Ʃ\\x03ƪ\\x03ƪ')\n buf.write('\\x03ƪ\\x03ƪ\\x03ƪ\\x03ƫ\\x03ƫ\\x03ƫ\\x03ƫ')\n buf.write('\\x03ƫ\\x03ƫ\\x03ƫ\\x03ƫ\\x03ƫ\\x03Ƭ\\x03Ƭ')\n buf.write('\\x03Ƭ\\x03Ƭ\\x03Ƭ\\x03Ƭ\\x03ƭ\\x03ƭ\\x03ƭ')\n buf.write('\\x03ƭ\\x03ƭ\\x03ƭ\\x03Ʈ\\x03Ʈ\\x03Ʈ\\x03Ʈ')\n buf.write('\\x03Ʈ\\x03Ư\\x03Ư\\x03Ư\\x03Ư\\x03Ư\\x03Ư')\n 
buf.write('\\x03Ư\\x03ư\\x03ư\\x03ư\\x03ư\\x03ư\\x03Ʊ')\n buf.write('\\x03Ʊ\\x03Ʊ\\x03Ʊ\\x03Ʊ\\x03Ʊ\\x03Ʋ\\x03Ʋ')\n buf.write('\\x03Ʋ\\x03Ʋ\\x03Ƴ\\x03Ƴ\\x03Ƴ\\x03Ƴ\\x03Ƴ')\n buf.write('\\x03Ƴ\\x03Ƴ\\x03ƴ\\x03ƴ\\x03ƴ\\x03ƴ\\x03ƴ')\n buf.write('\\x03ƴ\\x03ƴ\\x03ƴ\\x03ƴ\\x03ƴ\\x03ƴ\\x03ƴ')\n buf.write('\\x03ƴ\\x03ƴ\\x03Ƶ\\x03Ƶ\\x03Ƶ\\x03Ƶ\\x03Ƶ')\n buf.write('\\x03Ƶ\\x03Ƶ\\x03Ƶ\\x03ƶ\\x03ƶ\\x03ƶ\\x03ƶ')\n buf.write('\\x03ƶ\\x03ƶ\\x03ƶ\\x03ƶ\\x03ƶ\\x03ƶ\\x03ƶ')\n buf.write('\\x03ƶ\\x03ƶ\\x03Ʒ\\x03Ʒ\\x03Ʒ\\x03Ʒ\\x03Ʒ')\n buf.write('\\x03Ʒ\\x03Ʒ\\x03Ʒ\\x03Ʒ\\x03Ʒ\\x03Ʒ\\x03Ƹ')\n buf.write('\\x03Ƹ\\x03Ƹ\\x03Ƹ\\x03Ƹ\\x03Ƹ\\x03Ƹ\\x03Ƹ')\n buf.write('\\x03Ƹ\\x03Ƹ\\x03ƹ\\x03ƹ\\x03ƹ\\x03ƹ\\x03ƹ')\n buf.write('\\x03ƹ\\x03ƹ\\x03ƹ\\x03ƹ\\x03ƹ\\x03ƺ\\x03ƺ')\n buf.write('\\x03ƺ\\x03ƺ\\x03ƺ\\x03ƺ\\x03ƺ\\x03ƺ\\x03ƺ')\n buf.write('\\x03ƺ\\x03ƺ\\x03ƺ\\x03ƺ\\x03ƺ\\x03ƻ\\x03ƻ')\n buf.write('\\x03ƻ\\x03ƻ\\x03ƻ\\x03ƻ\\x03ƻ\\x03ƻ\\x03ƻ')\n buf.write('\\x03Ƽ\\x03Ƽ\\x03Ƽ\\x03Ƽ\\x03Ƽ\\x03Ƽ\\x03ƽ')\n buf.write('\\x03ƽ\\x03ƽ\\x03ƽ\\x03ƽ\\x03ƽ\\x03ƽ\\x03ƽ')\n buf.write('\\x03ƽ\\x03ƾ\\x03ƾ\\x03ƾ\\x03ƾ\\x03ƾ\\x03ƾ')\n buf.write('\\x03ƾ\\x03ƾ\\x03ƿ\\x03ƿ\\x03ƿ\\x03ƿ\\x03ƿ')\n buf.write('\\x03ƿ\\x03ƿ\\x03ƿ\\x03ƿ\\x03ƿ\\x03ƿ\\x03ƿ')\n buf.write('\\x03ƿ\\x03ǀ\\x03ǀ\\x03ǀ\\x03ǀ\\x03ǀ\\x03ǀ')\n buf.write('\\x03ǀ\\x03ǀ\\x03ǀ\\x03ǁ\\x03ǁ\\x03ǁ\\x03ǁ')\n buf.write('\\x03ǁ\\x03ǂ\\x03ǂ\\x03ǂ\\x03ǂ\\x03ǃ\\x03ǃ')\n buf.write('\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ')\n buf.write('\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ')\n buf.write('\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ')\n buf.write('\\x03ǃ\\x03ǃ\\x03DŽ\\x03DŽ\\x03DŽ\\x03DŽ\\x03DŽ')\n buf.write('\\x03Dž\\x03Dž\\x03Dž\\x03Dž\\x03Dž\\x03Dž\\x03Dž')\n buf.write('\\x03Dž\\x03Dž\\x03Dž\\x03Dž\\x03dž\\x03dž\\x03dž')\n buf.write('\\x03dž\\x03dž\\x03dž\\x03dž\\x03dž\\x03dž\\x03dž')\n buf.write('\\x03dž\\x03dž\\x03dž\\x03dž\\x03dž\\x03dž\\x03dž')\n buf.write('\\x03dž\\x03LJ\\x03LJ\\x03LJ\\x03LJ\\x03LJ\\x03LJ')\n 
buf.write('\\x03LJ\\x03LJ\\x03LJ\\x03LJ\\x03LJ\\x03LJ\\x03LJ')\n buf.write('\\x03LJ\\x03LJ\\x03LJ\\x03Lj\\x03Lj\\x03Lj\\x03Lj')\n buf.write('\\x03Lj\\x03Lj\\x03Lj\\x03Lj\\x03Lj\\x03Lj\\x03Lj')\n buf.write('\\x03Lj\\x03Lj\\x03Lj\\x03Lj\\x03Lj\\x03Lj\\x03Lj')\n buf.write('\\x03Lj\\x03lj\\x03lj\\x03lj\\x03lj\\x03lj\\x03lj')\n buf.write('\\x03lj\\x03lj\\x03lj\\x03lj\\x03lj\\x03lj\\x03lj')\n buf.write('\\x03lj\\x03lj\\x03lj\\x03lj\\x03lj\\x03lj\\x03lj')\n buf.write('\\x03lj\\x03lj\\x03lj\\x03NJ\\x03NJ\\x03NJ\\x03NJ')\n buf.write('\\x03NJ\\x03NJ\\x03NJ\\x03NJ\\x03NJ\\x03NJ\\x03NJ')\n buf.write('\\x03NJ\\x03NJ\\x03NJ\\x03NJ\\x03Nj\\x03Nj\\x03Nj')\n buf.write('\\x03Nj\\x03Nj\\x03Nj\\x03Nj\\x03Nj\\x03Nj\\x03Nj')\n buf.write('\\x03nj\\x03nj\\x03nj\\x03nj\\x03nj\\x03nj\\x03nj')\n buf.write('\\x03nj\\x03nj\\x03nj\\x03nj\\x03Ǎ\\x03Ǎ\\x03Ǎ')\n buf.write('\\x03Ǎ\\x03Ǎ\\x03Ǎ\\x03Ǎ\\x03Ǎ\\x03ǎ\\x03ǎ')\n buf.write('\\x03ǎ\\x03ǎ\\x03ǎ\\x03ǎ\\x03ǎ\\x03ǎ\\x03ǎ')\n buf.write('\\x03ǎ\\x03ǎ\\x03ǎ\\x03ǎ\\x03Ǐ\\x03Ǐ\\x03Ǐ')\n buf.write('\\x03Ǐ\\x03Ǐ\\x03Ǐ\\x03Ǐ\\x03Ǐ\\x03Ǐ\\x03Ǐ')\n buf.write('\\x03Ǐ\\x03Ǐ\\x03Ǐ\\x03Ǐ\\x03Ǐ\\x03Ǐ\\x03ǐ')\n buf.write('\\x03ǐ\\x03ǐ\\x03ǐ\\x03ǐ\\x03ǐ\\x03ǐ\\x03ǐ')\n buf.write('\\x03ǐ\\x03ǐ\\x03ǐ\\x03ǐ\\x03ǐ\\x03ǐ\\x03ǐ')\n buf.write('\\x03ǐ\\x03Ǒ\\x03Ǒ\\x03Ǒ\\x03Ǒ\\x03Ǒ\\x03ǒ')\n buf.write('\\x03ǒ\\x03ǒ\\x03ǒ\\x03Ǔ\\x03Ǔ\\x03Ǔ\\x03Ǔ')\n buf.write('\\x03Ǔ\\x03ǔ\\x03ǔ\\x03ǔ\\x03ǔ\\x03Ǖ\\x03Ǖ')\n buf.write('\\x03Ǖ\\x03Ǖ\\x03Ǖ\\x03ǖ\\x03ǖ\\x03ǖ\\x03ǖ')\n buf.write('\\x03Ǘ\\x03Ǘ\\x03Ǘ\\x03Ǘ\\x03Ǘ\\x03Ǘ\\x03Ǘ')\n buf.write('\\x03ǘ\\x03ǘ\\x03ǘ\\x03ǘ\\x03Ǚ\\x03Ǚ\\x03Ǚ')\n buf.write('\\x03Ǚ\\x03Ǚ\\x03Ǚ\\x03ǚ\\x03ǚ\\x03ǚ\\x03ǚ')\n buf.write('\\x03ǚ\\x03ǚ\\x03ǚ\\x03ǚ\\x03ǚ\\x03ǚ\\x03ǚ')\n buf.write('\\x03ǚ\\x03ǚ\\x03ǚ\\x03ǚ\\x03ǚ\\x03Ǜ\\x03Ǜ')\n buf.write('\\x03Ǜ\\x03Ǜ\\x03Ǜ\\x03Ǜ\\x03Ǜ\\x03Ǜ\\x03Ǜ')\n buf.write('\\x03Ǜ\\x03Ǜ\\x03ǜ\\x03ǜ\\x03ǜ\\x03ǜ\\x03ǝ')\n buf.write('\\x03ǝ\\x03ǝ\\x03ǝ\\x03ǝ\\x03ǝ\\x03ǝ\\x03ǝ')\n 
buf.write('\\x03ǝ\\x03Ǟ\\x03Ǟ\\x03Ǟ\\x03Ǟ\\x03Ǟ\\x03Ǟ')\n buf.write('\\x03ǟ\\x03ǟ\\x03ǟ\\x03ǟ\\x03ǟ\\x03ǟ\\x03ǟ')\n buf.write('\\x03Ǡ\\x03Ǡ\\x03Ǡ\\x03Ǡ\\x03Ǡ\\x03ǡ\\x03ǡ')\n buf.write('\\x03ǡ\\x03ǡ\\x03ǡ\\x03ǡ\\x03ǡ\\x03Ǣ\\x03Ǣ')\n buf.write('\\x03Ǣ\\x03Ǣ\\x03Ǣ\\x03Ǣ\\x07Ǣ፨\\nǢ')\n buf.write('\\x0cǢ\\x0eǢ፫\\x0bǢ\\x03Ǣ\\x03Ǣ\\x03ǣ')\n buf.write('\\x03ǣ\\x03ǣ\\x07ǣ፲\\nǣ\\x0cǣ\\x0eǣ')\n buf.write('፵\\x0bǣ\\x03ǣ\\x06ǣ፸\\nǣ\\rǣ')\n buf.write('\\x0eǣ፹\\x03Ǥ\\x03Ǥ\\x03Ǥ\\x07Ǥ\\u137f')\n buf.write('\\nǤ\\x0cǤ\\x0eǤᎂ\\x0bǤ\\x03Ǥ\\x06Ǥ')\n buf.write('ᎅ\\nǤ\\rǤ\\x0eǤᎆ\\x03ǥ\\x03ǥ')\n buf.write('\\x03ǥ\\x03Ǧ\\x03Ǧ\\x03ǧ\\x03ǧ\\x03Ǩ\\x03Ǩ')\n buf.write('\\x03Ǩ\\x05Ǩ᎓\\nǨ\\x03Ǩ\\x03Ǩ\\x05Ǩ')\n buf.write('᎗\\nǨ\\x05Ǩ᎙\\nǨ\\x03Ǩ\\x03Ǩ\\x05')\n buf.write('Ǩ\\u139d\\nǨ\\x03ǩ\\x03ǩ\\x03ǩ\\x03ǩ\\x03')\n buf.write('ǩ\\x07ǩᎤ\\nǩ\\x0cǩ\\x0eǩᎧ\\x0b')\n buf.write('ǩ\\x03ǩ\\x03ǩ\\x03Ǫ\\x03Ǫ\\x03Ǫ\\x03Ǫ')\n buf.write('\\x03Ǫ\\x05ǪᎰ\\nǪ\\x03Ǫ\\x03Ǫ\\x03ǫ')\n buf.write('\\x03ǫ\\x03Ǭ\\x03Ǭ\\x03Ǭ\\x07ǬᎹ\\nǬ')\n buf.write('\\x0cǬ\\x0eǬᎼ\\x0bǬ\\x03Ǭ\\x03Ǭ\\x03Ǭ')\n buf.write('\\x03ǭ\\x03ǭ\\x03ǭ\\x07ǭᏄ\\nǭ\\x0cǭ')\n buf.write('\\x0eǭᏇ\\x0bǭ\\x03ǭ\\x03ǭ\\x03ǭ\\x03Ǯ')\n buf.write('\\x03Ǯ\\x03Ǯ\\x07ǮᏏ\\nǮ\\x0cǮ\\x0eǮ')\n buf.write('Ꮢ\\x0bǮ\\x03Ǯ\\x03Ǯ\\x03Ǯ\\x03ǯ\\x03ǯ')\n buf.write('\\x03ǯ\\x07ǯᏚ\\nǯ\\x0cǯ\\x0eǯᏝ')\n buf.write('\\x0bǯ\\x03ǯ\\x03ǯ\\x03ǯ\\x03ǰ\\x03ǰ\\x03DZ')\n buf.write('\\x03DZ\\x03DZ\\x03DZ\\x06DZᏨ\\nDZ\\rDZ')\n buf.write('\\x0eDZᏩ\\x03DZ\\x03DZ\\x03Dz\\x03Dz\\x03dz')\n buf.write('\\x03dz\\x03Ǵ\\x03Ǵ\\x03ǵ\\x03ǵ\\x03Ƕ\\x03Ƕ')\n buf.write('\\x03Ƕ\\x03Ƿ\\x03Ƿ\\x03Ǹ\\x03Ǹ\\x03ǹ\\x03ǹ')\n buf.write('\\x03Ǻ\\x03Ǻ\\x03ǻ\\x03ǻ\\x03Ǽ\\x03Ǽ\\x03ǽ')\n buf.write('\\x03ǽ\\x03ǽ\\x03Ǿ\\x03Ǿ\\x03Ǿ\\x03Ǿ\\x07Ǿ')\n buf.write('ᐌ\\nǾ\\x0cǾ\\x0eǾᐏ\\x0bǾ\\x03Ǿ')\n buf.write('\\x03Ǿ\\x03Ǿ\\x03Ǿ\\x03Ǿ\\x05Ǿᐖ\\nǾ')\n buf.write('\\x03ǿ\\x03ǿ\\x03Ȁ\\x03Ȁ\\x03ȁ\\x03ȁ\\x03ȁ')\n buf.write('\\x03Ȃ\\x03Ȃ\\x03ȃ\\x03ȃ\\x03ȃ\\x03Ȅ\\x03Ȅ')\n buf.write('\\x03Ȅ\\x03Ȅ\\x03Ȅ\\x03Ȅ\\x03Ȅ\\x03Ȅ\\x05Ȅ')\n 
buf.write('ᐬ\\nȄ\\x03ȅ\\x03ȅ\\x03Ȇ\\x03Ȇ\\x03ȇ')\n buf.write('\\x03ȇ\\x03Ȉ\\x03Ȉ\\x03ȉ\\x03ȉ\\x03Ȋ\\x03Ȋ')\n buf.write('\\x03Ȋ\\x03ȋ\\x03ȋ\\x03Ȍ\\x03Ȍ\\x03ȍ\\x03ȍ')\n buf.write('\\x03Ȏ\\x03Ȏ\\x03ȏ\\x03ȏ\\x03Ȑ\\x06Ȑᑆ')\n buf.write('\\nȐ\\rȐ\\x0eȐᑇ\\x03Ȑ\\x03Ȑ\\x03ȑ')\n buf.write('\\x03ȑ\\x03Ȓ\\x06Ȓᑏ\\nȒ\\rȒ\\x0eȒ')\n buf.write('ᑐ\\x03ȓ\\x07ȓᑔ\\nȓ\\x0cȓ\\x0eȓ')\n buf.write('ᑗ\\x0bȓ\\x03ȓ\\x05ȓᑚ\\nȓ\\x03ȓ')\n buf.write('\\x06ȓᑝ\\nȓ\\rȓ\\x0eȓᑞ\\x03Ȕ')\n buf.write('\\x03Ȕ\\x03Ȕ\\x03Ȕ\\x07Ȕᑥ\\nȔ\\x0cȔ')\n buf.write('\\x0eȔᑨ\\x0bȔ\\x03Ȕ\\x03Ȕ\\x05Ȕᑬ')\n buf.write('\\nȔ\\x03Ȕ\\x03Ȕ\\x03ȕ\\x03ȕ\\x03ȕ\\x03ȕ')\n buf.write('\\x07ȕᑴ\\nȕ\\x0cȕ\\x0eȕᑷ\\x0bȕ')\n buf.write('\\x03ȕ\\x03ȕ\\x03ȕ\\x03ȕ\\x03ȕ\\x03Ȗ\\x03Ȗ')\n buf.write('\\x03Ȗ\\x03Ȗ\\x03Ȗ\\x03Ȗ\\x03Ȗ\\x03Ȗ\\x03Ȗ')\n buf.write('\\x07Ȗᒇ\\nȖ\\x0cȖ\\x0eȖᒊ\\x0bȖ')\n buf.write('\\x03Ȗ\\x03Ȗ\\x05Ȗᒎ\\nȖ\\x03ȗ\\x05ȗ')\n buf.write('ᒑ\\nȗ\\x03ȗ\\x03ȗ\\x03Ș\\x03Ș\\x03ș')\n buf.write('\\x03ș\\x03ș\\x07șᒚ\\nș\\x0cș\\x0eș')\n buf.write('ᒝ\\x0bș\\x03Ț\\x03Ț\\x03Ț\\x03Ț\\x03Ț')\n buf.write('\\x03ț\\x03ț\\x03Ȝ\\x03Ȝ\\x03ȝ\\x03ȝ\\x03Ȟ')\n buf.write('\\x03Ȟ\\x03ȟ\\x03ȟ\\x03Ƞ\\x03Ƞ\\x03ȡ\\x03ȡ')\n buf.write('\\x03Ȣ\\x03Ȣ\\x03ȣ\\x03ȣ\\x03Ȥ\\x03Ȥ\\x03ȥ')\n buf.write('\\x03ȥ\\x03Ȧ\\x03Ȧ\\x03ȧ\\x03ȧ\\x03Ȩ\\x03Ȩ')\n buf.write('\\x03ȩ\\x03ȩ\\x03Ȫ\\x03Ȫ\\x03ȫ\\x03ȫ\\x03Ȭ')\n buf.write('\\x03Ȭ\\x03ȭ\\x03ȭ\\x03Ȯ\\x03Ȯ\\x03ȯ\\x03ȯ')\n buf.write('\\x03Ȱ\\x03Ȱ\\x03ȱ\\x03ȱ\\x03Ȳ\\x03Ȳ\\x03ȳ')\n buf.write('\\x03ȳ\\x03ȴ\\x03ȴ\\x07ᎺᏅᏐᏛᑵ')\n buf.write(\n '\\x02ȵ\\x03\\x03\\x05\\x04\\x07\\x05\\t\\x06\\x0b\\x07\\r\\x08\\x0f\\t\\x11\\n\\x13\\x0b\\x15\\x0c'\n )\n buf.write(\n \"\\x17\\r\\x19\\x0e\\x1b\\x0f\\x1d\\x10\\x1f\\x11!\\x12#\\x13%\\x14'\\x15)\\x16+\\x17\"\n )\n buf.write('-\\x18/\\x191\\x1a3\\x1b5\\x1c7\\x1d9\\x1e;\\x1f= ?!A\"C#E$G%')\n buf.write(\"I&K'M(O)Q*S+U,W-Y.[/]0_1a2c3e4g5i6k7\")\n buf.write('m8o9q:s;u<w=y>{?}@\\x7fA\\x81B\\x83C\\x85D\\x87E\\x89')\n buf.write('F\\x8bG\\x8dH\\x8fI\\x91J\\x93K\\x95L\\x97M\\x99')\n 
buf.write('N\\x9bO\\x9dP\\x9fQ¡R£S¥T§U©')\n buf.write('V«W\\xadX¯Y±Z³[µ\\\\·]¹')\n buf.write('^»_½`¿aÁbÃcÅdÇeÉ')\n buf.write('fËgÍhÏiÑjÓkÕl×mÙ')\n buf.write('nÛoÝpßqárãsåtçué')\n buf.write('vëwíxïyñzó{õ|÷}ù')\n buf.write('~û\\x7fý\\x80ÿ\\x81ā\\x82ă')\n buf.write('\\x83ą\\x84ć\\x85ĉ\\x86ċ\\x87')\n buf.write('č\\x88ď\\x89đ\\x8aē\\x8bĕ')\n buf.write('\\x8cė\\x8dę\\x8eě\\x8fĝ\\x90')\n buf.write('ğ\\x91ġ\\x92ģ\\x93ĥ\\x94ħ')\n buf.write('\\x95ĩ\\x96ī\\x97ĭ\\x98į\\x99')\n buf.write('ı\\x9aij\\x9bĵ\\x9cķ\\x9dĹ')\n buf.write('\\x9eĻ\\x9fĽ\\xa0Ŀ¡Ł¢')\n buf.write('Ń£Ņ¤Ň¥ʼn¦ŋ')\n buf.write('§ō¨ŏ©őªœ«')\n buf.write('ŕ¬ŗ\\xadř®ś¯ŝ')\n buf.write('°ş±š²ţ³ť´')\n buf.write('ŧµũ¶ū·ŭ¸ů')\n buf.write('¹űºų»ŵ¼ŷ½')\n buf.write('Ź¾Ż¿ŽÀſÁƁ')\n buf.write('ÂƃÃƅÄƇÅƉÆ')\n buf.write('ƋÇƍÈƏÉƑÊƓ')\n buf.write('ËƕÌƗÍƙÎƛÏ')\n buf.write('ƝÐƟÑơÒƣÓƥ')\n buf.write('ÔƧÕƩÖƫ×ƭØ')\n buf.write('ƯÙƱÚƳÛƵÜƷ')\n buf.write('ÝƹÞƻßƽàƿá')\n buf.write('ǁâǃãDžäLJålj')\n buf.write('æNjçǍèǏéǑê')\n buf.write('ǓëǕìǗíǙîǛ')\n buf.write('ïǝðǟñǡòǣó')\n buf.write('ǥôǧõǩöǫ÷ǭ')\n buf.write('øǯùDZúdzûǵü')\n buf.write('ǷýǹþǻÿǽĀǿ')\n buf.write('āȁĂȃăȅĄȇą')\n buf.write('ȉĆȋćȍĈȏĉȑ')\n buf.write('ĊȓċȕČȗčșĎ')\n buf.write('țďȝĐȟđȡĒȣ')\n buf.write('ēȥĔȧĕȩĖȫė')\n buf.write('ȭĘȯęȱĚȳěȵ')\n buf.write('ĜȷĝȹĞȻğȽĠ')\n buf.write('ȿġɁĢɃģɅĤɇ')\n buf.write('ĥɉĦɋħɍĨɏĩ')\n buf.write('ɑĪɓīɕĬɗĭə')\n buf.write('ĮɛįɝİɟıɡIJ')\n buf.write('ɣijɥĴɧĵɩĶɫ')\n buf.write('ķɭĸɯĹɱĺɳĻ')\n buf.write('ɵļɷĽɹľɻĿɽ')\n buf.write('ŀɿŁʁłʃŃʅń')\n buf.write('ʇŅʉņʋŇʍňʏ')\n buf.write('ʼnʑŊʓŋʕŌʗō')\n buf.write('ʙŎʛŏʝŐʟőʡ')\n buf.write('ŒʣœʥŔʧŕʩŖ')\n buf.write('ʫŗʭŘʯřʱŚʳ')\n buf.write('śʵŜʷŝʹŞʻş')\n buf.write('ʽŠʿšˁŢ˃ţ˅')\n buf.write('ŤˇťˉŦˋŧˍŨ')\n buf.write('ˏũˑŪ˓ū˕Ŭ˗')\n buf.write('ŭ˙ٲů˝Ű˟ű')\n buf.write('ˡŲˣų˥Ŵ˧ŵ˩')\n buf.write('Ŷ˫ŷ˭Ÿ˯Ź˱ź')\n buf.write('˳Ż˵ż˷Ž˹ž˻')\n buf.write('ſ˽ƀ˿Ɓ́Ƃ̃ƃ')\n buf.write('̅Ƅ̇ƅ̉Ɔ̋Ƈ̍')\n buf.write('ƈ̏Ɖ̑Ɗ̓Ƌ̕ƌ')\n buf.write('̗ƍ̙Ǝ̛Ə̝Ɛ̟')\n buf.write('Ƒ̡ƒ̣Ɠ̥Ɣ̧ƕ')\n buf.write('̩Ɩ̫Ɨ̭Ƙ̯ƙ̱')\n buf.write('ƚ̳ƛ̵Ɯ̷Ɲ̹ƞ')\n buf.write('̻Ɵ̽Ơ̿ớƢ̓')\n 
buf.write('ƣͅƤ͇ƥ͉Ʀ͋Ƨ')\n buf.write('͍ƨ͏Ʃ͑ƪ͓ƫ͕')\n buf.write('Ƭ͗ƭ͙Ʈ͛Ư͝ư')\n buf.write('͟Ʊ͡ƲͣƳͥƴͧ')\n buf.write('ƵͩƶͫƷͭƸͯƹ')\n buf.write('ͱƺͳƻ͵Ƽͷƽ\\u0379')\n buf.write('ƾͻƿͽǀͿǁ\\u0381ǂ')\n buf.write('\\u0383ǃ΅DŽ·DžΉdž\\u038b')\n buf.write('LJ\\u038dLjΏljΑNJΓNj')\n buf.write('ΕnjΗǍΙǎΛǏΝ')\n buf.write('ǐΟǑΡǒΣǓΥǔ')\n buf.write('ΧǕΩǖΫǗέǘί')\n buf.write('ǙαǚγǛεǜηǝ')\n buf.write('ιǞλǟνǠοǡρ')\n buf.write('ǢσǣυǤχǥωǦ')\n buf.write('ϋǧύǨϏǩϑǪϓ')\n buf.write('\\x02ϕ\\x02ϗ\\x02ϙ\\x02ϛ\\x02ϝ\\x02ϟ\\x02ϡ')\n buf.write('ǫϣǬϥǭϧǮϩǯ')\n buf.write('ϫǰϭDZϯDzϱdzϳ')\n buf.write('ǴϵǵϷǶϹǷϻǸ')\n buf.write('ϽǹϿǺЁǻЃǼЅ')\n buf.write('ǽЇǾЉǿЋȀЍȁ')\n buf.write('ЏȂБ\\x02ГȃЕȄЗȅ')\n buf.write('ЙȆЛȇНȈПȉС')\n buf.write('\\x02У\\x02Х\\x02ЧȊЩȋЫȌ')\n buf.write('Э\\x02Я\\x02бȍгȎе\\x02з')\n buf.write('\\x02й\\x02л\\x02н\\x02п\\x02с\\x02у\\x02х')\n buf.write('\\x02ч\\x02щ\\x02ы\\x02э\\x02я\\x02ё\\x02ѓ')\n buf.write('\\x02ѕ\\x02ї\\x02љ\\x02ћ\\x02ѝ\\x02џ\\x02ѡ')\n buf.write(\n \"\\x02ѣ\\x02ѥ\\x02ѧ\\x02\\x03\\x02'\\x05\\x02\\x0c\\x0c\\x0f\\x0f))\\x05\\x022\")\n buf.write(\n ';CHch\\x04\\x02GGgg\\x04\\x02--//\\t\\x02\\x0b\\x0c\\x0f\\x0f\"\"**>>]]}}\\x05\\x02\\x0c'\n )\n buf.write(\n '\\x0c\\x0f\\x0f$$\\x04\\x022;aa\\x05\\x02\\x0b\\x0c\\x0f\\x0f\"\"\\x04\\x02C\\\\c|\\x04\\x02\\x0c'\n )\n buf.write(\n '\\x0c\\x0f\\x0f\\x04\\x02\\x0b\\x0b\"\"\\x05\\x02%&2;aa\\x04\\x02CCcc\\x04\\x02DDdd\\x04\\x02'\n )\n buf.write(\n 'EEee\\x04\\x02FFff\\x04\\x02HHhh\\x04\\x02IIii\\x04\\x02JJjj\\x04\\x02KKkk\\x04\\x02LLll\\x04'\n )\n buf.write(\n '\\x02MMmm\\x04\\x02NNnn\\x04\\x02OOoo\\x04\\x02PPpp\\x04\\x02QQqq\\x04\\x02RRrr\\x04\\x02SSs'\n )\n buf.write(\n 's\\x04\\x02TTtt\\x04\\x02UUuu\\x04\\x02VVvv\\x04\\x02WWww\\x04\\x02XXxx\\x04\\x02YYyy\\x04\\x02'\n )\n buf.write(\n 'ZZzz\\x04\\x02[[{{\\x04\\x02\\\\\\\\||\\x02ᓝ\\x02\\x03\\x03\\x02\\x02\\x02\\x02\\x05\\x03\\x02\\x02\\x02'\n )\n buf.write(\n '\\x02\\x07\\x03\\x02\\x02\\x02\\x02\\t\\x03\\x02\\x02\\x02\\x02\\x0b\\x03\\x02\\x02\\x02\\x02\\r\\x03\\x02\\x02\\x02\\x02\\x0f'\n )\n 
buf.write(\n '\\x03\\x02\\x02\\x02\\x02\\x11\\x03\\x02\\x02\\x02\\x02\\x13\\x03\\x02\\x02\\x02\\x02\\x15\\x03\\x02\\x02\\x02\\x02\\x17\\x03'\n )\n buf.write(\n '\\x02\\x02\\x02\\x02\\x19\\x03\\x02\\x02\\x02\\x02\\x1b\\x03\\x02\\x02\\x02\\x02\\x1d\\x03\\x02\\x02\\x02\\x02\\x1f\\x03\\x02'\n )\n buf.write(\n \"\\x02\\x02\\x02!\\x03\\x02\\x02\\x02\\x02#\\x03\\x02\\x02\\x02\\x02%\\x03\\x02\\x02\\x02\\x02'\\x03\\x02\\x02\\x02\\x02)\\x03\"\n )\n buf.write(\n '\\x02\\x02\\x02\\x02+\\x03\\x02\\x02\\x02\\x02-\\x03\\x02\\x02\\x02\\x02/\\x03\\x02\\x02\\x02\\x021\\x03\\x02\\x02\\x02\\x02'\n )\n buf.write(\n '3\\x03\\x02\\x02\\x02\\x025\\x03\\x02\\x02\\x02\\x027\\x03\\x02\\x02\\x02\\x029\\x03\\x02\\x02\\x02\\x02;\\x03'\n )\n buf.write(\n '\\x02\\x02\\x02\\x02=\\x03\\x02\\x02\\x02\\x02?\\x03\\x02\\x02\\x02\\x02A\\x03\\x02\\x02\\x02\\x02C\\x03\\x02\\x02\\x02\\x02E'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02G\\x03\\x02\\x02\\x02\\x02I\\x03\\x02\\x02\\x02\\x02K\\x03\\x02\\x02\\x02\\x02M\\x03\\x02\\x02\\x02\\x02'\n )\n buf.write(\n 'O\\x03\\x02\\x02\\x02\\x02Q\\x03\\x02\\x02\\x02\\x02S\\x03\\x02\\x02\\x02\\x02U\\x03\\x02\\x02\\x02\\x02W\\x03\\x02\\x02\\x02'\n )\n buf.write(\n '\\x02Y\\x03\\x02\\x02\\x02\\x02[\\x03\\x02\\x02\\x02\\x02]\\x03\\x02\\x02\\x02\\x02_\\x03\\x02\\x02\\x02\\x02a\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02c\\x03\\x02\\x02\\x02\\x02e\\x03\\x02\\x02\\x02\\x02g\\x03\\x02\\x02\\x02\\x02i\\x03\\x02\\x02\\x02\\x02k\\x03\\x02'\n )\n buf.write(\n '\\x02\\x02\\x02m\\x03\\x02\\x02\\x02\\x02o\\x03\\x02\\x02\\x02\\x02q\\x03\\x02\\x02\\x02\\x02s\\x03\\x02\\x02\\x02\\x02u\\x03'\n )\n buf.write(\n '\\x02\\x02\\x02\\x02w\\x03\\x02\\x02\\x02\\x02y\\x03\\x02\\x02\\x02\\x02{\\x03\\x02\\x02\\x02\\x02}\\x03\\x02\\x02\\x02\\x02\\x7f'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02\\x81\\x03\\x02\\x02\\x02\\x02\\x83\\x03\\x02\\x02\\x02\\x02\\x85\\x03\\x02\\x02'\n )\n buf.write(\n 
'\\x02\\x02\\x87\\x03\\x02\\x02\\x02\\x02\\x89\\x03\\x02\\x02\\x02\\x02\\x8b\\x03\\x02\\x02\\x02\\x02\\x8d'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02\\x8f\\x03\\x02\\x02\\x02\\x02\\x91\\x03\\x02\\x02\\x02\\x02\\x93\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02\\x95\\x03\\x02\\x02\\x02\\x02\\x97\\x03\\x02\\x02\\x02\\x02\\x99\\x03\\x02\\x02\\x02\\x02\\x9b'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02\\x9d\\x03\\x02\\x02\\x02\\x02\\x9f\\x03\\x02\\x02\\x02\\x02¡\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02£\\x03\\x02\\x02\\x02\\x02¥\\x03\\x02\\x02\\x02\\x02§\\x03\\x02\\x02\\x02\\x02©'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02«\\x03\\x02\\x02\\x02\\x02\\xad\\x03\\x02\\x02\\x02\\x02¯\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02±\\x03\\x02\\x02\\x02\\x02³\\x03\\x02\\x02\\x02\\x02µ\\x03\\x02\\x02\\x02\\x02·'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02¹\\x03\\x02\\x02\\x02\\x02»\\x03\\x02\\x02\\x02\\x02½\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02¿\\x03\\x02\\x02\\x02\\x02Á\\x03\\x02\\x02\\x02\\x02Ã\\x03\\x02\\x02\\x02\\x02Å'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ç\\x03\\x02\\x02\\x02\\x02É\\x03\\x02\\x02\\x02\\x02Ë\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Í\\x03\\x02\\x02\\x02\\x02Ï\\x03\\x02\\x02\\x02\\x02Ñ\\x03\\x02\\x02\\x02\\x02Ó'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Õ\\x03\\x02\\x02\\x02\\x02×\\x03\\x02\\x02\\x02\\x02Ù\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Û\\x03\\x02\\x02\\x02\\x02Ý\\x03\\x02\\x02\\x02\\x02ß\\x03\\x02\\x02\\x02\\x02á'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ã\\x03\\x02\\x02\\x02\\x02å\\x03\\x02\\x02\\x02\\x02ç\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02é\\x03\\x02\\x02\\x02\\x02ë\\x03\\x02\\x02\\x02\\x02í\\x03\\x02\\x02\\x02\\x02ï'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ñ\\x03\\x02\\x02\\x02\\x02ó\\x03\\x02\\x02\\x02\\x02õ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02÷\\x03\\x02\\x02\\x02\\x02ù\\x03\\x02\\x02\\x02\\x02û\\x03\\x02\\x02\\x02\\x02ý'\n )\n buf.write(\n 
'\\x03\\x02\\x02\\x02\\x02ÿ\\x03\\x02\\x02\\x02\\x02ā\\x03\\x02\\x02\\x02\\x02ă\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ą\\x03\\x02\\x02\\x02\\x02ć\\x03\\x02\\x02\\x02\\x02ĉ\\x03\\x02\\x02\\x02\\x02ċ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02č\\x03\\x02\\x02\\x02\\x02ď\\x03\\x02\\x02\\x02\\x02đ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ē\\x03\\x02\\x02\\x02\\x02ĕ\\x03\\x02\\x02\\x02\\x02ė\\x03\\x02\\x02\\x02\\x02ę'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ě\\x03\\x02\\x02\\x02\\x02ĝ\\x03\\x02\\x02\\x02\\x02ğ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ġ\\x03\\x02\\x02\\x02\\x02ģ\\x03\\x02\\x02\\x02\\x02ĥ\\x03\\x02\\x02\\x02\\x02ħ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ĩ\\x03\\x02\\x02\\x02\\x02ī\\x03\\x02\\x02\\x02\\x02ĭ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02į\\x03\\x02\\x02\\x02\\x02ı\\x03\\x02\\x02\\x02\\x02ij\\x03\\x02\\x02\\x02\\x02ĵ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ķ\\x03\\x02\\x02\\x02\\x02Ĺ\\x03\\x02\\x02\\x02\\x02Ļ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Ľ\\x03\\x02\\x02\\x02\\x02Ŀ\\x03\\x02\\x02\\x02\\x02Ł\\x03\\x02\\x02\\x02\\x02Ń'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ņ\\x03\\x02\\x02\\x02\\x02Ň\\x03\\x02\\x02\\x02\\x02ʼn\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ŋ\\x03\\x02\\x02\\x02\\x02ō\\x03\\x02\\x02\\x02\\x02ŏ\\x03\\x02\\x02\\x02\\x02ő'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02œ\\x03\\x02\\x02\\x02\\x02ŕ\\x03\\x02\\x02\\x02\\x02ŗ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ř\\x03\\x02\\x02\\x02\\x02ś\\x03\\x02\\x02\\x02\\x02ŝ\\x03\\x02\\x02\\x02\\x02ş'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02š\\x03\\x02\\x02\\x02\\x02ţ\\x03\\x02\\x02\\x02\\x02ť\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ŧ\\x03\\x02\\x02\\x02\\x02ũ\\x03\\x02\\x02\\x02\\x02ū\\x03\\x02\\x02\\x02\\x02ŭ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ů\\x03\\x02\\x02\\x02\\x02ű\\x03\\x02\\x02\\x02\\x02ų\\x03\\x02\\x02'\n )\n buf.write(\n 
'\\x02\\x02ŵ\\x03\\x02\\x02\\x02\\x02ŷ\\x03\\x02\\x02\\x02\\x02Ź\\x03\\x02\\x02\\x02\\x02Ż'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ž\\x03\\x02\\x02\\x02\\x02ſ\\x03\\x02\\x02\\x02\\x02Ɓ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ƃ\\x03\\x02\\x02\\x02\\x02ƅ\\x03\\x02\\x02\\x02\\x02Ƈ\\x03\\x02\\x02\\x02\\x02Ɖ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ƌ\\x03\\x02\\x02\\x02\\x02ƍ\\x03\\x02\\x02\\x02\\x02Ə\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Ƒ\\x03\\x02\\x02\\x02\\x02Ɠ\\x03\\x02\\x02\\x02\\x02ƕ\\x03\\x02\\x02\\x02\\x02Ɨ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ƙ\\x03\\x02\\x02\\x02\\x02ƛ\\x03\\x02\\x02\\x02\\x02Ɲ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Ɵ\\x03\\x02\\x02\\x02\\x02ơ\\x03\\x02\\x02\\x02\\x02ƣ\\x03\\x02\\x02\\x02\\x02ƥ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ƨ\\x03\\x02\\x02\\x02\\x02Ʃ\\x03\\x02\\x02\\x02\\x02ƫ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ƭ\\x03\\x02\\x02\\x02\\x02Ư\\x03\\x02\\x02\\x02\\x02Ʊ\\x03\\x02\\x02\\x02\\x02Ƴ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ƶ\\x03\\x02\\x02\\x02\\x02Ʒ\\x03\\x02\\x02\\x02\\x02ƹ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ƻ\\x03\\x02\\x02\\x02\\x02ƽ\\x03\\x02\\x02\\x02\\x02ƿ\\x03\\x02\\x02\\x02\\x02ǁ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ǃ\\x03\\x02\\x02\\x02\\x02Dž\\x03\\x02\\x02\\x02\\x02LJ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02lj\\x03\\x02\\x02\\x02\\x02Nj\\x03\\x02\\x02\\x02\\x02Ǎ\\x03\\x02\\x02\\x02\\x02Ǐ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ǒ\\x03\\x02\\x02\\x02\\x02Ǔ\\x03\\x02\\x02\\x02\\x02Ǖ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Ǘ\\x03\\x02\\x02\\x02\\x02Ǚ\\x03\\x02\\x02\\x02\\x02Ǜ\\x03\\x02\\x02\\x02\\x02ǝ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ǟ\\x03\\x02\\x02\\x02\\x02ǡ\\x03\\x02\\x02\\x02\\x02ǣ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ǥ\\x03\\x02\\x02\\x02\\x02ǧ\\x03\\x02\\x02\\x02\\x02ǩ\\x03\\x02\\x02\\x02\\x02ǫ'\n )\n buf.write(\n 
'\\x03\\x02\\x02\\x02\\x02ǭ\\x03\\x02\\x02\\x02\\x02ǯ\\x03\\x02\\x02\\x02\\x02DZ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02dz\\x03\\x02\\x02\\x02\\x02ǵ\\x03\\x02\\x02\\x02\\x02Ƿ\\x03\\x02\\x02\\x02\\x02ǹ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ǻ\\x03\\x02\\x02\\x02\\x02ǽ\\x03\\x02\\x02\\x02\\x02ǿ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ȁ\\x03\\x02\\x02\\x02\\x02ȃ\\x03\\x02\\x02\\x02\\x02ȅ\\x03\\x02\\x02\\x02\\x02ȇ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ȉ\\x03\\x02\\x02\\x02\\x02ȋ\\x03\\x02\\x02\\x02\\x02ȍ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ȏ\\x03\\x02\\x02\\x02\\x02ȑ\\x03\\x02\\x02\\x02\\x02ȓ\\x03\\x02\\x02\\x02\\x02ȕ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ȗ\\x03\\x02\\x02\\x02\\x02ș\\x03\\x02\\x02\\x02\\x02ț\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ȝ\\x03\\x02\\x02\\x02\\x02ȟ\\x03\\x02\\x02\\x02\\x02ȡ\\x03\\x02\\x02\\x02\\x02ȣ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ȥ\\x03\\x02\\x02\\x02\\x02ȧ\\x03\\x02\\x02\\x02\\x02ȩ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ȫ\\x03\\x02\\x02\\x02\\x02ȭ\\x03\\x02\\x02\\x02\\x02ȯ\\x03\\x02\\x02\\x02\\x02ȱ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ȳ\\x03\\x02\\x02\\x02\\x02ȵ\\x03\\x02\\x02\\x02\\x02ȷ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ȹ\\x03\\x02\\x02\\x02\\x02Ȼ\\x03\\x02\\x02\\x02\\x02Ƚ\\x03\\x02\\x02\\x02\\x02ȿ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ɂ\\x03\\x02\\x02\\x02\\x02Ƀ\\x03\\x02\\x02\\x02\\x02Ʌ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ɇ\\x03\\x02\\x02\\x02\\x02ɉ\\x03\\x02\\x02\\x02\\x02ɋ\\x03\\x02\\x02\\x02\\x02ɍ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ɏ\\x03\\x02\\x02\\x02\\x02ɑ\\x03\\x02\\x02\\x02\\x02ɓ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ɕ\\x03\\x02\\x02\\x02\\x02ɗ\\x03\\x02\\x02\\x02\\x02ə\\x03\\x02\\x02\\x02\\x02ɛ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ɝ\\x03\\x02\\x02\\x02\\x02ɟ\\x03\\x02\\x02\\x02\\x02ɡ\\x03\\x02\\x02'\n )\n buf.write(\n 
'\\x02\\x02ɣ\\x03\\x02\\x02\\x02\\x02ɥ\\x03\\x02\\x02\\x02\\x02ɧ\\x03\\x02\\x02\\x02\\x02ɩ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ɫ\\x03\\x02\\x02\\x02\\x02ɭ\\x03\\x02\\x02\\x02\\x02ɯ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ɱ\\x03\\x02\\x02\\x02\\x02ɳ\\x03\\x02\\x02\\x02\\x02ɵ\\x03\\x02\\x02\\x02\\x02ɷ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ɹ\\x03\\x02\\x02\\x02\\x02ɻ\\x03\\x02\\x02\\x02\\x02ɽ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ɿ\\x03\\x02\\x02\\x02\\x02ʁ\\x03\\x02\\x02\\x02\\x02ʃ\\x03\\x02\\x02\\x02\\x02ʅ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ʇ\\x03\\x02\\x02\\x02\\x02ʉ\\x03\\x02\\x02\\x02\\x02ʋ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ʍ\\x03\\x02\\x02\\x02\\x02ʏ\\x03\\x02\\x02\\x02\\x02ʑ\\x03\\x02\\x02\\x02\\x02ʓ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ʕ\\x03\\x02\\x02\\x02\\x02ʗ\\x03\\x02\\x02\\x02\\x02ʙ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ʛ\\x03\\x02\\x02\\x02\\x02ʝ\\x03\\x02\\x02\\x02\\x02ʟ\\x03\\x02\\x02\\x02\\x02ʡ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ʣ\\x03\\x02\\x02\\x02\\x02ʥ\\x03\\x02\\x02\\x02\\x02ʧ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ʩ\\x03\\x02\\x02\\x02\\x02ʫ\\x03\\x02\\x02\\x02\\x02ʭ\\x03\\x02\\x02\\x02\\x02ʯ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ʱ\\x03\\x02\\x02\\x02\\x02ʳ\\x03\\x02\\x02\\x02\\x02ʵ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ʷ\\x03\\x02\\x02\\x02\\x02ʹ\\x03\\x02\\x02\\x02\\x02ʻ\\x03\\x02\\x02\\x02\\x02ʽ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ʿ\\x03\\x02\\x02\\x02\\x02ˁ\\x03\\x02\\x02\\x02\\x02˃\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02˅\\x03\\x02\\x02\\x02\\x02ˇ\\x03\\x02\\x02\\x02\\x02ˉ\\x03\\x02\\x02\\x02\\x02ˋ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ˍ\\x03\\x02\\x02\\x02\\x02ˏ\\x03\\x02\\x02\\x02\\x02ˑ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02˓\\x03\\x02\\x02\\x02\\x02˕\\x03\\x02\\x02\\x02\\x02˗\\x03\\x02\\x02\\x02\\x02˙'\n )\n buf.write(\n 
'\\x03\\x02\\x02\\x02\\x02˛\\x03\\x02\\x02\\x02\\x02˝\\x03\\x02\\x02\\x02\\x02˟\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ˡ\\x03\\x02\\x02\\x02\\x02ˣ\\x03\\x02\\x02\\x02\\x02˥\\x03\\x02\\x02\\x02\\x02˧'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02˩\\x03\\x02\\x02\\x02\\x02˫\\x03\\x02\\x02\\x02\\x02˭\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02˯\\x03\\x02\\x02\\x02\\x02˱\\x03\\x02\\x02\\x02\\x02˳\\x03\\x02\\x02\\x02\\x02˵'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02˷\\x03\\x02\\x02\\x02\\x02˹\\x03\\x02\\x02\\x02\\x02˻\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02˽\\x03\\x02\\x02\\x02\\x02˿\\x03\\x02\\x02\\x02\\x02́\\x03\\x02\\x02\\x02\\x02̃'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02̅\\x03\\x02\\x02\\x02\\x02̇\\x03\\x02\\x02\\x02\\x02̉\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02̋\\x03\\x02\\x02\\x02\\x02̍\\x03\\x02\\x02\\x02\\x02̏\\x03\\x02\\x02\\x02\\x02̑'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02̓\\x03\\x02\\x02\\x02\\x02̕\\x03\\x02\\x02\\x02\\x02̗\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02̙\\x03\\x02\\x02\\x02\\x02̛\\x03\\x02\\x02\\x02\\x02̝\\x03\\x02\\x02\\x02\\x02̟'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02̡\\x03\\x02\\x02\\x02\\x02̣\\x03\\x02\\x02\\x02\\x02̥\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02̧\\x03\\x02\\x02\\x02\\x02̩\\x03\\x02\\x02\\x02\\x02̫\\x03\\x02\\x02\\x02\\x02̭'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02̯\\x03\\x02\\x02\\x02\\x02̱\\x03\\x02\\x02\\x02\\x02̳\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02̵\\x03\\x02\\x02\\x02\\x02̷\\x03\\x02\\x02\\x02\\x02̹\\x03\\x02\\x02\\x02\\x02̻'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02̽\\x03\\x02\\x02\\x02\\x02̿\\x03\\x02\\x02\\x02\\x02́\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02̓\\x03\\x02\\x02\\x02\\x02ͅ\\x03\\x02\\x02\\x02\\x02͇\\x03\\x02\\x02\\x02\\x02͉'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02͋\\x03\\x02\\x02\\x02\\x02͍\\x03\\x02\\x02\\x02\\x02͏\\x03\\x02\\x02'\n )\n buf.write(\n 
'\\x02\\x02͑\\x03\\x02\\x02\\x02\\x02͓\\x03\\x02\\x02\\x02\\x02͕\\x03\\x02\\x02\\x02\\x02͗'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02͙\\x03\\x02\\x02\\x02\\x02͛\\x03\\x02\\x02\\x02\\x02͝\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02͟\\x03\\x02\\x02\\x02\\x02͡\\x03\\x02\\x02\\x02\\x02ͣ\\x03\\x02\\x02\\x02\\x02ͥ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ͧ\\x03\\x02\\x02\\x02\\x02ͩ\\x03\\x02\\x02\\x02\\x02ͫ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ͭ\\x03\\x02\\x02\\x02\\x02ͯ\\x03\\x02\\x02\\x02\\x02ͱ\\x03\\x02\\x02\\x02\\x02ͳ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02͵\\x03\\x02\\x02\\x02\\x02ͷ\\x03\\x02\\x02\\x02\\x02\\u0379\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ͻ\\x03\\x02\\x02\\x02\\x02ͽ\\x03\\x02\\x02\\x02\\x02Ϳ\\x03\\x02\\x02\\x02\\x02\\u0381'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02\\u0383\\x03\\x02\\x02\\x02\\x02΅\\x03\\x02\\x02\\x02\\x02·\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Ή\\x03\\x02\\x02\\x02\\x02\\u038b\\x03\\x02\\x02\\x02\\x02\\u038d\\x03\\x02\\x02\\x02\\x02Ώ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Α\\x03\\x02\\x02\\x02\\x02Γ\\x03\\x02\\x02\\x02\\x02Ε\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Η\\x03\\x02\\x02\\x02\\x02Ι\\x03\\x02\\x02\\x02\\x02Λ\\x03\\x02\\x02\\x02\\x02Ν'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ο\\x03\\x02\\x02\\x02\\x02Ρ\\x03\\x02\\x02\\x02\\x02Σ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Υ\\x03\\x02\\x02\\x02\\x02Χ\\x03\\x02\\x02\\x02\\x02Ω\\x03\\x02\\x02\\x02\\x02Ϋ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02έ\\x03\\x02\\x02\\x02\\x02ί\\x03\\x02\\x02\\x02\\x02α\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02γ\\x03\\x02\\x02\\x02\\x02ε\\x03\\x02\\x02\\x02\\x02η\\x03\\x02\\x02\\x02\\x02ι'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02λ\\x03\\x02\\x02\\x02\\x02ν\\x03\\x02\\x02\\x02\\x02ο\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ρ\\x03\\x02\\x02\\x02\\x02σ\\x03\\x02\\x02\\x02\\x02υ\\x03\\x02\\x02\\x02\\x02χ'\n )\n buf.write(\n 
'\\x03\\x02\\x02\\x02\\x02ω\\x03\\x02\\x02\\x02\\x02ϋ\\x03\\x02\\x02\\x02\\x02ύ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Ϗ\\x03\\x02\\x02\\x02\\x02ϑ\\x03\\x02\\x02\\x02\\x02ϓ\\x03\\x02\\x02\\x02\\x02ϡ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ϣ\\x03\\x02\\x02\\x02\\x02ϥ\\x03\\x02\\x02\\x02\\x02ϧ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ϩ\\x03\\x02\\x02\\x02\\x02ϫ\\x03\\x02\\x02\\x02\\x02ϭ\\x03\\x02\\x02\\x02\\x02ϯ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ϱ\\x03\\x02\\x02\\x02\\x02ϳ\\x03\\x02\\x02\\x02\\x02ϵ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Ϸ\\x03\\x02\\x02\\x02\\x02Ϲ\\x03\\x02\\x02\\x02\\x02ϻ\\x03\\x02\\x02\\x02\\x02Ͻ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ͽ\\x03\\x02\\x02\\x02\\x02Ё\\x03\\x02\\x02\\x02\\x02Ѓ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Ѕ\\x03\\x02\\x02\\x02\\x02Ї\\x03\\x02\\x02\\x02\\x02Љ\\x03\\x02\\x02\\x02\\x02Ћ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ѝ\\x03\\x02\\x02\\x02\\x02Џ\\x03\\x02\\x02\\x02\\x02Г\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Е\\x03\\x02\\x02\\x02\\x02З\\x03\\x02\\x02\\x02\\x02Й\\x03\\x02\\x02\\x02\\x02Л'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Н\\x03\\x02\\x02\\x02\\x02П\\x03\\x02\\x02\\x02\\x02Ч\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Щ\\x03\\x02\\x02\\x02\\x02Ы\\x03\\x02\\x02\\x02\\x02б\\x03\\x02\\x02\\x02\\x02г'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x03ѩ\\x03\\x02\\x02\\x02\\x05Ѭ\\x03\\x02\\x02\\x02\\x07Ѯ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\tѲ\\x03\\x02\\x02\\x02\\x0bѸ\\x03\\x02\\x02\\x02\\rѾ\\x03\\x02\\x02\\x02\\x0f'\n )\n buf.write(\n '҈\\x03\\x02\\x02\\x02\\x11Ҍ\\x03\\x02\\x02\\x02\\x13Ғ\\x03\\x02\\x02\\x02\\x15Қ')\n buf.write(\n '\\x03\\x02\\x02\\x02\\x17Ҟ\\x03\\x02\\x02\\x02\\x19Ң\\x03\\x02\\x02\\x02\\x1bҨ\\x03'\n )\n buf.write(\n '\\x02\\x02\\x02\\x1dҫ\\x03\\x02\\x02\\x02\\x1fҲ\\x03\\x02\\x02\\x02!ҹ\\x03\\x02\\x02'\n )\n buf.write(\n \"\\x02#ҽ\\x03\\x02\\x02\\x02%Ӈ\\x03\\x02\\x02\\x02'ӊ\\x03\\x02\\x02\\x02)Ӕ\")\n buf.write(\n 
'\\x03\\x02\\x02\\x02+Ӛ\\x03\\x02\\x02\\x02-ӡ\\x03\\x02\\x02\\x02/Ӧ\\x03\\x02\\x02\\x02'\n )\n buf.write('1Ӱ\\x03\\x02\\x02\\x023ԇ\\x03\\x02\\x02\\x025ԍ\\x03\\x02\\x02\\x027')\n buf.write('Ԕ\\x03\\x02\\x02\\x029Ԛ\\x03\\x02\\x02\\x02;Ԣ\\x03\\x02\\x02\\x02=Ԩ\\x03'\n )\n buf.write(\n '\\x02\\x02\\x02?Զ\\x03\\x02\\x02\\x02AՃ\\x03\\x02\\x02\\x02CՒ\\x03\\x02\\x02\\x02E\\u0557'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02G՝\\x03\\x02\\x02\\x02Iբ\\x03\\x02\\x02\\x02Kժ\\x03\\x02\\x02\\x02'\n )\n buf.write(\n 'Mկ\\x03\\x02\\x02\\x02Oշ\\x03\\x02\\x02\\x02Qռ\\x03\\x02\\x02\\x02Sտ\\x03')\n buf.write(\n '\\x02\\x02\\x02Uք\\x03\\x02\\x02\\x02Wֆ\\x03\\x02\\x02\\x02Y\\u058c\\x03\\x02\\x02\\x02[֑'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02]֛\\x03\\x02\\x02\\x02_֣\\x03\\x02\\x02\\x02a֨\\x03\\x02\\x02\\x02'\n )\n buf.write(\n 'c֭\\x03\\x02\\x02\\x02eֲ\\x03\\x02\\x02\\x02gֺ\\x03\\x02\\x02\\x02iׄ\\x03')\n buf.write(\n '\\x02\\x02\\x02k\\u05ca\\x03\\x02\\x02\\x02m\\u05ce\\x03\\x02\\x02\\x02oד\\x03\\x02\\x02\\x02qי'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02sס\\x03\\x02\\x02\\x02uש\\x03\\x02\\x02\\x02wױ\\x03\\x02\\x02\\x02'\n )\n buf.write(\n 'y\\u05f9\\x03\\x02\\x02\\x02{\\u0600\\x03\\x02\\x02\\x02}؊\\x03\\x02\\x02\\x02\\x7fؘ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x81ؠ\\x03\\x02\\x02\\x02\\x83ة\\x03\\x02\\x02\\x02\\x85')\n buf.write('ر\\x03\\x02\\x02\\x02\\x87ف\\x03\\x02\\x02\\x02\\x89ي\\x03\\x02\\x02\\x02'\n )\n buf.write('\\x8bٕ\\x03\\x02\\x02\\x02\\x8d١\\x03\\x02\\x02\\x02\\x8f٭\\x03')\n buf.write('\\x02\\x02\\x02\\x91ٵ\\x03\\x02\\x02\\x02\\x93ٽ\\x03\\x02\\x02\\x02\\x95چ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x97ڎ\\x03\\x02\\x02\\x02\\x99ښ\\x03\\x02\\x02\\x02\\x9b')\n buf.write('ڪ\\x03\\x02\\x02\\x02\\x9dگ\\x03\\x02\\x02\\x02\\x9fڵ\\x03\\x02\\x02\\x02'\n )\n buf.write('¡ڼ\\x03\\x02\\x02\\x02£ۂ\\x03\\x02\\x02\\x02¥ۇ\\x03')\n buf.write('\\x02\\x02\\x02§ۏ\\x03\\x02\\x02\\x02©ۜ\\x03\\x02\\x02\\x02«ۣ')\n buf.write('\\x03\\x02\\x02\\x02\\xadۯ\\x03\\x02\\x02\\x02¯۵\\x03\\x02\\x02\\x02±')\n 
buf.write('ۺ\\x03\\x02\\x02\\x02³܃\\x03\\x02\\x02\\x02µ܈\\x03\\x02\\x02\\x02')\n buf.write('·܌\\x03\\x02\\x02\\x02¹ܛ\\x03\\x02\\x02\\x02»ܦ\\x03')\n buf.write('\\x02\\x02\\x02½ܪ\\x03\\x02\\x02\\x02¿ܰ\\x03\\x02\\x02\\x02Áܴ')\n buf.write('\\x03\\x02\\x02\\x02Ãܼ\\x03\\x02\\x02\\x02Å݄\\x03\\x02\\x02\\x02Ç')\n buf.write('ݎ\\x03\\x02\\x02\\x02Éݘ\\x03\\x02\\x02\\x02Ëݠ\\x03\\x02\\x02\\x02')\n buf.write('Íݩ\\x03\\x02\\x02\\x02Ïݲ\\x03\\x02\\x02\\x02Ñݺ\\x03')\n buf.write('\\x02\\x02\\x02Óށ\\x03\\x02\\x02\\x02Õއ\\x03\\x02\\x02\\x02×ތ')\n buf.write('\\x03\\x02\\x02\\x02Ùޚ\\x03\\x02\\x02\\x02Ûޤ\\x03\\x02\\x02\\x02Ý')\n buf.write('ެ\\x03\\x02\\x02\\x02ß\\u07b9\\x03\\x02\\x02\\x02á߂\\x03\\x02\\x02\\x02')\n buf.write('ãߋ\\x03\\x02\\x02\\x02åߒ\\x03\\x02\\x02\\x02çߗ\\x03')\n buf.write('\\x02\\x02\\x02é߰\\x03\\x02\\x02\\x02ëߵ\\x03\\x02\\x02\\x02í߽')\n buf.write('\\x03\\x02\\x02\\x02ïࠂ\\x03\\x02\\x02\\x02ñࠈ\\x03\\x02\\x02\\x02ó')\n buf.write('ࠎ\\x03\\x02\\x02\\x02õࠕ\\x03\\x02\\x02\\x02÷ࠞ\\x03\\x02\\x02\\x02')\n buf.write('ùࠢ\\x03\\x02\\x02\\x02û࠱\\x03\\x02\\x02\\x02ý࠵\\x03')\n buf.write('\\x02\\x02\\x02ÿ࠼\\x03\\x02\\x02\\x02āࡃ\\x03\\x02\\x02\\x02ăࡌ')\n buf.write('\\x03\\x02\\x02\\x02ąࡓ\\x03\\x02\\x02\\x02ć\\u085d\\x03\\x02\\x02\\x02ĉ')\n buf.write('\\u086c\\x03\\x02\\x02\\x02ċࡷ\\x03\\x02\\x02\\x02čࡿ\\x03\\x02\\x02\\x02')\n buf.write('ďࢉ\\x03\\x02\\x02\\x02đ\\u0891\\x03\\x02\\x02\\x02ē࢘\\x03')\n buf.write('\\x02\\x02\\x02ĕ࢝\\x03\\x02\\x02\\x02ėࢥ\\x03\\x02\\x02\\x02ęࢮ')\n buf.write('\\x03\\x02\\x02\\x02ěࢶ\\x03\\x02\\x02\\x02ĝࢾ\\x03\\x02\\x02\\x02ğ')\n buf.write('ࣄ\\x03\\x02\\x02\\x02ġ࣊\\x03\\x02\\x02\\x02ģ࣐\\x03\\x02\\x02\\x02')\n buf.write('ĥࣖ\\x03\\x02\\x02\\x02ħ\\u08e2\\x03\\x02\\x02\\x02ĩࣨ\\x03')\n buf.write('\\x02\\x02\\x02īࣲ\\x03\\x02\\x02\\x02ĭࣺ\\x03\\x02\\x02\\x02įࣾ')\n buf.write('\\x03\\x02\\x02\\x02ıअ\\x03\\x02\\x02\\x02ijऋ\\x03\\x02\\x02\\x02ĵ')\n buf.write('ऐ\\x03\\x02\\x02\\x02ķक\\x03\\x02\\x02\\x02Ĺञ\\x03\\x02\\x02\\x02')\n 
buf.write('Ļण\\x03\\x02\\x02\\x02Ľऩ\\x03\\x02\\x02\\x02Ŀय\\x03')\n buf.write('\\x02\\x02\\x02Łस\\x03\\x02\\x02\\x02Ńऽ\\x03\\x02\\x02\\x02Ņॄ')\n buf.write('\\x03\\x02\\x02\\x02Ňॉ\\x03\\x02\\x02\\x02ʼnॎ\\x03\\x02\\x02\\x02ŋ')\n buf.write('॑\\x03\\x02\\x02\\x02ōक़\\x03\\x02\\x02\\x02ŏॢ\\x03\\x02\\x02\\x02')\n buf.write('ő॥\\x03\\x02\\x02\\x02œ७\\x03\\x02\\x02\\x02ŕॷ\\x03')\n buf.write('\\x02\\x02\\x02ŗঁ\\x03\\x02\\x02\\x02řঈ\\x03\\x02\\x02\\x02ś\\u098e')\n buf.write('\\x03\\x02\\x02\\x02ŝখ\\x03\\x02\\x02\\x02şঠ\\x03\\x02\\x02\\x02š')\n buf.write('ন\\x03\\x02\\x02\\x02ţ\\u09b1\\x03\\x02\\x02\\x02ťস\\x03\\x02\\x02\\x02')\n buf.write('ŧা\\x03\\x02\\x02\\x02ũৄ\\x03\\x02\\x02\\x02ūো\\x03')\n buf.write(\n '\\x02\\x02\\x02ŭ\\u09d8\\x03\\x02\\x02\\x02ůৠ\\x03\\x02\\x02\\x02ű\\u09e4')\n buf.write('\\x03\\x02\\x02\\x02ų৬\\x03\\x02\\x02\\x02ŵ৶\\x03\\x02\\x02\\x02ŷ')\n buf.write(\n '\\u09ff\\x03\\x02\\x02\\x02Ź\\u0a04\\x03\\x02\\x02\\x02Żਏ\\x03\\x02\\x02\\x02')\n buf.write('Ž\\u0a12\\x03\\x02\\x02\\x02ſਜ\\x03\\x02\\x02\\x02Ɓਤ\\x03')\n buf.write('\\x02\\x02\\x02ƃ\\u0a29\\x03\\x02\\x02\\x02ƅਮ\\x03\\x02\\x02\\x02Ƈਲ਼')\n buf.write('\\x03\\x02\\x02\\x02Ɖ਼\\x03\\x02\\x02\\x02Ƌੁ\\x03\\x02\\x02\\x02ƍ')\n buf.write('ੌ\\x03\\x02\\x02\\x02Ə\\u0a54\\x03\\x02\\x02\\x02Ƒਖ਼\\x03\\x02\\x02\\x02')\n buf.write('Ɠ\\u0a5f\\x03\\x02\\x02\\x02ƕ੧\\x03\\x02\\x02\\x02Ɨ੬\\x03')\n buf.write(\n '\\x02\\x02\\x02ƙੲ\\x03\\x02\\x02\\x02ƛ\\u0a78\\x03\\x02\\x02\\x02Ɲ\\u0a7e')\n buf.write('\\x03\\x02\\x02\\x02Ɵ\\u0a84\\x03\\x02\\x02\\x02ơઊ\\x03\\x02\\x02\\x02ƣ')\n buf.write('એ\\x03\\x02\\x02\\x02ƥખ\\x03\\x02\\x02\\x02Ƨચ\\x03\\x02\\x02\\x02')\n buf.write('Ʃડ\\x03\\x02\\x02\\x02ƫધ\\x03\\x02\\x02\\x02ƭબ\\x03')\n buf.write(\n '\\x02\\x02\\x02Ư\\u0ab1\\x03\\x02\\x02\\x02Ʊશ\\x03\\x02\\x02\\x02Ƴ\\u0aba')\n buf.write('\\x03\\x02\\x02\\x02Ƶૂ\\x03\\x02\\x02\\x02Ʒો\\x03\\x02\\x02\\x02ƹ')\n buf.write(\n '\\u0ad4\\x03\\x02\\x02\\x02ƻ\\u0adb\\x03\\x02\\x02\\x02ƽૡ\\x03\\x02\\x02\\x02')\n 
buf.write('ƿ૧\\x03\\x02\\x02\\x02ǁ૮\\x03\\x02\\x02\\x02ǃ\\u0af7\\x03')\n buf.write('\\x02\\x02\\x02Dž\\u0b00\\x03\\x02\\x02\\x02LJଅ\\x03\\x02\\x02\\x02ljଋ')\n buf.write('\\x03\\x02\\x02\\x02Nj\\u0b12\\x03\\x02\\x02\\x02Ǎଘ\\x03\\x02\\x02\\x02Ǐ')\n buf.write('ଡ\\x03\\x02\\x02\\x02Ǒଦ\\x03\\x02\\x02\\x02Ǔପ\\x03\\x02\\x02\\x02')\n buf.write('Ǖଲ\\x03\\x02\\x02\\x02Ǘ\\u0b3b\\x03\\x02\\x02\\x02Ǚି\\x03')\n buf.write(\n '\\x02\\x02\\x02Ǜ\\u0b45\\x03\\x02\\x02\\x02ǝ\\u0b4e\\x03\\x02\\x02\\x02ǟ\\u0b54'\n )\n buf.write('\\x03\\x02\\x02\\x02ǡ\\u0b5b\\x03\\x02\\x02\\x02ǣୟ\\x03\\x02\\x02\\x02ǥ')\n buf.write('ୢ\\x03\\x02\\x02\\x02ǧ୪\\x03\\x02\\x02\\x02ǩ୲\\x03\\x02\\x02\\x02')\n buf.write('ǫ\\u0b79\\x03\\x02\\x02\\x02ǭ\\u0b81\\x03\\x02\\x02\\x02ǯஒ\\x03')\n buf.write(\n '\\x02\\x02\\x02DZ\\u0b9d\\x03\\x02\\x02\\x02dzந\\x03\\x02\\x02\\x02ǵ\\u0bad')\n buf.write('\\x03\\x02\\x02\\x02Ƿவ\\x03\\x02\\x02\\x02ǹ\\u0bc3\\x03\\x02\\x02\\x02ǻ')\n buf.write(\n 'ே\\x03\\x02\\x02\\x02ǽ\\u0bce\\x03\\x02\\x02\\x02ǿ\\u0bd3\\x03\\x02\\x02\\x02')\n buf.write('ȁ\\u0bd9\\x03\\x02\\x02\\x02ȃ\\u0be0\\x03\\x02\\x02\\x02ȅ௨\\x03')\n buf.write('\\x02\\x02\\x02ȇ௲\\x03\\x02\\x02\\x02ȉ௹\\x03\\x02\\x02\\x02ȋ\\u0bfc')\n buf.write('\\x03\\x02\\x02\\x02ȍఀ\\x03\\x02\\x02\\x02ȏఄ\\x03\\x02\\x02\\x02ȑ')\n buf.write('ఈ\\x03\\x02\\x02\\x02ȓఋ\\x03\\x02\\x02\\x02ȕఐ\\x03\\x02\\x02\\x02')\n buf.write('ȗక\\x03\\x02\\x02\\x02șజ\\x03\\x02\\x02\\x02țట\\x03')\n buf.write('\\x02\\x02\\x02ȝధ\\x03\\x02\\x02\\x02ȟభ\\x03\\x02\\x02\\x02ȡస')\n buf.write('\\x03\\x02\\x02\\x02ȣీ\\x03\\x02\\x02\\x02ȥౄ\\x03\\x02\\x02\\x02ȧ')\n buf.write('ొ\\x03\\x02\\x02\\x02ȩ\\u0c4f\\x03\\x02\\x02\\x02ȫౚ\\x03\\x02\\x02\\x02')\n buf.write('ȭౢ\\x03\\x02\\x02\\x02ȯ\\u0c72\\x03\\x02\\x02\\x02ȱ౽\\x03')\n buf.write('\\x02\\x02\\x02ȳ಄\\x03\\x02\\x02\\x02ȵಎ\\x03\\x02\\x02\\x02ȷಖ')\n buf.write('\\x03\\x02\\x02\\x02ȹಛ\\x03\\x02\\x02\\x02Ȼತ\\x03\\x02\\x02\\x02Ƚ')\n buf.write(\n 'ಪ\\x03\\x02\\x02\\x02ȿ\\u0cb4\\x03\\x02\\x02\\x02Ɂ\\u0cba\\x03\\x02\\x02\\x02')\n 
buf.write('Ƀಿ\\x03\\x02\\x02\\x02Ʌೋ\\x03\\x02\\x02\\x02ɇ\\u0cd4\\x03')\n buf.write('\\x02\\x02\\x02ɉೞ\\x03\\x02\\x02\\x02ɋ\\u0ce5\\x03\\x02\\x02\\x02ɍ೯')\n buf.write('\\x03\\x02\\x02\\x02ɏ\\u0cf9\\x03\\x02\\x02\\x02ɑഁ\\x03\\x02\\x02\\x02ɓ')\n buf.write('ഇ\\x03\\x02\\x02\\x02ɕ\\u0d11\\x03\\x02\\x02\\x02ɗഗ\\x03\\x02\\x02\\x02')\n buf.write('əഝ\\x03\\x02\\x02\\x02ɛഡ\\x03\\x02\\x02\\x02ɝദ\\x03')\n buf.write('\\x02\\x02\\x02ɟഫ\\x03\\x02\\x02\\x02ɡല\\x03\\x02\\x02\\x02ɣശ')\n buf.write('\\x03\\x02\\x02\\x02ɥീ\\x03\\x02\\x02\\x02ɧൌ\\x03\\x02\\x02\\x02ɩ')\n buf.write(\n '\\u0d53\\x03\\x02\\x02\\x02ɫ൝\\x03\\x02\\x02\\x02ɭ\\u0d64\\x03\\x02\\x02\\x02')\n buf.write('ɯ൬\\x03\\x02\\x02\\x02ɱ൴\\x03\\x02\\x02\\x02ɳඈ\\x03')\n buf.write('\\x02\\x02\\x02ɵඏ\\x03\\x02\\x02\\x02ɷග\\x03\\x02\\x02\\x02ɹඣ')\n buf.write('\\x03\\x02\\x02\\x02ɻත\\x03\\x02\\x02\\x02ɽඳ\\x03\\x02\\x02\\x02ɿ')\n buf.write('ර\\x03\\x02\\x02\\x02ʁෂ\\x03\\x02\\x02\\x02ʃ\\u0dc8\\x03\\x02\\x02\\x02')\n buf.write('ʅෑ\\x03\\x02\\x02\\x02ʇෘ\\x03\\x02\\x02\\x02ʉො\\x03')\n buf.write('\\x02\\x02\\x02ʋ\\u0de2\\x03\\x02\\x02\\x02ʍ෧\\x03\\x02\\x02\\x02ʏ෭')\n buf.write('\\x03\\x02\\x02\\x02ʑ෴\\x03\\x02\\x02\\x02ʓ\\u0df9\\x03\\x02\\x02\\x02ʕ')\n buf.write('ฃ\\x03\\x02\\x02\\x02ʗช\\x03\\x02\\x02\\x02ʙถ\\x03\\x02\\x02\\x02')\n buf.write('ʛบ\\x03\\x02\\x02\\x02ʝม\\x03\\x02\\x02\\x02ʟศ\\x03')\n buf.write('\\x02\\x02\\x02ʡอ\\x03\\x02\\x02\\x02ʣี\\x03\\x02\\x02\\x02ʥ\\u0e3c')\n buf.write('\\x03\\x02\\x02\\x02ʧแ\\x03\\x02\\x02\\x02ʩ๊\\x03\\x02\\x02\\x02ʫ')\n buf.write(\n '๕\\x03\\x02\\x02\\x02ʭ\\u0e62\\x03\\x02\\x02\\x02ʯ\\u0e74\\x03\\x02\\x02\\x02')\n buf.write('ʱ\\u0e80\\x03\\x02\\x02\\x02ʳຐ\\x03\\x02\\x02\\x02ʵດ\\x03')\n buf.write('\\x02\\x02\\x02ʷນ\\x03\\x02\\x02\\x02ʹຢ\\x03\\x02\\x02\\x02ʻຨ')\n buf.write('\\x03\\x02\\x02\\x02ʽອ\\x03\\x02\\x02\\x02ʿຶ\\x03\\x02\\x02\\x02ˁ')\n buf.write('\\u0ebf\\x03\\x02\\x02\\x02˃່\\x03\\x02\\x02\\x02˅໗\\x03\\x02\\x02\\x02')\n 
buf.write('ˇໞ\\x03\\x02\\x02\\x02ˉ\\u0ee3\\x03\\x02\\x02\\x02ˋ\\u0ee8\\x03')\n buf.write(\n '\\x02\\x02\\x02ˍ\\u0ef1\\x03\\x02\\x02\\x02ˏ\\u0efa\\x03\\x02\\x02\\x02ˑ\\u0eff'\n )\n buf.write('\\x03\\x02\\x02\\x02˓།\\x03\\x02\\x02\\x02˕༕\\x03\\x02\\x02\\x02˗')\n buf.write('༞\\x03\\x02\\x02\\x02˙༩\\x03\\x02\\x02\\x02˛༯\\x03\\x02\\x02\\x02')\n buf.write('˝༷\\x03\\x02\\x02\\x02˟ཁ\\x03\\x02\\x02\\x02ˡཎ\\x03')\n buf.write('\\x02\\x02\\x02ˣཕ\\x03\\x02\\x02\\x02˥འ\\x03\\x02\\x02\\x02˧ཧ')\n buf.write('\\x03\\x02\\x02\\x02˩ཱི\\x03\\x02\\x02\\x02˫ྀ\\x03\\x02\\x02\\x02˭')\n buf.write('ྎ\\x03\\x02\\x02\\x02˯ྖ\\x03\\x02\\x02\\x02˱ྞ\\x03\\x02\\x02\\x02')\n buf.write('˳ྦ\\x03\\x02\\x02\\x02˵ྫྷ\\x03\\x02\\x02\\x02˷ྰ\\x03')\n buf.write('\\x02\\x02\\x02˹ྵ\\x03\\x02\\x02\\x02˻ྺ\\x03\\x02\\x02\\x02˽࿄')\n buf.write(\n '\\x03\\x02\\x02\\x02˿\\u0fe0\\x03\\x02\\x02\\x02́\\u0ffb\\x03\\x02\\x02\\x02̃')\n buf.write('ဓ\\x03\\x02\\x02\\x02̅အ\\x03\\x02\\x02\\x02̇ု\\x03\\x02\\x02\\x02')\n buf.write('̉ဿ\\x03\\x02\\x02\\x02̋၏\\x03\\x02\\x02\\x02̍ၒ\\x03')\n buf.write('\\x02\\x02\\x02̏ၛ\\x03\\x02\\x02\\x02̑ၧ\\x03\\x02\\x02\\x02̓ၱ')\n buf.write('\\x03\\x02\\x02\\x02̕ၷ\\x03\\x02\\x02\\x02̗ၿ\\x03\\x02\\x02\\x02̙')\n buf.write('ႄ\\x03\\x02\\x02\\x02̛ႉ\\x03\\x02\\x02\\x02̝႒\\x03\\x02\\x02\\x02')\n buf.write('̟႗\\x03\\x02\\x02\\x02̡Ⴁ\\x03\\x02\\x02\\x02̣Ⴇ\\x03')\n buf.write('\\x02\\x02\\x02̥Ⴍ\\x03\\x02\\x02\\x02̧Ⴔ\\x03\\x02\\x02\\x02̩Ⴞ')\n buf.write(\n '\\x03\\x02\\x02\\x02̫\\u10c6\\x03\\x02\\x02\\x02̭\\u10cc\\x03\\x02\\x02\\x02̯')\n buf.write('დ\\x03\\x02\\x02\\x02̱მ\\x03\\x02\\x02\\x02̳ტ\\x03\\x02\\x02\\x02')\n buf.write('̵ჩ\\x03\\x02\\x02\\x02̷ჭ\\x03\\x02\\x02\\x02̹ჳ\\x03')\n buf.write('\\x02\\x02\\x02̻ჼ\\x03\\x02\\x02\\x02̽ᄂ\\x03\\x02\\x02\\x02̿ᄉ')\n buf.write('\\x03\\x02\\x02\\x02́ᄑ\\x03\\x02\\x02\\x02̓ᄚ\\x03\\x02\\x02\\x02ͅ')\n buf.write('ᄣ\\x03\\x02\\x02\\x02͇ᄪ\\x03\\x02\\x02\\x02͉ᄲ\\x03\\x02\\x02\\x02')\n buf.write('͋ᄺ\\x03\\x02\\x02\\x02͍ᅃ\\x03\\x02\\x02\\x02͏ᅈ\\x03')\n 
buf.write('\\x02\\x02\\x02͑ᅐ\\x03\\x02\\x02\\x02͓ᅛ\\x03\\x02\\x02\\x02͕ᅠ')\n buf.write('\\x03\\x02\\x02\\x02͗ᅩ\\x03\\x02\\x02\\x02͙ᅯ\\x03\\x02\\x02\\x02͛')\n buf.write('ᅵ\\x03\\x02\\x02\\x02͝ᅺ\\x03\\x02\\x02\\x02͟ᆁ\\x03\\x02\\x02\\x02')\n buf.write('͡ᆆ\\x03\\x02\\x02\\x02ͣᆌ\\x03\\x02\\x02\\x02ͥᆐ\\x03')\n buf.write('\\x02\\x02\\x02ͧᆗ\\x03\\x02\\x02\\x02ͩᆥ\\x03\\x02\\x02\\x02ͫᆭ')\n buf.write('\\x03\\x02\\x02\\x02ͭᆺ\\x03\\x02\\x02\\x02ͯᇅ\\x03\\x02\\x02\\x02ͱ')\n buf.write('ᇏ\\x03\\x02\\x02\\x02ͳᇙ\\x03\\x02\\x02\\x02͵ᇧ\\x03\\x02\\x02\\x02')\n buf.write('ͷᇰ\\x03\\x02\\x02\\x02\\u0379ᇶ\\x03\\x02\\x02\\x02ͻᇿ\\x03')\n buf.write('\\x02\\x02\\x02ͽሇ\\x03\\x02\\x02\\x02Ϳሔ\\x03\\x02\\x02\\x02\\u0381ም')\n buf.write('\\x03\\x02\\x02\\x02\\u0383ሢ\\x03\\x02\\x02\\x02΅ሦ\\x03\\x02\\x02\\x02·')\n buf.write(\n 'ሿ\\x03\\x02\\x02\\x02Ήቄ\\x03\\x02\\x02\\x02\\u038b\\u124f\\x03\\x02\\x02\\x02')\n buf.write('\\u038dቡ\\x03\\x02\\x02\\x02Ώቱ\\x03\\x02\\x02\\x02Αኄ\\x03')\n buf.write('\\x02\\x02\\x02Γኛ\\x03\\x02\\x02\\x02Εኪ\\x03\\x02\\x02\\x02Ηኴ')\n buf.write(\n '\\x03\\x02\\x02\\x02Ι\\u12bf\\x03\\x02\\x02\\x02Λ\\u12c7\\x03\\x02\\x02\\x02Ν')\n buf.write('ዔ\\x03\\x02\\x02\\x02Οዤ\\x03\\x02\\x02\\x02Ρዴ\\x03\\x02\\x02\\x02')\n buf.write('Σዹ\\x03\\x02\\x02\\x02Υዽ\\x03\\x02\\x02\\x02Χጂ\\x03')\n buf.write('\\x02\\x02\\x02Ωጆ\\x03\\x02\\x02\\x02Ϋጋ\\x03\\x02\\x02\\x02έጏ')\n buf.write('\\x03\\x02\\x02\\x02ί\\u1316\\x03\\x02\\x02\\x02αጚ\\x03\\x02\\x02\\x02γ')\n buf.write('ጠ\\x03\\x02\\x02\\x02εጰ\\x03\\x02\\x02\\x02ηጻ\\x03\\x02\\x02\\x02')\n buf.write('ιጿ\\x03\\x02\\x02\\x02λፈ\\x03\\x02\\x02\\x02νፎ\\x03')\n buf.write('\\x02\\x02\\x02οፕ\\x03\\x02\\x02\\x02ρፚ\\x03\\x02\\x02\\x02σ፡')\n buf.write('\\x03\\x02\\x02\\x02υ፮\\x03\\x02\\x02\\x02χ፻\\x03\\x02\\x02\\x02ω')\n buf.write('ᎈ\\x03\\x02\\x02\\x02ϋᎋ\\x03\\x02\\x02\\x02ύᎍ\\x03\\x02\\x02\\x02')\n buf.write('Ϗᎏ\\x03\\x02\\x02\\x02ϑ\\u139e\\x03\\x02\\x02\\x02ϓᎪ\\x03')\n buf.write('\\x02\\x02\\x02ϕᎳ\\x03\\x02\\x02\\x02ϗᎵ\\x03\\x02\\x02\\x02ϙᏀ')\n 
buf.write('\\x03\\x02\\x02\\x02ϛᏋ\\x03\\x02\\x02\\x02ϝᏖ\\x03\\x02\\x02\\x02ϟ')\n buf.write('Ꮱ\\x03\\x02\\x02\\x02ϡᏣ\\x03\\x02\\x02\\x02ϣᏭ\\x03\\x02\\x02\\x02')\n buf.write('ϥᏯ\\x03\\x02\\x02\\x02ϧᏱ\\x03\\x02\\x02\\x02ϩᏳ\\x03')\n buf.write('\\x02\\x02\\x02ϫᏵ\\x03\\x02\\x02\\x02ϭᏸ\\x03\\x02\\x02\\x02ϯᏺ')\n buf.write('\\x03\\x02\\x02\\x02ϱᏼ\\x03\\x02\\x02\\x02ϳ\\u13fe\\x03\\x02\\x02\\x02ϵ')\n buf.write('᐀\\x03\\x02\\x02\\x02Ϸᐂ\\x03\\x02\\x02\\x02Ϲᐄ\\x03\\x02\\x02\\x02')\n buf.write('ϻᐕ\\x03\\x02\\x02\\x02Ͻᐗ\\x03\\x02\\x02\\x02Ͽᐙ\\x03')\n buf.write('\\x02\\x02\\x02Ёᐛ\\x03\\x02\\x02\\x02Ѓᐞ\\x03\\x02\\x02\\x02Ѕᐠ')\n buf.write('\\x03\\x02\\x02\\x02Їᐫ\\x03\\x02\\x02\\x02Љᐭ\\x03\\x02\\x02\\x02Ћ')\n buf.write('ᐯ\\x03\\x02\\x02\\x02Ѝᐱ\\x03\\x02\\x02\\x02Џᐳ\\x03\\x02\\x02\\x02')\n buf.write('Бᐵ\\x03\\x02\\x02\\x02Гᐷ\\x03\\x02\\x02\\x02Еᐺ\\x03')\n buf.write('\\x02\\x02\\x02Зᐼ\\x03\\x02\\x02\\x02Йᐾ\\x03\\x02\\x02\\x02Лᑀ')\n buf.write('\\x03\\x02\\x02\\x02Нᑂ\\x03\\x02\\x02\\x02Пᑅ\\x03\\x02\\x02\\x02С')\n buf.write('ᑋ\\x03\\x02\\x02\\x02Уᑎ\\x03\\x02\\x02\\x02Хᑕ\\x03\\x02\\x02\\x02')\n buf.write('Чᑠ\\x03\\x02\\x02\\x02Щᑯ\\x03\\x02\\x02\\x02Ыᑽ\\x03')\n buf.write('\\x02\\x02\\x02Эᒐ\\x03\\x02\\x02\\x02Яᒔ\\x03\\x02\\x02\\x02бᒖ')\n buf.write('\\x03\\x02\\x02\\x02гᒞ\\x03\\x02\\x02\\x02еᒣ\\x03\\x02\\x02\\x02з')\n buf.write('ᒥ\\x03\\x02\\x02\\x02йᒧ\\x03\\x02\\x02\\x02лᒩ\\x03\\x02\\x02\\x02')\n buf.write('нᒫ\\x03\\x02\\x02\\x02пᒭ\\x03\\x02\\x02\\x02сᒯ\\x03')\n buf.write('\\x02\\x02\\x02уᒱ\\x03\\x02\\x02\\x02хᒳ\\x03\\x02\\x02\\x02чᒵ')\n buf.write('\\x03\\x02\\x02\\x02щᒷ\\x03\\x02\\x02\\x02ыᒹ\\x03\\x02\\x02\\x02э')\n buf.write('ᒻ\\x03\\x02\\x02\\x02яᒽ\\x03\\x02\\x02\\x02ёᒿ\\x03\\x02\\x02\\x02')\n buf.write('ѓᓁ\\x03\\x02\\x02\\x02ѕᓃ\\x03\\x02\\x02\\x02їᓅ\\x03')\n buf.write('\\x02\\x02\\x02љᓇ\\x03\\x02\\x02\\x02ћᓉ\\x03\\x02\\x02\\x02ѝᓋ')\n buf.write('\\x03\\x02\\x02\\x02џᓍ\\x03\\x02\\x02\\x02ѡᓏ\\x03\\x02\\x02\\x02ѣ')\n 
buf.write('ᓑ\\x03\\x02\\x02\\x02ѥᓓ\\x03\\x02\\x02\\x02ѧᓕ\\x03\\x02\\x02\\x02')\n buf.write('ѩѪ\\x070\\x02\\x02Ѫѫ\\x070\\x02\\x02ѫ\\x04\\x03\\x02')\n buf.write('\\x02\\x02Ѭѭ\\x05еț\\x02ѭ\\x06\\x03\\x02\\x02\\x02Ѯ')\n buf.write('ѯ\\x05еț\\x02ѯѰ\\x05лȞ\\x02Ѱ')\n buf.write('ѱ\\x05лȞ\\x02ѱ\\x08\\x03\\x02\\x02\\x02Ѳѳ\\x05е')\n buf.write('ț\\x02ѳѴ\\x05пȠ\\x02Ѵѵ\\x05ћ')\n buf.write('Ȯ\\x02ѵѶ\\x05нȟ\\x02Ѷѷ\\x05ї')\n buf.write('Ȭ\\x02ѷ\\n\\x03\\x02\\x02\\x02Ѹѹ\\x05еț\\x02ѹ')\n buf.write('Ѻ\\x05сȡ\\x02Ѻѻ\\x05нȟ\\x02ѻ')\n buf.write('Ѽ\\x05яȨ\\x02Ѽѽ\\x05ћȮ\\x02ѽ')\n buf.write('\\x0c\\x03\\x02\\x02\\x02Ѿѿ\\x05еț\\x02ѿҀ\\x05с')\n buf.write('ȡ\\x02Ҁҁ\\x05сȡ\\x02ҁ҂\\x05ї')\n buf.write('Ȭ\\x02҂҃\\x05нȟ\\x02҃҄\\x05с')\n buf.write('ȡ\\x02҄҅\\x05еț\\x02҅҆\\x05ћ')\n buf.write('Ȯ\\x02҆҇\\x05нȟ\\x02҇\\x0e\\x03\\x02\\x02\\x02')\n buf.write('҈҉\\x05еț\\x02҉Ҋ\\x05ыȦ')\n buf.write('\\x02Ҋҋ\\x05ыȦ\\x02ҋ\\x10\\x03\\x02\\x02\\x02Ҍ')\n buf.write('ҍ\\x05еț\\x02ҍҎ\\x05ыȦ\\x02Ҏ')\n buf.write('ҏ\\x05ћȮ\\x02ҏҐ\\x05нȟ\\x02Ґ')\n buf.write('ґ\\x05їȬ\\x02ґ\\x12\\x03\\x02\\x02\\x02Ғғ\\x05')\n buf.write('еț\\x02ғҔ\\x05яȨ\\x02Ҕҕ')\n buf.write('\\x05еț\\x02ҕҖ\\x05ыȦ\\x02Җҗ')\n buf.write('\\x05ѥȳ\\x02җҘ\\x05ѧȴ\\x02Ҙҙ')\n buf.write('\\x05нȟ\\x02ҙ\\x14\\x03\\x02\\x02\\x02Ққ\\x05е')\n buf.write('ț\\x02қҜ\\x05яȨ\\x02Ҝҝ\\x05л')\n buf.write('Ȟ\\x02ҝ\\x16\\x03\\x02\\x02\\x02Ҟҟ\\x05еț\\x02')\n buf.write('ҟҠ\\x05яȨ\\x02Ҡҡ\\x05ѥȳ')\n buf.write('\\x02ҡ\\x18\\x03\\x02\\x02\\x02Ңң\\x05еț\\x02ң')\n buf.write('Ҥ\\x05їȬ\\x02Ҥҥ\\x05їȬ\\x02ҥ')\n buf.write('Ҧ\\x05еț\\x02Ҧҧ\\x05ѥȳ\\x02ҧ')\n buf.write('\\x1a\\x03\\x02\\x02\\x02Ҩҩ\\x05еț\\x02ҩҪ\\x05')\n buf.write('љȭ\\x02Ҫ\\x1c\\x03\\x02\\x02\\x02ҫҬ\\x05еț')\n buf.write('\\x02Ҭҭ\\x05љȭ\\x02ҭҮ\\x05љȭ')\n buf.write('\\x02Үү\\x05ѝȯ\\x02үҰ\\x05эȧ')\n buf.write('\\x02Ұұ\\x05нȟ\\x02ұ\\x1e\\x03\\x02\\x02\\x02Ҳ')\n buf.write('ҳ\\x05еț\\x02ҳҴ\\x05љȭ\\x02Ҵ')\n buf.write('ҵ\\x05љȭ\\x02ҵҶ\\x05нȟ\\x02Ҷ')\n buf.write('ҷ\\x05їȬ\\x02ҷҸ\\x05ћȮ\\x02Ҹ')\n buf.write(' 
\\x03\\x02\\x02\\x02ҹҺ\\x05еț\\x02Һһ\\x05љ')\n buf.write('ȭ\\x02һҼ\\x05йȝ\\x02Ҽ\"\\x03\\x02\\x02\\x02ҽ')\n buf.write('Ҿ\\x05еț\\x02Ҿҿ\\x05љȭ\\x02ҿ')\n buf.write('Ӏ\\x05љȭ\\x02ӀӁ\\x05ёȩ\\x02Ӂ')\n buf.write('ӂ\\x05йȝ\\x02ӂӃ\\x05хȣ\\x02Ӄ')\n buf.write('ӄ\\x05еț\\x02ӄӅ\\x05ћȮ\\x02Ӆ')\n buf.write('ӆ\\x05нȟ\\x02ӆ$\\x03\\x02\\x02\\x02Ӈӈ\\x05е')\n buf.write('ț\\x02ӈӉ\\x05ћȮ\\x02Ӊ&\\x03\\x02\\x02\\x02ӊ')\n buf.write('Ӌ\\x05еț\\x02Ӌӌ\\x05ћȮ\\x02ӌ')\n buf.write('Ӎ\\x05ћȮ\\x02Ӎӎ\\x05їȬ\\x02ӎ')\n buf.write('ӏ\\x05хȣ\\x02ӏӐ\\x05зȜ\\x02Ӑ')\n buf.write('ӑ\\x05ѝȯ\\x02ӑӒ\\x05ћȮ\\x02Ӓ')\n buf.write('ӓ\\x05нȟ\\x02ӓ(\\x03\\x02\\x02\\x02Ӕӕ\\x05е')\n buf.write('ț\\x02ӕӖ\\x05ѝȯ\\x02Ӗӗ\\x05л')\n buf.write('Ȟ\\x02ӗӘ\\x05хȣ\\x02Әә\\x05ћ')\n buf.write('Ȯ\\x02ә*\\x03\\x02\\x02\\x02Ӛӛ\\x05еț\\x02ӛ')\n buf.write('Ӝ\\x05ѝȯ\\x02Ӝӝ\\x05ћȮ\\x02ӝ')\n buf.write('Ӟ\\x05уȢ\\x02Ӟӟ\\x05хȣ\\x02ӟ')\n buf.write('Ӡ\\x05лȞ\\x02Ӡ,\\x03\\x02\\x02\\x02ӡӢ\\x05е')\n buf.write('ț\\x02Ӣӣ\\x05ѝȯ\\x02ӣӤ\\x05ћ')\n buf.write('Ȯ\\x02Ӥӥ\\x05ёȩ\\x02ӥ.\\x03\\x02\\x02\\x02Ӧ')\n buf.write('ӧ\\x05еț\\x02ӧӨ\\x05ѝȯ\\x02Ө')\n buf.write('ө\\x05ћȮ\\x02өӪ\\x05ёȩ\\x02Ӫ')\n buf.write('ӫ\\x05эȧ\\x02ӫӬ\\x05еț\\x02Ӭ')\n buf.write('ӭ\\x05ћȮ\\x02ӭӮ\\x05хȣ\\x02Ӯ')\n buf.write('ӯ\\x05йȝ\\x02ӯ0\\x03\\x02\\x02\\x02Ӱӱ\\x05')\n buf.write('еț\\x02ӱӲ\\x05ѝȯ\\x02Ӳӳ')\n buf.write('\\x05ћȮ\\x02ӳӴ\\x05ёȩ\\x02Ӵӵ')\n buf.write('\\x05яȨ\\x02ӵӶ\\x05ёȩ\\x02Ӷӷ')\n buf.write('\\x05эȧ\\x02ӷӸ\\x05ёȩ\\x02Ӹӹ')\n buf.write('\\x05ѝȯ\\x02ӹӺ\\x05љȭ\\x02Ӻӻ')\n buf.write('\\x07a\\x02\\x02ӻӼ\\x05ћȮ\\x02Ӽӽ\\x05ї')\n buf.write('Ȭ\\x02ӽӾ\\x05еț\\x02Ӿӿ\\x05я')\n buf.write('Ȩ\\x02ӿԀ\\x05љȭ\\x02Ԁԁ\\x05е')\n buf.write('ț\\x02ԁԂ\\x05йȝ\\x02Ԃԃ\\x05ћ')\n buf.write('Ȯ\\x02ԃԄ\\x05хȣ\\x02Ԅԅ\\x05ё')\n buf.write('ȩ\\x02ԅԆ\\x05яȨ\\x02Ԇ2\\x03\\x02\\x02\\x02')\n buf.write('ԇԈ\\x05зȜ\\x02Ԉԉ\\x05еț')\n buf.write('\\x02ԉԊ\\x05ћȮ\\x02Ԋԋ\\x05йȝ')\n buf.write('\\x02ԋԌ\\x05уȢ\\x02Ԍ4\\x03\\x02\\x02\\x02ԍ')\n buf.write('Ԏ\\x05зȜ\\x02Ԏԏ\\x05нȟ\\x02ԏ')\n buf.write('Ԑ\\x05пȠ\\x02Ԑԑ\\x05ёȩ\\x02ԑ')\n 
buf.write('Ԓ\\x05їȬ\\x02Ԓԓ\\x05нȟ\\x02ԓ')\n buf.write('6\\x03\\x02\\x02\\x02Ԕԕ\\x05зȜ\\x02ԕԖ\\x05')\n buf.write('нȟ\\x02Ԗԗ\\x05сȡ\\x02ԗԘ')\n buf.write('\\x05хȣ\\x02Ԙԙ\\x05яȨ\\x02ԙ8\\x03')\n buf.write('\\x02\\x02\\x02Ԛԛ\\x05зȜ\\x02ԛԜ\\x05н')\n buf.write('ȟ\\x02Ԝԝ\\x05ћȮ\\x02ԝԞ\\x05ѡ')\n buf.write('ȱ\\x02Ԟԟ\\x05нȟ\\x02ԟԠ\\x05н')\n buf.write('ȟ\\x02Ԡԡ\\x05яȨ\\x02ԡ:\\x03\\x02\\x02\\x02Ԣ')\n buf.write('ԣ\\x05зȜ\\x02ԣԤ\\x05пȠ\\x02Ԥ')\n buf.write('ԥ\\x05хȣ\\x02ԥԦ\\x05ыȦ\\x02Ԧ')\n buf.write('ԧ\\x05нȟ\\x02ԧ<\\x03\\x02\\x02\\x02Ԩԩ\\x05з')\n buf.write('Ȝ\\x02ԩԪ\\x05хȣ\\x02Ԫԫ\\x05я')\n buf.write('Ȩ\\x02ԫԬ\\x05еț\\x02Ԭԭ\\x05ї')\n buf.write('Ȭ\\x02ԭԮ\\x05ѥȳ\\x02Ԯԯ\\x07a\\x02')\n buf.write('\\x02ԯ\\u0530\\x05лȞ\\x02\\u0530Ա\\x05ёȩ')\n buf.write('\\x02ԱԲ\\x05ѝȯ\\x02ԲԳ\\x05зȜ')\n buf.write('\\x02ԳԴ\\x05ыȦ\\x02ԴԵ\\x05нȟ')\n buf.write('\\x02Ե>\\x03\\x02\\x02\\x02ԶԷ\\x05зȜ\\x02ԷԸ')\n buf.write('\\x05хȣ\\x02ԸԹ\\x05яȨ\\x02ԹԺ')\n buf.write('\\x05еț\\x02ԺԻ\\x05їȬ\\x02ԻԼ')\n buf.write('\\x05ѥȳ\\x02ԼԽ\\x07a\\x02\\x02ԽԾ\\x05п')\n buf.write('Ƞ\\x02ԾԿ\\x05ыȦ\\x02ԿՀ\\x05ё')\n buf.write('ȩ\\x02ՀՁ\\x05еț\\x02ՁՂ\\x05ћ')\n buf.write('Ȯ\\x02Ղ@\\x03\\x02\\x02\\x02ՃՄ\\x05зȜ\\x02Մ')\n buf.write('Յ\\x05хȣ\\x02ՅՆ\\x05яȨ\\x02Ն')\n buf.write('Շ\\x05еț\\x02ՇՈ\\x05їȬ\\x02Ո')\n buf.write('Չ\\x05ѥȳ\\x02ՉՊ\\x07a\\x02\\x02ՊՋ')\n buf.write('\\x05хȣ\\x02ՋՌ\\x05яȨ\\x02ՌՍ')\n buf.write('\\x05ћȮ\\x02ՍՎ\\x05нȟ\\x02ՎՏ')\n buf.write('\\x05сȡ\\x02ՏՐ\\x05нȟ\\x02ՐՑ')\n buf.write('\\x05їȬ\\x02ՑB\\x03\\x02\\x02\\x02ՒՓ\\x05зȜ')\n buf.write('\\x02ՓՔ\\x05ыȦ\\x02ՔՕ\\x05ёȩ')\n buf.write('\\x02ՕՖ\\x05зȜ\\x02ՖD\\x03\\x02\\x02\\x02\\u0557\\u0558')\n buf.write('\\x05зȜ\\x02\\u0558ՙ\\x05ыȦ\\x02ՙ՚')\n buf.write('\\x05ёȩ\\x02՚՛\\x05йȝ\\x02՛՜')\n buf.write('\\x05щȥ\\x02՜F\\x03\\x02\\x02\\x02՝՞\\x05зȜ')\n buf.write('\\x02՞՟\\x05ёȩ\\x02՟ՠ\\x05лȞ')\n buf.write('\\x02ՠա\\x05ѥȳ\\x02աH\\x03\\x02\\x02\\x02բգ')\n buf.write('\\x05зȜ\\x02գդ\\x05ёȩ\\x02դե')\n buf.write('\\x05ёȩ\\x02եզ\\x05ыȦ\\x02զէ')\n buf.write('\\x05нȟ\\x02էը\\x05еț\\x02ըթ')\n 
buf.write('\\x05яȨ\\x02թJ\\x03\\x02\\x02\\x02ժի\\x05зȜ')\n buf.write('\\x02իլ\\x05ёȩ\\x02լխ\\x05ћȮ')\n buf.write('\\x02խծ\\x05уȢ\\x02ծL\\x03\\x02\\x02\\x02կհ')\n buf.write('\\x05зȜ\\x02հձ\\x05їȬ\\x02ձղ')\n buf.write('\\x05нȟ\\x02ղճ\\x05еț\\x02ճմ')\n buf.write('\\x05лȞ\\x02մյ\\x05ћȮ\\x02յն')\n buf.write('\\x05уȢ\\x02նN\\x03\\x02\\x02\\x02շո\\x05зȜ')\n buf.write('\\x02ոչ\\x05ѝȯ\\x02չպ\\x05ыȦ')\n buf.write('\\x02պջ\\x05щȥ\\x02ջP\\x03\\x02\\x02\\x02ռս')\n buf.write('\\x05зȜ\\x02սվ\\x05ѥȳ\\x02վR\\x03')\n buf.write('\\x02\\x02\\x02տր\\x05зȜ\\x02րց\\x05ѥ')\n buf.write('ȳ\\x02ցւ\\x05ћȮ\\x02ւփ\\x05н')\n buf.write('ȟ\\x02փT\\x03\\x02\\x02\\x02քօ\\x05йȝ\\x02օ')\n buf.write('V\\x03\\x02\\x02\\x02ֆև\\x05йȝ\\x02ևֈ\\x05е')\n buf.write('ț\\x02ֈ։\\x05йȝ\\x02։֊\\x05у')\n buf.write('Ȣ\\x02֊\\u058b\\x05нȟ\\x02\\u058bX\\x03\\x02\\x02\\x02\\u058c')\n buf.write('֍\\x05йȝ\\x02֍֎\\x05еț\\x02֎')\n buf.write('֏\\x05ыȦ\\x02֏\\u0590\\x05ыȦ\\x02\\u0590')\n buf.write('Z\\x03\\x02\\x02\\x02֑֒\\x05йȝ\\x02֒֓\\x05е')\n buf.write('ț\\x02֓֔\\x05яȨ\\x02֔֕\\x05ё')\n buf.write('ȩ\\x02֖֕\\x05яȨ\\x02֖֗\\x05х')\n buf.write('ȣ\\x02֗֘\\x05йȝ\\x02֘֙\\x05е')\n buf.write('ț\\x02֚֙\\x05ыȦ\\x02֚\\\\\\x03\\x02\\x02\\x02֛')\n buf.write('֜\\x05йȝ\\x02֜֝\\x05еț\\x02֝')\n buf.write('֞\\x05љȭ\\x02֞֟\\x05йȝ\\x02֟')\n buf.write('֠\\x05еț\\x02֠֡\\x05лȞ\\x02֡')\n buf.write('֢\\x05нȟ\\x02֢^\\x03\\x02\\x02\\x02֣֤\\x05й')\n buf.write('ȝ\\x02֤֥\\x05еț\\x02֥֦\\x05љ')\n buf.write('ȭ\\x02֦֧\\x05нȟ\\x02֧`\\x03\\x02\\x02\\x02֨')\n buf.write('֩\\x05йȝ\\x02֪֩\\x05еț\\x02֪')\n buf.write('֫\\x05љȭ\\x02֫֬\\x05ћȮ\\x02֬')\n buf.write('b\\x03\\x02\\x02\\x02֭֮\\x05йȝ\\x02֮֯\\x05у')\n buf.write('Ȣ\\x02ְ֯\\x05еț\\x02ְֱ\\x05ї')\n buf.write('Ȭ\\x02ֱd\\x03\\x02\\x02\\x02ֲֳ\\x05йȝ\\x02ֳ')\n buf.write('ִ\\x05уȢ\\x02ִֵ\\x05еț\\x02ֵ')\n buf.write('ֶ\\x05їȬ\\x02ֶַ\\x07a\\x02\\x02ַָ')\n buf.write('\\x05йȝ\\x02ָֹ\\x05љȭ\\x02ֹf\\x03')\n buf.write('\\x02\\x02\\x02ֺֻ\\x05йȝ\\x02ֻּ\\x05у')\n buf.write('Ȣ\\x02ּֽ\\x05еț\\x02ֽ־\\x05ї')\n 
buf.write('Ȭ\\x02־ֿ\\x05еț\\x02ֿ׀\\x05й')\n buf.write('ȝ\\x02׀ׁ\\x05ћȮ\\x02ׁׂ\\x05н')\n buf.write('ȟ\\x02ׂ׃\\x05їȬ\\x02׃h\\x03\\x02\\x02\\x02ׄ')\n buf.write('ׅ\\x05йȝ\\x02ׅ׆\\x05уȢ\\x02׆')\n buf.write('ׇ\\x05нȟ\\x02ׇ\\u05c8\\x05йȝ\\x02\\u05c8')\n buf.write('\\u05c9\\x05щȥ\\x02\\u05c9j\\x03\\x02\\x02\\x02\\u05ca\\u05cb\\x05й')\n buf.write('ȝ\\x02\\u05cb\\u05cc\\x05уȢ\\x02\\u05cc\\u05cd\\x05ї')\n buf.write('Ȭ\\x02\\u05cdl\\x03\\x02\\x02\\x02\\u05ce\\u05cf\\x05йȝ\\x02\\u05cf')\n buf.write('א\\x05ыȦ\\x02אב\\x05ёȩ\\x02ב')\n buf.write('ג\\x05зȜ\\x02גn\\x03\\x02\\x02\\x02דה\\x05й')\n buf.write('ȝ\\x02הו\\x05ыȦ\\x02וז\\x05ё')\n buf.write('ȩ\\x02זח\\x05љȭ\\x02חט\\x05н')\n buf.write('ȟ\\x02טp\\x03\\x02\\x02\\x02יך\\x05йȝ\\x02ך')\n buf.write('כ\\x05ыȦ\\x02כל\\x05ѝȯ\\x02ל')\n buf.write('ם\\x05љȭ\\x02םמ\\x05ћȮ\\x02מ')\n buf.write('ן\\x05нȟ\\x02ןנ\\x05їȬ\\x02נ')\n buf.write('r\\x03\\x02\\x02\\x02סע\\x05йȝ\\x02עף\\x05ё')\n buf.write('ȩ\\x02ףפ\\x05ыȦ\\x02פץ\\x05ы')\n buf.write('Ȧ\\x02ץצ\\x05нȟ\\x02צק\\x05й')\n buf.write('ȝ\\x02קר\\x05ћȮ\\x02רt\\x03\\x02\\x02\\x02ש')\n buf.write('ת\\x05йȝ\\x02ת\\u05eb\\x05ёȩ\\x02\\u05eb')\n buf.write('\\u05ec\\x05ыȦ\\x02\\u05ec\\u05ed\\x05ѝȯ\\x02\\u05ed')\n buf.write('\\u05ee\\x05эȧ\\x02\\u05eeׯ\\x05яȨ\\x02ׯ')\n buf.write('װ\\x05љȭ\\x02װv\\x03\\x02\\x02\\x02ױײ\\x05й')\n buf.write('ȝ\\x02ײ׳\\x05ёȩ\\x02׳״\\x05э')\n buf.write('ȧ\\x02״\\u05f5\\x05эȧ\\x02\\u05f5\\u05f6\\x05н')\n buf.write('ȟ\\x02\\u05f6\\u05f7\\x05яȨ\\x02\\u05f7\\u05f8\\x05ћ')\n buf.write('Ȯ\\x02\\u05f8x\\x03\\x02\\x02\\x02\\u05f9\\u05fa\\x05йȝ\\x02\\u05fa')\n buf.write('\\u05fb\\x05ёȩ\\x02\\u05fb\\u05fc\\x05эȧ\\x02\\u05fc')\n buf.write('\\u05fd\\x05эȧ\\x02\\u05fd\\u05fe\\x05хȣ\\x02\\u05fe')\n buf.write('\\u05ff\\x05ћȮ\\x02\\u05ffz\\x03\\x02\\x02\\x02\\u0600\\u0601\\x05й')\n buf.write('ȝ\\x02\\u0601\\u0602\\x05ёȩ\\x02\\u0602\\u0603\\x05э')\n buf.write('ȧ\\x02\\u0603\\u0604\\x05эȧ\\x02\\u0604\\u0605\\x05х')\n buf.write('ȣ\\x02\\u0605؆\\x05ћȮ\\x02؆؇\\x05ћ')\n 
buf.write('Ȯ\\x02؇؈\\x05нȟ\\x02؈؉\\x05л')\n buf.write('Ȟ\\x02؉|\\x03\\x02\\x02\\x02؊؋\\x05йȝ\\x02؋')\n buf.write('،\\x05ёȩ\\x02،؍\\x05эȧ\\x02؍')\n buf.write('؎\\x05ѓȪ\\x02؎؏\\x05еț\\x02؏')\n buf.write('ؐ\\x05ћȮ\\x02ؐؑ\\x05хȣ\\x02ؑ')\n buf.write('ؒ\\x05зȜ\\x02ؒؓ\\x05хȣ\\x02ؓ')\n buf.write('ؔ\\x05ыȦ\\x02ؔؕ\\x05хȣ\\x02ؕ')\n buf.write('ؖ\\x05ћȮ\\x02ؖؗ\\x05ѥȳ\\x02ؗ')\n buf.write('~\\x03\\x02\\x02\\x02ؘؙ\\x05йȝ\\x02ؙؚ\\x05ё')\n buf.write('ȩ\\x02ؚ؛\\x05эȧ\\x02؛\\u061c\\x05ѓ')\n buf.write('Ȫ\\x02\\u061c؝\\x05хȣ\\x02؝؞\\x05ы')\n buf.write('Ȧ\\x02؞؟\\x05нȟ\\x02؟\\x80\\x03\\x02\\x02')\n buf.write('\\x02ؠء\\x05йȝ\\x02ءآ\\x05ёȩ')\n buf.write('\\x02آأ\\x05эȧ\\x02أؤ\\x05ѓȪ')\n buf.write('\\x02ؤإ\\x05ёȩ\\x02إئ\\x05ѝȯ')\n buf.write('\\x02ئا\\x05яȨ\\x02اب\\x05лȞ')\n buf.write('\\x02ب\\x82\\x03\\x02\\x02\\x02ةت\\x05йȝ\\x02ت')\n buf.write('ث\\x05ёȩ\\x02ثج\\x05яȨ\\x02ج')\n buf.write('ح\\x05яȨ\\x02حخ\\x05нȟ\\x02خ')\n buf.write('د\\x05йȝ\\x02دذ\\x05ћȮ\\x02ذ')\n buf.write('\\x84\\x03\\x02\\x02\\x02رز\\x05йȝ\\x02زس')\n buf.write('\\x05ёȩ\\x02سش\\x05яȨ\\x02شص')\n buf.write('\\x05яȨ\\x02صض\\x05нȟ\\x02ضط')\n buf.write('\\x05йȝ\\x02طظ\\x05ћȮ\\x02ظع')\n buf.write('\\x07a\\x02\\x02عغ\\x05зȜ\\x02غػ\\x05ѥ')\n buf.write('ȳ\\x02ػؼ\\x07a\\x02\\x02ؼؽ\\x05їȬ')\n buf.write('\\x02ؽؾ\\x05ёȩ\\x02ؾؿ\\x05ёȩ')\n buf.write('\\x02ؿـ\\x05ћȮ\\x02ـ\\x86\\x03\\x02\\x02\\x02ف')\n buf.write('ق\\x05йȝ\\x02قك\\x05ёȩ\\x02ك')\n buf.write('ل\\x05яȨ\\x02لم\\x05љȭ\\x02م')\n buf.write('ن\\x05ћȮ\\x02نه\\x05еț\\x02ه')\n buf.write('و\\x05яȨ\\x02وى\\x05ћȮ\\x02ى')\n buf.write('\\x88\\x03\\x02\\x02\\x02يً\\x05йȝ\\x02ًٌ')\n buf.write('\\x05ёȩ\\x02ٌٍ\\x05яȨ\\x02ٍَ')\n buf.write('\\x05љȭ\\x02َُ\\x05ћȮ\\x02ُِ')\n buf.write('\\x05їȬ\\x02ِّ\\x05еț\\x02ّْ')\n buf.write('\\x05хȣ\\x02ْٓ\\x05яȨ\\x02ٓٔ')\n buf.write('\\x05ћȮ\\x02ٔ\\x8a\\x03\\x02\\x02\\x02ٕٖ\\x05й')\n buf.write('ȝ\\x02ٖٗ\\x05ёȩ\\x02ٗ٘\\x05я')\n buf.write('Ȩ\\x02٘ٙ\\x05љȭ\\x02ٙٚ\\x05ћ')\n buf.write('Ȯ\\x02ٚٛ\\x05їȬ\\x02ٜٛ\\x05е')\n buf.write('ț\\x02ٜٝ\\x05хȣ\\x02ٝٞ\\x05я')\n 
buf.write('Ȩ\\x02ٟٞ\\x05ћȮ\\x02ٟ٠\\x05љ')\n buf.write('ȭ\\x02٠\\x8c\\x03\\x02\\x02\\x02١٢\\x05йȝ')\n buf.write('\\x02٢٣\\x05ёȩ\\x02٣٤\\x05яȨ')\n buf.write('\\x02٤٥\\x05љȭ\\x02٥٦\\x05ћȮ')\n buf.write('\\x02٦٧\\x05їȬ\\x02٧٨\\x05ѝȯ')\n buf.write('\\x02٨٩\\x05йȝ\\x02٩٪\\x05ћȮ')\n buf.write('\\x02٪٫\\x05ёȩ\\x02٫٬\\x05їȬ')\n buf.write('\\x02٬\\x8e\\x03\\x02\\x02\\x02٭ٮ\\x05йȝ\\x02ٮ')\n buf.write('ٯ\\x05ёȩ\\x02ٯٰ\\x05яȨ\\x02ٰ')\n buf.write('ٱ\\x05ћȮ\\x02ٱٲ\\x05нȟ\\x02ٲ')\n buf.write('ٳ\\x05яȨ\\x02ٳٴ\\x05ћȮ\\x02ٴ')\n buf.write('\\x90\\x03\\x02\\x02\\x02ٵٶ\\x05йȝ\\x02ٶٷ')\n buf.write('\\x05ёȩ\\x02ٷٸ\\x05яȨ\\x02ٸٹ')\n buf.write('\\x05ћȮ\\x02ٹٺ\\x05нȟ\\x02ٺٻ')\n buf.write('\\x05ѣȲ\\x02ٻټ\\x05ћȮ\\x02ټ\\x92')\n buf.write('\\x03\\x02\\x02\\x02ٽپ\\x05йȝ\\x02پٿ\\x05ё')\n buf.write('ȩ\\x02ٿڀ\\x05яȨ\\x02ڀځ\\x05ћ')\n buf.write('Ȯ\\x02ځڂ\\x05хȣ\\x02ڂڃ\\x05я')\n buf.write('Ȩ\\x02ڃڄ\\x05ѝȯ\\x02ڄڅ\\x05н')\n buf.write('ȟ\\x02څ\\x94\\x03\\x02\\x02\\x02چڇ\\x05йȝ')\n buf.write('\\x02ڇڈ\\x05ёȩ\\x02ڈډ\\x05яȨ')\n buf.write('\\x02ډڊ\\x05џȰ\\x02ڊڋ\\x05нȟ')\n buf.write('\\x02ڋڌ\\x05їȬ\\x02ڌڍ\\x05ћȮ')\n buf.write('\\x02ڍ\\x96\\x03\\x02\\x02\\x02ڎڏ\\x05йȝ\\x02ڏ')\n buf.write('ڐ\\x05ёȩ\\x02ڐڑ\\x05їȬ\\x02ڑ')\n buf.write('ڒ\\x05їȬ\\x02ڒړ\\x05ѝȯ\\x02ړ')\n buf.write('ڔ\\x05ѓȪ\\x02ڔڕ\\x05ћȮ\\x02ڕ')\n buf.write('ږ\\x07a\\x02\\x02ږڗ\\x05ѣȲ\\x02ڗژ')\n buf.write('\\x05хȣ\\x02ژڙ\\x05лȞ\\x02ڙ\\x98')\n buf.write('\\x03\\x02\\x02\\x02ښڛ\\x05йȝ\\x02ڛڜ\\x05ё')\n buf.write('ȩ\\x02ڜڝ\\x05їȬ\\x02ڝڞ\\x05ї')\n buf.write('Ȭ\\x02ڞڟ\\x05ѝȯ\\x02ڟڠ\\x05ѓ')\n buf.write('Ȫ\\x02ڠڡ\\x05ћȮ\\x02ڡڢ\\x07a\\x02')\n buf.write('\\x02ڢڣ\\x05ѣȲ\\x02ڣڤ\\x05хȣ')\n buf.write('\\x02ڤڥ\\x05лȞ\\x02ڥڦ\\x07a\\x02\\x02ڦ')\n buf.write('ڧ\\x05еț\\x02ڧڨ\\x05ыȦ\\x02ڨ')\n buf.write('ک\\x05ыȦ\\x02ک\\x9a\\x03\\x02\\x02\\x02ڪګ')\n buf.write('\\x05йȝ\\x02ګڬ\\x05ёȩ\\x02ڬڭ')\n buf.write('\\x05љȭ\\x02ڭڮ\\x05ћȮ\\x02ڮ\\x9c')\n buf.write('\\x03\\x02\\x02\\x02گڰ\\x05йȝ\\x02ڰڱ\\x05ё')\n buf.write('ȩ\\x02ڱڲ\\x05ѝȯ\\x02ڲڳ\\x05я')\n 
buf.write('Ȩ\\x02ڳڴ\\x05ћȮ\\x02ڴ\\x9e\\x03\\x02\\x02')\n buf.write('\\x02ڵڶ\\x05йȝ\\x02ڶڷ\\x05їȬ')\n buf.write('\\x02ڷڸ\\x05нȟ\\x02ڸڹ\\x05еț')\n buf.write('\\x02ڹں\\x05ћȮ\\x02ںڻ\\x05нȟ')\n buf.write('\\x02ڻ\\xa0\\x03\\x02\\x02\\x02ڼڽ\\x05йȝ\\x02ڽ')\n buf.write('ھ\\x05їȬ\\x02ھڿ\\x05ёȩ\\x02ڿ')\n buf.write('ۀ\\x05љȭ\\x02ۀہ\\x05љȭ\\x02ہ')\n buf.write('¢\\x03\\x02\\x02\\x02ۂۃ\\x05йȝ\\x02ۃۄ')\n buf.write('\\x05ѝȯ\\x02ۄۅ\\x05зȜ\\x02ۅۆ')\n buf.write('\\x05нȟ\\x02ۆ¤\\x03\\x02\\x02\\x02ۇۈ\\x05й')\n buf.write('ȝ\\x02ۈۉ\\x05ѝȯ\\x02ۉۊ\\x05ї')\n buf.write('Ȭ\\x02ۊۋ\\x05їȬ\\x02ۋی\\x05н')\n buf.write('ȟ\\x02یۍ\\x05яȨ\\x02ۍێ\\x05ћ')\n buf.write('Ȯ\\x02ێ¦\\x03\\x02\\x02\\x02ۏې\\x05йȝ')\n buf.write('\\x02ېۑ\\x05ѝȯ\\x02ۑے\\x05їȬ')\n buf.write('\\x02ےۓ\\x05їȬ\\x02ۓ۔\\x05нȟ')\n buf.write('\\x02۔ە\\x05яȨ\\x02ەۖ\\x05ћȮ')\n buf.write('\\x02ۖۗ\\x07a\\x02\\x02ۗۘ\\x05ѝȯ\\x02ۘ')\n buf.write('ۙ\\x05љȭ\\x02ۙۚ\\x05нȟ\\x02ۚ')\n buf.write('ۛ\\x05їȬ\\x02ۛ¨\\x03\\x02\\x02\\x02ۜ\\u06dd')\n buf.write('\\x05йȝ\\x02\\u06dd۞\\x05ѝȯ\\x02۞۟')\n buf.write('\\x05їȬ\\x02۟۠\\x05љȭ\\x02۠ۡ')\n buf.write('\\x05ёȩ\\x02ۡۢ\\x05їȬ\\x02ۢª')\n buf.write('\\x03\\x02\\x02\\x02ۣۤ\\x05йȝ\\x02ۤۥ\\x05ѝ')\n buf.write('ȯ\\x02ۥۦ\\x05љȭ\\x02ۦۧ\\x05ћ')\n buf.write('Ȯ\\x02ۧۨ\\x05ёȩ\\x02ۨ۩\\x05э')\n buf.write('ȧ\\x02۩۪\\x05лȞ\\x02۪۫\\x05е')\n buf.write('ț\\x02۫۬\\x05ћȮ\\x02ۭ۬\\x05ѝ')\n buf.write('ȯ\\x02ۭۮ\\x05эȧ\\x02ۮ¬\\x03\\x02\\x02')\n buf.write('\\x02ۯ۰\\x05йȝ\\x02۰۱\\x05ѥȳ')\n buf.write('\\x02۱۲\\x05йȝ\\x02۲۳\\x05ыȦ')\n buf.write('\\x02۳۴\\x05нȟ\\x02۴®\\x03\\x02\\x02\\x02۵')\n buf.write('۶\\x05лȞ\\x02۶۷\\x05еț\\x02۷')\n buf.write('۸\\x05ћȮ\\x02۸۹\\x05еț\\x02۹')\n buf.write('°\\x03\\x02\\x02\\x02ۺۻ\\x05лȞ\\x02ۻۼ')\n buf.write('\\x05еț\\x02ۼ۽\\x05ћȮ\\x02۽۾')\n buf.write('\\x05еț\\x02۾ۿ\\x05зȜ\\x02ۿ܀')\n buf.write('\\x05еț\\x02܀܁\\x05љȭ\\x02܁܂')\n buf.write('\\x05нȟ\\x02܂²\\x03\\x02\\x02\\x02܃܄\\x05л')\n buf.write('Ȟ\\x02܄܅\\x05еț\\x02܅܆\\x05ћ')\n buf.write('Ȯ\\x02܆܇\\x05нȟ\\x02܇´\\x03\\x02\\x02')\n 
buf.write('\\x02܈܉\\x05лȞ\\x02܉܊\\x05еț')\n buf.write('\\x02܊܋\\x05ѥȳ\\x02܋¶\\x03\\x02\\x02\\x02܌')\n buf.write('܍\\x05лȞ\\x02܍\\u070e\\x05зȜ\\x02\\u070e')\n buf.write('\\u070f\\x07a\\x02\\x02\\u070fܐ\\x05їȬ\\x02ܐܑ')\n buf.write('\\x05ёȩ\\x02ܑܒ\\x05ыȦ\\x02ܒܓ')\n buf.write('\\x05нȟ\\x02ܓܔ\\x07a\\x02\\x02ܔܕ\\x05й')\n buf.write('ȝ\\x02ܕܖ\\x05уȢ\\x02ܖܗ\\x05е')\n buf.write('ț\\x02ܗܘ\\x05яȨ\\x02ܘܙ\\x05с')\n buf.write('ȡ\\x02ܙܚ\\x05нȟ\\x02ܚ¸\\x03\\x02\\x02')\n buf.write('\\x02ܛܜ\\x05лȞ\\x02ܜܝ\\x05зȜ')\n buf.write('\\x02ܝܞ\\x05ћȮ\\x02ܞܟ\\x05хȣ')\n buf.write('\\x02ܟܠ\\x05эȧ\\x02ܠܡ\\x05нȟ')\n buf.write('\\x02ܡܢ\\x05ѧȴ\\x02ܢܣ\\x05ёȩ')\n buf.write('\\x02ܣܤ\\x05яȨ\\x02ܤܥ\\x05нȟ')\n buf.write('\\x02ܥº\\x03\\x02\\x02\\x02ܦܧ\\x05лȞ\\x02ܧ')\n buf.write('ܨ\\x05лȞ\\x02ܨܩ\\x05ыȦ\\x02ܩ')\n buf.write('¼\\x03\\x02\\x02\\x02ܪܫ\\x05лȞ\\x02ܫܬ')\n buf.write('\\x05нȟ\\x02ܬܭ\\x05зȜ\\x02ܭܮ')\n buf.write('\\x05ѝȯ\\x02ܮܯ\\x05сȡ\\x02ܯ¾')\n buf.write('\\x03\\x02\\x02\\x02ܱܰ\\x05лȞ\\x02ܱܲ\\x05н')\n buf.write('ȟ\\x02ܲܳ\\x05йȝ\\x02ܳÀ\\x03\\x02\\x02')\n buf.write('\\x02ܴܵ\\x05лȞ\\x02ܵܶ\\x05нȟ')\n buf.write('\\x02ܷܶ\\x05йȝ\\x02ܷܸ\\x05хȣ')\n buf.write('\\x02ܸܹ\\x05эȧ\\x02ܹܺ\\x05еț')\n buf.write('\\x02ܻܺ\\x05ыȦ\\x02ܻÂ\\x03\\x02\\x02\\x02ܼ')\n buf.write('ܽ\\x05лȞ\\x02ܾܽ\\x05нȟ\\x02ܾ')\n buf.write('ܿ\\x05йȝ\\x02ܿ݀\\x05ыȦ\\x02݀')\n buf.write('݁\\x05еț\\x02݂݁\\x05їȬ\\x02݂')\n buf.write('݃\\x05нȟ\\x02݃Ä\\x03\\x02\\x02\\x02݄݅')\n buf.write('\\x05лȞ\\x02݆݅\\x05нȟ\\x02݆݇')\n buf.write('\\x05йȝ\\x02݈݇\\x05ёȩ\\x02݈݉')\n buf.write('\\x05эȧ\\x02݉݊\\x05ѓȪ\\x02݊\\u074b')\n buf.write('\\x05ёȩ\\x02\\u074b\\u074c\\x05љȭ\\x02\\u074cݍ')\n buf.write('\\x05нȟ\\x02ݍÆ\\x03\\x02\\x02\\x02ݎݏ\\x05л')\n buf.write('Ȟ\\x02ݏݐ\\x05нȟ\\x02ݐݑ\\x05й')\n buf.write('ȝ\\x02ݑݒ\\x05їȬ\\x02ݒݓ\\x05н')\n buf.write('ȟ\\x02ݓݔ\\x05эȧ\\x02ݔݕ\\x05н')\n buf.write('ȟ\\x02ݕݖ\\x05яȨ\\x02ݖݗ\\x05ћ')\n buf.write('Ȯ\\x02ݗÈ\\x03\\x02\\x02\\x02ݘݙ\\x05лȞ')\n buf.write('\\x02ݙݚ\\x05нȟ\\x02ݚݛ\\x05пȠ')\n buf.write('\\x02ݛݜ\\x05еț\\x02ݜݝ\\x05ѝȯ')\n 
buf.write('\\x02ݝݞ\\x05ыȦ\\x02ݞݟ\\x05ћȮ')\n buf.write('\\x02ݟÊ\\x03\\x02\\x02\\x02ݠݡ\\x05лȞ\\x02ݡ')\n buf.write('ݢ\\x05нȟ\\x02ݢݣ\\x05пȠ\\x02ݣ')\n buf.write('ݤ\\x05еț\\x02ݤݥ\\x05ѝȯ\\x02ݥ')\n buf.write('ݦ\\x05ыȦ\\x02ݦݧ\\x05ћȮ\\x02ݧ')\n buf.write('ݨ\\x05љȭ\\x02ݨÌ\\x03\\x02\\x02\\x02ݩݪ')\n buf.write('\\x05лȞ\\x02ݪݫ\\x05нȟ\\x02ݫݬ')\n buf.write('\\x05пȠ\\x02ݬݭ\\x05нȟ\\x02ݭݮ')\n buf.write('\\x05їȬ\\x02ݮݯ\\x05їȬ\\x02ݯݰ')\n buf.write('\\x05нȟ\\x02ݰݱ\\x05лȞ\\x02ݱÎ')\n buf.write('\\x03\\x02\\x02\\x02ݲݳ\\x05лȞ\\x02ݳݴ\\x05н')\n buf.write('ȟ\\x02ݴݵ\\x05пȠ\\x02ݵݶ\\x05х')\n buf.write('ȣ\\x02ݶݷ\\x05яȨ\\x02ݷݸ\\x05н')\n buf.write('ȟ\\x02ݸݹ\\x05їȬ\\x02ݹÐ\\x03\\x02\\x02')\n buf.write('\\x02ݺݻ\\x05лȞ\\x02ݻݼ\\x05нȟ')\n buf.write('\\x02ݼݽ\\x05ыȦ\\x02ݽݾ\\x05нȟ')\n buf.write('\\x02ݾݿ\\x05ћȮ\\x02ݿހ\\x05нȟ')\n buf.write('\\x02ހÒ\\x03\\x02\\x02\\x02ށނ\\x05лȞ\\x02ނ')\n buf.write('ރ\\x05нȟ\\x02ރބ\\x05ѓȪ\\x02ބ')\n buf.write('ޅ\\x05ћȮ\\x02ޅކ\\x05уȢ\\x02ކ')\n buf.write('Ô\\x03\\x02\\x02\\x02އވ\\x05лȞ\\x02ވމ')\n buf.write('\\x05нȟ\\x02މފ\\x05љȭ\\x02ފދ')\n buf.write('\\x05йȝ\\x02ދÖ\\x03\\x02\\x02\\x02ތލ\\x05л')\n buf.write('Ȟ\\x02ލގ\\x05нȟ\\x02ގޏ\\x05ћ')\n buf.write('Ȯ\\x02ޏސ\\x05нȟ\\x02ސޑ\\x05ї')\n buf.write('Ȭ\\x02ޑޒ\\x05эȧ\\x02ޒޓ\\x05х')\n buf.write('ȣ\\x02ޓޔ\\x05яȨ\\x02ޔޕ\\x05х')\n buf.write('ȣ\\x02ޕޖ\\x05љȭ\\x02ޖޗ\\x05ћ')\n buf.write('Ȯ\\x02ޗޘ\\x05хȣ\\x02ޘޙ\\x05й')\n buf.write('ȝ\\x02ޙØ\\x03\\x02\\x02\\x02ޚޛ\\x05лȞ')\n buf.write('\\x02ޛޜ\\x05хȣ\\x02ޜޝ\\x05эȧ')\n buf.write('\\x02ޝޞ\\x05нȟ\\x02ޞޟ\\x05яȨ')\n buf.write('\\x02ޟޠ\\x05љȭ\\x02ޠޡ\\x05хȣ')\n buf.write('\\x02ޡޢ\\x05ёȩ\\x02ޢޣ\\x05яȨ')\n buf.write('\\x02ޣÚ\\x03\\x02\\x02\\x02ޤޥ\\x05лȞ\\x02ޥ')\n buf.write('ަ\\x05хȣ\\x02ަާ\\x05љȭ\\x02ާ')\n buf.write('ި\\x05еț\\x02ިީ\\x05зȜ\\x02ީ')\n buf.write('ު\\x05ыȦ\\x02ުޫ\\x05нȟ\\x02ޫ')\n buf.write('Ü\\x03\\x02\\x02\\x02ެޭ\\x05лȞ\\x02ޭޮ')\n buf.write('\\x05хȣ\\x02ޮޯ\\x05љȭ\\x02ޯް')\n buf.write('\\x05еț\\x02ްޱ\\x05љȭ\\x02ޱ\\u07b2')\n buf.write('\\x05љȭ\\x02\\u07b2\\u07b3\\x05ёȩ\\x02\\u07b3\\u07b4')\n 
buf.write('\\x05йȝ\\x02\\u07b4\\u07b5\\x05хȣ\\x02\\u07b5\\u07b6')\n buf.write('\\x05еț\\x02\\u07b6\\u07b7\\x05ћȮ\\x02\\u07b7\\u07b8')\n buf.write('\\x05нȟ\\x02\\u07b8Þ\\x03\\x02\\x02\\x02\\u07b9\\u07ba\\x05л')\n buf.write('Ȟ\\x02\\u07ba\\u07bb\\x05хȣ\\x02\\u07bb\\u07bc\\x05љ')\n buf.write('ȭ\\x02\\u07bc\\u07bd\\x05ћȮ\\x02\\u07bd\\u07be\\x05х')\n buf.write('ȣ\\x02\\u07be\\u07bf\\x05яȨ\\x02\\u07bf߀\\x05й')\n buf.write('ȝ\\x02߀߁\\x05ћȮ\\x02߁à\\x03\\x02\\x02')\n buf.write('\\x02߂߃\\x05лȞ\\x02߃߄\\x05ёȩ')\n buf.write('\\x02߄߅\\x05йȝ\\x02߅߆\\x05ѝȯ')\n buf.write('\\x02߆߇\\x05эȧ\\x02߇߈\\x05нȟ')\n buf.write('\\x02߈߉\\x05яȨ\\x02߉ߊ\\x05ћȮ')\n buf.write('\\x02ߊâ\\x03\\x02\\x02\\x02ߋߌ\\x05лȞ\\x02ߌ')\n buf.write('ߍ\\x05ёȩ\\x02ߍߎ\\x05ѝȯ\\x02ߎ')\n buf.write('ߏ\\x05зȜ\\x02ߏߐ\\x05ыȦ\\x02ߐ')\n buf.write('ߑ\\x05нȟ\\x02ߑä\\x03\\x02\\x02\\x02ߒߓ')\n buf.write('\\x05лȞ\\x02ߓߔ\\x05їȬ\\x02ߔߕ')\n buf.write('\\x05ёȩ\\x02ߕߖ\\x05ѓȪ\\x02ߖæ')\n buf.write('\\x03\\x02\\x02\\x02ߗߘ\\x05лȞ\\x02ߘߙ\\x05љ')\n buf.write('ȭ\\x02ߙߚ\\x05хȣ\\x02ߚߛ\\x05я')\n buf.write('Ȩ\\x02ߛߜ\\x05ћȮ\\x02ߜߝ\\x05н')\n buf.write('ȟ\\x02ߝߞ\\x05їȬ\\x02ߞߟ\\x05џ')\n buf.write('Ȱ\\x02ߟߠ\\x05еț\\x02ߠߡ\\x05ы')\n buf.write('Ȧ\\x02ߡߢ\\x07a\\x02\\x02ߢߣ\\x05ѝȯ')\n buf.write('\\x02ߣߤ\\x05яȨ\\x02ߤߥ\\x05йȝ')\n buf.write('\\x02ߥߦ\\x05ёȩ\\x02ߦߧ\\x05яȨ')\n buf.write('\\x02ߧߨ\\x05љȭ\\x02ߨߩ\\x05ћȮ')\n buf.write('\\x02ߩߪ\\x05їȬ\\x02ߪ߫\\x05еț')\n buf.write('\\x02߫߬\\x05хȣ\\x02߬߭\\x05яȨ')\n buf.write('\\x02߭߮\\x05нȟ\\x02߮߯\\x05лȞ')\n buf.write('\\x02߯è\\x03\\x02\\x02\\x02߰߱\\x05нȟ\\x02߱')\n buf.write('߲\\x05еț\\x02߲߳\\x05йȝ\\x02߳')\n buf.write('ߴ\\x05уȢ\\x02ߴê\\x03\\x02\\x02\\x02ߵ߶')\n buf.write('\\x05нȟ\\x02߶߷\\x05ыȦ\\x02߷߸')\n buf.write('\\x05нȟ\\x02߸߹\\x05эȧ\\x02߹ߺ')\n buf.write('\\x05нȟ\\x02ߺ\\u07fb\\x05яȨ\\x02\\u07fb\\u07fc')\n buf.write('\\x05ћȮ\\x02\\u07fcì\\x03\\x02\\x02\\x02߽߾\\x05н')\n buf.write('ȟ\\x02߾߿\\x05ыȦ\\x02߿ࠀ\\x05љ')\n buf.write('ȭ\\x02ࠀࠁ\\x05нȟ\\x02ࠁî\\x03\\x02\\x02')\n buf.write('\\x02ࠂࠃ\\x05нȟ\\x02ࠃࠄ\\x05ыȦ')\n 
buf.write('\\x02ࠄࠅ\\x05љȭ\\x02ࠅࠆ\\x05хȣ')\n buf.write('\\x02ࠆࠇ\\x05пȠ\\x02ࠇð\\x03\\x02\\x02\\x02ࠈ')\n buf.write('ࠉ\\x05нȟ\\x02ࠉࠊ\\x05эȧ\\x02ࠊ')\n buf.write('ࠋ\\x05ѓȪ\\x02ࠋࠌ\\x05ћȮ\\x02ࠌ')\n buf.write('ࠍ\\x05ѥȳ\\x02ࠍò\\x03\\x02\\x02\\x02ࠎࠏ')\n buf.write('\\x05нȟ\\x02ࠏࠐ\\x05яȨ\\x02ࠐࠑ')\n buf.write('\\x05еț\\x02ࠑࠒ\\x05зȜ\\x02ࠒࠓ')\n buf.write('\\x05ыȦ\\x02ࠓࠔ\\x05нȟ\\x02ࠔô')\n buf.write('\\x03\\x02\\x02\\x02ࠕࠖ\\x05нȟ\\x02ࠖࠗ\\x05я')\n buf.write('Ȩ\\x02ࠗ࠘\\x05йȝ\\x02࠘࠙\\x05ё')\n buf.write('ȩ\\x02࠙ࠚ\\x05лȞ\\x02ࠚࠛ\\x05х')\n buf.write('ȣ\\x02ࠛࠜ\\x05яȨ\\x02ࠜࠝ\\x05с')\n buf.write('ȡ\\x02ࠝö\\x03\\x02\\x02\\x02ࠞࠟ\\x05нȟ')\n buf.write('\\x02ࠟࠠ\\x05яȨ\\x02ࠠࠡ\\x05лȞ')\n buf.write('\\x02ࠡø\\x03\\x02\\x02\\x02ࠢࠣ\\x05нȟ\\x02ࠣ')\n buf.write('ࠤ\\x05яȨ\\x02ࠤࠥ\\x05ћȮ\\x02ࠥ')\n buf.write('ࠦ\\x05хȣ\\x02ࠦࠧ\\x05ћȮ\\x02ࠧ')\n buf.write('ࠨ\\x05ѥȳ\\x02ࠨࠩ\\x05нȟ\\x02ࠩ')\n buf.write('ࠪ\\x05љȭ\\x02ࠪࠫ\\x05йȝ\\x02ࠫ')\n buf.write('ࠬ\\x05еț\\x02ࠬ࠭\\x05ѓȪ\\x02࠭')\n buf.write('\\u082e\\x05хȣ\\x02\\u082e\\u082f\\x05яȨ\\x02\\u082f')\n buf.write('࠰\\x05сȡ\\x02࠰ú\\x03\\x02\\x02\\x02࠱࠲')\n buf.write('\\x05нȟ\\x02࠲࠳\\x05їȬ\\x02࠳࠴')\n buf.write('\\x05їȬ\\x02࠴ü\\x03\\x02\\x02\\x02࠵࠶\\x05н')\n buf.write('ȟ\\x02࠶࠷\\x05їȬ\\x02࠷࠸\\x05ї')\n buf.write('Ȭ\\x02࠸࠹\\x05ёȩ\\x02࠹࠺\\x05ї')\n buf.write('Ȭ\\x02࠺࠻\\x05љȭ\\x02࠻þ\\x03\\x02\\x02')\n buf.write('\\x02࠼࠽\\x05нȟ\\x02࠽࠾\\x05љȭ')\n buf.write('\\x02࠾\\u083f\\x05йȝ\\x02\\u083fࡀ\\x05еț')\n buf.write('\\x02ࡀࡁ\\x05ѓȪ\\x02ࡁࡂ\\x05нȟ')\n buf.write('\\x02ࡂĀ\\x03\\x02\\x02\\x02ࡃࡄ\\x05нȟ\\x02ࡄ')\n buf.write('ࡅ\\x05џȰ\\x02ࡅࡆ\\x05еț\\x02ࡆ')\n buf.write('ࡇ\\x05ыȦ\\x02ࡇࡈ\\x05яȨ\\x02ࡈ')\n buf.write('ࡉ\\x05еț\\x02ࡉࡊ\\x05эȧ\\x02ࡊ')\n buf.write('ࡋ\\x05нȟ\\x02ࡋĂ\\x03\\x02\\x02\\x02ࡌࡍ')\n buf.write('\\x05нȟ\\x02ࡍࡎ\\x05ѣȲ\\x02ࡎࡏ')\n buf.write('\\x05йȝ\\x02ࡏࡐ\\x05нȟ\\x02ࡐࡑ')\n buf.write('\\x05ѓȪ\\x02ࡑࡒ\\x05ћȮ\\x02ࡒĄ')\n buf.write('\\x03\\x02\\x02\\x02ࡓࡔ\\x05нȟ\\x02ࡔࡕ\\x05ѣ')\n buf.write('Ȳ\\x02ࡕࡖ\\x05йȝ\\x02ࡖࡗ\\x05н')\n buf.write('ȟ\\x02ࡗࡘ\\x05ѓȪ\\x02ࡘ࡙\\x05ћ')\n 
buf.write('Ȯ\\x02࡙࡚\\x05хȣ\\x02࡚࡛\\x05ё')\n buf.write('ȩ\\x02࡛\\u085c\\x05яȨ\\x02\\u085cĆ\\x03\\x02\\x02')\n buf.write('\\x02\\u085d࡞\\x05нȟ\\x02࡞\\u085f\\x05ѣȲ')\n buf.write('\\x02\\u085fࡠ\\x05йȝ\\x02ࡠࡡ\\x05нȟ')\n buf.write('\\x02ࡡࡢ\\x05ѓȪ\\x02ࡢࡣ\\x05ћȮ')\n buf.write('\\x02ࡣࡤ\\x05хȣ\\x02ࡤࡥ\\x05ёȩ')\n buf.write('\\x02ࡥࡦ\\x05яȨ\\x02ࡦࡧ\\x07a\\x02\\x02ࡧ')\n buf.write('ࡨ\\x05хȣ\\x02ࡨࡩ\\x05яȨ\\x02ࡩ')\n buf.write('ࡪ\\x05хȣ\\x02ࡪ\\u086b\\x05ћȮ\\x02\\u086b')\n buf.write('Ĉ\\x03\\x02\\x02\\x02\\u086c\\u086d\\x05нȟ\\x02\\u086d\\u086e')\n buf.write('\\x05ѣȲ\\x02\\u086e\\u086f\\x05йȝ\\x02\\u086fࡰ')\n buf.write('\\x05нȟ\\x02ࡰࡱ\\x05ѓȪ\\x02ࡱࡲ')\n buf.write('\\x05ћȮ\\x02ࡲࡳ\\x05хȣ\\x02ࡳࡴ')\n buf.write('\\x05ёȩ\\x02ࡴࡵ\\x05яȨ\\x02ࡵࡶ')\n buf.write('\\x05љȭ\\x02ࡶĊ\\x03\\x02\\x02\\x02ࡷࡸ\\x05н')\n buf.write('ȟ\\x02ࡸࡹ\\x05ѣȲ\\x02ࡹࡺ\\x05й')\n buf.write('ȝ\\x02ࡺࡻ\\x05ыȦ\\x02ࡻࡼ\\x05ѝ')\n buf.write('ȯ\\x02ࡼࡽ\\x05лȞ\\x02ࡽࡾ\\x05н')\n buf.write('ȟ\\x02ࡾČ\\x03\\x02\\x02\\x02ࡿࢀ\\x05нȟ')\n buf.write('\\x02ࢀࢁ\\x05ѣȲ\\x02ࢁࢂ\\x05йȝ')\n buf.write('\\x02ࢂࢃ\\x05ыȦ\\x02ࢃࢄ\\x05ѝȯ')\n buf.write('\\x02ࢄࢅ\\x05љȭ\\x02ࢅࢆ\\x05хȣ')\n buf.write('\\x02ࢆࢇ\\x05џȰ\\x02ࢇ࢈\\x05нȟ')\n buf.write('\\x02࢈Ď\\x03\\x02\\x02\\x02ࢉࢊ\\x05нȟ\\x02ࢊ')\n buf.write('ࢋ\\x05ѣȲ\\x02ࢋࢌ\\x05нȟ\\x02ࢌ')\n buf.write('ࢍ\\x05йȝ\\x02ࢍࢎ\\x05ѝȯ\\x02ࢎ')\n buf.write('\\u088f\\x05ћȮ\\x02\\u088f\\u0890\\x05нȟ\\x02\\u0890')\n buf.write('Đ\\x03\\x02\\x02\\x02\\u0891\\u0892\\x05нȟ\\x02\\u0892\\u0893')\n buf.write('\\x05ѣȲ\\x02\\u0893\\u0894\\x05хȣ\\x02\\u0894\\u0895')\n buf.write('\\x05љȭ\\x02\\u0895\\u0896\\x05ћȮ\\x02\\u0896\\u0897')\n buf.write('\\x05љȭ\\x02\\u0897Ē\\x03\\x02\\x02\\x02࢙࢘\\x05н')\n buf.write('ȟ\\x02࢙࢚\\x05ѣȲ\\x02࢚࢛\\x05х')\n buf.write('ȣ\\x02࢛࢜\\x05ћȮ\\x02࢜Ĕ\\x03\\x02\\x02')\n buf.write('\\x02࢝࢞\\x05нȟ\\x02࢞࢟\\x05ѣȲ')\n buf.write('\\x02࢟ࢠ\\x05ѓȪ\\x02ࢠࢡ\\x05ыȦ')\n buf.write('\\x02ࢡࢢ\\x05еț\\x02ࢢࢣ\\x05хȣ')\n buf.write('\\x02ࢣࢤ\\x05яȨ\\x02ࢤĖ\\x03\\x02\\x02\\x02ࢥ')\n buf.write('ࢦ\\x05нȟ\\x02ࢦࢧ\\x05ѣȲ\\x02ࢧ')\n 
buf.write('ࢨ\\x05ћȮ\\x02ࢨࢩ\\x05нȟ\\x02ࢩ')\n buf.write('ࢪ\\x05їȬ\\x02ࢪࢫ\\x05яȨ\\x02ࢫ')\n buf.write('ࢬ\\x05еț\\x02ࢬࢭ\\x05ыȦ\\x02ࢭ')\n buf.write('Ę\\x03\\x02\\x02\\x02ࢮࢯ\\x05нȟ\\x02ࢯࢰ')\n buf.write('\\x05ѣȲ\\x02ࢰࢱ\\x05ћȮ\\x02ࢱࢲ')\n buf.write('\\x05їȬ\\x02ࢲࢳ\\x05еț\\x02ࢳࢴ')\n buf.write('\\x05йȝ\\x02ࢴࢵ\\x05ћȮ\\x02ࢵĚ')\n buf.write('\\x03\\x02\\x02\\x02ࢶࢷ\\x05пȠ\\x02ࢷࢸ\\x05е')\n buf.write('ț\\x02ࢸࢹ\\x05хȣ\\x02ࢹࢺ\\x05ы')\n buf.write('Ȧ\\x02ࢺࢻ\\x05ѝȯ\\x02ࢻࢼ\\x05ї')\n buf.write('Ȭ\\x02ࢼࢽ\\x05нȟ\\x02ࢽĜ\\x03\\x02\\x02')\n buf.write('\\x02ࢾࢿ\\x05пȠ\\x02ࢿࣀ\\x05еț')\n buf.write('\\x02ࣀࣁ\\x05ыȦ\\x02ࣁࣂ\\x05љȭ')\n buf.write('\\x02ࣂࣃ\\x05нȟ\\x02ࣃĞ\\x03\\x02\\x02\\x02ࣄ')\n buf.write('ࣅ\\x05пȠ\\x02ࣅࣆ\\x05нȟ\\x02ࣆ')\n buf.write('ࣇ\\x05ћȮ\\x02ࣇࣈ\\x05йȝ\\x02ࣈ')\n buf.write('ࣉ\\x05уȢ\\x02ࣉĠ\\x03\\x02\\x02\\x02࣊࣋')\n buf.write('\\x05пȠ\\x02࣋࣌\\x05хȣ\\x02࣌࣍')\n buf.write('\\x05яȨ\\x02࣍࣎\\x05еț\\x02࣏࣎')\n buf.write('\\x05ыȦ\\x02࣏Ģ\\x03\\x02\\x02\\x02࣐࣑\\x05п')\n buf.write('Ƞ\\x02࣑࣒\\x05хȣ\\x02࣒࣓\\x05ї')\n buf.write('Ȭ\\x02࣓ࣔ\\x05љȭ\\x02ࣔࣕ\\x05ћ')\n buf.write('Ȯ\\x02ࣕĤ\\x03\\x02\\x02\\x02ࣖࣗ\\x05пȠ')\n buf.write('\\x02ࣗࣘ\\x05хȣ\\x02ࣘࣙ\\x05їȬ')\n buf.write('\\x02ࣙࣚ\\x05љȭ\\x02ࣚࣛ\\x05ћȮ')\n buf.write('\\x02ࣛࣜ\\x07a\\x02\\x02ࣜࣝ\\x05џȰ\\x02ࣝ')\n buf.write('ࣞ\\x05еț\\x02ࣞࣟ\\x05ыȦ\\x02ࣟ')\n buf.write('࣠\\x05ѝȯ\\x02࣠࣡\\x05нȟ\\x02࣡')\n buf.write('Ħ\\x03\\x02\\x02\\x02\\u08e2ࣣ\\x05пȠ\\x02ࣣࣤ')\n buf.write('\\x05ыȦ\\x02ࣤࣥ\\x05ёȩ\\x02ࣦࣥ')\n buf.write('\\x05еț\\x02ࣦࣧ\\x05ћȮ\\x02ࣧĨ')\n buf.write('\\x03\\x02\\x02\\x02ࣩࣨ\\x05пȠ\\x02ࣩ࣪\\x05ё')\n buf.write('ȩ\\x02࣪࣫\\x05ыȦ\\x02࣫࣬\\x05ы')\n buf.write('Ȧ\\x02࣭࣬\\x05ёȩ\\x02࣭࣮\\x05ѡ')\n buf.write('ȱ\\x02࣮࣯\\x05хȣ\\x02ࣰ࣯\\x05я')\n buf.write('Ȩ\\x02ࣰࣱ\\x05сȡ\\x02ࣱĪ\\x03\\x02\\x02')\n buf.write('\\x02ࣲࣳ\\x05пȠ\\x02ࣳࣴ\\x05ёȩ')\n buf.write('\\x02ࣴࣵ\\x05ыȦ\\x02ࣶࣵ\\x05ыȦ')\n buf.write('\\x02ࣶࣷ\\x05ёȩ\\x02ࣷࣸ\\x05ѡȱ')\n buf.write('\\x02ࣹࣸ\\x05љȭ\\x02ࣹĬ\\x03\\x02\\x02\\x02ࣺ')\n buf.write('ࣻ\\x05пȠ\\x02ࣻࣼ\\x05ёȩ\\x02ࣼ')\n buf.write('ࣽ\\x05їȬ\\x02ࣽĮ\\x03\\x02\\x02\\x02ࣾࣿ')\n 
buf.write('\\x05пȠ\\x02ࣿऀ\\x05ёȩ\\x02ऀँ')\n buf.write('\\x05їȬ\\x02ँं\\x05еț\\x02ंः')\n buf.write('\\x05ыȦ\\x02ःऄ\\x05ыȦ\\x02ऄİ')\n buf.write('\\x03\\x02\\x02\\x02अआ\\x05пȠ\\x02आइ\\x05ё')\n buf.write('ȩ\\x02इई\\x05їȬ\\x02ईउ\\x05й')\n buf.write('ȝ\\x02उऊ\\x05нȟ\\x02ऊIJ\\x03\\x02\\x02')\n buf.write('\\x02ऋऌ\\x05пȠ\\x02ऌऍ\\x05їȬ')\n buf.write('\\x02ऍऎ\\x05ёȩ\\x02ऎए\\x05эȧ')\n buf.write('\\x02एĴ\\x03\\x02\\x02\\x02ऐऑ\\x05пȠ\\x02ऑ')\n buf.write('ऒ\\x05ѝȯ\\x02ऒओ\\x05ыȦ\\x02ओ')\n buf.write('औ\\x05ыȦ\\x02औĶ\\x03\\x02\\x02\\x02कख')\n buf.write('\\x05пȠ\\x02खग\\x05ѝȯ\\x02गघ')\n buf.write('\\x05яȨ\\x02घङ\\x05йȝ\\x02ङच')\n buf.write('\\x05ћȮ\\x02चछ\\x05хȣ\\x02छज')\n buf.write('\\x05ёȩ\\x02जझ\\x05яȨ\\x02झĸ')\n buf.write('\\x03\\x02\\x02\\x02ञट\\x05сȡ\\x02टठ\\x05ё')\n buf.write('ȩ\\x02ठड\\x05ћȮ\\x02डढ\\x05ё')\n buf.write('ȩ\\x02ढĺ\\x03\\x02\\x02\\x02णत\\x05сȡ')\n buf.write('\\x02तथ\\x05їȬ\\x02थद\\x05еț')\n buf.write('\\x02दध\\x05яȨ\\x02धन\\x05ћȮ')\n buf.write('\\x02नļ\\x03\\x02\\x02\\x02ऩप\\x05сȡ\\x02प')\n buf.write('फ\\x05їȬ\\x02फब\\x05ёȩ\\x02ब')\n buf.write('भ\\x05ѝȯ\\x02भम\\x05ѓȪ\\x02म')\n buf.write('ľ\\x03\\x02\\x02\\x02यर\\x05сȡ\\x02रऱ')\n buf.write('\\x05їȬ\\x02ऱल\\x05ёȩ\\x02लळ')\n buf.write('\\x05ѝȯ\\x02ळऴ\\x05ѓȪ\\x02ऴव')\n buf.write('\\x05хȣ\\x02वश\\x05яȨ\\x02शष')\n buf.write('\\x05сȡ\\x02षŀ\\x03\\x02\\x02\\x02सह\\x05у')\n buf.write('Ȣ\\x02हऺ\\x05еț\\x02ऺऻ\\x05љ')\n buf.write('ȭ\\x02ऻ़\\x05уȢ\\x02़ł\\x03\\x02\\x02')\n buf.write('\\x02ऽा\\x05уȢ\\x02ाि\\x05еț')\n buf.write('\\x02िी\\x05џȰ\\x02ीु\\x05хȣ')\n buf.write('\\x02ुू\\x05яȨ\\x02ूृ\\x05сȡ')\n buf.write('\\x02ृń\\x03\\x02\\x02\\x02ॄॅ\\x05уȢ\\x02ॅ')\n buf.write('ॆ\\x05хȣ\\x02ॆे\\x05лȞ\\x02े')\n buf.write('ै\\x05нȟ\\x02ैņ\\x03\\x02\\x02\\x02ॉॊ')\n buf.write('\\x05уȢ\\x02ॊो\\x05ёȩ\\x02ोौ')\n buf.write('\\x05ѝȯ\\x02ौ्\\x05їȬ\\x02्ň')\n buf.write('\\x03\\x02\\x02\\x02ॎॏ\\x05хȣ\\x02ॏॐ\\x05п')\n buf.write('Ƞ\\x02ॐŊ\\x03\\x02\\x02\\x02॒॑\\x05хȣ')\n buf.write('\\x02॒॓\\x05сȡ\\x02॓॔\\x05яȨ')\n 
buf.write('\\x02॔ॕ\\x05ёȩ\\x02ॕॖ\\x05їȬ')\n buf.write('\\x02ॖॗ\\x05нȟ\\x02ॗŌ\\x03\\x02\\x02\\x02क़')\n buf.write('ख़\\x05хȣ\\x02ख़ग़\\x05эȧ\\x02ग़')\n buf.write('ज़\\x05эȧ\\x02ज़ड़\\x05нȟ\\x02ड़')\n buf.write('ढ़\\x05лȞ\\x02ढ़फ़\\x05хȣ\\x02फ़')\n buf.write('य़\\x05еț\\x02य़ॠ\\x05ћȮ\\x02ॠ')\n buf.write('ॡ\\x05нȟ\\x02ॡŎ\\x03\\x02\\x02\\x02ॢॣ')\n buf.write('\\x05хȣ\\x02ॣ।\\x05яȨ\\x02।Ő')\n buf.write('\\x03\\x02\\x02\\x02॥०\\x05хȣ\\x02०१\\x05я')\n buf.write('Ȩ\\x02१२\\x05йȝ\\x02२३\\x05ы')\n buf.write('Ȧ\\x02३४\\x05ѝȯ\\x02४५\\x05л')\n buf.write('Ȟ\\x02५६\\x05нȟ\\x02६Œ\\x03\\x02\\x02')\n buf.write('\\x02७८\\x05хȣ\\x02८९\\x05яȨ')\n buf.write('\\x02९॰\\x05йȝ\\x02॰ॱ\\x05ыȦ')\n buf.write('\\x02ॱॲ\\x05ѝȯ\\x02ॲॳ\\x05лȞ')\n buf.write('\\x02ॳॴ\\x05хȣ\\x02ॴॵ\\x05яȨ')\n buf.write('\\x02ॵॶ\\x05сȡ\\x02ॶŔ\\x03\\x02\\x02\\x02ॷ')\n buf.write('ॸ\\x05хȣ\\x02ॸॹ\\x05яȨ\\x02ॹ')\n buf.write('ॺ\\x05йȝ\\x02ॺॻ\\x05їȬ\\x02ॻ')\n buf.write('ॼ\\x05нȟ\\x02ॼॽ\\x05эȧ\\x02ॽ')\n buf.write('ॾ\\x05нȟ\\x02ॾॿ\\x05яȨ\\x02ॿ')\n buf.write('ঀ\\x05ћȮ\\x02ঀŖ\\x03\\x02\\x02\\x02ঁং')\n buf.write('\\x05хȣ\\x02ংঃ\\x05яȨ\\x02ঃ\\u0984')\n buf.write('\\x05лȞ\\x02\\u0984অ\\x05нȟ\\x02অআ')\n buf.write('\\x05яȨ\\x02আই\\x05ћȮ\\x02ইŘ')\n buf.write('\\x03\\x02\\x02\\x02ঈউ\\x05хȣ\\x02উঊ\\x05я')\n buf.write('Ȩ\\x02ঊঋ\\x05лȞ\\x02ঋঌ\\x05н')\n buf.write('ȟ\\x02ঌ\\u098d\\x05ѣȲ\\x02\\u098dŚ\\x03\\x02\\x02')\n buf.write('\\x02\\u098eএ\\x05хȣ\\x02এঐ\\x05яȨ')\n buf.write('\\x02ঐ\\u0991\\x05лȞ\\x02\\u0991\\u0992\\x05нȟ')\n buf.write('\\x02\\u0992ও\\x05ѣȲ\\x02ওঔ\\x05нȟ')\n buf.write('\\x02ঔক\\x05лȞ\\x02কŜ\\x03\\x02\\x02\\x02খ')\n buf.write('গ\\x05хȣ\\x02গঘ\\x05яȨ\\x02ঘ')\n buf.write('ঙ\\x05лȞ\\x02ঙচ\\x05хȣ\\x02চ')\n buf.write('ছ\\x05йȝ\\x02ছজ\\x05еț\\x02জ')\n buf.write('ঝ\\x05ћȮ\\x02ঝঞ\\x05ёȩ\\x02ঞ')\n buf.write('ট\\x05їȬ\\x02টŞ\\x03\\x02\\x02\\x02ঠড')\n buf.write('\\x05хȣ\\x02ডঢ\\x05яȨ\\x02ঢণ')\n buf.write('\\x05лȞ\\x02ণত\\x05хȣ\\x02তথ')\n buf.write('\\x05йȝ\\x02থদ\\x05нȟ\\x02দধ')\n 
buf.write('\\x05љȭ\\x02ধŠ\\x03\\x02\\x02\\x02ন\\u09a9\\x05х')\n buf.write('ȣ\\x02\\u09a9প\\x05яȨ\\x02পফ\\x05п')\n buf.write('Ƞ\\x02ফব\\x05хȣ\\x02বভ\\x05я')\n buf.write('Ȩ\\x02ভম\\x05хȣ\\x02ময\\x05ћ')\n buf.write('Ȯ\\x02যর\\x05нȟ\\x02রŢ\\x03\\x02\\x02')\n buf.write('\\x02\\u09b1ল\\x05хȣ\\x02ল\\u09b3\\x05яȨ')\n buf.write('\\x02\\u09b3\\u09b4\\x05ыȦ\\x02\\u09b4\\u09b5\\x05хȣ')\n buf.write('\\x02\\u09b5শ\\x05яȨ\\x02শষ\\x05нȟ')\n buf.write('\\x02ষŤ\\x03\\x02\\x02\\x02সহ\\x05хȣ\\x02হ')\n buf.write('\\u09ba\\x05яȨ\\x02\\u09ba\\u09bb\\x05яȨ\\x02\\u09bb')\n buf.write('়\\x05нȟ\\x02়ঽ\\x05їȬ\\x02ঽ')\n buf.write('Ŧ\\x03\\x02\\x02\\x02াি\\x05хȣ\\x02িী')\n buf.write('\\x05яȨ\\x02ীু\\x05ёȩ\\x02ুূ')\n buf.write('\\x05ѝȯ\\x02ূৃ\\x05ћȮ\\x02ৃŨ')\n buf.write('\\x03\\x02\\x02\\x02ৄ\\u09c5\\x05хȣ\\x02\\u09c5\\u09c6\\x05я')\n buf.write('Ȩ\\x02\\u09c6ে\\x05љȭ\\x02েৈ\\x05н')\n buf.write('ȟ\\x02ৈ\\u09c9\\x05їȬ\\x02\\u09c9\\u09ca\\x05ћ')\n buf.write('Ȯ\\x02\\u09caŪ\\x03\\x02\\x02\\x02োৌ\\x05хȣ')\n buf.write('\\x02ৌ্\\x05яȨ\\x02্ৎ\\x05љȭ')\n buf.write('\\x02ৎ\\u09cf\\x05ћȮ\\x02\\u09cf\\u09d0\\x05еț')\n buf.write('\\x02\\u09d0\\u09d1\\x05яȨ\\x02\\u09d1\\u09d2\\x05ћȮ')\n buf.write('\\x02\\u09d2\\u09d3\\x05хȣ\\x02\\u09d3\\u09d4\\x05еț')\n buf.write('\\x02\\u09d4\\u09d5\\x05зȜ\\x02\\u09d5\\u09d6\\x05ыȦ')\n buf.write('\\x02\\u09d6ৗ\\x05нȟ\\x02ৗŬ\\x03\\x02\\x02\\x02\\u09d8')\n buf.write('\\u09d9\\x05хȣ\\x02\\u09d9\\u09da\\x05яȨ\\x02\\u09da')\n buf.write('\\u09db\\x05љȭ\\x02\\u09dbড়\\x05ћȮ\\x02ড়')\n buf.write('ঢ়\\x05нȟ\\x02ঢ়\\u09de\\x05еț\\x02\\u09de')\n buf.write('য়\\x05лȞ\\x02য়Ů\\x03\\x02\\x02\\x02ৠৡ')\n buf.write('\\x05хȣ\\x02ৡৢ\\x05яȨ\\x02ৢৣ')\n buf.write('\\x05ћȮ\\x02ৣŰ\\x03\\x02\\x02\\x02\\u09e4\\u09e5\\x05х')\n buf.write('ȣ\\x02\\u09e5০\\x05яȨ\\x02০১\\x05ћ')\n buf.write('Ȯ\\x02১২\\x05нȟ\\x02২৩\\x05с')\n buf.write('ȡ\\x02৩৪\\x05нȟ\\x02৪৫\\x05ї')\n buf.write('Ȭ\\x02৫Ų\\x03\\x02\\x02\\x02৬৭\\x05хȣ')\n buf.write('\\x02৭৮\\x05яȨ\\x02৮৯\\x05ћȮ')\n buf.write('\\x02৯ৰ\\x05нȟ\\x02ৰৱ\\x05їȬ')\n 
buf.write('\\x02ৱ৲\\x05љȭ\\x02৲৳\\x05нȟ')\n buf.write('\\x02৳৴\\x05йȝ\\x02৴৵\\x05ћȮ')\n buf.write('\\x02৵Ŵ\\x03\\x02\\x02\\x02৶৷\\x05хȣ\\x02৷')\n buf.write('৸\\x05яȨ\\x02৸৹\\x05ћȮ\\x02৹')\n buf.write('৺\\x05нȟ\\x02৺৻\\x05їȬ\\x02৻')\n buf.write('ৼ\\x05џȰ\\x02ৼ৽\\x05еț\\x02৽')\n buf.write('৾\\x05ыȦ\\x02৾Ŷ\\x03\\x02\\x02\\x02\\u09ff\\u0a00')\n buf.write('\\x05хȣ\\x02\\u0a00ਁ\\x05яȨ\\x02ਁਂ')\n buf.write('\\x05ћȮ\\x02ਂਃ\\x05ёȩ\\x02ਃŸ')\n buf.write('\\x03\\x02\\x02\\x02\\u0a04ਅ\\x05хȣ\\x02ਅਆ\\x05я')\n buf.write('Ȩ\\x02ਆਇ\\x05џȰ\\x02ਇਈ\\x05е')\n buf.write('ț\\x02ਈਉ\\x05ыȦ\\x02ਉਊ\\x05х')\n buf.write('ȣ\\x02ਊ\\u0a0b\\x05лȞ\\x02\\u0a0b\\u0a0c\\x05е')\n buf.write('ț\\x02\\u0a0c\\u0a0d\\x05ћȮ\\x02\\u0a0d\\u0a0e\\x05н')\n buf.write('ȟ\\x02\\u0a0eź\\x03\\x02\\x02\\x02ਏਐ\\x05хȣ')\n buf.write('\\x02ਐ\\u0a11\\x05љȭ\\x02\\u0a11ż\\x03\\x02\\x02\\x02\\u0a12')\n buf.write('ਓ\\x05хȣ\\x02ਓਔ\\x05љȭ\\x02ਔ')\n buf.write('ਕ\\x05ёȩ\\x02ਕਖ\\x05ыȦ\\x02ਖ')\n buf.write('ਗ\\x05еț\\x02ਗਘ\\x05ћȮ\\x02ਘ')\n buf.write('ਙ\\x05хȣ\\x02ਙਚ\\x05ёȩ\\x02ਚ')\n buf.write('ਛ\\x05яȨ\\x02ਛž\\x03\\x02\\x02\\x02ਜਝ')\n buf.write('\\x05хȣ\\x02ਝਞ\\x05ћȮ\\x02ਞਟ')\n buf.write('\\x05нȟ\\x02ਟਠ\\x05їȬ\\x02ਠਡ')\n buf.write('\\x05еț\\x02ਡਢ\\x05ћȮ\\x02ਢਣ')\n buf.write('\\x05нȟ\\x02ਣƀ\\x03\\x02\\x02\\x02ਤਥ\\x05ч')\n buf.write('Ȥ\\x02ਥਦ\\x05еț\\x02ਦਧ\\x05џ')\n buf.write('Ȱ\\x02ਧਨ\\x05еț\\x02ਨƂ\\x03\\x02\\x02')\n buf.write('\\x02\\u0a29ਪ\\x05чȤ\\x02ਪਫ\\x05ёȩ')\n buf.write('\\x02ਫਬ\\x05хȣ\\x02ਬਭ\\x05яȨ')\n buf.write('\\x02ਭƄ\\x03\\x02\\x02\\x02ਮਯ\\x05щȥ\\x02ਯ')\n buf.write('ਰ\\x05нȟ\\x02ਰ\\u0a31\\x05нȟ\\x02\\u0a31')\n buf.write('ਲ\\x05ѓȪ\\x02ਲƆ\\x03\\x02\\x02\\x02ਲ਼\\u0a34')\n buf.write('\\x05ыȦ\\x02\\u0a34ਵ\\x05еț\\x02ਵਸ਼')\n buf.write('\\x05яȨ\\x02ਸ਼\\u0a37\\x05сȡ\\x02\\u0a37ਸ')\n buf.write('\\x05ѝȯ\\x02ਸਹ\\x05еț\\x02ਹ\\u0a3a')\n buf.write('\\x05сȡ\\x02\\u0a3a\\u0a3b\\x05нȟ\\x02\\u0a3bƈ')\n buf.write('\\x03\\x02\\x02\\x02਼\\u0a3d\\x05ыȦ\\x02\\u0a3dਾ\\x05е')\n buf.write('ț\\x02ਾਿ\\x05љȭ\\x02ਿੀ\\x05ћ')\n 
buf.write('Ȯ\\x02ੀƊ\\x03\\x02\\x02\\x02ੁੂ\\x05ыȦ')\n buf.write('\\x02ੂ\\u0a43\\x05еț\\x02\\u0a43\\u0a44\\x05љȭ')\n buf.write('\\x02\\u0a44\\u0a45\\x05ћȮ\\x02\\u0a45\\u0a46\\x07a\\x02\\x02\\u0a46')\n buf.write('ੇ\\x05џȰ\\x02ੇੈ\\x05еț\\x02ੈ')\n buf.write('\\u0a49\\x05ыȦ\\x02\\u0a49\\u0a4a\\x05ѝȯ\\x02\\u0a4a')\n buf.write('ੋ\\x05нȟ\\x02ੋƌ\\x03\\x02\\x02\\x02ੌ੍')\n buf.write('\\x05ыȦ\\x02੍\\u0a4e\\x05нȟ\\x02\\u0a4e\\u0a4f')\n buf.write('\\x05еț\\x02\\u0a4f\\u0a50\\x05лȞ\\x02\\u0a50ੑ')\n buf.write('\\x05хȣ\\x02ੑ\\u0a52\\x05яȨ\\x02\\u0a52\\u0a53')\n buf.write('\\x05сȡ\\x02\\u0a53Ǝ\\x03\\x02\\x02\\x02\\u0a54\\u0a55\\x05ы')\n buf.write('Ȧ\\x02\\u0a55\\u0a56\\x05нȟ\\x02\\u0a56\\u0a57\\x05п')\n buf.write('Ƞ\\x02\\u0a57\\u0a58\\x05ћȮ\\x02\\u0a58Ɛ\\x03\\x02\\x02')\n buf.write('\\x02ਖ਼ਗ਼\\x05ыȦ\\x02ਗ਼ਜ਼\\x05нȟ')\n buf.write('\\x02ਜ਼ੜ\\x05џȰ\\x02ੜ\\u0a5d\\x05нȟ')\n buf.write('\\x02\\u0a5dਫ਼\\x05ыȦ\\x02ਫ਼ƒ\\x03\\x02\\x02\\x02\\u0a5f')\n buf.write('\\u0a60\\x05ыȦ\\x02\\u0a60\\u0a61\\x05хȣ\\x02\\u0a61')\n buf.write('\\u0a62\\x05зȜ\\x02\\u0a62\\u0a63\\x05їȬ\\x02\\u0a63')\n buf.write('\\u0a64\\x05еț\\x02\\u0a64\\u0a65\\x05їȬ\\x02\\u0a65')\n buf.write('੦\\x05ѥȳ\\x02੦Ɣ\\x03\\x02\\x02\\x02੧੨')\n buf.write('\\x05ыȦ\\x02੨੩\\x05хȣ\\x02੩੪')\n buf.write('\\x05щȥ\\x02੪੫\\x05нȟ\\x02੫Ɩ')\n buf.write('\\x03\\x02\\x02\\x02੬੭\\x05ыȦ\\x02੭੮\\x05х')\n buf.write('ȣ\\x02੮੯\\x05щȥ\\x02੯ੰ\\x05н')\n buf.write('ȟ\\x02ੰੱ\\x074\\x02\\x02ੱƘ\\x03\\x02\\x02\\x02ੲ')\n buf.write('ੳ\\x05ыȦ\\x02ੳੴ\\x05хȣ\\x02ੴ')\n buf.write('ੵ\\x05щȥ\\x02ੵ੶\\x05нȟ\\x02੶')\n buf.write('\\u0a77\\x076\\x02\\x02\\u0a77ƚ\\x03\\x02\\x02\\x02\\u0a78\\u0a79\\x05ы'\n )\n buf.write('Ȧ\\x02\\u0a79\\u0a7a\\x05хȣ\\x02\\u0a7a\\u0a7b\\x05щ')\n buf.write('ȥ\\x02\\u0a7b\\u0a7c\\x05нȟ\\x02\\u0a7c\\u0a7d\\x05й')\n buf.write('ȝ\\x02\\u0a7dƜ\\x03\\x02\\x02\\x02\\u0a7e\\u0a7f\\x05ыȦ')\n buf.write('\\x02\\u0a7f\\u0a80\\x05хȣ\\x02\\u0a80ઁ\\x05эȧ')\n buf.write('\\x02ઁં\\x05хȣ\\x02ંઃ\\x05ћȮ')\n 
buf.write('\\x02ઃƞ\\x03\\x02\\x02\\x02\\u0a84અ\\x05ыȦ\\x02અ')\n buf.write('આ\\x05ёȩ\\x02આઇ\\x05йȝ\\x02ઇ')\n buf.write('ઈ\\x05еț\\x02ઈઉ\\x05ыȦ\\x02ઉ')\n buf.write('Ơ\\x03\\x02\\x02\\x02ઊઋ\\x05ыȦ\\x02ઋઌ')\n buf.write('\\x05ёȩ\\x02ઌઍ\\x05йȝ\\x02ઍ\\u0a8e')\n buf.write('\\x05щȥ\\x02\\u0a8eƢ\\x03\\x02\\x02\\x02એઐ\\x05ы')\n buf.write('Ȧ\\x02ઐઑ\\x05ёȩ\\x02ઑ\\u0a92\\x05й')\n buf.write('ȝ\\x02\\u0a92ઓ\\x05щȥ\\x02ઓઔ\\x05н')\n buf.write('ȟ\\x02ઔક\\x05лȞ\\x02કƤ\\x03\\x02\\x02')\n buf.write('\\x02ખગ\\x05ыȦ\\x02ગઘ\\x05ёȩ')\n buf.write('\\x02ઘઙ\\x05сȡ\\x02ઙƦ\\x03\\x02\\x02\\x02ચ')\n buf.write('છ\\x05ыȦ\\x02છજ\\x05ёȩ\\x02જ')\n buf.write('ઝ\\x05сȡ\\x02ઝઞ\\x05ёȩ\\x02ઞ')\n buf.write('ટ\\x05пȠ\\x02ટઠ\\x05пȠ\\x02ઠ')\n buf.write('ƨ\\x03\\x02\\x02\\x02ડઢ\\x05ыȦ\\x02ઢણ')\n buf.write('\\x05ёȩ\\x02ણત\\x05сȡ\\x02તથ')\n buf.write('\\x05ёȩ\\x02થદ\\x05яȨ\\x02દƪ')\n buf.write('\\x03\\x02\\x02\\x02ધન\\x05ыȦ\\x02ન\\u0aa9\\x05ё')\n buf.write('ȩ\\x02\\u0aa9પ\\x05яȨ\\x02પફ\\x05с')\n buf.write('ȡ\\x02ફƬ\\x03\\x02\\x02\\x02બભ\\x05ыȦ')\n buf.write('\\x02ભમ\\x05ёȩ\\x02મય\\x05ёȩ')\n buf.write('\\x02યર\\x05ѓȪ\\x02રƮ\\x03\\x02\\x02\\x02\\u0ab1')\n buf.write('લ\\x05эȧ\\x02લળ\\x05еț\\x02ળ')\n buf.write('\\u0ab4\\x05хȣ\\x02\\u0ab4વ\\x05яȨ\\x02વ')\n buf.write('ư\\x03\\x02\\x02\\x02શષ\\x05эȧ\\x02ષસ')\n buf.write('\\x05еț\\x02સહ\\x05ѓȪ\\x02હƲ')\n buf.write('\\x03\\x02\\x02\\x02\\u0aba\\u0abb\\x05эȧ\\x02\\u0abb઼\\x05е')\n buf.write('ț\\x02઼ઽ\\x05ћȮ\\x02ઽા\\x05й')\n buf.write('ȝ\\x02ાિ\\x05уȢ\\x02િી\\x05н')\n buf.write('ȟ\\x02ીુ\\x05лȞ\\x02ુƴ\\x03\\x02\\x02')\n buf.write('\\x02ૂૃ\\x05эȧ\\x02ૃૄ\\x05еț')\n buf.write('\\x02ૄૅ\\x05ѣȲ\\x02ૅ\\u0ac6\\x05џȰ')\n buf.write('\\x02\\u0ac6ે\\x05еț\\x02ેૈ\\x05ыȦ')\n buf.write('\\x02ૈૉ\\x05ѝȯ\\x02ૉ\\u0aca\\x05нȟ')\n buf.write('\\x02\\u0acaƶ\\x03\\x02\\x02\\x02ોૌ\\x05эȧ\\x02ૌ')\n buf.write('્\\x05нȟ\\x02્\\u0ace\\x05еț\\x02\\u0ace')\n buf.write('\\u0acf\\x05љȭ\\x02\\u0acfૐ\\x05ѝȯ\\x02ૐ')\n buf.write('\\u0ad1\\x05їȬ\\x02\\u0ad1\\u0ad2\\x05нȟ\\x02\\u0ad2')\n 
buf.write('\\u0ad3\\x05љȭ\\x02\\u0ad3Ƹ\\x03\\x02\\x02\\x02\\u0ad4\\u0ad5')\n buf.write('\\x05эȧ\\x02\\u0ad5\\u0ad6\\x05нȟ\\x02\\u0ad6\\u0ad7')\n buf.write('\\x05эȧ\\x02\\u0ad7\\u0ad8\\x05зȜ\\x02\\u0ad8\\u0ad9')\n buf.write('\\x05нȟ\\x02\\u0ad9\\u0ada\\x05їȬ\\x02\\u0adaƺ')\n buf.write('\\x03\\x02\\x02\\x02\\u0adb\\u0adc\\x05эȧ\\x02\\u0adc\\u0add\\x05н')\n buf.write('ȟ\\x02\\u0add\\u0ade\\x05їȬ\\x02\\u0ade\\u0adf\\x05с')\n buf.write('ȡ\\x02\\u0adfૠ\\x05нȟ\\x02ૠƼ\\x03\\x02\\x02')\n buf.write('\\x02ૡૢ\\x05эȧ\\x02ૢૣ\\x05хȣ')\n buf.write('\\x02ૣ\\u0ae4\\x05яȨ\\x02\\u0ae4\\u0ae5\\x05ѝȯ')\n buf.write('\\x02\\u0ae5૦\\x05љȭ\\x02૦ƾ\\x03\\x02\\x02\\x02૧')\n buf.write('૨\\x05эȧ\\x02૨૩\\x05хȣ\\x02૩')\n buf.write('૪\\x05яȨ\\x02૪૫\\x05ѝȯ\\x02૫')\n buf.write('૬\\x05ћȮ\\x02૬૭\\x05нȟ\\x02૭')\n buf.write('ǀ\\x03\\x02\\x02\\x02૮૯\\x05эȧ\\x02૯૰')\n buf.write('\\x05хȣ\\x02૰૱\\x05яȨ\\x02૱\\u0af2')\n buf.write('\\x05џȰ\\x02\\u0af2\\u0af3\\x05еț\\x02\\u0af3\\u0af4')\n buf.write('\\x05ыȦ\\x02\\u0af4\\u0af5\\x05ѝȯ\\x02\\u0af5\\u0af6')\n buf.write('\\x05нȟ\\x02\\u0af6ǂ\\x03\\x02\\x02\\x02\\u0af7\\u0af8\\x05э')\n buf.write('ȧ\\x02\\u0af8ૹ\\x05ыȦ\\x02ૹૺ\\x05љ')\n buf.write('ȭ\\x02ૺૻ\\x05ыȦ\\x02ૻૼ\\x05е')\n buf.write('ț\\x02ૼ૽\\x05зȜ\\x02૽૾\\x05н')\n buf.write('ȟ\\x02૾૿\\x05ыȦ\\x02૿DŽ\\x03\\x02\\x02')\n buf.write('\\x02\\u0b00ଁ\\x05эȧ\\x02ଁଂ\\x05ёȩ')\n buf.write('\\x02ଂଃ\\x05лȞ\\x02ଃ\\u0b04\\x05нȟ')\n buf.write('\\x02\\u0b04dž\\x03\\x02\\x02\\x02ଅଆ\\x05эȧ\\x02ଆ')\n buf.write('ଇ\\x05ёȩ\\x02ଇଈ\\x05лȞ\\x02ଈ')\n buf.write('ଉ\\x05нȟ\\x02ଉଊ\\x05ыȦ\\x02ଊ')\n buf.write('Lj\\x03\\x02\\x02\\x02ଋଌ\\x05эȧ\\x02ଌ\\u0b0d')\n buf.write('\\x05ёȩ\\x02\\u0b0d\\u0b0e\\x05лȞ\\x02\\u0b0eଏ')\n buf.write('\\x05хȣ\\x02ଏଐ\\x05пȠ\\x02ଐ\\u0b11')\n buf.write('\\x05ѥȳ\\x02\\u0b11NJ\\x03\\x02\\x02\\x02\\u0b12ଓ\\x05э')\n buf.write('ȧ\\x02ଓଔ\\x05ёȩ\\x02ଔକ\\x05я')\n buf.write('Ȩ\\x02କଖ\\x05ћȮ\\x02ଖଗ\\x05у')\n buf.write('Ȣ\\x02ଗnj\\x03\\x02\\x02\\x02ଘଙ\\x05эȧ')\n buf.write('\\x02ଙଚ\\x05ѝȯ\\x02ଚଛ\\x05ыȦ')\n 
buf.write('\\x02ଛଜ\\x05ћȮ\\x02ଜଝ\\x05хȣ')\n buf.write('\\x02ଝଞ\\x05љȭ\\x02ଞଟ\\x05нȟ')\n buf.write('\\x02ଟଠ\\x05ћȮ\\x02ଠǎ\\x03\\x02\\x02\\x02ଡ')\n buf.write('ଢ\\x05яȨ\\x02ଢଣ\\x05еț\\x02ଣ')\n buf.write('ତ\\x05эȧ\\x02ତଥ\\x05нȟ\\x02ଥ')\n buf.write('ǐ\\x03\\x02\\x02\\x02ଦଧ\\x05яȨ\\x02ଧନ')\n buf.write('\\x05еț\\x02ନ\\u0b29\\x05яȨ\\x02\\u0b29ǒ')\n buf.write('\\x03\\x02\\x02\\x02ପଫ\\x05яȨ\\x02ଫବ\\x05е')\n buf.write('ț\\x02ବଭ\\x05ћȮ\\x02ଭମ\\x05ѝ')\n buf.write('ȯ\\x02ମଯ\\x05їȬ\\x02ଯର\\x05е')\n buf.write('ț\\x02ର\\u0b31\\x05ыȦ\\x02\\u0b31ǔ\\x03\\x02\\x02')\n buf.write('\\x02ଲଳ\\x05яȨ\\x02ଳ\\u0b34\\x05еț')\n buf.write('\\x02\\u0b34ଵ\\x05ћȮ\\x02ଵଶ\\x05ѝȯ')\n buf.write('\\x02ଶଷ\\x05їȬ\\x02ଷସ\\x05еț')\n buf.write('\\x02ସହ\\x05ыȦ\\x02ହ\\u0b3a\\x05яȨ')\n buf.write('\\x02\\u0b3aǖ\\x03\\x02\\x02\\x02\\u0b3b଼\\x05яȨ\\x02଼')\n buf.write('ଽ\\x05еț\\x02ଽା\\x05џȰ\\x02ା')\n buf.write('ǘ\\x03\\x02\\x02\\x02ିୀ\\x05яȨ\\x02ୀୁ')\n buf.write('\\x05йȝ\\x02ୁୂ\\x05уȢ\\x02ୂୃ')\n buf.write('\\x05еț\\x02ୃୄ\\x05їȬ\\x02ୄǚ')\n buf.write('\\x03\\x02\\x02\\x02\\u0b45\\u0b46\\x05яȨ\\x02\\u0b46େ\\x05й')\n buf.write('ȝ\\x02େୈ\\x05уȢ\\x02ୈ\\u0b49\\x05е')\n buf.write('ț\\x02\\u0b49\\u0b4a\\x05їȬ\\x02\\u0b4aୋ\\x07a\\x02')\n buf.write('\\x02ୋୌ\\x05йȝ\\x02ୌ୍\\x05љȭ')\n buf.write('\\x02୍ǜ\\x03\\x02\\x02\\x02\\u0b4e\\u0b4f\\x05яȨ\\x02\\u0b4f')\n buf.write('\\u0b50\\x05йȝ\\x02\\u0b50\\u0b51\\x05ыȦ\\x02\\u0b51')\n buf.write('\\u0b52\\x05ёȩ\\x02\\u0b52\\u0b53\\x05зȜ\\x02\\u0b53')\n buf.write('Ǟ\\x03\\x02\\x02\\x02\\u0b54୕\\x05яȨ\\x02୕ୖ')\n buf.write('\\x05нȟ\\x02ୖୗ\\x05љȭ\\x02ୗ\\u0b58')\n buf.write('\\x05ћȮ\\x02\\u0b58\\u0b59\\x05нȟ\\x02\\u0b59\\u0b5a')\n buf.write('\\x05лȞ\\x02\\u0b5aǠ\\x03\\x02\\x02\\x02\\u0b5bଡ଼\\x05я')\n buf.write('Ȩ\\x02ଡ଼ଢ଼\\x05нȟ\\x02ଢ଼\\u0b5e\\x05ѡ')\n buf.write('ȱ\\x02\\u0b5eǢ\\x03\\x02\\x02\\x02ୟୠ\\x05яȨ')\n buf.write('\\x02ୠୡ\\x05ёȩ\\x02ୡǤ\\x03\\x02\\x02\\x02ୢ')\n buf.write('ୣ\\x05яȨ\\x02ୣ\\u0b64\\x05ёȩ\\x02\\u0b64')\n buf.write('\\u0b65\\x05еț\\x02\\u0b65୦\\x05ѝȯ\\x02୦')\n 
buf.write('୧\\x05лȞ\\x02୧୨\\x05хȣ\\x02୨')\n buf.write('୩\\x05ћȮ\\x02୩Ǧ\\x03\\x02\\x02\\x02୪୫')\n buf.write('\\x05яȨ\\x02୫୬\\x05ёȩ\\x02୬୭')\n buf.write('\\x05йȝ\\x02୭୮\\x05еț\\x02୮୯')\n buf.write('\\x05йȝ\\x02୯୰\\x05уȢ\\x02୰ୱ')\n buf.write('\\x05нȟ\\x02ୱǨ\\x03\\x02\\x02\\x02୲୳\\x05я')\n buf.write('Ȩ\\x02୳୴\\x05ёȩ\\x02୴୵\\x05й')\n buf.write('ȝ\\x02୵୶\\x05ёȩ\\x02୶୷\\x05ѓ')\n buf.write('Ȫ\\x02୷\\u0b78\\x05ѥȳ\\x02\\u0b78Ǫ\\x03\\x02\\x02')\n buf.write('\\x02\\u0b79\\u0b7a\\x05яȨ\\x02\\u0b7a\\u0b7b\\x05ёȩ')\n buf.write('\\x02\\u0b7b\\u0b7c\\x05йȝ\\x02\\u0b7c\\u0b7d\\x05ѥȳ')\n buf.write('\\x02\\u0b7d\\u0b7e\\x05йȝ\\x02\\u0b7e\\u0b7f\\x05ыȦ')\n buf.write('\\x02\\u0b7f\\u0b80\\x05нȟ\\x02\\u0b80Ǭ\\x03\\x02\\x02\\x02\\u0b81')\n buf.write('ஂ\\x05яȨ\\x02ஂஃ\\x05ёȩ\\x02ஃ')\n buf.write('\\u0b84\\x05нȟ\\x02\\u0b84அ\\x05яȨ\\x02அ')\n buf.write('ஆ\\x05ћȮ\\x02ஆஇ\\x05хȣ\\x02இ')\n buf.write('ஈ\\x05ћȮ\\x02ஈஉ\\x05ѥȳ\\x02உ')\n buf.write('ஊ\\x05нȟ\\x02ஊ\\u0b8b\\x05љȭ\\x02\\u0b8b')\n buf.write('\\u0b8c\\x05йȝ\\x02\\u0b8c\\u0b8d\\x05еț\\x02\\u0b8d')\n buf.write('எ\\x05ѓȪ\\x02எஏ\\x05хȣ\\x02ஏ')\n buf.write('ஐ\\x05яȨ\\x02ஐ\\u0b91\\x05сȡ\\x02\\u0b91')\n buf.write('Ǯ\\x03\\x02\\x02\\x02ஒஓ\\x05яȨ\\x02ஓஔ')\n buf.write('\\x05ёȩ\\x02ஔக\\x05эȧ\\x02க\\u0b96')\n buf.write('\\x05еț\\x02\\u0b96\\u0b97\\x05ѣȲ\\x02\\u0b97\\u0b98')\n buf.write('\\x05џȰ\\x02\\u0b98ங\\x05еț\\x02ஙச')\n buf.write('\\x05ыȦ\\x02ச\\u0b9b\\x05ѝȯ\\x02\\u0b9bஜ')\n buf.write('\\x05нȟ\\x02ஜǰ\\x03\\x02\\x02\\x02\\u0b9dஞ\\x05я')\n buf.write('Ȩ\\x02ஞட\\x05ёȩ\\x02ட\\u0ba0\\x05э')\n buf.write('ȧ\\x02\\u0ba0\\u0ba1\\x05хȣ\\x02\\u0ba1\\u0ba2\\x05я')\n buf.write('Ȩ\\x02\\u0ba2ண\\x05џȰ\\x02ணத\\x05е')\n buf.write('ț\\x02த\\u0ba5\\x05ыȦ\\x02\\u0ba5\\u0ba6\\x05ѝ')\n buf.write('ȯ\\x02\\u0ba6\\u0ba7\\x05нȟ\\x02\\u0ba7Dz\\x03\\x02\\x02')\n buf.write('\\x02நன\\x05яȨ\\x02னப\\x05ёȩ')\n buf.write('\\x02ப\\u0bab\\x05яȨ\\x02\\u0bab\\u0bac\\x05нȟ')\n buf.write('\\x02\\u0bacǴ\\x03\\x02\\x02\\x02\\u0badம\\x05яȨ\\x02ம')\n 
buf.write('ய\\x05ёȩ\\x02யர\\x05ёȩ\\x02ர')\n buf.write('ற\\x05їȬ\\x02றல\\x05лȞ\\x02ல')\n buf.write('ள\\x05нȟ\\x02ளழ\\x05їȬ\\x02ழ')\n buf.write('Ƕ\\x03\\x02\\x02\\x02வஶ\\x05яȨ\\x02ஶஷ')\n buf.write('\\x05ёȩ\\x02ஷஸ\\x05љȭ\\x02ஸஹ')\n buf.write('\\x05йȝ\\x02ஹ\\u0bba\\x05уȢ\\x02\\u0bba\\u0bbb')\n buf.write('\\x05нȟ\\x02\\u0bbb\\u0bbc\\x05эȧ\\x02\\u0bbc\\u0bbd')\n buf.write('\\x05еț\\x02\\u0bbdா\\x05йȝ\\x02ாி')\n buf.write('\\x05уȢ\\x02ிீ\\x05нȟ\\x02ீு')\n buf.write('\\x05йȝ\\x02ுூ\\x05щȥ\\x02ூǸ')\n buf.write('\\x03\\x02\\x02\\x02\\u0bc3\\u0bc4\\x05яȨ\\x02\\u0bc4\\u0bc5\\x05ё')\n buf.write('ȩ\\x02\\u0bc5ெ\\x05ћȮ\\x02ெǺ\\x03\\x02\\x02')\n buf.write('\\x02ேை\\x05яȨ\\x02ை\\u0bc9\\x05ёȩ')\n buf.write('\\x02\\u0bc9ொ\\x05ѡȱ\\x02ொோ\\x05еț')\n buf.write('\\x02ோௌ\\x05хȣ\\x02ௌ்\\x05ћȮ')\n buf.write('\\x02்Ǽ\\x03\\x02\\x02\\x02\\u0bce\\u0bcf\\x05яȨ\\x02\\u0bcf')\n buf.write('ௐ\\x05ѝȯ\\x02ௐ\\u0bd1\\x05ыȦ\\x02\\u0bd1')\n buf.write('\\u0bd2\\x05ыȦ\\x02\\u0bd2Ǿ\\x03\\x02\\x02\\x02\\u0bd3\\u0bd4')\n buf.write('\\x05яȨ\\x02\\u0bd4\\u0bd5\\x05ѝȯ\\x02\\u0bd5\\u0bd6')\n buf.write('\\x05ыȦ\\x02\\u0bd6ௗ\\x05ыȦ\\x02ௗ\\u0bd8')\n buf.write('\\x05љȭ\\x02\\u0bd8Ȁ\\x03\\x02\\x02\\x02\\u0bd9\\u0bda\\x05я')\n buf.write('Ȩ\\x02\\u0bda\\u0bdb\\x05ѝȯ\\x02\\u0bdb\\u0bdc\\x05э')\n buf.write('ȧ\\x02\\u0bdc\\u0bdd\\x05зȜ\\x02\\u0bdd\\u0bde\\x05н')\n buf.write('ȟ\\x02\\u0bde\\u0bdf\\x05їȬ\\x02\\u0bdfȂ\\x03\\x02\\x02')\n buf.write('\\x02\\u0be0\\u0be1\\x05яȨ\\x02\\u0be1\\u0be2\\x05ѝȯ')\n buf.write('\\x02\\u0be2\\u0be3\\x05эȧ\\x02\\u0be3\\u0be4\\x05нȟ')\n buf.write('\\x02\\u0be4\\u0be5\\x05їȬ\\x02\\u0be5௦\\x05хȣ')\n buf.write('\\x02௦௧\\x05йȝ\\x02௧Ȅ\\x03\\x02\\x02\\x02௨')\n buf.write('௩\\x05яȨ\\x02௩௪\\x05џȰ\\x02௪')\n buf.write('௫\\x05еț\\x02௫௬\\x05їȬ\\x02௬')\n buf.write('௭\\x05йȝ\\x02௭௮\\x05уȢ\\x02௮')\n buf.write('௯\\x05еț\\x02௯௰\\x05їȬ\\x02௰')\n buf.write('௱\\x074\\x02\\x02௱Ȇ\\x03\\x02\\x02\\x02௲௳\\x05ё')\n buf.write('ȩ\\x02௳௴\\x05зȜ\\x02௴௵\\x05ч')\n buf.write('Ȥ\\x02௵௶\\x05нȟ\\x02௶௷\\x05й')\n 
buf.write('ȝ\\x02௷௸\\x05ћȮ\\x02௸Ȉ\\x03\\x02\\x02')\n buf.write('\\x02௹௺\\x05ёȩ\\x02௺\\u0bfb\\x05пȠ')\n buf.write('\\x02\\u0bfbȊ\\x03\\x02\\x02\\x02\\u0bfc\\u0bfd\\x05ёȩ\\x02\\u0bfd')\n buf.write('\\u0bfe\\x05пȠ\\x02\\u0bfe\\u0bff\\x05пȠ\\x02\\u0bff')\n buf.write('Ȍ\\x03\\x02\\x02\\x02ఀఁ\\x05ёȩ\\x02ఁం')\n buf.write('\\x05хȣ\\x02ంః\\x05лȞ\\x02ఃȎ')\n buf.write('\\x03\\x02\\x02\\x02ఄఅ\\x05ёȩ\\x02అఆ\\x05ы')\n buf.write('Ȧ\\x02ఆఇ\\x05лȞ\\x02ఇȐ\\x03\\x02\\x02')\n buf.write('\\x02ఈఉ\\x05ёȩ\\x02ఉఊ\\x05яȨ')\n buf.write('\\x02ఊȒ\\x03\\x02\\x02\\x02ఋఌ\\x05ёȩ\\x02ఌ')\n buf.write('\\u0c0d\\x05яȨ\\x02\\u0c0dఎ\\x05ыȦ\\x02ఎ')\n buf.write('ఏ\\x05ѥȳ\\x02ఏȔ\\x03\\x02\\x02\\x02ఐ\\u0c11')\n buf.write('\\x05ёȩ\\x02\\u0c11ఒ\\x05ѓȪ\\x02ఒఓ')\n buf.write('\\x05нȟ\\x02ఓఔ\\x05яȨ\\x02ఔȖ')\n buf.write('\\x03\\x02\\x02\\x02కఖ\\x05ёȩ\\x02ఖగ\\x05ѓ')\n buf.write('Ȫ\\x02గఘ\\x05ћȮ\\x02ఘఙ\\x05х')\n buf.write('ȣ\\x02ఙచ\\x05ёȩ\\x02చఛ\\x05я')\n buf.write('Ȩ\\x02ఛȘ\\x03\\x02\\x02\\x02జఝ\\x05ёȩ')\n buf.write('\\x02ఝఞ\\x05їȬ\\x02ఞȚ\\x03\\x02\\x02\\x02ట')\n buf.write('ఠ\\x05ёȩ\\x02ఠడ\\x05їȬ\\x02డ')\n buf.write('ఢ\\x05еț\\x02ఢణ\\x05лȞ\\x02ణ')\n buf.write('త\\x05еț\\x02తథ\\x05ћȮ\\x02థ')\n buf.write('ద\\x05еț\\x02దȜ\\x03\\x02\\x02\\x02ధన')\n buf.write('\\x05ёȩ\\x02న\\u0c29\\x05їȬ\\x02\\u0c29ప')\n buf.write('\\x05лȞ\\x02పఫ\\x05нȟ\\x02ఫబ')\n buf.write('\\x05їȬ\\x02బȞ\\x03\\x02\\x02\\x02భమ\\x05ё')\n buf.write('ȩ\\x02మయ\\x05їȬ\\x02యర\\x05л')\n buf.write('Ȟ\\x02రఱ\\x05хȣ\\x02ఱల\\x05я')\n buf.write('Ȩ\\x02లళ\\x05еț\\x02ళఴ\\x05ы')\n buf.write('Ȧ\\x02ఴవ\\x05хȣ\\x02వశ\\x05ћ')\n buf.write('Ȯ\\x02శష\\x05ѥȳ\\x02షȠ\\x03\\x02\\x02')\n buf.write('\\x02సహ\\x05ёȩ\\x02హ\\u0c3a\\x05љȭ')\n buf.write('\\x02\\u0c3a\\u0c3b\\x05нȟ\\x02\\u0c3b఼\\x05їȬ')\n buf.write('\\x02఼ఽ\\x05їȬ\\x02ఽా\\x05ёȩ')\n buf.write('\\x02ాి\\x05їȬ\\x02ిȢ\\x03\\x02\\x02\\x02ీ')\n buf.write('ు\\x05ёȩ\\x02ుూ\\x05ѝȯ\\x02ూ')\n buf.write('ృ\\x05ћȮ\\x02ృȤ\\x03\\x02\\x02\\x02ౄ\\u0c45')\n buf.write('\\x05ёȩ\\x02\\u0c45ె\\x05ѝȯ\\x02ెే')\n 
buf.write('\\x05ћȮ\\x02ేై\\x05нȟ\\x02ై\\u0c49')\n buf.write('\\x05їȬ\\x02\\u0c49Ȧ\\x03\\x02\\x02\\x02ొో\\x05ё')\n buf.write('ȩ\\x02ోౌ\\x05џȰ\\x02ౌ్\\x05н')\n buf.write('ȟ\\x02్\\u0c4e\\x05їȬ\\x02\\u0c4eȨ\\x03\\x02\\x02')\n buf.write('\\x02\\u0c4f\\u0c50\\x05ёȩ\\x02\\u0c50\\u0c51\\x05џȰ')\n buf.write('\\x02\\u0c51\\u0c52\\x05нȟ\\x02\\u0c52\\u0c53\\x05їȬ')\n buf.write('\\x02\\u0c53\\u0c54\\x05їȬ\\x02\\u0c54ౕ\\x05хȣ')\n buf.write('\\x02ౕౖ\\x05лȞ\\x02ౖ\\u0c57\\x05хȣ')\n buf.write('\\x02\\u0c57ౘ\\x05яȨ\\x02ౘౙ\\x05сȡ')\n buf.write('\\x02ౙȪ\\x03\\x02\\x02\\x02ౚ\\u0c5b\\x05ѓȪ\\x02\\u0c5b')\n buf.write('\\u0c5c\\x05еț\\x02\\u0c5cౝ\\x05йȝ\\x02ౝ')\n buf.write('\\u0c5e\\x05щȥ\\x02\\u0c5e\\u0c5f\\x05еț\\x02\\u0c5f')\n buf.write('ౠ\\x05сȡ\\x02ౠౡ\\x05нȟ\\x02ౡ')\n buf.write('Ȭ\\x03\\x02\\x02\\x02ౢౣ\\x05ѓȪ\\x02ౣ\\u0c64')\n buf.write('\\x05еț\\x02\\u0c64\\u0c65\\x05їȬ\\x02\\u0c65౦')\n buf.write('\\x05еț\\x02౦౧\\x05ыȦ\\x02౧౨')\n buf.write('\\x05ыȦ\\x02౨౩\\x05нȟ\\x02౩౪')\n buf.write('\\x05ыȦ\\x02౪౫\\x07a\\x02\\x02౫౬\\x05н')\n buf.write('ȟ\\x02౬౭\\x05яȨ\\x02౭౮\\x05е')\n buf.write('ț\\x02౮౯\\x05зȜ\\x02౯\\u0c70\\x05ы')\n buf.write('Ȧ\\x02\\u0c70\\u0c71\\x05нȟ\\x02\\u0c71Ȯ\\x03\\x02\\x02')\n buf.write('\\x02\\u0c72\\u0c73\\x05ѓȪ\\x02\\u0c73\\u0c74\\x05еț')\n buf.write('\\x02\\u0c74\\u0c75\\x05їȬ\\x02\\u0c75\\u0c76\\x05еț')\n buf.write('\\x02\\u0c76౷\\x05эȧ\\x02౷౸\\x05нȟ')\n buf.write('\\x02౸౹\\x05ћȮ\\x02౹౺\\x05нȟ')\n buf.write('\\x02౺౻\\x05їȬ\\x02౻౼\\x05љȭ')\n buf.write('\\x02౼Ȱ\\x03\\x02\\x02\\x02౽౾\\x05ѓȪ\\x02౾')\n buf.write('౿\\x05еț\\x02౿ಀ\\x05їȬ\\x02ಀ')\n buf.write('ಁ\\x05нȟ\\x02ಁಂ\\x05яȨ\\x02ಂ')\n buf.write('ಃ\\x05ћȮ\\x02ಃȲ\\x03\\x02\\x02\\x02಄ಅ')\n buf.write('\\x05ѓȪ\\x02ಅಆ\\x05еț\\x02ಆಇ')\n buf.write('\\x05їȬ\\x02ಇಈ\\x05ћȮ\\x02ಈಉ')\n buf.write('\\x05хȣ\\x02ಉಊ\\x05ћȮ\\x02ಊಋ')\n buf.write('\\x05хȣ\\x02ಋಌ\\x05ёȩ\\x02ಌ\\u0c8d')\n buf.write('\\x05яȨ\\x02\\u0c8dȴ\\x03\\x02\\x02\\x02ಎಏ\\x05ѓ')\n buf.write('Ȫ\\x02ಏಐ\\x05еț\\x02ಐ\\u0c91\\x05љ')\n 
buf.write('ȭ\\x02\\u0c91ಒ\\x05љȭ\\x02ಒಓ\\x05х')\n buf.write('ȣ\\x02ಓಔ\\x05яȨ\\x02ಔಕ\\x05с')\n buf.write('ȡ\\x02ಕȶ\\x03\\x02\\x02\\x02ಖಗ\\x05ѓȪ')\n buf.write('\\x02ಗಘ\\x05еț\\x02ಘಙ\\x05ћȮ')\n buf.write('\\x02ಙಚ\\x05уȢ\\x02ಚȸ\\x03\\x02\\x02\\x02ಛ')\n buf.write(\"ಜ\\x07'\\x02\\x02ಜಝ\\x05їȬ\\x02ಝಞ\")\n buf.write('\\x05ёȩ\\x02ಞಟ\\x05ѡȱ\\x02ಟಠ')\n buf.write('\\x05ћȮ\\x02ಠಡ\\x05ѥȳ\\x02ಡಢ')\n buf.write('\\x05ѓȪ\\x02ಢಣ\\x05нȟ\\x02ಣȺ')\n buf.write(\"\\x03\\x02\\x02\\x02ತಥ\\x07'\\x02\\x02ಥದ\\x05ћȮ\")\n buf.write('\\x02ದಧ\\x05ѥȳ\\x02ಧನ\\x05ѓȪ')\n buf.write('\\x02ನ\\u0ca9\\x05нȟ\\x02\\u0ca9ȼ\\x03\\x02\\x02\\x02ಪ')\n buf.write('ಫ\\x05ѓȪ\\x02ಫಬ\\x05хȣ\\x02ಬ')\n buf.write('ಭ\\x05ѓȪ\\x02ಭಮ\\x05нȟ\\x02ಮ')\n buf.write('ಯ\\x05ыȦ\\x02ಯರ\\x05хȣ\\x02ರ')\n buf.write('ಱ\\x05яȨ\\x02ಱಲ\\x05нȟ\\x02ಲ')\n buf.write('ಳ\\x05лȞ\\x02ಳȾ\\x03\\x02\\x02\\x02\\u0cb4ವ')\n buf.write('\\x05ѓȪ\\x02ವಶ\\x05хȣ\\x02ಶಷ')\n buf.write('\\x05џȰ\\x02ಷಸ\\x05ёȩ\\x02ಸಹ')\n buf.write('\\x05ћȮ\\x02ಹɀ\\x03\\x02\\x02\\x02\\u0cba\\u0cbb\\x05ѓ')\n buf.write('Ȫ\\x02\\u0cbb಼\\x05ыȦ\\x02಼ಽ\\x05е')\n buf.write('ț\\x02ಽಾ\\x05яȨ\\x02ಾɂ\\x03\\x02\\x02')\n buf.write('\\x02ಿೀ\\x05ѓȪ\\x02ೀು\\x05ыȦ')\n buf.write('\\x02ುೂ\\x05љȭ\\x02ೂೃ\\x07a\\x02\\x02ೃ')\n buf.write('ೄ\\x05хȣ\\x02ೄ\\u0cc5\\x05яȨ\\x02\\u0cc5')\n buf.write('ೆ\\x05ћȮ\\x02ೆೇ\\x05нȟ\\x02ೇ')\n buf.write('ೈ\\x05сȡ\\x02ೈ\\u0cc9\\x05нȟ\\x02\\u0cc9')\n buf.write('ೊ\\x05їȬ\\x02ೊɄ\\x03\\x02\\x02\\x02ೋೌ')\n buf.write('\\x05ѓȪ\\x02ೌ್\\x05ёȩ\\x02್\\u0cce')\n buf.write('\\x05љȭ\\x02\\u0cce\\u0ccf\\x05хȣ\\x02\\u0ccf\\u0cd0')\n buf.write('\\x05ћȮ\\x02\\u0cd0\\u0cd1\\x05хȣ\\x02\\u0cd1\\u0cd2')\n buf.write('\\x05џȰ\\x02\\u0cd2\\u0cd3\\x05нȟ\\x02\\u0cd3Ɇ')\n buf.write('\\x03\\x02\\x02\\x02\\u0cd4ೕ\\x05ѓȪ\\x02ೕೖ\\x05ё')\n buf.write('ȩ\\x02ೖ\\u0cd7\\x05љȭ\\x02\\u0cd7\\u0cd8\\x05х')\n buf.write('ȣ\\x02\\u0cd8\\u0cd9\\x05ћȮ\\x02\\u0cd9\\u0cda\\x05х')\n buf.write('ȣ\\x02\\u0cda\\u0cdb\\x05џȰ\\x02\\u0cdb\\u0cdc\\x05н')\n buf.write('ȟ\\x02\\u0cdcೝ\\x05яȨ\\x02ೝɈ\\x03\\x02\\x02')\n 
buf.write('\\x02ೞ\\u0cdf\\x05ѓȪ\\x02\\u0cdfೠ\\x05їȬ')\n buf.write('\\x02ೠೡ\\x05еț\\x02ೡೢ\\x05сȡ')\n buf.write('\\x02ೢೣ\\x05эȧ\\x02ೣ\\u0ce4\\x05еț')\n buf.write('\\x02\\u0ce4Ɋ\\x03\\x02\\x02\\x02\\u0ce5೦\\x05ѓȪ\\x02೦')\n buf.write('೧\\x05їȬ\\x02೧೨\\x05нȟ\\x02೨')\n buf.write('೩\\x05йȝ\\x02೩೪\\x05нȟ\\x02೪')\n buf.write('೫\\x05лȞ\\x02೫೬\\x05хȣ\\x02೬')\n buf.write('೭\\x05яȨ\\x02೭೮\\x05сȡ\\x02೮')\n buf.write('Ɍ\\x03\\x02\\x02\\x02೯\\u0cf0\\x05ѓȪ\\x02\\u0cf0ೱ')\n buf.write('\\x05їȬ\\x02ೱೲ\\x05нȟ\\x02ೲ\\u0cf3')\n buf.write('\\x05йȝ\\x02\\u0cf3\\u0cf4\\x05хȣ\\x02\\u0cf4\\u0cf5')\n buf.write('\\x05љȭ\\x02\\u0cf5\\u0cf6\\x05хȣ\\x02\\u0cf6\\u0cf7')\n buf.write('\\x05ёȩ\\x02\\u0cf7\\u0cf8\\x05яȨ\\x02\\u0cf8Ɏ')\n buf.write('\\x03\\x02\\x02\\x02\\u0cf9\\u0cfa\\x05ѓȪ\\x02\\u0cfa\\u0cfb\\x05ї')\n buf.write('Ȭ\\x02\\u0cfb\\u0cfc\\x05нȟ\\x02\\u0cfc\\u0cfd\\x05љ')\n buf.write('ȭ\\x02\\u0cfd\\u0cfe\\x05нȟ\\x02\\u0cfe\\u0cff\\x05я')\n buf.write('Ȩ\\x02\\u0cffഀ\\x05ћȮ\\x02ഀɐ\\x03\\x02\\x02')\n buf.write('\\x02ഁം\\x05ѓȪ\\x02ംഃ\\x05їȬ')\n buf.write('\\x02ഃഄ\\x05хȣ\\x02ഄഅ\\x05ёȩ')\n buf.write('\\x02അആ\\x05їȬ\\x02ആɒ\\x03\\x02\\x02\\x02ഇ')\n buf.write('ഈ\\x05ѓȪ\\x02ഈഉ\\x05їȬ\\x02ഉ')\n buf.write('ഊ\\x05ёȩ\\x02ഊഋ\\x05йȝ\\x02ഋ')\n buf.write('ഌ\\x05нȟ\\x02ഌ\\u0d0d\\x05лȞ\\x02\\u0d0d')\n buf.write('എ\\x05ѝȯ\\x02എഏ\\x05їȬ\\x02ഏ')\n buf.write('ഐ\\x05нȟ\\x02ഐɔ\\x03\\x02\\x02\\x02\\u0d11ഒ')\n buf.write('\\x05їȬ\\x02ഒഓ\\x05еț\\x02ഓഔ')\n buf.write('\\x05хȣ\\x02ഔക\\x05љȭ\\x02കഖ')\n buf.write('\\x05нȟ\\x02ഖɖ\\x03\\x02\\x02\\x02ഗഘ\\x05ї')\n buf.write('Ȭ\\x02ഘങ\\x05еț\\x02ങച\\x05я')\n buf.write('Ȩ\\x02ചഛ\\x05сȡ\\x02ഛജ\\x05н')\n buf.write('ȟ\\x02ജɘ\\x03\\x02\\x02\\x02ഝഞ\\x05їȬ')\n buf.write('\\x02ഞട\\x05еț\\x02ടഠ\\x05ѡȱ')\n buf.write('\\x02ഠɚ\\x03\\x02\\x02\\x02ഡഢ\\x05їȬ\\x02ഢ')\n buf.write('ണ\\x05нȟ\\x02ണത\\x05еț\\x02ത')\n buf.write('ഥ\\x05лȞ\\x02ഥɜ\\x03\\x02\\x02\\x02ദധ')\n buf.write('\\x05їȬ\\x02ധന\\x05нȟ\\x02നഩ')\n buf.write('\\x05еț\\x02ഩപ\\x05ыȦ\\x02പɞ')\n 
buf.write('\\x03\\x02\\x02\\x02ഫബ\\x05їȬ\\x02ബഭ\\x05н')\n buf.write('ȟ\\x02ഭമ\\x05йȝ\\x02മയ\\x05ё')\n buf.write('ȩ\\x02യര\\x05їȬ\\x02രറ\\x05л')\n buf.write('Ȟ\\x02റɠ\\x03\\x02\\x02\\x02ലള\\x05їȬ')\n buf.write('\\x02ളഴ\\x05нȟ\\x02ഴവ\\x05пȠ')\n buf.write('\\x02വɢ\\x03\\x02\\x02\\x02ശഷ\\x05їȬ\\x02ഷ')\n buf.write('സ\\x05нȟ\\x02സഹ\\x05пȠ\\x02ഹ')\n buf.write('ഺ\\x05нȟ\\x02ഺ഻\\x05їȬ\\x02഻')\n buf.write('഼\\x05нȟ\\x02഼ഽ\\x05яȨ\\x02ഽ')\n buf.write('ാ\\x05йȝ\\x02ാി\\x05нȟ\\x02ി')\n buf.write('ɤ\\x03\\x02\\x02\\x02ീു\\x05їȬ\\x02ുൂ')\n buf.write('\\x05нȟ\\x02ൂൃ\\x05пȠ\\x02ൃൄ')\n buf.write('\\x05нȟ\\x02ൄ\\u0d45\\x05їȬ\\x02\\u0d45െ')\n buf.write('\\x05нȟ\\x02െേ\\x05яȨ\\x02േൈ')\n buf.write('\\x05йȝ\\x02ൈ\\u0d49\\x05хȣ\\x02\\u0d49ൊ')\n buf.write('\\x05яȨ\\x02ൊോ\\x05сȡ\\x02ോɦ')\n buf.write('\\x03\\x02\\x02\\x02ൌ്\\x05їȬ\\x02്ൎ\\x05н')\n buf.write('ȟ\\x02ൎ൏\\x05чȤ\\x02൏\\u0d50\\x05н')\n buf.write('ȟ\\x02\\u0d50\\u0d51\\x05йȝ\\x02\\u0d51\\u0d52\\x05ћ')\n buf.write('Ȯ\\x02\\u0d52ɨ\\x03\\x02\\x02\\x02\\u0d53ൔ\\x05їȬ')\n buf.write('\\x02ൔൕ\\x05нȟ\\x02ൕൖ\\x05ыȦ')\n buf.write('\\x02ൖൗ\\x05хȣ\\x02ൗ൘\\x05нȟ')\n buf.write('\\x02൘൙\\x05љȭ\\x02൙൚\\x07a\\x02\\x02൚')\n buf.write('൛\\x05ёȩ\\x02൛൜\\x05яȨ\\x02൜')\n buf.write('ɪ\\x03\\x02\\x02\\x02൝൞\\x05їȬ\\x02൞ൟ')\n buf.write('\\x05нȟ\\x02ൟൠ\\x05яȨ\\x02ൠൡ')\n buf.write('\\x05еț\\x02ൡൢ\\x05эȧ\\x02ൢൣ')\n buf.write('\\x05нȟ\\x02ൣɬ\\x03\\x02\\x02\\x02\\u0d64\\u0d65\\x05ї')\n buf.write('Ȭ\\x02\\u0d65൦\\x05нȟ\\x02൦൧\\x05ѓ')\n buf.write('Ȫ\\x02൧൨\\x05ыȦ\\x02൨൩\\x05е')\n buf.write('ț\\x02൩൪\\x05йȝ\\x02൪൫\\x05н')\n buf.write('ȟ\\x02൫ɮ\\x03\\x02\\x02\\x02൬൭\\x05їȬ')\n buf.write('\\x02൭൮\\x05нȟ\\x02൮൯\\x05љȭ')\n buf.write('\\x02൯൰\\x05ѓȪ\\x02൰൱\\x05нȟ')\n buf.write('\\x02൱൲\\x05йȝ\\x02൲൳\\x05ћȮ')\n buf.write('\\x02൳ɰ\\x03\\x02\\x02\\x02൴൵\\x05їȬ\\x02൵')\n buf.write('൶\\x05нȟ\\x02൶൷\\x05љȭ\\x02൷')\n buf.write('൸\\x05ћȮ\\x02൸൹\\x05їȬ\\x02൹')\n buf.write('ൺ\\x05хȣ\\x02ൺൻ\\x05йȝ\\x02ൻ')\n buf.write('ർ\\x05ћȮ\\x02ർൽ\\x07a\\x02\\x02ൽൾ')\n 
buf.write('\\x05їȬ\\x02ൾൿ\\x05нȟ\\x02ൿ\\u0d80')\n buf.write('\\x05пȠ\\x02\\u0d80ඁ\\x05нȟ\\x02ඁං')\n buf.write('\\x05їȬ\\x02ංඃ\\x05нȟ\\x02ඃ\\u0d84')\n buf.write('\\x05яȨ\\x02\\u0d84අ\\x05йȝ\\x02අආ')\n buf.write('\\x05нȟ\\x02ආඇ\\x05љȭ\\x02ඇɲ')\n buf.write('\\x03\\x02\\x02\\x02ඈඉ\\x05їȬ\\x02ඉඊ\\x05н')\n buf.write('ȟ\\x02ඊඋ\\x05љȭ\\x02උඌ\\x05ѝ')\n buf.write('ȯ\\x02ඌඍ\\x05ыȦ\\x02ඍඎ\\x05ћ')\n buf.write('Ȯ\\x02ඎɴ\\x03\\x02\\x02\\x02ඏඐ\\x05їȬ')\n buf.write('\\x02ඐඑ\\x05нȟ\\x02එඒ\\x05љȭ')\n buf.write('\\x02ඒඓ\\x05ѝȯ\\x02ඓඔ\\x05ыȦ')\n buf.write('\\x02ඔඕ\\x05ћȮ\\x02ඕඖ\\x07a\\x02\\x02ඖ')\n buf.write('\\u0d97\\x05йȝ\\x02\\u0d97\\u0d98\\x05еț\\x02\\u0d98')\n buf.write('\\u0d99\\x05йȝ\\x02\\u0d99ක\\x05уȢ\\x02ක')\n buf.write('ඛ\\x05нȟ\\x02ඛɶ\\x03\\x02\\x02\\x02ගඝ')\n buf.write('\\x05їȬ\\x02ඝඞ\\x05нȟ\\x02ඞඟ')\n buf.write('\\x05ћȮ\\x02ඟච\\x05ѝȯ\\x02චඡ')\n buf.write('\\x05їȬ\\x02ඡජ\\x05яȨ\\x02ජɸ')\n buf.write('\\x03\\x02\\x02\\x02ඣඤ\\x05їȬ\\x02ඤඥ\\x05н')\n buf.write('ȟ\\x02ඥඦ\\x05ћȮ\\x02ඦට\\x05ѝ')\n buf.write('ȯ\\x02ටඨ\\x05їȬ\\x02ඨඩ\\x05я')\n buf.write('Ȩ\\x02ඩඪ\\x05хȣ\\x02ඪණ\\x05я')\n buf.write('Ȩ\\x02ණඬ\\x05сȡ\\x02ඬɺ\\x03\\x02\\x02')\n buf.write('\\x02තථ\\x05їȬ\\x02ථද\\x05нȟ')\n buf.write('\\x02දධ\\x05ѝȯ\\x02ධන\\x05љȭ')\n buf.write('\\x02න\\u0db2\\x05нȟ\\x02\\u0db2ɼ\\x03\\x02\\x02\\x02ඳ')\n buf.write('ප\\x05їȬ\\x02පඵ\\x05нȟ\\x02ඵ')\n buf.write('බ\\x05џȰ\\x02බභ\\x05нȟ\\x02භ')\n buf.write('ම\\x05їȬ\\x02මඹ\\x05љȭ\\x02ඹ')\n buf.write('ය\\x05нȟ\\x02යɾ\\x03\\x02\\x02\\x02ර\\u0dbc')\n buf.write('\\x05їȬ\\x02\\u0dbcල\\x05нȟ\\x02ල\\u0dbe')\n buf.write('\\x05џȰ\\x02\\u0dbe\\u0dbf\\x05ёȩ\\x02\\u0dbfව')\n buf.write('\\x05щȥ\\x02වශ\\x05нȟ\\x02ශʀ')\n buf.write('\\x03\\x02\\x02\\x02ෂස\\x05їȬ\\x02සහ\\x05х')\n buf.write('ȣ\\x02හළ\\x05сȡ\\x02ළෆ\\x05у')\n buf.write('Ȣ\\x02ෆ\\u0dc7\\x05ћȮ\\x02\\u0dc7ʂ\\x03\\x02\\x02')\n buf.write('\\x02\\u0dc8\\u0dc9\\x05їȬ\\x02\\u0dc9්\\x05ёȩ')\n buf.write('\\x02්\\u0dcb\\x05ыȦ\\x02\\u0dcb\\u0dcc\\x05ыȦ')\n 
buf.write('\\x02\\u0dcc\\u0dcd\\x05зȜ\\x02\\u0dcd\\u0dce\\x05еț')\n buf.write('\\x02\\u0dceා\\x05йȝ\\x02ාැ\\x05щȥ')\n buf.write('\\x02ැʄ\\x03\\x02\\x02\\x02ෑි\\x05їȬ\\x02ි')\n buf.write('ී\\x05ёȩ\\x02ීු\\x05ыȦ\\x02ු')\n buf.write('\\u0dd5\\x05ыȦ\\x02\\u0dd5ූ\\x05ѝȯ\\x02ූ')\n buf.write('\\u0dd7\\x05ѓȪ\\x02\\u0dd7ʆ\\x03\\x02\\x02\\x02ෘෙ')\n buf.write('\\x05їȬ\\x02ෙේ\\x05ёȩ\\x02ේෛ')\n buf.write('\\x05ѡȱ\\x02ෛʈ\\x03\\x02\\x02\\x02ොෝ\\x05ї')\n buf.write('Ȭ\\x02ෝෞ\\x05ёȩ\\x02ෞෟ\\x05ѡ')\n buf.write('ȱ\\x02ෟ\\u0de0\\x05хȣ\\x02\\u0de0\\u0de1\\x05л')\n buf.write('Ȟ\\x02\\u0de1ʊ\\x03\\x02\\x02\\x02\\u0de2\\u0de3\\x05їȬ')\n buf.write('\\x02\\u0de3\\u0de4\\x05ёȩ\\x02\\u0de4\\u0de5\\x05ѡȱ')\n buf.write('\\x02\\u0de5෦\\x05љȭ\\x02෦ʌ\\x03\\x02\\x02\\x02෧')\n buf.write('෨\\x05їȬ\\x02෨෩\\x05ѝȯ\\x02෩')\n buf.write('෪\\x05ыȦ\\x02෪෫\\x05нȟ\\x02෫')\n buf.write('෬\\x05љȭ\\x02෬ʎ\\x03\\x02\\x02\\x02෭෮')\n buf.write('\\x05љȭ\\x02෮෯\\x05еț\\x02෯\\u0df0')\n buf.write('\\x05эȧ\\x02\\u0df0\\u0df1\\x05ѓȪ\\x02\\u0df1ෲ')\n buf.write('\\x05ыȦ\\x02ෲෳ\\x05нȟ\\x02ෳʐ')\n buf.write('\\x03\\x02\\x02\\x02෴\\u0df5\\x05љȭ\\x02\\u0df5\\u0df6\\x05е')\n buf.write('ț\\x02\\u0df6\\u0df7\\x05џȰ\\x02\\u0df7\\u0df8\\x05н')\n buf.write('ȟ\\x02\\u0df8ʒ\\x03\\x02\\x02\\x02\\u0df9\\u0dfa\\x05љȭ')\n buf.write('\\x02\\u0dfa\\u0dfb\\x05еț\\x02\\u0dfb\\u0dfc\\x05џȰ')\n buf.write('\\x02\\u0dfc\\u0dfd\\x05нȟ\\x02\\u0dfd\\u0dfe\\x05ѓȪ')\n buf.write('\\x02\\u0dfe\\u0dff\\x05ёȩ\\x02\\u0dff\\u0e00\\x05хȣ')\n buf.write('\\x02\\u0e00ก\\x05яȨ\\x02กข\\x05ћȮ')\n buf.write('\\x02ขʔ\\x03\\x02\\x02\\x02ฃค\\x05љȭ\\x02ค')\n buf.write('ฅ\\x05йȝ\\x02ฅฆ\\x05уȢ\\x02ฆ')\n buf.write('ง\\x05нȟ\\x02งจ\\x05эȧ\\x02จ')\n buf.write('ฉ\\x05еț\\x02ฉʖ\\x03\\x02\\x02\\x02ชซ')\n buf.write('\\x05љȭ\\x02ซฌ\\x05йȝ\\x02ฌญ')\n buf.write('\\x05уȢ\\x02ญฎ\\x05нȟ\\x02ฎฏ')\n buf.write('\\x05эȧ\\x02ฏฐ\\x05еț\\x02ฐฑ')\n buf.write('\\x05йȝ\\x02ฑฒ\\x05уȢ\\x02ฒณ')\n buf.write('\\x05нȟ\\x02ณด\\x05йȝ\\x02ดต')\n buf.write('\\x05щȥ\\x02ตʘ\\x03\\x02\\x02\\x02ถท\\x05љ')\n 
buf.write('ȭ\\x02ทธ\\x05йȝ\\x02ธน\\x05я')\n buf.write('Ȩ\\x02นʚ\\x03\\x02\\x02\\x02บป\\x05љȭ')\n buf.write('\\x02ปผ\\x05нȟ\\x02ผฝ\\x05еț')\n buf.write('\\x02ฝพ\\x05їȬ\\x02พฟ\\x05йȝ')\n buf.write('\\x02ฟภ\\x05уȢ\\x02ภʜ\\x03\\x02\\x02\\x02ม')\n buf.write('ย\\x05љȭ\\x02ยร\\x05нȟ\\x02ร')\n buf.write('ฤ\\x05йȝ\\x02ฤล\\x05ёȩ\\x02ล')\n buf.write('ฦ\\x05яȨ\\x02ฦว\\x05лȞ\\x02ว')\n buf.write('ʞ\\x03\\x02\\x02\\x02ศษ\\x05љȭ\\x02ษส')\n buf.write('\\x05нȟ\\x02สห\\x05нȟ\\x02หฬ')\n buf.write('\\x05лȞ\\x02ฬʠ\\x03\\x02\\x02\\x02อฮ\\x05љ')\n buf.write('ȭ\\x02ฮฯ\\x05нȟ\\x02ฯะ\\x05с')\n buf.write('ȡ\\x02ะั\\x05эȧ\\x02ัา\\x05н')\n buf.write('ȟ\\x02าำ\\x05яȨ\\x02ำิ\\x05ћ')\n buf.write('Ȯ\\x02ิʢ\\x03\\x02\\x02\\x02ีึ\\x05љȭ')\n buf.write('\\x02ึื\\x05нȟ\\x02ืุ\\x05ыȦ')\n buf.write('\\x02ุู\\x05нȟ\\x02ฺู\\x05йȝ')\n buf.write('\\x02ฺ\\u0e3b\\x05ћȮ\\x02\\u0e3bʤ\\x03\\x02\\x02\\x02\\u0e3c')\n buf.write('\\u0e3d\\x05љȭ\\x02\\u0e3d\\u0e3e\\x05нȟ\\x02\\u0e3e')\n buf.write('฿\\x05ыȦ\\x02฿เ\\x05пȠ\\x02เ')\n buf.write('ʦ\\x03\\x02\\x02\\x02แโ\\x05љȭ\\x02โใ')\n buf.write('\\x05нȟ\\x02ใไ\\x05ѕȫ\\x02ไๅ')\n buf.write('\\x05ѝȯ\\x02ๅๆ\\x05нȟ\\x02ๆ็')\n buf.write('\\x05яȨ\\x02็่\\x05йȝ\\x02่้')\n buf.write('\\x05нȟ\\x02้ʨ\\x03\\x02\\x02\\x02๊๋\\x05љ')\n buf.write('ȭ\\x02๋์\\x05нȟ\\x02์ํ\\x05ѕ')\n buf.write('ȫ\\x02ํ๎\\x05ѝȯ\\x02๎๏\\x05н')\n buf.write('ȟ\\x02๏๐\\x05яȨ\\x02๐๑\\x05ћ')\n buf.write('Ȯ\\x02๑๒\\x05хȣ\\x02๒๓\\x05е')\n buf.write('ț\\x02๓๔\\x05ыȦ\\x02๔ʪ\\x03\\x02\\x02')\n buf.write('\\x02๕๖\\x05љȭ\\x02๖๗\\x05нȟ')\n buf.write('\\x02๗๘\\x05їȬ\\x02๘๙\\x05хȣ')\n buf.write('\\x02๙๚\\x05еț\\x02๚๛\\x05ыȦ')\n buf.write('\\x02๛\\u0e5c\\x05хȣ\\x02\\u0e5c\\u0e5d\\x05ѧȴ')\n buf.write('\\x02\\u0e5d\\u0e5e\\x05еț\\x02\\u0e5e\\u0e5f\\x05зȜ')\n buf.write('\\x02\\u0e5f\\u0e60\\x05ыȦ\\x02\\u0e60\\u0e61\\x05нȟ')\n buf.write('\\x02\\u0e61ʬ\\x03\\x02\\x02\\x02\\u0e62\\u0e63\\x05љȭ\\x02\\u0e63')\n buf.write('\\u0e64\\x05нȟ\\x02\\u0e64\\u0e65\\x05їȬ\\x02\\u0e65')\n 
buf.write('\\u0e66\\x05хȣ\\x02\\u0e66\\u0e67\\x05еț\\x02\\u0e67')\n buf.write('\\u0e68\\x05ыȦ\\x02\\u0e68\\u0e69\\x05ыȦ\\x02\\u0e69')\n buf.write('\\u0e6a\\x05ѥȳ\\x02\\u0e6a\\u0e6b\\x07a\\x02\\x02\\u0e6b\\u0e6c')\n buf.write('\\x05їȬ\\x02\\u0e6c\\u0e6d\\x05нȟ\\x02\\u0e6d\\u0e6e')\n buf.write('\\x05ѝȯ\\x02\\u0e6e\\u0e6f\\x05љȭ\\x02\\u0e6f\\u0e70')\n buf.write('\\x05еț\\x02\\u0e70\\u0e71\\x05зȜ\\x02\\u0e71\\u0e72')\n buf.write('\\x05ыȦ\\x02\\u0e72\\u0e73\\x05нȟ\\x02\\u0e73ʮ')\n buf.write('\\x03\\x02\\x02\\x02\\u0e74\\u0e75\\x05љȭ\\x02\\u0e75\\u0e76\\x05н')\n buf.write('ȟ\\x02\\u0e76\\u0e77\\x05їȬ\\x02\\u0e77\\u0e78\\x05џ')\n buf.write('Ȱ\\x02\\u0e78\\u0e79\\x05нȟ\\x02\\u0e79\\u0e7a\\x05ї')\n buf.write('Ȭ\\x02\\u0e7a\\u0e7b\\x05нȟ\\x02\\u0e7b\\u0e7c\\x05ї')\n buf.write('Ȭ\\x02\\u0e7c\\u0e7d\\x05їȬ\\x02\\u0e7d\\u0e7e\\x05ё')\n buf.write('ȩ\\x02\\u0e7e\\u0e7f\\x05їȬ\\x02\\u0e7fʰ\\x03\\x02\\x02')\n buf.write('\\x02\\u0e80ກ\\x05љȭ\\x02ກຂ\\x05нȟ')\n buf.write('\\x02ຂ\\u0e83\\x05љȭ\\x02\\u0e83ຄ\\x05љȭ')\n buf.write('\\x02ຄ\\u0e85\\x05хȣ\\x02\\u0e85ຆ\\x05ёȩ')\n buf.write('\\x02ຆງ\\x05яȨ\\x02ງຈ\\x05ћȮ')\n buf.write('\\x02ຈຉ\\x05хȣ\\x02ຉຊ\\x05эȧ')\n buf.write('\\x02ຊ\\u0e8b\\x05нȟ\\x02\\u0e8bຌ\\x05ѧȴ')\n buf.write('\\x02ຌຍ\\x05ёȩ\\x02ຍຎ\\x05яȨ')\n buf.write('\\x02ຎຏ\\x05нȟ\\x02ຏʲ\\x03\\x02\\x02\\x02ຐ')\n buf.write('ຑ\\x05љȭ\\x02ຑຒ\\x05нȟ\\x02ຒ')\n buf.write('ຓ\\x05ћȮ\\x02ຓʴ\\x03\\x02\\x02\\x02ດຕ')\n buf.write('\\x05љȭ\\x02ຕຖ\\x05нȟ\\x02ຖທ')\n buf.write('\\x05ћȮ\\x02ທຘ\\x05љȭ\\x02ຘʶ')\n buf.write('\\x03\\x02\\x02\\x02ນບ\\x05љȭ\\x02ບປ\\x05н')\n buf.write('ȟ\\x02ປຜ\\x05ћȮ\\x02ຜຝ\\x05ћ')\n buf.write('Ȯ\\x02ຝພ\\x05хȣ\\x02ພຟ\\x05я')\n buf.write('Ȩ\\x02ຟຠ\\x05сȡ\\x02ຠມ\\x05љ')\n buf.write('ȭ\\x02ມʸ\\x03\\x02\\x02\\x02ຢຣ\\x05љȭ')\n buf.write('\\x02ຣ\\u0ea4\\x05уȢ\\x02\\u0ea4ລ\\x05еț')\n buf.write('\\x02ລ\\u0ea6\\x05їȬ\\x02\\u0ea6ວ\\x05нȟ')\n buf.write('\\x02ວʺ\\x03\\x02\\x02\\x02ຨຩ\\x05љȭ\\x02ຩ')\n buf.write('ສ\\x05уȢ\\x02ສຫ\\x05ёȩ\\x02ຫ')\n 
buf.write('ຬ\\x05ѡȱ\\x02ຬʼ\\x03\\x02\\x02\\x02ອຮ')\n buf.write('\\x05љȭ\\x02ຮຯ\\x05уȢ\\x02ຯະ')\n buf.write('\\x05ѝȯ\\x02ະັ\\x05ћȮ\\x02ັາ')\n buf.write('\\x05лȞ\\x02າຳ\\x05ёȩ\\x02ຳິ')\n buf.write('\\x05ѡȱ\\x02ິີ\\x05яȨ\\x02ີʾ')\n buf.write('\\x03\\x02\\x02\\x02ຶື\\x05љȭ\\x02ືຸ\\x05х')\n buf.write('ȣ\\x02ຸູ\\x05зȜ\\x02຺ູ\\x05ы')\n buf.write('Ȧ\\x02຺ົ\\x05хȣ\\x02ົຼ\\x05я')\n buf.write('Ȩ\\x02ຼຽ\\x05сȡ\\x02ຽ\\u0ebe\\x05љ')\n buf.write('ȭ\\x02\\u0ebeˀ\\x03\\x02\\x02\\x02\\u0ebfເ\\x05љȭ')\n buf.write('\\x02ເແ\\x05хȣ\\x02ແໂ\\x05сȡ')\n buf.write('\\x02ໂໃ\\x05яȨ\\x02ໃໄ\\x05ћȮ')\n buf.write('\\x02ໄ\\u0ec5\\x05ѥȳ\\x02\\u0ec5ໆ\\x05ѓȪ')\n buf.write('\\x02ໆ\\u0ec7\\x05нȟ\\x02\\u0ec7˂\\x03\\x02\\x02\\x02່')\n buf.write('້\\x05љȭ\\x02້໊\\x05хȣ\\x02໊')\n buf.write('໋\\x05эȧ\\x02໋໌\\x05ѓȪ\\x02໌')\n buf.write('ໍ\\x05ыȦ\\x02ໍ\\u0ece\\x05нȟ\\x02\\u0ece')\n buf.write('\\u0ecf\\x07a\\x02\\x02\\u0ecf໐\\x05хȣ\\x02໐໑')\n buf.write('\\x05яȨ\\x02໑໒\\x05ћȮ\\x02໒໓')\n buf.write('\\x05нȟ\\x02໓໔\\x05сȡ\\x02໔໕')\n buf.write('\\x05нȟ\\x02໕໖\\x05їȬ\\x02໖˄')\n buf.write('\\x03\\x02\\x02\\x02໗໘\\x05љȭ\\x02໘໙\\x05х')\n buf.write('ȣ\\x02໙\\u0eda\\x05яȨ\\x02\\u0eda\\u0edb\\x05с')\n buf.write('ȡ\\x02\\u0edbໜ\\x05ыȦ\\x02ໜໝ\\x05н')\n buf.write('ȟ\\x02ໝˆ\\x03\\x02\\x02\\x02ໞໟ\\x05љȭ')\n buf.write('\\x02ໟ\\u0ee0\\x05хȣ\\x02\\u0ee0\\u0ee1\\x05ѧȴ')\n buf.write('\\x02\\u0ee1\\u0ee2\\x05нȟ\\x02\\u0ee2ˈ\\x03\\x02\\x02\\x02\\u0ee3')\n buf.write('\\u0ee4\\x05љȭ\\x02\\u0ee4\\u0ee5\\x05щȥ\\x02\\u0ee5')\n buf.write('\\u0ee6\\x05хȣ\\x02\\u0ee6\\u0ee7\\x05ѓȪ\\x02\\u0ee7')\n buf.write('ˊ\\x03\\x02\\x02\\x02\\u0ee8\\u0ee9\\x05љȭ\\x02\\u0ee9\\u0eea')\n buf.write('\\x05эȧ\\x02\\u0eea\\u0eeb\\x05еț\\x02\\u0eeb\\u0eec')\n buf.write('\\x05ыȦ\\x02\\u0eec\\u0eed\\x05ыȦ\\x02\\u0eed\\u0eee')\n buf.write('\\x05хȣ\\x02\\u0eee\\u0eef\\x05яȨ\\x02\\u0eef\\u0ef0')\n buf.write('\\x05ћȮ\\x02\\u0ef0ˌ\\x03\\x02\\x02\\x02\\u0ef1\\u0ef2\\x05љ')\n buf.write('ȭ\\x02\\u0ef2\\u0ef3\\x05яȨ\\x02\\u0ef3\\u0ef4\\x05е')\n 
buf.write('ț\\x02\\u0ef4\\u0ef5\\x05ѓȪ\\x02\\u0ef5\\u0ef6\\x05љ')\n buf.write('ȭ\\x02\\u0ef6\\u0ef7\\x05уȢ\\x02\\u0ef7\\u0ef8\\x05ё')\n buf.write('ȩ\\x02\\u0ef8\\u0ef9\\x05ћȮ\\x02\\u0ef9ˎ\\x03\\x02\\x02')\n buf.write('\\x02\\u0efa\\u0efb\\x05љȭ\\x02\\u0efb\\u0efc\\x05ёȩ')\n buf.write('\\x02\\u0efc\\u0efd\\x05эȧ\\x02\\u0efd\\u0efe\\x05нȟ')\n buf.write('\\x02\\u0efeː\\x03\\x02\\x02\\x02\\u0effༀ\\x05љȭ\\x02ༀ')\n buf.write('༁\\x05ѓȪ\\x02༁༂\\x05нȟ\\x02༂')\n buf.write('༃\\x05йȝ\\x02༃༄\\x05хȣ\\x02༄')\n buf.write('༅\\x05пȠ\\x02༅༆\\x05хȣ\\x02༆')\n buf.write('༇\\x05йȝ\\x02༇༈\\x05еț\\x02༈')\n buf.write('༉\\x05ћȮ\\x02༉༊\\x05хȣ\\x02༊')\n buf.write('་\\x05ёȩ\\x02་༌\\x05яȨ\\x02༌')\n buf.write('˒\\x03\\x02\\x02\\x02།༎\\x05љȭ\\x02༎༏')\n buf.write('\\x05ѕȫ\\x02༏༐\\x05ыȦ\\x02༐༑')\n buf.write('\\x05лȞ\\x02༑༒\\x05еț\\x02༒༓')\n buf.write('\\x05ћȮ\\x02༓༔\\x05еț\\x02༔˔')\n buf.write('\\x03\\x02\\x02\\x02༕༖\\x05љȭ\\x02༖༗\\x05ѕ')\n buf.write('ȫ\\x02༗༘\\x05ыȦ\\x02༘༙\\x05н')\n buf.write('ȟ\\x02༙༚\\x05їȬ\\x02༚༛\\x05ї')\n buf.write('Ȭ\\x02༛༜\\x05ёȩ\\x02༜༝\\x05ї')\n buf.write('Ȭ\\x02༝˖\\x03\\x02\\x02\\x02༞༟\\x05љȭ')\n buf.write('\\x02༟༠\\x05ћȮ\\x02༠༡\\x05еț')\n buf.write('\\x02༡༢\\x05яȨ\\x02༢༣\\x05лȞ')\n buf.write('\\x02༣༤\\x05еț\\x02༤༥\\x05ыȦ')\n buf.write('\\x02༥༦\\x05ёȩ\\x02༦༧\\x05яȨ')\n buf.write('\\x02༧༨\\x05нȟ\\x02༨˘\\x03\\x02\\x02\\x02༩')\n buf.write('༪\\x05љȭ\\x02༪༫\\x05ћȮ\\x02༫')\n buf.write('༬\\x05еț\\x02༬༭\\x05їȬ\\x02༭')\n buf.write('༮\\x05ћȮ\\x02༮˚\\x03\\x02\\x02\\x02༯༰')\n buf.write('\\x05љȭ\\x02༰༱\\x05ћȮ\\x02༱༲')\n buf.write('\\x05еț\\x02༲༳\\x05їȬ\\x02༳༴')\n buf.write('\\x05ћȮ\\x02༴༵\\x05ѝȯ\\x02༵༶')\n buf.write('\\x05ѓȪ\\x02༶˜\\x03\\x02\\x02\\x02༷༸\\x05љ')\n buf.write('ȭ\\x02༸༹\\x05ћȮ\\x02༹༺\\x05е')\n buf.write('ț\\x02༺༻\\x05ћȮ\\x02༻༼\\x05н')\n buf.write('ȟ\\x02༼༽\\x05эȧ\\x02༽༾\\x05н')\n buf.write('ȟ\\x02༾༿\\x05яȨ\\x02༿ཀ\\x05ћ')\n buf.write('Ȯ\\x02ཀ˞\\x03\\x02\\x02\\x02ཁག\\x05љȭ')\n buf.write('\\x02གགྷ\\x05ћȮ\\x02གྷང\\x05еț')\n buf.write('\\x02ངཅ\\x05ћȮ\\x02ཅཆ\\x05нȟ')\n 
buf.write('\\x02ཆཇ\\x05эȧ\\x02ཇ\\u0f48\\x05нȟ')\n buf.write('\\x02\\u0f48ཉ\\x05яȨ\\x02ཉཊ\\x05ћȮ')\n buf.write('\\x02ཊཋ\\x07a\\x02\\x02ཋཌ\\x05хȣ\\x02ཌ')\n buf.write('ཌྷ\\x05лȞ\\x02ཌྷˠ\\x03\\x02\\x02\\x02ཎཏ')\n buf.write('\\x05љȭ\\x02ཏཐ\\x05ћȮ\\x02ཐད')\n buf.write('\\x05еț\\x02དདྷ\\x05ћȮ\\x02དྷན')\n buf.write('\\x05хȣ\\x02ནཔ\\x05йȝ\\x02པˢ')\n buf.write('\\x03\\x02\\x02\\x02ཕབ\\x05љȭ\\x02བབྷ\\x05ћ')\n buf.write('Ȯ\\x02བྷམ\\x05еț\\x02མཙ\\x05ћ')\n buf.write('Ȯ\\x02ཙཚ\\x05хȣ\\x02ཚཛ\\x05љ')\n buf.write('ȭ\\x02ཛཛྷ\\x05ћȮ\\x02ཛྷཝ\\x05х')\n buf.write('ȣ\\x02ཝཞ\\x05йȝ\\x02ཞཟ\\x05љ')\n buf.write('ȭ\\x02ཟˤ\\x03\\x02\\x02\\x02འཡ\\x05љȭ')\n buf.write('\\x02ཡར\\x05ћȮ\\x02རལ\\x05їȬ')\n buf.write('\\x02ལཤ\\x05хȣ\\x02ཤཥ\\x05яȨ')\n buf.write('\\x02ཥས\\x05сȡ\\x02ས˦\\x03\\x02\\x02\\x02ཧ')\n buf.write('ཨ\\x05љȭ\\x02ཨཀྵ\\x05ѝȯ\\x02ཀྵ')\n buf.write('ཪ\\x05зȜ\\x02ཪཫ\\x05эȧ\\x02ཫ')\n buf.write('ཬ\\x05ѝȯ\\x02ཬ\\u0f6d\\x05ыȦ\\x02\\u0f6d')\n buf.write('\\u0f6e\\x05ћȮ\\x02\\u0f6e\\u0f6f\\x05хȣ\\x02\\u0f6f')\n buf.write('\\u0f70\\x05љȭ\\x02\\u0f70ཱ\\x05нȟ\\x02ཱ')\n buf.write('ི\\x05ћȮ\\x02ི˨\\x03\\x02\\x02\\x02ཱིུ')\n buf.write('\\x05љȭ\\x02ཱུུ\\x05ѝȯ\\x02ཱུྲྀ')\n buf.write('\\x05зȜ\\x02ྲྀཷ\\x05ѓȪ\\x02ཷླྀ')\n buf.write('\\x05еț\\x02ླྀཹ\\x05їȬ\\x02ཹེ')\n buf.write('\\x05ћȮ\\x02ེཻ\\x05хȣ\\x02ཻོ')\n buf.write('\\x05ћȮ\\x02ོཽ\\x05хȣ\\x02ཽཾ')\n buf.write('\\x05ёȩ\\x02ཾཿ\\x05яȨ\\x02ཿ˪')\n buf.write('\\x03\\x02\\x02\\x02ཱྀྀ\\x05љȭ\\x02ཱྀྂ\\x05ѝ')\n buf.write('ȯ\\x02ྂྃ\\x05зȜ\\x02྄ྃ\\x05љ')\n buf.write('ȭ\\x02྄྅\\x05ћȮ\\x02྅྆\\x05х')\n buf.write('ȣ\\x02྆྇\\x05ћȮ\\x02྇ྈ\\x05ѝ')\n buf.write('ȯ\\x02ྈྉ\\x05ћȮ\\x02ྉྊ\\x05е')\n buf.write('ț\\x02ྊྋ\\x05зȜ\\x02ྋྌ\\x05ы')\n buf.write('Ȧ\\x02ྌྍ\\x05нȟ\\x02ྍˬ\\x03\\x02\\x02')\n buf.write('\\x02ྎྏ\\x05љȭ\\x02ྏྐ\\x05ѝȯ')\n buf.write('\\x02ྐྑ\\x05зȜ\\x02ྑྒ\\x05ћȮ')\n buf.write('\\x02ྒྒྷ\\x05ѥȳ\\x02ྒྷྔ\\x05ѓȪ')\n buf.write('\\x02ྔྕ\\x05нȟ\\x02ྕˮ\\x03\\x02\\x02\\x02ྖ')\n buf.write('ྗ\\x05љȭ\\x02ྗ\\u0f98\\x05ѝȯ\\x02\\u0f98')\n buf.write('ྙ\\x05йȝ\\x02ྙྚ\\x05йȝ\\x02ྚ')\n 
buf.write('ྛ\\x05нȟ\\x02ྛྜ\\x05љȭ\\x02ྜ')\n buf.write('ྜྷ\\x05љȭ\\x02ྜྷ˰\\x03\\x02\\x02\\x02ྞྟ')\n buf.write('\\x05љȭ\\x02ྟྠ\\x05ѝȯ\\x02ྠྡ')\n buf.write('\\x05љȭ\\x02ྡྡྷ\\x05ѓȪ\\x02ྡྷྣ')\n buf.write('\\x05нȟ\\x02ྣྤ\\x05яȨ\\x02ྤྥ')\n buf.write('\\x05лȞ\\x02ྥ˲\\x03\\x02\\x02\\x02ྦྦྷ\\x05ћ')\n buf.write('Ȯ\\x02ྦྷྨ\\x05еț\\x02ྨྩ\\x05з')\n buf.write('Ȝ\\x02ྩྪ\\x05ыȦ\\x02ྪྫ\\x05н')\n buf.write('ȟ\\x02ྫ˴\\x03\\x02\\x02\\x02ྫྷྭ\\x05ћȮ')\n buf.write('\\x02ྭྮ\\x05уȢ\\x02ྮྯ\\x05нȟ')\n buf.write('\\x02ྯ˶\\x03\\x02\\x02\\x02ྰྱ\\x05ћȮ\\x02ྱ')\n buf.write('ྲ\\x05уȢ\\x02ྲླ\\x05нȟ\\x02ླ')\n buf.write('ྴ\\x05яȨ\\x02ྴ˸\\x03\\x02\\x02\\x02ྵྶ')\n buf.write('\\x05ћȮ\\x02ྶྷ\\x05хȣ\\x02ྷྸ')\n buf.write('\\x05эȧ\\x02ྸྐྵ\\x05нȟ\\x02ྐྵ˺')\n buf.write('\\x03\\x02\\x02\\x02ྺྻ\\x05ћȮ\\x02ྻྼ\\x05х')\n buf.write('ȣ\\x02ྼ\\u0fbd\\x05эȧ\\x02\\u0fbd྾\\x05н')\n buf.write('ȟ\\x02྾྿\\x05љȭ\\x02྿࿀\\x05ћ')\n buf.write('Ȯ\\x02࿀࿁\\x05еț\\x02࿁࿂\\x05э')\n buf.write('ȧ\\x02࿂࿃\\x05ѓȪ\\x02࿃˼\\x03\\x02\\x02')\n buf.write('\\x02࿄࿅\\x05ћȮ\\x02࿅࿆\\x05хȣ')\n buf.write('\\x02࿆࿇\\x05эȧ\\x02࿇࿈\\x05нȟ')\n buf.write('\\x02࿈࿉\\x05љȭ\\x02࿉࿊\\x05ћȮ')\n buf.write('\\x02࿊࿋\\x05еț\\x02࿋࿌\\x05эȧ')\n buf.write('\\x02࿌\\u0fcd\\x05ѓȪ\\x02\\u0fcd࿎\\x07a\\x02\\x02࿎')\n buf.write('࿏\\x05ыȦ\\x02࿏࿐\\x05ћȮ\\x02࿐')\n buf.write('࿑\\x05ѧȴ\\x02࿑࿒\\x07a\\x02\\x02࿒࿓')\n buf.write('\\x05ѝȯ\\x02࿓࿔\\x05яȨ\\x02࿔࿕')\n buf.write('\\x05йȝ\\x02࿕࿖\\x05ёȩ\\x02࿖࿗')\n buf.write('\\x05яȨ\\x02࿗࿘\\x05љȭ\\x02࿘࿙')\n buf.write('\\x05ћȮ\\x02࿙࿚\\x05їȬ\\x02࿚\\u0fdb')\n buf.write('\\x05еț\\x02\\u0fdb\\u0fdc\\x05хȣ\\x02\\u0fdc\\u0fdd')\n buf.write('\\x05яȨ\\x02\\u0fdd\\u0fde\\x05нȟ\\x02\\u0fde\\u0fdf')\n buf.write('\\x05лȞ\\x02\\u0fdf˾\\x03\\x02\\x02\\x02\\u0fe0\\u0fe1\\x05ћ')\n buf.write('Ȯ\\x02\\u0fe1\\u0fe2\\x05хȣ\\x02\\u0fe2\\u0fe3\\x05э')\n buf.write('ȧ\\x02\\u0fe3\\u0fe4\\x05нȟ\\x02\\u0fe4\\u0fe5\\x05љ')\n buf.write('ȭ\\x02\\u0fe5\\u0fe6\\x05ћȮ\\x02\\u0fe6\\u0fe7\\x05е')\n buf.write('ț\\x02\\u0fe7\\u0fe8\\x05эȧ\\x02\\u0fe8\\u0fe9\\x05ѓ')\n 
buf.write('Ȫ\\x02\\u0fe9\\u0fea\\x07a\\x02\\x02\\u0fea\\u0feb\\x05ћȮ')\n buf.write('\\x02\\u0feb\\u0fec\\x05ѧȴ\\x02\\u0fec\\u0fed\\x07a\\x02\\x02\\u0fed')\n buf.write('\\u0fee\\x05ѝȯ\\x02\\u0fee\\u0fef\\x05яȨ\\x02\\u0fef')\n buf.write('\\u0ff0\\x05йȝ\\x02\\u0ff0\\u0ff1\\x05ёȩ\\x02\\u0ff1')\n buf.write('\\u0ff2\\x05яȨ\\x02\\u0ff2\\u0ff3\\x05љȭ\\x02\\u0ff3')\n buf.write('\\u0ff4\\x05ћȮ\\x02\\u0ff4\\u0ff5\\x05їȬ\\x02\\u0ff5')\n buf.write('\\u0ff6\\x05еț\\x02\\u0ff6\\u0ff7\\x05хȣ\\x02\\u0ff7')\n buf.write('\\u0ff8\\x05яȨ\\x02\\u0ff8\\u0ff9\\x05нȟ\\x02\\u0ff9')\n buf.write('\\u0ffa\\x05лȞ\\x02\\u0ffà\\x03\\x02\\x02\\x02\\u0ffb\\u0ffc')\n buf.write('\\x05ћȮ\\x02\\u0ffc\\u0ffd\\x05хȣ\\x02\\u0ffd\\u0ffe')\n buf.write('\\x05эȧ\\x02\\u0ffe\\u0fff\\x05нȟ\\x02\\u0fffက')\n buf.write('\\x05љȭ\\x02ကခ\\x05ћȮ\\x02ခဂ')\n buf.write('\\x05еț\\x02ဂဃ\\x05эȧ\\x02ဃင')\n buf.write('\\x05ѓȪ\\x02ငစ\\x07a\\x02\\x02စဆ\\x05ѝ')\n buf.write('ȯ\\x02ဆဇ\\x05яȨ\\x02ဇဈ\\x05й')\n buf.write('ȝ\\x02ဈဉ\\x05ёȩ\\x02ဉည\\x05я')\n buf.write('Ȩ\\x02ညဋ\\x05љȭ\\x02ဋဌ\\x05ћ')\n buf.write('Ȯ\\x02ဌဍ\\x05їȬ\\x02ဍဎ\\x05е')\n buf.write('ț\\x02ဎဏ\\x05хȣ\\x02ဏတ\\x05я')\n buf.write('Ȩ\\x02တထ\\x05нȟ\\x02ထဒ\\x05л')\n buf.write('Ȟ\\x02ဒ̂\\x03\\x02\\x02\\x02ဓန\\x05ћȮ')\n buf.write('\\x02နပ\\x05хȣ\\x02ပဖ\\x05эȧ')\n buf.write('\\x02ဖဗ\\x05нȟ\\x02ဗဘ\\x05ѧȴ')\n buf.write('\\x02ဘမ\\x05ёȩ\\x02မယ\\x05яȨ')\n buf.write('\\x02ယရ\\x05нȟ\\x02ရလ\\x07a\\x02\\x02လ')\n buf.write('ဝ\\x05еț\\x02ဝသ\\x05зȜ\\x02သ')\n buf.write('ဟ\\x05зȜ\\x02ဟဠ\\x05їȬ\\x02ဠ')\n buf.write('̄\\x03\\x02\\x02\\x02အဢ\\x05ћȮ\\x02ဢဣ')\n buf.write('\\x05хȣ\\x02ဣဤ\\x05эȧ\\x02ဤဥ')\n buf.write('\\x05нȟ\\x02ဥဦ\\x05ѧȴ\\x02ဦဧ')\n buf.write('\\x05ёȩ\\x02ဧဨ\\x05яȨ\\x02ဨဩ')\n buf.write('\\x05нȟ\\x02ဩဪ\\x07a\\x02\\x02ဪါ\\x05у')\n buf.write('Ȣ\\x02ါာ\\x05ёȩ\\x02ာိ\\x05ѝ')\n buf.write('ȯ\\x02ိီ\\x05їȬ\\x02ီ̆\\x03\\x02\\x02')\n buf.write('\\x02ုူ\\x05ћȮ\\x02ူေ\\x05хȣ')\n buf.write('\\x02ေဲ\\x05эȧ\\x02ဲဳ\\x05нȟ')\n buf.write('\\x02ဳဴ\\x05ѧȴ\\x02ဴဵ\\x05ёȩ')\n 
buf.write('\\x02ဵံ\\x05яȨ\\x02ံ့\\x05нȟ')\n buf.write('\\x02့း\\x07a\\x02\\x02း္\\x05эȧ\\x02္')\n buf.write('်\\x05хȣ\\x02်ျ\\x05яȨ\\x02ျ')\n buf.write('ြ\\x05ѝȯ\\x02ြွ\\x05ћȮ\\x02ွ')\n buf.write('ှ\\x05нȟ\\x02ှ̈\\x03\\x02\\x02\\x02ဿ၀')\n buf.write('\\x05ћȮ\\x02၀၁\\x05хȣ\\x02၁၂')\n buf.write('\\x05эȧ\\x02၂၃\\x05нȟ\\x02၃၄')\n buf.write('\\x05ѧȴ\\x02၄၅\\x05ёȩ\\x02၅၆')\n buf.write('\\x05яȨ\\x02၆၇\\x05нȟ\\x02၇၈')\n buf.write('\\x07a\\x02\\x02၈၉\\x05їȬ\\x02၉၊\\x05н')\n buf.write('ȟ\\x02၊။\\x05сȡ\\x02။၌\\x05х')\n buf.write('ȣ\\x02၌၍\\x05ёȩ\\x02၍၎\\x05я')\n buf.write('Ȩ\\x02၎̊\\x03\\x02\\x02\\x02၏ၐ\\x05ћȮ')\n buf.write('\\x02ၐၑ\\x05ёȩ\\x02ၑ̌\\x03\\x02\\x02\\x02ၒ')\n buf.write('ၓ\\x05ћȮ\\x02ၓၔ\\x05їȬ\\x02ၔ')\n buf.write('ၕ\\x05еț\\x02ၕၖ\\x05хȣ\\x02ၖ')\n buf.write('ၗ\\x05ыȦ\\x02ၗၘ\\x05хȣ\\x02ၘ')\n buf.write('ၙ\\x05яȨ\\x02ၙၚ\\x05сȡ\\x02ၚ')\n buf.write('̎\\x03\\x02\\x02\\x02ၛၜ\\x05ћȮ\\x02ၜၝ')\n buf.write('\\x05їȬ\\x02ၝၞ\\x05еț\\x02ၞၟ')\n buf.write('\\x05яȨ\\x02ၟၠ\\x05љȭ\\x02ၠၡ')\n buf.write('\\x05еț\\x02ၡၢ\\x05йȝ\\x02ၢၣ')\n buf.write('\\x05ћȮ\\x02ၣၤ\\x05хȣ\\x02ၤၥ')\n buf.write('\\x05ёȩ\\x02ၥၦ\\x05яȨ\\x02ၦ̐')\n buf.write('\\x03\\x02\\x02\\x02ၧၨ\\x05ћȮ\\x02ၨၩ\\x05ї')\n buf.write('Ȭ\\x02ၩၪ\\x05еț\\x02ၪၫ\\x05я')\n buf.write('Ȩ\\x02ၫၬ\\x05љȭ\\x02ၬၭ\\x05ы')\n buf.write('Ȧ\\x02ၭၮ\\x05еț\\x02ၮၯ\\x05ћ')\n buf.write('Ȯ\\x02ၯၰ\\x05нȟ\\x02ၰ̒\\x03\\x02\\x02')\n buf.write('\\x02ၱၲ\\x05ћȮ\\x02ၲၳ\\x05їȬ')\n buf.write('\\x02ၳၴ\\x05нȟ\\x02ၴၵ\\x05еț')\n buf.write('\\x02ၵၶ\\x05ћȮ\\x02ၶ̔\\x03\\x02\\x02\\x02ၷ')\n buf.write('ၸ\\x05ћȮ\\x02ၸၹ\\x05їȬ\\x02ၹ')\n buf.write('ၺ\\x05хȣ\\x02ၺၻ\\x05сȡ\\x02ၻ')\n buf.write('ၼ\\x05сȡ\\x02ၼၽ\\x05нȟ\\x02ၽ')\n buf.write('ၾ\\x05їȬ\\x02ၾ̖\\x03\\x02\\x02\\x02ၿႀ')\n buf.write('\\x05ћȮ\\x02ႀႁ\\x05їȬ\\x02ႁႂ')\n buf.write('\\x05хȣ\\x02ႂႃ\\x05эȧ\\x02ႃ̘')\n buf.write('\\x03\\x02\\x02\\x02ႄႅ\\x05ћȮ\\x02ႅႆ\\x05ї')\n buf.write('Ȭ\\x02ႆႇ\\x05ѝȯ\\x02ႇႈ\\x05н')\n buf.write('ȟ\\x02ႈ̚\\x03\\x02\\x02\\x02ႉႊ\\x05ћȮ')\n buf.write('\\x02ႊႋ\\x05їȬ\\x02ႋႌ\\x05ѝȯ')\n 
buf.write('\\x02ႌႍ\\x05яȨ\\x02ႍႎ\\x05йȝ')\n buf.write('\\x02ႎႏ\\x05еț\\x02ႏ႐\\x05ћȮ')\n buf.write('\\x02႐႑\\x05нȟ\\x02႑̜\\x03\\x02\\x02\\x02႒')\n buf.write('႓\\x05ћȮ\\x02႓႔\\x05ѥȳ\\x02႔')\n buf.write('႕\\x05ѓȪ\\x02႕႖\\x05нȟ\\x02႖')\n buf.write('̞\\x03\\x02\\x02\\x02႗႘\\x05ѝȯ\\x02႘႙')\n buf.write('\\x05яȨ\\x02႙ႚ\\x05зȜ\\x02ႚႛ')\n buf.write('\\x05ёȩ\\x02ႛႜ\\x05ѝȯ\\x02ႜႝ')\n buf.write('\\x05яȨ\\x02ႝ႞\\x05лȞ\\x02႞႟')\n buf.write('\\x05нȟ\\x02႟Ⴀ\\x05лȞ\\x02Ⴀ̠')\n buf.write('\\x03\\x02\\x02\\x02ႡႢ\\x05ѝȯ\\x02ႢႣ\\x05я')\n buf.write('Ȩ\\x02ႣႤ\\x05лȞ\\x02ႤႥ\\x05н')\n buf.write('ȟ\\x02ႥႦ\\x05їȬ\\x02Ⴆ̢\\x03\\x02\\x02')\n buf.write('\\x02ႧႨ\\x05ѝȯ\\x02ႨႩ\\x05яȨ')\n buf.write('\\x02ႩႪ\\x05хȣ\\x02ႪႫ\\x05ёȩ')\n buf.write('\\x02ႫႬ\\x05яȨ\\x02Ⴌ̤\\x03\\x02\\x02\\x02Ⴍ')\n buf.write('Ⴎ\\x05ѝȯ\\x02ႮႯ\\x05яȨ\\x02Ⴏ')\n buf.write('Ⴐ\\x05хȣ\\x02ႰႱ\\x05ѕȫ\\x02Ⴑ')\n buf.write('Ⴒ\\x05ѝȯ\\x02ႲႳ\\x05нȟ\\x02Ⴓ')\n buf.write('̦\\x03\\x02\\x02\\x02ႴႵ\\x05ѝȯ\\x02ႵႶ')\n buf.write('\\x05яȨ\\x02ႶႷ\\x05ыȦ\\x02ႷႸ')\n buf.write('\\x05хȣ\\x02ႸႹ\\x05эȧ\\x02ႹႺ')\n buf.write('\\x05хȣ\\x02ႺႻ\\x05ћȮ\\x02ႻႼ')\n buf.write('\\x05нȟ\\x02ႼႽ\\x05лȞ\\x02Ⴝ̨')\n buf.write('\\x03\\x02\\x02\\x02ႾႿ\\x05ѝȯ\\x02ႿჀ\\x05я')\n buf.write('Ȩ\\x02ჀჁ\\x05ѓȪ\\x02ჁჂ\\x05х')\n buf.write('ȣ\\x02ჂჃ\\x05џȰ\\x02ჃჄ\\x05ё')\n buf.write('ȩ\\x02ჄჅ\\x05ћȮ\\x02Ⴥ̪\\x03\\x02\\x02')\n buf.write('\\x02\\u10c6Ⴧ\\x05ѝȯ\\x02Ⴧ\\u10c8\\x05яȨ')\n buf.write('\\x02\\u10c8\\u10c9\\x05ћȮ\\x02\\u10c9\\u10ca\\x05хȣ')\n buf.write('\\x02\\u10ca\\u10cb\\x05ыȦ\\x02\\u10cb̬\\x03\\x02\\x02\\x02\\u10cc')\n buf.write('Ⴭ\\x05ѝȯ\\x02Ⴭ\\u10ce\\x05ѓȪ\\x02\\u10ce')\n buf.write('\\u10cf\\x05лȞ\\x02\\u10cfა\\x05еț\\x02ა')\n buf.write('ბ\\x05ћȮ\\x02ბგ\\x05нȟ\\x02გ')\n buf.write('̮\\x03\\x02\\x02\\x02დე\\x05ѝȯ\\x02ევ')\n buf.write('\\x05ѓȪ\\x02ვზ\\x05лȞ\\x02ზთ')\n buf.write('\\x05еț\\x02თი\\x05ћȮ\\x02იკ')\n buf.write('\\x05нȟ\\x02კლ\\x05лȞ\\x02ლ̰')\n buf.write('\\x03\\x02\\x02\\x02მნ\\x05ѝȯ\\x02ნო\\x05ѓ')\n buf.write('Ȫ\\x02ოპ\\x05љȭ\\x02პჟ\\x05н')\n 
buf.write('ȟ\\x02ჟრ\\x05їȬ\\x02რს\\x05ћ')\n buf.write('Ȯ\\x02ს̲\\x03\\x02\\x02\\x02ტუ\\x05ѝȯ')\n buf.write('\\x02უფ\\x05їȬ\\x02ფქ\\x05ёȩ')\n buf.write('\\x02ქღ\\x05ѡȱ\\x02ღყ\\x05хȣ')\n buf.write('\\x02ყშ\\x05лȞ\\x02შ̴\\x03\\x02\\x02\\x02ჩ')\n buf.write('ც\\x05ѝȯ\\x02ცძ\\x05љȭ\\x02ძ')\n buf.write('წ\\x05нȟ\\x02წ̶\\x03\\x02\\x02\\x02ჭხ')\n buf.write('\\x05ѝȯ\\x02ხჯ\\x05љȭ\\x02ჯჰ')\n buf.write('\\x05хȣ\\x02ჰჱ\\x05яȨ\\x02ჱჲ')\n buf.write('\\x05сȡ\\x02ჲ̸\\x03\\x02\\x02\\x02ჳჴ\\x05џ')\n buf.write('Ȱ\\x02ჴჵ\\x05еț\\x02ჵჶ\\x05ы')\n buf.write('Ȧ\\x02ჶჷ\\x05хȣ\\x02ჷჸ\\x05л')\n buf.write('Ȟ\\x02ჸჹ\\x05еț\\x02ჹჺ\\x05ћ')\n buf.write('Ȯ\\x02ჺ჻\\x05нȟ\\x02჻̺\\x03\\x02\\x02')\n buf.write('\\x02ჼჽ\\x05џȰ\\x02ჽჾ\\x05еț')\n buf.write('\\x02ჾჿ\\x05ыȦ\\x02ჿᄀ\\x05ѝȯ')\n buf.write('\\x02ᄀᄁ\\x05нȟ\\x02ᄁ̼\\x03\\x02\\x02\\x02ᄂ')\n buf.write('ᄃ\\x05џȰ\\x02ᄃᄄ\\x05еț\\x02ᄄ')\n buf.write('ᄅ\\x05ыȦ\\x02ᄅᄆ\\x05ѝȯ\\x02ᄆ')\n buf.write('ᄇ\\x05нȟ\\x02ᄇᄈ\\x05љȭ\\x02ᄈ')\n buf.write('̾\\x03\\x02\\x02\\x02ᄉᄊ\\x05џȰ\\x02ᄊᄋ')\n buf.write('\\x05еț\\x02ᄋᄌ\\x05їȬ\\x02ᄌᄍ')\n buf.write('\\x05йȝ\\x02ᄍᄎ\\x05уȢ\\x02ᄎᄏ')\n buf.write('\\x05еț\\x02ᄏᄐ\\x05їȬ\\x02ᄐ̀')\n buf.write('\\x03\\x02\\x02\\x02ᄑᄒ\\x05џȰ\\x02ᄒᄓ\\x05е')\n buf.write('ț\\x02ᄓᄔ\\x05їȬ\\x02ᄔᄕ\\x05й')\n buf.write('ȝ\\x02ᄕᄖ\\x05уȢ\\x02ᄖᄗ\\x05е')\n buf.write('ț\\x02ᄗᄘ\\x05їȬ\\x02ᄘᄙ\\x074')\n buf.write('\\x02\\x02ᄙ͂\\x03\\x02\\x02\\x02ᄚᄛ\\x05џȰ\\x02ᄛ')\n buf.write('ᄜ\\x05еț\\x02ᄜᄝ\\x05їȬ\\x02ᄝ')\n buf.write('ᄞ\\x05хȣ\\x02ᄞᄟ\\x05еț\\x02ᄟ')\n buf.write('ᄠ\\x05зȜ\\x02ᄠᄡ\\x05ыȦ\\x02ᄡ')\n buf.write('ᄢ\\x05нȟ\\x02ᄢ̈́\\x03\\x02\\x02\\x02ᄣᄤ')\n buf.write('\\x05џȰ\\x02ᄤᄥ\\x05еț\\x02ᄥᄦ')\n buf.write('\\x05їȬ\\x02ᄦᄧ\\x05їȬ\\x02ᄧᄨ')\n buf.write('\\x05еț\\x02ᄨᄩ\\x05ѥȳ\\x02ᄩ͆')\n buf.write('\\x03\\x02\\x02\\x02ᄪᄫ\\x05џȰ\\x02ᄫᄬ\\x05е')\n buf.write('ț\\x02ᄬᄭ\\x05їȬ\\x02ᄭᄮ\\x05ѥ')\n buf.write('ȳ\\x02ᄮᄯ\\x05хȣ\\x02ᄯᄰ\\x05я')\n buf.write('Ȩ\\x02ᄰᄱ\\x05сȡ\\x02ᄱ͈\\x03\\x02\\x02')\n buf.write('\\x02ᄲᄳ\\x05џȰ\\x02ᄳᄴ\\x05нȟ')\n buf.write('\\x02ᄴᄵ\\x05їȬ\\x02ᄵᄶ\\x05љȭ')\n 
buf.write('\\x02ᄶᄷ\\x05хȣ\\x02ᄷᄸ\\x05ёȩ')\n buf.write('\\x02ᄸᄹ\\x05яȨ\\x02ᄹ͊\\x03\\x02\\x02\\x02ᄺ')\n buf.write('ᄻ\\x05џȰ\\x02ᄻᄼ\\x05нȟ\\x02ᄼ')\n buf.write('ᄽ\\x05їȬ\\x02ᄽᄾ\\x05љȭ\\x02ᄾ')\n buf.write('ᄿ\\x05хȣ\\x02ᄿᅀ\\x05ёȩ\\x02ᅀ')\n buf.write('ᅁ\\x05яȨ\\x02ᅁᅂ\\x05љȭ\\x02ᅂ')\n buf.write('͌\\x03\\x02\\x02\\x02ᅃᅄ\\x05ѡȱ\\x02ᅄᅅ')\n buf.write('\\x05еț\\x02ᅅᅆ\\x05хȣ\\x02ᅆᅇ')\n buf.write('\\x05ћȮ\\x02ᅇ͎\\x03\\x02\\x02\\x02ᅈᅉ\\x05ѡ')\n buf.write('ȱ\\x02ᅉᅊ\\x05еț\\x02ᅊᅋ\\x05ї')\n buf.write('Ȭ\\x02ᅋᅌ\\x05яȨ\\x02ᅌᅍ\\x05х')\n buf.write('ȣ\\x02ᅍᅎ\\x05яȨ\\x02ᅎᅏ\\x05с')\n buf.write('ȡ\\x02ᅏ͐\\x03\\x02\\x02\\x02ᅐᅑ\\x05ѡȱ')\n buf.write('\\x02ᅑᅒ\\x05нȟ\\x02ᅒᅓ\\x05ыȦ')\n buf.write('\\x02ᅓᅔ\\x05ыȦ\\x02ᅔᅕ\\x05пȠ')\n buf.write('\\x02ᅕᅖ\\x05ёȩ\\x02ᅖᅗ\\x05їȬ')\n buf.write('\\x02ᅗᅘ\\x05эȧ\\x02ᅘᅙ\\x05нȟ')\n buf.write('\\x02ᅙᅚ\\x05лȞ\\x02ᅚ͒\\x03\\x02\\x02\\x02ᅛ')\n buf.write('ᅜ\\x05ѡȱ\\x02ᅜᅝ\\x05уȢ\\x02ᅝ')\n buf.write('ᅞ\\x05нȟ\\x02ᅞᅟ\\x05яȨ\\x02ᅟ')\n buf.write('͔\\x03\\x02\\x02\\x02ᅠᅡ\\x05ѡȱ\\x02ᅡᅢ')\n buf.write('\\x05уȢ\\x02ᅢᅣ\\x05нȟ\\x02ᅣᅤ')\n buf.write('\\x05яȨ\\x02ᅤᅥ\\x05нȟ\\x02ᅥᅦ')\n buf.write('\\x05џȰ\\x02ᅦᅧ\\x05нȟ\\x02ᅧᅨ')\n buf.write('\\x05їȬ\\x02ᅨ͖\\x03\\x02\\x02\\x02ᅩᅪ\\x05ѡ')\n buf.write('ȱ\\x02ᅪᅫ\\x05уȢ\\x02ᅫᅬ\\x05н')\n buf.write('ȟ\\x02ᅬᅭ\\x05їȬ\\x02ᅭᅮ\\x05н')\n buf.write('ȟ\\x02ᅮ͘\\x03\\x02\\x02\\x02ᅯᅰ\\x05ѡȱ')\n buf.write('\\x02ᅰᅱ\\x05уȢ\\x02ᅱᅲ\\x05хȣ')\n buf.write('\\x02ᅲᅳ\\x05ыȦ\\x02ᅳᅴ\\x05нȟ')\n buf.write('\\x02ᅴ͚\\x03\\x02\\x02\\x02ᅵᅶ\\x05ѡȱ\\x02ᅶ')\n buf.write('ᅷ\\x05хȣ\\x02ᅷᅸ\\x05ћȮ\\x02ᅸ')\n buf.write('ᅹ\\x05уȢ\\x02ᅹ͜\\x03\\x02\\x02\\x02ᅺᅻ')\n buf.write('\\x05ѡȱ\\x02ᅻᅼ\\x05хȣ\\x02ᅼᅽ')\n buf.write('\\x05ћȮ\\x02ᅽᅾ\\x05уȢ\\x02ᅾᅿ')\n buf.write('\\x05хȣ\\x02ᅿᆀ\\x05яȨ\\x02ᆀ͞')\n buf.write('\\x03\\x02\\x02\\x02ᆁᆂ\\x05ѡȱ\\x02ᆂᆃ\\x05ё')\n buf.write('ȩ\\x02ᆃᆄ\\x05їȬ\\x02ᆄᆅ\\x05щ')\n buf.write('ȥ\\x02ᆅ͠\\x03\\x02\\x02\\x02ᆆᆇ\\x05ѡȱ')\n buf.write('\\x02ᆇᆈ\\x05їȬ\\x02ᆈᆉ\\x05хȣ')\n buf.write('\\x02ᆉᆊ\\x05ћȮ\\x02ᆊᆋ\\x05нȟ')\n buf.write('\\x02ᆋ͢\\x03\\x02\\x02\\x02ᆌᆍ\\x05ѣȲ\\x02ᆍ')\n 
buf.write('ᆎ\\x05эȧ\\x02ᆎᆏ\\x05ыȦ\\x02ᆏ')\n buf.write('ͤ\\x03\\x02\\x02\\x02ᆐᆑ\\x05ѣȲ\\x02ᆑᆒ')\n buf.write('\\x05эȧ\\x02ᆒᆓ\\x05ыȦ\\x02ᆓᆔ')\n buf.write('\\x05еț\\x02ᆔᆕ\\x05сȡ\\x02ᆕᆖ')\n buf.write('\\x05сȡ\\x02ᆖͦ\\x03\\x02\\x02\\x02ᆗᆘ\\x05ѣ')\n buf.write('Ȳ\\x02ᆘᆙ\\x05эȧ\\x02ᆙᆚ\\x05ы')\n buf.write('Ȧ\\x02ᆚᆛ\\x05еț\\x02ᆛᆜ\\x05ћ')\n buf.write('Ȯ\\x02ᆜᆝ\\x05ћȮ\\x02ᆝᆞ\\x05ї')\n buf.write('Ȭ\\x02ᆞᆟ\\x05хȣ\\x02ᆟᆠ\\x05з')\n buf.write('Ȝ\\x02ᆠᆡ\\x05ѝȯ\\x02ᆡᆢ\\x05ћ')\n buf.write('Ȯ\\x02ᆢᆣ\\x05нȟ\\x02ᆣᆤ\\x05љ')\n buf.write('ȭ\\x02ᆤͨ\\x03\\x02\\x02\\x02ᆥᆦ\\x05ѣȲ')\n buf.write('\\x02ᆦᆧ\\x05эȧ\\x02ᆧᆨ\\x05ыȦ')\n buf.write('\\x02ᆨᆩ\\x05йȝ\\x02ᆩᆪ\\x05еț')\n buf.write('\\x02ᆪᆫ\\x05љȭ\\x02ᆫᆬ\\x05ћȮ')\n buf.write('\\x02ᆬͪ\\x03\\x02\\x02\\x02ᆭᆮ\\x05ѣȲ\\x02ᆮ')\n buf.write('ᆯ\\x05эȧ\\x02ᆯᆰ\\x05ыȦ\\x02ᆰ')\n buf.write('ᆱ\\x05йȝ\\x02ᆱᆲ\\x05ёȩ\\x02ᆲ')\n buf.write('ᆳ\\x05ыȦ\\x02ᆳᆴ\\x05еț\\x02ᆴ')\n buf.write('ᆵ\\x05ћȮ\\x02ᆵᆶ\\x05ћȮ\\x02ᆶ')\n buf.write('ᆷ\\x05џȰ\\x02ᆷᆸ\\x05еț\\x02ᆸ')\n buf.write('ᆹ\\x05ыȦ\\x02ᆹͬ\\x03\\x02\\x02\\x02ᆺᆻ')\n buf.write('\\x05ѣȲ\\x02ᆻᆼ\\x05эȧ\\x02ᆼᆽ')\n buf.write('\\x05ыȦ\\x02ᆽᆾ\\x05нȟ\\x02ᆾᆿ')\n buf.write('\\x05ыȦ\\x02ᆿᇀ\\x05нȟ\\x02ᇀᇁ')\n buf.write('\\x05эȧ\\x02ᇁᇂ\\x05нȟ\\x02ᇂᇃ')\n buf.write('\\x05яȨ\\x02ᇃᇄ\\x05ћȮ\\x02ᇄͮ')\n buf.write('\\x03\\x02\\x02\\x02ᇅᇆ\\x05ѣȲ\\x02ᇆᇇ\\x05э')\n buf.write('ȧ\\x02ᇇᇈ\\x05ыȦ\\x02ᇈᇉ\\x05н')\n buf.write('ȟ\\x02ᇉᇊ\\x05ѣȲ\\x02ᇊᇋ\\x05х')\n buf.write('ȣ\\x02ᇋᇌ\\x05љȭ\\x02ᇌᇍ\\x05ћ')\n buf.write('Ȯ\\x02ᇍᇎ\\x05љȭ\\x02ᇎͰ\\x03\\x02\\x02')\n buf.write('\\x02ᇏᇐ\\x05ѣȲ\\x02ᇐᇑ\\x05эȧ')\n buf.write('\\x02ᇑᇒ\\x05ыȦ\\x02ᇒᇓ\\x05пȠ')\n buf.write('\\x02ᇓᇔ\\x05ёȩ\\x02ᇔᇕ\\x05їȬ')\n buf.write('\\x02ᇕᇖ\\x05нȟ\\x02ᇖᇗ\\x05љȭ')\n buf.write('\\x02ᇗᇘ\\x05ћȮ\\x02ᇘͲ\\x03\\x02\\x02\\x02ᇙ')\n buf.write('ᇚ\\x05ѣȲ\\x02ᇚᇛ\\x05эȧ\\x02ᇛ')\n buf.write('ᇜ\\x05ыȦ\\x02ᇜᇝ\\x05яȨ\\x02ᇝ')\n buf.write('ᇞ\\x05еț\\x02ᇞᇟ\\x05эȧ\\x02ᇟ')\n buf.write('ᇠ\\x05нȟ\\x02ᇠᇡ\\x05љȭ\\x02ᇡ')\n buf.write('ᇢ\\x05ѓȪ\\x02ᇢᇣ\\x05еț\\x02ᇣ')\n buf.write('ᇤ\\x05йȝ\\x02ᇤᇥ\\x05нȟ\\x02ᇥ')\n 
buf.write('ᇦ\\x05љȭ\\x02ᇦʹ\\x03\\x02\\x02\\x02ᇧᇨ')\n buf.write('\\x05ѣȲ\\x02ᇨᇩ\\x05эȧ\\x02ᇩᇪ')\n buf.write('\\x05ыȦ\\x02ᇪᇫ\\x05ѓȪ\\x02ᇫᇬ')\n buf.write('\\x05еț\\x02ᇬᇭ\\x05їȬ\\x02ᇭᇮ')\n buf.write('\\x05љȭ\\x02ᇮᇯ\\x05нȟ\\x02ᇯͶ')\n buf.write('\\x03\\x02\\x02\\x02ᇰᇱ\\x05ѣȲ\\x02ᇱᇲ\\x05э')\n buf.write('ȧ\\x02ᇲᇳ\\x05ыȦ\\x02ᇳᇴ\\x05ѓ')\n buf.write('Ȫ\\x02ᇴᇵ\\x05хȣ\\x02ᇵ\\u0378\\x03\\x02\\x02')\n buf.write('\\x02ᇶᇷ\\x05ѣȲ\\x02ᇷᇸ\\x05эȧ')\n buf.write('\\x02ᇸᇹ\\x05ыȦ\\x02ᇹᇺ\\x05ѕȫ')\n buf.write('\\x02ᇺᇻ\\x05ѝȯ\\x02ᇻᇼ\\x05нȟ')\n buf.write('\\x02ᇼᇽ\\x05їȬ\\x02ᇽᇾ\\x05ѥȳ')\n buf.write('\\x02ᇾͺ\\x03\\x02\\x02\\x02ᇿሀ\\x05ѣȲ\\x02ሀ')\n buf.write('ሁ\\x05эȧ\\x02ሁሂ\\x05ыȦ\\x02ሂ')\n buf.write('ሃ\\x05їȬ\\x02ሃሄ\\x05ёȩ\\x02ሄ')\n buf.write('ህ\\x05ёȩ\\x02ህሆ\\x05ћȮ\\x02ሆ')\n buf.write('ͼ\\x03\\x02\\x02\\x02ሇለ\\x05ѣȲ\\x02ለሉ')\n buf.write('\\x05эȧ\\x02ሉሊ\\x05ыȦ\\x02ሊላ')\n buf.write('\\x05љȭ\\x02ላሌ\\x05нȟ\\x02ሌል')\n buf.write('\\x05їȬ\\x02ልሎ\\x05хȣ\\x02ሎሏ')\n buf.write('\\x05еț\\x02ሏሐ\\x05ыȦ\\x02ሐሑ')\n buf.write('\\x05хȣ\\x02ሑሒ\\x05ѧȴ\\x02ሒሓ')\n buf.write('\\x05нȟ\\x02ሓ;\\x03\\x02\\x02\\x02ሔሕ\\x05ѣ')\n buf.write('Ȳ\\x02ሕሖ\\x05эȧ\\x02ሖሗ\\x05ы')\n buf.write('Ȧ\\x02ሗመ\\x05ћȮ\\x02መሙ\\x05е')\n buf.write('ț\\x02ሙሚ\\x05зȜ\\x02ሚማ\\x05ы')\n buf.write('Ȧ\\x02ማሜ\\x05нȟ\\x02ሜ\\u0380\\x03\\x02\\x02')\n buf.write('\\x02ምሞ\\x05ѥȳ\\x02ሞሟ\\x05нȟ')\n buf.write('\\x02ሟሠ\\x05еț\\x02ሠሡ\\x05їȬ')\n buf.write('\\x02ሡ\\u0382\\x03\\x02\\x02\\x02ሢሣ\\x05ѥȳ\\x02ሣ')\n buf.write('ሤ\\x05нȟ\\x02ሤሥ\\x05љȭ\\x02ሥ')\n buf.write('΄\\x03\\x02\\x02\\x02ሦሧ\\x05ѥȳ\\x02ሧረ')\n buf.write('\\x05эȧ\\x02ረሩ\\x05хȣ\\x02ሩሪ')\n buf.write('\\x05яȨ\\x02ሪራ\\x05ћȮ\\x02ራሬ')\n buf.write('\\x05нȟ\\x02ሬር\\x05їȬ\\x02ርሮ')\n buf.write('\\x05џȰ\\x02ሮሯ\\x05еț\\x02ሯሰ')\n buf.write('\\x05ыȦ\\x02ሰሱ\\x07a\\x02\\x02ሱሲ\\x05ѝ')\n buf.write('ȯ\\x02ሲሳ\\x05яȨ\\x02ሳሴ\\x05й')\n buf.write('ȝ\\x02ሴስ\\x05ёȩ\\x02ስሶ\\x05я')\n buf.write('Ȩ\\x02ሶሷ\\x05љȭ\\x02ሷሸ\\x05ћ')\n buf.write('Ȯ\\x02ሸሹ\\x05їȬ\\x02ሹሺ\\x05е')\n buf.write('ț\\x02ሺሻ\\x05хȣ\\x02ሻሼ\\x05я')\n 
buf.write('Ȩ\\x02ሼሽ\\x05нȟ\\x02ሽሾ\\x05л')\n buf.write('Ȟ\\x02ሾΆ\\x03\\x02\\x02\\x02ሿቀ\\x05ѧȴ')\n buf.write('\\x02ቀቁ\\x05ёȩ\\x02ቁቂ\\x05яȨ')\n buf.write('\\x02ቂቃ\\x05нȟ\\x02ቃΈ\\x03\\x02\\x02\\x02ቄ')\n buf.write('ቅ\\x05ѓȪ\\x02ቅቆ\\x05їȬ\\x02ቆ')\n buf.write('ቇ\\x05нȟ\\x02ቇቈ\\x05лȞ\\x02ቈ')\n buf.write('\\u1249\\x05хȣ\\x02\\u1249ቊ\\x05йȝ\\x02ቊ')\n buf.write('ቋ\\x05ћȮ\\x02ቋቌ\\x05хȣ\\x02ቌ')\n buf.write('ቍ\\x05ёȩ\\x02ቍ\\u124e\\x05яȨ\\x02\\u124e')\n buf.write('Ί\\x03\\x02\\x02\\x02\\u124fቐ\\x05ѓȪ\\x02ቐቑ')\n buf.write('\\x05їȬ\\x02ቑቒ\\x05нȟ\\x02ቒቓ')\n buf.write('\\x05лȞ\\x02ቓቔ\\x05хȣ\\x02ቔቕ')\n buf.write('\\x05йȝ\\x02ቕቖ\\x05ћȮ\\x02ቖ\\u1257')\n buf.write('\\x05хȣ\\x02\\u1257ቘ\\x05ёȩ\\x02ቘ\\u1259')\n buf.write('\\x05яȨ\\x02\\u1259ቚ\\x07a\\x02\\x02ቚቛ\\x05з')\n buf.write('Ȝ\\x02ቛቜ\\x05ёȩ\\x02ቜቝ\\x05ѝ')\n buf.write('ȯ\\x02ቝ\\u125e\\x05яȨ\\x02\\u125e\\u125f\\x05л')\n buf.write('Ȟ\\x02\\u125fበ\\x05љȭ\\x02በΌ\\x03\\x02\\x02')\n buf.write('\\x02ቡቢ\\x05ѓȪ\\x02ቢባ\\x05їȬ')\n buf.write('\\x02ባቤ\\x05нȟ\\x02ቤብ\\x05лȞ')\n buf.write('\\x02ብቦ\\x05хȣ\\x02ቦቧ\\x05йȝ')\n buf.write('\\x02ቧቨ\\x05ћȮ\\x02ቨቩ\\x05хȣ')\n buf.write('\\x02ቩቪ\\x05ёȩ\\x02ቪቫ\\x05яȨ')\n buf.write('\\x02ቫቬ\\x07a\\x02\\x02ቬቭ\\x05йȝ\\x02ቭ')\n buf.write('ቮ\\x05ёȩ\\x02ቮቯ\\x05љȭ\\x02ቯ')\n buf.write('ተ\\x05ћȮ\\x02ተΎ\\x03\\x02\\x02\\x02ቱቲ')\n buf.write('\\x05ѓȪ\\x02ቲታ\\x05їȬ\\x02ታቴ')\n buf.write('\\x05нȟ\\x02ቴት\\x05лȞ\\x02ትቶ')\n buf.write('\\x05хȣ\\x02ቶቷ\\x05йȝ\\x02ቷቸ')\n buf.write('\\x05ћȮ\\x02ቸቹ\\x05хȣ\\x02ቹቺ')\n buf.write('\\x05ёȩ\\x02ቺቻ\\x05яȨ\\x02ቻቼ')\n buf.write('\\x07a\\x02\\x02ቼች\\x05лȞ\\x02ችቾ\\x05н')\n buf.write('ȟ\\x02ቾቿ\\x05ћȮ\\x02ቿኀ\\x05е')\n buf.write('ț\\x02ኀኁ\\x05хȣ\\x02ኁኂ\\x05ы')\n buf.write('Ȧ\\x02ኂኃ\\x05љȭ\\x02ኃΐ\\x03\\x02\\x02')\n buf.write('\\x02ኄኅ\\x05ѓȪ\\x02ኅኆ\\x05їȬ')\n buf.write('\\x02ኆኇ\\x05нȟ\\x02ኇኈ\\x05лȞ')\n buf.write('\\x02ኈ\\u1289\\x05хȣ\\x02\\u1289ኊ\\x05йȝ')\n buf.write('\\x02ኊኋ\\x05ћȮ\\x02ኋኌ\\x05хȣ')\n buf.write('\\x02ኌኍ\\x05ёȩ\\x02ኍ\\u128e\\x05яȨ')\n 
buf.write('\\x02\\u128e\\u128f\\x07a\\x02\\x02\\u128fነ\\x05ѓȪ\\x02ነ')\n buf.write('ኑ\\x05їȬ\\x02ኑኒ\\x05ёȩ\\x02ኒ')\n buf.write('ና\\x05зȜ\\x02ናኔ\\x05еț\\x02ኔ')\n buf.write('ን\\x05зȜ\\x02ንኖ\\x05хȣ\\x02ኖ')\n buf.write('ኗ\\x05ыȦ\\x02ኗኘ\\x05хȣ\\x02ኘ')\n buf.write('ኙ\\x05ћȮ\\x02ኙኚ\\x05ѥȳ\\x02ኚ')\n buf.write('Β\\x03\\x02\\x02\\x02ኛኜ\\x05ѓȪ\\x02ኜኝ')\n buf.write('\\x05їȬ\\x02ኝኞ\\x05нȟ\\x02ኞኟ')\n buf.write('\\x05лȞ\\x02ኟአ\\x05хȣ\\x02አኡ')\n buf.write('\\x05йȝ\\x02ኡኢ\\x05ћȮ\\x02ኢኣ')\n buf.write('\\x05хȣ\\x02ኣኤ\\x05ёȩ\\x02ኤእ')\n buf.write('\\x05яȨ\\x02እኦ\\x07a\\x02\\x02ኦኧ\\x05љ')\n buf.write('ȭ\\x02ኧከ\\x05нȟ\\x02ከኩ\\x05ћ')\n buf.write('Ȯ\\x02ኩΔ\\x03\\x02\\x02\\x02ኪካ\\x05йȝ')\n buf.write('\\x02ካኬ\\x05ѝȯ\\x02ኬክ\\x05эȧ')\n buf.write('\\x02ክኮ\\x05нȟ\\x02ኮኯ\\x07a\\x02\\x02ኯ')\n buf.write('ኰ\\x05лȞ\\x02ኰ\\u12b1\\x05хȣ\\x02\\u12b1')\n buf.write('ኲ\\x05љȭ\\x02ኲኳ\\x05ћȮ\\x02ኳ')\n buf.write('Ζ\\x03\\x02\\x02\\x02ኴኵ\\x05лȞ\\x02ኵ\\u12b6')\n buf.write('\\x05нȟ\\x02\\u12b6\\u12b7\\x05яȨ\\x02\\u12b7ኸ')\n buf.write('\\x05љȭ\\x02ኸኹ\\x05нȟ\\x02ኹኺ')\n buf.write('\\x07a\\x02\\x02ኺኻ\\x05їȬ\\x02ኻኼ\\x05е')\n buf.write('ț\\x02ኼኽ\\x05яȨ\\x02ኽኾ\\x05щ')\n buf.write('ȥ\\x02ኾΘ\\x03\\x02\\x02\\x02\\u12bfዀ\\x05ыȦ')\n buf.write('\\x02ዀ\\u12c1\\x05хȣ\\x02\\u12c1ዂ\\x05љȭ')\n buf.write('\\x02ዂዃ\\x05ћȮ\\x02ዃዄ\\x05еț')\n buf.write('\\x02ዄዅ\\x05сȡ\\x02ዅ\\u12c6\\x05сȡ')\n buf.write('\\x02\\u12c6Κ\\x03\\x02\\x02\\x02\\u12c7ወ\\x05ѓȪ\\x02ወ')\n buf.write('ዉ\\x05нȟ\\x02ዉዊ\\x05їȬ\\x02ዊ')\n buf.write('ዋ\\x05йȝ\\x02ዋዌ\\x05нȟ\\x02ዌ')\n buf.write('ው\\x05яȨ\\x02ውዎ\\x05ћȮ\\x02ዎ')\n buf.write('ዏ\\x07a\\x02\\x02ዏዐ\\x05їȬ\\x02ዐዑ')\n buf.write('\\x05еț\\x02ዑዒ\\x05яȨ\\x02ዒዓ')\n buf.write('\\x05щȥ\\x02ዓΜ\\x03\\x02\\x02\\x02ዔዕ\\x05ѓ')\n buf.write('Ȫ\\x02ዕዖ\\x05нȟ\\x02ዖ\\u12d7\\x05ї')\n buf.write('Ȭ\\x02\\u12d7ዘ\\x05йȝ\\x02ዘዙ\\x05н')\n buf.write('ȟ\\x02ዙዚ\\x05яȨ\\x02ዚዛ\\x05ћ')\n buf.write('Ȯ\\x02ዛዜ\\x05хȣ\\x02ዜዝ\\x05ы')\n buf.write('Ȧ\\x02ዝዞ\\x05нȟ\\x02ዞዟ\\x07a\\x02')\n buf.write('\\x02ዟዠ\\x05йȝ\\x02ዠዡ\\x05ёȩ')\n 
buf.write('\\x02ዡዢ\\x05яȨ\\x02ዢዣ\\x05ћȮ')\n buf.write('\\x02ዣΞ\\x03\\x02\\x02\\x02ዤዥ\\x05ѓȪ\\x02ዥ')\n buf.write('ዦ\\x05нȟ\\x02ዦዧ\\x05їȬ\\x02ዧ')\n buf.write('የ\\x05йȝ\\x02የዩ\\x05нȟ\\x02ዩ')\n buf.write('ዪ\\x05яȨ\\x02ዪያ\\x05ћȮ\\x02ያ')\n buf.write('ዬ\\x05хȣ\\x02ዬይ\\x05ыȦ\\x02ይ')\n buf.write('ዮ\\x05нȟ\\x02ዮዯ\\x07a\\x02\\x02ዯደ')\n buf.write('\\x05лȞ\\x02ደዱ\\x05хȣ\\x02ዱዲ')\n buf.write('\\x05љȭ\\x02ዲዳ\\x05йȝ\\x02ዳΠ')\n buf.write('\\x03\\x02\\x02\\x02ዴድ\\x05їȬ\\x02ድዶ\\x05е')\n buf.write('ț\\x02ዶዷ\\x05яȨ\\x02ዷዸ\\x05щ')\n buf.write('ȥ\\x02ዸ\\u03a2\\x03\\x02\\x02\\x02ዹዺ\\x05еț')\n buf.write('\\x02ዺዻ\\x05џȰ\\x02ዻዼ\\x05сȡ')\n buf.write('\\x02ዼΤ\\x03\\x02\\x02\\x02ዽዾ\\x05йȝ\\x02ዾ')\n buf.write('ዿ\\x05ёȩ\\x02ዿጀ\\x05їȬ\\x02ጀ')\n buf.write('ጁ\\x05їȬ\\x02ጁΦ\\x03\\x02\\x02\\x02ጂጃ')\n buf.write('\\x05ыȦ\\x02ጃጄ\\x05еț\\x02ጄጅ')\n buf.write('\\x05сȡ\\x02ጅΨ\\x03\\x02\\x02\\x02ጆጇ\\x05ы')\n buf.write('Ȧ\\x02ጇገ\\x05нȟ\\x02ገጉ\\x05е')\n buf.write('ț\\x02ጉጊ\\x05лȞ\\x02ጊΪ\\x03\\x02\\x02')\n buf.write('\\x02ጋጌ\\x05эȧ\\x02ጌግ\\x05еț')\n buf.write('\\x02ግጎ\\x05ѣȲ\\x02ጎά\\x03\\x02\\x02\\x02ጏ')\n buf.write('ጐ\\x05эȧ\\x02ጐ\\u1311\\x05нȟ\\x02\\u1311')\n buf.write('ጒ\\x05лȞ\\x02ጒጓ\\x05хȣ\\x02ጓ')\n buf.write('ጔ\\x05еț\\x02ጔጕ\\x05яȨ\\x02ጕ')\n buf.write('ή\\x03\\x02\\x02\\x02\\u1316\\u1317\\x05эȧ\\x02\\u1317ጘ')\n buf.write('\\x05хȣ\\x02ጘጙ\\x05яȨ\\x02ጙΰ')\n buf.write('\\x03\\x02\\x02\\x02ጚጛ\\x05яȨ\\x02ጛጜ\\x05ћ')\n buf.write('Ȯ\\x02ጜጝ\\x05хȣ\\x02ጝጞ\\x05ы')\n buf.write('Ȧ\\x02ጞጟ\\x05нȟ\\x02ጟβ\\x03\\x02\\x02')\n buf.write('\\x02ጠጡ\\x05їȬ\\x02ጡጢ\\x05еț')\n buf.write('\\x02ጢጣ\\x05ћȮ\\x02ጣጤ\\x05хȣ')\n buf.write('\\x02ጤጥ\\x05ёȩ\\x02ጥጦ\\x07a\\x02\\x02ጦ')\n buf.write('ጧ\\x05ћȮ\\x02ጧጨ\\x05ёȩ\\x02ጨ')\n buf.write('ጩ\\x07a\\x02\\x02ጩጪ\\x05їȬ\\x02ጪጫ')\n buf.write('\\x05нȟ\\x02ጫጬ\\x05ѓȪ\\x02ጬጭ')\n buf.write('\\x05ёȩ\\x02ጭጮ\\x05їȬ\\x02ጮጯ')\n buf.write('\\x05ћȮ\\x02ጯδ\\x03\\x02\\x02\\x02ጰጱ\\x05ї')\n buf.write('Ȭ\\x02ጱጲ\\x05ёȩ\\x02ጲጳ\\x05ѡ')\n buf.write('ȱ\\x02ጳጴ\\x07a\\x02\\x02ጴጵ\\x05яȨ')\n 
buf.write('\\x02ጵጶ\\x05ѝȯ\\x02ጶጷ\\x05эȧ')\n buf.write('\\x02ጷጸ\\x05зȜ\\x02ጸጹ\\x05нȟ')\n buf.write('\\x02ጹጺ\\x05їȬ\\x02ጺζ\\x03\\x02\\x02\\x02ጻ')\n buf.write('ጼ\\x05љȭ\\x02ጼጽ\\x05ѝȯ\\x02ጽ')\n buf.write('ጾ\\x05эȧ\\x02ጾθ\\x03\\x02\\x02\\x02ጿፀ')\n buf.write('\\x05џȰ\\x02ፀፁ\\x05еț\\x02ፁፂ')\n buf.write('\\x05їȬ\\x02ፂፃ\\x05хȣ\\x02ፃፄ')\n buf.write('\\x05еț\\x02ፄፅ\\x05яȨ\\x02ፅፆ')\n buf.write('\\x05йȝ\\x02ፆፇ\\x05нȟ\\x02ፇκ')\n buf.write('\\x03\\x02\\x02\\x02ፈፉ\\x05їȬ\\x02ፉፊ\\x05н')\n buf.write('ȟ\\x02ፊፋ\\x05сȡ\\x02ፋፌ\\x05ї')\n buf.write('Ȭ\\x02ፌፍ\\x07a\\x02\\x02ፍμ\\x03\\x02\\x02\\x02ፎ')\n buf.write('ፏ\\x05љȭ\\x02ፏፐ\\x05ћȮ\\x02ፐ')\n buf.write('ፑ\\x05лȞ\\x02ፑፒ\\x05лȞ\\x02ፒ')\n buf.write('ፓ\\x05нȟ\\x02ፓፔ\\x05џȰ\\x02ፔ')\n buf.write('ξ\\x03\\x02\\x02\\x02ፕፖ\\x05џȰ\\x02ፖፗ')\n buf.write('\\x05еț\\x02ፗፘ\\x05їȬ\\x02ፘፙ')\n buf.write('\\x07a\\x02\\x02ፙπ\\x03\\x02\\x02\\x02ፚ\\u135b\\x05йȝ')\n buf.write('\\x02\\u135b\\u135c\\x05ёȩ\\x02\\u135c፝\\x05џȰ')\n buf.write('\\x02፝፞\\x05еț\\x02፞፟\\x05їȬ')\n buf.write('\\x02፟፠\\x07a\\x02\\x02፠ς\\x03\\x02\\x02\\x02፡።')\n buf.write('\\x05яȨ\\x02።፩\\x07)\\x02\\x02፣፨\\n\\x02\\x02')\n buf.write('\\x02፤፥\\x07)\\x02\\x02፥፨\\x07)\\x02\\x02፦፨\\x05')\n buf.write('Эȗ\\x02፧፣\\x03\\x02\\x02\\x02፧፤\\x03\\x02\\x02\\x02')\n buf.write('፧፦\\x03\\x02\\x02\\x02፨፫\\x03\\x02\\x02\\x02፩፧\\x03')\n buf.write('\\x02\\x02\\x02፩፪\\x03\\x02\\x02\\x02፪፬\\x03\\x02\\x02\\x02፫፩')\n buf.write('\\x03\\x02\\x02\\x02፬፭\\x07)\\x02\\x02፭τ\\x03\\x02\\x02\\x02፮')\n buf.write('፷\\x05зȜ\\x02፯፳\\x07)\\x02\\x02፰፲')\n buf.write('\\x0423\\x02፱፰\\x03\\x02\\x02\\x02፲፵\\x03\\x02\\x02\\x02፳')\n buf.write('፱\\x03\\x02\\x02\\x02፳፴\\x03\\x02\\x02\\x02፴፶\\x03\\x02\\x02\\x02')\n buf.write('፵፳\\x03\\x02\\x02\\x02፶፸\\x07)\\x02\\x02፷፯\\x03')\n buf.write('\\x02\\x02\\x02፸፹\\x03\\x02\\x02\\x02፹፷\\x03\\x02\\x02\\x02፹፺')\n buf.write('\\x03\\x02\\x02\\x02፺φ\\x03\\x02\\x02\\x02፻ᎄ\\x05ѣȲ')\n buf.write('\\x02፼ᎀ\\x07)\\x02\\x02\\u137d\\u137f\\t\\x03\\x02\\x02\\u137e\\u137d')\n buf.write(\n 
'\\x03\\x02\\x02\\x02\\u137fᎂ\\x03\\x02\\x02\\x02ᎀ\\u137e\\x03\\x02\\x02\\x02ᎀ')\n buf.write('ᎁ\\x03\\x02\\x02\\x02ᎁᎃ\\x03\\x02\\x02\\x02ᎂᎀ\\x03\\x02\\x02\\x02')\n buf.write('ᎃᎅ\\x07)\\x02\\x02ᎄ፼\\x03\\x02\\x02\\x02ᎅᎆ\\x03')\n buf.write('\\x02\\x02\\x02ᎆᎄ\\x03\\x02\\x02\\x02ᎆᎇ\\x03\\x02\\x02\\x02ᎇψ')\n buf.write('\\x03\\x02\\x02\\x02ᎈᎉ\\x070\\x02\\x02ᎉᎊ\\x070\\x02\\x02ᎊ')\n buf.write('ϊ\\x03\\x02\\x02\\x02ᎋᎌ\\x070\\x02\\x02ᎌό\\x03\\x02\\x02')\n buf.write('\\x02ᎍᎎ\\x05УȒ\\x02ᎎώ\\x03\\x02\\x02\\x02ᎏ')\n buf.write('᎘\\x05Хȓ\\x02᎐᎒\\t\\x04\\x02\\x02᎑᎓')\n buf.write('\\t\\x05\\x02\\x02᎒᎑\\x03\\x02\\x02\\x02᎒᎓\\x03\\x02\\x02\\x02᎓')\n buf.write('᎖\\x03\\x02\\x02\\x02᎔᎗\\x05Хȓ\\x02᎕᎗')\n buf.write('\\x05УȒ\\x02᎖᎔\\x03\\x02\\x02\\x02᎖᎕\\x03\\x02\\x02')\n buf.write('\\x02᎗᎙\\x03\\x02\\x02\\x02᎘᎐\\x03\\x02\\x02\\x02᎘᎙')\n buf.write('\\x03\\x02\\x02\\x02᎙\\u139c\\x03\\x02\\x02\\x02\\u139a\\u139d\\x05лȞ')\n buf.write(\n '\\x02\\u139b\\u139d\\x05пȠ\\x02\\u139c\\u139a\\x03\\x02\\x02\\x02\\u139c')\n buf.write(\n '\\u139b\\x03\\x02\\x02\\x02\\u139c\\u139d\\x03\\x02\\x02\\x02\\u139dϐ\\x03\\x02\\x02\\x02'\n )\n buf.write('\\u139eᎥ\\x07)\\x02\\x02\\u139fᎤ\\n\\x02\\x02\\x02ᎠᎡ\\x07')\n buf.write(')\\x02\\x02ᎡᎤ\\x07)\\x02\\x02ᎢᎤ\\x05Эȗ\\x02Ꭳ')\n buf.write('\\u139f\\x03\\x02\\x02\\x02ᎣᎠ\\x03\\x02\\x02\\x02ᎣᎢ\\x03\\x02\\x02\\x02')\n buf.write('ᎤᎧ\\x03\\x02\\x02\\x02ᎥᎣ\\x03\\x02\\x02\\x02ᎥᎦ\\x03')\n buf.write('\\x02\\x02\\x02ᎦᎨ\\x03\\x02\\x02\\x02ᎧᎥ\\x03\\x02\\x02\\x02ᎨᎩ')\n buf.write('\\x07)\\x02\\x02Ꭹϒ\\x03\\x02\\x02\\x02ᎪᎯ\\x05ѕȫ')\n buf.write('\\x02ᎫᎰ\\x05ϗǬ\\x02ᎬᎰ\\x05ϙǭ')\n buf.write('\\x02ᎭᎰ\\x05ϛǮ\\x02ᎮᎰ\\x05ϝǯ')\n buf.write('\\x02ᎯᎫ\\x03\\x02\\x02\\x02ᎯᎬ\\x03\\x02\\x02\\x02ᎯᎭ')\n buf.write('\\x03\\x02\\x02\\x02ᎯᎮ\\x03\\x02\\x02\\x02ᎰᎱ\\x03\\x02\\x02\\x02Ꮁ')\n buf.write('Ꮂ\\x08Ǫ\\x02\\x02Ꮂϔ\\x03\\x02\\x02\\x02ᎳᎴ\\x07)')\n buf.write('\\x02\\x02Ꮄϖ\\x03\\x02\\x02\\x02ᎵᎶ\\x05ϕǫ\\x02Ꮆ')\n buf.write('Ꮊ\\x07>\\x02\\x02ᎷᎹ\\x0b\\x02\\x02\\x02ᎸᎷ\\x03\\x02\\x02\\x02')\n 
buf.write('ᎹᎼ\\x03\\x02\\x02\\x02ᎺᎻ\\x03\\x02\\x02\\x02ᎺᎸ\\x03')\n buf.write('\\x02\\x02\\x02ᎻᎽ\\x03\\x02\\x02\\x02ᎼᎺ\\x03\\x02\\x02\\x02ᎽᎾ')\n buf.write('\\x07@\\x02\\x02ᎾᎿ\\x05ϕǫ\\x02ᎿϘ\\x03\\x02\\x02')\n buf.write('\\x02ᏀᏁ\\x05ϕǫ\\x02ᏁᏅ\\x07}\\x02\\x02Ꮒ')\n buf.write('Ꮔ\\x0b\\x02\\x02\\x02ᏃᏂ\\x03\\x02\\x02\\x02ᏄᏇ\\x03\\x02\\x02')\n buf.write('\\x02ᏅᏆ\\x03\\x02\\x02\\x02ᏅᏃ\\x03\\x02\\x02\\x02ᏆᏈ')\n buf.write('\\x03\\x02\\x02\\x02ᏇᏅ\\x03\\x02\\x02\\x02ᏈᏉ\\x07\\x7f\\x02\\x02Ꮙ')\n buf.write('Ꮚ\\x05ϕǫ\\x02ᏊϚ\\x03\\x02\\x02\\x02ᏋᏌ')\n buf.write('\\x05ϕǫ\\x02ᏌᏐ\\x07]\\x02\\x02ᏍᏏ\\x0b\\x02\\x02')\n buf.write('\\x02ᏎᏍ\\x03\\x02\\x02\\x02ᏏᏒ\\x03\\x02\\x02\\x02ᏐᏑ')\n buf.write('\\x03\\x02\\x02\\x02ᏐᏎ\\x03\\x02\\x02\\x02ᏑᏓ\\x03\\x02\\x02\\x02Ꮢ')\n buf.write('Ꮠ\\x03\\x02\\x02\\x02ᏓᏔ\\x07_\\x02\\x02ᏔᏕ\\x05ϕ')\n buf.write('ǫ\\x02ᏕϜ\\x03\\x02\\x02\\x02ᏖᏗ\\x05ϕǫ')\n buf.write('\\x02ᏗᏛ\\x07*\\x02\\x02ᏘᏚ\\x0b\\x02\\x02\\x02ᏙᏘ')\n buf.write('\\x03\\x02\\x02\\x02ᏚᏝ\\x03\\x02\\x02\\x02ᏛᏜ\\x03\\x02\\x02\\x02Ꮫ')\n buf.write('Ꮩ\\x03\\x02\\x02\\x02ᏜᏞ\\x03\\x02\\x02\\x02ᏝᏛ\\x03\\x02\\x02\\x02')\n buf.write('ᏞᏟ\\x07+\\x02\\x02ᏟᏠ\\x05ϕǫ\\x02Ꮰ')\n buf.write('Ϟ\\x03\\x02\\x02\\x02ᏡᏢ\\n\\x06\\x02\\x02ᏢϠ\\x03\\x02\\x02\\x02')\n buf.write('ᏣᏧ\\x07$\\x02\\x02ᏤᏨ\\n\\x07\\x02\\x02ᏥᏦ\\x07')\n buf.write('$\\x02\\x02ᏦᏨ\\x07$\\x02\\x02ᏧᏤ\\x03\\x02\\x02\\x02ᏧᏥ')\n buf.write('\\x03\\x02\\x02\\x02ᏨᏩ\\x03\\x02\\x02\\x02ᏩᏧ\\x03\\x02\\x02\\x02Ꮹ')\n buf.write('Ꮺ\\x03\\x02\\x02\\x02ᏪᏫ\\x03\\x02\\x02\\x02ᏫᏬ\\x07$\\x02\\x02')\n buf.write(\"ᏬϢ\\x03\\x02\\x02\\x02ᏭᏮ\\x07'\\x02\\x02ᏮϤ\\x03\")\n buf.write('\\x02\\x02\\x02ᏯᏰ\\x07(\\x02\\x02ᏰϦ\\x03\\x02\\x02\\x02ᏱᏲ')\n buf.write('\\x07*\\x02\\x02ᏲϨ\\x03\\x02\\x02\\x02ᏳᏴ\\x07+\\x02\\x02ᏴϪ')\n buf.write(\n '\\x03\\x02\\x02\\x02Ᏽ\\u13f6\\x07,\\x02\\x02\\u13f6\\u13f7\\x07,\\x02\\x02\\u13f7Ϭ'\n )\n buf.write('\\x03\\x02\\x02\\x02ᏸᏹ\\x07,\\x02\\x02ᏹϮ\\x03\\x02\\x02\\x02ᏺ')\n buf.write('ᏻ\\x07-\\x02\\x02ᏻϰ\\x03\\x02\\x02\\x02ᏼᏽ\\x07/\\x02\\x02ᏽ')\n buf.write(\n 
'ϲ\\x03\\x02\\x02\\x02\\u13fe\\u13ff\\x07.\\x02\\x02\\u13ffϴ\\x03\\x02\\x02\\x02'\n )\n buf.write('᐀ᐁ\\x071\\x02\\x02ᐁ϶\\x03\\x02\\x02\\x02ᐂᐃ')\n buf.write('\\x07B\\x02\\x02ᐃϸ\\x03\\x02\\x02\\x02ᐄᐅ\\x07<\\x02\\x02ᐅᐆ')\n buf.write('\\x07?\\x02\\x02ᐆϺ\\x03\\x02\\x02\\x02ᐇᐈ\\x07<\\x02\\x02ᐈᐍ')\n buf.write('\\x05Сȑ\\x02ᐉᐌ\\x05Сȑ\\x02ᐊᐌ')\n buf.write('\\t\\x08\\x02\\x02ᐋᐉ\\x03\\x02\\x02\\x02ᐋᐊ\\x03\\x02\\x02\\x02ᐌ')\n buf.write('ᐏ\\x03\\x02\\x02\\x02ᐍᐋ\\x03\\x02\\x02\\x02ᐍᐎ\\x03\\x02\\x02\\x02')\n buf.write('ᐎᐖ\\x03\\x02\\x02\\x02ᐏᐍ\\x03\\x02\\x02\\x02ᐐᐑ\\x07')\n buf.write('<\\x02\\x02ᐑᐖ\\x05ϡDZ\\x02ᐒᐓ\\x07<\\x02\\x02ᐓ')\n buf.write('ᐖ\\x05ύǧ\\x02ᐔᐖ\\x05Бȉ\\x02ᐕ')\n buf.write('ᐇ\\x03\\x02\\x02\\x02ᐕᐐ\\x03\\x02\\x02\\x02ᐕᐒ\\x03\\x02\\x02\\x02')\n buf.write('ᐕᐔ\\x03\\x02\\x02\\x02ᐖϼ\\x03\\x02\\x02\\x02ᐗᐘ\\x07')\n buf.write('<\\x02\\x02ᐘϾ\\x03\\x02\\x02\\x02ᐙᐚ\\x07=\\x02\\x02ᐚЀ')\n buf.write('\\x03\\x02\\x02\\x02ᐛᐜ\\x07>\\x02\\x02ᐜᐝ\\x07?\\x02\\x02ᐝЂ')\n buf.write('\\x03\\x02\\x02\\x02ᐞᐟ\\x07>\\x02\\x02ᐟЄ\\x03\\x02\\x02\\x02ᐠ')\n buf.write('ᐡ\\x07@\\x02\\x02ᐡᐢ\\x07?\\x02\\x02ᐢІ\\x03\\x02\\x02\\x02ᐣ')\n buf.write('ᐤ\\x07#\\x02\\x02ᐤᐬ\\x07?\\x02\\x02ᐥᐦ\\x07>\\x02\\x02ᐦ')\n buf.write('ᐬ\\x07@\\x02\\x02ᐧᐨ\\x07`\\x02\\x02ᐨᐬ\\x07?\\x02\\x02ᐩ')\n buf.write('ᐪ\\x07\\x80\\x02\\x02ᐪᐬ\\x07?\\x02\\x02ᐫᐣ\\x03\\x02')\n buf.write('\\x02\\x02ᐫᐥ\\x03\\x02\\x02\\x02ᐫᐧ\\x03\\x02\\x02\\x02ᐫᐩ')\n buf.write('\\x03\\x02\\x02\\x02ᐬЈ\\x03\\x02\\x02\\x02ᐭᐮ\\x07`\\x02\\x02ᐮ')\n buf.write('Њ\\x03\\x02\\x02\\x02ᐯᐰ\\x07\\x80\\x02\\x02ᐰЌ\\x03\\x02')\n buf.write('\\x02\\x02ᐱᐲ\\x07#\\x02\\x02ᐲЎ\\x03\\x02\\x02\\x02ᐳᐴ')\n buf.write('\\x07@\\x02\\x02ᐴА\\x03\\x02\\x02\\x02ᐵᐶ\\x07A\\x02\\x02ᐶВ')\n buf.write('\\x03\\x02\\x02\\x02ᐷᐸ\\x07~\\x02\\x02ᐸᐹ\\x07~\\x02\\x02ᐹД')\n buf.write('\\x03\\x02\\x02\\x02ᐺᐻ\\x07~\\x02\\x02ᐻЖ\\x03\\x02\\x02\\x02ᐼ')\n buf.write('ᐽ\\x07?\\x02\\x02ᐽИ\\x03\\x02\\x02\\x02ᐾᐿ\\x07]\\x02\\x02ᐿ')\n buf.write('К\\x03\\x02\\x02\\x02ᑀᑁ\\x07_\\x02\\x02ᑁМ\\x03\\x02\\x02\\x02')\n 
buf.write('ᑂᑃ\\x07a\\x02\\x02ᑃО\\x03\\x02\\x02\\x02ᑄᑆ\\t')\n buf.write('\\t\\x02\\x02ᑅᑄ\\x03\\x02\\x02\\x02ᑆᑇ\\x03\\x02\\x02\\x02ᑇᑅ')\n buf.write('\\x03\\x02\\x02\\x02ᑇᑈ\\x03\\x02\\x02\\x02ᑈᑉ\\x03\\x02\\x02\\x02ᑉ')\n buf.write('ᑊ\\x08Ȑ\\x03\\x02ᑊР\\x03\\x02\\x02\\x02ᑋᑌ\\t\\n')\n buf.write('\\x02\\x02ᑌТ\\x03\\x02\\x02\\x02ᑍᑏ\\x042;\\x02ᑎᑍ')\n buf.write('\\x03\\x02\\x02\\x02ᑏᑐ\\x03\\x02\\x02\\x02ᑐᑎ\\x03\\x02\\x02\\x02ᑐ')\n buf.write('ᑑ\\x03\\x02\\x02\\x02ᑑФ\\x03\\x02\\x02\\x02ᑒᑔ\\x05ύ')\n buf.write('ǧ\\x02ᑓᑒ\\x03\\x02\\x02\\x02ᑔᑗ\\x03\\x02\\x02\\x02ᑕ')\n buf.write('ᑓ\\x03\\x02\\x02\\x02ᑕᑖ\\x03\\x02\\x02\\x02ᑖᑙ\\x03\\x02\\x02\\x02')\n buf.write('ᑗᑕ\\x03\\x02\\x02\\x02ᑘᑚ\\x070\\x02\\x02ᑙᑘ')\n buf.write('\\x03\\x02\\x02\\x02ᑙᑚ\\x03\\x02\\x02\\x02ᑚᑜ\\x03\\x02\\x02\\x02ᑛ')\n buf.write('ᑝ\\x05ύǧ\\x02ᑜᑛ\\x03\\x02\\x02\\x02ᑝᑞ')\n buf.write('\\x03\\x02\\x02\\x02ᑞᑜ\\x03\\x02\\x02\\x02ᑞᑟ\\x03\\x02\\x02\\x02ᑟ')\n buf.write('Ц\\x03\\x02\\x02\\x02ᑠᑡ\\x07/\\x02\\x02ᑡᑢ\\x07/\\x02\\x02ᑢ')\n buf.write('ᑦ\\x03\\x02\\x02\\x02ᑣᑥ\\n\\x0b\\x02\\x02ᑤᑣ\\x03\\x02\\x02')\n buf.write('\\x02ᑥᑨ\\x03\\x02\\x02\\x02ᑦᑤ\\x03\\x02\\x02\\x02ᑦᑧ')\n buf.write('\\x03\\x02\\x02\\x02ᑧᑫ\\x03\\x02\\x02\\x02ᑨᑦ\\x03\\x02\\x02\\x02ᑩ')\n buf.write('ᑬ\\x05Эȗ\\x02ᑪᑬ\\x07\\x02\\x02\\x03ᑫᑩ')\n buf.write('\\x03\\x02\\x02\\x02ᑫᑪ\\x03\\x02\\x02\\x02ᑬᑭ\\x03\\x02\\x02\\x02ᑭ')\n buf.write('ᑮ\\x08Ȕ\\x04\\x02ᑮШ\\x03\\x02\\x02\\x02ᑯᑰ\\x071')\n buf.write('\\x02\\x02ᑰᑱ\\x07,\\x02\\x02ᑱᑵ\\x03\\x02\\x02\\x02ᑲᑴ')\n buf.write('\\x0b\\x02\\x02\\x02ᑳᑲ\\x03\\x02\\x02\\x02ᑴᑷ\\x03\\x02\\x02\\x02ᑵ')\n buf.write('ᑶ\\x03\\x02\\x02\\x02ᑵᑳ\\x03\\x02\\x02\\x02ᑶᑸ\\x03\\x02\\x02\\x02')\n buf.write('ᑷᑵ\\x03\\x02\\x02\\x02ᑸᑹ\\x07,\\x02\\x02ᑹᑺ\\x07')\n buf.write('1\\x02\\x02ᑺᑻ\\x03\\x02\\x02\\x02ᑻᑼ\\x08ȕ\\x04\\x02ᑼ')\n buf.write('Ъ\\x03\\x02\\x02\\x02ᑽᑾ\\x07r\\x02\\x02ᑾᑿ\\x07t\\x02\\x02ᑿ')\n buf.write('ᒀ\\x07q\\x02\\x02ᒀᒁ\\x07o\\x02\\x02ᒁᒂ\\x07r\\x02\\x02ᒂ')\n buf.write('ᒃ\\x07v\\x02\\x02ᒃᒄ\\x03\\x02\\x02\\x02ᒄᒈ\\x05Я')\n 
buf.write('Ș\\x02ᒅᒇ\\n\\x0b\\x02\\x02ᒆᒅ\\x03\\x02\\x02\\x02ᒇ')\n buf.write('ᒊ\\x03\\x02\\x02\\x02ᒈᒆ\\x03\\x02\\x02\\x02ᒈᒉ\\x03\\x02\\x02\\x02')\n buf.write('ᒉᒍ\\x03\\x02\\x02\\x02ᒊᒈ\\x03\\x02\\x02\\x02ᒋᒎ\\x05')\n buf.write('Эȗ\\x02ᒌᒎ\\x07\\x02\\x02\\x03ᒍᒋ\\x03\\x02\\x02\\x02')\n buf.write('ᒍᒌ\\x03\\x02\\x02\\x02ᒎЬ\\x03\\x02\\x02\\x02ᒏᒑ\\x07')\n buf.write('\\x0f\\x02\\x02ᒐᒏ\\x03\\x02\\x02\\x02ᒐᒑ\\x03\\x02\\x02\\x02ᒑ')\n buf.write('ᒒ\\x03\\x02\\x02\\x02ᒒᒓ\\x07\\x0c\\x02\\x02ᒓЮ\\x03\\x02\\x02\\x02')\n buf.write('ᒔᒕ\\t\\x0c\\x02\\x02ᒕа\\x03\\x02\\x02\\x02ᒖᒛ\\x05')\n buf.write('Сȑ\\x02ᒗᒚ\\x05Сȑ\\x02ᒘᒚ')\n buf.write('\\t\\r\\x02\\x02ᒙᒗ\\x03\\x02\\x02\\x02ᒙᒘ\\x03\\x02\\x02\\x02ᒚ')\n buf.write('ᒝ\\x03\\x02\\x02\\x02ᒛᒙ\\x03\\x02\\x02\\x02ᒛᒜ\\x03\\x02\\x02\\x02')\n buf.write('ᒜв\\x03\\x02\\x02\\x02ᒝᒛ\\x03\\x02\\x02\\x02ᒞᒟ\\x07')\n buf.write('B\\x02\\x02ᒟᒠ\\x07#\\x02\\x02ᒠᒡ\\x03\\x02\\x02\\x02ᒡᒢ')\n buf.write('\\x08Ț\\x04\\x02ᒢд\\x03\\x02\\x02\\x02ᒣᒤ\\t\\x0e\\x02\\x02')\n buf.write('ᒤж\\x03\\x02\\x02\\x02ᒥᒦ\\t\\x0f\\x02\\x02ᒦи')\n buf.write('\\x03\\x02\\x02\\x02ᒧᒨ\\t\\x10\\x02\\x02ᒨк\\x03\\x02\\x02\\x02ᒩ')\n buf.write('ᒪ\\t\\x11\\x02\\x02ᒪм\\x03\\x02\\x02\\x02ᒫᒬ\\t\\x04\\x02')\n buf.write('\\x02ᒬо\\x03\\x02\\x02\\x02ᒭᒮ\\t\\x12\\x02\\x02ᒮр')\n buf.write('\\x03\\x02\\x02\\x02ᒯᒰ\\t\\x13\\x02\\x02ᒰт\\x03\\x02\\x02\\x02ᒱ')\n buf.write('ᒲ\\t\\x14\\x02\\x02ᒲф\\x03\\x02\\x02\\x02ᒳᒴ\\t\\x15\\x02')\n buf.write('\\x02ᒴц\\x03\\x02\\x02\\x02ᒵᒶ\\t\\x16\\x02\\x02ᒶш')\n buf.write('\\x03\\x02\\x02\\x02ᒷᒸ\\t\\x17\\x02\\x02ᒸъ\\x03\\x02\\x02\\x02ᒹ')\n buf.write('ᒺ\\t\\x18\\x02\\x02ᒺь\\x03\\x02\\x02\\x02ᒻᒼ\\t\\x19\\x02')\n buf.write('\\x02ᒼю\\x03\\x02\\x02\\x02ᒽᒾ\\t\\x1a\\x02\\x02ᒾѐ')\n buf.write('\\x03\\x02\\x02\\x02ᒿᓀ\\t\\x1b\\x02\\x02ᓀђ\\x03\\x02\\x02\\x02ᓁ')\n buf.write('ᓂ\\t\\x1c\\x02\\x02ᓂє\\x03\\x02\\x02\\x02ᓃᓄ\\t\\x1d\\x02')\n buf.write('\\x02ᓄі\\x03\\x02\\x02\\x02ᓅᓆ\\t\\x1e\\x02\\x02ᓆј')\n buf.write('\\x03\\x02\\x02\\x02ᓇᓈ\\t\\x1f\\x02\\x02ᓈњ\\x03\\x02\\x02\\x02ᓉ')\n buf.write('ᓊ\\t 
\\x02\\x02ᓊќ\\x03\\x02\\x02\\x02ᓋᓌ\\t!\\x02\\x02ᓌ')\n buf.write('ў\\x03\\x02\\x02\\x02ᓍᓎ\\t\"\\x02\\x02ᓎѠ\\x03\\x02\\x02\\x02')\n buf.write('ᓏᓐ\\t#\\x02\\x02ᓐѢ\\x03\\x02\\x02\\x02ᓑᓒ\\t')\n buf.write('$\\x02\\x02ᓒѤ\\x03\\x02\\x02\\x02ᓓᓔ\\t%\\x02\\x02ᓔѦ')\n buf.write(\"\\x03\\x02\\x02\\x02ᓕᓖ\\t&\\x02\\x02ᓖѨ\\x03\\x02\\x02\\x02'\\x02፧\")\n buf.write('፩፳፹ᎀᎆ᎒᎖᎘\\u139c')\n buf.write('ᎣᎥᎯᎺᏅᏐᏛᏧᏩ')\n buf.write('ᐋᐍᐕᐫᑇᑐᑕᑙᑞ')\n buf.write('ᑦᑫᑵᒈᒍᒐᒙᒛ\\x05\\tǪ')\n buf.write('\\x02\\x08\\x02\\x02\\x02\\x03\\x02')\n return buf.getvalue()\n\n\nclass PlSqlLexer(Lexer):\n atn = ATNDeserializer().deserialize(serializedATN())\n decisionsToDFA = [DFA(ds, i) for i, ds in enumerate(atn.decisionToState)]\n T__0 = 1\n A_LETTER = 2\n ADD = 3\n AFTER = 4\n AGENT = 5\n AGGREGATE = 6\n ALL = 7\n ALTER = 8\n ANALYZE = 9\n AND = 10\n ANY = 11\n ARRAY = 12\n AS = 13\n ASSUME = 14\n ASSERT = 15\n ASC = 16\n ASSOCIATE = 17\n AT = 18\n ATTRIBUTE = 19\n AUDIT = 20\n AUTHID = 21\n AUTO = 22\n AUTOMATIC = 23\n AUTONOMOUS_TRANSACTION = 24\n BATCH = 25\n BEFORE = 26\n BEGIN = 27\n BETWEEN = 28\n BFILE = 29\n BINARY_DOUBLE = 30\n BINARY_FLOAT = 31\n BINARY_INTEGER = 32\n BLOB = 33\n BLOCK = 34\n BODY = 35\n BOOLEAN = 36\n BOTH = 37\n BREADTH = 38\n BULK = 39\n BY = 40\n BYTE = 41\n C_LETTER = 42\n CACHE = 43\n CALL = 44\n CANONICAL = 45\n CASCADE = 46\n CASE = 47\n CAST = 48\n CHAR = 49\n CHAR_CS = 50\n CHARACTER = 51\n CHECK = 52\n CHR = 53\n CLOB = 54\n CLOSE = 55\n CLUSTER = 56\n COLLECT = 57\n COLUMNS = 58\n COMMENT = 59\n COMMIT = 60\n COMMITTED = 61\n COMPATIBILITY = 62\n COMPILE = 63\n COMPOUND = 64\n CONNECT = 65\n CONNECT_BY_ROOT = 66\n CONSTANT = 67\n CONSTRAINT = 68\n CONSTRAINTS = 69\n CONSTRUCTOR = 70\n CONTENT = 71\n CONTEXT = 72\n CONTINUE = 73\n CONVERT = 74\n CORRUPT_XID = 75\n CORRUPT_XID_ALL = 76\n COST = 77\n COUNT = 78\n CREATE = 79\n CROSS = 80\n CUBE = 81\n CURRENT = 82\n CURRENT_USER = 83\n CURSOR = 84\n CUSTOMDATUM = 85\n CYCLE = 86\n DATA = 87\n DATABASE = 88\n DATE = 89\n DAY = 
90\n DB_ROLE_CHANGE = 91\n DBTIMEZONE = 92\n DDL = 93\n DEBUG = 94\n DEC = 95\n DECIMAL = 96\n DECLARE = 97\n DECOMPOSE = 98\n DECREMENT = 99\n DEFAULT = 100\n DEFAULTS = 101\n DEFERRED = 102\n DEFINER = 103\n DELETE = 104\n DEPTH = 105\n DESC = 106\n DETERMINISTIC = 107\n DIMENSION = 108\n DISABLE = 109\n DISASSOCIATE = 110\n DISTINCT = 111\n DOCUMENT = 112\n DOUBLE = 113\n DROP = 114\n DSINTERVAL_UNCONSTRAINED = 115\n EACH = 116\n ELEMENT = 117\n ELSE = 118\n ELSIF = 119\n EMPTY = 120\n ENABLE = 121\n ENCODING = 122\n END = 123\n ENTITYESCAPING = 124\n ERR = 125\n ERRORS = 126\n ESCAPE = 127\n EVALNAME = 128\n EXCEPT = 129\n EXCEPTION = 130\n EXCEPTION_INIT = 131\n EXCEPTIONS = 132\n EXCLUDE = 133\n EXCLUSIVE = 134\n EXECUTE = 135\n EXISTS = 136\n EXIT = 137\n EXPLAIN = 138\n EXTERNAL = 139\n EXTRACT = 140\n FAILURE = 141\n FALSE = 142\n FETCH = 143\n FINAL = 144\n FIRST = 145\n FIRST_VALUE = 146\n FLOAT = 147\n FOLLOWING = 148\n FOLLOWS = 149\n FOR = 150\n FORALL = 151\n FORCE = 152\n FROM = 153\n FULL = 154\n FUNCTION = 155\n GOTO = 156\n GRANT = 157\n GROUP = 158\n GROUPING = 159\n HASH = 160\n HAVING = 161\n HIDE = 162\n HOUR = 163\n IF = 164\n IGNORE = 165\n IMMEDIATE = 166\n IN = 167\n INCLUDE = 168\n INCLUDING = 169\n INCREMENT = 170\n INDENT = 171\n INDEX = 172\n INDEXED = 173\n INDICATOR = 174\n INDICES = 175\n INFINITE = 176\n INLINE = 177\n INNER = 178\n INOUT = 179\n INSERT = 180\n INSTANTIABLE = 181\n INSTEAD = 182\n INT = 183\n INTEGER = 184\n INTERSECT = 185\n INTERVAL = 186\n INTO = 187\n INVALIDATE = 188\n IS = 189\n ISOLATION = 190\n ITERATE = 191\n JAVA = 192\n JOIN = 193\n KEEP = 194\n LANGUAGE = 195\n LAST = 196\n LAST_VALUE = 197\n LEADING = 198\n LEFT = 199\n LEVEL = 200\n LIBRARY = 201\n LIKE = 202\n LIKE2 = 203\n LIKE4 = 204\n LIKEC = 205\n LIMIT = 206\n LOCAL = 207\n LOCK = 208\n LOCKED = 209\n LOG = 210\n LOGOFF = 211\n LOGON = 212\n LONG = 213\n LOOP = 214\n MAIN = 215\n MAP = 216\n MATCHED = 217\n MAXVALUE = 218\n MEASURES = 219\n 
MEMBER = 220\n MERGE = 221\n MINUS = 222\n MINUTE = 223\n MINVALUE = 224\n MLSLABEL = 225\n MODE = 226\n MODEL = 227\n MODIFY = 228\n MONTH = 229\n MULTISET = 230\n NAME = 231\n NAN = 232\n NATURAL = 233\n NATURALN = 234\n NAV = 235\n NCHAR = 236\n NCHAR_CS = 237\n NCLOB = 238\n NESTED = 239\n NEW = 240\n NO = 241\n NOAUDIT = 242\n NOCACHE = 243\n NOCOPY = 244\n NOCYCLE = 245\n NOENTITYESCAPING = 246\n NOMAXVALUE = 247\n NOMINVALUE = 248\n NONE = 249\n NOORDER = 250\n NOSCHEMACHECK = 251\n NOT = 252\n NOWAIT = 253\n NULL = 254\n NULLS = 255\n NUMBER = 256\n NUMERIC = 257\n NVARCHAR2 = 258\n OBJECT = 259\n OF = 260\n OFF = 261\n OID = 262\n OLD = 263\n ON = 264\n ONLY = 265\n OPEN = 266\n OPTION = 267\n OR = 268\n ORADATA = 269\n ORDER = 270\n ORDINALITY = 271\n OSERROR = 272\n OUT = 273\n OUTER = 274\n OVER = 275\n OVERRIDING = 276\n PACKAGE = 277\n PARALLEL_ENABLE = 278\n PARAMETERS = 279\n PARENT = 280\n PARTITION = 281\n PASSING = 282\n PATH = 283\n PERCENT_ROWTYPE = 284\n PERCENT_TYPE = 285\n PIPELINED = 286\n PIVOT = 287\n PLAN = 288\n PLS_INTEGER = 289\n POSITIVE = 290\n POSITIVEN = 291\n PRAGMA = 292\n PRECEDING = 293\n PRECISION = 294\n PRESENT = 295\n PRIOR = 296\n PROCEDURE = 297\n RAISE = 298\n RANGE = 299\n RAW = 300\n READ = 301\n REAL = 302\n RECORD = 303\n REF = 304\n REFERENCE = 305\n REFERENCING = 306\n REJECT = 307\n RELIES_ON = 308\n RENAME = 309\n REPLACE = 310\n RESPECT = 311\n RESTRICT_REFERENCES = 312\n RESULT = 313\n RESULT_CACHE = 314\n RETURN = 315\n RETURNING = 316\n REUSE = 317\n REVERSE = 318\n REVOKE = 319\n RIGHT = 320\n ROLLBACK = 321\n ROLLUP = 322\n ROW = 323\n ROWID = 324\n ROWS = 325\n RULES = 326\n SAMPLE = 327\n SAVE = 328\n SAVEPOINT = 329\n SCHEMA = 330\n SCHEMACHECK = 331\n SCN = 332\n SEARCH = 333\n SECOND = 334\n SEED = 335\n SEGMENT = 336\n SELECT = 337\n SELF = 338\n SEQUENCE = 339\n SEQUENTIAL = 340\n SERIALIZABLE = 341\n SERIALLY_REUSABLE = 342\n SERVERERROR = 343\n SESSIONTIMEZONE = 344\n SET = 345\n SETS = 346\n 
SETTINGS = 347\n SHARE = 348\n SHOW = 349\n SHUTDOWN = 350\n SIBLINGS = 351\n SIGNTYPE = 352\n SIMPLE_INTEGER = 353\n SINGLE = 354\n SIZE = 355\n SKIP_ = 356\n SMALLINT = 357\n SNAPSHOT = 358\n SOME = 359\n SPECIFICATION = 360\n SQLDATA = 361\n SQLERROR = 362\n STANDALONE = 363\n START = 364\n STARTUP = 365\n STATEMENT = 366\n STATEMENT_ID = 367\n STATIC = 368\n STATISTICS = 369\n STRING = 370\n SUBMULTISET = 371\n SUBPARTITION = 372\n SUBSTITUTABLE = 373\n SUBTYPE = 374\n SUCCESS = 375\n SUSPEND = 376\n TABLE = 377\n THE = 378\n THEN = 379\n TIME = 380\n TIMESTAMP = 381\n TIMESTAMP_LTZ_UNCONSTRAINED = 382\n TIMESTAMP_TZ_UNCONSTRAINED = 383\n TIMESTAMP_UNCONSTRAINED = 384\n TIMEZONE_ABBR = 385\n TIMEZONE_HOUR = 386\n TIMEZONE_MINUTE = 387\n TIMEZONE_REGION = 388\n TO = 389\n TRAILING = 390\n TRANSACTION = 391\n TRANSLATE = 392\n TREAT = 393\n TRIGGER = 394\n TRIM = 395\n TRUE = 396\n TRUNCATE = 397\n TYPE = 398\n UNBOUNDED = 399\n UNDER = 400\n UNION = 401\n UNIQUE = 402\n UNLIMITED = 403\n UNPIVOT = 404\n UNTIL = 405\n UPDATE = 406\n UPDATED = 407\n UPSERT = 408\n UROWID = 409\n USE = 410\n USING = 411\n VALIDATE = 412\n VALUE = 413\n VALUES = 414\n VARCHAR = 415\n VARCHAR2 = 416\n VARIABLE = 417\n VARRAY = 418\n VARYING = 419\n VERSION = 420\n VERSIONS = 421\n WAIT = 422\n WARNING = 423\n WELLFORMED = 424\n WHEN = 425\n WHENEVER = 426\n WHERE = 427\n WHILE = 428\n WITH = 429\n WITHIN = 430\n WORK = 431\n WRITE = 432\n XML = 433\n XMLAGG = 434\n XMLATTRIBUTES = 435\n XMLCAST = 436\n XMLCOLATTVAL = 437\n XMLELEMENT = 438\n XMLEXISTS = 439\n XMLFOREST = 440\n XMLNAMESPACES = 441\n XMLPARSE = 442\n XMLPI = 443\n XMLQUERY = 444\n XMLROOT = 445\n XMLSERIALIZE = 446\n XMLTABLE = 447\n YEAR = 448\n YES = 449\n YMINTERVAL_UNCONSTRAINED = 450\n ZONE = 451\n PREDICTION = 452\n PREDICTION_BOUNDS = 453\n PREDICTION_COST = 454\n PREDICTION_DETAILS = 455\n PREDICTION_PROBABILITY = 456\n PREDICTION_SET = 457\n CUME_DIST = 458\n DENSE_RANK = 459\n LISTAGG = 460\n PERCENT_RANK = 
461\n PERCENTILE_CONT = 462\n PERCENTILE_DISC = 463\n RANK = 464\n AVG = 465\n CORR = 466\n LAG = 467\n LEAD = 468\n MAX = 469\n MEDIAN = 470\n MIN = 471\n NTILE = 472\n RATIO_TO_REPORT = 473\n ROW_NUMBER = 474\n SUM = 475\n VARIANCE = 476\n REGR_ = 477\n STDDEV = 478\n VAR_ = 479\n COVAR_ = 480\n NATIONAL_CHAR_STRING_LIT = 481\n BIT_STRING_LIT = 482\n HEX_STRING_LIT = 483\n DOUBLE_PERIOD = 484\n PERIOD = 485\n UNSIGNED_INTEGER = 486\n APPROXIMATE_NUM_LIT = 487\n CHAR_STRING = 488\n DELIMITED_ID = 489\n PERCENT = 490\n AMPERSAND = 491\n LEFT_PAREN = 492\n RIGHT_PAREN = 493\n DOUBLE_ASTERISK = 494\n ASTERISK = 495\n PLUS_SIGN = 496\n MINUS_SIGN = 497\n COMMA = 498\n SOLIDUS = 499\n AT_SIGN = 500\n ASSIGN_OP = 501\n BINDVAR = 502\n COLON = 503\n SEMICOLON = 504\n LESS_THAN_OR_EQUALS_OP = 505\n LESS_THAN_OP = 506\n GREATER_THAN_OR_EQUALS_OP = 507\n NOT_EQUAL_OP = 508\n CARRET_OPERATOR_PART = 509\n TILDE_OPERATOR_PART = 510\n EXCLAMATION_OPERATOR_PART = 511\n GREATER_THAN_OP = 512\n CONCATENATION_OP = 513\n VERTICAL_BAR = 514\n EQUALS_OP = 515\n LEFT_BRACKET = 516\n RIGHT_BRACKET = 517\n INTRODUCER = 518\n SPACES = 519\n SINGLE_LINE_COMMENT = 520\n MULTI_LINE_COMMENT = 521\n PROMPT = 522\n REGULAR_ID = 523\n ZV = 524\n channelNames = [u'DEFAULT_TOKEN_CHANNEL', u'HIDDEN']\n modeNames = ['DEFAULT_MODE']\n literalNames = ['<INVALID>', \"'..'\", \"'.'\", \"'%'\", \"'&'\", \"'('\", \"')'\",\n \"'**'\", \"'*'\", \"'+'\", \"'-'\", \"','\", \"'/'\", \"'@'\", \"':='\", \"':'\",\n \"';'\", \"'<='\", \"'<'\", \"'>='\", \"'^'\", \"'~'\", \"'!'\", \"'>'\", \"'||'\",\n \"'|'\", \"'='\", \"'['\", \"']'\", \"'_'\", \"'@!'\"]\n symbolicNames = ['<INVALID>', 'A_LETTER', 'ADD', 'AFTER', 'AGENT',\n 'AGGREGATE', 'ALL', 'ALTER', 'ANALYZE', 'AND', 'ANY', 'ARRAY', 'AS',\n 'ASSUME', 'ASSERT', 'ASC', 'ASSOCIATE', 'AT', 'ATTRIBUTE', 'AUDIT',\n 'AUTHID', 'AUTO', 'AUTOMATIC', 'AUTONOMOUS_TRANSACTION', 'BATCH',\n 'BEFORE', 'BEGIN', 'BETWEEN', 'BFILE', 'BINARY_DOUBLE',\n 'BINARY_FLOAT', 
'BINARY_INTEGER', 'BLOB', 'BLOCK', 'BODY',\n 'BOOLEAN', 'BOTH', 'BREADTH', 'BULK', 'BY', 'BYTE', 'C_LETTER',\n 'CACHE', 'CALL', 'CANONICAL', 'CASCADE', 'CASE', 'CAST', 'CHAR',\n 'CHAR_CS', 'CHARACTER', 'CHECK', 'CHR', 'CLOB', 'CLOSE', 'CLUSTER',\n 'COLLECT', 'COLUMNS', 'COMMENT', 'COMMIT', 'COMMITTED',\n 'COMPATIBILITY', 'COMPILE', 'COMPOUND', 'CONNECT',\n 'CONNECT_BY_ROOT', 'CONSTANT', 'CONSTRAINT', 'CONSTRAINTS',\n 'CONSTRUCTOR', 'CONTENT', 'CONTEXT', 'CONTINUE', 'CONVERT',\n 'CORRUPT_XID', 'CORRUPT_XID_ALL', 'COST', 'COUNT', 'CREATE',\n 'CROSS', 'CUBE', 'CURRENT', 'CURRENT_USER', 'CURSOR', 'CUSTOMDATUM',\n 'CYCLE', 'DATA', 'DATABASE', 'DATE', 'DAY', 'DB_ROLE_CHANGE',\n 'DBTIMEZONE', 'DDL', 'DEBUG', 'DEC', 'DECIMAL', 'DECLARE',\n 'DECOMPOSE', 'DECREMENT', 'DEFAULT', 'DEFAULTS', 'DEFERRED',\n 'DEFINER', 'DELETE', 'DEPTH', 'DESC', 'DETERMINISTIC', 'DIMENSION',\n 'DISABLE', 'DISASSOCIATE', 'DISTINCT', 'DOCUMENT', 'DOUBLE', 'DROP',\n 'DSINTERVAL_UNCONSTRAINED', 'EACH', 'ELEMENT', 'ELSE', 'ELSIF',\n 'EMPTY', 'ENABLE', 'ENCODING', 'END', 'ENTITYESCAPING', 'ERR',\n 'ERRORS', 'ESCAPE', 'EVALNAME', 'EXCEPT', 'EXCEPTION',\n 'EXCEPTION_INIT', 'EXCEPTIONS', 'EXCLUDE', 'EXCLUSIVE', 'EXECUTE',\n 'EXISTS', 'EXIT', 'EXPLAIN', 'EXTERNAL', 'EXTRACT', 'FAILURE',\n 'FALSE', 'FETCH', 'FINAL', 'FIRST', 'FIRST_VALUE', 'FLOAT',\n 'FOLLOWING', 'FOLLOWS', 'FOR', 'FORALL', 'FORCE', 'FROM', 'FULL',\n 'FUNCTION', 'GOTO', 'GRANT', 'GROUP', 'GROUPING', 'HASH', 'HAVING',\n 'HIDE', 'HOUR', 'IF', 'IGNORE', 'IMMEDIATE', 'IN', 'INCLUDE',\n 'INCLUDING', 'INCREMENT', 'INDENT', 'INDEX', 'INDEXED', 'INDICATOR',\n 'INDICES', 'INFINITE', 'INLINE', 'INNER', 'INOUT', 'INSERT',\n 'INSTANTIABLE', 'INSTEAD', 'INT', 'INTEGER', 'INTERSECT',\n 'INTERVAL', 'INTO', 'INVALIDATE', 'IS', 'ISOLATION', 'ITERATE',\n 'JAVA', 'JOIN', 'KEEP', 'LANGUAGE', 'LAST', 'LAST_VALUE', 'LEADING',\n 'LEFT', 'LEVEL', 'LIBRARY', 'LIKE', 'LIKE2', 'LIKE4', 'LIKEC',\n 'LIMIT', 'LOCAL', 'LOCK', 'LOCKED', 'LOG', 'LOGOFF', 'LOGON',\n 'LONG', 
'LOOP', 'MAIN', 'MAP', 'MATCHED', 'MAXVALUE', 'MEASURES',\n 'MEMBER', 'MERGE', 'MINUS', 'MINUTE', 'MINVALUE', 'MLSLABEL',\n 'MODE', 'MODEL', 'MODIFY', 'MONTH', 'MULTISET', 'NAME', 'NAN',\n 'NATURAL', 'NATURALN', 'NAV', 'NCHAR', 'NCHAR_CS', 'NCLOB',\n 'NESTED', 'NEW', 'NO', 'NOAUDIT', 'NOCACHE', 'NOCOPY', 'NOCYCLE',\n 'NOENTITYESCAPING', 'NOMAXVALUE', 'NOMINVALUE', 'NONE', 'NOORDER',\n 'NOSCHEMACHECK', 'NOT', 'NOWAIT', 'NULL', 'NULLS', 'NUMBER',\n 'NUMERIC', 'NVARCHAR2', 'OBJECT', 'OF', 'OFF', 'OID', 'OLD', 'ON',\n 'ONLY', 'OPEN', 'OPTION', 'OR', 'ORADATA', 'ORDER', 'ORDINALITY',\n 'OSERROR', 'OUT', 'OUTER', 'OVER', 'OVERRIDING', 'PACKAGE',\n 'PARALLEL_ENABLE', 'PARAMETERS', 'PARENT', 'PARTITION', 'PASSING',\n 'PATH', 'PERCENT_ROWTYPE', 'PERCENT_TYPE', 'PIPELINED', 'PIVOT',\n 'PLAN', 'PLS_INTEGER', 'POSITIVE', 'POSITIVEN', 'PRAGMA',\n 'PRECEDING', 'PRECISION', 'PRESENT', 'PRIOR', 'PROCEDURE', 'RAISE',\n 'RANGE', 'RAW', 'READ', 'REAL', 'RECORD', 'REF', 'REFERENCE',\n 'REFERENCING', 'REJECT', 'RELIES_ON', 'RENAME', 'REPLACE',\n 'RESPECT', 'RESTRICT_REFERENCES', 'RESULT', 'RESULT_CACHE',\n 'RETURN', 'RETURNING', 'REUSE', 'REVERSE', 'REVOKE', 'RIGHT',\n 'ROLLBACK', 'ROLLUP', 'ROW', 'ROWID', 'ROWS', 'RULES', 'SAMPLE',\n 'SAVE', 'SAVEPOINT', 'SCHEMA', 'SCHEMACHECK', 'SCN', 'SEARCH',\n 'SECOND', 'SEED', 'SEGMENT', 'SELECT', 'SELF', 'SEQUENCE',\n 'SEQUENTIAL', 'SERIALIZABLE', 'SERIALLY_REUSABLE', 'SERVERERROR',\n 'SESSIONTIMEZONE', 'SET', 'SETS', 'SETTINGS', 'SHARE', 'SHOW',\n 'SHUTDOWN', 'SIBLINGS', 'SIGNTYPE', 'SIMPLE_INTEGER', 'SINGLE',\n 'SIZE', 'SKIP_', 'SMALLINT', 'SNAPSHOT', 'SOME', 'SPECIFICATION',\n 'SQLDATA', 'SQLERROR', 'STANDALONE', 'START', 'STARTUP',\n 'STATEMENT', 'STATEMENT_ID', 'STATIC', 'STATISTICS', 'STRING',\n 'SUBMULTISET', 'SUBPARTITION', 'SUBSTITUTABLE', 'SUBTYPE',\n 'SUCCESS', 'SUSPEND', 'TABLE', 'THE', 'THEN', 'TIME', 'TIMESTAMP',\n 'TIMESTAMP_LTZ_UNCONSTRAINED', 'TIMESTAMP_TZ_UNCONSTRAINED',\n 'TIMESTAMP_UNCONSTRAINED', 'TIMEZONE_ABBR', 
'TIMEZONE_HOUR',\n 'TIMEZONE_MINUTE', 'TIMEZONE_REGION', 'TO', 'TRAILING',\n 'TRANSACTION', 'TRANSLATE', 'TREAT', 'TRIGGER', 'TRIM', 'TRUE',\n 'TRUNCATE', 'TYPE', 'UNBOUNDED', 'UNDER', 'UNION', 'UNIQUE',\n 'UNLIMITED', 'UNPIVOT', 'UNTIL', 'UPDATE', 'UPDATED', 'UPSERT',\n 'UROWID', 'USE', 'USING', 'VALIDATE', 'VALUE', 'VALUES', 'VARCHAR',\n 'VARCHAR2', 'VARIABLE', 'VARRAY', 'VARYING', 'VERSION', 'VERSIONS',\n 'WAIT', 'WARNING', 'WELLFORMED', 'WHEN', 'WHENEVER', 'WHERE',\n 'WHILE', 'WITH', 'WITHIN', 'WORK', 'WRITE', 'XML', 'XMLAGG',\n 'XMLATTRIBUTES', 'XMLCAST', 'XMLCOLATTVAL', 'XMLELEMENT',\n 'XMLEXISTS', 'XMLFOREST', 'XMLNAMESPACES', 'XMLPARSE', 'XMLPI',\n 'XMLQUERY', 'XMLROOT', 'XMLSERIALIZE', 'XMLTABLE', 'YEAR', 'YES',\n 'YMINTERVAL_UNCONSTRAINED', 'ZONE', 'PREDICTION',\n 'PREDICTION_BOUNDS', 'PREDICTION_COST', 'PREDICTION_DETAILS',\n 'PREDICTION_PROBABILITY', 'PREDICTION_SET', 'CUME_DIST',\n 'DENSE_RANK', 'LISTAGG', 'PERCENT_RANK', 'PERCENTILE_CONT',\n 'PERCENTILE_DISC', 'RANK', 'AVG', 'CORR', 'LAG', 'LEAD', 'MAX',\n 'MEDIAN', 'MIN', 'NTILE', 'RATIO_TO_REPORT', 'ROW_NUMBER', 'SUM',\n 'VARIANCE', 'REGR_', 'STDDEV', 'VAR_', 'COVAR_',\n 'NATIONAL_CHAR_STRING_LIT', 'BIT_STRING_LIT', 'HEX_STRING_LIT',\n 'DOUBLE_PERIOD', 'PERIOD', 'UNSIGNED_INTEGER',\n 'APPROXIMATE_NUM_LIT', 'CHAR_STRING', 'DELIMITED_ID', 'PERCENT',\n 'AMPERSAND', 'LEFT_PAREN', 'RIGHT_PAREN', 'DOUBLE_ASTERISK',\n 'ASTERISK', 'PLUS_SIGN', 'MINUS_SIGN', 'COMMA', 'SOLIDUS',\n 'AT_SIGN', 'ASSIGN_OP', 'BINDVAR', 'COLON', 'SEMICOLON',\n 'LESS_THAN_OR_EQUALS_OP', 'LESS_THAN_OP',\n 'GREATER_THAN_OR_EQUALS_OP', 'NOT_EQUAL_OP', 'CARRET_OPERATOR_PART',\n 'TILDE_OPERATOR_PART', 'EXCLAMATION_OPERATOR_PART',\n 'GREATER_THAN_OP', 'CONCATENATION_OP', 'VERTICAL_BAR', 'EQUALS_OP',\n 'LEFT_BRACKET', 'RIGHT_BRACKET', 'INTRODUCER', 'SPACES',\n 'SINGLE_LINE_COMMENT', 'MULTI_LINE_COMMENT', 'PROMPT', 'REGULAR_ID',\n 'ZV']\n ruleNames = ['T__0', 'A_LETTER', 'ADD', 'AFTER', 'AGENT', 'AGGREGATE',\n 'ALL', 'ALTER', 'ANALYZE', 
'AND', 'ANY', 'ARRAY', 'AS', 'ASSUME',\n 'ASSERT', 'ASC', 'ASSOCIATE', 'AT', 'ATTRIBUTE', 'AUDIT', 'AUTHID',\n 'AUTO', 'AUTOMATIC', 'AUTONOMOUS_TRANSACTION', 'BATCH', 'BEFORE',\n 'BEGIN', 'BETWEEN', 'BFILE', 'BINARY_DOUBLE', 'BINARY_FLOAT',\n 'BINARY_INTEGER', 'BLOB', 'BLOCK', 'BODY', 'BOOLEAN', 'BOTH',\n 'BREADTH', 'BULK', 'BY', 'BYTE', 'C_LETTER', 'CACHE', 'CALL',\n 'CANONICAL', 'CASCADE', 'CASE', 'CAST', 'CHAR', 'CHAR_CS',\n 'CHARACTER', 'CHECK', 'CHR', 'CLOB', 'CLOSE', 'CLUSTER', 'COLLECT',\n 'COLUMNS', 'COMMENT', 'COMMIT', 'COMMITTED', 'COMPATIBILITY',\n 'COMPILE', 'COMPOUND', 'CONNECT', 'CONNECT_BY_ROOT', 'CONSTANT',\n 'CONSTRAINT', 'CONSTRAINTS', 'CONSTRUCTOR', 'CONTENT', 'CONTEXT',\n 'CONTINUE', 'CONVERT', 'CORRUPT_XID', 'CORRUPT_XID_ALL', 'COST',\n 'COUNT', 'CREATE', 'CROSS', 'CUBE', 'CURRENT', 'CURRENT_USER',\n 'CURSOR', 'CUSTOMDATUM', 'CYCLE', 'DATA', 'DATABASE', 'DATE', 'DAY',\n 'DB_ROLE_CHANGE', 'DBTIMEZONE', 'DDL', 'DEBUG', 'DEC', 'DECIMAL',\n 'DECLARE', 'DECOMPOSE', 'DECREMENT', 'DEFAULT', 'DEFAULTS',\n 'DEFERRED', 'DEFINER', 'DELETE', 'DEPTH', 'DESC', 'DETERMINISTIC',\n 'DIMENSION', 'DISABLE', 'DISASSOCIATE', 'DISTINCT', 'DOCUMENT',\n 'DOUBLE', 'DROP', 'DSINTERVAL_UNCONSTRAINED', 'EACH', 'ELEMENT',\n 'ELSE', 'ELSIF', 'EMPTY', 'ENABLE', 'ENCODING', 'END',\n 'ENTITYESCAPING', 'ERR', 'ERRORS', 'ESCAPE', 'EVALNAME', 'EXCEPT',\n 'EXCEPTION', 'EXCEPTION_INIT', 'EXCEPTIONS', 'EXCLUDE', 'EXCLUSIVE',\n 'EXECUTE', 'EXISTS', 'EXIT', 'EXPLAIN', 'EXTERNAL', 'EXTRACT',\n 'FAILURE', 'FALSE', 'FETCH', 'FINAL', 'FIRST', 'FIRST_VALUE',\n 'FLOAT', 'FOLLOWING', 'FOLLOWS', 'FOR', 'FORALL', 'FORCE', 'FROM',\n 'FULL', 'FUNCTION', 'GOTO', 'GRANT', 'GROUP', 'GROUPING', 'HASH',\n 'HAVING', 'HIDE', 'HOUR', 'IF', 'IGNORE', 'IMMEDIATE', 'IN',\n 'INCLUDE', 'INCLUDING', 'INCREMENT', 'INDENT', 'INDEX', 'INDEXED',\n 'INDICATOR', 'INDICES', 'INFINITE', 'INLINE', 'INNER', 'INOUT',\n 'INSERT', 'INSTANTIABLE', 'INSTEAD', 'INT', 'INTEGER', 'INTERSECT',\n 'INTERVAL', 'INTO', 
'INVALIDATE', 'IS', 'ISOLATION', 'ITERATE',\n 'JAVA', 'JOIN', 'KEEP', 'LANGUAGE', 'LAST', 'LAST_VALUE', 'LEADING',\n 'LEFT', 'LEVEL', 'LIBRARY', 'LIKE', 'LIKE2', 'LIKE4', 'LIKEC',\n 'LIMIT', 'LOCAL', 'LOCK', 'LOCKED', 'LOG', 'LOGOFF', 'LOGON',\n 'LONG', 'LOOP', 'MAIN', 'MAP', 'MATCHED', 'MAXVALUE', 'MEASURES',\n 'MEMBER', 'MERGE', 'MINUS', 'MINUTE', 'MINVALUE', 'MLSLABEL',\n 'MODE', 'MODEL', 'MODIFY', 'MONTH', 'MULTISET', 'NAME', 'NAN',\n 'NATURAL', 'NATURALN', 'NAV', 'NCHAR', 'NCHAR_CS', 'NCLOB',\n 'NESTED', 'NEW', 'NO', 'NOAUDIT', 'NOCACHE', 'NOCOPY', 'NOCYCLE',\n 'NOENTITYESCAPING', 'NOMAXVALUE', 'NOMINVALUE', 'NONE', 'NOORDER',\n 'NOSCHEMACHECK', 'NOT', 'NOWAIT', 'NULL', 'NULLS', 'NUMBER',\n 'NUMERIC', 'NVARCHAR2', 'OBJECT', 'OF', 'OFF', 'OID', 'OLD', 'ON',\n 'ONLY', 'OPEN', 'OPTION', 'OR', 'ORADATA', 'ORDER', 'ORDINALITY',\n 'OSERROR', 'OUT', 'OUTER', 'OVER', 'OVERRIDING', 'PACKAGE',\n 'PARALLEL_ENABLE', 'PARAMETERS', 'PARENT', 'PARTITION', 'PASSING',\n 'PATH', 'PERCENT_ROWTYPE', 'PERCENT_TYPE', 'PIPELINED', 'PIVOT',\n 'PLAN', 'PLS_INTEGER', 'POSITIVE', 'POSITIVEN', 'PRAGMA',\n 'PRECEDING', 'PRECISION', 'PRESENT', 'PRIOR', 'PROCEDURE', 'RAISE',\n 'RANGE', 'RAW', 'READ', 'REAL', 'RECORD', 'REF', 'REFERENCE',\n 'REFERENCING', 'REJECT', 'RELIES_ON', 'RENAME', 'REPLACE',\n 'RESPECT', 'RESTRICT_REFERENCES', 'RESULT', 'RESULT_CACHE',\n 'RETURN', 'RETURNING', 'REUSE', 'REVERSE', 'REVOKE', 'RIGHT',\n 'ROLLBACK', 'ROLLUP', 'ROW', 'ROWID', 'ROWS', 'RULES', 'SAMPLE',\n 'SAVE', 'SAVEPOINT', 'SCHEMA', 'SCHEMACHECK', 'SCN', 'SEARCH',\n 'SECOND', 'SEED', 'SEGMENT', 'SELECT', 'SELF', 'SEQUENCE',\n 'SEQUENTIAL', 'SERIALIZABLE', 'SERIALLY_REUSABLE', 'SERVERERROR',\n 'SESSIONTIMEZONE', 'SET', 'SETS', 'SETTINGS', 'SHARE', 'SHOW',\n 'SHUTDOWN', 'SIBLINGS', 'SIGNTYPE', 'SIMPLE_INTEGER', 'SINGLE',\n 'SIZE', 'SKIP_', 'SMALLINT', 'SNAPSHOT', 'SOME', 'SPECIFICATION',\n 'SQLDATA', 'SQLERROR', 'STANDALONE', 'START', 'STARTUP',\n 'STATEMENT', 'STATEMENT_ID', 'STATIC', 'STATISTICS', 
'STRING',\n 'SUBMULTISET', 'SUBPARTITION', 'SUBSTITUTABLE', 'SUBTYPE',\n 'SUCCESS', 'SUSPEND', 'TABLE', 'THE', 'THEN', 'TIME', 'TIMESTAMP',\n 'TIMESTAMP_LTZ_UNCONSTRAINED', 'TIMESTAMP_TZ_UNCONSTRAINED',\n 'TIMESTAMP_UNCONSTRAINED', 'TIMEZONE_ABBR', 'TIMEZONE_HOUR',\n 'TIMEZONE_MINUTE', 'TIMEZONE_REGION', 'TO', 'TRAILING',\n 'TRANSACTION', 'TRANSLATE', 'TREAT', 'TRIGGER', 'TRIM', 'TRUE',\n 'TRUNCATE', 'TYPE', 'UNBOUNDED', 'UNDER', 'UNION', 'UNIQUE',\n 'UNLIMITED', 'UNPIVOT', 'UNTIL', 'UPDATE', 'UPDATED', 'UPSERT',\n 'UROWID', 'USE', 'USING', 'VALIDATE', 'VALUE', 'VALUES', 'VARCHAR',\n 'VARCHAR2', 'VARIABLE', 'VARRAY', 'VARYING', 'VERSION', 'VERSIONS',\n 'WAIT', 'WARNING', 'WELLFORMED', 'WHEN', 'WHENEVER', 'WHERE',\n 'WHILE', 'WITH', 'WITHIN', 'WORK', 'WRITE', 'XML', 'XMLAGG',\n 'XMLATTRIBUTES', 'XMLCAST', 'XMLCOLATTVAL', 'XMLELEMENT',\n 'XMLEXISTS', 'XMLFOREST', 'XMLNAMESPACES', 'XMLPARSE', 'XMLPI',\n 'XMLQUERY', 'XMLROOT', 'XMLSERIALIZE', 'XMLTABLE', 'YEAR', 'YES',\n 'YMINTERVAL_UNCONSTRAINED', 'ZONE', 'PREDICTION',\n 'PREDICTION_BOUNDS', 'PREDICTION_COST', 'PREDICTION_DETAILS',\n 'PREDICTION_PROBABILITY', 'PREDICTION_SET', 'CUME_DIST',\n 'DENSE_RANK', 'LISTAGG', 'PERCENT_RANK', 'PERCENTILE_CONT',\n 'PERCENTILE_DISC', 'RANK', 'AVG', 'CORR', 'LAG', 'LEAD', 'MAX',\n 'MEDIAN', 'MIN', 'NTILE', 'RATIO_TO_REPORT', 'ROW_NUMBER', 'SUM',\n 'VARIANCE', 'REGR_', 'STDDEV', 'VAR_', 'COVAR_',\n 'NATIONAL_CHAR_STRING_LIT', 'BIT_STRING_LIT', 'HEX_STRING_LIT',\n 'DOUBLE_PERIOD', 'PERIOD', 'UNSIGNED_INTEGER',\n 'APPROXIMATE_NUM_LIT', 'CHAR_STRING', 'CHAR_STRING_PERL', 'QUOTE',\n 'QS_ANGLE', 'QS_BRACE', 'QS_BRACK', 'QS_PAREN', 'QS_OTHER_CH',\n 'DELIMITED_ID', 'PERCENT', 'AMPERSAND', 'LEFT_PAREN', 'RIGHT_PAREN',\n 'DOUBLE_ASTERISK', 'ASTERISK', 'PLUS_SIGN', 'MINUS_SIGN', 'COMMA',\n 'SOLIDUS', 'AT_SIGN', 'ASSIGN_OP', 'BINDVAR', 'COLON', 'SEMICOLON',\n 'LESS_THAN_OR_EQUALS_OP', 'LESS_THAN_OP',\n 'GREATER_THAN_OR_EQUALS_OP', 'NOT_EQUAL_OP', 'CARRET_OPERATOR_PART',\n 
'TILDE_OPERATOR_PART', 'EXCLAMATION_OPERATOR_PART',\n 'GREATER_THAN_OP', 'QUESTION_MARK', 'CONCATENATION_OP',\n 'VERTICAL_BAR', 'EQUALS_OP', 'LEFT_BRACKET', 'RIGHT_BRACKET',\n 'INTRODUCER', 'SPACES', 'SIMPLE_LETTER',\n 'UNSIGNED_INTEGER_FRAGMENT', 'FLOAT_FRAGMENT',\n 'SINGLE_LINE_COMMENT', 'MULTI_LINE_COMMENT', 'PROMPT', 'NEWLINE',\n 'SPACE', 'REGULAR_ID', 'ZV', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',\n 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',\n 'V', 'W', 'X', 'Y', 'Z']\n grammarFileName = 'PlSql.g4'\n\n def __init__(self, input=None, output: TextIO=sys.stdout):\n super().__init__(input, output)\n self.checkVersion('4.7.2')\n self._interp = LexerATNSimulator(self, self.atn, self.\n decisionsToDFA, PredictionContextCache())\n self._actions = None\n self._predicates = None\n",
"step-4": "from antlr4 import *\nfrom io import StringIO\nfrom typing.io import TextIO\nimport sys\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write('\\x03悋Ꜫ脳맭䅼㯧瞆奤\\x02Ȏ')\n buf.write(\n 'ᓗ\\x08\\x01\\x04\\x02\\t\\x02\\x04\\x03\\t\\x03\\x04\\x04\\t\\x04\\x04\\x05\\t\\x05\\x04\\x06\\t\\x06\\x04\\x07'\n )\n buf.write(\n '\\t\\x07\\x04\\x08\\t\\x08\\x04\\t\\t\\t\\x04\\n\\t\\n\\x04\\x0b\\t\\x0b\\x04\\x0c\\t\\x0c\\x04\\r\\t\\r'\n )\n buf.write(\n '\\x04\\x0e\\t\\x0e\\x04\\x0f\\t\\x0f\\x04\\x10\\t\\x10\\x04\\x11\\t\\x11\\x04\\x12\\t\\x12\\x04\\x13'\n )\n buf.write(\n '\\t\\x13\\x04\\x14\\t\\x14\\x04\\x15\\t\\x15\\x04\\x16\\t\\x16\\x04\\x17\\t\\x17\\x04\\x18\\t\\x18'\n )\n buf.write(\n '\\x04\\x19\\t\\x19\\x04\\x1a\\t\\x1a\\x04\\x1b\\t\\x1b\\x04\\x1c\\t\\x1c\\x04\\x1d\\t\\x1d\\x04\\x1e'\n )\n buf.write(\n '\\t\\x1e\\x04\\x1f\\t\\x1f\\x04 \\t \\x04!\\t!\\x04\"\\t\"\\x04#\\t#\\x04$\\t$\\x04%\\t%'\n )\n buf.write(\n \"\\x04&\\t&\\x04'\\t'\\x04(\\t(\\x04)\\t)\\x04*\\t*\\x04+\\t+\\x04,\\t,\\x04-\\t-\\x04.\"\n )\n buf.write('\\t.\\x04/\\t/\\x040\\t0\\x041\\t1\\x042\\t2\\x043\\t3\\x044')\n buf.write('\\t4\\x045\\t5\\x046\\t6\\x047\\t7\\x048\\t8\\x049\\t9\\x04:\\t:')\n buf.write(\n '\\x04;\\t;\\x04<\\t<\\x04=\\t=\\x04>\\t>\\x04?\\t?\\x04@\\t@\\x04A\\tA\\x04B\\tB\\x04C\\t'\n )\n buf.write(\n 'C\\x04D\\tD\\x04E\\tE\\x04F\\tF\\x04G\\tG\\x04H\\tH\\x04I\\tI\\x04J\\tJ\\x04K\\tK\\x04L\\t'\n )\n buf.write(\n 'L\\x04M\\tM\\x04N\\tN\\x04O\\tO\\x04P\\tP\\x04Q\\tQ\\x04R\\tR\\x04S\\tS\\x04T\\tT\\x04U\\t'\n )\n buf.write(\n 'U\\x04V\\tV\\x04W\\tW\\x04X\\tX\\x04Y\\tY\\x04Z\\tZ\\x04[\\t[\\x04\\\\\\t\\\\\\x04]\\t]\\x04'\n )\n buf.write(\n '^\\t^\\x04_\\t_\\x04`\\t`\\x04a\\ta\\x04b\\tb\\x04c\\tc\\x04d\\td\\x04e\\te\\x04f\\tf\\x04'\n )\n buf.write(\n 'g\\tg\\x04h\\th\\x04i\\ti\\x04j\\tj\\x04k\\tk\\x04l\\tl\\x04m\\tm\\x04n\\tn\\x04o\\to\\x04'\n )\n buf.write(\n 'p\\tp\\x04q\\tq\\x04r\\tr\\x04s\\ts\\x04t\\tt\\x04u\\tu\\x04v\\tv\\x04w\\tw\\x04x\\tx\\x04'\n )\n buf.write(\n 
'y\\ty\\x04z\\tz\\x04{\\t{\\x04|\\t|\\x04}\\t}\\x04~\\t~\\x04\\x7f\\t\\x7f\\x04\\x80'\n )\n buf.write('\\t\\x80\\x04\\x81\\t\\x81\\x04\\x82\\t\\x82\\x04\\x83\\t\\x83')\n buf.write('\\x04\\x84\\t\\x84\\x04\\x85\\t\\x85\\x04\\x86\\t\\x86\\x04\\x87')\n buf.write('\\t\\x87\\x04\\x88\\t\\x88\\x04\\x89\\t\\x89\\x04\\x8a\\t\\x8a')\n buf.write('\\x04\\x8b\\t\\x8b\\x04\\x8c\\t\\x8c\\x04\\x8d\\t\\x8d\\x04\\x8e')\n buf.write('\\t\\x8e\\x04\\x8f\\t\\x8f\\x04\\x90\\t\\x90\\x04\\x91\\t\\x91')\n buf.write('\\x04\\x92\\t\\x92\\x04\\x93\\t\\x93\\x04\\x94\\t\\x94\\x04\\x95')\n buf.write('\\t\\x95\\x04\\x96\\t\\x96\\x04\\x97\\t\\x97\\x04\\x98\\t\\x98')\n buf.write('\\x04\\x99\\t\\x99\\x04\\x9a\\t\\x9a\\x04\\x9b\\t\\x9b\\x04\\x9c')\n buf.write('\\t\\x9c\\x04\\x9d\\t\\x9d\\x04\\x9e\\t\\x9e\\x04\\x9f\\t\\x9f')\n buf.write('\\x04\\xa0\\t\\xa0\\x04¡\\t¡\\x04¢\\t¢\\x04£')\n buf.write('\\t£\\x04¤\\t¤\\x04¥\\t¥\\x04¦\\t¦')\n buf.write('\\x04§\\t§\\x04¨\\t¨\\x04©\\t©\\x04ª')\n buf.write('\\tª\\x04«\\t«\\x04¬\\t¬\\x04\\xad\\t\\xad')\n buf.write('\\x04®\\t®\\x04¯\\t¯\\x04°\\t°\\x04±')\n buf.write('\\t±\\x04²\\t²\\x04³\\t³\\x04´\\t´')\n buf.write('\\x04µ\\tµ\\x04¶\\t¶\\x04·\\t·\\x04¸')\n buf.write('\\t¸\\x04¹\\t¹\\x04º\\tº\\x04»\\t»')\n buf.write('\\x04¼\\t¼\\x04½\\t½\\x04¾\\t¾\\x04¿')\n buf.write('\\t¿\\x04À\\tÀ\\x04Á\\tÁ\\x04Â\\tÂ')\n buf.write('\\x04Ã\\tÃ\\x04Ä\\tÄ\\x04Å\\tÅ\\x04Æ')\n buf.write('\\tÆ\\x04Ç\\tÇ\\x04È\\tÈ\\x04É\\tÉ')\n buf.write('\\x04Ê\\tÊ\\x04Ë\\tË\\x04Ì\\tÌ\\x04Í')\n buf.write('\\tÍ\\x04Î\\tÎ\\x04Ï\\tÏ\\x04Ð\\tÐ')\n buf.write('\\x04Ñ\\tÑ\\x04Ò\\tÒ\\x04Ó\\tÓ\\x04Ô')\n buf.write('\\tÔ\\x04Õ\\tÕ\\x04Ö\\tÖ\\x04×\\t×')\n buf.write('\\x04Ø\\tØ\\x04Ù\\tÙ\\x04Ú\\tÚ\\x04Û')\n buf.write('\\tÛ\\x04Ü\\tÜ\\x04Ý\\tÝ\\x04Þ\\tÞ')\n buf.write('\\x04ß\\tß\\x04à\\tà\\x04á\\tá\\x04â')\n buf.write('\\tâ\\x04ã\\tã\\x04ä\\tä\\x04å\\tå')\n buf.write('\\x04æ\\tæ\\x04ç\\tç\\x04è\\tè\\x04é')\n buf.write('\\té\\x04ê\\tê\\x04ë\\të\\x04ì\\tì')\n buf.write('\\x04í\\tí\\x04î\\tî\\x04ï\\tï\\x04ð')\n 
buf.write('\\tð\\x04ñ\\tñ\\x04ò\\tò\\x04ó\\tó')\n buf.write('\\x04ô\\tô\\x04õ\\tõ\\x04ö\\tö\\x04÷')\n buf.write('\\t÷\\x04ø\\tø\\x04ù\\tù\\x04ú\\tú')\n buf.write('\\x04û\\tû\\x04ü\\tü\\x04ý\\tý\\x04þ')\n buf.write('\\tþ\\x04ÿ\\tÿ\\x04Ā\\tĀ\\x04ā\\tā')\n buf.write('\\x04Ă\\tĂ\\x04ă\\tă\\x04Ą\\tĄ\\x04ą')\n buf.write('\\tą\\x04Ć\\tĆ\\x04ć\\tć\\x04Ĉ\\tĈ')\n buf.write('\\x04ĉ\\tĉ\\x04Ċ\\tĊ\\x04ċ\\tċ\\x04Č')\n buf.write('\\tČ\\x04č\\tč\\x04Ď\\tĎ\\x04ď\\tď')\n buf.write('\\x04Đ\\tĐ\\x04đ\\tđ\\x04Ē\\tĒ\\x04ē')\n buf.write('\\tē\\x04Ĕ\\tĔ\\x04ĕ\\tĕ\\x04Ė\\tĖ')\n buf.write('\\x04ė\\tė\\x04Ę\\tĘ\\x04ę\\tę\\x04Ě')\n buf.write('\\tĚ\\x04ě\\tě\\x04Ĝ\\tĜ\\x04ĝ\\tĝ')\n buf.write('\\x04Ğ\\tĞ\\x04ğ\\tğ\\x04Ġ\\tĠ\\x04ġ')\n buf.write('\\tġ\\x04Ģ\\tĢ\\x04ģ\\tģ\\x04Ĥ\\tĤ')\n buf.write('\\x04ĥ\\tĥ\\x04Ħ\\tĦ\\x04ħ\\tħ\\x04Ĩ')\n buf.write('\\tĨ\\x04ĩ\\tĩ\\x04Ī\\tĪ\\x04ī\\tī')\n buf.write('\\x04Ĭ\\tĬ\\x04ĭ\\tĭ\\x04Į\\tĮ\\x04į')\n buf.write('\\tį\\x04İ\\tİ\\x04ı\\tı\\x04IJ\\tIJ')\n buf.write('\\x04ij\\tij\\x04Ĵ\\tĴ\\x04ĵ\\tĵ\\x04Ķ')\n buf.write('\\tĶ\\x04ķ\\tķ\\x04ĸ\\tĸ\\x04Ĺ\\tĹ')\n buf.write('\\x04ĺ\\tĺ\\x04Ļ\\tĻ\\x04ļ\\tļ\\x04Ľ')\n buf.write('\\tĽ\\x04ľ\\tľ\\x04Ŀ\\tĿ\\x04ŀ\\tŀ')\n buf.write('\\x04Ł\\tŁ\\x04ł\\tł\\x04Ń\\tŃ\\x04ń')\n buf.write('\\tń\\x04Ņ\\tŅ\\x04ņ\\tņ\\x04Ň\\tŇ')\n buf.write('\\x04ň\\tň\\x04ʼn\\tʼn\\x04Ŋ\\tŊ\\x04ŋ')\n buf.write('\\tŋ\\x04Ō\\tŌ\\x04ō\\tō\\x04Ŏ\\tŎ')\n buf.write('\\x04ŏ\\tŏ\\x04Ő\\tŐ\\x04ő\\tő\\x04Œ')\n buf.write('\\tŒ\\x04œ\\tœ\\x04Ŕ\\tŔ\\x04ŕ\\tŕ')\n buf.write('\\x04Ŗ\\tŖ\\x04ŗ\\tŗ\\x04Ř\\tŘ\\x04ř')\n buf.write('\\tř\\x04Ś\\tŚ\\x04ś\\tś\\x04Ŝ\\tŜ')\n buf.write('\\x04ŝ\\tŝ\\x04Ş\\tŞ\\x04ş\\tş\\x04Š')\n buf.write('\\tŠ\\x04š\\tš\\x04Ţ\\tŢ\\x04ţ\\tţ')\n buf.write('\\x04Ť\\tŤ\\x04ť\\tť\\x04Ŧ\\tŦ\\x04ŧ')\n buf.write('\\tŧ\\x04Ũ\\tŨ\\x04ũ\\tũ\\x04Ū\\tŪ')\n buf.write('\\x04ū\\tū\\x04Ŭ\\tŬ\\x04ŭ\\tŭ\\x04Ů')\n buf.write('\\tŮ\\x04ů\\tů\\x04Ű\\tŰ\\x04ű\\tű')\n buf.write('\\x04Ų\\tŲ\\x04ų\\tų\\x04Ŵ\\tŴ\\x04ŵ')\n buf.write('\\tŵ\\x04Ŷ\\tŶ\\x04ŷ\\tŷ\\x04Ÿ\\tŸ')\n 
buf.write('\\x04Ź\\tŹ\\x04ź\\tź\\x04Ż\\tŻ\\x04ż')\n buf.write('\\tż\\x04Ž\\tŽ\\x04ž\\tž\\x04ſ\\tſ')\n buf.write('\\x04ƀ\\tƀ\\x04Ɓ\\tƁ\\x04Ƃ\\tƂ\\x04ƃ')\n buf.write('\\tƃ\\x04Ƅ\\tƄ\\x04ƅ\\tƅ\\x04Ɔ\\tƆ')\n buf.write('\\x04Ƈ\\tƇ\\x04ƈ\\tƈ\\x04Ɖ\\tƉ\\x04Ɗ')\n buf.write('\\tƊ\\x04Ƌ\\tƋ\\x04ƌ\\tƌ\\x04ƍ\\tƍ')\n buf.write('\\x04Ǝ\\tƎ\\x04Ə\\tƏ\\x04Ɛ\\tƐ\\x04Ƒ')\n buf.write('\\tƑ\\x04ƒ\\tƒ\\x04Ɠ\\tƓ\\x04Ɣ\\tƔ')\n buf.write('\\x04ƕ\\tƕ\\x04Ɩ\\tƖ\\x04Ɨ\\tƗ\\x04Ƙ')\n buf.write('\\tƘ\\x04ƙ\\tƙ\\x04ƚ\\tƚ\\x04ƛ\\tƛ')\n buf.write('\\x04Ɯ\\tƜ\\x04Ɲ\\tƝ\\x04ƞ\\tƞ\\x04Ɵ')\n buf.write('\\tƟ\\x04Ơ\\tƠ\\x04ơ\\tơ\\x04Ƣ\\tƢ')\n buf.write('\\x04ƣ\\tƣ\\x04Ƥ\\tƤ\\x04ƥ\\tƥ\\x04Ʀ')\n buf.write('\\tƦ\\x04Ƨ\\tƧ\\x04ƨ\\tƨ\\x04Ʃ\\tƩ')\n buf.write('\\x04ƪ\\tƪ\\x04ƫ\\tƫ\\x04Ƭ\\tƬ\\x04ƭ')\n buf.write('\\tƭ\\x04Ʈ\\tƮ\\x04Ư\\tƯ\\x04ư\\tư')\n buf.write('\\x04Ʊ\\tƱ\\x04Ʋ\\tƲ\\x04Ƴ\\tƳ\\x04ƴ')\n buf.write('\\tƴ\\x04Ƶ\\tƵ\\x04ƶ\\tƶ\\x04Ʒ\\tƷ')\n buf.write('\\x04Ƹ\\tƸ\\x04ƹ\\tƹ\\x04ƺ\\tƺ\\x04ƻ')\n buf.write('\\tƻ\\x04Ƽ\\tƼ\\x04ƽ\\tƽ\\x04ƾ\\tƾ')\n buf.write('\\x04ƿ\\tƿ\\x04ǀ\\tǀ\\x04ǁ\\tǁ\\x04ǂ')\n buf.write('\\tǂ\\x04ǃ\\tǃ\\x04DŽ\\tDŽ\\x04Dž\\tDž')\n buf.write('\\x04dž\\tdž\\x04LJ\\tLJ\\x04Lj\\tLj\\x04lj')\n buf.write('\\tlj\\x04NJ\\tNJ\\x04Nj\\tNj\\x04nj\\tnj')\n buf.write('\\x04Ǎ\\tǍ\\x04ǎ\\tǎ\\x04Ǐ\\tǏ\\x04ǐ')\n buf.write('\\tǐ\\x04Ǒ\\tǑ\\x04ǒ\\tǒ\\x04Ǔ\\tǓ')\n buf.write('\\x04ǔ\\tǔ\\x04Ǖ\\tǕ\\x04ǖ\\tǖ\\x04Ǘ')\n buf.write('\\tǗ\\x04ǘ\\tǘ\\x04Ǚ\\tǙ\\x04ǚ\\tǚ')\n buf.write('\\x04Ǜ\\tǛ\\x04ǜ\\tǜ\\x04ǝ\\tǝ\\x04Ǟ')\n buf.write('\\tǞ\\x04ǟ\\tǟ\\x04Ǡ\\tǠ\\x04ǡ\\tǡ')\n buf.write('\\x04Ǣ\\tǢ\\x04ǣ\\tǣ\\x04Ǥ\\tǤ\\x04ǥ')\n buf.write('\\tǥ\\x04Ǧ\\tǦ\\x04ǧ\\tǧ\\x04Ǩ\\tǨ')\n buf.write('\\x04ǩ\\tǩ\\x04Ǫ\\tǪ\\x04ǫ\\tǫ\\x04Ǭ')\n buf.write('\\tǬ\\x04ǭ\\tǭ\\x04Ǯ\\tǮ\\x04ǯ\\tǯ')\n buf.write('\\x04ǰ\\tǰ\\x04DZ\\tDZ\\x04Dz\\tDz\\x04dz')\n buf.write('\\tdz\\x04Ǵ\\tǴ\\x04ǵ\\tǵ\\x04Ƕ\\tǶ')\n buf.write('\\x04Ƿ\\tǷ\\x04Ǹ\\tǸ\\x04ǹ\\tǹ\\x04Ǻ')\n buf.write('\\tǺ\\x04ǻ\\tǻ\\x04Ǽ\\tǼ\\x04ǽ\\tǽ')\n 
buf.write('\\x04Ǿ\\tǾ\\x04ǿ\\tǿ\\x04Ȁ\\tȀ\\x04ȁ')\n buf.write('\\tȁ\\x04Ȃ\\tȂ\\x04ȃ\\tȃ\\x04Ȅ\\tȄ')\n buf.write('\\x04ȅ\\tȅ\\x04Ȇ\\tȆ\\x04ȇ\\tȇ\\x04Ȉ')\n buf.write('\\tȈ\\x04ȉ\\tȉ\\x04Ȋ\\tȊ\\x04ȋ\\tȋ')\n buf.write('\\x04Ȍ\\tȌ\\x04ȍ\\tȍ\\x04Ȏ\\tȎ\\x04ȏ')\n buf.write('\\tȏ\\x04Ȑ\\tȐ\\x04ȑ\\tȑ\\x04Ȓ\\tȒ')\n buf.write('\\x04ȓ\\tȓ\\x04Ȕ\\tȔ\\x04ȕ\\tȕ\\x04Ȗ')\n buf.write('\\tȖ\\x04ȗ\\tȗ\\x04Ș\\tȘ\\x04ș\\tș')\n buf.write('\\x04Ț\\tȚ\\x04ț\\tț\\x04Ȝ\\tȜ\\x04ȝ')\n buf.write('\\tȝ\\x04Ȟ\\tȞ\\x04ȟ\\tȟ\\x04Ƞ\\tȠ')\n buf.write('\\x04ȡ\\tȡ\\x04Ȣ\\tȢ\\x04ȣ\\tȣ\\x04Ȥ')\n buf.write('\\tȤ\\x04ȥ\\tȥ\\x04Ȧ\\tȦ\\x04ȧ\\tȧ')\n buf.write('\\x04Ȩ\\tȨ\\x04ȩ\\tȩ\\x04Ȫ\\tȪ\\x04ȫ')\n buf.write('\\tȫ\\x04Ȭ\\tȬ\\x04ȭ\\tȭ\\x04Ȯ\\tȮ')\n buf.write('\\x04ȯ\\tȯ\\x04Ȱ\\tȰ\\x04ȱ\\tȱ\\x04Ȳ')\n buf.write('\\tȲ\\x04ȳ\\tȳ\\x04ȴ\\tȴ\\x03\\x02\\x03\\x02\\x03\\x02\\x03')\n buf.write(\n '\\x03\\x03\\x03\\x03\\x04\\x03\\x04\\x03\\x04\\x03\\x04\\x03\\x05\\x03\\x05\\x03\\x05\\x03\\x05\\x03\\x05\\x03\\x05\\x03\\x06\\x03\\x06'\n )\n buf.write(\n '\\x03\\x06\\x03\\x06\\x03\\x06\\x03\\x06\\x03\\x07\\x03\\x07\\x03\\x07\\x03\\x07\\x03\\x07\\x03\\x07\\x03\\x07\\x03\\x07\\x03\\x07\\x03'\n )\n buf.write(\"\"\"\u0007\u0003\b\u0003\b\u0003\b\u0003\b\u0003\t\u0003\t\u0003\t\u0003\t\u0003\t\u0003\t\u0003\n\u0003\n\u0003\n\"\"\")\n buf.write(\"\"\"\u0003\n\u0003\n\u0003\n\u0003\n\u0003\n\u0003\u000b\u0003\u000b\u0003\u000b\u0003\u000b\u0003\f\u0003\f\u0003\f\u0003\"\"\")\n buf.write(\n '\\x0c\\x03\\r\\x03\\r\\x03\\r\\x03\\r\\x03\\r\\x03\\r\\x03\\x0e\\x03\\x0e\\x03\\x0e\\x03\\x0f\\x03\\x0f\\x03'\n )\n buf.write(\n '\\x0f\\x03\\x0f\\x03\\x0f\\x03\\x0f\\x03\\x0f\\x03\\x10\\x03\\x10\\x03\\x10\\x03\\x10\\x03\\x10\\x03\\x10'\n )\n buf.write(\n '\\x03\\x10\\x03\\x11\\x03\\x11\\x03\\x11\\x03\\x11\\x03\\x12\\x03\\x12\\x03\\x12\\x03\\x12\\x03\\x12\\x03\\x12'\n )\n buf.write(\n '\\x03\\x12\\x03\\x12\\x03\\x12\\x03\\x12\\x03\\x13\\x03\\x13\\x03\\x13\\x03\\x14\\x03\\x14\\x03\\x14\\x03\\x14'\n )\n buf.write(\n 
'\\x03\\x14\\x03\\x14\\x03\\x14\\x03\\x14\\x03\\x14\\x03\\x14\\x03\\x15\\x03\\x15\\x03\\x15\\x03\\x15\\x03\\x15'\n )\n buf.write(\n '\\x03\\x15\\x03\\x16\\x03\\x16\\x03\\x16\\x03\\x16\\x03\\x16\\x03\\x16\\x03\\x16\\x03\\x17\\x03\\x17\\x03\\x17'\n )\n buf.write(\n '\\x03\\x17\\x03\\x17\\x03\\x18\\x03\\x18\\x03\\x18\\x03\\x18\\x03\\x18\\x03\\x18\\x03\\x18\\x03\\x18\\x03\\x18'\n )\n buf.write(\n '\\x03\\x18\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19'\n )\n buf.write(\n '\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19\\x03\\x19'\n )\n buf.write(\n '\\x03\\x19\\x03\\x19\\x03\\x1a\\x03\\x1a\\x03\\x1a\\x03\\x1a\\x03\\x1a\\x03\\x1a\\x03\\x1b\\x03\\x1b\\x03\\x1b'\n )\n buf.write(\n '\\x03\\x1b\\x03\\x1b\\x03\\x1b\\x03\\x1b\\x03\\x1c\\x03\\x1c\\x03\\x1c\\x03\\x1c\\x03\\x1c\\x03\\x1c\\x03\\x1d'\n )\n buf.write(\n '\\x03\\x1d\\x03\\x1d\\x03\\x1d\\x03\\x1d\\x03\\x1d\\x03\\x1d\\x03\\x1d\\x03\\x1e\\x03\\x1e\\x03\\x1e\\x03\\x1e'\n )\n buf.write(\n '\\x03\\x1e\\x03\\x1e\\x03\\x1f\\x03\\x1f\\x03\\x1f\\x03\\x1f\\x03\\x1f\\x03\\x1f\\x03\\x1f\\x03\\x1f\\x03\\x1f'\n )\n buf.write(\n '\\x03\\x1f\\x03\\x1f\\x03\\x1f\\x03\\x1f\\x03\\x1f\\x03 \\x03 \\x03 \\x03 \\x03 \\x03 \\x03 \\x03 \\x03 \\x03'\n )\n buf.write(\n ' \\x03 \\x03 \\x03 \\x03!\\x03!\\x03!\\x03!\\x03!\\x03!\\x03!\\x03!\\x03!\\x03!\\x03!\\x03!\\x03!\\x03!\\x03'\n )\n buf.write(\n '!\\x03\"\\x03\"\\x03\"\\x03\"\\x03\"\\x03#\\x03#\\x03#\\x03#\\x03#\\x03#\\x03$\\x03$\\x03$\\x03$\\x03'\n )\n buf.write(\n \"$\\x03%\\x03%\\x03%\\x03%\\x03%\\x03%\\x03%\\x03%\\x03&\\x03&\\x03&\\x03&\\x03&\\x03'\\x03'\\x03'\\x03\"\n )\n buf.write(\n \"'\\x03'\\x03'\\x03'\\x03'\\x03(\\x03(\\x03(\\x03(\\x03(\\x03)\\x03)\\x03)\\x03*\\x03*\\x03*\\x03\"\n )\n buf.write(\n '*\\x03*\\x03+\\x03+\\x03,\\x03,\\x03,\\x03,\\x03,\\x03,\\x03-\\x03-\\x03-\\x03-\\x03-\\x03.\\x03.\\x03.\\x03'\n )\n buf.write(\n 
'.\\x03.\\x03.\\x03.\\x03.\\x03.\\x03.\\x03/\\x03/\\x03/\\x03/\\x03/\\x03/\\x03/\\x03/\\x030\\x030'\n )\n buf.write('\\x030\\x030\\x030\\x031\\x031\\x031\\x031\\x031\\x032\\x032\\x032')\n buf.write('\\x032\\x032\\x033\\x033\\x033\\x033\\x033\\x033\\x033\\x033\\x034')\n buf.write('\\x034\\x034\\x034\\x034\\x034\\x034\\x034\\x034\\x034\\x035\\x035')\n buf.write('\\x035\\x035\\x035\\x035\\x036\\x036\\x036\\x036\\x037\\x037\\x037')\n buf.write(\n '\\x037\\x037\\x038\\x038\\x038\\x038\\x038\\x038\\x039\\x039\\x039\\x039\\x039\\x039\\x039\\x039\\x03'\n )\n buf.write(\n ':\\x03:\\x03:\\x03:\\x03:\\x03:\\x03:\\x03:\\x03;\\x03;\\x03;\\x03;\\x03;\\x03;\\x03;\\x03;\\x03<\\x03<\\x03'\n )\n buf.write(\n '<\\x03<\\x03<\\x03<\\x03<\\x03<\\x03=\\x03=\\x03=\\x03=\\x03=\\x03=\\x03=\\x03>\\x03>\\x03>\\x03>\\x03>\\x03'\n )\n buf.write(\n '>\\x03>\\x03>\\x03>\\x03>\\x03?\\x03?\\x03?\\x03?\\x03?\\x03?\\x03?\\x03?\\x03?\\x03?\\x03?\\x03?\\x03?\\x03'\n )\n buf.write(\n '?\\x03@\\x03@\\x03@\\x03@\\x03@\\x03@\\x03@\\x03@\\x03A\\x03A\\x03A\\x03A\\x03A\\x03A\\x03A\\x03A\\x03A\\x03'\n )\n buf.write(\n 'B\\x03B\\x03B\\x03B\\x03B\\x03B\\x03B\\x03B\\x03C\\x03C\\x03C\\x03C\\x03C\\x03C\\x03C\\x03C\\x03C\\x03C\\x03'\n )\n buf.write(\n 'C\\x03C\\x03C\\x03C\\x03C\\x03C\\x03D\\x03D\\x03D\\x03D\\x03D\\x03D\\x03D\\x03D\\x03D\\x03E\\x03E\\x03E\\x03'\n )\n buf.write(\n 'E\\x03E\\x03E\\x03E\\x03E\\x03E\\x03E\\x03E\\x03F\\x03F\\x03F\\x03F\\x03F\\x03F\\x03F\\x03F\\x03F\\x03F\\x03'\n )\n buf.write(\n 'F\\x03F\\x03G\\x03G\\x03G\\x03G\\x03G\\x03G\\x03G\\x03G\\x03G\\x03G\\x03G\\x03G\\x03H\\x03H\\x03H\\x03H\\x03'\n )\n buf.write(\n 'H\\x03H\\x03H\\x03H\\x03I\\x03I\\x03I\\x03I\\x03I\\x03I\\x03I\\x03I\\x03J\\x03J\\x03J\\x03J\\x03J\\x03J\\x03'\n )\n buf.write(\n 'J\\x03J\\x03J\\x03K\\x03K\\x03K\\x03K\\x03K\\x03K\\x03K\\x03K\\x03L\\x03L\\x03L\\x03L\\x03L\\x03L\\x03L\\x03'\n )\n buf.write(\n 'L\\x03L\\x03L\\x03L\\x03L\\x03M\\x03M\\x03M\\x03M\\x03M\\x03M\\x03M\\x03M\\x03M\\x03M\\x03M\\x03M\\x03M\\x03'\n )\n buf.write(\n 
'M\\x03M\\x03M\\x03N\\x03N\\x03N\\x03N\\x03N\\x03O\\x03O\\x03O\\x03O\\x03O\\x03O\\x03P\\x03P\\x03P\\x03P\\x03'\n )\n buf.write(\n 'P\\x03P\\x03P\\x03Q\\x03Q\\x03Q\\x03Q\\x03Q\\x03Q\\x03R\\x03R\\x03R\\x03R\\x03R\\x03S\\x03S\\x03S\\x03S\\x03'\n )\n buf.write(\n 'S\\x03S\\x03S\\x03S\\x03T\\x03T\\x03T\\x03T\\x03T\\x03T\\x03T\\x03T\\x03T\\x03T\\x03T\\x03T\\x03T\\x03U\\x03'\n )\n buf.write(\n 'U\\x03U\\x03U\\x03U\\x03U\\x03U\\x03V\\x03V\\x03V\\x03V\\x03V\\x03V\\x03V\\x03V\\x03V\\x03V\\x03V\\x03V\\x03'\n )\n buf.write(\n 'W\\x03W\\x03W\\x03W\\x03W\\x03W\\x03X\\x03X\\x03X\\x03X\\x03X\\x03Y\\x03Y\\x03Y\\x03Y\\x03Y\\x03Y\\x03Y\\x03'\n )\n buf.write(\n 'Y\\x03Y\\x03Z\\x03Z\\x03Z\\x03Z\\x03Z\\x03[\\x03[\\x03[\\x03[\\x03\\\\\\x03\\\\\\x03\\\\\\x03\\\\\\x03\\\\\\x03'\n )\n buf.write(\n '\\\\\\x03\\\\\\x03\\\\\\x03\\\\\\x03\\\\\\x03\\\\\\x03\\\\\\x03\\\\\\x03\\\\\\x03\\\\\\x03]\\x03]\\x03]\\x03]\\x03]'\n )\n buf.write(\n '\\x03]\\x03]\\x03]\\x03]\\x03]\\x03]\\x03^\\x03^\\x03^\\x03^\\x03_\\x03_\\x03_\\x03_\\x03_\\x03_\\x03`\\x03'\n )\n buf.write(\n '`\\x03`\\x03`\\x03a\\x03a\\x03a\\x03a\\x03a\\x03a\\x03a\\x03a\\x03b\\x03b\\x03b\\x03b\\x03b\\x03b\\x03b\\x03'\n )\n buf.write(\n 'b\\x03c\\x03c\\x03c\\x03c\\x03c\\x03c\\x03c\\x03c\\x03c\\x03c\\x03d\\x03d\\x03d\\x03d\\x03d\\x03d\\x03d\\x03'\n )\n buf.write(\n 'd\\x03d\\x03d\\x03e\\x03e\\x03e\\x03e\\x03e\\x03e\\x03e\\x03e\\x03f\\x03f\\x03f\\x03f\\x03f\\x03f\\x03f\\x03'\n )\n buf.write(\n 'f\\x03f\\x03g\\x03g\\x03g\\x03g\\x03g\\x03g\\x03g\\x03g\\x03g\\x03h\\x03h\\x03h\\x03h\\x03h\\x03h\\x03h\\x03'\n )\n buf.write(\n 'h\\x03i\\x03i\\x03i\\x03i\\x03i\\x03i\\x03i\\x03j\\x03j\\x03j\\x03j\\x03j\\x03j\\x03k\\x03k\\x03k\\x03k\\x03'\n )\n buf.write(\n 'k\\x03l\\x03l\\x03l\\x03l\\x03l\\x03l\\x03l\\x03l\\x03l\\x03l\\x03l\\x03l\\x03l\\x03l\\x03m\\x03m\\x03m\\x03'\n )\n buf.write(\n 'm\\x03m\\x03m\\x03m\\x03m\\x03m\\x03m\\x03n\\x03n\\x03n\\x03n\\x03n\\x03n\\x03n\\x03n\\x03o\\x03o\\x03o\\x03'\n )\n buf.write(\n 
'o\\x03o\\x03o\\x03o\\x03o\\x03o\\x03o\\x03o\\x03o\\x03o\\x03p\\x03p\\x03p\\x03p\\x03p\\x03p\\x03p\\x03p\\x03'\n )\n buf.write(\n 'p\\x03q\\x03q\\x03q\\x03q\\x03q\\x03q\\x03q\\x03q\\x03q\\x03r\\x03r\\x03r\\x03r\\x03r\\x03r\\x03r\\x03s\\x03'\n )\n buf.write(\n 's\\x03s\\x03s\\x03s\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03'\n )\n buf.write(\n 't\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03t\\x03u\\x03u\\x03u\\x03u\\x03u\\x03v\\x03v\\x03'\n )\n buf.write(\n 'v\\x03v\\x03v\\x03v\\x03v\\x03v\\x03w\\x03w\\x03w\\x03w\\x03w\\x03x\\x03x\\x03x\\x03x\\x03x\\x03x\\x03y\\x03'\n )\n buf.write(\n 'y\\x03y\\x03y\\x03y\\x03y\\x03z\\x03z\\x03z\\x03z\\x03z\\x03z\\x03z\\x03{\\x03{\\x03{\\x03{\\x03{\\x03{\\x03'\n )\n buf.write(\n '{\\x03{\\x03{\\x03|\\x03|\\x03|\\x03|\\x03}\\x03}\\x03}\\x03}\\x03}\\x03}\\x03}\\x03}\\x03}\\x03}\\x03}\\x03'\n )\n buf.write(\n '}\\x03}\\x03}\\x03}\\x03~\\x03~\\x03~\\x03~\\x03\\x7f\\x03\\x7f\\x03\\x7f\\x03\\x7f\\x03\\x7f\\x03'\n )\n buf.write(\n '\\x7f\\x03\\x7f\\x03\\x80\\x03\\x80\\x03\\x80\\x03\\x80\\x03\\x80\\x03\\x80')\n buf.write('\\x03\\x80\\x03\\x81\\x03\\x81\\x03\\x81\\x03\\x81\\x03\\x81\\x03\\x81')\n buf.write('\\x03\\x81\\x03\\x81\\x03\\x81\\x03\\x82\\x03\\x82\\x03\\x82\\x03\\x82')\n buf.write('\\x03\\x82\\x03\\x82\\x03\\x82\\x03\\x83\\x03\\x83\\x03\\x83\\x03\\x83')\n buf.write('\\x03\\x83\\x03\\x83\\x03\\x83\\x03\\x83\\x03\\x83\\x03\\x83\\x03\\x84')\n buf.write('\\x03\\x84\\x03\\x84\\x03\\x84\\x03\\x84\\x03\\x84\\x03\\x84\\x03\\x84')\n buf.write('\\x03\\x84\\x03\\x84\\x03\\x84\\x03\\x84\\x03\\x84\\x03\\x84\\x03\\x84')\n buf.write('\\x03\\x85\\x03\\x85\\x03\\x85\\x03\\x85\\x03\\x85\\x03\\x85\\x03\\x85')\n buf.write('\\x03\\x85\\x03\\x85\\x03\\x85\\x03\\x85\\x03\\x86\\x03\\x86\\x03\\x86')\n buf.write('\\x03\\x86\\x03\\x86\\x03\\x86\\x03\\x86\\x03\\x86\\x03\\x87\\x03\\x87')\n buf.write('\\x03\\x87\\x03\\x87\\x03\\x87\\x03\\x87\\x03\\x87\\x03\\x87\\x03\\x87')\n 
buf.write('\\x03\\x87\\x03\\x88\\x03\\x88\\x03\\x88\\x03\\x88\\x03\\x88\\x03\\x88')\n buf.write('\\x03\\x88\\x03\\x88\\x03\\x89\\x03\\x89\\x03\\x89\\x03\\x89\\x03\\x89')\n buf.write('\\x03\\x89\\x03\\x89\\x03\\x8a\\x03\\x8a\\x03\\x8a\\x03\\x8a\\x03\\x8a')\n buf.write('\\x03\\x8b\\x03\\x8b\\x03\\x8b\\x03\\x8b\\x03\\x8b\\x03\\x8b\\x03\\x8b')\n buf.write('\\x03\\x8b\\x03\\x8c\\x03\\x8c\\x03\\x8c\\x03\\x8c\\x03\\x8c\\x03\\x8c')\n buf.write('\\x03\\x8c\\x03\\x8c\\x03\\x8c\\x03\\x8d\\x03\\x8d\\x03\\x8d\\x03\\x8d')\n buf.write('\\x03\\x8d\\x03\\x8d\\x03\\x8d\\x03\\x8d\\x03\\x8e\\x03\\x8e\\x03\\x8e')\n buf.write('\\x03\\x8e\\x03\\x8e\\x03\\x8e\\x03\\x8e\\x03\\x8e\\x03\\x8f\\x03\\x8f')\n buf.write('\\x03\\x8f\\x03\\x8f\\x03\\x8f\\x03\\x8f\\x03\\x90\\x03\\x90\\x03\\x90')\n buf.write('\\x03\\x90\\x03\\x90\\x03\\x90\\x03\\x91\\x03\\x91\\x03\\x91\\x03\\x91')\n buf.write('\\x03\\x91\\x03\\x91\\x03\\x92\\x03\\x92\\x03\\x92\\x03\\x92\\x03\\x92')\n buf.write('\\x03\\x92\\x03\\x93\\x03\\x93\\x03\\x93\\x03\\x93\\x03\\x93\\x03\\x93')\n buf.write('\\x03\\x93\\x03\\x93\\x03\\x93\\x03\\x93\\x03\\x93\\x03\\x93\\x03\\x94')\n buf.write('\\x03\\x94\\x03\\x94\\x03\\x94\\x03\\x94\\x03\\x94\\x03\\x95\\x03\\x95')\n buf.write('\\x03\\x95\\x03\\x95\\x03\\x95\\x03\\x95\\x03\\x95\\x03\\x95\\x03\\x95')\n buf.write('\\x03\\x95\\x03\\x96\\x03\\x96\\x03\\x96\\x03\\x96\\x03\\x96\\x03\\x96')\n buf.write('\\x03\\x96\\x03\\x96\\x03\\x97\\x03\\x97\\x03\\x97\\x03\\x97\\x03\\x98')\n buf.write('\\x03\\x98\\x03\\x98\\x03\\x98\\x03\\x98\\x03\\x98\\x03\\x98\\x03\\x99')\n buf.write('\\x03\\x99\\x03\\x99\\x03\\x99\\x03\\x99\\x03\\x99\\x03\\x9a\\x03\\x9a')\n buf.write('\\x03\\x9a\\x03\\x9a\\x03\\x9a\\x03\\x9b\\x03\\x9b\\x03\\x9b\\x03\\x9b')\n buf.write('\\x03\\x9b\\x03\\x9c\\x03\\x9c\\x03\\x9c\\x03\\x9c\\x03\\x9c\\x03\\x9c')\n buf.write('\\x03\\x9c\\x03\\x9c\\x03\\x9c\\x03\\x9d\\x03\\x9d\\x03\\x9d\\x03\\x9d')\n buf.write('\\x03\\x9d\\x03\\x9e\\x03\\x9e\\x03\\x9e\\x03\\x9e\\x03\\x9e\\x03\\x9e')\n 
buf.write('\\x03\\x9f\\x03\\x9f\\x03\\x9f\\x03\\x9f\\x03\\x9f\\x03\\x9f\\x03\\xa0')\n buf.write('\\x03\\xa0\\x03\\xa0\\x03\\xa0\\x03\\xa0\\x03\\xa0\\x03\\xa0\\x03\\xa0')\n buf.write('\\x03\\xa0\\x03¡\\x03¡\\x03¡\\x03¡\\x03¡\\x03¢')\n buf.write('\\x03¢\\x03¢\\x03¢\\x03¢\\x03¢\\x03¢\\x03£')\n buf.write('\\x03£\\x03£\\x03£\\x03£\\x03¤\\x03¤\\x03¤')\n buf.write('\\x03¤\\x03¤\\x03¥\\x03¥\\x03¥\\x03¦\\x03¦')\n buf.write('\\x03¦\\x03¦\\x03¦\\x03¦\\x03¦\\x03§\\x03§')\n buf.write('\\x03§\\x03§\\x03§\\x03§\\x03§\\x03§\\x03§')\n buf.write('\\x03§\\x03¨\\x03¨\\x03¨\\x03©\\x03©\\x03©')\n buf.write('\\x03©\\x03©\\x03©\\x03©\\x03©\\x03ª\\x03ª')\n buf.write('\\x03ª\\x03ª\\x03ª\\x03ª\\x03ª\\x03ª\\x03ª')\n buf.write('\\x03ª\\x03«\\x03«\\x03«\\x03«\\x03«\\x03«')\n buf.write('\\x03«\\x03«\\x03«\\x03«\\x03¬\\x03¬\\x03¬')\n buf.write('\\x03¬\\x03¬\\x03¬\\x03¬\\x03\\xad\\x03\\xad\\x03\\xad')\n buf.write('\\x03\\xad\\x03\\xad\\x03\\xad\\x03®\\x03®\\x03®\\x03®')\n buf.write('\\x03®\\x03®\\x03®\\x03®\\x03¯\\x03¯\\x03¯')\n buf.write('\\x03¯\\x03¯\\x03¯\\x03¯\\x03¯\\x03¯\\x03¯')\n buf.write('\\x03°\\x03°\\x03°\\x03°\\x03°\\x03°\\x03°')\n buf.write('\\x03°\\x03±\\x03±\\x03±\\x03±\\x03±\\x03±')\n buf.write('\\x03±\\x03±\\x03±\\x03²\\x03²\\x03²\\x03²')\n buf.write('\\x03²\\x03²\\x03²\\x03³\\x03³\\x03³\\x03³')\n buf.write('\\x03³\\x03³\\x03´\\x03´\\x03´\\x03´\\x03´')\n buf.write('\\x03´\\x03µ\\x03µ\\x03µ\\x03µ\\x03µ\\x03µ')\n buf.write('\\x03µ\\x03¶\\x03¶\\x03¶\\x03¶\\x03¶\\x03¶')\n buf.write('\\x03¶\\x03¶\\x03¶\\x03¶\\x03¶\\x03¶\\x03¶')\n buf.write('\\x03·\\x03·\\x03·\\x03·\\x03·\\x03·\\x03·')\n buf.write('\\x03·\\x03¸\\x03¸\\x03¸\\x03¸\\x03¹\\x03¹')\n buf.write('\\x03¹\\x03¹\\x03¹\\x03¹\\x03¹\\x03¹\\x03º')\n buf.write('\\x03º\\x03º\\x03º\\x03º\\x03º\\x03º\\x03º')\n buf.write('\\x03º\\x03º\\x03»\\x03»\\x03»\\x03»\\x03»')\n buf.write('\\x03»\\x03»\\x03»\\x03»\\x03¼\\x03¼\\x03¼')\n buf.write('\\x03¼\\x03¼\\x03½\\x03½\\x03½\\x03½\\x03½')\n buf.write('\\x03½\\x03½\\x03½\\x03½\\x03½\\x03½\\x03¾')\n 
buf.write('\\x03¾\\x03¾\\x03¿\\x03¿\\x03¿\\x03¿\\x03¿')\n buf.write('\\x03¿\\x03¿\\x03¿\\x03¿\\x03¿\\x03À\\x03À')\n buf.write('\\x03À\\x03À\\x03À\\x03À\\x03À\\x03À\\x03Á')\n buf.write('\\x03Á\\x03Á\\x03Á\\x03Á\\x03Â\\x03Â\\x03Â')\n buf.write('\\x03Â\\x03Â\\x03Ã\\x03Ã\\x03Ã\\x03Ã\\x03Ã')\n buf.write('\\x03Ä\\x03Ä\\x03Ä\\x03Ä\\x03Ä\\x03Ä\\x03Ä')\n buf.write('\\x03Ä\\x03Ä\\x03Å\\x03Å\\x03Å\\x03Å\\x03Å')\n buf.write('\\x03Æ\\x03Æ\\x03Æ\\x03Æ\\x03Æ\\x03Æ\\x03Æ')\n buf.write('\\x03Æ\\x03Æ\\x03Æ\\x03Æ\\x03Ç\\x03Ç\\x03Ç')\n buf.write('\\x03Ç\\x03Ç\\x03Ç\\x03Ç\\x03Ç\\x03È\\x03È')\n buf.write('\\x03È\\x03È\\x03È\\x03É\\x03É\\x03É\\x03É')\n buf.write('\\x03É\\x03É\\x03Ê\\x03Ê\\x03Ê\\x03Ê\\x03Ê')\n buf.write('\\x03Ê\\x03Ê\\x03Ê\\x03Ë\\x03Ë\\x03Ë\\x03Ë')\n buf.write('\\x03Ë\\x03Ì\\x03Ì\\x03Ì\\x03Ì\\x03Ì\\x03Ì')\n buf.write('\\x03Í\\x03Í\\x03Í\\x03Í\\x03Í\\x03Í\\x03Î')\n buf.write('\\x03Î\\x03Î\\x03Î\\x03Î\\x03Î\\x03Ï\\x03Ï')\n buf.write('\\x03Ï\\x03Ï\\x03Ï\\x03Ï\\x03Ð\\x03Ð\\x03Ð')\n buf.write('\\x03Ð\\x03Ð\\x03Ð\\x03Ñ\\x03Ñ\\x03Ñ\\x03Ñ')\n buf.write('\\x03Ñ\\x03Ò\\x03Ò\\x03Ò\\x03Ò\\x03Ò\\x03Ò')\n buf.write('\\x03Ò\\x03Ó\\x03Ó\\x03Ó\\x03Ó\\x03Ô\\x03Ô')\n buf.write('\\x03Ô\\x03Ô\\x03Ô\\x03Ô\\x03Ô\\x03Õ\\x03Õ')\n buf.write('\\x03Õ\\x03Õ\\x03Õ\\x03Õ\\x03Ö\\x03Ö\\x03Ö')\n buf.write('\\x03Ö\\x03Ö\\x03×\\x03×\\x03×\\x03×\\x03×')\n buf.write('\\x03Ø\\x03Ø\\x03Ø\\x03Ø\\x03Ø\\x03Ù\\x03Ù')\n buf.write('\\x03Ù\\x03Ù\\x03Ú\\x03Ú\\x03Ú\\x03Ú\\x03Ú')\n buf.write('\\x03Ú\\x03Ú\\x03Ú\\x03Û\\x03Û\\x03Û\\x03Û')\n buf.write('\\x03Û\\x03Û\\x03Û\\x03Û\\x03Û\\x03Ü\\x03Ü')\n buf.write('\\x03Ü\\x03Ü\\x03Ü\\x03Ü\\x03Ü\\x03Ü\\x03Ü')\n buf.write('\\x03Ý\\x03Ý\\x03Ý\\x03Ý\\x03Ý\\x03Ý\\x03Ý')\n buf.write('\\x03Þ\\x03Þ\\x03Þ\\x03Þ\\x03Þ\\x03Þ\\x03ß')\n buf.write('\\x03ß\\x03ß\\x03ß\\x03ß\\x03ß\\x03à\\x03à')\n buf.write('\\x03à\\x03à\\x03à\\x03à\\x03à\\x03á\\x03á')\n buf.write('\\x03á\\x03á\\x03á\\x03á\\x03á\\x03á\\x03á')\n buf.write('\\x03â\\x03â\\x03â\\x03â\\x03â\\x03â\\x03â')\n 
buf.write('\\x03â\\x03â\\x03ã\\x03ã\\x03ã\\x03ã\\x03ã')\n buf.write('\\x03ä\\x03ä\\x03ä\\x03ä\\x03ä\\x03ä\\x03å')\n buf.write('\\x03å\\x03å\\x03å\\x03å\\x03å\\x03å\\x03æ')\n buf.write('\\x03æ\\x03æ\\x03æ\\x03æ\\x03æ\\x03ç\\x03ç')\n buf.write('\\x03ç\\x03ç\\x03ç\\x03ç\\x03ç\\x03ç\\x03ç')\n buf.write('\\x03è\\x03è\\x03è\\x03è\\x03è\\x03é\\x03é')\n buf.write('\\x03é\\x03é\\x03ê\\x03ê\\x03ê\\x03ê\\x03ê')\n buf.write('\\x03ê\\x03ê\\x03ê\\x03ë\\x03ë\\x03ë\\x03ë')\n buf.write('\\x03ë\\x03ë\\x03ë\\x03ë\\x03ë\\x03ì\\x03ì')\n buf.write('\\x03ì\\x03ì\\x03í\\x03í\\x03í\\x03í\\x03í')\n buf.write('\\x03í\\x03î\\x03î\\x03î\\x03î\\x03î\\x03î')\n buf.write('\\x03î\\x03î\\x03î\\x03ï\\x03ï\\x03ï\\x03ï')\n buf.write('\\x03ï\\x03ï\\x03ð\\x03ð\\x03ð\\x03ð\\x03ð')\n buf.write('\\x03ð\\x03ð\\x03ñ\\x03ñ\\x03ñ\\x03ñ\\x03ò')\n buf.write('\\x03ò\\x03ò\\x03ó\\x03ó\\x03ó\\x03ó\\x03ó')\n buf.write('\\x03ó\\x03ó\\x03ó\\x03ô\\x03ô\\x03ô\\x03ô')\n buf.write('\\x03ô\\x03ô\\x03ô\\x03ô\\x03õ\\x03õ\\x03õ')\n buf.write('\\x03õ\\x03õ\\x03õ\\x03õ\\x03ö\\x03ö\\x03ö')\n buf.write('\\x03ö\\x03ö\\x03ö\\x03ö\\x03ö\\x03÷\\x03÷')\n buf.write('\\x03÷\\x03÷\\x03÷\\x03÷\\x03÷\\x03÷\\x03÷')\n buf.write('\\x03÷\\x03÷\\x03÷\\x03÷\\x03÷\\x03÷\\x03÷')\n buf.write('\\x03÷\\x03ø\\x03ø\\x03ø\\x03ø\\x03ø\\x03ø')\n buf.write('\\x03ø\\x03ø\\x03ø\\x03ø\\x03ø\\x03ù\\x03ù')\n buf.write('\\x03ù\\x03ù\\x03ù\\x03ù\\x03ù\\x03ù\\x03ù')\n buf.write('\\x03ù\\x03ù\\x03ú\\x03ú\\x03ú\\x03ú\\x03ú')\n buf.write('\\x03û\\x03û\\x03û\\x03û\\x03û\\x03û\\x03û')\n buf.write('\\x03û\\x03ü\\x03ü\\x03ü\\x03ü\\x03ü\\x03ü')\n buf.write('\\x03ü\\x03ü\\x03ü\\x03ü\\x03ü\\x03ü\\x03ü')\n buf.write('\\x03ü\\x03ý\\x03ý\\x03ý\\x03ý\\x03þ\\x03þ')\n buf.write('\\x03þ\\x03þ\\x03þ\\x03þ\\x03þ\\x03ÿ\\x03ÿ')\n buf.write('\\x03ÿ\\x03ÿ\\x03ÿ\\x03Ā\\x03Ā\\x03Ā\\x03Ā')\n buf.write('\\x03Ā\\x03Ā\\x03ā\\x03ā\\x03ā\\x03ā\\x03ā')\n buf.write('\\x03ā\\x03ā\\x03Ă\\x03Ă\\x03Ă\\x03Ă\\x03Ă')\n buf.write('\\x03Ă\\x03Ă\\x03Ă\\x03ă\\x03ă\\x03ă\\x03ă')\n 
buf.write('\\x03ă\\x03ă\\x03ă\\x03ă\\x03ă\\x03ă\\x03Ą')\n buf.write('\\x03Ą\\x03Ą\\x03Ą\\x03Ą\\x03Ą\\x03Ą\\x03ą')\n buf.write('\\x03ą\\x03ą\\x03Ć\\x03Ć\\x03Ć\\x03Ć\\x03ć')\n buf.write('\\x03ć\\x03ć\\x03ć\\x03Ĉ\\x03Ĉ\\x03Ĉ\\x03Ĉ')\n buf.write('\\x03ĉ\\x03ĉ\\x03ĉ\\x03Ċ\\x03Ċ\\x03Ċ\\x03Ċ')\n buf.write('\\x03Ċ\\x03ċ\\x03ċ\\x03ċ\\x03ċ\\x03ċ\\x03Č')\n buf.write('\\x03Č\\x03Č\\x03Č\\x03Č\\x03Č\\x03Č\\x03č')\n buf.write('\\x03č\\x03č\\x03Ď\\x03Ď\\x03Ď\\x03Ď\\x03Ď')\n buf.write('\\x03Ď\\x03Ď\\x03Ď\\x03ď\\x03ď\\x03ď\\x03ď')\n buf.write('\\x03ď\\x03ď\\x03Đ\\x03Đ\\x03Đ\\x03Đ\\x03Đ')\n buf.write('\\x03Đ\\x03Đ\\x03Đ\\x03Đ\\x03Đ\\x03Đ\\x03đ')\n buf.write('\\x03đ\\x03đ\\x03đ\\x03đ\\x03đ\\x03đ\\x03đ')\n buf.write('\\x03Ē\\x03Ē\\x03Ē\\x03Ē\\x03ē\\x03ē\\x03ē')\n buf.write('\\x03ē\\x03ē\\x03ē\\x03Ĕ\\x03Ĕ\\x03Ĕ\\x03Ĕ')\n buf.write('\\x03Ĕ\\x03ĕ\\x03ĕ\\x03ĕ\\x03ĕ\\x03ĕ\\x03ĕ')\n buf.write('\\x03ĕ\\x03ĕ\\x03ĕ\\x03ĕ\\x03ĕ\\x03Ė\\x03Ė')\n buf.write('\\x03Ė\\x03Ė\\x03Ė\\x03Ė\\x03Ė\\x03Ė\\x03ė')\n buf.write('\\x03ė\\x03ė\\x03ė\\x03ė\\x03ė\\x03ė\\x03ė')\n buf.write('\\x03ė\\x03ė\\x03ė\\x03ė\\x03ė\\x03ė\\x03ė')\n buf.write('\\x03ė\\x03Ę\\x03Ę\\x03Ę\\x03Ę\\x03Ę\\x03Ę')\n buf.write('\\x03Ę\\x03Ę\\x03Ę\\x03Ę\\x03Ę\\x03ę\\x03ę')\n buf.write('\\x03ę\\x03ę\\x03ę\\x03ę\\x03ę\\x03Ě\\x03Ě')\n buf.write('\\x03Ě\\x03Ě\\x03Ě\\x03Ě\\x03Ě\\x03Ě\\x03Ě')\n buf.write('\\x03Ě\\x03ě\\x03ě\\x03ě\\x03ě\\x03ě\\x03ě')\n buf.write('\\x03ě\\x03ě\\x03Ĝ\\x03Ĝ\\x03Ĝ\\x03Ĝ\\x03Ĝ')\n buf.write('\\x03ĝ\\x03ĝ\\x03ĝ\\x03ĝ\\x03ĝ\\x03ĝ\\x03ĝ')\n buf.write('\\x03ĝ\\x03ĝ\\x03Ğ\\x03Ğ\\x03Ğ\\x03Ğ\\x03Ğ')\n buf.write('\\x03Ğ\\x03ğ\\x03ğ\\x03ğ\\x03ğ\\x03ğ\\x03ğ')\n buf.write('\\x03ğ\\x03ğ\\x03ğ\\x03ğ\\x03Ġ\\x03Ġ\\x03Ġ')\n buf.write('\\x03Ġ\\x03Ġ\\x03Ġ\\x03ġ\\x03ġ\\x03ġ\\x03ġ')\n buf.write('\\x03ġ\\x03Ģ\\x03Ģ\\x03Ģ\\x03Ģ\\x03Ģ\\x03Ģ')\n buf.write('\\x03Ģ\\x03Ģ\\x03Ģ\\x03Ģ\\x03Ģ\\x03Ģ\\x03ģ')\n buf.write('\\x03ģ\\x03ģ\\x03ģ\\x03ģ\\x03ģ\\x03ģ\\x03ģ')\n buf.write('\\x03ģ\\x03Ĥ\\x03Ĥ\\x03Ĥ\\x03Ĥ\\x03Ĥ\\x03Ĥ')\n 
buf.write('\\x03Ĥ\\x03Ĥ\\x03Ĥ\\x03Ĥ\\x03ĥ\\x03ĥ\\x03ĥ')\n buf.write('\\x03ĥ\\x03ĥ\\x03ĥ\\x03ĥ\\x03Ħ\\x03Ħ\\x03Ħ')\n buf.write('\\x03Ħ\\x03Ħ\\x03Ħ\\x03Ħ\\x03Ħ\\x03Ħ\\x03Ħ')\n buf.write('\\x03ħ\\x03ħ\\x03ħ\\x03ħ\\x03ħ\\x03ħ\\x03ħ')\n buf.write('\\x03ħ\\x03ħ\\x03ħ\\x03Ĩ\\x03Ĩ\\x03Ĩ\\x03Ĩ')\n buf.write('\\x03Ĩ\\x03Ĩ\\x03Ĩ\\x03Ĩ\\x03ĩ\\x03ĩ\\x03ĩ')\n buf.write('\\x03ĩ\\x03ĩ\\x03ĩ\\x03Ī\\x03Ī\\x03Ī\\x03Ī')\n buf.write('\\x03Ī\\x03Ī\\x03Ī\\x03Ī\\x03Ī\\x03Ī\\x03ī')\n buf.write('\\x03ī\\x03ī\\x03ī\\x03ī\\x03ī\\x03Ĭ\\x03Ĭ')\n buf.write('\\x03Ĭ\\x03Ĭ\\x03Ĭ\\x03Ĭ\\x03ĭ\\x03ĭ\\x03ĭ')\n buf.write('\\x03ĭ\\x03Į\\x03Į\\x03Į\\x03Į\\x03Į\\x03į')\n buf.write('\\x03į\\x03į\\x03į\\x03į\\x03İ\\x03İ\\x03İ')\n buf.write('\\x03İ\\x03İ\\x03İ\\x03İ\\x03ı\\x03ı\\x03ı')\n buf.write('\\x03ı\\x03IJ\\x03IJ\\x03IJ\\x03IJ\\x03IJ\\x03IJ')\n buf.write('\\x03IJ\\x03IJ\\x03IJ\\x03IJ\\x03ij\\x03ij\\x03ij')\n buf.write('\\x03ij\\x03ij\\x03ij\\x03ij\\x03ij\\x03ij\\x03ij')\n buf.write('\\x03ij\\x03ij\\x03Ĵ\\x03Ĵ\\x03Ĵ\\x03Ĵ\\x03Ĵ')\n buf.write('\\x03Ĵ\\x03Ĵ\\x03ĵ\\x03ĵ\\x03ĵ\\x03ĵ\\x03ĵ')\n buf.write('\\x03ĵ\\x03ĵ\\x03ĵ\\x03ĵ\\x03ĵ\\x03Ķ\\x03Ķ')\n buf.write('\\x03Ķ\\x03Ķ\\x03Ķ\\x03Ķ\\x03Ķ\\x03ķ\\x03ķ')\n buf.write('\\x03ķ\\x03ķ\\x03ķ\\x03ķ\\x03ķ\\x03ķ\\x03ĸ')\n buf.write('\\x03ĸ\\x03ĸ\\x03ĸ\\x03ĸ\\x03ĸ\\x03ĸ\\x03ĸ')\n buf.write('\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ')\n buf.write('\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ')\n buf.write('\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03Ĺ\\x03ĺ')\n buf.write('\\x03ĺ\\x03ĺ\\x03ĺ\\x03ĺ\\x03ĺ\\x03ĺ\\x03Ļ')\n buf.write('\\x03Ļ\\x03Ļ\\x03Ļ\\x03Ļ\\x03Ļ\\x03Ļ\\x03Ļ')\n buf.write('\\x03Ļ\\x03Ļ\\x03Ļ\\x03Ļ\\x03Ļ\\x03ļ\\x03ļ')\n buf.write('\\x03ļ\\x03ļ\\x03ļ\\x03ļ\\x03ļ\\x03Ľ\\x03Ľ')\n buf.write('\\x03Ľ\\x03Ľ\\x03Ľ\\x03Ľ\\x03Ľ\\x03Ľ\\x03Ľ')\n buf.write('\\x03Ľ\\x03ľ\\x03ľ\\x03ľ\\x03ľ\\x03ľ\\x03ľ')\n buf.write('\\x03Ŀ\\x03Ŀ\\x03Ŀ\\x03Ŀ\\x03Ŀ\\x03Ŀ\\x03Ŀ')\n buf.write('\\x03Ŀ\\x03ŀ\\x03ŀ\\x03ŀ\\x03ŀ\\x03ŀ\\x03ŀ')\n buf.write('\\x03ŀ\\x03Ł\\x03Ł\\x03Ł\\x03Ł\\x03Ł\\x03Ł')\n 
buf.write('\\x03ł\\x03ł\\x03ł\\x03ł\\x03ł\\x03ł\\x03ł')\n buf.write('\\x03ł\\x03ł\\x03Ń\\x03Ń\\x03Ń\\x03Ń\\x03Ń')\n buf.write('\\x03Ń\\x03Ń\\x03ń\\x03ń\\x03ń\\x03ń\\x03Ņ')\n buf.write('\\x03Ņ\\x03Ņ\\x03Ņ\\x03Ņ\\x03Ņ\\x03ņ\\x03ņ')\n buf.write('\\x03ņ\\x03ņ\\x03ņ\\x03Ň\\x03Ň\\x03Ň\\x03Ň')\n buf.write('\\x03Ň\\x03Ň\\x03ň\\x03ň\\x03ň\\x03ň\\x03ň')\n buf.write('\\x03ň\\x03ň\\x03ʼn\\x03ʼn\\x03ʼn\\x03ʼn\\x03ʼn')\n buf.write('\\x03Ŋ\\x03Ŋ\\x03Ŋ\\x03Ŋ\\x03Ŋ\\x03Ŋ\\x03Ŋ')\n buf.write('\\x03Ŋ\\x03Ŋ\\x03Ŋ\\x03ŋ\\x03ŋ\\x03ŋ\\x03ŋ')\n buf.write('\\x03ŋ\\x03ŋ\\x03ŋ\\x03Ō\\x03Ō\\x03Ō\\x03Ō')\n buf.write('\\x03Ō\\x03Ō\\x03Ō\\x03Ō\\x03Ō\\x03Ō\\x03Ō')\n buf.write('\\x03Ō\\x03ō\\x03ō\\x03ō\\x03ō\\x03Ŏ\\x03Ŏ')\n buf.write('\\x03Ŏ\\x03Ŏ\\x03Ŏ\\x03Ŏ\\x03Ŏ\\x03ŏ\\x03ŏ')\n buf.write('\\x03ŏ\\x03ŏ\\x03ŏ\\x03ŏ\\x03ŏ\\x03Ő\\x03Ő')\n buf.write('\\x03Ő\\x03Ő\\x03Ő\\x03ő\\x03ő\\x03ő\\x03ő')\n buf.write('\\x03ő\\x03ő\\x03ő\\x03ő\\x03Œ\\x03Œ\\x03Œ')\n buf.write('\\x03Œ\\x03Œ\\x03Œ\\x03Œ\\x03œ\\x03œ\\x03œ')\n buf.write('\\x03œ\\x03œ\\x03Ŕ\\x03Ŕ\\x03Ŕ\\x03Ŕ\\x03Ŕ')\n buf.write('\\x03Ŕ\\x03Ŕ\\x03Ŕ\\x03Ŕ\\x03ŕ\\x03ŕ\\x03ŕ')\n buf.write('\\x03ŕ\\x03ŕ\\x03ŕ\\x03ŕ\\x03ŕ\\x03ŕ\\x03ŕ')\n buf.write('\\x03ŕ\\x03Ŗ\\x03Ŗ\\x03Ŗ\\x03Ŗ\\x03Ŗ\\x03Ŗ')\n buf.write('\\x03Ŗ\\x03Ŗ\\x03Ŗ\\x03Ŗ\\x03Ŗ\\x03Ŗ\\x03Ŗ')\n buf.write('\\x03ŗ\\x03ŗ\\x03ŗ\\x03ŗ\\x03ŗ\\x03ŗ\\x03ŗ')\n buf.write('\\x03ŗ\\x03ŗ\\x03ŗ\\x03ŗ\\x03ŗ\\x03ŗ\\x03ŗ')\n buf.write('\\x03ŗ\\x03ŗ\\x03ŗ\\x03ŗ\\x03Ř\\x03Ř\\x03Ř')\n buf.write('\\x03Ř\\x03Ř\\x03Ř\\x03Ř\\x03Ř\\x03Ř\\x03Ř')\n buf.write('\\x03Ř\\x03Ř\\x03ř\\x03ř\\x03ř\\x03ř\\x03ř')\n buf.write('\\x03ř\\x03ř\\x03ř\\x03ř\\x03ř\\x03ř\\x03ř')\n buf.write('\\x03ř\\x03ř\\x03ř\\x03ř\\x03Ś\\x03Ś\\x03Ś')\n buf.write('\\x03Ś\\x03ś\\x03ś\\x03ś\\x03ś\\x03ś\\x03Ŝ')\n buf.write('\\x03Ŝ\\x03Ŝ\\x03Ŝ\\x03Ŝ\\x03Ŝ\\x03Ŝ\\x03Ŝ')\n buf.write('\\x03Ŝ\\x03ŝ\\x03ŝ\\x03ŝ\\x03ŝ\\x03ŝ\\x03ŝ')\n buf.write('\\x03Ş\\x03Ş\\x03Ş\\x03Ş\\x03Ş\\x03ş\\x03ş')\n buf.write('\\x03ş\\x03ş\\x03ş\\x03ş\\x03ş\\x03ş\\x03ş')\n 
buf.write('\\x03Š\\x03Š\\x03Š\\x03Š\\x03Š\\x03Š\\x03Š')\n buf.write('\\x03Š\\x03Š\\x03š\\x03š\\x03š\\x03š\\x03š')\n buf.write('\\x03š\\x03š\\x03š\\x03š\\x03Ţ\\x03Ţ\\x03Ţ')\n buf.write('\\x03Ţ\\x03Ţ\\x03Ţ\\x03Ţ\\x03Ţ\\x03Ţ\\x03Ţ')\n buf.write('\\x03Ţ\\x03Ţ\\x03Ţ\\x03Ţ\\x03Ţ\\x03ţ\\x03ţ')\n buf.write('\\x03ţ\\x03ţ\\x03ţ\\x03ţ\\x03ţ\\x03Ť\\x03Ť')\n buf.write('\\x03Ť\\x03Ť\\x03Ť\\x03ť\\x03ť\\x03ť\\x03ť')\n buf.write('\\x03ť\\x03Ŧ\\x03Ŧ\\x03Ŧ\\x03Ŧ\\x03Ŧ\\x03Ŧ')\n buf.write('\\x03Ŧ\\x03Ŧ\\x03Ŧ\\x03ŧ\\x03ŧ\\x03ŧ\\x03ŧ')\n buf.write('\\x03ŧ\\x03ŧ\\x03ŧ\\x03ŧ\\x03ŧ\\x03Ũ\\x03Ũ')\n buf.write('\\x03Ũ\\x03Ũ\\x03Ũ\\x03ũ\\x03ũ\\x03ũ\\x03ũ')\n buf.write('\\x03ũ\\x03ũ\\x03ũ\\x03ũ\\x03ũ\\x03ũ\\x03ũ')\n buf.write('\\x03ũ\\x03ũ\\x03ũ\\x03Ū\\x03Ū\\x03Ū\\x03Ū')\n buf.write('\\x03Ū\\x03Ū\\x03Ū\\x03Ū\\x03ū\\x03ū\\x03ū')\n buf.write('\\x03ū\\x03ū\\x03ū\\x03ū\\x03ū\\x03ū\\x03Ŭ')\n buf.write('\\x03Ŭ\\x03Ŭ\\x03Ŭ\\x03Ŭ\\x03Ŭ\\x03Ŭ\\x03Ŭ')\n buf.write('\\x03Ŭ\\x03Ŭ\\x03Ŭ\\x03ŭ\\x03ŭ\\x03ŭ\\x03ŭ')\n buf.write('\\x03ŭ\\x03ŭ\\x03Ů\\x03Ů\\x03Ů\\x03Ů\\x03Ů')\n buf.write('\\x03Ů\\x03Ů\\x03Ů\\x03ů\\x03ů\\x03ů\\x03ů')\n buf.write('\\x03ů\\x03ů\\x03ů\\x03ů\\x03ů\\x03ů\\x03Ű')\n buf.write('\\x03Ű\\x03Ű\\x03Ű\\x03Ű\\x03Ű\\x03Ű\\x03Ű')\n buf.write('\\x03Ű\\x03Ű\\x03Ű\\x03Ű\\x03Ű\\x03ű\\x03ű')\n buf.write('\\x03ű\\x03ű\\x03ű\\x03ű\\x03ű\\x03Ų\\x03Ų')\n buf.write('\\x03Ų\\x03Ų\\x03Ų\\x03Ų\\x03Ų\\x03Ų\\x03Ų')\n buf.write('\\x03Ų\\x03Ų\\x03ų\\x03ų\\x03ų\\x03ų\\x03ų')\n buf.write('\\x03ų\\x03ų\\x03Ŵ\\x03Ŵ\\x03Ŵ\\x03Ŵ\\x03Ŵ')\n buf.write('\\x03Ŵ\\x03Ŵ\\x03Ŵ\\x03Ŵ\\x03Ŵ\\x03Ŵ\\x03Ŵ')\n buf.write('\\x03ŵ\\x03ŵ\\x03ŵ\\x03ŵ\\x03ŵ\\x03ŵ\\x03ŵ')\n buf.write('\\x03ŵ\\x03ŵ\\x03ŵ\\x03ŵ\\x03ŵ\\x03ŵ\\x03Ŷ')\n buf.write('\\x03Ŷ\\x03Ŷ\\x03Ŷ\\x03Ŷ\\x03Ŷ\\x03Ŷ\\x03Ŷ')\n buf.write('\\x03Ŷ\\x03Ŷ\\x03Ŷ\\x03Ŷ\\x03Ŷ\\x03Ŷ\\x03ŷ')\n buf.write('\\x03ŷ\\x03ŷ\\x03ŷ\\x03ŷ\\x03ŷ\\x03ŷ\\x03ŷ')\n buf.write('\\x03Ÿ\\x03Ÿ\\x03Ÿ\\x03Ÿ\\x03Ÿ\\x03Ÿ\\x03Ÿ')\n buf.write('\\x03Ÿ\\x03Ź\\x03Ź\\x03Ź\\x03Ź\\x03Ź\\x03Ź')\n 
buf.write('\\x03Ź\\x03Ź\\x03ź\\x03ź\\x03ź\\x03ź\\x03ź')\n buf.write('\\x03ź\\x03Ż\\x03Ż\\x03Ż\\x03Ż\\x03ż\\x03ż')\n buf.write('\\x03ż\\x03ż\\x03ż\\x03Ž\\x03Ž\\x03Ž\\x03Ž')\n buf.write('\\x03Ž\\x03ž\\x03ž\\x03ž\\x03ž\\x03ž\\x03ž')\n buf.write('\\x03ž\\x03ž\\x03ž\\x03ž\\x03ſ\\x03ſ\\x03ſ')\n buf.write('\\x03ſ\\x03ſ\\x03ſ\\x03ſ\\x03ſ\\x03ſ\\x03ſ')\n buf.write('\\x03ſ\\x03ſ\\x03ſ\\x03ſ\\x03ſ\\x03ſ\\x03ſ')\n buf.write('\\x03ſ\\x03ſ\\x03ſ\\x03ſ\\x03ſ\\x03ſ\\x03ſ')\n buf.write('\\x03ſ\\x03ſ\\x03ſ\\x03ſ\\x03ƀ\\x03ƀ\\x03ƀ')\n buf.write('\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ')\n buf.write('\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ')\n buf.write('\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ\\x03ƀ')\n buf.write('\\x03ƀ\\x03ƀ\\x03ƀ\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ')\n buf.write('\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ')\n buf.write('\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ')\n buf.write('\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ɓ\\x03Ƃ')\n buf.write('\\x03Ƃ\\x03Ƃ\\x03Ƃ\\x03Ƃ\\x03Ƃ\\x03Ƃ\\x03Ƃ')\n buf.write('\\x03Ƃ\\x03Ƃ\\x03Ƃ\\x03Ƃ\\x03Ƃ\\x03Ƃ\\x03ƃ')\n buf.write('\\x03ƃ\\x03ƃ\\x03ƃ\\x03ƃ\\x03ƃ\\x03ƃ\\x03ƃ')\n buf.write('\\x03ƃ\\x03ƃ\\x03ƃ\\x03ƃ\\x03ƃ\\x03ƃ\\x03Ƅ')\n buf.write('\\x03Ƅ\\x03Ƅ\\x03Ƅ\\x03Ƅ\\x03Ƅ\\x03Ƅ\\x03Ƅ')\n buf.write('\\x03Ƅ\\x03Ƅ\\x03Ƅ\\x03Ƅ\\x03Ƅ\\x03Ƅ\\x03Ƅ')\n buf.write('\\x03Ƅ\\x03ƅ\\x03ƅ\\x03ƅ\\x03ƅ\\x03ƅ\\x03ƅ')\n buf.write('\\x03ƅ\\x03ƅ\\x03ƅ\\x03ƅ\\x03ƅ\\x03ƅ\\x03ƅ')\n buf.write('\\x03ƅ\\x03ƅ\\x03ƅ\\x03Ɔ\\x03Ɔ\\x03Ɔ\\x03Ƈ')\n buf.write('\\x03Ƈ\\x03Ƈ\\x03Ƈ\\x03Ƈ\\x03Ƈ\\x03Ƈ\\x03Ƈ')\n buf.write('\\x03Ƈ\\x03ƈ\\x03ƈ\\x03ƈ\\x03ƈ\\x03ƈ\\x03ƈ')\n buf.write('\\x03ƈ\\x03ƈ\\x03ƈ\\x03ƈ\\x03ƈ\\x03ƈ\\x03Ɖ')\n buf.write('\\x03Ɖ\\x03Ɖ\\x03Ɖ\\x03Ɖ\\x03Ɖ\\x03Ɖ\\x03Ɖ')\n buf.write('\\x03Ɖ\\x03Ɖ\\x03Ɗ\\x03Ɗ\\x03Ɗ\\x03Ɗ\\x03Ɗ')\n buf.write('\\x03Ɗ\\x03Ƌ\\x03Ƌ\\x03Ƌ\\x03Ƌ\\x03Ƌ\\x03Ƌ')\n buf.write('\\x03Ƌ\\x03Ƌ\\x03ƌ\\x03ƌ\\x03ƌ\\x03ƌ\\x03ƌ')\n buf.write('\\x03ƍ\\x03ƍ\\x03ƍ\\x03ƍ\\x03ƍ\\x03Ǝ\\x03Ǝ')\n buf.write('\\x03Ǝ\\x03Ǝ\\x03Ǝ\\x03Ǝ\\x03Ǝ\\x03Ǝ\\x03Ǝ')\n 
buf.write('\\x03Ə\\x03Ə\\x03Ə\\x03Ə\\x03Ə\\x03Ɛ\\x03Ɛ')\n buf.write('\\x03Ɛ\\x03Ɛ\\x03Ɛ\\x03Ɛ\\x03Ɛ\\x03Ɛ\\x03Ɛ')\n buf.write('\\x03Ɛ\\x03Ƒ\\x03Ƒ\\x03Ƒ\\x03Ƒ\\x03Ƒ\\x03Ƒ')\n buf.write('\\x03ƒ\\x03ƒ\\x03ƒ\\x03ƒ\\x03ƒ\\x03ƒ\\x03Ɠ')\n buf.write('\\x03Ɠ\\x03Ɠ\\x03Ɠ\\x03Ɠ\\x03Ɠ\\x03Ɠ\\x03Ɣ')\n buf.write('\\x03Ɣ\\x03Ɣ\\x03Ɣ\\x03Ɣ\\x03Ɣ\\x03Ɣ\\x03Ɣ')\n buf.write('\\x03Ɣ\\x03Ɣ\\x03ƕ\\x03ƕ\\x03ƕ\\x03ƕ\\x03ƕ')\n buf.write('\\x03ƕ\\x03ƕ\\x03ƕ\\x03Ɩ\\x03Ɩ\\x03Ɩ\\x03Ɩ')\n buf.write('\\x03Ɩ\\x03Ɩ\\x03Ɨ\\x03Ɨ\\x03Ɨ\\x03Ɨ\\x03Ɨ')\n buf.write('\\x03Ɨ\\x03Ɨ\\x03Ƙ\\x03Ƙ\\x03Ƙ\\x03Ƙ\\x03Ƙ')\n buf.write('\\x03Ƙ\\x03Ƙ\\x03Ƙ\\x03ƙ\\x03ƙ\\x03ƙ\\x03ƙ')\n buf.write('\\x03ƙ\\x03ƙ\\x03ƙ\\x03ƚ\\x03ƚ\\x03ƚ\\x03ƚ')\n buf.write('\\x03ƚ\\x03ƚ\\x03ƚ\\x03ƛ\\x03ƛ\\x03ƛ\\x03ƛ')\n buf.write('\\x03Ɯ\\x03Ɯ\\x03Ɯ\\x03Ɯ\\x03Ɯ\\x03Ɯ\\x03Ɲ')\n buf.write('\\x03Ɲ\\x03Ɲ\\x03Ɲ\\x03Ɲ\\x03Ɲ\\x03Ɲ\\x03Ɲ')\n buf.write('\\x03Ɲ\\x03ƞ\\x03ƞ\\x03ƞ\\x03ƞ\\x03ƞ\\x03ƞ')\n buf.write('\\x03Ɵ\\x03Ɵ\\x03Ɵ\\x03Ɵ\\x03Ɵ\\x03Ɵ\\x03Ɵ')\n buf.write('\\x03Ơ\\x03Ơ\\x03Ơ\\x03Ơ\\x03Ơ\\x03Ơ\\x03Ơ')\n buf.write('\\x03Ơ\\x03ơ\\x03ơ\\x03ơ\\x03ơ\\x03ơ\\x03ơ')\n buf.write('\\x03ơ\\x03ơ\\x03ơ\\x03Ƣ\\x03Ƣ\\x03Ƣ\\x03Ƣ')\n buf.write('\\x03Ƣ\\x03Ƣ\\x03Ƣ\\x03Ƣ\\x03Ƣ\\x03ƣ\\x03ƣ')\n buf.write('\\x03ƣ\\x03ƣ\\x03ƣ\\x03ƣ\\x03ƣ\\x03Ƥ\\x03Ƥ')\n buf.write('\\x03Ƥ\\x03Ƥ\\x03Ƥ\\x03Ƥ\\x03Ƥ\\x03Ƥ\\x03ƥ')\n buf.write('\\x03ƥ\\x03ƥ\\x03ƥ\\x03ƥ\\x03ƥ\\x03ƥ\\x03ƥ')\n buf.write('\\x03Ʀ\\x03Ʀ\\x03Ʀ\\x03Ʀ\\x03Ʀ\\x03Ʀ\\x03Ʀ')\n buf.write('\\x03Ʀ\\x03Ʀ\\x03Ƨ\\x03Ƨ\\x03Ƨ\\x03Ƨ\\x03Ƨ')\n buf.write('\\x03ƨ\\x03ƨ\\x03ƨ\\x03ƨ\\x03ƨ\\x03ƨ\\x03ƨ')\n buf.write('\\x03ƨ\\x03Ʃ\\x03Ʃ\\x03Ʃ\\x03Ʃ\\x03Ʃ\\x03Ʃ')\n buf.write('\\x03Ʃ\\x03Ʃ\\x03Ʃ\\x03Ʃ\\x03Ʃ\\x03ƪ\\x03ƪ')\n buf.write('\\x03ƪ\\x03ƪ\\x03ƪ\\x03ƫ\\x03ƫ\\x03ƫ\\x03ƫ')\n buf.write('\\x03ƫ\\x03ƫ\\x03ƫ\\x03ƫ\\x03ƫ\\x03Ƭ\\x03Ƭ')\n buf.write('\\x03Ƭ\\x03Ƭ\\x03Ƭ\\x03Ƭ\\x03ƭ\\x03ƭ\\x03ƭ')\n buf.write('\\x03ƭ\\x03ƭ\\x03ƭ\\x03Ʈ\\x03Ʈ\\x03Ʈ\\x03Ʈ')\n buf.write('\\x03Ʈ\\x03Ư\\x03Ư\\x03Ư\\x03Ư\\x03Ư\\x03Ư')\n 
buf.write('\\x03Ư\\x03ư\\x03ư\\x03ư\\x03ư\\x03ư\\x03Ʊ')\n buf.write('\\x03Ʊ\\x03Ʊ\\x03Ʊ\\x03Ʊ\\x03Ʊ\\x03Ʋ\\x03Ʋ')\n buf.write('\\x03Ʋ\\x03Ʋ\\x03Ƴ\\x03Ƴ\\x03Ƴ\\x03Ƴ\\x03Ƴ')\n buf.write('\\x03Ƴ\\x03Ƴ\\x03ƴ\\x03ƴ\\x03ƴ\\x03ƴ\\x03ƴ')\n buf.write('\\x03ƴ\\x03ƴ\\x03ƴ\\x03ƴ\\x03ƴ\\x03ƴ\\x03ƴ')\n buf.write('\\x03ƴ\\x03ƴ\\x03Ƶ\\x03Ƶ\\x03Ƶ\\x03Ƶ\\x03Ƶ')\n buf.write('\\x03Ƶ\\x03Ƶ\\x03Ƶ\\x03ƶ\\x03ƶ\\x03ƶ\\x03ƶ')\n buf.write('\\x03ƶ\\x03ƶ\\x03ƶ\\x03ƶ\\x03ƶ\\x03ƶ\\x03ƶ')\n buf.write('\\x03ƶ\\x03ƶ\\x03Ʒ\\x03Ʒ\\x03Ʒ\\x03Ʒ\\x03Ʒ')\n buf.write('\\x03Ʒ\\x03Ʒ\\x03Ʒ\\x03Ʒ\\x03Ʒ\\x03Ʒ\\x03Ƹ')\n buf.write('\\x03Ƹ\\x03Ƹ\\x03Ƹ\\x03Ƹ\\x03Ƹ\\x03Ƹ\\x03Ƹ')\n buf.write('\\x03Ƹ\\x03Ƹ\\x03ƹ\\x03ƹ\\x03ƹ\\x03ƹ\\x03ƹ')\n buf.write('\\x03ƹ\\x03ƹ\\x03ƹ\\x03ƹ\\x03ƹ\\x03ƺ\\x03ƺ')\n buf.write('\\x03ƺ\\x03ƺ\\x03ƺ\\x03ƺ\\x03ƺ\\x03ƺ\\x03ƺ')\n buf.write('\\x03ƺ\\x03ƺ\\x03ƺ\\x03ƺ\\x03ƺ\\x03ƻ\\x03ƻ')\n buf.write('\\x03ƻ\\x03ƻ\\x03ƻ\\x03ƻ\\x03ƻ\\x03ƻ\\x03ƻ')\n buf.write('\\x03Ƽ\\x03Ƽ\\x03Ƽ\\x03Ƽ\\x03Ƽ\\x03Ƽ\\x03ƽ')\n buf.write('\\x03ƽ\\x03ƽ\\x03ƽ\\x03ƽ\\x03ƽ\\x03ƽ\\x03ƽ')\n buf.write('\\x03ƽ\\x03ƾ\\x03ƾ\\x03ƾ\\x03ƾ\\x03ƾ\\x03ƾ')\n buf.write('\\x03ƾ\\x03ƾ\\x03ƿ\\x03ƿ\\x03ƿ\\x03ƿ\\x03ƿ')\n buf.write('\\x03ƿ\\x03ƿ\\x03ƿ\\x03ƿ\\x03ƿ\\x03ƿ\\x03ƿ')\n buf.write('\\x03ƿ\\x03ǀ\\x03ǀ\\x03ǀ\\x03ǀ\\x03ǀ\\x03ǀ')\n buf.write('\\x03ǀ\\x03ǀ\\x03ǀ\\x03ǁ\\x03ǁ\\x03ǁ\\x03ǁ')\n buf.write('\\x03ǁ\\x03ǂ\\x03ǂ\\x03ǂ\\x03ǂ\\x03ǃ\\x03ǃ')\n buf.write('\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ')\n buf.write('\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ')\n buf.write('\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ\\x03ǃ')\n buf.write('\\x03ǃ\\x03ǃ\\x03DŽ\\x03DŽ\\x03DŽ\\x03DŽ\\x03DŽ')\n buf.write('\\x03Dž\\x03Dž\\x03Dž\\x03Dž\\x03Dž\\x03Dž\\x03Dž')\n buf.write('\\x03Dž\\x03Dž\\x03Dž\\x03Dž\\x03dž\\x03dž\\x03dž')\n buf.write('\\x03dž\\x03dž\\x03dž\\x03dž\\x03dž\\x03dž\\x03dž')\n buf.write('\\x03dž\\x03dž\\x03dž\\x03dž\\x03dž\\x03dž\\x03dž')\n buf.write('\\x03dž\\x03LJ\\x03LJ\\x03LJ\\x03LJ\\x03LJ\\x03LJ')\n 
buf.write('\\x03LJ\\x03LJ\\x03LJ\\x03LJ\\x03LJ\\x03LJ\\x03LJ')\n buf.write('\\x03LJ\\x03LJ\\x03LJ\\x03Lj\\x03Lj\\x03Lj\\x03Lj')\n buf.write('\\x03Lj\\x03Lj\\x03Lj\\x03Lj\\x03Lj\\x03Lj\\x03Lj')\n buf.write('\\x03Lj\\x03Lj\\x03Lj\\x03Lj\\x03Lj\\x03Lj\\x03Lj')\n buf.write('\\x03Lj\\x03lj\\x03lj\\x03lj\\x03lj\\x03lj\\x03lj')\n buf.write('\\x03lj\\x03lj\\x03lj\\x03lj\\x03lj\\x03lj\\x03lj')\n buf.write('\\x03lj\\x03lj\\x03lj\\x03lj\\x03lj\\x03lj\\x03lj')\n buf.write('\\x03lj\\x03lj\\x03lj\\x03NJ\\x03NJ\\x03NJ\\x03NJ')\n buf.write('\\x03NJ\\x03NJ\\x03NJ\\x03NJ\\x03NJ\\x03NJ\\x03NJ')\n buf.write('\\x03NJ\\x03NJ\\x03NJ\\x03NJ\\x03Nj\\x03Nj\\x03Nj')\n buf.write('\\x03Nj\\x03Nj\\x03Nj\\x03Nj\\x03Nj\\x03Nj\\x03Nj')\n buf.write('\\x03nj\\x03nj\\x03nj\\x03nj\\x03nj\\x03nj\\x03nj')\n buf.write('\\x03nj\\x03nj\\x03nj\\x03nj\\x03Ǎ\\x03Ǎ\\x03Ǎ')\n buf.write('\\x03Ǎ\\x03Ǎ\\x03Ǎ\\x03Ǎ\\x03Ǎ\\x03ǎ\\x03ǎ')\n buf.write('\\x03ǎ\\x03ǎ\\x03ǎ\\x03ǎ\\x03ǎ\\x03ǎ\\x03ǎ')\n buf.write('\\x03ǎ\\x03ǎ\\x03ǎ\\x03ǎ\\x03Ǐ\\x03Ǐ\\x03Ǐ')\n buf.write('\\x03Ǐ\\x03Ǐ\\x03Ǐ\\x03Ǐ\\x03Ǐ\\x03Ǐ\\x03Ǐ')\n buf.write('\\x03Ǐ\\x03Ǐ\\x03Ǐ\\x03Ǐ\\x03Ǐ\\x03Ǐ\\x03ǐ')\n buf.write('\\x03ǐ\\x03ǐ\\x03ǐ\\x03ǐ\\x03ǐ\\x03ǐ\\x03ǐ')\n buf.write('\\x03ǐ\\x03ǐ\\x03ǐ\\x03ǐ\\x03ǐ\\x03ǐ\\x03ǐ')\n buf.write('\\x03ǐ\\x03Ǒ\\x03Ǒ\\x03Ǒ\\x03Ǒ\\x03Ǒ\\x03ǒ')\n buf.write('\\x03ǒ\\x03ǒ\\x03ǒ\\x03Ǔ\\x03Ǔ\\x03Ǔ\\x03Ǔ')\n buf.write('\\x03Ǔ\\x03ǔ\\x03ǔ\\x03ǔ\\x03ǔ\\x03Ǖ\\x03Ǖ')\n buf.write('\\x03Ǖ\\x03Ǖ\\x03Ǖ\\x03ǖ\\x03ǖ\\x03ǖ\\x03ǖ')\n buf.write('\\x03Ǘ\\x03Ǘ\\x03Ǘ\\x03Ǘ\\x03Ǘ\\x03Ǘ\\x03Ǘ')\n buf.write('\\x03ǘ\\x03ǘ\\x03ǘ\\x03ǘ\\x03Ǚ\\x03Ǚ\\x03Ǚ')\n buf.write('\\x03Ǚ\\x03Ǚ\\x03Ǚ\\x03ǚ\\x03ǚ\\x03ǚ\\x03ǚ')\n buf.write('\\x03ǚ\\x03ǚ\\x03ǚ\\x03ǚ\\x03ǚ\\x03ǚ\\x03ǚ')\n buf.write('\\x03ǚ\\x03ǚ\\x03ǚ\\x03ǚ\\x03ǚ\\x03Ǜ\\x03Ǜ')\n buf.write('\\x03Ǜ\\x03Ǜ\\x03Ǜ\\x03Ǜ\\x03Ǜ\\x03Ǜ\\x03Ǜ')\n buf.write('\\x03Ǜ\\x03Ǜ\\x03ǜ\\x03ǜ\\x03ǜ\\x03ǜ\\x03ǝ')\n buf.write('\\x03ǝ\\x03ǝ\\x03ǝ\\x03ǝ\\x03ǝ\\x03ǝ\\x03ǝ')\n 
buf.write('\\x03ǝ\\x03Ǟ\\x03Ǟ\\x03Ǟ\\x03Ǟ\\x03Ǟ\\x03Ǟ')\n buf.write('\\x03ǟ\\x03ǟ\\x03ǟ\\x03ǟ\\x03ǟ\\x03ǟ\\x03ǟ')\n buf.write('\\x03Ǡ\\x03Ǡ\\x03Ǡ\\x03Ǡ\\x03Ǡ\\x03ǡ\\x03ǡ')\n buf.write('\\x03ǡ\\x03ǡ\\x03ǡ\\x03ǡ\\x03ǡ\\x03Ǣ\\x03Ǣ')\n buf.write('\\x03Ǣ\\x03Ǣ\\x03Ǣ\\x03Ǣ\\x07Ǣ፨\\nǢ')\n buf.write('\\x0cǢ\\x0eǢ፫\\x0bǢ\\x03Ǣ\\x03Ǣ\\x03ǣ')\n buf.write('\\x03ǣ\\x03ǣ\\x07ǣ፲\\nǣ\\x0cǣ\\x0eǣ')\n buf.write('፵\\x0bǣ\\x03ǣ\\x06ǣ፸\\nǣ\\rǣ')\n buf.write('\\x0eǣ፹\\x03Ǥ\\x03Ǥ\\x03Ǥ\\x07Ǥ\\u137f')\n buf.write('\\nǤ\\x0cǤ\\x0eǤᎂ\\x0bǤ\\x03Ǥ\\x06Ǥ')\n buf.write('ᎅ\\nǤ\\rǤ\\x0eǤᎆ\\x03ǥ\\x03ǥ')\n buf.write('\\x03ǥ\\x03Ǧ\\x03Ǧ\\x03ǧ\\x03ǧ\\x03Ǩ\\x03Ǩ')\n buf.write('\\x03Ǩ\\x05Ǩ᎓\\nǨ\\x03Ǩ\\x03Ǩ\\x05Ǩ')\n buf.write('᎗\\nǨ\\x05Ǩ᎙\\nǨ\\x03Ǩ\\x03Ǩ\\x05')\n buf.write('Ǩ\\u139d\\nǨ\\x03ǩ\\x03ǩ\\x03ǩ\\x03ǩ\\x03')\n buf.write('ǩ\\x07ǩᎤ\\nǩ\\x0cǩ\\x0eǩᎧ\\x0b')\n buf.write('ǩ\\x03ǩ\\x03ǩ\\x03Ǫ\\x03Ǫ\\x03Ǫ\\x03Ǫ')\n buf.write('\\x03Ǫ\\x05ǪᎰ\\nǪ\\x03Ǫ\\x03Ǫ\\x03ǫ')\n buf.write('\\x03ǫ\\x03Ǭ\\x03Ǭ\\x03Ǭ\\x07ǬᎹ\\nǬ')\n buf.write('\\x0cǬ\\x0eǬᎼ\\x0bǬ\\x03Ǭ\\x03Ǭ\\x03Ǭ')\n buf.write('\\x03ǭ\\x03ǭ\\x03ǭ\\x07ǭᏄ\\nǭ\\x0cǭ')\n buf.write('\\x0eǭᏇ\\x0bǭ\\x03ǭ\\x03ǭ\\x03ǭ\\x03Ǯ')\n buf.write('\\x03Ǯ\\x03Ǯ\\x07ǮᏏ\\nǮ\\x0cǮ\\x0eǮ')\n buf.write('Ꮢ\\x0bǮ\\x03Ǯ\\x03Ǯ\\x03Ǯ\\x03ǯ\\x03ǯ')\n buf.write('\\x03ǯ\\x07ǯᏚ\\nǯ\\x0cǯ\\x0eǯᏝ')\n buf.write('\\x0bǯ\\x03ǯ\\x03ǯ\\x03ǯ\\x03ǰ\\x03ǰ\\x03DZ')\n buf.write('\\x03DZ\\x03DZ\\x03DZ\\x06DZᏨ\\nDZ\\rDZ')\n buf.write('\\x0eDZᏩ\\x03DZ\\x03DZ\\x03Dz\\x03Dz\\x03dz')\n buf.write('\\x03dz\\x03Ǵ\\x03Ǵ\\x03ǵ\\x03ǵ\\x03Ƕ\\x03Ƕ')\n buf.write('\\x03Ƕ\\x03Ƿ\\x03Ƿ\\x03Ǹ\\x03Ǹ\\x03ǹ\\x03ǹ')\n buf.write('\\x03Ǻ\\x03Ǻ\\x03ǻ\\x03ǻ\\x03Ǽ\\x03Ǽ\\x03ǽ')\n buf.write('\\x03ǽ\\x03ǽ\\x03Ǿ\\x03Ǿ\\x03Ǿ\\x03Ǿ\\x07Ǿ')\n buf.write('ᐌ\\nǾ\\x0cǾ\\x0eǾᐏ\\x0bǾ\\x03Ǿ')\n buf.write('\\x03Ǿ\\x03Ǿ\\x03Ǿ\\x03Ǿ\\x05Ǿᐖ\\nǾ')\n buf.write('\\x03ǿ\\x03ǿ\\x03Ȁ\\x03Ȁ\\x03ȁ\\x03ȁ\\x03ȁ')\n buf.write('\\x03Ȃ\\x03Ȃ\\x03ȃ\\x03ȃ\\x03ȃ\\x03Ȅ\\x03Ȅ')\n buf.write('\\x03Ȅ\\x03Ȅ\\x03Ȅ\\x03Ȅ\\x03Ȅ\\x03Ȅ\\x05Ȅ')\n 
buf.write('ᐬ\\nȄ\\x03ȅ\\x03ȅ\\x03Ȇ\\x03Ȇ\\x03ȇ')\n buf.write('\\x03ȇ\\x03Ȉ\\x03Ȉ\\x03ȉ\\x03ȉ\\x03Ȋ\\x03Ȋ')\n buf.write('\\x03Ȋ\\x03ȋ\\x03ȋ\\x03Ȍ\\x03Ȍ\\x03ȍ\\x03ȍ')\n buf.write('\\x03Ȏ\\x03Ȏ\\x03ȏ\\x03ȏ\\x03Ȑ\\x06Ȑᑆ')\n buf.write('\\nȐ\\rȐ\\x0eȐᑇ\\x03Ȑ\\x03Ȑ\\x03ȑ')\n buf.write('\\x03ȑ\\x03Ȓ\\x06Ȓᑏ\\nȒ\\rȒ\\x0eȒ')\n buf.write('ᑐ\\x03ȓ\\x07ȓᑔ\\nȓ\\x0cȓ\\x0eȓ')\n buf.write('ᑗ\\x0bȓ\\x03ȓ\\x05ȓᑚ\\nȓ\\x03ȓ')\n buf.write('\\x06ȓᑝ\\nȓ\\rȓ\\x0eȓᑞ\\x03Ȕ')\n buf.write('\\x03Ȕ\\x03Ȕ\\x03Ȕ\\x07Ȕᑥ\\nȔ\\x0cȔ')\n buf.write('\\x0eȔᑨ\\x0bȔ\\x03Ȕ\\x03Ȕ\\x05Ȕᑬ')\n buf.write('\\nȔ\\x03Ȕ\\x03Ȕ\\x03ȕ\\x03ȕ\\x03ȕ\\x03ȕ')\n buf.write('\\x07ȕᑴ\\nȕ\\x0cȕ\\x0eȕᑷ\\x0bȕ')\n buf.write('\\x03ȕ\\x03ȕ\\x03ȕ\\x03ȕ\\x03ȕ\\x03Ȗ\\x03Ȗ')\n buf.write('\\x03Ȗ\\x03Ȗ\\x03Ȗ\\x03Ȗ\\x03Ȗ\\x03Ȗ\\x03Ȗ')\n buf.write('\\x07Ȗᒇ\\nȖ\\x0cȖ\\x0eȖᒊ\\x0bȖ')\n buf.write('\\x03Ȗ\\x03Ȗ\\x05Ȗᒎ\\nȖ\\x03ȗ\\x05ȗ')\n buf.write('ᒑ\\nȗ\\x03ȗ\\x03ȗ\\x03Ș\\x03Ș\\x03ș')\n buf.write('\\x03ș\\x03ș\\x07șᒚ\\nș\\x0cș\\x0eș')\n buf.write('ᒝ\\x0bș\\x03Ț\\x03Ț\\x03Ț\\x03Ț\\x03Ț')\n buf.write('\\x03ț\\x03ț\\x03Ȝ\\x03Ȝ\\x03ȝ\\x03ȝ\\x03Ȟ')\n buf.write('\\x03Ȟ\\x03ȟ\\x03ȟ\\x03Ƞ\\x03Ƞ\\x03ȡ\\x03ȡ')\n buf.write('\\x03Ȣ\\x03Ȣ\\x03ȣ\\x03ȣ\\x03Ȥ\\x03Ȥ\\x03ȥ')\n buf.write('\\x03ȥ\\x03Ȧ\\x03Ȧ\\x03ȧ\\x03ȧ\\x03Ȩ\\x03Ȩ')\n buf.write('\\x03ȩ\\x03ȩ\\x03Ȫ\\x03Ȫ\\x03ȫ\\x03ȫ\\x03Ȭ')\n buf.write('\\x03Ȭ\\x03ȭ\\x03ȭ\\x03Ȯ\\x03Ȯ\\x03ȯ\\x03ȯ')\n buf.write('\\x03Ȱ\\x03Ȱ\\x03ȱ\\x03ȱ\\x03Ȳ\\x03Ȳ\\x03ȳ')\n buf.write('\\x03ȳ\\x03ȴ\\x03ȴ\\x07ᎺᏅᏐᏛᑵ')\n buf.write(\n '\\x02ȵ\\x03\\x03\\x05\\x04\\x07\\x05\\t\\x06\\x0b\\x07\\r\\x08\\x0f\\t\\x11\\n\\x13\\x0b\\x15\\x0c'\n )\n buf.write(\n \"\\x17\\r\\x19\\x0e\\x1b\\x0f\\x1d\\x10\\x1f\\x11!\\x12#\\x13%\\x14'\\x15)\\x16+\\x17\"\n )\n buf.write('-\\x18/\\x191\\x1a3\\x1b5\\x1c7\\x1d9\\x1e;\\x1f= ?!A\"C#E$G%')\n buf.write(\"I&K'M(O)Q*S+U,W-Y.[/]0_1a2c3e4g5i6k7\")\n buf.write('m8o9q:s;u<w=y>{?}@\\x7fA\\x81B\\x83C\\x85D\\x87E\\x89')\n buf.write('F\\x8bG\\x8dH\\x8fI\\x91J\\x93K\\x95L\\x97M\\x99')\n 
buf.write('N\\x9bO\\x9dP\\x9fQ¡R£S¥T§U©')\n buf.write('V«W\\xadX¯Y±Z³[µ\\\\·]¹')\n buf.write('^»_½`¿aÁbÃcÅdÇeÉ')\n buf.write('fËgÍhÏiÑjÓkÕl×mÙ')\n buf.write('nÛoÝpßqárãsåtçué')\n buf.write('vëwíxïyñzó{õ|÷}ù')\n buf.write('~û\\x7fý\\x80ÿ\\x81ā\\x82ă')\n buf.write('\\x83ą\\x84ć\\x85ĉ\\x86ċ\\x87')\n buf.write('č\\x88ď\\x89đ\\x8aē\\x8bĕ')\n buf.write('\\x8cė\\x8dę\\x8eě\\x8fĝ\\x90')\n buf.write('ğ\\x91ġ\\x92ģ\\x93ĥ\\x94ħ')\n buf.write('\\x95ĩ\\x96ī\\x97ĭ\\x98į\\x99')\n buf.write('ı\\x9aij\\x9bĵ\\x9cķ\\x9dĹ')\n buf.write('\\x9eĻ\\x9fĽ\\xa0Ŀ¡Ł¢')\n buf.write('Ń£Ņ¤Ň¥ʼn¦ŋ')\n buf.write('§ō¨ŏ©őªœ«')\n buf.write('ŕ¬ŗ\\xadř®ś¯ŝ')\n buf.write('°ş±š²ţ³ť´')\n buf.write('ŧµũ¶ū·ŭ¸ů')\n buf.write('¹űºų»ŵ¼ŷ½')\n buf.write('Ź¾Ż¿ŽÀſÁƁ')\n buf.write('ÂƃÃƅÄƇÅƉÆ')\n buf.write('ƋÇƍÈƏÉƑÊƓ')\n buf.write('ËƕÌƗÍƙÎƛÏ')\n buf.write('ƝÐƟÑơÒƣÓƥ')\n buf.write('ÔƧÕƩÖƫ×ƭØ')\n buf.write('ƯÙƱÚƳÛƵÜƷ')\n buf.write('ÝƹÞƻßƽàƿá')\n buf.write('ǁâǃãDžäLJålj')\n buf.write('æNjçǍèǏéǑê')\n buf.write('ǓëǕìǗíǙîǛ')\n buf.write('ïǝðǟñǡòǣó')\n buf.write('ǥôǧõǩöǫ÷ǭ')\n buf.write('øǯùDZúdzûǵü')\n buf.write('ǷýǹþǻÿǽĀǿ')\n buf.write('āȁĂȃăȅĄȇą')\n buf.write('ȉĆȋćȍĈȏĉȑ')\n buf.write('ĊȓċȕČȗčșĎ')\n buf.write('țďȝĐȟđȡĒȣ')\n buf.write('ēȥĔȧĕȩĖȫė')\n buf.write('ȭĘȯęȱĚȳěȵ')\n buf.write('ĜȷĝȹĞȻğȽĠ')\n buf.write('ȿġɁĢɃģɅĤɇ')\n buf.write('ĥɉĦɋħɍĨɏĩ')\n buf.write('ɑĪɓīɕĬɗĭə')\n buf.write('ĮɛįɝİɟıɡIJ')\n buf.write('ɣijɥĴɧĵɩĶɫ')\n buf.write('ķɭĸɯĹɱĺɳĻ')\n buf.write('ɵļɷĽɹľɻĿɽ')\n buf.write('ŀɿŁʁłʃŃʅń')\n buf.write('ʇŅʉņʋŇʍňʏ')\n buf.write('ʼnʑŊʓŋʕŌʗō')\n buf.write('ʙŎʛŏʝŐʟőʡ')\n buf.write('ŒʣœʥŔʧŕʩŖ')\n buf.write('ʫŗʭŘʯřʱŚʳ')\n buf.write('śʵŜʷŝʹŞʻş')\n buf.write('ʽŠʿšˁŢ˃ţ˅')\n buf.write('ŤˇťˉŦˋŧˍŨ')\n buf.write('ˏũˑŪ˓ū˕Ŭ˗')\n buf.write('ŭ˙ٲů˝Ű˟ű')\n buf.write('ˡŲˣų˥Ŵ˧ŵ˩')\n buf.write('Ŷ˫ŷ˭Ÿ˯Ź˱ź')\n buf.write('˳Ż˵ż˷Ž˹ž˻')\n buf.write('ſ˽ƀ˿Ɓ́Ƃ̃ƃ')\n buf.write('̅Ƅ̇ƅ̉Ɔ̋Ƈ̍')\n buf.write('ƈ̏Ɖ̑Ɗ̓Ƌ̕ƌ')\n buf.write('̗ƍ̙Ǝ̛Ə̝Ɛ̟')\n buf.write('Ƒ̡ƒ̣Ɠ̥Ɣ̧ƕ')\n buf.write('̩Ɩ̫Ɨ̭Ƙ̯ƙ̱')\n buf.write('ƚ̳ƛ̵Ɯ̷Ɲ̹ƞ')\n buf.write('̻Ɵ̽Ơ̿ớƢ̓')\n 
buf.write('ƣͅƤ͇ƥ͉Ʀ͋Ƨ')\n buf.write('͍ƨ͏Ʃ͑ƪ͓ƫ͕')\n buf.write('Ƭ͗ƭ͙Ʈ͛Ư͝ư')\n buf.write('͟Ʊ͡ƲͣƳͥƴͧ')\n buf.write('ƵͩƶͫƷͭƸͯƹ')\n buf.write('ͱƺͳƻ͵Ƽͷƽ\\u0379')\n buf.write('ƾͻƿͽǀͿǁ\\u0381ǂ')\n buf.write('\\u0383ǃ΅DŽ·DžΉdž\\u038b')\n buf.write('LJ\\u038dLjΏljΑNJΓNj')\n buf.write('ΕnjΗǍΙǎΛǏΝ')\n buf.write('ǐΟǑΡǒΣǓΥǔ')\n buf.write('ΧǕΩǖΫǗέǘί')\n buf.write('ǙαǚγǛεǜηǝ')\n buf.write('ιǞλǟνǠοǡρ')\n buf.write('ǢσǣυǤχǥωǦ')\n buf.write('ϋǧύǨϏǩϑǪϓ')\n buf.write('\\x02ϕ\\x02ϗ\\x02ϙ\\x02ϛ\\x02ϝ\\x02ϟ\\x02ϡ')\n buf.write('ǫϣǬϥǭϧǮϩǯ')\n buf.write('ϫǰϭDZϯDzϱdzϳ')\n buf.write('ǴϵǵϷǶϹǷϻǸ')\n buf.write('ϽǹϿǺЁǻЃǼЅ')\n buf.write('ǽЇǾЉǿЋȀЍȁ')\n buf.write('ЏȂБ\\x02ГȃЕȄЗȅ')\n buf.write('ЙȆЛȇНȈПȉС')\n buf.write('\\x02У\\x02Х\\x02ЧȊЩȋЫȌ')\n buf.write('Э\\x02Я\\x02бȍгȎе\\x02з')\n buf.write('\\x02й\\x02л\\x02н\\x02п\\x02с\\x02у\\x02х')\n buf.write('\\x02ч\\x02щ\\x02ы\\x02э\\x02я\\x02ё\\x02ѓ')\n buf.write('\\x02ѕ\\x02ї\\x02љ\\x02ћ\\x02ѝ\\x02џ\\x02ѡ')\n buf.write(\n \"\\x02ѣ\\x02ѥ\\x02ѧ\\x02\\x03\\x02'\\x05\\x02\\x0c\\x0c\\x0f\\x0f))\\x05\\x022\")\n buf.write(\n ';CHch\\x04\\x02GGgg\\x04\\x02--//\\t\\x02\\x0b\\x0c\\x0f\\x0f\"\"**>>]]}}\\x05\\x02\\x0c'\n )\n buf.write(\n '\\x0c\\x0f\\x0f$$\\x04\\x022;aa\\x05\\x02\\x0b\\x0c\\x0f\\x0f\"\"\\x04\\x02C\\\\c|\\x04\\x02\\x0c'\n )\n buf.write(\n '\\x0c\\x0f\\x0f\\x04\\x02\\x0b\\x0b\"\"\\x05\\x02%&2;aa\\x04\\x02CCcc\\x04\\x02DDdd\\x04\\x02'\n )\n buf.write(\n 'EEee\\x04\\x02FFff\\x04\\x02HHhh\\x04\\x02IIii\\x04\\x02JJjj\\x04\\x02KKkk\\x04\\x02LLll\\x04'\n )\n buf.write(\n '\\x02MMmm\\x04\\x02NNnn\\x04\\x02OOoo\\x04\\x02PPpp\\x04\\x02QQqq\\x04\\x02RRrr\\x04\\x02SSs'\n )\n buf.write(\n 's\\x04\\x02TTtt\\x04\\x02UUuu\\x04\\x02VVvv\\x04\\x02WWww\\x04\\x02XXxx\\x04\\x02YYyy\\x04\\x02'\n )\n buf.write(\n 'ZZzz\\x04\\x02[[{{\\x04\\x02\\\\\\\\||\\x02ᓝ\\x02\\x03\\x03\\x02\\x02\\x02\\x02\\x05\\x03\\x02\\x02\\x02'\n )\n buf.write(\n '\\x02\\x07\\x03\\x02\\x02\\x02\\x02\\t\\x03\\x02\\x02\\x02\\x02\\x0b\\x03\\x02\\x02\\x02\\x02\\r\\x03\\x02\\x02\\x02\\x02\\x0f'\n )\n 
buf.write(\n '\\x03\\x02\\x02\\x02\\x02\\x11\\x03\\x02\\x02\\x02\\x02\\x13\\x03\\x02\\x02\\x02\\x02\\x15\\x03\\x02\\x02\\x02\\x02\\x17\\x03'\n )\n buf.write(\n '\\x02\\x02\\x02\\x02\\x19\\x03\\x02\\x02\\x02\\x02\\x1b\\x03\\x02\\x02\\x02\\x02\\x1d\\x03\\x02\\x02\\x02\\x02\\x1f\\x03\\x02'\n )\n buf.write(\n \"\\x02\\x02\\x02!\\x03\\x02\\x02\\x02\\x02#\\x03\\x02\\x02\\x02\\x02%\\x03\\x02\\x02\\x02\\x02'\\x03\\x02\\x02\\x02\\x02)\\x03\"\n )\n buf.write(\n '\\x02\\x02\\x02\\x02+\\x03\\x02\\x02\\x02\\x02-\\x03\\x02\\x02\\x02\\x02/\\x03\\x02\\x02\\x02\\x021\\x03\\x02\\x02\\x02\\x02'\n )\n buf.write(\n '3\\x03\\x02\\x02\\x02\\x025\\x03\\x02\\x02\\x02\\x027\\x03\\x02\\x02\\x02\\x029\\x03\\x02\\x02\\x02\\x02;\\x03'\n )\n buf.write(\n '\\x02\\x02\\x02\\x02=\\x03\\x02\\x02\\x02\\x02?\\x03\\x02\\x02\\x02\\x02A\\x03\\x02\\x02\\x02\\x02C\\x03\\x02\\x02\\x02\\x02E'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02G\\x03\\x02\\x02\\x02\\x02I\\x03\\x02\\x02\\x02\\x02K\\x03\\x02\\x02\\x02\\x02M\\x03\\x02\\x02\\x02\\x02'\n )\n buf.write(\n 'O\\x03\\x02\\x02\\x02\\x02Q\\x03\\x02\\x02\\x02\\x02S\\x03\\x02\\x02\\x02\\x02U\\x03\\x02\\x02\\x02\\x02W\\x03\\x02\\x02\\x02'\n )\n buf.write(\n '\\x02Y\\x03\\x02\\x02\\x02\\x02[\\x03\\x02\\x02\\x02\\x02]\\x03\\x02\\x02\\x02\\x02_\\x03\\x02\\x02\\x02\\x02a\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02c\\x03\\x02\\x02\\x02\\x02e\\x03\\x02\\x02\\x02\\x02g\\x03\\x02\\x02\\x02\\x02i\\x03\\x02\\x02\\x02\\x02k\\x03\\x02'\n )\n buf.write(\n '\\x02\\x02\\x02m\\x03\\x02\\x02\\x02\\x02o\\x03\\x02\\x02\\x02\\x02q\\x03\\x02\\x02\\x02\\x02s\\x03\\x02\\x02\\x02\\x02u\\x03'\n )\n buf.write(\n '\\x02\\x02\\x02\\x02w\\x03\\x02\\x02\\x02\\x02y\\x03\\x02\\x02\\x02\\x02{\\x03\\x02\\x02\\x02\\x02}\\x03\\x02\\x02\\x02\\x02\\x7f'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02\\x81\\x03\\x02\\x02\\x02\\x02\\x83\\x03\\x02\\x02\\x02\\x02\\x85\\x03\\x02\\x02'\n )\n buf.write(\n 
'\\x02\\x02\\x87\\x03\\x02\\x02\\x02\\x02\\x89\\x03\\x02\\x02\\x02\\x02\\x8b\\x03\\x02\\x02\\x02\\x02\\x8d'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02\\x8f\\x03\\x02\\x02\\x02\\x02\\x91\\x03\\x02\\x02\\x02\\x02\\x93\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02\\x95\\x03\\x02\\x02\\x02\\x02\\x97\\x03\\x02\\x02\\x02\\x02\\x99\\x03\\x02\\x02\\x02\\x02\\x9b'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02\\x9d\\x03\\x02\\x02\\x02\\x02\\x9f\\x03\\x02\\x02\\x02\\x02¡\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02£\\x03\\x02\\x02\\x02\\x02¥\\x03\\x02\\x02\\x02\\x02§\\x03\\x02\\x02\\x02\\x02©'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02«\\x03\\x02\\x02\\x02\\x02\\xad\\x03\\x02\\x02\\x02\\x02¯\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02±\\x03\\x02\\x02\\x02\\x02³\\x03\\x02\\x02\\x02\\x02µ\\x03\\x02\\x02\\x02\\x02·'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02¹\\x03\\x02\\x02\\x02\\x02»\\x03\\x02\\x02\\x02\\x02½\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02¿\\x03\\x02\\x02\\x02\\x02Á\\x03\\x02\\x02\\x02\\x02Ã\\x03\\x02\\x02\\x02\\x02Å'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ç\\x03\\x02\\x02\\x02\\x02É\\x03\\x02\\x02\\x02\\x02Ë\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Í\\x03\\x02\\x02\\x02\\x02Ï\\x03\\x02\\x02\\x02\\x02Ñ\\x03\\x02\\x02\\x02\\x02Ó'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Õ\\x03\\x02\\x02\\x02\\x02×\\x03\\x02\\x02\\x02\\x02Ù\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Û\\x03\\x02\\x02\\x02\\x02Ý\\x03\\x02\\x02\\x02\\x02ß\\x03\\x02\\x02\\x02\\x02á'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ã\\x03\\x02\\x02\\x02\\x02å\\x03\\x02\\x02\\x02\\x02ç\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02é\\x03\\x02\\x02\\x02\\x02ë\\x03\\x02\\x02\\x02\\x02í\\x03\\x02\\x02\\x02\\x02ï'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ñ\\x03\\x02\\x02\\x02\\x02ó\\x03\\x02\\x02\\x02\\x02õ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02÷\\x03\\x02\\x02\\x02\\x02ù\\x03\\x02\\x02\\x02\\x02û\\x03\\x02\\x02\\x02\\x02ý'\n )\n buf.write(\n 
'\\x03\\x02\\x02\\x02\\x02ÿ\\x03\\x02\\x02\\x02\\x02ā\\x03\\x02\\x02\\x02\\x02ă\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ą\\x03\\x02\\x02\\x02\\x02ć\\x03\\x02\\x02\\x02\\x02ĉ\\x03\\x02\\x02\\x02\\x02ċ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02č\\x03\\x02\\x02\\x02\\x02ď\\x03\\x02\\x02\\x02\\x02đ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ē\\x03\\x02\\x02\\x02\\x02ĕ\\x03\\x02\\x02\\x02\\x02ė\\x03\\x02\\x02\\x02\\x02ę'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ě\\x03\\x02\\x02\\x02\\x02ĝ\\x03\\x02\\x02\\x02\\x02ğ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ġ\\x03\\x02\\x02\\x02\\x02ģ\\x03\\x02\\x02\\x02\\x02ĥ\\x03\\x02\\x02\\x02\\x02ħ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ĩ\\x03\\x02\\x02\\x02\\x02ī\\x03\\x02\\x02\\x02\\x02ĭ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02į\\x03\\x02\\x02\\x02\\x02ı\\x03\\x02\\x02\\x02\\x02ij\\x03\\x02\\x02\\x02\\x02ĵ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ķ\\x03\\x02\\x02\\x02\\x02Ĺ\\x03\\x02\\x02\\x02\\x02Ļ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Ľ\\x03\\x02\\x02\\x02\\x02Ŀ\\x03\\x02\\x02\\x02\\x02Ł\\x03\\x02\\x02\\x02\\x02Ń'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ņ\\x03\\x02\\x02\\x02\\x02Ň\\x03\\x02\\x02\\x02\\x02ʼn\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ŋ\\x03\\x02\\x02\\x02\\x02ō\\x03\\x02\\x02\\x02\\x02ŏ\\x03\\x02\\x02\\x02\\x02ő'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02œ\\x03\\x02\\x02\\x02\\x02ŕ\\x03\\x02\\x02\\x02\\x02ŗ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ř\\x03\\x02\\x02\\x02\\x02ś\\x03\\x02\\x02\\x02\\x02ŝ\\x03\\x02\\x02\\x02\\x02ş'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02š\\x03\\x02\\x02\\x02\\x02ţ\\x03\\x02\\x02\\x02\\x02ť\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ŧ\\x03\\x02\\x02\\x02\\x02ũ\\x03\\x02\\x02\\x02\\x02ū\\x03\\x02\\x02\\x02\\x02ŭ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ů\\x03\\x02\\x02\\x02\\x02ű\\x03\\x02\\x02\\x02\\x02ų\\x03\\x02\\x02'\n )\n buf.write(\n 
'\\x02\\x02ŵ\\x03\\x02\\x02\\x02\\x02ŷ\\x03\\x02\\x02\\x02\\x02Ź\\x03\\x02\\x02\\x02\\x02Ż'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ž\\x03\\x02\\x02\\x02\\x02ſ\\x03\\x02\\x02\\x02\\x02Ɓ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ƃ\\x03\\x02\\x02\\x02\\x02ƅ\\x03\\x02\\x02\\x02\\x02Ƈ\\x03\\x02\\x02\\x02\\x02Ɖ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ƌ\\x03\\x02\\x02\\x02\\x02ƍ\\x03\\x02\\x02\\x02\\x02Ə\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Ƒ\\x03\\x02\\x02\\x02\\x02Ɠ\\x03\\x02\\x02\\x02\\x02ƕ\\x03\\x02\\x02\\x02\\x02Ɨ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ƙ\\x03\\x02\\x02\\x02\\x02ƛ\\x03\\x02\\x02\\x02\\x02Ɲ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Ɵ\\x03\\x02\\x02\\x02\\x02ơ\\x03\\x02\\x02\\x02\\x02ƣ\\x03\\x02\\x02\\x02\\x02ƥ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ƨ\\x03\\x02\\x02\\x02\\x02Ʃ\\x03\\x02\\x02\\x02\\x02ƫ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ƭ\\x03\\x02\\x02\\x02\\x02Ư\\x03\\x02\\x02\\x02\\x02Ʊ\\x03\\x02\\x02\\x02\\x02Ƴ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ƶ\\x03\\x02\\x02\\x02\\x02Ʒ\\x03\\x02\\x02\\x02\\x02ƹ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ƻ\\x03\\x02\\x02\\x02\\x02ƽ\\x03\\x02\\x02\\x02\\x02ƿ\\x03\\x02\\x02\\x02\\x02ǁ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ǃ\\x03\\x02\\x02\\x02\\x02Dž\\x03\\x02\\x02\\x02\\x02LJ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02lj\\x03\\x02\\x02\\x02\\x02Nj\\x03\\x02\\x02\\x02\\x02Ǎ\\x03\\x02\\x02\\x02\\x02Ǐ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ǒ\\x03\\x02\\x02\\x02\\x02Ǔ\\x03\\x02\\x02\\x02\\x02Ǖ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Ǘ\\x03\\x02\\x02\\x02\\x02Ǚ\\x03\\x02\\x02\\x02\\x02Ǜ\\x03\\x02\\x02\\x02\\x02ǝ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ǟ\\x03\\x02\\x02\\x02\\x02ǡ\\x03\\x02\\x02\\x02\\x02ǣ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ǥ\\x03\\x02\\x02\\x02\\x02ǧ\\x03\\x02\\x02\\x02\\x02ǩ\\x03\\x02\\x02\\x02\\x02ǫ'\n )\n buf.write(\n 
'\\x03\\x02\\x02\\x02\\x02ǭ\\x03\\x02\\x02\\x02\\x02ǯ\\x03\\x02\\x02\\x02\\x02DZ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02dz\\x03\\x02\\x02\\x02\\x02ǵ\\x03\\x02\\x02\\x02\\x02Ƿ\\x03\\x02\\x02\\x02\\x02ǹ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ǻ\\x03\\x02\\x02\\x02\\x02ǽ\\x03\\x02\\x02\\x02\\x02ǿ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ȁ\\x03\\x02\\x02\\x02\\x02ȃ\\x03\\x02\\x02\\x02\\x02ȅ\\x03\\x02\\x02\\x02\\x02ȇ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ȉ\\x03\\x02\\x02\\x02\\x02ȋ\\x03\\x02\\x02\\x02\\x02ȍ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ȏ\\x03\\x02\\x02\\x02\\x02ȑ\\x03\\x02\\x02\\x02\\x02ȓ\\x03\\x02\\x02\\x02\\x02ȕ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ȗ\\x03\\x02\\x02\\x02\\x02ș\\x03\\x02\\x02\\x02\\x02ț\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ȝ\\x03\\x02\\x02\\x02\\x02ȟ\\x03\\x02\\x02\\x02\\x02ȡ\\x03\\x02\\x02\\x02\\x02ȣ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ȥ\\x03\\x02\\x02\\x02\\x02ȧ\\x03\\x02\\x02\\x02\\x02ȩ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ȫ\\x03\\x02\\x02\\x02\\x02ȭ\\x03\\x02\\x02\\x02\\x02ȯ\\x03\\x02\\x02\\x02\\x02ȱ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ȳ\\x03\\x02\\x02\\x02\\x02ȵ\\x03\\x02\\x02\\x02\\x02ȷ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ȹ\\x03\\x02\\x02\\x02\\x02Ȼ\\x03\\x02\\x02\\x02\\x02Ƚ\\x03\\x02\\x02\\x02\\x02ȿ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ɂ\\x03\\x02\\x02\\x02\\x02Ƀ\\x03\\x02\\x02\\x02\\x02Ʌ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ɇ\\x03\\x02\\x02\\x02\\x02ɉ\\x03\\x02\\x02\\x02\\x02ɋ\\x03\\x02\\x02\\x02\\x02ɍ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ɏ\\x03\\x02\\x02\\x02\\x02ɑ\\x03\\x02\\x02\\x02\\x02ɓ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ɕ\\x03\\x02\\x02\\x02\\x02ɗ\\x03\\x02\\x02\\x02\\x02ə\\x03\\x02\\x02\\x02\\x02ɛ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ɝ\\x03\\x02\\x02\\x02\\x02ɟ\\x03\\x02\\x02\\x02\\x02ɡ\\x03\\x02\\x02'\n )\n buf.write(\n 
'\\x02\\x02ɣ\\x03\\x02\\x02\\x02\\x02ɥ\\x03\\x02\\x02\\x02\\x02ɧ\\x03\\x02\\x02\\x02\\x02ɩ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ɫ\\x03\\x02\\x02\\x02\\x02ɭ\\x03\\x02\\x02\\x02\\x02ɯ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ɱ\\x03\\x02\\x02\\x02\\x02ɳ\\x03\\x02\\x02\\x02\\x02ɵ\\x03\\x02\\x02\\x02\\x02ɷ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ɹ\\x03\\x02\\x02\\x02\\x02ɻ\\x03\\x02\\x02\\x02\\x02ɽ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ɿ\\x03\\x02\\x02\\x02\\x02ʁ\\x03\\x02\\x02\\x02\\x02ʃ\\x03\\x02\\x02\\x02\\x02ʅ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ʇ\\x03\\x02\\x02\\x02\\x02ʉ\\x03\\x02\\x02\\x02\\x02ʋ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ʍ\\x03\\x02\\x02\\x02\\x02ʏ\\x03\\x02\\x02\\x02\\x02ʑ\\x03\\x02\\x02\\x02\\x02ʓ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ʕ\\x03\\x02\\x02\\x02\\x02ʗ\\x03\\x02\\x02\\x02\\x02ʙ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ʛ\\x03\\x02\\x02\\x02\\x02ʝ\\x03\\x02\\x02\\x02\\x02ʟ\\x03\\x02\\x02\\x02\\x02ʡ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ʣ\\x03\\x02\\x02\\x02\\x02ʥ\\x03\\x02\\x02\\x02\\x02ʧ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ʩ\\x03\\x02\\x02\\x02\\x02ʫ\\x03\\x02\\x02\\x02\\x02ʭ\\x03\\x02\\x02\\x02\\x02ʯ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ʱ\\x03\\x02\\x02\\x02\\x02ʳ\\x03\\x02\\x02\\x02\\x02ʵ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ʷ\\x03\\x02\\x02\\x02\\x02ʹ\\x03\\x02\\x02\\x02\\x02ʻ\\x03\\x02\\x02\\x02\\x02ʽ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ʿ\\x03\\x02\\x02\\x02\\x02ˁ\\x03\\x02\\x02\\x02\\x02˃\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02˅\\x03\\x02\\x02\\x02\\x02ˇ\\x03\\x02\\x02\\x02\\x02ˉ\\x03\\x02\\x02\\x02\\x02ˋ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ˍ\\x03\\x02\\x02\\x02\\x02ˏ\\x03\\x02\\x02\\x02\\x02ˑ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02˓\\x03\\x02\\x02\\x02\\x02˕\\x03\\x02\\x02\\x02\\x02˗\\x03\\x02\\x02\\x02\\x02˙'\n )\n buf.write(\n 
'\\x03\\x02\\x02\\x02\\x02˛\\x03\\x02\\x02\\x02\\x02˝\\x03\\x02\\x02\\x02\\x02˟\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ˡ\\x03\\x02\\x02\\x02\\x02ˣ\\x03\\x02\\x02\\x02\\x02˥\\x03\\x02\\x02\\x02\\x02˧'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02˩\\x03\\x02\\x02\\x02\\x02˫\\x03\\x02\\x02\\x02\\x02˭\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02˯\\x03\\x02\\x02\\x02\\x02˱\\x03\\x02\\x02\\x02\\x02˳\\x03\\x02\\x02\\x02\\x02˵'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02˷\\x03\\x02\\x02\\x02\\x02˹\\x03\\x02\\x02\\x02\\x02˻\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02˽\\x03\\x02\\x02\\x02\\x02˿\\x03\\x02\\x02\\x02\\x02́\\x03\\x02\\x02\\x02\\x02̃'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02̅\\x03\\x02\\x02\\x02\\x02̇\\x03\\x02\\x02\\x02\\x02̉\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02̋\\x03\\x02\\x02\\x02\\x02̍\\x03\\x02\\x02\\x02\\x02̏\\x03\\x02\\x02\\x02\\x02̑'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02̓\\x03\\x02\\x02\\x02\\x02̕\\x03\\x02\\x02\\x02\\x02̗\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02̙\\x03\\x02\\x02\\x02\\x02̛\\x03\\x02\\x02\\x02\\x02̝\\x03\\x02\\x02\\x02\\x02̟'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02̡\\x03\\x02\\x02\\x02\\x02̣\\x03\\x02\\x02\\x02\\x02̥\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02̧\\x03\\x02\\x02\\x02\\x02̩\\x03\\x02\\x02\\x02\\x02̫\\x03\\x02\\x02\\x02\\x02̭'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02̯\\x03\\x02\\x02\\x02\\x02̱\\x03\\x02\\x02\\x02\\x02̳\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02̵\\x03\\x02\\x02\\x02\\x02̷\\x03\\x02\\x02\\x02\\x02̹\\x03\\x02\\x02\\x02\\x02̻'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02̽\\x03\\x02\\x02\\x02\\x02̿\\x03\\x02\\x02\\x02\\x02́\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02̓\\x03\\x02\\x02\\x02\\x02ͅ\\x03\\x02\\x02\\x02\\x02͇\\x03\\x02\\x02\\x02\\x02͉'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02͋\\x03\\x02\\x02\\x02\\x02͍\\x03\\x02\\x02\\x02\\x02͏\\x03\\x02\\x02'\n )\n buf.write(\n 
'\\x02\\x02͑\\x03\\x02\\x02\\x02\\x02͓\\x03\\x02\\x02\\x02\\x02͕\\x03\\x02\\x02\\x02\\x02͗'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02͙\\x03\\x02\\x02\\x02\\x02͛\\x03\\x02\\x02\\x02\\x02͝\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02͟\\x03\\x02\\x02\\x02\\x02͡\\x03\\x02\\x02\\x02\\x02ͣ\\x03\\x02\\x02\\x02\\x02ͥ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ͧ\\x03\\x02\\x02\\x02\\x02ͩ\\x03\\x02\\x02\\x02\\x02ͫ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ͭ\\x03\\x02\\x02\\x02\\x02ͯ\\x03\\x02\\x02\\x02\\x02ͱ\\x03\\x02\\x02\\x02\\x02ͳ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02͵\\x03\\x02\\x02\\x02\\x02ͷ\\x03\\x02\\x02\\x02\\x02\\u0379\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ͻ\\x03\\x02\\x02\\x02\\x02ͽ\\x03\\x02\\x02\\x02\\x02Ϳ\\x03\\x02\\x02\\x02\\x02\\u0381'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02\\u0383\\x03\\x02\\x02\\x02\\x02΅\\x03\\x02\\x02\\x02\\x02·\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Ή\\x03\\x02\\x02\\x02\\x02\\u038b\\x03\\x02\\x02\\x02\\x02\\u038d\\x03\\x02\\x02\\x02\\x02Ώ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Α\\x03\\x02\\x02\\x02\\x02Γ\\x03\\x02\\x02\\x02\\x02Ε\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Η\\x03\\x02\\x02\\x02\\x02Ι\\x03\\x02\\x02\\x02\\x02Λ\\x03\\x02\\x02\\x02\\x02Ν'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ο\\x03\\x02\\x02\\x02\\x02Ρ\\x03\\x02\\x02\\x02\\x02Σ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Υ\\x03\\x02\\x02\\x02\\x02Χ\\x03\\x02\\x02\\x02\\x02Ω\\x03\\x02\\x02\\x02\\x02Ϋ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02έ\\x03\\x02\\x02\\x02\\x02ί\\x03\\x02\\x02\\x02\\x02α\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02γ\\x03\\x02\\x02\\x02\\x02ε\\x03\\x02\\x02\\x02\\x02η\\x03\\x02\\x02\\x02\\x02ι'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02λ\\x03\\x02\\x02\\x02\\x02ν\\x03\\x02\\x02\\x02\\x02ο\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ρ\\x03\\x02\\x02\\x02\\x02σ\\x03\\x02\\x02\\x02\\x02υ\\x03\\x02\\x02\\x02\\x02χ'\n )\n buf.write(\n 
'\\x03\\x02\\x02\\x02\\x02ω\\x03\\x02\\x02\\x02\\x02ϋ\\x03\\x02\\x02\\x02\\x02ύ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Ϗ\\x03\\x02\\x02\\x02\\x02ϑ\\x03\\x02\\x02\\x02\\x02ϓ\\x03\\x02\\x02\\x02\\x02ϡ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ϣ\\x03\\x02\\x02\\x02\\x02ϥ\\x03\\x02\\x02\\x02\\x02ϧ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02ϩ\\x03\\x02\\x02\\x02\\x02ϫ\\x03\\x02\\x02\\x02\\x02ϭ\\x03\\x02\\x02\\x02\\x02ϯ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02ϱ\\x03\\x02\\x02\\x02\\x02ϳ\\x03\\x02\\x02\\x02\\x02ϵ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Ϸ\\x03\\x02\\x02\\x02\\x02Ϲ\\x03\\x02\\x02\\x02\\x02ϻ\\x03\\x02\\x02\\x02\\x02Ͻ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ͽ\\x03\\x02\\x02\\x02\\x02Ё\\x03\\x02\\x02\\x02\\x02Ѓ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Ѕ\\x03\\x02\\x02\\x02\\x02Ї\\x03\\x02\\x02\\x02\\x02Љ\\x03\\x02\\x02\\x02\\x02Ћ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Ѝ\\x03\\x02\\x02\\x02\\x02Џ\\x03\\x02\\x02\\x02\\x02Г\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Е\\x03\\x02\\x02\\x02\\x02З\\x03\\x02\\x02\\x02\\x02Й\\x03\\x02\\x02\\x02\\x02Л'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x02Н\\x03\\x02\\x02\\x02\\x02П\\x03\\x02\\x02\\x02\\x02Ч\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\x02Щ\\x03\\x02\\x02\\x02\\x02Ы\\x03\\x02\\x02\\x02\\x02б\\x03\\x02\\x02\\x02\\x02г'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x03ѩ\\x03\\x02\\x02\\x02\\x05Ѭ\\x03\\x02\\x02\\x02\\x07Ѯ\\x03\\x02\\x02'\n )\n buf.write(\n '\\x02\\tѲ\\x03\\x02\\x02\\x02\\x0bѸ\\x03\\x02\\x02\\x02\\rѾ\\x03\\x02\\x02\\x02\\x0f'\n )\n buf.write(\n '҈\\x03\\x02\\x02\\x02\\x11Ҍ\\x03\\x02\\x02\\x02\\x13Ғ\\x03\\x02\\x02\\x02\\x15Қ')\n buf.write(\n '\\x03\\x02\\x02\\x02\\x17Ҟ\\x03\\x02\\x02\\x02\\x19Ң\\x03\\x02\\x02\\x02\\x1bҨ\\x03'\n )\n buf.write(\n '\\x02\\x02\\x02\\x1dҫ\\x03\\x02\\x02\\x02\\x1fҲ\\x03\\x02\\x02\\x02!ҹ\\x03\\x02\\x02'\n )\n buf.write(\n \"\\x02#ҽ\\x03\\x02\\x02\\x02%Ӈ\\x03\\x02\\x02\\x02'ӊ\\x03\\x02\\x02\\x02)Ӕ\")\n buf.write(\n 
'\\x03\\x02\\x02\\x02+Ӛ\\x03\\x02\\x02\\x02-ӡ\\x03\\x02\\x02\\x02/Ӧ\\x03\\x02\\x02\\x02'\n )\n buf.write('1Ӱ\\x03\\x02\\x02\\x023ԇ\\x03\\x02\\x02\\x025ԍ\\x03\\x02\\x02\\x027')\n buf.write('Ԕ\\x03\\x02\\x02\\x029Ԛ\\x03\\x02\\x02\\x02;Ԣ\\x03\\x02\\x02\\x02=Ԩ\\x03'\n )\n buf.write(\n '\\x02\\x02\\x02?Զ\\x03\\x02\\x02\\x02AՃ\\x03\\x02\\x02\\x02CՒ\\x03\\x02\\x02\\x02E\\u0557'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02G՝\\x03\\x02\\x02\\x02Iբ\\x03\\x02\\x02\\x02Kժ\\x03\\x02\\x02\\x02'\n )\n buf.write(\n 'Mկ\\x03\\x02\\x02\\x02Oշ\\x03\\x02\\x02\\x02Qռ\\x03\\x02\\x02\\x02Sտ\\x03')\n buf.write(\n '\\x02\\x02\\x02Uք\\x03\\x02\\x02\\x02Wֆ\\x03\\x02\\x02\\x02Y\\u058c\\x03\\x02\\x02\\x02[֑'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02]֛\\x03\\x02\\x02\\x02_֣\\x03\\x02\\x02\\x02a֨\\x03\\x02\\x02\\x02'\n )\n buf.write(\n 'c֭\\x03\\x02\\x02\\x02eֲ\\x03\\x02\\x02\\x02gֺ\\x03\\x02\\x02\\x02iׄ\\x03')\n buf.write(\n '\\x02\\x02\\x02k\\u05ca\\x03\\x02\\x02\\x02m\\u05ce\\x03\\x02\\x02\\x02oד\\x03\\x02\\x02\\x02qי'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02sס\\x03\\x02\\x02\\x02uש\\x03\\x02\\x02\\x02wױ\\x03\\x02\\x02\\x02'\n )\n buf.write(\n 'y\\u05f9\\x03\\x02\\x02\\x02{\\u0600\\x03\\x02\\x02\\x02}؊\\x03\\x02\\x02\\x02\\x7fؘ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x81ؠ\\x03\\x02\\x02\\x02\\x83ة\\x03\\x02\\x02\\x02\\x85')\n buf.write('ر\\x03\\x02\\x02\\x02\\x87ف\\x03\\x02\\x02\\x02\\x89ي\\x03\\x02\\x02\\x02'\n )\n buf.write('\\x8bٕ\\x03\\x02\\x02\\x02\\x8d١\\x03\\x02\\x02\\x02\\x8f٭\\x03')\n buf.write('\\x02\\x02\\x02\\x91ٵ\\x03\\x02\\x02\\x02\\x93ٽ\\x03\\x02\\x02\\x02\\x95چ'\n )\n buf.write(\n '\\x03\\x02\\x02\\x02\\x97ڎ\\x03\\x02\\x02\\x02\\x99ښ\\x03\\x02\\x02\\x02\\x9b')\n buf.write('ڪ\\x03\\x02\\x02\\x02\\x9dگ\\x03\\x02\\x02\\x02\\x9fڵ\\x03\\x02\\x02\\x02'\n )\n buf.write('¡ڼ\\x03\\x02\\x02\\x02£ۂ\\x03\\x02\\x02\\x02¥ۇ\\x03')\n buf.write('\\x02\\x02\\x02§ۏ\\x03\\x02\\x02\\x02©ۜ\\x03\\x02\\x02\\x02«ۣ')\n buf.write('\\x03\\x02\\x02\\x02\\xadۯ\\x03\\x02\\x02\\x02¯۵\\x03\\x02\\x02\\x02±')\n 
buf.write('ۺ\\x03\\x02\\x02\\x02³܃\\x03\\x02\\x02\\x02µ܈\\x03\\x02\\x02\\x02')\n buf.write('·܌\\x03\\x02\\x02\\x02¹ܛ\\x03\\x02\\x02\\x02»ܦ\\x03')\n buf.write('\\x02\\x02\\x02½ܪ\\x03\\x02\\x02\\x02¿ܰ\\x03\\x02\\x02\\x02Áܴ')\n buf.write('\\x03\\x02\\x02\\x02Ãܼ\\x03\\x02\\x02\\x02Å݄\\x03\\x02\\x02\\x02Ç')\n buf.write('ݎ\\x03\\x02\\x02\\x02Éݘ\\x03\\x02\\x02\\x02Ëݠ\\x03\\x02\\x02\\x02')\n buf.write('Íݩ\\x03\\x02\\x02\\x02Ïݲ\\x03\\x02\\x02\\x02Ñݺ\\x03')\n buf.write('\\x02\\x02\\x02Óށ\\x03\\x02\\x02\\x02Õއ\\x03\\x02\\x02\\x02×ތ')\n buf.write('\\x03\\x02\\x02\\x02Ùޚ\\x03\\x02\\x02\\x02Ûޤ\\x03\\x02\\x02\\x02Ý')\n buf.write('ެ\\x03\\x02\\x02\\x02ß\\u07b9\\x03\\x02\\x02\\x02á߂\\x03\\x02\\x02\\x02')\n buf.write('ãߋ\\x03\\x02\\x02\\x02åߒ\\x03\\x02\\x02\\x02çߗ\\x03')\n buf.write('\\x02\\x02\\x02é߰\\x03\\x02\\x02\\x02ëߵ\\x03\\x02\\x02\\x02í߽')\n buf.write('\\x03\\x02\\x02\\x02ïࠂ\\x03\\x02\\x02\\x02ñࠈ\\x03\\x02\\x02\\x02ó')\n buf.write('ࠎ\\x03\\x02\\x02\\x02õࠕ\\x03\\x02\\x02\\x02÷ࠞ\\x03\\x02\\x02\\x02')\n buf.write('ùࠢ\\x03\\x02\\x02\\x02û࠱\\x03\\x02\\x02\\x02ý࠵\\x03')\n buf.write('\\x02\\x02\\x02ÿ࠼\\x03\\x02\\x02\\x02āࡃ\\x03\\x02\\x02\\x02ăࡌ')\n buf.write('\\x03\\x02\\x02\\x02ąࡓ\\x03\\x02\\x02\\x02ć\\u085d\\x03\\x02\\x02\\x02ĉ')\n buf.write('\\u086c\\x03\\x02\\x02\\x02ċࡷ\\x03\\x02\\x02\\x02čࡿ\\x03\\x02\\x02\\x02')\n buf.write('ďࢉ\\x03\\x02\\x02\\x02đ\\u0891\\x03\\x02\\x02\\x02ē࢘\\x03')\n buf.write('\\x02\\x02\\x02ĕ࢝\\x03\\x02\\x02\\x02ėࢥ\\x03\\x02\\x02\\x02ęࢮ')\n buf.write('\\x03\\x02\\x02\\x02ěࢶ\\x03\\x02\\x02\\x02ĝࢾ\\x03\\x02\\x02\\x02ğ')\n buf.write('ࣄ\\x03\\x02\\x02\\x02ġ࣊\\x03\\x02\\x02\\x02ģ࣐\\x03\\x02\\x02\\x02')\n buf.write('ĥࣖ\\x03\\x02\\x02\\x02ħ\\u08e2\\x03\\x02\\x02\\x02ĩࣨ\\x03')\n buf.write('\\x02\\x02\\x02īࣲ\\x03\\x02\\x02\\x02ĭࣺ\\x03\\x02\\x02\\x02įࣾ')\n buf.write('\\x03\\x02\\x02\\x02ıअ\\x03\\x02\\x02\\x02ijऋ\\x03\\x02\\x02\\x02ĵ')\n buf.write('ऐ\\x03\\x02\\x02\\x02ķक\\x03\\x02\\x02\\x02Ĺञ\\x03\\x02\\x02\\x02')\n 
buf.write('Ļण\\x03\\x02\\x02\\x02Ľऩ\\x03\\x02\\x02\\x02Ŀय\\x03')\n buf.write('\\x02\\x02\\x02Łस\\x03\\x02\\x02\\x02Ńऽ\\x03\\x02\\x02\\x02Ņॄ')\n buf.write('\\x03\\x02\\x02\\x02Ňॉ\\x03\\x02\\x02\\x02ʼnॎ\\x03\\x02\\x02\\x02ŋ')\n buf.write('॑\\x03\\x02\\x02\\x02ōक़\\x03\\x02\\x02\\x02ŏॢ\\x03\\x02\\x02\\x02')\n buf.write('ő॥\\x03\\x02\\x02\\x02œ७\\x03\\x02\\x02\\x02ŕॷ\\x03')\n buf.write('\\x02\\x02\\x02ŗঁ\\x03\\x02\\x02\\x02řঈ\\x03\\x02\\x02\\x02ś\\u098e')\n buf.write('\\x03\\x02\\x02\\x02ŝখ\\x03\\x02\\x02\\x02şঠ\\x03\\x02\\x02\\x02š')\n buf.write('ন\\x03\\x02\\x02\\x02ţ\\u09b1\\x03\\x02\\x02\\x02ťস\\x03\\x02\\x02\\x02')\n buf.write('ŧা\\x03\\x02\\x02\\x02ũৄ\\x03\\x02\\x02\\x02ūো\\x03')\n buf.write(\n '\\x02\\x02\\x02ŭ\\u09d8\\x03\\x02\\x02\\x02ůৠ\\x03\\x02\\x02\\x02ű\\u09e4')\n buf.write('\\x03\\x02\\x02\\x02ų৬\\x03\\x02\\x02\\x02ŵ৶\\x03\\x02\\x02\\x02ŷ')\n buf.write(\n '\\u09ff\\x03\\x02\\x02\\x02Ź\\u0a04\\x03\\x02\\x02\\x02Żਏ\\x03\\x02\\x02\\x02')\n buf.write('Ž\\u0a12\\x03\\x02\\x02\\x02ſਜ\\x03\\x02\\x02\\x02Ɓਤ\\x03')\n buf.write('\\x02\\x02\\x02ƃ\\u0a29\\x03\\x02\\x02\\x02ƅਮ\\x03\\x02\\x02\\x02Ƈਲ਼')\n buf.write('\\x03\\x02\\x02\\x02Ɖ਼\\x03\\x02\\x02\\x02Ƌੁ\\x03\\x02\\x02\\x02ƍ')\n buf.write('ੌ\\x03\\x02\\x02\\x02Ə\\u0a54\\x03\\x02\\x02\\x02Ƒਖ਼\\x03\\x02\\x02\\x02')\n buf.write('Ɠ\\u0a5f\\x03\\x02\\x02\\x02ƕ੧\\x03\\x02\\x02\\x02Ɨ੬\\x03')\n buf.write(\n '\\x02\\x02\\x02ƙੲ\\x03\\x02\\x02\\x02ƛ\\u0a78\\x03\\x02\\x02\\x02Ɲ\\u0a7e')\n buf.write('\\x03\\x02\\x02\\x02Ɵ\\u0a84\\x03\\x02\\x02\\x02ơઊ\\x03\\x02\\x02\\x02ƣ')\n buf.write('એ\\x03\\x02\\x02\\x02ƥખ\\x03\\x02\\x02\\x02Ƨચ\\x03\\x02\\x02\\x02')\n buf.write('Ʃડ\\x03\\x02\\x02\\x02ƫધ\\x03\\x02\\x02\\x02ƭબ\\x03')\n buf.write(\n '\\x02\\x02\\x02Ư\\u0ab1\\x03\\x02\\x02\\x02Ʊશ\\x03\\x02\\x02\\x02Ƴ\\u0aba')\n buf.write('\\x03\\x02\\x02\\x02Ƶૂ\\x03\\x02\\x02\\x02Ʒો\\x03\\x02\\x02\\x02ƹ')\n buf.write(\n '\\u0ad4\\x03\\x02\\x02\\x02ƻ\\u0adb\\x03\\x02\\x02\\x02ƽૡ\\x03\\x02\\x02\\x02')\n 
buf.write('ƿ૧\\x03\\x02\\x02\\x02ǁ૮\\x03\\x02\\x02\\x02ǃ\\u0af7\\x03')\n buf.write('\\x02\\x02\\x02Dž\\u0b00\\x03\\x02\\x02\\x02LJଅ\\x03\\x02\\x02\\x02ljଋ')\n buf.write('\\x03\\x02\\x02\\x02Nj\\u0b12\\x03\\x02\\x02\\x02Ǎଘ\\x03\\x02\\x02\\x02Ǐ')\n buf.write('ଡ\\x03\\x02\\x02\\x02Ǒଦ\\x03\\x02\\x02\\x02Ǔପ\\x03\\x02\\x02\\x02')\n buf.write('Ǖଲ\\x03\\x02\\x02\\x02Ǘ\\u0b3b\\x03\\x02\\x02\\x02Ǚି\\x03')\n buf.write(\n '\\x02\\x02\\x02Ǜ\\u0b45\\x03\\x02\\x02\\x02ǝ\\u0b4e\\x03\\x02\\x02\\x02ǟ\\u0b54'\n )\n buf.write('\\x03\\x02\\x02\\x02ǡ\\u0b5b\\x03\\x02\\x02\\x02ǣୟ\\x03\\x02\\x02\\x02ǥ')\n buf.write('ୢ\\x03\\x02\\x02\\x02ǧ୪\\x03\\x02\\x02\\x02ǩ୲\\x03\\x02\\x02\\x02')\n buf.write('ǫ\\u0b79\\x03\\x02\\x02\\x02ǭ\\u0b81\\x03\\x02\\x02\\x02ǯஒ\\x03')\n buf.write(\n '\\x02\\x02\\x02DZ\\u0b9d\\x03\\x02\\x02\\x02dzந\\x03\\x02\\x02\\x02ǵ\\u0bad')\n buf.write('\\x03\\x02\\x02\\x02Ƿவ\\x03\\x02\\x02\\x02ǹ\\u0bc3\\x03\\x02\\x02\\x02ǻ')\n buf.write(\n 'ே\\x03\\x02\\x02\\x02ǽ\\u0bce\\x03\\x02\\x02\\x02ǿ\\u0bd3\\x03\\x02\\x02\\x02')\n buf.write('ȁ\\u0bd9\\x03\\x02\\x02\\x02ȃ\\u0be0\\x03\\x02\\x02\\x02ȅ௨\\x03')\n buf.write('\\x02\\x02\\x02ȇ௲\\x03\\x02\\x02\\x02ȉ௹\\x03\\x02\\x02\\x02ȋ\\u0bfc')\n buf.write('\\x03\\x02\\x02\\x02ȍఀ\\x03\\x02\\x02\\x02ȏఄ\\x03\\x02\\x02\\x02ȑ')\n buf.write('ఈ\\x03\\x02\\x02\\x02ȓఋ\\x03\\x02\\x02\\x02ȕఐ\\x03\\x02\\x02\\x02')\n buf.write('ȗక\\x03\\x02\\x02\\x02șజ\\x03\\x02\\x02\\x02țట\\x03')\n buf.write('\\x02\\x02\\x02ȝధ\\x03\\x02\\x02\\x02ȟభ\\x03\\x02\\x02\\x02ȡస')\n buf.write('\\x03\\x02\\x02\\x02ȣీ\\x03\\x02\\x02\\x02ȥౄ\\x03\\x02\\x02\\x02ȧ')\n buf.write('ొ\\x03\\x02\\x02\\x02ȩ\\u0c4f\\x03\\x02\\x02\\x02ȫౚ\\x03\\x02\\x02\\x02')\n buf.write('ȭౢ\\x03\\x02\\x02\\x02ȯ\\u0c72\\x03\\x02\\x02\\x02ȱ౽\\x03')\n buf.write('\\x02\\x02\\x02ȳ಄\\x03\\x02\\x02\\x02ȵಎ\\x03\\x02\\x02\\x02ȷಖ')\n buf.write('\\x03\\x02\\x02\\x02ȹಛ\\x03\\x02\\x02\\x02Ȼತ\\x03\\x02\\x02\\x02Ƚ')\n buf.write(\n 'ಪ\\x03\\x02\\x02\\x02ȿ\\u0cb4\\x03\\x02\\x02\\x02Ɂ\\u0cba\\x03\\x02\\x02\\x02')\n 
buf.write('Ƀಿ\\x03\\x02\\x02\\x02Ʌೋ\\x03\\x02\\x02\\x02ɇ\\u0cd4\\x03')\n buf.write('\\x02\\x02\\x02ɉೞ\\x03\\x02\\x02\\x02ɋ\\u0ce5\\x03\\x02\\x02\\x02ɍ೯')\n buf.write('\\x03\\x02\\x02\\x02ɏ\\u0cf9\\x03\\x02\\x02\\x02ɑഁ\\x03\\x02\\x02\\x02ɓ')\n buf.write('ഇ\\x03\\x02\\x02\\x02ɕ\\u0d11\\x03\\x02\\x02\\x02ɗഗ\\x03\\x02\\x02\\x02')\n buf.write('əഝ\\x03\\x02\\x02\\x02ɛഡ\\x03\\x02\\x02\\x02ɝദ\\x03')\n buf.write('\\x02\\x02\\x02ɟഫ\\x03\\x02\\x02\\x02ɡല\\x03\\x02\\x02\\x02ɣശ')\n buf.write('\\x03\\x02\\x02\\x02ɥീ\\x03\\x02\\x02\\x02ɧൌ\\x03\\x02\\x02\\x02ɩ')\n buf.write(\n '\\u0d53\\x03\\x02\\x02\\x02ɫ൝\\x03\\x02\\x02\\x02ɭ\\u0d64\\x03\\x02\\x02\\x02')\n buf.write('ɯ൬\\x03\\x02\\x02\\x02ɱ൴\\x03\\x02\\x02\\x02ɳඈ\\x03')\n buf.write('\\x02\\x02\\x02ɵඏ\\x03\\x02\\x02\\x02ɷග\\x03\\x02\\x02\\x02ɹඣ')\n buf.write('\\x03\\x02\\x02\\x02ɻත\\x03\\x02\\x02\\x02ɽඳ\\x03\\x02\\x02\\x02ɿ')\n buf.write('ර\\x03\\x02\\x02\\x02ʁෂ\\x03\\x02\\x02\\x02ʃ\\u0dc8\\x03\\x02\\x02\\x02')\n buf.write('ʅෑ\\x03\\x02\\x02\\x02ʇෘ\\x03\\x02\\x02\\x02ʉො\\x03')\n buf.write('\\x02\\x02\\x02ʋ\\u0de2\\x03\\x02\\x02\\x02ʍ෧\\x03\\x02\\x02\\x02ʏ෭')\n buf.write('\\x03\\x02\\x02\\x02ʑ෴\\x03\\x02\\x02\\x02ʓ\\u0df9\\x03\\x02\\x02\\x02ʕ')\n buf.write('ฃ\\x03\\x02\\x02\\x02ʗช\\x03\\x02\\x02\\x02ʙถ\\x03\\x02\\x02\\x02')\n buf.write('ʛบ\\x03\\x02\\x02\\x02ʝม\\x03\\x02\\x02\\x02ʟศ\\x03')\n buf.write('\\x02\\x02\\x02ʡอ\\x03\\x02\\x02\\x02ʣี\\x03\\x02\\x02\\x02ʥ\\u0e3c')\n buf.write('\\x03\\x02\\x02\\x02ʧแ\\x03\\x02\\x02\\x02ʩ๊\\x03\\x02\\x02\\x02ʫ')\n buf.write(\n '๕\\x03\\x02\\x02\\x02ʭ\\u0e62\\x03\\x02\\x02\\x02ʯ\\u0e74\\x03\\x02\\x02\\x02')\n buf.write('ʱ\\u0e80\\x03\\x02\\x02\\x02ʳຐ\\x03\\x02\\x02\\x02ʵດ\\x03')\n buf.write('\\x02\\x02\\x02ʷນ\\x03\\x02\\x02\\x02ʹຢ\\x03\\x02\\x02\\x02ʻຨ')\n buf.write('\\x03\\x02\\x02\\x02ʽອ\\x03\\x02\\x02\\x02ʿຶ\\x03\\x02\\x02\\x02ˁ')\n buf.write('\\u0ebf\\x03\\x02\\x02\\x02˃່\\x03\\x02\\x02\\x02˅໗\\x03\\x02\\x02\\x02')\n 
buf.write('ˇໞ\\x03\\x02\\x02\\x02ˉ\\u0ee3\\x03\\x02\\x02\\x02ˋ\\u0ee8\\x03')\n buf.write(\n '\\x02\\x02\\x02ˍ\\u0ef1\\x03\\x02\\x02\\x02ˏ\\u0efa\\x03\\x02\\x02\\x02ˑ\\u0eff'\n )\n buf.write('\\x03\\x02\\x02\\x02˓།\\x03\\x02\\x02\\x02˕༕\\x03\\x02\\x02\\x02˗')\n buf.write('༞\\x03\\x02\\x02\\x02˙༩\\x03\\x02\\x02\\x02˛༯\\x03\\x02\\x02\\x02')\n buf.write('˝༷\\x03\\x02\\x02\\x02˟ཁ\\x03\\x02\\x02\\x02ˡཎ\\x03')\n buf.write('\\x02\\x02\\x02ˣཕ\\x03\\x02\\x02\\x02˥འ\\x03\\x02\\x02\\x02˧ཧ')\n buf.write('\\x03\\x02\\x02\\x02˩ཱི\\x03\\x02\\x02\\x02˫ྀ\\x03\\x02\\x02\\x02˭')\n buf.write('ྎ\\x03\\x02\\x02\\x02˯ྖ\\x03\\x02\\x02\\x02˱ྞ\\x03\\x02\\x02\\x02')\n buf.write('˳ྦ\\x03\\x02\\x02\\x02˵ྫྷ\\x03\\x02\\x02\\x02˷ྰ\\x03')\n buf.write('\\x02\\x02\\x02˹ྵ\\x03\\x02\\x02\\x02˻ྺ\\x03\\x02\\x02\\x02˽࿄')\n buf.write(\n '\\x03\\x02\\x02\\x02˿\\u0fe0\\x03\\x02\\x02\\x02́\\u0ffb\\x03\\x02\\x02\\x02̃')\n buf.write('ဓ\\x03\\x02\\x02\\x02̅အ\\x03\\x02\\x02\\x02̇ု\\x03\\x02\\x02\\x02')\n buf.write('̉ဿ\\x03\\x02\\x02\\x02̋၏\\x03\\x02\\x02\\x02̍ၒ\\x03')\n buf.write('\\x02\\x02\\x02̏ၛ\\x03\\x02\\x02\\x02̑ၧ\\x03\\x02\\x02\\x02̓ၱ')\n buf.write('\\x03\\x02\\x02\\x02̕ၷ\\x03\\x02\\x02\\x02̗ၿ\\x03\\x02\\x02\\x02̙')\n buf.write('ႄ\\x03\\x02\\x02\\x02̛ႉ\\x03\\x02\\x02\\x02̝႒\\x03\\x02\\x02\\x02')\n buf.write('̟႗\\x03\\x02\\x02\\x02̡Ⴁ\\x03\\x02\\x02\\x02̣Ⴇ\\x03')\n buf.write('\\x02\\x02\\x02̥Ⴍ\\x03\\x02\\x02\\x02̧Ⴔ\\x03\\x02\\x02\\x02̩Ⴞ')\n buf.write(\n '\\x03\\x02\\x02\\x02̫\\u10c6\\x03\\x02\\x02\\x02̭\\u10cc\\x03\\x02\\x02\\x02̯')\n buf.write('დ\\x03\\x02\\x02\\x02̱მ\\x03\\x02\\x02\\x02̳ტ\\x03\\x02\\x02\\x02')\n buf.write('̵ჩ\\x03\\x02\\x02\\x02̷ჭ\\x03\\x02\\x02\\x02̹ჳ\\x03')\n buf.write('\\x02\\x02\\x02̻ჼ\\x03\\x02\\x02\\x02̽ᄂ\\x03\\x02\\x02\\x02̿ᄉ')\n buf.write('\\x03\\x02\\x02\\x02́ᄑ\\x03\\x02\\x02\\x02̓ᄚ\\x03\\x02\\x02\\x02ͅ')\n buf.write('ᄣ\\x03\\x02\\x02\\x02͇ᄪ\\x03\\x02\\x02\\x02͉ᄲ\\x03\\x02\\x02\\x02')\n buf.write('͋ᄺ\\x03\\x02\\x02\\x02͍ᅃ\\x03\\x02\\x02\\x02͏ᅈ\\x03')\n 
buf.write('\\x02\\x02\\x02͑ᅐ\\x03\\x02\\x02\\x02͓ᅛ\\x03\\x02\\x02\\x02͕ᅠ')\n buf.write('\\x03\\x02\\x02\\x02͗ᅩ\\x03\\x02\\x02\\x02͙ᅯ\\x03\\x02\\x02\\x02͛')\n buf.write('ᅵ\\x03\\x02\\x02\\x02͝ᅺ\\x03\\x02\\x02\\x02͟ᆁ\\x03\\x02\\x02\\x02')\n buf.write('͡ᆆ\\x03\\x02\\x02\\x02ͣᆌ\\x03\\x02\\x02\\x02ͥᆐ\\x03')\n buf.write('\\x02\\x02\\x02ͧᆗ\\x03\\x02\\x02\\x02ͩᆥ\\x03\\x02\\x02\\x02ͫᆭ')\n buf.write('\\x03\\x02\\x02\\x02ͭᆺ\\x03\\x02\\x02\\x02ͯᇅ\\x03\\x02\\x02\\x02ͱ')\n buf.write('ᇏ\\x03\\x02\\x02\\x02ͳᇙ\\x03\\x02\\x02\\x02͵ᇧ\\x03\\x02\\x02\\x02')\n buf.write('ͷᇰ\\x03\\x02\\x02\\x02\\u0379ᇶ\\x03\\x02\\x02\\x02ͻᇿ\\x03')\n buf.write('\\x02\\x02\\x02ͽሇ\\x03\\x02\\x02\\x02Ϳሔ\\x03\\x02\\x02\\x02\\u0381ም')\n buf.write('\\x03\\x02\\x02\\x02\\u0383ሢ\\x03\\x02\\x02\\x02΅ሦ\\x03\\x02\\x02\\x02·')\n buf.write(\n 'ሿ\\x03\\x02\\x02\\x02Ήቄ\\x03\\x02\\x02\\x02\\u038b\\u124f\\x03\\x02\\x02\\x02')\n buf.write('\\u038dቡ\\x03\\x02\\x02\\x02Ώቱ\\x03\\x02\\x02\\x02Αኄ\\x03')\n buf.write('\\x02\\x02\\x02Γኛ\\x03\\x02\\x02\\x02Εኪ\\x03\\x02\\x02\\x02Ηኴ')\n buf.write(\n '\\x03\\x02\\x02\\x02Ι\\u12bf\\x03\\x02\\x02\\x02Λ\\u12c7\\x03\\x02\\x02\\x02Ν')\n buf.write('ዔ\\x03\\x02\\x02\\x02Οዤ\\x03\\x02\\x02\\x02Ρዴ\\x03\\x02\\x02\\x02')\n buf.write('Σዹ\\x03\\x02\\x02\\x02Υዽ\\x03\\x02\\x02\\x02Χጂ\\x03')\n buf.write('\\x02\\x02\\x02Ωጆ\\x03\\x02\\x02\\x02Ϋጋ\\x03\\x02\\x02\\x02έጏ')\n buf.write('\\x03\\x02\\x02\\x02ί\\u1316\\x03\\x02\\x02\\x02αጚ\\x03\\x02\\x02\\x02γ')\n buf.write('ጠ\\x03\\x02\\x02\\x02εጰ\\x03\\x02\\x02\\x02ηጻ\\x03\\x02\\x02\\x02')\n buf.write('ιጿ\\x03\\x02\\x02\\x02λፈ\\x03\\x02\\x02\\x02νፎ\\x03')\n buf.write('\\x02\\x02\\x02οፕ\\x03\\x02\\x02\\x02ρፚ\\x03\\x02\\x02\\x02σ፡')\n buf.write('\\x03\\x02\\x02\\x02υ፮\\x03\\x02\\x02\\x02χ፻\\x03\\x02\\x02\\x02ω')\n buf.write('ᎈ\\x03\\x02\\x02\\x02ϋᎋ\\x03\\x02\\x02\\x02ύᎍ\\x03\\x02\\x02\\x02')\n buf.write('Ϗᎏ\\x03\\x02\\x02\\x02ϑ\\u139e\\x03\\x02\\x02\\x02ϓᎪ\\x03')\n buf.write('\\x02\\x02\\x02ϕᎳ\\x03\\x02\\x02\\x02ϗᎵ\\x03\\x02\\x02\\x02ϙᏀ')\n 
buf.write('\\x03\\x02\\x02\\x02ϛᏋ\\x03\\x02\\x02\\x02ϝᏖ\\x03\\x02\\x02\\x02ϟ')\n buf.write('Ꮱ\\x03\\x02\\x02\\x02ϡᏣ\\x03\\x02\\x02\\x02ϣᏭ\\x03\\x02\\x02\\x02')\n buf.write('ϥᏯ\\x03\\x02\\x02\\x02ϧᏱ\\x03\\x02\\x02\\x02ϩᏳ\\x03')\n buf.write('\\x02\\x02\\x02ϫᏵ\\x03\\x02\\x02\\x02ϭᏸ\\x03\\x02\\x02\\x02ϯᏺ')\n buf.write('\\x03\\x02\\x02\\x02ϱᏼ\\x03\\x02\\x02\\x02ϳ\\u13fe\\x03\\x02\\x02\\x02ϵ')\n buf.write('᐀\\x03\\x02\\x02\\x02Ϸᐂ\\x03\\x02\\x02\\x02Ϲᐄ\\x03\\x02\\x02\\x02')\n buf.write('ϻᐕ\\x03\\x02\\x02\\x02Ͻᐗ\\x03\\x02\\x02\\x02Ͽᐙ\\x03')\n buf.write('\\x02\\x02\\x02Ёᐛ\\x03\\x02\\x02\\x02Ѓᐞ\\x03\\x02\\x02\\x02Ѕᐠ')\n buf.write('\\x03\\x02\\x02\\x02Їᐫ\\x03\\x02\\x02\\x02Љᐭ\\x03\\x02\\x02\\x02Ћ')\n buf.write('ᐯ\\x03\\x02\\x02\\x02Ѝᐱ\\x03\\x02\\x02\\x02Џᐳ\\x03\\x02\\x02\\x02')\n buf.write('Бᐵ\\x03\\x02\\x02\\x02Гᐷ\\x03\\x02\\x02\\x02Еᐺ\\x03')\n buf.write('\\x02\\x02\\x02Зᐼ\\x03\\x02\\x02\\x02Йᐾ\\x03\\x02\\x02\\x02Лᑀ')\n buf.write('\\x03\\x02\\x02\\x02Нᑂ\\x03\\x02\\x02\\x02Пᑅ\\x03\\x02\\x02\\x02С')\n buf.write('ᑋ\\x03\\x02\\x02\\x02Уᑎ\\x03\\x02\\x02\\x02Хᑕ\\x03\\x02\\x02\\x02')\n buf.write('Чᑠ\\x03\\x02\\x02\\x02Щᑯ\\x03\\x02\\x02\\x02Ыᑽ\\x03')\n buf.write('\\x02\\x02\\x02Эᒐ\\x03\\x02\\x02\\x02Яᒔ\\x03\\x02\\x02\\x02бᒖ')\n buf.write('\\x03\\x02\\x02\\x02гᒞ\\x03\\x02\\x02\\x02еᒣ\\x03\\x02\\x02\\x02з')\n buf.write('ᒥ\\x03\\x02\\x02\\x02йᒧ\\x03\\x02\\x02\\x02лᒩ\\x03\\x02\\x02\\x02')\n buf.write('нᒫ\\x03\\x02\\x02\\x02пᒭ\\x03\\x02\\x02\\x02сᒯ\\x03')\n buf.write('\\x02\\x02\\x02уᒱ\\x03\\x02\\x02\\x02хᒳ\\x03\\x02\\x02\\x02чᒵ')\n buf.write('\\x03\\x02\\x02\\x02щᒷ\\x03\\x02\\x02\\x02ыᒹ\\x03\\x02\\x02\\x02э')\n buf.write('ᒻ\\x03\\x02\\x02\\x02яᒽ\\x03\\x02\\x02\\x02ёᒿ\\x03\\x02\\x02\\x02')\n buf.write('ѓᓁ\\x03\\x02\\x02\\x02ѕᓃ\\x03\\x02\\x02\\x02їᓅ\\x03')\n buf.write('\\x02\\x02\\x02љᓇ\\x03\\x02\\x02\\x02ћᓉ\\x03\\x02\\x02\\x02ѝᓋ')\n buf.write('\\x03\\x02\\x02\\x02џᓍ\\x03\\x02\\x02\\x02ѡᓏ\\x03\\x02\\x02\\x02ѣ')\n 
buf.write('ᓑ\\x03\\x02\\x02\\x02ѥᓓ\\x03\\x02\\x02\\x02ѧᓕ\\x03\\x02\\x02\\x02')\n buf.write('ѩѪ\\x070\\x02\\x02Ѫѫ\\x070\\x02\\x02ѫ\\x04\\x03\\x02')\n buf.write('\\x02\\x02Ѭѭ\\x05еț\\x02ѭ\\x06\\x03\\x02\\x02\\x02Ѯ')\n buf.write('ѯ\\x05еț\\x02ѯѰ\\x05лȞ\\x02Ѱ')\n buf.write('ѱ\\x05лȞ\\x02ѱ\\x08\\x03\\x02\\x02\\x02Ѳѳ\\x05е')\n buf.write('ț\\x02ѳѴ\\x05пȠ\\x02Ѵѵ\\x05ћ')\n buf.write('Ȯ\\x02ѵѶ\\x05нȟ\\x02Ѷѷ\\x05ї')\n buf.write('Ȭ\\x02ѷ\\n\\x03\\x02\\x02\\x02Ѹѹ\\x05еț\\x02ѹ')\n buf.write('Ѻ\\x05сȡ\\x02Ѻѻ\\x05нȟ\\x02ѻ')\n buf.write('Ѽ\\x05яȨ\\x02Ѽѽ\\x05ћȮ\\x02ѽ')\n buf.write('\\x0c\\x03\\x02\\x02\\x02Ѿѿ\\x05еț\\x02ѿҀ\\x05с')\n buf.write('ȡ\\x02Ҁҁ\\x05сȡ\\x02ҁ҂\\x05ї')\n buf.write('Ȭ\\x02҂҃\\x05нȟ\\x02҃҄\\x05с')\n buf.write('ȡ\\x02҄҅\\x05еț\\x02҅҆\\x05ћ')\n buf.write('Ȯ\\x02҆҇\\x05нȟ\\x02҇\\x0e\\x03\\x02\\x02\\x02')\n buf.write('҈҉\\x05еț\\x02҉Ҋ\\x05ыȦ')\n buf.write('\\x02Ҋҋ\\x05ыȦ\\x02ҋ\\x10\\x03\\x02\\x02\\x02Ҍ')\n buf.write('ҍ\\x05еț\\x02ҍҎ\\x05ыȦ\\x02Ҏ')\n buf.write('ҏ\\x05ћȮ\\x02ҏҐ\\x05нȟ\\x02Ґ')\n buf.write('ґ\\x05їȬ\\x02ґ\\x12\\x03\\x02\\x02\\x02Ғғ\\x05')\n buf.write('еț\\x02ғҔ\\x05яȨ\\x02Ҕҕ')\n buf.write('\\x05еț\\x02ҕҖ\\x05ыȦ\\x02Җҗ')\n buf.write('\\x05ѥȳ\\x02җҘ\\x05ѧȴ\\x02Ҙҙ')\n buf.write('\\x05нȟ\\x02ҙ\\x14\\x03\\x02\\x02\\x02Ққ\\x05е')\n buf.write('ț\\x02қҜ\\x05яȨ\\x02Ҝҝ\\x05л')\n buf.write('Ȟ\\x02ҝ\\x16\\x03\\x02\\x02\\x02Ҟҟ\\x05еț\\x02')\n buf.write('ҟҠ\\x05яȨ\\x02Ҡҡ\\x05ѥȳ')\n buf.write('\\x02ҡ\\x18\\x03\\x02\\x02\\x02Ңң\\x05еț\\x02ң')\n buf.write('Ҥ\\x05їȬ\\x02Ҥҥ\\x05їȬ\\x02ҥ')\n buf.write('Ҧ\\x05еț\\x02Ҧҧ\\x05ѥȳ\\x02ҧ')\n buf.write('\\x1a\\x03\\x02\\x02\\x02Ҩҩ\\x05еț\\x02ҩҪ\\x05')\n buf.write('љȭ\\x02Ҫ\\x1c\\x03\\x02\\x02\\x02ҫҬ\\x05еț')\n buf.write('\\x02Ҭҭ\\x05љȭ\\x02ҭҮ\\x05љȭ')\n buf.write('\\x02Үү\\x05ѝȯ\\x02үҰ\\x05эȧ')\n buf.write('\\x02Ұұ\\x05нȟ\\x02ұ\\x1e\\x03\\x02\\x02\\x02Ҳ')\n buf.write('ҳ\\x05еț\\x02ҳҴ\\x05љȭ\\x02Ҵ')\n buf.write('ҵ\\x05љȭ\\x02ҵҶ\\x05нȟ\\x02Ҷ')\n buf.write('ҷ\\x05їȬ\\x02ҷҸ\\x05ћȮ\\x02Ҹ')\n buf.write(' 
\\x03\\x02\\x02\\x02ҹҺ\\x05еț\\x02Һһ\\x05љ')\n buf.write('ȭ\\x02һҼ\\x05йȝ\\x02Ҽ\"\\x03\\x02\\x02\\x02ҽ')\n buf.write('Ҿ\\x05еț\\x02Ҿҿ\\x05љȭ\\x02ҿ')\n buf.write('Ӏ\\x05љȭ\\x02ӀӁ\\x05ёȩ\\x02Ӂ')\n buf.write('ӂ\\x05йȝ\\x02ӂӃ\\x05хȣ\\x02Ӄ')\n buf.write('ӄ\\x05еț\\x02ӄӅ\\x05ћȮ\\x02Ӆ')\n buf.write('ӆ\\x05нȟ\\x02ӆ$\\x03\\x02\\x02\\x02Ӈӈ\\x05е')\n buf.write('ț\\x02ӈӉ\\x05ћȮ\\x02Ӊ&\\x03\\x02\\x02\\x02ӊ')\n buf.write('Ӌ\\x05еț\\x02Ӌӌ\\x05ћȮ\\x02ӌ')\n buf.write('Ӎ\\x05ћȮ\\x02Ӎӎ\\x05їȬ\\x02ӎ')\n buf.write('ӏ\\x05хȣ\\x02ӏӐ\\x05зȜ\\x02Ӑ')\n buf.write('ӑ\\x05ѝȯ\\x02ӑӒ\\x05ћȮ\\x02Ӓ')\n buf.write('ӓ\\x05нȟ\\x02ӓ(\\x03\\x02\\x02\\x02Ӕӕ\\x05е')\n buf.write('ț\\x02ӕӖ\\x05ѝȯ\\x02Ӗӗ\\x05л')\n buf.write('Ȟ\\x02ӗӘ\\x05хȣ\\x02Әә\\x05ћ')\n buf.write('Ȯ\\x02ә*\\x03\\x02\\x02\\x02Ӛӛ\\x05еț\\x02ӛ')\n buf.write('Ӝ\\x05ѝȯ\\x02Ӝӝ\\x05ћȮ\\x02ӝ')\n buf.write('Ӟ\\x05уȢ\\x02Ӟӟ\\x05хȣ\\x02ӟ')\n buf.write('Ӡ\\x05лȞ\\x02Ӡ,\\x03\\x02\\x02\\x02ӡӢ\\x05е')\n buf.write('ț\\x02Ӣӣ\\x05ѝȯ\\x02ӣӤ\\x05ћ')\n buf.write('Ȯ\\x02Ӥӥ\\x05ёȩ\\x02ӥ.\\x03\\x02\\x02\\x02Ӧ')\n buf.write('ӧ\\x05еț\\x02ӧӨ\\x05ѝȯ\\x02Ө')\n buf.write('ө\\x05ћȮ\\x02өӪ\\x05ёȩ\\x02Ӫ')\n buf.write('ӫ\\x05эȧ\\x02ӫӬ\\x05еț\\x02Ӭ')\n buf.write('ӭ\\x05ћȮ\\x02ӭӮ\\x05хȣ\\x02Ӯ')\n buf.write('ӯ\\x05йȝ\\x02ӯ0\\x03\\x02\\x02\\x02Ӱӱ\\x05')\n buf.write('еț\\x02ӱӲ\\x05ѝȯ\\x02Ӳӳ')\n buf.write('\\x05ћȮ\\x02ӳӴ\\x05ёȩ\\x02Ӵӵ')\n buf.write('\\x05яȨ\\x02ӵӶ\\x05ёȩ\\x02Ӷӷ')\n buf.write('\\x05эȧ\\x02ӷӸ\\x05ёȩ\\x02Ӹӹ')\n buf.write('\\x05ѝȯ\\x02ӹӺ\\x05љȭ\\x02Ӻӻ')\n buf.write('\\x07a\\x02\\x02ӻӼ\\x05ћȮ\\x02Ӽӽ\\x05ї')\n buf.write('Ȭ\\x02ӽӾ\\x05еț\\x02Ӿӿ\\x05я')\n buf.write('Ȩ\\x02ӿԀ\\x05љȭ\\x02Ԁԁ\\x05е')\n buf.write('ț\\x02ԁԂ\\x05йȝ\\x02Ԃԃ\\x05ћ')\n buf.write('Ȯ\\x02ԃԄ\\x05хȣ\\x02Ԅԅ\\x05ё')\n buf.write('ȩ\\x02ԅԆ\\x05яȨ\\x02Ԇ2\\x03\\x02\\x02\\x02')\n buf.write('ԇԈ\\x05зȜ\\x02Ԉԉ\\x05еț')\n buf.write('\\x02ԉԊ\\x05ћȮ\\x02Ԋԋ\\x05йȝ')\n buf.write('\\x02ԋԌ\\x05уȢ\\x02Ԍ4\\x03\\x02\\x02\\x02ԍ')\n buf.write('Ԏ\\x05зȜ\\x02Ԏԏ\\x05нȟ\\x02ԏ')\n buf.write('Ԑ\\x05пȠ\\x02Ԑԑ\\x05ёȩ\\x02ԑ')\n 
buf.write('Ԓ\\x05їȬ\\x02Ԓԓ\\x05нȟ\\x02ԓ')\n buf.write('6\\x03\\x02\\x02\\x02Ԕԕ\\x05зȜ\\x02ԕԖ\\x05')\n buf.write('нȟ\\x02Ԗԗ\\x05сȡ\\x02ԗԘ')\n buf.write('\\x05хȣ\\x02Ԙԙ\\x05яȨ\\x02ԙ8\\x03')\n buf.write('\\x02\\x02\\x02Ԛԛ\\x05зȜ\\x02ԛԜ\\x05н')\n buf.write('ȟ\\x02Ԝԝ\\x05ћȮ\\x02ԝԞ\\x05ѡ')\n buf.write('ȱ\\x02Ԟԟ\\x05нȟ\\x02ԟԠ\\x05н')\n buf.write('ȟ\\x02Ԡԡ\\x05яȨ\\x02ԡ:\\x03\\x02\\x02\\x02Ԣ')\n buf.write('ԣ\\x05зȜ\\x02ԣԤ\\x05пȠ\\x02Ԥ')\n buf.write('ԥ\\x05хȣ\\x02ԥԦ\\x05ыȦ\\x02Ԧ')\n buf.write('ԧ\\x05нȟ\\x02ԧ<\\x03\\x02\\x02\\x02Ԩԩ\\x05з')\n buf.write('Ȝ\\x02ԩԪ\\x05хȣ\\x02Ԫԫ\\x05я')\n buf.write('Ȩ\\x02ԫԬ\\x05еț\\x02Ԭԭ\\x05ї')\n buf.write('Ȭ\\x02ԭԮ\\x05ѥȳ\\x02Ԯԯ\\x07a\\x02')\n buf.write('\\x02ԯ\\u0530\\x05лȞ\\x02\\u0530Ա\\x05ёȩ')\n buf.write('\\x02ԱԲ\\x05ѝȯ\\x02ԲԳ\\x05зȜ')\n buf.write('\\x02ԳԴ\\x05ыȦ\\x02ԴԵ\\x05нȟ')\n buf.write('\\x02Ե>\\x03\\x02\\x02\\x02ԶԷ\\x05зȜ\\x02ԷԸ')\n buf.write('\\x05хȣ\\x02ԸԹ\\x05яȨ\\x02ԹԺ')\n buf.write('\\x05еț\\x02ԺԻ\\x05їȬ\\x02ԻԼ')\n buf.write('\\x05ѥȳ\\x02ԼԽ\\x07a\\x02\\x02ԽԾ\\x05п')\n buf.write('Ƞ\\x02ԾԿ\\x05ыȦ\\x02ԿՀ\\x05ё')\n buf.write('ȩ\\x02ՀՁ\\x05еț\\x02ՁՂ\\x05ћ')\n buf.write('Ȯ\\x02Ղ@\\x03\\x02\\x02\\x02ՃՄ\\x05зȜ\\x02Մ')\n buf.write('Յ\\x05хȣ\\x02ՅՆ\\x05яȨ\\x02Ն')\n buf.write('Շ\\x05еț\\x02ՇՈ\\x05їȬ\\x02Ո')\n buf.write('Չ\\x05ѥȳ\\x02ՉՊ\\x07a\\x02\\x02ՊՋ')\n buf.write('\\x05хȣ\\x02ՋՌ\\x05яȨ\\x02ՌՍ')\n buf.write('\\x05ћȮ\\x02ՍՎ\\x05нȟ\\x02ՎՏ')\n buf.write('\\x05сȡ\\x02ՏՐ\\x05нȟ\\x02ՐՑ')\n buf.write('\\x05їȬ\\x02ՑB\\x03\\x02\\x02\\x02ՒՓ\\x05зȜ')\n buf.write('\\x02ՓՔ\\x05ыȦ\\x02ՔՕ\\x05ёȩ')\n buf.write('\\x02ՕՖ\\x05зȜ\\x02ՖD\\x03\\x02\\x02\\x02\\u0557\\u0558')\n buf.write('\\x05зȜ\\x02\\u0558ՙ\\x05ыȦ\\x02ՙ՚')\n buf.write('\\x05ёȩ\\x02՚՛\\x05йȝ\\x02՛՜')\n buf.write('\\x05щȥ\\x02՜F\\x03\\x02\\x02\\x02՝՞\\x05зȜ')\n buf.write('\\x02՞՟\\x05ёȩ\\x02՟ՠ\\x05лȞ')\n buf.write('\\x02ՠա\\x05ѥȳ\\x02աH\\x03\\x02\\x02\\x02բգ')\n buf.write('\\x05зȜ\\x02գդ\\x05ёȩ\\x02դե')\n buf.write('\\x05ёȩ\\x02եզ\\x05ыȦ\\x02զէ')\n buf.write('\\x05нȟ\\x02էը\\x05еț\\x02ըթ')\n 
buf.write('\\x05яȨ\\x02թJ\\x03\\x02\\x02\\x02ժի\\x05зȜ')\n buf.write('\\x02իլ\\x05ёȩ\\x02լխ\\x05ћȮ')\n buf.write('\\x02խծ\\x05уȢ\\x02ծL\\x03\\x02\\x02\\x02կհ')\n buf.write('\\x05зȜ\\x02հձ\\x05їȬ\\x02ձղ')\n buf.write('\\x05нȟ\\x02ղճ\\x05еț\\x02ճմ')\n buf.write('\\x05лȞ\\x02մյ\\x05ћȮ\\x02յն')\n buf.write('\\x05уȢ\\x02նN\\x03\\x02\\x02\\x02շո\\x05зȜ')\n buf.write('\\x02ոչ\\x05ѝȯ\\x02չպ\\x05ыȦ')\n buf.write('\\x02պջ\\x05щȥ\\x02ջP\\x03\\x02\\x02\\x02ռս')\n buf.write('\\x05зȜ\\x02սվ\\x05ѥȳ\\x02վR\\x03')\n buf.write('\\x02\\x02\\x02տր\\x05зȜ\\x02րց\\x05ѥ')\n buf.write('ȳ\\x02ցւ\\x05ћȮ\\x02ւփ\\x05н')\n buf.write('ȟ\\x02փT\\x03\\x02\\x02\\x02քօ\\x05йȝ\\x02օ')\n buf.write('V\\x03\\x02\\x02\\x02ֆև\\x05йȝ\\x02ևֈ\\x05е')\n buf.write('ț\\x02ֈ։\\x05йȝ\\x02։֊\\x05у')\n buf.write('Ȣ\\x02֊\\u058b\\x05нȟ\\x02\\u058bX\\x03\\x02\\x02\\x02\\u058c')\n buf.write('֍\\x05йȝ\\x02֍֎\\x05еț\\x02֎')\n buf.write('֏\\x05ыȦ\\x02֏\\u0590\\x05ыȦ\\x02\\u0590')\n buf.write('Z\\x03\\x02\\x02\\x02֑֒\\x05йȝ\\x02֒֓\\x05е')\n buf.write('ț\\x02֓֔\\x05яȨ\\x02֔֕\\x05ё')\n buf.write('ȩ\\x02֖֕\\x05яȨ\\x02֖֗\\x05х')\n buf.write('ȣ\\x02֗֘\\x05йȝ\\x02֘֙\\x05е')\n buf.write('ț\\x02֚֙\\x05ыȦ\\x02֚\\\\\\x03\\x02\\x02\\x02֛')\n buf.write('֜\\x05йȝ\\x02֜֝\\x05еț\\x02֝')\n buf.write('֞\\x05љȭ\\x02֞֟\\x05йȝ\\x02֟')\n buf.write('֠\\x05еț\\x02֠֡\\x05лȞ\\x02֡')\n buf.write('֢\\x05нȟ\\x02֢^\\x03\\x02\\x02\\x02֣֤\\x05й')\n buf.write('ȝ\\x02֤֥\\x05еț\\x02֥֦\\x05љ')\n buf.write('ȭ\\x02֦֧\\x05нȟ\\x02֧`\\x03\\x02\\x02\\x02֨')\n buf.write('֩\\x05йȝ\\x02֪֩\\x05еț\\x02֪')\n buf.write('֫\\x05љȭ\\x02֫֬\\x05ћȮ\\x02֬')\n buf.write('b\\x03\\x02\\x02\\x02֭֮\\x05йȝ\\x02֮֯\\x05у')\n buf.write('Ȣ\\x02ְ֯\\x05еț\\x02ְֱ\\x05ї')\n buf.write('Ȭ\\x02ֱd\\x03\\x02\\x02\\x02ֲֳ\\x05йȝ\\x02ֳ')\n buf.write('ִ\\x05уȢ\\x02ִֵ\\x05еț\\x02ֵ')\n buf.write('ֶ\\x05їȬ\\x02ֶַ\\x07a\\x02\\x02ַָ')\n buf.write('\\x05йȝ\\x02ָֹ\\x05љȭ\\x02ֹf\\x03')\n buf.write('\\x02\\x02\\x02ֺֻ\\x05йȝ\\x02ֻּ\\x05у')\n buf.write('Ȣ\\x02ּֽ\\x05еț\\x02ֽ־\\x05ї')\n 
buf.write('Ȭ\\x02־ֿ\\x05еț\\x02ֿ׀\\x05й')\n buf.write('ȝ\\x02׀ׁ\\x05ћȮ\\x02ׁׂ\\x05н')\n buf.write('ȟ\\x02ׂ׃\\x05їȬ\\x02׃h\\x03\\x02\\x02\\x02ׄ')\n buf.write('ׅ\\x05йȝ\\x02ׅ׆\\x05уȢ\\x02׆')\n buf.write('ׇ\\x05нȟ\\x02ׇ\\u05c8\\x05йȝ\\x02\\u05c8')\n buf.write('\\u05c9\\x05щȥ\\x02\\u05c9j\\x03\\x02\\x02\\x02\\u05ca\\u05cb\\x05й')\n buf.write('ȝ\\x02\\u05cb\\u05cc\\x05уȢ\\x02\\u05cc\\u05cd\\x05ї')\n buf.write('Ȭ\\x02\\u05cdl\\x03\\x02\\x02\\x02\\u05ce\\u05cf\\x05йȝ\\x02\\u05cf')\n buf.write('א\\x05ыȦ\\x02אב\\x05ёȩ\\x02ב')\n buf.write('ג\\x05зȜ\\x02גn\\x03\\x02\\x02\\x02דה\\x05й')\n buf.write('ȝ\\x02הו\\x05ыȦ\\x02וז\\x05ё')\n buf.write('ȩ\\x02זח\\x05љȭ\\x02חט\\x05н')\n buf.write('ȟ\\x02טp\\x03\\x02\\x02\\x02יך\\x05йȝ\\x02ך')\n buf.write('כ\\x05ыȦ\\x02כל\\x05ѝȯ\\x02ל')\n buf.write('ם\\x05љȭ\\x02םמ\\x05ћȮ\\x02מ')\n buf.write('ן\\x05нȟ\\x02ןנ\\x05їȬ\\x02נ')\n buf.write('r\\x03\\x02\\x02\\x02סע\\x05йȝ\\x02עף\\x05ё')\n buf.write('ȩ\\x02ףפ\\x05ыȦ\\x02פץ\\x05ы')\n buf.write('Ȧ\\x02ץצ\\x05нȟ\\x02צק\\x05й')\n buf.write('ȝ\\x02קר\\x05ћȮ\\x02רt\\x03\\x02\\x02\\x02ש')\n buf.write('ת\\x05йȝ\\x02ת\\u05eb\\x05ёȩ\\x02\\u05eb')\n buf.write('\\u05ec\\x05ыȦ\\x02\\u05ec\\u05ed\\x05ѝȯ\\x02\\u05ed')\n buf.write('\\u05ee\\x05эȧ\\x02\\u05eeׯ\\x05яȨ\\x02ׯ')\n buf.write('װ\\x05љȭ\\x02װv\\x03\\x02\\x02\\x02ױײ\\x05й')\n buf.write('ȝ\\x02ײ׳\\x05ёȩ\\x02׳״\\x05э')\n buf.write('ȧ\\x02״\\u05f5\\x05эȧ\\x02\\u05f5\\u05f6\\x05н')\n buf.write('ȟ\\x02\\u05f6\\u05f7\\x05яȨ\\x02\\u05f7\\u05f8\\x05ћ')\n buf.write('Ȯ\\x02\\u05f8x\\x03\\x02\\x02\\x02\\u05f9\\u05fa\\x05йȝ\\x02\\u05fa')\n buf.write('\\u05fb\\x05ёȩ\\x02\\u05fb\\u05fc\\x05эȧ\\x02\\u05fc')\n buf.write('\\u05fd\\x05эȧ\\x02\\u05fd\\u05fe\\x05хȣ\\x02\\u05fe')\n buf.write('\\u05ff\\x05ћȮ\\x02\\u05ffz\\x03\\x02\\x02\\x02\\u0600\\u0601\\x05й')\n buf.write('ȝ\\x02\\u0601\\u0602\\x05ёȩ\\x02\\u0602\\u0603\\x05э')\n buf.write('ȧ\\x02\\u0603\\u0604\\x05эȧ\\x02\\u0604\\u0605\\x05х')\n buf.write('ȣ\\x02\\u0605؆\\x05ћȮ\\x02؆؇\\x05ћ')\n 
buf.write('Ȯ\\x02؇؈\\x05нȟ\\x02؈؉\\x05л')\n buf.write('Ȟ\\x02؉|\\x03\\x02\\x02\\x02؊؋\\x05йȝ\\x02؋')\n buf.write('،\\x05ёȩ\\x02،؍\\x05эȧ\\x02؍')\n buf.write('؎\\x05ѓȪ\\x02؎؏\\x05еț\\x02؏')\n buf.write('ؐ\\x05ћȮ\\x02ؐؑ\\x05хȣ\\x02ؑ')\n buf.write('ؒ\\x05зȜ\\x02ؒؓ\\x05хȣ\\x02ؓ')\n buf.write('ؔ\\x05ыȦ\\x02ؔؕ\\x05хȣ\\x02ؕ')\n buf.write('ؖ\\x05ћȮ\\x02ؖؗ\\x05ѥȳ\\x02ؗ')\n buf.write('~\\x03\\x02\\x02\\x02ؘؙ\\x05йȝ\\x02ؙؚ\\x05ё')\n buf.write('ȩ\\x02ؚ؛\\x05эȧ\\x02؛\\u061c\\x05ѓ')\n buf.write('Ȫ\\x02\\u061c؝\\x05хȣ\\x02؝؞\\x05ы')\n buf.write('Ȧ\\x02؞؟\\x05нȟ\\x02؟\\x80\\x03\\x02\\x02')\n buf.write('\\x02ؠء\\x05йȝ\\x02ءآ\\x05ёȩ')\n buf.write('\\x02آأ\\x05эȧ\\x02أؤ\\x05ѓȪ')\n buf.write('\\x02ؤإ\\x05ёȩ\\x02إئ\\x05ѝȯ')\n buf.write('\\x02ئا\\x05яȨ\\x02اب\\x05лȞ')\n buf.write('\\x02ب\\x82\\x03\\x02\\x02\\x02ةت\\x05йȝ\\x02ت')\n buf.write('ث\\x05ёȩ\\x02ثج\\x05яȨ\\x02ج')\n buf.write('ح\\x05яȨ\\x02حخ\\x05нȟ\\x02خ')\n buf.write('د\\x05йȝ\\x02دذ\\x05ћȮ\\x02ذ')\n buf.write('\\x84\\x03\\x02\\x02\\x02رز\\x05йȝ\\x02زس')\n buf.write('\\x05ёȩ\\x02سش\\x05яȨ\\x02شص')\n buf.write('\\x05яȨ\\x02صض\\x05нȟ\\x02ضط')\n buf.write('\\x05йȝ\\x02طظ\\x05ћȮ\\x02ظع')\n buf.write('\\x07a\\x02\\x02عغ\\x05зȜ\\x02غػ\\x05ѥ')\n buf.write('ȳ\\x02ػؼ\\x07a\\x02\\x02ؼؽ\\x05їȬ')\n buf.write('\\x02ؽؾ\\x05ёȩ\\x02ؾؿ\\x05ёȩ')\n buf.write('\\x02ؿـ\\x05ћȮ\\x02ـ\\x86\\x03\\x02\\x02\\x02ف')\n buf.write('ق\\x05йȝ\\x02قك\\x05ёȩ\\x02ك')\n buf.write('ل\\x05яȨ\\x02لم\\x05љȭ\\x02م')\n buf.write('ن\\x05ћȮ\\x02نه\\x05еț\\x02ه')\n buf.write('و\\x05яȨ\\x02وى\\x05ћȮ\\x02ى')\n buf.write('\\x88\\x03\\x02\\x02\\x02يً\\x05йȝ\\x02ًٌ')\n buf.write('\\x05ёȩ\\x02ٌٍ\\x05яȨ\\x02ٍَ')\n buf.write('\\x05љȭ\\x02َُ\\x05ћȮ\\x02ُِ')\n buf.write('\\x05їȬ\\x02ِّ\\x05еț\\x02ّْ')\n buf.write('\\x05хȣ\\x02ْٓ\\x05яȨ\\x02ٓٔ')\n buf.write('\\x05ћȮ\\x02ٔ\\x8a\\x03\\x02\\x02\\x02ٕٖ\\x05й')\n buf.write('ȝ\\x02ٖٗ\\x05ёȩ\\x02ٗ٘\\x05я')\n buf.write('Ȩ\\x02٘ٙ\\x05љȭ\\x02ٙٚ\\x05ћ')\n buf.write('Ȯ\\x02ٚٛ\\x05їȬ\\x02ٜٛ\\x05е')\n buf.write('ț\\x02ٜٝ\\x05хȣ\\x02ٝٞ\\x05я')\n 
buf.write('Ȩ\\x02ٟٞ\\x05ћȮ\\x02ٟ٠\\x05љ')\n buf.write('ȭ\\x02٠\\x8c\\x03\\x02\\x02\\x02١٢\\x05йȝ')\n buf.write('\\x02٢٣\\x05ёȩ\\x02٣٤\\x05яȨ')\n buf.write('\\x02٤٥\\x05љȭ\\x02٥٦\\x05ћȮ')\n buf.write('\\x02٦٧\\x05їȬ\\x02٧٨\\x05ѝȯ')\n buf.write('\\x02٨٩\\x05йȝ\\x02٩٪\\x05ћȮ')\n buf.write('\\x02٪٫\\x05ёȩ\\x02٫٬\\x05їȬ')\n buf.write('\\x02٬\\x8e\\x03\\x02\\x02\\x02٭ٮ\\x05йȝ\\x02ٮ')\n buf.write('ٯ\\x05ёȩ\\x02ٯٰ\\x05яȨ\\x02ٰ')\n buf.write('ٱ\\x05ћȮ\\x02ٱٲ\\x05нȟ\\x02ٲ')\n buf.write('ٳ\\x05яȨ\\x02ٳٴ\\x05ћȮ\\x02ٴ')\n buf.write('\\x90\\x03\\x02\\x02\\x02ٵٶ\\x05йȝ\\x02ٶٷ')\n buf.write('\\x05ёȩ\\x02ٷٸ\\x05яȨ\\x02ٸٹ')\n buf.write('\\x05ћȮ\\x02ٹٺ\\x05нȟ\\x02ٺٻ')\n buf.write('\\x05ѣȲ\\x02ٻټ\\x05ћȮ\\x02ټ\\x92')\n buf.write('\\x03\\x02\\x02\\x02ٽپ\\x05йȝ\\x02پٿ\\x05ё')\n buf.write('ȩ\\x02ٿڀ\\x05яȨ\\x02ڀځ\\x05ћ')\n buf.write('Ȯ\\x02ځڂ\\x05хȣ\\x02ڂڃ\\x05я')\n buf.write('Ȩ\\x02ڃڄ\\x05ѝȯ\\x02ڄڅ\\x05н')\n buf.write('ȟ\\x02څ\\x94\\x03\\x02\\x02\\x02چڇ\\x05йȝ')\n buf.write('\\x02ڇڈ\\x05ёȩ\\x02ڈډ\\x05яȨ')\n buf.write('\\x02ډڊ\\x05џȰ\\x02ڊڋ\\x05нȟ')\n buf.write('\\x02ڋڌ\\x05їȬ\\x02ڌڍ\\x05ћȮ')\n buf.write('\\x02ڍ\\x96\\x03\\x02\\x02\\x02ڎڏ\\x05йȝ\\x02ڏ')\n buf.write('ڐ\\x05ёȩ\\x02ڐڑ\\x05їȬ\\x02ڑ')\n buf.write('ڒ\\x05їȬ\\x02ڒړ\\x05ѝȯ\\x02ړ')\n buf.write('ڔ\\x05ѓȪ\\x02ڔڕ\\x05ћȮ\\x02ڕ')\n buf.write('ږ\\x07a\\x02\\x02ږڗ\\x05ѣȲ\\x02ڗژ')\n buf.write('\\x05хȣ\\x02ژڙ\\x05лȞ\\x02ڙ\\x98')\n buf.write('\\x03\\x02\\x02\\x02ښڛ\\x05йȝ\\x02ڛڜ\\x05ё')\n buf.write('ȩ\\x02ڜڝ\\x05їȬ\\x02ڝڞ\\x05ї')\n buf.write('Ȭ\\x02ڞڟ\\x05ѝȯ\\x02ڟڠ\\x05ѓ')\n buf.write('Ȫ\\x02ڠڡ\\x05ћȮ\\x02ڡڢ\\x07a\\x02')\n buf.write('\\x02ڢڣ\\x05ѣȲ\\x02ڣڤ\\x05хȣ')\n buf.write('\\x02ڤڥ\\x05лȞ\\x02ڥڦ\\x07a\\x02\\x02ڦ')\n buf.write('ڧ\\x05еț\\x02ڧڨ\\x05ыȦ\\x02ڨ')\n buf.write('ک\\x05ыȦ\\x02ک\\x9a\\x03\\x02\\x02\\x02ڪګ')\n buf.write('\\x05йȝ\\x02ګڬ\\x05ёȩ\\x02ڬڭ')\n buf.write('\\x05љȭ\\x02ڭڮ\\x05ћȮ\\x02ڮ\\x9c')\n buf.write('\\x03\\x02\\x02\\x02گڰ\\x05йȝ\\x02ڰڱ\\x05ё')\n buf.write('ȩ\\x02ڱڲ\\x05ѝȯ\\x02ڲڳ\\x05я')\n 
buf.write('Ȩ\\x02ڳڴ\\x05ћȮ\\x02ڴ\\x9e\\x03\\x02\\x02')\n buf.write('\\x02ڵڶ\\x05йȝ\\x02ڶڷ\\x05їȬ')\n buf.write('\\x02ڷڸ\\x05нȟ\\x02ڸڹ\\x05еț')\n buf.write('\\x02ڹں\\x05ћȮ\\x02ںڻ\\x05нȟ')\n buf.write('\\x02ڻ\\xa0\\x03\\x02\\x02\\x02ڼڽ\\x05йȝ\\x02ڽ')\n buf.write('ھ\\x05їȬ\\x02ھڿ\\x05ёȩ\\x02ڿ')\n buf.write('ۀ\\x05љȭ\\x02ۀہ\\x05љȭ\\x02ہ')\n buf.write('¢\\x03\\x02\\x02\\x02ۂۃ\\x05йȝ\\x02ۃۄ')\n buf.write('\\x05ѝȯ\\x02ۄۅ\\x05зȜ\\x02ۅۆ')\n buf.write('\\x05нȟ\\x02ۆ¤\\x03\\x02\\x02\\x02ۇۈ\\x05й')\n buf.write('ȝ\\x02ۈۉ\\x05ѝȯ\\x02ۉۊ\\x05ї')\n buf.write('Ȭ\\x02ۊۋ\\x05їȬ\\x02ۋی\\x05н')\n buf.write('ȟ\\x02یۍ\\x05яȨ\\x02ۍێ\\x05ћ')\n buf.write('Ȯ\\x02ێ¦\\x03\\x02\\x02\\x02ۏې\\x05йȝ')\n buf.write('\\x02ېۑ\\x05ѝȯ\\x02ۑے\\x05їȬ')\n buf.write('\\x02ےۓ\\x05їȬ\\x02ۓ۔\\x05нȟ')\n buf.write('\\x02۔ە\\x05яȨ\\x02ەۖ\\x05ћȮ')\n buf.write('\\x02ۖۗ\\x07a\\x02\\x02ۗۘ\\x05ѝȯ\\x02ۘ')\n buf.write('ۙ\\x05љȭ\\x02ۙۚ\\x05нȟ\\x02ۚ')\n buf.write('ۛ\\x05їȬ\\x02ۛ¨\\x03\\x02\\x02\\x02ۜ\\u06dd')\n buf.write('\\x05йȝ\\x02\\u06dd۞\\x05ѝȯ\\x02۞۟')\n buf.write('\\x05їȬ\\x02۟۠\\x05љȭ\\x02۠ۡ')\n buf.write('\\x05ёȩ\\x02ۡۢ\\x05їȬ\\x02ۢª')\n buf.write('\\x03\\x02\\x02\\x02ۣۤ\\x05йȝ\\x02ۤۥ\\x05ѝ')\n buf.write('ȯ\\x02ۥۦ\\x05љȭ\\x02ۦۧ\\x05ћ')\n buf.write('Ȯ\\x02ۧۨ\\x05ёȩ\\x02ۨ۩\\x05э')\n buf.write('ȧ\\x02۩۪\\x05лȞ\\x02۪۫\\x05е')\n buf.write('ț\\x02۫۬\\x05ћȮ\\x02ۭ۬\\x05ѝ')\n buf.write('ȯ\\x02ۭۮ\\x05эȧ\\x02ۮ¬\\x03\\x02\\x02')\n buf.write('\\x02ۯ۰\\x05йȝ\\x02۰۱\\x05ѥȳ')\n buf.write('\\x02۱۲\\x05йȝ\\x02۲۳\\x05ыȦ')\n buf.write('\\x02۳۴\\x05нȟ\\x02۴®\\x03\\x02\\x02\\x02۵')\n buf.write('۶\\x05лȞ\\x02۶۷\\x05еț\\x02۷')\n buf.write('۸\\x05ћȮ\\x02۸۹\\x05еț\\x02۹')\n buf.write('°\\x03\\x02\\x02\\x02ۺۻ\\x05лȞ\\x02ۻۼ')\n buf.write('\\x05еț\\x02ۼ۽\\x05ћȮ\\x02۽۾')\n buf.write('\\x05еț\\x02۾ۿ\\x05зȜ\\x02ۿ܀')\n buf.write('\\x05еț\\x02܀܁\\x05љȭ\\x02܁܂')\n buf.write('\\x05нȟ\\x02܂²\\x03\\x02\\x02\\x02܃܄\\x05л')\n buf.write('Ȟ\\x02܄܅\\x05еț\\x02܅܆\\x05ћ')\n buf.write('Ȯ\\x02܆܇\\x05нȟ\\x02܇´\\x03\\x02\\x02')\n 
buf.write('\\x02܈܉\\x05лȞ\\x02܉܊\\x05еț')\n buf.write('\\x02܊܋\\x05ѥȳ\\x02܋¶\\x03\\x02\\x02\\x02܌')\n buf.write('܍\\x05лȞ\\x02܍\\u070e\\x05зȜ\\x02\\u070e')\n buf.write('\\u070f\\x07a\\x02\\x02\\u070fܐ\\x05їȬ\\x02ܐܑ')\n buf.write('\\x05ёȩ\\x02ܑܒ\\x05ыȦ\\x02ܒܓ')\n buf.write('\\x05нȟ\\x02ܓܔ\\x07a\\x02\\x02ܔܕ\\x05й')\n buf.write('ȝ\\x02ܕܖ\\x05уȢ\\x02ܖܗ\\x05е')\n buf.write('ț\\x02ܗܘ\\x05яȨ\\x02ܘܙ\\x05с')\n buf.write('ȡ\\x02ܙܚ\\x05нȟ\\x02ܚ¸\\x03\\x02\\x02')\n buf.write('\\x02ܛܜ\\x05лȞ\\x02ܜܝ\\x05зȜ')\n buf.write('\\x02ܝܞ\\x05ћȮ\\x02ܞܟ\\x05хȣ')\n buf.write('\\x02ܟܠ\\x05эȧ\\x02ܠܡ\\x05нȟ')\n buf.write('\\x02ܡܢ\\x05ѧȴ\\x02ܢܣ\\x05ёȩ')\n buf.write('\\x02ܣܤ\\x05яȨ\\x02ܤܥ\\x05нȟ')\n buf.write('\\x02ܥº\\x03\\x02\\x02\\x02ܦܧ\\x05лȞ\\x02ܧ')\n buf.write('ܨ\\x05лȞ\\x02ܨܩ\\x05ыȦ\\x02ܩ')\n buf.write('¼\\x03\\x02\\x02\\x02ܪܫ\\x05лȞ\\x02ܫܬ')\n buf.write('\\x05нȟ\\x02ܬܭ\\x05зȜ\\x02ܭܮ')\n buf.write('\\x05ѝȯ\\x02ܮܯ\\x05сȡ\\x02ܯ¾')\n buf.write('\\x03\\x02\\x02\\x02ܱܰ\\x05лȞ\\x02ܱܲ\\x05н')\n buf.write('ȟ\\x02ܲܳ\\x05йȝ\\x02ܳÀ\\x03\\x02\\x02')\n buf.write('\\x02ܴܵ\\x05лȞ\\x02ܵܶ\\x05нȟ')\n buf.write('\\x02ܷܶ\\x05йȝ\\x02ܷܸ\\x05хȣ')\n buf.write('\\x02ܸܹ\\x05эȧ\\x02ܹܺ\\x05еț')\n buf.write('\\x02ܻܺ\\x05ыȦ\\x02ܻÂ\\x03\\x02\\x02\\x02ܼ')\n buf.write('ܽ\\x05лȞ\\x02ܾܽ\\x05нȟ\\x02ܾ')\n buf.write('ܿ\\x05йȝ\\x02ܿ݀\\x05ыȦ\\x02݀')\n buf.write('݁\\x05еț\\x02݂݁\\x05їȬ\\x02݂')\n buf.write('݃\\x05нȟ\\x02݃Ä\\x03\\x02\\x02\\x02݄݅')\n buf.write('\\x05лȞ\\x02݆݅\\x05нȟ\\x02݆݇')\n buf.write('\\x05йȝ\\x02݈݇\\x05ёȩ\\x02݈݉')\n buf.write('\\x05эȧ\\x02݉݊\\x05ѓȪ\\x02݊\\u074b')\n buf.write('\\x05ёȩ\\x02\\u074b\\u074c\\x05љȭ\\x02\\u074cݍ')\n buf.write('\\x05нȟ\\x02ݍÆ\\x03\\x02\\x02\\x02ݎݏ\\x05л')\n buf.write('Ȟ\\x02ݏݐ\\x05нȟ\\x02ݐݑ\\x05й')\n buf.write('ȝ\\x02ݑݒ\\x05їȬ\\x02ݒݓ\\x05н')\n buf.write('ȟ\\x02ݓݔ\\x05эȧ\\x02ݔݕ\\x05н')\n buf.write('ȟ\\x02ݕݖ\\x05яȨ\\x02ݖݗ\\x05ћ')\n buf.write('Ȯ\\x02ݗÈ\\x03\\x02\\x02\\x02ݘݙ\\x05лȞ')\n buf.write('\\x02ݙݚ\\x05нȟ\\x02ݚݛ\\x05пȠ')\n buf.write('\\x02ݛݜ\\x05еț\\x02ݜݝ\\x05ѝȯ')\n 
buf.write('\\x02ݝݞ\\x05ыȦ\\x02ݞݟ\\x05ћȮ')\n buf.write('\\x02ݟÊ\\x03\\x02\\x02\\x02ݠݡ\\x05лȞ\\x02ݡ')\n buf.write('ݢ\\x05нȟ\\x02ݢݣ\\x05пȠ\\x02ݣ')\n buf.write('ݤ\\x05еț\\x02ݤݥ\\x05ѝȯ\\x02ݥ')\n buf.write('ݦ\\x05ыȦ\\x02ݦݧ\\x05ћȮ\\x02ݧ')\n buf.write('ݨ\\x05љȭ\\x02ݨÌ\\x03\\x02\\x02\\x02ݩݪ')\n buf.write('\\x05лȞ\\x02ݪݫ\\x05нȟ\\x02ݫݬ')\n buf.write('\\x05пȠ\\x02ݬݭ\\x05нȟ\\x02ݭݮ')\n buf.write('\\x05їȬ\\x02ݮݯ\\x05їȬ\\x02ݯݰ')\n buf.write('\\x05нȟ\\x02ݰݱ\\x05лȞ\\x02ݱÎ')\n buf.write('\\x03\\x02\\x02\\x02ݲݳ\\x05лȞ\\x02ݳݴ\\x05н')\n buf.write('ȟ\\x02ݴݵ\\x05пȠ\\x02ݵݶ\\x05х')\n buf.write('ȣ\\x02ݶݷ\\x05яȨ\\x02ݷݸ\\x05н')\n buf.write('ȟ\\x02ݸݹ\\x05їȬ\\x02ݹÐ\\x03\\x02\\x02')\n buf.write('\\x02ݺݻ\\x05лȞ\\x02ݻݼ\\x05нȟ')\n buf.write('\\x02ݼݽ\\x05ыȦ\\x02ݽݾ\\x05нȟ')\n buf.write('\\x02ݾݿ\\x05ћȮ\\x02ݿހ\\x05нȟ')\n buf.write('\\x02ހÒ\\x03\\x02\\x02\\x02ށނ\\x05лȞ\\x02ނ')\n buf.write('ރ\\x05нȟ\\x02ރބ\\x05ѓȪ\\x02ބ')\n buf.write('ޅ\\x05ћȮ\\x02ޅކ\\x05уȢ\\x02ކ')\n buf.write('Ô\\x03\\x02\\x02\\x02އވ\\x05лȞ\\x02ވމ')\n buf.write('\\x05нȟ\\x02މފ\\x05љȭ\\x02ފދ')\n buf.write('\\x05йȝ\\x02ދÖ\\x03\\x02\\x02\\x02ތލ\\x05л')\n buf.write('Ȟ\\x02ލގ\\x05нȟ\\x02ގޏ\\x05ћ')\n buf.write('Ȯ\\x02ޏސ\\x05нȟ\\x02ސޑ\\x05ї')\n buf.write('Ȭ\\x02ޑޒ\\x05эȧ\\x02ޒޓ\\x05х')\n buf.write('ȣ\\x02ޓޔ\\x05яȨ\\x02ޔޕ\\x05х')\n buf.write('ȣ\\x02ޕޖ\\x05љȭ\\x02ޖޗ\\x05ћ')\n buf.write('Ȯ\\x02ޗޘ\\x05хȣ\\x02ޘޙ\\x05й')\n buf.write('ȝ\\x02ޙØ\\x03\\x02\\x02\\x02ޚޛ\\x05лȞ')\n buf.write('\\x02ޛޜ\\x05хȣ\\x02ޜޝ\\x05эȧ')\n buf.write('\\x02ޝޞ\\x05нȟ\\x02ޞޟ\\x05яȨ')\n buf.write('\\x02ޟޠ\\x05љȭ\\x02ޠޡ\\x05хȣ')\n buf.write('\\x02ޡޢ\\x05ёȩ\\x02ޢޣ\\x05яȨ')\n buf.write('\\x02ޣÚ\\x03\\x02\\x02\\x02ޤޥ\\x05лȞ\\x02ޥ')\n buf.write('ަ\\x05хȣ\\x02ަާ\\x05љȭ\\x02ާ')\n buf.write('ި\\x05еț\\x02ިީ\\x05зȜ\\x02ީ')\n buf.write('ު\\x05ыȦ\\x02ުޫ\\x05нȟ\\x02ޫ')\n buf.write('Ü\\x03\\x02\\x02\\x02ެޭ\\x05лȞ\\x02ޭޮ')\n buf.write('\\x05хȣ\\x02ޮޯ\\x05љȭ\\x02ޯް')\n buf.write('\\x05еț\\x02ްޱ\\x05љȭ\\x02ޱ\\u07b2')\n buf.write('\\x05љȭ\\x02\\u07b2\\u07b3\\x05ёȩ\\x02\\u07b3\\u07b4')\n 
buf.write('\\x05йȝ\\x02\\u07b4\\u07b5\\x05хȣ\\x02\\u07b5\\u07b6')\n buf.write('\\x05еț\\x02\\u07b6\\u07b7\\x05ћȮ\\x02\\u07b7\\u07b8')\n buf.write('\\x05нȟ\\x02\\u07b8Þ\\x03\\x02\\x02\\x02\\u07b9\\u07ba\\x05л')\n buf.write('Ȟ\\x02\\u07ba\\u07bb\\x05хȣ\\x02\\u07bb\\u07bc\\x05љ')\n buf.write('ȭ\\x02\\u07bc\\u07bd\\x05ћȮ\\x02\\u07bd\\u07be\\x05х')\n buf.write('ȣ\\x02\\u07be\\u07bf\\x05яȨ\\x02\\u07bf߀\\x05й')\n buf.write('ȝ\\x02߀߁\\x05ћȮ\\x02߁à\\x03\\x02\\x02')\n buf.write('\\x02߂߃\\x05лȞ\\x02߃߄\\x05ёȩ')\n buf.write('\\x02߄߅\\x05йȝ\\x02߅߆\\x05ѝȯ')\n buf.write('\\x02߆߇\\x05эȧ\\x02߇߈\\x05нȟ')\n buf.write('\\x02߈߉\\x05яȨ\\x02߉ߊ\\x05ћȮ')\n buf.write('\\x02ߊâ\\x03\\x02\\x02\\x02ߋߌ\\x05лȞ\\x02ߌ')\n buf.write('ߍ\\x05ёȩ\\x02ߍߎ\\x05ѝȯ\\x02ߎ')\n buf.write('ߏ\\x05зȜ\\x02ߏߐ\\x05ыȦ\\x02ߐ')\n buf.write('ߑ\\x05нȟ\\x02ߑä\\x03\\x02\\x02\\x02ߒߓ')\n buf.write('\\x05лȞ\\x02ߓߔ\\x05їȬ\\x02ߔߕ')\n buf.write('\\x05ёȩ\\x02ߕߖ\\x05ѓȪ\\x02ߖæ')\n buf.write('\\x03\\x02\\x02\\x02ߗߘ\\x05лȞ\\x02ߘߙ\\x05љ')\n buf.write('ȭ\\x02ߙߚ\\x05хȣ\\x02ߚߛ\\x05я')\n buf.write('Ȩ\\x02ߛߜ\\x05ћȮ\\x02ߜߝ\\x05н')\n buf.write('ȟ\\x02ߝߞ\\x05їȬ\\x02ߞߟ\\x05џ')\n buf.write('Ȱ\\x02ߟߠ\\x05еț\\x02ߠߡ\\x05ы')\n buf.write('Ȧ\\x02ߡߢ\\x07a\\x02\\x02ߢߣ\\x05ѝȯ')\n buf.write('\\x02ߣߤ\\x05яȨ\\x02ߤߥ\\x05йȝ')\n buf.write('\\x02ߥߦ\\x05ёȩ\\x02ߦߧ\\x05яȨ')\n buf.write('\\x02ߧߨ\\x05љȭ\\x02ߨߩ\\x05ћȮ')\n buf.write('\\x02ߩߪ\\x05їȬ\\x02ߪ߫\\x05еț')\n buf.write('\\x02߫߬\\x05хȣ\\x02߬߭\\x05яȨ')\n buf.write('\\x02߭߮\\x05нȟ\\x02߮߯\\x05лȞ')\n buf.write('\\x02߯è\\x03\\x02\\x02\\x02߰߱\\x05нȟ\\x02߱')\n buf.write('߲\\x05еț\\x02߲߳\\x05йȝ\\x02߳')\n buf.write('ߴ\\x05уȢ\\x02ߴê\\x03\\x02\\x02\\x02ߵ߶')\n buf.write('\\x05нȟ\\x02߶߷\\x05ыȦ\\x02߷߸')\n buf.write('\\x05нȟ\\x02߸߹\\x05эȧ\\x02߹ߺ')\n buf.write('\\x05нȟ\\x02ߺ\\u07fb\\x05яȨ\\x02\\u07fb\\u07fc')\n buf.write('\\x05ћȮ\\x02\\u07fcì\\x03\\x02\\x02\\x02߽߾\\x05н')\n buf.write('ȟ\\x02߾߿\\x05ыȦ\\x02߿ࠀ\\x05љ')\n buf.write('ȭ\\x02ࠀࠁ\\x05нȟ\\x02ࠁî\\x03\\x02\\x02')\n buf.write('\\x02ࠂࠃ\\x05нȟ\\x02ࠃࠄ\\x05ыȦ')\n 
buf.write('\\x02ࠄࠅ\\x05љȭ\\x02ࠅࠆ\\x05хȣ')\n buf.write('\\x02ࠆࠇ\\x05пȠ\\x02ࠇð\\x03\\x02\\x02\\x02ࠈ')\n buf.write('ࠉ\\x05нȟ\\x02ࠉࠊ\\x05эȧ\\x02ࠊ')\n buf.write('ࠋ\\x05ѓȪ\\x02ࠋࠌ\\x05ћȮ\\x02ࠌ')\n buf.write('ࠍ\\x05ѥȳ\\x02ࠍò\\x03\\x02\\x02\\x02ࠎࠏ')\n buf.write('\\x05нȟ\\x02ࠏࠐ\\x05яȨ\\x02ࠐࠑ')\n buf.write('\\x05еț\\x02ࠑࠒ\\x05зȜ\\x02ࠒࠓ')\n buf.write('\\x05ыȦ\\x02ࠓࠔ\\x05нȟ\\x02ࠔô')\n buf.write('\\x03\\x02\\x02\\x02ࠕࠖ\\x05нȟ\\x02ࠖࠗ\\x05я')\n buf.write('Ȩ\\x02ࠗ࠘\\x05йȝ\\x02࠘࠙\\x05ё')\n buf.write('ȩ\\x02࠙ࠚ\\x05лȞ\\x02ࠚࠛ\\x05х')\n buf.write('ȣ\\x02ࠛࠜ\\x05яȨ\\x02ࠜࠝ\\x05с')\n buf.write('ȡ\\x02ࠝö\\x03\\x02\\x02\\x02ࠞࠟ\\x05нȟ')\n buf.write('\\x02ࠟࠠ\\x05яȨ\\x02ࠠࠡ\\x05лȞ')\n buf.write('\\x02ࠡø\\x03\\x02\\x02\\x02ࠢࠣ\\x05нȟ\\x02ࠣ')\n buf.write('ࠤ\\x05яȨ\\x02ࠤࠥ\\x05ћȮ\\x02ࠥ')\n buf.write('ࠦ\\x05хȣ\\x02ࠦࠧ\\x05ћȮ\\x02ࠧ')\n buf.write('ࠨ\\x05ѥȳ\\x02ࠨࠩ\\x05нȟ\\x02ࠩ')\n buf.write('ࠪ\\x05љȭ\\x02ࠪࠫ\\x05йȝ\\x02ࠫ')\n buf.write('ࠬ\\x05еț\\x02ࠬ࠭\\x05ѓȪ\\x02࠭')\n buf.write('\\u082e\\x05хȣ\\x02\\u082e\\u082f\\x05яȨ\\x02\\u082f')\n buf.write('࠰\\x05сȡ\\x02࠰ú\\x03\\x02\\x02\\x02࠱࠲')\n buf.write('\\x05нȟ\\x02࠲࠳\\x05їȬ\\x02࠳࠴')\n buf.write('\\x05їȬ\\x02࠴ü\\x03\\x02\\x02\\x02࠵࠶\\x05н')\n buf.write('ȟ\\x02࠶࠷\\x05їȬ\\x02࠷࠸\\x05ї')\n buf.write('Ȭ\\x02࠸࠹\\x05ёȩ\\x02࠹࠺\\x05ї')\n buf.write('Ȭ\\x02࠺࠻\\x05љȭ\\x02࠻þ\\x03\\x02\\x02')\n buf.write('\\x02࠼࠽\\x05нȟ\\x02࠽࠾\\x05љȭ')\n buf.write('\\x02࠾\\u083f\\x05йȝ\\x02\\u083fࡀ\\x05еț')\n buf.write('\\x02ࡀࡁ\\x05ѓȪ\\x02ࡁࡂ\\x05нȟ')\n buf.write('\\x02ࡂĀ\\x03\\x02\\x02\\x02ࡃࡄ\\x05нȟ\\x02ࡄ')\n buf.write('ࡅ\\x05џȰ\\x02ࡅࡆ\\x05еț\\x02ࡆ')\n buf.write('ࡇ\\x05ыȦ\\x02ࡇࡈ\\x05яȨ\\x02ࡈ')\n buf.write('ࡉ\\x05еț\\x02ࡉࡊ\\x05эȧ\\x02ࡊ')\n buf.write('ࡋ\\x05нȟ\\x02ࡋĂ\\x03\\x02\\x02\\x02ࡌࡍ')\n buf.write('\\x05нȟ\\x02ࡍࡎ\\x05ѣȲ\\x02ࡎࡏ')\n buf.write('\\x05йȝ\\x02ࡏࡐ\\x05нȟ\\x02ࡐࡑ')\n buf.write('\\x05ѓȪ\\x02ࡑࡒ\\x05ћȮ\\x02ࡒĄ')\n buf.write('\\x03\\x02\\x02\\x02ࡓࡔ\\x05нȟ\\x02ࡔࡕ\\x05ѣ')\n buf.write('Ȳ\\x02ࡕࡖ\\x05йȝ\\x02ࡖࡗ\\x05н')\n buf.write('ȟ\\x02ࡗࡘ\\x05ѓȪ\\x02ࡘ࡙\\x05ћ')\n 
buf.write('Ȯ\\x02࡙࡚\\x05хȣ\\x02࡚࡛\\x05ё')\n buf.write('ȩ\\x02࡛\\u085c\\x05яȨ\\x02\\u085cĆ\\x03\\x02\\x02')\n buf.write('\\x02\\u085d࡞\\x05нȟ\\x02࡞\\u085f\\x05ѣȲ')\n buf.write('\\x02\\u085fࡠ\\x05йȝ\\x02ࡠࡡ\\x05нȟ')\n buf.write('\\x02ࡡࡢ\\x05ѓȪ\\x02ࡢࡣ\\x05ћȮ')\n buf.write('\\x02ࡣࡤ\\x05хȣ\\x02ࡤࡥ\\x05ёȩ')\n buf.write('\\x02ࡥࡦ\\x05яȨ\\x02ࡦࡧ\\x07a\\x02\\x02ࡧ')\n buf.write('ࡨ\\x05хȣ\\x02ࡨࡩ\\x05яȨ\\x02ࡩ')\n buf.write('ࡪ\\x05хȣ\\x02ࡪ\\u086b\\x05ћȮ\\x02\\u086b')\n buf.write('Ĉ\\x03\\x02\\x02\\x02\\u086c\\u086d\\x05нȟ\\x02\\u086d\\u086e')\n buf.write('\\x05ѣȲ\\x02\\u086e\\u086f\\x05йȝ\\x02\\u086fࡰ')\n buf.write('\\x05нȟ\\x02ࡰࡱ\\x05ѓȪ\\x02ࡱࡲ')\n buf.write('\\x05ћȮ\\x02ࡲࡳ\\x05хȣ\\x02ࡳࡴ')\n buf.write('\\x05ёȩ\\x02ࡴࡵ\\x05яȨ\\x02ࡵࡶ')\n buf.write('\\x05љȭ\\x02ࡶĊ\\x03\\x02\\x02\\x02ࡷࡸ\\x05н')\n buf.write('ȟ\\x02ࡸࡹ\\x05ѣȲ\\x02ࡹࡺ\\x05й')\n buf.write('ȝ\\x02ࡺࡻ\\x05ыȦ\\x02ࡻࡼ\\x05ѝ')\n buf.write('ȯ\\x02ࡼࡽ\\x05лȞ\\x02ࡽࡾ\\x05н')\n buf.write('ȟ\\x02ࡾČ\\x03\\x02\\x02\\x02ࡿࢀ\\x05нȟ')\n buf.write('\\x02ࢀࢁ\\x05ѣȲ\\x02ࢁࢂ\\x05йȝ')\n buf.write('\\x02ࢂࢃ\\x05ыȦ\\x02ࢃࢄ\\x05ѝȯ')\n buf.write('\\x02ࢄࢅ\\x05љȭ\\x02ࢅࢆ\\x05хȣ')\n buf.write('\\x02ࢆࢇ\\x05џȰ\\x02ࢇ࢈\\x05нȟ')\n buf.write('\\x02࢈Ď\\x03\\x02\\x02\\x02ࢉࢊ\\x05нȟ\\x02ࢊ')\n buf.write('ࢋ\\x05ѣȲ\\x02ࢋࢌ\\x05нȟ\\x02ࢌ')\n buf.write('ࢍ\\x05йȝ\\x02ࢍࢎ\\x05ѝȯ\\x02ࢎ')\n buf.write('\\u088f\\x05ћȮ\\x02\\u088f\\u0890\\x05нȟ\\x02\\u0890')\n buf.write('Đ\\x03\\x02\\x02\\x02\\u0891\\u0892\\x05нȟ\\x02\\u0892\\u0893')\n buf.write('\\x05ѣȲ\\x02\\u0893\\u0894\\x05хȣ\\x02\\u0894\\u0895')\n buf.write('\\x05љȭ\\x02\\u0895\\u0896\\x05ћȮ\\x02\\u0896\\u0897')\n buf.write('\\x05љȭ\\x02\\u0897Ē\\x03\\x02\\x02\\x02࢙࢘\\x05н')\n buf.write('ȟ\\x02࢙࢚\\x05ѣȲ\\x02࢚࢛\\x05х')\n buf.write('ȣ\\x02࢛࢜\\x05ћȮ\\x02࢜Ĕ\\x03\\x02\\x02')\n buf.write('\\x02࢝࢞\\x05нȟ\\x02࢞࢟\\x05ѣȲ')\n buf.write('\\x02࢟ࢠ\\x05ѓȪ\\x02ࢠࢡ\\x05ыȦ')\n buf.write('\\x02ࢡࢢ\\x05еț\\x02ࢢࢣ\\x05хȣ')\n buf.write('\\x02ࢣࢤ\\x05яȨ\\x02ࢤĖ\\x03\\x02\\x02\\x02ࢥ')\n buf.write('ࢦ\\x05нȟ\\x02ࢦࢧ\\x05ѣȲ\\x02ࢧ')\n 
buf.write('ࢨ\\x05ћȮ\\x02ࢨࢩ\\x05нȟ\\x02ࢩ')\n buf.write('ࢪ\\x05їȬ\\x02ࢪࢫ\\x05яȨ\\x02ࢫ')\n buf.write('ࢬ\\x05еț\\x02ࢬࢭ\\x05ыȦ\\x02ࢭ')\n buf.write('Ę\\x03\\x02\\x02\\x02ࢮࢯ\\x05нȟ\\x02ࢯࢰ')\n buf.write('\\x05ѣȲ\\x02ࢰࢱ\\x05ћȮ\\x02ࢱࢲ')\n buf.write('\\x05їȬ\\x02ࢲࢳ\\x05еț\\x02ࢳࢴ')\n buf.write('\\x05йȝ\\x02ࢴࢵ\\x05ћȮ\\x02ࢵĚ')\n buf.write('\\x03\\x02\\x02\\x02ࢶࢷ\\x05пȠ\\x02ࢷࢸ\\x05е')\n buf.write('ț\\x02ࢸࢹ\\x05хȣ\\x02ࢹࢺ\\x05ы')\n buf.write('Ȧ\\x02ࢺࢻ\\x05ѝȯ\\x02ࢻࢼ\\x05ї')\n buf.write('Ȭ\\x02ࢼࢽ\\x05нȟ\\x02ࢽĜ\\x03\\x02\\x02')\n buf.write('\\x02ࢾࢿ\\x05пȠ\\x02ࢿࣀ\\x05еț')\n buf.write('\\x02ࣀࣁ\\x05ыȦ\\x02ࣁࣂ\\x05љȭ')\n buf.write('\\x02ࣂࣃ\\x05нȟ\\x02ࣃĞ\\x03\\x02\\x02\\x02ࣄ')\n buf.write('ࣅ\\x05пȠ\\x02ࣅࣆ\\x05нȟ\\x02ࣆ')\n buf.write('ࣇ\\x05ћȮ\\x02ࣇࣈ\\x05йȝ\\x02ࣈ')\n buf.write('ࣉ\\x05уȢ\\x02ࣉĠ\\x03\\x02\\x02\\x02࣊࣋')\n buf.write('\\x05пȠ\\x02࣋࣌\\x05хȣ\\x02࣌࣍')\n buf.write('\\x05яȨ\\x02࣍࣎\\x05еț\\x02࣏࣎')\n buf.write('\\x05ыȦ\\x02࣏Ģ\\x03\\x02\\x02\\x02࣐࣑\\x05п')\n buf.write('Ƞ\\x02࣑࣒\\x05хȣ\\x02࣒࣓\\x05ї')\n buf.write('Ȭ\\x02࣓ࣔ\\x05љȭ\\x02ࣔࣕ\\x05ћ')\n buf.write('Ȯ\\x02ࣕĤ\\x03\\x02\\x02\\x02ࣖࣗ\\x05пȠ')\n buf.write('\\x02ࣗࣘ\\x05хȣ\\x02ࣘࣙ\\x05їȬ')\n buf.write('\\x02ࣙࣚ\\x05љȭ\\x02ࣚࣛ\\x05ћȮ')\n buf.write('\\x02ࣛࣜ\\x07a\\x02\\x02ࣜࣝ\\x05џȰ\\x02ࣝ')\n buf.write('ࣞ\\x05еț\\x02ࣞࣟ\\x05ыȦ\\x02ࣟ')\n buf.write('࣠\\x05ѝȯ\\x02࣠࣡\\x05нȟ\\x02࣡')\n buf.write('Ħ\\x03\\x02\\x02\\x02\\u08e2ࣣ\\x05пȠ\\x02ࣣࣤ')\n buf.write('\\x05ыȦ\\x02ࣤࣥ\\x05ёȩ\\x02ࣦࣥ')\n buf.write('\\x05еț\\x02ࣦࣧ\\x05ћȮ\\x02ࣧĨ')\n buf.write('\\x03\\x02\\x02\\x02ࣩࣨ\\x05пȠ\\x02ࣩ࣪\\x05ё')\n buf.write('ȩ\\x02࣪࣫\\x05ыȦ\\x02࣫࣬\\x05ы')\n buf.write('Ȧ\\x02࣭࣬\\x05ёȩ\\x02࣭࣮\\x05ѡ')\n buf.write('ȱ\\x02࣮࣯\\x05хȣ\\x02ࣰ࣯\\x05я')\n buf.write('Ȩ\\x02ࣰࣱ\\x05сȡ\\x02ࣱĪ\\x03\\x02\\x02')\n buf.write('\\x02ࣲࣳ\\x05пȠ\\x02ࣳࣴ\\x05ёȩ')\n buf.write('\\x02ࣴࣵ\\x05ыȦ\\x02ࣶࣵ\\x05ыȦ')\n buf.write('\\x02ࣶࣷ\\x05ёȩ\\x02ࣷࣸ\\x05ѡȱ')\n buf.write('\\x02ࣹࣸ\\x05љȭ\\x02ࣹĬ\\x03\\x02\\x02\\x02ࣺ')\n buf.write('ࣻ\\x05пȠ\\x02ࣻࣼ\\x05ёȩ\\x02ࣼ')\n buf.write('ࣽ\\x05їȬ\\x02ࣽĮ\\x03\\x02\\x02\\x02ࣾࣿ')\n 
buf.write('\\x05пȠ\\x02ࣿऀ\\x05ёȩ\\x02ऀँ')\n buf.write('\\x05їȬ\\x02ँं\\x05еț\\x02ंः')\n buf.write('\\x05ыȦ\\x02ःऄ\\x05ыȦ\\x02ऄİ')\n buf.write('\\x03\\x02\\x02\\x02अआ\\x05пȠ\\x02आइ\\x05ё')\n buf.write('ȩ\\x02इई\\x05їȬ\\x02ईउ\\x05й')\n buf.write('ȝ\\x02उऊ\\x05нȟ\\x02ऊIJ\\x03\\x02\\x02')\n buf.write('\\x02ऋऌ\\x05пȠ\\x02ऌऍ\\x05їȬ')\n buf.write('\\x02ऍऎ\\x05ёȩ\\x02ऎए\\x05эȧ')\n buf.write('\\x02एĴ\\x03\\x02\\x02\\x02ऐऑ\\x05пȠ\\x02ऑ')\n buf.write('ऒ\\x05ѝȯ\\x02ऒओ\\x05ыȦ\\x02ओ')\n buf.write('औ\\x05ыȦ\\x02औĶ\\x03\\x02\\x02\\x02कख')\n buf.write('\\x05пȠ\\x02खग\\x05ѝȯ\\x02गघ')\n buf.write('\\x05яȨ\\x02घङ\\x05йȝ\\x02ङच')\n buf.write('\\x05ћȮ\\x02चछ\\x05хȣ\\x02छज')\n buf.write('\\x05ёȩ\\x02जझ\\x05яȨ\\x02झĸ')\n buf.write('\\x03\\x02\\x02\\x02ञट\\x05сȡ\\x02टठ\\x05ё')\n buf.write('ȩ\\x02ठड\\x05ћȮ\\x02डढ\\x05ё')\n buf.write('ȩ\\x02ढĺ\\x03\\x02\\x02\\x02णत\\x05сȡ')\n buf.write('\\x02तथ\\x05їȬ\\x02थद\\x05еț')\n buf.write('\\x02दध\\x05яȨ\\x02धन\\x05ћȮ')\n buf.write('\\x02नļ\\x03\\x02\\x02\\x02ऩप\\x05сȡ\\x02प')\n buf.write('फ\\x05їȬ\\x02फब\\x05ёȩ\\x02ब')\n buf.write('भ\\x05ѝȯ\\x02भम\\x05ѓȪ\\x02म')\n buf.write('ľ\\x03\\x02\\x02\\x02यर\\x05сȡ\\x02रऱ')\n buf.write('\\x05їȬ\\x02ऱल\\x05ёȩ\\x02लळ')\n buf.write('\\x05ѝȯ\\x02ळऴ\\x05ѓȪ\\x02ऴव')\n buf.write('\\x05хȣ\\x02वश\\x05яȨ\\x02शष')\n buf.write('\\x05сȡ\\x02षŀ\\x03\\x02\\x02\\x02सह\\x05у')\n buf.write('Ȣ\\x02हऺ\\x05еț\\x02ऺऻ\\x05љ')\n buf.write('ȭ\\x02ऻ़\\x05уȢ\\x02़ł\\x03\\x02\\x02')\n buf.write('\\x02ऽा\\x05уȢ\\x02ाि\\x05еț')\n buf.write('\\x02िी\\x05џȰ\\x02ीु\\x05хȣ')\n buf.write('\\x02ुू\\x05яȨ\\x02ूृ\\x05сȡ')\n buf.write('\\x02ृń\\x03\\x02\\x02\\x02ॄॅ\\x05уȢ\\x02ॅ')\n buf.write('ॆ\\x05хȣ\\x02ॆे\\x05лȞ\\x02े')\n buf.write('ै\\x05нȟ\\x02ैņ\\x03\\x02\\x02\\x02ॉॊ')\n buf.write('\\x05уȢ\\x02ॊो\\x05ёȩ\\x02ोौ')\n buf.write('\\x05ѝȯ\\x02ौ्\\x05їȬ\\x02्ň')\n buf.write('\\x03\\x02\\x02\\x02ॎॏ\\x05хȣ\\x02ॏॐ\\x05п')\n buf.write('Ƞ\\x02ॐŊ\\x03\\x02\\x02\\x02॒॑\\x05хȣ')\n buf.write('\\x02॒॓\\x05сȡ\\x02॓॔\\x05яȨ')\n 
buf.write('\\x02॔ॕ\\x05ёȩ\\x02ॕॖ\\x05їȬ')\n buf.write('\\x02ॖॗ\\x05нȟ\\x02ॗŌ\\x03\\x02\\x02\\x02क़')\n buf.write('ख़\\x05хȣ\\x02ख़ग़\\x05эȧ\\x02ग़')\n buf.write('ज़\\x05эȧ\\x02ज़ड़\\x05нȟ\\x02ड़')\n buf.write('ढ़\\x05лȞ\\x02ढ़फ़\\x05хȣ\\x02फ़')\n buf.write('य़\\x05еț\\x02य़ॠ\\x05ћȮ\\x02ॠ')\n buf.write('ॡ\\x05нȟ\\x02ॡŎ\\x03\\x02\\x02\\x02ॢॣ')\n buf.write('\\x05хȣ\\x02ॣ।\\x05яȨ\\x02।Ő')\n buf.write('\\x03\\x02\\x02\\x02॥०\\x05хȣ\\x02०१\\x05я')\n buf.write('Ȩ\\x02१२\\x05йȝ\\x02२३\\x05ы')\n buf.write('Ȧ\\x02३४\\x05ѝȯ\\x02४५\\x05л')\n buf.write('Ȟ\\x02५६\\x05нȟ\\x02६Œ\\x03\\x02\\x02')\n buf.write('\\x02७८\\x05хȣ\\x02८९\\x05яȨ')\n buf.write('\\x02९॰\\x05йȝ\\x02॰ॱ\\x05ыȦ')\n buf.write('\\x02ॱॲ\\x05ѝȯ\\x02ॲॳ\\x05лȞ')\n buf.write('\\x02ॳॴ\\x05хȣ\\x02ॴॵ\\x05яȨ')\n buf.write('\\x02ॵॶ\\x05сȡ\\x02ॶŔ\\x03\\x02\\x02\\x02ॷ')\n buf.write('ॸ\\x05хȣ\\x02ॸॹ\\x05яȨ\\x02ॹ')\n buf.write('ॺ\\x05йȝ\\x02ॺॻ\\x05їȬ\\x02ॻ')\n buf.write('ॼ\\x05нȟ\\x02ॼॽ\\x05эȧ\\x02ॽ')\n buf.write('ॾ\\x05нȟ\\x02ॾॿ\\x05яȨ\\x02ॿ')\n buf.write('ঀ\\x05ћȮ\\x02ঀŖ\\x03\\x02\\x02\\x02ঁং')\n buf.write('\\x05хȣ\\x02ংঃ\\x05яȨ\\x02ঃ\\u0984')\n buf.write('\\x05лȞ\\x02\\u0984অ\\x05нȟ\\x02অআ')\n buf.write('\\x05яȨ\\x02আই\\x05ћȮ\\x02ইŘ')\n buf.write('\\x03\\x02\\x02\\x02ঈউ\\x05хȣ\\x02উঊ\\x05я')\n buf.write('Ȩ\\x02ঊঋ\\x05лȞ\\x02ঋঌ\\x05н')\n buf.write('ȟ\\x02ঌ\\u098d\\x05ѣȲ\\x02\\u098dŚ\\x03\\x02\\x02')\n buf.write('\\x02\\u098eএ\\x05хȣ\\x02এঐ\\x05яȨ')\n buf.write('\\x02ঐ\\u0991\\x05лȞ\\x02\\u0991\\u0992\\x05нȟ')\n buf.write('\\x02\\u0992ও\\x05ѣȲ\\x02ওঔ\\x05нȟ')\n buf.write('\\x02ঔক\\x05лȞ\\x02কŜ\\x03\\x02\\x02\\x02খ')\n buf.write('গ\\x05хȣ\\x02গঘ\\x05яȨ\\x02ঘ')\n buf.write('ঙ\\x05лȞ\\x02ঙচ\\x05хȣ\\x02চ')\n buf.write('ছ\\x05йȝ\\x02ছজ\\x05еț\\x02জ')\n buf.write('ঝ\\x05ћȮ\\x02ঝঞ\\x05ёȩ\\x02ঞ')\n buf.write('ট\\x05їȬ\\x02টŞ\\x03\\x02\\x02\\x02ঠড')\n buf.write('\\x05хȣ\\x02ডঢ\\x05яȨ\\x02ঢণ')\n buf.write('\\x05лȞ\\x02ণত\\x05хȣ\\x02তথ')\n buf.write('\\x05йȝ\\x02থদ\\x05нȟ\\x02দধ')\n 
buf.write('\\x05љȭ\\x02ধŠ\\x03\\x02\\x02\\x02ন\\u09a9\\x05х')\n buf.write('ȣ\\x02\\u09a9প\\x05яȨ\\x02পফ\\x05п')\n buf.write('Ƞ\\x02ফব\\x05хȣ\\x02বভ\\x05я')\n buf.write('Ȩ\\x02ভম\\x05хȣ\\x02ময\\x05ћ')\n buf.write('Ȯ\\x02যর\\x05нȟ\\x02রŢ\\x03\\x02\\x02')\n buf.write('\\x02\\u09b1ল\\x05хȣ\\x02ল\\u09b3\\x05яȨ')\n buf.write('\\x02\\u09b3\\u09b4\\x05ыȦ\\x02\\u09b4\\u09b5\\x05хȣ')\n buf.write('\\x02\\u09b5শ\\x05яȨ\\x02শষ\\x05нȟ')\n buf.write('\\x02ষŤ\\x03\\x02\\x02\\x02সহ\\x05хȣ\\x02হ')\n buf.write('\\u09ba\\x05яȨ\\x02\\u09ba\\u09bb\\x05яȨ\\x02\\u09bb')\n buf.write('়\\x05нȟ\\x02়ঽ\\x05їȬ\\x02ঽ')\n buf.write('Ŧ\\x03\\x02\\x02\\x02াি\\x05хȣ\\x02িী')\n buf.write('\\x05яȨ\\x02ীু\\x05ёȩ\\x02ুূ')\n buf.write('\\x05ѝȯ\\x02ূৃ\\x05ћȮ\\x02ৃŨ')\n buf.write('\\x03\\x02\\x02\\x02ৄ\\u09c5\\x05хȣ\\x02\\u09c5\\u09c6\\x05я')\n buf.write('Ȩ\\x02\\u09c6ে\\x05љȭ\\x02েৈ\\x05н')\n buf.write('ȟ\\x02ৈ\\u09c9\\x05їȬ\\x02\\u09c9\\u09ca\\x05ћ')\n buf.write('Ȯ\\x02\\u09caŪ\\x03\\x02\\x02\\x02োৌ\\x05хȣ')\n buf.write('\\x02ৌ্\\x05яȨ\\x02্ৎ\\x05љȭ')\n buf.write('\\x02ৎ\\u09cf\\x05ћȮ\\x02\\u09cf\\u09d0\\x05еț')\n buf.write('\\x02\\u09d0\\u09d1\\x05яȨ\\x02\\u09d1\\u09d2\\x05ћȮ')\n buf.write('\\x02\\u09d2\\u09d3\\x05хȣ\\x02\\u09d3\\u09d4\\x05еț')\n buf.write('\\x02\\u09d4\\u09d5\\x05зȜ\\x02\\u09d5\\u09d6\\x05ыȦ')\n buf.write('\\x02\\u09d6ৗ\\x05нȟ\\x02ৗŬ\\x03\\x02\\x02\\x02\\u09d8')\n buf.write('\\u09d9\\x05хȣ\\x02\\u09d9\\u09da\\x05яȨ\\x02\\u09da')\n buf.write('\\u09db\\x05љȭ\\x02\\u09dbড়\\x05ћȮ\\x02ড়')\n buf.write('ঢ়\\x05нȟ\\x02ঢ়\\u09de\\x05еț\\x02\\u09de')\n buf.write('য়\\x05лȞ\\x02য়Ů\\x03\\x02\\x02\\x02ৠৡ')\n buf.write('\\x05хȣ\\x02ৡৢ\\x05яȨ\\x02ৢৣ')\n buf.write('\\x05ћȮ\\x02ৣŰ\\x03\\x02\\x02\\x02\\u09e4\\u09e5\\x05х')\n buf.write('ȣ\\x02\\u09e5০\\x05яȨ\\x02০১\\x05ћ')\n buf.write('Ȯ\\x02১২\\x05нȟ\\x02২৩\\x05с')\n buf.write('ȡ\\x02৩৪\\x05нȟ\\x02৪৫\\x05ї')\n buf.write('Ȭ\\x02৫Ų\\x03\\x02\\x02\\x02৬৭\\x05хȣ')\n buf.write('\\x02৭৮\\x05яȨ\\x02৮৯\\x05ћȮ')\n buf.write('\\x02৯ৰ\\x05нȟ\\x02ৰৱ\\x05їȬ')\n 
buf.write('\\x02ৱ৲\\x05љȭ\\x02৲৳\\x05нȟ')\n buf.write('\\x02৳৴\\x05йȝ\\x02৴৵\\x05ћȮ')\n buf.write('\\x02৵Ŵ\\x03\\x02\\x02\\x02৶৷\\x05хȣ\\x02৷')\n buf.write('৸\\x05яȨ\\x02৸৹\\x05ћȮ\\x02৹')\n buf.write('৺\\x05нȟ\\x02৺৻\\x05їȬ\\x02৻')\n buf.write('ৼ\\x05џȰ\\x02ৼ৽\\x05еț\\x02৽')\n buf.write('৾\\x05ыȦ\\x02৾Ŷ\\x03\\x02\\x02\\x02\\u09ff\\u0a00')\n buf.write('\\x05хȣ\\x02\\u0a00ਁ\\x05яȨ\\x02ਁਂ')\n buf.write('\\x05ћȮ\\x02ਂਃ\\x05ёȩ\\x02ਃŸ')\n buf.write('\\x03\\x02\\x02\\x02\\u0a04ਅ\\x05хȣ\\x02ਅਆ\\x05я')\n buf.write('Ȩ\\x02ਆਇ\\x05џȰ\\x02ਇਈ\\x05е')\n buf.write('ț\\x02ਈਉ\\x05ыȦ\\x02ਉਊ\\x05х')\n buf.write('ȣ\\x02ਊ\\u0a0b\\x05лȞ\\x02\\u0a0b\\u0a0c\\x05е')\n buf.write('ț\\x02\\u0a0c\\u0a0d\\x05ћȮ\\x02\\u0a0d\\u0a0e\\x05н')\n buf.write('ȟ\\x02\\u0a0eź\\x03\\x02\\x02\\x02ਏਐ\\x05хȣ')\n buf.write('\\x02ਐ\\u0a11\\x05љȭ\\x02\\u0a11ż\\x03\\x02\\x02\\x02\\u0a12')\n buf.write('ਓ\\x05хȣ\\x02ਓਔ\\x05љȭ\\x02ਔ')\n buf.write('ਕ\\x05ёȩ\\x02ਕਖ\\x05ыȦ\\x02ਖ')\n buf.write('ਗ\\x05еț\\x02ਗਘ\\x05ћȮ\\x02ਘ')\n buf.write('ਙ\\x05хȣ\\x02ਙਚ\\x05ёȩ\\x02ਚ')\n buf.write('ਛ\\x05яȨ\\x02ਛž\\x03\\x02\\x02\\x02ਜਝ')\n buf.write('\\x05хȣ\\x02ਝਞ\\x05ћȮ\\x02ਞਟ')\n buf.write('\\x05нȟ\\x02ਟਠ\\x05їȬ\\x02ਠਡ')\n buf.write('\\x05еț\\x02ਡਢ\\x05ћȮ\\x02ਢਣ')\n buf.write('\\x05нȟ\\x02ਣƀ\\x03\\x02\\x02\\x02ਤਥ\\x05ч')\n buf.write('Ȥ\\x02ਥਦ\\x05еț\\x02ਦਧ\\x05џ')\n buf.write('Ȱ\\x02ਧਨ\\x05еț\\x02ਨƂ\\x03\\x02\\x02')\n buf.write('\\x02\\u0a29ਪ\\x05чȤ\\x02ਪਫ\\x05ёȩ')\n buf.write('\\x02ਫਬ\\x05хȣ\\x02ਬਭ\\x05яȨ')\n buf.write('\\x02ਭƄ\\x03\\x02\\x02\\x02ਮਯ\\x05щȥ\\x02ਯ')\n buf.write('ਰ\\x05нȟ\\x02ਰ\\u0a31\\x05нȟ\\x02\\u0a31')\n buf.write('ਲ\\x05ѓȪ\\x02ਲƆ\\x03\\x02\\x02\\x02ਲ਼\\u0a34')\n buf.write('\\x05ыȦ\\x02\\u0a34ਵ\\x05еț\\x02ਵਸ਼')\n buf.write('\\x05яȨ\\x02ਸ਼\\u0a37\\x05сȡ\\x02\\u0a37ਸ')\n buf.write('\\x05ѝȯ\\x02ਸਹ\\x05еț\\x02ਹ\\u0a3a')\n buf.write('\\x05сȡ\\x02\\u0a3a\\u0a3b\\x05нȟ\\x02\\u0a3bƈ')\n buf.write('\\x03\\x02\\x02\\x02਼\\u0a3d\\x05ыȦ\\x02\\u0a3dਾ\\x05е')\n buf.write('ț\\x02ਾਿ\\x05љȭ\\x02ਿੀ\\x05ћ')\n 
buf.write('Ȯ\\x02ੀƊ\\x03\\x02\\x02\\x02ੁੂ\\x05ыȦ')\n buf.write('\\x02ੂ\\u0a43\\x05еț\\x02\\u0a43\\u0a44\\x05љȭ')\n buf.write('\\x02\\u0a44\\u0a45\\x05ћȮ\\x02\\u0a45\\u0a46\\x07a\\x02\\x02\\u0a46')\n buf.write('ੇ\\x05џȰ\\x02ੇੈ\\x05еț\\x02ੈ')\n buf.write('\\u0a49\\x05ыȦ\\x02\\u0a49\\u0a4a\\x05ѝȯ\\x02\\u0a4a')\n buf.write('ੋ\\x05нȟ\\x02ੋƌ\\x03\\x02\\x02\\x02ੌ੍')\n buf.write('\\x05ыȦ\\x02੍\\u0a4e\\x05нȟ\\x02\\u0a4e\\u0a4f')\n buf.write('\\x05еț\\x02\\u0a4f\\u0a50\\x05лȞ\\x02\\u0a50ੑ')\n buf.write('\\x05хȣ\\x02ੑ\\u0a52\\x05яȨ\\x02\\u0a52\\u0a53')\n buf.write('\\x05сȡ\\x02\\u0a53Ǝ\\x03\\x02\\x02\\x02\\u0a54\\u0a55\\x05ы')\n buf.write('Ȧ\\x02\\u0a55\\u0a56\\x05нȟ\\x02\\u0a56\\u0a57\\x05п')\n buf.write('Ƞ\\x02\\u0a57\\u0a58\\x05ћȮ\\x02\\u0a58Ɛ\\x03\\x02\\x02')\n buf.write('\\x02ਖ਼ਗ਼\\x05ыȦ\\x02ਗ਼ਜ਼\\x05нȟ')\n buf.write('\\x02ਜ਼ੜ\\x05џȰ\\x02ੜ\\u0a5d\\x05нȟ')\n buf.write('\\x02\\u0a5dਫ਼\\x05ыȦ\\x02ਫ਼ƒ\\x03\\x02\\x02\\x02\\u0a5f')\n buf.write('\\u0a60\\x05ыȦ\\x02\\u0a60\\u0a61\\x05хȣ\\x02\\u0a61')\n buf.write('\\u0a62\\x05зȜ\\x02\\u0a62\\u0a63\\x05їȬ\\x02\\u0a63')\n buf.write('\\u0a64\\x05еț\\x02\\u0a64\\u0a65\\x05їȬ\\x02\\u0a65')\n buf.write('੦\\x05ѥȳ\\x02੦Ɣ\\x03\\x02\\x02\\x02੧੨')\n buf.write('\\x05ыȦ\\x02੨੩\\x05хȣ\\x02੩੪')\n buf.write('\\x05щȥ\\x02੪੫\\x05нȟ\\x02੫Ɩ')\n buf.write('\\x03\\x02\\x02\\x02੬੭\\x05ыȦ\\x02੭੮\\x05х')\n buf.write('ȣ\\x02੮੯\\x05щȥ\\x02੯ੰ\\x05н')\n buf.write('ȟ\\x02ੰੱ\\x074\\x02\\x02ੱƘ\\x03\\x02\\x02\\x02ੲ')\n buf.write('ੳ\\x05ыȦ\\x02ੳੴ\\x05хȣ\\x02ੴ')\n buf.write('ੵ\\x05щȥ\\x02ੵ੶\\x05нȟ\\x02੶')\n buf.write('\\u0a77\\x076\\x02\\x02\\u0a77ƚ\\x03\\x02\\x02\\x02\\u0a78\\u0a79\\x05ы'\n )\n buf.write('Ȧ\\x02\\u0a79\\u0a7a\\x05хȣ\\x02\\u0a7a\\u0a7b\\x05щ')\n buf.write('ȥ\\x02\\u0a7b\\u0a7c\\x05нȟ\\x02\\u0a7c\\u0a7d\\x05й')\n buf.write('ȝ\\x02\\u0a7dƜ\\x03\\x02\\x02\\x02\\u0a7e\\u0a7f\\x05ыȦ')\n buf.write('\\x02\\u0a7f\\u0a80\\x05хȣ\\x02\\u0a80ઁ\\x05эȧ')\n buf.write('\\x02ઁં\\x05хȣ\\x02ંઃ\\x05ћȮ')\n 
buf.write('\\x02ઃƞ\\x03\\x02\\x02\\x02\\u0a84અ\\x05ыȦ\\x02અ')\n buf.write('આ\\x05ёȩ\\x02આઇ\\x05йȝ\\x02ઇ')\n buf.write('ઈ\\x05еț\\x02ઈઉ\\x05ыȦ\\x02ઉ')\n buf.write('Ơ\\x03\\x02\\x02\\x02ઊઋ\\x05ыȦ\\x02ઋઌ')\n buf.write('\\x05ёȩ\\x02ઌઍ\\x05йȝ\\x02ઍ\\u0a8e')\n buf.write('\\x05щȥ\\x02\\u0a8eƢ\\x03\\x02\\x02\\x02એઐ\\x05ы')\n buf.write('Ȧ\\x02ઐઑ\\x05ёȩ\\x02ઑ\\u0a92\\x05й')\n buf.write('ȝ\\x02\\u0a92ઓ\\x05щȥ\\x02ઓઔ\\x05н')\n buf.write('ȟ\\x02ઔક\\x05лȞ\\x02કƤ\\x03\\x02\\x02')\n buf.write('\\x02ખગ\\x05ыȦ\\x02ગઘ\\x05ёȩ')\n buf.write('\\x02ઘઙ\\x05сȡ\\x02ઙƦ\\x03\\x02\\x02\\x02ચ')\n buf.write('છ\\x05ыȦ\\x02છજ\\x05ёȩ\\x02જ')\n buf.write('ઝ\\x05сȡ\\x02ઝઞ\\x05ёȩ\\x02ઞ')\n buf.write('ટ\\x05пȠ\\x02ટઠ\\x05пȠ\\x02ઠ')\n buf.write('ƨ\\x03\\x02\\x02\\x02ડઢ\\x05ыȦ\\x02ઢણ')\n buf.write('\\x05ёȩ\\x02ણત\\x05сȡ\\x02તથ')\n buf.write('\\x05ёȩ\\x02થદ\\x05яȨ\\x02દƪ')\n buf.write('\\x03\\x02\\x02\\x02ધન\\x05ыȦ\\x02ન\\u0aa9\\x05ё')\n buf.write('ȩ\\x02\\u0aa9પ\\x05яȨ\\x02પફ\\x05с')\n buf.write('ȡ\\x02ફƬ\\x03\\x02\\x02\\x02બભ\\x05ыȦ')\n buf.write('\\x02ભમ\\x05ёȩ\\x02મય\\x05ёȩ')\n buf.write('\\x02યર\\x05ѓȪ\\x02રƮ\\x03\\x02\\x02\\x02\\u0ab1')\n buf.write('લ\\x05эȧ\\x02લળ\\x05еț\\x02ળ')\n buf.write('\\u0ab4\\x05хȣ\\x02\\u0ab4વ\\x05яȨ\\x02વ')\n buf.write('ư\\x03\\x02\\x02\\x02શષ\\x05эȧ\\x02ષસ')\n buf.write('\\x05еț\\x02સહ\\x05ѓȪ\\x02હƲ')\n buf.write('\\x03\\x02\\x02\\x02\\u0aba\\u0abb\\x05эȧ\\x02\\u0abb઼\\x05е')\n buf.write('ț\\x02઼ઽ\\x05ћȮ\\x02ઽા\\x05й')\n buf.write('ȝ\\x02ાિ\\x05уȢ\\x02િી\\x05н')\n buf.write('ȟ\\x02ીુ\\x05лȞ\\x02ુƴ\\x03\\x02\\x02')\n buf.write('\\x02ૂૃ\\x05эȧ\\x02ૃૄ\\x05еț')\n buf.write('\\x02ૄૅ\\x05ѣȲ\\x02ૅ\\u0ac6\\x05џȰ')\n buf.write('\\x02\\u0ac6ે\\x05еț\\x02ેૈ\\x05ыȦ')\n buf.write('\\x02ૈૉ\\x05ѝȯ\\x02ૉ\\u0aca\\x05нȟ')\n buf.write('\\x02\\u0acaƶ\\x03\\x02\\x02\\x02ોૌ\\x05эȧ\\x02ૌ')\n buf.write('્\\x05нȟ\\x02્\\u0ace\\x05еț\\x02\\u0ace')\n buf.write('\\u0acf\\x05љȭ\\x02\\u0acfૐ\\x05ѝȯ\\x02ૐ')\n buf.write('\\u0ad1\\x05їȬ\\x02\\u0ad1\\u0ad2\\x05нȟ\\x02\\u0ad2')\n 
buf.write('\\u0ad3\\x05љȭ\\x02\\u0ad3Ƹ\\x03\\x02\\x02\\x02\\u0ad4\\u0ad5')\n buf.write('\\x05эȧ\\x02\\u0ad5\\u0ad6\\x05нȟ\\x02\\u0ad6\\u0ad7')\n buf.write('\\x05эȧ\\x02\\u0ad7\\u0ad8\\x05зȜ\\x02\\u0ad8\\u0ad9')\n buf.write('\\x05нȟ\\x02\\u0ad9\\u0ada\\x05їȬ\\x02\\u0adaƺ')\n buf.write('\\x03\\x02\\x02\\x02\\u0adb\\u0adc\\x05эȧ\\x02\\u0adc\\u0add\\x05н')\n buf.write('ȟ\\x02\\u0add\\u0ade\\x05їȬ\\x02\\u0ade\\u0adf\\x05с')\n buf.write('ȡ\\x02\\u0adfૠ\\x05нȟ\\x02ૠƼ\\x03\\x02\\x02')\n buf.write('\\x02ૡૢ\\x05эȧ\\x02ૢૣ\\x05хȣ')\n buf.write('\\x02ૣ\\u0ae4\\x05яȨ\\x02\\u0ae4\\u0ae5\\x05ѝȯ')\n buf.write('\\x02\\u0ae5૦\\x05љȭ\\x02૦ƾ\\x03\\x02\\x02\\x02૧')\n buf.write('૨\\x05эȧ\\x02૨૩\\x05хȣ\\x02૩')\n buf.write('૪\\x05яȨ\\x02૪૫\\x05ѝȯ\\x02૫')\n buf.write('૬\\x05ћȮ\\x02૬૭\\x05нȟ\\x02૭')\n buf.write('ǀ\\x03\\x02\\x02\\x02૮૯\\x05эȧ\\x02૯૰')\n buf.write('\\x05хȣ\\x02૰૱\\x05яȨ\\x02૱\\u0af2')\n buf.write('\\x05џȰ\\x02\\u0af2\\u0af3\\x05еț\\x02\\u0af3\\u0af4')\n buf.write('\\x05ыȦ\\x02\\u0af4\\u0af5\\x05ѝȯ\\x02\\u0af5\\u0af6')\n buf.write('\\x05нȟ\\x02\\u0af6ǂ\\x03\\x02\\x02\\x02\\u0af7\\u0af8\\x05э')\n buf.write('ȧ\\x02\\u0af8ૹ\\x05ыȦ\\x02ૹૺ\\x05љ')\n buf.write('ȭ\\x02ૺૻ\\x05ыȦ\\x02ૻૼ\\x05е')\n buf.write('ț\\x02ૼ૽\\x05зȜ\\x02૽૾\\x05н')\n buf.write('ȟ\\x02૾૿\\x05ыȦ\\x02૿DŽ\\x03\\x02\\x02')\n buf.write('\\x02\\u0b00ଁ\\x05эȧ\\x02ଁଂ\\x05ёȩ')\n buf.write('\\x02ଂଃ\\x05лȞ\\x02ଃ\\u0b04\\x05нȟ')\n buf.write('\\x02\\u0b04dž\\x03\\x02\\x02\\x02ଅଆ\\x05эȧ\\x02ଆ')\n buf.write('ଇ\\x05ёȩ\\x02ଇଈ\\x05лȞ\\x02ଈ')\n buf.write('ଉ\\x05нȟ\\x02ଉଊ\\x05ыȦ\\x02ଊ')\n buf.write('Lj\\x03\\x02\\x02\\x02ଋଌ\\x05эȧ\\x02ଌ\\u0b0d')\n buf.write('\\x05ёȩ\\x02\\u0b0d\\u0b0e\\x05лȞ\\x02\\u0b0eଏ')\n buf.write('\\x05хȣ\\x02ଏଐ\\x05пȠ\\x02ଐ\\u0b11')\n buf.write('\\x05ѥȳ\\x02\\u0b11NJ\\x03\\x02\\x02\\x02\\u0b12ଓ\\x05э')\n buf.write('ȧ\\x02ଓଔ\\x05ёȩ\\x02ଔକ\\x05я')\n buf.write('Ȩ\\x02କଖ\\x05ћȮ\\x02ଖଗ\\x05у')\n buf.write('Ȣ\\x02ଗnj\\x03\\x02\\x02\\x02ଘଙ\\x05эȧ')\n buf.write('\\x02ଙଚ\\x05ѝȯ\\x02ଚଛ\\x05ыȦ')\n 
buf.write('\\x02ଛଜ\\x05ћȮ\\x02ଜଝ\\x05хȣ')\n buf.write('\\x02ଝଞ\\x05љȭ\\x02ଞଟ\\x05нȟ')\n buf.write('\\x02ଟଠ\\x05ћȮ\\x02ଠǎ\\x03\\x02\\x02\\x02ଡ')\n buf.write('ଢ\\x05яȨ\\x02ଢଣ\\x05еț\\x02ଣ')\n buf.write('ତ\\x05эȧ\\x02ତଥ\\x05нȟ\\x02ଥ')\n buf.write('ǐ\\x03\\x02\\x02\\x02ଦଧ\\x05яȨ\\x02ଧନ')\n buf.write('\\x05еț\\x02ନ\\u0b29\\x05яȨ\\x02\\u0b29ǒ')\n buf.write('\\x03\\x02\\x02\\x02ପଫ\\x05яȨ\\x02ଫବ\\x05е')\n buf.write('ț\\x02ବଭ\\x05ћȮ\\x02ଭମ\\x05ѝ')\n buf.write('ȯ\\x02ମଯ\\x05їȬ\\x02ଯର\\x05е')\n buf.write('ț\\x02ର\\u0b31\\x05ыȦ\\x02\\u0b31ǔ\\x03\\x02\\x02')\n buf.write('\\x02ଲଳ\\x05яȨ\\x02ଳ\\u0b34\\x05еț')\n buf.write('\\x02\\u0b34ଵ\\x05ћȮ\\x02ଵଶ\\x05ѝȯ')\n buf.write('\\x02ଶଷ\\x05їȬ\\x02ଷସ\\x05еț')\n buf.write('\\x02ସହ\\x05ыȦ\\x02ହ\\u0b3a\\x05яȨ')\n buf.write('\\x02\\u0b3aǖ\\x03\\x02\\x02\\x02\\u0b3b଼\\x05яȨ\\x02଼')\n buf.write('ଽ\\x05еț\\x02ଽା\\x05џȰ\\x02ା')\n buf.write('ǘ\\x03\\x02\\x02\\x02ିୀ\\x05яȨ\\x02ୀୁ')\n buf.write('\\x05йȝ\\x02ୁୂ\\x05уȢ\\x02ୂୃ')\n buf.write('\\x05еț\\x02ୃୄ\\x05їȬ\\x02ୄǚ')\n buf.write('\\x03\\x02\\x02\\x02\\u0b45\\u0b46\\x05яȨ\\x02\\u0b46େ\\x05й')\n buf.write('ȝ\\x02େୈ\\x05уȢ\\x02ୈ\\u0b49\\x05е')\n buf.write('ț\\x02\\u0b49\\u0b4a\\x05їȬ\\x02\\u0b4aୋ\\x07a\\x02')\n buf.write('\\x02ୋୌ\\x05йȝ\\x02ୌ୍\\x05љȭ')\n buf.write('\\x02୍ǜ\\x03\\x02\\x02\\x02\\u0b4e\\u0b4f\\x05яȨ\\x02\\u0b4f')\n buf.write('\\u0b50\\x05йȝ\\x02\\u0b50\\u0b51\\x05ыȦ\\x02\\u0b51')\n buf.write('\\u0b52\\x05ёȩ\\x02\\u0b52\\u0b53\\x05зȜ\\x02\\u0b53')\n buf.write('Ǟ\\x03\\x02\\x02\\x02\\u0b54୕\\x05яȨ\\x02୕ୖ')\n buf.write('\\x05нȟ\\x02ୖୗ\\x05љȭ\\x02ୗ\\u0b58')\n buf.write('\\x05ћȮ\\x02\\u0b58\\u0b59\\x05нȟ\\x02\\u0b59\\u0b5a')\n buf.write('\\x05лȞ\\x02\\u0b5aǠ\\x03\\x02\\x02\\x02\\u0b5bଡ଼\\x05я')\n buf.write('Ȩ\\x02ଡ଼ଢ଼\\x05нȟ\\x02ଢ଼\\u0b5e\\x05ѡ')\n buf.write('ȱ\\x02\\u0b5eǢ\\x03\\x02\\x02\\x02ୟୠ\\x05яȨ')\n buf.write('\\x02ୠୡ\\x05ёȩ\\x02ୡǤ\\x03\\x02\\x02\\x02ୢ')\n buf.write('ୣ\\x05яȨ\\x02ୣ\\u0b64\\x05ёȩ\\x02\\u0b64')\n buf.write('\\u0b65\\x05еț\\x02\\u0b65୦\\x05ѝȯ\\x02୦')\n 
buf.write('୧\\x05лȞ\\x02୧୨\\x05хȣ\\x02୨')\n buf.write('୩\\x05ћȮ\\x02୩Ǧ\\x03\\x02\\x02\\x02୪୫')\n buf.write('\\x05яȨ\\x02୫୬\\x05ёȩ\\x02୬୭')\n buf.write('\\x05йȝ\\x02୭୮\\x05еț\\x02୮୯')\n buf.write('\\x05йȝ\\x02୯୰\\x05уȢ\\x02୰ୱ')\n buf.write('\\x05нȟ\\x02ୱǨ\\x03\\x02\\x02\\x02୲୳\\x05я')\n buf.write('Ȩ\\x02୳୴\\x05ёȩ\\x02୴୵\\x05й')\n buf.write('ȝ\\x02୵୶\\x05ёȩ\\x02୶୷\\x05ѓ')\n buf.write('Ȫ\\x02୷\\u0b78\\x05ѥȳ\\x02\\u0b78Ǫ\\x03\\x02\\x02')\n buf.write('\\x02\\u0b79\\u0b7a\\x05яȨ\\x02\\u0b7a\\u0b7b\\x05ёȩ')\n buf.write('\\x02\\u0b7b\\u0b7c\\x05йȝ\\x02\\u0b7c\\u0b7d\\x05ѥȳ')\n buf.write('\\x02\\u0b7d\\u0b7e\\x05йȝ\\x02\\u0b7e\\u0b7f\\x05ыȦ')\n buf.write('\\x02\\u0b7f\\u0b80\\x05нȟ\\x02\\u0b80Ǭ\\x03\\x02\\x02\\x02\\u0b81')\n buf.write('ஂ\\x05яȨ\\x02ஂஃ\\x05ёȩ\\x02ஃ')\n buf.write('\\u0b84\\x05нȟ\\x02\\u0b84அ\\x05яȨ\\x02அ')\n buf.write('ஆ\\x05ћȮ\\x02ஆஇ\\x05хȣ\\x02இ')\n buf.write('ஈ\\x05ћȮ\\x02ஈஉ\\x05ѥȳ\\x02உ')\n buf.write('ஊ\\x05нȟ\\x02ஊ\\u0b8b\\x05љȭ\\x02\\u0b8b')\n buf.write('\\u0b8c\\x05йȝ\\x02\\u0b8c\\u0b8d\\x05еț\\x02\\u0b8d')\n buf.write('எ\\x05ѓȪ\\x02எஏ\\x05хȣ\\x02ஏ')\n buf.write('ஐ\\x05яȨ\\x02ஐ\\u0b91\\x05сȡ\\x02\\u0b91')\n buf.write('Ǯ\\x03\\x02\\x02\\x02ஒஓ\\x05яȨ\\x02ஓஔ')\n buf.write('\\x05ёȩ\\x02ஔக\\x05эȧ\\x02க\\u0b96')\n buf.write('\\x05еț\\x02\\u0b96\\u0b97\\x05ѣȲ\\x02\\u0b97\\u0b98')\n buf.write('\\x05џȰ\\x02\\u0b98ங\\x05еț\\x02ஙச')\n buf.write('\\x05ыȦ\\x02ச\\u0b9b\\x05ѝȯ\\x02\\u0b9bஜ')\n buf.write('\\x05нȟ\\x02ஜǰ\\x03\\x02\\x02\\x02\\u0b9dஞ\\x05я')\n buf.write('Ȩ\\x02ஞட\\x05ёȩ\\x02ட\\u0ba0\\x05э')\n buf.write('ȧ\\x02\\u0ba0\\u0ba1\\x05хȣ\\x02\\u0ba1\\u0ba2\\x05я')\n buf.write('Ȩ\\x02\\u0ba2ண\\x05џȰ\\x02ணத\\x05е')\n buf.write('ț\\x02த\\u0ba5\\x05ыȦ\\x02\\u0ba5\\u0ba6\\x05ѝ')\n buf.write('ȯ\\x02\\u0ba6\\u0ba7\\x05нȟ\\x02\\u0ba7Dz\\x03\\x02\\x02')\n buf.write('\\x02நன\\x05яȨ\\x02னப\\x05ёȩ')\n buf.write('\\x02ப\\u0bab\\x05яȨ\\x02\\u0bab\\u0bac\\x05нȟ')\n buf.write('\\x02\\u0bacǴ\\x03\\x02\\x02\\x02\\u0badம\\x05яȨ\\x02ம')\n 
buf.write('ய\\x05ёȩ\\x02யர\\x05ёȩ\\x02ர')\n buf.write('ற\\x05їȬ\\x02றல\\x05лȞ\\x02ல')\n buf.write('ள\\x05нȟ\\x02ளழ\\x05їȬ\\x02ழ')\n buf.write('Ƕ\\x03\\x02\\x02\\x02வஶ\\x05яȨ\\x02ஶஷ')\n buf.write('\\x05ёȩ\\x02ஷஸ\\x05љȭ\\x02ஸஹ')\n buf.write('\\x05йȝ\\x02ஹ\\u0bba\\x05уȢ\\x02\\u0bba\\u0bbb')\n buf.write('\\x05нȟ\\x02\\u0bbb\\u0bbc\\x05эȧ\\x02\\u0bbc\\u0bbd')\n buf.write('\\x05еț\\x02\\u0bbdா\\x05йȝ\\x02ாி')\n buf.write('\\x05уȢ\\x02ிீ\\x05нȟ\\x02ீு')\n buf.write('\\x05йȝ\\x02ுூ\\x05щȥ\\x02ூǸ')\n buf.write('\\x03\\x02\\x02\\x02\\u0bc3\\u0bc4\\x05яȨ\\x02\\u0bc4\\u0bc5\\x05ё')\n buf.write('ȩ\\x02\\u0bc5ெ\\x05ћȮ\\x02ெǺ\\x03\\x02\\x02')\n buf.write('\\x02ேை\\x05яȨ\\x02ை\\u0bc9\\x05ёȩ')\n buf.write('\\x02\\u0bc9ொ\\x05ѡȱ\\x02ொோ\\x05еț')\n buf.write('\\x02ோௌ\\x05хȣ\\x02ௌ்\\x05ћȮ')\n buf.write('\\x02்Ǽ\\x03\\x02\\x02\\x02\\u0bce\\u0bcf\\x05яȨ\\x02\\u0bcf')\n buf.write('ௐ\\x05ѝȯ\\x02ௐ\\u0bd1\\x05ыȦ\\x02\\u0bd1')\n buf.write('\\u0bd2\\x05ыȦ\\x02\\u0bd2Ǿ\\x03\\x02\\x02\\x02\\u0bd3\\u0bd4')\n buf.write('\\x05яȨ\\x02\\u0bd4\\u0bd5\\x05ѝȯ\\x02\\u0bd5\\u0bd6')\n buf.write('\\x05ыȦ\\x02\\u0bd6ௗ\\x05ыȦ\\x02ௗ\\u0bd8')\n buf.write('\\x05љȭ\\x02\\u0bd8Ȁ\\x03\\x02\\x02\\x02\\u0bd9\\u0bda\\x05я')\n buf.write('Ȩ\\x02\\u0bda\\u0bdb\\x05ѝȯ\\x02\\u0bdb\\u0bdc\\x05э')\n buf.write('ȧ\\x02\\u0bdc\\u0bdd\\x05зȜ\\x02\\u0bdd\\u0bde\\x05н')\n buf.write('ȟ\\x02\\u0bde\\u0bdf\\x05їȬ\\x02\\u0bdfȂ\\x03\\x02\\x02')\n buf.write('\\x02\\u0be0\\u0be1\\x05яȨ\\x02\\u0be1\\u0be2\\x05ѝȯ')\n buf.write('\\x02\\u0be2\\u0be3\\x05эȧ\\x02\\u0be3\\u0be4\\x05нȟ')\n buf.write('\\x02\\u0be4\\u0be5\\x05їȬ\\x02\\u0be5௦\\x05хȣ')\n buf.write('\\x02௦௧\\x05йȝ\\x02௧Ȅ\\x03\\x02\\x02\\x02௨')\n buf.write('௩\\x05яȨ\\x02௩௪\\x05џȰ\\x02௪')\n buf.write('௫\\x05еț\\x02௫௬\\x05їȬ\\x02௬')\n buf.write('௭\\x05йȝ\\x02௭௮\\x05уȢ\\x02௮')\n buf.write('௯\\x05еț\\x02௯௰\\x05їȬ\\x02௰')\n buf.write('௱\\x074\\x02\\x02௱Ȇ\\x03\\x02\\x02\\x02௲௳\\x05ё')\n buf.write('ȩ\\x02௳௴\\x05зȜ\\x02௴௵\\x05ч')\n buf.write('Ȥ\\x02௵௶\\x05нȟ\\x02௶௷\\x05й')\n 
buf.write('ȝ\\x02௷௸\\x05ћȮ\\x02௸Ȉ\\x03\\x02\\x02')\n buf.write('\\x02௹௺\\x05ёȩ\\x02௺\\u0bfb\\x05пȠ')\n buf.write('\\x02\\u0bfbȊ\\x03\\x02\\x02\\x02\\u0bfc\\u0bfd\\x05ёȩ\\x02\\u0bfd')\n buf.write('\\u0bfe\\x05пȠ\\x02\\u0bfe\\u0bff\\x05пȠ\\x02\\u0bff')\n buf.write('Ȍ\\x03\\x02\\x02\\x02ఀఁ\\x05ёȩ\\x02ఁం')\n buf.write('\\x05хȣ\\x02ంః\\x05лȞ\\x02ఃȎ')\n buf.write('\\x03\\x02\\x02\\x02ఄఅ\\x05ёȩ\\x02అఆ\\x05ы')\n buf.write('Ȧ\\x02ఆఇ\\x05лȞ\\x02ఇȐ\\x03\\x02\\x02')\n buf.write('\\x02ఈఉ\\x05ёȩ\\x02ఉఊ\\x05яȨ')\n buf.write('\\x02ఊȒ\\x03\\x02\\x02\\x02ఋఌ\\x05ёȩ\\x02ఌ')\n buf.write('\\u0c0d\\x05яȨ\\x02\\u0c0dఎ\\x05ыȦ\\x02ఎ')\n buf.write('ఏ\\x05ѥȳ\\x02ఏȔ\\x03\\x02\\x02\\x02ఐ\\u0c11')\n buf.write('\\x05ёȩ\\x02\\u0c11ఒ\\x05ѓȪ\\x02ఒఓ')\n buf.write('\\x05нȟ\\x02ఓఔ\\x05яȨ\\x02ఔȖ')\n buf.write('\\x03\\x02\\x02\\x02కఖ\\x05ёȩ\\x02ఖగ\\x05ѓ')\n buf.write('Ȫ\\x02గఘ\\x05ћȮ\\x02ఘఙ\\x05х')\n buf.write('ȣ\\x02ఙచ\\x05ёȩ\\x02చఛ\\x05я')\n buf.write('Ȩ\\x02ఛȘ\\x03\\x02\\x02\\x02జఝ\\x05ёȩ')\n buf.write('\\x02ఝఞ\\x05їȬ\\x02ఞȚ\\x03\\x02\\x02\\x02ట')\n buf.write('ఠ\\x05ёȩ\\x02ఠడ\\x05їȬ\\x02డ')\n buf.write('ఢ\\x05еț\\x02ఢణ\\x05лȞ\\x02ణ')\n buf.write('త\\x05еț\\x02తథ\\x05ћȮ\\x02థ')\n buf.write('ద\\x05еț\\x02దȜ\\x03\\x02\\x02\\x02ధన')\n buf.write('\\x05ёȩ\\x02న\\u0c29\\x05їȬ\\x02\\u0c29ప')\n buf.write('\\x05лȞ\\x02పఫ\\x05нȟ\\x02ఫబ')\n buf.write('\\x05їȬ\\x02బȞ\\x03\\x02\\x02\\x02భమ\\x05ё')\n buf.write('ȩ\\x02మయ\\x05їȬ\\x02యర\\x05л')\n buf.write('Ȟ\\x02రఱ\\x05хȣ\\x02ఱల\\x05я')\n buf.write('Ȩ\\x02లళ\\x05еț\\x02ళఴ\\x05ы')\n buf.write('Ȧ\\x02ఴవ\\x05хȣ\\x02వశ\\x05ћ')\n buf.write('Ȯ\\x02శష\\x05ѥȳ\\x02షȠ\\x03\\x02\\x02')\n buf.write('\\x02సహ\\x05ёȩ\\x02హ\\u0c3a\\x05љȭ')\n buf.write('\\x02\\u0c3a\\u0c3b\\x05нȟ\\x02\\u0c3b఼\\x05їȬ')\n buf.write('\\x02఼ఽ\\x05їȬ\\x02ఽా\\x05ёȩ')\n buf.write('\\x02ాి\\x05їȬ\\x02ిȢ\\x03\\x02\\x02\\x02ీ')\n buf.write('ు\\x05ёȩ\\x02ుూ\\x05ѝȯ\\x02ూ')\n buf.write('ృ\\x05ћȮ\\x02ృȤ\\x03\\x02\\x02\\x02ౄ\\u0c45')\n buf.write('\\x05ёȩ\\x02\\u0c45ె\\x05ѝȯ\\x02ెే')\n 
buf.write('\\x05ћȮ\\x02ేై\\x05нȟ\\x02ై\\u0c49')\n buf.write('\\x05їȬ\\x02\\u0c49Ȧ\\x03\\x02\\x02\\x02ొో\\x05ё')\n buf.write('ȩ\\x02ోౌ\\x05џȰ\\x02ౌ్\\x05н')\n buf.write('ȟ\\x02్\\u0c4e\\x05їȬ\\x02\\u0c4eȨ\\x03\\x02\\x02')\n buf.write('\\x02\\u0c4f\\u0c50\\x05ёȩ\\x02\\u0c50\\u0c51\\x05џȰ')\n buf.write('\\x02\\u0c51\\u0c52\\x05нȟ\\x02\\u0c52\\u0c53\\x05їȬ')\n buf.write('\\x02\\u0c53\\u0c54\\x05їȬ\\x02\\u0c54ౕ\\x05хȣ')\n buf.write('\\x02ౕౖ\\x05лȞ\\x02ౖ\\u0c57\\x05хȣ')\n buf.write('\\x02\\u0c57ౘ\\x05яȨ\\x02ౘౙ\\x05сȡ')\n buf.write('\\x02ౙȪ\\x03\\x02\\x02\\x02ౚ\\u0c5b\\x05ѓȪ\\x02\\u0c5b')\n buf.write('\\u0c5c\\x05еț\\x02\\u0c5cౝ\\x05йȝ\\x02ౝ')\n buf.write('\\u0c5e\\x05щȥ\\x02\\u0c5e\\u0c5f\\x05еț\\x02\\u0c5f')\n buf.write('ౠ\\x05сȡ\\x02ౠౡ\\x05нȟ\\x02ౡ')\n buf.write('Ȭ\\x03\\x02\\x02\\x02ౢౣ\\x05ѓȪ\\x02ౣ\\u0c64')\n buf.write('\\x05еț\\x02\\u0c64\\u0c65\\x05їȬ\\x02\\u0c65౦')\n buf.write('\\x05еț\\x02౦౧\\x05ыȦ\\x02౧౨')\n buf.write('\\x05ыȦ\\x02౨౩\\x05нȟ\\x02౩౪')\n buf.write('\\x05ыȦ\\x02౪౫\\x07a\\x02\\x02౫౬\\x05н')\n buf.write('ȟ\\x02౬౭\\x05яȨ\\x02౭౮\\x05е')\n buf.write('ț\\x02౮౯\\x05зȜ\\x02౯\\u0c70\\x05ы')\n buf.write('Ȧ\\x02\\u0c70\\u0c71\\x05нȟ\\x02\\u0c71Ȯ\\x03\\x02\\x02')\n buf.write('\\x02\\u0c72\\u0c73\\x05ѓȪ\\x02\\u0c73\\u0c74\\x05еț')\n buf.write('\\x02\\u0c74\\u0c75\\x05їȬ\\x02\\u0c75\\u0c76\\x05еț')\n buf.write('\\x02\\u0c76౷\\x05эȧ\\x02౷౸\\x05нȟ')\n buf.write('\\x02౸౹\\x05ћȮ\\x02౹౺\\x05нȟ')\n buf.write('\\x02౺౻\\x05їȬ\\x02౻౼\\x05љȭ')\n buf.write('\\x02౼Ȱ\\x03\\x02\\x02\\x02౽౾\\x05ѓȪ\\x02౾')\n buf.write('౿\\x05еț\\x02౿ಀ\\x05їȬ\\x02ಀ')\n buf.write('ಁ\\x05нȟ\\x02ಁಂ\\x05яȨ\\x02ಂ')\n buf.write('ಃ\\x05ћȮ\\x02ಃȲ\\x03\\x02\\x02\\x02಄ಅ')\n buf.write('\\x05ѓȪ\\x02ಅಆ\\x05еț\\x02ಆಇ')\n buf.write('\\x05їȬ\\x02ಇಈ\\x05ћȮ\\x02ಈಉ')\n buf.write('\\x05хȣ\\x02ಉಊ\\x05ћȮ\\x02ಊಋ')\n buf.write('\\x05хȣ\\x02ಋಌ\\x05ёȩ\\x02ಌ\\u0c8d')\n buf.write('\\x05яȨ\\x02\\u0c8dȴ\\x03\\x02\\x02\\x02ಎಏ\\x05ѓ')\n buf.write('Ȫ\\x02ಏಐ\\x05еț\\x02ಐ\\u0c91\\x05љ')\n 
buf.write('ȭ\\x02\\u0c91ಒ\\x05љȭ\\x02ಒಓ\\x05х')\n buf.write('ȣ\\x02ಓಔ\\x05яȨ\\x02ಔಕ\\x05с')\n buf.write('ȡ\\x02ಕȶ\\x03\\x02\\x02\\x02ಖಗ\\x05ѓȪ')\n buf.write('\\x02ಗಘ\\x05еț\\x02ಘಙ\\x05ћȮ')\n buf.write('\\x02ಙಚ\\x05уȢ\\x02ಚȸ\\x03\\x02\\x02\\x02ಛ')\n buf.write(\"ಜ\\x07'\\x02\\x02ಜಝ\\x05їȬ\\x02ಝಞ\")\n buf.write('\\x05ёȩ\\x02ಞಟ\\x05ѡȱ\\x02ಟಠ')\n buf.write('\\x05ћȮ\\x02ಠಡ\\x05ѥȳ\\x02ಡಢ')\n buf.write('\\x05ѓȪ\\x02ಢಣ\\x05нȟ\\x02ಣȺ')\n buf.write(\"\\x03\\x02\\x02\\x02ತಥ\\x07'\\x02\\x02ಥದ\\x05ћȮ\")\n buf.write('\\x02ದಧ\\x05ѥȳ\\x02ಧನ\\x05ѓȪ')\n buf.write('\\x02ನ\\u0ca9\\x05нȟ\\x02\\u0ca9ȼ\\x03\\x02\\x02\\x02ಪ')\n buf.write('ಫ\\x05ѓȪ\\x02ಫಬ\\x05хȣ\\x02ಬ')\n buf.write('ಭ\\x05ѓȪ\\x02ಭಮ\\x05нȟ\\x02ಮ')\n buf.write('ಯ\\x05ыȦ\\x02ಯರ\\x05хȣ\\x02ರ')\n buf.write('ಱ\\x05яȨ\\x02ಱಲ\\x05нȟ\\x02ಲ')\n buf.write('ಳ\\x05лȞ\\x02ಳȾ\\x03\\x02\\x02\\x02\\u0cb4ವ')\n buf.write('\\x05ѓȪ\\x02ವಶ\\x05хȣ\\x02ಶಷ')\n buf.write('\\x05џȰ\\x02ಷಸ\\x05ёȩ\\x02ಸಹ')\n buf.write('\\x05ћȮ\\x02ಹɀ\\x03\\x02\\x02\\x02\\u0cba\\u0cbb\\x05ѓ')\n buf.write('Ȫ\\x02\\u0cbb಼\\x05ыȦ\\x02಼ಽ\\x05е')\n buf.write('ț\\x02ಽಾ\\x05яȨ\\x02ಾɂ\\x03\\x02\\x02')\n buf.write('\\x02ಿೀ\\x05ѓȪ\\x02ೀು\\x05ыȦ')\n buf.write('\\x02ುೂ\\x05љȭ\\x02ೂೃ\\x07a\\x02\\x02ೃ')\n buf.write('ೄ\\x05хȣ\\x02ೄ\\u0cc5\\x05яȨ\\x02\\u0cc5')\n buf.write('ೆ\\x05ћȮ\\x02ೆೇ\\x05нȟ\\x02ೇ')\n buf.write('ೈ\\x05сȡ\\x02ೈ\\u0cc9\\x05нȟ\\x02\\u0cc9')\n buf.write('ೊ\\x05їȬ\\x02ೊɄ\\x03\\x02\\x02\\x02ೋೌ')\n buf.write('\\x05ѓȪ\\x02ೌ್\\x05ёȩ\\x02್\\u0cce')\n buf.write('\\x05љȭ\\x02\\u0cce\\u0ccf\\x05хȣ\\x02\\u0ccf\\u0cd0')\n buf.write('\\x05ћȮ\\x02\\u0cd0\\u0cd1\\x05хȣ\\x02\\u0cd1\\u0cd2')\n buf.write('\\x05џȰ\\x02\\u0cd2\\u0cd3\\x05нȟ\\x02\\u0cd3Ɇ')\n buf.write('\\x03\\x02\\x02\\x02\\u0cd4ೕ\\x05ѓȪ\\x02ೕೖ\\x05ё')\n buf.write('ȩ\\x02ೖ\\u0cd7\\x05љȭ\\x02\\u0cd7\\u0cd8\\x05х')\n buf.write('ȣ\\x02\\u0cd8\\u0cd9\\x05ћȮ\\x02\\u0cd9\\u0cda\\x05х')\n buf.write('ȣ\\x02\\u0cda\\u0cdb\\x05џȰ\\x02\\u0cdb\\u0cdc\\x05н')\n buf.write('ȟ\\x02\\u0cdcೝ\\x05яȨ\\x02ೝɈ\\x03\\x02\\x02')\n 
buf.write('\\x02ೞ\\u0cdf\\x05ѓȪ\\x02\\u0cdfೠ\\x05їȬ')\n buf.write('\\x02ೠೡ\\x05еț\\x02ೡೢ\\x05сȡ')\n buf.write('\\x02ೢೣ\\x05эȧ\\x02ೣ\\u0ce4\\x05еț')\n buf.write('\\x02\\u0ce4Ɋ\\x03\\x02\\x02\\x02\\u0ce5೦\\x05ѓȪ\\x02೦')\n buf.write('೧\\x05їȬ\\x02೧೨\\x05нȟ\\x02೨')\n buf.write('೩\\x05йȝ\\x02೩೪\\x05нȟ\\x02೪')\n buf.write('೫\\x05лȞ\\x02೫೬\\x05хȣ\\x02೬')\n buf.write('೭\\x05яȨ\\x02೭೮\\x05сȡ\\x02೮')\n buf.write('Ɍ\\x03\\x02\\x02\\x02೯\\u0cf0\\x05ѓȪ\\x02\\u0cf0ೱ')\n buf.write('\\x05їȬ\\x02ೱೲ\\x05нȟ\\x02ೲ\\u0cf3')\n buf.write('\\x05йȝ\\x02\\u0cf3\\u0cf4\\x05хȣ\\x02\\u0cf4\\u0cf5')\n buf.write('\\x05љȭ\\x02\\u0cf5\\u0cf6\\x05хȣ\\x02\\u0cf6\\u0cf7')\n buf.write('\\x05ёȩ\\x02\\u0cf7\\u0cf8\\x05яȨ\\x02\\u0cf8Ɏ')\n buf.write('\\x03\\x02\\x02\\x02\\u0cf9\\u0cfa\\x05ѓȪ\\x02\\u0cfa\\u0cfb\\x05ї')\n buf.write('Ȭ\\x02\\u0cfb\\u0cfc\\x05нȟ\\x02\\u0cfc\\u0cfd\\x05љ')\n buf.write('ȭ\\x02\\u0cfd\\u0cfe\\x05нȟ\\x02\\u0cfe\\u0cff\\x05я')\n buf.write('Ȩ\\x02\\u0cffഀ\\x05ћȮ\\x02ഀɐ\\x03\\x02\\x02')\n buf.write('\\x02ഁം\\x05ѓȪ\\x02ംഃ\\x05їȬ')\n buf.write('\\x02ഃഄ\\x05хȣ\\x02ഄഅ\\x05ёȩ')\n buf.write('\\x02അആ\\x05їȬ\\x02ആɒ\\x03\\x02\\x02\\x02ഇ')\n buf.write('ഈ\\x05ѓȪ\\x02ഈഉ\\x05їȬ\\x02ഉ')\n buf.write('ഊ\\x05ёȩ\\x02ഊഋ\\x05йȝ\\x02ഋ')\n buf.write('ഌ\\x05нȟ\\x02ഌ\\u0d0d\\x05лȞ\\x02\\u0d0d')\n buf.write('എ\\x05ѝȯ\\x02എഏ\\x05їȬ\\x02ഏ')\n buf.write('ഐ\\x05нȟ\\x02ഐɔ\\x03\\x02\\x02\\x02\\u0d11ഒ')\n buf.write('\\x05їȬ\\x02ഒഓ\\x05еț\\x02ഓഔ')\n buf.write('\\x05хȣ\\x02ഔക\\x05љȭ\\x02കഖ')\n buf.write('\\x05нȟ\\x02ഖɖ\\x03\\x02\\x02\\x02ഗഘ\\x05ї')\n buf.write('Ȭ\\x02ഘങ\\x05еț\\x02ങച\\x05я')\n buf.write('Ȩ\\x02ചഛ\\x05сȡ\\x02ഛജ\\x05н')\n buf.write('ȟ\\x02ജɘ\\x03\\x02\\x02\\x02ഝഞ\\x05їȬ')\n buf.write('\\x02ഞട\\x05еț\\x02ടഠ\\x05ѡȱ')\n buf.write('\\x02ഠɚ\\x03\\x02\\x02\\x02ഡഢ\\x05їȬ\\x02ഢ')\n buf.write('ണ\\x05нȟ\\x02ണത\\x05еț\\x02ത')\n buf.write('ഥ\\x05лȞ\\x02ഥɜ\\x03\\x02\\x02\\x02ദധ')\n buf.write('\\x05їȬ\\x02ധന\\x05нȟ\\x02നഩ')\n buf.write('\\x05еț\\x02ഩപ\\x05ыȦ\\x02പɞ')\n 
buf.write('\\x03\\x02\\x02\\x02ഫബ\\x05їȬ\\x02ബഭ\\x05н')\n buf.write('ȟ\\x02ഭമ\\x05йȝ\\x02മയ\\x05ё')\n buf.write('ȩ\\x02യര\\x05їȬ\\x02രറ\\x05л')\n buf.write('Ȟ\\x02റɠ\\x03\\x02\\x02\\x02ലള\\x05їȬ')\n buf.write('\\x02ളഴ\\x05нȟ\\x02ഴവ\\x05пȠ')\n buf.write('\\x02വɢ\\x03\\x02\\x02\\x02ശഷ\\x05їȬ\\x02ഷ')\n buf.write('സ\\x05нȟ\\x02സഹ\\x05пȠ\\x02ഹ')\n buf.write('ഺ\\x05нȟ\\x02ഺ഻\\x05їȬ\\x02഻')\n buf.write('഼\\x05нȟ\\x02഼ഽ\\x05яȨ\\x02ഽ')\n buf.write('ാ\\x05йȝ\\x02ാി\\x05нȟ\\x02ി')\n buf.write('ɤ\\x03\\x02\\x02\\x02ീു\\x05їȬ\\x02ുൂ')\n buf.write('\\x05нȟ\\x02ൂൃ\\x05пȠ\\x02ൃൄ')\n buf.write('\\x05нȟ\\x02ൄ\\u0d45\\x05їȬ\\x02\\u0d45െ')\n buf.write('\\x05нȟ\\x02െേ\\x05яȨ\\x02േൈ')\n buf.write('\\x05йȝ\\x02ൈ\\u0d49\\x05хȣ\\x02\\u0d49ൊ')\n buf.write('\\x05яȨ\\x02ൊോ\\x05сȡ\\x02ോɦ')\n buf.write('\\x03\\x02\\x02\\x02ൌ്\\x05їȬ\\x02്ൎ\\x05н')\n buf.write('ȟ\\x02ൎ൏\\x05чȤ\\x02൏\\u0d50\\x05н')\n buf.write('ȟ\\x02\\u0d50\\u0d51\\x05йȝ\\x02\\u0d51\\u0d52\\x05ћ')\n buf.write('Ȯ\\x02\\u0d52ɨ\\x03\\x02\\x02\\x02\\u0d53ൔ\\x05їȬ')\n buf.write('\\x02ൔൕ\\x05нȟ\\x02ൕൖ\\x05ыȦ')\n buf.write('\\x02ൖൗ\\x05хȣ\\x02ൗ൘\\x05нȟ')\n buf.write('\\x02൘൙\\x05љȭ\\x02൙൚\\x07a\\x02\\x02൚')\n buf.write('൛\\x05ёȩ\\x02൛൜\\x05яȨ\\x02൜')\n buf.write('ɪ\\x03\\x02\\x02\\x02൝൞\\x05їȬ\\x02൞ൟ')\n buf.write('\\x05нȟ\\x02ൟൠ\\x05яȨ\\x02ൠൡ')\n buf.write('\\x05еț\\x02ൡൢ\\x05эȧ\\x02ൢൣ')\n buf.write('\\x05нȟ\\x02ൣɬ\\x03\\x02\\x02\\x02\\u0d64\\u0d65\\x05ї')\n buf.write('Ȭ\\x02\\u0d65൦\\x05нȟ\\x02൦൧\\x05ѓ')\n buf.write('Ȫ\\x02൧൨\\x05ыȦ\\x02൨൩\\x05е')\n buf.write('ț\\x02൩൪\\x05йȝ\\x02൪൫\\x05н')\n buf.write('ȟ\\x02൫ɮ\\x03\\x02\\x02\\x02൬൭\\x05їȬ')\n buf.write('\\x02൭൮\\x05нȟ\\x02൮൯\\x05љȭ')\n buf.write('\\x02൯൰\\x05ѓȪ\\x02൰൱\\x05нȟ')\n buf.write('\\x02൱൲\\x05йȝ\\x02൲൳\\x05ћȮ')\n buf.write('\\x02൳ɰ\\x03\\x02\\x02\\x02൴൵\\x05їȬ\\x02൵')\n buf.write('൶\\x05нȟ\\x02൶൷\\x05љȭ\\x02൷')\n buf.write('൸\\x05ћȮ\\x02൸൹\\x05їȬ\\x02൹')\n buf.write('ൺ\\x05хȣ\\x02ൺൻ\\x05йȝ\\x02ൻ')\n buf.write('ർ\\x05ћȮ\\x02ർൽ\\x07a\\x02\\x02ൽൾ')\n 
buf.write('\\x05їȬ\\x02ൾൿ\\x05нȟ\\x02ൿ\\u0d80')\n buf.write('\\x05пȠ\\x02\\u0d80ඁ\\x05нȟ\\x02ඁං')\n buf.write('\\x05їȬ\\x02ංඃ\\x05нȟ\\x02ඃ\\u0d84')\n buf.write('\\x05яȨ\\x02\\u0d84අ\\x05йȝ\\x02අආ')\n buf.write('\\x05нȟ\\x02ආඇ\\x05љȭ\\x02ඇɲ')\n buf.write('\\x03\\x02\\x02\\x02ඈඉ\\x05їȬ\\x02ඉඊ\\x05н')\n buf.write('ȟ\\x02ඊඋ\\x05љȭ\\x02උඌ\\x05ѝ')\n buf.write('ȯ\\x02ඌඍ\\x05ыȦ\\x02ඍඎ\\x05ћ')\n buf.write('Ȯ\\x02ඎɴ\\x03\\x02\\x02\\x02ඏඐ\\x05їȬ')\n buf.write('\\x02ඐඑ\\x05нȟ\\x02එඒ\\x05љȭ')\n buf.write('\\x02ඒඓ\\x05ѝȯ\\x02ඓඔ\\x05ыȦ')\n buf.write('\\x02ඔඕ\\x05ћȮ\\x02ඕඖ\\x07a\\x02\\x02ඖ')\n buf.write('\\u0d97\\x05йȝ\\x02\\u0d97\\u0d98\\x05еț\\x02\\u0d98')\n buf.write('\\u0d99\\x05йȝ\\x02\\u0d99ක\\x05уȢ\\x02ක')\n buf.write('ඛ\\x05нȟ\\x02ඛɶ\\x03\\x02\\x02\\x02ගඝ')\n buf.write('\\x05їȬ\\x02ඝඞ\\x05нȟ\\x02ඞඟ')\n buf.write('\\x05ћȮ\\x02ඟච\\x05ѝȯ\\x02චඡ')\n buf.write('\\x05їȬ\\x02ඡජ\\x05яȨ\\x02ජɸ')\n buf.write('\\x03\\x02\\x02\\x02ඣඤ\\x05їȬ\\x02ඤඥ\\x05н')\n buf.write('ȟ\\x02ඥඦ\\x05ћȮ\\x02ඦට\\x05ѝ')\n buf.write('ȯ\\x02ටඨ\\x05їȬ\\x02ඨඩ\\x05я')\n buf.write('Ȩ\\x02ඩඪ\\x05хȣ\\x02ඪණ\\x05я')\n buf.write('Ȩ\\x02ණඬ\\x05сȡ\\x02ඬɺ\\x03\\x02\\x02')\n buf.write('\\x02තථ\\x05їȬ\\x02ථද\\x05нȟ')\n buf.write('\\x02දධ\\x05ѝȯ\\x02ධන\\x05љȭ')\n buf.write('\\x02න\\u0db2\\x05нȟ\\x02\\u0db2ɼ\\x03\\x02\\x02\\x02ඳ')\n buf.write('ප\\x05їȬ\\x02පඵ\\x05нȟ\\x02ඵ')\n buf.write('බ\\x05џȰ\\x02බභ\\x05нȟ\\x02භ')\n buf.write('ම\\x05їȬ\\x02මඹ\\x05љȭ\\x02ඹ')\n buf.write('ය\\x05нȟ\\x02යɾ\\x03\\x02\\x02\\x02ර\\u0dbc')\n buf.write('\\x05їȬ\\x02\\u0dbcල\\x05нȟ\\x02ල\\u0dbe')\n buf.write('\\x05џȰ\\x02\\u0dbe\\u0dbf\\x05ёȩ\\x02\\u0dbfව')\n buf.write('\\x05щȥ\\x02වශ\\x05нȟ\\x02ශʀ')\n buf.write('\\x03\\x02\\x02\\x02ෂස\\x05їȬ\\x02සහ\\x05х')\n buf.write('ȣ\\x02හළ\\x05сȡ\\x02ළෆ\\x05у')\n buf.write('Ȣ\\x02ෆ\\u0dc7\\x05ћȮ\\x02\\u0dc7ʂ\\x03\\x02\\x02')\n buf.write('\\x02\\u0dc8\\u0dc9\\x05їȬ\\x02\\u0dc9්\\x05ёȩ')\n buf.write('\\x02්\\u0dcb\\x05ыȦ\\x02\\u0dcb\\u0dcc\\x05ыȦ')\n 
buf.write('\\x02\\u0dcc\\u0dcd\\x05зȜ\\x02\\u0dcd\\u0dce\\x05еț')\n buf.write('\\x02\\u0dceා\\x05йȝ\\x02ාැ\\x05щȥ')\n buf.write('\\x02ැʄ\\x03\\x02\\x02\\x02ෑි\\x05їȬ\\x02ි')\n buf.write('ී\\x05ёȩ\\x02ීු\\x05ыȦ\\x02ු')\n buf.write('\\u0dd5\\x05ыȦ\\x02\\u0dd5ූ\\x05ѝȯ\\x02ූ')\n buf.write('\\u0dd7\\x05ѓȪ\\x02\\u0dd7ʆ\\x03\\x02\\x02\\x02ෘෙ')\n buf.write('\\x05їȬ\\x02ෙේ\\x05ёȩ\\x02ේෛ')\n buf.write('\\x05ѡȱ\\x02ෛʈ\\x03\\x02\\x02\\x02ොෝ\\x05ї')\n buf.write('Ȭ\\x02ෝෞ\\x05ёȩ\\x02ෞෟ\\x05ѡ')\n buf.write('ȱ\\x02ෟ\\u0de0\\x05хȣ\\x02\\u0de0\\u0de1\\x05л')\n buf.write('Ȟ\\x02\\u0de1ʊ\\x03\\x02\\x02\\x02\\u0de2\\u0de3\\x05їȬ')\n buf.write('\\x02\\u0de3\\u0de4\\x05ёȩ\\x02\\u0de4\\u0de5\\x05ѡȱ')\n buf.write('\\x02\\u0de5෦\\x05љȭ\\x02෦ʌ\\x03\\x02\\x02\\x02෧')\n buf.write('෨\\x05їȬ\\x02෨෩\\x05ѝȯ\\x02෩')\n buf.write('෪\\x05ыȦ\\x02෪෫\\x05нȟ\\x02෫')\n buf.write('෬\\x05љȭ\\x02෬ʎ\\x03\\x02\\x02\\x02෭෮')\n buf.write('\\x05љȭ\\x02෮෯\\x05еț\\x02෯\\u0df0')\n buf.write('\\x05эȧ\\x02\\u0df0\\u0df1\\x05ѓȪ\\x02\\u0df1ෲ')\n buf.write('\\x05ыȦ\\x02ෲෳ\\x05нȟ\\x02ෳʐ')\n buf.write('\\x03\\x02\\x02\\x02෴\\u0df5\\x05љȭ\\x02\\u0df5\\u0df6\\x05е')\n buf.write('ț\\x02\\u0df6\\u0df7\\x05џȰ\\x02\\u0df7\\u0df8\\x05н')\n buf.write('ȟ\\x02\\u0df8ʒ\\x03\\x02\\x02\\x02\\u0df9\\u0dfa\\x05љȭ')\n buf.write('\\x02\\u0dfa\\u0dfb\\x05еț\\x02\\u0dfb\\u0dfc\\x05џȰ')\n buf.write('\\x02\\u0dfc\\u0dfd\\x05нȟ\\x02\\u0dfd\\u0dfe\\x05ѓȪ')\n buf.write('\\x02\\u0dfe\\u0dff\\x05ёȩ\\x02\\u0dff\\u0e00\\x05хȣ')\n buf.write('\\x02\\u0e00ก\\x05яȨ\\x02กข\\x05ћȮ')\n buf.write('\\x02ขʔ\\x03\\x02\\x02\\x02ฃค\\x05љȭ\\x02ค')\n buf.write('ฅ\\x05йȝ\\x02ฅฆ\\x05уȢ\\x02ฆ')\n buf.write('ง\\x05нȟ\\x02งจ\\x05эȧ\\x02จ')\n buf.write('ฉ\\x05еț\\x02ฉʖ\\x03\\x02\\x02\\x02ชซ')\n buf.write('\\x05љȭ\\x02ซฌ\\x05йȝ\\x02ฌญ')\n buf.write('\\x05уȢ\\x02ญฎ\\x05нȟ\\x02ฎฏ')\n buf.write('\\x05эȧ\\x02ฏฐ\\x05еț\\x02ฐฑ')\n buf.write('\\x05йȝ\\x02ฑฒ\\x05уȢ\\x02ฒณ')\n buf.write('\\x05нȟ\\x02ณด\\x05йȝ\\x02ดต')\n buf.write('\\x05щȥ\\x02ตʘ\\x03\\x02\\x02\\x02ถท\\x05љ')\n 
buf.write('ȭ\\x02ทธ\\x05йȝ\\x02ธน\\x05я')\n buf.write('Ȩ\\x02นʚ\\x03\\x02\\x02\\x02บป\\x05љȭ')\n buf.write('\\x02ปผ\\x05нȟ\\x02ผฝ\\x05еț')\n buf.write('\\x02ฝพ\\x05їȬ\\x02พฟ\\x05йȝ')\n buf.write('\\x02ฟภ\\x05уȢ\\x02ภʜ\\x03\\x02\\x02\\x02ม')\n buf.write('ย\\x05љȭ\\x02ยร\\x05нȟ\\x02ร')\n buf.write('ฤ\\x05йȝ\\x02ฤล\\x05ёȩ\\x02ล')\n buf.write('ฦ\\x05яȨ\\x02ฦว\\x05лȞ\\x02ว')\n buf.write('ʞ\\x03\\x02\\x02\\x02ศษ\\x05љȭ\\x02ษส')\n buf.write('\\x05нȟ\\x02สห\\x05нȟ\\x02หฬ')\n buf.write('\\x05лȞ\\x02ฬʠ\\x03\\x02\\x02\\x02อฮ\\x05љ')\n buf.write('ȭ\\x02ฮฯ\\x05нȟ\\x02ฯะ\\x05с')\n buf.write('ȡ\\x02ะั\\x05эȧ\\x02ัา\\x05н')\n buf.write('ȟ\\x02าำ\\x05яȨ\\x02ำิ\\x05ћ')\n buf.write('Ȯ\\x02ิʢ\\x03\\x02\\x02\\x02ีึ\\x05љȭ')\n buf.write('\\x02ึื\\x05нȟ\\x02ืุ\\x05ыȦ')\n buf.write('\\x02ุู\\x05нȟ\\x02ฺู\\x05йȝ')\n buf.write('\\x02ฺ\\u0e3b\\x05ћȮ\\x02\\u0e3bʤ\\x03\\x02\\x02\\x02\\u0e3c')\n buf.write('\\u0e3d\\x05љȭ\\x02\\u0e3d\\u0e3e\\x05нȟ\\x02\\u0e3e')\n buf.write('฿\\x05ыȦ\\x02฿เ\\x05пȠ\\x02เ')\n buf.write('ʦ\\x03\\x02\\x02\\x02แโ\\x05љȭ\\x02โใ')\n buf.write('\\x05нȟ\\x02ใไ\\x05ѕȫ\\x02ไๅ')\n buf.write('\\x05ѝȯ\\x02ๅๆ\\x05нȟ\\x02ๆ็')\n buf.write('\\x05яȨ\\x02็่\\x05йȝ\\x02่้')\n buf.write('\\x05нȟ\\x02้ʨ\\x03\\x02\\x02\\x02๊๋\\x05љ')\n buf.write('ȭ\\x02๋์\\x05нȟ\\x02์ํ\\x05ѕ')\n buf.write('ȫ\\x02ํ๎\\x05ѝȯ\\x02๎๏\\x05н')\n buf.write('ȟ\\x02๏๐\\x05яȨ\\x02๐๑\\x05ћ')\n buf.write('Ȯ\\x02๑๒\\x05хȣ\\x02๒๓\\x05е')\n buf.write('ț\\x02๓๔\\x05ыȦ\\x02๔ʪ\\x03\\x02\\x02')\n buf.write('\\x02๕๖\\x05љȭ\\x02๖๗\\x05нȟ')\n buf.write('\\x02๗๘\\x05їȬ\\x02๘๙\\x05хȣ')\n buf.write('\\x02๙๚\\x05еț\\x02๚๛\\x05ыȦ')\n buf.write('\\x02๛\\u0e5c\\x05хȣ\\x02\\u0e5c\\u0e5d\\x05ѧȴ')\n buf.write('\\x02\\u0e5d\\u0e5e\\x05еț\\x02\\u0e5e\\u0e5f\\x05зȜ')\n buf.write('\\x02\\u0e5f\\u0e60\\x05ыȦ\\x02\\u0e60\\u0e61\\x05нȟ')\n buf.write('\\x02\\u0e61ʬ\\x03\\x02\\x02\\x02\\u0e62\\u0e63\\x05љȭ\\x02\\u0e63')\n buf.write('\\u0e64\\x05нȟ\\x02\\u0e64\\u0e65\\x05їȬ\\x02\\u0e65')\n 
buf.write('\\u0e66\\x05хȣ\\x02\\u0e66\\u0e67\\x05еț\\x02\\u0e67')\n buf.write('\\u0e68\\x05ыȦ\\x02\\u0e68\\u0e69\\x05ыȦ\\x02\\u0e69')\n buf.write('\\u0e6a\\x05ѥȳ\\x02\\u0e6a\\u0e6b\\x07a\\x02\\x02\\u0e6b\\u0e6c')\n buf.write('\\x05їȬ\\x02\\u0e6c\\u0e6d\\x05нȟ\\x02\\u0e6d\\u0e6e')\n buf.write('\\x05ѝȯ\\x02\\u0e6e\\u0e6f\\x05љȭ\\x02\\u0e6f\\u0e70')\n buf.write('\\x05еț\\x02\\u0e70\\u0e71\\x05зȜ\\x02\\u0e71\\u0e72')\n buf.write('\\x05ыȦ\\x02\\u0e72\\u0e73\\x05нȟ\\x02\\u0e73ʮ')\n buf.write('\\x03\\x02\\x02\\x02\\u0e74\\u0e75\\x05љȭ\\x02\\u0e75\\u0e76\\x05н')\n buf.write('ȟ\\x02\\u0e76\\u0e77\\x05їȬ\\x02\\u0e77\\u0e78\\x05џ')\n buf.write('Ȱ\\x02\\u0e78\\u0e79\\x05нȟ\\x02\\u0e79\\u0e7a\\x05ї')\n buf.write('Ȭ\\x02\\u0e7a\\u0e7b\\x05нȟ\\x02\\u0e7b\\u0e7c\\x05ї')\n buf.write('Ȭ\\x02\\u0e7c\\u0e7d\\x05їȬ\\x02\\u0e7d\\u0e7e\\x05ё')\n buf.write('ȩ\\x02\\u0e7e\\u0e7f\\x05їȬ\\x02\\u0e7fʰ\\x03\\x02\\x02')\n buf.write('\\x02\\u0e80ກ\\x05љȭ\\x02ກຂ\\x05нȟ')\n buf.write('\\x02ຂ\\u0e83\\x05љȭ\\x02\\u0e83ຄ\\x05љȭ')\n buf.write('\\x02ຄ\\u0e85\\x05хȣ\\x02\\u0e85ຆ\\x05ёȩ')\n buf.write('\\x02ຆງ\\x05яȨ\\x02ງຈ\\x05ћȮ')\n buf.write('\\x02ຈຉ\\x05хȣ\\x02ຉຊ\\x05эȧ')\n buf.write('\\x02ຊ\\u0e8b\\x05нȟ\\x02\\u0e8bຌ\\x05ѧȴ')\n buf.write('\\x02ຌຍ\\x05ёȩ\\x02ຍຎ\\x05яȨ')\n buf.write('\\x02ຎຏ\\x05нȟ\\x02ຏʲ\\x03\\x02\\x02\\x02ຐ')\n buf.write('ຑ\\x05љȭ\\x02ຑຒ\\x05нȟ\\x02ຒ')\n buf.write('ຓ\\x05ћȮ\\x02ຓʴ\\x03\\x02\\x02\\x02ດຕ')\n buf.write('\\x05љȭ\\x02ຕຖ\\x05нȟ\\x02ຖທ')\n buf.write('\\x05ћȮ\\x02ທຘ\\x05љȭ\\x02ຘʶ')\n buf.write('\\x03\\x02\\x02\\x02ນບ\\x05љȭ\\x02ບປ\\x05н')\n buf.write('ȟ\\x02ປຜ\\x05ћȮ\\x02ຜຝ\\x05ћ')\n buf.write('Ȯ\\x02ຝພ\\x05хȣ\\x02ພຟ\\x05я')\n buf.write('Ȩ\\x02ຟຠ\\x05сȡ\\x02ຠມ\\x05љ')\n buf.write('ȭ\\x02ມʸ\\x03\\x02\\x02\\x02ຢຣ\\x05љȭ')\n buf.write('\\x02ຣ\\u0ea4\\x05уȢ\\x02\\u0ea4ລ\\x05еț')\n buf.write('\\x02ລ\\u0ea6\\x05їȬ\\x02\\u0ea6ວ\\x05нȟ')\n buf.write('\\x02ວʺ\\x03\\x02\\x02\\x02ຨຩ\\x05љȭ\\x02ຩ')\n buf.write('ສ\\x05уȢ\\x02ສຫ\\x05ёȩ\\x02ຫ')\n 
buf.write('ຬ\\x05ѡȱ\\x02ຬʼ\\x03\\x02\\x02\\x02ອຮ')\n buf.write('\\x05љȭ\\x02ຮຯ\\x05уȢ\\x02ຯະ')\n buf.write('\\x05ѝȯ\\x02ະັ\\x05ћȮ\\x02ັາ')\n buf.write('\\x05лȞ\\x02າຳ\\x05ёȩ\\x02ຳິ')\n buf.write('\\x05ѡȱ\\x02ິີ\\x05яȨ\\x02ີʾ')\n buf.write('\\x03\\x02\\x02\\x02ຶື\\x05љȭ\\x02ືຸ\\x05х')\n buf.write('ȣ\\x02ຸູ\\x05зȜ\\x02຺ູ\\x05ы')\n buf.write('Ȧ\\x02຺ົ\\x05хȣ\\x02ົຼ\\x05я')\n buf.write('Ȩ\\x02ຼຽ\\x05сȡ\\x02ຽ\\u0ebe\\x05љ')\n buf.write('ȭ\\x02\\u0ebeˀ\\x03\\x02\\x02\\x02\\u0ebfເ\\x05љȭ')\n buf.write('\\x02ເແ\\x05хȣ\\x02ແໂ\\x05сȡ')\n buf.write('\\x02ໂໃ\\x05яȨ\\x02ໃໄ\\x05ћȮ')\n buf.write('\\x02ໄ\\u0ec5\\x05ѥȳ\\x02\\u0ec5ໆ\\x05ѓȪ')\n buf.write('\\x02ໆ\\u0ec7\\x05нȟ\\x02\\u0ec7˂\\x03\\x02\\x02\\x02່')\n buf.write('້\\x05љȭ\\x02້໊\\x05хȣ\\x02໊')\n buf.write('໋\\x05эȧ\\x02໋໌\\x05ѓȪ\\x02໌')\n buf.write('ໍ\\x05ыȦ\\x02ໍ\\u0ece\\x05нȟ\\x02\\u0ece')\n buf.write('\\u0ecf\\x07a\\x02\\x02\\u0ecf໐\\x05хȣ\\x02໐໑')\n buf.write('\\x05яȨ\\x02໑໒\\x05ћȮ\\x02໒໓')\n buf.write('\\x05нȟ\\x02໓໔\\x05сȡ\\x02໔໕')\n buf.write('\\x05нȟ\\x02໕໖\\x05їȬ\\x02໖˄')\n buf.write('\\x03\\x02\\x02\\x02໗໘\\x05љȭ\\x02໘໙\\x05х')\n buf.write('ȣ\\x02໙\\u0eda\\x05яȨ\\x02\\u0eda\\u0edb\\x05с')\n buf.write('ȡ\\x02\\u0edbໜ\\x05ыȦ\\x02ໜໝ\\x05н')\n buf.write('ȟ\\x02ໝˆ\\x03\\x02\\x02\\x02ໞໟ\\x05љȭ')\n buf.write('\\x02ໟ\\u0ee0\\x05хȣ\\x02\\u0ee0\\u0ee1\\x05ѧȴ')\n buf.write('\\x02\\u0ee1\\u0ee2\\x05нȟ\\x02\\u0ee2ˈ\\x03\\x02\\x02\\x02\\u0ee3')\n buf.write('\\u0ee4\\x05љȭ\\x02\\u0ee4\\u0ee5\\x05щȥ\\x02\\u0ee5')\n buf.write('\\u0ee6\\x05хȣ\\x02\\u0ee6\\u0ee7\\x05ѓȪ\\x02\\u0ee7')\n buf.write('ˊ\\x03\\x02\\x02\\x02\\u0ee8\\u0ee9\\x05љȭ\\x02\\u0ee9\\u0eea')\n buf.write('\\x05эȧ\\x02\\u0eea\\u0eeb\\x05еț\\x02\\u0eeb\\u0eec')\n buf.write('\\x05ыȦ\\x02\\u0eec\\u0eed\\x05ыȦ\\x02\\u0eed\\u0eee')\n buf.write('\\x05хȣ\\x02\\u0eee\\u0eef\\x05яȨ\\x02\\u0eef\\u0ef0')\n buf.write('\\x05ћȮ\\x02\\u0ef0ˌ\\x03\\x02\\x02\\x02\\u0ef1\\u0ef2\\x05љ')\n buf.write('ȭ\\x02\\u0ef2\\u0ef3\\x05яȨ\\x02\\u0ef3\\u0ef4\\x05е')\n 
buf.write('ț\\x02\\u0ef4\\u0ef5\\x05ѓȪ\\x02\\u0ef5\\u0ef6\\x05љ')\n buf.write('ȭ\\x02\\u0ef6\\u0ef7\\x05уȢ\\x02\\u0ef7\\u0ef8\\x05ё')\n buf.write('ȩ\\x02\\u0ef8\\u0ef9\\x05ћȮ\\x02\\u0ef9ˎ\\x03\\x02\\x02')\n buf.write('\\x02\\u0efa\\u0efb\\x05љȭ\\x02\\u0efb\\u0efc\\x05ёȩ')\n buf.write('\\x02\\u0efc\\u0efd\\x05эȧ\\x02\\u0efd\\u0efe\\x05нȟ')\n buf.write('\\x02\\u0efeː\\x03\\x02\\x02\\x02\\u0effༀ\\x05љȭ\\x02ༀ')\n buf.write('༁\\x05ѓȪ\\x02༁༂\\x05нȟ\\x02༂')\n buf.write('༃\\x05йȝ\\x02༃༄\\x05хȣ\\x02༄')\n buf.write('༅\\x05пȠ\\x02༅༆\\x05хȣ\\x02༆')\n buf.write('༇\\x05йȝ\\x02༇༈\\x05еț\\x02༈')\n buf.write('༉\\x05ћȮ\\x02༉༊\\x05хȣ\\x02༊')\n buf.write('་\\x05ёȩ\\x02་༌\\x05яȨ\\x02༌')\n buf.write('˒\\x03\\x02\\x02\\x02།༎\\x05љȭ\\x02༎༏')\n buf.write('\\x05ѕȫ\\x02༏༐\\x05ыȦ\\x02༐༑')\n buf.write('\\x05лȞ\\x02༑༒\\x05еț\\x02༒༓')\n buf.write('\\x05ћȮ\\x02༓༔\\x05еț\\x02༔˔')\n buf.write('\\x03\\x02\\x02\\x02༕༖\\x05љȭ\\x02༖༗\\x05ѕ')\n buf.write('ȫ\\x02༗༘\\x05ыȦ\\x02༘༙\\x05н')\n buf.write('ȟ\\x02༙༚\\x05їȬ\\x02༚༛\\x05ї')\n buf.write('Ȭ\\x02༛༜\\x05ёȩ\\x02༜༝\\x05ї')\n buf.write('Ȭ\\x02༝˖\\x03\\x02\\x02\\x02༞༟\\x05љȭ')\n buf.write('\\x02༟༠\\x05ћȮ\\x02༠༡\\x05еț')\n buf.write('\\x02༡༢\\x05яȨ\\x02༢༣\\x05лȞ')\n buf.write('\\x02༣༤\\x05еț\\x02༤༥\\x05ыȦ')\n buf.write('\\x02༥༦\\x05ёȩ\\x02༦༧\\x05яȨ')\n buf.write('\\x02༧༨\\x05нȟ\\x02༨˘\\x03\\x02\\x02\\x02༩')\n buf.write('༪\\x05љȭ\\x02༪༫\\x05ћȮ\\x02༫')\n buf.write('༬\\x05еț\\x02༬༭\\x05їȬ\\x02༭')\n buf.write('༮\\x05ћȮ\\x02༮˚\\x03\\x02\\x02\\x02༯༰')\n buf.write('\\x05љȭ\\x02༰༱\\x05ћȮ\\x02༱༲')\n buf.write('\\x05еț\\x02༲༳\\x05їȬ\\x02༳༴')\n buf.write('\\x05ћȮ\\x02༴༵\\x05ѝȯ\\x02༵༶')\n buf.write('\\x05ѓȪ\\x02༶˜\\x03\\x02\\x02\\x02༷༸\\x05љ')\n buf.write('ȭ\\x02༸༹\\x05ћȮ\\x02༹༺\\x05е')\n buf.write('ț\\x02༺༻\\x05ћȮ\\x02༻༼\\x05н')\n buf.write('ȟ\\x02༼༽\\x05эȧ\\x02༽༾\\x05н')\n buf.write('ȟ\\x02༾༿\\x05яȨ\\x02༿ཀ\\x05ћ')\n buf.write('Ȯ\\x02ཀ˞\\x03\\x02\\x02\\x02ཁག\\x05љȭ')\n buf.write('\\x02གགྷ\\x05ћȮ\\x02གྷང\\x05еț')\n buf.write('\\x02ངཅ\\x05ћȮ\\x02ཅཆ\\x05нȟ')\n 
buf.write('\\x02ཆཇ\\x05эȧ\\x02ཇ\\u0f48\\x05нȟ')\n buf.write('\\x02\\u0f48ཉ\\x05яȨ\\x02ཉཊ\\x05ћȮ')\n buf.write('\\x02ཊཋ\\x07a\\x02\\x02ཋཌ\\x05хȣ\\x02ཌ')\n buf.write('ཌྷ\\x05лȞ\\x02ཌྷˠ\\x03\\x02\\x02\\x02ཎཏ')\n buf.write('\\x05љȭ\\x02ཏཐ\\x05ћȮ\\x02ཐད')\n buf.write('\\x05еț\\x02དདྷ\\x05ћȮ\\x02དྷན')\n buf.write('\\x05хȣ\\x02ནཔ\\x05йȝ\\x02པˢ')\n buf.write('\\x03\\x02\\x02\\x02ཕབ\\x05љȭ\\x02བབྷ\\x05ћ')\n buf.write('Ȯ\\x02བྷམ\\x05еț\\x02མཙ\\x05ћ')\n buf.write('Ȯ\\x02ཙཚ\\x05хȣ\\x02ཚཛ\\x05љ')\n buf.write('ȭ\\x02ཛཛྷ\\x05ћȮ\\x02ཛྷཝ\\x05х')\n buf.write('ȣ\\x02ཝཞ\\x05йȝ\\x02ཞཟ\\x05љ')\n buf.write('ȭ\\x02ཟˤ\\x03\\x02\\x02\\x02འཡ\\x05љȭ')\n buf.write('\\x02ཡར\\x05ћȮ\\x02རལ\\x05їȬ')\n buf.write('\\x02ལཤ\\x05хȣ\\x02ཤཥ\\x05яȨ')\n buf.write('\\x02ཥས\\x05сȡ\\x02ས˦\\x03\\x02\\x02\\x02ཧ')\n buf.write('ཨ\\x05љȭ\\x02ཨཀྵ\\x05ѝȯ\\x02ཀྵ')\n buf.write('ཪ\\x05зȜ\\x02ཪཫ\\x05эȧ\\x02ཫ')\n buf.write('ཬ\\x05ѝȯ\\x02ཬ\\u0f6d\\x05ыȦ\\x02\\u0f6d')\n buf.write('\\u0f6e\\x05ћȮ\\x02\\u0f6e\\u0f6f\\x05хȣ\\x02\\u0f6f')\n buf.write('\\u0f70\\x05љȭ\\x02\\u0f70ཱ\\x05нȟ\\x02ཱ')\n buf.write('ི\\x05ћȮ\\x02ི˨\\x03\\x02\\x02\\x02ཱིུ')\n buf.write('\\x05љȭ\\x02ཱུུ\\x05ѝȯ\\x02ཱུྲྀ')\n buf.write('\\x05зȜ\\x02ྲྀཷ\\x05ѓȪ\\x02ཷླྀ')\n buf.write('\\x05еț\\x02ླྀཹ\\x05їȬ\\x02ཹེ')\n buf.write('\\x05ћȮ\\x02ེཻ\\x05хȣ\\x02ཻོ')\n buf.write('\\x05ћȮ\\x02ོཽ\\x05хȣ\\x02ཽཾ')\n buf.write('\\x05ёȩ\\x02ཾཿ\\x05яȨ\\x02ཿ˪')\n buf.write('\\x03\\x02\\x02\\x02ཱྀྀ\\x05љȭ\\x02ཱྀྂ\\x05ѝ')\n buf.write('ȯ\\x02ྂྃ\\x05зȜ\\x02྄ྃ\\x05љ')\n buf.write('ȭ\\x02྄྅\\x05ћȮ\\x02྅྆\\x05х')\n buf.write('ȣ\\x02྆྇\\x05ћȮ\\x02྇ྈ\\x05ѝ')\n buf.write('ȯ\\x02ྈྉ\\x05ћȮ\\x02ྉྊ\\x05е')\n buf.write('ț\\x02ྊྋ\\x05зȜ\\x02ྋྌ\\x05ы')\n buf.write('Ȧ\\x02ྌྍ\\x05нȟ\\x02ྍˬ\\x03\\x02\\x02')\n buf.write('\\x02ྎྏ\\x05љȭ\\x02ྏྐ\\x05ѝȯ')\n buf.write('\\x02ྐྑ\\x05зȜ\\x02ྑྒ\\x05ћȮ')\n buf.write('\\x02ྒྒྷ\\x05ѥȳ\\x02ྒྷྔ\\x05ѓȪ')\n buf.write('\\x02ྔྕ\\x05нȟ\\x02ྕˮ\\x03\\x02\\x02\\x02ྖ')\n buf.write('ྗ\\x05љȭ\\x02ྗ\\u0f98\\x05ѝȯ\\x02\\u0f98')\n buf.write('ྙ\\x05йȝ\\x02ྙྚ\\x05йȝ\\x02ྚ')\n 
buf.write('ྛ\\x05нȟ\\x02ྛྜ\\x05љȭ\\x02ྜ')\n buf.write('ྜྷ\\x05љȭ\\x02ྜྷ˰\\x03\\x02\\x02\\x02ྞྟ')\n buf.write('\\x05љȭ\\x02ྟྠ\\x05ѝȯ\\x02ྠྡ')\n buf.write('\\x05љȭ\\x02ྡྡྷ\\x05ѓȪ\\x02ྡྷྣ')\n buf.write('\\x05нȟ\\x02ྣྤ\\x05яȨ\\x02ྤྥ')\n buf.write('\\x05лȞ\\x02ྥ˲\\x03\\x02\\x02\\x02ྦྦྷ\\x05ћ')\n buf.write('Ȯ\\x02ྦྷྨ\\x05еț\\x02ྨྩ\\x05з')\n buf.write('Ȝ\\x02ྩྪ\\x05ыȦ\\x02ྪྫ\\x05н')\n buf.write('ȟ\\x02ྫ˴\\x03\\x02\\x02\\x02ྫྷྭ\\x05ћȮ')\n buf.write('\\x02ྭྮ\\x05уȢ\\x02ྮྯ\\x05нȟ')\n buf.write('\\x02ྯ˶\\x03\\x02\\x02\\x02ྰྱ\\x05ћȮ\\x02ྱ')\n buf.write('ྲ\\x05уȢ\\x02ྲླ\\x05нȟ\\x02ླ')\n buf.write('ྴ\\x05яȨ\\x02ྴ˸\\x03\\x02\\x02\\x02ྵྶ')\n buf.write('\\x05ћȮ\\x02ྶྷ\\x05хȣ\\x02ྷྸ')\n buf.write('\\x05эȧ\\x02ྸྐྵ\\x05нȟ\\x02ྐྵ˺')\n buf.write('\\x03\\x02\\x02\\x02ྺྻ\\x05ћȮ\\x02ྻྼ\\x05х')\n buf.write('ȣ\\x02ྼ\\u0fbd\\x05эȧ\\x02\\u0fbd྾\\x05н')\n buf.write('ȟ\\x02྾྿\\x05љȭ\\x02྿࿀\\x05ћ')\n buf.write('Ȯ\\x02࿀࿁\\x05еț\\x02࿁࿂\\x05э')\n buf.write('ȧ\\x02࿂࿃\\x05ѓȪ\\x02࿃˼\\x03\\x02\\x02')\n buf.write('\\x02࿄࿅\\x05ћȮ\\x02࿅࿆\\x05хȣ')\n buf.write('\\x02࿆࿇\\x05эȧ\\x02࿇࿈\\x05нȟ')\n buf.write('\\x02࿈࿉\\x05љȭ\\x02࿉࿊\\x05ћȮ')\n buf.write('\\x02࿊࿋\\x05еț\\x02࿋࿌\\x05эȧ')\n buf.write('\\x02࿌\\u0fcd\\x05ѓȪ\\x02\\u0fcd࿎\\x07a\\x02\\x02࿎')\n buf.write('࿏\\x05ыȦ\\x02࿏࿐\\x05ћȮ\\x02࿐')\n buf.write('࿑\\x05ѧȴ\\x02࿑࿒\\x07a\\x02\\x02࿒࿓')\n buf.write('\\x05ѝȯ\\x02࿓࿔\\x05яȨ\\x02࿔࿕')\n buf.write('\\x05йȝ\\x02࿕࿖\\x05ёȩ\\x02࿖࿗')\n buf.write('\\x05яȨ\\x02࿗࿘\\x05љȭ\\x02࿘࿙')\n buf.write('\\x05ћȮ\\x02࿙࿚\\x05їȬ\\x02࿚\\u0fdb')\n buf.write('\\x05еț\\x02\\u0fdb\\u0fdc\\x05хȣ\\x02\\u0fdc\\u0fdd')\n buf.write('\\x05яȨ\\x02\\u0fdd\\u0fde\\x05нȟ\\x02\\u0fde\\u0fdf')\n buf.write('\\x05лȞ\\x02\\u0fdf˾\\x03\\x02\\x02\\x02\\u0fe0\\u0fe1\\x05ћ')\n buf.write('Ȯ\\x02\\u0fe1\\u0fe2\\x05хȣ\\x02\\u0fe2\\u0fe3\\x05э')\n buf.write('ȧ\\x02\\u0fe3\\u0fe4\\x05нȟ\\x02\\u0fe4\\u0fe5\\x05љ')\n buf.write('ȭ\\x02\\u0fe5\\u0fe6\\x05ћȮ\\x02\\u0fe6\\u0fe7\\x05е')\n buf.write('ț\\x02\\u0fe7\\u0fe8\\x05эȧ\\x02\\u0fe8\\u0fe9\\x05ѓ')\n 
buf.write('Ȫ\\x02\\u0fe9\\u0fea\\x07a\\x02\\x02\\u0fea\\u0feb\\x05ћȮ')\n buf.write('\\x02\\u0feb\\u0fec\\x05ѧȴ\\x02\\u0fec\\u0fed\\x07a\\x02\\x02\\u0fed')\n buf.write('\\u0fee\\x05ѝȯ\\x02\\u0fee\\u0fef\\x05яȨ\\x02\\u0fef')\n buf.write('\\u0ff0\\x05йȝ\\x02\\u0ff0\\u0ff1\\x05ёȩ\\x02\\u0ff1')\n buf.write('\\u0ff2\\x05яȨ\\x02\\u0ff2\\u0ff3\\x05љȭ\\x02\\u0ff3')\n buf.write('\\u0ff4\\x05ћȮ\\x02\\u0ff4\\u0ff5\\x05їȬ\\x02\\u0ff5')\n buf.write('\\u0ff6\\x05еț\\x02\\u0ff6\\u0ff7\\x05хȣ\\x02\\u0ff7')\n buf.write('\\u0ff8\\x05яȨ\\x02\\u0ff8\\u0ff9\\x05нȟ\\x02\\u0ff9')\n buf.write('\\u0ffa\\x05лȞ\\x02\\u0ffà\\x03\\x02\\x02\\x02\\u0ffb\\u0ffc')\n buf.write('\\x05ћȮ\\x02\\u0ffc\\u0ffd\\x05хȣ\\x02\\u0ffd\\u0ffe')\n buf.write('\\x05эȧ\\x02\\u0ffe\\u0fff\\x05нȟ\\x02\\u0fffက')\n buf.write('\\x05љȭ\\x02ကခ\\x05ћȮ\\x02ခဂ')\n buf.write('\\x05еț\\x02ဂဃ\\x05эȧ\\x02ဃင')\n buf.write('\\x05ѓȪ\\x02ငစ\\x07a\\x02\\x02စဆ\\x05ѝ')\n buf.write('ȯ\\x02ဆဇ\\x05яȨ\\x02ဇဈ\\x05й')\n buf.write('ȝ\\x02ဈဉ\\x05ёȩ\\x02ဉည\\x05я')\n buf.write('Ȩ\\x02ညဋ\\x05љȭ\\x02ဋဌ\\x05ћ')\n buf.write('Ȯ\\x02ဌဍ\\x05їȬ\\x02ဍဎ\\x05е')\n buf.write('ț\\x02ဎဏ\\x05хȣ\\x02ဏတ\\x05я')\n buf.write('Ȩ\\x02တထ\\x05нȟ\\x02ထဒ\\x05л')\n buf.write('Ȟ\\x02ဒ̂\\x03\\x02\\x02\\x02ဓန\\x05ћȮ')\n buf.write('\\x02နပ\\x05хȣ\\x02ပဖ\\x05эȧ')\n buf.write('\\x02ဖဗ\\x05нȟ\\x02ဗဘ\\x05ѧȴ')\n buf.write('\\x02ဘမ\\x05ёȩ\\x02မယ\\x05яȨ')\n buf.write('\\x02ယရ\\x05нȟ\\x02ရလ\\x07a\\x02\\x02လ')\n buf.write('ဝ\\x05еț\\x02ဝသ\\x05зȜ\\x02သ')\n buf.write('ဟ\\x05зȜ\\x02ဟဠ\\x05їȬ\\x02ဠ')\n buf.write('̄\\x03\\x02\\x02\\x02အဢ\\x05ћȮ\\x02ဢဣ')\n buf.write('\\x05хȣ\\x02ဣဤ\\x05эȧ\\x02ဤဥ')\n buf.write('\\x05нȟ\\x02ဥဦ\\x05ѧȴ\\x02ဦဧ')\n buf.write('\\x05ёȩ\\x02ဧဨ\\x05яȨ\\x02ဨဩ')\n buf.write('\\x05нȟ\\x02ဩဪ\\x07a\\x02\\x02ဪါ\\x05у')\n buf.write('Ȣ\\x02ါာ\\x05ёȩ\\x02ာိ\\x05ѝ')\n buf.write('ȯ\\x02ိီ\\x05їȬ\\x02ီ̆\\x03\\x02\\x02')\n buf.write('\\x02ုူ\\x05ћȮ\\x02ူေ\\x05хȣ')\n buf.write('\\x02ေဲ\\x05эȧ\\x02ဲဳ\\x05нȟ')\n buf.write('\\x02ဳဴ\\x05ѧȴ\\x02ဴဵ\\x05ёȩ')\n 
buf.write('\\x02ဵံ\\x05яȨ\\x02ံ့\\x05нȟ')\n buf.write('\\x02့း\\x07a\\x02\\x02း္\\x05эȧ\\x02္')\n buf.write('်\\x05хȣ\\x02်ျ\\x05яȨ\\x02ျ')\n buf.write('ြ\\x05ѝȯ\\x02ြွ\\x05ћȮ\\x02ွ')\n buf.write('ှ\\x05нȟ\\x02ှ̈\\x03\\x02\\x02\\x02ဿ၀')\n buf.write('\\x05ћȮ\\x02၀၁\\x05хȣ\\x02၁၂')\n buf.write('\\x05эȧ\\x02၂၃\\x05нȟ\\x02၃၄')\n buf.write('\\x05ѧȴ\\x02၄၅\\x05ёȩ\\x02၅၆')\n buf.write('\\x05яȨ\\x02၆၇\\x05нȟ\\x02၇၈')\n buf.write('\\x07a\\x02\\x02၈၉\\x05їȬ\\x02၉၊\\x05н')\n buf.write('ȟ\\x02၊။\\x05сȡ\\x02။၌\\x05х')\n buf.write('ȣ\\x02၌၍\\x05ёȩ\\x02၍၎\\x05я')\n buf.write('Ȩ\\x02၎̊\\x03\\x02\\x02\\x02၏ၐ\\x05ћȮ')\n buf.write('\\x02ၐၑ\\x05ёȩ\\x02ၑ̌\\x03\\x02\\x02\\x02ၒ')\n buf.write('ၓ\\x05ћȮ\\x02ၓၔ\\x05їȬ\\x02ၔ')\n buf.write('ၕ\\x05еț\\x02ၕၖ\\x05хȣ\\x02ၖ')\n buf.write('ၗ\\x05ыȦ\\x02ၗၘ\\x05хȣ\\x02ၘ')\n buf.write('ၙ\\x05яȨ\\x02ၙၚ\\x05сȡ\\x02ၚ')\n buf.write('̎\\x03\\x02\\x02\\x02ၛၜ\\x05ћȮ\\x02ၜၝ')\n buf.write('\\x05їȬ\\x02ၝၞ\\x05еț\\x02ၞၟ')\n buf.write('\\x05яȨ\\x02ၟၠ\\x05љȭ\\x02ၠၡ')\n buf.write('\\x05еț\\x02ၡၢ\\x05йȝ\\x02ၢၣ')\n buf.write('\\x05ћȮ\\x02ၣၤ\\x05хȣ\\x02ၤၥ')\n buf.write('\\x05ёȩ\\x02ၥၦ\\x05яȨ\\x02ၦ̐')\n buf.write('\\x03\\x02\\x02\\x02ၧၨ\\x05ћȮ\\x02ၨၩ\\x05ї')\n buf.write('Ȭ\\x02ၩၪ\\x05еț\\x02ၪၫ\\x05я')\n buf.write('Ȩ\\x02ၫၬ\\x05љȭ\\x02ၬၭ\\x05ы')\n buf.write('Ȧ\\x02ၭၮ\\x05еț\\x02ၮၯ\\x05ћ')\n buf.write('Ȯ\\x02ၯၰ\\x05нȟ\\x02ၰ̒\\x03\\x02\\x02')\n buf.write('\\x02ၱၲ\\x05ћȮ\\x02ၲၳ\\x05їȬ')\n buf.write('\\x02ၳၴ\\x05нȟ\\x02ၴၵ\\x05еț')\n buf.write('\\x02ၵၶ\\x05ћȮ\\x02ၶ̔\\x03\\x02\\x02\\x02ၷ')\n buf.write('ၸ\\x05ћȮ\\x02ၸၹ\\x05їȬ\\x02ၹ')\n buf.write('ၺ\\x05хȣ\\x02ၺၻ\\x05сȡ\\x02ၻ')\n buf.write('ၼ\\x05сȡ\\x02ၼၽ\\x05нȟ\\x02ၽ')\n buf.write('ၾ\\x05їȬ\\x02ၾ̖\\x03\\x02\\x02\\x02ၿႀ')\n buf.write('\\x05ћȮ\\x02ႀႁ\\x05їȬ\\x02ႁႂ')\n buf.write('\\x05хȣ\\x02ႂႃ\\x05эȧ\\x02ႃ̘')\n buf.write('\\x03\\x02\\x02\\x02ႄႅ\\x05ћȮ\\x02ႅႆ\\x05ї')\n buf.write('Ȭ\\x02ႆႇ\\x05ѝȯ\\x02ႇႈ\\x05н')\n buf.write('ȟ\\x02ႈ̚\\x03\\x02\\x02\\x02ႉႊ\\x05ћȮ')\n buf.write('\\x02ႊႋ\\x05їȬ\\x02ႋႌ\\x05ѝȯ')\n 
buf.write('\\x02ႌႍ\\x05яȨ\\x02ႍႎ\\x05йȝ')\n buf.write('\\x02ႎႏ\\x05еț\\x02ႏ႐\\x05ћȮ')\n buf.write('\\x02႐႑\\x05нȟ\\x02႑̜\\x03\\x02\\x02\\x02႒')\n buf.write('႓\\x05ћȮ\\x02႓႔\\x05ѥȳ\\x02႔')\n buf.write('႕\\x05ѓȪ\\x02႕႖\\x05нȟ\\x02႖')\n buf.write('̞\\x03\\x02\\x02\\x02႗႘\\x05ѝȯ\\x02႘႙')\n buf.write('\\x05яȨ\\x02႙ႚ\\x05зȜ\\x02ႚႛ')\n buf.write('\\x05ёȩ\\x02ႛႜ\\x05ѝȯ\\x02ႜႝ')\n buf.write('\\x05яȨ\\x02ႝ႞\\x05лȞ\\x02႞႟')\n buf.write('\\x05нȟ\\x02႟Ⴀ\\x05лȞ\\x02Ⴀ̠')\n buf.write('\\x03\\x02\\x02\\x02ႡႢ\\x05ѝȯ\\x02ႢႣ\\x05я')\n buf.write('Ȩ\\x02ႣႤ\\x05лȞ\\x02ႤႥ\\x05н')\n buf.write('ȟ\\x02ႥႦ\\x05їȬ\\x02Ⴆ̢\\x03\\x02\\x02')\n buf.write('\\x02ႧႨ\\x05ѝȯ\\x02ႨႩ\\x05яȨ')\n buf.write('\\x02ႩႪ\\x05хȣ\\x02ႪႫ\\x05ёȩ')\n buf.write('\\x02ႫႬ\\x05яȨ\\x02Ⴌ̤\\x03\\x02\\x02\\x02Ⴍ')\n buf.write('Ⴎ\\x05ѝȯ\\x02ႮႯ\\x05яȨ\\x02Ⴏ')\n buf.write('Ⴐ\\x05хȣ\\x02ႰႱ\\x05ѕȫ\\x02Ⴑ')\n buf.write('Ⴒ\\x05ѝȯ\\x02ႲႳ\\x05нȟ\\x02Ⴓ')\n buf.write('̦\\x03\\x02\\x02\\x02ႴႵ\\x05ѝȯ\\x02ႵႶ')\n buf.write('\\x05яȨ\\x02ႶႷ\\x05ыȦ\\x02ႷႸ')\n buf.write('\\x05хȣ\\x02ႸႹ\\x05эȧ\\x02ႹႺ')\n buf.write('\\x05хȣ\\x02ႺႻ\\x05ћȮ\\x02ႻႼ')\n buf.write('\\x05нȟ\\x02ႼႽ\\x05лȞ\\x02Ⴝ̨')\n buf.write('\\x03\\x02\\x02\\x02ႾႿ\\x05ѝȯ\\x02ႿჀ\\x05я')\n buf.write('Ȩ\\x02ჀჁ\\x05ѓȪ\\x02ჁჂ\\x05х')\n buf.write('ȣ\\x02ჂჃ\\x05џȰ\\x02ჃჄ\\x05ё')\n buf.write('ȩ\\x02ჄჅ\\x05ћȮ\\x02Ⴥ̪\\x03\\x02\\x02')\n buf.write('\\x02\\u10c6Ⴧ\\x05ѝȯ\\x02Ⴧ\\u10c8\\x05яȨ')\n buf.write('\\x02\\u10c8\\u10c9\\x05ћȮ\\x02\\u10c9\\u10ca\\x05хȣ')\n buf.write('\\x02\\u10ca\\u10cb\\x05ыȦ\\x02\\u10cb̬\\x03\\x02\\x02\\x02\\u10cc')\n buf.write('Ⴭ\\x05ѝȯ\\x02Ⴭ\\u10ce\\x05ѓȪ\\x02\\u10ce')\n buf.write('\\u10cf\\x05лȞ\\x02\\u10cfა\\x05еț\\x02ა')\n buf.write('ბ\\x05ћȮ\\x02ბგ\\x05нȟ\\x02გ')\n buf.write('̮\\x03\\x02\\x02\\x02დე\\x05ѝȯ\\x02ევ')\n buf.write('\\x05ѓȪ\\x02ვზ\\x05лȞ\\x02ზთ')\n buf.write('\\x05еț\\x02თი\\x05ћȮ\\x02იკ')\n buf.write('\\x05нȟ\\x02კლ\\x05лȞ\\x02ლ̰')\n buf.write('\\x03\\x02\\x02\\x02მნ\\x05ѝȯ\\x02ნო\\x05ѓ')\n buf.write('Ȫ\\x02ოპ\\x05љȭ\\x02პჟ\\x05н')\n 
buf.write('ȟ\\x02ჟრ\\x05їȬ\\x02რს\\x05ћ')\n buf.write('Ȯ\\x02ს̲\\x03\\x02\\x02\\x02ტუ\\x05ѝȯ')\n buf.write('\\x02უფ\\x05їȬ\\x02ფქ\\x05ёȩ')\n buf.write('\\x02ქღ\\x05ѡȱ\\x02ღყ\\x05хȣ')\n buf.write('\\x02ყშ\\x05лȞ\\x02შ̴\\x03\\x02\\x02\\x02ჩ')\n buf.write('ც\\x05ѝȯ\\x02ცძ\\x05љȭ\\x02ძ')\n buf.write('წ\\x05нȟ\\x02წ̶\\x03\\x02\\x02\\x02ჭხ')\n buf.write('\\x05ѝȯ\\x02ხჯ\\x05љȭ\\x02ჯჰ')\n buf.write('\\x05хȣ\\x02ჰჱ\\x05яȨ\\x02ჱჲ')\n buf.write('\\x05сȡ\\x02ჲ̸\\x03\\x02\\x02\\x02ჳჴ\\x05џ')\n buf.write('Ȱ\\x02ჴჵ\\x05еț\\x02ჵჶ\\x05ы')\n buf.write('Ȧ\\x02ჶჷ\\x05хȣ\\x02ჷჸ\\x05л')\n buf.write('Ȟ\\x02ჸჹ\\x05еț\\x02ჹჺ\\x05ћ')\n buf.write('Ȯ\\x02ჺ჻\\x05нȟ\\x02჻̺\\x03\\x02\\x02')\n buf.write('\\x02ჼჽ\\x05џȰ\\x02ჽჾ\\x05еț')\n buf.write('\\x02ჾჿ\\x05ыȦ\\x02ჿᄀ\\x05ѝȯ')\n buf.write('\\x02ᄀᄁ\\x05нȟ\\x02ᄁ̼\\x03\\x02\\x02\\x02ᄂ')\n buf.write('ᄃ\\x05џȰ\\x02ᄃᄄ\\x05еț\\x02ᄄ')\n buf.write('ᄅ\\x05ыȦ\\x02ᄅᄆ\\x05ѝȯ\\x02ᄆ')\n buf.write('ᄇ\\x05нȟ\\x02ᄇᄈ\\x05љȭ\\x02ᄈ')\n buf.write('̾\\x03\\x02\\x02\\x02ᄉᄊ\\x05џȰ\\x02ᄊᄋ')\n buf.write('\\x05еț\\x02ᄋᄌ\\x05їȬ\\x02ᄌᄍ')\n buf.write('\\x05йȝ\\x02ᄍᄎ\\x05уȢ\\x02ᄎᄏ')\n buf.write('\\x05еț\\x02ᄏᄐ\\x05їȬ\\x02ᄐ̀')\n buf.write('\\x03\\x02\\x02\\x02ᄑᄒ\\x05џȰ\\x02ᄒᄓ\\x05е')\n buf.write('ț\\x02ᄓᄔ\\x05їȬ\\x02ᄔᄕ\\x05й')\n buf.write('ȝ\\x02ᄕᄖ\\x05уȢ\\x02ᄖᄗ\\x05е')\n buf.write('ț\\x02ᄗᄘ\\x05їȬ\\x02ᄘᄙ\\x074')\n buf.write('\\x02\\x02ᄙ͂\\x03\\x02\\x02\\x02ᄚᄛ\\x05џȰ\\x02ᄛ')\n buf.write('ᄜ\\x05еț\\x02ᄜᄝ\\x05їȬ\\x02ᄝ')\n buf.write('ᄞ\\x05хȣ\\x02ᄞᄟ\\x05еț\\x02ᄟ')\n buf.write('ᄠ\\x05зȜ\\x02ᄠᄡ\\x05ыȦ\\x02ᄡ')\n buf.write('ᄢ\\x05нȟ\\x02ᄢ̈́\\x03\\x02\\x02\\x02ᄣᄤ')\n buf.write('\\x05џȰ\\x02ᄤᄥ\\x05еț\\x02ᄥᄦ')\n buf.write('\\x05їȬ\\x02ᄦᄧ\\x05їȬ\\x02ᄧᄨ')\n buf.write('\\x05еț\\x02ᄨᄩ\\x05ѥȳ\\x02ᄩ͆')\n buf.write('\\x03\\x02\\x02\\x02ᄪᄫ\\x05џȰ\\x02ᄫᄬ\\x05е')\n buf.write('ț\\x02ᄬᄭ\\x05їȬ\\x02ᄭᄮ\\x05ѥ')\n buf.write('ȳ\\x02ᄮᄯ\\x05хȣ\\x02ᄯᄰ\\x05я')\n buf.write('Ȩ\\x02ᄰᄱ\\x05сȡ\\x02ᄱ͈\\x03\\x02\\x02')\n buf.write('\\x02ᄲᄳ\\x05џȰ\\x02ᄳᄴ\\x05нȟ')\n buf.write('\\x02ᄴᄵ\\x05їȬ\\x02ᄵᄶ\\x05љȭ')\n 
buf.write('\\x02ᄶᄷ\\x05хȣ\\x02ᄷᄸ\\x05ёȩ')\n buf.write('\\x02ᄸᄹ\\x05яȨ\\x02ᄹ͊\\x03\\x02\\x02\\x02ᄺ')\n buf.write('ᄻ\\x05џȰ\\x02ᄻᄼ\\x05нȟ\\x02ᄼ')\n buf.write('ᄽ\\x05їȬ\\x02ᄽᄾ\\x05љȭ\\x02ᄾ')\n buf.write('ᄿ\\x05хȣ\\x02ᄿᅀ\\x05ёȩ\\x02ᅀ')\n buf.write('ᅁ\\x05яȨ\\x02ᅁᅂ\\x05љȭ\\x02ᅂ')\n buf.write('͌\\x03\\x02\\x02\\x02ᅃᅄ\\x05ѡȱ\\x02ᅄᅅ')\n buf.write('\\x05еț\\x02ᅅᅆ\\x05хȣ\\x02ᅆᅇ')\n buf.write('\\x05ћȮ\\x02ᅇ͎\\x03\\x02\\x02\\x02ᅈᅉ\\x05ѡ')\n buf.write('ȱ\\x02ᅉᅊ\\x05еț\\x02ᅊᅋ\\x05ї')\n buf.write('Ȭ\\x02ᅋᅌ\\x05яȨ\\x02ᅌᅍ\\x05х')\n buf.write('ȣ\\x02ᅍᅎ\\x05яȨ\\x02ᅎᅏ\\x05с')\n buf.write('ȡ\\x02ᅏ͐\\x03\\x02\\x02\\x02ᅐᅑ\\x05ѡȱ')\n buf.write('\\x02ᅑᅒ\\x05нȟ\\x02ᅒᅓ\\x05ыȦ')\n buf.write('\\x02ᅓᅔ\\x05ыȦ\\x02ᅔᅕ\\x05пȠ')\n buf.write('\\x02ᅕᅖ\\x05ёȩ\\x02ᅖᅗ\\x05їȬ')\n buf.write('\\x02ᅗᅘ\\x05эȧ\\x02ᅘᅙ\\x05нȟ')\n buf.write('\\x02ᅙᅚ\\x05лȞ\\x02ᅚ͒\\x03\\x02\\x02\\x02ᅛ')\n buf.write('ᅜ\\x05ѡȱ\\x02ᅜᅝ\\x05уȢ\\x02ᅝ')\n buf.write('ᅞ\\x05нȟ\\x02ᅞᅟ\\x05яȨ\\x02ᅟ')\n buf.write('͔\\x03\\x02\\x02\\x02ᅠᅡ\\x05ѡȱ\\x02ᅡᅢ')\n buf.write('\\x05уȢ\\x02ᅢᅣ\\x05нȟ\\x02ᅣᅤ')\n buf.write('\\x05яȨ\\x02ᅤᅥ\\x05нȟ\\x02ᅥᅦ')\n buf.write('\\x05џȰ\\x02ᅦᅧ\\x05нȟ\\x02ᅧᅨ')\n buf.write('\\x05їȬ\\x02ᅨ͖\\x03\\x02\\x02\\x02ᅩᅪ\\x05ѡ')\n buf.write('ȱ\\x02ᅪᅫ\\x05уȢ\\x02ᅫᅬ\\x05н')\n buf.write('ȟ\\x02ᅬᅭ\\x05їȬ\\x02ᅭᅮ\\x05н')\n buf.write('ȟ\\x02ᅮ͘\\x03\\x02\\x02\\x02ᅯᅰ\\x05ѡȱ')\n buf.write('\\x02ᅰᅱ\\x05уȢ\\x02ᅱᅲ\\x05хȣ')\n buf.write('\\x02ᅲᅳ\\x05ыȦ\\x02ᅳᅴ\\x05нȟ')\n buf.write('\\x02ᅴ͚\\x03\\x02\\x02\\x02ᅵᅶ\\x05ѡȱ\\x02ᅶ')\n buf.write('ᅷ\\x05хȣ\\x02ᅷᅸ\\x05ћȮ\\x02ᅸ')\n buf.write('ᅹ\\x05уȢ\\x02ᅹ͜\\x03\\x02\\x02\\x02ᅺᅻ')\n buf.write('\\x05ѡȱ\\x02ᅻᅼ\\x05хȣ\\x02ᅼᅽ')\n buf.write('\\x05ћȮ\\x02ᅽᅾ\\x05уȢ\\x02ᅾᅿ')\n buf.write('\\x05хȣ\\x02ᅿᆀ\\x05яȨ\\x02ᆀ͞')\n buf.write('\\x03\\x02\\x02\\x02ᆁᆂ\\x05ѡȱ\\x02ᆂᆃ\\x05ё')\n buf.write('ȩ\\x02ᆃᆄ\\x05їȬ\\x02ᆄᆅ\\x05щ')\n buf.write('ȥ\\x02ᆅ͠\\x03\\x02\\x02\\x02ᆆᆇ\\x05ѡȱ')\n buf.write('\\x02ᆇᆈ\\x05їȬ\\x02ᆈᆉ\\x05хȣ')\n buf.write('\\x02ᆉᆊ\\x05ћȮ\\x02ᆊᆋ\\x05нȟ')\n buf.write('\\x02ᆋ͢\\x03\\x02\\x02\\x02ᆌᆍ\\x05ѣȲ\\x02ᆍ')\n 
buf.write('ᆎ\\x05эȧ\\x02ᆎᆏ\\x05ыȦ\\x02ᆏ')\n buf.write('ͤ\\x03\\x02\\x02\\x02ᆐᆑ\\x05ѣȲ\\x02ᆑᆒ')\n buf.write('\\x05эȧ\\x02ᆒᆓ\\x05ыȦ\\x02ᆓᆔ')\n buf.write('\\x05еț\\x02ᆔᆕ\\x05сȡ\\x02ᆕᆖ')\n buf.write('\\x05сȡ\\x02ᆖͦ\\x03\\x02\\x02\\x02ᆗᆘ\\x05ѣ')\n buf.write('Ȳ\\x02ᆘᆙ\\x05эȧ\\x02ᆙᆚ\\x05ы')\n buf.write('Ȧ\\x02ᆚᆛ\\x05еț\\x02ᆛᆜ\\x05ћ')\n buf.write('Ȯ\\x02ᆜᆝ\\x05ћȮ\\x02ᆝᆞ\\x05ї')\n buf.write('Ȭ\\x02ᆞᆟ\\x05хȣ\\x02ᆟᆠ\\x05з')\n buf.write('Ȝ\\x02ᆠᆡ\\x05ѝȯ\\x02ᆡᆢ\\x05ћ')\n buf.write('Ȯ\\x02ᆢᆣ\\x05нȟ\\x02ᆣᆤ\\x05љ')\n buf.write('ȭ\\x02ᆤͨ\\x03\\x02\\x02\\x02ᆥᆦ\\x05ѣȲ')\n buf.write('\\x02ᆦᆧ\\x05эȧ\\x02ᆧᆨ\\x05ыȦ')\n buf.write('\\x02ᆨᆩ\\x05йȝ\\x02ᆩᆪ\\x05еț')\n buf.write('\\x02ᆪᆫ\\x05љȭ\\x02ᆫᆬ\\x05ћȮ')\n buf.write('\\x02ᆬͪ\\x03\\x02\\x02\\x02ᆭᆮ\\x05ѣȲ\\x02ᆮ')\n buf.write('ᆯ\\x05эȧ\\x02ᆯᆰ\\x05ыȦ\\x02ᆰ')\n buf.write('ᆱ\\x05йȝ\\x02ᆱᆲ\\x05ёȩ\\x02ᆲ')\n buf.write('ᆳ\\x05ыȦ\\x02ᆳᆴ\\x05еț\\x02ᆴ')\n buf.write('ᆵ\\x05ћȮ\\x02ᆵᆶ\\x05ћȮ\\x02ᆶ')\n buf.write('ᆷ\\x05џȰ\\x02ᆷᆸ\\x05еț\\x02ᆸ')\n buf.write('ᆹ\\x05ыȦ\\x02ᆹͬ\\x03\\x02\\x02\\x02ᆺᆻ')\n buf.write('\\x05ѣȲ\\x02ᆻᆼ\\x05эȧ\\x02ᆼᆽ')\n buf.write('\\x05ыȦ\\x02ᆽᆾ\\x05нȟ\\x02ᆾᆿ')\n buf.write('\\x05ыȦ\\x02ᆿᇀ\\x05нȟ\\x02ᇀᇁ')\n buf.write('\\x05эȧ\\x02ᇁᇂ\\x05нȟ\\x02ᇂᇃ')\n buf.write('\\x05яȨ\\x02ᇃᇄ\\x05ћȮ\\x02ᇄͮ')\n buf.write('\\x03\\x02\\x02\\x02ᇅᇆ\\x05ѣȲ\\x02ᇆᇇ\\x05э')\n buf.write('ȧ\\x02ᇇᇈ\\x05ыȦ\\x02ᇈᇉ\\x05н')\n buf.write('ȟ\\x02ᇉᇊ\\x05ѣȲ\\x02ᇊᇋ\\x05х')\n buf.write('ȣ\\x02ᇋᇌ\\x05љȭ\\x02ᇌᇍ\\x05ћ')\n buf.write('Ȯ\\x02ᇍᇎ\\x05љȭ\\x02ᇎͰ\\x03\\x02\\x02')\n buf.write('\\x02ᇏᇐ\\x05ѣȲ\\x02ᇐᇑ\\x05эȧ')\n buf.write('\\x02ᇑᇒ\\x05ыȦ\\x02ᇒᇓ\\x05пȠ')\n buf.write('\\x02ᇓᇔ\\x05ёȩ\\x02ᇔᇕ\\x05їȬ')\n buf.write('\\x02ᇕᇖ\\x05нȟ\\x02ᇖᇗ\\x05љȭ')\n buf.write('\\x02ᇗᇘ\\x05ћȮ\\x02ᇘͲ\\x03\\x02\\x02\\x02ᇙ')\n buf.write('ᇚ\\x05ѣȲ\\x02ᇚᇛ\\x05эȧ\\x02ᇛ')\n buf.write('ᇜ\\x05ыȦ\\x02ᇜᇝ\\x05яȨ\\x02ᇝ')\n buf.write('ᇞ\\x05еț\\x02ᇞᇟ\\x05эȧ\\x02ᇟ')\n buf.write('ᇠ\\x05нȟ\\x02ᇠᇡ\\x05љȭ\\x02ᇡ')\n buf.write('ᇢ\\x05ѓȪ\\x02ᇢᇣ\\x05еț\\x02ᇣ')\n buf.write('ᇤ\\x05йȝ\\x02ᇤᇥ\\x05нȟ\\x02ᇥ')\n 
buf.write('ᇦ\\x05љȭ\\x02ᇦʹ\\x03\\x02\\x02\\x02ᇧᇨ')\n buf.write('\\x05ѣȲ\\x02ᇨᇩ\\x05эȧ\\x02ᇩᇪ')\n buf.write('\\x05ыȦ\\x02ᇪᇫ\\x05ѓȪ\\x02ᇫᇬ')\n buf.write('\\x05еț\\x02ᇬᇭ\\x05їȬ\\x02ᇭᇮ')\n buf.write('\\x05љȭ\\x02ᇮᇯ\\x05нȟ\\x02ᇯͶ')\n buf.write('\\x03\\x02\\x02\\x02ᇰᇱ\\x05ѣȲ\\x02ᇱᇲ\\x05э')\n buf.write('ȧ\\x02ᇲᇳ\\x05ыȦ\\x02ᇳᇴ\\x05ѓ')\n buf.write('Ȫ\\x02ᇴᇵ\\x05хȣ\\x02ᇵ\\u0378\\x03\\x02\\x02')\n buf.write('\\x02ᇶᇷ\\x05ѣȲ\\x02ᇷᇸ\\x05эȧ')\n buf.write('\\x02ᇸᇹ\\x05ыȦ\\x02ᇹᇺ\\x05ѕȫ')\n buf.write('\\x02ᇺᇻ\\x05ѝȯ\\x02ᇻᇼ\\x05нȟ')\n buf.write('\\x02ᇼᇽ\\x05їȬ\\x02ᇽᇾ\\x05ѥȳ')\n buf.write('\\x02ᇾͺ\\x03\\x02\\x02\\x02ᇿሀ\\x05ѣȲ\\x02ሀ')\n buf.write('ሁ\\x05эȧ\\x02ሁሂ\\x05ыȦ\\x02ሂ')\n buf.write('ሃ\\x05їȬ\\x02ሃሄ\\x05ёȩ\\x02ሄ')\n buf.write('ህ\\x05ёȩ\\x02ህሆ\\x05ћȮ\\x02ሆ')\n buf.write('ͼ\\x03\\x02\\x02\\x02ሇለ\\x05ѣȲ\\x02ለሉ')\n buf.write('\\x05эȧ\\x02ሉሊ\\x05ыȦ\\x02ሊላ')\n buf.write('\\x05љȭ\\x02ላሌ\\x05нȟ\\x02ሌል')\n buf.write('\\x05їȬ\\x02ልሎ\\x05хȣ\\x02ሎሏ')\n buf.write('\\x05еț\\x02ሏሐ\\x05ыȦ\\x02ሐሑ')\n buf.write('\\x05хȣ\\x02ሑሒ\\x05ѧȴ\\x02ሒሓ')\n buf.write('\\x05нȟ\\x02ሓ;\\x03\\x02\\x02\\x02ሔሕ\\x05ѣ')\n buf.write('Ȳ\\x02ሕሖ\\x05эȧ\\x02ሖሗ\\x05ы')\n buf.write('Ȧ\\x02ሗመ\\x05ћȮ\\x02መሙ\\x05е')\n buf.write('ț\\x02ሙሚ\\x05зȜ\\x02ሚማ\\x05ы')\n buf.write('Ȧ\\x02ማሜ\\x05нȟ\\x02ሜ\\u0380\\x03\\x02\\x02')\n buf.write('\\x02ምሞ\\x05ѥȳ\\x02ሞሟ\\x05нȟ')\n buf.write('\\x02ሟሠ\\x05еț\\x02ሠሡ\\x05їȬ')\n buf.write('\\x02ሡ\\u0382\\x03\\x02\\x02\\x02ሢሣ\\x05ѥȳ\\x02ሣ')\n buf.write('ሤ\\x05нȟ\\x02ሤሥ\\x05љȭ\\x02ሥ')\n buf.write('΄\\x03\\x02\\x02\\x02ሦሧ\\x05ѥȳ\\x02ሧረ')\n buf.write('\\x05эȧ\\x02ረሩ\\x05хȣ\\x02ሩሪ')\n buf.write('\\x05яȨ\\x02ሪራ\\x05ћȮ\\x02ራሬ')\n buf.write('\\x05нȟ\\x02ሬር\\x05їȬ\\x02ርሮ')\n buf.write('\\x05џȰ\\x02ሮሯ\\x05еț\\x02ሯሰ')\n buf.write('\\x05ыȦ\\x02ሰሱ\\x07a\\x02\\x02ሱሲ\\x05ѝ')\n buf.write('ȯ\\x02ሲሳ\\x05яȨ\\x02ሳሴ\\x05й')\n buf.write('ȝ\\x02ሴስ\\x05ёȩ\\x02ስሶ\\x05я')\n buf.write('Ȩ\\x02ሶሷ\\x05љȭ\\x02ሷሸ\\x05ћ')\n buf.write('Ȯ\\x02ሸሹ\\x05їȬ\\x02ሹሺ\\x05е')\n buf.write('ț\\x02ሺሻ\\x05хȣ\\x02ሻሼ\\x05я')\n 
buf.write('Ȩ\\x02ሼሽ\\x05нȟ\\x02ሽሾ\\x05л')\n buf.write('Ȟ\\x02ሾΆ\\x03\\x02\\x02\\x02ሿቀ\\x05ѧȴ')\n buf.write('\\x02ቀቁ\\x05ёȩ\\x02ቁቂ\\x05яȨ')\n buf.write('\\x02ቂቃ\\x05нȟ\\x02ቃΈ\\x03\\x02\\x02\\x02ቄ')\n buf.write('ቅ\\x05ѓȪ\\x02ቅቆ\\x05їȬ\\x02ቆ')\n buf.write('ቇ\\x05нȟ\\x02ቇቈ\\x05лȞ\\x02ቈ')\n buf.write('\\u1249\\x05хȣ\\x02\\u1249ቊ\\x05йȝ\\x02ቊ')\n buf.write('ቋ\\x05ћȮ\\x02ቋቌ\\x05хȣ\\x02ቌ')\n buf.write('ቍ\\x05ёȩ\\x02ቍ\\u124e\\x05яȨ\\x02\\u124e')\n buf.write('Ί\\x03\\x02\\x02\\x02\\u124fቐ\\x05ѓȪ\\x02ቐቑ')\n buf.write('\\x05їȬ\\x02ቑቒ\\x05нȟ\\x02ቒቓ')\n buf.write('\\x05лȞ\\x02ቓቔ\\x05хȣ\\x02ቔቕ')\n buf.write('\\x05йȝ\\x02ቕቖ\\x05ћȮ\\x02ቖ\\u1257')\n buf.write('\\x05хȣ\\x02\\u1257ቘ\\x05ёȩ\\x02ቘ\\u1259')\n buf.write('\\x05яȨ\\x02\\u1259ቚ\\x07a\\x02\\x02ቚቛ\\x05з')\n buf.write('Ȝ\\x02ቛቜ\\x05ёȩ\\x02ቜቝ\\x05ѝ')\n buf.write('ȯ\\x02ቝ\\u125e\\x05яȨ\\x02\\u125e\\u125f\\x05л')\n buf.write('Ȟ\\x02\\u125fበ\\x05љȭ\\x02በΌ\\x03\\x02\\x02')\n buf.write('\\x02ቡቢ\\x05ѓȪ\\x02ቢባ\\x05їȬ')\n buf.write('\\x02ባቤ\\x05нȟ\\x02ቤብ\\x05лȞ')\n buf.write('\\x02ብቦ\\x05хȣ\\x02ቦቧ\\x05йȝ')\n buf.write('\\x02ቧቨ\\x05ћȮ\\x02ቨቩ\\x05хȣ')\n buf.write('\\x02ቩቪ\\x05ёȩ\\x02ቪቫ\\x05яȨ')\n buf.write('\\x02ቫቬ\\x07a\\x02\\x02ቬቭ\\x05йȝ\\x02ቭ')\n buf.write('ቮ\\x05ёȩ\\x02ቮቯ\\x05љȭ\\x02ቯ')\n buf.write('ተ\\x05ћȮ\\x02ተΎ\\x03\\x02\\x02\\x02ቱቲ')\n buf.write('\\x05ѓȪ\\x02ቲታ\\x05їȬ\\x02ታቴ')\n buf.write('\\x05нȟ\\x02ቴት\\x05лȞ\\x02ትቶ')\n buf.write('\\x05хȣ\\x02ቶቷ\\x05йȝ\\x02ቷቸ')\n buf.write('\\x05ћȮ\\x02ቸቹ\\x05хȣ\\x02ቹቺ')\n buf.write('\\x05ёȩ\\x02ቺቻ\\x05яȨ\\x02ቻቼ')\n buf.write('\\x07a\\x02\\x02ቼች\\x05лȞ\\x02ችቾ\\x05н')\n buf.write('ȟ\\x02ቾቿ\\x05ћȮ\\x02ቿኀ\\x05е')\n buf.write('ț\\x02ኀኁ\\x05хȣ\\x02ኁኂ\\x05ы')\n buf.write('Ȧ\\x02ኂኃ\\x05љȭ\\x02ኃΐ\\x03\\x02\\x02')\n buf.write('\\x02ኄኅ\\x05ѓȪ\\x02ኅኆ\\x05їȬ')\n buf.write('\\x02ኆኇ\\x05нȟ\\x02ኇኈ\\x05лȞ')\n buf.write('\\x02ኈ\\u1289\\x05хȣ\\x02\\u1289ኊ\\x05йȝ')\n buf.write('\\x02ኊኋ\\x05ћȮ\\x02ኋኌ\\x05хȣ')\n buf.write('\\x02ኌኍ\\x05ёȩ\\x02ኍ\\u128e\\x05яȨ')\n 
buf.write('\\x02\\u128e\\u128f\\x07a\\x02\\x02\\u128fነ\\x05ѓȪ\\x02ነ')\n buf.write('ኑ\\x05їȬ\\x02ኑኒ\\x05ёȩ\\x02ኒ')\n buf.write('ና\\x05зȜ\\x02ናኔ\\x05еț\\x02ኔ')\n buf.write('ን\\x05зȜ\\x02ንኖ\\x05хȣ\\x02ኖ')\n buf.write('ኗ\\x05ыȦ\\x02ኗኘ\\x05хȣ\\x02ኘ')\n buf.write('ኙ\\x05ћȮ\\x02ኙኚ\\x05ѥȳ\\x02ኚ')\n buf.write('Β\\x03\\x02\\x02\\x02ኛኜ\\x05ѓȪ\\x02ኜኝ')\n buf.write('\\x05їȬ\\x02ኝኞ\\x05нȟ\\x02ኞኟ')\n buf.write('\\x05лȞ\\x02ኟአ\\x05хȣ\\x02አኡ')\n buf.write('\\x05йȝ\\x02ኡኢ\\x05ћȮ\\x02ኢኣ')\n buf.write('\\x05хȣ\\x02ኣኤ\\x05ёȩ\\x02ኤእ')\n buf.write('\\x05яȨ\\x02እኦ\\x07a\\x02\\x02ኦኧ\\x05љ')\n buf.write('ȭ\\x02ኧከ\\x05нȟ\\x02ከኩ\\x05ћ')\n buf.write('Ȯ\\x02ኩΔ\\x03\\x02\\x02\\x02ኪካ\\x05йȝ')\n buf.write('\\x02ካኬ\\x05ѝȯ\\x02ኬክ\\x05эȧ')\n buf.write('\\x02ክኮ\\x05нȟ\\x02ኮኯ\\x07a\\x02\\x02ኯ')\n buf.write('ኰ\\x05лȞ\\x02ኰ\\u12b1\\x05хȣ\\x02\\u12b1')\n buf.write('ኲ\\x05љȭ\\x02ኲኳ\\x05ћȮ\\x02ኳ')\n buf.write('Ζ\\x03\\x02\\x02\\x02ኴኵ\\x05лȞ\\x02ኵ\\u12b6')\n buf.write('\\x05нȟ\\x02\\u12b6\\u12b7\\x05яȨ\\x02\\u12b7ኸ')\n buf.write('\\x05љȭ\\x02ኸኹ\\x05нȟ\\x02ኹኺ')\n buf.write('\\x07a\\x02\\x02ኺኻ\\x05їȬ\\x02ኻኼ\\x05е')\n buf.write('ț\\x02ኼኽ\\x05яȨ\\x02ኽኾ\\x05щ')\n buf.write('ȥ\\x02ኾΘ\\x03\\x02\\x02\\x02\\u12bfዀ\\x05ыȦ')\n buf.write('\\x02ዀ\\u12c1\\x05хȣ\\x02\\u12c1ዂ\\x05љȭ')\n buf.write('\\x02ዂዃ\\x05ћȮ\\x02ዃዄ\\x05еț')\n buf.write('\\x02ዄዅ\\x05сȡ\\x02ዅ\\u12c6\\x05сȡ')\n buf.write('\\x02\\u12c6Κ\\x03\\x02\\x02\\x02\\u12c7ወ\\x05ѓȪ\\x02ወ')\n buf.write('ዉ\\x05нȟ\\x02ዉዊ\\x05їȬ\\x02ዊ')\n buf.write('ዋ\\x05йȝ\\x02ዋዌ\\x05нȟ\\x02ዌ')\n buf.write('ው\\x05яȨ\\x02ውዎ\\x05ћȮ\\x02ዎ')\n buf.write('ዏ\\x07a\\x02\\x02ዏዐ\\x05їȬ\\x02ዐዑ')\n buf.write('\\x05еț\\x02ዑዒ\\x05яȨ\\x02ዒዓ')\n buf.write('\\x05щȥ\\x02ዓΜ\\x03\\x02\\x02\\x02ዔዕ\\x05ѓ')\n buf.write('Ȫ\\x02ዕዖ\\x05нȟ\\x02ዖ\\u12d7\\x05ї')\n buf.write('Ȭ\\x02\\u12d7ዘ\\x05йȝ\\x02ዘዙ\\x05н')\n buf.write('ȟ\\x02ዙዚ\\x05яȨ\\x02ዚዛ\\x05ћ')\n buf.write('Ȯ\\x02ዛዜ\\x05хȣ\\x02ዜዝ\\x05ы')\n buf.write('Ȧ\\x02ዝዞ\\x05нȟ\\x02ዞዟ\\x07a\\x02')\n buf.write('\\x02ዟዠ\\x05йȝ\\x02ዠዡ\\x05ёȩ')\n 
buf.write('\\x02ዡዢ\\x05яȨ\\x02ዢዣ\\x05ћȮ')\n buf.write('\\x02ዣΞ\\x03\\x02\\x02\\x02ዤዥ\\x05ѓȪ\\x02ዥ')\n buf.write('ዦ\\x05нȟ\\x02ዦዧ\\x05їȬ\\x02ዧ')\n buf.write('የ\\x05йȝ\\x02የዩ\\x05нȟ\\x02ዩ')\n buf.write('ዪ\\x05яȨ\\x02ዪያ\\x05ћȮ\\x02ያ')\n buf.write('ዬ\\x05хȣ\\x02ዬይ\\x05ыȦ\\x02ይ')\n buf.write('ዮ\\x05нȟ\\x02ዮዯ\\x07a\\x02\\x02ዯደ')\n buf.write('\\x05лȞ\\x02ደዱ\\x05хȣ\\x02ዱዲ')\n buf.write('\\x05љȭ\\x02ዲዳ\\x05йȝ\\x02ዳΠ')\n buf.write('\\x03\\x02\\x02\\x02ዴድ\\x05їȬ\\x02ድዶ\\x05е')\n buf.write('ț\\x02ዶዷ\\x05яȨ\\x02ዷዸ\\x05щ')\n buf.write('ȥ\\x02ዸ\\u03a2\\x03\\x02\\x02\\x02ዹዺ\\x05еț')\n buf.write('\\x02ዺዻ\\x05џȰ\\x02ዻዼ\\x05сȡ')\n buf.write('\\x02ዼΤ\\x03\\x02\\x02\\x02ዽዾ\\x05йȝ\\x02ዾ')\n buf.write('ዿ\\x05ёȩ\\x02ዿጀ\\x05їȬ\\x02ጀ')\n buf.write('ጁ\\x05їȬ\\x02ጁΦ\\x03\\x02\\x02\\x02ጂጃ')\n buf.write('\\x05ыȦ\\x02ጃጄ\\x05еț\\x02ጄጅ')\n buf.write('\\x05сȡ\\x02ጅΨ\\x03\\x02\\x02\\x02ጆጇ\\x05ы')\n buf.write('Ȧ\\x02ጇገ\\x05нȟ\\x02ገጉ\\x05е')\n buf.write('ț\\x02ጉጊ\\x05лȞ\\x02ጊΪ\\x03\\x02\\x02')\n buf.write('\\x02ጋጌ\\x05эȧ\\x02ጌግ\\x05еț')\n buf.write('\\x02ግጎ\\x05ѣȲ\\x02ጎά\\x03\\x02\\x02\\x02ጏ')\n buf.write('ጐ\\x05эȧ\\x02ጐ\\u1311\\x05нȟ\\x02\\u1311')\n buf.write('ጒ\\x05лȞ\\x02ጒጓ\\x05хȣ\\x02ጓ')\n buf.write('ጔ\\x05еț\\x02ጔጕ\\x05яȨ\\x02ጕ')\n buf.write('ή\\x03\\x02\\x02\\x02\\u1316\\u1317\\x05эȧ\\x02\\u1317ጘ')\n buf.write('\\x05хȣ\\x02ጘጙ\\x05яȨ\\x02ጙΰ')\n buf.write('\\x03\\x02\\x02\\x02ጚጛ\\x05яȨ\\x02ጛጜ\\x05ћ')\n buf.write('Ȯ\\x02ጜጝ\\x05хȣ\\x02ጝጞ\\x05ы')\n buf.write('Ȧ\\x02ጞጟ\\x05нȟ\\x02ጟβ\\x03\\x02\\x02')\n buf.write('\\x02ጠጡ\\x05їȬ\\x02ጡጢ\\x05еț')\n buf.write('\\x02ጢጣ\\x05ћȮ\\x02ጣጤ\\x05хȣ')\n buf.write('\\x02ጤጥ\\x05ёȩ\\x02ጥጦ\\x07a\\x02\\x02ጦ')\n buf.write('ጧ\\x05ћȮ\\x02ጧጨ\\x05ёȩ\\x02ጨ')\n buf.write('ጩ\\x07a\\x02\\x02ጩጪ\\x05їȬ\\x02ጪጫ')\n buf.write('\\x05нȟ\\x02ጫጬ\\x05ѓȪ\\x02ጬጭ')\n buf.write('\\x05ёȩ\\x02ጭጮ\\x05їȬ\\x02ጮጯ')\n buf.write('\\x05ћȮ\\x02ጯδ\\x03\\x02\\x02\\x02ጰጱ\\x05ї')\n buf.write('Ȭ\\x02ጱጲ\\x05ёȩ\\x02ጲጳ\\x05ѡ')\n buf.write('ȱ\\x02ጳጴ\\x07a\\x02\\x02ጴጵ\\x05яȨ')\n 
buf.write('\\x02ጵጶ\\x05ѝȯ\\x02ጶጷ\\x05эȧ')\n buf.write('\\x02ጷጸ\\x05зȜ\\x02ጸጹ\\x05нȟ')\n buf.write('\\x02ጹጺ\\x05їȬ\\x02ጺζ\\x03\\x02\\x02\\x02ጻ')\n buf.write('ጼ\\x05љȭ\\x02ጼጽ\\x05ѝȯ\\x02ጽ')\n buf.write('ጾ\\x05эȧ\\x02ጾθ\\x03\\x02\\x02\\x02ጿፀ')\n buf.write('\\x05џȰ\\x02ፀፁ\\x05еț\\x02ፁፂ')\n buf.write('\\x05їȬ\\x02ፂፃ\\x05хȣ\\x02ፃፄ')\n buf.write('\\x05еț\\x02ፄፅ\\x05яȨ\\x02ፅፆ')\n buf.write('\\x05йȝ\\x02ፆፇ\\x05нȟ\\x02ፇκ')\n buf.write('\\x03\\x02\\x02\\x02ፈፉ\\x05їȬ\\x02ፉፊ\\x05н')\n buf.write('ȟ\\x02ፊፋ\\x05сȡ\\x02ፋፌ\\x05ї')\n buf.write('Ȭ\\x02ፌፍ\\x07a\\x02\\x02ፍμ\\x03\\x02\\x02\\x02ፎ')\n buf.write('ፏ\\x05љȭ\\x02ፏፐ\\x05ћȮ\\x02ፐ')\n buf.write('ፑ\\x05лȞ\\x02ፑፒ\\x05лȞ\\x02ፒ')\n buf.write('ፓ\\x05нȟ\\x02ፓፔ\\x05џȰ\\x02ፔ')\n buf.write('ξ\\x03\\x02\\x02\\x02ፕፖ\\x05џȰ\\x02ፖፗ')\n buf.write('\\x05еț\\x02ፗፘ\\x05їȬ\\x02ፘፙ')\n buf.write('\\x07a\\x02\\x02ፙπ\\x03\\x02\\x02\\x02ፚ\\u135b\\x05йȝ')\n buf.write('\\x02\\u135b\\u135c\\x05ёȩ\\x02\\u135c፝\\x05џȰ')\n buf.write('\\x02፝፞\\x05еț\\x02፞፟\\x05їȬ')\n buf.write('\\x02፟፠\\x07a\\x02\\x02፠ς\\x03\\x02\\x02\\x02፡።')\n buf.write('\\x05яȨ\\x02።፩\\x07)\\x02\\x02፣፨\\n\\x02\\x02')\n buf.write('\\x02፤፥\\x07)\\x02\\x02፥፨\\x07)\\x02\\x02፦፨\\x05')\n buf.write('Эȗ\\x02፧፣\\x03\\x02\\x02\\x02፧፤\\x03\\x02\\x02\\x02')\n buf.write('፧፦\\x03\\x02\\x02\\x02፨፫\\x03\\x02\\x02\\x02፩፧\\x03')\n buf.write('\\x02\\x02\\x02፩፪\\x03\\x02\\x02\\x02፪፬\\x03\\x02\\x02\\x02፫፩')\n buf.write('\\x03\\x02\\x02\\x02፬፭\\x07)\\x02\\x02፭τ\\x03\\x02\\x02\\x02፮')\n buf.write('፷\\x05зȜ\\x02፯፳\\x07)\\x02\\x02፰፲')\n buf.write('\\x0423\\x02፱፰\\x03\\x02\\x02\\x02፲፵\\x03\\x02\\x02\\x02፳')\n buf.write('፱\\x03\\x02\\x02\\x02፳፴\\x03\\x02\\x02\\x02፴፶\\x03\\x02\\x02\\x02')\n buf.write('፵፳\\x03\\x02\\x02\\x02፶፸\\x07)\\x02\\x02፷፯\\x03')\n buf.write('\\x02\\x02\\x02፸፹\\x03\\x02\\x02\\x02፹፷\\x03\\x02\\x02\\x02፹፺')\n buf.write('\\x03\\x02\\x02\\x02፺φ\\x03\\x02\\x02\\x02፻ᎄ\\x05ѣȲ')\n buf.write('\\x02፼ᎀ\\x07)\\x02\\x02\\u137d\\u137f\\t\\x03\\x02\\x02\\u137e\\u137d')\n buf.write(\n 
'\\x03\\x02\\x02\\x02\\u137fᎂ\\x03\\x02\\x02\\x02ᎀ\\u137e\\x03\\x02\\x02\\x02ᎀ')\n buf.write('ᎁ\\x03\\x02\\x02\\x02ᎁᎃ\\x03\\x02\\x02\\x02ᎂᎀ\\x03\\x02\\x02\\x02')\n buf.write('ᎃᎅ\\x07)\\x02\\x02ᎄ፼\\x03\\x02\\x02\\x02ᎅᎆ\\x03')\n buf.write('\\x02\\x02\\x02ᎆᎄ\\x03\\x02\\x02\\x02ᎆᎇ\\x03\\x02\\x02\\x02ᎇψ')\n buf.write('\\x03\\x02\\x02\\x02ᎈᎉ\\x070\\x02\\x02ᎉᎊ\\x070\\x02\\x02ᎊ')\n buf.write('ϊ\\x03\\x02\\x02\\x02ᎋᎌ\\x070\\x02\\x02ᎌό\\x03\\x02\\x02')\n buf.write('\\x02ᎍᎎ\\x05УȒ\\x02ᎎώ\\x03\\x02\\x02\\x02ᎏ')\n buf.write('᎘\\x05Хȓ\\x02᎐᎒\\t\\x04\\x02\\x02᎑᎓')\n buf.write('\\t\\x05\\x02\\x02᎒᎑\\x03\\x02\\x02\\x02᎒᎓\\x03\\x02\\x02\\x02᎓')\n buf.write('᎖\\x03\\x02\\x02\\x02᎔᎗\\x05Хȓ\\x02᎕᎗')\n buf.write('\\x05УȒ\\x02᎖᎔\\x03\\x02\\x02\\x02᎖᎕\\x03\\x02\\x02')\n buf.write('\\x02᎗᎙\\x03\\x02\\x02\\x02᎘᎐\\x03\\x02\\x02\\x02᎘᎙')\n buf.write('\\x03\\x02\\x02\\x02᎙\\u139c\\x03\\x02\\x02\\x02\\u139a\\u139d\\x05лȞ')\n buf.write(\n '\\x02\\u139b\\u139d\\x05пȠ\\x02\\u139c\\u139a\\x03\\x02\\x02\\x02\\u139c')\n buf.write(\n '\\u139b\\x03\\x02\\x02\\x02\\u139c\\u139d\\x03\\x02\\x02\\x02\\u139dϐ\\x03\\x02\\x02\\x02'\n )\n buf.write('\\u139eᎥ\\x07)\\x02\\x02\\u139fᎤ\\n\\x02\\x02\\x02ᎠᎡ\\x07')\n buf.write(')\\x02\\x02ᎡᎤ\\x07)\\x02\\x02ᎢᎤ\\x05Эȗ\\x02Ꭳ')\n buf.write('\\u139f\\x03\\x02\\x02\\x02ᎣᎠ\\x03\\x02\\x02\\x02ᎣᎢ\\x03\\x02\\x02\\x02')\n buf.write('ᎤᎧ\\x03\\x02\\x02\\x02ᎥᎣ\\x03\\x02\\x02\\x02ᎥᎦ\\x03')\n buf.write('\\x02\\x02\\x02ᎦᎨ\\x03\\x02\\x02\\x02ᎧᎥ\\x03\\x02\\x02\\x02ᎨᎩ')\n buf.write('\\x07)\\x02\\x02Ꭹϒ\\x03\\x02\\x02\\x02ᎪᎯ\\x05ѕȫ')\n buf.write('\\x02ᎫᎰ\\x05ϗǬ\\x02ᎬᎰ\\x05ϙǭ')\n buf.write('\\x02ᎭᎰ\\x05ϛǮ\\x02ᎮᎰ\\x05ϝǯ')\n buf.write('\\x02ᎯᎫ\\x03\\x02\\x02\\x02ᎯᎬ\\x03\\x02\\x02\\x02ᎯᎭ')\n buf.write('\\x03\\x02\\x02\\x02ᎯᎮ\\x03\\x02\\x02\\x02ᎰᎱ\\x03\\x02\\x02\\x02Ꮁ')\n buf.write('Ꮂ\\x08Ǫ\\x02\\x02Ꮂϔ\\x03\\x02\\x02\\x02ᎳᎴ\\x07)')\n buf.write('\\x02\\x02Ꮄϖ\\x03\\x02\\x02\\x02ᎵᎶ\\x05ϕǫ\\x02Ꮆ')\n buf.write('Ꮊ\\x07>\\x02\\x02ᎷᎹ\\x0b\\x02\\x02\\x02ᎸᎷ\\x03\\x02\\x02\\x02')\n 
buf.write('ᎹᎼ\\x03\\x02\\x02\\x02ᎺᎻ\\x03\\x02\\x02\\x02ᎺᎸ\\x03')\n buf.write('\\x02\\x02\\x02ᎻᎽ\\x03\\x02\\x02\\x02ᎼᎺ\\x03\\x02\\x02\\x02ᎽᎾ')\n buf.write('\\x07@\\x02\\x02ᎾᎿ\\x05ϕǫ\\x02ᎿϘ\\x03\\x02\\x02')\n buf.write('\\x02ᏀᏁ\\x05ϕǫ\\x02ᏁᏅ\\x07}\\x02\\x02Ꮒ')\n buf.write('Ꮔ\\x0b\\x02\\x02\\x02ᏃᏂ\\x03\\x02\\x02\\x02ᏄᏇ\\x03\\x02\\x02')\n buf.write('\\x02ᏅᏆ\\x03\\x02\\x02\\x02ᏅᏃ\\x03\\x02\\x02\\x02ᏆᏈ')\n buf.write('\\x03\\x02\\x02\\x02ᏇᏅ\\x03\\x02\\x02\\x02ᏈᏉ\\x07\\x7f\\x02\\x02Ꮙ')\n buf.write('Ꮚ\\x05ϕǫ\\x02ᏊϚ\\x03\\x02\\x02\\x02ᏋᏌ')\n buf.write('\\x05ϕǫ\\x02ᏌᏐ\\x07]\\x02\\x02ᏍᏏ\\x0b\\x02\\x02')\n buf.write('\\x02ᏎᏍ\\x03\\x02\\x02\\x02ᏏᏒ\\x03\\x02\\x02\\x02ᏐᏑ')\n buf.write('\\x03\\x02\\x02\\x02ᏐᏎ\\x03\\x02\\x02\\x02ᏑᏓ\\x03\\x02\\x02\\x02Ꮢ')\n buf.write('Ꮠ\\x03\\x02\\x02\\x02ᏓᏔ\\x07_\\x02\\x02ᏔᏕ\\x05ϕ')\n buf.write('ǫ\\x02ᏕϜ\\x03\\x02\\x02\\x02ᏖᏗ\\x05ϕǫ')\n buf.write('\\x02ᏗᏛ\\x07*\\x02\\x02ᏘᏚ\\x0b\\x02\\x02\\x02ᏙᏘ')\n buf.write('\\x03\\x02\\x02\\x02ᏚᏝ\\x03\\x02\\x02\\x02ᏛᏜ\\x03\\x02\\x02\\x02Ꮫ')\n buf.write('Ꮩ\\x03\\x02\\x02\\x02ᏜᏞ\\x03\\x02\\x02\\x02ᏝᏛ\\x03\\x02\\x02\\x02')\n buf.write('ᏞᏟ\\x07+\\x02\\x02ᏟᏠ\\x05ϕǫ\\x02Ꮰ')\n buf.write('Ϟ\\x03\\x02\\x02\\x02ᏡᏢ\\n\\x06\\x02\\x02ᏢϠ\\x03\\x02\\x02\\x02')\n buf.write('ᏣᏧ\\x07$\\x02\\x02ᏤᏨ\\n\\x07\\x02\\x02ᏥᏦ\\x07')\n buf.write('$\\x02\\x02ᏦᏨ\\x07$\\x02\\x02ᏧᏤ\\x03\\x02\\x02\\x02ᏧᏥ')\n buf.write('\\x03\\x02\\x02\\x02ᏨᏩ\\x03\\x02\\x02\\x02ᏩᏧ\\x03\\x02\\x02\\x02Ꮹ')\n buf.write('Ꮺ\\x03\\x02\\x02\\x02ᏪᏫ\\x03\\x02\\x02\\x02ᏫᏬ\\x07$\\x02\\x02')\n buf.write(\"ᏬϢ\\x03\\x02\\x02\\x02ᏭᏮ\\x07'\\x02\\x02ᏮϤ\\x03\")\n buf.write('\\x02\\x02\\x02ᏯᏰ\\x07(\\x02\\x02ᏰϦ\\x03\\x02\\x02\\x02ᏱᏲ')\n buf.write('\\x07*\\x02\\x02ᏲϨ\\x03\\x02\\x02\\x02ᏳᏴ\\x07+\\x02\\x02ᏴϪ')\n buf.write(\n '\\x03\\x02\\x02\\x02Ᏽ\\u13f6\\x07,\\x02\\x02\\u13f6\\u13f7\\x07,\\x02\\x02\\u13f7Ϭ'\n )\n buf.write('\\x03\\x02\\x02\\x02ᏸᏹ\\x07,\\x02\\x02ᏹϮ\\x03\\x02\\x02\\x02ᏺ')\n buf.write('ᏻ\\x07-\\x02\\x02ᏻϰ\\x03\\x02\\x02\\x02ᏼᏽ\\x07/\\x02\\x02ᏽ')\n buf.write(\n 
'ϲ\\x03\\x02\\x02\\x02\\u13fe\\u13ff\\x07.\\x02\\x02\\u13ffϴ\\x03\\x02\\x02\\x02'\n )\n buf.write('᐀ᐁ\\x071\\x02\\x02ᐁ϶\\x03\\x02\\x02\\x02ᐂᐃ')\n buf.write('\\x07B\\x02\\x02ᐃϸ\\x03\\x02\\x02\\x02ᐄᐅ\\x07<\\x02\\x02ᐅᐆ')\n buf.write('\\x07?\\x02\\x02ᐆϺ\\x03\\x02\\x02\\x02ᐇᐈ\\x07<\\x02\\x02ᐈᐍ')\n buf.write('\\x05Сȑ\\x02ᐉᐌ\\x05Сȑ\\x02ᐊᐌ')\n buf.write('\\t\\x08\\x02\\x02ᐋᐉ\\x03\\x02\\x02\\x02ᐋᐊ\\x03\\x02\\x02\\x02ᐌ')\n buf.write('ᐏ\\x03\\x02\\x02\\x02ᐍᐋ\\x03\\x02\\x02\\x02ᐍᐎ\\x03\\x02\\x02\\x02')\n buf.write('ᐎᐖ\\x03\\x02\\x02\\x02ᐏᐍ\\x03\\x02\\x02\\x02ᐐᐑ\\x07')\n buf.write('<\\x02\\x02ᐑᐖ\\x05ϡDZ\\x02ᐒᐓ\\x07<\\x02\\x02ᐓ')\n buf.write('ᐖ\\x05ύǧ\\x02ᐔᐖ\\x05Бȉ\\x02ᐕ')\n buf.write('ᐇ\\x03\\x02\\x02\\x02ᐕᐐ\\x03\\x02\\x02\\x02ᐕᐒ\\x03\\x02\\x02\\x02')\n buf.write('ᐕᐔ\\x03\\x02\\x02\\x02ᐖϼ\\x03\\x02\\x02\\x02ᐗᐘ\\x07')\n buf.write('<\\x02\\x02ᐘϾ\\x03\\x02\\x02\\x02ᐙᐚ\\x07=\\x02\\x02ᐚЀ')\n buf.write('\\x03\\x02\\x02\\x02ᐛᐜ\\x07>\\x02\\x02ᐜᐝ\\x07?\\x02\\x02ᐝЂ')\n buf.write('\\x03\\x02\\x02\\x02ᐞᐟ\\x07>\\x02\\x02ᐟЄ\\x03\\x02\\x02\\x02ᐠ')\n buf.write('ᐡ\\x07@\\x02\\x02ᐡᐢ\\x07?\\x02\\x02ᐢІ\\x03\\x02\\x02\\x02ᐣ')\n buf.write('ᐤ\\x07#\\x02\\x02ᐤᐬ\\x07?\\x02\\x02ᐥᐦ\\x07>\\x02\\x02ᐦ')\n buf.write('ᐬ\\x07@\\x02\\x02ᐧᐨ\\x07`\\x02\\x02ᐨᐬ\\x07?\\x02\\x02ᐩ')\n buf.write('ᐪ\\x07\\x80\\x02\\x02ᐪᐬ\\x07?\\x02\\x02ᐫᐣ\\x03\\x02')\n buf.write('\\x02\\x02ᐫᐥ\\x03\\x02\\x02\\x02ᐫᐧ\\x03\\x02\\x02\\x02ᐫᐩ')\n buf.write('\\x03\\x02\\x02\\x02ᐬЈ\\x03\\x02\\x02\\x02ᐭᐮ\\x07`\\x02\\x02ᐮ')\n buf.write('Њ\\x03\\x02\\x02\\x02ᐯᐰ\\x07\\x80\\x02\\x02ᐰЌ\\x03\\x02')\n buf.write('\\x02\\x02ᐱᐲ\\x07#\\x02\\x02ᐲЎ\\x03\\x02\\x02\\x02ᐳᐴ')\n buf.write('\\x07@\\x02\\x02ᐴА\\x03\\x02\\x02\\x02ᐵᐶ\\x07A\\x02\\x02ᐶВ')\n buf.write('\\x03\\x02\\x02\\x02ᐷᐸ\\x07~\\x02\\x02ᐸᐹ\\x07~\\x02\\x02ᐹД')\n buf.write('\\x03\\x02\\x02\\x02ᐺᐻ\\x07~\\x02\\x02ᐻЖ\\x03\\x02\\x02\\x02ᐼ')\n buf.write('ᐽ\\x07?\\x02\\x02ᐽИ\\x03\\x02\\x02\\x02ᐾᐿ\\x07]\\x02\\x02ᐿ')\n buf.write('К\\x03\\x02\\x02\\x02ᑀᑁ\\x07_\\x02\\x02ᑁМ\\x03\\x02\\x02\\x02')\n 
buf.write('ᑂᑃ\\x07a\\x02\\x02ᑃО\\x03\\x02\\x02\\x02ᑄᑆ\\t')\n buf.write('\\t\\x02\\x02ᑅᑄ\\x03\\x02\\x02\\x02ᑆᑇ\\x03\\x02\\x02\\x02ᑇᑅ')\n buf.write('\\x03\\x02\\x02\\x02ᑇᑈ\\x03\\x02\\x02\\x02ᑈᑉ\\x03\\x02\\x02\\x02ᑉ')\n buf.write('ᑊ\\x08Ȑ\\x03\\x02ᑊР\\x03\\x02\\x02\\x02ᑋᑌ\\t\\n')\n buf.write('\\x02\\x02ᑌТ\\x03\\x02\\x02\\x02ᑍᑏ\\x042;\\x02ᑎᑍ')\n buf.write('\\x03\\x02\\x02\\x02ᑏᑐ\\x03\\x02\\x02\\x02ᑐᑎ\\x03\\x02\\x02\\x02ᑐ')\n buf.write('ᑑ\\x03\\x02\\x02\\x02ᑑФ\\x03\\x02\\x02\\x02ᑒᑔ\\x05ύ')\n buf.write('ǧ\\x02ᑓᑒ\\x03\\x02\\x02\\x02ᑔᑗ\\x03\\x02\\x02\\x02ᑕ')\n buf.write('ᑓ\\x03\\x02\\x02\\x02ᑕᑖ\\x03\\x02\\x02\\x02ᑖᑙ\\x03\\x02\\x02\\x02')\n buf.write('ᑗᑕ\\x03\\x02\\x02\\x02ᑘᑚ\\x070\\x02\\x02ᑙᑘ')\n buf.write('\\x03\\x02\\x02\\x02ᑙᑚ\\x03\\x02\\x02\\x02ᑚᑜ\\x03\\x02\\x02\\x02ᑛ')\n buf.write('ᑝ\\x05ύǧ\\x02ᑜᑛ\\x03\\x02\\x02\\x02ᑝᑞ')\n buf.write('\\x03\\x02\\x02\\x02ᑞᑜ\\x03\\x02\\x02\\x02ᑞᑟ\\x03\\x02\\x02\\x02ᑟ')\n buf.write('Ц\\x03\\x02\\x02\\x02ᑠᑡ\\x07/\\x02\\x02ᑡᑢ\\x07/\\x02\\x02ᑢ')\n buf.write('ᑦ\\x03\\x02\\x02\\x02ᑣᑥ\\n\\x0b\\x02\\x02ᑤᑣ\\x03\\x02\\x02')\n buf.write('\\x02ᑥᑨ\\x03\\x02\\x02\\x02ᑦᑤ\\x03\\x02\\x02\\x02ᑦᑧ')\n buf.write('\\x03\\x02\\x02\\x02ᑧᑫ\\x03\\x02\\x02\\x02ᑨᑦ\\x03\\x02\\x02\\x02ᑩ')\n buf.write('ᑬ\\x05Эȗ\\x02ᑪᑬ\\x07\\x02\\x02\\x03ᑫᑩ')\n buf.write('\\x03\\x02\\x02\\x02ᑫᑪ\\x03\\x02\\x02\\x02ᑬᑭ\\x03\\x02\\x02\\x02ᑭ')\n buf.write('ᑮ\\x08Ȕ\\x04\\x02ᑮШ\\x03\\x02\\x02\\x02ᑯᑰ\\x071')\n buf.write('\\x02\\x02ᑰᑱ\\x07,\\x02\\x02ᑱᑵ\\x03\\x02\\x02\\x02ᑲᑴ')\n buf.write('\\x0b\\x02\\x02\\x02ᑳᑲ\\x03\\x02\\x02\\x02ᑴᑷ\\x03\\x02\\x02\\x02ᑵ')\n buf.write('ᑶ\\x03\\x02\\x02\\x02ᑵᑳ\\x03\\x02\\x02\\x02ᑶᑸ\\x03\\x02\\x02\\x02')\n buf.write('ᑷᑵ\\x03\\x02\\x02\\x02ᑸᑹ\\x07,\\x02\\x02ᑹᑺ\\x07')\n buf.write('1\\x02\\x02ᑺᑻ\\x03\\x02\\x02\\x02ᑻᑼ\\x08ȕ\\x04\\x02ᑼ')\n buf.write('Ъ\\x03\\x02\\x02\\x02ᑽᑾ\\x07r\\x02\\x02ᑾᑿ\\x07t\\x02\\x02ᑿ')\n buf.write('ᒀ\\x07q\\x02\\x02ᒀᒁ\\x07o\\x02\\x02ᒁᒂ\\x07r\\x02\\x02ᒂ')\n buf.write('ᒃ\\x07v\\x02\\x02ᒃᒄ\\x03\\x02\\x02\\x02ᒄᒈ\\x05Я')\n 
buf.write('Ș\\x02ᒅᒇ\\n\\x0b\\x02\\x02ᒆᒅ\\x03\\x02\\x02\\x02ᒇ')\n buf.write('ᒊ\\x03\\x02\\x02\\x02ᒈᒆ\\x03\\x02\\x02\\x02ᒈᒉ\\x03\\x02\\x02\\x02')\n buf.write('ᒉᒍ\\x03\\x02\\x02\\x02ᒊᒈ\\x03\\x02\\x02\\x02ᒋᒎ\\x05')\n buf.write('Эȗ\\x02ᒌᒎ\\x07\\x02\\x02\\x03ᒍᒋ\\x03\\x02\\x02\\x02')\n buf.write('ᒍᒌ\\x03\\x02\\x02\\x02ᒎЬ\\x03\\x02\\x02\\x02ᒏᒑ\\x07')\n buf.write('\\x0f\\x02\\x02ᒐᒏ\\x03\\x02\\x02\\x02ᒐᒑ\\x03\\x02\\x02\\x02ᒑ')\n buf.write('ᒒ\\x03\\x02\\x02\\x02ᒒᒓ\\x07\\x0c\\x02\\x02ᒓЮ\\x03\\x02\\x02\\x02')\n buf.write('ᒔᒕ\\t\\x0c\\x02\\x02ᒕа\\x03\\x02\\x02\\x02ᒖᒛ\\x05')\n buf.write('Сȑ\\x02ᒗᒚ\\x05Сȑ\\x02ᒘᒚ')\n buf.write('\\t\\r\\x02\\x02ᒙᒗ\\x03\\x02\\x02\\x02ᒙᒘ\\x03\\x02\\x02\\x02ᒚ')\n buf.write('ᒝ\\x03\\x02\\x02\\x02ᒛᒙ\\x03\\x02\\x02\\x02ᒛᒜ\\x03\\x02\\x02\\x02')\n buf.write('ᒜв\\x03\\x02\\x02\\x02ᒝᒛ\\x03\\x02\\x02\\x02ᒞᒟ\\x07')\n buf.write('B\\x02\\x02ᒟᒠ\\x07#\\x02\\x02ᒠᒡ\\x03\\x02\\x02\\x02ᒡᒢ')\n buf.write('\\x08Ț\\x04\\x02ᒢд\\x03\\x02\\x02\\x02ᒣᒤ\\t\\x0e\\x02\\x02')\n buf.write('ᒤж\\x03\\x02\\x02\\x02ᒥᒦ\\t\\x0f\\x02\\x02ᒦи')\n buf.write('\\x03\\x02\\x02\\x02ᒧᒨ\\t\\x10\\x02\\x02ᒨк\\x03\\x02\\x02\\x02ᒩ')\n buf.write('ᒪ\\t\\x11\\x02\\x02ᒪм\\x03\\x02\\x02\\x02ᒫᒬ\\t\\x04\\x02')\n buf.write('\\x02ᒬо\\x03\\x02\\x02\\x02ᒭᒮ\\t\\x12\\x02\\x02ᒮр')\n buf.write('\\x03\\x02\\x02\\x02ᒯᒰ\\t\\x13\\x02\\x02ᒰт\\x03\\x02\\x02\\x02ᒱ')\n buf.write('ᒲ\\t\\x14\\x02\\x02ᒲф\\x03\\x02\\x02\\x02ᒳᒴ\\t\\x15\\x02')\n buf.write('\\x02ᒴц\\x03\\x02\\x02\\x02ᒵᒶ\\t\\x16\\x02\\x02ᒶш')\n buf.write('\\x03\\x02\\x02\\x02ᒷᒸ\\t\\x17\\x02\\x02ᒸъ\\x03\\x02\\x02\\x02ᒹ')\n buf.write('ᒺ\\t\\x18\\x02\\x02ᒺь\\x03\\x02\\x02\\x02ᒻᒼ\\t\\x19\\x02')\n buf.write('\\x02ᒼю\\x03\\x02\\x02\\x02ᒽᒾ\\t\\x1a\\x02\\x02ᒾѐ')\n buf.write('\\x03\\x02\\x02\\x02ᒿᓀ\\t\\x1b\\x02\\x02ᓀђ\\x03\\x02\\x02\\x02ᓁ')\n buf.write('ᓂ\\t\\x1c\\x02\\x02ᓂє\\x03\\x02\\x02\\x02ᓃᓄ\\t\\x1d\\x02')\n buf.write('\\x02ᓄі\\x03\\x02\\x02\\x02ᓅᓆ\\t\\x1e\\x02\\x02ᓆј')\n buf.write('\\x03\\x02\\x02\\x02ᓇᓈ\\t\\x1f\\x02\\x02ᓈњ\\x03\\x02\\x02\\x02ᓉ')\n buf.write('ᓊ\\t 
\\x02\\x02ᓊќ\\x03\\x02\\x02\\x02ᓋᓌ\\t!\\x02\\x02ᓌ')\n buf.write('ў\\x03\\x02\\x02\\x02ᓍᓎ\\t\"\\x02\\x02ᓎѠ\\x03\\x02\\x02\\x02')\n buf.write('ᓏᓐ\\t#\\x02\\x02ᓐѢ\\x03\\x02\\x02\\x02ᓑᓒ\\t')\n buf.write('$\\x02\\x02ᓒѤ\\x03\\x02\\x02\\x02ᓓᓔ\\t%\\x02\\x02ᓔѦ')\n buf.write(\"\\x03\\x02\\x02\\x02ᓕᓖ\\t&\\x02\\x02ᓖѨ\\x03\\x02\\x02\\x02'\\x02፧\")\n buf.write('፩፳፹ᎀᎆ᎒᎖᎘\\u139c')\n buf.write('ᎣᎥᎯᎺᏅᏐᏛᏧᏩ')\n buf.write('ᐋᐍᐕᐫᑇᑐᑕᑙᑞ')\n buf.write('ᑦᑫᑵᒈᒍᒐᒙᒛ\\x05\\tǪ')\n buf.write('\\x02\\x08\\x02\\x02\\x02\\x03\\x02')\n return buf.getvalue()\n\n\nclass PlSqlLexer(Lexer):\n atn = ATNDeserializer().deserialize(serializedATN())\n decisionsToDFA = [DFA(ds, i) for i, ds in enumerate(atn.decisionToState)]\n T__0 = 1\n A_LETTER = 2\n ADD = 3\n AFTER = 4\n AGENT = 5\n AGGREGATE = 6\n ALL = 7\n ALTER = 8\n ANALYZE = 9\n AND = 10\n ANY = 11\n ARRAY = 12\n AS = 13\n ASSUME = 14\n ASSERT = 15\n ASC = 16\n ASSOCIATE = 17\n AT = 18\n ATTRIBUTE = 19\n AUDIT = 20\n AUTHID = 21\n AUTO = 22\n AUTOMATIC = 23\n AUTONOMOUS_TRANSACTION = 24\n BATCH = 25\n BEFORE = 26\n BEGIN = 27\n BETWEEN = 28\n BFILE = 29\n BINARY_DOUBLE = 30\n BINARY_FLOAT = 31\n BINARY_INTEGER = 32\n BLOB = 33\n BLOCK = 34\n BODY = 35\n BOOLEAN = 36\n BOTH = 37\n BREADTH = 38\n BULK = 39\n BY = 40\n BYTE = 41\n C_LETTER = 42\n CACHE = 43\n CALL = 44\n CANONICAL = 45\n CASCADE = 46\n CASE = 47\n CAST = 48\n CHAR = 49\n CHAR_CS = 50\n CHARACTER = 51\n CHECK = 52\n CHR = 53\n CLOB = 54\n CLOSE = 55\n CLUSTER = 56\n COLLECT = 57\n COLUMNS = 58\n COMMENT = 59\n COMMIT = 60\n COMMITTED = 61\n COMPATIBILITY = 62\n COMPILE = 63\n COMPOUND = 64\n CONNECT = 65\n CONNECT_BY_ROOT = 66\n CONSTANT = 67\n CONSTRAINT = 68\n CONSTRAINTS = 69\n CONSTRUCTOR = 70\n CONTENT = 71\n CONTEXT = 72\n CONTINUE = 73\n CONVERT = 74\n CORRUPT_XID = 75\n CORRUPT_XID_ALL = 76\n COST = 77\n COUNT = 78\n CREATE = 79\n CROSS = 80\n CUBE = 81\n CURRENT = 82\n CURRENT_USER = 83\n CURSOR = 84\n CUSTOMDATUM = 85\n CYCLE = 86\n DATA = 87\n DATABASE = 88\n DATE = 89\n DAY = 
90\n DB_ROLE_CHANGE = 91\n DBTIMEZONE = 92\n DDL = 93\n DEBUG = 94\n DEC = 95\n DECIMAL = 96\n DECLARE = 97\n DECOMPOSE = 98\n DECREMENT = 99\n DEFAULT = 100\n DEFAULTS = 101\n DEFERRED = 102\n DEFINER = 103\n DELETE = 104\n DEPTH = 105\n DESC = 106\n DETERMINISTIC = 107\n DIMENSION = 108\n DISABLE = 109\n DISASSOCIATE = 110\n DISTINCT = 111\n DOCUMENT = 112\n DOUBLE = 113\n DROP = 114\n DSINTERVAL_UNCONSTRAINED = 115\n EACH = 116\n ELEMENT = 117\n ELSE = 118\n ELSIF = 119\n EMPTY = 120\n ENABLE = 121\n ENCODING = 122\n END = 123\n ENTITYESCAPING = 124\n ERR = 125\n ERRORS = 126\n ESCAPE = 127\n EVALNAME = 128\n EXCEPT = 129\n EXCEPTION = 130\n EXCEPTION_INIT = 131\n EXCEPTIONS = 132\n EXCLUDE = 133\n EXCLUSIVE = 134\n EXECUTE = 135\n EXISTS = 136\n EXIT = 137\n EXPLAIN = 138\n EXTERNAL = 139\n EXTRACT = 140\n FAILURE = 141\n FALSE = 142\n FETCH = 143\n FINAL = 144\n FIRST = 145\n FIRST_VALUE = 146\n FLOAT = 147\n FOLLOWING = 148\n FOLLOWS = 149\n FOR = 150\n FORALL = 151\n FORCE = 152\n FROM = 153\n FULL = 154\n FUNCTION = 155\n GOTO = 156\n GRANT = 157\n GROUP = 158\n GROUPING = 159\n HASH = 160\n HAVING = 161\n HIDE = 162\n HOUR = 163\n IF = 164\n IGNORE = 165\n IMMEDIATE = 166\n IN = 167\n INCLUDE = 168\n INCLUDING = 169\n INCREMENT = 170\n INDENT = 171\n INDEX = 172\n INDEXED = 173\n INDICATOR = 174\n INDICES = 175\n INFINITE = 176\n INLINE = 177\n INNER = 178\n INOUT = 179\n INSERT = 180\n INSTANTIABLE = 181\n INSTEAD = 182\n INT = 183\n INTEGER = 184\n INTERSECT = 185\n INTERVAL = 186\n INTO = 187\n INVALIDATE = 188\n IS = 189\n ISOLATION = 190\n ITERATE = 191\n JAVA = 192\n JOIN = 193\n KEEP = 194\n LANGUAGE = 195\n LAST = 196\n LAST_VALUE = 197\n LEADING = 198\n LEFT = 199\n LEVEL = 200\n LIBRARY = 201\n LIKE = 202\n LIKE2 = 203\n LIKE4 = 204\n LIKEC = 205\n LIMIT = 206\n LOCAL = 207\n LOCK = 208\n LOCKED = 209\n LOG = 210\n LOGOFF = 211\n LOGON = 212\n LONG = 213\n LOOP = 214\n MAIN = 215\n MAP = 216\n MATCHED = 217\n MAXVALUE = 218\n MEASURES = 219\n 
MEMBER = 220\n MERGE = 221\n MINUS = 222\n MINUTE = 223\n MINVALUE = 224\n MLSLABEL = 225\n MODE = 226\n MODEL = 227\n MODIFY = 228\n MONTH = 229\n MULTISET = 230\n NAME = 231\n NAN = 232\n NATURAL = 233\n NATURALN = 234\n NAV = 235\n NCHAR = 236\n NCHAR_CS = 237\n NCLOB = 238\n NESTED = 239\n NEW = 240\n NO = 241\n NOAUDIT = 242\n NOCACHE = 243\n NOCOPY = 244\n NOCYCLE = 245\n NOENTITYESCAPING = 246\n NOMAXVALUE = 247\n NOMINVALUE = 248\n NONE = 249\n NOORDER = 250\n NOSCHEMACHECK = 251\n NOT = 252\n NOWAIT = 253\n NULL = 254\n NULLS = 255\n NUMBER = 256\n NUMERIC = 257\n NVARCHAR2 = 258\n OBJECT = 259\n OF = 260\n OFF = 261\n OID = 262\n OLD = 263\n ON = 264\n ONLY = 265\n OPEN = 266\n OPTION = 267\n OR = 268\n ORADATA = 269\n ORDER = 270\n ORDINALITY = 271\n OSERROR = 272\n OUT = 273\n OUTER = 274\n OVER = 275\n OVERRIDING = 276\n PACKAGE = 277\n PARALLEL_ENABLE = 278\n PARAMETERS = 279\n PARENT = 280\n PARTITION = 281\n PASSING = 282\n PATH = 283\n PERCENT_ROWTYPE = 284\n PERCENT_TYPE = 285\n PIPELINED = 286\n PIVOT = 287\n PLAN = 288\n PLS_INTEGER = 289\n POSITIVE = 290\n POSITIVEN = 291\n PRAGMA = 292\n PRECEDING = 293\n PRECISION = 294\n PRESENT = 295\n PRIOR = 296\n PROCEDURE = 297\n RAISE = 298\n RANGE = 299\n RAW = 300\n READ = 301\n REAL = 302\n RECORD = 303\n REF = 304\n REFERENCE = 305\n REFERENCING = 306\n REJECT = 307\n RELIES_ON = 308\n RENAME = 309\n REPLACE = 310\n RESPECT = 311\n RESTRICT_REFERENCES = 312\n RESULT = 313\n RESULT_CACHE = 314\n RETURN = 315\n RETURNING = 316\n REUSE = 317\n REVERSE = 318\n REVOKE = 319\n RIGHT = 320\n ROLLBACK = 321\n ROLLUP = 322\n ROW = 323\n ROWID = 324\n ROWS = 325\n RULES = 326\n SAMPLE = 327\n SAVE = 328\n SAVEPOINT = 329\n SCHEMA = 330\n SCHEMACHECK = 331\n SCN = 332\n SEARCH = 333\n SECOND = 334\n SEED = 335\n SEGMENT = 336\n SELECT = 337\n SELF = 338\n SEQUENCE = 339\n SEQUENTIAL = 340\n SERIALIZABLE = 341\n SERIALLY_REUSABLE = 342\n SERVERERROR = 343\n SESSIONTIMEZONE = 344\n SET = 345\n SETS = 346\n 
SETTINGS = 347\n SHARE = 348\n SHOW = 349\n SHUTDOWN = 350\n SIBLINGS = 351\n SIGNTYPE = 352\n SIMPLE_INTEGER = 353\n SINGLE = 354\n SIZE = 355\n SKIP_ = 356\n SMALLINT = 357\n SNAPSHOT = 358\n SOME = 359\n SPECIFICATION = 360\n SQLDATA = 361\n SQLERROR = 362\n STANDALONE = 363\n START = 364\n STARTUP = 365\n STATEMENT = 366\n STATEMENT_ID = 367\n STATIC = 368\n STATISTICS = 369\n STRING = 370\n SUBMULTISET = 371\n SUBPARTITION = 372\n SUBSTITUTABLE = 373\n SUBTYPE = 374\n SUCCESS = 375\n SUSPEND = 376\n TABLE = 377\n THE = 378\n THEN = 379\n TIME = 380\n TIMESTAMP = 381\n TIMESTAMP_LTZ_UNCONSTRAINED = 382\n TIMESTAMP_TZ_UNCONSTRAINED = 383\n TIMESTAMP_UNCONSTRAINED = 384\n TIMEZONE_ABBR = 385\n TIMEZONE_HOUR = 386\n TIMEZONE_MINUTE = 387\n TIMEZONE_REGION = 388\n TO = 389\n TRAILING = 390\n TRANSACTION = 391\n TRANSLATE = 392\n TREAT = 393\n TRIGGER = 394\n TRIM = 395\n TRUE = 396\n TRUNCATE = 397\n TYPE = 398\n UNBOUNDED = 399\n UNDER = 400\n UNION = 401\n UNIQUE = 402\n UNLIMITED = 403\n UNPIVOT = 404\n UNTIL = 405\n UPDATE = 406\n UPDATED = 407\n UPSERT = 408\n UROWID = 409\n USE = 410\n USING = 411\n VALIDATE = 412\n VALUE = 413\n VALUES = 414\n VARCHAR = 415\n VARCHAR2 = 416\n VARIABLE = 417\n VARRAY = 418\n VARYING = 419\n VERSION = 420\n VERSIONS = 421\n WAIT = 422\n WARNING = 423\n WELLFORMED = 424\n WHEN = 425\n WHENEVER = 426\n WHERE = 427\n WHILE = 428\n WITH = 429\n WITHIN = 430\n WORK = 431\n WRITE = 432\n XML = 433\n XMLAGG = 434\n XMLATTRIBUTES = 435\n XMLCAST = 436\n XMLCOLATTVAL = 437\n XMLELEMENT = 438\n XMLEXISTS = 439\n XMLFOREST = 440\n XMLNAMESPACES = 441\n XMLPARSE = 442\n XMLPI = 443\n XMLQUERY = 444\n XMLROOT = 445\n XMLSERIALIZE = 446\n XMLTABLE = 447\n YEAR = 448\n YES = 449\n YMINTERVAL_UNCONSTRAINED = 450\n ZONE = 451\n PREDICTION = 452\n PREDICTION_BOUNDS = 453\n PREDICTION_COST = 454\n PREDICTION_DETAILS = 455\n PREDICTION_PROBABILITY = 456\n PREDICTION_SET = 457\n CUME_DIST = 458\n DENSE_RANK = 459\n LISTAGG = 460\n PERCENT_RANK = 
461\n PERCENTILE_CONT = 462\n PERCENTILE_DISC = 463\n RANK = 464\n AVG = 465\n CORR = 466\n LAG = 467\n LEAD = 468\n MAX = 469\n MEDIAN = 470\n MIN = 471\n NTILE = 472\n RATIO_TO_REPORT = 473\n ROW_NUMBER = 474\n SUM = 475\n VARIANCE = 476\n REGR_ = 477\n STDDEV = 478\n VAR_ = 479\n COVAR_ = 480\n NATIONAL_CHAR_STRING_LIT = 481\n BIT_STRING_LIT = 482\n HEX_STRING_LIT = 483\n DOUBLE_PERIOD = 484\n PERIOD = 485\n UNSIGNED_INTEGER = 486\n APPROXIMATE_NUM_LIT = 487\n CHAR_STRING = 488\n DELIMITED_ID = 489\n PERCENT = 490\n AMPERSAND = 491\n LEFT_PAREN = 492\n RIGHT_PAREN = 493\n DOUBLE_ASTERISK = 494\n ASTERISK = 495\n PLUS_SIGN = 496\n MINUS_SIGN = 497\n COMMA = 498\n SOLIDUS = 499\n AT_SIGN = 500\n ASSIGN_OP = 501\n BINDVAR = 502\n COLON = 503\n SEMICOLON = 504\n LESS_THAN_OR_EQUALS_OP = 505\n LESS_THAN_OP = 506\n GREATER_THAN_OR_EQUALS_OP = 507\n NOT_EQUAL_OP = 508\n CARRET_OPERATOR_PART = 509\n TILDE_OPERATOR_PART = 510\n EXCLAMATION_OPERATOR_PART = 511\n GREATER_THAN_OP = 512\n CONCATENATION_OP = 513\n VERTICAL_BAR = 514\n EQUALS_OP = 515\n LEFT_BRACKET = 516\n RIGHT_BRACKET = 517\n INTRODUCER = 518\n SPACES = 519\n SINGLE_LINE_COMMENT = 520\n MULTI_LINE_COMMENT = 521\n PROMPT = 522\n REGULAR_ID = 523\n ZV = 524\n channelNames = [u'DEFAULT_TOKEN_CHANNEL', u'HIDDEN']\n modeNames = ['DEFAULT_MODE']\n literalNames = ['<INVALID>', \"'..'\", \"'.'\", \"'%'\", \"'&'\", \"'('\", \"')'\",\n \"'**'\", \"'*'\", \"'+'\", \"'-'\", \"','\", \"'/'\", \"'@'\", \"':='\", \"':'\",\n \"';'\", \"'<='\", \"'<'\", \"'>='\", \"'^'\", \"'~'\", \"'!'\", \"'>'\", \"'||'\",\n \"'|'\", \"'='\", \"'['\", \"']'\", \"'_'\", \"'@!'\"]\n symbolicNames = ['<INVALID>', 'A_LETTER', 'ADD', 'AFTER', 'AGENT',\n 'AGGREGATE', 'ALL', 'ALTER', 'ANALYZE', 'AND', 'ANY', 'ARRAY', 'AS',\n 'ASSUME', 'ASSERT', 'ASC', 'ASSOCIATE', 'AT', 'ATTRIBUTE', 'AUDIT',\n 'AUTHID', 'AUTO', 'AUTOMATIC', 'AUTONOMOUS_TRANSACTION', 'BATCH',\n 'BEFORE', 'BEGIN', 'BETWEEN', 'BFILE', 'BINARY_DOUBLE',\n 'BINARY_FLOAT', 
'BINARY_INTEGER', 'BLOB', 'BLOCK', 'BODY',\n 'BOOLEAN', 'BOTH', 'BREADTH', 'BULK', 'BY', 'BYTE', 'C_LETTER',\n 'CACHE', 'CALL', 'CANONICAL', 'CASCADE', 'CASE', 'CAST', 'CHAR',\n 'CHAR_CS', 'CHARACTER', 'CHECK', 'CHR', 'CLOB', 'CLOSE', 'CLUSTER',\n 'COLLECT', 'COLUMNS', 'COMMENT', 'COMMIT', 'COMMITTED',\n 'COMPATIBILITY', 'COMPILE', 'COMPOUND', 'CONNECT',\n 'CONNECT_BY_ROOT', 'CONSTANT', 'CONSTRAINT', 'CONSTRAINTS',\n 'CONSTRUCTOR', 'CONTENT', 'CONTEXT', 'CONTINUE', 'CONVERT',\n 'CORRUPT_XID', 'CORRUPT_XID_ALL', 'COST', 'COUNT', 'CREATE',\n 'CROSS', 'CUBE', 'CURRENT', 'CURRENT_USER', 'CURSOR', 'CUSTOMDATUM',\n 'CYCLE', 'DATA', 'DATABASE', 'DATE', 'DAY', 'DB_ROLE_CHANGE',\n 'DBTIMEZONE', 'DDL', 'DEBUG', 'DEC', 'DECIMAL', 'DECLARE',\n 'DECOMPOSE', 'DECREMENT', 'DEFAULT', 'DEFAULTS', 'DEFERRED',\n 'DEFINER', 'DELETE', 'DEPTH', 'DESC', 'DETERMINISTIC', 'DIMENSION',\n 'DISABLE', 'DISASSOCIATE', 'DISTINCT', 'DOCUMENT', 'DOUBLE', 'DROP',\n 'DSINTERVAL_UNCONSTRAINED', 'EACH', 'ELEMENT', 'ELSE', 'ELSIF',\n 'EMPTY', 'ENABLE', 'ENCODING', 'END', 'ENTITYESCAPING', 'ERR',\n 'ERRORS', 'ESCAPE', 'EVALNAME', 'EXCEPT', 'EXCEPTION',\n 'EXCEPTION_INIT', 'EXCEPTIONS', 'EXCLUDE', 'EXCLUSIVE', 'EXECUTE',\n 'EXISTS', 'EXIT', 'EXPLAIN', 'EXTERNAL', 'EXTRACT', 'FAILURE',\n 'FALSE', 'FETCH', 'FINAL', 'FIRST', 'FIRST_VALUE', 'FLOAT',\n 'FOLLOWING', 'FOLLOWS', 'FOR', 'FORALL', 'FORCE', 'FROM', 'FULL',\n 'FUNCTION', 'GOTO', 'GRANT', 'GROUP', 'GROUPING', 'HASH', 'HAVING',\n 'HIDE', 'HOUR', 'IF', 'IGNORE', 'IMMEDIATE', 'IN', 'INCLUDE',\n 'INCLUDING', 'INCREMENT', 'INDENT', 'INDEX', 'INDEXED', 'INDICATOR',\n 'INDICES', 'INFINITE', 'INLINE', 'INNER', 'INOUT', 'INSERT',\n 'INSTANTIABLE', 'INSTEAD', 'INT', 'INTEGER', 'INTERSECT',\n 'INTERVAL', 'INTO', 'INVALIDATE', 'IS', 'ISOLATION', 'ITERATE',\n 'JAVA', 'JOIN', 'KEEP', 'LANGUAGE', 'LAST', 'LAST_VALUE', 'LEADING',\n 'LEFT', 'LEVEL', 'LIBRARY', 'LIKE', 'LIKE2', 'LIKE4', 'LIKEC',\n 'LIMIT', 'LOCAL', 'LOCK', 'LOCKED', 'LOG', 'LOGOFF', 'LOGON',\n 'LONG', 
'LOOP', 'MAIN', 'MAP', 'MATCHED', 'MAXVALUE', 'MEASURES',\n 'MEMBER', 'MERGE', 'MINUS', 'MINUTE', 'MINVALUE', 'MLSLABEL',\n 'MODE', 'MODEL', 'MODIFY', 'MONTH', 'MULTISET', 'NAME', 'NAN',\n 'NATURAL', 'NATURALN', 'NAV', 'NCHAR', 'NCHAR_CS', 'NCLOB',\n 'NESTED', 'NEW', 'NO', 'NOAUDIT', 'NOCACHE', 'NOCOPY', 'NOCYCLE',\n 'NOENTITYESCAPING', 'NOMAXVALUE', 'NOMINVALUE', 'NONE', 'NOORDER',\n 'NOSCHEMACHECK', 'NOT', 'NOWAIT', 'NULL', 'NULLS', 'NUMBER',\n 'NUMERIC', 'NVARCHAR2', 'OBJECT', 'OF', 'OFF', 'OID', 'OLD', 'ON',\n 'ONLY', 'OPEN', 'OPTION', 'OR', 'ORADATA', 'ORDER', 'ORDINALITY',\n 'OSERROR', 'OUT', 'OUTER', 'OVER', 'OVERRIDING', 'PACKAGE',\n 'PARALLEL_ENABLE', 'PARAMETERS', 'PARENT', 'PARTITION', 'PASSING',\n 'PATH', 'PERCENT_ROWTYPE', 'PERCENT_TYPE', 'PIPELINED', 'PIVOT',\n 'PLAN', 'PLS_INTEGER', 'POSITIVE', 'POSITIVEN', 'PRAGMA',\n 'PRECEDING', 'PRECISION', 'PRESENT', 'PRIOR', 'PROCEDURE', 'RAISE',\n 'RANGE', 'RAW', 'READ', 'REAL', 'RECORD', 'REF', 'REFERENCE',\n 'REFERENCING', 'REJECT', 'RELIES_ON', 'RENAME', 'REPLACE',\n 'RESPECT', 'RESTRICT_REFERENCES', 'RESULT', 'RESULT_CACHE',\n 'RETURN', 'RETURNING', 'REUSE', 'REVERSE', 'REVOKE', 'RIGHT',\n 'ROLLBACK', 'ROLLUP', 'ROW', 'ROWID', 'ROWS', 'RULES', 'SAMPLE',\n 'SAVE', 'SAVEPOINT', 'SCHEMA', 'SCHEMACHECK', 'SCN', 'SEARCH',\n 'SECOND', 'SEED', 'SEGMENT', 'SELECT', 'SELF', 'SEQUENCE',\n 'SEQUENTIAL', 'SERIALIZABLE', 'SERIALLY_REUSABLE', 'SERVERERROR',\n 'SESSIONTIMEZONE', 'SET', 'SETS', 'SETTINGS', 'SHARE', 'SHOW',\n 'SHUTDOWN', 'SIBLINGS', 'SIGNTYPE', 'SIMPLE_INTEGER', 'SINGLE',\n 'SIZE', 'SKIP_', 'SMALLINT', 'SNAPSHOT', 'SOME', 'SPECIFICATION',\n 'SQLDATA', 'SQLERROR', 'STANDALONE', 'START', 'STARTUP',\n 'STATEMENT', 'STATEMENT_ID', 'STATIC', 'STATISTICS', 'STRING',\n 'SUBMULTISET', 'SUBPARTITION', 'SUBSTITUTABLE', 'SUBTYPE',\n 'SUCCESS', 'SUSPEND', 'TABLE', 'THE', 'THEN', 'TIME', 'TIMESTAMP',\n 'TIMESTAMP_LTZ_UNCONSTRAINED', 'TIMESTAMP_TZ_UNCONSTRAINED',\n 'TIMESTAMP_UNCONSTRAINED', 'TIMEZONE_ABBR', 
'TIMEZONE_HOUR',\n 'TIMEZONE_MINUTE', 'TIMEZONE_REGION', 'TO', 'TRAILING',\n 'TRANSACTION', 'TRANSLATE', 'TREAT', 'TRIGGER', 'TRIM', 'TRUE',\n 'TRUNCATE', 'TYPE', 'UNBOUNDED', 'UNDER', 'UNION', 'UNIQUE',\n 'UNLIMITED', 'UNPIVOT', 'UNTIL', 'UPDATE', 'UPDATED', 'UPSERT',\n 'UROWID', 'USE', 'USING', 'VALIDATE', 'VALUE', 'VALUES', 'VARCHAR',\n 'VARCHAR2', 'VARIABLE', 'VARRAY', 'VARYING', 'VERSION', 'VERSIONS',\n 'WAIT', 'WARNING', 'WELLFORMED', 'WHEN', 'WHENEVER', 'WHERE',\n 'WHILE', 'WITH', 'WITHIN', 'WORK', 'WRITE', 'XML', 'XMLAGG',\n 'XMLATTRIBUTES', 'XMLCAST', 'XMLCOLATTVAL', 'XMLELEMENT',\n 'XMLEXISTS', 'XMLFOREST', 'XMLNAMESPACES', 'XMLPARSE', 'XMLPI',\n 'XMLQUERY', 'XMLROOT', 'XMLSERIALIZE', 'XMLTABLE', 'YEAR', 'YES',\n 'YMINTERVAL_UNCONSTRAINED', 'ZONE', 'PREDICTION',\n 'PREDICTION_BOUNDS', 'PREDICTION_COST', 'PREDICTION_DETAILS',\n 'PREDICTION_PROBABILITY', 'PREDICTION_SET', 'CUME_DIST',\n 'DENSE_RANK', 'LISTAGG', 'PERCENT_RANK', 'PERCENTILE_CONT',\n 'PERCENTILE_DISC', 'RANK', 'AVG', 'CORR', 'LAG', 'LEAD', 'MAX',\n 'MEDIAN', 'MIN', 'NTILE', 'RATIO_TO_REPORT', 'ROW_NUMBER', 'SUM',\n 'VARIANCE', 'REGR_', 'STDDEV', 'VAR_', 'COVAR_',\n 'NATIONAL_CHAR_STRING_LIT', 'BIT_STRING_LIT', 'HEX_STRING_LIT',\n 'DOUBLE_PERIOD', 'PERIOD', 'UNSIGNED_INTEGER',\n 'APPROXIMATE_NUM_LIT', 'CHAR_STRING', 'DELIMITED_ID', 'PERCENT',\n 'AMPERSAND', 'LEFT_PAREN', 'RIGHT_PAREN', 'DOUBLE_ASTERISK',\n 'ASTERISK', 'PLUS_SIGN', 'MINUS_SIGN', 'COMMA', 'SOLIDUS',\n 'AT_SIGN', 'ASSIGN_OP', 'BINDVAR', 'COLON', 'SEMICOLON',\n 'LESS_THAN_OR_EQUALS_OP', 'LESS_THAN_OP',\n 'GREATER_THAN_OR_EQUALS_OP', 'NOT_EQUAL_OP', 'CARRET_OPERATOR_PART',\n 'TILDE_OPERATOR_PART', 'EXCLAMATION_OPERATOR_PART',\n 'GREATER_THAN_OP', 'CONCATENATION_OP', 'VERTICAL_BAR', 'EQUALS_OP',\n 'LEFT_BRACKET', 'RIGHT_BRACKET', 'INTRODUCER', 'SPACES',\n 'SINGLE_LINE_COMMENT', 'MULTI_LINE_COMMENT', 'PROMPT', 'REGULAR_ID',\n 'ZV']\n ruleNames = ['T__0', 'A_LETTER', 'ADD', 'AFTER', 'AGENT', 'AGGREGATE',\n 'ALL', 'ALTER', 'ANALYZE', 
'AND', 'ANY', 'ARRAY', 'AS', 'ASSUME',\n 'ASSERT', 'ASC', 'ASSOCIATE', 'AT', 'ATTRIBUTE', 'AUDIT', 'AUTHID',\n 'AUTO', 'AUTOMATIC', 'AUTONOMOUS_TRANSACTION', 'BATCH', 'BEFORE',\n 'BEGIN', 'BETWEEN', 'BFILE', 'BINARY_DOUBLE', 'BINARY_FLOAT',\n 'BINARY_INTEGER', 'BLOB', 'BLOCK', 'BODY', 'BOOLEAN', 'BOTH',\n 'BREADTH', 'BULK', 'BY', 'BYTE', 'C_LETTER', 'CACHE', 'CALL',\n 'CANONICAL', 'CASCADE', 'CASE', 'CAST', 'CHAR', 'CHAR_CS',\n 'CHARACTER', 'CHECK', 'CHR', 'CLOB', 'CLOSE', 'CLUSTER', 'COLLECT',\n 'COLUMNS', 'COMMENT', 'COMMIT', 'COMMITTED', 'COMPATIBILITY',\n 'COMPILE', 'COMPOUND', 'CONNECT', 'CONNECT_BY_ROOT', 'CONSTANT',\n 'CONSTRAINT', 'CONSTRAINTS', 'CONSTRUCTOR', 'CONTENT', 'CONTEXT',\n 'CONTINUE', 'CONVERT', 'CORRUPT_XID', 'CORRUPT_XID_ALL', 'COST',\n 'COUNT', 'CREATE', 'CROSS', 'CUBE', 'CURRENT', 'CURRENT_USER',\n 'CURSOR', 'CUSTOMDATUM', 'CYCLE', 'DATA', 'DATABASE', 'DATE', 'DAY',\n 'DB_ROLE_CHANGE', 'DBTIMEZONE', 'DDL', 'DEBUG', 'DEC', 'DECIMAL',\n 'DECLARE', 'DECOMPOSE', 'DECREMENT', 'DEFAULT', 'DEFAULTS',\n 'DEFERRED', 'DEFINER', 'DELETE', 'DEPTH', 'DESC', 'DETERMINISTIC',\n 'DIMENSION', 'DISABLE', 'DISASSOCIATE', 'DISTINCT', 'DOCUMENT',\n 'DOUBLE', 'DROP', 'DSINTERVAL_UNCONSTRAINED', 'EACH', 'ELEMENT',\n 'ELSE', 'ELSIF', 'EMPTY', 'ENABLE', 'ENCODING', 'END',\n 'ENTITYESCAPING', 'ERR', 'ERRORS', 'ESCAPE', 'EVALNAME', 'EXCEPT',\n 'EXCEPTION', 'EXCEPTION_INIT', 'EXCEPTIONS', 'EXCLUDE', 'EXCLUSIVE',\n 'EXECUTE', 'EXISTS', 'EXIT', 'EXPLAIN', 'EXTERNAL', 'EXTRACT',\n 'FAILURE', 'FALSE', 'FETCH', 'FINAL', 'FIRST', 'FIRST_VALUE',\n 'FLOAT', 'FOLLOWING', 'FOLLOWS', 'FOR', 'FORALL', 'FORCE', 'FROM',\n 'FULL', 'FUNCTION', 'GOTO', 'GRANT', 'GROUP', 'GROUPING', 'HASH',\n 'HAVING', 'HIDE', 'HOUR', 'IF', 'IGNORE', 'IMMEDIATE', 'IN',\n 'INCLUDE', 'INCLUDING', 'INCREMENT', 'INDENT', 'INDEX', 'INDEXED',\n 'INDICATOR', 'INDICES', 'INFINITE', 'INLINE', 'INNER', 'INOUT',\n 'INSERT', 'INSTANTIABLE', 'INSTEAD', 'INT', 'INTEGER', 'INTERSECT',\n 'INTERVAL', 'INTO', 
'INVALIDATE', 'IS', 'ISOLATION', 'ITERATE',\n 'JAVA', 'JOIN', 'KEEP', 'LANGUAGE', 'LAST', 'LAST_VALUE', 'LEADING',\n 'LEFT', 'LEVEL', 'LIBRARY', 'LIKE', 'LIKE2', 'LIKE4', 'LIKEC',\n 'LIMIT', 'LOCAL', 'LOCK', 'LOCKED', 'LOG', 'LOGOFF', 'LOGON',\n 'LONG', 'LOOP', 'MAIN', 'MAP', 'MATCHED', 'MAXVALUE', 'MEASURES',\n 'MEMBER', 'MERGE', 'MINUS', 'MINUTE', 'MINVALUE', 'MLSLABEL',\n 'MODE', 'MODEL', 'MODIFY', 'MONTH', 'MULTISET', 'NAME', 'NAN',\n 'NATURAL', 'NATURALN', 'NAV', 'NCHAR', 'NCHAR_CS', 'NCLOB',\n 'NESTED', 'NEW', 'NO', 'NOAUDIT', 'NOCACHE', 'NOCOPY', 'NOCYCLE',\n 'NOENTITYESCAPING', 'NOMAXVALUE', 'NOMINVALUE', 'NONE', 'NOORDER',\n 'NOSCHEMACHECK', 'NOT', 'NOWAIT', 'NULL', 'NULLS', 'NUMBER',\n 'NUMERIC', 'NVARCHAR2', 'OBJECT', 'OF', 'OFF', 'OID', 'OLD', 'ON',\n 'ONLY', 'OPEN', 'OPTION', 'OR', 'ORADATA', 'ORDER', 'ORDINALITY',\n 'OSERROR', 'OUT', 'OUTER', 'OVER', 'OVERRIDING', 'PACKAGE',\n 'PARALLEL_ENABLE', 'PARAMETERS', 'PARENT', 'PARTITION', 'PASSING',\n 'PATH', 'PERCENT_ROWTYPE', 'PERCENT_TYPE', 'PIPELINED', 'PIVOT',\n 'PLAN', 'PLS_INTEGER', 'POSITIVE', 'POSITIVEN', 'PRAGMA',\n 'PRECEDING', 'PRECISION', 'PRESENT', 'PRIOR', 'PROCEDURE', 'RAISE',\n 'RANGE', 'RAW', 'READ', 'REAL', 'RECORD', 'REF', 'REFERENCE',\n 'REFERENCING', 'REJECT', 'RELIES_ON', 'RENAME', 'REPLACE',\n 'RESPECT', 'RESTRICT_REFERENCES', 'RESULT', 'RESULT_CACHE',\n 'RETURN', 'RETURNING', 'REUSE', 'REVERSE', 'REVOKE', 'RIGHT',\n 'ROLLBACK', 'ROLLUP', 'ROW', 'ROWID', 'ROWS', 'RULES', 'SAMPLE',\n 'SAVE', 'SAVEPOINT', 'SCHEMA', 'SCHEMACHECK', 'SCN', 'SEARCH',\n 'SECOND', 'SEED', 'SEGMENT', 'SELECT', 'SELF', 'SEQUENCE',\n 'SEQUENTIAL', 'SERIALIZABLE', 'SERIALLY_REUSABLE', 'SERVERERROR',\n 'SESSIONTIMEZONE', 'SET', 'SETS', 'SETTINGS', 'SHARE', 'SHOW',\n 'SHUTDOWN', 'SIBLINGS', 'SIGNTYPE', 'SIMPLE_INTEGER', 'SINGLE',\n 'SIZE', 'SKIP_', 'SMALLINT', 'SNAPSHOT', 'SOME', 'SPECIFICATION',\n 'SQLDATA', 'SQLERROR', 'STANDALONE', 'START', 'STARTUP',\n 'STATEMENT', 'STATEMENT_ID', 'STATIC', 'STATISTICS', 
'STRING',\n 'SUBMULTISET', 'SUBPARTITION', 'SUBSTITUTABLE', 'SUBTYPE',\n 'SUCCESS', 'SUSPEND', 'TABLE', 'THE', 'THEN', 'TIME', 'TIMESTAMP',\n 'TIMESTAMP_LTZ_UNCONSTRAINED', 'TIMESTAMP_TZ_UNCONSTRAINED',\n 'TIMESTAMP_UNCONSTRAINED', 'TIMEZONE_ABBR', 'TIMEZONE_HOUR',\n 'TIMEZONE_MINUTE', 'TIMEZONE_REGION', 'TO', 'TRAILING',\n 'TRANSACTION', 'TRANSLATE', 'TREAT', 'TRIGGER', 'TRIM', 'TRUE',\n 'TRUNCATE', 'TYPE', 'UNBOUNDED', 'UNDER', 'UNION', 'UNIQUE',\n 'UNLIMITED', 'UNPIVOT', 'UNTIL', 'UPDATE', 'UPDATED', 'UPSERT',\n 'UROWID', 'USE', 'USING', 'VALIDATE', 'VALUE', 'VALUES', 'VARCHAR',\n 'VARCHAR2', 'VARIABLE', 'VARRAY', 'VARYING', 'VERSION', 'VERSIONS',\n 'WAIT', 'WARNING', 'WELLFORMED', 'WHEN', 'WHENEVER', 'WHERE',\n 'WHILE', 'WITH', 'WITHIN', 'WORK', 'WRITE', 'XML', 'XMLAGG',\n 'XMLATTRIBUTES', 'XMLCAST', 'XMLCOLATTVAL', 'XMLELEMENT',\n 'XMLEXISTS', 'XMLFOREST', 'XMLNAMESPACES', 'XMLPARSE', 'XMLPI',\n 'XMLQUERY', 'XMLROOT', 'XMLSERIALIZE', 'XMLTABLE', 'YEAR', 'YES',\n 'YMINTERVAL_UNCONSTRAINED', 'ZONE', 'PREDICTION',\n 'PREDICTION_BOUNDS', 'PREDICTION_COST', 'PREDICTION_DETAILS',\n 'PREDICTION_PROBABILITY', 'PREDICTION_SET', 'CUME_DIST',\n 'DENSE_RANK', 'LISTAGG', 'PERCENT_RANK', 'PERCENTILE_CONT',\n 'PERCENTILE_DISC', 'RANK', 'AVG', 'CORR', 'LAG', 'LEAD', 'MAX',\n 'MEDIAN', 'MIN', 'NTILE', 'RATIO_TO_REPORT', 'ROW_NUMBER', 'SUM',\n 'VARIANCE', 'REGR_', 'STDDEV', 'VAR_', 'COVAR_',\n 'NATIONAL_CHAR_STRING_LIT', 'BIT_STRING_LIT', 'HEX_STRING_LIT',\n 'DOUBLE_PERIOD', 'PERIOD', 'UNSIGNED_INTEGER',\n 'APPROXIMATE_NUM_LIT', 'CHAR_STRING', 'CHAR_STRING_PERL', 'QUOTE',\n 'QS_ANGLE', 'QS_BRACE', 'QS_BRACK', 'QS_PAREN', 'QS_OTHER_CH',\n 'DELIMITED_ID', 'PERCENT', 'AMPERSAND', 'LEFT_PAREN', 'RIGHT_PAREN',\n 'DOUBLE_ASTERISK', 'ASTERISK', 'PLUS_SIGN', 'MINUS_SIGN', 'COMMA',\n 'SOLIDUS', 'AT_SIGN', 'ASSIGN_OP', 'BINDVAR', 'COLON', 'SEMICOLON',\n 'LESS_THAN_OR_EQUALS_OP', 'LESS_THAN_OP',\n 'GREATER_THAN_OR_EQUALS_OP', 'NOT_EQUAL_OP', 'CARRET_OPERATOR_PART',\n 
'TILDE_OPERATOR_PART', 'EXCLAMATION_OPERATOR_PART',\n 'GREATER_THAN_OP', 'QUESTION_MARK', 'CONCATENATION_OP',\n 'VERTICAL_BAR', 'EQUALS_OP', 'LEFT_BRACKET', 'RIGHT_BRACKET',\n 'INTRODUCER', 'SPACES', 'SIMPLE_LETTER',\n 'UNSIGNED_INTEGER_FRAGMENT', 'FLOAT_FRAGMENT',\n 'SINGLE_LINE_COMMENT', 'MULTI_LINE_COMMENT', 'PROMPT', 'NEWLINE',\n 'SPACE', 'REGULAR_ID', 'ZV', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',\n 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',\n 'V', 'W', 'X', 'Y', 'Z']\n grammarFileName = 'PlSql.g4'\n\n def __init__(self, input=None, output: TextIO=sys.stdout):\n super().__init__(input, output)\n self.checkVersion('4.7.2')\n self._interp = LexerATNSimulator(self, self.atn, self.\n decisionsToDFA, PredictionContextCache())\n self._actions = None\n self._predicates = None\n",
"step-5": "# Generated from /home/mridul/PycharmProjects/BTP_2k18-19/PlSql.g4 by ANTLR 4.7.2\nfrom antlr4 import *\nfrom io import StringIO\nfrom typing.io import TextIO\nimport sys\n\n\n\ndef serializedATN():\n with StringIO() as buf:\n buf.write(\"\\3\\u608b\\ua72a\\u8133\\ub9ed\\u417c\\u3be7\\u7786\\u5964\\2\\u020e\")\n buf.write(\"\\u14d7\\b\\1\\4\\2\\t\\2\\4\\3\\t\\3\\4\\4\\t\\4\\4\\5\\t\\5\\4\\6\\t\\6\\4\\7\")\n buf.write(\"\\t\\7\\4\\b\\t\\b\\4\\t\\t\\t\\4\\n\\t\\n\\4\\13\\t\\13\\4\\f\\t\\f\\4\\r\\t\\r\")\n buf.write(\"\\4\\16\\t\\16\\4\\17\\t\\17\\4\\20\\t\\20\\4\\21\\t\\21\\4\\22\\t\\22\\4\\23\")\n buf.write(\"\\t\\23\\4\\24\\t\\24\\4\\25\\t\\25\\4\\26\\t\\26\\4\\27\\t\\27\\4\\30\\t\\30\")\n buf.write(\"\\4\\31\\t\\31\\4\\32\\t\\32\\4\\33\\t\\33\\4\\34\\t\\34\\4\\35\\t\\35\\4\\36\")\n buf.write(\"\\t\\36\\4\\37\\t\\37\\4 \\t \\4!\\t!\\4\\\"\\t\\\"\\4#\\t#\\4$\\t$\\4%\\t%\")\n buf.write(\"\\4&\\t&\\4\\'\\t\\'\\4(\\t(\\4)\\t)\\4*\\t*\\4+\\t+\\4,\\t,\\4-\\t-\\4.\")\n buf.write(\"\\t.\\4/\\t/\\4\\60\\t\\60\\4\\61\\t\\61\\4\\62\\t\\62\\4\\63\\t\\63\\4\\64\")\n buf.write(\"\\t\\64\\4\\65\\t\\65\\4\\66\\t\\66\\4\\67\\t\\67\\48\\t8\\49\\t9\\4:\\t:\")\n buf.write(\"\\4;\\t;\\4<\\t<\\4=\\t=\\4>\\t>\\4?\\t?\\4@\\t@\\4A\\tA\\4B\\tB\\4C\\t\")\n buf.write(\"C\\4D\\tD\\4E\\tE\\4F\\tF\\4G\\tG\\4H\\tH\\4I\\tI\\4J\\tJ\\4K\\tK\\4L\\t\")\n buf.write(\"L\\4M\\tM\\4N\\tN\\4O\\tO\\4P\\tP\\4Q\\tQ\\4R\\tR\\4S\\tS\\4T\\tT\\4U\\t\")\n buf.write(\"U\\4V\\tV\\4W\\tW\\4X\\tX\\4Y\\tY\\4Z\\tZ\\4[\\t[\\4\\\\\\t\\\\\\4]\\t]\\4\")\n buf.write(\"^\\t^\\4_\\t_\\4`\\t`\\4a\\ta\\4b\\tb\\4c\\tc\\4d\\td\\4e\\te\\4f\\tf\\4\")\n buf.write(\"g\\tg\\4h\\th\\4i\\ti\\4j\\tj\\4k\\tk\\4l\\tl\\4m\\tm\\4n\\tn\\4o\\to\\4\")\n buf.write(\"p\\tp\\4q\\tq\\4r\\tr\\4s\\ts\\4t\\tt\\4u\\tu\\4v\\tv\\4w\\tw\\4x\\tx\\4\")\n buf.write(\"y\\ty\\4z\\tz\\4{\\t{\\4|\\t|\\4}\\t}\\4~\\t~\\4\\177\\t\\177\\4\\u0080\")\n buf.write(\"\\t\\u0080\\4\\u0081\\t\\u0081\\4\\u0082\\t\\u0082\\4\\u0083\\t\\u0083\")\n 
buf.write(\"\\4\\u0084\\t\\u0084\\4\\u0085\\t\\u0085\\4\\u0086\\t\\u0086\\4\\u0087\")\n buf.write(\"\\t\\u0087\\4\\u0088\\t\\u0088\\4\\u0089\\t\\u0089\\4\\u008a\\t\\u008a\")\n buf.write(\"\\4\\u008b\\t\\u008b\\4\\u008c\\t\\u008c\\4\\u008d\\t\\u008d\\4\\u008e\")\n buf.write(\"\\t\\u008e\\4\\u008f\\t\\u008f\\4\\u0090\\t\\u0090\\4\\u0091\\t\\u0091\")\n buf.write(\"\\4\\u0092\\t\\u0092\\4\\u0093\\t\\u0093\\4\\u0094\\t\\u0094\\4\\u0095\")\n buf.write(\"\\t\\u0095\\4\\u0096\\t\\u0096\\4\\u0097\\t\\u0097\\4\\u0098\\t\\u0098\")\n buf.write(\"\\4\\u0099\\t\\u0099\\4\\u009a\\t\\u009a\\4\\u009b\\t\\u009b\\4\\u009c\")\n buf.write(\"\\t\\u009c\\4\\u009d\\t\\u009d\\4\\u009e\\t\\u009e\\4\\u009f\\t\\u009f\")\n buf.write(\"\\4\\u00a0\\t\\u00a0\\4\\u00a1\\t\\u00a1\\4\\u00a2\\t\\u00a2\\4\\u00a3\")\n buf.write(\"\\t\\u00a3\\4\\u00a4\\t\\u00a4\\4\\u00a5\\t\\u00a5\\4\\u00a6\\t\\u00a6\")\n buf.write(\"\\4\\u00a7\\t\\u00a7\\4\\u00a8\\t\\u00a8\\4\\u00a9\\t\\u00a9\\4\\u00aa\")\n buf.write(\"\\t\\u00aa\\4\\u00ab\\t\\u00ab\\4\\u00ac\\t\\u00ac\\4\\u00ad\\t\\u00ad\")\n buf.write(\"\\4\\u00ae\\t\\u00ae\\4\\u00af\\t\\u00af\\4\\u00b0\\t\\u00b0\\4\\u00b1\")\n buf.write(\"\\t\\u00b1\\4\\u00b2\\t\\u00b2\\4\\u00b3\\t\\u00b3\\4\\u00b4\\t\\u00b4\")\n buf.write(\"\\4\\u00b5\\t\\u00b5\\4\\u00b6\\t\\u00b6\\4\\u00b7\\t\\u00b7\\4\\u00b8\")\n buf.write(\"\\t\\u00b8\\4\\u00b9\\t\\u00b9\\4\\u00ba\\t\\u00ba\\4\\u00bb\\t\\u00bb\")\n buf.write(\"\\4\\u00bc\\t\\u00bc\\4\\u00bd\\t\\u00bd\\4\\u00be\\t\\u00be\\4\\u00bf\")\n buf.write(\"\\t\\u00bf\\4\\u00c0\\t\\u00c0\\4\\u00c1\\t\\u00c1\\4\\u00c2\\t\\u00c2\")\n buf.write(\"\\4\\u00c3\\t\\u00c3\\4\\u00c4\\t\\u00c4\\4\\u00c5\\t\\u00c5\\4\\u00c6\")\n buf.write(\"\\t\\u00c6\\4\\u00c7\\t\\u00c7\\4\\u00c8\\t\\u00c8\\4\\u00c9\\t\\u00c9\")\n buf.write(\"\\4\\u00ca\\t\\u00ca\\4\\u00cb\\t\\u00cb\\4\\u00cc\\t\\u00cc\\4\\u00cd\")\n buf.write(\"\\t\\u00cd\\4\\u00ce\\t\\u00ce\\4\\u00cf\\t\\u00cf\\4\\u00d0\\t\\u00d0\")\n 
buf.write(\"\\4\\u00d1\\t\\u00d1\\4\\u00d2\\t\\u00d2\\4\\u00d3\\t\\u00d3\\4\\u00d4\")\n buf.write(\"\\t\\u00d4\\4\\u00d5\\t\\u00d5\\4\\u00d6\\t\\u00d6\\4\\u00d7\\t\\u00d7\")\n buf.write(\"\\4\\u00d8\\t\\u00d8\\4\\u00d9\\t\\u00d9\\4\\u00da\\t\\u00da\\4\\u00db\")\n buf.write(\"\\t\\u00db\\4\\u00dc\\t\\u00dc\\4\\u00dd\\t\\u00dd\\4\\u00de\\t\\u00de\")\n buf.write(\"\\4\\u00df\\t\\u00df\\4\\u00e0\\t\\u00e0\\4\\u00e1\\t\\u00e1\\4\\u00e2\")\n buf.write(\"\\t\\u00e2\\4\\u00e3\\t\\u00e3\\4\\u00e4\\t\\u00e4\\4\\u00e5\\t\\u00e5\")\n buf.write(\"\\4\\u00e6\\t\\u00e6\\4\\u00e7\\t\\u00e7\\4\\u00e8\\t\\u00e8\\4\\u00e9\")\n buf.write(\"\\t\\u00e9\\4\\u00ea\\t\\u00ea\\4\\u00eb\\t\\u00eb\\4\\u00ec\\t\\u00ec\")\n buf.write(\"\\4\\u00ed\\t\\u00ed\\4\\u00ee\\t\\u00ee\\4\\u00ef\\t\\u00ef\\4\\u00f0\")\n buf.write(\"\\t\\u00f0\\4\\u00f1\\t\\u00f1\\4\\u00f2\\t\\u00f2\\4\\u00f3\\t\\u00f3\")\n buf.write(\"\\4\\u00f4\\t\\u00f4\\4\\u00f5\\t\\u00f5\\4\\u00f6\\t\\u00f6\\4\\u00f7\")\n buf.write(\"\\t\\u00f7\\4\\u00f8\\t\\u00f8\\4\\u00f9\\t\\u00f9\\4\\u00fa\\t\\u00fa\")\n buf.write(\"\\4\\u00fb\\t\\u00fb\\4\\u00fc\\t\\u00fc\\4\\u00fd\\t\\u00fd\\4\\u00fe\")\n buf.write(\"\\t\\u00fe\\4\\u00ff\\t\\u00ff\\4\\u0100\\t\\u0100\\4\\u0101\\t\\u0101\")\n buf.write(\"\\4\\u0102\\t\\u0102\\4\\u0103\\t\\u0103\\4\\u0104\\t\\u0104\\4\\u0105\")\n buf.write(\"\\t\\u0105\\4\\u0106\\t\\u0106\\4\\u0107\\t\\u0107\\4\\u0108\\t\\u0108\")\n buf.write(\"\\4\\u0109\\t\\u0109\\4\\u010a\\t\\u010a\\4\\u010b\\t\\u010b\\4\\u010c\")\n buf.write(\"\\t\\u010c\\4\\u010d\\t\\u010d\\4\\u010e\\t\\u010e\\4\\u010f\\t\\u010f\")\n buf.write(\"\\4\\u0110\\t\\u0110\\4\\u0111\\t\\u0111\\4\\u0112\\t\\u0112\\4\\u0113\")\n buf.write(\"\\t\\u0113\\4\\u0114\\t\\u0114\\4\\u0115\\t\\u0115\\4\\u0116\\t\\u0116\")\n buf.write(\"\\4\\u0117\\t\\u0117\\4\\u0118\\t\\u0118\\4\\u0119\\t\\u0119\\4\\u011a\")\n buf.write(\"\\t\\u011a\\4\\u011b\\t\\u011b\\4\\u011c\\t\\u011c\\4\\u011d\\t\\u011d\")\n 
buf.write(\"\\4\\u011e\\t\\u011e\\4\\u011f\\t\\u011f\\4\\u0120\\t\\u0120\\4\\u0121\")\n buf.write(\"\\t\\u0121\\4\\u0122\\t\\u0122\\4\\u0123\\t\\u0123\\4\\u0124\\t\\u0124\")\n buf.write(\"\\4\\u0125\\t\\u0125\\4\\u0126\\t\\u0126\\4\\u0127\\t\\u0127\\4\\u0128\")\n buf.write(\"\\t\\u0128\\4\\u0129\\t\\u0129\\4\\u012a\\t\\u012a\\4\\u012b\\t\\u012b\")\n buf.write(\"\\4\\u012c\\t\\u012c\\4\\u012d\\t\\u012d\\4\\u012e\\t\\u012e\\4\\u012f\")\n buf.write(\"\\t\\u012f\\4\\u0130\\t\\u0130\\4\\u0131\\t\\u0131\\4\\u0132\\t\\u0132\")\n buf.write(\"\\4\\u0133\\t\\u0133\\4\\u0134\\t\\u0134\\4\\u0135\\t\\u0135\\4\\u0136\")\n buf.write(\"\\t\\u0136\\4\\u0137\\t\\u0137\\4\\u0138\\t\\u0138\\4\\u0139\\t\\u0139\")\n buf.write(\"\\4\\u013a\\t\\u013a\\4\\u013b\\t\\u013b\\4\\u013c\\t\\u013c\\4\\u013d\")\n buf.write(\"\\t\\u013d\\4\\u013e\\t\\u013e\\4\\u013f\\t\\u013f\\4\\u0140\\t\\u0140\")\n buf.write(\"\\4\\u0141\\t\\u0141\\4\\u0142\\t\\u0142\\4\\u0143\\t\\u0143\\4\\u0144\")\n buf.write(\"\\t\\u0144\\4\\u0145\\t\\u0145\\4\\u0146\\t\\u0146\\4\\u0147\\t\\u0147\")\n buf.write(\"\\4\\u0148\\t\\u0148\\4\\u0149\\t\\u0149\\4\\u014a\\t\\u014a\\4\\u014b\")\n buf.write(\"\\t\\u014b\\4\\u014c\\t\\u014c\\4\\u014d\\t\\u014d\\4\\u014e\\t\\u014e\")\n buf.write(\"\\4\\u014f\\t\\u014f\\4\\u0150\\t\\u0150\\4\\u0151\\t\\u0151\\4\\u0152\")\n buf.write(\"\\t\\u0152\\4\\u0153\\t\\u0153\\4\\u0154\\t\\u0154\\4\\u0155\\t\\u0155\")\n buf.write(\"\\4\\u0156\\t\\u0156\\4\\u0157\\t\\u0157\\4\\u0158\\t\\u0158\\4\\u0159\")\n buf.write(\"\\t\\u0159\\4\\u015a\\t\\u015a\\4\\u015b\\t\\u015b\\4\\u015c\\t\\u015c\")\n buf.write(\"\\4\\u015d\\t\\u015d\\4\\u015e\\t\\u015e\\4\\u015f\\t\\u015f\\4\\u0160\")\n buf.write(\"\\t\\u0160\\4\\u0161\\t\\u0161\\4\\u0162\\t\\u0162\\4\\u0163\\t\\u0163\")\n buf.write(\"\\4\\u0164\\t\\u0164\\4\\u0165\\t\\u0165\\4\\u0166\\t\\u0166\\4\\u0167\")\n buf.write(\"\\t\\u0167\\4\\u0168\\t\\u0168\\4\\u0169\\t\\u0169\\4\\u016a\\t\\u016a\")\n 
buf.write(\"\\4\\u016b\\t\\u016b\\4\\u016c\\t\\u016c\\4\\u016d\\t\\u016d\\4\\u016e\")\n buf.write(\"\\t\\u016e\\4\\u016f\\t\\u016f\\4\\u0170\\t\\u0170\\4\\u0171\\t\\u0171\")\n buf.write(\"\\4\\u0172\\t\\u0172\\4\\u0173\\t\\u0173\\4\\u0174\\t\\u0174\\4\\u0175\")\n buf.write(\"\\t\\u0175\\4\\u0176\\t\\u0176\\4\\u0177\\t\\u0177\\4\\u0178\\t\\u0178\")\n buf.write(\"\\4\\u0179\\t\\u0179\\4\\u017a\\t\\u017a\\4\\u017b\\t\\u017b\\4\\u017c\")\n buf.write(\"\\t\\u017c\\4\\u017d\\t\\u017d\\4\\u017e\\t\\u017e\\4\\u017f\\t\\u017f\")\n buf.write(\"\\4\\u0180\\t\\u0180\\4\\u0181\\t\\u0181\\4\\u0182\\t\\u0182\\4\\u0183\")\n buf.write(\"\\t\\u0183\\4\\u0184\\t\\u0184\\4\\u0185\\t\\u0185\\4\\u0186\\t\\u0186\")\n buf.write(\"\\4\\u0187\\t\\u0187\\4\\u0188\\t\\u0188\\4\\u0189\\t\\u0189\\4\\u018a\")\n buf.write(\"\\t\\u018a\\4\\u018b\\t\\u018b\\4\\u018c\\t\\u018c\\4\\u018d\\t\\u018d\")\n buf.write(\"\\4\\u018e\\t\\u018e\\4\\u018f\\t\\u018f\\4\\u0190\\t\\u0190\\4\\u0191\")\n buf.write(\"\\t\\u0191\\4\\u0192\\t\\u0192\\4\\u0193\\t\\u0193\\4\\u0194\\t\\u0194\")\n buf.write(\"\\4\\u0195\\t\\u0195\\4\\u0196\\t\\u0196\\4\\u0197\\t\\u0197\\4\\u0198\")\n buf.write(\"\\t\\u0198\\4\\u0199\\t\\u0199\\4\\u019a\\t\\u019a\\4\\u019b\\t\\u019b\")\n buf.write(\"\\4\\u019c\\t\\u019c\\4\\u019d\\t\\u019d\\4\\u019e\\t\\u019e\\4\\u019f\")\n buf.write(\"\\t\\u019f\\4\\u01a0\\t\\u01a0\\4\\u01a1\\t\\u01a1\\4\\u01a2\\t\\u01a2\")\n buf.write(\"\\4\\u01a3\\t\\u01a3\\4\\u01a4\\t\\u01a4\\4\\u01a5\\t\\u01a5\\4\\u01a6\")\n buf.write(\"\\t\\u01a6\\4\\u01a7\\t\\u01a7\\4\\u01a8\\t\\u01a8\\4\\u01a9\\t\\u01a9\")\n buf.write(\"\\4\\u01aa\\t\\u01aa\\4\\u01ab\\t\\u01ab\\4\\u01ac\\t\\u01ac\\4\\u01ad\")\n buf.write(\"\\t\\u01ad\\4\\u01ae\\t\\u01ae\\4\\u01af\\t\\u01af\\4\\u01b0\\t\\u01b0\")\n buf.write(\"\\4\\u01b1\\t\\u01b1\\4\\u01b2\\t\\u01b2\\4\\u01b3\\t\\u01b3\\4\\u01b4\")\n buf.write(\"\\t\\u01b4\\4\\u01b5\\t\\u01b5\\4\\u01b6\\t\\u01b6\\4\\u01b7\\t\\u01b7\")\n 
buf.write(\"\\4\\u01b8\\t\\u01b8\\4\\u01b9\\t\\u01b9\\4\\u01ba\\t\\u01ba\\4\\u01bb\")\n buf.write(\"\\t\\u01bb\\4\\u01bc\\t\\u01bc\\4\\u01bd\\t\\u01bd\\4\\u01be\\t\\u01be\")\n buf.write(\"\\4\\u01bf\\t\\u01bf\\4\\u01c0\\t\\u01c0\\4\\u01c1\\t\\u01c1\\4\\u01c2\")\n buf.write(\"\\t\\u01c2\\4\\u01c3\\t\\u01c3\\4\\u01c4\\t\\u01c4\\4\\u01c5\\t\\u01c5\")\n buf.write(\"\\4\\u01c6\\t\\u01c6\\4\\u01c7\\t\\u01c7\\4\\u01c8\\t\\u01c8\\4\\u01c9\")\n buf.write(\"\\t\\u01c9\\4\\u01ca\\t\\u01ca\\4\\u01cb\\t\\u01cb\\4\\u01cc\\t\\u01cc\")\n buf.write(\"\\4\\u01cd\\t\\u01cd\\4\\u01ce\\t\\u01ce\\4\\u01cf\\t\\u01cf\\4\\u01d0\")\n buf.write(\"\\t\\u01d0\\4\\u01d1\\t\\u01d1\\4\\u01d2\\t\\u01d2\\4\\u01d3\\t\\u01d3\")\n buf.write(\"\\4\\u01d4\\t\\u01d4\\4\\u01d5\\t\\u01d5\\4\\u01d6\\t\\u01d6\\4\\u01d7\")\n buf.write(\"\\t\\u01d7\\4\\u01d8\\t\\u01d8\\4\\u01d9\\t\\u01d9\\4\\u01da\\t\\u01da\")\n buf.write(\"\\4\\u01db\\t\\u01db\\4\\u01dc\\t\\u01dc\\4\\u01dd\\t\\u01dd\\4\\u01de\")\n buf.write(\"\\t\\u01de\\4\\u01df\\t\\u01df\\4\\u01e0\\t\\u01e0\\4\\u01e1\\t\\u01e1\")\n buf.write(\"\\4\\u01e2\\t\\u01e2\\4\\u01e3\\t\\u01e3\\4\\u01e4\\t\\u01e4\\4\\u01e5\")\n buf.write(\"\\t\\u01e5\\4\\u01e6\\t\\u01e6\\4\\u01e7\\t\\u01e7\\4\\u01e8\\t\\u01e8\")\n buf.write(\"\\4\\u01e9\\t\\u01e9\\4\\u01ea\\t\\u01ea\\4\\u01eb\\t\\u01eb\\4\\u01ec\")\n buf.write(\"\\t\\u01ec\\4\\u01ed\\t\\u01ed\\4\\u01ee\\t\\u01ee\\4\\u01ef\\t\\u01ef\")\n buf.write(\"\\4\\u01f0\\t\\u01f0\\4\\u01f1\\t\\u01f1\\4\\u01f2\\t\\u01f2\\4\\u01f3\")\n buf.write(\"\\t\\u01f3\\4\\u01f4\\t\\u01f4\\4\\u01f5\\t\\u01f5\\4\\u01f6\\t\\u01f6\")\n buf.write(\"\\4\\u01f7\\t\\u01f7\\4\\u01f8\\t\\u01f8\\4\\u01f9\\t\\u01f9\\4\\u01fa\")\n buf.write(\"\\t\\u01fa\\4\\u01fb\\t\\u01fb\\4\\u01fc\\t\\u01fc\\4\\u01fd\\t\\u01fd\")\n buf.write(\"\\4\\u01fe\\t\\u01fe\\4\\u01ff\\t\\u01ff\\4\\u0200\\t\\u0200\\4\\u0201\")\n buf.write(\"\\t\\u0201\\4\\u0202\\t\\u0202\\4\\u0203\\t\\u0203\\4\\u0204\\t\\u0204\")\n 
buf.write(\"\\4\\u0205\\t\\u0205\\4\\u0206\\t\\u0206\\4\\u0207\\t\\u0207\\4\\u0208\")\n buf.write(\"\\t\\u0208\\4\\u0209\\t\\u0209\\4\\u020a\\t\\u020a\\4\\u020b\\t\\u020b\")\n buf.write(\"\\4\\u020c\\t\\u020c\\4\\u020d\\t\\u020d\\4\\u020e\\t\\u020e\\4\\u020f\")\n buf.write(\"\\t\\u020f\\4\\u0210\\t\\u0210\\4\\u0211\\t\\u0211\\4\\u0212\\t\\u0212\")\n buf.write(\"\\4\\u0213\\t\\u0213\\4\\u0214\\t\\u0214\\4\\u0215\\t\\u0215\\4\\u0216\")\n buf.write(\"\\t\\u0216\\4\\u0217\\t\\u0217\\4\\u0218\\t\\u0218\\4\\u0219\\t\\u0219\")\n buf.write(\"\\4\\u021a\\t\\u021a\\4\\u021b\\t\\u021b\\4\\u021c\\t\\u021c\\4\\u021d\")\n buf.write(\"\\t\\u021d\\4\\u021e\\t\\u021e\\4\\u021f\\t\\u021f\\4\\u0220\\t\\u0220\")\n buf.write(\"\\4\\u0221\\t\\u0221\\4\\u0222\\t\\u0222\\4\\u0223\\t\\u0223\\4\\u0224\")\n buf.write(\"\\t\\u0224\\4\\u0225\\t\\u0225\\4\\u0226\\t\\u0226\\4\\u0227\\t\\u0227\")\n buf.write(\"\\4\\u0228\\t\\u0228\\4\\u0229\\t\\u0229\\4\\u022a\\t\\u022a\\4\\u022b\")\n buf.write(\"\\t\\u022b\\4\\u022c\\t\\u022c\\4\\u022d\\t\\u022d\\4\\u022e\\t\\u022e\")\n buf.write(\"\\4\\u022f\\t\\u022f\\4\\u0230\\t\\u0230\\4\\u0231\\t\\u0231\\4\\u0232\")\n buf.write(\"\\t\\u0232\\4\\u0233\\t\\u0233\\4\\u0234\\t\\u0234\\3\\2\\3\\2\\3\\2\\3\")\n buf.write(\"\\3\\3\\3\\3\\4\\3\\4\\3\\4\\3\\4\\3\\5\\3\\5\\3\\5\\3\\5\\3\\5\\3\\5\\3\\6\\3\\6\")\n buf.write(\"\\3\\6\\3\\6\\3\\6\\3\\6\\3\\7\\3\\7\\3\\7\\3\\7\\3\\7\\3\\7\\3\\7\\3\\7\\3\\7\\3\")\n buf.write(\"\\7\\3\\b\\3\\b\\3\\b\\3\\b\\3\\t\\3\\t\\3\\t\\3\\t\\3\\t\\3\\t\\3\\n\\3\\n\\3\\n\")\n buf.write(\"\\3\\n\\3\\n\\3\\n\\3\\n\\3\\n\\3\\13\\3\\13\\3\\13\\3\\13\\3\\f\\3\\f\\3\\f\\3\")\n buf.write(\"\\f\\3\\r\\3\\r\\3\\r\\3\\r\\3\\r\\3\\r\\3\\16\\3\\16\\3\\16\\3\\17\\3\\17\\3\")\n buf.write(\"\\17\\3\\17\\3\\17\\3\\17\\3\\17\\3\\20\\3\\20\\3\\20\\3\\20\\3\\20\\3\\20\")\n buf.write(\"\\3\\20\\3\\21\\3\\21\\3\\21\\3\\21\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\\3\\22\")\n 
buf.write(\"\\3\\22\\3\\22\\3\\22\\3\\22\\3\\23\\3\\23\\3\\23\\3\\24\\3\\24\\3\\24\\3\\24\")\n buf.write(\"\\3\\24\\3\\24\\3\\24\\3\\24\\3\\24\\3\\24\\3\\25\\3\\25\\3\\25\\3\\25\\3\\25\")\n buf.write(\"\\3\\25\\3\\26\\3\\26\\3\\26\\3\\26\\3\\26\\3\\26\\3\\26\\3\\27\\3\\27\\3\\27\")\n buf.write(\"\\3\\27\\3\\27\\3\\30\\3\\30\\3\\30\\3\\30\\3\\30\\3\\30\\3\\30\\3\\30\\3\\30\")\n buf.write(\"\\3\\30\\3\\31\\3\\31\\3\\31\\3\\31\\3\\31\\3\\31\\3\\31\\3\\31\\3\\31\\3\\31\")\n buf.write(\"\\3\\31\\3\\31\\3\\31\\3\\31\\3\\31\\3\\31\\3\\31\\3\\31\\3\\31\\3\\31\\3\\31\")\n buf.write(\"\\3\\31\\3\\31\\3\\32\\3\\32\\3\\32\\3\\32\\3\\32\\3\\32\\3\\33\\3\\33\\3\\33\")\n buf.write(\"\\3\\33\\3\\33\\3\\33\\3\\33\\3\\34\\3\\34\\3\\34\\3\\34\\3\\34\\3\\34\\3\\35\")\n buf.write(\"\\3\\35\\3\\35\\3\\35\\3\\35\\3\\35\\3\\35\\3\\35\\3\\36\\3\\36\\3\\36\\3\\36\")\n buf.write(\"\\3\\36\\3\\36\\3\\37\\3\\37\\3\\37\\3\\37\\3\\37\\3\\37\\3\\37\\3\\37\\3\\37\")\n buf.write(\"\\3\\37\\3\\37\\3\\37\\3\\37\\3\\37\\3 \\3 \\3 \\3 \\3 \\3 \\3 \\3 \\3 \\3\")\n buf.write(\" \\3 \\3 \\3 \\3!\\3!\\3!\\3!\\3!\\3!\\3!\\3!\\3!\\3!\\3!\\3!\\3!\\3!\\3\")\n buf.write(\"!\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3\\\"\\3#\\3#\\3#\\3#\\3#\\3#\\3$\\3$\\3$\\3$\\3\")\n buf.write(\"$\\3%\\3%\\3%\\3%\\3%\\3%\\3%\\3%\\3&\\3&\\3&\\3&\\3&\\3\\'\\3\\'\\3\\'\\3\")\n buf.write(\"\\'\\3\\'\\3\\'\\3\\'\\3\\'\\3(\\3(\\3(\\3(\\3(\\3)\\3)\\3)\\3*\\3*\\3*\\3\")\n buf.write(\"*\\3*\\3+\\3+\\3,\\3,\\3,\\3,\\3,\\3,\\3-\\3-\\3-\\3-\\3-\\3.\\3.\\3.\\3\")\n buf.write(\".\\3.\\3.\\3.\\3.\\3.\\3.\\3/\\3/\\3/\\3/\\3/\\3/\\3/\\3/\\3\\60\\3\\60\")\n buf.write(\"\\3\\60\\3\\60\\3\\60\\3\\61\\3\\61\\3\\61\\3\\61\\3\\61\\3\\62\\3\\62\\3\\62\")\n buf.write(\"\\3\\62\\3\\62\\3\\63\\3\\63\\3\\63\\3\\63\\3\\63\\3\\63\\3\\63\\3\\63\\3\\64\")\n buf.write(\"\\3\\64\\3\\64\\3\\64\\3\\64\\3\\64\\3\\64\\3\\64\\3\\64\\3\\64\\3\\65\\3\\65\")\n buf.write(\"\\3\\65\\3\\65\\3\\65\\3\\65\\3\\66\\3\\66\\3\\66\\3\\66\\3\\67\\3\\67\\3\\67\")\n 
buf.write(\"\\3\\67\\3\\67\\38\\38\\38\\38\\38\\38\\39\\39\\39\\39\\39\\39\\39\\39\\3\")\n buf.write(\":\\3:\\3:\\3:\\3:\\3:\\3:\\3:\\3;\\3;\\3;\\3;\\3;\\3;\\3;\\3;\\3<\\3<\\3\")\n buf.write(\"<\\3<\\3<\\3<\\3<\\3<\\3=\\3=\\3=\\3=\\3=\\3=\\3=\\3>\\3>\\3>\\3>\\3>\\3\")\n buf.write(\">\\3>\\3>\\3>\\3>\\3?\\3?\\3?\\3?\\3?\\3?\\3?\\3?\\3?\\3?\\3?\\3?\\3?\\3\")\n buf.write(\"?\\3@\\3@\\3@\\3@\\3@\\3@\\3@\\3@\\3A\\3A\\3A\\3A\\3A\\3A\\3A\\3A\\3A\\3\")\n buf.write(\"B\\3B\\3B\\3B\\3B\\3B\\3B\\3B\\3C\\3C\\3C\\3C\\3C\\3C\\3C\\3C\\3C\\3C\\3\")\n buf.write(\"C\\3C\\3C\\3C\\3C\\3C\\3D\\3D\\3D\\3D\\3D\\3D\\3D\\3D\\3D\\3E\\3E\\3E\\3\")\n buf.write(\"E\\3E\\3E\\3E\\3E\\3E\\3E\\3E\\3F\\3F\\3F\\3F\\3F\\3F\\3F\\3F\\3F\\3F\\3\")\n buf.write(\"F\\3F\\3G\\3G\\3G\\3G\\3G\\3G\\3G\\3G\\3G\\3G\\3G\\3G\\3H\\3H\\3H\\3H\\3\")\n buf.write(\"H\\3H\\3H\\3H\\3I\\3I\\3I\\3I\\3I\\3I\\3I\\3I\\3J\\3J\\3J\\3J\\3J\\3J\\3\")\n buf.write(\"J\\3J\\3J\\3K\\3K\\3K\\3K\\3K\\3K\\3K\\3K\\3L\\3L\\3L\\3L\\3L\\3L\\3L\\3\")\n buf.write(\"L\\3L\\3L\\3L\\3L\\3M\\3M\\3M\\3M\\3M\\3M\\3M\\3M\\3M\\3M\\3M\\3M\\3M\\3\")\n buf.write(\"M\\3M\\3M\\3N\\3N\\3N\\3N\\3N\\3O\\3O\\3O\\3O\\3O\\3O\\3P\\3P\\3P\\3P\\3\")\n buf.write(\"P\\3P\\3P\\3Q\\3Q\\3Q\\3Q\\3Q\\3Q\\3R\\3R\\3R\\3R\\3R\\3S\\3S\\3S\\3S\\3\")\n buf.write(\"S\\3S\\3S\\3S\\3T\\3T\\3T\\3T\\3T\\3T\\3T\\3T\\3T\\3T\\3T\\3T\\3T\\3U\\3\")\n buf.write(\"U\\3U\\3U\\3U\\3U\\3U\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3V\\3\")\n buf.write(\"W\\3W\\3W\\3W\\3W\\3W\\3X\\3X\\3X\\3X\\3X\\3Y\\3Y\\3Y\\3Y\\3Y\\3Y\\3Y\\3\")\n buf.write(\"Y\\3Y\\3Z\\3Z\\3Z\\3Z\\3Z\\3[\\3[\\3[\\3[\\3\\\\\\3\\\\\\3\\\\\\3\\\\\\3\\\\\\3\")\n buf.write(\"\\\\\\3\\\\\\3\\\\\\3\\\\\\3\\\\\\3\\\\\\3\\\\\\3\\\\\\3\\\\\\3\\\\\\3]\\3]\\3]\\3]\\3]\")\n buf.write(\"\\3]\\3]\\3]\\3]\\3]\\3]\\3^\\3^\\3^\\3^\\3_\\3_\\3_\\3_\\3_\\3_\\3`\\3\")\n buf.write(\"`\\3`\\3`\\3a\\3a\\3a\\3a\\3a\\3a\\3a\\3a\\3b\\3b\\3b\\3b\\3b\\3b\\3b\\3\")\n 
buf.write(\"b\\3c\\3c\\3c\\3c\\3c\\3c\\3c\\3c\\3c\\3c\\3d\\3d\\3d\\3d\\3d\\3d\\3d\\3\")\n buf.write(\"d\\3d\\3d\\3e\\3e\\3e\\3e\\3e\\3e\\3e\\3e\\3f\\3f\\3f\\3f\\3f\\3f\\3f\\3\")\n buf.write(\"f\\3f\\3g\\3g\\3g\\3g\\3g\\3g\\3g\\3g\\3g\\3h\\3h\\3h\\3h\\3h\\3h\\3h\\3\")\n buf.write(\"h\\3i\\3i\\3i\\3i\\3i\\3i\\3i\\3j\\3j\\3j\\3j\\3j\\3j\\3k\\3k\\3k\\3k\\3\")\n buf.write(\"k\\3l\\3l\\3l\\3l\\3l\\3l\\3l\\3l\\3l\\3l\\3l\\3l\\3l\\3l\\3m\\3m\\3m\\3\")\n buf.write(\"m\\3m\\3m\\3m\\3m\\3m\\3m\\3n\\3n\\3n\\3n\\3n\\3n\\3n\\3n\\3o\\3o\\3o\\3\")\n buf.write(\"o\\3o\\3o\\3o\\3o\\3o\\3o\\3o\\3o\\3o\\3p\\3p\\3p\\3p\\3p\\3p\\3p\\3p\\3\")\n buf.write(\"p\\3q\\3q\\3q\\3q\\3q\\3q\\3q\\3q\\3q\\3r\\3r\\3r\\3r\\3r\\3r\\3r\\3s\\3\")\n buf.write(\"s\\3s\\3s\\3s\\3t\\3t\\3t\\3t\\3t\\3t\\3t\\3t\\3t\\3t\\3t\\3t\\3t\\3t\\3\")\n buf.write(\"t\\3t\\3t\\3t\\3t\\3t\\3t\\3t\\3t\\3t\\3t\\3u\\3u\\3u\\3u\\3u\\3v\\3v\\3\")\n buf.write(\"v\\3v\\3v\\3v\\3v\\3v\\3w\\3w\\3w\\3w\\3w\\3x\\3x\\3x\\3x\\3x\\3x\\3y\\3\")\n buf.write(\"y\\3y\\3y\\3y\\3y\\3z\\3z\\3z\\3z\\3z\\3z\\3z\\3{\\3{\\3{\\3{\\3{\\3{\\3\")\n buf.write(\"{\\3{\\3{\\3|\\3|\\3|\\3|\\3}\\3}\\3}\\3}\\3}\\3}\\3}\\3}\\3}\\3}\\3}\\3\")\n buf.write(\"}\\3}\\3}\\3}\\3~\\3~\\3~\\3~\\3\\177\\3\\177\\3\\177\\3\\177\\3\\177\\3\")\n buf.write(\"\\177\\3\\177\\3\\u0080\\3\\u0080\\3\\u0080\\3\\u0080\\3\\u0080\\3\\u0080\")\n buf.write(\"\\3\\u0080\\3\\u0081\\3\\u0081\\3\\u0081\\3\\u0081\\3\\u0081\\3\\u0081\")\n buf.write(\"\\3\\u0081\\3\\u0081\\3\\u0081\\3\\u0082\\3\\u0082\\3\\u0082\\3\\u0082\")\n buf.write(\"\\3\\u0082\\3\\u0082\\3\\u0082\\3\\u0083\\3\\u0083\\3\\u0083\\3\\u0083\")\n buf.write(\"\\3\\u0083\\3\\u0083\\3\\u0083\\3\\u0083\\3\\u0083\\3\\u0083\\3\\u0084\")\n buf.write(\"\\3\\u0084\\3\\u0084\\3\\u0084\\3\\u0084\\3\\u0084\\3\\u0084\\3\\u0084\")\n buf.write(\"\\3\\u0084\\3\\u0084\\3\\u0084\\3\\u0084\\3\\u0084\\3\\u0084\\3\\u0084\")\n buf.write(\"\\3\\u0085\\3\\u0085\\3\\u0085\\3\\u0085\\3\\u0085\\3\\u0085\\3\\u0085\")\n 
buf.write(\"\\3\\u0085\\3\\u0085\\3\\u0085\\3\\u0085\\3\\u0086\\3\\u0086\\3\\u0086\")\n buf.write(\"\\3\\u0086\\3\\u0086\\3\\u0086\\3\\u0086\\3\\u0086\\3\\u0087\\3\\u0087\")\n buf.write(\"\\3\\u0087\\3\\u0087\\3\\u0087\\3\\u0087\\3\\u0087\\3\\u0087\\3\\u0087\")\n buf.write(\"\\3\\u0087\\3\\u0088\\3\\u0088\\3\\u0088\\3\\u0088\\3\\u0088\\3\\u0088\")\n buf.write(\"\\3\\u0088\\3\\u0088\\3\\u0089\\3\\u0089\\3\\u0089\\3\\u0089\\3\\u0089\")\n buf.write(\"\\3\\u0089\\3\\u0089\\3\\u008a\\3\\u008a\\3\\u008a\\3\\u008a\\3\\u008a\")\n buf.write(\"\\3\\u008b\\3\\u008b\\3\\u008b\\3\\u008b\\3\\u008b\\3\\u008b\\3\\u008b\")\n buf.write(\"\\3\\u008b\\3\\u008c\\3\\u008c\\3\\u008c\\3\\u008c\\3\\u008c\\3\\u008c\")\n buf.write(\"\\3\\u008c\\3\\u008c\\3\\u008c\\3\\u008d\\3\\u008d\\3\\u008d\\3\\u008d\")\n buf.write(\"\\3\\u008d\\3\\u008d\\3\\u008d\\3\\u008d\\3\\u008e\\3\\u008e\\3\\u008e\")\n buf.write(\"\\3\\u008e\\3\\u008e\\3\\u008e\\3\\u008e\\3\\u008e\\3\\u008f\\3\\u008f\")\n buf.write(\"\\3\\u008f\\3\\u008f\\3\\u008f\\3\\u008f\\3\\u0090\\3\\u0090\\3\\u0090\")\n buf.write(\"\\3\\u0090\\3\\u0090\\3\\u0090\\3\\u0091\\3\\u0091\\3\\u0091\\3\\u0091\")\n buf.write(\"\\3\\u0091\\3\\u0091\\3\\u0092\\3\\u0092\\3\\u0092\\3\\u0092\\3\\u0092\")\n buf.write(\"\\3\\u0092\\3\\u0093\\3\\u0093\\3\\u0093\\3\\u0093\\3\\u0093\\3\\u0093\")\n buf.write(\"\\3\\u0093\\3\\u0093\\3\\u0093\\3\\u0093\\3\\u0093\\3\\u0093\\3\\u0094\")\n buf.write(\"\\3\\u0094\\3\\u0094\\3\\u0094\\3\\u0094\\3\\u0094\\3\\u0095\\3\\u0095\")\n buf.write(\"\\3\\u0095\\3\\u0095\\3\\u0095\\3\\u0095\\3\\u0095\\3\\u0095\\3\\u0095\")\n buf.write(\"\\3\\u0095\\3\\u0096\\3\\u0096\\3\\u0096\\3\\u0096\\3\\u0096\\3\\u0096\")\n buf.write(\"\\3\\u0096\\3\\u0096\\3\\u0097\\3\\u0097\\3\\u0097\\3\\u0097\\3\\u0098\")\n buf.write(\"\\3\\u0098\\3\\u0098\\3\\u0098\\3\\u0098\\3\\u0098\\3\\u0098\\3\\u0099\")\n buf.write(\"\\3\\u0099\\3\\u0099\\3\\u0099\\3\\u0099\\3\\u0099\\3\\u009a\\3\\u009a\")\n 
buf.write(\"\\3\\u009a\\3\\u009a\\3\\u009a\\3\\u009b\\3\\u009b\\3\\u009b\\3\\u009b\")\n buf.write(\"\\3\\u009b\\3\\u009c\\3\\u009c\\3\\u009c\\3\\u009c\\3\\u009c\\3\\u009c\")\n buf.write(\"\\3\\u009c\\3\\u009c\\3\\u009c\\3\\u009d\\3\\u009d\\3\\u009d\\3\\u009d\")\n buf.write(\"\\3\\u009d\\3\\u009e\\3\\u009e\\3\\u009e\\3\\u009e\\3\\u009e\\3\\u009e\")\n buf.write(\"\\3\\u009f\\3\\u009f\\3\\u009f\\3\\u009f\\3\\u009f\\3\\u009f\\3\\u00a0\")\n buf.write(\"\\3\\u00a0\\3\\u00a0\\3\\u00a0\\3\\u00a0\\3\\u00a0\\3\\u00a0\\3\\u00a0\")\n buf.write(\"\\3\\u00a0\\3\\u00a1\\3\\u00a1\\3\\u00a1\\3\\u00a1\\3\\u00a1\\3\\u00a2\")\n buf.write(\"\\3\\u00a2\\3\\u00a2\\3\\u00a2\\3\\u00a2\\3\\u00a2\\3\\u00a2\\3\\u00a3\")\n buf.write(\"\\3\\u00a3\\3\\u00a3\\3\\u00a3\\3\\u00a3\\3\\u00a4\\3\\u00a4\\3\\u00a4\")\n buf.write(\"\\3\\u00a4\\3\\u00a4\\3\\u00a5\\3\\u00a5\\3\\u00a5\\3\\u00a6\\3\\u00a6\")\n buf.write(\"\\3\\u00a6\\3\\u00a6\\3\\u00a6\\3\\u00a6\\3\\u00a6\\3\\u00a7\\3\\u00a7\")\n buf.write(\"\\3\\u00a7\\3\\u00a7\\3\\u00a7\\3\\u00a7\\3\\u00a7\\3\\u00a7\\3\\u00a7\")\n buf.write(\"\\3\\u00a7\\3\\u00a8\\3\\u00a8\\3\\u00a8\\3\\u00a9\\3\\u00a9\\3\\u00a9\")\n buf.write(\"\\3\\u00a9\\3\\u00a9\\3\\u00a9\\3\\u00a9\\3\\u00a9\\3\\u00aa\\3\\u00aa\")\n buf.write(\"\\3\\u00aa\\3\\u00aa\\3\\u00aa\\3\\u00aa\\3\\u00aa\\3\\u00aa\\3\\u00aa\")\n buf.write(\"\\3\\u00aa\\3\\u00ab\\3\\u00ab\\3\\u00ab\\3\\u00ab\\3\\u00ab\\3\\u00ab\")\n buf.write(\"\\3\\u00ab\\3\\u00ab\\3\\u00ab\\3\\u00ab\\3\\u00ac\\3\\u00ac\\3\\u00ac\")\n buf.write(\"\\3\\u00ac\\3\\u00ac\\3\\u00ac\\3\\u00ac\\3\\u00ad\\3\\u00ad\\3\\u00ad\")\n buf.write(\"\\3\\u00ad\\3\\u00ad\\3\\u00ad\\3\\u00ae\\3\\u00ae\\3\\u00ae\\3\\u00ae\")\n buf.write(\"\\3\\u00ae\\3\\u00ae\\3\\u00ae\\3\\u00ae\\3\\u00af\\3\\u00af\\3\\u00af\")\n buf.write(\"\\3\\u00af\\3\\u00af\\3\\u00af\\3\\u00af\\3\\u00af\\3\\u00af\\3\\u00af\")\n buf.write(\"\\3\\u00b0\\3\\u00b0\\3\\u00b0\\3\\u00b0\\3\\u00b0\\3\\u00b0\\3\\u00b0\")\n 
buf.write(\"\\3\\u00b0\\3\\u00b1\\3\\u00b1\\3\\u00b1\\3\\u00b1\\3\\u00b1\\3\\u00b1\")\n buf.write(\"\\3\\u00b1\\3\\u00b1\\3\\u00b1\\3\\u00b2\\3\\u00b2\\3\\u00b2\\3\\u00b2\")\n buf.write(\"\\3\\u00b2\\3\\u00b2\\3\\u00b2\\3\\u00b3\\3\\u00b3\\3\\u00b3\\3\\u00b3\")\n buf.write(\"\\3\\u00b3\\3\\u00b3\\3\\u00b4\\3\\u00b4\\3\\u00b4\\3\\u00b4\\3\\u00b4\")\n buf.write(\"\\3\\u00b4\\3\\u00b5\\3\\u00b5\\3\\u00b5\\3\\u00b5\\3\\u00b5\\3\\u00b5\")\n buf.write(\"\\3\\u00b5\\3\\u00b6\\3\\u00b6\\3\\u00b6\\3\\u00b6\\3\\u00b6\\3\\u00b6\")\n buf.write(\"\\3\\u00b6\\3\\u00b6\\3\\u00b6\\3\\u00b6\\3\\u00b6\\3\\u00b6\\3\\u00b6\")\n buf.write(\"\\3\\u00b7\\3\\u00b7\\3\\u00b7\\3\\u00b7\\3\\u00b7\\3\\u00b7\\3\\u00b7\")\n buf.write(\"\\3\\u00b7\\3\\u00b8\\3\\u00b8\\3\\u00b8\\3\\u00b8\\3\\u00b9\\3\\u00b9\")\n buf.write(\"\\3\\u00b9\\3\\u00b9\\3\\u00b9\\3\\u00b9\\3\\u00b9\\3\\u00b9\\3\\u00ba\")\n buf.write(\"\\3\\u00ba\\3\\u00ba\\3\\u00ba\\3\\u00ba\\3\\u00ba\\3\\u00ba\\3\\u00ba\")\n buf.write(\"\\3\\u00ba\\3\\u00ba\\3\\u00bb\\3\\u00bb\\3\\u00bb\\3\\u00bb\\3\\u00bb\")\n buf.write(\"\\3\\u00bb\\3\\u00bb\\3\\u00bb\\3\\u00bb\\3\\u00bc\\3\\u00bc\\3\\u00bc\")\n buf.write(\"\\3\\u00bc\\3\\u00bc\\3\\u00bd\\3\\u00bd\\3\\u00bd\\3\\u00bd\\3\\u00bd\")\n buf.write(\"\\3\\u00bd\\3\\u00bd\\3\\u00bd\\3\\u00bd\\3\\u00bd\\3\\u00bd\\3\\u00be\")\n buf.write(\"\\3\\u00be\\3\\u00be\\3\\u00bf\\3\\u00bf\\3\\u00bf\\3\\u00bf\\3\\u00bf\")\n buf.write(\"\\3\\u00bf\\3\\u00bf\\3\\u00bf\\3\\u00bf\\3\\u00bf\\3\\u00c0\\3\\u00c0\")\n buf.write(\"\\3\\u00c0\\3\\u00c0\\3\\u00c0\\3\\u00c0\\3\\u00c0\\3\\u00c0\\3\\u00c1\")\n buf.write(\"\\3\\u00c1\\3\\u00c1\\3\\u00c1\\3\\u00c1\\3\\u00c2\\3\\u00c2\\3\\u00c2\")\n buf.write(\"\\3\\u00c2\\3\\u00c2\\3\\u00c3\\3\\u00c3\\3\\u00c3\\3\\u00c3\\3\\u00c3\")\n buf.write(\"\\3\\u00c4\\3\\u00c4\\3\\u00c4\\3\\u00c4\\3\\u00c4\\3\\u00c4\\3\\u00c4\")\n buf.write(\"\\3\\u00c4\\3\\u00c4\\3\\u00c5\\3\\u00c5\\3\\u00c5\\3\\u00c5\\3\\u00c5\")\n 
buf.write(\"\\3\\u00c6\\3\\u00c6\\3\\u00c6\\3\\u00c6\\3\\u00c6\\3\\u00c6\\3\\u00c6\")\n buf.write(\"\\3\\u00c6\\3\\u00c6\\3\\u00c6\\3\\u00c6\\3\\u00c7\\3\\u00c7\\3\\u00c7\")\n buf.write(\"\\3\\u00c7\\3\\u00c7\\3\\u00c7\\3\\u00c7\\3\\u00c7\\3\\u00c8\\3\\u00c8\")\n buf.write(\"\\3\\u00c8\\3\\u00c8\\3\\u00c8\\3\\u00c9\\3\\u00c9\\3\\u00c9\\3\\u00c9\")\n buf.write(\"\\3\\u00c9\\3\\u00c9\\3\\u00ca\\3\\u00ca\\3\\u00ca\\3\\u00ca\\3\\u00ca\")\n buf.write(\"\\3\\u00ca\\3\\u00ca\\3\\u00ca\\3\\u00cb\\3\\u00cb\\3\\u00cb\\3\\u00cb\")\n buf.write(\"\\3\\u00cb\\3\\u00cc\\3\\u00cc\\3\\u00cc\\3\\u00cc\\3\\u00cc\\3\\u00cc\")\n buf.write(\"\\3\\u00cd\\3\\u00cd\\3\\u00cd\\3\\u00cd\\3\\u00cd\\3\\u00cd\\3\\u00ce\")\n buf.write(\"\\3\\u00ce\\3\\u00ce\\3\\u00ce\\3\\u00ce\\3\\u00ce\\3\\u00cf\\3\\u00cf\")\n buf.write(\"\\3\\u00cf\\3\\u00cf\\3\\u00cf\\3\\u00cf\\3\\u00d0\\3\\u00d0\\3\\u00d0\")\n buf.write(\"\\3\\u00d0\\3\\u00d0\\3\\u00d0\\3\\u00d1\\3\\u00d1\\3\\u00d1\\3\\u00d1\")\n buf.write(\"\\3\\u00d1\\3\\u00d2\\3\\u00d2\\3\\u00d2\\3\\u00d2\\3\\u00d2\\3\\u00d2\")\n buf.write(\"\\3\\u00d2\\3\\u00d3\\3\\u00d3\\3\\u00d3\\3\\u00d3\\3\\u00d4\\3\\u00d4\")\n buf.write(\"\\3\\u00d4\\3\\u00d4\\3\\u00d4\\3\\u00d4\\3\\u00d4\\3\\u00d5\\3\\u00d5\")\n buf.write(\"\\3\\u00d5\\3\\u00d5\\3\\u00d5\\3\\u00d5\\3\\u00d6\\3\\u00d6\\3\\u00d6\")\n buf.write(\"\\3\\u00d6\\3\\u00d6\\3\\u00d7\\3\\u00d7\\3\\u00d7\\3\\u00d7\\3\\u00d7\")\n buf.write(\"\\3\\u00d8\\3\\u00d8\\3\\u00d8\\3\\u00d8\\3\\u00d8\\3\\u00d9\\3\\u00d9\")\n buf.write(\"\\3\\u00d9\\3\\u00d9\\3\\u00da\\3\\u00da\\3\\u00da\\3\\u00da\\3\\u00da\")\n buf.write(\"\\3\\u00da\\3\\u00da\\3\\u00da\\3\\u00db\\3\\u00db\\3\\u00db\\3\\u00db\")\n buf.write(\"\\3\\u00db\\3\\u00db\\3\\u00db\\3\\u00db\\3\\u00db\\3\\u00dc\\3\\u00dc\")\n buf.write(\"\\3\\u00dc\\3\\u00dc\\3\\u00dc\\3\\u00dc\\3\\u00dc\\3\\u00dc\\3\\u00dc\")\n buf.write(\"\\3\\u00dd\\3\\u00dd\\3\\u00dd\\3\\u00dd\\3\\u00dd\\3\\u00dd\\3\\u00dd\")\n 
buf.write(\"\\3\\u00de\\3\\u00de\\3\\u00de\\3\\u00de\\3\\u00de\\3\\u00de\\3\\u00df\")\n buf.write(\"\\3\\u00df\\3\\u00df\\3\\u00df\\3\\u00df\\3\\u00df\\3\\u00e0\\3\\u00e0\")\n buf.write(\"\\3\\u00e0\\3\\u00e0\\3\\u00e0\\3\\u00e0\\3\\u00e0\\3\\u00e1\\3\\u00e1\")\n buf.write(\"\\3\\u00e1\\3\\u00e1\\3\\u00e1\\3\\u00e1\\3\\u00e1\\3\\u00e1\\3\\u00e1\")\n buf.write(\"\\3\\u00e2\\3\\u00e2\\3\\u00e2\\3\\u00e2\\3\\u00e2\\3\\u00e2\\3\\u00e2\")\n buf.write(\"\\3\\u00e2\\3\\u00e2\\3\\u00e3\\3\\u00e3\\3\\u00e3\\3\\u00e3\\3\\u00e3\")\n buf.write(\"\\3\\u00e4\\3\\u00e4\\3\\u00e4\\3\\u00e4\\3\\u00e4\\3\\u00e4\\3\\u00e5\")\n buf.write(\"\\3\\u00e5\\3\\u00e5\\3\\u00e5\\3\\u00e5\\3\\u00e5\\3\\u00e5\\3\\u00e6\")\n buf.write(\"\\3\\u00e6\\3\\u00e6\\3\\u00e6\\3\\u00e6\\3\\u00e6\\3\\u00e7\\3\\u00e7\")\n buf.write(\"\\3\\u00e7\\3\\u00e7\\3\\u00e7\\3\\u00e7\\3\\u00e7\\3\\u00e7\\3\\u00e7\")\n buf.write(\"\\3\\u00e8\\3\\u00e8\\3\\u00e8\\3\\u00e8\\3\\u00e8\\3\\u00e9\\3\\u00e9\")\n buf.write(\"\\3\\u00e9\\3\\u00e9\\3\\u00ea\\3\\u00ea\\3\\u00ea\\3\\u00ea\\3\\u00ea\")\n buf.write(\"\\3\\u00ea\\3\\u00ea\\3\\u00ea\\3\\u00eb\\3\\u00eb\\3\\u00eb\\3\\u00eb\")\n buf.write(\"\\3\\u00eb\\3\\u00eb\\3\\u00eb\\3\\u00eb\\3\\u00eb\\3\\u00ec\\3\\u00ec\")\n buf.write(\"\\3\\u00ec\\3\\u00ec\\3\\u00ed\\3\\u00ed\\3\\u00ed\\3\\u00ed\\3\\u00ed\")\n buf.write(\"\\3\\u00ed\\3\\u00ee\\3\\u00ee\\3\\u00ee\\3\\u00ee\\3\\u00ee\\3\\u00ee\")\n buf.write(\"\\3\\u00ee\\3\\u00ee\\3\\u00ee\\3\\u00ef\\3\\u00ef\\3\\u00ef\\3\\u00ef\")\n buf.write(\"\\3\\u00ef\\3\\u00ef\\3\\u00f0\\3\\u00f0\\3\\u00f0\\3\\u00f0\\3\\u00f0\")\n buf.write(\"\\3\\u00f0\\3\\u00f0\\3\\u00f1\\3\\u00f1\\3\\u00f1\\3\\u00f1\\3\\u00f2\")\n buf.write(\"\\3\\u00f2\\3\\u00f2\\3\\u00f3\\3\\u00f3\\3\\u00f3\\3\\u00f3\\3\\u00f3\")\n buf.write(\"\\3\\u00f3\\3\\u00f3\\3\\u00f3\\3\\u00f4\\3\\u00f4\\3\\u00f4\\3\\u00f4\")\n buf.write(\"\\3\\u00f4\\3\\u00f4\\3\\u00f4\\3\\u00f4\\3\\u00f5\\3\\u00f5\\3\\u00f5\")\n 
buf.write(\"\\3\\u00f5\\3\\u00f5\\3\\u00f5\\3\\u00f5\\3\\u00f6\\3\\u00f6\\3\\u00f6\")\n buf.write(\"\\3\\u00f6\\3\\u00f6\\3\\u00f6\\3\\u00f6\\3\\u00f6\\3\\u00f7\\3\\u00f7\")\n buf.write(\"\\3\\u00f7\\3\\u00f7\\3\\u00f7\\3\\u00f7\\3\\u00f7\\3\\u00f7\\3\\u00f7\")\n buf.write(\"\\3\\u00f7\\3\\u00f7\\3\\u00f7\\3\\u00f7\\3\\u00f7\\3\\u00f7\\3\\u00f7\")\n buf.write(\"\\3\\u00f7\\3\\u00f8\\3\\u00f8\\3\\u00f8\\3\\u00f8\\3\\u00f8\\3\\u00f8\")\n buf.write(\"\\3\\u00f8\\3\\u00f8\\3\\u00f8\\3\\u00f8\\3\\u00f8\\3\\u00f9\\3\\u00f9\")\n buf.write(\"\\3\\u00f9\\3\\u00f9\\3\\u00f9\\3\\u00f9\\3\\u00f9\\3\\u00f9\\3\\u00f9\")\n buf.write(\"\\3\\u00f9\\3\\u00f9\\3\\u00fa\\3\\u00fa\\3\\u00fa\\3\\u00fa\\3\\u00fa\")\n buf.write(\"\\3\\u00fb\\3\\u00fb\\3\\u00fb\\3\\u00fb\\3\\u00fb\\3\\u00fb\\3\\u00fb\")\n buf.write(\"\\3\\u00fb\\3\\u00fc\\3\\u00fc\\3\\u00fc\\3\\u00fc\\3\\u00fc\\3\\u00fc\")\n buf.write(\"\\3\\u00fc\\3\\u00fc\\3\\u00fc\\3\\u00fc\\3\\u00fc\\3\\u00fc\\3\\u00fc\")\n buf.write(\"\\3\\u00fc\\3\\u00fd\\3\\u00fd\\3\\u00fd\\3\\u00fd\\3\\u00fe\\3\\u00fe\")\n buf.write(\"\\3\\u00fe\\3\\u00fe\\3\\u00fe\\3\\u00fe\\3\\u00fe\\3\\u00ff\\3\\u00ff\")\n buf.write(\"\\3\\u00ff\\3\\u00ff\\3\\u00ff\\3\\u0100\\3\\u0100\\3\\u0100\\3\\u0100\")\n buf.write(\"\\3\\u0100\\3\\u0100\\3\\u0101\\3\\u0101\\3\\u0101\\3\\u0101\\3\\u0101\")\n buf.write(\"\\3\\u0101\\3\\u0101\\3\\u0102\\3\\u0102\\3\\u0102\\3\\u0102\\3\\u0102\")\n buf.write(\"\\3\\u0102\\3\\u0102\\3\\u0102\\3\\u0103\\3\\u0103\\3\\u0103\\3\\u0103\")\n buf.write(\"\\3\\u0103\\3\\u0103\\3\\u0103\\3\\u0103\\3\\u0103\\3\\u0103\\3\\u0104\")\n buf.write(\"\\3\\u0104\\3\\u0104\\3\\u0104\\3\\u0104\\3\\u0104\\3\\u0104\\3\\u0105\")\n buf.write(\"\\3\\u0105\\3\\u0105\\3\\u0106\\3\\u0106\\3\\u0106\\3\\u0106\\3\\u0107\")\n buf.write(\"\\3\\u0107\\3\\u0107\\3\\u0107\\3\\u0108\\3\\u0108\\3\\u0108\\3\\u0108\")\n buf.write(\"\\3\\u0109\\3\\u0109\\3\\u0109\\3\\u010a\\3\\u010a\\3\\u010a\\3\\u010a\")\n 
buf.write(\"\\3\\u010a\\3\\u010b\\3\\u010b\\3\\u010b\\3\\u010b\\3\\u010b\\3\\u010c\")\n buf.write(\"\\3\\u010c\\3\\u010c\\3\\u010c\\3\\u010c\\3\\u010c\\3\\u010c\\3\\u010d\")\n buf.write(\"\\3\\u010d\\3\\u010d\\3\\u010e\\3\\u010e\\3\\u010e\\3\\u010e\\3\\u010e\")\n buf.write(\"\\3\\u010e\\3\\u010e\\3\\u010e\\3\\u010f\\3\\u010f\\3\\u010f\\3\\u010f\")\n buf.write(\"\\3\\u010f\\3\\u010f\\3\\u0110\\3\\u0110\\3\\u0110\\3\\u0110\\3\\u0110\")\n buf.write(\"\\3\\u0110\\3\\u0110\\3\\u0110\\3\\u0110\\3\\u0110\\3\\u0110\\3\\u0111\")\n buf.write(\"\\3\\u0111\\3\\u0111\\3\\u0111\\3\\u0111\\3\\u0111\\3\\u0111\\3\\u0111\")\n buf.write(\"\\3\\u0112\\3\\u0112\\3\\u0112\\3\\u0112\\3\\u0113\\3\\u0113\\3\\u0113\")\n buf.write(\"\\3\\u0113\\3\\u0113\\3\\u0113\\3\\u0114\\3\\u0114\\3\\u0114\\3\\u0114\")\n buf.write(\"\\3\\u0114\\3\\u0115\\3\\u0115\\3\\u0115\\3\\u0115\\3\\u0115\\3\\u0115\")\n buf.write(\"\\3\\u0115\\3\\u0115\\3\\u0115\\3\\u0115\\3\\u0115\\3\\u0116\\3\\u0116\")\n buf.write(\"\\3\\u0116\\3\\u0116\\3\\u0116\\3\\u0116\\3\\u0116\\3\\u0116\\3\\u0117\")\n buf.write(\"\\3\\u0117\\3\\u0117\\3\\u0117\\3\\u0117\\3\\u0117\\3\\u0117\\3\\u0117\")\n buf.write(\"\\3\\u0117\\3\\u0117\\3\\u0117\\3\\u0117\\3\\u0117\\3\\u0117\\3\\u0117\")\n buf.write(\"\\3\\u0117\\3\\u0118\\3\\u0118\\3\\u0118\\3\\u0118\\3\\u0118\\3\\u0118\")\n buf.write(\"\\3\\u0118\\3\\u0118\\3\\u0118\\3\\u0118\\3\\u0118\\3\\u0119\\3\\u0119\")\n buf.write(\"\\3\\u0119\\3\\u0119\\3\\u0119\\3\\u0119\\3\\u0119\\3\\u011a\\3\\u011a\")\n buf.write(\"\\3\\u011a\\3\\u011a\\3\\u011a\\3\\u011a\\3\\u011a\\3\\u011a\\3\\u011a\")\n buf.write(\"\\3\\u011a\\3\\u011b\\3\\u011b\\3\\u011b\\3\\u011b\\3\\u011b\\3\\u011b\")\n buf.write(\"\\3\\u011b\\3\\u011b\\3\\u011c\\3\\u011c\\3\\u011c\\3\\u011c\\3\\u011c\")\n buf.write(\"\\3\\u011d\\3\\u011d\\3\\u011d\\3\\u011d\\3\\u011d\\3\\u011d\\3\\u011d\")\n buf.write(\"\\3\\u011d\\3\\u011d\\3\\u011e\\3\\u011e\\3\\u011e\\3\\u011e\\3\\u011e\")\n 
buf.write(\"\\3\\u011e\\3\\u011f\\3\\u011f\\3\\u011f\\3\\u011f\\3\\u011f\\3\\u011f\")\n buf.write(\"\\3\\u011f\\3\\u011f\\3\\u011f\\3\\u011f\\3\\u0120\\3\\u0120\\3\\u0120\")\n buf.write(\"\\3\\u0120\\3\\u0120\\3\\u0120\\3\\u0121\\3\\u0121\\3\\u0121\\3\\u0121\")\n buf.write(\"\\3\\u0121\\3\\u0122\\3\\u0122\\3\\u0122\\3\\u0122\\3\\u0122\\3\\u0122\")\n buf.write(\"\\3\\u0122\\3\\u0122\\3\\u0122\\3\\u0122\\3\\u0122\\3\\u0122\\3\\u0123\")\n buf.write(\"\\3\\u0123\\3\\u0123\\3\\u0123\\3\\u0123\\3\\u0123\\3\\u0123\\3\\u0123\")\n buf.write(\"\\3\\u0123\\3\\u0124\\3\\u0124\\3\\u0124\\3\\u0124\\3\\u0124\\3\\u0124\")\n buf.write(\"\\3\\u0124\\3\\u0124\\3\\u0124\\3\\u0124\\3\\u0125\\3\\u0125\\3\\u0125\")\n buf.write(\"\\3\\u0125\\3\\u0125\\3\\u0125\\3\\u0125\\3\\u0126\\3\\u0126\\3\\u0126\")\n buf.write(\"\\3\\u0126\\3\\u0126\\3\\u0126\\3\\u0126\\3\\u0126\\3\\u0126\\3\\u0126\")\n buf.write(\"\\3\\u0127\\3\\u0127\\3\\u0127\\3\\u0127\\3\\u0127\\3\\u0127\\3\\u0127\")\n buf.write(\"\\3\\u0127\\3\\u0127\\3\\u0127\\3\\u0128\\3\\u0128\\3\\u0128\\3\\u0128\")\n buf.write(\"\\3\\u0128\\3\\u0128\\3\\u0128\\3\\u0128\\3\\u0129\\3\\u0129\\3\\u0129\")\n buf.write(\"\\3\\u0129\\3\\u0129\\3\\u0129\\3\\u012a\\3\\u012a\\3\\u012a\\3\\u012a\")\n buf.write(\"\\3\\u012a\\3\\u012a\\3\\u012a\\3\\u012a\\3\\u012a\\3\\u012a\\3\\u012b\")\n buf.write(\"\\3\\u012b\\3\\u012b\\3\\u012b\\3\\u012b\\3\\u012b\\3\\u012c\\3\\u012c\")\n buf.write(\"\\3\\u012c\\3\\u012c\\3\\u012c\\3\\u012c\\3\\u012d\\3\\u012d\\3\\u012d\")\n buf.write(\"\\3\\u012d\\3\\u012e\\3\\u012e\\3\\u012e\\3\\u012e\\3\\u012e\\3\\u012f\")\n buf.write(\"\\3\\u012f\\3\\u012f\\3\\u012f\\3\\u012f\\3\\u0130\\3\\u0130\\3\\u0130\")\n buf.write(\"\\3\\u0130\\3\\u0130\\3\\u0130\\3\\u0130\\3\\u0131\\3\\u0131\\3\\u0131\")\n buf.write(\"\\3\\u0131\\3\\u0132\\3\\u0132\\3\\u0132\\3\\u0132\\3\\u0132\\3\\u0132\")\n buf.write(\"\\3\\u0132\\3\\u0132\\3\\u0132\\3\\u0132\\3\\u0133\\3\\u0133\\3\\u0133\")\n 
buf.write(\"\\3\\u0133\\3\\u0133\\3\\u0133\\3\\u0133\\3\\u0133\\3\\u0133\\3\\u0133\")\n buf.write(\"\\3\\u0133\\3\\u0133\\3\\u0134\\3\\u0134\\3\\u0134\\3\\u0134\\3\\u0134\")\n buf.write(\"\\3\\u0134\\3\\u0134\\3\\u0135\\3\\u0135\\3\\u0135\\3\\u0135\\3\\u0135\")\n buf.write(\"\\3\\u0135\\3\\u0135\\3\\u0135\\3\\u0135\\3\\u0135\\3\\u0136\\3\\u0136\")\n buf.write(\"\\3\\u0136\\3\\u0136\\3\\u0136\\3\\u0136\\3\\u0136\\3\\u0137\\3\\u0137\")\n buf.write(\"\\3\\u0137\\3\\u0137\\3\\u0137\\3\\u0137\\3\\u0137\\3\\u0137\\3\\u0138\")\n buf.write(\"\\3\\u0138\\3\\u0138\\3\\u0138\\3\\u0138\\3\\u0138\\3\\u0138\\3\\u0138\")\n buf.write(\"\\3\\u0139\\3\\u0139\\3\\u0139\\3\\u0139\\3\\u0139\\3\\u0139\\3\\u0139\")\n buf.write(\"\\3\\u0139\\3\\u0139\\3\\u0139\\3\\u0139\\3\\u0139\\3\\u0139\\3\\u0139\")\n buf.write(\"\\3\\u0139\\3\\u0139\\3\\u0139\\3\\u0139\\3\\u0139\\3\\u0139\\3\\u013a\")\n buf.write(\"\\3\\u013a\\3\\u013a\\3\\u013a\\3\\u013a\\3\\u013a\\3\\u013a\\3\\u013b\")\n buf.write(\"\\3\\u013b\\3\\u013b\\3\\u013b\\3\\u013b\\3\\u013b\\3\\u013b\\3\\u013b\")\n buf.write(\"\\3\\u013b\\3\\u013b\\3\\u013b\\3\\u013b\\3\\u013b\\3\\u013c\\3\\u013c\")\n buf.write(\"\\3\\u013c\\3\\u013c\\3\\u013c\\3\\u013c\\3\\u013c\\3\\u013d\\3\\u013d\")\n buf.write(\"\\3\\u013d\\3\\u013d\\3\\u013d\\3\\u013d\\3\\u013d\\3\\u013d\\3\\u013d\")\n buf.write(\"\\3\\u013d\\3\\u013e\\3\\u013e\\3\\u013e\\3\\u013e\\3\\u013e\\3\\u013e\")\n buf.write(\"\\3\\u013f\\3\\u013f\\3\\u013f\\3\\u013f\\3\\u013f\\3\\u013f\\3\\u013f\")\n buf.write(\"\\3\\u013f\\3\\u0140\\3\\u0140\\3\\u0140\\3\\u0140\\3\\u0140\\3\\u0140\")\n buf.write(\"\\3\\u0140\\3\\u0141\\3\\u0141\\3\\u0141\\3\\u0141\\3\\u0141\\3\\u0141\")\n buf.write(\"\\3\\u0142\\3\\u0142\\3\\u0142\\3\\u0142\\3\\u0142\\3\\u0142\\3\\u0142\")\n buf.write(\"\\3\\u0142\\3\\u0142\\3\\u0143\\3\\u0143\\3\\u0143\\3\\u0143\\3\\u0143\")\n buf.write(\"\\3\\u0143\\3\\u0143\\3\\u0144\\3\\u0144\\3\\u0144\\3\\u0144\\3\\u0145\")\n 
buf.write(\"\\3\\u0145\\3\\u0145\\3\\u0145\\3\\u0145\\3\\u0145\\3\\u0146\\3\\u0146\")\n buf.write(\"\\3\\u0146\\3\\u0146\\3\\u0146\\3\\u0147\\3\\u0147\\3\\u0147\\3\\u0147\")\n buf.write(\"\\3\\u0147\\3\\u0147\\3\\u0148\\3\\u0148\\3\\u0148\\3\\u0148\\3\\u0148\")\n buf.write(\"\\3\\u0148\\3\\u0148\\3\\u0149\\3\\u0149\\3\\u0149\\3\\u0149\\3\\u0149\")\n buf.write(\"\\3\\u014a\\3\\u014a\\3\\u014a\\3\\u014a\\3\\u014a\\3\\u014a\\3\\u014a\")\n buf.write(\"\\3\\u014a\\3\\u014a\\3\\u014a\\3\\u014b\\3\\u014b\\3\\u014b\\3\\u014b\")\n buf.write(\"\\3\\u014b\\3\\u014b\\3\\u014b\\3\\u014c\\3\\u014c\\3\\u014c\\3\\u014c\")\n buf.write(\"\\3\\u014c\\3\\u014c\\3\\u014c\\3\\u014c\\3\\u014c\\3\\u014c\\3\\u014c\")\n buf.write(\"\\3\\u014c\\3\\u014d\\3\\u014d\\3\\u014d\\3\\u014d\\3\\u014e\\3\\u014e\")\n buf.write(\"\\3\\u014e\\3\\u014e\\3\\u014e\\3\\u014e\\3\\u014e\\3\\u014f\\3\\u014f\")\n buf.write(\"\\3\\u014f\\3\\u014f\\3\\u014f\\3\\u014f\\3\\u014f\\3\\u0150\\3\\u0150\")\n buf.write(\"\\3\\u0150\\3\\u0150\\3\\u0150\\3\\u0151\\3\\u0151\\3\\u0151\\3\\u0151\")\n buf.write(\"\\3\\u0151\\3\\u0151\\3\\u0151\\3\\u0151\\3\\u0152\\3\\u0152\\3\\u0152\")\n buf.write(\"\\3\\u0152\\3\\u0152\\3\\u0152\\3\\u0152\\3\\u0153\\3\\u0153\\3\\u0153\")\n buf.write(\"\\3\\u0153\\3\\u0153\\3\\u0154\\3\\u0154\\3\\u0154\\3\\u0154\\3\\u0154\")\n buf.write(\"\\3\\u0154\\3\\u0154\\3\\u0154\\3\\u0154\\3\\u0155\\3\\u0155\\3\\u0155\")\n buf.write(\"\\3\\u0155\\3\\u0155\\3\\u0155\\3\\u0155\\3\\u0155\\3\\u0155\\3\\u0155\")\n buf.write(\"\\3\\u0155\\3\\u0156\\3\\u0156\\3\\u0156\\3\\u0156\\3\\u0156\\3\\u0156\")\n buf.write(\"\\3\\u0156\\3\\u0156\\3\\u0156\\3\\u0156\\3\\u0156\\3\\u0156\\3\\u0156\")\n buf.write(\"\\3\\u0157\\3\\u0157\\3\\u0157\\3\\u0157\\3\\u0157\\3\\u0157\\3\\u0157\")\n buf.write(\"\\3\\u0157\\3\\u0157\\3\\u0157\\3\\u0157\\3\\u0157\\3\\u0157\\3\\u0157\")\n buf.write(\"\\3\\u0157\\3\\u0157\\3\\u0157\\3\\u0157\\3\\u0158\\3\\u0158\\3\\u0158\")\n 
buf.write(\"\\3\\u0158\\3\\u0158\\3\\u0158\\3\\u0158\\3\\u0158\\3\\u0158\\3\\u0158\")\n buf.write(\"\\3\\u0158\\3\\u0158\\3\\u0159\\3\\u0159\\3\\u0159\\3\\u0159\\3\\u0159\")\n buf.write(\"\\3\\u0159\\3\\u0159\\3\\u0159\\3\\u0159\\3\\u0159\\3\\u0159\\3\\u0159\")\n buf.write(\"\\3\\u0159\\3\\u0159\\3\\u0159\\3\\u0159\\3\\u015a\\3\\u015a\\3\\u015a\")\n buf.write(\"\\3\\u015a\\3\\u015b\\3\\u015b\\3\\u015b\\3\\u015b\\3\\u015b\\3\\u015c\")\n buf.write(\"\\3\\u015c\\3\\u015c\\3\\u015c\\3\\u015c\\3\\u015c\\3\\u015c\\3\\u015c\")\n buf.write(\"\\3\\u015c\\3\\u015d\\3\\u015d\\3\\u015d\\3\\u015d\\3\\u015d\\3\\u015d\")\n buf.write(\"\\3\\u015e\\3\\u015e\\3\\u015e\\3\\u015e\\3\\u015e\\3\\u015f\\3\\u015f\")\n buf.write(\"\\3\\u015f\\3\\u015f\\3\\u015f\\3\\u015f\\3\\u015f\\3\\u015f\\3\\u015f\")\n buf.write(\"\\3\\u0160\\3\\u0160\\3\\u0160\\3\\u0160\\3\\u0160\\3\\u0160\\3\\u0160\")\n buf.write(\"\\3\\u0160\\3\\u0160\\3\\u0161\\3\\u0161\\3\\u0161\\3\\u0161\\3\\u0161\")\n buf.write(\"\\3\\u0161\\3\\u0161\\3\\u0161\\3\\u0161\\3\\u0162\\3\\u0162\\3\\u0162\")\n buf.write(\"\\3\\u0162\\3\\u0162\\3\\u0162\\3\\u0162\\3\\u0162\\3\\u0162\\3\\u0162\")\n buf.write(\"\\3\\u0162\\3\\u0162\\3\\u0162\\3\\u0162\\3\\u0162\\3\\u0163\\3\\u0163\")\n buf.write(\"\\3\\u0163\\3\\u0163\\3\\u0163\\3\\u0163\\3\\u0163\\3\\u0164\\3\\u0164\")\n buf.write(\"\\3\\u0164\\3\\u0164\\3\\u0164\\3\\u0165\\3\\u0165\\3\\u0165\\3\\u0165\")\n buf.write(\"\\3\\u0165\\3\\u0166\\3\\u0166\\3\\u0166\\3\\u0166\\3\\u0166\\3\\u0166\")\n buf.write(\"\\3\\u0166\\3\\u0166\\3\\u0166\\3\\u0167\\3\\u0167\\3\\u0167\\3\\u0167\")\n buf.write(\"\\3\\u0167\\3\\u0167\\3\\u0167\\3\\u0167\\3\\u0167\\3\\u0168\\3\\u0168\")\n buf.write(\"\\3\\u0168\\3\\u0168\\3\\u0168\\3\\u0169\\3\\u0169\\3\\u0169\\3\\u0169\")\n buf.write(\"\\3\\u0169\\3\\u0169\\3\\u0169\\3\\u0169\\3\\u0169\\3\\u0169\\3\\u0169\")\n buf.write(\"\\3\\u0169\\3\\u0169\\3\\u0169\\3\\u016a\\3\\u016a\\3\\u016a\\3\\u016a\")\n 
buf.write(\"\\3\\u016a\\3\\u016a\\3\\u016a\\3\\u016a\\3\\u016b\\3\\u016b\\3\\u016b\")\n buf.write(\"\\3\\u016b\\3\\u016b\\3\\u016b\\3\\u016b\\3\\u016b\\3\\u016b\\3\\u016c\")\n buf.write(\"\\3\\u016c\\3\\u016c\\3\\u016c\\3\\u016c\\3\\u016c\\3\\u016c\\3\\u016c\")\n buf.write(\"\\3\\u016c\\3\\u016c\\3\\u016c\\3\\u016d\\3\\u016d\\3\\u016d\\3\\u016d\")\n buf.write(\"\\3\\u016d\\3\\u016d\\3\\u016e\\3\\u016e\\3\\u016e\\3\\u016e\\3\\u016e\")\n buf.write(\"\\3\\u016e\\3\\u016e\\3\\u016e\\3\\u016f\\3\\u016f\\3\\u016f\\3\\u016f\")\n buf.write(\"\\3\\u016f\\3\\u016f\\3\\u016f\\3\\u016f\\3\\u016f\\3\\u016f\\3\\u0170\")\n buf.write(\"\\3\\u0170\\3\\u0170\\3\\u0170\\3\\u0170\\3\\u0170\\3\\u0170\\3\\u0170\")\n buf.write(\"\\3\\u0170\\3\\u0170\\3\\u0170\\3\\u0170\\3\\u0170\\3\\u0171\\3\\u0171\")\n buf.write(\"\\3\\u0171\\3\\u0171\\3\\u0171\\3\\u0171\\3\\u0171\\3\\u0172\\3\\u0172\")\n buf.write(\"\\3\\u0172\\3\\u0172\\3\\u0172\\3\\u0172\\3\\u0172\\3\\u0172\\3\\u0172\")\n buf.write(\"\\3\\u0172\\3\\u0172\\3\\u0173\\3\\u0173\\3\\u0173\\3\\u0173\\3\\u0173\")\n buf.write(\"\\3\\u0173\\3\\u0173\\3\\u0174\\3\\u0174\\3\\u0174\\3\\u0174\\3\\u0174\")\n buf.write(\"\\3\\u0174\\3\\u0174\\3\\u0174\\3\\u0174\\3\\u0174\\3\\u0174\\3\\u0174\")\n buf.write(\"\\3\\u0175\\3\\u0175\\3\\u0175\\3\\u0175\\3\\u0175\\3\\u0175\\3\\u0175\")\n buf.write(\"\\3\\u0175\\3\\u0175\\3\\u0175\\3\\u0175\\3\\u0175\\3\\u0175\\3\\u0176\")\n buf.write(\"\\3\\u0176\\3\\u0176\\3\\u0176\\3\\u0176\\3\\u0176\\3\\u0176\\3\\u0176\")\n buf.write(\"\\3\\u0176\\3\\u0176\\3\\u0176\\3\\u0176\\3\\u0176\\3\\u0176\\3\\u0177\")\n buf.write(\"\\3\\u0177\\3\\u0177\\3\\u0177\\3\\u0177\\3\\u0177\\3\\u0177\\3\\u0177\")\n buf.write(\"\\3\\u0178\\3\\u0178\\3\\u0178\\3\\u0178\\3\\u0178\\3\\u0178\\3\\u0178\")\n buf.write(\"\\3\\u0178\\3\\u0179\\3\\u0179\\3\\u0179\\3\\u0179\\3\\u0179\\3\\u0179\")\n buf.write(\"\\3\\u0179\\3\\u0179\\3\\u017a\\3\\u017a\\3\\u017a\\3\\u017a\\3\\u017a\")\n 
buf.write(\"\\3\\u017a\\3\\u017b\\3\\u017b\\3\\u017b\\3\\u017b\\3\\u017c\\3\\u017c\")\n buf.write(\"\\3\\u017c\\3\\u017c\\3\\u017c\\3\\u017d\\3\\u017d\\3\\u017d\\3\\u017d\")\n buf.write(\"\\3\\u017d\\3\\u017e\\3\\u017e\\3\\u017e\\3\\u017e\\3\\u017e\\3\\u017e\")\n buf.write(\"\\3\\u017e\\3\\u017e\\3\\u017e\\3\\u017e\\3\\u017f\\3\\u017f\\3\\u017f\")\n buf.write(\"\\3\\u017f\\3\\u017f\\3\\u017f\\3\\u017f\\3\\u017f\\3\\u017f\\3\\u017f\")\n buf.write(\"\\3\\u017f\\3\\u017f\\3\\u017f\\3\\u017f\\3\\u017f\\3\\u017f\\3\\u017f\")\n buf.write(\"\\3\\u017f\\3\\u017f\\3\\u017f\\3\\u017f\\3\\u017f\\3\\u017f\\3\\u017f\")\n buf.write(\"\\3\\u017f\\3\\u017f\\3\\u017f\\3\\u017f\\3\\u0180\\3\\u0180\\3\\u0180\")\n buf.write(\"\\3\\u0180\\3\\u0180\\3\\u0180\\3\\u0180\\3\\u0180\\3\\u0180\\3\\u0180\")\n buf.write(\"\\3\\u0180\\3\\u0180\\3\\u0180\\3\\u0180\\3\\u0180\\3\\u0180\\3\\u0180\")\n buf.write(\"\\3\\u0180\\3\\u0180\\3\\u0180\\3\\u0180\\3\\u0180\\3\\u0180\\3\\u0180\")\n buf.write(\"\\3\\u0180\\3\\u0180\\3\\u0180\\3\\u0181\\3\\u0181\\3\\u0181\\3\\u0181\")\n buf.write(\"\\3\\u0181\\3\\u0181\\3\\u0181\\3\\u0181\\3\\u0181\\3\\u0181\\3\\u0181\")\n buf.write(\"\\3\\u0181\\3\\u0181\\3\\u0181\\3\\u0181\\3\\u0181\\3\\u0181\\3\\u0181\")\n buf.write(\"\\3\\u0181\\3\\u0181\\3\\u0181\\3\\u0181\\3\\u0181\\3\\u0181\\3\\u0182\")\n buf.write(\"\\3\\u0182\\3\\u0182\\3\\u0182\\3\\u0182\\3\\u0182\\3\\u0182\\3\\u0182\")\n buf.write(\"\\3\\u0182\\3\\u0182\\3\\u0182\\3\\u0182\\3\\u0182\\3\\u0182\\3\\u0183\")\n buf.write(\"\\3\\u0183\\3\\u0183\\3\\u0183\\3\\u0183\\3\\u0183\\3\\u0183\\3\\u0183\")\n buf.write(\"\\3\\u0183\\3\\u0183\\3\\u0183\\3\\u0183\\3\\u0183\\3\\u0183\\3\\u0184\")\n buf.write(\"\\3\\u0184\\3\\u0184\\3\\u0184\\3\\u0184\\3\\u0184\\3\\u0184\\3\\u0184\")\n buf.write(\"\\3\\u0184\\3\\u0184\\3\\u0184\\3\\u0184\\3\\u0184\\3\\u0184\\3\\u0184\")\n buf.write(\"\\3\\u0184\\3\\u0185\\3\\u0185\\3\\u0185\\3\\u0185\\3\\u0185\\3\\u0185\")\n 
buf.write(\"\\3\\u0185\\3\\u0185\\3\\u0185\\3\\u0185\\3\\u0185\\3\\u0185\\3\\u0185\")\n buf.write(\"\\3\\u0185\\3\\u0185\\3\\u0185\\3\\u0186\\3\\u0186\\3\\u0186\\3\\u0187\")\n buf.write(\"\\3\\u0187\\3\\u0187\\3\\u0187\\3\\u0187\\3\\u0187\\3\\u0187\\3\\u0187\")\n buf.write(\"\\3\\u0187\\3\\u0188\\3\\u0188\\3\\u0188\\3\\u0188\\3\\u0188\\3\\u0188\")\n buf.write(\"\\3\\u0188\\3\\u0188\\3\\u0188\\3\\u0188\\3\\u0188\\3\\u0188\\3\\u0189\")\n buf.write(\"\\3\\u0189\\3\\u0189\\3\\u0189\\3\\u0189\\3\\u0189\\3\\u0189\\3\\u0189\")\n buf.write(\"\\3\\u0189\\3\\u0189\\3\\u018a\\3\\u018a\\3\\u018a\\3\\u018a\\3\\u018a\")\n buf.write(\"\\3\\u018a\\3\\u018b\\3\\u018b\\3\\u018b\\3\\u018b\\3\\u018b\\3\\u018b\")\n buf.write(\"\\3\\u018b\\3\\u018b\\3\\u018c\\3\\u018c\\3\\u018c\\3\\u018c\\3\\u018c\")\n buf.write(\"\\3\\u018d\\3\\u018d\\3\\u018d\\3\\u018d\\3\\u018d\\3\\u018e\\3\\u018e\")\n buf.write(\"\\3\\u018e\\3\\u018e\\3\\u018e\\3\\u018e\\3\\u018e\\3\\u018e\\3\\u018e\")\n buf.write(\"\\3\\u018f\\3\\u018f\\3\\u018f\\3\\u018f\\3\\u018f\\3\\u0190\\3\\u0190\")\n buf.write(\"\\3\\u0190\\3\\u0190\\3\\u0190\\3\\u0190\\3\\u0190\\3\\u0190\\3\\u0190\")\n buf.write(\"\\3\\u0190\\3\\u0191\\3\\u0191\\3\\u0191\\3\\u0191\\3\\u0191\\3\\u0191\")\n buf.write(\"\\3\\u0192\\3\\u0192\\3\\u0192\\3\\u0192\\3\\u0192\\3\\u0192\\3\\u0193\")\n buf.write(\"\\3\\u0193\\3\\u0193\\3\\u0193\\3\\u0193\\3\\u0193\\3\\u0193\\3\\u0194\")\n buf.write(\"\\3\\u0194\\3\\u0194\\3\\u0194\\3\\u0194\\3\\u0194\\3\\u0194\\3\\u0194\")\n buf.write(\"\\3\\u0194\\3\\u0194\\3\\u0195\\3\\u0195\\3\\u0195\\3\\u0195\\3\\u0195\")\n buf.write(\"\\3\\u0195\\3\\u0195\\3\\u0195\\3\\u0196\\3\\u0196\\3\\u0196\\3\\u0196\")\n buf.write(\"\\3\\u0196\\3\\u0196\\3\\u0197\\3\\u0197\\3\\u0197\\3\\u0197\\3\\u0197\")\n buf.write(\"\\3\\u0197\\3\\u0197\\3\\u0198\\3\\u0198\\3\\u0198\\3\\u0198\\3\\u0198\")\n buf.write(\"\\3\\u0198\\3\\u0198\\3\\u0198\\3\\u0199\\3\\u0199\\3\\u0199\\3\\u0199\")\n 
buf.write(\"\\3\\u0199\\3\\u0199\\3\\u0199\\3\\u019a\\3\\u019a\\3\\u019a\\3\\u019a\")\n buf.write(\"\\3\\u019a\\3\\u019a\\3\\u019a\\3\\u019b\\3\\u019b\\3\\u019b\\3\\u019b\")\n buf.write(\"\\3\\u019c\\3\\u019c\\3\\u019c\\3\\u019c\\3\\u019c\\3\\u019c\\3\\u019d\")\n buf.write(\"\\3\\u019d\\3\\u019d\\3\\u019d\\3\\u019d\\3\\u019d\\3\\u019d\\3\\u019d\")\n buf.write(\"\\3\\u019d\\3\\u019e\\3\\u019e\\3\\u019e\\3\\u019e\\3\\u019e\\3\\u019e\")\n buf.write(\"\\3\\u019f\\3\\u019f\\3\\u019f\\3\\u019f\\3\\u019f\\3\\u019f\\3\\u019f\")\n buf.write(\"\\3\\u01a0\\3\\u01a0\\3\\u01a0\\3\\u01a0\\3\\u01a0\\3\\u01a0\\3\\u01a0\")\n buf.write(\"\\3\\u01a0\\3\\u01a1\\3\\u01a1\\3\\u01a1\\3\\u01a1\\3\\u01a1\\3\\u01a1\")\n buf.write(\"\\3\\u01a1\\3\\u01a1\\3\\u01a1\\3\\u01a2\\3\\u01a2\\3\\u01a2\\3\\u01a2\")\n buf.write(\"\\3\\u01a2\\3\\u01a2\\3\\u01a2\\3\\u01a2\\3\\u01a2\\3\\u01a3\\3\\u01a3\")\n buf.write(\"\\3\\u01a3\\3\\u01a3\\3\\u01a3\\3\\u01a3\\3\\u01a3\\3\\u01a4\\3\\u01a4\")\n buf.write(\"\\3\\u01a4\\3\\u01a4\\3\\u01a4\\3\\u01a4\\3\\u01a4\\3\\u01a4\\3\\u01a5\")\n buf.write(\"\\3\\u01a5\\3\\u01a5\\3\\u01a5\\3\\u01a5\\3\\u01a5\\3\\u01a5\\3\\u01a5\")\n buf.write(\"\\3\\u01a6\\3\\u01a6\\3\\u01a6\\3\\u01a6\\3\\u01a6\\3\\u01a6\\3\\u01a6\")\n buf.write(\"\\3\\u01a6\\3\\u01a6\\3\\u01a7\\3\\u01a7\\3\\u01a7\\3\\u01a7\\3\\u01a7\")\n buf.write(\"\\3\\u01a8\\3\\u01a8\\3\\u01a8\\3\\u01a8\\3\\u01a8\\3\\u01a8\\3\\u01a8\")\n buf.write(\"\\3\\u01a8\\3\\u01a9\\3\\u01a9\\3\\u01a9\\3\\u01a9\\3\\u01a9\\3\\u01a9\")\n buf.write(\"\\3\\u01a9\\3\\u01a9\\3\\u01a9\\3\\u01a9\\3\\u01a9\\3\\u01aa\\3\\u01aa\")\n buf.write(\"\\3\\u01aa\\3\\u01aa\\3\\u01aa\\3\\u01ab\\3\\u01ab\\3\\u01ab\\3\\u01ab\")\n buf.write(\"\\3\\u01ab\\3\\u01ab\\3\\u01ab\\3\\u01ab\\3\\u01ab\\3\\u01ac\\3\\u01ac\")\n buf.write(\"\\3\\u01ac\\3\\u01ac\\3\\u01ac\\3\\u01ac\\3\\u01ad\\3\\u01ad\\3\\u01ad\")\n buf.write(\"\\3\\u01ad\\3\\u01ad\\3\\u01ad\\3\\u01ae\\3\\u01ae\\3\\u01ae\\3\\u01ae\")\n 
buf.write(\"\\3\\u01ae\\3\\u01af\\3\\u01af\\3\\u01af\\3\\u01af\\3\\u01af\\3\\u01af\")\n buf.write(\"\\3\\u01af\\3\\u01b0\\3\\u01b0\\3\\u01b0\\3\\u01b0\\3\\u01b0\\3\\u01b1\")\n buf.write(\"\\3\\u01b1\\3\\u01b1\\3\\u01b1\\3\\u01b1\\3\\u01b1\\3\\u01b2\\3\\u01b2\")\n buf.write(\"\\3\\u01b2\\3\\u01b2\\3\\u01b3\\3\\u01b3\\3\\u01b3\\3\\u01b3\\3\\u01b3\")\n buf.write(\"\\3\\u01b3\\3\\u01b3\\3\\u01b4\\3\\u01b4\\3\\u01b4\\3\\u01b4\\3\\u01b4\")\n buf.write(\"\\3\\u01b4\\3\\u01b4\\3\\u01b4\\3\\u01b4\\3\\u01b4\\3\\u01b4\\3\\u01b4\")\n buf.write(\"\\3\\u01b4\\3\\u01b4\\3\\u01b5\\3\\u01b5\\3\\u01b5\\3\\u01b5\\3\\u01b5\")\n buf.write(\"\\3\\u01b5\\3\\u01b5\\3\\u01b5\\3\\u01b6\\3\\u01b6\\3\\u01b6\\3\\u01b6\")\n buf.write(\"\\3\\u01b6\\3\\u01b6\\3\\u01b6\\3\\u01b6\\3\\u01b6\\3\\u01b6\\3\\u01b6\")\n buf.write(\"\\3\\u01b6\\3\\u01b6\\3\\u01b7\\3\\u01b7\\3\\u01b7\\3\\u01b7\\3\\u01b7\")\n buf.write(\"\\3\\u01b7\\3\\u01b7\\3\\u01b7\\3\\u01b7\\3\\u01b7\\3\\u01b7\\3\\u01b8\")\n buf.write(\"\\3\\u01b8\\3\\u01b8\\3\\u01b8\\3\\u01b8\\3\\u01b8\\3\\u01b8\\3\\u01b8\")\n buf.write(\"\\3\\u01b8\\3\\u01b8\\3\\u01b9\\3\\u01b9\\3\\u01b9\\3\\u01b9\\3\\u01b9\")\n buf.write(\"\\3\\u01b9\\3\\u01b9\\3\\u01b9\\3\\u01b9\\3\\u01b9\\3\\u01ba\\3\\u01ba\")\n buf.write(\"\\3\\u01ba\\3\\u01ba\\3\\u01ba\\3\\u01ba\\3\\u01ba\\3\\u01ba\\3\\u01ba\")\n buf.write(\"\\3\\u01ba\\3\\u01ba\\3\\u01ba\\3\\u01ba\\3\\u01ba\\3\\u01bb\\3\\u01bb\")\n buf.write(\"\\3\\u01bb\\3\\u01bb\\3\\u01bb\\3\\u01bb\\3\\u01bb\\3\\u01bb\\3\\u01bb\")\n buf.write(\"\\3\\u01bc\\3\\u01bc\\3\\u01bc\\3\\u01bc\\3\\u01bc\\3\\u01bc\\3\\u01bd\")\n buf.write(\"\\3\\u01bd\\3\\u01bd\\3\\u01bd\\3\\u01bd\\3\\u01bd\\3\\u01bd\\3\\u01bd\")\n buf.write(\"\\3\\u01bd\\3\\u01be\\3\\u01be\\3\\u01be\\3\\u01be\\3\\u01be\\3\\u01be\")\n buf.write(\"\\3\\u01be\\3\\u01be\\3\\u01bf\\3\\u01bf\\3\\u01bf\\3\\u01bf\\3\\u01bf\")\n buf.write(\"\\3\\u01bf\\3\\u01bf\\3\\u01bf\\3\\u01bf\\3\\u01bf\\3\\u01bf\\3\\u01bf\")\n 
buf.write(\"\\3\\u01bf\\3\\u01c0\\3\\u01c0\\3\\u01c0\\3\\u01c0\\3\\u01c0\\3\\u01c0\")\n buf.write(\"\\3\\u01c0\\3\\u01c0\\3\\u01c0\\3\\u01c1\\3\\u01c1\\3\\u01c1\\3\\u01c1\")\n buf.write(\"\\3\\u01c1\\3\\u01c2\\3\\u01c2\\3\\u01c2\\3\\u01c2\\3\\u01c3\\3\\u01c3\")\n buf.write(\"\\3\\u01c3\\3\\u01c3\\3\\u01c3\\3\\u01c3\\3\\u01c3\\3\\u01c3\\3\\u01c3\")\n buf.write(\"\\3\\u01c3\\3\\u01c3\\3\\u01c3\\3\\u01c3\\3\\u01c3\\3\\u01c3\\3\\u01c3\")\n buf.write(\"\\3\\u01c3\\3\\u01c3\\3\\u01c3\\3\\u01c3\\3\\u01c3\\3\\u01c3\\3\\u01c3\")\n buf.write(\"\\3\\u01c3\\3\\u01c3\\3\\u01c4\\3\\u01c4\\3\\u01c4\\3\\u01c4\\3\\u01c4\")\n buf.write(\"\\3\\u01c5\\3\\u01c5\\3\\u01c5\\3\\u01c5\\3\\u01c5\\3\\u01c5\\3\\u01c5\")\n buf.write(\"\\3\\u01c5\\3\\u01c5\\3\\u01c5\\3\\u01c5\\3\\u01c6\\3\\u01c6\\3\\u01c6\")\n buf.write(\"\\3\\u01c6\\3\\u01c6\\3\\u01c6\\3\\u01c6\\3\\u01c6\\3\\u01c6\\3\\u01c6\")\n buf.write(\"\\3\\u01c6\\3\\u01c6\\3\\u01c6\\3\\u01c6\\3\\u01c6\\3\\u01c6\\3\\u01c6\")\n buf.write(\"\\3\\u01c6\\3\\u01c7\\3\\u01c7\\3\\u01c7\\3\\u01c7\\3\\u01c7\\3\\u01c7\")\n buf.write(\"\\3\\u01c7\\3\\u01c7\\3\\u01c7\\3\\u01c7\\3\\u01c7\\3\\u01c7\\3\\u01c7\")\n buf.write(\"\\3\\u01c7\\3\\u01c7\\3\\u01c7\\3\\u01c8\\3\\u01c8\\3\\u01c8\\3\\u01c8\")\n buf.write(\"\\3\\u01c8\\3\\u01c8\\3\\u01c8\\3\\u01c8\\3\\u01c8\\3\\u01c8\\3\\u01c8\")\n buf.write(\"\\3\\u01c8\\3\\u01c8\\3\\u01c8\\3\\u01c8\\3\\u01c8\\3\\u01c8\\3\\u01c8\")\n buf.write(\"\\3\\u01c8\\3\\u01c9\\3\\u01c9\\3\\u01c9\\3\\u01c9\\3\\u01c9\\3\\u01c9\")\n buf.write(\"\\3\\u01c9\\3\\u01c9\\3\\u01c9\\3\\u01c9\\3\\u01c9\\3\\u01c9\\3\\u01c9\")\n buf.write(\"\\3\\u01c9\\3\\u01c9\\3\\u01c9\\3\\u01c9\\3\\u01c9\\3\\u01c9\\3\\u01c9\")\n buf.write(\"\\3\\u01c9\\3\\u01c9\\3\\u01c9\\3\\u01ca\\3\\u01ca\\3\\u01ca\\3\\u01ca\")\n buf.write(\"\\3\\u01ca\\3\\u01ca\\3\\u01ca\\3\\u01ca\\3\\u01ca\\3\\u01ca\\3\\u01ca\")\n buf.write(\"\\3\\u01ca\\3\\u01ca\\3\\u01ca\\3\\u01ca\\3\\u01cb\\3\\u01cb\\3\\u01cb\")\n 
buf.write(\"\\3\\u01cb\\3\\u01cb\\3\\u01cb\\3\\u01cb\\3\\u01cb\\3\\u01cb\\3\\u01cb\")\n buf.write(\"\\3\\u01cc\\3\\u01cc\\3\\u01cc\\3\\u01cc\\3\\u01cc\\3\\u01cc\\3\\u01cc\")\n buf.write(\"\\3\\u01cc\\3\\u01cc\\3\\u01cc\\3\\u01cc\\3\\u01cd\\3\\u01cd\\3\\u01cd\")\n buf.write(\"\\3\\u01cd\\3\\u01cd\\3\\u01cd\\3\\u01cd\\3\\u01cd\\3\\u01ce\\3\\u01ce\")\n buf.write(\"\\3\\u01ce\\3\\u01ce\\3\\u01ce\\3\\u01ce\\3\\u01ce\\3\\u01ce\\3\\u01ce\")\n buf.write(\"\\3\\u01ce\\3\\u01ce\\3\\u01ce\\3\\u01ce\\3\\u01cf\\3\\u01cf\\3\\u01cf\")\n buf.write(\"\\3\\u01cf\\3\\u01cf\\3\\u01cf\\3\\u01cf\\3\\u01cf\\3\\u01cf\\3\\u01cf\")\n buf.write(\"\\3\\u01cf\\3\\u01cf\\3\\u01cf\\3\\u01cf\\3\\u01cf\\3\\u01cf\\3\\u01d0\")\n buf.write(\"\\3\\u01d0\\3\\u01d0\\3\\u01d0\\3\\u01d0\\3\\u01d0\\3\\u01d0\\3\\u01d0\")\n buf.write(\"\\3\\u01d0\\3\\u01d0\\3\\u01d0\\3\\u01d0\\3\\u01d0\\3\\u01d0\\3\\u01d0\")\n buf.write(\"\\3\\u01d0\\3\\u01d1\\3\\u01d1\\3\\u01d1\\3\\u01d1\\3\\u01d1\\3\\u01d2\")\n buf.write(\"\\3\\u01d2\\3\\u01d2\\3\\u01d2\\3\\u01d3\\3\\u01d3\\3\\u01d3\\3\\u01d3\")\n buf.write(\"\\3\\u01d3\\3\\u01d4\\3\\u01d4\\3\\u01d4\\3\\u01d4\\3\\u01d5\\3\\u01d5\")\n buf.write(\"\\3\\u01d5\\3\\u01d5\\3\\u01d5\\3\\u01d6\\3\\u01d6\\3\\u01d6\\3\\u01d6\")\n buf.write(\"\\3\\u01d7\\3\\u01d7\\3\\u01d7\\3\\u01d7\\3\\u01d7\\3\\u01d7\\3\\u01d7\")\n buf.write(\"\\3\\u01d8\\3\\u01d8\\3\\u01d8\\3\\u01d8\\3\\u01d9\\3\\u01d9\\3\\u01d9\")\n buf.write(\"\\3\\u01d9\\3\\u01d9\\3\\u01d9\\3\\u01da\\3\\u01da\\3\\u01da\\3\\u01da\")\n buf.write(\"\\3\\u01da\\3\\u01da\\3\\u01da\\3\\u01da\\3\\u01da\\3\\u01da\\3\\u01da\")\n buf.write(\"\\3\\u01da\\3\\u01da\\3\\u01da\\3\\u01da\\3\\u01da\\3\\u01db\\3\\u01db\")\n buf.write(\"\\3\\u01db\\3\\u01db\\3\\u01db\\3\\u01db\\3\\u01db\\3\\u01db\\3\\u01db\")\n buf.write(\"\\3\\u01db\\3\\u01db\\3\\u01dc\\3\\u01dc\\3\\u01dc\\3\\u01dc\\3\\u01dd\")\n buf.write(\"\\3\\u01dd\\3\\u01dd\\3\\u01dd\\3\\u01dd\\3\\u01dd\\3\\u01dd\\3\\u01dd\")\n 
buf.write(\"\\3\\u01dd\\3\\u01de\\3\\u01de\\3\\u01de\\3\\u01de\\3\\u01de\\3\\u01de\")\n buf.write(\"\\3\\u01df\\3\\u01df\\3\\u01df\\3\\u01df\\3\\u01df\\3\\u01df\\3\\u01df\")\n buf.write(\"\\3\\u01e0\\3\\u01e0\\3\\u01e0\\3\\u01e0\\3\\u01e0\\3\\u01e1\\3\\u01e1\")\n buf.write(\"\\3\\u01e1\\3\\u01e1\\3\\u01e1\\3\\u01e1\\3\\u01e1\\3\\u01e2\\3\\u01e2\")\n buf.write(\"\\3\\u01e2\\3\\u01e2\\3\\u01e2\\3\\u01e2\\7\\u01e2\\u1368\\n\\u01e2\")\n buf.write(\"\\f\\u01e2\\16\\u01e2\\u136b\\13\\u01e2\\3\\u01e2\\3\\u01e2\\3\\u01e3\")\n buf.write(\"\\3\\u01e3\\3\\u01e3\\7\\u01e3\\u1372\\n\\u01e3\\f\\u01e3\\16\\u01e3\")\n buf.write(\"\\u1375\\13\\u01e3\\3\\u01e3\\6\\u01e3\\u1378\\n\\u01e3\\r\\u01e3\")\n buf.write(\"\\16\\u01e3\\u1379\\3\\u01e4\\3\\u01e4\\3\\u01e4\\7\\u01e4\\u137f\")\n buf.write(\"\\n\\u01e4\\f\\u01e4\\16\\u01e4\\u1382\\13\\u01e4\\3\\u01e4\\6\\u01e4\")\n buf.write(\"\\u1385\\n\\u01e4\\r\\u01e4\\16\\u01e4\\u1386\\3\\u01e5\\3\\u01e5\")\n buf.write(\"\\3\\u01e5\\3\\u01e6\\3\\u01e6\\3\\u01e7\\3\\u01e7\\3\\u01e8\\3\\u01e8\")\n buf.write(\"\\3\\u01e8\\5\\u01e8\\u1393\\n\\u01e8\\3\\u01e8\\3\\u01e8\\5\\u01e8\")\n buf.write(\"\\u1397\\n\\u01e8\\5\\u01e8\\u1399\\n\\u01e8\\3\\u01e8\\3\\u01e8\\5\")\n buf.write(\"\\u01e8\\u139d\\n\\u01e8\\3\\u01e9\\3\\u01e9\\3\\u01e9\\3\\u01e9\\3\")\n buf.write(\"\\u01e9\\7\\u01e9\\u13a4\\n\\u01e9\\f\\u01e9\\16\\u01e9\\u13a7\\13\")\n buf.write(\"\\u01e9\\3\\u01e9\\3\\u01e9\\3\\u01ea\\3\\u01ea\\3\\u01ea\\3\\u01ea\")\n buf.write(\"\\3\\u01ea\\5\\u01ea\\u13b0\\n\\u01ea\\3\\u01ea\\3\\u01ea\\3\\u01eb\")\n buf.write(\"\\3\\u01eb\\3\\u01ec\\3\\u01ec\\3\\u01ec\\7\\u01ec\\u13b9\\n\\u01ec\")\n buf.write(\"\\f\\u01ec\\16\\u01ec\\u13bc\\13\\u01ec\\3\\u01ec\\3\\u01ec\\3\\u01ec\")\n buf.write(\"\\3\\u01ed\\3\\u01ed\\3\\u01ed\\7\\u01ed\\u13c4\\n\\u01ed\\f\\u01ed\")\n buf.write(\"\\16\\u01ed\\u13c7\\13\\u01ed\\3\\u01ed\\3\\u01ed\\3\\u01ed\\3\\u01ee\")\n buf.write(\"\\3\\u01ee\\3\\u01ee\\7\\u01ee\\u13cf\\n\\u01ee\\f\\u01ee\\16\\u01ee\")\n 
buf.write(\"\\u13d2\\13\\u01ee\\3\\u01ee\\3\\u01ee\\3\\u01ee\\3\\u01ef\\3\\u01ef\")\n buf.write(\"\\3\\u01ef\\7\\u01ef\\u13da\\n\\u01ef\\f\\u01ef\\16\\u01ef\\u13dd\")\n buf.write(\"\\13\\u01ef\\3\\u01ef\\3\\u01ef\\3\\u01ef\\3\\u01f0\\3\\u01f0\\3\\u01f1\")\n buf.write(\"\\3\\u01f1\\3\\u01f1\\3\\u01f1\\6\\u01f1\\u13e8\\n\\u01f1\\r\\u01f1\")\n buf.write(\"\\16\\u01f1\\u13e9\\3\\u01f1\\3\\u01f1\\3\\u01f2\\3\\u01f2\\3\\u01f3\")\n buf.write(\"\\3\\u01f3\\3\\u01f4\\3\\u01f4\\3\\u01f5\\3\\u01f5\\3\\u01f6\\3\\u01f6\")\n buf.write(\"\\3\\u01f6\\3\\u01f7\\3\\u01f7\\3\\u01f8\\3\\u01f8\\3\\u01f9\\3\\u01f9\")\n buf.write(\"\\3\\u01fa\\3\\u01fa\\3\\u01fb\\3\\u01fb\\3\\u01fc\\3\\u01fc\\3\\u01fd\")\n buf.write(\"\\3\\u01fd\\3\\u01fd\\3\\u01fe\\3\\u01fe\\3\\u01fe\\3\\u01fe\\7\\u01fe\")\n buf.write(\"\\u140c\\n\\u01fe\\f\\u01fe\\16\\u01fe\\u140f\\13\\u01fe\\3\\u01fe\")\n buf.write(\"\\3\\u01fe\\3\\u01fe\\3\\u01fe\\3\\u01fe\\5\\u01fe\\u1416\\n\\u01fe\")\n buf.write(\"\\3\\u01ff\\3\\u01ff\\3\\u0200\\3\\u0200\\3\\u0201\\3\\u0201\\3\\u0201\")\n buf.write(\"\\3\\u0202\\3\\u0202\\3\\u0203\\3\\u0203\\3\\u0203\\3\\u0204\\3\\u0204\")\n buf.write(\"\\3\\u0204\\3\\u0204\\3\\u0204\\3\\u0204\\3\\u0204\\3\\u0204\\5\\u0204\")\n buf.write(\"\\u142c\\n\\u0204\\3\\u0205\\3\\u0205\\3\\u0206\\3\\u0206\\3\\u0207\")\n buf.write(\"\\3\\u0207\\3\\u0208\\3\\u0208\\3\\u0209\\3\\u0209\\3\\u020a\\3\\u020a\")\n buf.write(\"\\3\\u020a\\3\\u020b\\3\\u020b\\3\\u020c\\3\\u020c\\3\\u020d\\3\\u020d\")\n buf.write(\"\\3\\u020e\\3\\u020e\\3\\u020f\\3\\u020f\\3\\u0210\\6\\u0210\\u1446\")\n buf.write(\"\\n\\u0210\\r\\u0210\\16\\u0210\\u1447\\3\\u0210\\3\\u0210\\3\\u0211\")\n buf.write(\"\\3\\u0211\\3\\u0212\\6\\u0212\\u144f\\n\\u0212\\r\\u0212\\16\\u0212\")\n buf.write(\"\\u1450\\3\\u0213\\7\\u0213\\u1454\\n\\u0213\\f\\u0213\\16\\u0213\")\n buf.write(\"\\u1457\\13\\u0213\\3\\u0213\\5\\u0213\\u145a\\n\\u0213\\3\\u0213\")\n buf.write(\"\\6\\u0213\\u145d\\n\\u0213\\r\\u0213\\16\\u0213\\u145e\\3\\u0214\")\n 
buf.write(\"\\3\\u0214\\3\\u0214\\3\\u0214\\7\\u0214\\u1465\\n\\u0214\\f\\u0214\")\n buf.write(\"\\16\\u0214\\u1468\\13\\u0214\\3\\u0214\\3\\u0214\\5\\u0214\\u146c\")\n buf.write(\"\\n\\u0214\\3\\u0214\\3\\u0214\\3\\u0215\\3\\u0215\\3\\u0215\\3\\u0215\")\n buf.write(\"\\7\\u0215\\u1474\\n\\u0215\\f\\u0215\\16\\u0215\\u1477\\13\\u0215\")\n buf.write(\"\\3\\u0215\\3\\u0215\\3\\u0215\\3\\u0215\\3\\u0215\\3\\u0216\\3\\u0216\")\n buf.write(\"\\3\\u0216\\3\\u0216\\3\\u0216\\3\\u0216\\3\\u0216\\3\\u0216\\3\\u0216\")\n buf.write(\"\\7\\u0216\\u1487\\n\\u0216\\f\\u0216\\16\\u0216\\u148a\\13\\u0216\")\n buf.write(\"\\3\\u0216\\3\\u0216\\5\\u0216\\u148e\\n\\u0216\\3\\u0217\\5\\u0217\")\n buf.write(\"\\u1491\\n\\u0217\\3\\u0217\\3\\u0217\\3\\u0218\\3\\u0218\\3\\u0219\")\n buf.write(\"\\3\\u0219\\3\\u0219\\7\\u0219\\u149a\\n\\u0219\\f\\u0219\\16\\u0219\")\n buf.write(\"\\u149d\\13\\u0219\\3\\u021a\\3\\u021a\\3\\u021a\\3\\u021a\\3\\u021a\")\n buf.write(\"\\3\\u021b\\3\\u021b\\3\\u021c\\3\\u021c\\3\\u021d\\3\\u021d\\3\\u021e\")\n buf.write(\"\\3\\u021e\\3\\u021f\\3\\u021f\\3\\u0220\\3\\u0220\\3\\u0221\\3\\u0221\")\n buf.write(\"\\3\\u0222\\3\\u0222\\3\\u0223\\3\\u0223\\3\\u0224\\3\\u0224\\3\\u0225\")\n buf.write(\"\\3\\u0225\\3\\u0226\\3\\u0226\\3\\u0227\\3\\u0227\\3\\u0228\\3\\u0228\")\n buf.write(\"\\3\\u0229\\3\\u0229\\3\\u022a\\3\\u022a\\3\\u022b\\3\\u022b\\3\\u022c\")\n buf.write(\"\\3\\u022c\\3\\u022d\\3\\u022d\\3\\u022e\\3\\u022e\\3\\u022f\\3\\u022f\")\n buf.write(\"\\3\\u0230\\3\\u0230\\3\\u0231\\3\\u0231\\3\\u0232\\3\\u0232\\3\\u0233\")\n buf.write(\"\\3\\u0233\\3\\u0234\\3\\u0234\\7\\u13ba\\u13c5\\u13d0\\u13db\\u1475\")\n buf.write(\"\\2\\u0235\\3\\3\\5\\4\\7\\5\\t\\6\\13\\7\\r\\b\\17\\t\\21\\n\\23\\13\\25\\f\")\n buf.write(\"\\27\\r\\31\\16\\33\\17\\35\\20\\37\\21!\\22#\\23%\\24\\'\\25)\\26+\\27\")\n buf.write(\"-\\30/\\31\\61\\32\\63\\33\\65\\34\\67\\359\\36;\\37= ?!A\\\"C#E$G%\")\n buf.write(\"I&K\\'M(O)Q*S+U,W-Y.[/]\\60_\\61a\\62c\\63e\\64g\\65i\\66k\\67\")\n 
buf.write(\"m8o9q:s;u<w=y>{?}@\\177A\\u0081B\\u0083C\\u0085D\\u0087E\\u0089\")\n buf.write(\"F\\u008bG\\u008dH\\u008fI\\u0091J\\u0093K\\u0095L\\u0097M\\u0099\")\n buf.write(\"N\\u009bO\\u009dP\\u009fQ\\u00a1R\\u00a3S\\u00a5T\\u00a7U\\u00a9\")\n buf.write(\"V\\u00abW\\u00adX\\u00afY\\u00b1Z\\u00b3[\\u00b5\\\\\\u00b7]\\u00b9\")\n buf.write(\"^\\u00bb_\\u00bd`\\u00bfa\\u00c1b\\u00c3c\\u00c5d\\u00c7e\\u00c9\")\n buf.write(\"f\\u00cbg\\u00cdh\\u00cfi\\u00d1j\\u00d3k\\u00d5l\\u00d7m\\u00d9\")\n buf.write(\"n\\u00dbo\\u00ddp\\u00dfq\\u00e1r\\u00e3s\\u00e5t\\u00e7u\\u00e9\")\n buf.write(\"v\\u00ebw\\u00edx\\u00efy\\u00f1z\\u00f3{\\u00f5|\\u00f7}\\u00f9\")\n buf.write(\"~\\u00fb\\177\\u00fd\\u0080\\u00ff\\u0081\\u0101\\u0082\\u0103\")\n buf.write(\"\\u0083\\u0105\\u0084\\u0107\\u0085\\u0109\\u0086\\u010b\\u0087\")\n buf.write(\"\\u010d\\u0088\\u010f\\u0089\\u0111\\u008a\\u0113\\u008b\\u0115\")\n buf.write(\"\\u008c\\u0117\\u008d\\u0119\\u008e\\u011b\\u008f\\u011d\\u0090\")\n buf.write(\"\\u011f\\u0091\\u0121\\u0092\\u0123\\u0093\\u0125\\u0094\\u0127\")\n buf.write(\"\\u0095\\u0129\\u0096\\u012b\\u0097\\u012d\\u0098\\u012f\\u0099\")\n buf.write(\"\\u0131\\u009a\\u0133\\u009b\\u0135\\u009c\\u0137\\u009d\\u0139\")\n buf.write(\"\\u009e\\u013b\\u009f\\u013d\\u00a0\\u013f\\u00a1\\u0141\\u00a2\")\n buf.write(\"\\u0143\\u00a3\\u0145\\u00a4\\u0147\\u00a5\\u0149\\u00a6\\u014b\")\n buf.write(\"\\u00a7\\u014d\\u00a8\\u014f\\u00a9\\u0151\\u00aa\\u0153\\u00ab\")\n buf.write(\"\\u0155\\u00ac\\u0157\\u00ad\\u0159\\u00ae\\u015b\\u00af\\u015d\")\n buf.write(\"\\u00b0\\u015f\\u00b1\\u0161\\u00b2\\u0163\\u00b3\\u0165\\u00b4\")\n buf.write(\"\\u0167\\u00b5\\u0169\\u00b6\\u016b\\u00b7\\u016d\\u00b8\\u016f\")\n buf.write(\"\\u00b9\\u0171\\u00ba\\u0173\\u00bb\\u0175\\u00bc\\u0177\\u00bd\")\n buf.write(\"\\u0179\\u00be\\u017b\\u00bf\\u017d\\u00c0\\u017f\\u00c1\\u0181\")\n buf.write(\"\\u00c2\\u0183\\u00c3\\u0185\\u00c4\\u0187\\u00c5\\u0189\\u00c6\")\n 
buf.write(\"\\u018b\\u00c7\\u018d\\u00c8\\u018f\\u00c9\\u0191\\u00ca\\u0193\")\n buf.write(\"\\u00cb\\u0195\\u00cc\\u0197\\u00cd\\u0199\\u00ce\\u019b\\u00cf\")\n buf.write(\"\\u019d\\u00d0\\u019f\\u00d1\\u01a1\\u00d2\\u01a3\\u00d3\\u01a5\")\n buf.write(\"\\u00d4\\u01a7\\u00d5\\u01a9\\u00d6\\u01ab\\u00d7\\u01ad\\u00d8\")\n buf.write(\"\\u01af\\u00d9\\u01b1\\u00da\\u01b3\\u00db\\u01b5\\u00dc\\u01b7\")\n buf.write(\"\\u00dd\\u01b9\\u00de\\u01bb\\u00df\\u01bd\\u00e0\\u01bf\\u00e1\")\n buf.write(\"\\u01c1\\u00e2\\u01c3\\u00e3\\u01c5\\u00e4\\u01c7\\u00e5\\u01c9\")\n buf.write(\"\\u00e6\\u01cb\\u00e7\\u01cd\\u00e8\\u01cf\\u00e9\\u01d1\\u00ea\")\n buf.write(\"\\u01d3\\u00eb\\u01d5\\u00ec\\u01d7\\u00ed\\u01d9\\u00ee\\u01db\")\n buf.write(\"\\u00ef\\u01dd\\u00f0\\u01df\\u00f1\\u01e1\\u00f2\\u01e3\\u00f3\")\n buf.write(\"\\u01e5\\u00f4\\u01e7\\u00f5\\u01e9\\u00f6\\u01eb\\u00f7\\u01ed\")\n buf.write(\"\\u00f8\\u01ef\\u00f9\\u01f1\\u00fa\\u01f3\\u00fb\\u01f5\\u00fc\")\n buf.write(\"\\u01f7\\u00fd\\u01f9\\u00fe\\u01fb\\u00ff\\u01fd\\u0100\\u01ff\")\n buf.write(\"\\u0101\\u0201\\u0102\\u0203\\u0103\\u0205\\u0104\\u0207\\u0105\")\n buf.write(\"\\u0209\\u0106\\u020b\\u0107\\u020d\\u0108\\u020f\\u0109\\u0211\")\n buf.write(\"\\u010a\\u0213\\u010b\\u0215\\u010c\\u0217\\u010d\\u0219\\u010e\")\n buf.write(\"\\u021b\\u010f\\u021d\\u0110\\u021f\\u0111\\u0221\\u0112\\u0223\")\n buf.write(\"\\u0113\\u0225\\u0114\\u0227\\u0115\\u0229\\u0116\\u022b\\u0117\")\n buf.write(\"\\u022d\\u0118\\u022f\\u0119\\u0231\\u011a\\u0233\\u011b\\u0235\")\n buf.write(\"\\u011c\\u0237\\u011d\\u0239\\u011e\\u023b\\u011f\\u023d\\u0120\")\n buf.write(\"\\u023f\\u0121\\u0241\\u0122\\u0243\\u0123\\u0245\\u0124\\u0247\")\n buf.write(\"\\u0125\\u0249\\u0126\\u024b\\u0127\\u024d\\u0128\\u024f\\u0129\")\n buf.write(\"\\u0251\\u012a\\u0253\\u012b\\u0255\\u012c\\u0257\\u012d\\u0259\")\n buf.write(\"\\u012e\\u025b\\u012f\\u025d\\u0130\\u025f\\u0131\\u0261\\u0132\")\n 
buf.write(\"\\u0263\\u0133\\u0265\\u0134\\u0267\\u0135\\u0269\\u0136\\u026b\")\n buf.write(\"\\u0137\\u026d\\u0138\\u026f\\u0139\\u0271\\u013a\\u0273\\u013b\")\n buf.write(\"\\u0275\\u013c\\u0277\\u013d\\u0279\\u013e\\u027b\\u013f\\u027d\")\n buf.write(\"\\u0140\\u027f\\u0141\\u0281\\u0142\\u0283\\u0143\\u0285\\u0144\")\n buf.write(\"\\u0287\\u0145\\u0289\\u0146\\u028b\\u0147\\u028d\\u0148\\u028f\")\n buf.write(\"\\u0149\\u0291\\u014a\\u0293\\u014b\\u0295\\u014c\\u0297\\u014d\")\n buf.write(\"\\u0299\\u014e\\u029b\\u014f\\u029d\\u0150\\u029f\\u0151\\u02a1\")\n buf.write(\"\\u0152\\u02a3\\u0153\\u02a5\\u0154\\u02a7\\u0155\\u02a9\\u0156\")\n buf.write(\"\\u02ab\\u0157\\u02ad\\u0158\\u02af\\u0159\\u02b1\\u015a\\u02b3\")\n buf.write(\"\\u015b\\u02b5\\u015c\\u02b7\\u015d\\u02b9\\u015e\\u02bb\\u015f\")\n buf.write(\"\\u02bd\\u0160\\u02bf\\u0161\\u02c1\\u0162\\u02c3\\u0163\\u02c5\")\n buf.write(\"\\u0164\\u02c7\\u0165\\u02c9\\u0166\\u02cb\\u0167\\u02cd\\u0168\")\n buf.write(\"\\u02cf\\u0169\\u02d1\\u016a\\u02d3\\u016b\\u02d5\\u016c\\u02d7\")\n buf.write(\"\\u016d\\u02d9\\u016e\\u02db\\u016f\\u02dd\\u0170\\u02df\\u0171\")\n buf.write(\"\\u02e1\\u0172\\u02e3\\u0173\\u02e5\\u0174\\u02e7\\u0175\\u02e9\")\n buf.write(\"\\u0176\\u02eb\\u0177\\u02ed\\u0178\\u02ef\\u0179\\u02f1\\u017a\")\n buf.write(\"\\u02f3\\u017b\\u02f5\\u017c\\u02f7\\u017d\\u02f9\\u017e\\u02fb\")\n buf.write(\"\\u017f\\u02fd\\u0180\\u02ff\\u0181\\u0301\\u0182\\u0303\\u0183\")\n buf.write(\"\\u0305\\u0184\\u0307\\u0185\\u0309\\u0186\\u030b\\u0187\\u030d\")\n buf.write(\"\\u0188\\u030f\\u0189\\u0311\\u018a\\u0313\\u018b\\u0315\\u018c\")\n buf.write(\"\\u0317\\u018d\\u0319\\u018e\\u031b\\u018f\\u031d\\u0190\\u031f\")\n buf.write(\"\\u0191\\u0321\\u0192\\u0323\\u0193\\u0325\\u0194\\u0327\\u0195\")\n buf.write(\"\\u0329\\u0196\\u032b\\u0197\\u032d\\u0198\\u032f\\u0199\\u0331\")\n buf.write(\"\\u019a\\u0333\\u019b\\u0335\\u019c\\u0337\\u019d\\u0339\\u019e\")\n 
buf.write(\"\\u033b\\u019f\\u033d\\u01a0\\u033f\\u01a1\\u0341\\u01a2\\u0343\")\n buf.write(\"\\u01a3\\u0345\\u01a4\\u0347\\u01a5\\u0349\\u01a6\\u034b\\u01a7\")\n buf.write(\"\\u034d\\u01a8\\u034f\\u01a9\\u0351\\u01aa\\u0353\\u01ab\\u0355\")\n buf.write(\"\\u01ac\\u0357\\u01ad\\u0359\\u01ae\\u035b\\u01af\\u035d\\u01b0\")\n buf.write(\"\\u035f\\u01b1\\u0361\\u01b2\\u0363\\u01b3\\u0365\\u01b4\\u0367\")\n buf.write(\"\\u01b5\\u0369\\u01b6\\u036b\\u01b7\\u036d\\u01b8\\u036f\\u01b9\")\n buf.write(\"\\u0371\\u01ba\\u0373\\u01bb\\u0375\\u01bc\\u0377\\u01bd\\u0379\")\n buf.write(\"\\u01be\\u037b\\u01bf\\u037d\\u01c0\\u037f\\u01c1\\u0381\\u01c2\")\n buf.write(\"\\u0383\\u01c3\\u0385\\u01c4\\u0387\\u01c5\\u0389\\u01c6\\u038b\")\n buf.write(\"\\u01c7\\u038d\\u01c8\\u038f\\u01c9\\u0391\\u01ca\\u0393\\u01cb\")\n buf.write(\"\\u0395\\u01cc\\u0397\\u01cd\\u0399\\u01ce\\u039b\\u01cf\\u039d\")\n buf.write(\"\\u01d0\\u039f\\u01d1\\u03a1\\u01d2\\u03a3\\u01d3\\u03a5\\u01d4\")\n buf.write(\"\\u03a7\\u01d5\\u03a9\\u01d6\\u03ab\\u01d7\\u03ad\\u01d8\\u03af\")\n buf.write(\"\\u01d9\\u03b1\\u01da\\u03b3\\u01db\\u03b5\\u01dc\\u03b7\\u01dd\")\n buf.write(\"\\u03b9\\u01de\\u03bb\\u01df\\u03bd\\u01e0\\u03bf\\u01e1\\u03c1\")\n buf.write(\"\\u01e2\\u03c3\\u01e3\\u03c5\\u01e4\\u03c7\\u01e5\\u03c9\\u01e6\")\n buf.write(\"\\u03cb\\u01e7\\u03cd\\u01e8\\u03cf\\u01e9\\u03d1\\u01ea\\u03d3\")\n buf.write(\"\\2\\u03d5\\2\\u03d7\\2\\u03d9\\2\\u03db\\2\\u03dd\\2\\u03df\\2\\u03e1\")\n buf.write(\"\\u01eb\\u03e3\\u01ec\\u03e5\\u01ed\\u03e7\\u01ee\\u03e9\\u01ef\")\n buf.write(\"\\u03eb\\u01f0\\u03ed\\u01f1\\u03ef\\u01f2\\u03f1\\u01f3\\u03f3\")\n buf.write(\"\\u01f4\\u03f5\\u01f5\\u03f7\\u01f6\\u03f9\\u01f7\\u03fb\\u01f8\")\n buf.write(\"\\u03fd\\u01f9\\u03ff\\u01fa\\u0401\\u01fb\\u0403\\u01fc\\u0405\")\n buf.write(\"\\u01fd\\u0407\\u01fe\\u0409\\u01ff\\u040b\\u0200\\u040d\\u0201\")\n buf.write(\"\\u040f\\u0202\\u0411\\2\\u0413\\u0203\\u0415\\u0204\\u0417\\u0205\")\n 
buf.write(\"\\u0419\\u0206\\u041b\\u0207\\u041d\\u0208\\u041f\\u0209\\u0421\")\n buf.write(\"\\2\\u0423\\2\\u0425\\2\\u0427\\u020a\\u0429\\u020b\\u042b\\u020c\")\n buf.write(\"\\u042d\\2\\u042f\\2\\u0431\\u020d\\u0433\\u020e\\u0435\\2\\u0437\")\n buf.write(\"\\2\\u0439\\2\\u043b\\2\\u043d\\2\\u043f\\2\\u0441\\2\\u0443\\2\\u0445\")\n buf.write(\"\\2\\u0447\\2\\u0449\\2\\u044b\\2\\u044d\\2\\u044f\\2\\u0451\\2\\u0453\")\n buf.write(\"\\2\\u0455\\2\\u0457\\2\\u0459\\2\\u045b\\2\\u045d\\2\\u045f\\2\\u0461\")\n buf.write(\"\\2\\u0463\\2\\u0465\\2\\u0467\\2\\3\\2\\'\\5\\2\\f\\f\\17\\17))\\5\\2\\62\")\n buf.write(\";CHch\\4\\2GGgg\\4\\2--//\\t\\2\\13\\f\\17\\17\\\"\\\"**>>]]}}\\5\\2\\f\")\n buf.write(\"\\f\\17\\17$$\\4\\2\\62;aa\\5\\2\\13\\f\\17\\17\\\"\\\"\\4\\2C\\\\c|\\4\\2\\f\")\n buf.write(\"\\f\\17\\17\\4\\2\\13\\13\\\"\\\"\\5\\2%&\\62;aa\\4\\2CCcc\\4\\2DDdd\\4\\2\")\n buf.write(\"EEee\\4\\2FFff\\4\\2HHhh\\4\\2IIii\\4\\2JJjj\\4\\2KKkk\\4\\2LLll\\4\")\n buf.write(\"\\2MMmm\\4\\2NNnn\\4\\2OOoo\\4\\2PPpp\\4\\2QQqq\\4\\2RRrr\\4\\2SSs\")\n buf.write(\"s\\4\\2TTtt\\4\\2UUuu\\4\\2VVvv\\4\\2WWww\\4\\2XXxx\\4\\2YYyy\\4\\2\")\n buf.write(\"ZZzz\\4\\2[[{{\\4\\2\\\\\\\\||\\2\\u14dd\\2\\3\\3\\2\\2\\2\\2\\5\\3\\2\\2\\2\")\n buf.write(\"\\2\\7\\3\\2\\2\\2\\2\\t\\3\\2\\2\\2\\2\\13\\3\\2\\2\\2\\2\\r\\3\\2\\2\\2\\2\\17\")\n buf.write(\"\\3\\2\\2\\2\\2\\21\\3\\2\\2\\2\\2\\23\\3\\2\\2\\2\\2\\25\\3\\2\\2\\2\\2\\27\\3\")\n buf.write(\"\\2\\2\\2\\2\\31\\3\\2\\2\\2\\2\\33\\3\\2\\2\\2\\2\\35\\3\\2\\2\\2\\2\\37\\3\\2\")\n buf.write(\"\\2\\2\\2!\\3\\2\\2\\2\\2#\\3\\2\\2\\2\\2%\\3\\2\\2\\2\\2\\'\\3\\2\\2\\2\\2)\\3\")\n buf.write(\"\\2\\2\\2\\2+\\3\\2\\2\\2\\2-\\3\\2\\2\\2\\2/\\3\\2\\2\\2\\2\\61\\3\\2\\2\\2\\2\")\n buf.write(\"\\63\\3\\2\\2\\2\\2\\65\\3\\2\\2\\2\\2\\67\\3\\2\\2\\2\\29\\3\\2\\2\\2\\2;\\3\")\n buf.write(\"\\2\\2\\2\\2=\\3\\2\\2\\2\\2?\\3\\2\\2\\2\\2A\\3\\2\\2\\2\\2C\\3\\2\\2\\2\\2E\")\n buf.write(\"\\3\\2\\2\\2\\2G\\3\\2\\2\\2\\2I\\3\\2\\2\\2\\2K\\3\\2\\2\\2\\2M\\3\\2\\2\\2\\2\")\n 
buf.write(\"O\\3\\2\\2\\2\\2Q\\3\\2\\2\\2\\2S\\3\\2\\2\\2\\2U\\3\\2\\2\\2\\2W\\3\\2\\2\\2\")\n buf.write(\"\\2Y\\3\\2\\2\\2\\2[\\3\\2\\2\\2\\2]\\3\\2\\2\\2\\2_\\3\\2\\2\\2\\2a\\3\\2\\2\")\n buf.write(\"\\2\\2c\\3\\2\\2\\2\\2e\\3\\2\\2\\2\\2g\\3\\2\\2\\2\\2i\\3\\2\\2\\2\\2k\\3\\2\")\n buf.write(\"\\2\\2\\2m\\3\\2\\2\\2\\2o\\3\\2\\2\\2\\2q\\3\\2\\2\\2\\2s\\3\\2\\2\\2\\2u\\3\")\n buf.write(\"\\2\\2\\2\\2w\\3\\2\\2\\2\\2y\\3\\2\\2\\2\\2{\\3\\2\\2\\2\\2}\\3\\2\\2\\2\\2\\177\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0081\\3\\2\\2\\2\\2\\u0083\\3\\2\\2\\2\\2\\u0085\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0087\\3\\2\\2\\2\\2\\u0089\\3\\2\\2\\2\\2\\u008b\\3\\2\\2\\2\\2\\u008d\")\n buf.write(\"\\3\\2\\2\\2\\2\\u008f\\3\\2\\2\\2\\2\\u0091\\3\\2\\2\\2\\2\\u0093\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0095\\3\\2\\2\\2\\2\\u0097\\3\\2\\2\\2\\2\\u0099\\3\\2\\2\\2\\2\\u009b\")\n buf.write(\"\\3\\2\\2\\2\\2\\u009d\\3\\2\\2\\2\\2\\u009f\\3\\2\\2\\2\\2\\u00a1\\3\\2\\2\")\n buf.write(\"\\2\\2\\u00a3\\3\\2\\2\\2\\2\\u00a5\\3\\2\\2\\2\\2\\u00a7\\3\\2\\2\\2\\2\\u00a9\")\n buf.write(\"\\3\\2\\2\\2\\2\\u00ab\\3\\2\\2\\2\\2\\u00ad\\3\\2\\2\\2\\2\\u00af\\3\\2\\2\")\n buf.write(\"\\2\\2\\u00b1\\3\\2\\2\\2\\2\\u00b3\\3\\2\\2\\2\\2\\u00b5\\3\\2\\2\\2\\2\\u00b7\")\n buf.write(\"\\3\\2\\2\\2\\2\\u00b9\\3\\2\\2\\2\\2\\u00bb\\3\\2\\2\\2\\2\\u00bd\\3\\2\\2\")\n buf.write(\"\\2\\2\\u00bf\\3\\2\\2\\2\\2\\u00c1\\3\\2\\2\\2\\2\\u00c3\\3\\2\\2\\2\\2\\u00c5\")\n buf.write(\"\\3\\2\\2\\2\\2\\u00c7\\3\\2\\2\\2\\2\\u00c9\\3\\2\\2\\2\\2\\u00cb\\3\\2\\2\")\n buf.write(\"\\2\\2\\u00cd\\3\\2\\2\\2\\2\\u00cf\\3\\2\\2\\2\\2\\u00d1\\3\\2\\2\\2\\2\\u00d3\")\n buf.write(\"\\3\\2\\2\\2\\2\\u00d5\\3\\2\\2\\2\\2\\u00d7\\3\\2\\2\\2\\2\\u00d9\\3\\2\\2\")\n buf.write(\"\\2\\2\\u00db\\3\\2\\2\\2\\2\\u00dd\\3\\2\\2\\2\\2\\u00df\\3\\2\\2\\2\\2\\u00e1\")\n buf.write(\"\\3\\2\\2\\2\\2\\u00e3\\3\\2\\2\\2\\2\\u00e5\\3\\2\\2\\2\\2\\u00e7\\3\\2\\2\")\n buf.write(\"\\2\\2\\u00e9\\3\\2\\2\\2\\2\\u00eb\\3\\2\\2\\2\\2\\u00ed\\3\\2\\2\\2\\2\\u00ef\")\n 
buf.write(\"\\3\\2\\2\\2\\2\\u00f1\\3\\2\\2\\2\\2\\u00f3\\3\\2\\2\\2\\2\\u00f5\\3\\2\\2\")\n buf.write(\"\\2\\2\\u00f7\\3\\2\\2\\2\\2\\u00f9\\3\\2\\2\\2\\2\\u00fb\\3\\2\\2\\2\\2\\u00fd\")\n buf.write(\"\\3\\2\\2\\2\\2\\u00ff\\3\\2\\2\\2\\2\\u0101\\3\\2\\2\\2\\2\\u0103\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0105\\3\\2\\2\\2\\2\\u0107\\3\\2\\2\\2\\2\\u0109\\3\\2\\2\\2\\2\\u010b\")\n buf.write(\"\\3\\2\\2\\2\\2\\u010d\\3\\2\\2\\2\\2\\u010f\\3\\2\\2\\2\\2\\u0111\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0113\\3\\2\\2\\2\\2\\u0115\\3\\2\\2\\2\\2\\u0117\\3\\2\\2\\2\\2\\u0119\")\n buf.write(\"\\3\\2\\2\\2\\2\\u011b\\3\\2\\2\\2\\2\\u011d\\3\\2\\2\\2\\2\\u011f\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0121\\3\\2\\2\\2\\2\\u0123\\3\\2\\2\\2\\2\\u0125\\3\\2\\2\\2\\2\\u0127\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0129\\3\\2\\2\\2\\2\\u012b\\3\\2\\2\\2\\2\\u012d\\3\\2\\2\")\n buf.write(\"\\2\\2\\u012f\\3\\2\\2\\2\\2\\u0131\\3\\2\\2\\2\\2\\u0133\\3\\2\\2\\2\\2\\u0135\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0137\\3\\2\\2\\2\\2\\u0139\\3\\2\\2\\2\\2\\u013b\\3\\2\\2\")\n buf.write(\"\\2\\2\\u013d\\3\\2\\2\\2\\2\\u013f\\3\\2\\2\\2\\2\\u0141\\3\\2\\2\\2\\2\\u0143\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0145\\3\\2\\2\\2\\2\\u0147\\3\\2\\2\\2\\2\\u0149\\3\\2\\2\")\n buf.write(\"\\2\\2\\u014b\\3\\2\\2\\2\\2\\u014d\\3\\2\\2\\2\\2\\u014f\\3\\2\\2\\2\\2\\u0151\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0153\\3\\2\\2\\2\\2\\u0155\\3\\2\\2\\2\\2\\u0157\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0159\\3\\2\\2\\2\\2\\u015b\\3\\2\\2\\2\\2\\u015d\\3\\2\\2\\2\\2\\u015f\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0161\\3\\2\\2\\2\\2\\u0163\\3\\2\\2\\2\\2\\u0165\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0167\\3\\2\\2\\2\\2\\u0169\\3\\2\\2\\2\\2\\u016b\\3\\2\\2\\2\\2\\u016d\")\n buf.write(\"\\3\\2\\2\\2\\2\\u016f\\3\\2\\2\\2\\2\\u0171\\3\\2\\2\\2\\2\\u0173\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0175\\3\\2\\2\\2\\2\\u0177\\3\\2\\2\\2\\2\\u0179\\3\\2\\2\\2\\2\\u017b\")\n buf.write(\"\\3\\2\\2\\2\\2\\u017d\\3\\2\\2\\2\\2\\u017f\\3\\2\\2\\2\\2\\u0181\\3\\2\\2\")\n 
buf.write(\"\\2\\2\\u0183\\3\\2\\2\\2\\2\\u0185\\3\\2\\2\\2\\2\\u0187\\3\\2\\2\\2\\2\\u0189\")\n buf.write(\"\\3\\2\\2\\2\\2\\u018b\\3\\2\\2\\2\\2\\u018d\\3\\2\\2\\2\\2\\u018f\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0191\\3\\2\\2\\2\\2\\u0193\\3\\2\\2\\2\\2\\u0195\\3\\2\\2\\2\\2\\u0197\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0199\\3\\2\\2\\2\\2\\u019b\\3\\2\\2\\2\\2\\u019d\\3\\2\\2\")\n buf.write(\"\\2\\2\\u019f\\3\\2\\2\\2\\2\\u01a1\\3\\2\\2\\2\\2\\u01a3\\3\\2\\2\\2\\2\\u01a5\")\n buf.write(\"\\3\\2\\2\\2\\2\\u01a7\\3\\2\\2\\2\\2\\u01a9\\3\\2\\2\\2\\2\\u01ab\\3\\2\\2\")\n buf.write(\"\\2\\2\\u01ad\\3\\2\\2\\2\\2\\u01af\\3\\2\\2\\2\\2\\u01b1\\3\\2\\2\\2\\2\\u01b3\")\n buf.write(\"\\3\\2\\2\\2\\2\\u01b5\\3\\2\\2\\2\\2\\u01b7\\3\\2\\2\\2\\2\\u01b9\\3\\2\\2\")\n buf.write(\"\\2\\2\\u01bb\\3\\2\\2\\2\\2\\u01bd\\3\\2\\2\\2\\2\\u01bf\\3\\2\\2\\2\\2\\u01c1\")\n buf.write(\"\\3\\2\\2\\2\\2\\u01c3\\3\\2\\2\\2\\2\\u01c5\\3\\2\\2\\2\\2\\u01c7\\3\\2\\2\")\n buf.write(\"\\2\\2\\u01c9\\3\\2\\2\\2\\2\\u01cb\\3\\2\\2\\2\\2\\u01cd\\3\\2\\2\\2\\2\\u01cf\")\n buf.write(\"\\3\\2\\2\\2\\2\\u01d1\\3\\2\\2\\2\\2\\u01d3\\3\\2\\2\\2\\2\\u01d5\\3\\2\\2\")\n buf.write(\"\\2\\2\\u01d7\\3\\2\\2\\2\\2\\u01d9\\3\\2\\2\\2\\2\\u01db\\3\\2\\2\\2\\2\\u01dd\")\n buf.write(\"\\3\\2\\2\\2\\2\\u01df\\3\\2\\2\\2\\2\\u01e1\\3\\2\\2\\2\\2\\u01e3\\3\\2\\2\")\n buf.write(\"\\2\\2\\u01e5\\3\\2\\2\\2\\2\\u01e7\\3\\2\\2\\2\\2\\u01e9\\3\\2\\2\\2\\2\\u01eb\")\n buf.write(\"\\3\\2\\2\\2\\2\\u01ed\\3\\2\\2\\2\\2\\u01ef\\3\\2\\2\\2\\2\\u01f1\\3\\2\\2\")\n buf.write(\"\\2\\2\\u01f3\\3\\2\\2\\2\\2\\u01f5\\3\\2\\2\\2\\2\\u01f7\\3\\2\\2\\2\\2\\u01f9\")\n buf.write(\"\\3\\2\\2\\2\\2\\u01fb\\3\\2\\2\\2\\2\\u01fd\\3\\2\\2\\2\\2\\u01ff\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0201\\3\\2\\2\\2\\2\\u0203\\3\\2\\2\\2\\2\\u0205\\3\\2\\2\\2\\2\\u0207\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0209\\3\\2\\2\\2\\2\\u020b\\3\\2\\2\\2\\2\\u020d\\3\\2\\2\")\n buf.write(\"\\2\\2\\u020f\\3\\2\\2\\2\\2\\u0211\\3\\2\\2\\2\\2\\u0213\\3\\2\\2\\2\\2\\u0215\")\n 
buf.write(\"\\3\\2\\2\\2\\2\\u0217\\3\\2\\2\\2\\2\\u0219\\3\\2\\2\\2\\2\\u021b\\3\\2\\2\")\n buf.write(\"\\2\\2\\u021d\\3\\2\\2\\2\\2\\u021f\\3\\2\\2\\2\\2\\u0221\\3\\2\\2\\2\\2\\u0223\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0225\\3\\2\\2\\2\\2\\u0227\\3\\2\\2\\2\\2\\u0229\\3\\2\\2\")\n buf.write(\"\\2\\2\\u022b\\3\\2\\2\\2\\2\\u022d\\3\\2\\2\\2\\2\\u022f\\3\\2\\2\\2\\2\\u0231\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0233\\3\\2\\2\\2\\2\\u0235\\3\\2\\2\\2\\2\\u0237\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0239\\3\\2\\2\\2\\2\\u023b\\3\\2\\2\\2\\2\\u023d\\3\\2\\2\\2\\2\\u023f\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0241\\3\\2\\2\\2\\2\\u0243\\3\\2\\2\\2\\2\\u0245\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0247\\3\\2\\2\\2\\2\\u0249\\3\\2\\2\\2\\2\\u024b\\3\\2\\2\\2\\2\\u024d\")\n buf.write(\"\\3\\2\\2\\2\\2\\u024f\\3\\2\\2\\2\\2\\u0251\\3\\2\\2\\2\\2\\u0253\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0255\\3\\2\\2\\2\\2\\u0257\\3\\2\\2\\2\\2\\u0259\\3\\2\\2\\2\\2\\u025b\")\n buf.write(\"\\3\\2\\2\\2\\2\\u025d\\3\\2\\2\\2\\2\\u025f\\3\\2\\2\\2\\2\\u0261\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0263\\3\\2\\2\\2\\2\\u0265\\3\\2\\2\\2\\2\\u0267\\3\\2\\2\\2\\2\\u0269\")\n buf.write(\"\\3\\2\\2\\2\\2\\u026b\\3\\2\\2\\2\\2\\u026d\\3\\2\\2\\2\\2\\u026f\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0271\\3\\2\\2\\2\\2\\u0273\\3\\2\\2\\2\\2\\u0275\\3\\2\\2\\2\\2\\u0277\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0279\\3\\2\\2\\2\\2\\u027b\\3\\2\\2\\2\\2\\u027d\\3\\2\\2\")\n buf.write(\"\\2\\2\\u027f\\3\\2\\2\\2\\2\\u0281\\3\\2\\2\\2\\2\\u0283\\3\\2\\2\\2\\2\\u0285\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0287\\3\\2\\2\\2\\2\\u0289\\3\\2\\2\\2\\2\\u028b\\3\\2\\2\")\n buf.write(\"\\2\\2\\u028d\\3\\2\\2\\2\\2\\u028f\\3\\2\\2\\2\\2\\u0291\\3\\2\\2\\2\\2\\u0293\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0295\\3\\2\\2\\2\\2\\u0297\\3\\2\\2\\2\\2\\u0299\\3\\2\\2\")\n buf.write(\"\\2\\2\\u029b\\3\\2\\2\\2\\2\\u029d\\3\\2\\2\\2\\2\\u029f\\3\\2\\2\\2\\2\\u02a1\")\n buf.write(\"\\3\\2\\2\\2\\2\\u02a3\\3\\2\\2\\2\\2\\u02a5\\3\\2\\2\\2\\2\\u02a7\\3\\2\\2\")\n 
buf.write(\"\\2\\2\\u02a9\\3\\2\\2\\2\\2\\u02ab\\3\\2\\2\\2\\2\\u02ad\\3\\2\\2\\2\\2\\u02af\")\n buf.write(\"\\3\\2\\2\\2\\2\\u02b1\\3\\2\\2\\2\\2\\u02b3\\3\\2\\2\\2\\2\\u02b5\\3\\2\\2\")\n buf.write(\"\\2\\2\\u02b7\\3\\2\\2\\2\\2\\u02b9\\3\\2\\2\\2\\2\\u02bb\\3\\2\\2\\2\\2\\u02bd\")\n buf.write(\"\\3\\2\\2\\2\\2\\u02bf\\3\\2\\2\\2\\2\\u02c1\\3\\2\\2\\2\\2\\u02c3\\3\\2\\2\")\n buf.write(\"\\2\\2\\u02c5\\3\\2\\2\\2\\2\\u02c7\\3\\2\\2\\2\\2\\u02c9\\3\\2\\2\\2\\2\\u02cb\")\n buf.write(\"\\3\\2\\2\\2\\2\\u02cd\\3\\2\\2\\2\\2\\u02cf\\3\\2\\2\\2\\2\\u02d1\\3\\2\\2\")\n buf.write(\"\\2\\2\\u02d3\\3\\2\\2\\2\\2\\u02d5\\3\\2\\2\\2\\2\\u02d7\\3\\2\\2\\2\\2\\u02d9\")\n buf.write(\"\\3\\2\\2\\2\\2\\u02db\\3\\2\\2\\2\\2\\u02dd\\3\\2\\2\\2\\2\\u02df\\3\\2\\2\")\n buf.write(\"\\2\\2\\u02e1\\3\\2\\2\\2\\2\\u02e3\\3\\2\\2\\2\\2\\u02e5\\3\\2\\2\\2\\2\\u02e7\")\n buf.write(\"\\3\\2\\2\\2\\2\\u02e9\\3\\2\\2\\2\\2\\u02eb\\3\\2\\2\\2\\2\\u02ed\\3\\2\\2\")\n buf.write(\"\\2\\2\\u02ef\\3\\2\\2\\2\\2\\u02f1\\3\\2\\2\\2\\2\\u02f3\\3\\2\\2\\2\\2\\u02f5\")\n buf.write(\"\\3\\2\\2\\2\\2\\u02f7\\3\\2\\2\\2\\2\\u02f9\\3\\2\\2\\2\\2\\u02fb\\3\\2\\2\")\n buf.write(\"\\2\\2\\u02fd\\3\\2\\2\\2\\2\\u02ff\\3\\2\\2\\2\\2\\u0301\\3\\2\\2\\2\\2\\u0303\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0305\\3\\2\\2\\2\\2\\u0307\\3\\2\\2\\2\\2\\u0309\\3\\2\\2\")\n buf.write(\"\\2\\2\\u030b\\3\\2\\2\\2\\2\\u030d\\3\\2\\2\\2\\2\\u030f\\3\\2\\2\\2\\2\\u0311\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0313\\3\\2\\2\\2\\2\\u0315\\3\\2\\2\\2\\2\\u0317\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0319\\3\\2\\2\\2\\2\\u031b\\3\\2\\2\\2\\2\\u031d\\3\\2\\2\\2\\2\\u031f\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0321\\3\\2\\2\\2\\2\\u0323\\3\\2\\2\\2\\2\\u0325\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0327\\3\\2\\2\\2\\2\\u0329\\3\\2\\2\\2\\2\\u032b\\3\\2\\2\\2\\2\\u032d\")\n buf.write(\"\\3\\2\\2\\2\\2\\u032f\\3\\2\\2\\2\\2\\u0331\\3\\2\\2\\2\\2\\u0333\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0335\\3\\2\\2\\2\\2\\u0337\\3\\2\\2\\2\\2\\u0339\\3\\2\\2\\2\\2\\u033b\")\n 
buf.write(\"\\3\\2\\2\\2\\2\\u033d\\3\\2\\2\\2\\2\\u033f\\3\\2\\2\\2\\2\\u0341\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0343\\3\\2\\2\\2\\2\\u0345\\3\\2\\2\\2\\2\\u0347\\3\\2\\2\\2\\2\\u0349\")\n buf.write(\"\\3\\2\\2\\2\\2\\u034b\\3\\2\\2\\2\\2\\u034d\\3\\2\\2\\2\\2\\u034f\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0351\\3\\2\\2\\2\\2\\u0353\\3\\2\\2\\2\\2\\u0355\\3\\2\\2\\2\\2\\u0357\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0359\\3\\2\\2\\2\\2\\u035b\\3\\2\\2\\2\\2\\u035d\\3\\2\\2\")\n buf.write(\"\\2\\2\\u035f\\3\\2\\2\\2\\2\\u0361\\3\\2\\2\\2\\2\\u0363\\3\\2\\2\\2\\2\\u0365\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0367\\3\\2\\2\\2\\2\\u0369\\3\\2\\2\\2\\2\\u036b\\3\\2\\2\")\n buf.write(\"\\2\\2\\u036d\\3\\2\\2\\2\\2\\u036f\\3\\2\\2\\2\\2\\u0371\\3\\2\\2\\2\\2\\u0373\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0375\\3\\2\\2\\2\\2\\u0377\\3\\2\\2\\2\\2\\u0379\\3\\2\\2\")\n buf.write(\"\\2\\2\\u037b\\3\\2\\2\\2\\2\\u037d\\3\\2\\2\\2\\2\\u037f\\3\\2\\2\\2\\2\\u0381\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0383\\3\\2\\2\\2\\2\\u0385\\3\\2\\2\\2\\2\\u0387\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0389\\3\\2\\2\\2\\2\\u038b\\3\\2\\2\\2\\2\\u038d\\3\\2\\2\\2\\2\\u038f\")\n buf.write(\"\\3\\2\\2\\2\\2\\u0391\\3\\2\\2\\2\\2\\u0393\\3\\2\\2\\2\\2\\u0395\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0397\\3\\2\\2\\2\\2\\u0399\\3\\2\\2\\2\\2\\u039b\\3\\2\\2\\2\\2\\u039d\")\n buf.write(\"\\3\\2\\2\\2\\2\\u039f\\3\\2\\2\\2\\2\\u03a1\\3\\2\\2\\2\\2\\u03a3\\3\\2\\2\")\n buf.write(\"\\2\\2\\u03a5\\3\\2\\2\\2\\2\\u03a7\\3\\2\\2\\2\\2\\u03a9\\3\\2\\2\\2\\2\\u03ab\")\n buf.write(\"\\3\\2\\2\\2\\2\\u03ad\\3\\2\\2\\2\\2\\u03af\\3\\2\\2\\2\\2\\u03b1\\3\\2\\2\")\n buf.write(\"\\2\\2\\u03b3\\3\\2\\2\\2\\2\\u03b5\\3\\2\\2\\2\\2\\u03b7\\3\\2\\2\\2\\2\\u03b9\")\n buf.write(\"\\3\\2\\2\\2\\2\\u03bb\\3\\2\\2\\2\\2\\u03bd\\3\\2\\2\\2\\2\\u03bf\\3\\2\\2\")\n buf.write(\"\\2\\2\\u03c1\\3\\2\\2\\2\\2\\u03c3\\3\\2\\2\\2\\2\\u03c5\\3\\2\\2\\2\\2\\u03c7\")\n buf.write(\"\\3\\2\\2\\2\\2\\u03c9\\3\\2\\2\\2\\2\\u03cb\\3\\2\\2\\2\\2\\u03cd\\3\\2\\2\")\n 
buf.write(\"\\2\\2\\u03cf\\3\\2\\2\\2\\2\\u03d1\\3\\2\\2\\2\\2\\u03d3\\3\\2\\2\\2\\2\\u03e1\")\n buf.write(\"\\3\\2\\2\\2\\2\\u03e3\\3\\2\\2\\2\\2\\u03e5\\3\\2\\2\\2\\2\\u03e7\\3\\2\\2\")\n buf.write(\"\\2\\2\\u03e9\\3\\2\\2\\2\\2\\u03eb\\3\\2\\2\\2\\2\\u03ed\\3\\2\\2\\2\\2\\u03ef\")\n buf.write(\"\\3\\2\\2\\2\\2\\u03f1\\3\\2\\2\\2\\2\\u03f3\\3\\2\\2\\2\\2\\u03f5\\3\\2\\2\")\n buf.write(\"\\2\\2\\u03f7\\3\\2\\2\\2\\2\\u03f9\\3\\2\\2\\2\\2\\u03fb\\3\\2\\2\\2\\2\\u03fd\")\n buf.write(\"\\3\\2\\2\\2\\2\\u03ff\\3\\2\\2\\2\\2\\u0401\\3\\2\\2\\2\\2\\u0403\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0405\\3\\2\\2\\2\\2\\u0407\\3\\2\\2\\2\\2\\u0409\\3\\2\\2\\2\\2\\u040b\")\n buf.write(\"\\3\\2\\2\\2\\2\\u040d\\3\\2\\2\\2\\2\\u040f\\3\\2\\2\\2\\2\\u0413\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0415\\3\\2\\2\\2\\2\\u0417\\3\\2\\2\\2\\2\\u0419\\3\\2\\2\\2\\2\\u041b\")\n buf.write(\"\\3\\2\\2\\2\\2\\u041d\\3\\2\\2\\2\\2\\u041f\\3\\2\\2\\2\\2\\u0427\\3\\2\\2\")\n buf.write(\"\\2\\2\\u0429\\3\\2\\2\\2\\2\\u042b\\3\\2\\2\\2\\2\\u0431\\3\\2\\2\\2\\2\\u0433\")\n buf.write(\"\\3\\2\\2\\2\\3\\u0469\\3\\2\\2\\2\\5\\u046c\\3\\2\\2\\2\\7\\u046e\\3\\2\\2\")\n buf.write(\"\\2\\t\\u0472\\3\\2\\2\\2\\13\\u0478\\3\\2\\2\\2\\r\\u047e\\3\\2\\2\\2\\17\")\n buf.write(\"\\u0488\\3\\2\\2\\2\\21\\u048c\\3\\2\\2\\2\\23\\u0492\\3\\2\\2\\2\\25\\u049a\")\n buf.write(\"\\3\\2\\2\\2\\27\\u049e\\3\\2\\2\\2\\31\\u04a2\\3\\2\\2\\2\\33\\u04a8\\3\")\n buf.write(\"\\2\\2\\2\\35\\u04ab\\3\\2\\2\\2\\37\\u04b2\\3\\2\\2\\2!\\u04b9\\3\\2\\2\")\n buf.write(\"\\2#\\u04bd\\3\\2\\2\\2%\\u04c7\\3\\2\\2\\2\\'\\u04ca\\3\\2\\2\\2)\\u04d4\")\n buf.write(\"\\3\\2\\2\\2+\\u04da\\3\\2\\2\\2-\\u04e1\\3\\2\\2\\2/\\u04e6\\3\\2\\2\\2\")\n buf.write(\"\\61\\u04f0\\3\\2\\2\\2\\63\\u0507\\3\\2\\2\\2\\65\\u050d\\3\\2\\2\\2\\67\")\n buf.write(\"\\u0514\\3\\2\\2\\29\\u051a\\3\\2\\2\\2;\\u0522\\3\\2\\2\\2=\\u0528\\3\")\n buf.write(\"\\2\\2\\2?\\u0536\\3\\2\\2\\2A\\u0543\\3\\2\\2\\2C\\u0552\\3\\2\\2\\2E\\u0557\")\n 
buf.write(\"\\3\\2\\2\\2G\\u055d\\3\\2\\2\\2I\\u0562\\3\\2\\2\\2K\\u056a\\3\\2\\2\\2\")\n buf.write(\"M\\u056f\\3\\2\\2\\2O\\u0577\\3\\2\\2\\2Q\\u057c\\3\\2\\2\\2S\\u057f\\3\")\n buf.write(\"\\2\\2\\2U\\u0584\\3\\2\\2\\2W\\u0586\\3\\2\\2\\2Y\\u058c\\3\\2\\2\\2[\\u0591\")\n buf.write(\"\\3\\2\\2\\2]\\u059b\\3\\2\\2\\2_\\u05a3\\3\\2\\2\\2a\\u05a8\\3\\2\\2\\2\")\n buf.write(\"c\\u05ad\\3\\2\\2\\2e\\u05b2\\3\\2\\2\\2g\\u05ba\\3\\2\\2\\2i\\u05c4\\3\")\n buf.write(\"\\2\\2\\2k\\u05ca\\3\\2\\2\\2m\\u05ce\\3\\2\\2\\2o\\u05d3\\3\\2\\2\\2q\\u05d9\")\n buf.write(\"\\3\\2\\2\\2s\\u05e1\\3\\2\\2\\2u\\u05e9\\3\\2\\2\\2w\\u05f1\\3\\2\\2\\2\")\n buf.write(\"y\\u05f9\\3\\2\\2\\2{\\u0600\\3\\2\\2\\2}\\u060a\\3\\2\\2\\2\\177\\u0618\")\n buf.write(\"\\3\\2\\2\\2\\u0081\\u0620\\3\\2\\2\\2\\u0083\\u0629\\3\\2\\2\\2\\u0085\")\n buf.write(\"\\u0631\\3\\2\\2\\2\\u0087\\u0641\\3\\2\\2\\2\\u0089\\u064a\\3\\2\\2\\2\")\n buf.write(\"\\u008b\\u0655\\3\\2\\2\\2\\u008d\\u0661\\3\\2\\2\\2\\u008f\\u066d\\3\")\n buf.write(\"\\2\\2\\2\\u0091\\u0675\\3\\2\\2\\2\\u0093\\u067d\\3\\2\\2\\2\\u0095\\u0686\")\n buf.write(\"\\3\\2\\2\\2\\u0097\\u068e\\3\\2\\2\\2\\u0099\\u069a\\3\\2\\2\\2\\u009b\")\n buf.write(\"\\u06aa\\3\\2\\2\\2\\u009d\\u06af\\3\\2\\2\\2\\u009f\\u06b5\\3\\2\\2\\2\")\n buf.write(\"\\u00a1\\u06bc\\3\\2\\2\\2\\u00a3\\u06c2\\3\\2\\2\\2\\u00a5\\u06c7\\3\")\n buf.write(\"\\2\\2\\2\\u00a7\\u06cf\\3\\2\\2\\2\\u00a9\\u06dc\\3\\2\\2\\2\\u00ab\\u06e3\")\n buf.write(\"\\3\\2\\2\\2\\u00ad\\u06ef\\3\\2\\2\\2\\u00af\\u06f5\\3\\2\\2\\2\\u00b1\")\n buf.write(\"\\u06fa\\3\\2\\2\\2\\u00b3\\u0703\\3\\2\\2\\2\\u00b5\\u0708\\3\\2\\2\\2\")\n buf.write(\"\\u00b7\\u070c\\3\\2\\2\\2\\u00b9\\u071b\\3\\2\\2\\2\\u00bb\\u0726\\3\")\n buf.write(\"\\2\\2\\2\\u00bd\\u072a\\3\\2\\2\\2\\u00bf\\u0730\\3\\2\\2\\2\\u00c1\\u0734\")\n buf.write(\"\\3\\2\\2\\2\\u00c3\\u073c\\3\\2\\2\\2\\u00c5\\u0744\\3\\2\\2\\2\\u00c7\")\n buf.write(\"\\u074e\\3\\2\\2\\2\\u00c9\\u0758\\3\\2\\2\\2\\u00cb\\u0760\\3\\2\\2\\2\")\n 
buf.write(\"\\u00cd\\u0769\\3\\2\\2\\2\\u00cf\\u0772\\3\\2\\2\\2\\u00d1\\u077a\\3\")\n buf.write(\"\\2\\2\\2\\u00d3\\u0781\\3\\2\\2\\2\\u00d5\\u0787\\3\\2\\2\\2\\u00d7\\u078c\")\n buf.write(\"\\3\\2\\2\\2\\u00d9\\u079a\\3\\2\\2\\2\\u00db\\u07a4\\3\\2\\2\\2\\u00dd\")\n buf.write(\"\\u07ac\\3\\2\\2\\2\\u00df\\u07b9\\3\\2\\2\\2\\u00e1\\u07c2\\3\\2\\2\\2\")\n buf.write(\"\\u00e3\\u07cb\\3\\2\\2\\2\\u00e5\\u07d2\\3\\2\\2\\2\\u00e7\\u07d7\\3\")\n buf.write(\"\\2\\2\\2\\u00e9\\u07f0\\3\\2\\2\\2\\u00eb\\u07f5\\3\\2\\2\\2\\u00ed\\u07fd\")\n buf.write(\"\\3\\2\\2\\2\\u00ef\\u0802\\3\\2\\2\\2\\u00f1\\u0808\\3\\2\\2\\2\\u00f3\")\n buf.write(\"\\u080e\\3\\2\\2\\2\\u00f5\\u0815\\3\\2\\2\\2\\u00f7\\u081e\\3\\2\\2\\2\")\n buf.write(\"\\u00f9\\u0822\\3\\2\\2\\2\\u00fb\\u0831\\3\\2\\2\\2\\u00fd\\u0835\\3\")\n buf.write(\"\\2\\2\\2\\u00ff\\u083c\\3\\2\\2\\2\\u0101\\u0843\\3\\2\\2\\2\\u0103\\u084c\")\n buf.write(\"\\3\\2\\2\\2\\u0105\\u0853\\3\\2\\2\\2\\u0107\\u085d\\3\\2\\2\\2\\u0109\")\n buf.write(\"\\u086c\\3\\2\\2\\2\\u010b\\u0877\\3\\2\\2\\2\\u010d\\u087f\\3\\2\\2\\2\")\n buf.write(\"\\u010f\\u0889\\3\\2\\2\\2\\u0111\\u0891\\3\\2\\2\\2\\u0113\\u0898\\3\")\n buf.write(\"\\2\\2\\2\\u0115\\u089d\\3\\2\\2\\2\\u0117\\u08a5\\3\\2\\2\\2\\u0119\\u08ae\")\n buf.write(\"\\3\\2\\2\\2\\u011b\\u08b6\\3\\2\\2\\2\\u011d\\u08be\\3\\2\\2\\2\\u011f\")\n buf.write(\"\\u08c4\\3\\2\\2\\2\\u0121\\u08ca\\3\\2\\2\\2\\u0123\\u08d0\\3\\2\\2\\2\")\n buf.write(\"\\u0125\\u08d6\\3\\2\\2\\2\\u0127\\u08e2\\3\\2\\2\\2\\u0129\\u08e8\\3\")\n buf.write(\"\\2\\2\\2\\u012b\\u08f2\\3\\2\\2\\2\\u012d\\u08fa\\3\\2\\2\\2\\u012f\\u08fe\")\n buf.write(\"\\3\\2\\2\\2\\u0131\\u0905\\3\\2\\2\\2\\u0133\\u090b\\3\\2\\2\\2\\u0135\")\n buf.write(\"\\u0910\\3\\2\\2\\2\\u0137\\u0915\\3\\2\\2\\2\\u0139\\u091e\\3\\2\\2\\2\")\n buf.write(\"\\u013b\\u0923\\3\\2\\2\\2\\u013d\\u0929\\3\\2\\2\\2\\u013f\\u092f\\3\")\n buf.write(\"\\2\\2\\2\\u0141\\u0938\\3\\2\\2\\2\\u0143\\u093d\\3\\2\\2\\2\\u0145\\u0944\")\n 
buf.write(\"\\3\\2\\2\\2\\u0147\\u0949\\3\\2\\2\\2\\u0149\\u094e\\3\\2\\2\\2\\u014b\")\n buf.write(\"\\u0951\\3\\2\\2\\2\\u014d\\u0958\\3\\2\\2\\2\\u014f\\u0962\\3\\2\\2\\2\")\n buf.write(\"\\u0151\\u0965\\3\\2\\2\\2\\u0153\\u096d\\3\\2\\2\\2\\u0155\\u0977\\3\")\n buf.write(\"\\2\\2\\2\\u0157\\u0981\\3\\2\\2\\2\\u0159\\u0988\\3\\2\\2\\2\\u015b\\u098e\")\n buf.write(\"\\3\\2\\2\\2\\u015d\\u0996\\3\\2\\2\\2\\u015f\\u09a0\\3\\2\\2\\2\\u0161\")\n buf.write(\"\\u09a8\\3\\2\\2\\2\\u0163\\u09b1\\3\\2\\2\\2\\u0165\\u09b8\\3\\2\\2\\2\")\n buf.write(\"\\u0167\\u09be\\3\\2\\2\\2\\u0169\\u09c4\\3\\2\\2\\2\\u016b\\u09cb\\3\")\n buf.write(\"\\2\\2\\2\\u016d\\u09d8\\3\\2\\2\\2\\u016f\\u09e0\\3\\2\\2\\2\\u0171\\u09e4\")\n buf.write(\"\\3\\2\\2\\2\\u0173\\u09ec\\3\\2\\2\\2\\u0175\\u09f6\\3\\2\\2\\2\\u0177\")\n buf.write(\"\\u09ff\\3\\2\\2\\2\\u0179\\u0a04\\3\\2\\2\\2\\u017b\\u0a0f\\3\\2\\2\\2\")\n buf.write(\"\\u017d\\u0a12\\3\\2\\2\\2\\u017f\\u0a1c\\3\\2\\2\\2\\u0181\\u0a24\\3\")\n buf.write(\"\\2\\2\\2\\u0183\\u0a29\\3\\2\\2\\2\\u0185\\u0a2e\\3\\2\\2\\2\\u0187\\u0a33\")\n buf.write(\"\\3\\2\\2\\2\\u0189\\u0a3c\\3\\2\\2\\2\\u018b\\u0a41\\3\\2\\2\\2\\u018d\")\n buf.write(\"\\u0a4c\\3\\2\\2\\2\\u018f\\u0a54\\3\\2\\2\\2\\u0191\\u0a59\\3\\2\\2\\2\")\n buf.write(\"\\u0193\\u0a5f\\3\\2\\2\\2\\u0195\\u0a67\\3\\2\\2\\2\\u0197\\u0a6c\\3\")\n buf.write(\"\\2\\2\\2\\u0199\\u0a72\\3\\2\\2\\2\\u019b\\u0a78\\3\\2\\2\\2\\u019d\\u0a7e\")\n buf.write(\"\\3\\2\\2\\2\\u019f\\u0a84\\3\\2\\2\\2\\u01a1\\u0a8a\\3\\2\\2\\2\\u01a3\")\n buf.write(\"\\u0a8f\\3\\2\\2\\2\\u01a5\\u0a96\\3\\2\\2\\2\\u01a7\\u0a9a\\3\\2\\2\\2\")\n buf.write(\"\\u01a9\\u0aa1\\3\\2\\2\\2\\u01ab\\u0aa7\\3\\2\\2\\2\\u01ad\\u0aac\\3\")\n buf.write(\"\\2\\2\\2\\u01af\\u0ab1\\3\\2\\2\\2\\u01b1\\u0ab6\\3\\2\\2\\2\\u01b3\\u0aba\")\n buf.write(\"\\3\\2\\2\\2\\u01b5\\u0ac2\\3\\2\\2\\2\\u01b7\\u0acb\\3\\2\\2\\2\\u01b9\")\n buf.write(\"\\u0ad4\\3\\2\\2\\2\\u01bb\\u0adb\\3\\2\\2\\2\\u01bd\\u0ae1\\3\\2\\2\\2\")\n 
buf.write(\"\\u01bf\\u0ae7\\3\\2\\2\\2\\u01c1\\u0aee\\3\\2\\2\\2\\u01c3\\u0af7\\3\")\n buf.write(\"\\2\\2\\2\\u01c5\\u0b00\\3\\2\\2\\2\\u01c7\\u0b05\\3\\2\\2\\2\\u01c9\\u0b0b\")\n buf.write(\"\\3\\2\\2\\2\\u01cb\\u0b12\\3\\2\\2\\2\\u01cd\\u0b18\\3\\2\\2\\2\\u01cf\")\n buf.write(\"\\u0b21\\3\\2\\2\\2\\u01d1\\u0b26\\3\\2\\2\\2\\u01d3\\u0b2a\\3\\2\\2\\2\")\n buf.write(\"\\u01d5\\u0b32\\3\\2\\2\\2\\u01d7\\u0b3b\\3\\2\\2\\2\\u01d9\\u0b3f\\3\")\n buf.write(\"\\2\\2\\2\\u01db\\u0b45\\3\\2\\2\\2\\u01dd\\u0b4e\\3\\2\\2\\2\\u01df\\u0b54\")\n buf.write(\"\\3\\2\\2\\2\\u01e1\\u0b5b\\3\\2\\2\\2\\u01e3\\u0b5f\\3\\2\\2\\2\\u01e5\")\n buf.write(\"\\u0b62\\3\\2\\2\\2\\u01e7\\u0b6a\\3\\2\\2\\2\\u01e9\\u0b72\\3\\2\\2\\2\")\n buf.write(\"\\u01eb\\u0b79\\3\\2\\2\\2\\u01ed\\u0b81\\3\\2\\2\\2\\u01ef\\u0b92\\3\")\n buf.write(\"\\2\\2\\2\\u01f1\\u0b9d\\3\\2\\2\\2\\u01f3\\u0ba8\\3\\2\\2\\2\\u01f5\\u0bad\")\n buf.write(\"\\3\\2\\2\\2\\u01f7\\u0bb5\\3\\2\\2\\2\\u01f9\\u0bc3\\3\\2\\2\\2\\u01fb\")\n buf.write(\"\\u0bc7\\3\\2\\2\\2\\u01fd\\u0bce\\3\\2\\2\\2\\u01ff\\u0bd3\\3\\2\\2\\2\")\n buf.write(\"\\u0201\\u0bd9\\3\\2\\2\\2\\u0203\\u0be0\\3\\2\\2\\2\\u0205\\u0be8\\3\")\n buf.write(\"\\2\\2\\2\\u0207\\u0bf2\\3\\2\\2\\2\\u0209\\u0bf9\\3\\2\\2\\2\\u020b\\u0bfc\")\n buf.write(\"\\3\\2\\2\\2\\u020d\\u0c00\\3\\2\\2\\2\\u020f\\u0c04\\3\\2\\2\\2\\u0211\")\n buf.write(\"\\u0c08\\3\\2\\2\\2\\u0213\\u0c0b\\3\\2\\2\\2\\u0215\\u0c10\\3\\2\\2\\2\")\n buf.write(\"\\u0217\\u0c15\\3\\2\\2\\2\\u0219\\u0c1c\\3\\2\\2\\2\\u021b\\u0c1f\\3\")\n buf.write(\"\\2\\2\\2\\u021d\\u0c27\\3\\2\\2\\2\\u021f\\u0c2d\\3\\2\\2\\2\\u0221\\u0c38\")\n buf.write(\"\\3\\2\\2\\2\\u0223\\u0c40\\3\\2\\2\\2\\u0225\\u0c44\\3\\2\\2\\2\\u0227\")\n buf.write(\"\\u0c4a\\3\\2\\2\\2\\u0229\\u0c4f\\3\\2\\2\\2\\u022b\\u0c5a\\3\\2\\2\\2\")\n buf.write(\"\\u022d\\u0c62\\3\\2\\2\\2\\u022f\\u0c72\\3\\2\\2\\2\\u0231\\u0c7d\\3\")\n buf.write(\"\\2\\2\\2\\u0233\\u0c84\\3\\2\\2\\2\\u0235\\u0c8e\\3\\2\\2\\2\\u0237\\u0c96\")\n 
buf.write(\"\\3\\2\\2\\2\\u0239\\u0c9b\\3\\2\\2\\2\\u023b\\u0ca4\\3\\2\\2\\2\\u023d\")\n buf.write(\"\\u0caa\\3\\2\\2\\2\\u023f\\u0cb4\\3\\2\\2\\2\\u0241\\u0cba\\3\\2\\2\\2\")\n buf.write(\"\\u0243\\u0cbf\\3\\2\\2\\2\\u0245\\u0ccb\\3\\2\\2\\2\\u0247\\u0cd4\\3\")\n buf.write(\"\\2\\2\\2\\u0249\\u0cde\\3\\2\\2\\2\\u024b\\u0ce5\\3\\2\\2\\2\\u024d\\u0cef\")\n buf.write(\"\\3\\2\\2\\2\\u024f\\u0cf9\\3\\2\\2\\2\\u0251\\u0d01\\3\\2\\2\\2\\u0253\")\n buf.write(\"\\u0d07\\3\\2\\2\\2\\u0255\\u0d11\\3\\2\\2\\2\\u0257\\u0d17\\3\\2\\2\\2\")\n buf.write(\"\\u0259\\u0d1d\\3\\2\\2\\2\\u025b\\u0d21\\3\\2\\2\\2\\u025d\\u0d26\\3\")\n buf.write(\"\\2\\2\\2\\u025f\\u0d2b\\3\\2\\2\\2\\u0261\\u0d32\\3\\2\\2\\2\\u0263\\u0d36\")\n buf.write(\"\\3\\2\\2\\2\\u0265\\u0d40\\3\\2\\2\\2\\u0267\\u0d4c\\3\\2\\2\\2\\u0269\")\n buf.write(\"\\u0d53\\3\\2\\2\\2\\u026b\\u0d5d\\3\\2\\2\\2\\u026d\\u0d64\\3\\2\\2\\2\")\n buf.write(\"\\u026f\\u0d6c\\3\\2\\2\\2\\u0271\\u0d74\\3\\2\\2\\2\\u0273\\u0d88\\3\")\n buf.write(\"\\2\\2\\2\\u0275\\u0d8f\\3\\2\\2\\2\\u0277\\u0d9c\\3\\2\\2\\2\\u0279\\u0da3\")\n buf.write(\"\\3\\2\\2\\2\\u027b\\u0dad\\3\\2\\2\\2\\u027d\\u0db3\\3\\2\\2\\2\\u027f\")\n buf.write(\"\\u0dbb\\3\\2\\2\\2\\u0281\\u0dc2\\3\\2\\2\\2\\u0283\\u0dc8\\3\\2\\2\\2\")\n buf.write(\"\\u0285\\u0dd1\\3\\2\\2\\2\\u0287\\u0dd8\\3\\2\\2\\2\\u0289\\u0ddc\\3\")\n buf.write(\"\\2\\2\\2\\u028b\\u0de2\\3\\2\\2\\2\\u028d\\u0de7\\3\\2\\2\\2\\u028f\\u0ded\")\n buf.write(\"\\3\\2\\2\\2\\u0291\\u0df4\\3\\2\\2\\2\\u0293\\u0df9\\3\\2\\2\\2\\u0295\")\n buf.write(\"\\u0e03\\3\\2\\2\\2\\u0297\\u0e0a\\3\\2\\2\\2\\u0299\\u0e16\\3\\2\\2\\2\")\n buf.write(\"\\u029b\\u0e1a\\3\\2\\2\\2\\u029d\\u0e21\\3\\2\\2\\2\\u029f\\u0e28\\3\")\n buf.write(\"\\2\\2\\2\\u02a1\\u0e2d\\3\\2\\2\\2\\u02a3\\u0e35\\3\\2\\2\\2\\u02a5\\u0e3c\")\n buf.write(\"\\3\\2\\2\\2\\u02a7\\u0e41\\3\\2\\2\\2\\u02a9\\u0e4a\\3\\2\\2\\2\\u02ab\")\n buf.write(\"\\u0e55\\3\\2\\2\\2\\u02ad\\u0e62\\3\\2\\2\\2\\u02af\\u0e74\\3\\2\\2\\2\")\n 
buf.write(\"\\u02b1\\u0e80\\3\\2\\2\\2\\u02b3\\u0e90\\3\\2\\2\\2\\u02b5\\u0e94\\3\")\n buf.write(\"\\2\\2\\2\\u02b7\\u0e99\\3\\2\\2\\2\\u02b9\\u0ea2\\3\\2\\2\\2\\u02bb\\u0ea8\")\n buf.write(\"\\3\\2\\2\\2\\u02bd\\u0ead\\3\\2\\2\\2\\u02bf\\u0eb6\\3\\2\\2\\2\\u02c1\")\n buf.write(\"\\u0ebf\\3\\2\\2\\2\\u02c3\\u0ec8\\3\\2\\2\\2\\u02c5\\u0ed7\\3\\2\\2\\2\")\n buf.write(\"\\u02c7\\u0ede\\3\\2\\2\\2\\u02c9\\u0ee3\\3\\2\\2\\2\\u02cb\\u0ee8\\3\")\n buf.write(\"\\2\\2\\2\\u02cd\\u0ef1\\3\\2\\2\\2\\u02cf\\u0efa\\3\\2\\2\\2\\u02d1\\u0eff\")\n buf.write(\"\\3\\2\\2\\2\\u02d3\\u0f0d\\3\\2\\2\\2\\u02d5\\u0f15\\3\\2\\2\\2\\u02d7\")\n buf.write(\"\\u0f1e\\3\\2\\2\\2\\u02d9\\u0f29\\3\\2\\2\\2\\u02db\\u0f2f\\3\\2\\2\\2\")\n buf.write(\"\\u02dd\\u0f37\\3\\2\\2\\2\\u02df\\u0f41\\3\\2\\2\\2\\u02e1\\u0f4e\\3\")\n buf.write(\"\\2\\2\\2\\u02e3\\u0f55\\3\\2\\2\\2\\u02e5\\u0f60\\3\\2\\2\\2\\u02e7\\u0f67\")\n buf.write(\"\\3\\2\\2\\2\\u02e9\\u0f73\\3\\2\\2\\2\\u02eb\\u0f80\\3\\2\\2\\2\\u02ed\")\n buf.write(\"\\u0f8e\\3\\2\\2\\2\\u02ef\\u0f96\\3\\2\\2\\2\\u02f1\\u0f9e\\3\\2\\2\\2\")\n buf.write(\"\\u02f3\\u0fa6\\3\\2\\2\\2\\u02f5\\u0fac\\3\\2\\2\\2\\u02f7\\u0fb0\\3\")\n buf.write(\"\\2\\2\\2\\u02f9\\u0fb5\\3\\2\\2\\2\\u02fb\\u0fba\\3\\2\\2\\2\\u02fd\\u0fc4\")\n buf.write(\"\\3\\2\\2\\2\\u02ff\\u0fe0\\3\\2\\2\\2\\u0301\\u0ffb\\3\\2\\2\\2\\u0303\")\n buf.write(\"\\u1013\\3\\2\\2\\2\\u0305\\u1021\\3\\2\\2\\2\\u0307\\u102f\\3\\2\\2\\2\")\n buf.write(\"\\u0309\\u103f\\3\\2\\2\\2\\u030b\\u104f\\3\\2\\2\\2\\u030d\\u1052\\3\")\n buf.write(\"\\2\\2\\2\\u030f\\u105b\\3\\2\\2\\2\\u0311\\u1067\\3\\2\\2\\2\\u0313\\u1071\")\n buf.write(\"\\3\\2\\2\\2\\u0315\\u1077\\3\\2\\2\\2\\u0317\\u107f\\3\\2\\2\\2\\u0319\")\n buf.write(\"\\u1084\\3\\2\\2\\2\\u031b\\u1089\\3\\2\\2\\2\\u031d\\u1092\\3\\2\\2\\2\")\n buf.write(\"\\u031f\\u1097\\3\\2\\2\\2\\u0321\\u10a1\\3\\2\\2\\2\\u0323\\u10a7\\3\")\n buf.write(\"\\2\\2\\2\\u0325\\u10ad\\3\\2\\2\\2\\u0327\\u10b4\\3\\2\\2\\2\\u0329\\u10be\")\n 
buf.write(\"\\3\\2\\2\\2\\u032b\\u10c6\\3\\2\\2\\2\\u032d\\u10cc\\3\\2\\2\\2\\u032f\")\n buf.write(\"\\u10d3\\3\\2\\2\\2\\u0331\\u10db\\3\\2\\2\\2\\u0333\\u10e2\\3\\2\\2\\2\")\n buf.write(\"\\u0335\\u10e9\\3\\2\\2\\2\\u0337\\u10ed\\3\\2\\2\\2\\u0339\\u10f3\\3\")\n buf.write(\"\\2\\2\\2\\u033b\\u10fc\\3\\2\\2\\2\\u033d\\u1102\\3\\2\\2\\2\\u033f\\u1109\")\n buf.write(\"\\3\\2\\2\\2\\u0341\\u1111\\3\\2\\2\\2\\u0343\\u111a\\3\\2\\2\\2\\u0345\")\n buf.write(\"\\u1123\\3\\2\\2\\2\\u0347\\u112a\\3\\2\\2\\2\\u0349\\u1132\\3\\2\\2\\2\")\n buf.write(\"\\u034b\\u113a\\3\\2\\2\\2\\u034d\\u1143\\3\\2\\2\\2\\u034f\\u1148\\3\")\n buf.write(\"\\2\\2\\2\\u0351\\u1150\\3\\2\\2\\2\\u0353\\u115b\\3\\2\\2\\2\\u0355\\u1160\")\n buf.write(\"\\3\\2\\2\\2\\u0357\\u1169\\3\\2\\2\\2\\u0359\\u116f\\3\\2\\2\\2\\u035b\")\n buf.write(\"\\u1175\\3\\2\\2\\2\\u035d\\u117a\\3\\2\\2\\2\\u035f\\u1181\\3\\2\\2\\2\")\n buf.write(\"\\u0361\\u1186\\3\\2\\2\\2\\u0363\\u118c\\3\\2\\2\\2\\u0365\\u1190\\3\")\n buf.write(\"\\2\\2\\2\\u0367\\u1197\\3\\2\\2\\2\\u0369\\u11a5\\3\\2\\2\\2\\u036b\\u11ad\")\n buf.write(\"\\3\\2\\2\\2\\u036d\\u11ba\\3\\2\\2\\2\\u036f\\u11c5\\3\\2\\2\\2\\u0371\")\n buf.write(\"\\u11cf\\3\\2\\2\\2\\u0373\\u11d9\\3\\2\\2\\2\\u0375\\u11e7\\3\\2\\2\\2\")\n buf.write(\"\\u0377\\u11f0\\3\\2\\2\\2\\u0379\\u11f6\\3\\2\\2\\2\\u037b\\u11ff\\3\")\n buf.write(\"\\2\\2\\2\\u037d\\u1207\\3\\2\\2\\2\\u037f\\u1214\\3\\2\\2\\2\\u0381\\u121d\")\n buf.write(\"\\3\\2\\2\\2\\u0383\\u1222\\3\\2\\2\\2\\u0385\\u1226\\3\\2\\2\\2\\u0387\")\n buf.write(\"\\u123f\\3\\2\\2\\2\\u0389\\u1244\\3\\2\\2\\2\\u038b\\u124f\\3\\2\\2\\2\")\n buf.write(\"\\u038d\\u1261\\3\\2\\2\\2\\u038f\\u1271\\3\\2\\2\\2\\u0391\\u1284\\3\")\n buf.write(\"\\2\\2\\2\\u0393\\u129b\\3\\2\\2\\2\\u0395\\u12aa\\3\\2\\2\\2\\u0397\\u12b4\")\n buf.write(\"\\3\\2\\2\\2\\u0399\\u12bf\\3\\2\\2\\2\\u039b\\u12c7\\3\\2\\2\\2\\u039d\")\n buf.write(\"\\u12d4\\3\\2\\2\\2\\u039f\\u12e4\\3\\2\\2\\2\\u03a1\\u12f4\\3\\2\\2\\2\")\n 
buf.write(\"\\u03a3\\u12f9\\3\\2\\2\\2\\u03a5\\u12fd\\3\\2\\2\\2\\u03a7\\u1302\\3\")\n buf.write(\"\\2\\2\\2\\u03a9\\u1306\\3\\2\\2\\2\\u03ab\\u130b\\3\\2\\2\\2\\u03ad\\u130f\")\n buf.write(\"\\3\\2\\2\\2\\u03af\\u1316\\3\\2\\2\\2\\u03b1\\u131a\\3\\2\\2\\2\\u03b3\")\n buf.write(\"\\u1320\\3\\2\\2\\2\\u03b5\\u1330\\3\\2\\2\\2\\u03b7\\u133b\\3\\2\\2\\2\")\n buf.write(\"\\u03b9\\u133f\\3\\2\\2\\2\\u03bb\\u1348\\3\\2\\2\\2\\u03bd\\u134e\\3\")\n buf.write(\"\\2\\2\\2\\u03bf\\u1355\\3\\2\\2\\2\\u03c1\\u135a\\3\\2\\2\\2\\u03c3\\u1361\")\n buf.write(\"\\3\\2\\2\\2\\u03c5\\u136e\\3\\2\\2\\2\\u03c7\\u137b\\3\\2\\2\\2\\u03c9\")\n buf.write(\"\\u1388\\3\\2\\2\\2\\u03cb\\u138b\\3\\2\\2\\2\\u03cd\\u138d\\3\\2\\2\\2\")\n buf.write(\"\\u03cf\\u138f\\3\\2\\2\\2\\u03d1\\u139e\\3\\2\\2\\2\\u03d3\\u13aa\\3\")\n buf.write(\"\\2\\2\\2\\u03d5\\u13b3\\3\\2\\2\\2\\u03d7\\u13b5\\3\\2\\2\\2\\u03d9\\u13c0\")\n buf.write(\"\\3\\2\\2\\2\\u03db\\u13cb\\3\\2\\2\\2\\u03dd\\u13d6\\3\\2\\2\\2\\u03df\")\n buf.write(\"\\u13e1\\3\\2\\2\\2\\u03e1\\u13e3\\3\\2\\2\\2\\u03e3\\u13ed\\3\\2\\2\\2\")\n buf.write(\"\\u03e5\\u13ef\\3\\2\\2\\2\\u03e7\\u13f1\\3\\2\\2\\2\\u03e9\\u13f3\\3\")\n buf.write(\"\\2\\2\\2\\u03eb\\u13f5\\3\\2\\2\\2\\u03ed\\u13f8\\3\\2\\2\\2\\u03ef\\u13fa\")\n buf.write(\"\\3\\2\\2\\2\\u03f1\\u13fc\\3\\2\\2\\2\\u03f3\\u13fe\\3\\2\\2\\2\\u03f5\")\n buf.write(\"\\u1400\\3\\2\\2\\2\\u03f7\\u1402\\3\\2\\2\\2\\u03f9\\u1404\\3\\2\\2\\2\")\n buf.write(\"\\u03fb\\u1415\\3\\2\\2\\2\\u03fd\\u1417\\3\\2\\2\\2\\u03ff\\u1419\\3\")\n buf.write(\"\\2\\2\\2\\u0401\\u141b\\3\\2\\2\\2\\u0403\\u141e\\3\\2\\2\\2\\u0405\\u1420\")\n buf.write(\"\\3\\2\\2\\2\\u0407\\u142b\\3\\2\\2\\2\\u0409\\u142d\\3\\2\\2\\2\\u040b\")\n buf.write(\"\\u142f\\3\\2\\2\\2\\u040d\\u1431\\3\\2\\2\\2\\u040f\\u1433\\3\\2\\2\\2\")\n buf.write(\"\\u0411\\u1435\\3\\2\\2\\2\\u0413\\u1437\\3\\2\\2\\2\\u0415\\u143a\\3\")\n buf.write(\"\\2\\2\\2\\u0417\\u143c\\3\\2\\2\\2\\u0419\\u143e\\3\\2\\2\\2\\u041b\\u1440\")\n 
buf.write(\"\\3\\2\\2\\2\\u041d\\u1442\\3\\2\\2\\2\\u041f\\u1445\\3\\2\\2\\2\\u0421\")\n buf.write(\"\\u144b\\3\\2\\2\\2\\u0423\\u144e\\3\\2\\2\\2\\u0425\\u1455\\3\\2\\2\\2\")\n buf.write(\"\\u0427\\u1460\\3\\2\\2\\2\\u0429\\u146f\\3\\2\\2\\2\\u042b\\u147d\\3\")\n buf.write(\"\\2\\2\\2\\u042d\\u1490\\3\\2\\2\\2\\u042f\\u1494\\3\\2\\2\\2\\u0431\\u1496\")\n buf.write(\"\\3\\2\\2\\2\\u0433\\u149e\\3\\2\\2\\2\\u0435\\u14a3\\3\\2\\2\\2\\u0437\")\n buf.write(\"\\u14a5\\3\\2\\2\\2\\u0439\\u14a7\\3\\2\\2\\2\\u043b\\u14a9\\3\\2\\2\\2\")\n buf.write(\"\\u043d\\u14ab\\3\\2\\2\\2\\u043f\\u14ad\\3\\2\\2\\2\\u0441\\u14af\\3\")\n buf.write(\"\\2\\2\\2\\u0443\\u14b1\\3\\2\\2\\2\\u0445\\u14b3\\3\\2\\2\\2\\u0447\\u14b5\")\n buf.write(\"\\3\\2\\2\\2\\u0449\\u14b7\\3\\2\\2\\2\\u044b\\u14b9\\3\\2\\2\\2\\u044d\")\n buf.write(\"\\u14bb\\3\\2\\2\\2\\u044f\\u14bd\\3\\2\\2\\2\\u0451\\u14bf\\3\\2\\2\\2\")\n buf.write(\"\\u0453\\u14c1\\3\\2\\2\\2\\u0455\\u14c3\\3\\2\\2\\2\\u0457\\u14c5\\3\")\n buf.write(\"\\2\\2\\2\\u0459\\u14c7\\3\\2\\2\\2\\u045b\\u14c9\\3\\2\\2\\2\\u045d\\u14cb\")\n buf.write(\"\\3\\2\\2\\2\\u045f\\u14cd\\3\\2\\2\\2\\u0461\\u14cf\\3\\2\\2\\2\\u0463\")\n buf.write(\"\\u14d1\\3\\2\\2\\2\\u0465\\u14d3\\3\\2\\2\\2\\u0467\\u14d5\\3\\2\\2\\2\")\n buf.write(\"\\u0469\\u046a\\7\\60\\2\\2\\u046a\\u046b\\7\\60\\2\\2\\u046b\\4\\3\\2\")\n buf.write(\"\\2\\2\\u046c\\u046d\\5\\u0435\\u021b\\2\\u046d\\6\\3\\2\\2\\2\\u046e\")\n buf.write(\"\\u046f\\5\\u0435\\u021b\\2\\u046f\\u0470\\5\\u043b\\u021e\\2\\u0470\")\n buf.write(\"\\u0471\\5\\u043b\\u021e\\2\\u0471\\b\\3\\2\\2\\2\\u0472\\u0473\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0473\\u0474\\5\\u043f\\u0220\\2\\u0474\\u0475\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0475\\u0476\\5\\u043d\\u021f\\2\\u0476\\u0477\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u0477\\n\\3\\2\\2\\2\\u0478\\u0479\\5\\u0435\\u021b\\2\\u0479\")\n buf.write(\"\\u047a\\5\\u0441\\u0221\\2\\u047a\\u047b\\5\\u043d\\u021f\\2\\u047b\")\n 
buf.write(\"\\u047c\\5\\u044f\\u0228\\2\\u047c\\u047d\\5\\u045b\\u022e\\2\\u047d\")\n buf.write(\"\\f\\3\\2\\2\\2\\u047e\\u047f\\5\\u0435\\u021b\\2\\u047f\\u0480\\5\\u0441\")\n buf.write(\"\\u0221\\2\\u0480\\u0481\\5\\u0441\\u0221\\2\\u0481\\u0482\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u0482\\u0483\\5\\u043d\\u021f\\2\\u0483\\u0484\\5\\u0441\")\n buf.write(\"\\u0221\\2\\u0484\\u0485\\5\\u0435\\u021b\\2\\u0485\\u0486\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0486\\u0487\\5\\u043d\\u021f\\2\\u0487\\16\\3\\2\\2\\2\")\n buf.write(\"\\u0488\\u0489\\5\\u0435\\u021b\\2\\u0489\\u048a\\5\\u044b\\u0226\")\n buf.write(\"\\2\\u048a\\u048b\\5\\u044b\\u0226\\2\\u048b\\20\\3\\2\\2\\2\\u048c\")\n buf.write(\"\\u048d\\5\\u0435\\u021b\\2\\u048d\\u048e\\5\\u044b\\u0226\\2\\u048e\")\n buf.write(\"\\u048f\\5\\u045b\\u022e\\2\\u048f\\u0490\\5\\u043d\\u021f\\2\\u0490\")\n buf.write(\"\\u0491\\5\\u0457\\u022c\\2\\u0491\\22\\3\\2\\2\\2\\u0492\\u0493\\5\")\n buf.write(\"\\u0435\\u021b\\2\\u0493\\u0494\\5\\u044f\\u0228\\2\\u0494\\u0495\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u0495\\u0496\\5\\u044b\\u0226\\2\\u0496\\u0497\")\n buf.write(\"\\5\\u0465\\u0233\\2\\u0497\\u0498\\5\\u0467\\u0234\\2\\u0498\\u0499\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0499\\24\\3\\2\\2\\2\\u049a\\u049b\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u049b\\u049c\\5\\u044f\\u0228\\2\\u049c\\u049d\\5\\u043b\")\n buf.write(\"\\u021e\\2\\u049d\\26\\3\\2\\2\\2\\u049e\\u049f\\5\\u0435\\u021b\\2\")\n buf.write(\"\\u049f\\u04a0\\5\\u044f\\u0228\\2\\u04a0\\u04a1\\5\\u0465\\u0233\")\n buf.write(\"\\2\\u04a1\\30\\3\\2\\2\\2\\u04a2\\u04a3\\5\\u0435\\u021b\\2\\u04a3\")\n buf.write(\"\\u04a4\\5\\u0457\\u022c\\2\\u04a4\\u04a5\\5\\u0457\\u022c\\2\\u04a5\")\n buf.write(\"\\u04a6\\5\\u0435\\u021b\\2\\u04a6\\u04a7\\5\\u0465\\u0233\\2\\u04a7\")\n buf.write(\"\\32\\3\\2\\2\\2\\u04a8\\u04a9\\5\\u0435\\u021b\\2\\u04a9\\u04aa\\5\")\n buf.write(\"\\u0459\\u022d\\2\\u04aa\\34\\3\\2\\2\\2\\u04ab\\u04ac\\5\\u0435\\u021b\")\n 
buf.write(\"\\2\\u04ac\\u04ad\\5\\u0459\\u022d\\2\\u04ad\\u04ae\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u04ae\\u04af\\5\\u045d\\u022f\\2\\u04af\\u04b0\\5\\u044d\\u0227\")\n buf.write(\"\\2\\u04b0\\u04b1\\5\\u043d\\u021f\\2\\u04b1\\36\\3\\2\\2\\2\\u04b2\")\n buf.write(\"\\u04b3\\5\\u0435\\u021b\\2\\u04b3\\u04b4\\5\\u0459\\u022d\\2\\u04b4\")\n buf.write(\"\\u04b5\\5\\u0459\\u022d\\2\\u04b5\\u04b6\\5\\u043d\\u021f\\2\\u04b6\")\n buf.write(\"\\u04b7\\5\\u0457\\u022c\\2\\u04b7\\u04b8\\5\\u045b\\u022e\\2\\u04b8\")\n buf.write(\" \\3\\2\\2\\2\\u04b9\\u04ba\\5\\u0435\\u021b\\2\\u04ba\\u04bb\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u04bb\\u04bc\\5\\u0439\\u021d\\2\\u04bc\\\"\\3\\2\\2\\2\\u04bd\")\n buf.write(\"\\u04be\\5\\u0435\\u021b\\2\\u04be\\u04bf\\5\\u0459\\u022d\\2\\u04bf\")\n buf.write(\"\\u04c0\\5\\u0459\\u022d\\2\\u04c0\\u04c1\\5\\u0451\\u0229\\2\\u04c1\")\n buf.write(\"\\u04c2\\5\\u0439\\u021d\\2\\u04c2\\u04c3\\5\\u0445\\u0223\\2\\u04c3\")\n buf.write(\"\\u04c4\\5\\u0435\\u021b\\2\\u04c4\\u04c5\\5\\u045b\\u022e\\2\\u04c5\")\n buf.write(\"\\u04c6\\5\\u043d\\u021f\\2\\u04c6$\\3\\2\\2\\2\\u04c7\\u04c8\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u04c8\\u04c9\\5\\u045b\\u022e\\2\\u04c9&\\3\\2\\2\\2\\u04ca\")\n buf.write(\"\\u04cb\\5\\u0435\\u021b\\2\\u04cb\\u04cc\\5\\u045b\\u022e\\2\\u04cc\")\n buf.write(\"\\u04cd\\5\\u045b\\u022e\\2\\u04cd\\u04ce\\5\\u0457\\u022c\\2\\u04ce\")\n buf.write(\"\\u04cf\\5\\u0445\\u0223\\2\\u04cf\\u04d0\\5\\u0437\\u021c\\2\\u04d0\")\n buf.write(\"\\u04d1\\5\\u045d\\u022f\\2\\u04d1\\u04d2\\5\\u045b\\u022e\\2\\u04d2\")\n buf.write(\"\\u04d3\\5\\u043d\\u021f\\2\\u04d3(\\3\\2\\2\\2\\u04d4\\u04d5\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u04d5\\u04d6\\5\\u045d\\u022f\\2\\u04d6\\u04d7\\5\\u043b\")\n buf.write(\"\\u021e\\2\\u04d7\\u04d8\\5\\u0445\\u0223\\2\\u04d8\\u04d9\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u04d9*\\3\\2\\2\\2\\u04da\\u04db\\5\\u0435\\u021b\\2\\u04db\")\n buf.write(\"\\u04dc\\5\\u045d\\u022f\\2\\u04dc\\u04dd\\5\\u045b\\u022e\\2\\u04dd\")\n 
buf.write(\"\\u04de\\5\\u0443\\u0222\\2\\u04de\\u04df\\5\\u0445\\u0223\\2\\u04df\")\n buf.write(\"\\u04e0\\5\\u043b\\u021e\\2\\u04e0,\\3\\2\\2\\2\\u04e1\\u04e2\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u04e2\\u04e3\\5\\u045d\\u022f\\2\\u04e3\\u04e4\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u04e4\\u04e5\\5\\u0451\\u0229\\2\\u04e5.\\3\\2\\2\\2\\u04e6\")\n buf.write(\"\\u04e7\\5\\u0435\\u021b\\2\\u04e7\\u04e8\\5\\u045d\\u022f\\2\\u04e8\")\n buf.write(\"\\u04e9\\5\\u045b\\u022e\\2\\u04e9\\u04ea\\5\\u0451\\u0229\\2\\u04ea\")\n buf.write(\"\\u04eb\\5\\u044d\\u0227\\2\\u04eb\\u04ec\\5\\u0435\\u021b\\2\\u04ec\")\n buf.write(\"\\u04ed\\5\\u045b\\u022e\\2\\u04ed\\u04ee\\5\\u0445\\u0223\\2\\u04ee\")\n buf.write(\"\\u04ef\\5\\u0439\\u021d\\2\\u04ef\\60\\3\\2\\2\\2\\u04f0\\u04f1\\5\")\n buf.write(\"\\u0435\\u021b\\2\\u04f1\\u04f2\\5\\u045d\\u022f\\2\\u04f2\\u04f3\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u04f3\\u04f4\\5\\u0451\\u0229\\2\\u04f4\\u04f5\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u04f5\\u04f6\\5\\u0451\\u0229\\2\\u04f6\\u04f7\")\n buf.write(\"\\5\\u044d\\u0227\\2\\u04f7\\u04f8\\5\\u0451\\u0229\\2\\u04f8\\u04f9\")\n buf.write(\"\\5\\u045d\\u022f\\2\\u04f9\\u04fa\\5\\u0459\\u022d\\2\\u04fa\\u04fb\")\n buf.write(\"\\7a\\2\\2\\u04fb\\u04fc\\5\\u045b\\u022e\\2\\u04fc\\u04fd\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u04fd\\u04fe\\5\\u0435\\u021b\\2\\u04fe\\u04ff\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u04ff\\u0500\\5\\u0459\\u022d\\2\\u0500\\u0501\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0501\\u0502\\5\\u0439\\u021d\\2\\u0502\\u0503\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0503\\u0504\\5\\u0445\\u0223\\2\\u0504\\u0505\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u0505\\u0506\\5\\u044f\\u0228\\2\\u0506\\62\\3\\2\\2\\2\")\n buf.write(\"\\u0507\\u0508\\5\\u0437\\u021c\\2\\u0508\\u0509\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u0509\\u050a\\5\\u045b\\u022e\\2\\u050a\\u050b\\5\\u0439\\u021d\")\n buf.write(\"\\2\\u050b\\u050c\\5\\u0443\\u0222\\2\\u050c\\64\\3\\2\\2\\2\\u050d\")\n 
buf.write(\"\\u050e\\5\\u0437\\u021c\\2\\u050e\\u050f\\5\\u043d\\u021f\\2\\u050f\")\n buf.write(\"\\u0510\\5\\u043f\\u0220\\2\\u0510\\u0511\\5\\u0451\\u0229\\2\\u0511\")\n buf.write(\"\\u0512\\5\\u0457\\u022c\\2\\u0512\\u0513\\5\\u043d\\u021f\\2\\u0513\")\n buf.write(\"\\66\\3\\2\\2\\2\\u0514\\u0515\\5\\u0437\\u021c\\2\\u0515\\u0516\\5\")\n buf.write(\"\\u043d\\u021f\\2\\u0516\\u0517\\5\\u0441\\u0221\\2\\u0517\\u0518\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u0518\\u0519\\5\\u044f\\u0228\\2\\u05198\\3\")\n buf.write(\"\\2\\2\\2\\u051a\\u051b\\5\\u0437\\u021c\\2\\u051b\\u051c\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u051c\\u051d\\5\\u045b\\u022e\\2\\u051d\\u051e\\5\\u0461\")\n buf.write(\"\\u0231\\2\\u051e\\u051f\\5\\u043d\\u021f\\2\\u051f\\u0520\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0520\\u0521\\5\\u044f\\u0228\\2\\u0521:\\3\\2\\2\\2\\u0522\")\n buf.write(\"\\u0523\\5\\u0437\\u021c\\2\\u0523\\u0524\\5\\u043f\\u0220\\2\\u0524\")\n buf.write(\"\\u0525\\5\\u0445\\u0223\\2\\u0525\\u0526\\5\\u044b\\u0226\\2\\u0526\")\n buf.write(\"\\u0527\\5\\u043d\\u021f\\2\\u0527<\\3\\2\\2\\2\\u0528\\u0529\\5\\u0437\")\n buf.write(\"\\u021c\\2\\u0529\\u052a\\5\\u0445\\u0223\\2\\u052a\\u052b\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u052b\\u052c\\5\\u0435\\u021b\\2\\u052c\\u052d\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u052d\\u052e\\5\\u0465\\u0233\\2\\u052e\\u052f\\7a\\2\")\n buf.write(\"\\2\\u052f\\u0530\\5\\u043b\\u021e\\2\\u0530\\u0531\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u0531\\u0532\\5\\u045d\\u022f\\2\\u0532\\u0533\\5\\u0437\\u021c\")\n buf.write(\"\\2\\u0533\\u0534\\5\\u044b\\u0226\\2\\u0534\\u0535\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0535>\\3\\2\\2\\2\\u0536\\u0537\\5\\u0437\\u021c\\2\\u0537\\u0538\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u0538\\u0539\\5\\u044f\\u0228\\2\\u0539\\u053a\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u053a\\u053b\\5\\u0457\\u022c\\2\\u053b\\u053c\")\n buf.write(\"\\5\\u0465\\u0233\\2\\u053c\\u053d\\7a\\2\\2\\u053d\\u053e\\5\\u043f\")\n 
buf.write(\"\\u0220\\2\\u053e\\u053f\\5\\u044b\\u0226\\2\\u053f\\u0540\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u0540\\u0541\\5\\u0435\\u021b\\2\\u0541\\u0542\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0542@\\3\\2\\2\\2\\u0543\\u0544\\5\\u0437\\u021c\\2\\u0544\")\n buf.write(\"\\u0545\\5\\u0445\\u0223\\2\\u0545\\u0546\\5\\u044f\\u0228\\2\\u0546\")\n buf.write(\"\\u0547\\5\\u0435\\u021b\\2\\u0547\\u0548\\5\\u0457\\u022c\\2\\u0548\")\n buf.write(\"\\u0549\\5\\u0465\\u0233\\2\\u0549\\u054a\\7a\\2\\2\\u054a\\u054b\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u054b\\u054c\\5\\u044f\\u0228\\2\\u054c\\u054d\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u054d\\u054e\\5\\u043d\\u021f\\2\\u054e\\u054f\")\n buf.write(\"\\5\\u0441\\u0221\\2\\u054f\\u0550\\5\\u043d\\u021f\\2\\u0550\\u0551\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u0551B\\3\\2\\2\\2\\u0552\\u0553\\5\\u0437\\u021c\")\n buf.write(\"\\2\\u0553\\u0554\\5\\u044b\\u0226\\2\\u0554\\u0555\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u0555\\u0556\\5\\u0437\\u021c\\2\\u0556D\\3\\2\\2\\2\\u0557\\u0558\")\n buf.write(\"\\5\\u0437\\u021c\\2\\u0558\\u0559\\5\\u044b\\u0226\\2\\u0559\\u055a\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u055a\\u055b\\5\\u0439\\u021d\\2\\u055b\\u055c\")\n buf.write(\"\\5\\u0449\\u0225\\2\\u055cF\\3\\2\\2\\2\\u055d\\u055e\\5\\u0437\\u021c\")\n buf.write(\"\\2\\u055e\\u055f\\5\\u0451\\u0229\\2\\u055f\\u0560\\5\\u043b\\u021e\")\n buf.write(\"\\2\\u0560\\u0561\\5\\u0465\\u0233\\2\\u0561H\\3\\2\\2\\2\\u0562\\u0563\")\n buf.write(\"\\5\\u0437\\u021c\\2\\u0563\\u0564\\5\\u0451\\u0229\\2\\u0564\\u0565\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u0565\\u0566\\5\\u044b\\u0226\\2\\u0566\\u0567\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0567\\u0568\\5\\u0435\\u021b\\2\\u0568\\u0569\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u0569J\\3\\2\\2\\2\\u056a\\u056b\\5\\u0437\\u021c\")\n buf.write(\"\\2\\u056b\\u056c\\5\\u0451\\u0229\\2\\u056c\\u056d\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u056d\\u056e\\5\\u0443\\u0222\\2\\u056eL\\3\\2\\2\\2\\u056f\\u0570\")\n 
buf.write(\"\\5\\u0437\\u021c\\2\\u0570\\u0571\\5\\u0457\\u022c\\2\\u0571\\u0572\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0572\\u0573\\5\\u0435\\u021b\\2\\u0573\\u0574\")\n buf.write(\"\\5\\u043b\\u021e\\2\\u0574\\u0575\\5\\u045b\\u022e\\2\\u0575\\u0576\")\n buf.write(\"\\5\\u0443\\u0222\\2\\u0576N\\3\\2\\2\\2\\u0577\\u0578\\5\\u0437\\u021c\")\n buf.write(\"\\2\\u0578\\u0579\\5\\u045d\\u022f\\2\\u0579\\u057a\\5\\u044b\\u0226\")\n buf.write(\"\\2\\u057a\\u057b\\5\\u0449\\u0225\\2\\u057bP\\3\\2\\2\\2\\u057c\\u057d\")\n buf.write(\"\\5\\u0437\\u021c\\2\\u057d\\u057e\\5\\u0465\\u0233\\2\\u057eR\\3\")\n buf.write(\"\\2\\2\\2\\u057f\\u0580\\5\\u0437\\u021c\\2\\u0580\\u0581\\5\\u0465\")\n buf.write(\"\\u0233\\2\\u0581\\u0582\\5\\u045b\\u022e\\2\\u0582\\u0583\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0583T\\3\\2\\2\\2\\u0584\\u0585\\5\\u0439\\u021d\\2\\u0585\")\n buf.write(\"V\\3\\2\\2\\2\\u0586\\u0587\\5\\u0439\\u021d\\2\\u0587\\u0588\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0588\\u0589\\5\\u0439\\u021d\\2\\u0589\\u058a\\5\\u0443\")\n buf.write(\"\\u0222\\2\\u058a\\u058b\\5\\u043d\\u021f\\2\\u058bX\\3\\2\\2\\2\\u058c\")\n buf.write(\"\\u058d\\5\\u0439\\u021d\\2\\u058d\\u058e\\5\\u0435\\u021b\\2\\u058e\")\n buf.write(\"\\u058f\\5\\u044b\\u0226\\2\\u058f\\u0590\\5\\u044b\\u0226\\2\\u0590\")\n buf.write(\"Z\\3\\2\\2\\2\\u0591\\u0592\\5\\u0439\\u021d\\2\\u0592\\u0593\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0593\\u0594\\5\\u044f\\u0228\\2\\u0594\\u0595\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u0595\\u0596\\5\\u044f\\u0228\\2\\u0596\\u0597\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u0597\\u0598\\5\\u0439\\u021d\\2\\u0598\\u0599\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0599\\u059a\\5\\u044b\\u0226\\2\\u059a\\\\\\3\\2\\2\\2\\u059b\")\n buf.write(\"\\u059c\\5\\u0439\\u021d\\2\\u059c\\u059d\\5\\u0435\\u021b\\2\\u059d\")\n buf.write(\"\\u059e\\5\\u0459\\u022d\\2\\u059e\\u059f\\5\\u0439\\u021d\\2\\u059f\")\n buf.write(\"\\u05a0\\5\\u0435\\u021b\\2\\u05a0\\u05a1\\5\\u043b\\u021e\\2\\u05a1\")\n 
buf.write(\"\\u05a2\\5\\u043d\\u021f\\2\\u05a2^\\3\\2\\2\\2\\u05a3\\u05a4\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u05a4\\u05a5\\5\\u0435\\u021b\\2\\u05a5\\u05a6\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u05a6\\u05a7\\5\\u043d\\u021f\\2\\u05a7`\\3\\2\\2\\2\\u05a8\")\n buf.write(\"\\u05a9\\5\\u0439\\u021d\\2\\u05a9\\u05aa\\5\\u0435\\u021b\\2\\u05aa\")\n buf.write(\"\\u05ab\\5\\u0459\\u022d\\2\\u05ab\\u05ac\\5\\u045b\\u022e\\2\\u05ac\")\n buf.write(\"b\\3\\2\\2\\2\\u05ad\\u05ae\\5\\u0439\\u021d\\2\\u05ae\\u05af\\5\\u0443\")\n buf.write(\"\\u0222\\2\\u05af\\u05b0\\5\\u0435\\u021b\\2\\u05b0\\u05b1\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u05b1d\\3\\2\\2\\2\\u05b2\\u05b3\\5\\u0439\\u021d\\2\\u05b3\")\n buf.write(\"\\u05b4\\5\\u0443\\u0222\\2\\u05b4\\u05b5\\5\\u0435\\u021b\\2\\u05b5\")\n buf.write(\"\\u05b6\\5\\u0457\\u022c\\2\\u05b6\\u05b7\\7a\\2\\2\\u05b7\\u05b8\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u05b8\\u05b9\\5\\u0459\\u022d\\2\\u05b9f\\3\")\n buf.write(\"\\2\\2\\2\\u05ba\\u05bb\\5\\u0439\\u021d\\2\\u05bb\\u05bc\\5\\u0443\")\n buf.write(\"\\u0222\\2\\u05bc\\u05bd\\5\\u0435\\u021b\\2\\u05bd\\u05be\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u05be\\u05bf\\5\\u0435\\u021b\\2\\u05bf\\u05c0\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u05c0\\u05c1\\5\\u045b\\u022e\\2\\u05c1\\u05c2\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u05c2\\u05c3\\5\\u0457\\u022c\\2\\u05c3h\\3\\2\\2\\2\\u05c4\")\n buf.write(\"\\u05c5\\5\\u0439\\u021d\\2\\u05c5\\u05c6\\5\\u0443\\u0222\\2\\u05c6\")\n buf.write(\"\\u05c7\\5\\u043d\\u021f\\2\\u05c7\\u05c8\\5\\u0439\\u021d\\2\\u05c8\")\n buf.write(\"\\u05c9\\5\\u0449\\u0225\\2\\u05c9j\\3\\2\\2\\2\\u05ca\\u05cb\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u05cb\\u05cc\\5\\u0443\\u0222\\2\\u05cc\\u05cd\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u05cdl\\3\\2\\2\\2\\u05ce\\u05cf\\5\\u0439\\u021d\\2\\u05cf\")\n buf.write(\"\\u05d0\\5\\u044b\\u0226\\2\\u05d0\\u05d1\\5\\u0451\\u0229\\2\\u05d1\")\n buf.write(\"\\u05d2\\5\\u0437\\u021c\\2\\u05d2n\\3\\2\\2\\2\\u05d3\\u05d4\\5\\u0439\")\n 
buf.write(\"\\u021d\\2\\u05d4\\u05d5\\5\\u044b\\u0226\\2\\u05d5\\u05d6\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u05d6\\u05d7\\5\\u0459\\u022d\\2\\u05d7\\u05d8\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u05d8p\\3\\2\\2\\2\\u05d9\\u05da\\5\\u0439\\u021d\\2\\u05da\")\n buf.write(\"\\u05db\\5\\u044b\\u0226\\2\\u05db\\u05dc\\5\\u045d\\u022f\\2\\u05dc\")\n buf.write(\"\\u05dd\\5\\u0459\\u022d\\2\\u05dd\\u05de\\5\\u045b\\u022e\\2\\u05de\")\n buf.write(\"\\u05df\\5\\u043d\\u021f\\2\\u05df\\u05e0\\5\\u0457\\u022c\\2\\u05e0\")\n buf.write(\"r\\3\\2\\2\\2\\u05e1\\u05e2\\5\\u0439\\u021d\\2\\u05e2\\u05e3\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u05e3\\u05e4\\5\\u044b\\u0226\\2\\u05e4\\u05e5\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u05e5\\u05e6\\5\\u043d\\u021f\\2\\u05e6\\u05e7\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u05e7\\u05e8\\5\\u045b\\u022e\\2\\u05e8t\\3\\2\\2\\2\\u05e9\")\n buf.write(\"\\u05ea\\5\\u0439\\u021d\\2\\u05ea\\u05eb\\5\\u0451\\u0229\\2\\u05eb\")\n buf.write(\"\\u05ec\\5\\u044b\\u0226\\2\\u05ec\\u05ed\\5\\u045d\\u022f\\2\\u05ed\")\n buf.write(\"\\u05ee\\5\\u044d\\u0227\\2\\u05ee\\u05ef\\5\\u044f\\u0228\\2\\u05ef\")\n buf.write(\"\\u05f0\\5\\u0459\\u022d\\2\\u05f0v\\3\\2\\2\\2\\u05f1\\u05f2\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u05f2\\u05f3\\5\\u0451\\u0229\\2\\u05f3\\u05f4\\5\\u044d\")\n buf.write(\"\\u0227\\2\\u05f4\\u05f5\\5\\u044d\\u0227\\2\\u05f5\\u05f6\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u05f6\\u05f7\\5\\u044f\\u0228\\2\\u05f7\\u05f8\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u05f8x\\3\\2\\2\\2\\u05f9\\u05fa\\5\\u0439\\u021d\\2\\u05fa\")\n buf.write(\"\\u05fb\\5\\u0451\\u0229\\2\\u05fb\\u05fc\\5\\u044d\\u0227\\2\\u05fc\")\n buf.write(\"\\u05fd\\5\\u044d\\u0227\\2\\u05fd\\u05fe\\5\\u0445\\u0223\\2\\u05fe\")\n buf.write(\"\\u05ff\\5\\u045b\\u022e\\2\\u05ffz\\3\\2\\2\\2\\u0600\\u0601\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u0601\\u0602\\5\\u0451\\u0229\\2\\u0602\\u0603\\5\\u044d\")\n buf.write(\"\\u0227\\2\\u0603\\u0604\\5\\u044d\\u0227\\2\\u0604\\u0605\\5\\u0445\")\n 
buf.write(\"\\u0223\\2\\u0605\\u0606\\5\\u045b\\u022e\\2\\u0606\\u0607\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0607\\u0608\\5\\u043d\\u021f\\2\\u0608\\u0609\\5\\u043b\")\n buf.write(\"\\u021e\\2\\u0609|\\3\\2\\2\\2\\u060a\\u060b\\5\\u0439\\u021d\\2\\u060b\")\n buf.write(\"\\u060c\\5\\u0451\\u0229\\2\\u060c\\u060d\\5\\u044d\\u0227\\2\\u060d\")\n buf.write(\"\\u060e\\5\\u0453\\u022a\\2\\u060e\\u060f\\5\\u0435\\u021b\\2\\u060f\")\n buf.write(\"\\u0610\\5\\u045b\\u022e\\2\\u0610\\u0611\\5\\u0445\\u0223\\2\\u0611\")\n buf.write(\"\\u0612\\5\\u0437\\u021c\\2\\u0612\\u0613\\5\\u0445\\u0223\\2\\u0613\")\n buf.write(\"\\u0614\\5\\u044b\\u0226\\2\\u0614\\u0615\\5\\u0445\\u0223\\2\\u0615\")\n buf.write(\"\\u0616\\5\\u045b\\u022e\\2\\u0616\\u0617\\5\\u0465\\u0233\\2\\u0617\")\n buf.write(\"~\\3\\2\\2\\2\\u0618\\u0619\\5\\u0439\\u021d\\2\\u0619\\u061a\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u061a\\u061b\\5\\u044d\\u0227\\2\\u061b\\u061c\\5\\u0453\")\n buf.write(\"\\u022a\\2\\u061c\\u061d\\5\\u0445\\u0223\\2\\u061d\\u061e\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u061e\\u061f\\5\\u043d\\u021f\\2\\u061f\\u0080\\3\\2\\2\")\n buf.write(\"\\2\\u0620\\u0621\\5\\u0439\\u021d\\2\\u0621\\u0622\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u0622\\u0623\\5\\u044d\\u0227\\2\\u0623\\u0624\\5\\u0453\\u022a\")\n buf.write(\"\\2\\u0624\\u0625\\5\\u0451\\u0229\\2\\u0625\\u0626\\5\\u045d\\u022f\")\n buf.write(\"\\2\\u0626\\u0627\\5\\u044f\\u0228\\2\\u0627\\u0628\\5\\u043b\\u021e\")\n buf.write(\"\\2\\u0628\\u0082\\3\\2\\2\\2\\u0629\\u062a\\5\\u0439\\u021d\\2\\u062a\")\n buf.write(\"\\u062b\\5\\u0451\\u0229\\2\\u062b\\u062c\\5\\u044f\\u0228\\2\\u062c\")\n buf.write(\"\\u062d\\5\\u044f\\u0228\\2\\u062d\\u062e\\5\\u043d\\u021f\\2\\u062e\")\n buf.write(\"\\u062f\\5\\u0439\\u021d\\2\\u062f\\u0630\\5\\u045b\\u022e\\2\\u0630\")\n buf.write(\"\\u0084\\3\\2\\2\\2\\u0631\\u0632\\5\\u0439\\u021d\\2\\u0632\\u0633\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u0633\\u0634\\5\\u044f\\u0228\\2\\u0634\\u0635\")\n 
buf.write(\"\\5\\u044f\\u0228\\2\\u0635\\u0636\\5\\u043d\\u021f\\2\\u0636\\u0637\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u0637\\u0638\\5\\u045b\\u022e\\2\\u0638\\u0639\")\n buf.write(\"\\7a\\2\\2\\u0639\\u063a\\5\\u0437\\u021c\\2\\u063a\\u063b\\5\\u0465\")\n buf.write(\"\\u0233\\2\\u063b\\u063c\\7a\\2\\2\\u063c\\u063d\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u063d\\u063e\\5\\u0451\\u0229\\2\\u063e\\u063f\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u063f\\u0640\\5\\u045b\\u022e\\2\\u0640\\u0086\\3\\2\\2\\2\\u0641\")\n buf.write(\"\\u0642\\5\\u0439\\u021d\\2\\u0642\\u0643\\5\\u0451\\u0229\\2\\u0643\")\n buf.write(\"\\u0644\\5\\u044f\\u0228\\2\\u0644\\u0645\\5\\u0459\\u022d\\2\\u0645\")\n buf.write(\"\\u0646\\5\\u045b\\u022e\\2\\u0646\\u0647\\5\\u0435\\u021b\\2\\u0647\")\n buf.write(\"\\u0648\\5\\u044f\\u0228\\2\\u0648\\u0649\\5\\u045b\\u022e\\2\\u0649\")\n buf.write(\"\\u0088\\3\\2\\2\\2\\u064a\\u064b\\5\\u0439\\u021d\\2\\u064b\\u064c\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u064c\\u064d\\5\\u044f\\u0228\\2\\u064d\\u064e\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u064e\\u064f\\5\\u045b\\u022e\\2\\u064f\\u0650\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u0650\\u0651\\5\\u0435\\u021b\\2\\u0651\\u0652\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u0652\\u0653\\5\\u044f\\u0228\\2\\u0653\\u0654\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u0654\\u008a\\3\\2\\2\\2\\u0655\\u0656\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u0656\\u0657\\5\\u0451\\u0229\\2\\u0657\\u0658\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u0658\\u0659\\5\\u0459\\u022d\\2\\u0659\\u065a\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u065a\\u065b\\5\\u0457\\u022c\\2\\u065b\\u065c\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u065c\\u065d\\5\\u0445\\u0223\\2\\u065d\\u065e\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u065e\\u065f\\5\\u045b\\u022e\\2\\u065f\\u0660\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u0660\\u008c\\3\\2\\2\\2\\u0661\\u0662\\5\\u0439\\u021d\")\n buf.write(\"\\2\\u0662\\u0663\\5\\u0451\\u0229\\2\\u0663\\u0664\\5\\u044f\\u0228\")\n 
buf.write(\"\\2\\u0664\\u0665\\5\\u0459\\u022d\\2\\u0665\\u0666\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u0666\\u0667\\5\\u0457\\u022c\\2\\u0667\\u0668\\5\\u045d\\u022f\")\n buf.write(\"\\2\\u0668\\u0669\\5\\u0439\\u021d\\2\\u0669\\u066a\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u066a\\u066b\\5\\u0451\\u0229\\2\\u066b\\u066c\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u066c\\u008e\\3\\2\\2\\2\\u066d\\u066e\\5\\u0439\\u021d\\2\\u066e\")\n buf.write(\"\\u066f\\5\\u0451\\u0229\\2\\u066f\\u0670\\5\\u044f\\u0228\\2\\u0670\")\n buf.write(\"\\u0671\\5\\u045b\\u022e\\2\\u0671\\u0672\\5\\u043d\\u021f\\2\\u0672\")\n buf.write(\"\\u0673\\5\\u044f\\u0228\\2\\u0673\\u0674\\5\\u045b\\u022e\\2\\u0674\")\n buf.write(\"\\u0090\\3\\2\\2\\2\\u0675\\u0676\\5\\u0439\\u021d\\2\\u0676\\u0677\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u0677\\u0678\\5\\u044f\\u0228\\2\\u0678\\u0679\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u0679\\u067a\\5\\u043d\\u021f\\2\\u067a\\u067b\")\n buf.write(\"\\5\\u0463\\u0232\\2\\u067b\\u067c\\5\\u045b\\u022e\\2\\u067c\\u0092\")\n buf.write(\"\\3\\2\\2\\2\\u067d\\u067e\\5\\u0439\\u021d\\2\\u067e\\u067f\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u067f\\u0680\\5\\u044f\\u0228\\2\\u0680\\u0681\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0681\\u0682\\5\\u0445\\u0223\\2\\u0682\\u0683\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u0683\\u0684\\5\\u045d\\u022f\\2\\u0684\\u0685\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0685\\u0094\\3\\2\\2\\2\\u0686\\u0687\\5\\u0439\\u021d\")\n buf.write(\"\\2\\u0687\\u0688\\5\\u0451\\u0229\\2\\u0688\\u0689\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u0689\\u068a\\5\\u045f\\u0230\\2\\u068a\\u068b\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u068b\\u068c\\5\\u0457\\u022c\\2\\u068c\\u068d\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u068d\\u0096\\3\\2\\2\\2\\u068e\\u068f\\5\\u0439\\u021d\\2\\u068f\")\n buf.write(\"\\u0690\\5\\u0451\\u0229\\2\\u0690\\u0691\\5\\u0457\\u022c\\2\\u0691\")\n buf.write(\"\\u0692\\5\\u0457\\u022c\\2\\u0692\\u0693\\5\\u045d\\u022f\\2\\u0693\")\n 
buf.write(\"\\u0694\\5\\u0453\\u022a\\2\\u0694\\u0695\\5\\u045b\\u022e\\2\\u0695\")\n buf.write(\"\\u0696\\7a\\2\\2\\u0696\\u0697\\5\\u0463\\u0232\\2\\u0697\\u0698\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u0698\\u0699\\5\\u043b\\u021e\\2\\u0699\\u0098\")\n buf.write(\"\\3\\2\\2\\2\\u069a\\u069b\\5\\u0439\\u021d\\2\\u069b\\u069c\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u069c\\u069d\\5\\u0457\\u022c\\2\\u069d\\u069e\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u069e\\u069f\\5\\u045d\\u022f\\2\\u069f\\u06a0\\5\\u0453\")\n buf.write(\"\\u022a\\2\\u06a0\\u06a1\\5\\u045b\\u022e\\2\\u06a1\\u06a2\\7a\\2\")\n buf.write(\"\\2\\u06a2\\u06a3\\5\\u0463\\u0232\\2\\u06a3\\u06a4\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u06a4\\u06a5\\5\\u043b\\u021e\\2\\u06a5\\u06a6\\7a\\2\\2\\u06a6\")\n buf.write(\"\\u06a7\\5\\u0435\\u021b\\2\\u06a7\\u06a8\\5\\u044b\\u0226\\2\\u06a8\")\n buf.write(\"\\u06a9\\5\\u044b\\u0226\\2\\u06a9\\u009a\\3\\2\\2\\2\\u06aa\\u06ab\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u06ab\\u06ac\\5\\u0451\\u0229\\2\\u06ac\\u06ad\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u06ad\\u06ae\\5\\u045b\\u022e\\2\\u06ae\\u009c\")\n buf.write(\"\\3\\2\\2\\2\\u06af\\u06b0\\5\\u0439\\u021d\\2\\u06b0\\u06b1\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u06b1\\u06b2\\5\\u045d\\u022f\\2\\u06b2\\u06b3\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u06b3\\u06b4\\5\\u045b\\u022e\\2\\u06b4\\u009e\\3\\2\\2\")\n buf.write(\"\\2\\u06b5\\u06b6\\5\\u0439\\u021d\\2\\u06b6\\u06b7\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u06b7\\u06b8\\5\\u043d\\u021f\\2\\u06b8\\u06b9\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u06b9\\u06ba\\5\\u045b\\u022e\\2\\u06ba\\u06bb\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u06bb\\u00a0\\3\\2\\2\\2\\u06bc\\u06bd\\5\\u0439\\u021d\\2\\u06bd\")\n buf.write(\"\\u06be\\5\\u0457\\u022c\\2\\u06be\\u06bf\\5\\u0451\\u0229\\2\\u06bf\")\n buf.write(\"\\u06c0\\5\\u0459\\u022d\\2\\u06c0\\u06c1\\5\\u0459\\u022d\\2\\u06c1\")\n buf.write(\"\\u00a2\\3\\2\\2\\2\\u06c2\\u06c3\\5\\u0439\\u021d\\2\\u06c3\\u06c4\")\n 
buf.write(\"\\5\\u045d\\u022f\\2\\u06c4\\u06c5\\5\\u0437\\u021c\\2\\u06c5\\u06c6\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u06c6\\u00a4\\3\\2\\2\\2\\u06c7\\u06c8\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u06c8\\u06c9\\5\\u045d\\u022f\\2\\u06c9\\u06ca\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u06ca\\u06cb\\5\\u0457\\u022c\\2\\u06cb\\u06cc\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u06cc\\u06cd\\5\\u044f\\u0228\\2\\u06cd\\u06ce\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u06ce\\u00a6\\3\\2\\2\\2\\u06cf\\u06d0\\5\\u0439\\u021d\")\n buf.write(\"\\2\\u06d0\\u06d1\\5\\u045d\\u022f\\2\\u06d1\\u06d2\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u06d2\\u06d3\\5\\u0457\\u022c\\2\\u06d3\\u06d4\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u06d4\\u06d5\\5\\u044f\\u0228\\2\\u06d5\\u06d6\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u06d6\\u06d7\\7a\\2\\2\\u06d7\\u06d8\\5\\u045d\\u022f\\2\\u06d8\")\n buf.write(\"\\u06d9\\5\\u0459\\u022d\\2\\u06d9\\u06da\\5\\u043d\\u021f\\2\\u06da\")\n buf.write(\"\\u06db\\5\\u0457\\u022c\\2\\u06db\\u00a8\\3\\2\\2\\2\\u06dc\\u06dd\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u06dd\\u06de\\5\\u045d\\u022f\\2\\u06de\\u06df\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u06df\\u06e0\\5\\u0459\\u022d\\2\\u06e0\\u06e1\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u06e1\\u06e2\\5\\u0457\\u022c\\2\\u06e2\\u00aa\")\n buf.write(\"\\3\\2\\2\\2\\u06e3\\u06e4\\5\\u0439\\u021d\\2\\u06e4\\u06e5\\5\\u045d\")\n buf.write(\"\\u022f\\2\\u06e5\\u06e6\\5\\u0459\\u022d\\2\\u06e6\\u06e7\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u06e7\\u06e8\\5\\u0451\\u0229\\2\\u06e8\\u06e9\\5\\u044d\")\n buf.write(\"\\u0227\\2\\u06e9\\u06ea\\5\\u043b\\u021e\\2\\u06ea\\u06eb\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u06eb\\u06ec\\5\\u045b\\u022e\\2\\u06ec\\u06ed\\5\\u045d\")\n buf.write(\"\\u022f\\2\\u06ed\\u06ee\\5\\u044d\\u0227\\2\\u06ee\\u00ac\\3\\2\\2\")\n buf.write(\"\\2\\u06ef\\u06f0\\5\\u0439\\u021d\\2\\u06f0\\u06f1\\5\\u0465\\u0233\")\n buf.write(\"\\2\\u06f1\\u06f2\\5\\u0439\\u021d\\2\\u06f2\\u06f3\\5\\u044b\\u0226\")\n 
buf.write(\"\\2\\u06f3\\u06f4\\5\\u043d\\u021f\\2\\u06f4\\u00ae\\3\\2\\2\\2\\u06f5\")\n buf.write(\"\\u06f6\\5\\u043b\\u021e\\2\\u06f6\\u06f7\\5\\u0435\\u021b\\2\\u06f7\")\n buf.write(\"\\u06f8\\5\\u045b\\u022e\\2\\u06f8\\u06f9\\5\\u0435\\u021b\\2\\u06f9\")\n buf.write(\"\\u00b0\\3\\2\\2\\2\\u06fa\\u06fb\\5\\u043b\\u021e\\2\\u06fb\\u06fc\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u06fc\\u06fd\\5\\u045b\\u022e\\2\\u06fd\\u06fe\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u06fe\\u06ff\\5\\u0437\\u021c\\2\\u06ff\\u0700\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u0700\\u0701\\5\\u0459\\u022d\\2\\u0701\\u0702\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0702\\u00b2\\3\\2\\2\\2\\u0703\\u0704\\5\\u043b\")\n buf.write(\"\\u021e\\2\\u0704\\u0705\\5\\u0435\\u021b\\2\\u0705\\u0706\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0706\\u0707\\5\\u043d\\u021f\\2\\u0707\\u00b4\\3\\2\\2\")\n buf.write(\"\\2\\u0708\\u0709\\5\\u043b\\u021e\\2\\u0709\\u070a\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u070a\\u070b\\5\\u0465\\u0233\\2\\u070b\\u00b6\\3\\2\\2\\2\\u070c\")\n buf.write(\"\\u070d\\5\\u043b\\u021e\\2\\u070d\\u070e\\5\\u0437\\u021c\\2\\u070e\")\n buf.write(\"\\u070f\\7a\\2\\2\\u070f\\u0710\\5\\u0457\\u022c\\2\\u0710\\u0711\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u0711\\u0712\\5\\u044b\\u0226\\2\\u0712\\u0713\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0713\\u0714\\7a\\2\\2\\u0714\\u0715\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u0715\\u0716\\5\\u0443\\u0222\\2\\u0716\\u0717\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0717\\u0718\\5\\u044f\\u0228\\2\\u0718\\u0719\\5\\u0441\")\n buf.write(\"\\u0221\\2\\u0719\\u071a\\5\\u043d\\u021f\\2\\u071a\\u00b8\\3\\2\\2\")\n buf.write(\"\\2\\u071b\\u071c\\5\\u043b\\u021e\\2\\u071c\\u071d\\5\\u0437\\u021c\")\n buf.write(\"\\2\\u071d\\u071e\\5\\u045b\\u022e\\2\\u071e\\u071f\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u071f\\u0720\\5\\u044d\\u0227\\2\\u0720\\u0721\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0721\\u0722\\5\\u0467\\u0234\\2\\u0722\\u0723\\5\\u0451\\u0229\")\n 
buf.write(\"\\2\\u0723\\u0724\\5\\u044f\\u0228\\2\\u0724\\u0725\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0725\\u00ba\\3\\2\\2\\2\\u0726\\u0727\\5\\u043b\\u021e\\2\\u0727\")\n buf.write(\"\\u0728\\5\\u043b\\u021e\\2\\u0728\\u0729\\5\\u044b\\u0226\\2\\u0729\")\n buf.write(\"\\u00bc\\3\\2\\2\\2\\u072a\\u072b\\5\\u043b\\u021e\\2\\u072b\\u072c\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u072c\\u072d\\5\\u0437\\u021c\\2\\u072d\\u072e\")\n buf.write(\"\\5\\u045d\\u022f\\2\\u072e\\u072f\\5\\u0441\\u0221\\2\\u072f\\u00be\")\n buf.write(\"\\3\\2\\2\\2\\u0730\\u0731\\5\\u043b\\u021e\\2\\u0731\\u0732\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0732\\u0733\\5\\u0439\\u021d\\2\\u0733\\u00c0\\3\\2\\2\")\n buf.write(\"\\2\\u0734\\u0735\\5\\u043b\\u021e\\2\\u0735\\u0736\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0736\\u0737\\5\\u0439\\u021d\\2\\u0737\\u0738\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u0738\\u0739\\5\\u044d\\u0227\\2\\u0739\\u073a\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u073a\\u073b\\5\\u044b\\u0226\\2\\u073b\\u00c2\\3\\2\\2\\2\\u073c\")\n buf.write(\"\\u073d\\5\\u043b\\u021e\\2\\u073d\\u073e\\5\\u043d\\u021f\\2\\u073e\")\n buf.write(\"\\u073f\\5\\u0439\\u021d\\2\\u073f\\u0740\\5\\u044b\\u0226\\2\\u0740\")\n buf.write(\"\\u0741\\5\\u0435\\u021b\\2\\u0741\\u0742\\5\\u0457\\u022c\\2\\u0742\")\n buf.write(\"\\u0743\\5\\u043d\\u021f\\2\\u0743\\u00c4\\3\\2\\2\\2\\u0744\\u0745\")\n buf.write(\"\\5\\u043b\\u021e\\2\\u0745\\u0746\\5\\u043d\\u021f\\2\\u0746\\u0747\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u0747\\u0748\\5\\u0451\\u0229\\2\\u0748\\u0749\")\n buf.write(\"\\5\\u044d\\u0227\\2\\u0749\\u074a\\5\\u0453\\u022a\\2\\u074a\\u074b\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u074b\\u074c\\5\\u0459\\u022d\\2\\u074c\\u074d\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u074d\\u00c6\\3\\2\\2\\2\\u074e\\u074f\\5\\u043b\")\n buf.write(\"\\u021e\\2\\u074f\\u0750\\5\\u043d\\u021f\\2\\u0750\\u0751\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u0751\\u0752\\5\\u0457\\u022c\\2\\u0752\\u0753\\5\\u043d\")\n 
buf.write(\"\\u021f\\2\\u0753\\u0754\\5\\u044d\\u0227\\2\\u0754\\u0755\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0755\\u0756\\5\\u044f\\u0228\\2\\u0756\\u0757\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0757\\u00c8\\3\\2\\2\\2\\u0758\\u0759\\5\\u043b\\u021e\")\n buf.write(\"\\2\\u0759\\u075a\\5\\u043d\\u021f\\2\\u075a\\u075b\\5\\u043f\\u0220\")\n buf.write(\"\\2\\u075b\\u075c\\5\\u0435\\u021b\\2\\u075c\\u075d\\5\\u045d\\u022f\")\n buf.write(\"\\2\\u075d\\u075e\\5\\u044b\\u0226\\2\\u075e\\u075f\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u075f\\u00ca\\3\\2\\2\\2\\u0760\\u0761\\5\\u043b\\u021e\\2\\u0761\")\n buf.write(\"\\u0762\\5\\u043d\\u021f\\2\\u0762\\u0763\\5\\u043f\\u0220\\2\\u0763\")\n buf.write(\"\\u0764\\5\\u0435\\u021b\\2\\u0764\\u0765\\5\\u045d\\u022f\\2\\u0765\")\n buf.write(\"\\u0766\\5\\u044b\\u0226\\2\\u0766\\u0767\\5\\u045b\\u022e\\2\\u0767\")\n buf.write(\"\\u0768\\5\\u0459\\u022d\\2\\u0768\\u00cc\\3\\2\\2\\2\\u0769\\u076a\")\n buf.write(\"\\5\\u043b\\u021e\\2\\u076a\\u076b\\5\\u043d\\u021f\\2\\u076b\\u076c\")\n buf.write(\"\\5\\u043f\\u0220\\2\\u076c\\u076d\\5\\u043d\\u021f\\2\\u076d\\u076e\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u076e\\u076f\\5\\u0457\\u022c\\2\\u076f\\u0770\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0770\\u0771\\5\\u043b\\u021e\\2\\u0771\\u00ce\")\n buf.write(\"\\3\\2\\2\\2\\u0772\\u0773\\5\\u043b\\u021e\\2\\u0773\\u0774\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0774\\u0775\\5\\u043f\\u0220\\2\\u0775\\u0776\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u0776\\u0777\\5\\u044f\\u0228\\2\\u0777\\u0778\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0778\\u0779\\5\\u0457\\u022c\\2\\u0779\\u00d0\\3\\2\\2\")\n buf.write(\"\\2\\u077a\\u077b\\5\\u043b\\u021e\\2\\u077b\\u077c\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u077c\\u077d\\5\\u044b\\u0226\\2\\u077d\\u077e\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u077e\\u077f\\5\\u045b\\u022e\\2\\u077f\\u0780\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0780\\u00d2\\3\\2\\2\\2\\u0781\\u0782\\5\\u043b\\u021e\\2\\u0782\")\n 
buf.write(\"\\u0783\\5\\u043d\\u021f\\2\\u0783\\u0784\\5\\u0453\\u022a\\2\\u0784\")\n buf.write(\"\\u0785\\5\\u045b\\u022e\\2\\u0785\\u0786\\5\\u0443\\u0222\\2\\u0786\")\n buf.write(\"\\u00d4\\3\\2\\2\\2\\u0787\\u0788\\5\\u043b\\u021e\\2\\u0788\\u0789\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0789\\u078a\\5\\u0459\\u022d\\2\\u078a\\u078b\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u078b\\u00d6\\3\\2\\2\\2\\u078c\\u078d\\5\\u043b\")\n buf.write(\"\\u021e\\2\\u078d\\u078e\\5\\u043d\\u021f\\2\\u078e\\u078f\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u078f\\u0790\\5\\u043d\\u021f\\2\\u0790\\u0791\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u0791\\u0792\\5\\u044d\\u0227\\2\\u0792\\u0793\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u0793\\u0794\\5\\u044f\\u0228\\2\\u0794\\u0795\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u0795\\u0796\\5\\u0459\\u022d\\2\\u0796\\u0797\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0797\\u0798\\5\\u0445\\u0223\\2\\u0798\\u0799\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u0799\\u00d8\\3\\2\\2\\2\\u079a\\u079b\\5\\u043b\\u021e\")\n buf.write(\"\\2\\u079b\\u079c\\5\\u0445\\u0223\\2\\u079c\\u079d\\5\\u044d\\u0227\")\n buf.write(\"\\2\\u079d\\u079e\\5\\u043d\\u021f\\2\\u079e\\u079f\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u079f\\u07a0\\5\\u0459\\u022d\\2\\u07a0\\u07a1\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u07a1\\u07a2\\5\\u0451\\u0229\\2\\u07a2\\u07a3\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u07a3\\u00da\\3\\2\\2\\2\\u07a4\\u07a5\\5\\u043b\\u021e\\2\\u07a5\")\n buf.write(\"\\u07a6\\5\\u0445\\u0223\\2\\u07a6\\u07a7\\5\\u0459\\u022d\\2\\u07a7\")\n buf.write(\"\\u07a8\\5\\u0435\\u021b\\2\\u07a8\\u07a9\\5\\u0437\\u021c\\2\\u07a9\")\n buf.write(\"\\u07aa\\5\\u044b\\u0226\\2\\u07aa\\u07ab\\5\\u043d\\u021f\\2\\u07ab\")\n buf.write(\"\\u00dc\\3\\2\\2\\2\\u07ac\\u07ad\\5\\u043b\\u021e\\2\\u07ad\\u07ae\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u07ae\\u07af\\5\\u0459\\u022d\\2\\u07af\\u07b0\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u07b0\\u07b1\\5\\u0459\\u022d\\2\\u07b1\\u07b2\")\n 
buf.write(\"\\5\\u0459\\u022d\\2\\u07b2\\u07b3\\5\\u0451\\u0229\\2\\u07b3\\u07b4\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u07b4\\u07b5\\5\\u0445\\u0223\\2\\u07b5\\u07b6\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u07b6\\u07b7\\5\\u045b\\u022e\\2\\u07b7\\u07b8\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u07b8\\u00de\\3\\2\\2\\2\\u07b9\\u07ba\\5\\u043b\")\n buf.write(\"\\u021e\\2\\u07ba\\u07bb\\5\\u0445\\u0223\\2\\u07bb\\u07bc\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u07bc\\u07bd\\5\\u045b\\u022e\\2\\u07bd\\u07be\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u07be\\u07bf\\5\\u044f\\u0228\\2\\u07bf\\u07c0\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u07c0\\u07c1\\5\\u045b\\u022e\\2\\u07c1\\u00e0\\3\\2\\2\")\n buf.write(\"\\2\\u07c2\\u07c3\\5\\u043b\\u021e\\2\\u07c3\\u07c4\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u07c4\\u07c5\\5\\u0439\\u021d\\2\\u07c5\\u07c6\\5\\u045d\\u022f\")\n buf.write(\"\\2\\u07c6\\u07c7\\5\\u044d\\u0227\\2\\u07c7\\u07c8\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u07c8\\u07c9\\5\\u044f\\u0228\\2\\u07c9\\u07ca\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u07ca\\u00e2\\3\\2\\2\\2\\u07cb\\u07cc\\5\\u043b\\u021e\\2\\u07cc\")\n buf.write(\"\\u07cd\\5\\u0451\\u0229\\2\\u07cd\\u07ce\\5\\u045d\\u022f\\2\\u07ce\")\n buf.write(\"\\u07cf\\5\\u0437\\u021c\\2\\u07cf\\u07d0\\5\\u044b\\u0226\\2\\u07d0\")\n buf.write(\"\\u07d1\\5\\u043d\\u021f\\2\\u07d1\\u00e4\\3\\2\\2\\2\\u07d2\\u07d3\")\n buf.write(\"\\5\\u043b\\u021e\\2\\u07d3\\u07d4\\5\\u0457\\u022c\\2\\u07d4\\u07d5\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u07d5\\u07d6\\5\\u0453\\u022a\\2\\u07d6\\u00e6\")\n buf.write(\"\\3\\2\\2\\2\\u07d7\\u07d8\\5\\u043b\\u021e\\2\\u07d8\\u07d9\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u07d9\\u07da\\5\\u0445\\u0223\\2\\u07da\\u07db\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u07db\\u07dc\\5\\u045b\\u022e\\2\\u07dc\\u07dd\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u07dd\\u07de\\5\\u0457\\u022c\\2\\u07de\\u07df\\5\\u045f\")\n buf.write(\"\\u0230\\2\\u07df\\u07e0\\5\\u0435\\u021b\\2\\u07e0\\u07e1\\5\\u044b\")\n 
buf.write(\"\\u0226\\2\\u07e1\\u07e2\\7a\\2\\2\\u07e2\\u07e3\\5\\u045d\\u022f\")\n buf.write(\"\\2\\u07e3\\u07e4\\5\\u044f\\u0228\\2\\u07e4\\u07e5\\5\\u0439\\u021d\")\n buf.write(\"\\2\\u07e5\\u07e6\\5\\u0451\\u0229\\2\\u07e6\\u07e7\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u07e7\\u07e8\\5\\u0459\\u022d\\2\\u07e8\\u07e9\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u07e9\\u07ea\\5\\u0457\\u022c\\2\\u07ea\\u07eb\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u07eb\\u07ec\\5\\u0445\\u0223\\2\\u07ec\\u07ed\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u07ed\\u07ee\\5\\u043d\\u021f\\2\\u07ee\\u07ef\\5\\u043b\\u021e\")\n buf.write(\"\\2\\u07ef\\u00e8\\3\\2\\2\\2\\u07f0\\u07f1\\5\\u043d\\u021f\\2\\u07f1\")\n buf.write(\"\\u07f2\\5\\u0435\\u021b\\2\\u07f2\\u07f3\\5\\u0439\\u021d\\2\\u07f3\")\n buf.write(\"\\u07f4\\5\\u0443\\u0222\\2\\u07f4\\u00ea\\3\\2\\2\\2\\u07f5\\u07f6\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u07f6\\u07f7\\5\\u044b\\u0226\\2\\u07f7\\u07f8\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u07f8\\u07f9\\5\\u044d\\u0227\\2\\u07f9\\u07fa\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u07fa\\u07fb\\5\\u044f\\u0228\\2\\u07fb\\u07fc\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u07fc\\u00ec\\3\\2\\2\\2\\u07fd\\u07fe\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u07fe\\u07ff\\5\\u044b\\u0226\\2\\u07ff\\u0800\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u0800\\u0801\\5\\u043d\\u021f\\2\\u0801\\u00ee\\3\\2\\2\")\n buf.write(\"\\2\\u0802\\u0803\\5\\u043d\\u021f\\2\\u0803\\u0804\\5\\u044b\\u0226\")\n buf.write(\"\\2\\u0804\\u0805\\5\\u0459\\u022d\\2\\u0805\\u0806\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u0806\\u0807\\5\\u043f\\u0220\\2\\u0807\\u00f0\\3\\2\\2\\2\\u0808\")\n buf.write(\"\\u0809\\5\\u043d\\u021f\\2\\u0809\\u080a\\5\\u044d\\u0227\\2\\u080a\")\n buf.write(\"\\u080b\\5\\u0453\\u022a\\2\\u080b\\u080c\\5\\u045b\\u022e\\2\\u080c\")\n buf.write(\"\\u080d\\5\\u0465\\u0233\\2\\u080d\\u00f2\\3\\2\\2\\2\\u080e\\u080f\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u080f\\u0810\\5\\u044f\\u0228\\2\\u0810\\u0811\")\n 
buf.write(\"\\5\\u0435\\u021b\\2\\u0811\\u0812\\5\\u0437\\u021c\\2\\u0812\\u0813\")\n buf.write(\"\\5\\u044b\\u0226\\2\\u0813\\u0814\\5\\u043d\\u021f\\2\\u0814\\u00f4\")\n buf.write(\"\\3\\2\\2\\2\\u0815\\u0816\\5\\u043d\\u021f\\2\\u0816\\u0817\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u0817\\u0818\\5\\u0439\\u021d\\2\\u0818\\u0819\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u0819\\u081a\\5\\u043b\\u021e\\2\\u081a\\u081b\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u081b\\u081c\\5\\u044f\\u0228\\2\\u081c\\u081d\\5\\u0441\")\n buf.write(\"\\u0221\\2\\u081d\\u00f6\\3\\2\\2\\2\\u081e\\u081f\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u081f\\u0820\\5\\u044f\\u0228\\2\\u0820\\u0821\\5\\u043b\\u021e\")\n buf.write(\"\\2\\u0821\\u00f8\\3\\2\\2\\2\\u0822\\u0823\\5\\u043d\\u021f\\2\\u0823\")\n buf.write(\"\\u0824\\5\\u044f\\u0228\\2\\u0824\\u0825\\5\\u045b\\u022e\\2\\u0825\")\n buf.write(\"\\u0826\\5\\u0445\\u0223\\2\\u0826\\u0827\\5\\u045b\\u022e\\2\\u0827\")\n buf.write(\"\\u0828\\5\\u0465\\u0233\\2\\u0828\\u0829\\5\\u043d\\u021f\\2\\u0829\")\n buf.write(\"\\u082a\\5\\u0459\\u022d\\2\\u082a\\u082b\\5\\u0439\\u021d\\2\\u082b\")\n buf.write(\"\\u082c\\5\\u0435\\u021b\\2\\u082c\\u082d\\5\\u0453\\u022a\\2\\u082d\")\n buf.write(\"\\u082e\\5\\u0445\\u0223\\2\\u082e\\u082f\\5\\u044f\\u0228\\2\\u082f\")\n buf.write(\"\\u0830\\5\\u0441\\u0221\\2\\u0830\\u00fa\\3\\2\\2\\2\\u0831\\u0832\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0832\\u0833\\5\\u0457\\u022c\\2\\u0833\\u0834\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u0834\\u00fc\\3\\2\\2\\2\\u0835\\u0836\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0836\\u0837\\5\\u0457\\u022c\\2\\u0837\\u0838\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u0838\\u0839\\5\\u0451\\u0229\\2\\u0839\\u083a\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u083a\\u083b\\5\\u0459\\u022d\\2\\u083b\\u00fe\\3\\2\\2\")\n buf.write(\"\\2\\u083c\\u083d\\5\\u043d\\u021f\\2\\u083d\\u083e\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u083e\\u083f\\5\\u0439\\u021d\\2\\u083f\\u0840\\5\\u0435\\u021b\")\n 
buf.write(\"\\2\\u0840\\u0841\\5\\u0453\\u022a\\2\\u0841\\u0842\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0842\\u0100\\3\\2\\2\\2\\u0843\\u0844\\5\\u043d\\u021f\\2\\u0844\")\n buf.write(\"\\u0845\\5\\u045f\\u0230\\2\\u0845\\u0846\\5\\u0435\\u021b\\2\\u0846\")\n buf.write(\"\\u0847\\5\\u044b\\u0226\\2\\u0847\\u0848\\5\\u044f\\u0228\\2\\u0848\")\n buf.write(\"\\u0849\\5\\u0435\\u021b\\2\\u0849\\u084a\\5\\u044d\\u0227\\2\\u084a\")\n buf.write(\"\\u084b\\5\\u043d\\u021f\\2\\u084b\\u0102\\3\\2\\2\\2\\u084c\\u084d\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u084d\\u084e\\5\\u0463\\u0232\\2\\u084e\\u084f\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u084f\\u0850\\5\\u043d\\u021f\\2\\u0850\\u0851\")\n buf.write(\"\\5\\u0453\\u022a\\2\\u0851\\u0852\\5\\u045b\\u022e\\2\\u0852\\u0104\")\n buf.write(\"\\3\\2\\2\\2\\u0853\\u0854\\5\\u043d\\u021f\\2\\u0854\\u0855\\5\\u0463\")\n buf.write(\"\\u0232\\2\\u0855\\u0856\\5\\u0439\\u021d\\2\\u0856\\u0857\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0857\\u0858\\5\\u0453\\u022a\\2\\u0858\\u0859\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0859\\u085a\\5\\u0445\\u0223\\2\\u085a\\u085b\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u085b\\u085c\\5\\u044f\\u0228\\2\\u085c\\u0106\\3\\2\\2\")\n buf.write(\"\\2\\u085d\\u085e\\5\\u043d\\u021f\\2\\u085e\\u085f\\5\\u0463\\u0232\")\n buf.write(\"\\2\\u085f\\u0860\\5\\u0439\\u021d\\2\\u0860\\u0861\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0861\\u0862\\5\\u0453\\u022a\\2\\u0862\\u0863\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u0863\\u0864\\5\\u0445\\u0223\\2\\u0864\\u0865\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u0865\\u0866\\5\\u044f\\u0228\\2\\u0866\\u0867\\7a\\2\\2\\u0867\")\n buf.write(\"\\u0868\\5\\u0445\\u0223\\2\\u0868\\u0869\\5\\u044f\\u0228\\2\\u0869\")\n buf.write(\"\\u086a\\5\\u0445\\u0223\\2\\u086a\\u086b\\5\\u045b\\u022e\\2\\u086b\")\n buf.write(\"\\u0108\\3\\2\\2\\2\\u086c\\u086d\\5\\u043d\\u021f\\2\\u086d\\u086e\")\n buf.write(\"\\5\\u0463\\u0232\\2\\u086e\\u086f\\5\\u0439\\u021d\\2\\u086f\\u0870\")\n 
buf.write(\"\\5\\u043d\\u021f\\2\\u0870\\u0871\\5\\u0453\\u022a\\2\\u0871\\u0872\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u0872\\u0873\\5\\u0445\\u0223\\2\\u0873\\u0874\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u0874\\u0875\\5\\u044f\\u0228\\2\\u0875\\u0876\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u0876\\u010a\\3\\2\\2\\2\\u0877\\u0878\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0878\\u0879\\5\\u0463\\u0232\\2\\u0879\\u087a\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u087a\\u087b\\5\\u044b\\u0226\\2\\u087b\\u087c\\5\\u045d\")\n buf.write(\"\\u022f\\2\\u087c\\u087d\\5\\u043b\\u021e\\2\\u087d\\u087e\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u087e\\u010c\\3\\2\\2\\2\\u087f\\u0880\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0880\\u0881\\5\\u0463\\u0232\\2\\u0881\\u0882\\5\\u0439\\u021d\")\n buf.write(\"\\2\\u0882\\u0883\\5\\u044b\\u0226\\2\\u0883\\u0884\\5\\u045d\\u022f\")\n buf.write(\"\\2\\u0884\\u0885\\5\\u0459\\u022d\\2\\u0885\\u0886\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u0886\\u0887\\5\\u045f\\u0230\\2\\u0887\\u0888\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0888\\u010e\\3\\2\\2\\2\\u0889\\u088a\\5\\u043d\\u021f\\2\\u088a\")\n buf.write(\"\\u088b\\5\\u0463\\u0232\\2\\u088b\\u088c\\5\\u043d\\u021f\\2\\u088c\")\n buf.write(\"\\u088d\\5\\u0439\\u021d\\2\\u088d\\u088e\\5\\u045d\\u022f\\2\\u088e\")\n buf.write(\"\\u088f\\5\\u045b\\u022e\\2\\u088f\\u0890\\5\\u043d\\u021f\\2\\u0890\")\n buf.write(\"\\u0110\\3\\2\\2\\2\\u0891\\u0892\\5\\u043d\\u021f\\2\\u0892\\u0893\")\n buf.write(\"\\5\\u0463\\u0232\\2\\u0893\\u0894\\5\\u0445\\u0223\\2\\u0894\\u0895\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u0895\\u0896\\5\\u045b\\u022e\\2\\u0896\\u0897\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u0897\\u0112\\3\\2\\2\\2\\u0898\\u0899\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0899\\u089a\\5\\u0463\\u0232\\2\\u089a\\u089b\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u089b\\u089c\\5\\u045b\\u022e\\2\\u089c\\u0114\\3\\2\\2\")\n buf.write(\"\\2\\u089d\\u089e\\5\\u043d\\u021f\\2\\u089e\\u089f\\5\\u0463\\u0232\")\n 
buf.write(\"\\2\\u089f\\u08a0\\5\\u0453\\u022a\\2\\u08a0\\u08a1\\5\\u044b\\u0226\")\n buf.write(\"\\2\\u08a1\\u08a2\\5\\u0435\\u021b\\2\\u08a2\\u08a3\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u08a3\\u08a4\\5\\u044f\\u0228\\2\\u08a4\\u0116\\3\\2\\2\\2\\u08a5\")\n buf.write(\"\\u08a6\\5\\u043d\\u021f\\2\\u08a6\\u08a7\\5\\u0463\\u0232\\2\\u08a7\")\n buf.write(\"\\u08a8\\5\\u045b\\u022e\\2\\u08a8\\u08a9\\5\\u043d\\u021f\\2\\u08a9\")\n buf.write(\"\\u08aa\\5\\u0457\\u022c\\2\\u08aa\\u08ab\\5\\u044f\\u0228\\2\\u08ab\")\n buf.write(\"\\u08ac\\5\\u0435\\u021b\\2\\u08ac\\u08ad\\5\\u044b\\u0226\\2\\u08ad\")\n buf.write(\"\\u0118\\3\\2\\2\\2\\u08ae\\u08af\\5\\u043d\\u021f\\2\\u08af\\u08b0\")\n buf.write(\"\\5\\u0463\\u0232\\2\\u08b0\\u08b1\\5\\u045b\\u022e\\2\\u08b1\\u08b2\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u08b2\\u08b3\\5\\u0435\\u021b\\2\\u08b3\\u08b4\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u08b4\\u08b5\\5\\u045b\\u022e\\2\\u08b5\\u011a\")\n buf.write(\"\\3\\2\\2\\2\\u08b6\\u08b7\\5\\u043f\\u0220\\2\\u08b7\\u08b8\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u08b8\\u08b9\\5\\u0445\\u0223\\2\\u08b9\\u08ba\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u08ba\\u08bb\\5\\u045d\\u022f\\2\\u08bb\\u08bc\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u08bc\\u08bd\\5\\u043d\\u021f\\2\\u08bd\\u011c\\3\\2\\2\")\n buf.write(\"\\2\\u08be\\u08bf\\5\\u043f\\u0220\\2\\u08bf\\u08c0\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u08c0\\u08c1\\5\\u044b\\u0226\\2\\u08c1\\u08c2\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u08c2\\u08c3\\5\\u043d\\u021f\\2\\u08c3\\u011e\\3\\2\\2\\2\\u08c4\")\n buf.write(\"\\u08c5\\5\\u043f\\u0220\\2\\u08c5\\u08c6\\5\\u043d\\u021f\\2\\u08c6\")\n buf.write(\"\\u08c7\\5\\u045b\\u022e\\2\\u08c7\\u08c8\\5\\u0439\\u021d\\2\\u08c8\")\n buf.write(\"\\u08c9\\5\\u0443\\u0222\\2\\u08c9\\u0120\\3\\2\\2\\2\\u08ca\\u08cb\")\n buf.write(\"\\5\\u043f\\u0220\\2\\u08cb\\u08cc\\5\\u0445\\u0223\\2\\u08cc\\u08cd\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u08cd\\u08ce\\5\\u0435\\u021b\\2\\u08ce\\u08cf\")\n 
buf.write(\"\\5\\u044b\\u0226\\2\\u08cf\\u0122\\3\\2\\2\\2\\u08d0\\u08d1\\5\\u043f\")\n buf.write(\"\\u0220\\2\\u08d1\\u08d2\\5\\u0445\\u0223\\2\\u08d2\\u08d3\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u08d3\\u08d4\\5\\u0459\\u022d\\2\\u08d4\\u08d5\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u08d5\\u0124\\3\\2\\2\\2\\u08d6\\u08d7\\5\\u043f\\u0220\")\n buf.write(\"\\2\\u08d7\\u08d8\\5\\u0445\\u0223\\2\\u08d8\\u08d9\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u08d9\\u08da\\5\\u0459\\u022d\\2\\u08da\\u08db\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u08db\\u08dc\\7a\\2\\2\\u08dc\\u08dd\\5\\u045f\\u0230\\2\\u08dd\")\n buf.write(\"\\u08de\\5\\u0435\\u021b\\2\\u08de\\u08df\\5\\u044b\\u0226\\2\\u08df\")\n buf.write(\"\\u08e0\\5\\u045d\\u022f\\2\\u08e0\\u08e1\\5\\u043d\\u021f\\2\\u08e1\")\n buf.write(\"\\u0126\\3\\2\\2\\2\\u08e2\\u08e3\\5\\u043f\\u0220\\2\\u08e3\\u08e4\")\n buf.write(\"\\5\\u044b\\u0226\\2\\u08e4\\u08e5\\5\\u0451\\u0229\\2\\u08e5\\u08e6\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u08e6\\u08e7\\5\\u045b\\u022e\\2\\u08e7\\u0128\")\n buf.write(\"\\3\\2\\2\\2\\u08e8\\u08e9\\5\\u043f\\u0220\\2\\u08e9\\u08ea\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u08ea\\u08eb\\5\\u044b\\u0226\\2\\u08eb\\u08ec\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u08ec\\u08ed\\5\\u0451\\u0229\\2\\u08ed\\u08ee\\5\\u0461\")\n buf.write(\"\\u0231\\2\\u08ee\\u08ef\\5\\u0445\\u0223\\2\\u08ef\\u08f0\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u08f0\\u08f1\\5\\u0441\\u0221\\2\\u08f1\\u012a\\3\\2\\2\")\n buf.write(\"\\2\\u08f2\\u08f3\\5\\u043f\\u0220\\2\\u08f3\\u08f4\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u08f4\\u08f5\\5\\u044b\\u0226\\2\\u08f5\\u08f6\\5\\u044b\\u0226\")\n buf.write(\"\\2\\u08f6\\u08f7\\5\\u0451\\u0229\\2\\u08f7\\u08f8\\5\\u0461\\u0231\")\n buf.write(\"\\2\\u08f8\\u08f9\\5\\u0459\\u022d\\2\\u08f9\\u012c\\3\\2\\2\\2\\u08fa\")\n buf.write(\"\\u08fb\\5\\u043f\\u0220\\2\\u08fb\\u08fc\\5\\u0451\\u0229\\2\\u08fc\")\n buf.write(\"\\u08fd\\5\\u0457\\u022c\\2\\u08fd\\u012e\\3\\2\\2\\2\\u08fe\\u08ff\")\n 
buf.write(\"\\5\\u043f\\u0220\\2\\u08ff\\u0900\\5\\u0451\\u0229\\2\\u0900\\u0901\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u0901\\u0902\\5\\u0435\\u021b\\2\\u0902\\u0903\")\n buf.write(\"\\5\\u044b\\u0226\\2\\u0903\\u0904\\5\\u044b\\u0226\\2\\u0904\\u0130\")\n buf.write(\"\\3\\2\\2\\2\\u0905\\u0906\\5\\u043f\\u0220\\2\\u0906\\u0907\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u0907\\u0908\\5\\u0457\\u022c\\2\\u0908\\u0909\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u0909\\u090a\\5\\u043d\\u021f\\2\\u090a\\u0132\\3\\2\\2\")\n buf.write(\"\\2\\u090b\\u090c\\5\\u043f\\u0220\\2\\u090c\\u090d\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u090d\\u090e\\5\\u0451\\u0229\\2\\u090e\\u090f\\5\\u044d\\u0227\")\n buf.write(\"\\2\\u090f\\u0134\\3\\2\\2\\2\\u0910\\u0911\\5\\u043f\\u0220\\2\\u0911\")\n buf.write(\"\\u0912\\5\\u045d\\u022f\\2\\u0912\\u0913\\5\\u044b\\u0226\\2\\u0913\")\n buf.write(\"\\u0914\\5\\u044b\\u0226\\2\\u0914\\u0136\\3\\2\\2\\2\\u0915\\u0916\")\n buf.write(\"\\5\\u043f\\u0220\\2\\u0916\\u0917\\5\\u045d\\u022f\\2\\u0917\\u0918\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u0918\\u0919\\5\\u0439\\u021d\\2\\u0919\\u091a\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u091a\\u091b\\5\\u0445\\u0223\\2\\u091b\\u091c\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u091c\\u091d\\5\\u044f\\u0228\\2\\u091d\\u0138\")\n buf.write(\"\\3\\2\\2\\2\\u091e\\u091f\\5\\u0441\\u0221\\2\\u091f\\u0920\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u0920\\u0921\\5\\u045b\\u022e\\2\\u0921\\u0922\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u0922\\u013a\\3\\2\\2\\2\\u0923\\u0924\\5\\u0441\\u0221\")\n buf.write(\"\\2\\u0924\\u0925\\5\\u0457\\u022c\\2\\u0925\\u0926\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u0926\\u0927\\5\\u044f\\u0228\\2\\u0927\\u0928\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u0928\\u013c\\3\\2\\2\\2\\u0929\\u092a\\5\\u0441\\u0221\\2\\u092a\")\n buf.write(\"\\u092b\\5\\u0457\\u022c\\2\\u092b\\u092c\\5\\u0451\\u0229\\2\\u092c\")\n buf.write(\"\\u092d\\5\\u045d\\u022f\\2\\u092d\\u092e\\5\\u0453\\u022a\\2\\u092e\")\n 
buf.write(\"\\u013e\\3\\2\\2\\2\\u092f\\u0930\\5\\u0441\\u0221\\2\\u0930\\u0931\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u0931\\u0932\\5\\u0451\\u0229\\2\\u0932\\u0933\")\n buf.write(\"\\5\\u045d\\u022f\\2\\u0933\\u0934\\5\\u0453\\u022a\\2\\u0934\\u0935\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u0935\\u0936\\5\\u044f\\u0228\\2\\u0936\\u0937\")\n buf.write(\"\\5\\u0441\\u0221\\2\\u0937\\u0140\\3\\2\\2\\2\\u0938\\u0939\\5\\u0443\")\n buf.write(\"\\u0222\\2\\u0939\\u093a\\5\\u0435\\u021b\\2\\u093a\\u093b\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u093b\\u093c\\5\\u0443\\u0222\\2\\u093c\\u0142\\3\\2\\2\")\n buf.write(\"\\2\\u093d\\u093e\\5\\u0443\\u0222\\2\\u093e\\u093f\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u093f\\u0940\\5\\u045f\\u0230\\2\\u0940\\u0941\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u0941\\u0942\\5\\u044f\\u0228\\2\\u0942\\u0943\\5\\u0441\\u0221\")\n buf.write(\"\\2\\u0943\\u0144\\3\\2\\2\\2\\u0944\\u0945\\5\\u0443\\u0222\\2\\u0945\")\n buf.write(\"\\u0946\\5\\u0445\\u0223\\2\\u0946\\u0947\\5\\u043b\\u021e\\2\\u0947\")\n buf.write(\"\\u0948\\5\\u043d\\u021f\\2\\u0948\\u0146\\3\\2\\2\\2\\u0949\\u094a\")\n buf.write(\"\\5\\u0443\\u0222\\2\\u094a\\u094b\\5\\u0451\\u0229\\2\\u094b\\u094c\")\n buf.write(\"\\5\\u045d\\u022f\\2\\u094c\\u094d\\5\\u0457\\u022c\\2\\u094d\\u0148\")\n buf.write(\"\\3\\2\\2\\2\\u094e\\u094f\\5\\u0445\\u0223\\2\\u094f\\u0950\\5\\u043f\")\n buf.write(\"\\u0220\\2\\u0950\\u014a\\3\\2\\2\\2\\u0951\\u0952\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u0952\\u0953\\5\\u0441\\u0221\\2\\u0953\\u0954\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u0954\\u0955\\5\\u0451\\u0229\\2\\u0955\\u0956\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u0956\\u0957\\5\\u043d\\u021f\\2\\u0957\\u014c\\3\\2\\2\\2\\u0958\")\n buf.write(\"\\u0959\\5\\u0445\\u0223\\2\\u0959\\u095a\\5\\u044d\\u0227\\2\\u095a\")\n buf.write(\"\\u095b\\5\\u044d\\u0227\\2\\u095b\\u095c\\5\\u043d\\u021f\\2\\u095c\")\n buf.write(\"\\u095d\\5\\u043b\\u021e\\2\\u095d\\u095e\\5\\u0445\\u0223\\2\\u095e\")\n 
buf.write(\"\\u095f\\5\\u0435\\u021b\\2\\u095f\\u0960\\5\\u045b\\u022e\\2\\u0960\")\n buf.write(\"\\u0961\\5\\u043d\\u021f\\2\\u0961\\u014e\\3\\2\\2\\2\\u0962\\u0963\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u0963\\u0964\\5\\u044f\\u0228\\2\\u0964\\u0150\")\n buf.write(\"\\3\\2\\2\\2\\u0965\\u0966\\5\\u0445\\u0223\\2\\u0966\\u0967\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u0967\\u0968\\5\\u0439\\u021d\\2\\u0968\\u0969\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u0969\\u096a\\5\\u045d\\u022f\\2\\u096a\\u096b\\5\\u043b\")\n buf.write(\"\\u021e\\2\\u096b\\u096c\\5\\u043d\\u021f\\2\\u096c\\u0152\\3\\2\\2\")\n buf.write(\"\\2\\u096d\\u096e\\5\\u0445\\u0223\\2\\u096e\\u096f\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u096f\\u0970\\5\\u0439\\u021d\\2\\u0970\\u0971\\5\\u044b\\u0226\")\n buf.write(\"\\2\\u0971\\u0972\\5\\u045d\\u022f\\2\\u0972\\u0973\\5\\u043b\\u021e\")\n buf.write(\"\\2\\u0973\\u0974\\5\\u0445\\u0223\\2\\u0974\\u0975\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u0975\\u0976\\5\\u0441\\u0221\\2\\u0976\\u0154\\3\\2\\2\\2\\u0977\")\n buf.write(\"\\u0978\\5\\u0445\\u0223\\2\\u0978\\u0979\\5\\u044f\\u0228\\2\\u0979\")\n buf.write(\"\\u097a\\5\\u0439\\u021d\\2\\u097a\\u097b\\5\\u0457\\u022c\\2\\u097b\")\n buf.write(\"\\u097c\\5\\u043d\\u021f\\2\\u097c\\u097d\\5\\u044d\\u0227\\2\\u097d\")\n buf.write(\"\\u097e\\5\\u043d\\u021f\\2\\u097e\\u097f\\5\\u044f\\u0228\\2\\u097f\")\n buf.write(\"\\u0980\\5\\u045b\\u022e\\2\\u0980\\u0156\\3\\2\\2\\2\\u0981\\u0982\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u0982\\u0983\\5\\u044f\\u0228\\2\\u0983\\u0984\")\n buf.write(\"\\5\\u043b\\u021e\\2\\u0984\\u0985\\5\\u043d\\u021f\\2\\u0985\\u0986\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u0986\\u0987\\5\\u045b\\u022e\\2\\u0987\\u0158\")\n buf.write(\"\\3\\2\\2\\2\\u0988\\u0989\\5\\u0445\\u0223\\2\\u0989\\u098a\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u098a\\u098b\\5\\u043b\\u021e\\2\\u098b\\u098c\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u098c\\u098d\\5\\u0463\\u0232\\2\\u098d\\u015a\\3\\2\\2\")\n 
buf.write(\"\\2\\u098e\\u098f\\5\\u0445\\u0223\\2\\u098f\\u0990\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u0990\\u0991\\5\\u043b\\u021e\\2\\u0991\\u0992\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0992\\u0993\\5\\u0463\\u0232\\2\\u0993\\u0994\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0994\\u0995\\5\\u043b\\u021e\\2\\u0995\\u015c\\3\\2\\2\\2\\u0996\")\n buf.write(\"\\u0997\\5\\u0445\\u0223\\2\\u0997\\u0998\\5\\u044f\\u0228\\2\\u0998\")\n buf.write(\"\\u0999\\5\\u043b\\u021e\\2\\u0999\\u099a\\5\\u0445\\u0223\\2\\u099a\")\n buf.write(\"\\u099b\\5\\u0439\\u021d\\2\\u099b\\u099c\\5\\u0435\\u021b\\2\\u099c\")\n buf.write(\"\\u099d\\5\\u045b\\u022e\\2\\u099d\\u099e\\5\\u0451\\u0229\\2\\u099e\")\n buf.write(\"\\u099f\\5\\u0457\\u022c\\2\\u099f\\u015e\\3\\2\\2\\2\\u09a0\\u09a1\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u09a1\\u09a2\\5\\u044f\\u0228\\2\\u09a2\\u09a3\")\n buf.write(\"\\5\\u043b\\u021e\\2\\u09a3\\u09a4\\5\\u0445\\u0223\\2\\u09a4\\u09a5\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u09a5\\u09a6\\5\\u043d\\u021f\\2\\u09a6\\u09a7\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u09a7\\u0160\\3\\2\\2\\2\\u09a8\\u09a9\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u09a9\\u09aa\\5\\u044f\\u0228\\2\\u09aa\\u09ab\\5\\u043f\")\n buf.write(\"\\u0220\\2\\u09ab\\u09ac\\5\\u0445\\u0223\\2\\u09ac\\u09ad\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u09ad\\u09ae\\5\\u0445\\u0223\\2\\u09ae\\u09af\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u09af\\u09b0\\5\\u043d\\u021f\\2\\u09b0\\u0162\\3\\2\\2\")\n buf.write(\"\\2\\u09b1\\u09b2\\5\\u0445\\u0223\\2\\u09b2\\u09b3\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u09b3\\u09b4\\5\\u044b\\u0226\\2\\u09b4\\u09b5\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u09b5\\u09b6\\5\\u044f\\u0228\\2\\u09b6\\u09b7\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u09b7\\u0164\\3\\2\\2\\2\\u09b8\\u09b9\\5\\u0445\\u0223\\2\\u09b9\")\n buf.write(\"\\u09ba\\5\\u044f\\u0228\\2\\u09ba\\u09bb\\5\\u044f\\u0228\\2\\u09bb\")\n buf.write(\"\\u09bc\\5\\u043d\\u021f\\2\\u09bc\\u09bd\\5\\u0457\\u022c\\2\\u09bd\")\n 
buf.write(\"\\u0166\\3\\2\\2\\2\\u09be\\u09bf\\5\\u0445\\u0223\\2\\u09bf\\u09c0\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u09c0\\u09c1\\5\\u0451\\u0229\\2\\u09c1\\u09c2\")\n buf.write(\"\\5\\u045d\\u022f\\2\\u09c2\\u09c3\\5\\u045b\\u022e\\2\\u09c3\\u0168\")\n buf.write(\"\\3\\2\\2\\2\\u09c4\\u09c5\\5\\u0445\\u0223\\2\\u09c5\\u09c6\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u09c6\\u09c7\\5\\u0459\\u022d\\2\\u09c7\\u09c8\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u09c8\\u09c9\\5\\u0457\\u022c\\2\\u09c9\\u09ca\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u09ca\\u016a\\3\\2\\2\\2\\u09cb\\u09cc\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u09cc\\u09cd\\5\\u044f\\u0228\\2\\u09cd\\u09ce\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u09ce\\u09cf\\5\\u045b\\u022e\\2\\u09cf\\u09d0\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u09d0\\u09d1\\5\\u044f\\u0228\\2\\u09d1\\u09d2\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u09d2\\u09d3\\5\\u0445\\u0223\\2\\u09d3\\u09d4\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u09d4\\u09d5\\5\\u0437\\u021c\\2\\u09d5\\u09d6\\5\\u044b\\u0226\")\n buf.write(\"\\2\\u09d6\\u09d7\\5\\u043d\\u021f\\2\\u09d7\\u016c\\3\\2\\2\\2\\u09d8\")\n buf.write(\"\\u09d9\\5\\u0445\\u0223\\2\\u09d9\\u09da\\5\\u044f\\u0228\\2\\u09da\")\n buf.write(\"\\u09db\\5\\u0459\\u022d\\2\\u09db\\u09dc\\5\\u045b\\u022e\\2\\u09dc\")\n buf.write(\"\\u09dd\\5\\u043d\\u021f\\2\\u09dd\\u09de\\5\\u0435\\u021b\\2\\u09de\")\n buf.write(\"\\u09df\\5\\u043b\\u021e\\2\\u09df\\u016e\\3\\2\\2\\2\\u09e0\\u09e1\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u09e1\\u09e2\\5\\u044f\\u0228\\2\\u09e2\\u09e3\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u09e3\\u0170\\3\\2\\2\\2\\u09e4\\u09e5\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u09e5\\u09e6\\5\\u044f\\u0228\\2\\u09e6\\u09e7\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u09e7\\u09e8\\5\\u043d\\u021f\\2\\u09e8\\u09e9\\5\\u0441\")\n buf.write(\"\\u0221\\2\\u09e9\\u09ea\\5\\u043d\\u021f\\2\\u09ea\\u09eb\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u09eb\\u0172\\3\\2\\2\\2\\u09ec\\u09ed\\5\\u0445\\u0223\")\n 
buf.write(\"\\2\\u09ed\\u09ee\\5\\u044f\\u0228\\2\\u09ee\\u09ef\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u09ef\\u09f0\\5\\u043d\\u021f\\2\\u09f0\\u09f1\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u09f1\\u09f2\\5\\u0459\\u022d\\2\\u09f2\\u09f3\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u09f3\\u09f4\\5\\u0439\\u021d\\2\\u09f4\\u09f5\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u09f5\\u0174\\3\\2\\2\\2\\u09f6\\u09f7\\5\\u0445\\u0223\\2\\u09f7\")\n buf.write(\"\\u09f8\\5\\u044f\\u0228\\2\\u09f8\\u09f9\\5\\u045b\\u022e\\2\\u09f9\")\n buf.write(\"\\u09fa\\5\\u043d\\u021f\\2\\u09fa\\u09fb\\5\\u0457\\u022c\\2\\u09fb\")\n buf.write(\"\\u09fc\\5\\u045f\\u0230\\2\\u09fc\\u09fd\\5\\u0435\\u021b\\2\\u09fd\")\n buf.write(\"\\u09fe\\5\\u044b\\u0226\\2\\u09fe\\u0176\\3\\2\\2\\2\\u09ff\\u0a00\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u0a00\\u0a01\\5\\u044f\\u0228\\2\\u0a01\\u0a02\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u0a02\\u0a03\\5\\u0451\\u0229\\2\\u0a03\\u0178\")\n buf.write(\"\\3\\2\\2\\2\\u0a04\\u0a05\\5\\u0445\\u0223\\2\\u0a05\\u0a06\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u0a06\\u0a07\\5\\u045f\\u0230\\2\\u0a07\\u0a08\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0a08\\u0a09\\5\\u044b\\u0226\\2\\u0a09\\u0a0a\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u0a0a\\u0a0b\\5\\u043b\\u021e\\2\\u0a0b\\u0a0c\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0a0c\\u0a0d\\5\\u045b\\u022e\\2\\u0a0d\\u0a0e\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0a0e\\u017a\\3\\2\\2\\2\\u0a0f\\u0a10\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u0a10\\u0a11\\5\\u0459\\u022d\\2\\u0a11\\u017c\\3\\2\\2\\2\\u0a12\")\n buf.write(\"\\u0a13\\5\\u0445\\u0223\\2\\u0a13\\u0a14\\5\\u0459\\u022d\\2\\u0a14\")\n buf.write(\"\\u0a15\\5\\u0451\\u0229\\2\\u0a15\\u0a16\\5\\u044b\\u0226\\2\\u0a16\")\n buf.write(\"\\u0a17\\5\\u0435\\u021b\\2\\u0a17\\u0a18\\5\\u045b\\u022e\\2\\u0a18\")\n buf.write(\"\\u0a19\\5\\u0445\\u0223\\2\\u0a19\\u0a1a\\5\\u0451\\u0229\\2\\u0a1a\")\n buf.write(\"\\u0a1b\\5\\u044f\\u0228\\2\\u0a1b\\u017e\\3\\2\\2\\2\\u0a1c\\u0a1d\")\n 
buf.write(\"\\5\\u0445\\u0223\\2\\u0a1d\\u0a1e\\5\\u045b\\u022e\\2\\u0a1e\\u0a1f\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0a1f\\u0a20\\5\\u0457\\u022c\\2\\u0a20\\u0a21\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u0a21\\u0a22\\5\\u045b\\u022e\\2\\u0a22\\u0a23\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0a23\\u0180\\3\\2\\2\\2\\u0a24\\u0a25\\5\\u0447\")\n buf.write(\"\\u0224\\2\\u0a25\\u0a26\\5\\u0435\\u021b\\2\\u0a26\\u0a27\\5\\u045f\")\n buf.write(\"\\u0230\\2\\u0a27\\u0a28\\5\\u0435\\u021b\\2\\u0a28\\u0182\\3\\2\\2\")\n buf.write(\"\\2\\u0a29\\u0a2a\\5\\u0447\\u0224\\2\\u0a2a\\u0a2b\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u0a2b\\u0a2c\\5\\u0445\\u0223\\2\\u0a2c\\u0a2d\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u0a2d\\u0184\\3\\2\\2\\2\\u0a2e\\u0a2f\\5\\u0449\\u0225\\2\\u0a2f\")\n buf.write(\"\\u0a30\\5\\u043d\\u021f\\2\\u0a30\\u0a31\\5\\u043d\\u021f\\2\\u0a31\")\n buf.write(\"\\u0a32\\5\\u0453\\u022a\\2\\u0a32\\u0186\\3\\2\\2\\2\\u0a33\\u0a34\")\n buf.write(\"\\5\\u044b\\u0226\\2\\u0a34\\u0a35\\5\\u0435\\u021b\\2\\u0a35\\u0a36\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u0a36\\u0a37\\5\\u0441\\u0221\\2\\u0a37\\u0a38\")\n buf.write(\"\\5\\u045d\\u022f\\2\\u0a38\\u0a39\\5\\u0435\\u021b\\2\\u0a39\\u0a3a\")\n buf.write(\"\\5\\u0441\\u0221\\2\\u0a3a\\u0a3b\\5\\u043d\\u021f\\2\\u0a3b\\u0188\")\n buf.write(\"\\3\\2\\2\\2\\u0a3c\\u0a3d\\5\\u044b\\u0226\\2\\u0a3d\\u0a3e\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0a3e\\u0a3f\\5\\u0459\\u022d\\2\\u0a3f\\u0a40\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0a40\\u018a\\3\\2\\2\\2\\u0a41\\u0a42\\5\\u044b\\u0226\")\n buf.write(\"\\2\\u0a42\\u0a43\\5\\u0435\\u021b\\2\\u0a43\\u0a44\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u0a44\\u0a45\\5\\u045b\\u022e\\2\\u0a45\\u0a46\\7a\\2\\2\\u0a46\")\n buf.write(\"\\u0a47\\5\\u045f\\u0230\\2\\u0a47\\u0a48\\5\\u0435\\u021b\\2\\u0a48\")\n buf.write(\"\\u0a49\\5\\u044b\\u0226\\2\\u0a49\\u0a4a\\5\\u045d\\u022f\\2\\u0a4a\")\n buf.write(\"\\u0a4b\\5\\u043d\\u021f\\2\\u0a4b\\u018c\\3\\2\\2\\2\\u0a4c\\u0a4d\")\n 
buf.write(\"\\5\\u044b\\u0226\\2\\u0a4d\\u0a4e\\5\\u043d\\u021f\\2\\u0a4e\\u0a4f\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u0a4f\\u0a50\\5\\u043b\\u021e\\2\\u0a50\\u0a51\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u0a51\\u0a52\\5\\u044f\\u0228\\2\\u0a52\\u0a53\")\n buf.write(\"\\5\\u0441\\u0221\\2\\u0a53\\u018e\\3\\2\\2\\2\\u0a54\\u0a55\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u0a55\\u0a56\\5\\u043d\\u021f\\2\\u0a56\\u0a57\\5\\u043f\")\n buf.write(\"\\u0220\\2\\u0a57\\u0a58\\5\\u045b\\u022e\\2\\u0a58\\u0190\\3\\2\\2\")\n buf.write(\"\\2\\u0a59\\u0a5a\\5\\u044b\\u0226\\2\\u0a5a\\u0a5b\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0a5b\\u0a5c\\5\\u045f\\u0230\\2\\u0a5c\\u0a5d\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0a5d\\u0a5e\\5\\u044b\\u0226\\2\\u0a5e\\u0192\\3\\2\\2\\2\\u0a5f\")\n buf.write(\"\\u0a60\\5\\u044b\\u0226\\2\\u0a60\\u0a61\\5\\u0445\\u0223\\2\\u0a61\")\n buf.write(\"\\u0a62\\5\\u0437\\u021c\\2\\u0a62\\u0a63\\5\\u0457\\u022c\\2\\u0a63\")\n buf.write(\"\\u0a64\\5\\u0435\\u021b\\2\\u0a64\\u0a65\\5\\u0457\\u022c\\2\\u0a65\")\n buf.write(\"\\u0a66\\5\\u0465\\u0233\\2\\u0a66\\u0194\\3\\2\\2\\2\\u0a67\\u0a68\")\n buf.write(\"\\5\\u044b\\u0226\\2\\u0a68\\u0a69\\5\\u0445\\u0223\\2\\u0a69\\u0a6a\")\n buf.write(\"\\5\\u0449\\u0225\\2\\u0a6a\\u0a6b\\5\\u043d\\u021f\\2\\u0a6b\\u0196\")\n buf.write(\"\\3\\2\\2\\2\\u0a6c\\u0a6d\\5\\u044b\\u0226\\2\\u0a6d\\u0a6e\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u0a6e\\u0a6f\\5\\u0449\\u0225\\2\\u0a6f\\u0a70\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0a70\\u0a71\\7\\64\\2\\2\\u0a71\\u0198\\3\\2\\2\\2\\u0a72\")\n buf.write(\"\\u0a73\\5\\u044b\\u0226\\2\\u0a73\\u0a74\\5\\u0445\\u0223\\2\\u0a74\")\n buf.write(\"\\u0a75\\5\\u0449\\u0225\\2\\u0a75\\u0a76\\5\\u043d\\u021f\\2\\u0a76\")\n buf.write(\"\\u0a77\\7\\66\\2\\2\\u0a77\\u019a\\3\\2\\2\\2\\u0a78\\u0a79\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u0a79\\u0a7a\\5\\u0445\\u0223\\2\\u0a7a\\u0a7b\\5\\u0449\")\n buf.write(\"\\u0225\\2\\u0a7b\\u0a7c\\5\\u043d\\u021f\\2\\u0a7c\\u0a7d\\5\\u0439\")\n 
buf.write(\"\\u021d\\2\\u0a7d\\u019c\\3\\2\\2\\2\\u0a7e\\u0a7f\\5\\u044b\\u0226\")\n buf.write(\"\\2\\u0a7f\\u0a80\\5\\u0445\\u0223\\2\\u0a80\\u0a81\\5\\u044d\\u0227\")\n buf.write(\"\\2\\u0a81\\u0a82\\5\\u0445\\u0223\\2\\u0a82\\u0a83\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u0a83\\u019e\\3\\2\\2\\2\\u0a84\\u0a85\\5\\u044b\\u0226\\2\\u0a85\")\n buf.write(\"\\u0a86\\5\\u0451\\u0229\\2\\u0a86\\u0a87\\5\\u0439\\u021d\\2\\u0a87\")\n buf.write(\"\\u0a88\\5\\u0435\\u021b\\2\\u0a88\\u0a89\\5\\u044b\\u0226\\2\\u0a89\")\n buf.write(\"\\u01a0\\3\\2\\2\\2\\u0a8a\\u0a8b\\5\\u044b\\u0226\\2\\u0a8b\\u0a8c\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u0a8c\\u0a8d\\5\\u0439\\u021d\\2\\u0a8d\\u0a8e\")\n buf.write(\"\\5\\u0449\\u0225\\2\\u0a8e\\u01a2\\3\\2\\2\\2\\u0a8f\\u0a90\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u0a90\\u0a91\\5\\u0451\\u0229\\2\\u0a91\\u0a92\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u0a92\\u0a93\\5\\u0449\\u0225\\2\\u0a93\\u0a94\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0a94\\u0a95\\5\\u043b\\u021e\\2\\u0a95\\u01a4\\3\\2\\2\")\n buf.write(\"\\2\\u0a96\\u0a97\\5\\u044b\\u0226\\2\\u0a97\\u0a98\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u0a98\\u0a99\\5\\u0441\\u0221\\2\\u0a99\\u01a6\\3\\2\\2\\2\\u0a9a\")\n buf.write(\"\\u0a9b\\5\\u044b\\u0226\\2\\u0a9b\\u0a9c\\5\\u0451\\u0229\\2\\u0a9c\")\n buf.write(\"\\u0a9d\\5\\u0441\\u0221\\2\\u0a9d\\u0a9e\\5\\u0451\\u0229\\2\\u0a9e\")\n buf.write(\"\\u0a9f\\5\\u043f\\u0220\\2\\u0a9f\\u0aa0\\5\\u043f\\u0220\\2\\u0aa0\")\n buf.write(\"\\u01a8\\3\\2\\2\\2\\u0aa1\\u0aa2\\5\\u044b\\u0226\\2\\u0aa2\\u0aa3\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u0aa3\\u0aa4\\5\\u0441\\u0221\\2\\u0aa4\\u0aa5\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u0aa5\\u0aa6\\5\\u044f\\u0228\\2\\u0aa6\\u01aa\")\n buf.write(\"\\3\\2\\2\\2\\u0aa7\\u0aa8\\5\\u044b\\u0226\\2\\u0aa8\\u0aa9\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u0aa9\\u0aaa\\5\\u044f\\u0228\\2\\u0aaa\\u0aab\\5\\u0441\")\n buf.write(\"\\u0221\\2\\u0aab\\u01ac\\3\\2\\2\\2\\u0aac\\u0aad\\5\\u044b\\u0226\")\n 
buf.write(\"\\2\\u0aad\\u0aae\\5\\u0451\\u0229\\2\\u0aae\\u0aaf\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u0aaf\\u0ab0\\5\\u0453\\u022a\\2\\u0ab0\\u01ae\\3\\2\\2\\2\\u0ab1\")\n buf.write(\"\\u0ab2\\5\\u044d\\u0227\\2\\u0ab2\\u0ab3\\5\\u0435\\u021b\\2\\u0ab3\")\n buf.write(\"\\u0ab4\\5\\u0445\\u0223\\2\\u0ab4\\u0ab5\\5\\u044f\\u0228\\2\\u0ab5\")\n buf.write(\"\\u01b0\\3\\2\\2\\2\\u0ab6\\u0ab7\\5\\u044d\\u0227\\2\\u0ab7\\u0ab8\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u0ab8\\u0ab9\\5\\u0453\\u022a\\2\\u0ab9\\u01b2\")\n buf.write(\"\\3\\2\\2\\2\\u0aba\\u0abb\\5\\u044d\\u0227\\2\\u0abb\\u0abc\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0abc\\u0abd\\5\\u045b\\u022e\\2\\u0abd\\u0abe\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u0abe\\u0abf\\5\\u0443\\u0222\\2\\u0abf\\u0ac0\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0ac0\\u0ac1\\5\\u043b\\u021e\\2\\u0ac1\\u01b4\\3\\2\\2\")\n buf.write(\"\\2\\u0ac2\\u0ac3\\5\\u044d\\u0227\\2\\u0ac3\\u0ac4\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u0ac4\\u0ac5\\5\\u0463\\u0232\\2\\u0ac5\\u0ac6\\5\\u045f\\u0230\")\n buf.write(\"\\2\\u0ac6\\u0ac7\\5\\u0435\\u021b\\2\\u0ac7\\u0ac8\\5\\u044b\\u0226\")\n buf.write(\"\\2\\u0ac8\\u0ac9\\5\\u045d\\u022f\\2\\u0ac9\\u0aca\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0aca\\u01b6\\3\\2\\2\\2\\u0acb\\u0acc\\5\\u044d\\u0227\\2\\u0acc\")\n buf.write(\"\\u0acd\\5\\u043d\\u021f\\2\\u0acd\\u0ace\\5\\u0435\\u021b\\2\\u0ace\")\n buf.write(\"\\u0acf\\5\\u0459\\u022d\\2\\u0acf\\u0ad0\\5\\u045d\\u022f\\2\\u0ad0\")\n buf.write(\"\\u0ad1\\5\\u0457\\u022c\\2\\u0ad1\\u0ad2\\5\\u043d\\u021f\\2\\u0ad2\")\n buf.write(\"\\u0ad3\\5\\u0459\\u022d\\2\\u0ad3\\u01b8\\3\\2\\2\\2\\u0ad4\\u0ad5\")\n buf.write(\"\\5\\u044d\\u0227\\2\\u0ad5\\u0ad6\\5\\u043d\\u021f\\2\\u0ad6\\u0ad7\")\n buf.write(\"\\5\\u044d\\u0227\\2\\u0ad7\\u0ad8\\5\\u0437\\u021c\\2\\u0ad8\\u0ad9\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0ad9\\u0ada\\5\\u0457\\u022c\\2\\u0ada\\u01ba\")\n buf.write(\"\\3\\2\\2\\2\\u0adb\\u0adc\\5\\u044d\\u0227\\2\\u0adc\\u0add\\5\\u043d\")\n 
buf.write(\"\\u021f\\2\\u0add\\u0ade\\5\\u0457\\u022c\\2\\u0ade\\u0adf\\5\\u0441\")\n buf.write(\"\\u0221\\2\\u0adf\\u0ae0\\5\\u043d\\u021f\\2\\u0ae0\\u01bc\\3\\2\\2\")\n buf.write(\"\\2\\u0ae1\\u0ae2\\5\\u044d\\u0227\\2\\u0ae2\\u0ae3\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u0ae3\\u0ae4\\5\\u044f\\u0228\\2\\u0ae4\\u0ae5\\5\\u045d\\u022f\")\n buf.write(\"\\2\\u0ae5\\u0ae6\\5\\u0459\\u022d\\2\\u0ae6\\u01be\\3\\2\\2\\2\\u0ae7\")\n buf.write(\"\\u0ae8\\5\\u044d\\u0227\\2\\u0ae8\\u0ae9\\5\\u0445\\u0223\\2\\u0ae9\")\n buf.write(\"\\u0aea\\5\\u044f\\u0228\\2\\u0aea\\u0aeb\\5\\u045d\\u022f\\2\\u0aeb\")\n buf.write(\"\\u0aec\\5\\u045b\\u022e\\2\\u0aec\\u0aed\\5\\u043d\\u021f\\2\\u0aed\")\n buf.write(\"\\u01c0\\3\\2\\2\\2\\u0aee\\u0aef\\5\\u044d\\u0227\\2\\u0aef\\u0af0\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u0af0\\u0af1\\5\\u044f\\u0228\\2\\u0af1\\u0af2\")\n buf.write(\"\\5\\u045f\\u0230\\2\\u0af2\\u0af3\\5\\u0435\\u021b\\2\\u0af3\\u0af4\")\n buf.write(\"\\5\\u044b\\u0226\\2\\u0af4\\u0af5\\5\\u045d\\u022f\\2\\u0af5\\u0af6\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0af6\\u01c2\\3\\2\\2\\2\\u0af7\\u0af8\\5\\u044d\")\n buf.write(\"\\u0227\\2\\u0af8\\u0af9\\5\\u044b\\u0226\\2\\u0af9\\u0afa\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u0afa\\u0afb\\5\\u044b\\u0226\\2\\u0afb\\u0afc\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0afc\\u0afd\\5\\u0437\\u021c\\2\\u0afd\\u0afe\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0afe\\u0aff\\5\\u044b\\u0226\\2\\u0aff\\u01c4\\3\\2\\2\")\n buf.write(\"\\2\\u0b00\\u0b01\\5\\u044d\\u0227\\2\\u0b01\\u0b02\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u0b02\\u0b03\\5\\u043b\\u021e\\2\\u0b03\\u0b04\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0b04\\u01c6\\3\\2\\2\\2\\u0b05\\u0b06\\5\\u044d\\u0227\\2\\u0b06\")\n buf.write(\"\\u0b07\\5\\u0451\\u0229\\2\\u0b07\\u0b08\\5\\u043b\\u021e\\2\\u0b08\")\n buf.write(\"\\u0b09\\5\\u043d\\u021f\\2\\u0b09\\u0b0a\\5\\u044b\\u0226\\2\\u0b0a\")\n buf.write(\"\\u01c8\\3\\2\\2\\2\\u0b0b\\u0b0c\\5\\u044d\\u0227\\2\\u0b0c\\u0b0d\")\n 
buf.write(\"\\5\\u0451\\u0229\\2\\u0b0d\\u0b0e\\5\\u043b\\u021e\\2\\u0b0e\\u0b0f\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u0b0f\\u0b10\\5\\u043f\\u0220\\2\\u0b10\\u0b11\")\n buf.write(\"\\5\\u0465\\u0233\\2\\u0b11\\u01ca\\3\\2\\2\\2\\u0b12\\u0b13\\5\\u044d\")\n buf.write(\"\\u0227\\2\\u0b13\\u0b14\\5\\u0451\\u0229\\2\\u0b14\\u0b15\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u0b15\\u0b16\\5\\u045b\\u022e\\2\\u0b16\\u0b17\\5\\u0443\")\n buf.write(\"\\u0222\\2\\u0b17\\u01cc\\3\\2\\2\\2\\u0b18\\u0b19\\5\\u044d\\u0227\")\n buf.write(\"\\2\\u0b19\\u0b1a\\5\\u045d\\u022f\\2\\u0b1a\\u0b1b\\5\\u044b\\u0226\")\n buf.write(\"\\2\\u0b1b\\u0b1c\\5\\u045b\\u022e\\2\\u0b1c\\u0b1d\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u0b1d\\u0b1e\\5\\u0459\\u022d\\2\\u0b1e\\u0b1f\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0b1f\\u0b20\\5\\u045b\\u022e\\2\\u0b20\\u01ce\\3\\2\\2\\2\\u0b21\")\n buf.write(\"\\u0b22\\5\\u044f\\u0228\\2\\u0b22\\u0b23\\5\\u0435\\u021b\\2\\u0b23\")\n buf.write(\"\\u0b24\\5\\u044d\\u0227\\2\\u0b24\\u0b25\\5\\u043d\\u021f\\2\\u0b25\")\n buf.write(\"\\u01d0\\3\\2\\2\\2\\u0b26\\u0b27\\5\\u044f\\u0228\\2\\u0b27\\u0b28\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u0b28\\u0b29\\5\\u044f\\u0228\\2\\u0b29\\u01d2\")\n buf.write(\"\\3\\2\\2\\2\\u0b2a\\u0b2b\\5\\u044f\\u0228\\2\\u0b2b\\u0b2c\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0b2c\\u0b2d\\5\\u045b\\u022e\\2\\u0b2d\\u0b2e\\5\\u045d\")\n buf.write(\"\\u022f\\2\\u0b2e\\u0b2f\\5\\u0457\\u022c\\2\\u0b2f\\u0b30\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0b30\\u0b31\\5\\u044b\\u0226\\2\\u0b31\\u01d4\\3\\2\\2\")\n buf.write(\"\\2\\u0b32\\u0b33\\5\\u044f\\u0228\\2\\u0b33\\u0b34\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u0b34\\u0b35\\5\\u045b\\u022e\\2\\u0b35\\u0b36\\5\\u045d\\u022f\")\n buf.write(\"\\2\\u0b36\\u0b37\\5\\u0457\\u022c\\2\\u0b37\\u0b38\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u0b38\\u0b39\\5\\u044b\\u0226\\2\\u0b39\\u0b3a\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u0b3a\\u01d6\\3\\2\\2\\2\\u0b3b\\u0b3c\\5\\u044f\\u0228\\2\\u0b3c\")\n 
buf.write(\"\\u0b3d\\5\\u0435\\u021b\\2\\u0b3d\\u0b3e\\5\\u045f\\u0230\\2\\u0b3e\")\n buf.write(\"\\u01d8\\3\\2\\2\\2\\u0b3f\\u0b40\\5\\u044f\\u0228\\2\\u0b40\\u0b41\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u0b41\\u0b42\\5\\u0443\\u0222\\2\\u0b42\\u0b43\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u0b43\\u0b44\\5\\u0457\\u022c\\2\\u0b44\\u01da\")\n buf.write(\"\\3\\2\\2\\2\\u0b45\\u0b46\\5\\u044f\\u0228\\2\\u0b46\\u0b47\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u0b47\\u0b48\\5\\u0443\\u0222\\2\\u0b48\\u0b49\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0b49\\u0b4a\\5\\u0457\\u022c\\2\\u0b4a\\u0b4b\\7a\\2\")\n buf.write(\"\\2\\u0b4b\\u0b4c\\5\\u0439\\u021d\\2\\u0b4c\\u0b4d\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u0b4d\\u01dc\\3\\2\\2\\2\\u0b4e\\u0b4f\\5\\u044f\\u0228\\2\\u0b4f\")\n buf.write(\"\\u0b50\\5\\u0439\\u021d\\2\\u0b50\\u0b51\\5\\u044b\\u0226\\2\\u0b51\")\n buf.write(\"\\u0b52\\5\\u0451\\u0229\\2\\u0b52\\u0b53\\5\\u0437\\u021c\\2\\u0b53\")\n buf.write(\"\\u01de\\3\\2\\2\\2\\u0b54\\u0b55\\5\\u044f\\u0228\\2\\u0b55\\u0b56\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0b56\\u0b57\\5\\u0459\\u022d\\2\\u0b57\\u0b58\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u0b58\\u0b59\\5\\u043d\\u021f\\2\\u0b59\\u0b5a\")\n buf.write(\"\\5\\u043b\\u021e\\2\\u0b5a\\u01e0\\3\\2\\2\\2\\u0b5b\\u0b5c\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u0b5c\\u0b5d\\5\\u043d\\u021f\\2\\u0b5d\\u0b5e\\5\\u0461\")\n buf.write(\"\\u0231\\2\\u0b5e\\u01e2\\3\\2\\2\\2\\u0b5f\\u0b60\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u0b60\\u0b61\\5\\u0451\\u0229\\2\\u0b61\\u01e4\\3\\2\\2\\2\\u0b62\")\n buf.write(\"\\u0b63\\5\\u044f\\u0228\\2\\u0b63\\u0b64\\5\\u0451\\u0229\\2\\u0b64\")\n buf.write(\"\\u0b65\\5\\u0435\\u021b\\2\\u0b65\\u0b66\\5\\u045d\\u022f\\2\\u0b66\")\n buf.write(\"\\u0b67\\5\\u043b\\u021e\\2\\u0b67\\u0b68\\5\\u0445\\u0223\\2\\u0b68\")\n buf.write(\"\\u0b69\\5\\u045b\\u022e\\2\\u0b69\\u01e6\\3\\2\\2\\2\\u0b6a\\u0b6b\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u0b6b\\u0b6c\\5\\u0451\\u0229\\2\\u0b6c\\u0b6d\")\n 
buf.write(\"\\5\\u0439\\u021d\\2\\u0b6d\\u0b6e\\5\\u0435\\u021b\\2\\u0b6e\\u0b6f\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u0b6f\\u0b70\\5\\u0443\\u0222\\2\\u0b70\\u0b71\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0b71\\u01e8\\3\\2\\2\\2\\u0b72\\u0b73\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u0b73\\u0b74\\5\\u0451\\u0229\\2\\u0b74\\u0b75\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u0b75\\u0b76\\5\\u0451\\u0229\\2\\u0b76\\u0b77\\5\\u0453\")\n buf.write(\"\\u022a\\2\\u0b77\\u0b78\\5\\u0465\\u0233\\2\\u0b78\\u01ea\\3\\2\\2\")\n buf.write(\"\\2\\u0b79\\u0b7a\\5\\u044f\\u0228\\2\\u0b7a\\u0b7b\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u0b7b\\u0b7c\\5\\u0439\\u021d\\2\\u0b7c\\u0b7d\\5\\u0465\\u0233\")\n buf.write(\"\\2\\u0b7d\\u0b7e\\5\\u0439\\u021d\\2\\u0b7e\\u0b7f\\5\\u044b\\u0226\")\n buf.write(\"\\2\\u0b7f\\u0b80\\5\\u043d\\u021f\\2\\u0b80\\u01ec\\3\\2\\2\\2\\u0b81\")\n buf.write(\"\\u0b82\\5\\u044f\\u0228\\2\\u0b82\\u0b83\\5\\u0451\\u0229\\2\\u0b83\")\n buf.write(\"\\u0b84\\5\\u043d\\u021f\\2\\u0b84\\u0b85\\5\\u044f\\u0228\\2\\u0b85\")\n buf.write(\"\\u0b86\\5\\u045b\\u022e\\2\\u0b86\\u0b87\\5\\u0445\\u0223\\2\\u0b87\")\n buf.write(\"\\u0b88\\5\\u045b\\u022e\\2\\u0b88\\u0b89\\5\\u0465\\u0233\\2\\u0b89\")\n buf.write(\"\\u0b8a\\5\\u043d\\u021f\\2\\u0b8a\\u0b8b\\5\\u0459\\u022d\\2\\u0b8b\")\n buf.write(\"\\u0b8c\\5\\u0439\\u021d\\2\\u0b8c\\u0b8d\\5\\u0435\\u021b\\2\\u0b8d\")\n buf.write(\"\\u0b8e\\5\\u0453\\u022a\\2\\u0b8e\\u0b8f\\5\\u0445\\u0223\\2\\u0b8f\")\n buf.write(\"\\u0b90\\5\\u044f\\u0228\\2\\u0b90\\u0b91\\5\\u0441\\u0221\\2\\u0b91\")\n buf.write(\"\\u01ee\\3\\2\\2\\2\\u0b92\\u0b93\\5\\u044f\\u0228\\2\\u0b93\\u0b94\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u0b94\\u0b95\\5\\u044d\\u0227\\2\\u0b95\\u0b96\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u0b96\\u0b97\\5\\u0463\\u0232\\2\\u0b97\\u0b98\")\n buf.write(\"\\5\\u045f\\u0230\\2\\u0b98\\u0b99\\5\\u0435\\u021b\\2\\u0b99\\u0b9a\")\n buf.write(\"\\5\\u044b\\u0226\\2\\u0b9a\\u0b9b\\5\\u045d\\u022f\\2\\u0b9b\\u0b9c\")\n 
buf.write(\"\\5\\u043d\\u021f\\2\\u0b9c\\u01f0\\3\\2\\2\\2\\u0b9d\\u0b9e\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u0b9e\\u0b9f\\5\\u0451\\u0229\\2\\u0b9f\\u0ba0\\5\\u044d\")\n buf.write(\"\\u0227\\2\\u0ba0\\u0ba1\\5\\u0445\\u0223\\2\\u0ba1\\u0ba2\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u0ba2\\u0ba3\\5\\u045f\\u0230\\2\\u0ba3\\u0ba4\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0ba4\\u0ba5\\5\\u044b\\u0226\\2\\u0ba5\\u0ba6\\5\\u045d\")\n buf.write(\"\\u022f\\2\\u0ba6\\u0ba7\\5\\u043d\\u021f\\2\\u0ba7\\u01f2\\3\\2\\2\")\n buf.write(\"\\2\\u0ba8\\u0ba9\\5\\u044f\\u0228\\2\\u0ba9\\u0baa\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u0baa\\u0bab\\5\\u044f\\u0228\\2\\u0bab\\u0bac\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0bac\\u01f4\\3\\2\\2\\2\\u0bad\\u0bae\\5\\u044f\\u0228\\2\\u0bae\")\n buf.write(\"\\u0baf\\5\\u0451\\u0229\\2\\u0baf\\u0bb0\\5\\u0451\\u0229\\2\\u0bb0\")\n buf.write(\"\\u0bb1\\5\\u0457\\u022c\\2\\u0bb1\\u0bb2\\5\\u043b\\u021e\\2\\u0bb2\")\n buf.write(\"\\u0bb3\\5\\u043d\\u021f\\2\\u0bb3\\u0bb4\\5\\u0457\\u022c\\2\\u0bb4\")\n buf.write(\"\\u01f6\\3\\2\\2\\2\\u0bb5\\u0bb6\\5\\u044f\\u0228\\2\\u0bb6\\u0bb7\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u0bb7\\u0bb8\\5\\u0459\\u022d\\2\\u0bb8\\u0bb9\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u0bb9\\u0bba\\5\\u0443\\u0222\\2\\u0bba\\u0bbb\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0bbb\\u0bbc\\5\\u044d\\u0227\\2\\u0bbc\\u0bbd\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u0bbd\\u0bbe\\5\\u0439\\u021d\\2\\u0bbe\\u0bbf\")\n buf.write(\"\\5\\u0443\\u0222\\2\\u0bbf\\u0bc0\\5\\u043d\\u021f\\2\\u0bc0\\u0bc1\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u0bc1\\u0bc2\\5\\u0449\\u0225\\2\\u0bc2\\u01f8\")\n buf.write(\"\\3\\2\\2\\2\\u0bc3\\u0bc4\\5\\u044f\\u0228\\2\\u0bc4\\u0bc5\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u0bc5\\u0bc6\\5\\u045b\\u022e\\2\\u0bc6\\u01fa\\3\\2\\2\")\n buf.write(\"\\2\\u0bc7\\u0bc8\\5\\u044f\\u0228\\2\\u0bc8\\u0bc9\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u0bc9\\u0bca\\5\\u0461\\u0231\\2\\u0bca\\u0bcb\\5\\u0435\\u021b\")\n 
buf.write(\"\\2\\u0bcb\\u0bcc\\5\\u0445\\u0223\\2\\u0bcc\\u0bcd\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u0bcd\\u01fc\\3\\2\\2\\2\\u0bce\\u0bcf\\5\\u044f\\u0228\\2\\u0bcf\")\n buf.write(\"\\u0bd0\\5\\u045d\\u022f\\2\\u0bd0\\u0bd1\\5\\u044b\\u0226\\2\\u0bd1\")\n buf.write(\"\\u0bd2\\5\\u044b\\u0226\\2\\u0bd2\\u01fe\\3\\2\\2\\2\\u0bd3\\u0bd4\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u0bd4\\u0bd5\\5\\u045d\\u022f\\2\\u0bd5\\u0bd6\")\n buf.write(\"\\5\\u044b\\u0226\\2\\u0bd6\\u0bd7\\5\\u044b\\u0226\\2\\u0bd7\\u0bd8\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u0bd8\\u0200\\3\\2\\2\\2\\u0bd9\\u0bda\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u0bda\\u0bdb\\5\\u045d\\u022f\\2\\u0bdb\\u0bdc\\5\\u044d\")\n buf.write(\"\\u0227\\2\\u0bdc\\u0bdd\\5\\u0437\\u021c\\2\\u0bdd\\u0bde\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0bde\\u0bdf\\5\\u0457\\u022c\\2\\u0bdf\\u0202\\3\\2\\2\")\n buf.write(\"\\2\\u0be0\\u0be1\\5\\u044f\\u0228\\2\\u0be1\\u0be2\\5\\u045d\\u022f\")\n buf.write(\"\\2\\u0be2\\u0be3\\5\\u044d\\u0227\\2\\u0be3\\u0be4\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0be4\\u0be5\\5\\u0457\\u022c\\2\\u0be5\\u0be6\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u0be6\\u0be7\\5\\u0439\\u021d\\2\\u0be7\\u0204\\3\\2\\2\\2\\u0be8\")\n buf.write(\"\\u0be9\\5\\u044f\\u0228\\2\\u0be9\\u0bea\\5\\u045f\\u0230\\2\\u0bea\")\n buf.write(\"\\u0beb\\5\\u0435\\u021b\\2\\u0beb\\u0bec\\5\\u0457\\u022c\\2\\u0bec\")\n buf.write(\"\\u0bed\\5\\u0439\\u021d\\2\\u0bed\\u0bee\\5\\u0443\\u0222\\2\\u0bee\")\n buf.write(\"\\u0bef\\5\\u0435\\u021b\\2\\u0bef\\u0bf0\\5\\u0457\\u022c\\2\\u0bf0\")\n buf.write(\"\\u0bf1\\7\\64\\2\\2\\u0bf1\\u0206\\3\\2\\2\\2\\u0bf2\\u0bf3\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u0bf3\\u0bf4\\5\\u0437\\u021c\\2\\u0bf4\\u0bf5\\5\\u0447\")\n buf.write(\"\\u0224\\2\\u0bf5\\u0bf6\\5\\u043d\\u021f\\2\\u0bf6\\u0bf7\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u0bf7\\u0bf8\\5\\u045b\\u022e\\2\\u0bf8\\u0208\\3\\2\\2\")\n buf.write(\"\\2\\u0bf9\\u0bfa\\5\\u0451\\u0229\\2\\u0bfa\\u0bfb\\5\\u043f\\u0220\")\n 
buf.write(\"\\2\\u0bfb\\u020a\\3\\2\\2\\2\\u0bfc\\u0bfd\\5\\u0451\\u0229\\2\\u0bfd\")\n buf.write(\"\\u0bfe\\5\\u043f\\u0220\\2\\u0bfe\\u0bff\\5\\u043f\\u0220\\2\\u0bff\")\n buf.write(\"\\u020c\\3\\2\\2\\2\\u0c00\\u0c01\\5\\u0451\\u0229\\2\\u0c01\\u0c02\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u0c02\\u0c03\\5\\u043b\\u021e\\2\\u0c03\\u020e\")\n buf.write(\"\\3\\2\\2\\2\\u0c04\\u0c05\\5\\u0451\\u0229\\2\\u0c05\\u0c06\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u0c06\\u0c07\\5\\u043b\\u021e\\2\\u0c07\\u0210\\3\\2\\2\")\n buf.write(\"\\2\\u0c08\\u0c09\\5\\u0451\\u0229\\2\\u0c09\\u0c0a\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u0c0a\\u0212\\3\\2\\2\\2\\u0c0b\\u0c0c\\5\\u0451\\u0229\\2\\u0c0c\")\n buf.write(\"\\u0c0d\\5\\u044f\\u0228\\2\\u0c0d\\u0c0e\\5\\u044b\\u0226\\2\\u0c0e\")\n buf.write(\"\\u0c0f\\5\\u0465\\u0233\\2\\u0c0f\\u0214\\3\\2\\2\\2\\u0c10\\u0c11\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u0c11\\u0c12\\5\\u0453\\u022a\\2\\u0c12\\u0c13\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0c13\\u0c14\\5\\u044f\\u0228\\2\\u0c14\\u0216\")\n buf.write(\"\\3\\2\\2\\2\\u0c15\\u0c16\\5\\u0451\\u0229\\2\\u0c16\\u0c17\\5\\u0453\")\n buf.write(\"\\u022a\\2\\u0c17\\u0c18\\5\\u045b\\u022e\\2\\u0c18\\u0c19\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u0c19\\u0c1a\\5\\u0451\\u0229\\2\\u0c1a\\u0c1b\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u0c1b\\u0218\\3\\2\\2\\2\\u0c1c\\u0c1d\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u0c1d\\u0c1e\\5\\u0457\\u022c\\2\\u0c1e\\u021a\\3\\2\\2\\2\\u0c1f\")\n buf.write(\"\\u0c20\\5\\u0451\\u0229\\2\\u0c20\\u0c21\\5\\u0457\\u022c\\2\\u0c21\")\n buf.write(\"\\u0c22\\5\\u0435\\u021b\\2\\u0c22\\u0c23\\5\\u043b\\u021e\\2\\u0c23\")\n buf.write(\"\\u0c24\\5\\u0435\\u021b\\2\\u0c24\\u0c25\\5\\u045b\\u022e\\2\\u0c25\")\n buf.write(\"\\u0c26\\5\\u0435\\u021b\\2\\u0c26\\u021c\\3\\2\\2\\2\\u0c27\\u0c28\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u0c28\\u0c29\\5\\u0457\\u022c\\2\\u0c29\\u0c2a\")\n buf.write(\"\\5\\u043b\\u021e\\2\\u0c2a\\u0c2b\\5\\u043d\\u021f\\2\\u0c2b\\u0c2c\")\n 
buf.write(\"\\5\\u0457\\u022c\\2\\u0c2c\\u021e\\3\\2\\2\\2\\u0c2d\\u0c2e\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u0c2e\\u0c2f\\5\\u0457\\u022c\\2\\u0c2f\\u0c30\\5\\u043b\")\n buf.write(\"\\u021e\\2\\u0c30\\u0c31\\5\\u0445\\u0223\\2\\u0c31\\u0c32\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u0c32\\u0c33\\5\\u0435\\u021b\\2\\u0c33\\u0c34\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u0c34\\u0c35\\5\\u0445\\u0223\\2\\u0c35\\u0c36\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0c36\\u0c37\\5\\u0465\\u0233\\2\\u0c37\\u0220\\3\\2\\2\")\n buf.write(\"\\2\\u0c38\\u0c39\\5\\u0451\\u0229\\2\\u0c39\\u0c3a\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u0c3a\\u0c3b\\5\\u043d\\u021f\\2\\u0c3b\\u0c3c\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u0c3c\\u0c3d\\5\\u0457\\u022c\\2\\u0c3d\\u0c3e\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u0c3e\\u0c3f\\5\\u0457\\u022c\\2\\u0c3f\\u0222\\3\\2\\2\\2\\u0c40\")\n buf.write(\"\\u0c41\\5\\u0451\\u0229\\2\\u0c41\\u0c42\\5\\u045d\\u022f\\2\\u0c42\")\n buf.write(\"\\u0c43\\5\\u045b\\u022e\\2\\u0c43\\u0224\\3\\2\\2\\2\\u0c44\\u0c45\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u0c45\\u0c46\\5\\u045d\\u022f\\2\\u0c46\\u0c47\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u0c47\\u0c48\\5\\u043d\\u021f\\2\\u0c48\\u0c49\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u0c49\\u0226\\3\\2\\2\\2\\u0c4a\\u0c4b\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u0c4b\\u0c4c\\5\\u045f\\u0230\\2\\u0c4c\\u0c4d\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0c4d\\u0c4e\\5\\u0457\\u022c\\2\\u0c4e\\u0228\\3\\2\\2\")\n buf.write(\"\\2\\u0c4f\\u0c50\\5\\u0451\\u0229\\2\\u0c50\\u0c51\\5\\u045f\\u0230\")\n buf.write(\"\\2\\u0c51\\u0c52\\5\\u043d\\u021f\\2\\u0c52\\u0c53\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u0c53\\u0c54\\5\\u0457\\u022c\\2\\u0c54\\u0c55\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u0c55\\u0c56\\5\\u043b\\u021e\\2\\u0c56\\u0c57\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u0c57\\u0c58\\5\\u044f\\u0228\\2\\u0c58\\u0c59\\5\\u0441\\u0221\")\n buf.write(\"\\2\\u0c59\\u022a\\3\\2\\2\\2\\u0c5a\\u0c5b\\5\\u0453\\u022a\\2\\u0c5b\")\n 
buf.write(\"\\u0c5c\\5\\u0435\\u021b\\2\\u0c5c\\u0c5d\\5\\u0439\\u021d\\2\\u0c5d\")\n buf.write(\"\\u0c5e\\5\\u0449\\u0225\\2\\u0c5e\\u0c5f\\5\\u0435\\u021b\\2\\u0c5f\")\n buf.write(\"\\u0c60\\5\\u0441\\u0221\\2\\u0c60\\u0c61\\5\\u043d\\u021f\\2\\u0c61\")\n buf.write(\"\\u022c\\3\\2\\2\\2\\u0c62\\u0c63\\5\\u0453\\u022a\\2\\u0c63\\u0c64\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u0c64\\u0c65\\5\\u0457\\u022c\\2\\u0c65\\u0c66\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u0c66\\u0c67\\5\\u044b\\u0226\\2\\u0c67\\u0c68\")\n buf.write(\"\\5\\u044b\\u0226\\2\\u0c68\\u0c69\\5\\u043d\\u021f\\2\\u0c69\\u0c6a\")\n buf.write(\"\\5\\u044b\\u0226\\2\\u0c6a\\u0c6b\\7a\\2\\2\\u0c6b\\u0c6c\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0c6c\\u0c6d\\5\\u044f\\u0228\\2\\u0c6d\\u0c6e\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0c6e\\u0c6f\\5\\u0437\\u021c\\2\\u0c6f\\u0c70\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u0c70\\u0c71\\5\\u043d\\u021f\\2\\u0c71\\u022e\\3\\2\\2\")\n buf.write(\"\\2\\u0c72\\u0c73\\5\\u0453\\u022a\\2\\u0c73\\u0c74\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u0c74\\u0c75\\5\\u0457\\u022c\\2\\u0c75\\u0c76\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u0c76\\u0c77\\5\\u044d\\u0227\\2\\u0c77\\u0c78\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0c78\\u0c79\\5\\u045b\\u022e\\2\\u0c79\\u0c7a\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0c7a\\u0c7b\\5\\u0457\\u022c\\2\\u0c7b\\u0c7c\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u0c7c\\u0230\\3\\2\\2\\2\\u0c7d\\u0c7e\\5\\u0453\\u022a\\2\\u0c7e\")\n buf.write(\"\\u0c7f\\5\\u0435\\u021b\\2\\u0c7f\\u0c80\\5\\u0457\\u022c\\2\\u0c80\")\n buf.write(\"\\u0c81\\5\\u043d\\u021f\\2\\u0c81\\u0c82\\5\\u044f\\u0228\\2\\u0c82\")\n buf.write(\"\\u0c83\\5\\u045b\\u022e\\2\\u0c83\\u0232\\3\\2\\2\\2\\u0c84\\u0c85\")\n buf.write(\"\\5\\u0453\\u022a\\2\\u0c85\\u0c86\\5\\u0435\\u021b\\2\\u0c86\\u0c87\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u0c87\\u0c88\\5\\u045b\\u022e\\2\\u0c88\\u0c89\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u0c89\\u0c8a\\5\\u045b\\u022e\\2\\u0c8a\\u0c8b\")\n 
buf.write(\"\\5\\u0445\\u0223\\2\\u0c8b\\u0c8c\\5\\u0451\\u0229\\2\\u0c8c\\u0c8d\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u0c8d\\u0234\\3\\2\\2\\2\\u0c8e\\u0c8f\\5\\u0453\")\n buf.write(\"\\u022a\\2\\u0c8f\\u0c90\\5\\u0435\\u021b\\2\\u0c90\\u0c91\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u0c91\\u0c92\\5\\u0459\\u022d\\2\\u0c92\\u0c93\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u0c93\\u0c94\\5\\u044f\\u0228\\2\\u0c94\\u0c95\\5\\u0441\")\n buf.write(\"\\u0221\\2\\u0c95\\u0236\\3\\2\\2\\2\\u0c96\\u0c97\\5\\u0453\\u022a\")\n buf.write(\"\\2\\u0c97\\u0c98\\5\\u0435\\u021b\\2\\u0c98\\u0c99\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u0c99\\u0c9a\\5\\u0443\\u0222\\2\\u0c9a\\u0238\\3\\2\\2\\2\\u0c9b\")\n buf.write(\"\\u0c9c\\7\\'\\2\\2\\u0c9c\\u0c9d\\5\\u0457\\u022c\\2\\u0c9d\\u0c9e\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u0c9e\\u0c9f\\5\\u0461\\u0231\\2\\u0c9f\\u0ca0\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u0ca0\\u0ca1\\5\\u0465\\u0233\\2\\u0ca1\\u0ca2\")\n buf.write(\"\\5\\u0453\\u022a\\2\\u0ca2\\u0ca3\\5\\u043d\\u021f\\2\\u0ca3\\u023a\")\n buf.write(\"\\3\\2\\2\\2\\u0ca4\\u0ca5\\7\\'\\2\\2\\u0ca5\\u0ca6\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u0ca6\\u0ca7\\5\\u0465\\u0233\\2\\u0ca7\\u0ca8\\5\\u0453\\u022a\")\n buf.write(\"\\2\\u0ca8\\u0ca9\\5\\u043d\\u021f\\2\\u0ca9\\u023c\\3\\2\\2\\2\\u0caa\")\n buf.write(\"\\u0cab\\5\\u0453\\u022a\\2\\u0cab\\u0cac\\5\\u0445\\u0223\\2\\u0cac\")\n buf.write(\"\\u0cad\\5\\u0453\\u022a\\2\\u0cad\\u0cae\\5\\u043d\\u021f\\2\\u0cae\")\n buf.write(\"\\u0caf\\5\\u044b\\u0226\\2\\u0caf\\u0cb0\\5\\u0445\\u0223\\2\\u0cb0\")\n buf.write(\"\\u0cb1\\5\\u044f\\u0228\\2\\u0cb1\\u0cb2\\5\\u043d\\u021f\\2\\u0cb2\")\n buf.write(\"\\u0cb3\\5\\u043b\\u021e\\2\\u0cb3\\u023e\\3\\2\\2\\2\\u0cb4\\u0cb5\")\n buf.write(\"\\5\\u0453\\u022a\\2\\u0cb5\\u0cb6\\5\\u0445\\u0223\\2\\u0cb6\\u0cb7\")\n buf.write(\"\\5\\u045f\\u0230\\2\\u0cb7\\u0cb8\\5\\u0451\\u0229\\2\\u0cb8\\u0cb9\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u0cb9\\u0240\\3\\2\\2\\2\\u0cba\\u0cbb\\5\\u0453\")\n 
buf.write(\"\\u022a\\2\\u0cbb\\u0cbc\\5\\u044b\\u0226\\2\\u0cbc\\u0cbd\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0cbd\\u0cbe\\5\\u044f\\u0228\\2\\u0cbe\\u0242\\3\\2\\2\")\n buf.write(\"\\2\\u0cbf\\u0cc0\\5\\u0453\\u022a\\2\\u0cc0\\u0cc1\\5\\u044b\\u0226\")\n buf.write(\"\\2\\u0cc1\\u0cc2\\5\\u0459\\u022d\\2\\u0cc2\\u0cc3\\7a\\2\\2\\u0cc3\")\n buf.write(\"\\u0cc4\\5\\u0445\\u0223\\2\\u0cc4\\u0cc5\\5\\u044f\\u0228\\2\\u0cc5\")\n buf.write(\"\\u0cc6\\5\\u045b\\u022e\\2\\u0cc6\\u0cc7\\5\\u043d\\u021f\\2\\u0cc7\")\n buf.write(\"\\u0cc8\\5\\u0441\\u0221\\2\\u0cc8\\u0cc9\\5\\u043d\\u021f\\2\\u0cc9\")\n buf.write(\"\\u0cca\\5\\u0457\\u022c\\2\\u0cca\\u0244\\3\\2\\2\\2\\u0ccb\\u0ccc\")\n buf.write(\"\\5\\u0453\\u022a\\2\\u0ccc\\u0ccd\\5\\u0451\\u0229\\2\\u0ccd\\u0cce\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u0cce\\u0ccf\\5\\u0445\\u0223\\2\\u0ccf\\u0cd0\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u0cd0\\u0cd1\\5\\u0445\\u0223\\2\\u0cd1\\u0cd2\")\n buf.write(\"\\5\\u045f\\u0230\\2\\u0cd2\\u0cd3\\5\\u043d\\u021f\\2\\u0cd3\\u0246\")\n buf.write(\"\\3\\2\\2\\2\\u0cd4\\u0cd5\\5\\u0453\\u022a\\2\\u0cd5\\u0cd6\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u0cd6\\u0cd7\\5\\u0459\\u022d\\2\\u0cd7\\u0cd8\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u0cd8\\u0cd9\\5\\u045b\\u022e\\2\\u0cd9\\u0cda\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u0cda\\u0cdb\\5\\u045f\\u0230\\2\\u0cdb\\u0cdc\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0cdc\\u0cdd\\5\\u044f\\u0228\\2\\u0cdd\\u0248\\3\\2\\2\")\n buf.write(\"\\2\\u0cde\\u0cdf\\5\\u0453\\u022a\\2\\u0cdf\\u0ce0\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u0ce0\\u0ce1\\5\\u0435\\u021b\\2\\u0ce1\\u0ce2\\5\\u0441\\u0221\")\n buf.write(\"\\2\\u0ce2\\u0ce3\\5\\u044d\\u0227\\2\\u0ce3\\u0ce4\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u0ce4\\u024a\\3\\2\\2\\2\\u0ce5\\u0ce6\\5\\u0453\\u022a\\2\\u0ce6\")\n buf.write(\"\\u0ce7\\5\\u0457\\u022c\\2\\u0ce7\\u0ce8\\5\\u043d\\u021f\\2\\u0ce8\")\n buf.write(\"\\u0ce9\\5\\u0439\\u021d\\2\\u0ce9\\u0cea\\5\\u043d\\u021f\\2\\u0cea\")\n 
buf.write(\"\\u0ceb\\5\\u043b\\u021e\\2\\u0ceb\\u0cec\\5\\u0445\\u0223\\2\\u0cec\")\n buf.write(\"\\u0ced\\5\\u044f\\u0228\\2\\u0ced\\u0cee\\5\\u0441\\u0221\\2\\u0cee\")\n buf.write(\"\\u024c\\3\\2\\2\\2\\u0cef\\u0cf0\\5\\u0453\\u022a\\2\\u0cf0\\u0cf1\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u0cf1\\u0cf2\\5\\u043d\\u021f\\2\\u0cf2\\u0cf3\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u0cf3\\u0cf4\\5\\u0445\\u0223\\2\\u0cf4\\u0cf5\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u0cf5\\u0cf6\\5\\u0445\\u0223\\2\\u0cf6\\u0cf7\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u0cf7\\u0cf8\\5\\u044f\\u0228\\2\\u0cf8\\u024e\")\n buf.write(\"\\3\\2\\2\\2\\u0cf9\\u0cfa\\5\\u0453\\u022a\\2\\u0cfa\\u0cfb\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u0cfb\\u0cfc\\5\\u043d\\u021f\\2\\u0cfc\\u0cfd\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u0cfd\\u0cfe\\5\\u043d\\u021f\\2\\u0cfe\\u0cff\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u0cff\\u0d00\\5\\u045b\\u022e\\2\\u0d00\\u0250\\3\\2\\2\")\n buf.write(\"\\2\\u0d01\\u0d02\\5\\u0453\\u022a\\2\\u0d02\\u0d03\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u0d03\\u0d04\\5\\u0445\\u0223\\2\\u0d04\\u0d05\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u0d05\\u0d06\\5\\u0457\\u022c\\2\\u0d06\\u0252\\3\\2\\2\\2\\u0d07\")\n buf.write(\"\\u0d08\\5\\u0453\\u022a\\2\\u0d08\\u0d09\\5\\u0457\\u022c\\2\\u0d09\")\n buf.write(\"\\u0d0a\\5\\u0451\\u0229\\2\\u0d0a\\u0d0b\\5\\u0439\\u021d\\2\\u0d0b\")\n buf.write(\"\\u0d0c\\5\\u043d\\u021f\\2\\u0d0c\\u0d0d\\5\\u043b\\u021e\\2\\u0d0d\")\n buf.write(\"\\u0d0e\\5\\u045d\\u022f\\2\\u0d0e\\u0d0f\\5\\u0457\\u022c\\2\\u0d0f\")\n buf.write(\"\\u0d10\\5\\u043d\\u021f\\2\\u0d10\\u0254\\3\\2\\2\\2\\u0d11\\u0d12\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u0d12\\u0d13\\5\\u0435\\u021b\\2\\u0d13\\u0d14\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u0d14\\u0d15\\5\\u0459\\u022d\\2\\u0d15\\u0d16\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0d16\\u0256\\3\\2\\2\\2\\u0d17\\u0d18\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u0d18\\u0d19\\5\\u0435\\u021b\\2\\u0d19\\u0d1a\\5\\u044f\")\n 
buf.write(\"\\u0228\\2\\u0d1a\\u0d1b\\5\\u0441\\u0221\\2\\u0d1b\\u0d1c\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0d1c\\u0258\\3\\2\\2\\2\\u0d1d\\u0d1e\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u0d1e\\u0d1f\\5\\u0435\\u021b\\2\\u0d1f\\u0d20\\5\\u0461\\u0231\")\n buf.write(\"\\2\\u0d20\\u025a\\3\\2\\2\\2\\u0d21\\u0d22\\5\\u0457\\u022c\\2\\u0d22\")\n buf.write(\"\\u0d23\\5\\u043d\\u021f\\2\\u0d23\\u0d24\\5\\u0435\\u021b\\2\\u0d24\")\n buf.write(\"\\u0d25\\5\\u043b\\u021e\\2\\u0d25\\u025c\\3\\2\\2\\2\\u0d26\\u0d27\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u0d27\\u0d28\\5\\u043d\\u021f\\2\\u0d28\\u0d29\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u0d29\\u0d2a\\5\\u044b\\u0226\\2\\u0d2a\\u025e\")\n buf.write(\"\\3\\2\\2\\2\\u0d2b\\u0d2c\\5\\u0457\\u022c\\2\\u0d2c\\u0d2d\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0d2d\\u0d2e\\5\\u0439\\u021d\\2\\u0d2e\\u0d2f\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u0d2f\\u0d30\\5\\u0457\\u022c\\2\\u0d30\\u0d31\\5\\u043b\")\n buf.write(\"\\u021e\\2\\u0d31\\u0260\\3\\2\\2\\2\\u0d32\\u0d33\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u0d33\\u0d34\\5\\u043d\\u021f\\2\\u0d34\\u0d35\\5\\u043f\\u0220\")\n buf.write(\"\\2\\u0d35\\u0262\\3\\2\\2\\2\\u0d36\\u0d37\\5\\u0457\\u022c\\2\\u0d37\")\n buf.write(\"\\u0d38\\5\\u043d\\u021f\\2\\u0d38\\u0d39\\5\\u043f\\u0220\\2\\u0d39\")\n buf.write(\"\\u0d3a\\5\\u043d\\u021f\\2\\u0d3a\\u0d3b\\5\\u0457\\u022c\\2\\u0d3b\")\n buf.write(\"\\u0d3c\\5\\u043d\\u021f\\2\\u0d3c\\u0d3d\\5\\u044f\\u0228\\2\\u0d3d\")\n buf.write(\"\\u0d3e\\5\\u0439\\u021d\\2\\u0d3e\\u0d3f\\5\\u043d\\u021f\\2\\u0d3f\")\n buf.write(\"\\u0264\\3\\2\\2\\2\\u0d40\\u0d41\\5\\u0457\\u022c\\2\\u0d41\\u0d42\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0d42\\u0d43\\5\\u043f\\u0220\\2\\u0d43\\u0d44\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0d44\\u0d45\\5\\u0457\\u022c\\2\\u0d45\\u0d46\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0d46\\u0d47\\5\\u044f\\u0228\\2\\u0d47\\u0d48\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u0d48\\u0d49\\5\\u0445\\u0223\\2\\u0d49\\u0d4a\")\n 
buf.write(\"\\5\\u044f\\u0228\\2\\u0d4a\\u0d4b\\5\\u0441\\u0221\\2\\u0d4b\\u0266\")\n buf.write(\"\\3\\2\\2\\2\\u0d4c\\u0d4d\\5\\u0457\\u022c\\2\\u0d4d\\u0d4e\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0d4e\\u0d4f\\5\\u0447\\u0224\\2\\u0d4f\\u0d50\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0d50\\u0d51\\5\\u0439\\u021d\\2\\u0d51\\u0d52\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0d52\\u0268\\3\\2\\2\\2\\u0d53\\u0d54\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u0d54\\u0d55\\5\\u043d\\u021f\\2\\u0d55\\u0d56\\5\\u044b\\u0226\")\n buf.write(\"\\2\\u0d56\\u0d57\\5\\u0445\\u0223\\2\\u0d57\\u0d58\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0d58\\u0d59\\5\\u0459\\u022d\\2\\u0d59\\u0d5a\\7a\\2\\2\\u0d5a\")\n buf.write(\"\\u0d5b\\5\\u0451\\u0229\\2\\u0d5b\\u0d5c\\5\\u044f\\u0228\\2\\u0d5c\")\n buf.write(\"\\u026a\\3\\2\\2\\2\\u0d5d\\u0d5e\\5\\u0457\\u022c\\2\\u0d5e\\u0d5f\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0d5f\\u0d60\\5\\u044f\\u0228\\2\\u0d60\\u0d61\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u0d61\\u0d62\\5\\u044d\\u0227\\2\\u0d62\\u0d63\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0d63\\u026c\\3\\2\\2\\2\\u0d64\\u0d65\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u0d65\\u0d66\\5\\u043d\\u021f\\2\\u0d66\\u0d67\\5\\u0453\")\n buf.write(\"\\u022a\\2\\u0d67\\u0d68\\5\\u044b\\u0226\\2\\u0d68\\u0d69\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0d69\\u0d6a\\5\\u0439\\u021d\\2\\u0d6a\\u0d6b\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0d6b\\u026e\\3\\2\\2\\2\\u0d6c\\u0d6d\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u0d6d\\u0d6e\\5\\u043d\\u021f\\2\\u0d6e\\u0d6f\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u0d6f\\u0d70\\5\\u0453\\u022a\\2\\u0d70\\u0d71\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0d71\\u0d72\\5\\u0439\\u021d\\2\\u0d72\\u0d73\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u0d73\\u0270\\3\\2\\2\\2\\u0d74\\u0d75\\5\\u0457\\u022c\\2\\u0d75\")\n buf.write(\"\\u0d76\\5\\u043d\\u021f\\2\\u0d76\\u0d77\\5\\u0459\\u022d\\2\\u0d77\")\n buf.write(\"\\u0d78\\5\\u045b\\u022e\\2\\u0d78\\u0d79\\5\\u0457\\u022c\\2\\u0d79\")\n 
buf.write(\"\\u0d7a\\5\\u0445\\u0223\\2\\u0d7a\\u0d7b\\5\\u0439\\u021d\\2\\u0d7b\")\n buf.write(\"\\u0d7c\\5\\u045b\\u022e\\2\\u0d7c\\u0d7d\\7a\\2\\2\\u0d7d\\u0d7e\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u0d7e\\u0d7f\\5\\u043d\\u021f\\2\\u0d7f\\u0d80\")\n buf.write(\"\\5\\u043f\\u0220\\2\\u0d80\\u0d81\\5\\u043d\\u021f\\2\\u0d81\\u0d82\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u0d82\\u0d83\\5\\u043d\\u021f\\2\\u0d83\\u0d84\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u0d84\\u0d85\\5\\u0439\\u021d\\2\\u0d85\\u0d86\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0d86\\u0d87\\5\\u0459\\u022d\\2\\u0d87\\u0272\")\n buf.write(\"\\3\\2\\2\\2\\u0d88\\u0d89\\5\\u0457\\u022c\\2\\u0d89\\u0d8a\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0d8a\\u0d8b\\5\\u0459\\u022d\\2\\u0d8b\\u0d8c\\5\\u045d\")\n buf.write(\"\\u022f\\2\\u0d8c\\u0d8d\\5\\u044b\\u0226\\2\\u0d8d\\u0d8e\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0d8e\\u0274\\3\\2\\2\\2\\u0d8f\\u0d90\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u0d90\\u0d91\\5\\u043d\\u021f\\2\\u0d91\\u0d92\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u0d92\\u0d93\\5\\u045d\\u022f\\2\\u0d93\\u0d94\\5\\u044b\\u0226\")\n buf.write(\"\\2\\u0d94\\u0d95\\5\\u045b\\u022e\\2\\u0d95\\u0d96\\7a\\2\\2\\u0d96\")\n buf.write(\"\\u0d97\\5\\u0439\\u021d\\2\\u0d97\\u0d98\\5\\u0435\\u021b\\2\\u0d98\")\n buf.write(\"\\u0d99\\5\\u0439\\u021d\\2\\u0d99\\u0d9a\\5\\u0443\\u0222\\2\\u0d9a\")\n buf.write(\"\\u0d9b\\5\\u043d\\u021f\\2\\u0d9b\\u0276\\3\\2\\2\\2\\u0d9c\\u0d9d\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u0d9d\\u0d9e\\5\\u043d\\u021f\\2\\u0d9e\\u0d9f\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u0d9f\\u0da0\\5\\u045d\\u022f\\2\\u0da0\\u0da1\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u0da1\\u0da2\\5\\u044f\\u0228\\2\\u0da2\\u0278\")\n buf.write(\"\\3\\2\\2\\2\\u0da3\\u0da4\\5\\u0457\\u022c\\2\\u0da4\\u0da5\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0da5\\u0da6\\5\\u045b\\u022e\\2\\u0da6\\u0da7\\5\\u045d\")\n buf.write(\"\\u022f\\2\\u0da7\\u0da8\\5\\u0457\\u022c\\2\\u0da8\\u0da9\\5\\u044f\")\n 
buf.write(\"\\u0228\\2\\u0da9\\u0daa\\5\\u0445\\u0223\\2\\u0daa\\u0dab\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u0dab\\u0dac\\5\\u0441\\u0221\\2\\u0dac\\u027a\\3\\2\\2\")\n buf.write(\"\\2\\u0dad\\u0dae\\5\\u0457\\u022c\\2\\u0dae\\u0daf\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0daf\\u0db0\\5\\u045d\\u022f\\2\\u0db0\\u0db1\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u0db1\\u0db2\\5\\u043d\\u021f\\2\\u0db2\\u027c\\3\\2\\2\\2\\u0db3\")\n buf.write(\"\\u0db4\\5\\u0457\\u022c\\2\\u0db4\\u0db5\\5\\u043d\\u021f\\2\\u0db5\")\n buf.write(\"\\u0db6\\5\\u045f\\u0230\\2\\u0db6\\u0db7\\5\\u043d\\u021f\\2\\u0db7\")\n buf.write(\"\\u0db8\\5\\u0457\\u022c\\2\\u0db8\\u0db9\\5\\u0459\\u022d\\2\\u0db9\")\n buf.write(\"\\u0dba\\5\\u043d\\u021f\\2\\u0dba\\u027e\\3\\2\\2\\2\\u0dbb\\u0dbc\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u0dbc\\u0dbd\\5\\u043d\\u021f\\2\\u0dbd\\u0dbe\")\n buf.write(\"\\5\\u045f\\u0230\\2\\u0dbe\\u0dbf\\5\\u0451\\u0229\\2\\u0dbf\\u0dc0\")\n buf.write(\"\\5\\u0449\\u0225\\2\\u0dc0\\u0dc1\\5\\u043d\\u021f\\2\\u0dc1\\u0280\")\n buf.write(\"\\3\\2\\2\\2\\u0dc2\\u0dc3\\5\\u0457\\u022c\\2\\u0dc3\\u0dc4\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u0dc4\\u0dc5\\5\\u0441\\u0221\\2\\u0dc5\\u0dc6\\5\\u0443\")\n buf.write(\"\\u0222\\2\\u0dc6\\u0dc7\\5\\u045b\\u022e\\2\\u0dc7\\u0282\\3\\2\\2\")\n buf.write(\"\\2\\u0dc8\\u0dc9\\5\\u0457\\u022c\\2\\u0dc9\\u0dca\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u0dca\\u0dcb\\5\\u044b\\u0226\\2\\u0dcb\\u0dcc\\5\\u044b\\u0226\")\n buf.write(\"\\2\\u0dcc\\u0dcd\\5\\u0437\\u021c\\2\\u0dcd\\u0dce\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u0dce\\u0dcf\\5\\u0439\\u021d\\2\\u0dcf\\u0dd0\\5\\u0449\\u0225\")\n buf.write(\"\\2\\u0dd0\\u0284\\3\\2\\2\\2\\u0dd1\\u0dd2\\5\\u0457\\u022c\\2\\u0dd2\")\n buf.write(\"\\u0dd3\\5\\u0451\\u0229\\2\\u0dd3\\u0dd4\\5\\u044b\\u0226\\2\\u0dd4\")\n buf.write(\"\\u0dd5\\5\\u044b\\u0226\\2\\u0dd5\\u0dd6\\5\\u045d\\u022f\\2\\u0dd6\")\n buf.write(\"\\u0dd7\\5\\u0453\\u022a\\2\\u0dd7\\u0286\\3\\2\\2\\2\\u0dd8\\u0dd9\")\n 
buf.write(\"\\5\\u0457\\u022c\\2\\u0dd9\\u0dda\\5\\u0451\\u0229\\2\\u0dda\\u0ddb\")\n buf.write(\"\\5\\u0461\\u0231\\2\\u0ddb\\u0288\\3\\2\\2\\2\\u0ddc\\u0ddd\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u0ddd\\u0dde\\5\\u0451\\u0229\\2\\u0dde\\u0ddf\\5\\u0461\")\n buf.write(\"\\u0231\\2\\u0ddf\\u0de0\\5\\u0445\\u0223\\2\\u0de0\\u0de1\\5\\u043b\")\n buf.write(\"\\u021e\\2\\u0de1\\u028a\\3\\2\\2\\2\\u0de2\\u0de3\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u0de3\\u0de4\\5\\u0451\\u0229\\2\\u0de4\\u0de5\\5\\u0461\\u0231\")\n buf.write(\"\\2\\u0de5\\u0de6\\5\\u0459\\u022d\\2\\u0de6\\u028c\\3\\2\\2\\2\\u0de7\")\n buf.write(\"\\u0de8\\5\\u0457\\u022c\\2\\u0de8\\u0de9\\5\\u045d\\u022f\\2\\u0de9\")\n buf.write(\"\\u0dea\\5\\u044b\\u0226\\2\\u0dea\\u0deb\\5\\u043d\\u021f\\2\\u0deb\")\n buf.write(\"\\u0dec\\5\\u0459\\u022d\\2\\u0dec\\u028e\\3\\2\\2\\2\\u0ded\\u0dee\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u0dee\\u0def\\5\\u0435\\u021b\\2\\u0def\\u0df0\")\n buf.write(\"\\5\\u044d\\u0227\\2\\u0df0\\u0df1\\5\\u0453\\u022a\\2\\u0df1\\u0df2\")\n buf.write(\"\\5\\u044b\\u0226\\2\\u0df2\\u0df3\\5\\u043d\\u021f\\2\\u0df3\\u0290\")\n buf.write(\"\\3\\2\\2\\2\\u0df4\\u0df5\\5\\u0459\\u022d\\2\\u0df5\\u0df6\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0df6\\u0df7\\5\\u045f\\u0230\\2\\u0df7\\u0df8\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0df8\\u0292\\3\\2\\2\\2\\u0df9\\u0dfa\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u0dfa\\u0dfb\\5\\u0435\\u021b\\2\\u0dfb\\u0dfc\\5\\u045f\\u0230\")\n buf.write(\"\\2\\u0dfc\\u0dfd\\5\\u043d\\u021f\\2\\u0dfd\\u0dfe\\5\\u0453\\u022a\")\n buf.write(\"\\2\\u0dfe\\u0dff\\5\\u0451\\u0229\\2\\u0dff\\u0e00\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u0e00\\u0e01\\5\\u044f\\u0228\\2\\u0e01\\u0e02\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u0e02\\u0294\\3\\2\\2\\2\\u0e03\\u0e04\\5\\u0459\\u022d\\2\\u0e04\")\n buf.write(\"\\u0e05\\5\\u0439\\u021d\\2\\u0e05\\u0e06\\5\\u0443\\u0222\\2\\u0e06\")\n buf.write(\"\\u0e07\\5\\u043d\\u021f\\2\\u0e07\\u0e08\\5\\u044d\\u0227\\2\\u0e08\")\n 
buf.write(\"\\u0e09\\5\\u0435\\u021b\\2\\u0e09\\u0296\\3\\2\\2\\2\\u0e0a\\u0e0b\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u0e0b\\u0e0c\\5\\u0439\\u021d\\2\\u0e0c\\u0e0d\")\n buf.write(\"\\5\\u0443\\u0222\\2\\u0e0d\\u0e0e\\5\\u043d\\u021f\\2\\u0e0e\\u0e0f\")\n buf.write(\"\\5\\u044d\\u0227\\2\\u0e0f\\u0e10\\5\\u0435\\u021b\\2\\u0e10\\u0e11\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u0e11\\u0e12\\5\\u0443\\u0222\\2\\u0e12\\u0e13\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0e13\\u0e14\\5\\u0439\\u021d\\2\\u0e14\\u0e15\")\n buf.write(\"\\5\\u0449\\u0225\\2\\u0e15\\u0298\\3\\2\\2\\2\\u0e16\\u0e17\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u0e17\\u0e18\\5\\u0439\\u021d\\2\\u0e18\\u0e19\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u0e19\\u029a\\3\\2\\2\\2\\u0e1a\\u0e1b\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u0e1b\\u0e1c\\5\\u043d\\u021f\\2\\u0e1c\\u0e1d\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u0e1d\\u0e1e\\5\\u0457\\u022c\\2\\u0e1e\\u0e1f\\5\\u0439\\u021d\")\n buf.write(\"\\2\\u0e1f\\u0e20\\5\\u0443\\u0222\\2\\u0e20\\u029c\\3\\2\\2\\2\\u0e21\")\n buf.write(\"\\u0e22\\5\\u0459\\u022d\\2\\u0e22\\u0e23\\5\\u043d\\u021f\\2\\u0e23\")\n buf.write(\"\\u0e24\\5\\u0439\\u021d\\2\\u0e24\\u0e25\\5\\u0451\\u0229\\2\\u0e25\")\n buf.write(\"\\u0e26\\5\\u044f\\u0228\\2\\u0e26\\u0e27\\5\\u043b\\u021e\\2\\u0e27\")\n buf.write(\"\\u029e\\3\\2\\2\\2\\u0e28\\u0e29\\5\\u0459\\u022d\\2\\u0e29\\u0e2a\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0e2a\\u0e2b\\5\\u043d\\u021f\\2\\u0e2b\\u0e2c\")\n buf.write(\"\\5\\u043b\\u021e\\2\\u0e2c\\u02a0\\3\\2\\2\\2\\u0e2d\\u0e2e\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u0e2e\\u0e2f\\5\\u043d\\u021f\\2\\u0e2f\\u0e30\\5\\u0441\")\n buf.write(\"\\u0221\\2\\u0e30\\u0e31\\5\\u044d\\u0227\\2\\u0e31\\u0e32\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0e32\\u0e33\\5\\u044f\\u0228\\2\\u0e33\\u0e34\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0e34\\u02a2\\3\\2\\2\\2\\u0e35\\u0e36\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u0e36\\u0e37\\5\\u043d\\u021f\\2\\u0e37\\u0e38\\5\\u044b\\u0226\")\n 
buf.write(\"\\2\\u0e38\\u0e39\\5\\u043d\\u021f\\2\\u0e39\\u0e3a\\5\\u0439\\u021d\")\n buf.write(\"\\2\\u0e3a\\u0e3b\\5\\u045b\\u022e\\2\\u0e3b\\u02a4\\3\\2\\2\\2\\u0e3c\")\n buf.write(\"\\u0e3d\\5\\u0459\\u022d\\2\\u0e3d\\u0e3e\\5\\u043d\\u021f\\2\\u0e3e\")\n buf.write(\"\\u0e3f\\5\\u044b\\u0226\\2\\u0e3f\\u0e40\\5\\u043f\\u0220\\2\\u0e40\")\n buf.write(\"\\u02a6\\3\\2\\2\\2\\u0e41\\u0e42\\5\\u0459\\u022d\\2\\u0e42\\u0e43\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0e43\\u0e44\\5\\u0455\\u022b\\2\\u0e44\\u0e45\")\n buf.write(\"\\5\\u045d\\u022f\\2\\u0e45\\u0e46\\5\\u043d\\u021f\\2\\u0e46\\u0e47\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u0e47\\u0e48\\5\\u0439\\u021d\\2\\u0e48\\u0e49\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0e49\\u02a8\\3\\2\\2\\2\\u0e4a\\u0e4b\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u0e4b\\u0e4c\\5\\u043d\\u021f\\2\\u0e4c\\u0e4d\\5\\u0455\")\n buf.write(\"\\u022b\\2\\u0e4d\\u0e4e\\5\\u045d\\u022f\\2\\u0e4e\\u0e4f\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0e4f\\u0e50\\5\\u044f\\u0228\\2\\u0e50\\u0e51\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0e51\\u0e52\\5\\u0445\\u0223\\2\\u0e52\\u0e53\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0e53\\u0e54\\5\\u044b\\u0226\\2\\u0e54\\u02aa\\3\\2\\2\")\n buf.write(\"\\2\\u0e55\\u0e56\\5\\u0459\\u022d\\2\\u0e56\\u0e57\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0e57\\u0e58\\5\\u0457\\u022c\\2\\u0e58\\u0e59\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u0e59\\u0e5a\\5\\u0435\\u021b\\2\\u0e5a\\u0e5b\\5\\u044b\\u0226\")\n buf.write(\"\\2\\u0e5b\\u0e5c\\5\\u0445\\u0223\\2\\u0e5c\\u0e5d\\5\\u0467\\u0234\")\n buf.write(\"\\2\\u0e5d\\u0e5e\\5\\u0435\\u021b\\2\\u0e5e\\u0e5f\\5\\u0437\\u021c\")\n buf.write(\"\\2\\u0e5f\\u0e60\\5\\u044b\\u0226\\2\\u0e60\\u0e61\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0e61\\u02ac\\3\\2\\2\\2\\u0e62\\u0e63\\5\\u0459\\u022d\\2\\u0e63\")\n buf.write(\"\\u0e64\\5\\u043d\\u021f\\2\\u0e64\\u0e65\\5\\u0457\\u022c\\2\\u0e65\")\n buf.write(\"\\u0e66\\5\\u0445\\u0223\\2\\u0e66\\u0e67\\5\\u0435\\u021b\\2\\u0e67\")\n 
buf.write(\"\\u0e68\\5\\u044b\\u0226\\2\\u0e68\\u0e69\\5\\u044b\\u0226\\2\\u0e69\")\n buf.write(\"\\u0e6a\\5\\u0465\\u0233\\2\\u0e6a\\u0e6b\\7a\\2\\2\\u0e6b\\u0e6c\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u0e6c\\u0e6d\\5\\u043d\\u021f\\2\\u0e6d\\u0e6e\")\n buf.write(\"\\5\\u045d\\u022f\\2\\u0e6e\\u0e6f\\5\\u0459\\u022d\\2\\u0e6f\\u0e70\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u0e70\\u0e71\\5\\u0437\\u021c\\2\\u0e71\\u0e72\")\n buf.write(\"\\5\\u044b\\u0226\\2\\u0e72\\u0e73\\5\\u043d\\u021f\\2\\u0e73\\u02ae\")\n buf.write(\"\\3\\2\\2\\2\\u0e74\\u0e75\\5\\u0459\\u022d\\2\\u0e75\\u0e76\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0e76\\u0e77\\5\\u0457\\u022c\\2\\u0e77\\u0e78\\5\\u045f\")\n buf.write(\"\\u0230\\2\\u0e78\\u0e79\\5\\u043d\\u021f\\2\\u0e79\\u0e7a\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u0e7a\\u0e7b\\5\\u043d\\u021f\\2\\u0e7b\\u0e7c\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u0e7c\\u0e7d\\5\\u0457\\u022c\\2\\u0e7d\\u0e7e\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u0e7e\\u0e7f\\5\\u0457\\u022c\\2\\u0e7f\\u02b0\\3\\2\\2\")\n buf.write(\"\\2\\u0e80\\u0e81\\5\\u0459\\u022d\\2\\u0e81\\u0e82\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0e82\\u0e83\\5\\u0459\\u022d\\2\\u0e83\\u0e84\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u0e84\\u0e85\\5\\u0445\\u0223\\2\\u0e85\\u0e86\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u0e86\\u0e87\\5\\u044f\\u0228\\2\\u0e87\\u0e88\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u0e88\\u0e89\\5\\u0445\\u0223\\2\\u0e89\\u0e8a\\5\\u044d\\u0227\")\n buf.write(\"\\2\\u0e8a\\u0e8b\\5\\u043d\\u021f\\2\\u0e8b\\u0e8c\\5\\u0467\\u0234\")\n buf.write(\"\\2\\u0e8c\\u0e8d\\5\\u0451\\u0229\\2\\u0e8d\\u0e8e\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u0e8e\\u0e8f\\5\\u043d\\u021f\\2\\u0e8f\\u02b2\\3\\2\\2\\2\\u0e90\")\n buf.write(\"\\u0e91\\5\\u0459\\u022d\\2\\u0e91\\u0e92\\5\\u043d\\u021f\\2\\u0e92\")\n buf.write(\"\\u0e93\\5\\u045b\\u022e\\2\\u0e93\\u02b4\\3\\2\\2\\2\\u0e94\\u0e95\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u0e95\\u0e96\\5\\u043d\\u021f\\2\\u0e96\\u0e97\")\n 
buf.write(\"\\5\\u045b\\u022e\\2\\u0e97\\u0e98\\5\\u0459\\u022d\\2\\u0e98\\u02b6\")\n buf.write(\"\\3\\2\\2\\2\\u0e99\\u0e9a\\5\\u0459\\u022d\\2\\u0e9a\\u0e9b\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0e9b\\u0e9c\\5\\u045b\\u022e\\2\\u0e9c\\u0e9d\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0e9d\\u0e9e\\5\\u0445\\u0223\\2\\u0e9e\\u0e9f\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u0e9f\\u0ea0\\5\\u0441\\u0221\\2\\u0ea0\\u0ea1\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u0ea1\\u02b8\\3\\2\\2\\2\\u0ea2\\u0ea3\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u0ea3\\u0ea4\\5\\u0443\\u0222\\2\\u0ea4\\u0ea5\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u0ea5\\u0ea6\\5\\u0457\\u022c\\2\\u0ea6\\u0ea7\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0ea7\\u02ba\\3\\2\\2\\2\\u0ea8\\u0ea9\\5\\u0459\\u022d\\2\\u0ea9\")\n buf.write(\"\\u0eaa\\5\\u0443\\u0222\\2\\u0eaa\\u0eab\\5\\u0451\\u0229\\2\\u0eab\")\n buf.write(\"\\u0eac\\5\\u0461\\u0231\\2\\u0eac\\u02bc\\3\\2\\2\\2\\u0ead\\u0eae\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u0eae\\u0eaf\\5\\u0443\\u0222\\2\\u0eaf\\u0eb0\")\n buf.write(\"\\5\\u045d\\u022f\\2\\u0eb0\\u0eb1\\5\\u045b\\u022e\\2\\u0eb1\\u0eb2\")\n buf.write(\"\\5\\u043b\\u021e\\2\\u0eb2\\u0eb3\\5\\u0451\\u0229\\2\\u0eb3\\u0eb4\")\n buf.write(\"\\5\\u0461\\u0231\\2\\u0eb4\\u0eb5\\5\\u044f\\u0228\\2\\u0eb5\\u02be\")\n buf.write(\"\\3\\2\\2\\2\\u0eb6\\u0eb7\\5\\u0459\\u022d\\2\\u0eb7\\u0eb8\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u0eb8\\u0eb9\\5\\u0437\\u021c\\2\\u0eb9\\u0eba\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u0eba\\u0ebb\\5\\u0445\\u0223\\2\\u0ebb\\u0ebc\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u0ebc\\u0ebd\\5\\u0441\\u0221\\2\\u0ebd\\u0ebe\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u0ebe\\u02c0\\3\\2\\2\\2\\u0ebf\\u0ec0\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u0ec0\\u0ec1\\5\\u0445\\u0223\\2\\u0ec1\\u0ec2\\5\\u0441\\u0221\")\n buf.write(\"\\2\\u0ec2\\u0ec3\\5\\u044f\\u0228\\2\\u0ec3\\u0ec4\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u0ec4\\u0ec5\\5\\u0465\\u0233\\2\\u0ec5\\u0ec6\\5\\u0453\\u022a\")\n 
buf.write(\"\\2\\u0ec6\\u0ec7\\5\\u043d\\u021f\\2\\u0ec7\\u02c2\\3\\2\\2\\2\\u0ec8\")\n buf.write(\"\\u0ec9\\5\\u0459\\u022d\\2\\u0ec9\\u0eca\\5\\u0445\\u0223\\2\\u0eca\")\n buf.write(\"\\u0ecb\\5\\u044d\\u0227\\2\\u0ecb\\u0ecc\\5\\u0453\\u022a\\2\\u0ecc\")\n buf.write(\"\\u0ecd\\5\\u044b\\u0226\\2\\u0ecd\\u0ece\\5\\u043d\\u021f\\2\\u0ece\")\n buf.write(\"\\u0ecf\\7a\\2\\2\\u0ecf\\u0ed0\\5\\u0445\\u0223\\2\\u0ed0\\u0ed1\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u0ed1\\u0ed2\\5\\u045b\\u022e\\2\\u0ed2\\u0ed3\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0ed3\\u0ed4\\5\\u0441\\u0221\\2\\u0ed4\\u0ed5\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0ed5\\u0ed6\\5\\u0457\\u022c\\2\\u0ed6\\u02c4\")\n buf.write(\"\\3\\2\\2\\2\\u0ed7\\u0ed8\\5\\u0459\\u022d\\2\\u0ed8\\u0ed9\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u0ed9\\u0eda\\5\\u044f\\u0228\\2\\u0eda\\u0edb\\5\\u0441\")\n buf.write(\"\\u0221\\2\\u0edb\\u0edc\\5\\u044b\\u0226\\2\\u0edc\\u0edd\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0edd\\u02c6\\3\\2\\2\\2\\u0ede\\u0edf\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u0edf\\u0ee0\\5\\u0445\\u0223\\2\\u0ee0\\u0ee1\\5\\u0467\\u0234\")\n buf.write(\"\\2\\u0ee1\\u0ee2\\5\\u043d\\u021f\\2\\u0ee2\\u02c8\\3\\2\\2\\2\\u0ee3\")\n buf.write(\"\\u0ee4\\5\\u0459\\u022d\\2\\u0ee4\\u0ee5\\5\\u0449\\u0225\\2\\u0ee5\")\n buf.write(\"\\u0ee6\\5\\u0445\\u0223\\2\\u0ee6\\u0ee7\\5\\u0453\\u022a\\2\\u0ee7\")\n buf.write(\"\\u02ca\\3\\2\\2\\2\\u0ee8\\u0ee9\\5\\u0459\\u022d\\2\\u0ee9\\u0eea\")\n buf.write(\"\\5\\u044d\\u0227\\2\\u0eea\\u0eeb\\5\\u0435\\u021b\\2\\u0eeb\\u0eec\")\n buf.write(\"\\5\\u044b\\u0226\\2\\u0eec\\u0eed\\5\\u044b\\u0226\\2\\u0eed\\u0eee\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u0eee\\u0eef\\5\\u044f\\u0228\\2\\u0eef\\u0ef0\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u0ef0\\u02cc\\3\\2\\2\\2\\u0ef1\\u0ef2\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u0ef2\\u0ef3\\5\\u044f\\u0228\\2\\u0ef3\\u0ef4\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0ef4\\u0ef5\\5\\u0453\\u022a\\2\\u0ef5\\u0ef6\\5\\u0459\")\n 
buf.write(\"\\u022d\\2\\u0ef6\\u0ef7\\5\\u0443\\u0222\\2\\u0ef7\\u0ef8\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u0ef8\\u0ef9\\5\\u045b\\u022e\\2\\u0ef9\\u02ce\\3\\2\\2\")\n buf.write(\"\\2\\u0efa\\u0efb\\5\\u0459\\u022d\\2\\u0efb\\u0efc\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u0efc\\u0efd\\5\\u044d\\u0227\\2\\u0efd\\u0efe\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0efe\\u02d0\\3\\2\\2\\2\\u0eff\\u0f00\\5\\u0459\\u022d\\2\\u0f00\")\n buf.write(\"\\u0f01\\5\\u0453\\u022a\\2\\u0f01\\u0f02\\5\\u043d\\u021f\\2\\u0f02\")\n buf.write(\"\\u0f03\\5\\u0439\\u021d\\2\\u0f03\\u0f04\\5\\u0445\\u0223\\2\\u0f04\")\n buf.write(\"\\u0f05\\5\\u043f\\u0220\\2\\u0f05\\u0f06\\5\\u0445\\u0223\\2\\u0f06\")\n buf.write(\"\\u0f07\\5\\u0439\\u021d\\2\\u0f07\\u0f08\\5\\u0435\\u021b\\2\\u0f08\")\n buf.write(\"\\u0f09\\5\\u045b\\u022e\\2\\u0f09\\u0f0a\\5\\u0445\\u0223\\2\\u0f0a\")\n buf.write(\"\\u0f0b\\5\\u0451\\u0229\\2\\u0f0b\\u0f0c\\5\\u044f\\u0228\\2\\u0f0c\")\n buf.write(\"\\u02d2\\3\\2\\2\\2\\u0f0d\\u0f0e\\5\\u0459\\u022d\\2\\u0f0e\\u0f0f\")\n buf.write(\"\\5\\u0455\\u022b\\2\\u0f0f\\u0f10\\5\\u044b\\u0226\\2\\u0f10\\u0f11\")\n buf.write(\"\\5\\u043b\\u021e\\2\\u0f11\\u0f12\\5\\u0435\\u021b\\2\\u0f12\\u0f13\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u0f13\\u0f14\\5\\u0435\\u021b\\2\\u0f14\\u02d4\")\n buf.write(\"\\3\\2\\2\\2\\u0f15\\u0f16\\5\\u0459\\u022d\\2\\u0f16\\u0f17\\5\\u0455\")\n buf.write(\"\\u022b\\2\\u0f17\\u0f18\\5\\u044b\\u0226\\2\\u0f18\\u0f19\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0f19\\u0f1a\\5\\u0457\\u022c\\2\\u0f1a\\u0f1b\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u0f1b\\u0f1c\\5\\u0451\\u0229\\2\\u0f1c\\u0f1d\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u0f1d\\u02d6\\3\\2\\2\\2\\u0f1e\\u0f1f\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u0f1f\\u0f20\\5\\u045b\\u022e\\2\\u0f20\\u0f21\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u0f21\\u0f22\\5\\u044f\\u0228\\2\\u0f22\\u0f23\\5\\u043b\\u021e\")\n buf.write(\"\\2\\u0f23\\u0f24\\5\\u0435\\u021b\\2\\u0f24\\u0f25\\5\\u044b\\u0226\")\n 
buf.write(\"\\2\\u0f25\\u0f26\\5\\u0451\\u0229\\2\\u0f26\\u0f27\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u0f27\\u0f28\\5\\u043d\\u021f\\2\\u0f28\\u02d8\\3\\2\\2\\2\\u0f29\")\n buf.write(\"\\u0f2a\\5\\u0459\\u022d\\2\\u0f2a\\u0f2b\\5\\u045b\\u022e\\2\\u0f2b\")\n buf.write(\"\\u0f2c\\5\\u0435\\u021b\\2\\u0f2c\\u0f2d\\5\\u0457\\u022c\\2\\u0f2d\")\n buf.write(\"\\u0f2e\\5\\u045b\\u022e\\2\\u0f2e\\u02da\\3\\2\\2\\2\\u0f2f\\u0f30\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u0f30\\u0f31\\5\\u045b\\u022e\\2\\u0f31\\u0f32\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u0f32\\u0f33\\5\\u0457\\u022c\\2\\u0f33\\u0f34\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u0f34\\u0f35\\5\\u045d\\u022f\\2\\u0f35\\u0f36\")\n buf.write(\"\\5\\u0453\\u022a\\2\\u0f36\\u02dc\\3\\2\\2\\2\\u0f37\\u0f38\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u0f38\\u0f39\\5\\u045b\\u022e\\2\\u0f39\\u0f3a\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0f3a\\u0f3b\\5\\u045b\\u022e\\2\\u0f3b\\u0f3c\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0f3c\\u0f3d\\5\\u044d\\u0227\\2\\u0f3d\\u0f3e\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0f3e\\u0f3f\\5\\u044f\\u0228\\2\\u0f3f\\u0f40\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0f40\\u02de\\3\\2\\2\\2\\u0f41\\u0f42\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u0f42\\u0f43\\5\\u045b\\u022e\\2\\u0f43\\u0f44\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u0f44\\u0f45\\5\\u045b\\u022e\\2\\u0f45\\u0f46\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0f46\\u0f47\\5\\u044d\\u0227\\2\\u0f47\\u0f48\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0f48\\u0f49\\5\\u044f\\u0228\\2\\u0f49\\u0f4a\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u0f4a\\u0f4b\\7a\\2\\2\\u0f4b\\u0f4c\\5\\u0445\\u0223\\2\\u0f4c\")\n buf.write(\"\\u0f4d\\5\\u043b\\u021e\\2\\u0f4d\\u02e0\\3\\2\\2\\2\\u0f4e\\u0f4f\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u0f4f\\u0f50\\5\\u045b\\u022e\\2\\u0f50\\u0f51\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u0f51\\u0f52\\5\\u045b\\u022e\\2\\u0f52\\u0f53\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u0f53\\u0f54\\5\\u0439\\u021d\\2\\u0f54\\u02e2\")\n 
buf.write(\"\\3\\2\\2\\2\\u0f55\\u0f56\\5\\u0459\\u022d\\2\\u0f56\\u0f57\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0f57\\u0f58\\5\\u0435\\u021b\\2\\u0f58\\u0f59\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0f59\\u0f5a\\5\\u0445\\u0223\\2\\u0f5a\\u0f5b\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u0f5b\\u0f5c\\5\\u045b\\u022e\\2\\u0f5c\\u0f5d\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u0f5d\\u0f5e\\5\\u0439\\u021d\\2\\u0f5e\\u0f5f\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u0f5f\\u02e4\\3\\2\\2\\2\\u0f60\\u0f61\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u0f61\\u0f62\\5\\u045b\\u022e\\2\\u0f62\\u0f63\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u0f63\\u0f64\\5\\u0445\\u0223\\2\\u0f64\\u0f65\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u0f65\\u0f66\\5\\u0441\\u0221\\2\\u0f66\\u02e6\\3\\2\\2\\2\\u0f67\")\n buf.write(\"\\u0f68\\5\\u0459\\u022d\\2\\u0f68\\u0f69\\5\\u045d\\u022f\\2\\u0f69\")\n buf.write(\"\\u0f6a\\5\\u0437\\u021c\\2\\u0f6a\\u0f6b\\5\\u044d\\u0227\\2\\u0f6b\")\n buf.write(\"\\u0f6c\\5\\u045d\\u022f\\2\\u0f6c\\u0f6d\\5\\u044b\\u0226\\2\\u0f6d\")\n buf.write(\"\\u0f6e\\5\\u045b\\u022e\\2\\u0f6e\\u0f6f\\5\\u0445\\u0223\\2\\u0f6f\")\n buf.write(\"\\u0f70\\5\\u0459\\u022d\\2\\u0f70\\u0f71\\5\\u043d\\u021f\\2\\u0f71\")\n buf.write(\"\\u0f72\\5\\u045b\\u022e\\2\\u0f72\\u02e8\\3\\2\\2\\2\\u0f73\\u0f74\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u0f74\\u0f75\\5\\u045d\\u022f\\2\\u0f75\\u0f76\")\n buf.write(\"\\5\\u0437\\u021c\\2\\u0f76\\u0f77\\5\\u0453\\u022a\\2\\u0f77\\u0f78\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u0f78\\u0f79\\5\\u0457\\u022c\\2\\u0f79\\u0f7a\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u0f7a\\u0f7b\\5\\u0445\\u0223\\2\\u0f7b\\u0f7c\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u0f7c\\u0f7d\\5\\u0445\\u0223\\2\\u0f7d\\u0f7e\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u0f7e\\u0f7f\\5\\u044f\\u0228\\2\\u0f7f\\u02ea\")\n buf.write(\"\\3\\2\\2\\2\\u0f80\\u0f81\\5\\u0459\\u022d\\2\\u0f81\\u0f82\\5\\u045d\")\n buf.write(\"\\u022f\\2\\u0f82\\u0f83\\5\\u0437\\u021c\\2\\u0f83\\u0f84\\5\\u0459\")\n 
buf.write(\"\\u022d\\2\\u0f84\\u0f85\\5\\u045b\\u022e\\2\\u0f85\\u0f86\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u0f86\\u0f87\\5\\u045b\\u022e\\2\\u0f87\\u0f88\\5\\u045d\")\n buf.write(\"\\u022f\\2\\u0f88\\u0f89\\5\\u045b\\u022e\\2\\u0f89\\u0f8a\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0f8a\\u0f8b\\5\\u0437\\u021c\\2\\u0f8b\\u0f8c\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u0f8c\\u0f8d\\5\\u043d\\u021f\\2\\u0f8d\\u02ec\\3\\2\\2\")\n buf.write(\"\\2\\u0f8e\\u0f8f\\5\\u0459\\u022d\\2\\u0f8f\\u0f90\\5\\u045d\\u022f\")\n buf.write(\"\\2\\u0f90\\u0f91\\5\\u0437\\u021c\\2\\u0f91\\u0f92\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u0f92\\u0f93\\5\\u0465\\u0233\\2\\u0f93\\u0f94\\5\\u0453\\u022a\")\n buf.write(\"\\2\\u0f94\\u0f95\\5\\u043d\\u021f\\2\\u0f95\\u02ee\\3\\2\\2\\2\\u0f96\")\n buf.write(\"\\u0f97\\5\\u0459\\u022d\\2\\u0f97\\u0f98\\5\\u045d\\u022f\\2\\u0f98\")\n buf.write(\"\\u0f99\\5\\u0439\\u021d\\2\\u0f99\\u0f9a\\5\\u0439\\u021d\\2\\u0f9a\")\n buf.write(\"\\u0f9b\\5\\u043d\\u021f\\2\\u0f9b\\u0f9c\\5\\u0459\\u022d\\2\\u0f9c\")\n buf.write(\"\\u0f9d\\5\\u0459\\u022d\\2\\u0f9d\\u02f0\\3\\2\\2\\2\\u0f9e\\u0f9f\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u0f9f\\u0fa0\\5\\u045d\\u022f\\2\\u0fa0\\u0fa1\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u0fa1\\u0fa2\\5\\u0453\\u022a\\2\\u0fa2\\u0fa3\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u0fa3\\u0fa4\\5\\u044f\\u0228\\2\\u0fa4\\u0fa5\")\n buf.write(\"\\5\\u043b\\u021e\\2\\u0fa5\\u02f2\\3\\2\\2\\2\\u0fa6\\u0fa7\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0fa7\\u0fa8\\5\\u0435\\u021b\\2\\u0fa8\\u0fa9\\5\\u0437\")\n buf.write(\"\\u021c\\2\\u0fa9\\u0faa\\5\\u044b\\u0226\\2\\u0faa\\u0fab\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0fab\\u02f4\\3\\2\\2\\2\\u0fac\\u0fad\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u0fad\\u0fae\\5\\u0443\\u0222\\2\\u0fae\\u0faf\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0faf\\u02f6\\3\\2\\2\\2\\u0fb0\\u0fb1\\5\\u045b\\u022e\\2\\u0fb1\")\n buf.write(\"\\u0fb2\\5\\u0443\\u0222\\2\\u0fb2\\u0fb3\\5\\u043d\\u021f\\2\\u0fb3\")\n 
buf.write(\"\\u0fb4\\5\\u044f\\u0228\\2\\u0fb4\\u02f8\\3\\2\\2\\2\\u0fb5\\u0fb6\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u0fb6\\u0fb7\\5\\u0445\\u0223\\2\\u0fb7\\u0fb8\")\n buf.write(\"\\5\\u044d\\u0227\\2\\u0fb8\\u0fb9\\5\\u043d\\u021f\\2\\u0fb9\\u02fa\")\n buf.write(\"\\3\\2\\2\\2\\u0fba\\u0fbb\\5\\u045b\\u022e\\2\\u0fbb\\u0fbc\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u0fbc\\u0fbd\\5\\u044d\\u0227\\2\\u0fbd\\u0fbe\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u0fbe\\u0fbf\\5\\u0459\\u022d\\2\\u0fbf\\u0fc0\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0fc0\\u0fc1\\5\\u0435\\u021b\\2\\u0fc1\\u0fc2\\5\\u044d\")\n buf.write(\"\\u0227\\2\\u0fc2\\u0fc3\\5\\u0453\\u022a\\2\\u0fc3\\u02fc\\3\\2\\2\")\n buf.write(\"\\2\\u0fc4\\u0fc5\\5\\u045b\\u022e\\2\\u0fc5\\u0fc6\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u0fc6\\u0fc7\\5\\u044d\\u0227\\2\\u0fc7\\u0fc8\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u0fc8\\u0fc9\\5\\u0459\\u022d\\2\\u0fc9\\u0fca\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u0fca\\u0fcb\\5\\u0435\\u021b\\2\\u0fcb\\u0fcc\\5\\u044d\\u0227\")\n buf.write(\"\\2\\u0fcc\\u0fcd\\5\\u0453\\u022a\\2\\u0fcd\\u0fce\\7a\\2\\2\\u0fce\")\n buf.write(\"\\u0fcf\\5\\u044b\\u0226\\2\\u0fcf\\u0fd0\\5\\u045b\\u022e\\2\\u0fd0\")\n buf.write(\"\\u0fd1\\5\\u0467\\u0234\\2\\u0fd1\\u0fd2\\7a\\2\\2\\u0fd2\\u0fd3\")\n buf.write(\"\\5\\u045d\\u022f\\2\\u0fd3\\u0fd4\\5\\u044f\\u0228\\2\\u0fd4\\u0fd5\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u0fd5\\u0fd6\\5\\u0451\\u0229\\2\\u0fd6\\u0fd7\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u0fd7\\u0fd8\\5\\u0459\\u022d\\2\\u0fd8\\u0fd9\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u0fd9\\u0fda\\5\\u0457\\u022c\\2\\u0fda\\u0fdb\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u0fdb\\u0fdc\\5\\u0445\\u0223\\2\\u0fdc\\u0fdd\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u0fdd\\u0fde\\5\\u043d\\u021f\\2\\u0fde\\u0fdf\")\n buf.write(\"\\5\\u043b\\u021e\\2\\u0fdf\\u02fe\\3\\2\\2\\2\\u0fe0\\u0fe1\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u0fe1\\u0fe2\\5\\u0445\\u0223\\2\\u0fe2\\u0fe3\\5\\u044d\")\n 
buf.write(\"\\u0227\\2\\u0fe3\\u0fe4\\5\\u043d\\u021f\\2\\u0fe4\\u0fe5\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u0fe5\\u0fe6\\5\\u045b\\u022e\\2\\u0fe6\\u0fe7\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u0fe7\\u0fe8\\5\\u044d\\u0227\\2\\u0fe8\\u0fe9\\5\\u0453\")\n buf.write(\"\\u022a\\2\\u0fe9\\u0fea\\7a\\2\\2\\u0fea\\u0feb\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u0feb\\u0fec\\5\\u0467\\u0234\\2\\u0fec\\u0fed\\7a\\2\\2\\u0fed\")\n buf.write(\"\\u0fee\\5\\u045d\\u022f\\2\\u0fee\\u0fef\\5\\u044f\\u0228\\2\\u0fef\")\n buf.write(\"\\u0ff0\\5\\u0439\\u021d\\2\\u0ff0\\u0ff1\\5\\u0451\\u0229\\2\\u0ff1\")\n buf.write(\"\\u0ff2\\5\\u044f\\u0228\\2\\u0ff2\\u0ff3\\5\\u0459\\u022d\\2\\u0ff3\")\n buf.write(\"\\u0ff4\\5\\u045b\\u022e\\2\\u0ff4\\u0ff5\\5\\u0457\\u022c\\2\\u0ff5\")\n buf.write(\"\\u0ff6\\5\\u0435\\u021b\\2\\u0ff6\\u0ff7\\5\\u0445\\u0223\\2\\u0ff7\")\n buf.write(\"\\u0ff8\\5\\u044f\\u0228\\2\\u0ff8\\u0ff9\\5\\u043d\\u021f\\2\\u0ff9\")\n buf.write(\"\\u0ffa\\5\\u043b\\u021e\\2\\u0ffa\\u0300\\3\\2\\2\\2\\u0ffb\\u0ffc\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u0ffc\\u0ffd\\5\\u0445\\u0223\\2\\u0ffd\\u0ffe\")\n buf.write(\"\\5\\u044d\\u0227\\2\\u0ffe\\u0fff\\5\\u043d\\u021f\\2\\u0fff\\u1000\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u1000\\u1001\\5\\u045b\\u022e\\2\\u1001\\u1002\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u1002\\u1003\\5\\u044d\\u0227\\2\\u1003\\u1004\")\n buf.write(\"\\5\\u0453\\u022a\\2\\u1004\\u1005\\7a\\2\\2\\u1005\\u1006\\5\\u045d\")\n buf.write(\"\\u022f\\2\\u1006\\u1007\\5\\u044f\\u0228\\2\\u1007\\u1008\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u1008\\u1009\\5\\u0451\\u0229\\2\\u1009\\u100a\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u100a\\u100b\\5\\u0459\\u022d\\2\\u100b\\u100c\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u100c\\u100d\\5\\u0457\\u022c\\2\\u100d\\u100e\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u100e\\u100f\\5\\u0445\\u0223\\2\\u100f\\u1010\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u1010\\u1011\\5\\u043d\\u021f\\2\\u1011\\u1012\\5\\u043b\")\n 
buf.write(\"\\u021e\\2\\u1012\\u0302\\3\\2\\2\\2\\u1013\\u1014\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u1014\\u1015\\5\\u0445\\u0223\\2\\u1015\\u1016\\5\\u044d\\u0227\")\n buf.write(\"\\2\\u1016\\u1017\\5\\u043d\\u021f\\2\\u1017\\u1018\\5\\u0467\\u0234\")\n buf.write(\"\\2\\u1018\\u1019\\5\\u0451\\u0229\\2\\u1019\\u101a\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u101a\\u101b\\5\\u043d\\u021f\\2\\u101b\\u101c\\7a\\2\\2\\u101c\")\n buf.write(\"\\u101d\\5\\u0435\\u021b\\2\\u101d\\u101e\\5\\u0437\\u021c\\2\\u101e\")\n buf.write(\"\\u101f\\5\\u0437\\u021c\\2\\u101f\\u1020\\5\\u0457\\u022c\\2\\u1020\")\n buf.write(\"\\u0304\\3\\2\\2\\2\\u1021\\u1022\\5\\u045b\\u022e\\2\\u1022\\u1023\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u1023\\u1024\\5\\u044d\\u0227\\2\\u1024\\u1025\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u1025\\u1026\\5\\u0467\\u0234\\2\\u1026\\u1027\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u1027\\u1028\\5\\u044f\\u0228\\2\\u1028\\u1029\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u1029\\u102a\\7a\\2\\2\\u102a\\u102b\\5\\u0443\")\n buf.write(\"\\u0222\\2\\u102b\\u102c\\5\\u0451\\u0229\\2\\u102c\\u102d\\5\\u045d\")\n buf.write(\"\\u022f\\2\\u102d\\u102e\\5\\u0457\\u022c\\2\\u102e\\u0306\\3\\2\\2\")\n buf.write(\"\\2\\u102f\\u1030\\5\\u045b\\u022e\\2\\u1030\\u1031\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u1031\\u1032\\5\\u044d\\u0227\\2\\u1032\\u1033\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u1033\\u1034\\5\\u0467\\u0234\\2\\u1034\\u1035\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u1035\\u1036\\5\\u044f\\u0228\\2\\u1036\\u1037\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u1037\\u1038\\7a\\2\\2\\u1038\\u1039\\5\\u044d\\u0227\\2\\u1039\")\n buf.write(\"\\u103a\\5\\u0445\\u0223\\2\\u103a\\u103b\\5\\u044f\\u0228\\2\\u103b\")\n buf.write(\"\\u103c\\5\\u045d\\u022f\\2\\u103c\\u103d\\5\\u045b\\u022e\\2\\u103d\")\n buf.write(\"\\u103e\\5\\u043d\\u021f\\2\\u103e\\u0308\\3\\2\\2\\2\\u103f\\u1040\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u1040\\u1041\\5\\u0445\\u0223\\2\\u1041\\u1042\")\n 
buf.write(\"\\5\\u044d\\u0227\\2\\u1042\\u1043\\5\\u043d\\u021f\\2\\u1043\\u1044\")\n buf.write(\"\\5\\u0467\\u0234\\2\\u1044\\u1045\\5\\u0451\\u0229\\2\\u1045\\u1046\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u1046\\u1047\\5\\u043d\\u021f\\2\\u1047\\u1048\")\n buf.write(\"\\7a\\2\\2\\u1048\\u1049\\5\\u0457\\u022c\\2\\u1049\\u104a\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u104a\\u104b\\5\\u0441\\u0221\\2\\u104b\\u104c\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u104c\\u104d\\5\\u0451\\u0229\\2\\u104d\\u104e\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u104e\\u030a\\3\\2\\2\\2\\u104f\\u1050\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u1050\\u1051\\5\\u0451\\u0229\\2\\u1051\\u030c\\3\\2\\2\\2\\u1052\")\n buf.write(\"\\u1053\\5\\u045b\\u022e\\2\\u1053\\u1054\\5\\u0457\\u022c\\2\\u1054\")\n buf.write(\"\\u1055\\5\\u0435\\u021b\\2\\u1055\\u1056\\5\\u0445\\u0223\\2\\u1056\")\n buf.write(\"\\u1057\\5\\u044b\\u0226\\2\\u1057\\u1058\\5\\u0445\\u0223\\2\\u1058\")\n buf.write(\"\\u1059\\5\\u044f\\u0228\\2\\u1059\\u105a\\5\\u0441\\u0221\\2\\u105a\")\n buf.write(\"\\u030e\\3\\2\\2\\2\\u105b\\u105c\\5\\u045b\\u022e\\2\\u105c\\u105d\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u105d\\u105e\\5\\u0435\\u021b\\2\\u105e\\u105f\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u105f\\u1060\\5\\u0459\\u022d\\2\\u1060\\u1061\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u1061\\u1062\\5\\u0439\\u021d\\2\\u1062\\u1063\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u1063\\u1064\\5\\u0445\\u0223\\2\\u1064\\u1065\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u1065\\u1066\\5\\u044f\\u0228\\2\\u1066\\u0310\")\n buf.write(\"\\3\\2\\2\\2\\u1067\\u1068\\5\\u045b\\u022e\\2\\u1068\\u1069\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u1069\\u106a\\5\\u0435\\u021b\\2\\u106a\\u106b\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u106b\\u106c\\5\\u0459\\u022d\\2\\u106c\\u106d\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u106d\\u106e\\5\\u0435\\u021b\\2\\u106e\\u106f\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u106f\\u1070\\5\\u043d\\u021f\\2\\u1070\\u0312\\3\\2\\2\")\n 
buf.write(\"\\2\\u1071\\u1072\\5\\u045b\\u022e\\2\\u1072\\u1073\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u1073\\u1074\\5\\u043d\\u021f\\2\\u1074\\u1075\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u1075\\u1076\\5\\u045b\\u022e\\2\\u1076\\u0314\\3\\2\\2\\2\\u1077\")\n buf.write(\"\\u1078\\5\\u045b\\u022e\\2\\u1078\\u1079\\5\\u0457\\u022c\\2\\u1079\")\n buf.write(\"\\u107a\\5\\u0445\\u0223\\2\\u107a\\u107b\\5\\u0441\\u0221\\2\\u107b\")\n buf.write(\"\\u107c\\5\\u0441\\u0221\\2\\u107c\\u107d\\5\\u043d\\u021f\\2\\u107d\")\n buf.write(\"\\u107e\\5\\u0457\\u022c\\2\\u107e\\u0316\\3\\2\\2\\2\\u107f\\u1080\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u1080\\u1081\\5\\u0457\\u022c\\2\\u1081\\u1082\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u1082\\u1083\\5\\u044d\\u0227\\2\\u1083\\u0318\")\n buf.write(\"\\3\\2\\2\\2\\u1084\\u1085\\5\\u045b\\u022e\\2\\u1085\\u1086\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u1086\\u1087\\5\\u045d\\u022f\\2\\u1087\\u1088\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u1088\\u031a\\3\\2\\2\\2\\u1089\\u108a\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u108a\\u108b\\5\\u0457\\u022c\\2\\u108b\\u108c\\5\\u045d\\u022f\")\n buf.write(\"\\2\\u108c\\u108d\\5\\u044f\\u0228\\2\\u108d\\u108e\\5\\u0439\\u021d\")\n buf.write(\"\\2\\u108e\\u108f\\5\\u0435\\u021b\\2\\u108f\\u1090\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u1090\\u1091\\5\\u043d\\u021f\\2\\u1091\\u031c\\3\\2\\2\\2\\u1092\")\n buf.write(\"\\u1093\\5\\u045b\\u022e\\2\\u1093\\u1094\\5\\u0465\\u0233\\2\\u1094\")\n buf.write(\"\\u1095\\5\\u0453\\u022a\\2\\u1095\\u1096\\5\\u043d\\u021f\\2\\u1096\")\n buf.write(\"\\u031e\\3\\2\\2\\2\\u1097\\u1098\\5\\u045d\\u022f\\2\\u1098\\u1099\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u1099\\u109a\\5\\u0437\\u021c\\2\\u109a\\u109b\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u109b\\u109c\\5\\u045d\\u022f\\2\\u109c\\u109d\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u109d\\u109e\\5\\u043b\\u021e\\2\\u109e\\u109f\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u109f\\u10a0\\5\\u043b\\u021e\\2\\u10a0\\u0320\")\n 
buf.write(\"\\3\\2\\2\\2\\u10a1\\u10a2\\5\\u045d\\u022f\\2\\u10a2\\u10a3\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u10a3\\u10a4\\5\\u043b\\u021e\\2\\u10a4\\u10a5\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u10a5\\u10a6\\5\\u0457\\u022c\\2\\u10a6\\u0322\\3\\2\\2\")\n buf.write(\"\\2\\u10a7\\u10a8\\5\\u045d\\u022f\\2\\u10a8\\u10a9\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u10a9\\u10aa\\5\\u0445\\u0223\\2\\u10aa\\u10ab\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u10ab\\u10ac\\5\\u044f\\u0228\\2\\u10ac\\u0324\\3\\2\\2\\2\\u10ad\")\n buf.write(\"\\u10ae\\5\\u045d\\u022f\\2\\u10ae\\u10af\\5\\u044f\\u0228\\2\\u10af\")\n buf.write(\"\\u10b0\\5\\u0445\\u0223\\2\\u10b0\\u10b1\\5\\u0455\\u022b\\2\\u10b1\")\n buf.write(\"\\u10b2\\5\\u045d\\u022f\\2\\u10b2\\u10b3\\5\\u043d\\u021f\\2\\u10b3\")\n buf.write(\"\\u0326\\3\\2\\2\\2\\u10b4\\u10b5\\5\\u045d\\u022f\\2\\u10b5\\u10b6\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u10b6\\u10b7\\5\\u044b\\u0226\\2\\u10b7\\u10b8\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u10b8\\u10b9\\5\\u044d\\u0227\\2\\u10b9\\u10ba\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u10ba\\u10bb\\5\\u045b\\u022e\\2\\u10bb\\u10bc\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u10bc\\u10bd\\5\\u043b\\u021e\\2\\u10bd\\u0328\")\n buf.write(\"\\3\\2\\2\\2\\u10be\\u10bf\\5\\u045d\\u022f\\2\\u10bf\\u10c0\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u10c0\\u10c1\\5\\u0453\\u022a\\2\\u10c1\\u10c2\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u10c2\\u10c3\\5\\u045f\\u0230\\2\\u10c3\\u10c4\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u10c4\\u10c5\\5\\u045b\\u022e\\2\\u10c5\\u032a\\3\\2\\2\")\n buf.write(\"\\2\\u10c6\\u10c7\\5\\u045d\\u022f\\2\\u10c7\\u10c8\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u10c8\\u10c9\\5\\u045b\\u022e\\2\\u10c9\\u10ca\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u10ca\\u10cb\\5\\u044b\\u0226\\2\\u10cb\\u032c\\3\\2\\2\\2\\u10cc\")\n buf.write(\"\\u10cd\\5\\u045d\\u022f\\2\\u10cd\\u10ce\\5\\u0453\\u022a\\2\\u10ce\")\n buf.write(\"\\u10cf\\5\\u043b\\u021e\\2\\u10cf\\u10d0\\5\\u0435\\u021b\\2\\u10d0\")\n 
buf.write(\"\\u10d1\\5\\u045b\\u022e\\2\\u10d1\\u10d2\\5\\u043d\\u021f\\2\\u10d2\")\n buf.write(\"\\u032e\\3\\2\\2\\2\\u10d3\\u10d4\\5\\u045d\\u022f\\2\\u10d4\\u10d5\")\n buf.write(\"\\5\\u0453\\u022a\\2\\u10d5\\u10d6\\5\\u043b\\u021e\\2\\u10d6\\u10d7\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u10d7\\u10d8\\5\\u045b\\u022e\\2\\u10d8\\u10d9\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u10d9\\u10da\\5\\u043b\\u021e\\2\\u10da\\u0330\")\n buf.write(\"\\3\\2\\2\\2\\u10db\\u10dc\\5\\u045d\\u022f\\2\\u10dc\\u10dd\\5\\u0453\")\n buf.write(\"\\u022a\\2\\u10dd\\u10de\\5\\u0459\\u022d\\2\\u10de\\u10df\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u10df\\u10e0\\5\\u0457\\u022c\\2\\u10e0\\u10e1\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u10e1\\u0332\\3\\2\\2\\2\\u10e2\\u10e3\\5\\u045d\\u022f\")\n buf.write(\"\\2\\u10e3\\u10e4\\5\\u0457\\u022c\\2\\u10e4\\u10e5\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u10e5\\u10e6\\5\\u0461\\u0231\\2\\u10e6\\u10e7\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u10e7\\u10e8\\5\\u043b\\u021e\\2\\u10e8\\u0334\\3\\2\\2\\2\\u10e9\")\n buf.write(\"\\u10ea\\5\\u045d\\u022f\\2\\u10ea\\u10eb\\5\\u0459\\u022d\\2\\u10eb\")\n buf.write(\"\\u10ec\\5\\u043d\\u021f\\2\\u10ec\\u0336\\3\\2\\2\\2\\u10ed\\u10ee\")\n buf.write(\"\\5\\u045d\\u022f\\2\\u10ee\\u10ef\\5\\u0459\\u022d\\2\\u10ef\\u10f0\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u10f0\\u10f1\\5\\u044f\\u0228\\2\\u10f1\\u10f2\")\n buf.write(\"\\5\\u0441\\u0221\\2\\u10f2\\u0338\\3\\2\\2\\2\\u10f3\\u10f4\\5\\u045f\")\n buf.write(\"\\u0230\\2\\u10f4\\u10f5\\5\\u0435\\u021b\\2\\u10f5\\u10f6\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u10f6\\u10f7\\5\\u0445\\u0223\\2\\u10f7\\u10f8\\5\\u043b\")\n buf.write(\"\\u021e\\2\\u10f8\\u10f9\\5\\u0435\\u021b\\2\\u10f9\\u10fa\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u10fa\\u10fb\\5\\u043d\\u021f\\2\\u10fb\\u033a\\3\\2\\2\")\n buf.write(\"\\2\\u10fc\\u10fd\\5\\u045f\\u0230\\2\\u10fd\\u10fe\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u10fe\\u10ff\\5\\u044b\\u0226\\2\\u10ff\\u1100\\5\\u045d\\u022f\")\n 
buf.write(\"\\2\\u1100\\u1101\\5\\u043d\\u021f\\2\\u1101\\u033c\\3\\2\\2\\2\\u1102\")\n buf.write(\"\\u1103\\5\\u045f\\u0230\\2\\u1103\\u1104\\5\\u0435\\u021b\\2\\u1104\")\n buf.write(\"\\u1105\\5\\u044b\\u0226\\2\\u1105\\u1106\\5\\u045d\\u022f\\2\\u1106\")\n buf.write(\"\\u1107\\5\\u043d\\u021f\\2\\u1107\\u1108\\5\\u0459\\u022d\\2\\u1108\")\n buf.write(\"\\u033e\\3\\2\\2\\2\\u1109\\u110a\\5\\u045f\\u0230\\2\\u110a\\u110b\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u110b\\u110c\\5\\u0457\\u022c\\2\\u110c\\u110d\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u110d\\u110e\\5\\u0443\\u0222\\2\\u110e\\u110f\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u110f\\u1110\\5\\u0457\\u022c\\2\\u1110\\u0340\")\n buf.write(\"\\3\\2\\2\\2\\u1111\\u1112\\5\\u045f\\u0230\\2\\u1112\\u1113\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u1113\\u1114\\5\\u0457\\u022c\\2\\u1114\\u1115\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u1115\\u1116\\5\\u0443\\u0222\\2\\u1116\\u1117\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u1117\\u1118\\5\\u0457\\u022c\\2\\u1118\\u1119\\7\\64\")\n buf.write(\"\\2\\2\\u1119\\u0342\\3\\2\\2\\2\\u111a\\u111b\\5\\u045f\\u0230\\2\\u111b\")\n buf.write(\"\\u111c\\5\\u0435\\u021b\\2\\u111c\\u111d\\5\\u0457\\u022c\\2\\u111d\")\n buf.write(\"\\u111e\\5\\u0445\\u0223\\2\\u111e\\u111f\\5\\u0435\\u021b\\2\\u111f\")\n buf.write(\"\\u1120\\5\\u0437\\u021c\\2\\u1120\\u1121\\5\\u044b\\u0226\\2\\u1121\")\n buf.write(\"\\u1122\\5\\u043d\\u021f\\2\\u1122\\u0344\\3\\2\\2\\2\\u1123\\u1124\")\n buf.write(\"\\5\\u045f\\u0230\\2\\u1124\\u1125\\5\\u0435\\u021b\\2\\u1125\\u1126\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u1126\\u1127\\5\\u0457\\u022c\\2\\u1127\\u1128\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u1128\\u1129\\5\\u0465\\u0233\\2\\u1129\\u0346\")\n buf.write(\"\\3\\2\\2\\2\\u112a\\u112b\\5\\u045f\\u0230\\2\\u112b\\u112c\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u112c\\u112d\\5\\u0457\\u022c\\2\\u112d\\u112e\\5\\u0465\")\n buf.write(\"\\u0233\\2\\u112e\\u112f\\5\\u0445\\u0223\\2\\u112f\\u1130\\5\\u044f\")\n 
buf.write(\"\\u0228\\2\\u1130\\u1131\\5\\u0441\\u0221\\2\\u1131\\u0348\\3\\2\\2\")\n buf.write(\"\\2\\u1132\\u1133\\5\\u045f\\u0230\\2\\u1133\\u1134\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u1134\\u1135\\5\\u0457\\u022c\\2\\u1135\\u1136\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u1136\\u1137\\5\\u0445\\u0223\\2\\u1137\\u1138\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u1138\\u1139\\5\\u044f\\u0228\\2\\u1139\\u034a\\3\\2\\2\\2\\u113a\")\n buf.write(\"\\u113b\\5\\u045f\\u0230\\2\\u113b\\u113c\\5\\u043d\\u021f\\2\\u113c\")\n buf.write(\"\\u113d\\5\\u0457\\u022c\\2\\u113d\\u113e\\5\\u0459\\u022d\\2\\u113e\")\n buf.write(\"\\u113f\\5\\u0445\\u0223\\2\\u113f\\u1140\\5\\u0451\\u0229\\2\\u1140\")\n buf.write(\"\\u1141\\5\\u044f\\u0228\\2\\u1141\\u1142\\5\\u0459\\u022d\\2\\u1142\")\n buf.write(\"\\u034c\\3\\2\\2\\2\\u1143\\u1144\\5\\u0461\\u0231\\2\\u1144\\u1145\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u1145\\u1146\\5\\u0445\\u0223\\2\\u1146\\u1147\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u1147\\u034e\\3\\2\\2\\2\\u1148\\u1149\\5\\u0461\")\n buf.write(\"\\u0231\\2\\u1149\\u114a\\5\\u0435\\u021b\\2\\u114a\\u114b\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u114b\\u114c\\5\\u044f\\u0228\\2\\u114c\\u114d\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u114d\\u114e\\5\\u044f\\u0228\\2\\u114e\\u114f\\5\\u0441\")\n buf.write(\"\\u0221\\2\\u114f\\u0350\\3\\2\\2\\2\\u1150\\u1151\\5\\u0461\\u0231\")\n buf.write(\"\\2\\u1151\\u1152\\5\\u043d\\u021f\\2\\u1152\\u1153\\5\\u044b\\u0226\")\n buf.write(\"\\2\\u1153\\u1154\\5\\u044b\\u0226\\2\\u1154\\u1155\\5\\u043f\\u0220\")\n buf.write(\"\\2\\u1155\\u1156\\5\\u0451\\u0229\\2\\u1156\\u1157\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u1157\\u1158\\5\\u044d\\u0227\\2\\u1158\\u1159\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u1159\\u115a\\5\\u043b\\u021e\\2\\u115a\\u0352\\3\\2\\2\\2\\u115b\")\n buf.write(\"\\u115c\\5\\u0461\\u0231\\2\\u115c\\u115d\\5\\u0443\\u0222\\2\\u115d\")\n buf.write(\"\\u115e\\5\\u043d\\u021f\\2\\u115e\\u115f\\5\\u044f\\u0228\\2\\u115f\")\n 
buf.write(\"\\u0354\\3\\2\\2\\2\\u1160\\u1161\\5\\u0461\\u0231\\2\\u1161\\u1162\")\n buf.write(\"\\5\\u0443\\u0222\\2\\u1162\\u1163\\5\\u043d\\u021f\\2\\u1163\\u1164\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u1164\\u1165\\5\\u043d\\u021f\\2\\u1165\\u1166\")\n buf.write(\"\\5\\u045f\\u0230\\2\\u1166\\u1167\\5\\u043d\\u021f\\2\\u1167\\u1168\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u1168\\u0356\\3\\2\\2\\2\\u1169\\u116a\\5\\u0461\")\n buf.write(\"\\u0231\\2\\u116a\\u116b\\5\\u0443\\u0222\\2\\u116b\\u116c\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u116c\\u116d\\5\\u0457\\u022c\\2\\u116d\\u116e\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u116e\\u0358\\3\\2\\2\\2\\u116f\\u1170\\5\\u0461\\u0231\")\n buf.write(\"\\2\\u1170\\u1171\\5\\u0443\\u0222\\2\\u1171\\u1172\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u1172\\u1173\\5\\u044b\\u0226\\2\\u1173\\u1174\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u1174\\u035a\\3\\2\\2\\2\\u1175\\u1176\\5\\u0461\\u0231\\2\\u1176\")\n buf.write(\"\\u1177\\5\\u0445\\u0223\\2\\u1177\\u1178\\5\\u045b\\u022e\\2\\u1178\")\n buf.write(\"\\u1179\\5\\u0443\\u0222\\2\\u1179\\u035c\\3\\2\\2\\2\\u117a\\u117b\")\n buf.write(\"\\5\\u0461\\u0231\\2\\u117b\\u117c\\5\\u0445\\u0223\\2\\u117c\\u117d\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u117d\\u117e\\5\\u0443\\u0222\\2\\u117e\\u117f\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u117f\\u1180\\5\\u044f\\u0228\\2\\u1180\\u035e\")\n buf.write(\"\\3\\2\\2\\2\\u1181\\u1182\\5\\u0461\\u0231\\2\\u1182\\u1183\\5\\u0451\")\n buf.write(\"\\u0229\\2\\u1183\\u1184\\5\\u0457\\u022c\\2\\u1184\\u1185\\5\\u0449\")\n buf.write(\"\\u0225\\2\\u1185\\u0360\\3\\2\\2\\2\\u1186\\u1187\\5\\u0461\\u0231\")\n buf.write(\"\\2\\u1187\\u1188\\5\\u0457\\u022c\\2\\u1188\\u1189\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u1189\\u118a\\5\\u045b\\u022e\\2\\u118a\\u118b\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u118b\\u0362\\3\\2\\2\\2\\u118c\\u118d\\5\\u0463\\u0232\\2\\u118d\")\n buf.write(\"\\u118e\\5\\u044d\\u0227\\2\\u118e\\u118f\\5\\u044b\\u0226\\2\\u118f\")\n 
buf.write(\"\\u0364\\3\\2\\2\\2\\u1190\\u1191\\5\\u0463\\u0232\\2\\u1191\\u1192\")\n buf.write(\"\\5\\u044d\\u0227\\2\\u1192\\u1193\\5\\u044b\\u0226\\2\\u1193\\u1194\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u1194\\u1195\\5\\u0441\\u0221\\2\\u1195\\u1196\")\n buf.write(\"\\5\\u0441\\u0221\\2\\u1196\\u0366\\3\\2\\2\\2\\u1197\\u1198\\5\\u0463\")\n buf.write(\"\\u0232\\2\\u1198\\u1199\\5\\u044d\\u0227\\2\\u1199\\u119a\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u119a\\u119b\\5\\u0435\\u021b\\2\\u119b\\u119c\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u119c\\u119d\\5\\u045b\\u022e\\2\\u119d\\u119e\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u119e\\u119f\\5\\u0445\\u0223\\2\\u119f\\u11a0\\5\\u0437\")\n buf.write(\"\\u021c\\2\\u11a0\\u11a1\\5\\u045d\\u022f\\2\\u11a1\\u11a2\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u11a2\\u11a3\\5\\u043d\\u021f\\2\\u11a3\\u11a4\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u11a4\\u0368\\3\\2\\2\\2\\u11a5\\u11a6\\5\\u0463\\u0232\")\n buf.write(\"\\2\\u11a6\\u11a7\\5\\u044d\\u0227\\2\\u11a7\\u11a8\\5\\u044b\\u0226\")\n buf.write(\"\\2\\u11a8\\u11a9\\5\\u0439\\u021d\\2\\u11a9\\u11aa\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u11aa\\u11ab\\5\\u0459\\u022d\\2\\u11ab\\u11ac\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u11ac\\u036a\\3\\2\\2\\2\\u11ad\\u11ae\\5\\u0463\\u0232\\2\\u11ae\")\n buf.write(\"\\u11af\\5\\u044d\\u0227\\2\\u11af\\u11b0\\5\\u044b\\u0226\\2\\u11b0\")\n buf.write(\"\\u11b1\\5\\u0439\\u021d\\2\\u11b1\\u11b2\\5\\u0451\\u0229\\2\\u11b2\")\n buf.write(\"\\u11b3\\5\\u044b\\u0226\\2\\u11b3\\u11b4\\5\\u0435\\u021b\\2\\u11b4\")\n buf.write(\"\\u11b5\\5\\u045b\\u022e\\2\\u11b5\\u11b6\\5\\u045b\\u022e\\2\\u11b6\")\n buf.write(\"\\u11b7\\5\\u045f\\u0230\\2\\u11b7\\u11b8\\5\\u0435\\u021b\\2\\u11b8\")\n buf.write(\"\\u11b9\\5\\u044b\\u0226\\2\\u11b9\\u036c\\3\\2\\2\\2\\u11ba\\u11bb\")\n buf.write(\"\\5\\u0463\\u0232\\2\\u11bb\\u11bc\\5\\u044d\\u0227\\2\\u11bc\\u11bd\")\n buf.write(\"\\5\\u044b\\u0226\\2\\u11bd\\u11be\\5\\u043d\\u021f\\2\\u11be\\u11bf\")\n 
buf.write(\"\\5\\u044b\\u0226\\2\\u11bf\\u11c0\\5\\u043d\\u021f\\2\\u11c0\\u11c1\")\n buf.write(\"\\5\\u044d\\u0227\\2\\u11c1\\u11c2\\5\\u043d\\u021f\\2\\u11c2\\u11c3\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u11c3\\u11c4\\5\\u045b\\u022e\\2\\u11c4\\u036e\")\n buf.write(\"\\3\\2\\2\\2\\u11c5\\u11c6\\5\\u0463\\u0232\\2\\u11c6\\u11c7\\5\\u044d\")\n buf.write(\"\\u0227\\2\\u11c7\\u11c8\\5\\u044b\\u0226\\2\\u11c8\\u11c9\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u11c9\\u11ca\\5\\u0463\\u0232\\2\\u11ca\\u11cb\\5\\u0445\")\n buf.write(\"\\u0223\\2\\u11cb\\u11cc\\5\\u0459\\u022d\\2\\u11cc\\u11cd\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u11cd\\u11ce\\5\\u0459\\u022d\\2\\u11ce\\u0370\\3\\2\\2\")\n buf.write(\"\\2\\u11cf\\u11d0\\5\\u0463\\u0232\\2\\u11d0\\u11d1\\5\\u044d\\u0227\")\n buf.write(\"\\2\\u11d1\\u11d2\\5\\u044b\\u0226\\2\\u11d2\\u11d3\\5\\u043f\\u0220\")\n buf.write(\"\\2\\u11d3\\u11d4\\5\\u0451\\u0229\\2\\u11d4\\u11d5\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u11d5\\u11d6\\5\\u043d\\u021f\\2\\u11d6\\u11d7\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u11d7\\u11d8\\5\\u045b\\u022e\\2\\u11d8\\u0372\\3\\2\\2\\2\\u11d9\")\n buf.write(\"\\u11da\\5\\u0463\\u0232\\2\\u11da\\u11db\\5\\u044d\\u0227\\2\\u11db\")\n buf.write(\"\\u11dc\\5\\u044b\\u0226\\2\\u11dc\\u11dd\\5\\u044f\\u0228\\2\\u11dd\")\n buf.write(\"\\u11de\\5\\u0435\\u021b\\2\\u11de\\u11df\\5\\u044d\\u0227\\2\\u11df\")\n buf.write(\"\\u11e0\\5\\u043d\\u021f\\2\\u11e0\\u11e1\\5\\u0459\\u022d\\2\\u11e1\")\n buf.write(\"\\u11e2\\5\\u0453\\u022a\\2\\u11e2\\u11e3\\5\\u0435\\u021b\\2\\u11e3\")\n buf.write(\"\\u11e4\\5\\u0439\\u021d\\2\\u11e4\\u11e5\\5\\u043d\\u021f\\2\\u11e5\")\n buf.write(\"\\u11e6\\5\\u0459\\u022d\\2\\u11e6\\u0374\\3\\2\\2\\2\\u11e7\\u11e8\")\n buf.write(\"\\5\\u0463\\u0232\\2\\u11e8\\u11e9\\5\\u044d\\u0227\\2\\u11e9\\u11ea\")\n buf.write(\"\\5\\u044b\\u0226\\2\\u11ea\\u11eb\\5\\u0453\\u022a\\2\\u11eb\\u11ec\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u11ec\\u11ed\\5\\u0457\\u022c\\2\\u11ed\\u11ee\")\n 
buf.write(\"\\5\\u0459\\u022d\\2\\u11ee\\u11ef\\5\\u043d\\u021f\\2\\u11ef\\u0376\")\n buf.write(\"\\3\\2\\2\\2\\u11f0\\u11f1\\5\\u0463\\u0232\\2\\u11f1\\u11f2\\5\\u044d\")\n buf.write(\"\\u0227\\2\\u11f2\\u11f3\\5\\u044b\\u0226\\2\\u11f3\\u11f4\\5\\u0453\")\n buf.write(\"\\u022a\\2\\u11f4\\u11f5\\5\\u0445\\u0223\\2\\u11f5\\u0378\\3\\2\\2\")\n buf.write(\"\\2\\u11f6\\u11f7\\5\\u0463\\u0232\\2\\u11f7\\u11f8\\5\\u044d\\u0227\")\n buf.write(\"\\2\\u11f8\\u11f9\\5\\u044b\\u0226\\2\\u11f9\\u11fa\\5\\u0455\\u022b\")\n buf.write(\"\\2\\u11fa\\u11fb\\5\\u045d\\u022f\\2\\u11fb\\u11fc\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u11fc\\u11fd\\5\\u0457\\u022c\\2\\u11fd\\u11fe\\5\\u0465\\u0233\")\n buf.write(\"\\2\\u11fe\\u037a\\3\\2\\2\\2\\u11ff\\u1200\\5\\u0463\\u0232\\2\\u1200\")\n buf.write(\"\\u1201\\5\\u044d\\u0227\\2\\u1201\\u1202\\5\\u044b\\u0226\\2\\u1202\")\n buf.write(\"\\u1203\\5\\u0457\\u022c\\2\\u1203\\u1204\\5\\u0451\\u0229\\2\\u1204\")\n buf.write(\"\\u1205\\5\\u0451\\u0229\\2\\u1205\\u1206\\5\\u045b\\u022e\\2\\u1206\")\n buf.write(\"\\u037c\\3\\2\\2\\2\\u1207\\u1208\\5\\u0463\\u0232\\2\\u1208\\u1209\")\n buf.write(\"\\5\\u044d\\u0227\\2\\u1209\\u120a\\5\\u044b\\u0226\\2\\u120a\\u120b\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u120b\\u120c\\5\\u043d\\u021f\\2\\u120c\\u120d\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u120d\\u120e\\5\\u0445\\u0223\\2\\u120e\\u120f\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u120f\\u1210\\5\\u044b\\u0226\\2\\u1210\\u1211\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u1211\\u1212\\5\\u0467\\u0234\\2\\u1212\\u1213\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u1213\\u037e\\3\\2\\2\\2\\u1214\\u1215\\5\\u0463\")\n buf.write(\"\\u0232\\2\\u1215\\u1216\\5\\u044d\\u0227\\2\\u1216\\u1217\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u1217\\u1218\\5\\u045b\\u022e\\2\\u1218\\u1219\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u1219\\u121a\\5\\u0437\\u021c\\2\\u121a\\u121b\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u121b\\u121c\\5\\u043d\\u021f\\2\\u121c\\u0380\\3\\2\\2\")\n 
buf.write(\"\\2\\u121d\\u121e\\5\\u0465\\u0233\\2\\u121e\\u121f\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u121f\\u1220\\5\\u0435\\u021b\\2\\u1220\\u1221\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u1221\\u0382\\3\\2\\2\\2\\u1222\\u1223\\5\\u0465\\u0233\\2\\u1223\")\n buf.write(\"\\u1224\\5\\u043d\\u021f\\2\\u1224\\u1225\\5\\u0459\\u022d\\2\\u1225\")\n buf.write(\"\\u0384\\3\\2\\2\\2\\u1226\\u1227\\5\\u0465\\u0233\\2\\u1227\\u1228\")\n buf.write(\"\\5\\u044d\\u0227\\2\\u1228\\u1229\\5\\u0445\\u0223\\2\\u1229\\u122a\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u122a\\u122b\\5\\u045b\\u022e\\2\\u122b\\u122c\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u122c\\u122d\\5\\u0457\\u022c\\2\\u122d\\u122e\")\n buf.write(\"\\5\\u045f\\u0230\\2\\u122e\\u122f\\5\\u0435\\u021b\\2\\u122f\\u1230\")\n buf.write(\"\\5\\u044b\\u0226\\2\\u1230\\u1231\\7a\\2\\2\\u1231\\u1232\\5\\u045d\")\n buf.write(\"\\u022f\\2\\u1232\\u1233\\5\\u044f\\u0228\\2\\u1233\\u1234\\5\\u0439\")\n buf.write(\"\\u021d\\2\\u1234\\u1235\\5\\u0451\\u0229\\2\\u1235\\u1236\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u1236\\u1237\\5\\u0459\\u022d\\2\\u1237\\u1238\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u1238\\u1239\\5\\u0457\\u022c\\2\\u1239\\u123a\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u123a\\u123b\\5\\u0445\\u0223\\2\\u123b\\u123c\\5\\u044f\")\n buf.write(\"\\u0228\\2\\u123c\\u123d\\5\\u043d\\u021f\\2\\u123d\\u123e\\5\\u043b\")\n buf.write(\"\\u021e\\2\\u123e\\u0386\\3\\2\\2\\2\\u123f\\u1240\\5\\u0467\\u0234\")\n buf.write(\"\\2\\u1240\\u1241\\5\\u0451\\u0229\\2\\u1241\\u1242\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u1242\\u1243\\5\\u043d\\u021f\\2\\u1243\\u0388\\3\\2\\2\\2\\u1244\")\n buf.write(\"\\u1245\\5\\u0453\\u022a\\2\\u1245\\u1246\\5\\u0457\\u022c\\2\\u1246\")\n buf.write(\"\\u1247\\5\\u043d\\u021f\\2\\u1247\\u1248\\5\\u043b\\u021e\\2\\u1248\")\n buf.write(\"\\u1249\\5\\u0445\\u0223\\2\\u1249\\u124a\\5\\u0439\\u021d\\2\\u124a\")\n buf.write(\"\\u124b\\5\\u045b\\u022e\\2\\u124b\\u124c\\5\\u0445\\u0223\\2\\u124c\")\n 
buf.write(\"\\u124d\\5\\u0451\\u0229\\2\\u124d\\u124e\\5\\u044f\\u0228\\2\\u124e\")\n buf.write(\"\\u038a\\3\\2\\2\\2\\u124f\\u1250\\5\\u0453\\u022a\\2\\u1250\\u1251\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u1251\\u1252\\5\\u043d\\u021f\\2\\u1252\\u1253\")\n buf.write(\"\\5\\u043b\\u021e\\2\\u1253\\u1254\\5\\u0445\\u0223\\2\\u1254\\u1255\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u1255\\u1256\\5\\u045b\\u022e\\2\\u1256\\u1257\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u1257\\u1258\\5\\u0451\\u0229\\2\\u1258\\u1259\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u1259\\u125a\\7a\\2\\2\\u125a\\u125b\\5\\u0437\")\n buf.write(\"\\u021c\\2\\u125b\\u125c\\5\\u0451\\u0229\\2\\u125c\\u125d\\5\\u045d\")\n buf.write(\"\\u022f\\2\\u125d\\u125e\\5\\u044f\\u0228\\2\\u125e\\u125f\\5\\u043b\")\n buf.write(\"\\u021e\\2\\u125f\\u1260\\5\\u0459\\u022d\\2\\u1260\\u038c\\3\\2\\2\")\n buf.write(\"\\2\\u1261\\u1262\\5\\u0453\\u022a\\2\\u1262\\u1263\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u1263\\u1264\\5\\u043d\\u021f\\2\\u1264\\u1265\\5\\u043b\\u021e\")\n buf.write(\"\\2\\u1265\\u1266\\5\\u0445\\u0223\\2\\u1266\\u1267\\5\\u0439\\u021d\")\n buf.write(\"\\2\\u1267\\u1268\\5\\u045b\\u022e\\2\\u1268\\u1269\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u1269\\u126a\\5\\u0451\\u0229\\2\\u126a\\u126b\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u126b\\u126c\\7a\\2\\2\\u126c\\u126d\\5\\u0439\\u021d\\2\\u126d\")\n buf.write(\"\\u126e\\5\\u0451\\u0229\\2\\u126e\\u126f\\5\\u0459\\u022d\\2\\u126f\")\n buf.write(\"\\u1270\\5\\u045b\\u022e\\2\\u1270\\u038e\\3\\2\\2\\2\\u1271\\u1272\")\n buf.write(\"\\5\\u0453\\u022a\\2\\u1272\\u1273\\5\\u0457\\u022c\\2\\u1273\\u1274\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u1274\\u1275\\5\\u043b\\u021e\\2\\u1275\\u1276\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u1276\\u1277\\5\\u0439\\u021d\\2\\u1277\\u1278\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u1278\\u1279\\5\\u0445\\u0223\\2\\u1279\\u127a\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u127a\\u127b\\5\\u044f\\u0228\\2\\u127b\\u127c\")\n 
buf.write(\"\\7a\\2\\2\\u127c\\u127d\\5\\u043b\\u021e\\2\\u127d\\u127e\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u127e\\u127f\\5\\u045b\\u022e\\2\\u127f\\u1280\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u1280\\u1281\\5\\u0445\\u0223\\2\\u1281\\u1282\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u1282\\u1283\\5\\u0459\\u022d\\2\\u1283\\u0390\\3\\2\\2\")\n buf.write(\"\\2\\u1284\\u1285\\5\\u0453\\u022a\\2\\u1285\\u1286\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u1286\\u1287\\5\\u043d\\u021f\\2\\u1287\\u1288\\5\\u043b\\u021e\")\n buf.write(\"\\2\\u1288\\u1289\\5\\u0445\\u0223\\2\\u1289\\u128a\\5\\u0439\\u021d\")\n buf.write(\"\\2\\u128a\\u128b\\5\\u045b\\u022e\\2\\u128b\\u128c\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u128c\\u128d\\5\\u0451\\u0229\\2\\u128d\\u128e\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u128e\\u128f\\7a\\2\\2\\u128f\\u1290\\5\\u0453\\u022a\\2\\u1290\")\n buf.write(\"\\u1291\\5\\u0457\\u022c\\2\\u1291\\u1292\\5\\u0451\\u0229\\2\\u1292\")\n buf.write(\"\\u1293\\5\\u0437\\u021c\\2\\u1293\\u1294\\5\\u0435\\u021b\\2\\u1294\")\n buf.write(\"\\u1295\\5\\u0437\\u021c\\2\\u1295\\u1296\\5\\u0445\\u0223\\2\\u1296\")\n buf.write(\"\\u1297\\5\\u044b\\u0226\\2\\u1297\\u1298\\5\\u0445\\u0223\\2\\u1298\")\n buf.write(\"\\u1299\\5\\u045b\\u022e\\2\\u1299\\u129a\\5\\u0465\\u0233\\2\\u129a\")\n buf.write(\"\\u0392\\3\\2\\2\\2\\u129b\\u129c\\5\\u0453\\u022a\\2\\u129c\\u129d\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u129d\\u129e\\5\\u043d\\u021f\\2\\u129e\\u129f\")\n buf.write(\"\\5\\u043b\\u021e\\2\\u129f\\u12a0\\5\\u0445\\u0223\\2\\u12a0\\u12a1\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u12a1\\u12a2\\5\\u045b\\u022e\\2\\u12a2\\u12a3\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u12a3\\u12a4\\5\\u0451\\u0229\\2\\u12a4\\u12a5\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u12a5\\u12a6\\7a\\2\\2\\u12a6\\u12a7\\5\\u0459\")\n buf.write(\"\\u022d\\2\\u12a7\\u12a8\\5\\u043d\\u021f\\2\\u12a8\\u12a9\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u12a9\\u0394\\3\\2\\2\\2\\u12aa\\u12ab\\5\\u0439\\u021d\")\n 
buf.write(\"\\2\\u12ab\\u12ac\\5\\u045d\\u022f\\2\\u12ac\\u12ad\\5\\u044d\\u0227\")\n buf.write(\"\\2\\u12ad\\u12ae\\5\\u043d\\u021f\\2\\u12ae\\u12af\\7a\\2\\2\\u12af\")\n buf.write(\"\\u12b0\\5\\u043b\\u021e\\2\\u12b0\\u12b1\\5\\u0445\\u0223\\2\\u12b1\")\n buf.write(\"\\u12b2\\5\\u0459\\u022d\\2\\u12b2\\u12b3\\5\\u045b\\u022e\\2\\u12b3\")\n buf.write(\"\\u0396\\3\\2\\2\\2\\u12b4\\u12b5\\5\\u043b\\u021e\\2\\u12b5\\u12b6\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u12b6\\u12b7\\5\\u044f\\u0228\\2\\u12b7\\u12b8\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u12b8\\u12b9\\5\\u043d\\u021f\\2\\u12b9\\u12ba\")\n buf.write(\"\\7a\\2\\2\\u12ba\\u12bb\\5\\u0457\\u022c\\2\\u12bb\\u12bc\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u12bc\\u12bd\\5\\u044f\\u0228\\2\\u12bd\\u12be\\5\\u0449\")\n buf.write(\"\\u0225\\2\\u12be\\u0398\\3\\2\\2\\2\\u12bf\\u12c0\\5\\u044b\\u0226\")\n buf.write(\"\\2\\u12c0\\u12c1\\5\\u0445\\u0223\\2\\u12c1\\u12c2\\5\\u0459\\u022d\")\n buf.write(\"\\2\\u12c2\\u12c3\\5\\u045b\\u022e\\2\\u12c3\\u12c4\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u12c4\\u12c5\\5\\u0441\\u0221\\2\\u12c5\\u12c6\\5\\u0441\\u0221\")\n buf.write(\"\\2\\u12c6\\u039a\\3\\2\\2\\2\\u12c7\\u12c8\\5\\u0453\\u022a\\2\\u12c8\")\n buf.write(\"\\u12c9\\5\\u043d\\u021f\\2\\u12c9\\u12ca\\5\\u0457\\u022c\\2\\u12ca\")\n buf.write(\"\\u12cb\\5\\u0439\\u021d\\2\\u12cb\\u12cc\\5\\u043d\\u021f\\2\\u12cc\")\n buf.write(\"\\u12cd\\5\\u044f\\u0228\\2\\u12cd\\u12ce\\5\\u045b\\u022e\\2\\u12ce\")\n buf.write(\"\\u12cf\\7a\\2\\2\\u12cf\\u12d0\\5\\u0457\\u022c\\2\\u12d0\\u12d1\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u12d1\\u12d2\\5\\u044f\\u0228\\2\\u12d2\\u12d3\")\n buf.write(\"\\5\\u0449\\u0225\\2\\u12d3\\u039c\\3\\2\\2\\2\\u12d4\\u12d5\\5\\u0453\")\n buf.write(\"\\u022a\\2\\u12d5\\u12d6\\5\\u043d\\u021f\\2\\u12d6\\u12d7\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u12d7\\u12d8\\5\\u0439\\u021d\\2\\u12d8\\u12d9\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u12d9\\u12da\\5\\u044f\\u0228\\2\\u12da\\u12db\\5\\u045b\")\n 
buf.write(\"\\u022e\\2\\u12db\\u12dc\\5\\u0445\\u0223\\2\\u12dc\\u12dd\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u12dd\\u12de\\5\\u043d\\u021f\\2\\u12de\\u12df\\7a\\2\")\n buf.write(\"\\2\\u12df\\u12e0\\5\\u0439\\u021d\\2\\u12e0\\u12e1\\5\\u0451\\u0229\")\n buf.write(\"\\2\\u12e1\\u12e2\\5\\u044f\\u0228\\2\\u12e2\\u12e3\\5\\u045b\\u022e\")\n buf.write(\"\\2\\u12e3\\u039e\\3\\2\\2\\2\\u12e4\\u12e5\\5\\u0453\\u022a\\2\\u12e5\")\n buf.write(\"\\u12e6\\5\\u043d\\u021f\\2\\u12e6\\u12e7\\5\\u0457\\u022c\\2\\u12e7\")\n buf.write(\"\\u12e8\\5\\u0439\\u021d\\2\\u12e8\\u12e9\\5\\u043d\\u021f\\2\\u12e9\")\n buf.write(\"\\u12ea\\5\\u044f\\u0228\\2\\u12ea\\u12eb\\5\\u045b\\u022e\\2\\u12eb\")\n buf.write(\"\\u12ec\\5\\u0445\\u0223\\2\\u12ec\\u12ed\\5\\u044b\\u0226\\2\\u12ed\")\n buf.write(\"\\u12ee\\5\\u043d\\u021f\\2\\u12ee\\u12ef\\7a\\2\\2\\u12ef\\u12f0\")\n buf.write(\"\\5\\u043b\\u021e\\2\\u12f0\\u12f1\\5\\u0445\\u0223\\2\\u12f1\\u12f2\")\n buf.write(\"\\5\\u0459\\u022d\\2\\u12f2\\u12f3\\5\\u0439\\u021d\\2\\u12f3\\u03a0\")\n buf.write(\"\\3\\2\\2\\2\\u12f4\\u12f5\\5\\u0457\\u022c\\2\\u12f5\\u12f6\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u12f6\\u12f7\\5\\u044f\\u0228\\2\\u12f7\\u12f8\\5\\u0449\")\n buf.write(\"\\u0225\\2\\u12f8\\u03a2\\3\\2\\2\\2\\u12f9\\u12fa\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u12fa\\u12fb\\5\\u045f\\u0230\\2\\u12fb\\u12fc\\5\\u0441\\u0221\")\n buf.write(\"\\2\\u12fc\\u03a4\\3\\2\\2\\2\\u12fd\\u12fe\\5\\u0439\\u021d\\2\\u12fe\")\n buf.write(\"\\u12ff\\5\\u0451\\u0229\\2\\u12ff\\u1300\\5\\u0457\\u022c\\2\\u1300\")\n buf.write(\"\\u1301\\5\\u0457\\u022c\\2\\u1301\\u03a6\\3\\2\\2\\2\\u1302\\u1303\")\n buf.write(\"\\5\\u044b\\u0226\\2\\u1303\\u1304\\5\\u0435\\u021b\\2\\u1304\\u1305\")\n buf.write(\"\\5\\u0441\\u0221\\2\\u1305\\u03a8\\3\\2\\2\\2\\u1306\\u1307\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u1307\\u1308\\5\\u043d\\u021f\\2\\u1308\\u1309\\5\\u0435\")\n buf.write(\"\\u021b\\2\\u1309\\u130a\\5\\u043b\\u021e\\2\\u130a\\u03aa\\3\\2\\2\")\n 
buf.write(\"\\2\\u130b\\u130c\\5\\u044d\\u0227\\2\\u130c\\u130d\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u130d\\u130e\\5\\u0463\\u0232\\2\\u130e\\u03ac\\3\\2\\2\\2\\u130f\")\n buf.write(\"\\u1310\\5\\u044d\\u0227\\2\\u1310\\u1311\\5\\u043d\\u021f\\2\\u1311\")\n buf.write(\"\\u1312\\5\\u043b\\u021e\\2\\u1312\\u1313\\5\\u0445\\u0223\\2\\u1313\")\n buf.write(\"\\u1314\\5\\u0435\\u021b\\2\\u1314\\u1315\\5\\u044f\\u0228\\2\\u1315\")\n buf.write(\"\\u03ae\\3\\2\\2\\2\\u1316\\u1317\\5\\u044d\\u0227\\2\\u1317\\u1318\")\n buf.write(\"\\5\\u0445\\u0223\\2\\u1318\\u1319\\5\\u044f\\u0228\\2\\u1319\\u03b0\")\n buf.write(\"\\3\\2\\2\\2\\u131a\\u131b\\5\\u044f\\u0228\\2\\u131b\\u131c\\5\\u045b\")\n buf.write(\"\\u022e\\2\\u131c\\u131d\\5\\u0445\\u0223\\2\\u131d\\u131e\\5\\u044b\")\n buf.write(\"\\u0226\\2\\u131e\\u131f\\5\\u043d\\u021f\\2\\u131f\\u03b2\\3\\2\\2\")\n buf.write(\"\\2\\u1320\\u1321\\5\\u0457\\u022c\\2\\u1321\\u1322\\5\\u0435\\u021b\")\n buf.write(\"\\2\\u1322\\u1323\\5\\u045b\\u022e\\2\\u1323\\u1324\\5\\u0445\\u0223\")\n buf.write(\"\\2\\u1324\\u1325\\5\\u0451\\u0229\\2\\u1325\\u1326\\7a\\2\\2\\u1326\")\n buf.write(\"\\u1327\\5\\u045b\\u022e\\2\\u1327\\u1328\\5\\u0451\\u0229\\2\\u1328\")\n buf.write(\"\\u1329\\7a\\2\\2\\u1329\\u132a\\5\\u0457\\u022c\\2\\u132a\\u132b\")\n buf.write(\"\\5\\u043d\\u021f\\2\\u132b\\u132c\\5\\u0453\\u022a\\2\\u132c\\u132d\")\n buf.write(\"\\5\\u0451\\u0229\\2\\u132d\\u132e\\5\\u0457\\u022c\\2\\u132e\\u132f\")\n buf.write(\"\\5\\u045b\\u022e\\2\\u132f\\u03b4\\3\\2\\2\\2\\u1330\\u1331\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u1331\\u1332\\5\\u0451\\u0229\\2\\u1332\\u1333\\5\\u0461\")\n buf.write(\"\\u0231\\2\\u1333\\u1334\\7a\\2\\2\\u1334\\u1335\\5\\u044f\\u0228\")\n buf.write(\"\\2\\u1335\\u1336\\5\\u045d\\u022f\\2\\u1336\\u1337\\5\\u044d\\u0227\")\n buf.write(\"\\2\\u1337\\u1338\\5\\u0437\\u021c\\2\\u1338\\u1339\\5\\u043d\\u021f\")\n buf.write(\"\\2\\u1339\\u133a\\5\\u0457\\u022c\\2\\u133a\\u03b6\\3\\2\\2\\2\\u133b\")\n 
buf.write(\"\\u133c\\5\\u0459\\u022d\\2\\u133c\\u133d\\5\\u045d\\u022f\\2\\u133d\")\n buf.write(\"\\u133e\\5\\u044d\\u0227\\2\\u133e\\u03b8\\3\\2\\2\\2\\u133f\\u1340\")\n buf.write(\"\\5\\u045f\\u0230\\2\\u1340\\u1341\\5\\u0435\\u021b\\2\\u1341\\u1342\")\n buf.write(\"\\5\\u0457\\u022c\\2\\u1342\\u1343\\5\\u0445\\u0223\\2\\u1343\\u1344\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u1344\\u1345\\5\\u044f\\u0228\\2\\u1345\\u1346\")\n buf.write(\"\\5\\u0439\\u021d\\2\\u1346\\u1347\\5\\u043d\\u021f\\2\\u1347\\u03ba\")\n buf.write(\"\\3\\2\\2\\2\\u1348\\u1349\\5\\u0457\\u022c\\2\\u1349\\u134a\\5\\u043d\")\n buf.write(\"\\u021f\\2\\u134a\\u134b\\5\\u0441\\u0221\\2\\u134b\\u134c\\5\\u0457\")\n buf.write(\"\\u022c\\2\\u134c\\u134d\\7a\\2\\2\\u134d\\u03bc\\3\\2\\2\\2\\u134e\")\n buf.write(\"\\u134f\\5\\u0459\\u022d\\2\\u134f\\u1350\\5\\u045b\\u022e\\2\\u1350\")\n buf.write(\"\\u1351\\5\\u043b\\u021e\\2\\u1351\\u1352\\5\\u043b\\u021e\\2\\u1352\")\n buf.write(\"\\u1353\\5\\u043d\\u021f\\2\\u1353\\u1354\\5\\u045f\\u0230\\2\\u1354\")\n buf.write(\"\\u03be\\3\\2\\2\\2\\u1355\\u1356\\5\\u045f\\u0230\\2\\u1356\\u1357\")\n buf.write(\"\\5\\u0435\\u021b\\2\\u1357\\u1358\\5\\u0457\\u022c\\2\\u1358\\u1359\")\n buf.write(\"\\7a\\2\\2\\u1359\\u03c0\\3\\2\\2\\2\\u135a\\u135b\\5\\u0439\\u021d\")\n buf.write(\"\\2\\u135b\\u135c\\5\\u0451\\u0229\\2\\u135c\\u135d\\5\\u045f\\u0230\")\n buf.write(\"\\2\\u135d\\u135e\\5\\u0435\\u021b\\2\\u135e\\u135f\\5\\u0457\\u022c\")\n buf.write(\"\\2\\u135f\\u1360\\7a\\2\\2\\u1360\\u03c2\\3\\2\\2\\2\\u1361\\u1362\")\n buf.write(\"\\5\\u044f\\u0228\\2\\u1362\\u1369\\7)\\2\\2\\u1363\\u1368\\n\\2\\2\")\n buf.write(\"\\2\\u1364\\u1365\\7)\\2\\2\\u1365\\u1368\\7)\\2\\2\\u1366\\u1368\\5\")\n buf.write(\"\\u042d\\u0217\\2\\u1367\\u1363\\3\\2\\2\\2\\u1367\\u1364\\3\\2\\2\\2\")\n buf.write(\"\\u1367\\u1366\\3\\2\\2\\2\\u1368\\u136b\\3\\2\\2\\2\\u1369\\u1367\\3\")\n buf.write(\"\\2\\2\\2\\u1369\\u136a\\3\\2\\2\\2\\u136a\\u136c\\3\\2\\2\\2\\u136b\\u1369\")\n 
buf.write(\"\\3\\2\\2\\2\\u136c\\u136d\\7)\\2\\2\\u136d\\u03c4\\3\\2\\2\\2\\u136e\")\n buf.write(\"\\u1377\\5\\u0437\\u021c\\2\\u136f\\u1373\\7)\\2\\2\\u1370\\u1372\")\n buf.write(\"\\4\\62\\63\\2\\u1371\\u1370\\3\\2\\2\\2\\u1372\\u1375\\3\\2\\2\\2\\u1373\")\n buf.write(\"\\u1371\\3\\2\\2\\2\\u1373\\u1374\\3\\2\\2\\2\\u1374\\u1376\\3\\2\\2\\2\")\n buf.write(\"\\u1375\\u1373\\3\\2\\2\\2\\u1376\\u1378\\7)\\2\\2\\u1377\\u136f\\3\")\n buf.write(\"\\2\\2\\2\\u1378\\u1379\\3\\2\\2\\2\\u1379\\u1377\\3\\2\\2\\2\\u1379\\u137a\")\n buf.write(\"\\3\\2\\2\\2\\u137a\\u03c6\\3\\2\\2\\2\\u137b\\u1384\\5\\u0463\\u0232\")\n buf.write(\"\\2\\u137c\\u1380\\7)\\2\\2\\u137d\\u137f\\t\\3\\2\\2\\u137e\\u137d\")\n buf.write(\"\\3\\2\\2\\2\\u137f\\u1382\\3\\2\\2\\2\\u1380\\u137e\\3\\2\\2\\2\\u1380\")\n buf.write(\"\\u1381\\3\\2\\2\\2\\u1381\\u1383\\3\\2\\2\\2\\u1382\\u1380\\3\\2\\2\\2\")\n buf.write(\"\\u1383\\u1385\\7)\\2\\2\\u1384\\u137c\\3\\2\\2\\2\\u1385\\u1386\\3\")\n buf.write(\"\\2\\2\\2\\u1386\\u1384\\3\\2\\2\\2\\u1386\\u1387\\3\\2\\2\\2\\u1387\\u03c8\")\n buf.write(\"\\3\\2\\2\\2\\u1388\\u1389\\7\\60\\2\\2\\u1389\\u138a\\7\\60\\2\\2\\u138a\")\n buf.write(\"\\u03ca\\3\\2\\2\\2\\u138b\\u138c\\7\\60\\2\\2\\u138c\\u03cc\\3\\2\\2\")\n buf.write(\"\\2\\u138d\\u138e\\5\\u0423\\u0212\\2\\u138e\\u03ce\\3\\2\\2\\2\\u138f\")\n buf.write(\"\\u1398\\5\\u0425\\u0213\\2\\u1390\\u1392\\t\\4\\2\\2\\u1391\\u1393\")\n buf.write(\"\\t\\5\\2\\2\\u1392\\u1391\\3\\2\\2\\2\\u1392\\u1393\\3\\2\\2\\2\\u1393\")\n buf.write(\"\\u1396\\3\\2\\2\\2\\u1394\\u1397\\5\\u0425\\u0213\\2\\u1395\\u1397\")\n buf.write(\"\\5\\u0423\\u0212\\2\\u1396\\u1394\\3\\2\\2\\2\\u1396\\u1395\\3\\2\\2\")\n buf.write(\"\\2\\u1397\\u1399\\3\\2\\2\\2\\u1398\\u1390\\3\\2\\2\\2\\u1398\\u1399\")\n buf.write(\"\\3\\2\\2\\2\\u1399\\u139c\\3\\2\\2\\2\\u139a\\u139d\\5\\u043b\\u021e\")\n buf.write(\"\\2\\u139b\\u139d\\5\\u043f\\u0220\\2\\u139c\\u139a\\3\\2\\2\\2\\u139c\")\n 
buf.write(\"\\u139b\\3\\2\\2\\2\\u139c\\u139d\\3\\2\\2\\2\\u139d\\u03d0\\3\\2\\2\\2\")\n buf.write(\"\\u139e\\u13a5\\7)\\2\\2\\u139f\\u13a4\\n\\2\\2\\2\\u13a0\\u13a1\\7\")\n buf.write(\")\\2\\2\\u13a1\\u13a4\\7)\\2\\2\\u13a2\\u13a4\\5\\u042d\\u0217\\2\\u13a3\")\n buf.write(\"\\u139f\\3\\2\\2\\2\\u13a3\\u13a0\\3\\2\\2\\2\\u13a3\\u13a2\\3\\2\\2\\2\")\n buf.write(\"\\u13a4\\u13a7\\3\\2\\2\\2\\u13a5\\u13a3\\3\\2\\2\\2\\u13a5\\u13a6\\3\")\n buf.write(\"\\2\\2\\2\\u13a6\\u13a8\\3\\2\\2\\2\\u13a7\\u13a5\\3\\2\\2\\2\\u13a8\\u13a9\")\n buf.write(\"\\7)\\2\\2\\u13a9\\u03d2\\3\\2\\2\\2\\u13aa\\u13af\\5\\u0455\\u022b\")\n buf.write(\"\\2\\u13ab\\u13b0\\5\\u03d7\\u01ec\\2\\u13ac\\u13b0\\5\\u03d9\\u01ed\")\n buf.write(\"\\2\\u13ad\\u13b0\\5\\u03db\\u01ee\\2\\u13ae\\u13b0\\5\\u03dd\\u01ef\")\n buf.write(\"\\2\\u13af\\u13ab\\3\\2\\2\\2\\u13af\\u13ac\\3\\2\\2\\2\\u13af\\u13ad\")\n buf.write(\"\\3\\2\\2\\2\\u13af\\u13ae\\3\\2\\2\\2\\u13b0\\u13b1\\3\\2\\2\\2\\u13b1\")\n buf.write(\"\\u13b2\\b\\u01ea\\2\\2\\u13b2\\u03d4\\3\\2\\2\\2\\u13b3\\u13b4\\7)\")\n buf.write(\"\\2\\2\\u13b4\\u03d6\\3\\2\\2\\2\\u13b5\\u13b6\\5\\u03d5\\u01eb\\2\\u13b6\")\n buf.write(\"\\u13ba\\7>\\2\\2\\u13b7\\u13b9\\13\\2\\2\\2\\u13b8\\u13b7\\3\\2\\2\\2\")\n buf.write(\"\\u13b9\\u13bc\\3\\2\\2\\2\\u13ba\\u13bb\\3\\2\\2\\2\\u13ba\\u13b8\\3\")\n buf.write(\"\\2\\2\\2\\u13bb\\u13bd\\3\\2\\2\\2\\u13bc\\u13ba\\3\\2\\2\\2\\u13bd\\u13be\")\n buf.write(\"\\7@\\2\\2\\u13be\\u13bf\\5\\u03d5\\u01eb\\2\\u13bf\\u03d8\\3\\2\\2\")\n buf.write(\"\\2\\u13c0\\u13c1\\5\\u03d5\\u01eb\\2\\u13c1\\u13c5\\7}\\2\\2\\u13c2\")\n buf.write(\"\\u13c4\\13\\2\\2\\2\\u13c3\\u13c2\\3\\2\\2\\2\\u13c4\\u13c7\\3\\2\\2\")\n buf.write(\"\\2\\u13c5\\u13c6\\3\\2\\2\\2\\u13c5\\u13c3\\3\\2\\2\\2\\u13c6\\u13c8\")\n buf.write(\"\\3\\2\\2\\2\\u13c7\\u13c5\\3\\2\\2\\2\\u13c8\\u13c9\\7\\177\\2\\2\\u13c9\")\n buf.write(\"\\u13ca\\5\\u03d5\\u01eb\\2\\u13ca\\u03da\\3\\2\\2\\2\\u13cb\\u13cc\")\n 
buf.write(\"\\5\\u03d5\\u01eb\\2\\u13cc\\u13d0\\7]\\2\\2\\u13cd\\u13cf\\13\\2\\2\")\n buf.write(\"\\2\\u13ce\\u13cd\\3\\2\\2\\2\\u13cf\\u13d2\\3\\2\\2\\2\\u13d0\\u13d1\")\n buf.write(\"\\3\\2\\2\\2\\u13d0\\u13ce\\3\\2\\2\\2\\u13d1\\u13d3\\3\\2\\2\\2\\u13d2\")\n buf.write(\"\\u13d0\\3\\2\\2\\2\\u13d3\\u13d4\\7_\\2\\2\\u13d4\\u13d5\\5\\u03d5\")\n buf.write(\"\\u01eb\\2\\u13d5\\u03dc\\3\\2\\2\\2\\u13d6\\u13d7\\5\\u03d5\\u01eb\")\n buf.write(\"\\2\\u13d7\\u13db\\7*\\2\\2\\u13d8\\u13da\\13\\2\\2\\2\\u13d9\\u13d8\")\n buf.write(\"\\3\\2\\2\\2\\u13da\\u13dd\\3\\2\\2\\2\\u13db\\u13dc\\3\\2\\2\\2\\u13db\")\n buf.write(\"\\u13d9\\3\\2\\2\\2\\u13dc\\u13de\\3\\2\\2\\2\\u13dd\\u13db\\3\\2\\2\\2\")\n buf.write(\"\\u13de\\u13df\\7+\\2\\2\\u13df\\u13e0\\5\\u03d5\\u01eb\\2\\u13e0\")\n buf.write(\"\\u03de\\3\\2\\2\\2\\u13e1\\u13e2\\n\\6\\2\\2\\u13e2\\u03e0\\3\\2\\2\\2\")\n buf.write(\"\\u13e3\\u13e7\\7$\\2\\2\\u13e4\\u13e8\\n\\7\\2\\2\\u13e5\\u13e6\\7\")\n buf.write(\"$\\2\\2\\u13e6\\u13e8\\7$\\2\\2\\u13e7\\u13e4\\3\\2\\2\\2\\u13e7\\u13e5\")\n buf.write(\"\\3\\2\\2\\2\\u13e8\\u13e9\\3\\2\\2\\2\\u13e9\\u13e7\\3\\2\\2\\2\\u13e9\")\n buf.write(\"\\u13ea\\3\\2\\2\\2\\u13ea\\u13eb\\3\\2\\2\\2\\u13eb\\u13ec\\7$\\2\\2\")\n buf.write(\"\\u13ec\\u03e2\\3\\2\\2\\2\\u13ed\\u13ee\\7\\'\\2\\2\\u13ee\\u03e4\\3\")\n buf.write(\"\\2\\2\\2\\u13ef\\u13f0\\7(\\2\\2\\u13f0\\u03e6\\3\\2\\2\\2\\u13f1\\u13f2\")\n buf.write(\"\\7*\\2\\2\\u13f2\\u03e8\\3\\2\\2\\2\\u13f3\\u13f4\\7+\\2\\2\\u13f4\\u03ea\")\n buf.write(\"\\3\\2\\2\\2\\u13f5\\u13f6\\7,\\2\\2\\u13f6\\u13f7\\7,\\2\\2\\u13f7\\u03ec\")\n buf.write(\"\\3\\2\\2\\2\\u13f8\\u13f9\\7,\\2\\2\\u13f9\\u03ee\\3\\2\\2\\2\\u13fa\")\n buf.write(\"\\u13fb\\7-\\2\\2\\u13fb\\u03f0\\3\\2\\2\\2\\u13fc\\u13fd\\7/\\2\\2\\u13fd\")\n buf.write(\"\\u03f2\\3\\2\\2\\2\\u13fe\\u13ff\\7.\\2\\2\\u13ff\\u03f4\\3\\2\\2\\2\")\n buf.write(\"\\u1400\\u1401\\7\\61\\2\\2\\u1401\\u03f6\\3\\2\\2\\2\\u1402\\u1403\")\n 
buf.write(\"\\7B\\2\\2\\u1403\\u03f8\\3\\2\\2\\2\\u1404\\u1405\\7<\\2\\2\\u1405\\u1406\")\n buf.write(\"\\7?\\2\\2\\u1406\\u03fa\\3\\2\\2\\2\\u1407\\u1408\\7<\\2\\2\\u1408\\u140d\")\n buf.write(\"\\5\\u0421\\u0211\\2\\u1409\\u140c\\5\\u0421\\u0211\\2\\u140a\\u140c\")\n buf.write(\"\\t\\b\\2\\2\\u140b\\u1409\\3\\2\\2\\2\\u140b\\u140a\\3\\2\\2\\2\\u140c\")\n buf.write(\"\\u140f\\3\\2\\2\\2\\u140d\\u140b\\3\\2\\2\\2\\u140d\\u140e\\3\\2\\2\\2\")\n buf.write(\"\\u140e\\u1416\\3\\2\\2\\2\\u140f\\u140d\\3\\2\\2\\2\\u1410\\u1411\\7\")\n buf.write(\"<\\2\\2\\u1411\\u1416\\5\\u03e1\\u01f1\\2\\u1412\\u1413\\7<\\2\\2\\u1413\")\n buf.write(\"\\u1416\\5\\u03cd\\u01e7\\2\\u1414\\u1416\\5\\u0411\\u0209\\2\\u1415\")\n buf.write(\"\\u1407\\3\\2\\2\\2\\u1415\\u1410\\3\\2\\2\\2\\u1415\\u1412\\3\\2\\2\\2\")\n buf.write(\"\\u1415\\u1414\\3\\2\\2\\2\\u1416\\u03fc\\3\\2\\2\\2\\u1417\\u1418\\7\")\n buf.write(\"<\\2\\2\\u1418\\u03fe\\3\\2\\2\\2\\u1419\\u141a\\7=\\2\\2\\u141a\\u0400\")\n buf.write(\"\\3\\2\\2\\2\\u141b\\u141c\\7>\\2\\2\\u141c\\u141d\\7?\\2\\2\\u141d\\u0402\")\n buf.write(\"\\3\\2\\2\\2\\u141e\\u141f\\7>\\2\\2\\u141f\\u0404\\3\\2\\2\\2\\u1420\")\n buf.write(\"\\u1421\\7@\\2\\2\\u1421\\u1422\\7?\\2\\2\\u1422\\u0406\\3\\2\\2\\2\\u1423\")\n buf.write(\"\\u1424\\7#\\2\\2\\u1424\\u142c\\7?\\2\\2\\u1425\\u1426\\7>\\2\\2\\u1426\")\n buf.write(\"\\u142c\\7@\\2\\2\\u1427\\u1428\\7`\\2\\2\\u1428\\u142c\\7?\\2\\2\\u1429\")\n buf.write(\"\\u142a\\7\\u0080\\2\\2\\u142a\\u142c\\7?\\2\\2\\u142b\\u1423\\3\\2\")\n buf.write(\"\\2\\2\\u142b\\u1425\\3\\2\\2\\2\\u142b\\u1427\\3\\2\\2\\2\\u142b\\u1429\")\n buf.write(\"\\3\\2\\2\\2\\u142c\\u0408\\3\\2\\2\\2\\u142d\\u142e\\7`\\2\\2\\u142e\")\n buf.write(\"\\u040a\\3\\2\\2\\2\\u142f\\u1430\\7\\u0080\\2\\2\\u1430\\u040c\\3\\2\")\n buf.write(\"\\2\\2\\u1431\\u1432\\7#\\2\\2\\u1432\\u040e\\3\\2\\2\\2\\u1433\\u1434\")\n buf.write(\"\\7@\\2\\2\\u1434\\u0410\\3\\2\\2\\2\\u1435\\u1436\\7A\\2\\2\\u1436\\u0412\")\n 
buf.write(\"\\3\\2\\2\\2\\u1437\\u1438\\7~\\2\\2\\u1438\\u1439\\7~\\2\\2\\u1439\\u0414\")\n buf.write(\"\\3\\2\\2\\2\\u143a\\u143b\\7~\\2\\2\\u143b\\u0416\\3\\2\\2\\2\\u143c\")\n buf.write(\"\\u143d\\7?\\2\\2\\u143d\\u0418\\3\\2\\2\\2\\u143e\\u143f\\7]\\2\\2\\u143f\")\n buf.write(\"\\u041a\\3\\2\\2\\2\\u1440\\u1441\\7_\\2\\2\\u1441\\u041c\\3\\2\\2\\2\")\n buf.write(\"\\u1442\\u1443\\7a\\2\\2\\u1443\\u041e\\3\\2\\2\\2\\u1444\\u1446\\t\")\n buf.write(\"\\t\\2\\2\\u1445\\u1444\\3\\2\\2\\2\\u1446\\u1447\\3\\2\\2\\2\\u1447\\u1445\")\n buf.write(\"\\3\\2\\2\\2\\u1447\\u1448\\3\\2\\2\\2\\u1448\\u1449\\3\\2\\2\\2\\u1449\")\n buf.write(\"\\u144a\\b\\u0210\\3\\2\\u144a\\u0420\\3\\2\\2\\2\\u144b\\u144c\\t\\n\")\n buf.write(\"\\2\\2\\u144c\\u0422\\3\\2\\2\\2\\u144d\\u144f\\4\\62;\\2\\u144e\\u144d\")\n buf.write(\"\\3\\2\\2\\2\\u144f\\u1450\\3\\2\\2\\2\\u1450\\u144e\\3\\2\\2\\2\\u1450\")\n buf.write(\"\\u1451\\3\\2\\2\\2\\u1451\\u0424\\3\\2\\2\\2\\u1452\\u1454\\5\\u03cd\")\n buf.write(\"\\u01e7\\2\\u1453\\u1452\\3\\2\\2\\2\\u1454\\u1457\\3\\2\\2\\2\\u1455\")\n buf.write(\"\\u1453\\3\\2\\2\\2\\u1455\\u1456\\3\\2\\2\\2\\u1456\\u1459\\3\\2\\2\\2\")\n buf.write(\"\\u1457\\u1455\\3\\2\\2\\2\\u1458\\u145a\\7\\60\\2\\2\\u1459\\u1458\")\n buf.write(\"\\3\\2\\2\\2\\u1459\\u145a\\3\\2\\2\\2\\u145a\\u145c\\3\\2\\2\\2\\u145b\")\n buf.write(\"\\u145d\\5\\u03cd\\u01e7\\2\\u145c\\u145b\\3\\2\\2\\2\\u145d\\u145e\")\n buf.write(\"\\3\\2\\2\\2\\u145e\\u145c\\3\\2\\2\\2\\u145e\\u145f\\3\\2\\2\\2\\u145f\")\n buf.write(\"\\u0426\\3\\2\\2\\2\\u1460\\u1461\\7/\\2\\2\\u1461\\u1462\\7/\\2\\2\\u1462\")\n buf.write(\"\\u1466\\3\\2\\2\\2\\u1463\\u1465\\n\\13\\2\\2\\u1464\\u1463\\3\\2\\2\")\n buf.write(\"\\2\\u1465\\u1468\\3\\2\\2\\2\\u1466\\u1464\\3\\2\\2\\2\\u1466\\u1467\")\n buf.write(\"\\3\\2\\2\\2\\u1467\\u146b\\3\\2\\2\\2\\u1468\\u1466\\3\\2\\2\\2\\u1469\")\n buf.write(\"\\u146c\\5\\u042d\\u0217\\2\\u146a\\u146c\\7\\2\\2\\3\\u146b\\u1469\")\n 
buf.write(\"\\3\\2\\2\\2\\u146b\\u146a\\3\\2\\2\\2\\u146c\\u146d\\3\\2\\2\\2\\u146d\")\n buf.write(\"\\u146e\\b\\u0214\\4\\2\\u146e\\u0428\\3\\2\\2\\2\\u146f\\u1470\\7\\61\")\n buf.write(\"\\2\\2\\u1470\\u1471\\7,\\2\\2\\u1471\\u1475\\3\\2\\2\\2\\u1472\\u1474\")\n buf.write(\"\\13\\2\\2\\2\\u1473\\u1472\\3\\2\\2\\2\\u1474\\u1477\\3\\2\\2\\2\\u1475\")\n buf.write(\"\\u1476\\3\\2\\2\\2\\u1475\\u1473\\3\\2\\2\\2\\u1476\\u1478\\3\\2\\2\\2\")\n buf.write(\"\\u1477\\u1475\\3\\2\\2\\2\\u1478\\u1479\\7,\\2\\2\\u1479\\u147a\\7\")\n buf.write(\"\\61\\2\\2\\u147a\\u147b\\3\\2\\2\\2\\u147b\\u147c\\b\\u0215\\4\\2\\u147c\")\n buf.write(\"\\u042a\\3\\2\\2\\2\\u147d\\u147e\\7r\\2\\2\\u147e\\u147f\\7t\\2\\2\\u147f\")\n buf.write(\"\\u1480\\7q\\2\\2\\u1480\\u1481\\7o\\2\\2\\u1481\\u1482\\7r\\2\\2\\u1482\")\n buf.write(\"\\u1483\\7v\\2\\2\\u1483\\u1484\\3\\2\\2\\2\\u1484\\u1488\\5\\u042f\")\n buf.write(\"\\u0218\\2\\u1485\\u1487\\n\\13\\2\\2\\u1486\\u1485\\3\\2\\2\\2\\u1487\")\n buf.write(\"\\u148a\\3\\2\\2\\2\\u1488\\u1486\\3\\2\\2\\2\\u1488\\u1489\\3\\2\\2\\2\")\n buf.write(\"\\u1489\\u148d\\3\\2\\2\\2\\u148a\\u1488\\3\\2\\2\\2\\u148b\\u148e\\5\")\n buf.write(\"\\u042d\\u0217\\2\\u148c\\u148e\\7\\2\\2\\3\\u148d\\u148b\\3\\2\\2\\2\")\n buf.write(\"\\u148d\\u148c\\3\\2\\2\\2\\u148e\\u042c\\3\\2\\2\\2\\u148f\\u1491\\7\")\n buf.write(\"\\17\\2\\2\\u1490\\u148f\\3\\2\\2\\2\\u1490\\u1491\\3\\2\\2\\2\\u1491\")\n buf.write(\"\\u1492\\3\\2\\2\\2\\u1492\\u1493\\7\\f\\2\\2\\u1493\\u042e\\3\\2\\2\\2\")\n buf.write(\"\\u1494\\u1495\\t\\f\\2\\2\\u1495\\u0430\\3\\2\\2\\2\\u1496\\u149b\\5\")\n buf.write(\"\\u0421\\u0211\\2\\u1497\\u149a\\5\\u0421\\u0211\\2\\u1498\\u149a\")\n buf.write(\"\\t\\r\\2\\2\\u1499\\u1497\\3\\2\\2\\2\\u1499\\u1498\\3\\2\\2\\2\\u149a\")\n buf.write(\"\\u149d\\3\\2\\2\\2\\u149b\\u1499\\3\\2\\2\\2\\u149b\\u149c\\3\\2\\2\\2\")\n buf.write(\"\\u149c\\u0432\\3\\2\\2\\2\\u149d\\u149b\\3\\2\\2\\2\\u149e\\u149f\\7\")\n 
buf.write(\"B\\2\\2\\u149f\\u14a0\\7#\\2\\2\\u14a0\\u14a1\\3\\2\\2\\2\\u14a1\\u14a2\")\n buf.write(\"\\b\\u021a\\4\\2\\u14a2\\u0434\\3\\2\\2\\2\\u14a3\\u14a4\\t\\16\\2\\2\")\n buf.write(\"\\u14a4\\u0436\\3\\2\\2\\2\\u14a5\\u14a6\\t\\17\\2\\2\\u14a6\\u0438\")\n buf.write(\"\\3\\2\\2\\2\\u14a7\\u14a8\\t\\20\\2\\2\\u14a8\\u043a\\3\\2\\2\\2\\u14a9\")\n buf.write(\"\\u14aa\\t\\21\\2\\2\\u14aa\\u043c\\3\\2\\2\\2\\u14ab\\u14ac\\t\\4\\2\")\n buf.write(\"\\2\\u14ac\\u043e\\3\\2\\2\\2\\u14ad\\u14ae\\t\\22\\2\\2\\u14ae\\u0440\")\n buf.write(\"\\3\\2\\2\\2\\u14af\\u14b0\\t\\23\\2\\2\\u14b0\\u0442\\3\\2\\2\\2\\u14b1\")\n buf.write(\"\\u14b2\\t\\24\\2\\2\\u14b2\\u0444\\3\\2\\2\\2\\u14b3\\u14b4\\t\\25\\2\")\n buf.write(\"\\2\\u14b4\\u0446\\3\\2\\2\\2\\u14b5\\u14b6\\t\\26\\2\\2\\u14b6\\u0448\")\n buf.write(\"\\3\\2\\2\\2\\u14b7\\u14b8\\t\\27\\2\\2\\u14b8\\u044a\\3\\2\\2\\2\\u14b9\")\n buf.write(\"\\u14ba\\t\\30\\2\\2\\u14ba\\u044c\\3\\2\\2\\2\\u14bb\\u14bc\\t\\31\\2\")\n buf.write(\"\\2\\u14bc\\u044e\\3\\2\\2\\2\\u14bd\\u14be\\t\\32\\2\\2\\u14be\\u0450\")\n buf.write(\"\\3\\2\\2\\2\\u14bf\\u14c0\\t\\33\\2\\2\\u14c0\\u0452\\3\\2\\2\\2\\u14c1\")\n buf.write(\"\\u14c2\\t\\34\\2\\2\\u14c2\\u0454\\3\\2\\2\\2\\u14c3\\u14c4\\t\\35\\2\")\n buf.write(\"\\2\\u14c4\\u0456\\3\\2\\2\\2\\u14c5\\u14c6\\t\\36\\2\\2\\u14c6\\u0458\")\n buf.write(\"\\3\\2\\2\\2\\u14c7\\u14c8\\t\\37\\2\\2\\u14c8\\u045a\\3\\2\\2\\2\\u14c9\")\n buf.write(\"\\u14ca\\t \\2\\2\\u14ca\\u045c\\3\\2\\2\\2\\u14cb\\u14cc\\t!\\2\\2\\u14cc\")\n buf.write(\"\\u045e\\3\\2\\2\\2\\u14cd\\u14ce\\t\\\"\\2\\2\\u14ce\\u0460\\3\\2\\2\\2\")\n buf.write(\"\\u14cf\\u14d0\\t#\\2\\2\\u14d0\\u0462\\3\\2\\2\\2\\u14d1\\u14d2\\t\")\n buf.write(\"$\\2\\2\\u14d2\\u0464\\3\\2\\2\\2\\u14d3\\u14d4\\t%\\2\\2\\u14d4\\u0466\")\n buf.write(\"\\3\\2\\2\\2\\u14d5\\u14d6\\t&\\2\\2\\u14d6\\u0468\\3\\2\\2\\2\\'\\2\\u1367\")\n buf.write(\"\\u1369\\u1373\\u1379\\u1380\\u1386\\u1392\\u1396\\u1398\\u139c\")\n 
buf.write(\"\\u13a3\\u13a5\\u13af\\u13ba\\u13c5\\u13d0\\u13db\\u13e7\\u13e9\")\n buf.write(\"\\u140b\\u140d\\u1415\\u142b\\u1447\\u1450\\u1455\\u1459\\u145e\")\n buf.write(\"\\u1466\\u146b\\u1475\\u1488\\u148d\\u1490\\u1499\\u149b\\5\\t\\u01ea\")\n buf.write(\"\\2\\b\\2\\2\\2\\3\\2\")\n return buf.getvalue()\n\n\nclass PlSqlLexer(Lexer):\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n T__0 = 1\n A_LETTER = 2\n ADD = 3\n AFTER = 4\n AGENT = 5\n AGGREGATE = 6\n ALL = 7\n ALTER = 8\n ANALYZE = 9\n AND = 10\n ANY = 11\n ARRAY = 12\n AS = 13\n ASSUME = 14\n ASSERT = 15\n ASC = 16\n ASSOCIATE = 17\n AT = 18\n ATTRIBUTE = 19\n AUDIT = 20\n AUTHID = 21\n AUTO = 22\n AUTOMATIC = 23\n AUTONOMOUS_TRANSACTION = 24\n BATCH = 25\n BEFORE = 26\n BEGIN = 27\n BETWEEN = 28\n BFILE = 29\n BINARY_DOUBLE = 30\n BINARY_FLOAT = 31\n BINARY_INTEGER = 32\n BLOB = 33\n BLOCK = 34\n BODY = 35\n BOOLEAN = 36\n BOTH = 37\n BREADTH = 38\n BULK = 39\n BY = 40\n BYTE = 41\n C_LETTER = 42\n CACHE = 43\n CALL = 44\n CANONICAL = 45\n CASCADE = 46\n CASE = 47\n CAST = 48\n CHAR = 49\n CHAR_CS = 50\n CHARACTER = 51\n CHECK = 52\n CHR = 53\n CLOB = 54\n CLOSE = 55\n CLUSTER = 56\n COLLECT = 57\n COLUMNS = 58\n COMMENT = 59\n COMMIT = 60\n COMMITTED = 61\n COMPATIBILITY = 62\n COMPILE = 63\n COMPOUND = 64\n CONNECT = 65\n CONNECT_BY_ROOT = 66\n CONSTANT = 67\n CONSTRAINT = 68\n CONSTRAINTS = 69\n CONSTRUCTOR = 70\n CONTENT = 71\n CONTEXT = 72\n CONTINUE = 73\n CONVERT = 74\n CORRUPT_XID = 75\n CORRUPT_XID_ALL = 76\n COST = 77\n COUNT = 78\n CREATE = 79\n CROSS = 80\n CUBE = 81\n CURRENT = 82\n CURRENT_USER = 83\n CURSOR = 84\n CUSTOMDATUM = 85\n CYCLE = 86\n DATA = 87\n DATABASE = 88\n DATE = 89\n DAY = 90\n DB_ROLE_CHANGE = 91\n DBTIMEZONE = 92\n DDL = 93\n DEBUG = 94\n DEC = 95\n DECIMAL = 96\n DECLARE = 97\n DECOMPOSE = 98\n DECREMENT = 99\n DEFAULT = 100\n DEFAULTS = 101\n DEFERRED = 102\n DEFINER = 103\n 
DELETE = 104\n DEPTH = 105\n DESC = 106\n DETERMINISTIC = 107\n DIMENSION = 108\n DISABLE = 109\n DISASSOCIATE = 110\n DISTINCT = 111\n DOCUMENT = 112\n DOUBLE = 113\n DROP = 114\n DSINTERVAL_UNCONSTRAINED = 115\n EACH = 116\n ELEMENT = 117\n ELSE = 118\n ELSIF = 119\n EMPTY = 120\n ENABLE = 121\n ENCODING = 122\n END = 123\n ENTITYESCAPING = 124\n ERR = 125\n ERRORS = 126\n ESCAPE = 127\n EVALNAME = 128\n EXCEPT = 129\n EXCEPTION = 130\n EXCEPTION_INIT = 131\n EXCEPTIONS = 132\n EXCLUDE = 133\n EXCLUSIVE = 134\n EXECUTE = 135\n EXISTS = 136\n EXIT = 137\n EXPLAIN = 138\n EXTERNAL = 139\n EXTRACT = 140\n FAILURE = 141\n FALSE = 142\n FETCH = 143\n FINAL = 144\n FIRST = 145\n FIRST_VALUE = 146\n FLOAT = 147\n FOLLOWING = 148\n FOLLOWS = 149\n FOR = 150\n FORALL = 151\n FORCE = 152\n FROM = 153\n FULL = 154\n FUNCTION = 155\n GOTO = 156\n GRANT = 157\n GROUP = 158\n GROUPING = 159\n HASH = 160\n HAVING = 161\n HIDE = 162\n HOUR = 163\n IF = 164\n IGNORE = 165\n IMMEDIATE = 166\n IN = 167\n INCLUDE = 168\n INCLUDING = 169\n INCREMENT = 170\n INDENT = 171\n INDEX = 172\n INDEXED = 173\n INDICATOR = 174\n INDICES = 175\n INFINITE = 176\n INLINE = 177\n INNER = 178\n INOUT = 179\n INSERT = 180\n INSTANTIABLE = 181\n INSTEAD = 182\n INT = 183\n INTEGER = 184\n INTERSECT = 185\n INTERVAL = 186\n INTO = 187\n INVALIDATE = 188\n IS = 189\n ISOLATION = 190\n ITERATE = 191\n JAVA = 192\n JOIN = 193\n KEEP = 194\n LANGUAGE = 195\n LAST = 196\n LAST_VALUE = 197\n LEADING = 198\n LEFT = 199\n LEVEL = 200\n LIBRARY = 201\n LIKE = 202\n LIKE2 = 203\n LIKE4 = 204\n LIKEC = 205\n LIMIT = 206\n LOCAL = 207\n LOCK = 208\n LOCKED = 209\n LOG = 210\n LOGOFF = 211\n LOGON = 212\n LONG = 213\n LOOP = 214\n MAIN = 215\n MAP = 216\n MATCHED = 217\n MAXVALUE = 218\n MEASURES = 219\n MEMBER = 220\n MERGE = 221\n MINUS = 222\n MINUTE = 223\n MINVALUE = 224\n MLSLABEL = 225\n MODE = 226\n MODEL = 227\n MODIFY = 228\n MONTH = 229\n MULTISET = 230\n NAME = 231\n NAN = 232\n NATURAL = 233\n 
NATURALN = 234\n NAV = 235\n NCHAR = 236\n NCHAR_CS = 237\n NCLOB = 238\n NESTED = 239\n NEW = 240\n NO = 241\n NOAUDIT = 242\n NOCACHE = 243\n NOCOPY = 244\n NOCYCLE = 245\n NOENTITYESCAPING = 246\n NOMAXVALUE = 247\n NOMINVALUE = 248\n NONE = 249\n NOORDER = 250\n NOSCHEMACHECK = 251\n NOT = 252\n NOWAIT = 253\n NULL = 254\n NULLS = 255\n NUMBER = 256\n NUMERIC = 257\n NVARCHAR2 = 258\n OBJECT = 259\n OF = 260\n OFF = 261\n OID = 262\n OLD = 263\n ON = 264\n ONLY = 265\n OPEN = 266\n OPTION = 267\n OR = 268\n ORADATA = 269\n ORDER = 270\n ORDINALITY = 271\n OSERROR = 272\n OUT = 273\n OUTER = 274\n OVER = 275\n OVERRIDING = 276\n PACKAGE = 277\n PARALLEL_ENABLE = 278\n PARAMETERS = 279\n PARENT = 280\n PARTITION = 281\n PASSING = 282\n PATH = 283\n PERCENT_ROWTYPE = 284\n PERCENT_TYPE = 285\n PIPELINED = 286\n PIVOT = 287\n PLAN = 288\n PLS_INTEGER = 289\n POSITIVE = 290\n POSITIVEN = 291\n PRAGMA = 292\n PRECEDING = 293\n PRECISION = 294\n PRESENT = 295\n PRIOR = 296\n PROCEDURE = 297\n RAISE = 298\n RANGE = 299\n RAW = 300\n READ = 301\n REAL = 302\n RECORD = 303\n REF = 304\n REFERENCE = 305\n REFERENCING = 306\n REJECT = 307\n RELIES_ON = 308\n RENAME = 309\n REPLACE = 310\n RESPECT = 311\n RESTRICT_REFERENCES = 312\n RESULT = 313\n RESULT_CACHE = 314\n RETURN = 315\n RETURNING = 316\n REUSE = 317\n REVERSE = 318\n REVOKE = 319\n RIGHT = 320\n ROLLBACK = 321\n ROLLUP = 322\n ROW = 323\n ROWID = 324\n ROWS = 325\n RULES = 326\n SAMPLE = 327\n SAVE = 328\n SAVEPOINT = 329\n SCHEMA = 330\n SCHEMACHECK = 331\n SCN = 332\n SEARCH = 333\n SECOND = 334\n SEED = 335\n SEGMENT = 336\n SELECT = 337\n SELF = 338\n SEQUENCE = 339\n SEQUENTIAL = 340\n SERIALIZABLE = 341\n SERIALLY_REUSABLE = 342\n SERVERERROR = 343\n SESSIONTIMEZONE = 344\n SET = 345\n SETS = 346\n SETTINGS = 347\n SHARE = 348\n SHOW = 349\n SHUTDOWN = 350\n SIBLINGS = 351\n SIGNTYPE = 352\n SIMPLE_INTEGER = 353\n SINGLE = 354\n SIZE = 355\n SKIP_ = 356\n SMALLINT = 357\n SNAPSHOT = 358\n SOME = 359\n 
SPECIFICATION = 360\n SQLDATA = 361\n SQLERROR = 362\n STANDALONE = 363\n START = 364\n STARTUP = 365\n STATEMENT = 366\n STATEMENT_ID = 367\n STATIC = 368\n STATISTICS = 369\n STRING = 370\n SUBMULTISET = 371\n SUBPARTITION = 372\n SUBSTITUTABLE = 373\n SUBTYPE = 374\n SUCCESS = 375\n SUSPEND = 376\n TABLE = 377\n THE = 378\n THEN = 379\n TIME = 380\n TIMESTAMP = 381\n TIMESTAMP_LTZ_UNCONSTRAINED = 382\n TIMESTAMP_TZ_UNCONSTRAINED = 383\n TIMESTAMP_UNCONSTRAINED = 384\n TIMEZONE_ABBR = 385\n TIMEZONE_HOUR = 386\n TIMEZONE_MINUTE = 387\n TIMEZONE_REGION = 388\n TO = 389\n TRAILING = 390\n TRANSACTION = 391\n TRANSLATE = 392\n TREAT = 393\n TRIGGER = 394\n TRIM = 395\n TRUE = 396\n TRUNCATE = 397\n TYPE = 398\n UNBOUNDED = 399\n UNDER = 400\n UNION = 401\n UNIQUE = 402\n UNLIMITED = 403\n UNPIVOT = 404\n UNTIL = 405\n UPDATE = 406\n UPDATED = 407\n UPSERT = 408\n UROWID = 409\n USE = 410\n USING = 411\n VALIDATE = 412\n VALUE = 413\n VALUES = 414\n VARCHAR = 415\n VARCHAR2 = 416\n VARIABLE = 417\n VARRAY = 418\n VARYING = 419\n VERSION = 420\n VERSIONS = 421\n WAIT = 422\n WARNING = 423\n WELLFORMED = 424\n WHEN = 425\n WHENEVER = 426\n WHERE = 427\n WHILE = 428\n WITH = 429\n WITHIN = 430\n WORK = 431\n WRITE = 432\n XML = 433\n XMLAGG = 434\n XMLATTRIBUTES = 435\n XMLCAST = 436\n XMLCOLATTVAL = 437\n XMLELEMENT = 438\n XMLEXISTS = 439\n XMLFOREST = 440\n XMLNAMESPACES = 441\n XMLPARSE = 442\n XMLPI = 443\n XMLQUERY = 444\n XMLROOT = 445\n XMLSERIALIZE = 446\n XMLTABLE = 447\n YEAR = 448\n YES = 449\n YMINTERVAL_UNCONSTRAINED = 450\n ZONE = 451\n PREDICTION = 452\n PREDICTION_BOUNDS = 453\n PREDICTION_COST = 454\n PREDICTION_DETAILS = 455\n PREDICTION_PROBABILITY = 456\n PREDICTION_SET = 457\n CUME_DIST = 458\n DENSE_RANK = 459\n LISTAGG = 460\n PERCENT_RANK = 461\n PERCENTILE_CONT = 462\n PERCENTILE_DISC = 463\n RANK = 464\n AVG = 465\n CORR = 466\n LAG = 467\n LEAD = 468\n MAX = 469\n MEDIAN = 470\n MIN = 471\n NTILE = 472\n RATIO_TO_REPORT = 473\n ROW_NUMBER = 
474\n SUM = 475\n VARIANCE = 476\n REGR_ = 477\n STDDEV = 478\n VAR_ = 479\n COVAR_ = 480\n NATIONAL_CHAR_STRING_LIT = 481\n BIT_STRING_LIT = 482\n HEX_STRING_LIT = 483\n DOUBLE_PERIOD = 484\n PERIOD = 485\n UNSIGNED_INTEGER = 486\n APPROXIMATE_NUM_LIT = 487\n CHAR_STRING = 488\n DELIMITED_ID = 489\n PERCENT = 490\n AMPERSAND = 491\n LEFT_PAREN = 492\n RIGHT_PAREN = 493\n DOUBLE_ASTERISK = 494\n ASTERISK = 495\n PLUS_SIGN = 496\n MINUS_SIGN = 497\n COMMA = 498\n SOLIDUS = 499\n AT_SIGN = 500\n ASSIGN_OP = 501\n BINDVAR = 502\n COLON = 503\n SEMICOLON = 504\n LESS_THAN_OR_EQUALS_OP = 505\n LESS_THAN_OP = 506\n GREATER_THAN_OR_EQUALS_OP = 507\n NOT_EQUAL_OP = 508\n CARRET_OPERATOR_PART = 509\n TILDE_OPERATOR_PART = 510\n EXCLAMATION_OPERATOR_PART = 511\n GREATER_THAN_OP = 512\n CONCATENATION_OP = 513\n VERTICAL_BAR = 514\n EQUALS_OP = 515\n LEFT_BRACKET = 516\n RIGHT_BRACKET = 517\n INTRODUCER = 518\n SPACES = 519\n SINGLE_LINE_COMMENT = 520\n MULTI_LINE_COMMENT = 521\n PROMPT = 522\n REGULAR_ID = 523\n ZV = 524\n\n channelNames = [ u\"DEFAULT_TOKEN_CHANNEL\", u\"HIDDEN\" ]\n\n modeNames = [ \"DEFAULT_MODE\" ]\n\n literalNames = [ \"<INVALID>\",\n \"'..'\", \"'.'\", \"'%'\", \"'&'\", \"'('\", \"')'\", \"'**'\", \"'*'\", \"'+'\", \n \"'-'\", \"','\", \"'/'\", \"'@'\", \"':='\", \"':'\", \"';'\", \"'<='\", \"'<'\", \n \"'>='\", \"'^'\", \"'~'\", \"'!'\", \"'>'\", \"'||'\", \"'|'\", \"'='\", \"'['\", \n \"']'\", \"'_'\", \"'@!'\" ]\n\n symbolicNames = [ \"<INVALID>\",\n \"A_LETTER\", \"ADD\", \"AFTER\", \"AGENT\", \"AGGREGATE\", \"ALL\", \"ALTER\", \n \"ANALYZE\", \"AND\", \"ANY\", \"ARRAY\", \"AS\", \"ASSUME\", \"ASSERT\", \n \"ASC\", \"ASSOCIATE\", \"AT\", \"ATTRIBUTE\", \"AUDIT\", \"AUTHID\", \"AUTO\", \n \"AUTOMATIC\", \"AUTONOMOUS_TRANSACTION\", \"BATCH\", \"BEFORE\", \"BEGIN\", \n \"BETWEEN\", \"BFILE\", \"BINARY_DOUBLE\", \"BINARY_FLOAT\", \"BINARY_INTEGER\", \n \"BLOB\", \"BLOCK\", \"BODY\", \"BOOLEAN\", \"BOTH\", \"BREADTH\", \"BULK\", \n \"BY\", \"BYTE\", 
\"C_LETTER\", \"CACHE\", \"CALL\", \"CANONICAL\", \"CASCADE\", \n \"CASE\", \"CAST\", \"CHAR\", \"CHAR_CS\", \"CHARACTER\", \"CHECK\", \"CHR\", \n \"CLOB\", \"CLOSE\", \"CLUSTER\", \"COLLECT\", \"COLUMNS\", \"COMMENT\", \n \"COMMIT\", \"COMMITTED\", \"COMPATIBILITY\", \"COMPILE\", \"COMPOUND\", \n \"CONNECT\", \"CONNECT_BY_ROOT\", \"CONSTANT\", \"CONSTRAINT\", \"CONSTRAINTS\", \n \"CONSTRUCTOR\", \"CONTENT\", \"CONTEXT\", \"CONTINUE\", \"CONVERT\", \n \"CORRUPT_XID\", \"CORRUPT_XID_ALL\", \"COST\", \"COUNT\", \"CREATE\", \n \"CROSS\", \"CUBE\", \"CURRENT\", \"CURRENT_USER\", \"CURSOR\", \"CUSTOMDATUM\", \n \"CYCLE\", \"DATA\", \"DATABASE\", \"DATE\", \"DAY\", \"DB_ROLE_CHANGE\", \n \"DBTIMEZONE\", \"DDL\", \"DEBUG\", \"DEC\", \"DECIMAL\", \"DECLARE\", \"DECOMPOSE\", \n \"DECREMENT\", \"DEFAULT\", \"DEFAULTS\", \"DEFERRED\", \"DEFINER\", \"DELETE\", \n \"DEPTH\", \"DESC\", \"DETERMINISTIC\", \"DIMENSION\", \"DISABLE\", \"DISASSOCIATE\", \n \"DISTINCT\", \"DOCUMENT\", \"DOUBLE\", \"DROP\", \"DSINTERVAL_UNCONSTRAINED\", \n \"EACH\", \"ELEMENT\", \"ELSE\", \"ELSIF\", \"EMPTY\", \"ENABLE\", \"ENCODING\", \n \"END\", \"ENTITYESCAPING\", \"ERR\", \"ERRORS\", \"ESCAPE\", \"EVALNAME\", \n \"EXCEPT\", \"EXCEPTION\", \"EXCEPTION_INIT\", \"EXCEPTIONS\", \"EXCLUDE\", \n \"EXCLUSIVE\", \"EXECUTE\", \"EXISTS\", \"EXIT\", \"EXPLAIN\", \"EXTERNAL\", \n \"EXTRACT\", \"FAILURE\", \"FALSE\", \"FETCH\", \"FINAL\", \"FIRST\", \"FIRST_VALUE\", \n \"FLOAT\", \"FOLLOWING\", \"FOLLOWS\", \"FOR\", \"FORALL\", \"FORCE\", \"FROM\", \n \"FULL\", \"FUNCTION\", \"GOTO\", \"GRANT\", \"GROUP\", \"GROUPING\", \"HASH\", \n \"HAVING\", \"HIDE\", \"HOUR\", \"IF\", \"IGNORE\", \"IMMEDIATE\", \"IN\", \n \"INCLUDE\", \"INCLUDING\", \"INCREMENT\", \"INDENT\", \"INDEX\", \"INDEXED\", \n \"INDICATOR\", \"INDICES\", \"INFINITE\", \"INLINE\", \"INNER\", \"INOUT\", \n \"INSERT\", \"INSTANTIABLE\", \"INSTEAD\", \"INT\", \"INTEGER\", \"INTERSECT\", \n \"INTERVAL\", \"INTO\", \"INVALIDATE\", \"IS\", \"ISOLATION\", 
\"ITERATE\", \n \"JAVA\", \"JOIN\", \"KEEP\", \"LANGUAGE\", \"LAST\", \"LAST_VALUE\", \"LEADING\", \n \"LEFT\", \"LEVEL\", \"LIBRARY\", \"LIKE\", \"LIKE2\", \"LIKE4\", \"LIKEC\", \n \"LIMIT\", \"LOCAL\", \"LOCK\", \"LOCKED\", \"LOG\", \"LOGOFF\", \"LOGON\", \n \"LONG\", \"LOOP\", \"MAIN\", \"MAP\", \"MATCHED\", \"MAXVALUE\", \"MEASURES\", \n \"MEMBER\", \"MERGE\", \"MINUS\", \"MINUTE\", \"MINVALUE\", \"MLSLABEL\", \n \"MODE\", \"MODEL\", \"MODIFY\", \"MONTH\", \"MULTISET\", \"NAME\", \"NAN\", \n \"NATURAL\", \"NATURALN\", \"NAV\", \"NCHAR\", \"NCHAR_CS\", \"NCLOB\", \n \"NESTED\", \"NEW\", \"NO\", \"NOAUDIT\", \"NOCACHE\", \"NOCOPY\", \"NOCYCLE\", \n \"NOENTITYESCAPING\", \"NOMAXVALUE\", \"NOMINVALUE\", \"NONE\", \"NOORDER\", \n \"NOSCHEMACHECK\", \"NOT\", \"NOWAIT\", \"NULL\", \"NULLS\", \"NUMBER\", \n \"NUMERIC\", \"NVARCHAR2\", \"OBJECT\", \"OF\", \"OFF\", \"OID\", \"OLD\", \n \"ON\", \"ONLY\", \"OPEN\", \"OPTION\", \"OR\", \"ORADATA\", \"ORDER\", \"ORDINALITY\", \n \"OSERROR\", \"OUT\", \"OUTER\", \"OVER\", \"OVERRIDING\", \"PACKAGE\", \n \"PARALLEL_ENABLE\", \"PARAMETERS\", \"PARENT\", \"PARTITION\", \"PASSING\", \n \"PATH\", \"PERCENT_ROWTYPE\", \"PERCENT_TYPE\", \"PIPELINED\", \"PIVOT\", \n \"PLAN\", \"PLS_INTEGER\", \"POSITIVE\", \"POSITIVEN\", \"PRAGMA\", \"PRECEDING\", \n \"PRECISION\", \"PRESENT\", \"PRIOR\", \"PROCEDURE\", \"RAISE\", \"RANGE\", \n \"RAW\", \"READ\", \"REAL\", \"RECORD\", \"REF\", \"REFERENCE\", \"REFERENCING\", \n \"REJECT\", \"RELIES_ON\", \"RENAME\", \"REPLACE\", \"RESPECT\", \"RESTRICT_REFERENCES\", \n \"RESULT\", \"RESULT_CACHE\", \"RETURN\", \"RETURNING\", \"REUSE\", \"REVERSE\", \n \"REVOKE\", \"RIGHT\", \"ROLLBACK\", \"ROLLUP\", \"ROW\", \"ROWID\", \"ROWS\", \n \"RULES\", \"SAMPLE\", \"SAVE\", \"SAVEPOINT\", \"SCHEMA\", \"SCHEMACHECK\", \n \"SCN\", \"SEARCH\", \"SECOND\", \"SEED\", \"SEGMENT\", \"SELECT\", \"SELF\", \n \"SEQUENCE\", \"SEQUENTIAL\", \"SERIALIZABLE\", \"SERIALLY_REUSABLE\", \n \"SERVERERROR\", \"SESSIONTIMEZONE\", 
\"SET\", \"SETS\", \"SETTINGS\", \n \"SHARE\", \"SHOW\", \"SHUTDOWN\", \"SIBLINGS\", \"SIGNTYPE\", \"SIMPLE_INTEGER\", \n \"SINGLE\", \"SIZE\", \"SKIP_\", \"SMALLINT\", \"SNAPSHOT\", \"SOME\", \"SPECIFICATION\", \n \"SQLDATA\", \"SQLERROR\", \"STANDALONE\", \"START\", \"STARTUP\", \"STATEMENT\", \n \"STATEMENT_ID\", \"STATIC\", \"STATISTICS\", \"STRING\", \"SUBMULTISET\", \n \"SUBPARTITION\", \"SUBSTITUTABLE\", \"SUBTYPE\", \"SUCCESS\", \"SUSPEND\", \n \"TABLE\", \"THE\", \"THEN\", \"TIME\", \"TIMESTAMP\", \"TIMESTAMP_LTZ_UNCONSTRAINED\", \n \"TIMESTAMP_TZ_UNCONSTRAINED\", \"TIMESTAMP_UNCONSTRAINED\", \"TIMEZONE_ABBR\", \n \"TIMEZONE_HOUR\", \"TIMEZONE_MINUTE\", \"TIMEZONE_REGION\", \"TO\", \n \"TRAILING\", \"TRANSACTION\", \"TRANSLATE\", \"TREAT\", \"TRIGGER\", \n \"TRIM\", \"TRUE\", \"TRUNCATE\", \"TYPE\", \"UNBOUNDED\", \"UNDER\", \"UNION\", \n \"UNIQUE\", \"UNLIMITED\", \"UNPIVOT\", \"UNTIL\", \"UPDATE\", \"UPDATED\", \n \"UPSERT\", \"UROWID\", \"USE\", \"USING\", \"VALIDATE\", \"VALUE\", \"VALUES\", \n \"VARCHAR\", \"VARCHAR2\", \"VARIABLE\", \"VARRAY\", \"VARYING\", \"VERSION\", \n \"VERSIONS\", \"WAIT\", \"WARNING\", \"WELLFORMED\", \"WHEN\", \"WHENEVER\", \n \"WHERE\", \"WHILE\", \"WITH\", \"WITHIN\", \"WORK\", \"WRITE\", \"XML\", \n \"XMLAGG\", \"XMLATTRIBUTES\", \"XMLCAST\", \"XMLCOLATTVAL\", \"XMLELEMENT\", \n \"XMLEXISTS\", \"XMLFOREST\", \"XMLNAMESPACES\", \"XMLPARSE\", \"XMLPI\", \n \"XMLQUERY\", \"XMLROOT\", \"XMLSERIALIZE\", \"XMLTABLE\", \"YEAR\", \"YES\", \n \"YMINTERVAL_UNCONSTRAINED\", \"ZONE\", \"PREDICTION\", \"PREDICTION_BOUNDS\", \n \"PREDICTION_COST\", \"PREDICTION_DETAILS\", \"PREDICTION_PROBABILITY\", \n \"PREDICTION_SET\", \"CUME_DIST\", \"DENSE_RANK\", \"LISTAGG\", \"PERCENT_RANK\", \n \"PERCENTILE_CONT\", \"PERCENTILE_DISC\", \"RANK\", \"AVG\", \"CORR\", \n \"LAG\", \"LEAD\", \"MAX\", \"MEDIAN\", \"MIN\", \"NTILE\", \"RATIO_TO_REPORT\", \n \"ROW_NUMBER\", \"SUM\", \"VARIANCE\", \"REGR_\", \"STDDEV\", \"VAR_\", \n \"COVAR_\", 
\"NATIONAL_CHAR_STRING_LIT\", \"BIT_STRING_LIT\", \"HEX_STRING_LIT\", \n \"DOUBLE_PERIOD\", \"PERIOD\", \"UNSIGNED_INTEGER\", \"APPROXIMATE_NUM_LIT\", \n \"CHAR_STRING\", \"DELIMITED_ID\", \"PERCENT\", \"AMPERSAND\", \"LEFT_PAREN\", \n \"RIGHT_PAREN\", \"DOUBLE_ASTERISK\", \"ASTERISK\", \"PLUS_SIGN\", \"MINUS_SIGN\", \n \"COMMA\", \"SOLIDUS\", \"AT_SIGN\", \"ASSIGN_OP\", \"BINDVAR\", \"COLON\", \n \"SEMICOLON\", \"LESS_THAN_OR_EQUALS_OP\", \"LESS_THAN_OP\", \"GREATER_THAN_OR_EQUALS_OP\", \n \"NOT_EQUAL_OP\", \"CARRET_OPERATOR_PART\", \"TILDE_OPERATOR_PART\", \n \"EXCLAMATION_OPERATOR_PART\", \"GREATER_THAN_OP\", \"CONCATENATION_OP\", \n \"VERTICAL_BAR\", \"EQUALS_OP\", \"LEFT_BRACKET\", \"RIGHT_BRACKET\", \n \"INTRODUCER\", \"SPACES\", \"SINGLE_LINE_COMMENT\", \"MULTI_LINE_COMMENT\", \n \"PROMPT\", \"REGULAR_ID\", \"ZV\" ]\n\n ruleNames = [ \"T__0\", \"A_LETTER\", \"ADD\", \"AFTER\", \"AGENT\", \"AGGREGATE\", \n \"ALL\", \"ALTER\", \"ANALYZE\", \"AND\", \"ANY\", \"ARRAY\", \"AS\", \n \"ASSUME\", \"ASSERT\", \"ASC\", \"ASSOCIATE\", \"AT\", \"ATTRIBUTE\", \n \"AUDIT\", \"AUTHID\", \"AUTO\", \"AUTOMATIC\", \"AUTONOMOUS_TRANSACTION\", \n \"BATCH\", \"BEFORE\", \"BEGIN\", \"BETWEEN\", \"BFILE\", \"BINARY_DOUBLE\", \n \"BINARY_FLOAT\", \"BINARY_INTEGER\", \"BLOB\", \"BLOCK\", \"BODY\", \n \"BOOLEAN\", \"BOTH\", \"BREADTH\", \"BULK\", \"BY\", \"BYTE\", \"C_LETTER\", \n \"CACHE\", \"CALL\", \"CANONICAL\", \"CASCADE\", \"CASE\", \"CAST\", \n \"CHAR\", \"CHAR_CS\", \"CHARACTER\", \"CHECK\", \"CHR\", \"CLOB\", \n \"CLOSE\", \"CLUSTER\", \"COLLECT\", \"COLUMNS\", \"COMMENT\", \"COMMIT\", \n \"COMMITTED\", \"COMPATIBILITY\", \"COMPILE\", \"COMPOUND\", \"CONNECT\", \n \"CONNECT_BY_ROOT\", \"CONSTANT\", \"CONSTRAINT\", \"CONSTRAINTS\", \n \"CONSTRUCTOR\", \"CONTENT\", \"CONTEXT\", \"CONTINUE\", \"CONVERT\", \n \"CORRUPT_XID\", \"CORRUPT_XID_ALL\", \"COST\", \"COUNT\", \"CREATE\", \n \"CROSS\", \"CUBE\", \"CURRENT\", \"CURRENT_USER\", \"CURSOR\", \n \"CUSTOMDATUM\", \"CYCLE\", 
\"DATA\", \"DATABASE\", \"DATE\", \"DAY\", \n \"DB_ROLE_CHANGE\", \"DBTIMEZONE\", \"DDL\", \"DEBUG\", \"DEC\", \n \"DECIMAL\", \"DECLARE\", \"DECOMPOSE\", \"DECREMENT\", \"DEFAULT\", \n \"DEFAULTS\", \"DEFERRED\", \"DEFINER\", \"DELETE\", \"DEPTH\", \n \"DESC\", \"DETERMINISTIC\", \"DIMENSION\", \"DISABLE\", \"DISASSOCIATE\", \n \"DISTINCT\", \"DOCUMENT\", \"DOUBLE\", \"DROP\", \"DSINTERVAL_UNCONSTRAINED\", \n \"EACH\", \"ELEMENT\", \"ELSE\", \"ELSIF\", \"EMPTY\", \"ENABLE\", \n \"ENCODING\", \"END\", \"ENTITYESCAPING\", \"ERR\", \"ERRORS\", \n \"ESCAPE\", \"EVALNAME\", \"EXCEPT\", \"EXCEPTION\", \"EXCEPTION_INIT\", \n \"EXCEPTIONS\", \"EXCLUDE\", \"EXCLUSIVE\", \"EXECUTE\", \"EXISTS\", \n \"EXIT\", \"EXPLAIN\", \"EXTERNAL\", \"EXTRACT\", \"FAILURE\", \"FALSE\", \n \"FETCH\", \"FINAL\", \"FIRST\", \"FIRST_VALUE\", \"FLOAT\", \"FOLLOWING\", \n \"FOLLOWS\", \"FOR\", \"FORALL\", \"FORCE\", \"FROM\", \"FULL\", \"FUNCTION\", \n \"GOTO\", \"GRANT\", \"GROUP\", \"GROUPING\", \"HASH\", \"HAVING\", \n \"HIDE\", \"HOUR\", \"IF\", \"IGNORE\", \"IMMEDIATE\", \"IN\", \"INCLUDE\", \n \"INCLUDING\", \"INCREMENT\", \"INDENT\", \"INDEX\", \"INDEXED\", \n \"INDICATOR\", \"INDICES\", \"INFINITE\", \"INLINE\", \"INNER\", \n \"INOUT\", \"INSERT\", \"INSTANTIABLE\", \"INSTEAD\", \"INT\", \"INTEGER\", \n \"INTERSECT\", \"INTERVAL\", \"INTO\", \"INVALIDATE\", \"IS\", \"ISOLATION\", \n \"ITERATE\", \"JAVA\", \"JOIN\", \"KEEP\", \"LANGUAGE\", \"LAST\", \n \"LAST_VALUE\", \"LEADING\", \"LEFT\", \"LEVEL\", \"LIBRARY\", \"LIKE\", \n \"LIKE2\", \"LIKE4\", \"LIKEC\", \"LIMIT\", \"LOCAL\", \"LOCK\", \"LOCKED\", \n \"LOG\", \"LOGOFF\", \"LOGON\", \"LONG\", \"LOOP\", \"MAIN\", \"MAP\", \n \"MATCHED\", \"MAXVALUE\", \"MEASURES\", \"MEMBER\", \"MERGE\", \n \"MINUS\", \"MINUTE\", \"MINVALUE\", \"MLSLABEL\", \"MODE\", \"MODEL\", \n \"MODIFY\", \"MONTH\", \"MULTISET\", \"NAME\", \"NAN\", \"NATURAL\", \n \"NATURALN\", \"NAV\", \"NCHAR\", \"NCHAR_CS\", \"NCLOB\", \"NESTED\", \n \"NEW\", \"NO\", 
\"NOAUDIT\", \"NOCACHE\", \"NOCOPY\", \"NOCYCLE\", \n \"NOENTITYESCAPING\", \"NOMAXVALUE\", \"NOMINVALUE\", \"NONE\", \n \"NOORDER\", \"NOSCHEMACHECK\", \"NOT\", \"NOWAIT\", \"NULL\", \"NULLS\", \n \"NUMBER\", \"NUMERIC\", \"NVARCHAR2\", \"OBJECT\", \"OF\", \"OFF\", \n \"OID\", \"OLD\", \"ON\", \"ONLY\", \"OPEN\", \"OPTION\", \"OR\", \"ORADATA\", \n \"ORDER\", \"ORDINALITY\", \"OSERROR\", \"OUT\", \"OUTER\", \"OVER\", \n \"OVERRIDING\", \"PACKAGE\", \"PARALLEL_ENABLE\", \"PARAMETERS\", \n \"PARENT\", \"PARTITION\", \"PASSING\", \"PATH\", \"PERCENT_ROWTYPE\", \n \"PERCENT_TYPE\", \"PIPELINED\", \"PIVOT\", \"PLAN\", \"PLS_INTEGER\", \n \"POSITIVE\", \"POSITIVEN\", \"PRAGMA\", \"PRECEDING\", \"PRECISION\", \n \"PRESENT\", \"PRIOR\", \"PROCEDURE\", \"RAISE\", \"RANGE\", \"RAW\", \n \"READ\", \"REAL\", \"RECORD\", \"REF\", \"REFERENCE\", \"REFERENCING\", \n \"REJECT\", \"RELIES_ON\", \"RENAME\", \"REPLACE\", \"RESPECT\", \n \"RESTRICT_REFERENCES\", \"RESULT\", \"RESULT_CACHE\", \"RETURN\", \n \"RETURNING\", \"REUSE\", \"REVERSE\", \"REVOKE\", \"RIGHT\", \"ROLLBACK\", \n \"ROLLUP\", \"ROW\", \"ROWID\", \"ROWS\", \"RULES\", \"SAMPLE\", \"SAVE\", \n \"SAVEPOINT\", \"SCHEMA\", \"SCHEMACHECK\", \"SCN\", \"SEARCH\", \n \"SECOND\", \"SEED\", \"SEGMENT\", \"SELECT\", \"SELF\", \"SEQUENCE\", \n \"SEQUENTIAL\", \"SERIALIZABLE\", \"SERIALLY_REUSABLE\", \"SERVERERROR\", \n \"SESSIONTIMEZONE\", \"SET\", \"SETS\", \"SETTINGS\", \"SHARE\", \n \"SHOW\", \"SHUTDOWN\", \"SIBLINGS\", \"SIGNTYPE\", \"SIMPLE_INTEGER\", \n \"SINGLE\", \"SIZE\", \"SKIP_\", \"SMALLINT\", \"SNAPSHOT\", \"SOME\", \n \"SPECIFICATION\", \"SQLDATA\", \"SQLERROR\", \"STANDALONE\", \n \"START\", \"STARTUP\", \"STATEMENT\", \"STATEMENT_ID\", \"STATIC\", \n \"STATISTICS\", \"STRING\", \"SUBMULTISET\", \"SUBPARTITION\", \n \"SUBSTITUTABLE\", \"SUBTYPE\", \"SUCCESS\", \"SUSPEND\", \"TABLE\", \n \"THE\", \"THEN\", \"TIME\", \"TIMESTAMP\", \"TIMESTAMP_LTZ_UNCONSTRAINED\", \n \"TIMESTAMP_TZ_UNCONSTRAINED\", 
\"TIMESTAMP_UNCONSTRAINED\", \n \"TIMEZONE_ABBR\", \"TIMEZONE_HOUR\", \"TIMEZONE_MINUTE\", \"TIMEZONE_REGION\", \n \"TO\", \"TRAILING\", \"TRANSACTION\", \"TRANSLATE\", \"TREAT\", \n \"TRIGGER\", \"TRIM\", \"TRUE\", \"TRUNCATE\", \"TYPE\", \"UNBOUNDED\", \n \"UNDER\", \"UNION\", \"UNIQUE\", \"UNLIMITED\", \"UNPIVOT\", \"UNTIL\", \n \"UPDATE\", \"UPDATED\", \"UPSERT\", \"UROWID\", \"USE\", \"USING\", \n \"VALIDATE\", \"VALUE\", \"VALUES\", \"VARCHAR\", \"VARCHAR2\", \n \"VARIABLE\", \"VARRAY\", \"VARYING\", \"VERSION\", \"VERSIONS\", \n \"WAIT\", \"WARNING\", \"WELLFORMED\", \"WHEN\", \"WHENEVER\", \"WHERE\", \n \"WHILE\", \"WITH\", \"WITHIN\", \"WORK\", \"WRITE\", \"XML\", \"XMLAGG\", \n \"XMLATTRIBUTES\", \"XMLCAST\", \"XMLCOLATTVAL\", \"XMLELEMENT\", \n \"XMLEXISTS\", \"XMLFOREST\", \"XMLNAMESPACES\", \"XMLPARSE\", \n \"XMLPI\", \"XMLQUERY\", \"XMLROOT\", \"XMLSERIALIZE\", \"XMLTABLE\", \n \"YEAR\", \"YES\", \"YMINTERVAL_UNCONSTRAINED\", \"ZONE\", \"PREDICTION\", \n \"PREDICTION_BOUNDS\", \"PREDICTION_COST\", \"PREDICTION_DETAILS\", \n \"PREDICTION_PROBABILITY\", \"PREDICTION_SET\", \"CUME_DIST\", \n \"DENSE_RANK\", \"LISTAGG\", \"PERCENT_RANK\", \"PERCENTILE_CONT\", \n \"PERCENTILE_DISC\", \"RANK\", \"AVG\", \"CORR\", \"LAG\", \"LEAD\", \n \"MAX\", \"MEDIAN\", \"MIN\", \"NTILE\", \"RATIO_TO_REPORT\", \"ROW_NUMBER\", \n \"SUM\", \"VARIANCE\", \"REGR_\", \"STDDEV\", \"VAR_\", \"COVAR_\", \n \"NATIONAL_CHAR_STRING_LIT\", \"BIT_STRING_LIT\", \"HEX_STRING_LIT\", \n \"DOUBLE_PERIOD\", \"PERIOD\", \"UNSIGNED_INTEGER\", \"APPROXIMATE_NUM_LIT\", \n \"CHAR_STRING\", \"CHAR_STRING_PERL\", \"QUOTE\", \"QS_ANGLE\", \n \"QS_BRACE\", \"QS_BRACK\", \"QS_PAREN\", \"QS_OTHER_CH\", \"DELIMITED_ID\", \n \"PERCENT\", \"AMPERSAND\", \"LEFT_PAREN\", \"RIGHT_PAREN\", \"DOUBLE_ASTERISK\", \n \"ASTERISK\", \"PLUS_SIGN\", \"MINUS_SIGN\", \"COMMA\", \"SOLIDUS\", \n \"AT_SIGN\", \"ASSIGN_OP\", \"BINDVAR\", \"COLON\", \"SEMICOLON\", \n \"LESS_THAN_OR_EQUALS_OP\", \"LESS_THAN_OP\", 
\"GREATER_THAN_OR_EQUALS_OP\", \n \"NOT_EQUAL_OP\", \"CARRET_OPERATOR_PART\", \"TILDE_OPERATOR_PART\", \n \"EXCLAMATION_OPERATOR_PART\", \"GREATER_THAN_OP\", \"QUESTION_MARK\", \n \"CONCATENATION_OP\", \"VERTICAL_BAR\", \"EQUALS_OP\", \"LEFT_BRACKET\", \n \"RIGHT_BRACKET\", \"INTRODUCER\", \"SPACES\", \"SIMPLE_LETTER\", \n \"UNSIGNED_INTEGER_FRAGMENT\", \"FLOAT_FRAGMENT\", \"SINGLE_LINE_COMMENT\", \n \"MULTI_LINE_COMMENT\", \"PROMPT\", \"NEWLINE\", \"SPACE\", \"REGULAR_ID\", \n \"ZV\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \n \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \n \"V\", \"W\", \"X\", \"Y\", \"Z\" ]\n\n grammarFileName = \"PlSql.g4\"\n\n def __init__(self, input=None, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.7.2\")\n self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())\n self._actions = None\n self._predicates = None\n\n\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
class Classifier(object):
    """
    Trained classifier bundled with the feature-extraction settings
    it was trained with.
    """

    def __init__(self, classifier, scaler, orient, color_space,
                 pix_per_cell, cell_per_block, spatial_size, hist_bins):
        """
        Initializes an instance.

        Parameters
        ----------
        classifier : Trained SciPy classifier for detecting vehicles.
        scaler : SciPy scaler to apply to X.
        orient : Orientation setting used during feature extraction.
        color_space : Color space used during feature extraction.
        pix_per_cell : Pixels-per-cell setting used during feature extraction.
        cell_per_block : Cells-per-block setting used during feature extraction.
        spatial_size : Spatial binning size used during feature extraction.
        hist_bins : Number of histogram bins used during feature extraction.

        NOTE(review): the parameter names suggest HOG feature extraction;
        all values are stored as-is — confirm semantics with the extractor.
        """
        self.classifier = classifier
        self.scaler = scaler
        self.color_space = color_space
        self.orient = orient
        self.pix_per_cell = pix_per_cell
        self.cell_per_block = cell_per_block
        self.spatial_size = spatial_size
        self.hist_bins = hist_bins
|
normal
|
{
"blob_id": "9188d58a6d9e832b8908b823d57249fcdd80ff51",
"index": 171,
"step-1": "<mask token>\n",
"step-2": "class Classifier(object):\n <mask token>\n <mask token>\n",
"step-3": "class Classifier(object):\n <mask token>\n\n def __init__(self, classifier, scaler, orient, color_space,\n pix_per_cell, cell_per_block, spatial_size, hist_bins):\n \"\"\"\n Initializes an instance.\n Parameters\n ----------\n classifier : Trained SciPy classifier for detecting vehicles.\n scaler : SciPy scaler to apply to X.\n \"\"\"\n self.classifier = classifier\n self.scaler = scaler\n self.color_space = color_space\n self.orient = orient\n self.pix_per_cell = pix_per_cell\n self.cell_per_block = cell_per_block\n self.spatial_size = spatial_size\n self.hist_bins = hist_bins\n",
"step-4": "class Classifier(object):\n \"\"\"\n Trained classifier\n \"\"\"\n\n def __init__(self, classifier, scaler, orient, color_space,\n pix_per_cell, cell_per_block, spatial_size, hist_bins):\n \"\"\"\n Initializes an instance.\n Parameters\n ----------\n classifier : Trained SciPy classifier for detecting vehicles.\n scaler : SciPy scaler to apply to X.\n \"\"\"\n self.classifier = classifier\n self.scaler = scaler\n self.color_space = color_space\n self.orient = orient\n self.pix_per_cell = pix_per_cell\n self.cell_per_block = cell_per_block\n self.spatial_size = spatial_size\n self.hist_bins = hist_bins\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@register.simple_tag
def random_quote():
"""Returns a random quote to be displayed on the community sandwich page"""
quotes = [
"""Growth is never by mere chance; it is the result of forces working together.
-James Cash Penney"""
,
"""We cannot accomplish all that we need to do without working together
-Bill Richardson"""
,
"""The power of one, if fearless and focused, is formidable, but the power of many working together is better.
-Gloria Macapagal Arroyo"""
,
"""The power of one, if fearless and focused, is formidable, but the power of many working together is better.
-Jacqueline Novogratz"""
,
"""I love a sandwich that you can barely fit in your mouth because there's so much stuff on it. The bread should not be the main thing on a sandwich.
-Adrianne Palicki"""
, """Communism will win.
-Slavoj Zizek"""]
return random.choice(quotes)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
register = template.Library()
@register.simple_tag
def random_quote():
"""Returns a random quote to be displayed on the community sandwich page"""
quotes = [
"""Growth is never by mere chance; it is the result of forces working together.
-James Cash Penney"""
,
"""We cannot accomplish all that we need to do without working together
-Bill Richardson"""
,
"""The power of one, if fearless and focused, is formidable, but the power of many working together is better.
-Gloria Macapagal Arroyo"""
,
"""The power of one, if fearless and focused, is formidable, but the power of many working together is better.
-Jacqueline Novogratz"""
,
"""I love a sandwich that you can barely fit in your mouth because there's so much stuff on it. The bread should not be the main thing on a sandwich.
-Adrianne Palicki"""
, """Communism will win.
-Slavoj Zizek"""]
return random.choice(quotes)
<|reserved_special_token_1|>
from django import template
import random
register = template.Library()
@register.simple_tag
def random_quote():
"""Returns a random quote to be displayed on the community sandwich page"""
quotes = [
"""Growth is never by mere chance; it is the result of forces working together.
-James Cash Penney"""
,
"""We cannot accomplish all that we need to do without working together
-Bill Richardson"""
,
"""The power of one, if fearless and focused, is formidable, but the power of many working together is better.
-Gloria Macapagal Arroyo"""
,
"""The power of one, if fearless and focused, is formidable, but the power of many working together is better.
-Jacqueline Novogratz"""
,
"""I love a sandwich that you can barely fit in your mouth because there's so much stuff on it. The bread should not be the main thing on a sandwich.
-Adrianne Palicki"""
, """Communism will win.
-Slavoj Zizek"""]
return random.choice(quotes)
<|reserved_special_token_1|>
from django import template
import random
register = template.Library()
@register.simple_tag
def random_quote():
    """Return one randomly selected quote for the community sandwich page."""
    quotes = (
        "Growth is never by mere chance; it is the result of forces working together.\n-James Cash Penney",
        "We cannot accomplish all that we need to do without working together\n-Bill Richardson",
        "The power of one, if fearless and focused, is formidable, but the power of many working together is better.\n-Gloria Macapagal Arroyo",
        "The power of one, if fearless and focused, is formidable, but the power of many working together is better.\n-Jacqueline Novogratz",
        "I love a sandwich that you can barely fit in your mouth because there's so much stuff on it. The bread should not be the main thing on a sandwich.\n-Adrianne Palicki",
        "Communism will win.\n-Slavoj Zizek",
    )
    return random.choice(quotes)
|
flexible
|
{
"blob_id": "6e73625adc10064cdb1b5f0546a4fc7320e9f5dc",
"index": 8366,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@register.simple_tag\ndef random_quote():\n \"\"\"Returns a random quote to be displayed on the community sandwich page\"\"\"\n quotes = [\n \"\"\"Growth is never by mere chance; it is the result of forces working together.\n-James Cash Penney\"\"\"\n ,\n \"\"\"We cannot accomplish all that we need to do without working together\n-Bill Richardson\"\"\"\n ,\n \"\"\"The power of one, if fearless and focused, is formidable, but the power of many working together is better.\n-Gloria Macapagal Arroyo\"\"\"\n ,\n \"\"\"The power of one, if fearless and focused, is formidable, but the power of many working together is better.\n-Jacqueline Novogratz\"\"\"\n ,\n \"\"\"I love a sandwich that you can barely fit in your mouth because there's so much stuff on it. The bread should not be the main thing on a sandwich.\n-Adrianne Palicki\"\"\"\n , \"\"\"Communism will win.\n-Slavoj Zizek\"\"\"]\n return random.choice(quotes)\n",
"step-3": "<mask token>\nregister = template.Library()\n\n\n@register.simple_tag\ndef random_quote():\n \"\"\"Returns a random quote to be displayed on the community sandwich page\"\"\"\n quotes = [\n \"\"\"Growth is never by mere chance; it is the result of forces working together.\n-James Cash Penney\"\"\"\n ,\n \"\"\"We cannot accomplish all that we need to do without working together\n-Bill Richardson\"\"\"\n ,\n \"\"\"The power of one, if fearless and focused, is formidable, but the power of many working together is better.\n-Gloria Macapagal Arroyo\"\"\"\n ,\n \"\"\"The power of one, if fearless and focused, is formidable, but the power of many working together is better.\n-Jacqueline Novogratz\"\"\"\n ,\n \"\"\"I love a sandwich that you can barely fit in your mouth because there's so much stuff on it. The bread should not be the main thing on a sandwich.\n-Adrianne Palicki\"\"\"\n , \"\"\"Communism will win.\n-Slavoj Zizek\"\"\"]\n return random.choice(quotes)\n",
"step-4": "from django import template\nimport random\nregister = template.Library()\n\n\n@register.simple_tag\ndef random_quote():\n \"\"\"Returns a random quote to be displayed on the community sandwich page\"\"\"\n quotes = [\n \"\"\"Growth is never by mere chance; it is the result of forces working together.\n-James Cash Penney\"\"\"\n ,\n \"\"\"We cannot accomplish all that we need to do without working together\n-Bill Richardson\"\"\"\n ,\n \"\"\"The power of one, if fearless and focused, is formidable, but the power of many working together is better.\n-Gloria Macapagal Arroyo\"\"\"\n ,\n \"\"\"The power of one, if fearless and focused, is formidable, but the power of many working together is better.\n-Jacqueline Novogratz\"\"\"\n ,\n \"\"\"I love a sandwich that you can barely fit in your mouth because there's so much stuff on it. The bread should not be the main thing on a sandwich.\n-Adrianne Palicki\"\"\"\n , \"\"\"Communism will win.\n-Slavoj Zizek\"\"\"]\n return random.choice(quotes)\n",
"step-5": "from django import template\n\nimport random\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef random_quote():\n \"\"\"Returns a random quote to be displayed on the community sandwich page\"\"\"\n quotes = [\n \"Growth is never by mere chance; it is the result of forces working together.\\n-James Cash Penney\",\n \"We cannot accomplish all that we need to do without working together\\n-Bill Richardson\",\n \"The power of one, if fearless and focused, is formidable, but the power of many working together is better.\\n-Gloria Macapagal Arroyo\",\n \"The power of one, if fearless and focused, is formidable, but the power of many working together is better.\\n-Jacqueline Novogratz\",\n \"I love a sandwich that you can barely fit in your mouth because there's so much stuff on it. The bread should not be the main thing on a sandwich.\\n-Adrianne Palicki\",\n \"Communism will win.\\n-Slavoj Zizek\",\n ]\n return random.choice(quotes)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Read a base and an exponent, then print base**exponent + 6**exponent.
base = int(input("n="))
exponent = int(input("x="))
result = base ** exponent + 6 ** exponent
print(result)
|
normal
|
{
"blob_id": "0d6490ae5f60ef21ad344e20179bd1b0f6aa761e",
"index": 6214,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(natija)\n",
"step-3": "n = int(input('n='))\nx = int(input('x='))\nnatija = pow(n, x) + pow(6, x)\nprint(natija)\n",
"step-4": "n=int(input(\"n=\"))\r\nx=int(input(\"x=\"))\r\nnatija=pow(n,x)+pow(6,x)\r\nprint(natija)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Copyright (c) 2008-2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test the `interpolation` module."""
from __future__ import division
import logging
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
import pytest
from scipy.spatial import cKDTree, Delaunay
from scipy.spatial.distance import cdist
from metpy.cbook import get_test_data
from metpy.gridding.gridding_functions import calc_kappa
from metpy.gridding.interpolation import (barnes_point, barnes_weights, cressman_point,
cressman_weights, inverse_distance,
natural_neighbor, nn_point)
from metpy.gridding.triangles import dist_2, find_natural_neighbors
# Suppress log messages below ERROR from the interpolation module while tests run.
logging.getLogger('metpy.gridding.interpolation').setLevel(logging.ERROR)
@pytest.fixture()
def test_data():
    r"""Return the observation points (x, y) and values z used by the tests."""
    xs = [8, 67, 79, 10, 52, 53, 98, 34, 15, 58]
    ys = [24, 87, 48, 94, 98, 66, 14, 24, 60, 16]
    zs = [0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156, 0.225, 3.364]
    return (np.array(xs, dtype=float),
            np.array(ys, dtype=float),
            np.array(zs, dtype=float))
@pytest.fixture()
def test_grid():
    r"""Return the (xg, yg) grid locations used by the tests in this file."""
    with get_test_data('interpolation_test_grid.npz') as fobj:
        grid = np.load(fobj)
    return grid['xg'], grid['yg']
def test_natural_neighbor(test_data, test_grid):
    r"""Test natural neighbor interpolation against the stored reference image."""
    obs_x, obs_y, obs_z = test_data
    grid_x, grid_y = test_grid
    result = natural_neighbor(obs_x, obs_y, obs_z, grid_x, grid_y)
    with get_test_data('nn_bbox0to100.npz') as fobj:
        expected = np.load(fobj)['img']
    assert_array_almost_equal(expected, result)
# Interpolation schemes exercised by the parametrized inverse-distance test.
interp_methods = ['cressman', 'barnes']
@pytest.mark.parametrize('method', interp_methods)
def test_inverse_distance(method, test_data, test_grid):
    r"""Test inverse distance interpolation for each supported scheme."""
    obs_x, obs_y, obs_z = test_data
    grid_x, grid_y = test_grid
    # Per-scheme keyword arguments and the matching stored reference file.
    configs = {
        'cressman': ({'r': 20, 'min_neighbors': 1}, 'cressman_r20_mn1.npz'),
        'barnes': ({'r': 40, 'kappa': 100}, 'barnes_r40_k100.npz'),
    }
    extra_kw, reference_file = configs[method]
    result = inverse_distance(obs_x, obs_y, obs_z, grid_x, grid_y,
                              kind=method, **extra_kw)
    with get_test_data(reference_file) as fobj:
        expected = np.load(fobj)['img']
    assert_array_almost_equal(expected, result)
def test_nn_point(test_data):
    r"""Test natural-neighbor interpolation of a single point."""
    obs_x, obs_y, obs_z = test_data
    triangulation = Delaunay(list(zip(obs_x, obs_y)))
    grid_point = [30, 30]
    members, tri_info = find_natural_neighbors(triangulation,
                                               [tuple(grid_point)])
    value = nn_point(obs_x, obs_y, obs_z, grid_point,
                     triangulation, members[0], tri_info)
    assert_almost_equal(1.009, value, 3)
def test_barnes_weights():
    r"""Test the Barnes weighting function against hand-computed values."""
    kappa = 1000000
    gamma = 0.5
    dist_sq = np.array([1000, 2000, 3000, 4000]) ** 2
    # Scale up so the reference values are easier to read.
    scaled = barnes_weights(dist_sq, kappa, gamma) * 10000000
    expected = [1353352.832366126918939, 3354.626279025118388,
                0.152299797447126, 0.000000126641655]
    assert_array_almost_equal(expected, scaled)
def test_cressman_weights():
    r"""Test the Cressman weighting function against hand-computed values."""
    radius = 5000
    dist_sq = np.array([1000, 2000, 3000, 4000]) ** 2
    expected = [0.923076923076923, 0.724137931034482,
                0.470588235294117, 0.219512195121951]
    assert_array_almost_equal(expected, cressman_weights(dist_sq, radius))
def test_cressman_point(test_data):
    r"""Test Cressman interpolation of a single point."""
    obs_x, obs_y, obs_z = test_data
    search_radius = 40
    # Gather all observations within the search radius of (30, 30).
    tree = cKDTree(list(zip(obs_x, obs_y)))
    neighbors = tree.query_ball_point([30, 30], r=search_radius)
    dist_sq = dist_2(30, 30, obs_x[neighbors], obs_y[neighbors])
    interpolated = cressman_point(dist_sq, obs_z[neighbors], search_radius)
    assert_almost_equal(1.05499444404, interpolated)
def test_barnes_point(test_data):
    r"""Check Barnes interpolation of a single point."""
    xp, yp, z = test_data
    radius = 40

    # Gather observations within `radius` of the query point (60, 60).
    tree = cKDTree(list(zip(xp, yp)))
    neighbors = tree.query_ball_point([60, 60], r=radius)
    sq_dists = dist_2(60, 60, xp[neighbors], yp[neighbors])

    # kappa is derived from the mean pairwise station spacing.
    station_points = list(zip(xp, yp))
    ave_spacing = np.mean(cdist(station_points, station_points))
    kappa = calc_kappa(ave_spacing)

    result = barnes_point(sq_dists, z[neighbors], kappa)
    assert_almost_equal(4.08718241061, result)
|
normal
|
{
"blob_id": "9e987e057ee5322765415b84e84ef3c4d2827742",
"index": 5466,
"step-1": "<mask token>\n\n\n@pytest.fixture()\ndef test_data():\n \"\"\"Return data used for tests in this file.\"\"\"\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156, \n 0.225, 3.364], dtype=float)\n return x, y, z\n\n\n@pytest.fixture()\ndef test_grid():\n \"\"\"Return grid locations used for tests in this file.\"\"\"\n with get_test_data('interpolation_test_grid.npz') as fobj:\n data = np.load(fobj)\n return data['xg'], data['yg']\n\n\ndef test_natural_neighbor(test_data, test_grid):\n \"\"\"Test natural neighbor interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n img = natural_neighbor(xp, yp, z, xg, yg)\n with get_test_data('nn_bbox0to100.npz') as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('method', interp_methods)\ndef test_inverse_distance(method, test_data, test_grid):\n \"\"\"Test inverse distance interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n extra_kw = {}\n if method == 'cressman':\n extra_kw['r'] = 20\n extra_kw['min_neighbors'] = 1\n test_file = 'cressman_r20_mn1.npz'\n elif method == 'barnes':\n extra_kw['r'] = 40\n extra_kw['kappa'] = 100\n test_file = 'barnes_r40_k100.npz'\n img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw)\n with get_test_data(test_file) as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\n<mask token>\n\n\ndef test_barnes_weights():\n \"\"\"Test Barnes weights function.\"\"\"\n kappa = 1000000\n gamma = 0.5\n dist = np.array([1000, 2000, 3000, 4000]) ** 2\n weights = barnes_weights(dist, kappa, gamma) * 10000000\n truth = [1353352.8323661268, 3354.6262790251185, 0.152299797447126, \n 1.26641655e-07]\n assert_array_almost_equal(truth, weights)\n\n\n<mask token>\n\n\ndef 
test_cressman_point(test_data):\n \"\"\"Test Cressman interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([30, 30], r=r)\n dists = dist_2(30, 30, xp[indices], yp[indices])\n values = z[indices]\n truth = 1.05499444404\n value = cressman_point(dists, values, r)\n assert_almost_equal(truth, value)\n\n\ndef test_barnes_point(test_data):\n \"\"\"Test Barnes interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([60, 60], r=r)\n dists = dist_2(60, 60, xp[indices], yp[indices])\n values = z[indices]\n truth = 4.08718241061\n ave_spacing = np.mean(cdist(list(zip(xp, yp)), list(zip(xp, yp))))\n kappa = calc_kappa(ave_spacing)\n value = barnes_point(dists, values, kappa)\n assert_almost_equal(truth, value)\n",
"step-2": "<mask token>\n\n\n@pytest.fixture()\ndef test_data():\n \"\"\"Return data used for tests in this file.\"\"\"\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156, \n 0.225, 3.364], dtype=float)\n return x, y, z\n\n\n@pytest.fixture()\ndef test_grid():\n \"\"\"Return grid locations used for tests in this file.\"\"\"\n with get_test_data('interpolation_test_grid.npz') as fobj:\n data = np.load(fobj)\n return data['xg'], data['yg']\n\n\ndef test_natural_neighbor(test_data, test_grid):\n \"\"\"Test natural neighbor interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n img = natural_neighbor(xp, yp, z, xg, yg)\n with get_test_data('nn_bbox0to100.npz') as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('method', interp_methods)\ndef test_inverse_distance(method, test_data, test_grid):\n \"\"\"Test inverse distance interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n extra_kw = {}\n if method == 'cressman':\n extra_kw['r'] = 20\n extra_kw['min_neighbors'] = 1\n test_file = 'cressman_r20_mn1.npz'\n elif method == 'barnes':\n extra_kw['r'] = 40\n extra_kw['kappa'] = 100\n test_file = 'barnes_r40_k100.npz'\n img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw)\n with get_test_data(test_file) as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\ndef test_nn_point(test_data):\n \"\"\"Test find natural neighbors for a point interpolation function.\"\"\"\n xp, yp, z = test_data\n tri = Delaunay(list(zip(xp, yp)))\n sim_gridx = [30]\n sim_gridy = [30]\n members, tri_info = find_natural_neighbors(tri, list(zip(sim_gridx,\n sim_gridy)))\n val = nn_point(xp, yp, z, [sim_gridx[0], sim_gridy[0]], tri, members[0],\n tri_info)\n truth = 1.009\n 
assert_almost_equal(truth, val, 3)\n\n\ndef test_barnes_weights():\n \"\"\"Test Barnes weights function.\"\"\"\n kappa = 1000000\n gamma = 0.5\n dist = np.array([1000, 2000, 3000, 4000]) ** 2\n weights = barnes_weights(dist, kappa, gamma) * 10000000\n truth = [1353352.8323661268, 3354.6262790251185, 0.152299797447126, \n 1.26641655e-07]\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_weights():\n \"\"\"Test Cressman weights function.\"\"\"\n r = 5000\n dist = np.array([1000, 2000, 3000, 4000]) ** 2\n weights = cressman_weights(dist, r)\n truth = [0.923076923076923, 0.724137931034482, 0.470588235294117, \n 0.219512195121951]\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_point(test_data):\n \"\"\"Test Cressman interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([30, 30], r=r)\n dists = dist_2(30, 30, xp[indices], yp[indices])\n values = z[indices]\n truth = 1.05499444404\n value = cressman_point(dists, values, r)\n assert_almost_equal(truth, value)\n\n\ndef test_barnes_point(test_data):\n \"\"\"Test Barnes interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([60, 60], r=r)\n dists = dist_2(60, 60, xp[indices], yp[indices])\n values = z[indices]\n truth = 4.08718241061\n ave_spacing = np.mean(cdist(list(zip(xp, yp)), list(zip(xp, yp))))\n kappa = calc_kappa(ave_spacing)\n value = barnes_point(dists, values, kappa)\n assert_almost_equal(truth, value)\n",
"step-3": "<mask token>\nlogging.getLogger('metpy.gridding.interpolation').setLevel(logging.ERROR)\n\n\n@pytest.fixture()\ndef test_data():\n \"\"\"Return data used for tests in this file.\"\"\"\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156, \n 0.225, 3.364], dtype=float)\n return x, y, z\n\n\n@pytest.fixture()\ndef test_grid():\n \"\"\"Return grid locations used for tests in this file.\"\"\"\n with get_test_data('interpolation_test_grid.npz') as fobj:\n data = np.load(fobj)\n return data['xg'], data['yg']\n\n\ndef test_natural_neighbor(test_data, test_grid):\n \"\"\"Test natural neighbor interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n img = natural_neighbor(xp, yp, z, xg, yg)\n with get_test_data('nn_bbox0to100.npz') as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('method', interp_methods)\ndef test_inverse_distance(method, test_data, test_grid):\n \"\"\"Test inverse distance interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n extra_kw = {}\n if method == 'cressman':\n extra_kw['r'] = 20\n extra_kw['min_neighbors'] = 1\n test_file = 'cressman_r20_mn1.npz'\n elif method == 'barnes':\n extra_kw['r'] = 40\n extra_kw['kappa'] = 100\n test_file = 'barnes_r40_k100.npz'\n img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw)\n with get_test_data(test_file) as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\ndef test_nn_point(test_data):\n \"\"\"Test find natural neighbors for a point interpolation function.\"\"\"\n xp, yp, z = test_data\n tri = Delaunay(list(zip(xp, yp)))\n sim_gridx = [30]\n sim_gridy = [30]\n members, tri_info = find_natural_neighbors(tri, list(zip(sim_gridx,\n sim_gridy)))\n val = nn_point(xp, yp, z, [sim_gridx[0], 
sim_gridy[0]], tri, members[0],\n tri_info)\n truth = 1.009\n assert_almost_equal(truth, val, 3)\n\n\ndef test_barnes_weights():\n \"\"\"Test Barnes weights function.\"\"\"\n kappa = 1000000\n gamma = 0.5\n dist = np.array([1000, 2000, 3000, 4000]) ** 2\n weights = barnes_weights(dist, kappa, gamma) * 10000000\n truth = [1353352.8323661268, 3354.6262790251185, 0.152299797447126, \n 1.26641655e-07]\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_weights():\n \"\"\"Test Cressman weights function.\"\"\"\n r = 5000\n dist = np.array([1000, 2000, 3000, 4000]) ** 2\n weights = cressman_weights(dist, r)\n truth = [0.923076923076923, 0.724137931034482, 0.470588235294117, \n 0.219512195121951]\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_point(test_data):\n \"\"\"Test Cressman interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([30, 30], r=r)\n dists = dist_2(30, 30, xp[indices], yp[indices])\n values = z[indices]\n truth = 1.05499444404\n value = cressman_point(dists, values, r)\n assert_almost_equal(truth, value)\n\n\ndef test_barnes_point(test_data):\n \"\"\"Test Barnes interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([60, 60], r=r)\n dists = dist_2(60, 60, xp[indices], yp[indices])\n values = z[indices]\n truth = 4.08718241061\n ave_spacing = np.mean(cdist(list(zip(xp, yp)), list(zip(xp, yp))))\n kappa = calc_kappa(ave_spacing)\n value = barnes_point(dists, values, kappa)\n assert_almost_equal(truth, value)\n",
"step-4": "<mask token>\nlogging.getLogger('metpy.gridding.interpolation').setLevel(logging.ERROR)\n\n\n@pytest.fixture()\ndef test_data():\n \"\"\"Return data used for tests in this file.\"\"\"\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156, \n 0.225, 3.364], dtype=float)\n return x, y, z\n\n\n@pytest.fixture()\ndef test_grid():\n \"\"\"Return grid locations used for tests in this file.\"\"\"\n with get_test_data('interpolation_test_grid.npz') as fobj:\n data = np.load(fobj)\n return data['xg'], data['yg']\n\n\ndef test_natural_neighbor(test_data, test_grid):\n \"\"\"Test natural neighbor interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n img = natural_neighbor(xp, yp, z, xg, yg)\n with get_test_data('nn_bbox0to100.npz') as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\ninterp_methods = ['cressman', 'barnes']\n\n\n@pytest.mark.parametrize('method', interp_methods)\ndef test_inverse_distance(method, test_data, test_grid):\n \"\"\"Test inverse distance interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n extra_kw = {}\n if method == 'cressman':\n extra_kw['r'] = 20\n extra_kw['min_neighbors'] = 1\n test_file = 'cressman_r20_mn1.npz'\n elif method == 'barnes':\n extra_kw['r'] = 40\n extra_kw['kappa'] = 100\n test_file = 'barnes_r40_k100.npz'\n img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw)\n with get_test_data(test_file) as fobj:\n truth = np.load(fobj)['img']\n assert_array_almost_equal(truth, img)\n\n\ndef test_nn_point(test_data):\n \"\"\"Test find natural neighbors for a point interpolation function.\"\"\"\n xp, yp, z = test_data\n tri = Delaunay(list(zip(xp, yp)))\n sim_gridx = [30]\n sim_gridy = [30]\n members, tri_info = find_natural_neighbors(tri, list(zip(sim_gridx,\n sim_gridy)))\n val = 
nn_point(xp, yp, z, [sim_gridx[0], sim_gridy[0]], tri, members[0],\n tri_info)\n truth = 1.009\n assert_almost_equal(truth, val, 3)\n\n\ndef test_barnes_weights():\n \"\"\"Test Barnes weights function.\"\"\"\n kappa = 1000000\n gamma = 0.5\n dist = np.array([1000, 2000, 3000, 4000]) ** 2\n weights = barnes_weights(dist, kappa, gamma) * 10000000\n truth = [1353352.8323661268, 3354.6262790251185, 0.152299797447126, \n 1.26641655e-07]\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_weights():\n \"\"\"Test Cressman weights function.\"\"\"\n r = 5000\n dist = np.array([1000, 2000, 3000, 4000]) ** 2\n weights = cressman_weights(dist, r)\n truth = [0.923076923076923, 0.724137931034482, 0.470588235294117, \n 0.219512195121951]\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_point(test_data):\n \"\"\"Test Cressman interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([30, 30], r=r)\n dists = dist_2(30, 30, xp[indices], yp[indices])\n values = z[indices]\n truth = 1.05499444404\n value = cressman_point(dists, values, r)\n assert_almost_equal(truth, value)\n\n\ndef test_barnes_point(test_data):\n \"\"\"Test Barnes interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n r = 40\n obs_tree = cKDTree(list(zip(xp, yp)))\n indices = obs_tree.query_ball_point([60, 60], r=r)\n dists = dist_2(60, 60, xp[indices], yp[indices])\n values = z[indices]\n truth = 4.08718241061\n ave_spacing = np.mean(cdist(list(zip(xp, yp)), list(zip(xp, yp))))\n kappa = calc_kappa(ave_spacing)\n value = barnes_point(dists, values, kappa)\n assert_almost_equal(truth, value)\n",
"step-5": "# Copyright (c) 2008-2016 MetPy Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\"\"\"Test the `interpolation` module.\"\"\"\n\nfrom __future__ import division\n\nimport logging\n\nimport numpy as np\nfrom numpy.testing import assert_almost_equal, assert_array_almost_equal\nimport pytest\nfrom scipy.spatial import cKDTree, Delaunay\nfrom scipy.spatial.distance import cdist\n\nfrom metpy.cbook import get_test_data\nfrom metpy.gridding.gridding_functions import calc_kappa\nfrom metpy.gridding.interpolation import (barnes_point, barnes_weights, cressman_point,\n cressman_weights, inverse_distance,\n natural_neighbor, nn_point)\nfrom metpy.gridding.triangles import dist_2, find_natural_neighbors\n\nlogging.getLogger('metpy.gridding.interpolation').setLevel(logging.ERROR)\n\n\n@pytest.fixture()\ndef test_data():\n r\"\"\"Return data used for tests in this file.\"\"\"\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156,\n 0.225, 3.364], dtype=float)\n\n return x, y, z\n\n\n@pytest.fixture()\ndef test_grid():\n r\"\"\"Return grid locations used for tests in this file.\"\"\"\n with get_test_data('interpolation_test_grid.npz') as fobj:\n data = np.load(fobj)\n return data['xg'], data['yg']\n\n\ndef test_natural_neighbor(test_data, test_grid):\n r\"\"\"Test natural neighbor interpolation function.\"\"\"\n xp, yp, z = test_data\n xg, yg = test_grid\n\n img = natural_neighbor(xp, yp, z, xg, yg)\n\n with get_test_data('nn_bbox0to100.npz') as fobj:\n truth = np.load(fobj)['img']\n\n assert_array_almost_equal(truth, img)\n\n\ninterp_methods = ['cressman', 'barnes']\n\n\n@pytest.mark.parametrize('method', interp_methods)\ndef test_inverse_distance(method, test_data, test_grid):\n r\"\"\"Test inverse distance interpolation function.\"\"\"\n xp, yp, z = 
test_data\n xg, yg = test_grid\n\n extra_kw = {}\n if method == 'cressman':\n extra_kw['r'] = 20\n extra_kw['min_neighbors'] = 1\n test_file = 'cressman_r20_mn1.npz'\n elif method == 'barnes':\n extra_kw['r'] = 40\n extra_kw['kappa'] = 100\n test_file = 'barnes_r40_k100.npz'\n\n img = inverse_distance(xp, yp, z, xg, yg, kind=method, **extra_kw)\n\n with get_test_data(test_file) as fobj:\n truth = np.load(fobj)['img']\n\n assert_array_almost_equal(truth, img)\n\n\ndef test_nn_point(test_data):\n r\"\"\"Test find natural neighbors for a point interpolation function.\"\"\"\n xp, yp, z = test_data\n\n tri = Delaunay(list(zip(xp, yp)))\n\n sim_gridx = [30]\n sim_gridy = [30]\n\n members, tri_info = find_natural_neighbors(tri,\n list(zip(sim_gridx, sim_gridy)))\n\n val = nn_point(xp, yp, z, [sim_gridx[0], sim_gridy[0]],\n tri, members[0], tri_info)\n\n truth = 1.009\n\n assert_almost_equal(truth, val, 3)\n\n\ndef test_barnes_weights():\n r\"\"\"Test Barnes weights function.\"\"\"\n kappa = 1000000\n\n gamma = 0.5\n\n dist = np.array([1000, 2000, 3000, 4000])**2\n\n weights = barnes_weights(dist, kappa, gamma) * 10000000\n\n truth = [1353352.832366126918939,\n 3354.626279025118388,\n .152299797447126,\n .000000126641655]\n\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_weights():\n r\"\"\"Test Cressman weights function.\"\"\"\n r = 5000\n\n dist = np.array([1000, 2000, 3000, 4000])**2\n\n weights = cressman_weights(dist, r)\n\n truth = [0.923076923076923,\n 0.724137931034482,\n 0.470588235294117,\n 0.219512195121951]\n\n assert_array_almost_equal(truth, weights)\n\n\ndef test_cressman_point(test_data):\n r\"\"\"Test Cressman interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n\n r = 40\n\n obs_tree = cKDTree(list(zip(xp, yp)))\n\n indices = obs_tree.query_ball_point([30, 30], r=r)\n\n dists = dist_2(30, 30, xp[indices], yp[indices])\n values = z[indices]\n\n truth = 1.05499444404\n\n value = cressman_point(dists, values, r)\n\n 
assert_almost_equal(truth, value)\n\n\ndef test_barnes_point(test_data):\n r\"\"\"Test Barnes interpolation for a point function.\"\"\"\n xp, yp, z = test_data\n\n r = 40\n\n obs_tree = cKDTree(list(zip(xp, yp)))\n\n indices = obs_tree.query_ball_point([60, 60], r=r)\n\n dists = dist_2(60, 60, xp[indices], yp[indices])\n values = z[indices]\n\n truth = 4.08718241061\n\n ave_spacing = np.mean((cdist(list(zip(xp, yp)), list(zip(xp, yp)))))\n\n kappa = calc_kappa(ave_spacing)\n\n value = barnes_point(dists, values, kappa)\n\n assert_almost_equal(truth, value)\n",
"step-ids": [
7,
9,
10,
11,
13
]
}
|
[
7,
9,
10,
11,
13
] |
"""
Utilities for calculations based on antenna positions,
such as baseline and phase factor.
"""
import os
import numpy as np
import pickle
c = 299792458  # speed of light in vacuum, m / s

# Absolute path of the directory containing this module (with a trailing
# slash) so data files can be located next to the source.
data_prefix = os.path.dirname(os.path.abspath(__file__)) + "/"

try:
    # Load the antenna-ID -> position mapping once at import time.
    # Use a context manager so the file handle is always closed
    # (the original left it open).
    with open(data_prefix + "ant_dict.pk", "rb") as ant_file:
        ant_pos = dict(pickle.load(ant_file))

    def baselength(ant_ID1, ant_ID2):
        """
        (Convenience function)
        Return the norm of the baseline between antennae
        @ant_ID1 and @ant_ID2.
        """
        return np.linalg.norm(baseline(ant_ID1, ant_ID2))

    def baseline(ant_ID1, ant_ID2):
        """
        Calculate the baseline between antennae
        @ant_ID1 and @ant_ID2
        by a simple difference of their position vectors.
        """
        return ant_pos[ant_ID2] - ant_pos[ant_ID1]

    def phase_factor(ant1, ant2, r, nu=151e6):
        """
        Calculate the phase factor in the direction @r (l, m)
        (we assume that n is of insignificant magnitude)
        and at the frequency @nu (Hz)
        between two antennae whose ID #s are @ant1 and @ant2.
        When we calculate the baseline (u, v, w), we
        assume that w is of insignificant magnitude.
        """
        b = baseline(ant1, ant2)[0:2]  # drop the w component

        br = np.dot(b, r)
        return np.exp(-2j * np.pi * nu * br / c)

except FileNotFoundError:
    print("Failure to load antennae data.")
|
normal
|
{
"blob_id": "c455263b82c04fe2c5cc1e614f10a9962795f87e",
"index": 4349,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n ant_pos = dict(pickle.load(open(data_prefix + 'ant_dict.pk', 'rb')))\n\n def baselength(ant_ID1, ant_ID2):\n \"\"\"\n (Convenience function)\n Return the norm of the baseline between antennae\n # @ant_ID1 and @ant_ID2\n \"\"\"\n return np.linalg.norm(baseline(ant_ID1, ant_ID2))\n\n def baseline(ant_ID1, ant_ID2):\n \"\"\"\n Calculate the baseline between antennae\n # @ant_ID1 and @ant_ID2\n by a simple difference of their coordinates.\n \"\"\"\n return ant_pos[ant_ID2] - ant_pos[ant_ID1]\n\n def phase_factor(ant1, ant2, r, nu=151000000.0):\n \"\"\"\n Calculate the phase factor in the direction @r (l, m)\n (we assume that n is of insignificant magnitude)\n and at the frequency @nu\n between two antennae whose ID #s are @ant1 and @ant2.\n When we calculate the baseline (u, v, w), we\n assume that w is of insignificant magnitude.\n \"\"\"\n b = baseline(ant1, ant2)[0:2]\n br = np.dot(b, r)\n return np.exp(-2.0j * np.pi * nu * br / c)\nexcept FileNotFoundError:\n print('Failure to load antennae data.')\n",
"step-3": "<mask token>\nc = 299792458\ndata_prefix = os.path.dirname(os.path.abspath(__file__)) + '/'\ntry:\n ant_pos = dict(pickle.load(open(data_prefix + 'ant_dict.pk', 'rb')))\n\n def baselength(ant_ID1, ant_ID2):\n \"\"\"\n (Convenience function)\n Return the norm of the baseline between antennae\n # @ant_ID1 and @ant_ID2\n \"\"\"\n return np.linalg.norm(baseline(ant_ID1, ant_ID2))\n\n def baseline(ant_ID1, ant_ID2):\n \"\"\"\n Calculate the baseline between antennae\n # @ant_ID1 and @ant_ID2\n by a simple difference of their coordinates.\n \"\"\"\n return ant_pos[ant_ID2] - ant_pos[ant_ID1]\n\n def phase_factor(ant1, ant2, r, nu=151000000.0):\n \"\"\"\n Calculate the phase factor in the direction @r (l, m)\n (we assume that n is of insignificant magnitude)\n and at the frequency @nu\n between two antennae whose ID #s are @ant1 and @ant2.\n When we calculate the baseline (u, v, w), we\n assume that w is of insignificant magnitude.\n \"\"\"\n b = baseline(ant1, ant2)[0:2]\n br = np.dot(b, r)\n return np.exp(-2.0j * np.pi * nu * br / c)\nexcept FileNotFoundError:\n print('Failure to load antennae data.')\n",
"step-4": "<mask token>\nimport os\nimport numpy as np\nimport pickle\nc = 299792458\ndata_prefix = os.path.dirname(os.path.abspath(__file__)) + '/'\ntry:\n ant_pos = dict(pickle.load(open(data_prefix + 'ant_dict.pk', 'rb')))\n\n def baselength(ant_ID1, ant_ID2):\n \"\"\"\n (Convenience function)\n Return the norm of the baseline between antennae\n # @ant_ID1 and @ant_ID2\n \"\"\"\n return np.linalg.norm(baseline(ant_ID1, ant_ID2))\n\n def baseline(ant_ID1, ant_ID2):\n \"\"\"\n Calculate the baseline between antennae\n # @ant_ID1 and @ant_ID2\n by a simple difference of their coordinates.\n \"\"\"\n return ant_pos[ant_ID2] - ant_pos[ant_ID1]\n\n def phase_factor(ant1, ant2, r, nu=151000000.0):\n \"\"\"\n Calculate the phase factor in the direction @r (l, m)\n (we assume that n is of insignificant magnitude)\n and at the frequency @nu\n between two antennae whose ID #s are @ant1 and @ant2.\n When we calculate the baseline (u, v, w), we\n assume that w is of insignificant magnitude.\n \"\"\"\n b = baseline(ant1, ant2)[0:2]\n br = np.dot(b, r)\n return np.exp(-2.0j * np.pi * nu * br / c)\nexcept FileNotFoundError:\n print('Failure to load antennae data.')\n",
"step-5": "\"\"\"\nUtilities for calculations based on antenna positions,\nsuch as baseline and phase factor.\n\"\"\"\n\nimport os\nimport numpy as np\nimport pickle\n\nc = 299792458 # m / s\ndata_prefix = os.path.dirname(os.path.abspath(__file__)) + \"/\"\n\ntry:\n ant_pos = dict(pickle.load(open(data_prefix + \"ant_dict.pk\", \"rb\")))\n\n def baselength(ant_ID1, ant_ID2):\n \"\"\"\n (Convenience function)\n Return the norm of the baseline between antennae\n # @ant_ID1 and @ant_ID2\n \"\"\"\n return np.linalg.norm(baseline(ant_ID1, ant_ID2))\n\n def baseline(ant_ID1, ant_ID2):\n \"\"\"\n Calculate the baseline between antennae\n # @ant_ID1 and @ant_ID2\n by a simple difference of their coordinates.\n \"\"\"\n return ant_pos[ant_ID2] - ant_pos[ant_ID1]\n\n def phase_factor(ant1, ant2, r, nu=151e6):\n \"\"\"\n Calculate the phase factor in the direction @r (l, m)\n (we assume that n is of insignificant magnitude)\n and at the frequency @nu\n between two antennae whose ID #s are @ant1 and @ant2.\n When we calculate the baseline (u, v, w), we\n assume that w is of insignificant magnitude.\n \"\"\"\n b = baseline(ant1, ant2)[0:2] # kill w\n\n br = np.dot(b, r)\n return np.exp(-2j * np.pi * nu * br / c)\n \nexcept FileNotFoundError:\n print(\"Failure to load antennae data.\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.shortcuts import render, get_object_or_404
# Create your views here.
from django.http import HttpResponse
from .models import Post
from django.utils import timezone
def list_of_posts(request):
    """Render every post published up to now, oldest first."""
    published = Post.objects.filter(published_date__lte=timezone.now())
    ordered = published.order_by('published_date')
    return render(request, 'blog/list_of_posts.html', {'posts': ordered})
def post_detail(request, pk):
    """Render a single post, or raise Http404 when the pk is unknown."""
    context = {'post': get_object_or_404(Post, pk=pk)}
    return render(request, 'blog/post_detail.html', context)
|
normal
|
{
"blob_id": "71a0900dc09b1ff55e4e5a4cc7cab617b9c73406",
"index": 4519,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n return render(request, 'blog/post_detail.html', {'post': post})\n",
"step-3": "<mask token>\n\n\ndef list_of_posts(request):\n posts = Post.objects.filter(published_date__lte=timezone.now()).order_by(\n 'published_date')\n return render(request, 'blog/list_of_posts.html', {'posts': posts})\n\n\ndef post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n return render(request, 'blog/post_detail.html', {'post': post})\n",
"step-4": "from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse\nfrom .models import Post\nfrom django.utils import timezone\n\n\ndef list_of_posts(request):\n posts = Post.objects.filter(published_date__lte=timezone.now()).order_by(\n 'published_date')\n return render(request, 'blog/list_of_posts.html', {'posts': posts})\n\n\ndef post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n return render(request, 'blog/post_detail.html', {'post': post})\n",
"step-5": "from django.shortcuts import render, get_object_or_404\n\n# Create your views here.\n\nfrom django.http import HttpResponse\nfrom .models import Post\nfrom django.utils import timezone\n\ndef list_of_posts(request):\n posts = (Post.objects\n .filter(published_date__lte=timezone.now())\n .order_by('published_date')\n )\n return render(request, 'blog/list_of_posts.html', {'posts': posts})\n\ndef post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n\n return render(request,\n 'blog/post_detail.html',\n {'post': post}\n )\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
import os
import traceback
from src.properties import *
from src.utils import *
from subprocess import call
from src.entity.cursor import Cursor
from curses import *
def main(screen, file_path):
    """
    Run the editor loop on @file_path inside the curses @screen.

    Returns a tuple (exit_code, error_msg):
        3  saved and quit         (error_msg is None)
        2  quit without saving
        1  interrupted via Ctrl + C
       -1  unexpected error       (error_msg holds the formatted traceback)
    """
    setUpEnv()
    text = readFileIfExist(file_path)
    while 1:
        try:
            text = startEditing(screen, text)
            printQuitOptions(screen)
            char = screen.getch()
            if char == KEY_ENTER_CODE:
                # Save and quit.
                writeToFile(file_path, text)
                return 3, None
            elif char == KEY_F9:
                # Quit without saving.
                return 2, None
            # Any other key: resume editing.
        except KeyboardInterrupt:
            # Quit properly when the user presses Ctrl + C.
            return 1, None
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/GeneratorExit
            # still propagate; report the traceback to the caller.
            return -1, traceback.format_exc()
def setUpEnv():
    """Initialize curses colors: keep the terminal defaults and register the
    border color pair (magenta on the default background)."""
    use_default_colors()
    init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)
def startEditing(screen, text):
    """
    Interactive edit loop: dispatch every key press to the Cursor until the
    user presses F1, then return the edited text.
    """
    cursor = Cursor(screen, BORDER_COLOR, text)
    while 1:
        char = screen.getch()
        if char == KEY_F1:
            # F1 ends the editing session.
            break
        elif char == TERMINAL_RESIZE_CODE:
            cursor.resizeTextBox()
        elif char == KEY_RIGHT:
            cursor.moveRight()
        elif char == KEY_LEFT:
            cursor.moveLeft()
        elif char == KEY_UP:
            cursor.moveUp()
        elif char == KEY_DOWN:
            cursor.moveDown()
        elif 31 < char < 127:
            # Printable ASCII: insert the character at the cursor.
            cursor.writeChar(char)
        elif char == KEY_DELETE_CODE:
            cursor.delete()
        elif char == 10 or char == 13 or char == KEY_ENTER:
            # LF, CR or the keypad Enter all start a new line.
            cursor.newLine()
        elif char == KEY_TAB_CODE:
            cursor.tab()
        elif char == KEY_ESCAPE_CODE:
            char = screen.getch()  # get the key pressed after cmd or alt
            if char == KEY_LEFT or char == 98:  # 98 and 102 are left and right keys produced while pressing alt, on mac terminal
                cursor.moveToLeftMost()
            elif char == KEY_RIGHT or char == 102:  # CMD + RIGHT
                cursor.moveToRightMost()
            elif char == KEY_DELETE_CODE:  # CMD + DELETE
                cursor.deleteWholeLine()
            elif char == KEY_DOWN:  # CMD + DOWN
                cursor.moveToRightBottomMost()
            elif char == KEY_UP:  # CMD + UP
                cursor.moveToRightUpMost()
            else:
                # Plain ESC (no CMD/ALT combination): push the key back so
                # it is handled by the normal dispatch on the next loop.
                ungetch(char)
        else:
            # Unrecognized key code: write its numeric value into the text.
            cursor._writeString(str(char))
    return cursor.getText()
def printQuitOptions(screen):
    """Clear the screen and show the save / quit / go-back menu, roughly centred."""
    rows, cols = screen.getmaxyx()
    screen.clear()
    top = int(rows / 2.5)
    left = int(cols / 2.5)
    labels = ("Quit and Save (ENTER)", "Quit (F9)", "Go Back (Any Key)")
    for offset, label in enumerate(labels):
        screen.addstr(top + offset, left, label)
    screen.refresh()
def printExitMessage(exit_code, error_msg):
    """Print a human-readable summary of the editor's exit status to the terminal."""
    if exit_code == -1:
        printToTerminal("Shit just happen, sorry.")
        if error_msg:
            printToTerminal(error_msg)
    elif exit_code == 1:
        printToTerminal("Quit, safe and sound.")
    elif exit_code == 2:
        printToTerminal("Quit without save.")
    elif exit_code == 3:
        printToTerminal("saved !")
    elif exit_code == 4:  # -version
        printToTerminal(VERSION)
    elif exit_code == 5:  # -help
        printToTerminal("======================== Welcome to Simple Editor X ========================", "GREEN")
        for line in (
            "",
            "Arguments:",
            "    -version",
            "    -help",
            "    {file_name}, to start editing an existing or create a new file",
            "",
            "While using:",
            "    Press F1, then ENTER to save",
            "",
        ):
            printToTerminal(line)
if __name__== "__main__":
if len(sys.argv) != 2:
printToTerminal("This application take exactly 1 argument")
printToTerminal("type: 'sex -help' for more details")
exit(69)
error_msg = ""
exit_code = -1
arg = sys.argv[1].lower()
file_path = sys.argv[1]
if arg == "-v" or arg == "-version":
exit_code = 4
elif arg == "-h" or arg == "-help":
exit_code = 5
else:
exit_code, error_msg = wrapper(main, file_path)
printExitMessage(exit_code, error_msg)
|
normal
|
{
"blob_id": "7a6d45ef87d93af9a15bd352b893164d3a36c399",
"index": 7545,
"step-1": "<mask token>\n\n\ndef main(screen, file_path):\n setUpEnv()\n text = readFileIfExist(file_path)\n while 1:\n try:\n text = startEditing(screen, text)\n printQuitOptions(screen)\n char = screen.getch()\n if char == KEY_ENTER_CODE:\n writeToFile(file_path, text)\n return 3, None\n elif char == KEY_F9:\n return 2, None\n else:\n pass\n except KeyboardInterrupt:\n return 1, None\n except:\n error_msg = traceback.format_exc()\n return -1, error_msg\n\n\ndef setUpEnv():\n use_default_colors()\n init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)\n\n\ndef startEditing(screen, text):\n cursor = Cursor(screen, BORDER_COLOR, text)\n while 1:\n char = screen.getch()\n if char == KEY_F1:\n break\n elif char == TERMINAL_RESIZE_CODE:\n cursor.resizeTextBox()\n elif char == KEY_RIGHT:\n cursor.moveRight()\n elif char == KEY_LEFT:\n cursor.moveLeft()\n elif char == KEY_UP:\n cursor.moveUp()\n elif char == KEY_DOWN:\n cursor.moveDown()\n elif 31 < char < 127:\n cursor.writeChar(char)\n elif char == KEY_DELETE_CODE:\n cursor.delete()\n elif char == 10 or char == 13 or char == KEY_ENTER:\n cursor.newLine()\n elif char == KEY_TAB_CODE:\n cursor.tab()\n elif char == KEY_ESCAPE_CODE:\n char = screen.getch()\n if char == KEY_LEFT or char == 98:\n cursor.moveToLeftMost()\n elif char == KEY_RIGHT or char == 102:\n cursor.moveToRightMost()\n elif char == KEY_DELETE_CODE:\n cursor.deleteWholeLine()\n elif char == KEY_DOWN:\n cursor.moveToRightBottomMost()\n elif char == KEY_UP:\n cursor.moveToRightUpMost()\n else:\n ungetch(char)\n else:\n cursor._writeString(str(char))\n return cursor.getText()\n\n\ndef printQuitOptions(screen):\n height, width = screen.getmaxyx()\n screen.clear()\n y = int(height / 2.5)\n x = int(width / 2.5)\n screen.addstr(y, x, 'Quit and Save (ENTER)')\n screen.addstr(y + 1, x, 'Quit (F9)')\n screen.addstr(y + 2, x, 'Go Back (Any Key)')\n screen.refresh()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(screen, file_path):\n setUpEnv()\n text = readFileIfExist(file_path)\n while 1:\n try:\n text = startEditing(screen, text)\n printQuitOptions(screen)\n char = screen.getch()\n if char == KEY_ENTER_CODE:\n writeToFile(file_path, text)\n return 3, None\n elif char == KEY_F9:\n return 2, None\n else:\n pass\n except KeyboardInterrupt:\n return 1, None\n except:\n error_msg = traceback.format_exc()\n return -1, error_msg\n\n\ndef setUpEnv():\n use_default_colors()\n init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)\n\n\ndef startEditing(screen, text):\n cursor = Cursor(screen, BORDER_COLOR, text)\n while 1:\n char = screen.getch()\n if char == KEY_F1:\n break\n elif char == TERMINAL_RESIZE_CODE:\n cursor.resizeTextBox()\n elif char == KEY_RIGHT:\n cursor.moveRight()\n elif char == KEY_LEFT:\n cursor.moveLeft()\n elif char == KEY_UP:\n cursor.moveUp()\n elif char == KEY_DOWN:\n cursor.moveDown()\n elif 31 < char < 127:\n cursor.writeChar(char)\n elif char == KEY_DELETE_CODE:\n cursor.delete()\n elif char == 10 or char == 13 or char == KEY_ENTER:\n cursor.newLine()\n elif char == KEY_TAB_CODE:\n cursor.tab()\n elif char == KEY_ESCAPE_CODE:\n char = screen.getch()\n if char == KEY_LEFT or char == 98:\n cursor.moveToLeftMost()\n elif char == KEY_RIGHT or char == 102:\n cursor.moveToRightMost()\n elif char == KEY_DELETE_CODE:\n cursor.deleteWholeLine()\n elif char == KEY_DOWN:\n cursor.moveToRightBottomMost()\n elif char == KEY_UP:\n cursor.moveToRightUpMost()\n else:\n ungetch(char)\n else:\n cursor._writeString(str(char))\n return cursor.getText()\n\n\ndef printQuitOptions(screen):\n height, width = screen.getmaxyx()\n screen.clear()\n y = int(height / 2.5)\n x = int(width / 2.5)\n screen.addstr(y, x, 'Quit and Save (ENTER)')\n screen.addstr(y + 1, x, 'Quit (F9)')\n screen.addstr(y + 2, x, 'Go Back (Any Key)')\n screen.refresh()\n\n\ndef printExitMessage(exit_code, error_msg):\n if exit_code == -1:\n printToTerminal('Shit just happen, 
sorry.')\n if error_msg:\n printToTerminal(error_msg)\n elif exit_code == 1:\n printToTerminal('Quit, safe and sound.')\n elif exit_code == 2:\n printToTerminal('Quit without save.')\n elif exit_code == 3:\n printToTerminal('saved !')\n elif exit_code == 4:\n printToTerminal(VERSION)\n elif exit_code == 5:\n printToTerminal(\n '======================== Welcome to Simple Editor X ========================'\n , 'GREEN')\n printToTerminal('')\n printToTerminal('Arguments:')\n printToTerminal(' -version')\n printToTerminal(' -help')\n printToTerminal(\n ' {file_name}, to start editing an existing or create a new file'\n )\n printToTerminal('')\n printToTerminal('While using:')\n printToTerminal(' Press F1, then ENTER to save')\n printToTerminal('')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main(screen, file_path):\n setUpEnv()\n text = readFileIfExist(file_path)\n while 1:\n try:\n text = startEditing(screen, text)\n printQuitOptions(screen)\n char = screen.getch()\n if char == KEY_ENTER_CODE:\n writeToFile(file_path, text)\n return 3, None\n elif char == KEY_F9:\n return 2, None\n else:\n pass\n except KeyboardInterrupt:\n return 1, None\n except:\n error_msg = traceback.format_exc()\n return -1, error_msg\n\n\ndef setUpEnv():\n use_default_colors()\n init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)\n\n\ndef startEditing(screen, text):\n cursor = Cursor(screen, BORDER_COLOR, text)\n while 1:\n char = screen.getch()\n if char == KEY_F1:\n break\n elif char == TERMINAL_RESIZE_CODE:\n cursor.resizeTextBox()\n elif char == KEY_RIGHT:\n cursor.moveRight()\n elif char == KEY_LEFT:\n cursor.moveLeft()\n elif char == KEY_UP:\n cursor.moveUp()\n elif char == KEY_DOWN:\n cursor.moveDown()\n elif 31 < char < 127:\n cursor.writeChar(char)\n elif char == KEY_DELETE_CODE:\n cursor.delete()\n elif char == 10 or char == 13 or char == KEY_ENTER:\n cursor.newLine()\n elif char == KEY_TAB_CODE:\n cursor.tab()\n elif char == KEY_ESCAPE_CODE:\n char = screen.getch()\n if char == KEY_LEFT or char == 98:\n cursor.moveToLeftMost()\n elif char == KEY_RIGHT or char == 102:\n cursor.moveToRightMost()\n elif char == KEY_DELETE_CODE:\n cursor.deleteWholeLine()\n elif char == KEY_DOWN:\n cursor.moveToRightBottomMost()\n elif char == KEY_UP:\n cursor.moveToRightUpMost()\n else:\n ungetch(char)\n else:\n cursor._writeString(str(char))\n return cursor.getText()\n\n\ndef printQuitOptions(screen):\n height, width = screen.getmaxyx()\n screen.clear()\n y = int(height / 2.5)\n x = int(width / 2.5)\n screen.addstr(y, x, 'Quit and Save (ENTER)')\n screen.addstr(y + 1, x, 'Quit (F9)')\n screen.addstr(y + 2, x, 'Go Back (Any Key)')\n screen.refresh()\n\n\ndef printExitMessage(exit_code, error_msg):\n if exit_code == -1:\n printToTerminal('Shit just happen, 
sorry.')\n if error_msg:\n printToTerminal(error_msg)\n elif exit_code == 1:\n printToTerminal('Quit, safe and sound.')\n elif exit_code == 2:\n printToTerminal('Quit without save.')\n elif exit_code == 3:\n printToTerminal('saved !')\n elif exit_code == 4:\n printToTerminal(VERSION)\n elif exit_code == 5:\n printToTerminal(\n '======================== Welcome to Simple Editor X ========================'\n , 'GREEN')\n printToTerminal('')\n printToTerminal('Arguments:')\n printToTerminal(' -version')\n printToTerminal(' -help')\n printToTerminal(\n ' {file_name}, to start editing an existing or create a new file'\n )\n printToTerminal('')\n printToTerminal('While using:')\n printToTerminal(' Press F1, then ENTER to save')\n printToTerminal('')\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n printToTerminal('This application take exactly 1 argument')\n printToTerminal(\"type: 'sex -help' for more details\")\n exit(69)\n error_msg = ''\n exit_code = -1\n arg = sys.argv[1].lower()\n file_path = sys.argv[1]\n if arg == '-v' or arg == '-version':\n exit_code = 4\n elif arg == '-h' or arg == '-help':\n exit_code = 5\n else:\n exit_code, error_msg = wrapper(main, file_path)\n printExitMessage(exit_code, error_msg)\n",
"step-4": "import sys\nimport os\nimport traceback\nfrom src.properties import *\nfrom src.utils import *\nfrom subprocess import call\nfrom src.entity.cursor import Cursor\nfrom curses import *\n\n\ndef main(screen, file_path):\n setUpEnv()\n text = readFileIfExist(file_path)\n while 1:\n try:\n text = startEditing(screen, text)\n printQuitOptions(screen)\n char = screen.getch()\n if char == KEY_ENTER_CODE:\n writeToFile(file_path, text)\n return 3, None\n elif char == KEY_F9:\n return 2, None\n else:\n pass\n except KeyboardInterrupt:\n return 1, None\n except:\n error_msg = traceback.format_exc()\n return -1, error_msg\n\n\ndef setUpEnv():\n use_default_colors()\n init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)\n\n\ndef startEditing(screen, text):\n cursor = Cursor(screen, BORDER_COLOR, text)\n while 1:\n char = screen.getch()\n if char == KEY_F1:\n break\n elif char == TERMINAL_RESIZE_CODE:\n cursor.resizeTextBox()\n elif char == KEY_RIGHT:\n cursor.moveRight()\n elif char == KEY_LEFT:\n cursor.moveLeft()\n elif char == KEY_UP:\n cursor.moveUp()\n elif char == KEY_DOWN:\n cursor.moveDown()\n elif 31 < char < 127:\n cursor.writeChar(char)\n elif char == KEY_DELETE_CODE:\n cursor.delete()\n elif char == 10 or char == 13 or char == KEY_ENTER:\n cursor.newLine()\n elif char == KEY_TAB_CODE:\n cursor.tab()\n elif char == KEY_ESCAPE_CODE:\n char = screen.getch()\n if char == KEY_LEFT or char == 98:\n cursor.moveToLeftMost()\n elif char == KEY_RIGHT or char == 102:\n cursor.moveToRightMost()\n elif char == KEY_DELETE_CODE:\n cursor.deleteWholeLine()\n elif char == KEY_DOWN:\n cursor.moveToRightBottomMost()\n elif char == KEY_UP:\n cursor.moveToRightUpMost()\n else:\n ungetch(char)\n else:\n cursor._writeString(str(char))\n return cursor.getText()\n\n\ndef printQuitOptions(screen):\n height, width = screen.getmaxyx()\n screen.clear()\n y = int(height / 2.5)\n x = int(width / 2.5)\n screen.addstr(y, x, 'Quit and Save (ENTER)')\n screen.addstr(y + 1, x, 'Quit (F9)')\n 
screen.addstr(y + 2, x, 'Go Back (Any Key)')\n screen.refresh()\n\n\ndef printExitMessage(exit_code, error_msg):\n if exit_code == -1:\n printToTerminal('Shit just happen, sorry.')\n if error_msg:\n printToTerminal(error_msg)\n elif exit_code == 1:\n printToTerminal('Quit, safe and sound.')\n elif exit_code == 2:\n printToTerminal('Quit without save.')\n elif exit_code == 3:\n printToTerminal('saved !')\n elif exit_code == 4:\n printToTerminal(VERSION)\n elif exit_code == 5:\n printToTerminal(\n '======================== Welcome to Simple Editor X ========================'\n , 'GREEN')\n printToTerminal('')\n printToTerminal('Arguments:')\n printToTerminal(' -version')\n printToTerminal(' -help')\n printToTerminal(\n ' {file_name}, to start editing an existing or create a new file'\n )\n printToTerminal('')\n printToTerminal('While using:')\n printToTerminal(' Press F1, then ENTER to save')\n printToTerminal('')\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n printToTerminal('This application take exactly 1 argument')\n printToTerminal(\"type: 'sex -help' for more details\")\n exit(69)\n error_msg = ''\n exit_code = -1\n arg = sys.argv[1].lower()\n file_path = sys.argv[1]\n if arg == '-v' or arg == '-version':\n exit_code = 4\n elif arg == '-h' or arg == '-help':\n exit_code = 5\n else:\n exit_code, error_msg = wrapper(main, file_path)\n printExitMessage(exit_code, error_msg)\n",
"step-5": "import sys\nimport os\nimport traceback\nfrom src.properties import *\nfrom src.utils import *\nfrom subprocess import call\nfrom src.entity.cursor import Cursor\nfrom curses import *\n\n\ndef main(screen, file_path):\n setUpEnv()\n text = readFileIfExist(file_path)\n while 1:\n try:\n text = startEditing(screen, text)\n printQuitOptions(screen)\n char = screen.getch()\n if char == KEY_ENTER_CODE:\n writeToFile(file_path, text)\n return 3, None\n elif char == KEY_F9:\n return 2, None\n else:\n pass\n except KeyboardInterrupt: # quit properly, when user press Ctrl + C\n return 1, None\n except:\n error_msg = traceback.format_exc()\n return -1, error_msg\n\n\ndef setUpEnv():\n use_default_colors()\n init_pair(BORDER_COLOR, COLOR_MAGENTA, -1)\n\n\ndef startEditing(screen, text):\n cursor = Cursor(screen, BORDER_COLOR, text)\n while 1:\n char = screen.getch()\n if char == KEY_F1:\n break\n elif char == TERMINAL_RESIZE_CODE:\n cursor.resizeTextBox()\n elif char == KEY_RIGHT:\n cursor.moveRight()\n elif char == KEY_LEFT:\n cursor.moveLeft()\n elif char == KEY_UP:\n cursor.moveUp()\n elif char == KEY_DOWN:\n cursor.moveDown()\n elif 31 < char < 127:\n cursor.writeChar(char)\n elif char == KEY_DELETE_CODE:\n cursor.delete()\n elif char == 10 or char == 13 or char == KEY_ENTER:\n cursor.newLine()\n elif char == KEY_TAB_CODE:\n cursor.tab()\n elif char == KEY_ESCAPE_CODE:\n char = screen.getch() # get the key pressed after cmd or alt\n if char == KEY_LEFT or char == 98: # 98 and 102 are left and right keys produced while pressing alt, on mac terminal\n cursor.moveToLeftMost()\n elif char == KEY_RIGHT or char == 102: # CMD + RIGHT\n cursor.moveToRightMost()\n elif char == KEY_DELETE_CODE: # CMD + DELETE\n cursor.deleteWholeLine()\n elif char == KEY_DOWN: # CMD + DOWN\n cursor.moveToRightBottomMost()\n elif char == KEY_UP: # CMD + UP\n cursor.moveToRightUpMost()\n else: # in case char user press ESC, it produce the same effec as CMD or ALT, but that's not what we 
want\n ungetch(char)\n else:\n cursor._writeString(str(char))\n return cursor.getText()\n\n\ndef printQuitOptions(screen):\n height, width = screen.getmaxyx()\n screen.clear()\n y = int(height / 2.5)\n x = int(width / 2.5)\n screen.addstr(y, x, \"Quit and Save (ENTER)\")\n screen.addstr(y + 1, x, \"Quit (F9)\")\n screen.addstr(y + 2, x, \"Go Back (Any Key)\")\n screen.refresh()\n\n\ndef printExitMessage(exit_code, error_msg):\n if exit_code == -1:\n printToTerminal(\"Shit just happen, sorry.\")\n if error_msg:\n printToTerminal(error_msg)\n elif exit_code == 1:\n printToTerminal(\"Quit, safe and sound.\")\n elif exit_code == 2:\n printToTerminal(\"Quit without save.\")\n elif exit_code == 3:\n printToTerminal(\"saved !\")\n elif exit_code == 4: # -version\n printToTerminal(VERSION)\n elif exit_code == 5: # -help\n printToTerminal(\"======================== Welcome to Simple Editor X ========================\", \"GREEN\")\n printToTerminal(\"\")\n printToTerminal(\"Arguments:\")\n printToTerminal(\" -version\")\n printToTerminal(\" -help\")\n printToTerminal(\" {file_name}, to start editing an existing or create a new file\")\n printToTerminal(\"\")\n printToTerminal(\"While using:\")\n printToTerminal(\" Press F1, then ENTER to save\")\n printToTerminal(\"\")\n\n\nif __name__== \"__main__\":\n if len(sys.argv) != 2:\n printToTerminal(\"This application take exactly 1 argument\")\n printToTerminal(\"type: 'sex -help' for more details\")\n exit(69)\n error_msg = \"\"\n exit_code = -1\n arg = sys.argv[1].lower()\n file_path = sys.argv[1]\n if arg == \"-v\" or arg == \"-version\":\n exit_code = 4\n elif arg == \"-h\" or arg == \"-help\":\n exit_code = 5\n else:\n exit_code, error_msg = wrapper(main, file_path)\n\n printExitMessage(exit_code, error_msg)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class ClientTaskStatus(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def start(self):
while True:
try:
self.get_task_status_info()
lines = StatusTask(self._taskstatus)
OutputManagement.output(lines)
except:
print(f'Collect taskinfo error,err:{traceback.format_exc()}')
finally:
time.sleep(self.times)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ClientTaskStatus(object):
def __init__(self):
self._taskstatus = {}
self._sqlres = DbManager
self.times = clienttaskconfig.collect_client_times
<|reserved_special_token_0|>
def start(self):
while True:
try:
self.get_task_status_info()
lines = StatusTask(self._taskstatus)
OutputManagement.output(lines)
except:
print(f'Collect taskinfo error,err:{traceback.format_exc()}')
finally:
time.sleep(self.times)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ClientTaskStatus(object):
def __init__(self):
self._taskstatus = {}
self._sqlres = DbManager
self.times = clienttaskconfig.collect_client_times
def get_task_status_info(self):
self._taskstatus['time'] = datetime.now(pytz.timezone('Asia/Shanghai')
).strftime('%Y-%m-%d %H:%M:%S')
self._taskstatus['clientid'] = 'clientid'
tasking = self._sqlres.query_task(SqlConditions(SqlCondition(
colname='taskstatus', val=ETaskStatus.New.value, comb=ESqlComb.Or))
)
self._taskstatus['tasknewcnt'] = len(tasking)
taskwaiting = self._sqlres.query_task(SqlConditions(SqlCondition(
colname='taskstatus', val=ETaskStatus.WaitForDeal.value, comb=
ESqlComb.Or)))
self._taskstatus['taskwaitingcnt'] = len(taskwaiting)
taskdownloading = self._sqlres.query_task(SqlConditions(
SqlCondition(colname='taskstatus', val=ETaskStatus.Downloading.
value, comb=ESqlComb.Or)))
self._taskstatus['taskdownloadingcnt'] = len(taskdownloading)
return
def start(self):
while True:
try:
self.get_task_status_info()
lines = StatusTask(self._taskstatus)
OutputManagement.output(lines)
except:
print(f'Collect taskinfo error,err:{traceback.format_exc()}')
finally:
time.sleep(self.times)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from datetime import datetime
import time
import traceback
import pytz
from datacontract import ETaskStatus
from datacontract.clientstatus.statustask import StatusTask
from idownclient.clientdbmanager import DbManager
from idownclient.config_task import clienttaskconfig
from outputmanagement import OutputManagement
from ..clientdbmanager.sqlcondition import ESqlComb, SqlCondition, SqlConditions
class ClientTaskStatus(object):
def __init__(self):
self._taskstatus = {}
self._sqlres = DbManager
self.times = clienttaskconfig.collect_client_times
def get_task_status_info(self):
self._taskstatus['time'] = datetime.now(pytz.timezone('Asia/Shanghai')
).strftime('%Y-%m-%d %H:%M:%S')
self._taskstatus['clientid'] = 'clientid'
tasking = self._sqlres.query_task(SqlConditions(SqlCondition(
colname='taskstatus', val=ETaskStatus.New.value, comb=ESqlComb.Or))
)
self._taskstatus['tasknewcnt'] = len(tasking)
taskwaiting = self._sqlres.query_task(SqlConditions(SqlCondition(
colname='taskstatus', val=ETaskStatus.WaitForDeal.value, comb=
ESqlComb.Or)))
self._taskstatus['taskwaitingcnt'] = len(taskwaiting)
taskdownloading = self._sqlres.query_task(SqlConditions(
SqlCondition(colname='taskstatus', val=ETaskStatus.Downloading.
value, comb=ESqlComb.Or)))
self._taskstatus['taskdownloadingcnt'] = len(taskdownloading)
return
def start(self):
while True:
try:
self.get_task_status_info()
lines = StatusTask(self._taskstatus)
OutputManagement.output(lines)
except:
print(f'Collect taskinfo error,err:{traceback.format_exc()}')
finally:
time.sleep(self.times)
<|reserved_special_token_1|>
"""
采集端任务状态统计
直接在数据库查找数据
create by judy 2018/10/22
update by judy 2019/03/05
更改统一输出为output
"""
from datetime import datetime
import time
import traceback
import pytz
from datacontract import ETaskStatus
from datacontract.clientstatus.statustask import StatusTask
from idownclient.clientdbmanager import DbManager
from idownclient.config_task import clienttaskconfig
from outputmanagement import OutputManagement
from ..clientdbmanager.sqlcondition import (ESqlComb, SqlCondition,
SqlConditions)
class ClientTaskStatus(object):
def __init__(self):
self._taskstatus = {}
self._sqlres = DbManager
self.times = clienttaskconfig.collect_client_times # 默认是5秒搜集一次
def get_task_status_info(self):
self._taskstatus['time'] = datetime.now(pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S')
self._taskstatus['clientid'] = 'clientid'
# 正在执行任务的数量
# tasking = self._sqlres.query_task('taskstatus', ETaskStatus.New.value)
tasking = self._sqlres.query_task(
SqlConditions(
SqlCondition(
colname='taskstatus',
val=ETaskStatus.New.value,
comb=ESqlComb.Or),
))
self._taskstatus['tasknewcnt'] = len(tasking)
# taskwaiting = self._sqlres.query_task('taskstatus', ETaskStatus.WaitForDeal.value)
taskwaiting = self._sqlres.query_task(
SqlConditions(
SqlCondition(
colname='taskstatus',
val=ETaskStatus.WaitForDeal.value,
comb=ESqlComb.Or),
))
self._taskstatus['taskwaitingcnt'] = len(taskwaiting)
# taskdownloading = self._sqlres.query_task('taskstatus', ETaskStatus.Downloading.value)
taskdownloading = self._sqlres.query_task(
SqlConditions(
SqlCondition(
colname='taskstatus',
val=ETaskStatus.Downloading.value,
comb=ESqlComb.Or),
))
self._taskstatus['taskdownloadingcnt'] = len(taskdownloading)
return
def start(self):
while True:
try:
self.get_task_status_info()
lines = StatusTask(self._taskstatus)
OutputManagement.output(lines)
except:
print(f"Collect taskinfo error,err:{traceback.format_exc()}")
finally:
time.sleep(self.times)
|
flexible
|
{
"blob_id": "de0d0588106ab651a8d6141a44cd9e286b0ad3a5",
"index": 1299,
"step-1": "<mask token>\n\n\nclass ClientTaskStatus(object):\n <mask token>\n <mask token>\n\n def start(self):\n while True:\n try:\n self.get_task_status_info()\n lines = StatusTask(self._taskstatus)\n OutputManagement.output(lines)\n except:\n print(f'Collect taskinfo error,err:{traceback.format_exc()}')\n finally:\n time.sleep(self.times)\n",
"step-2": "<mask token>\n\n\nclass ClientTaskStatus(object):\n\n def __init__(self):\n self._taskstatus = {}\n self._sqlres = DbManager\n self.times = clienttaskconfig.collect_client_times\n <mask token>\n\n def start(self):\n while True:\n try:\n self.get_task_status_info()\n lines = StatusTask(self._taskstatus)\n OutputManagement.output(lines)\n except:\n print(f'Collect taskinfo error,err:{traceback.format_exc()}')\n finally:\n time.sleep(self.times)\n",
"step-3": "<mask token>\n\n\nclass ClientTaskStatus(object):\n\n def __init__(self):\n self._taskstatus = {}\n self._sqlres = DbManager\n self.times = clienttaskconfig.collect_client_times\n\n def get_task_status_info(self):\n self._taskstatus['time'] = datetime.now(pytz.timezone('Asia/Shanghai')\n ).strftime('%Y-%m-%d %H:%M:%S')\n self._taskstatus['clientid'] = 'clientid'\n tasking = self._sqlres.query_task(SqlConditions(SqlCondition(\n colname='taskstatus', val=ETaskStatus.New.value, comb=ESqlComb.Or))\n )\n self._taskstatus['tasknewcnt'] = len(tasking)\n taskwaiting = self._sqlres.query_task(SqlConditions(SqlCondition(\n colname='taskstatus', val=ETaskStatus.WaitForDeal.value, comb=\n ESqlComb.Or)))\n self._taskstatus['taskwaitingcnt'] = len(taskwaiting)\n taskdownloading = self._sqlres.query_task(SqlConditions(\n SqlCondition(colname='taskstatus', val=ETaskStatus.Downloading.\n value, comb=ESqlComb.Or)))\n self._taskstatus['taskdownloadingcnt'] = len(taskdownloading)\n return\n\n def start(self):\n while True:\n try:\n self.get_task_status_info()\n lines = StatusTask(self._taskstatus)\n OutputManagement.output(lines)\n except:\n print(f'Collect taskinfo error,err:{traceback.format_exc()}')\n finally:\n time.sleep(self.times)\n",
"step-4": "<mask token>\nfrom datetime import datetime\nimport time\nimport traceback\nimport pytz\nfrom datacontract import ETaskStatus\nfrom datacontract.clientstatus.statustask import StatusTask\nfrom idownclient.clientdbmanager import DbManager\nfrom idownclient.config_task import clienttaskconfig\nfrom outputmanagement import OutputManagement\nfrom ..clientdbmanager.sqlcondition import ESqlComb, SqlCondition, SqlConditions\n\n\nclass ClientTaskStatus(object):\n\n def __init__(self):\n self._taskstatus = {}\n self._sqlres = DbManager\n self.times = clienttaskconfig.collect_client_times\n\n def get_task_status_info(self):\n self._taskstatus['time'] = datetime.now(pytz.timezone('Asia/Shanghai')\n ).strftime('%Y-%m-%d %H:%M:%S')\n self._taskstatus['clientid'] = 'clientid'\n tasking = self._sqlres.query_task(SqlConditions(SqlCondition(\n colname='taskstatus', val=ETaskStatus.New.value, comb=ESqlComb.Or))\n )\n self._taskstatus['tasknewcnt'] = len(tasking)\n taskwaiting = self._sqlres.query_task(SqlConditions(SqlCondition(\n colname='taskstatus', val=ETaskStatus.WaitForDeal.value, comb=\n ESqlComb.Or)))\n self._taskstatus['taskwaitingcnt'] = len(taskwaiting)\n taskdownloading = self._sqlres.query_task(SqlConditions(\n SqlCondition(colname='taskstatus', val=ETaskStatus.Downloading.\n value, comb=ESqlComb.Or)))\n self._taskstatus['taskdownloadingcnt'] = len(taskdownloading)\n return\n\n def start(self):\n while True:\n try:\n self.get_task_status_info()\n lines = StatusTask(self._taskstatus)\n OutputManagement.output(lines)\n except:\n print(f'Collect taskinfo error,err:{traceback.format_exc()}')\n finally:\n time.sleep(self.times)\n",
"step-5": "\"\"\"\n采集端任务状态统计\n直接在数据库查找数据\ncreate by judy 2018/10/22\n\nupdate by judy 2019/03/05\n更改统一输出为output\n\"\"\"\nfrom datetime import datetime\nimport time\nimport traceback\n\nimport pytz\n\nfrom datacontract import ETaskStatus\nfrom datacontract.clientstatus.statustask import StatusTask\nfrom idownclient.clientdbmanager import DbManager\nfrom idownclient.config_task import clienttaskconfig\nfrom outputmanagement import OutputManagement\nfrom ..clientdbmanager.sqlcondition import (ESqlComb, SqlCondition,\n SqlConditions)\n\n\nclass ClientTaskStatus(object):\n\n def __init__(self):\n self._taskstatus = {}\n self._sqlres = DbManager\n self.times = clienttaskconfig.collect_client_times # 默认是5秒搜集一次\n\n def get_task_status_info(self):\n self._taskstatus['time'] = datetime.now(pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S')\n self._taskstatus['clientid'] = 'clientid'\n # 正在执行任务的数量\n # tasking = self._sqlres.query_task('taskstatus', ETaskStatus.New.value)\n tasking = self._sqlres.query_task(\n SqlConditions(\n SqlCondition(\n colname='taskstatus',\n val=ETaskStatus.New.value,\n comb=ESqlComb.Or),\n ))\n self._taskstatus['tasknewcnt'] = len(tasking)\n # taskwaiting = self._sqlres.query_task('taskstatus', ETaskStatus.WaitForDeal.value)\n taskwaiting = self._sqlres.query_task(\n SqlConditions(\n SqlCondition(\n colname='taskstatus',\n val=ETaskStatus.WaitForDeal.value,\n comb=ESqlComb.Or),\n ))\n self._taskstatus['taskwaitingcnt'] = len(taskwaiting)\n # taskdownloading = self._sqlres.query_task('taskstatus', ETaskStatus.Downloading.value)\n taskdownloading = self._sqlres.query_task(\n SqlConditions(\n SqlCondition(\n colname='taskstatus',\n val=ETaskStatus.Downloading.value,\n comb=ESqlComb.Or),\n ))\n self._taskstatus['taskdownloadingcnt'] = len(taskdownloading)\n return\n\n def start(self):\n while True:\n try:\n self.get_task_status_info()\n lines = StatusTask(self._taskstatus)\n OutputManagement.output(lines)\n except:\n print(f\"Collect taskinfo 
error,err:{traceback.format_exc()}\")\n finally:\n time.sleep(self.times)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
""""Pirata barba Negra ( màs de 2 pasos a las izquierda o a la derecha y se cae):
rampa para subir a su barco (5 pasos de ancho y 15 de largo")leer por teclado un valor entero.
a) si el entero es par 1 paso hacia adelante
b)si el entero es impar , pero el entero - 1 es divisible por 4, el pirata da un paso a la derecha
c)En otro caso , el pirata da un paso a la izquierda
d)utilizar un generador de numeros pseudo aleatorios para generar un nuevo entero y repetir a la partir del paso a
Condiciones de terminacion:
** introducciòn de un nùmero negativo ( es de suponer que el pirata se durmiò sobre la rampa)
**El pirata cae por un costado de la rampa y se ahoga
**El pirata logra abordar a salvo su barco
Haga un programa que exhiba el avance del pirata en cada paso"""
from random import randint
numero_usuario =int(input("Ingrese un nùmero para empezar su tambaleada aventura "))
while numero_usuario<0:
print("Parece que el pirata se ha quedado dormido en la rampa intenta despertarlo ingresando otro nùmero ")
numero_usuario =int(input("Ingrese un nùmero para empezar su tambaleada aventura "))
pasos_izq =3 #por la posicion inicial en la tabla
pasos_der= 3
pasos_adelante=0
#considerar punto en la tabla
while pasos_adelante <15 and pasos_der<5 and pasos_izq<5:
if numero_usuario%2 ==0:
pasos_adelante =pasos_adelante+1
#para el while validar que iguale o supere lo pasos_adelante >=15
print("El pirata avanzó" ,pasos_adelante, "pasos hacia adelante")
elif numero_usuario %2 !=0 and (numero_usuario-1)%4==0:
pasos_der= pasos_der+1
pasos_izq=pasos_izq-1
#para el while validar que iguale o supere lo pasos_der>2
print("El pirata hizo" ,pasos_der, "pasos a la derecha ")
elif numero_usuario %2 !=0 and (numero_usuario-1)%4!=0:
pasos_izq=pasos_izq+1
pasos_der= pasos_der-1
#para el while validar que iguale o supere lo pasos_izq>2
print("El pirata hizo" ,pasos_izq, "pasos a la izquierda ")
aleatorio=randint(-10,1000)
print("nùmero aleatorio",aleatorio)
numero_usuario=aleatorio
if pasos_adelante >=15:
print(" Este viaje tambaleado ha sido un èxito! El Pirata llegó a su Barco!")
elif pasos_der>=5:
print("El pirata se ha caído de la rampa por el lado derecho y se ha ahogado :(")
elif pasos_izq>=5:
print("El pirata se ha caído de la rampa por el lado izquierdo y se ha ahogado :(")
|
normal
|
{
"blob_id": "1829bd8e87c470a71fea97dd3a47c30477b6e6f1",
"index": 3109,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile numero_usuario < 0:\n print(\n 'Parece que el pirata se ha quedado dormido en la rampa intenta despertarlo ingresando otro nùmero '\n )\n numero_usuario = int(input(\n 'Ingrese un nùmero para empezar su tambaleada aventura '))\n<mask token>\nwhile pasos_adelante < 15 and pasos_der < 5 and pasos_izq < 5:\n if numero_usuario % 2 == 0:\n pasos_adelante = pasos_adelante + 1\n print('El pirata avanzó', pasos_adelante, 'pasos hacia adelante')\n elif numero_usuario % 2 != 0 and (numero_usuario - 1) % 4 == 0:\n pasos_der = pasos_der + 1\n pasos_izq = pasos_izq - 1\n print('El pirata hizo', pasos_der, 'pasos a la derecha ')\n elif numero_usuario % 2 != 0 and (numero_usuario - 1) % 4 != 0:\n pasos_izq = pasos_izq + 1\n pasos_der = pasos_der - 1\n print('El pirata hizo', pasos_izq, 'pasos a la izquierda ')\n aleatorio = randint(-10, 1000)\n print('nùmero aleatorio', aleatorio)\n numero_usuario = aleatorio\nif pasos_adelante >= 15:\n print(\n ' Este viaje tambaleado ha sido un èxito! El Pirata llegó a su Barco!')\nelif pasos_der >= 5:\n print(\n 'El pirata se ha caído de la rampa por el lado derecho y se ha ahogado :('\n )\nelif pasos_izq >= 5:\n print(\n 'El pirata se ha caído de la rampa por el lado izquierdo y se ha ahogado :('\n )\n",
"step-3": "<mask token>\nnumero_usuario = int(input(\n 'Ingrese un nùmero para empezar su tambaleada aventura '))\nwhile numero_usuario < 0:\n print(\n 'Parece que el pirata se ha quedado dormido en la rampa intenta despertarlo ingresando otro nùmero '\n )\n numero_usuario = int(input(\n 'Ingrese un nùmero para empezar su tambaleada aventura '))\npasos_izq = 3\npasos_der = 3\npasos_adelante = 0\nwhile pasos_adelante < 15 and pasos_der < 5 and pasos_izq < 5:\n if numero_usuario % 2 == 0:\n pasos_adelante = pasos_adelante + 1\n print('El pirata avanzó', pasos_adelante, 'pasos hacia adelante')\n elif numero_usuario % 2 != 0 and (numero_usuario - 1) % 4 == 0:\n pasos_der = pasos_der + 1\n pasos_izq = pasos_izq - 1\n print('El pirata hizo', pasos_der, 'pasos a la derecha ')\n elif numero_usuario % 2 != 0 and (numero_usuario - 1) % 4 != 0:\n pasos_izq = pasos_izq + 1\n pasos_der = pasos_der - 1\n print('El pirata hizo', pasos_izq, 'pasos a la izquierda ')\n aleatorio = randint(-10, 1000)\n print('nùmero aleatorio', aleatorio)\n numero_usuario = aleatorio\nif pasos_adelante >= 15:\n print(\n ' Este viaje tambaleado ha sido un èxito! El Pirata llegó a su Barco!')\nelif pasos_der >= 5:\n print(\n 'El pirata se ha caído de la rampa por el lado derecho y se ha ahogado :('\n )\nelif pasos_izq >= 5:\n print(\n 'El pirata se ha caído de la rampa por el lado izquierdo y se ha ahogado :('\n )\n",
"step-4": "<mask token>\nfrom random import randint\nnumero_usuario = int(input(\n 'Ingrese un nùmero para empezar su tambaleada aventura '))\nwhile numero_usuario < 0:\n print(\n 'Parece que el pirata se ha quedado dormido en la rampa intenta despertarlo ingresando otro nùmero '\n )\n numero_usuario = int(input(\n 'Ingrese un nùmero para empezar su tambaleada aventura '))\npasos_izq = 3\npasos_der = 3\npasos_adelante = 0\nwhile pasos_adelante < 15 and pasos_der < 5 and pasos_izq < 5:\n if numero_usuario % 2 == 0:\n pasos_adelante = pasos_adelante + 1\n print('El pirata avanzó', pasos_adelante, 'pasos hacia adelante')\n elif numero_usuario % 2 != 0 and (numero_usuario - 1) % 4 == 0:\n pasos_der = pasos_der + 1\n pasos_izq = pasos_izq - 1\n print('El pirata hizo', pasos_der, 'pasos a la derecha ')\n elif numero_usuario % 2 != 0 and (numero_usuario - 1) % 4 != 0:\n pasos_izq = pasos_izq + 1\n pasos_der = pasos_der - 1\n print('El pirata hizo', pasos_izq, 'pasos a la izquierda ')\n aleatorio = randint(-10, 1000)\n print('nùmero aleatorio', aleatorio)\n numero_usuario = aleatorio\nif pasos_adelante >= 15:\n print(\n ' Este viaje tambaleado ha sido un èxito! El Pirata llegó a su Barco!')\nelif pasos_der >= 5:\n print(\n 'El pirata se ha caído de la rampa por el lado derecho y se ha ahogado :('\n )\nelif pasos_izq >= 5:\n print(\n 'El pirata se ha caído de la rampa por el lado izquierdo y se ha ahogado :('\n )\n",
"step-5": "\"\"\"\"Pirata barba Negra ( màs de 2 pasos a las izquierda o a la derecha y se cae): \nrampa para subir a su barco (5 pasos de ancho y 15 de largo\")leer por teclado un valor entero.\na) si el entero es par 1 paso hacia adelante\nb)si el entero es impar , pero el entero - 1 es divisible por 4, el pirata da un paso a la derecha\nc)En otro caso , el pirata da un paso a la izquierda\nd)utilizar un generador de numeros pseudo aleatorios para generar un nuevo entero y repetir a la partir del paso a\nCondiciones de terminacion:\n** introducciòn de un nùmero negativo ( es de suponer que el pirata se durmiò sobre la rampa)\n**El pirata cae por un costado de la rampa y se ahoga\n**El pirata logra abordar a salvo su barco\nHaga un programa que exhiba el avance del pirata en cada paso\"\"\"\n\nfrom random import randint\n\nnumero_usuario =int(input(\"Ingrese un nùmero para empezar su tambaleada aventura \"))\nwhile numero_usuario<0:\n print(\"Parece que el pirata se ha quedado dormido en la rampa intenta despertarlo ingresando otro nùmero \")\n numero_usuario =int(input(\"Ingrese un nùmero para empezar su tambaleada aventura \"))\n\npasos_izq =3 #por la posicion inicial en la tabla\npasos_der= 3\npasos_adelante=0\n#considerar punto en la tabla\n\nwhile pasos_adelante <15 and pasos_der<5 and pasos_izq<5:\n if numero_usuario%2 ==0:\n pasos_adelante =pasos_adelante+1\n #para el while validar que iguale o supere lo pasos_adelante >=15\n print(\"El pirata avanzó\" ,pasos_adelante, \"pasos hacia adelante\")\n elif numero_usuario %2 !=0 and (numero_usuario-1)%4==0:\n pasos_der= pasos_der+1\n pasos_izq=pasos_izq-1\n #para el while validar que iguale o supere lo pasos_der>2\n print(\"El pirata hizo\" ,pasos_der, \"pasos a la derecha \")\n elif numero_usuario %2 !=0 and (numero_usuario-1)%4!=0:\n pasos_izq=pasos_izq+1\n pasos_der= pasos_der-1\n #para el while validar que iguale o supere lo pasos_izq>2\n print(\"El pirata hizo\" ,pasos_izq, \"pasos a la izquierda \")\n 
aleatorio=randint(-10,1000) \n print(\"nùmero aleatorio\",aleatorio)\n numero_usuario=aleatorio\n\nif pasos_adelante >=15: \n print(\" Este viaje tambaleado ha sido un èxito! El Pirata llegó a su Barco!\")\nelif pasos_der>=5:\n print(\"El pirata se ha caído de la rampa por el lado derecho y se ha ahogado :(\")\nelif pasos_izq>=5:\n print(\"El pirata se ha caído de la rampa por el lado izquierdo y se ha ahogado :(\") ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('exchange', '0004_auto_20170826_2120')]
operations = [migrations.AlterModelOptions(name='type', options={
'verbose_name': 'тип задания', 'verbose_name_plural':
'Типы задания'}), migrations.AlterField(model_name='task', name=
'count', field=models.IntegerField(default=0, verbose_name=
'Количество выполненных действий')), migrations.AlterField(
model_name='task', name='max_count', field=models.IntegerField(
default=1, verbose_name='Количество запланированных действий')),
migrations.AlterField(model_name='task', name='status', field=
models.CharField(choices=[('NEW', 'Новая'), ('CNF', 'Подтверждена'),
('Y', 'Активна'), ('BLC', 'Заблокирована модератором'), ('DEL',
'Удалено'), ('DON', 'Завершено')], default='NEW', max_length=3))]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models


class Migration(migrations.Migration):
    """Adjust Type verbose names and Task counter/status fields.

    Applied on top of exchange.0004; changes display strings (Russian
    verbose names) and the allowed Task status choices only.
    """

    dependencies = [
        ('exchange', '0004_auto_20170826_2120'),
    ]

    operations = [
        # Human-readable admin labels for the Type model.
        migrations.AlterModelOptions(
            name='type',
            options={
                'verbose_name': 'тип задания',
                'verbose_name_plural': 'Типы задания',
            },
        ),
        # Number of actions already performed (starts at zero).
        migrations.AlterField(
            model_name='task',
            name='count',
            field=models.IntegerField(
                default=0,
                verbose_name='Количество выполненных действий',
            ),
        ),
        # Number of actions planned for the task (at least one).
        migrations.AlterField(
            model_name='task',
            name='max_count',
            field=models.IntegerField(
                default=1,
                verbose_name='Количество запланированных действий',
            ),
        ),
        # Three-letter task lifecycle codes with Russian display labels.
        migrations.AlterField(
            model_name='task',
            name='status',
            field=models.CharField(
                choices=[
                    ('NEW', 'Новая'),
                    ('CNF', 'Подтверждена'),
                    ('Y', 'Активна'),
                    ('BLC', 'Заблокирована модератором'),
                    ('DEL', 'Удалено'),
                    ('DON', 'Завершено'),
                ],
                default='NEW',
                max_length=3,
            ),
        ),
    ]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-08-26 21:31
# Auto-generated migration: renames the Type model's admin labels and
# tweaks three Task fields. All verbose names are Russian, stored here
# as \uXXXX escapes (Python 2-safe source).
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('exchange', '0004_auto_20170826_2120'),
    ]

    operations = [
        # verbose_name: "тип задания"; verbose_name_plural: "Типы задания"
        migrations.AlterModelOptions(
            name='type',
            options={'verbose_name': '\u0442\u0438\u043f \u0437\u0430\u0434\u0430\u043d\u0438\u044f', 'verbose_name_plural': '\u0422\u0438\u043f\u044b \u0437\u0430\u0434\u0430\u043d\u0438\u044f'},
        ),
        # count: "Количество выполненных действий" (actions performed so far)
        migrations.AlterField(
            model_name='task',
            name='count',
            field=models.IntegerField(default=0, verbose_name='\u041a\u043e\u043b\u0438\u0447\u0435\u0441\u0442\u0432\u043e \u0432\u044b\u043f\u043e\u043b\u043d\u0435\u043d\u043d\u044b\u0445 \u0434\u0435\u0439\u0441\u0442\u0432\u0438\u0439'),
        ),
        # max_count: "Количество запланированных действий" (actions planned)
        migrations.AlterField(
            model_name='task',
            name='max_count',
            field=models.IntegerField(default=1, verbose_name='\u041a\u043e\u043b\u0438\u0447\u0435\u0441\u0442\u0432\u043e \u0437\u0430\u043f\u043b\u0430\u043d\u0438\u0440\u043e\u0432\u0430\u043d\u043d\u044b\u0445 \u0434\u0435\u0439\u0441\u0442\u0432\u0438\u0439'),
        ),
        # status codes: NEW "Новая", CNF "Подтверждена", Y "Активна",
        # BLC "Заблокирована модератором", DEL "Удалено", DON "Завершено"
        migrations.AlterField(
            model_name='task',
            name='status',
            field=models.CharField(choices=[('NEW', '\u041d\u043e\u0432\u0430\u044f'), ('CNF', '\u041f\u043e\u0434\u0442\u0432\u0435\u0440\u0436\u0434\u0435\u043d\u0430'), ('Y', '\u0410\u043a\u0442\u0438\u0432\u043d\u0430'), ('BLC', '\u0417\u0430\u0431\u043b\u043e\u043a\u0438\u0440\u043e\u0432\u0430\u043d\u0430 \u043c\u043e\u0434\u0435\u0440\u0430\u0442\u043e\u0440\u043e\u043c'), ('DEL', '\u0423\u0434\u0430\u043b\u0435\u043d\u043e'), ('DON', '\u0417\u0430\u0432\u0435\u0440\u0448\u0435\u043d\u043e')], default='NEW', max_length=3),
        ),
    ]
|
flexible
|
{
"blob_id": "264896da4d92797b9f31e28c19a2e315efff815a",
"index": 138,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('exchange', '0004_auto_20170826_2120')]\n operations = [migrations.AlterModelOptions(name='type', options={\n 'verbose_name': 'тип задания', 'verbose_name_plural':\n 'Типы задания'}), migrations.AlterField(model_name='task', name=\n 'count', field=models.IntegerField(default=0, verbose_name=\n 'Количество выполненных действий')), migrations.AlterField(\n model_name='task', name='max_count', field=models.IntegerField(\n default=1, verbose_name='Количество запланированных действий')),\n migrations.AlterField(model_name='task', name='status', field=\n models.CharField(choices=[('NEW', 'Новая'), ('CNF', 'Подтверждена'),\n ('Y', 'Активна'), ('BLC', 'Заблокирована модератором'), ('DEL',\n 'Удалено'), ('DON', 'Завершено')], default='NEW', max_length=3))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('exchange', '0004_auto_20170826_2120')]\n operations = [migrations.AlterModelOptions(name='type', options={\n 'verbose_name': 'тип задания', 'verbose_name_plural':\n 'Типы задания'}), migrations.AlterField(model_name='task', name=\n 'count', field=models.IntegerField(default=0, verbose_name=\n 'Количество выполненных действий')), migrations.AlterField(\n model_name='task', name='max_count', field=models.IntegerField(\n default=1, verbose_name='Количество запланированных действий')),\n migrations.AlterField(model_name='task', name='status', field=\n models.CharField(choices=[('NEW', 'Новая'), ('CNF', 'Подтверждена'),\n ('Y', 'Активна'), ('BLC', 'Заблокирована модератором'), ('DEL',\n 'Удалено'), ('DON', 'Завершено')], default='NEW', max_length=3))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.5 on 2017-08-26 21:31\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('exchange', '0004_auto_20170826_2120'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='type',\n options={'verbose_name': '\\u0442\\u0438\\u043f \\u0437\\u0430\\u0434\\u0430\\u043d\\u0438\\u044f', 'verbose_name_plural': '\\u0422\\u0438\\u043f\\u044b \\u0437\\u0430\\u0434\\u0430\\u043d\\u0438\\u044f'},\n ),\n migrations.AlterField(\n model_name='task',\n name='count',\n field=models.IntegerField(default=0, verbose_name='\\u041a\\u043e\\u043b\\u0438\\u0447\\u0435\\u0441\\u0442\\u0432\\u043e \\u0432\\u044b\\u043f\\u043e\\u043b\\u043d\\u0435\\u043d\\u043d\\u044b\\u0445 \\u0434\\u0435\\u0439\\u0441\\u0442\\u0432\\u0438\\u0439'),\n ),\n migrations.AlterField(\n model_name='task',\n name='max_count',\n field=models.IntegerField(default=1, verbose_name='\\u041a\\u043e\\u043b\\u0438\\u0447\\u0435\\u0441\\u0442\\u0432\\u043e \\u0437\\u0430\\u043f\\u043b\\u0430\\u043d\\u0438\\u0440\\u043e\\u0432\\u0430\\u043d\\u043d\\u044b\\u0445 \\u0434\\u0435\\u0439\\u0441\\u0442\\u0432\\u0438\\u0439'),\n ),\n migrations.AlterField(\n model_name='task',\n name='status',\n field=models.CharField(choices=[('NEW', '\\u041d\\u043e\\u0432\\u0430\\u044f'), ('CNF', '\\u041f\\u043e\\u0434\\u0442\\u0432\\u0435\\u0440\\u0436\\u0434\\u0435\\u043d\\u0430'), ('Y', '\\u0410\\u043a\\u0442\\u0438\\u0432\\u043d\\u0430'), ('BLC', '\\u0417\\u0430\\u0431\\u043b\\u043e\\u043a\\u0438\\u0440\\u043e\\u0432\\u0430\\u043d\\u0430 \\u043c\\u043e\\u0434\\u0435\\u0440\\u0430\\u0442\\u043e\\u0440\\u043e\\u043c'), ('DEL', '\\u0423\\u0434\\u0430\\u043b\\u0435\\u043d\\u043e'), ('DON', '\\u0417\\u0430\\u0432\\u0435\\u0440\\u0448\\u0435\\u043d\\u043e')], default='NEW', max_length=3),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
print('In Sample.py........')
ModBMW = Bmw.Bmw()
ModBMW.outModels()
ModAudi = Audi.Audi()
ModAudi.outModels()
ModNissan = Nissan.Nissan()
ModNissan.outModels()
<|reserved_special_token_1|>
from Cars import Bmw
from Cars import Audi
from Cars import Nissan

if __name__ == '__main__':
    print('In Sample.py........')
    # Instantiate each brand in turn and print its models;
    # order matches the original script: BMW, Audi, Nissan.
    for car_class in (Bmw.Bmw, Audi.Audi, Nissan.Nissan):
        car_class().outModels()
<|reserved_special_token_1|>
from Cars import Bmw
from Cars import Audi
from Cars import Nissan



# Demo entry point: exercises the Cars package by instantiating one class
# per brand module and calling its outModels() method.
if __name__ == '__main__':
    print('In Sample.py........')

    # Each brand lives in its own module inside the Cars package.

    # BMW: construct and list models
    ModBMW = Bmw.Bmw()
    ModBMW.outModels()

    # Audi: construct and list models
    ModAudi = Audi.Audi()
    ModAudi.outModels()

    # Nissan: construct and list models
    ModNissan = Nissan.Nissan()
    ModNissan.outModels()
|
flexible
|
{
"blob_id": "e15524d7ae87cbf0b10c54ee0bdc613ba589c1a9",
"index": 3812,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n print('In Sample.py........')\n ModBMW = Bmw.Bmw()\n ModBMW.outModels()\n ModAudi = Audi.Audi()\n ModAudi.outModels()\n ModNissan = Nissan.Nissan()\n ModNissan.outModels()\n",
"step-3": "from Cars import Bmw\nfrom Cars import Audi\nfrom Cars import Nissan\nif __name__ == '__main__':\n print('In Sample.py........')\n ModBMW = Bmw.Bmw()\n ModBMW.outModels()\n ModAudi = Audi.Audi()\n ModAudi.outModels()\n ModNissan = Nissan.Nissan()\n ModNissan.outModels()\n",
"step-4": "from Cars import Bmw\nfrom Cars import Audi\nfrom Cars import Nissan\n\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n print('In Sample.py........')\n\n # Import classes from your brand new package\n\n # Create an object of Bmw class & call its method\n ModBMW = Bmw.Bmw()\n ModBMW.outModels()\n\n # Create an object of Audi class & call its method\n ModAudi = Audi.Audi()\n ModAudi.outModels()\n\n # Create an object of Nissan class & call its method\n ModNissan = Nissan.Nissan()\n ModNissan.outModels()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
connection.execute(stmt)
func.update_annotations_db(Twitter_Sentiment_Analysis, connection,
'Export_csv5.csv')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
connection, Twitter_Sentiment_Analysis = func.Database_Acces(
'mysql://root@localhost/sentiment?charset=utf8mb4', 'utf8',
'Twitter_Sentiment_Analysis4')
stmt = "SET NAMES 'UTF8';"
connection.execute(stmt)
func.update_annotations_db(Twitter_Sentiment_Analysis, connection,
'Export_csv5.csv')
<|reserved_special_token_1|>
from sqlalchemy import select, update
from sqlalchemy import Table, Column, String, Integer, Float, Boolean, Date, BigInteger
from sqlalchemy import create_engine, MetaData
import API_and_Database_function as func
import pandas as pd
import re

# Connect to the local MySQL "sentiment" database (charset=utf8mb4 in the URL).
db_url = 'mysql://root@localhost/sentiment?charset=utf8mb4'
connection, sentiment_table = func.Database_Acces(
    db_url, 'utf8', 'Twitter_Sentiment_Analysis4')

# Set the session character set before touching any rows.
connection.execute("SET NAMES 'UTF8';")

# Apply the annotation updates from the CSV export to the table.
func.update_annotations_db(sentiment_table, connection, 'Export_csv5.csv')
<|reserved_special_token_1|>
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
# Script: pushes annotation updates from a CSV export into the
# Twitter_Sentiment_Analysis4 table of the local MySQL "sentiment" DB.

from sqlalchemy import select, update
from sqlalchemy import Table, Column, String, Integer, Float, Boolean, Date, BigInteger
from sqlalchemy import create_engine, MetaData
import API_and_Database_function as func
import pandas as pd
import re


# Open a connection and get a handle on the target table
# (charset=utf8mb4 in the URL; table encoding passed as 'utf8').
connection, Twitter_Sentiment_Analysis = func.Database_Acces("mysql://root@localhost/sentiment?charset=utf8mb4", 'utf8' , 'Twitter_Sentiment_Analysis4' )
# Force the session character set before any row updates.
stmt = "SET NAMES 'UTF8';"
connection.execute(stmt)
# Apply the annotation updates read from the CSV export.
func.update_annotations_db(Twitter_Sentiment_Analysis, connection, "Export_csv5.csv")
|
flexible
|
{
"blob_id": "a558b42106b036719fe38ee6efd1c5b933290f52",
"index": 47,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nconnection.execute(stmt)\nfunc.update_annotations_db(Twitter_Sentiment_Analysis, connection,\n 'Export_csv5.csv')\n",
"step-3": "<mask token>\nconnection, Twitter_Sentiment_Analysis = func.Database_Acces(\n 'mysql://root@localhost/sentiment?charset=utf8mb4', 'utf8',\n 'Twitter_Sentiment_Analysis4')\nstmt = \"SET NAMES 'UTF8';\"\nconnection.execute(stmt)\nfunc.update_annotations_db(Twitter_Sentiment_Analysis, connection,\n 'Export_csv5.csv')\n",
"step-4": "from sqlalchemy import select, update\nfrom sqlalchemy import Table, Column, String, Integer, Float, Boolean, Date, BigInteger\nfrom sqlalchemy import create_engine, MetaData\nimport API_and_Database_function as func\nimport pandas as pd\nimport re\nconnection, Twitter_Sentiment_Analysis = func.Database_Acces(\n 'mysql://root@localhost/sentiment?charset=utf8mb4', 'utf8',\n 'Twitter_Sentiment_Analysis4')\nstmt = \"SET NAMES 'UTF8';\"\nconnection.execute(stmt)\nfunc.update_annotations_db(Twitter_Sentiment_Analysis, connection,\n 'Export_csv5.csv')\n",
"step-5": "#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\n\nfrom sqlalchemy import select, update\nfrom sqlalchemy import Table, Column, String, Integer, Float, Boolean, Date, BigInteger\nfrom sqlalchemy import create_engine, MetaData\nimport API_and_Database_function as func\nimport pandas as pd\nimport re\n\n\nconnection, Twitter_Sentiment_Analysis = func.Database_Acces(\"mysql://root@localhost/sentiment?charset=utf8mb4\", 'utf8' , 'Twitter_Sentiment_Analysis4' )\nstmt = \"SET NAMES 'UTF8';\"\nconnection.execute(stmt)\nfunc.update_annotations_db(Twitter_Sentiment_Analysis, connection, \"Export_csv5.csv\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
HTTP Test for channel details
'''
import sys
sys.path.append('..')
from json import load, dumps
import urllib.request
import urllib.parse
import pytest
PORT_NUMBER = '5204'
BASE_URL = 'http://127.0.0.1:' + PORT_NUMBER
# BASE_URL is 'http://127.0.0.1:5204'
@pytest.fixture
def register_loginx2_create_invite():
    '''
    Reset the workspace, register and log in two users, then have
    user 1 create a public channel and invite user 2 into it.

    Bug fix: user 1's login request used to be built but never sent
    (the `req` variable was overwritten by user 2's login before any
    urlopen call); both logins are now actually executed.
    '''
    # RESET the server so each test starts from a clean workspace
    req = urllib.request.Request(
        f'{BASE_URL}/workspace/reset',
        headers={'Content-Type': 'application/json'},
        method='POST'
    )
    load(urllib.request.urlopen(req))
    # REGISTER user_1
    register_info_1 = dumps({
        'email': 'z5209488@unsw.edu.au',
        'password': 'enigma',
        'name_first': 'Alan',
        'name_last': 'Turing'
    }).encode('utf-8')
    req = urllib.request.Request(
        f'{BASE_URL}/auth/register',
        data=register_info_1,
        headers={'Content-Type': 'application/json'},
        method='POST'
    )
    load(urllib.request.urlopen(req))
    # REGISTER user_2
    register_info_2 = dumps({
        'email': 'z5432455@unsw.edu.au',
        'password': 'lovepassword',
        'name_first': 'Ada',
        'name_last': 'Lovelace'
    }).encode('utf-8')
    req = urllib.request.Request(
        f'{BASE_URL}/auth/register',
        data=register_info_2,
        headers={'Content-Type': 'application/json'},
        method='POST'
    )
    load(urllib.request.urlopen(req))
    # Login user_1 (previously this request was never sent)
    login_info = dumps({
        'email': 'z5209488@unsw.edu.au',
        'password': 'enigma'
    }).encode('utf-8')
    req = urllib.request.Request(
        f'{BASE_URL}/auth/login',
        data=login_info,
        headers={'Content-Type': 'application/json'},
        method='POST'
    )
    load(urllib.request.urlopen(req))
    # Login user_2
    login_info = dumps({
        'email': 'z5432455@unsw.edu.au',
        'password': 'lovepassword'
    }).encode('utf-8')
    req = urllib.request.Request(
        f'{BASE_URL}/auth/login',
        data=login_info,
        headers={'Content-Type': 'application/json'},
        method='POST'
    )
    load(urllib.request.urlopen(req))
    # Hard-coded JWT for u_id 1 — assumes the server's tokens are
    # deterministic across resets (same pattern used by the tests below).
    user_1_token = 'b\'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMSJ9.N0asY15U0QBAYTAzxGAvdkuWG6CyqzsR_rvNQtWBmLg\''
    # user_1 creates a public channel
    channel_info = dumps({
        'token': user_1_token,
        'name': 'a channel',
        'is_public': True
    }).encode('utf-8')
    req = urllib.request.Request(
        f'{BASE_URL}/channels/create',
        data=channel_info,
        headers={'Content-Type': 'application/json'},
        method='POST'
    )
    load(urllib.request.urlopen(req))
    # user_1 invites user_2 into the new channel (channel_id 1)
    join_info = dumps({
        'token': user_1_token,
        'channel_id': 1,
        'u_id': 2
    }).encode('utf-8')
    req = urllib.request.Request(
        f'{BASE_URL}/channel/invite',
        data=join_info,
        headers={'Content-Type': 'application/json'},
        method='POST'
    )
    load(urllib.request.urlopen(req))
def test_details_basic(register_loginx2_create_invite):
    '''
    Happy path: the channel creator can fetch the details of the
    channel set up by the fixture.
    '''
    user_1_token = 'b\'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMSJ9.N0asY15U0QBAYTAzxGAvdkuWG6CyqzsR_rvNQtWBmLg\''
    query_string = urllib.parse.urlencode({
        'token': user_1_token,
        'channel_id': 1
    })
    details_url = f'{BASE_URL}/channel/details?{query_string}'
    payload = load(urllib.request.urlopen(details_url))
    expected_owners = [{'u_id': 1, 'name_first': 'Alan', 'name_last': 'Turing'}]
    assert payload['name'] == 'a channel'
    assert payload['owner_members'] == expected_owners
def test_invalid_channelID(register_loginx2_create_invite):
    '''
    Requesting details for a channel_id that does not exist
    must fail with an HTTP error.
    '''
    user_1_token = 'b\'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMSJ9.N0asY15U0QBAYTAzxGAvdkuWG6CyqzsR_rvNQtWBmLg\''
    query_string = urllib.parse.urlencode({
        'token': user_1_token,
        'channel_id': 50
    })
    bad_url = f'{BASE_URL}/channel/details?{query_string}'
    with pytest.raises(urllib.error.HTTPError):
        urllib.request.urlopen(bad_url)
def test_unauthorised_user(register_loginx2_create_invite):
    '''
    A registered user who is not a member of the channel must not be
    able to view its details.

    Bug fix: the third user's login request used to be built but never
    sent; it is now actually executed before querying the channel.
    '''
    # Register a third user who is not a member of channel 1
    register_info_3 = dumps({
        'email': 'z5454545@unsw.edu.au',
        'password': 'testPassword',
        'name_first': 'Test',
        'name_last': 'User'
    }).encode('utf-8')
    req = urllib.request.Request(
        f'{BASE_URL}/auth/register',
        data=register_info_3,
        headers={'Content-Type': 'application/json'},
        method='POST'
    )
    load(urllib.request.urlopen(req))
    # Log the third user in (previously this request was never sent)
    login3_info = dumps({
        'email': 'z5454545@unsw.edu.au',
        'password': 'testPassword'
    }).encode('utf-8')
    req = urllib.request.Request(
        f'{BASE_URL}/auth/login',
        data=login3_info,
        headers={'Content-Type': 'application/json'},
        method='POST'
    )
    load(urllib.request.urlopen(req))
    # Hard-coded JWT for u_id 3 (same deterministic-token pattern as the fixture)
    user_3_token = 'b\'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMyJ9.hnzKv5QKl78L2jWvtB8w9kcxZHo1UFxGN5shF7HBK0Y\''
    query_string = urllib.parse.urlencode({
        'token': user_3_token,
        'channel_id': 1
    })
    # Non-member access must be rejected with an HTTP error
    with pytest.raises(urllib.error.HTTPError):
        urllib.request.urlopen(f'{BASE_URL}/channel/details?{query_string}')
|
normal
|
{
"blob_id": "c22b37bff74de7ea99f2009652dd00e57bb316b8",
"index": 4383,
"step-1": "<mask token>\n\n\n@pytest.fixture\ndef register_loginx2_create_invite():\n \"\"\"\n Registers, logs in 2 users, creates new channel\n \"\"\"\n req = urllib.request.Request(f'{BASE_URL}/workspace/reset', headers={\n 'Content-Type': 'application/json'}, method='POST')\n load(urllib.request.urlopen(req))\n register_info_1 = dumps({'email': 'z5209488@unsw.edu.au', 'password':\n 'enigma', 'name_first': 'Alan', 'name_last': 'Turing'}).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/register', data=\n register_info_1, headers={'Content-Type': 'application/json'},\n method='POST')\n load(urllib.request.urlopen(req))\n register_info_2 = dumps({'email': 'z5432455@unsw.edu.au', 'password':\n 'lovepassword', 'name_first': 'Ada', 'name_last': 'Lovelace'}).encode(\n 'utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/register', data=\n register_info_2, headers={'Content-Type': 'application/json'},\n method='POST')\n load(urllib.request.urlopen(req))\n login_info = dumps({'email': 'z5209488@unsw.edu.au', 'password': 'enigma'}\n ).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/login', data=login_info,\n headers={'Content-Type': 'application/json'}, method='POST')\n login_info = dumps({'email': 'z5432455@unsw.edu.au', 'password':\n 'lovepassword'}).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/login', data=login_info,\n headers={'Content-Type': 'application/json'}, method='POST')\n load(urllib.request.urlopen(req))\n user_1_token = (\n \"b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMSJ9.N0asY15U0QBAYTAzxGAvdkuWG6CyqzsR_rvNQtWBmLg'\"\n )\n channel_info = dumps({'token': user_1_token, 'name': 'a channel',\n 'is_public': True}).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/channels/create', data=\n channel_info, headers={'Content-Type': 'application/json'}, method=\n 'POST')\n load(urllib.request.urlopen(req))\n join_info = dumps({'token': user_1_token, 'channel_id': 1, 'u_id': 2}\n 
).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/channel/invite', data=\n join_info, headers={'Content-Type': 'application/json'}, method='POST')\n load(urllib.request.urlopen(req))\n\n\ndef test_details_basic(register_loginx2_create_invite):\n \"\"\"\n This test should pass with no issues\n \"\"\"\n user_1_token = (\n \"b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMSJ9.N0asY15U0QBAYTAzxGAvdkuWG6CyqzsR_rvNQtWBmLg'\"\n )\n queryString = urllib.parse.urlencode({'token': user_1_token,\n 'channel_id': 1})\n payload = load(urllib.request.urlopen(\n f'{BASE_URL}/channel/details?{queryString}'))\n assert payload['name'] == 'a channel'\n assert payload['owner_members'] == [{'u_id': 1, 'name_first': 'Alan',\n 'name_last': 'Turing'}]\n\n\ndef test_invalid_channelID(register_loginx2_create_invite):\n \"\"\"\n Channel ID is not a valid channel\n \"\"\"\n user_1_token = (\n \"b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMSJ9.N0asY15U0QBAYTAzxGAvdkuWG6CyqzsR_rvNQtWBmLg'\"\n )\n queryString = urllib.parse.urlencode({'token': user_1_token,\n 'channel_id': 50})\n with pytest.raises(urllib.error.HTTPError):\n urllib.request.urlopen(f'{BASE_URL}/channel/details?{queryString}')\n\n\ndef test_unauthorised_user(register_loginx2_create_invite):\n \"\"\"\n Authorised user is not a member of channel with channel_id\n \"\"\"\n register_info_2 = dumps({'email': 'z5454545@unsw.edu.au', 'password':\n 'testPassword', 'name_first': 'Test', 'name_last': 'User'}).encode(\n 'utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/register', data=\n register_info_2, headers={'Content-Type': 'application/json'},\n method='POST')\n load(urllib.request.urlopen(req))\n login3_info = dumps({'email': 'z5454545@unsw.edu.au', 'password':\n 'testPassword'}).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/login', data=login3_info,\n headers={'Content-Type': 'application/json'}, method='POST')\n user_3_token = (\n 
\"b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMyJ9.hnzKv5QKl78L2jWvtB8w9kcxZHo1UFxGN5shF7HBK0Y'\"\n )\n queryString = urllib.parse.urlencode({'token': user_3_token,\n 'channel_id': 1})\n with pytest.raises(urllib.error.HTTPError):\n urllib.request.urlopen(f'{BASE_URL}/channel/details?{queryString}')\n",
"step-2": "<mask token>\nsys.path.append('..')\n<mask token>\n\n\n@pytest.fixture\ndef register_loginx2_create_invite():\n \"\"\"\n Registers, logs in 2 users, creates new channel\n \"\"\"\n req = urllib.request.Request(f'{BASE_URL}/workspace/reset', headers={\n 'Content-Type': 'application/json'}, method='POST')\n load(urllib.request.urlopen(req))\n register_info_1 = dumps({'email': 'z5209488@unsw.edu.au', 'password':\n 'enigma', 'name_first': 'Alan', 'name_last': 'Turing'}).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/register', data=\n register_info_1, headers={'Content-Type': 'application/json'},\n method='POST')\n load(urllib.request.urlopen(req))\n register_info_2 = dumps({'email': 'z5432455@unsw.edu.au', 'password':\n 'lovepassword', 'name_first': 'Ada', 'name_last': 'Lovelace'}).encode(\n 'utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/register', data=\n register_info_2, headers={'Content-Type': 'application/json'},\n method='POST')\n load(urllib.request.urlopen(req))\n login_info = dumps({'email': 'z5209488@unsw.edu.au', 'password': 'enigma'}\n ).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/login', data=login_info,\n headers={'Content-Type': 'application/json'}, method='POST')\n login_info = dumps({'email': 'z5432455@unsw.edu.au', 'password':\n 'lovepassword'}).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/login', data=login_info,\n headers={'Content-Type': 'application/json'}, method='POST')\n load(urllib.request.urlopen(req))\n user_1_token = (\n \"b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMSJ9.N0asY15U0QBAYTAzxGAvdkuWG6CyqzsR_rvNQtWBmLg'\"\n )\n channel_info = dumps({'token': user_1_token, 'name': 'a channel',\n 'is_public': True}).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/channels/create', data=\n channel_info, headers={'Content-Type': 'application/json'}, method=\n 'POST')\n load(urllib.request.urlopen(req))\n join_info = dumps({'token': user_1_token, 
'channel_id': 1, 'u_id': 2}\n ).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/channel/invite', data=\n join_info, headers={'Content-Type': 'application/json'}, method='POST')\n load(urllib.request.urlopen(req))\n\n\ndef test_details_basic(register_loginx2_create_invite):\n \"\"\"\n This test should pass with no issues\n \"\"\"\n user_1_token = (\n \"b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMSJ9.N0asY15U0QBAYTAzxGAvdkuWG6CyqzsR_rvNQtWBmLg'\"\n )\n queryString = urllib.parse.urlencode({'token': user_1_token,\n 'channel_id': 1})\n payload = load(urllib.request.urlopen(\n f'{BASE_URL}/channel/details?{queryString}'))\n assert payload['name'] == 'a channel'\n assert payload['owner_members'] == [{'u_id': 1, 'name_first': 'Alan',\n 'name_last': 'Turing'}]\n\n\ndef test_invalid_channelID(register_loginx2_create_invite):\n \"\"\"\n Channel ID is not a valid channel\n \"\"\"\n user_1_token = (\n \"b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMSJ9.N0asY15U0QBAYTAzxGAvdkuWG6CyqzsR_rvNQtWBmLg'\"\n )\n queryString = urllib.parse.urlencode({'token': user_1_token,\n 'channel_id': 50})\n with pytest.raises(urllib.error.HTTPError):\n urllib.request.urlopen(f'{BASE_URL}/channel/details?{queryString}')\n\n\ndef test_unauthorised_user(register_loginx2_create_invite):\n \"\"\"\n Authorised user is not a member of channel with channel_id\n \"\"\"\n register_info_2 = dumps({'email': 'z5454545@unsw.edu.au', 'password':\n 'testPassword', 'name_first': 'Test', 'name_last': 'User'}).encode(\n 'utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/register', data=\n register_info_2, headers={'Content-Type': 'application/json'},\n method='POST')\n load(urllib.request.urlopen(req))\n login3_info = dumps({'email': 'z5454545@unsw.edu.au', 'password':\n 'testPassword'}).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/login', data=login3_info,\n headers={'Content-Type': 'application/json'}, method='POST')\n user_3_token = (\n 
\"b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMyJ9.hnzKv5QKl78L2jWvtB8w9kcxZHo1UFxGN5shF7HBK0Y'\"\n )\n queryString = urllib.parse.urlencode({'token': user_3_token,\n 'channel_id': 1})\n with pytest.raises(urllib.error.HTTPError):\n urllib.request.urlopen(f'{BASE_URL}/channel/details?{queryString}')\n",
"step-3": "<mask token>\nsys.path.append('..')\n<mask token>\nPORT_NUMBER = '5204'\nBASE_URL = 'http://127.0.0.1:' + PORT_NUMBER\n\n\n@pytest.fixture\ndef register_loginx2_create_invite():\n \"\"\"\n Registers, logs in 2 users, creates new channel\n \"\"\"\n req = urllib.request.Request(f'{BASE_URL}/workspace/reset', headers={\n 'Content-Type': 'application/json'}, method='POST')\n load(urllib.request.urlopen(req))\n register_info_1 = dumps({'email': 'z5209488@unsw.edu.au', 'password':\n 'enigma', 'name_first': 'Alan', 'name_last': 'Turing'}).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/register', data=\n register_info_1, headers={'Content-Type': 'application/json'},\n method='POST')\n load(urllib.request.urlopen(req))\n register_info_2 = dumps({'email': 'z5432455@unsw.edu.au', 'password':\n 'lovepassword', 'name_first': 'Ada', 'name_last': 'Lovelace'}).encode(\n 'utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/register', data=\n register_info_2, headers={'Content-Type': 'application/json'},\n method='POST')\n load(urllib.request.urlopen(req))\n login_info = dumps({'email': 'z5209488@unsw.edu.au', 'password': 'enigma'}\n ).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/login', data=login_info,\n headers={'Content-Type': 'application/json'}, method='POST')\n login_info = dumps({'email': 'z5432455@unsw.edu.au', 'password':\n 'lovepassword'}).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/login', data=login_info,\n headers={'Content-Type': 'application/json'}, method='POST')\n load(urllib.request.urlopen(req))\n user_1_token = (\n \"b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMSJ9.N0asY15U0QBAYTAzxGAvdkuWG6CyqzsR_rvNQtWBmLg'\"\n )\n channel_info = dumps({'token': user_1_token, 'name': 'a channel',\n 'is_public': True}).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/channels/create', data=\n channel_info, headers={'Content-Type': 'application/json'}, method=\n 'POST')\n 
load(urllib.request.urlopen(req))\n join_info = dumps({'token': user_1_token, 'channel_id': 1, 'u_id': 2}\n ).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/channel/invite', data=\n join_info, headers={'Content-Type': 'application/json'}, method='POST')\n load(urllib.request.urlopen(req))\n\n\ndef test_details_basic(register_loginx2_create_invite):\n \"\"\"\n This test should pass with no issues\n \"\"\"\n user_1_token = (\n \"b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMSJ9.N0asY15U0QBAYTAzxGAvdkuWG6CyqzsR_rvNQtWBmLg'\"\n )\n queryString = urllib.parse.urlencode({'token': user_1_token,\n 'channel_id': 1})\n payload = load(urllib.request.urlopen(\n f'{BASE_URL}/channel/details?{queryString}'))\n assert payload['name'] == 'a channel'\n assert payload['owner_members'] == [{'u_id': 1, 'name_first': 'Alan',\n 'name_last': 'Turing'}]\n\n\ndef test_invalid_channelID(register_loginx2_create_invite):\n \"\"\"\n Channel ID is not a valid channel\n \"\"\"\n user_1_token = (\n \"b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMSJ9.N0asY15U0QBAYTAzxGAvdkuWG6CyqzsR_rvNQtWBmLg'\"\n )\n queryString = urllib.parse.urlencode({'token': user_1_token,\n 'channel_id': 50})\n with pytest.raises(urllib.error.HTTPError):\n urllib.request.urlopen(f'{BASE_URL}/channel/details?{queryString}')\n\n\ndef test_unauthorised_user(register_loginx2_create_invite):\n \"\"\"\n Authorised user is not a member of channel with channel_id\n \"\"\"\n register_info_2 = dumps({'email': 'z5454545@unsw.edu.au', 'password':\n 'testPassword', 'name_first': 'Test', 'name_last': 'User'}).encode(\n 'utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/register', data=\n register_info_2, headers={'Content-Type': 'application/json'},\n method='POST')\n load(urllib.request.urlopen(req))\n login3_info = dumps({'email': 'z5454545@unsw.edu.au', 'password':\n 'testPassword'}).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/login', data=login3_info,\n headers={'Content-Type': 
'application/json'}, method='POST')\n user_3_token = (\n \"b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMyJ9.hnzKv5QKl78L2jWvtB8w9kcxZHo1UFxGN5shF7HBK0Y'\"\n )\n queryString = urllib.parse.urlencode({'token': user_3_token,\n 'channel_id': 1})\n with pytest.raises(urllib.error.HTTPError):\n urllib.request.urlopen(f'{BASE_URL}/channel/details?{queryString}')\n",
"step-4": "<mask token>\nimport sys\nsys.path.append('..')\nfrom json import load, dumps\nimport urllib.request\nimport urllib.parse\nimport pytest\nPORT_NUMBER = '5204'\nBASE_URL = 'http://127.0.0.1:' + PORT_NUMBER\n\n\n@pytest.fixture\ndef register_loginx2_create_invite():\n \"\"\"\n Registers, logs in 2 users, creates new channel\n \"\"\"\n req = urllib.request.Request(f'{BASE_URL}/workspace/reset', headers={\n 'Content-Type': 'application/json'}, method='POST')\n load(urllib.request.urlopen(req))\n register_info_1 = dumps({'email': 'z5209488@unsw.edu.au', 'password':\n 'enigma', 'name_first': 'Alan', 'name_last': 'Turing'}).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/register', data=\n register_info_1, headers={'Content-Type': 'application/json'},\n method='POST')\n load(urllib.request.urlopen(req))\n register_info_2 = dumps({'email': 'z5432455@unsw.edu.au', 'password':\n 'lovepassword', 'name_first': 'Ada', 'name_last': 'Lovelace'}).encode(\n 'utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/register', data=\n register_info_2, headers={'Content-Type': 'application/json'},\n method='POST')\n load(urllib.request.urlopen(req))\n login_info = dumps({'email': 'z5209488@unsw.edu.au', 'password': 'enigma'}\n ).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/login', data=login_info,\n headers={'Content-Type': 'application/json'}, method='POST')\n login_info = dumps({'email': 'z5432455@unsw.edu.au', 'password':\n 'lovepassword'}).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/login', data=login_info,\n headers={'Content-Type': 'application/json'}, method='POST')\n load(urllib.request.urlopen(req))\n user_1_token = (\n \"b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMSJ9.N0asY15U0QBAYTAzxGAvdkuWG6CyqzsR_rvNQtWBmLg'\"\n )\n channel_info = dumps({'token': user_1_token, 'name': 'a channel',\n 'is_public': True}).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/channels/create', data=\n 
channel_info, headers={'Content-Type': 'application/json'}, method=\n 'POST')\n load(urllib.request.urlopen(req))\n join_info = dumps({'token': user_1_token, 'channel_id': 1, 'u_id': 2}\n ).encode('utf-8')\n req = urllib.request.Request(f'{BASE_URL}/channel/invite', data=\n join_info, headers={'Content-Type': 'application/json'}, method='POST')\n load(urllib.request.urlopen(req))\n\n\ndef test_details_basic(register_loginx2_create_invite):\n \"\"\"\n This test should pass with no issues\n \"\"\"\n user_1_token = (\n \"b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMSJ9.N0asY15U0QBAYTAzxGAvdkuWG6CyqzsR_rvNQtWBmLg'\"\n )\n queryString = urllib.parse.urlencode({'token': user_1_token,\n 'channel_id': 1})\n payload = load(urllib.request.urlopen(\n f'{BASE_URL}/channel/details?{queryString}'))\n assert payload['name'] == 'a channel'\n assert payload['owner_members'] == [{'u_id': 1, 'name_first': 'Alan',\n 'name_last': 'Turing'}]\n\n\ndef test_invalid_channelID(register_loginx2_create_invite):\n \"\"\"\n Channel ID is not a valid channel\n \"\"\"\n user_1_token = (\n \"b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMSJ9.N0asY15U0QBAYTAzxGAvdkuWG6CyqzsR_rvNQtWBmLg'\"\n )\n queryString = urllib.parse.urlencode({'token': user_1_token,\n 'channel_id': 50})\n with pytest.raises(urllib.error.HTTPError):\n urllib.request.urlopen(f'{BASE_URL}/channel/details?{queryString}')\n\n\ndef test_unauthorised_user(register_loginx2_create_invite):\n \"\"\"\n Authorised user is not a member of channel with channel_id\n \"\"\"\n register_info_2 = dumps({'email': 'z5454545@unsw.edu.au', 'password':\n 'testPassword', 'name_first': 'Test', 'name_last': 'User'}).encode(\n 'utf-8')\n req = urllib.request.Request(f'{BASE_URL}/auth/register', data=\n register_info_2, headers={'Content-Type': 'application/json'},\n method='POST')\n load(urllib.request.urlopen(req))\n login3_info = dumps({'email': 'z5454545@unsw.edu.au', 'password':\n 'testPassword'}).encode('utf-8')\n req = 
urllib.request.Request(f'{BASE_URL}/auth/login', data=login3_info,\n headers={'Content-Type': 'application/json'}, method='POST')\n user_3_token = (\n \"b'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMyJ9.hnzKv5QKl78L2jWvtB8w9kcxZHo1UFxGN5shF7HBK0Y'\"\n )\n queryString = urllib.parse.urlencode({'token': user_3_token,\n 'channel_id': 1})\n with pytest.raises(urllib.error.HTTPError):\n urllib.request.urlopen(f'{BASE_URL}/channel/details?{queryString}')\n",
"step-5": "'''\nHTTP Test for channel details\n'''\nimport sys\nsys.path.append('..')\nfrom json import load, dumps\nimport urllib.request\nimport urllib.parse\nimport pytest\n\n\nPORT_NUMBER = '5204'\nBASE_URL = 'http://127.0.0.1:' + PORT_NUMBER\n#BASE_URL now is 'http://127.0.0.1:5321'\n\n@pytest.fixture\ndef register_loginx2_create_invite():\n '''\n Registers, logs in 2 users, creates new channel\n '''\n # RESET\n req = urllib.request.Request(\n f'{BASE_URL}/workspace/reset',\n headers={'Content-Type': 'application/json'},\n method='POST'\n )\n\n load(urllib.request.urlopen(req))\n\n # REGISTER user_1\n register_info_1 = dumps({\n 'email': 'z5209488@unsw.edu.au',\n 'password': 'enigma',\n 'name_first': 'Alan',\n 'name_last': 'Turing'\n }).encode('utf-8')\n\n req = urllib.request.Request(\n f'{BASE_URL}/auth/register',\n data=register_info_1,\n headers={'Content-Type': 'application/json'},\n method='POST'\n )\n\n load(urllib.request.urlopen(req))\n\n # REGISTER user_2\n register_info_2 = dumps({\n 'email': 'z5432455@unsw.edu.au',\n 'password': 'lovepassword',\n 'name_first': 'Ada',\n 'name_last': 'Lovelace'\n }).encode('utf-8')\n\n\n req = urllib.request.Request(\n f'{BASE_URL}/auth/register',\n data=register_info_2,\n headers={'Content-Type': 'application/json'},\n method='POST'\n )\n\n load(urllib.request.urlopen(req))\n\n # Login user_1\n login_info = dumps({\n 'email': 'z5209488@unsw.edu.au',\n 'password': 'enigma'\n }).encode('utf-8')\n\n req = urllib.request.Request(\n f'{BASE_URL}/auth/login',\n data=login_info,\n headers={'Content-Type': 'application/json'},\n method='POST'\n )\n\n # Login user_2\n login_info = dumps({\n 'email': 'z5432455@unsw.edu.au',\n 'password': 'lovepassword'\n }).encode('utf-8')\n\n req = urllib.request.Request(\n f'{BASE_URL}/auth/login',\n data=login_info,\n headers={'Content-Type': 'application/json'},\n method='POST'\n )\n\n load(urllib.request.urlopen(req))\n #return payload\n\n user_1_token = 
'b\\'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMSJ9.N0asY15U0QBAYTAzxGAvdkuWG6CyqzsR_rvNQtWBmLg\\''\n #user_2_token = 'b\\'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMiJ9.UNGv0HfSeyM4FtXkAc4HfuOl_HyNLFmRMeLx_4c0Ryg\\''\n\n # user_1 creates a public channel\n channel_info = dumps({\n 'token': user_1_token,\n 'name': 'a channel',\n 'is_public': True\n }).encode('utf-8')\n\n req = urllib.request.Request(\n f'{BASE_URL}/channels/create',\n data=channel_info,\n headers={'Content-Type': 'application/json'},\n method='POST'\n )\n\n load(urllib.request.urlopen(req))\n\n # user_2 join user_1's channel\n join_info = dumps({\n 'token': user_1_token,\n 'channel_id': 1,\n 'u_id': 2\n }).encode('utf-8')\n\n req = urllib.request.Request(\n f'{BASE_URL}/channel/invite',\n data=join_info,\n headers={'Content-Type': 'application/json'},\n method='POST'\n )\n\n load(urllib.request.urlopen(req))\n\ndef test_details_basic(register_loginx2_create_invite):\n '''\n This test should pass with no issues\n '''\n user_1_token = 'b\\'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMSJ9.N0asY15U0QBAYTAzxGAvdkuWG6CyqzsR_rvNQtWBmLg\\''\n #user_2_token = 'b\\'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMiJ9.UNGv0HfSeyM4FtXkAc4HfuOl_HyNLFmRMeLx_4c0Ryg\\''\n\n # Get channels details\n queryString = urllib.parse.urlencode({\n 'token': user_1_token,\n 'channel_id': 1\n })\n payload = load(urllib.request.urlopen(f\"{BASE_URL}/channel/details?{queryString}\"))\n\n\n\n #payload = load(urllib.request.urlopen(req))\n\n assert payload['name'] == 'a channel'\n assert payload['owner_members'] == [{\"u_id\": 1, \"name_first\": \"Alan\", \"name_last\": \"Turing\"}]\n\ndef test_invalid_channelID(register_loginx2_create_invite):\n '''\n Channel ID is not a valid channel\n '''\n\n user_1_token = 'b\\'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMSJ9.N0asY15U0QBAYTAzxGAvdkuWG6CyqzsR_rvNQtWBmLg\\''\n #user_2_token = 
'b\\'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMiJ9.UNGv0HfSeyM4FtXkAc4HfuOl_HyNLFmRMeLx_4c0Ryg\\''\n\n queryString = urllib.parse.urlencode({\n 'token': user_1_token,\n 'channel_id': 50\n })\n\n\n\n with pytest.raises(urllib.error.HTTPError):\n urllib.request.urlopen(f\"{BASE_URL}/channel/details?{queryString}\")\n\n #load(urllib.request.urlopen(req))\n\ndef test_unauthorised_user(register_loginx2_create_invite):\n '''\n Authorised user is not a member of channel with channel_id\n '''\n\n register_info_2 = dumps({\n 'email': 'z5454545@unsw.edu.au',\n 'password': 'testPassword',\n 'name_first': 'Test',\n 'name_last': 'User'\n }).encode('utf-8')\n\n req = urllib.request.Request(\n f'{BASE_URL}/auth/register',\n data=register_info_2,\n headers={'Content-Type': 'application/json'},\n method='POST'\n )\n\n load(urllib.request.urlopen(req))\n\n login3_info = dumps({\n 'email': 'z5454545@unsw.edu.au',\n 'password': 'testPassword'\n }).encode('utf-8')\n\n req = urllib.request.Request(\n f'{BASE_URL}/auth/login',\n data=login3_info,\n headers={'Content-Type': 'application/json'},\n method='POST'\n )\n\n user_3_token = 'b\\'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1X2lkIjoiMyJ9.hnzKv5QKl78L2jWvtB8w9kcxZHo1UFxGN5shF7HBK0Y\\''\n\n queryString = urllib.parse.urlencode({\n 'token': user_3_token,\n 'channel_id': 1\n })\n\n with pytest.raises(urllib.error.HTTPError):\n urllib.request.urlopen(f\"{BASE_URL}/channel/details?{queryString}\")\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
class _ProtectedClass:
pass
class MyClass:
pass
class OtherClass(MyClass):
pass
def _protected_fun() ->MyClass:
return variable
<|reserved_special_token_0|>
def my_fun2() ->MyClass:
return variable
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class _ProtectedClass:
pass
class MyClass:
pass
class OtherClass(MyClass):
pass
def _protected_fun() ->MyClass:
return variable
def my_fun() ->MyClass:
return variable
def my_fun2() ->MyClass:
return variable
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class _ProtectedClass:
pass
class MyClass:
pass
class OtherClass(MyClass):
pass
def _protected_fun() ->MyClass:
return variable
def my_fun() ->MyClass:
return variable
def my_fun2() ->MyClass:
return variable
variable: MyClass
variable_with_value: MyClass = MyClass()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class _ProtectedClass:
pass
class MyClass:
pass
class OtherClass(MyClass):
pass
def _protected_fun() ->MyClass:
return variable
def my_fun() ->MyClass:
return variable
def my_fun2() ->MyClass:
return variable
variable: MyClass
variable_with_value: MyClass = MyClass()
__all__ = ['OtherClass', 'my_fun2', 'variable']
<|reserved_special_token_1|>
class _ProtectedClass:
pass
class MyClass:
pass
class OtherClass(MyClass):
pass
def _protected_fun() -> MyClass:
return variable # noqa: F821
def my_fun() -> MyClass:
return variable # noqa: F821
def my_fun2() -> MyClass:
return variable # noqa: F821
variable: MyClass
variable_with_value: MyClass = MyClass()
__all__ = [ # noqa: F822
"OtherClass",
"my_fun2",
"variable",
]
|
flexible
|
{
"blob_id": "b5949b40d731178bdbab776af8877921dcdfbf15",
"index": 3215,
"step-1": "class _ProtectedClass:\n pass\n\n\nclass MyClass:\n pass\n\n\nclass OtherClass(MyClass):\n pass\n\n\ndef _protected_fun() ->MyClass:\n return variable\n\n\n<mask token>\n\n\ndef my_fun2() ->MyClass:\n return variable\n\n\n<mask token>\n",
"step-2": "class _ProtectedClass:\n pass\n\n\nclass MyClass:\n pass\n\n\nclass OtherClass(MyClass):\n pass\n\n\ndef _protected_fun() ->MyClass:\n return variable\n\n\ndef my_fun() ->MyClass:\n return variable\n\n\ndef my_fun2() ->MyClass:\n return variable\n\n\n<mask token>\n",
"step-3": "class _ProtectedClass:\n pass\n\n\nclass MyClass:\n pass\n\n\nclass OtherClass(MyClass):\n pass\n\n\ndef _protected_fun() ->MyClass:\n return variable\n\n\ndef my_fun() ->MyClass:\n return variable\n\n\ndef my_fun2() ->MyClass:\n return variable\n\n\nvariable: MyClass\nvariable_with_value: MyClass = MyClass()\n<mask token>\n",
"step-4": "class _ProtectedClass:\n pass\n\n\nclass MyClass:\n pass\n\n\nclass OtherClass(MyClass):\n pass\n\n\ndef _protected_fun() ->MyClass:\n return variable\n\n\ndef my_fun() ->MyClass:\n return variable\n\n\ndef my_fun2() ->MyClass:\n return variable\n\n\nvariable: MyClass\nvariable_with_value: MyClass = MyClass()\n__all__ = ['OtherClass', 'my_fun2', 'variable']\n",
"step-5": "class _ProtectedClass:\n pass\n\n\nclass MyClass:\n pass\n\n\nclass OtherClass(MyClass):\n pass\n\n\ndef _protected_fun() -> MyClass:\n return variable # noqa: F821\n\n\ndef my_fun() -> MyClass:\n return variable # noqa: F821\n\n\ndef my_fun2() -> MyClass:\n return variable # noqa: F821\n\n\nvariable: MyClass\nvariable_with_value: MyClass = MyClass()\n\n\n__all__ = [ # noqa: F822\n \"OtherClass\",\n \"my_fun2\",\n \"variable\",\n]\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
"""
* author - kajol
* date - 12/24/2020
* time - 1:24 PM
* package - com.bridgelabz.basicprograms
* Title - Print a table of the powers of 2 that are less than or equal to 2^N
"""
try:
number = int(input("Enter number: "))
#print power of 2 within given range
if number < 31:
for num in range(1, number+1):
print("2 ^", num, "=", 2**num)
else:
print("Enter number in valid range")
except Exception:
print("Exception occured")
|
normal
|
{
"blob_id": "b0f0bcfb5739d46de54cbe46614e82bf5a2d13fb",
"index": 9038,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n number = int(input('Enter number: '))\n if number < 31:\n for num in range(1, number + 1):\n print('2 ^', num, '=', 2 ** num)\n else:\n print('Enter number in valid range')\nexcept Exception:\n print('Exception occured')\n",
"step-3": "\"\"\"\n * author - kajol\n * date - 12/24/2020\n * time - 1:24 PM\n * package - com.bridgelabz.basicprograms\n * Title - Print a table of the powers of 2 that are less than or equal to 2^N\n\"\"\"\n\ntry:\n number = int(input(\"Enter number: \"))\n #print power of 2 within given range\n if number < 31:\n for num in range(1, number+1):\n print(\"2 ^\", num, \"=\", 2**num)\n else:\n print(\"Enter number in valid range\")\nexcept Exception:\n print(\"Exception occured\")\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import pandas as pd
import numpy as np
import csv
#import nltk
#nltk.download('punkt')
from nltk.tokenize import sent_tokenize
csv_file=open("/home/debajit15/train+dev.csv")
pd.set_option('display.max_colwidth', None)
df=pd.read_csv(csv_file,sep=',');
df = df[pd.notnull(df['Aspects'])]
#print(df['Opinion_Words'].iloc[0:1])
def train_validate_test_split(df, train_percent=.8, validate_percent=.2, seed=None):
np.random.seed(seed)
perm = np.random.permutation(df.index)
m = len(df.index)
train_end = int(train_percent * m)
train = df.iloc[:train_end]
validate = df.iloc[train_end:]
return train, validate
trainl,vall=train_validate_test_split(df)
def get(df):
col=df[['review_body']]
print(col.head())
aspect=df[['Aspects']]
opinions=df[['Sentiments']]
print(df.shape[0])
now=""
for o in range(0,df.shape[0]):
d=col.iloc[o:o+1]
sd=d.to_string(index=False,header=None)
sd=sd[1:]
l=sent_tokenize(sd)
a=aspect.iloc[o:o+1]
sa=a.to_string(index=False,header=None)
asp=sa.split(";")
a=opinions.iloc[o:o+1]
sa=a.to_string(index=False,header=None)
senti=sa.split(";")
if(len(asp)!=len(senti) or len(l)!=len(asp) or len(l)!=len(senti)):
continue
it=0
for i in l:
chks=[x.strip() for x in senti[it].split(",")]
chka=[x.strip() for x in asp[it].split(",")]
g=[]
itr=0
if(len(chks)!=len(chka)):
continue
for k in chka:
f=k.split(" ")
num=chks[itr]
if(len(f)>1):
h=0
for x in f:
x=x.strip(' ')
x=x.strip('"')
g+=[x]
if(h<len(f)-1):
chks.insert(itr,'1')
h+=1
else:
g+=f
itr+=1
chka=g
now+=i
now+="####"
j=i.split(" ")
itr=0
for word in j:
if itr<len(chka) and word==chka[itr] :
if chks[itr]=='1':
s=word+"=T-POS"
elif chks[itr]=='0':
s=word+"=T-NEU"
else:
s=word+"=T-NEG"
itr+=1
else:
s=word+"=O"
now+=s+" "
now+="\n"
it+=1
return now
train=get(trainl)
val=get(vall)
text_file = open("/home/debajit15/train.txt", "w")
n = text_file.write(train)
text_file.close()
text_file = open("/home/debajit15/dev.txt", "w")
n = text_file.write(val)
text_file.close()
# #print(df[['review_body']])
|
normal
|
{
"blob_id": "c18c407476375fb1647fefaedb5d7ea0e0aabe3a",
"index": 929,
"step-1": "<mask token>\n\n\ndef train_validate_test_split(df, train_percent=0.8, validate_percent=0.2,\n seed=None):\n np.random.seed(seed)\n perm = np.random.permutation(df.index)\n m = len(df.index)\n train_end = int(train_percent * m)\n train = df.iloc[:train_end]\n validate = df.iloc[train_end:]\n return train, validate\n\n\n<mask token>\n\n\ndef get(df):\n col = df[['review_body']]\n print(col.head())\n aspect = df[['Aspects']]\n opinions = df[['Sentiments']]\n print(df.shape[0])\n now = ''\n for o in range(0, df.shape[0]):\n d = col.iloc[o:o + 1]\n sd = d.to_string(index=False, header=None)\n sd = sd[1:]\n l = sent_tokenize(sd)\n a = aspect.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n asp = sa.split(';')\n a = opinions.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n senti = sa.split(';')\n if len(asp) != len(senti) or len(l) != len(asp) or len(l) != len(senti\n ):\n continue\n it = 0\n for i in l:\n chks = [x.strip() for x in senti[it].split(',')]\n chka = [x.strip() for x in asp[it].split(',')]\n g = []\n itr = 0\n if len(chks) != len(chka):\n continue\n for k in chka:\n f = k.split(' ')\n num = chks[itr]\n if len(f) > 1:\n h = 0\n for x in f:\n x = x.strip(' ')\n x = x.strip('\"')\n g += [x]\n if h < len(f) - 1:\n chks.insert(itr, '1')\n h += 1\n else:\n g += f\n itr += 1\n chka = g\n now += i\n now += '####'\n j = i.split(' ')\n itr = 0\n for word in j:\n if itr < len(chka) and word == chka[itr]:\n if chks[itr] == '1':\n s = word + '=T-POS'\n elif chks[itr] == '0':\n s = word + '=T-NEU'\n else:\n s = word + '=T-NEG'\n itr += 1\n else:\n s = word + '=O'\n now += s + ' '\n now += '\\n'\n it += 1\n return now\n\n\n<mask token>\n",
"step-2": "<mask token>\npd.set_option('display.max_colwidth', None)\n<mask token>\n\n\ndef train_validate_test_split(df, train_percent=0.8, validate_percent=0.2,\n seed=None):\n np.random.seed(seed)\n perm = np.random.permutation(df.index)\n m = len(df.index)\n train_end = int(train_percent * m)\n train = df.iloc[:train_end]\n validate = df.iloc[train_end:]\n return train, validate\n\n\n<mask token>\n\n\ndef get(df):\n col = df[['review_body']]\n print(col.head())\n aspect = df[['Aspects']]\n opinions = df[['Sentiments']]\n print(df.shape[0])\n now = ''\n for o in range(0, df.shape[0]):\n d = col.iloc[o:o + 1]\n sd = d.to_string(index=False, header=None)\n sd = sd[1:]\n l = sent_tokenize(sd)\n a = aspect.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n asp = sa.split(';')\n a = opinions.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n senti = sa.split(';')\n if len(asp) != len(senti) or len(l) != len(asp) or len(l) != len(senti\n ):\n continue\n it = 0\n for i in l:\n chks = [x.strip() for x in senti[it].split(',')]\n chka = [x.strip() for x in asp[it].split(',')]\n g = []\n itr = 0\n if len(chks) != len(chka):\n continue\n for k in chka:\n f = k.split(' ')\n num = chks[itr]\n if len(f) > 1:\n h = 0\n for x in f:\n x = x.strip(' ')\n x = x.strip('\"')\n g += [x]\n if h < len(f) - 1:\n chks.insert(itr, '1')\n h += 1\n else:\n g += f\n itr += 1\n chka = g\n now += i\n now += '####'\n j = i.split(' ')\n itr = 0\n for word in j:\n if itr < len(chka) and word == chka[itr]:\n if chks[itr] == '1':\n s = word + '=T-POS'\n elif chks[itr] == '0':\n s = word + '=T-NEU'\n else:\n s = word + '=T-NEG'\n itr += 1\n else:\n s = word + '=O'\n now += s + ' '\n now += '\\n'\n it += 1\n return now\n\n\n<mask token>\ntext_file.close()\n<mask token>\ntext_file.close()\n",
"step-3": "<mask token>\ncsv_file = open('/home/debajit15/train+dev.csv')\npd.set_option('display.max_colwidth', None)\ndf = pd.read_csv(csv_file, sep=',')\ndf = df[pd.notnull(df['Aspects'])]\n\n\ndef train_validate_test_split(df, train_percent=0.8, validate_percent=0.2,\n seed=None):\n np.random.seed(seed)\n perm = np.random.permutation(df.index)\n m = len(df.index)\n train_end = int(train_percent * m)\n train = df.iloc[:train_end]\n validate = df.iloc[train_end:]\n return train, validate\n\n\ntrainl, vall = train_validate_test_split(df)\n\n\ndef get(df):\n col = df[['review_body']]\n print(col.head())\n aspect = df[['Aspects']]\n opinions = df[['Sentiments']]\n print(df.shape[0])\n now = ''\n for o in range(0, df.shape[0]):\n d = col.iloc[o:o + 1]\n sd = d.to_string(index=False, header=None)\n sd = sd[1:]\n l = sent_tokenize(sd)\n a = aspect.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n asp = sa.split(';')\n a = opinions.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n senti = sa.split(';')\n if len(asp) != len(senti) or len(l) != len(asp) or len(l) != len(senti\n ):\n continue\n it = 0\n for i in l:\n chks = [x.strip() for x in senti[it].split(',')]\n chka = [x.strip() for x in asp[it].split(',')]\n g = []\n itr = 0\n if len(chks) != len(chka):\n continue\n for k in chka:\n f = k.split(' ')\n num = chks[itr]\n if len(f) > 1:\n h = 0\n for x in f:\n x = x.strip(' ')\n x = x.strip('\"')\n g += [x]\n if h < len(f) - 1:\n chks.insert(itr, '1')\n h += 1\n else:\n g += f\n itr += 1\n chka = g\n now += i\n now += '####'\n j = i.split(' ')\n itr = 0\n for word in j:\n if itr < len(chka) and word == chka[itr]:\n if chks[itr] == '1':\n s = word + '=T-POS'\n elif chks[itr] == '0':\n s = word + '=T-NEU'\n else:\n s = word + '=T-NEG'\n itr += 1\n else:\n s = word + '=O'\n now += s + ' '\n now += '\\n'\n it += 1\n return now\n\n\ntrain = get(trainl)\nval = get(vall)\ntext_file = open('/home/debajit15/train.txt', 'w')\nn = 
text_file.write(train)\ntext_file.close()\ntext_file = open('/home/debajit15/dev.txt', 'w')\nn = text_file.write(val)\ntext_file.close()\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport csv\nfrom nltk.tokenize import sent_tokenize\ncsv_file = open('/home/debajit15/train+dev.csv')\npd.set_option('display.max_colwidth', None)\ndf = pd.read_csv(csv_file, sep=',')\ndf = df[pd.notnull(df['Aspects'])]\n\n\ndef train_validate_test_split(df, train_percent=0.8, validate_percent=0.2,\n seed=None):\n np.random.seed(seed)\n perm = np.random.permutation(df.index)\n m = len(df.index)\n train_end = int(train_percent * m)\n train = df.iloc[:train_end]\n validate = df.iloc[train_end:]\n return train, validate\n\n\ntrainl, vall = train_validate_test_split(df)\n\n\ndef get(df):\n col = df[['review_body']]\n print(col.head())\n aspect = df[['Aspects']]\n opinions = df[['Sentiments']]\n print(df.shape[0])\n now = ''\n for o in range(0, df.shape[0]):\n d = col.iloc[o:o + 1]\n sd = d.to_string(index=False, header=None)\n sd = sd[1:]\n l = sent_tokenize(sd)\n a = aspect.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n asp = sa.split(';')\n a = opinions.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n senti = sa.split(';')\n if len(asp) != len(senti) or len(l) != len(asp) or len(l) != len(senti\n ):\n continue\n it = 0\n for i in l:\n chks = [x.strip() for x in senti[it].split(',')]\n chka = [x.strip() for x in asp[it].split(',')]\n g = []\n itr = 0\n if len(chks) != len(chka):\n continue\n for k in chka:\n f = k.split(' ')\n num = chks[itr]\n if len(f) > 1:\n h = 0\n for x in f:\n x = x.strip(' ')\n x = x.strip('\"')\n g += [x]\n if h < len(f) - 1:\n chks.insert(itr, '1')\n h += 1\n else:\n g += f\n itr += 1\n chka = g\n now += i\n now += '####'\n j = i.split(' ')\n itr = 0\n for word in j:\n if itr < len(chka) and word == chka[itr]:\n if chks[itr] == '1':\n s = word + '=T-POS'\n elif chks[itr] == '0':\n s = word + '=T-NEU'\n else:\n s = word + '=T-NEG'\n itr += 1\n else:\n s = word + '=O'\n now += s + ' '\n now += '\\n'\n it += 1\n return now\n\n\ntrain = get(trainl)\nval = 
get(vall)\ntext_file = open('/home/debajit15/train.txt', 'w')\nn = text_file.write(train)\ntext_file.close()\ntext_file = open('/home/debajit15/dev.txt', 'w')\nn = text_file.write(val)\ntext_file.close()\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport csv\n#import nltk\n#nltk.download('punkt')\nfrom nltk.tokenize import sent_tokenize\ncsv_file=open(\"/home/debajit15/train+dev.csv\")\npd.set_option('display.max_colwidth', None)\ndf=pd.read_csv(csv_file,sep=',');\ndf = df[pd.notnull(df['Aspects'])]\n#print(df['Opinion_Words'].iloc[0:1])\n\ndef train_validate_test_split(df, train_percent=.8, validate_percent=.2, seed=None):\n np.random.seed(seed)\n perm = np.random.permutation(df.index)\n m = len(df.index)\n train_end = int(train_percent * m)\n train = df.iloc[:train_end]\n validate = df.iloc[train_end:]\n return train, validate\n\ntrainl,vall=train_validate_test_split(df)\n\ndef get(df):\n\tcol=df[['review_body']]\n\tprint(col.head())\n\taspect=df[['Aspects']]\n\topinions=df[['Sentiments']]\n\tprint(df.shape[0])\n\tnow=\"\"\n\tfor o in range(0,df.shape[0]):\n\t\td=col.iloc[o:o+1]\n\t\tsd=d.to_string(index=False,header=None)\n\t\tsd=sd[1:]\n\t\tl=sent_tokenize(sd)\n\n\t\ta=aspect.iloc[o:o+1]\n\t\tsa=a.to_string(index=False,header=None)\n\t\tasp=sa.split(\";\")\n\n\t\ta=opinions.iloc[o:o+1]\n\t\tsa=a.to_string(index=False,header=None)\n\t\tsenti=sa.split(\";\")\n\n\t\tif(len(asp)!=len(senti) or len(l)!=len(asp) or len(l)!=len(senti)):\n\t\t\tcontinue\n\t\tit=0\n\t\tfor i in l:\n\t\t\tchks=[x.strip() for x in senti[it].split(\",\")]\n\t\t\tchka=[x.strip() for x in asp[it].split(\",\")]\n\n\t\t\tg=[]\n\t\t\titr=0\n\t\t\tif(len(chks)!=len(chka)):\n\t\t\t\tcontinue\n\t\t\tfor k in chka:\n\t\t\t\tf=k.split(\" \")\n\t\t\t\tnum=chks[itr]\n\t\t\t\tif(len(f)>1):\n\t\t\t\t\th=0\n\t\t\t\t\tfor x in f:\n\t\t\t\t\t\tx=x.strip(' ')\n\t\t\t\t\t\tx=x.strip('\"')\n\t\t\t\t\t\tg+=[x]\n\t\t\t\t\t\tif(h<len(f)-1):\n\t\t\t\t\t\t\tchks.insert(itr,'1')\n\t\t\t\t\t\th+=1\n\t\t\t\telse:\n\t\t\t\t\tg+=f\n\t\t\t\titr+=1\n\t\t\tchka=g\n\t\t\tnow+=i\n\t\t\tnow+=\"####\"\n\t\t\tj=i.split(\" \")\n\t\t\titr=0\n\t\t\tfor word in j:\n\t\t\t\tif itr<len(chka) and word==chka[itr] 
:\n\t\t\t\t\tif chks[itr]=='1':\n\t\t\t\t\t\ts=word+\"=T-POS\"\n\t\t\t\t\telif chks[itr]=='0':\n\t\t\t\t\t\ts=word+\"=T-NEU\"\n\t\t\t\t\telse:\n\t\t\t\t\t\ts=word+\"=T-NEG\"\n\t\t\t\t\titr+=1\n\t\t\t\telse:\n\t\t\t\t\ts=word+\"=O\"\n\t\t\t\tnow+=s+\" \"\n\t\t\tnow+=\"\\n\"\n\t\t\tit+=1\n\treturn now\n\n\ntrain=get(trainl)\nval=get(vall)\n\ntext_file = open(\"/home/debajit15/train.txt\", \"w\")\nn = text_file.write(train)\ntext_file.close()\ntext_file = open(\"/home/debajit15/dev.txt\", \"w\")\nn = text_file.write(val)\ntext_file.close()\n\n\n# #print(df[['review_body']])\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.